Merge branch 'master' into chore/authn-fields

This commit is contained in:
zhouzb 2022-04-28 16:09:26 +08:00 committed by GitHub
commit bd0d0d9797
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
136 changed files with 11263 additions and 6974 deletions

1
.gitattributes vendored
View File

@ -7,3 +7,4 @@ scripts/* text eol=lf
*.jpg -text *.jpg -text
*.png -text *.png -text
*.pdf -text *.pdf -text
scripts/erlfmt -text

View File

@ -23,7 +23,7 @@ on:
jobs: jobs:
linux: linux:
runs-on: ubuntu-20.04 runs-on: aws-amd64
strategy: strategy:
fail-fast: false fail-fast: false
@ -43,6 +43,9 @@ jobs:
container: "ghcr.io/emqx/emqx-builder/5.0-10:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os }}" container: "ghcr.io/emqx/emqx-builder/5.0-10:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os }}"
steps: steps:
- name: cleanup
run: |
rm -rf "${GITHUB_WORKSPACE}/"
- uses: actions/checkout@v1 - uses: actions/checkout@v1
- name: prepare - name: prepare
run: | run: |

View File

@ -604,8 +604,12 @@ mqtt 下所有的配置作为全局的默认值存在,它可以被 <code>zone<
mqtt_idle_timeout { mqtt_idle_timeout {
desc { desc {
en: """Close TCP connections from the clients that have not sent MQTT CONNECT message within this interval.""" en: """After the TCP connection is established, if the MQTT CONNECT packet from the client is not received within the time specified by <code>idle_timeout</code>, the connection will be disconnected."""
zh: """关闭在此时间间隔内未发送 MQTT CONNECT 消息的客户端的 TCP 连接。""" zh: """TCP 连接建立后,如果在 <code>idle_timeout</code> 指定的时间内未收到客户端的 MQTT CONNECT 报文,则连接将被断开。"""
}
label: {
en: """Idle Timeout"""
zh: """空闲超时"""
} }
} }
@ -614,19 +618,31 @@ mqtt 下所有的配置作为全局的默认值存在,它可以被 <code>zone<
en: """Maximum MQTT packet size allowed.""" en: """Maximum MQTT packet size allowed."""
zh: """允许的最大 MQTT 报文大小。""" zh: """允许的最大 MQTT 报文大小。"""
} }
label: {
en: """Max Packet Size"""
zh: """最大报文大小"""
}
} }
mqtt_max_clientid_len { mqtt_max_clientid_len {
desc { desc {
en: """Maximum allowed length of MQTT clientId.""" en: """Maximum allowed length of MQTT Client ID."""
zh: """允许的最大 MQTT Client ID 长度""" zh: """允许的最大 MQTT Client ID 长度。"""
}
label: {
en: """Max Client ID Length"""
zh: """最大 Client ID 长度"""
} }
} }
mqtt_max_topic_levels { mqtt_max_topic_levels {
desc { desc {
en: """Maximum topic levels allowed.""" en: """Maximum topic levels allowed."""
zh: """允许的 Topic 最大层级数""" zh: """允许的最大主题层级。"""
}
label: {
en: """Max Topic Levels"""
zh: """最大主题层级"""
} }
} }
@ -635,40 +651,64 @@ mqtt 下所有的配置作为全局的默认值存在,它可以被 <code>zone<
en: """Maximum QoS allowed.""" en: """Maximum QoS allowed."""
zh: """允许的最大 QoS 等级。""" zh: """允许的最大 QoS 等级。"""
} }
label: {
en: """Max QoS"""
zh: """最大 QoS"""
}
} }
mqtt_max_topic_alias { mqtt_max_topic_alias {
desc { desc {
en: """Maximum Topic Alias, 0 means no topic alias supported.""" en: """Maximum topic alias, 0 means no topic alias supported."""
zh: """允许的最大主题别名数0 表示不支持主题别名。""" zh: """允许的最大主题别名数0 表示不支持主题别名。"""
} }
label: {
en: """Max Topic Alias"""
zh: """最大主题别名"""
}
} }
mqtt_retain_available { mqtt_retain_available {
desc { desc {
en: """Support MQTT retained messages.""" en: """Whether to enable support for MQTT retained message."""
zh: """是否支持 retained 消息。""" zh: """是否启用对 MQTT 保留消息的支持。"""
}
label: {
en: """Retain Available"""
zh: """保留消息可用"""
} }
} }
mqtt_wildcard_subscription { mqtt_wildcard_subscription {
desc { desc {
en: """Support MQTT Wildcard Subscriptions.""" en: """Whether to enable support for MQTT wildcard subscription."""
zh: """是否支持主题的通配符订阅。""" zh: """是否启用对 MQTT 通配符订阅的支持。"""
}
label: {
en: """Wildcard Subscription Available"""
zh: """通配符订阅可用"""
} }
} }
mqtt_shared_subscription { mqtt_shared_subscription {
desc { desc {
en: """Support MQTT Shared Subscriptions.""" en: """Whether to enable support for MQTT shared subscription."""
zh: """是否支持 MQTT 共享订阅""" zh: """是否启用对 MQTT 共享订阅的支持。"""
}
label: {
en: """Shared Subscription Available"""
zh: """共享订阅可用"""
} }
} }
mqtt_ignore_loop_deliver { mqtt_ignore_loop_deliver {
desc { desc {
en: """Ignore loop delivery of messages for MQTT v3.1.1/v3.1.0.""" en: """Ignore loop delivery of messages for MQTT v3.1.1/v3.1.0, similar to <code>No Local</code> subscription option in MQTT 5.0"""
zh: """是否为 MQTT v3.1.1/v3.1.0 客户端忽略接收自己发布出消息""" zh: """是否为 MQTT v3.1.1/v3.1.0 客户端忽略投递自己发布的消息,类似于 MQTT 5.0 中的 <code>No Local</code> 订阅选项"""
}
label: {
en: """Ignore Loop Deliver"""
zh: """忽略循环投递"""
} }
} }
@ -679,35 +719,53 @@ When set to true, invalid utf8 strings in for example client ID, topic name, etc
zh: """是否以严格模式解析 MQTT 消息。 zh: """是否以严格模式解析 MQTT 消息。
当设置为 true 时,例如客户端 ID、主题名称等中的无效 utf8 字符串将导致客户端断开连接。""" 当设置为 true 时,例如客户端 ID、主题名称等中的无效 utf8 字符串将导致客户端断开连接。"""
} }
label: {
en: """Strict Mode"""
zh: """严格模式"""
}
} }
mqtt_response_information { mqtt_response_information {
desc { desc {
en: """Specify the response information returned to the client. This feature is disabled if is set to \"\".""" en: """Specify the response information returned to the client. This feature is disabled if is set to \"\". Applies only to clients using MQTT 5.0."""
zh: """指定返回给客户端的响应信息。如果设置为 \"\",则禁用此功能。""" zh: """指定返回给客户端的响应信息。如果设置为 \"\",则禁用此功能。仅适用于使用 MQTT 5.0 协议的客户端。"""
}
label: {
en: """Response Information"""
zh: """响应信息"""
} }
} }
mqtt_server_keepalive { mqtt_server_keepalive {
desc { desc {
en: """'Server Keep Alive' of MQTT 5.0. en: """The keep alive that EMQX requires the client to use. If configured as <code>disabled</code>, it means that the keep alive specified by the client will be used. Requires <code>Server Keep Alive</code> in MQTT 5.0, so it is only applicable to clients using MQTT 5.0 protocol."""
If the server returns a 'Server Keep Alive' in the CONNACK packet, the client MUST use that value instead of the value it sent as the 'Keep Alive'.""" zh: """EMQX 要求客户端使用的保活时间,配置为 <code>disabled</code> 表示将使用客户端指定的保活时间。需要用到 MQTT 5.0 中的 <code>Server Keep Alive</code>,因此仅适用于使用 MQTT 5.0 协议的客户端。"""
zh: """MQTT 5.0 的 'Server Keep Alive' 属性。 }
如果服务器在 CONNACK 数据包中返回'Server Keep Alive',则客户端必须使用该值作为实际的 'Keep Alive' 值。""" label: {
en: """Server Keep Alive"""
zh: """服务端保持连接"""
} }
} }
mqtt_keepalive_backoff { mqtt_keepalive_backoff {
desc { desc {
en: """The backoff for MQTT keepalive timeout. The broker will close the connection after idling for 'Keepalive * backoff * 2'.""" en: """The backoff multiplier used by the broker to determine the client keep alive timeout. If EMQX doesn't receive any packet in <code>Keep Alive * Backoff * 2</code> seconds, EMQX will close the current connection."""
zh: """Broker 判定客户端 Keep Alive 超时的退避乘数。EMQX 将在'Keepalive * backoff * 2' 空闲后关闭连接。""" zh: """Broker 判定客户端保活超时使用的退避乘数。如果 EMQX 在 <code>Keep Alive * Backoff * 2</code> 秒内未收到任何报文EMQX 将关闭当前连接。"""
}
label: {
en: """Keep Alive Backoff"""
zh: """保持连接退避乘数"""
} }
} }
mqtt_max_subscriptions { mqtt_max_subscriptions {
desc { desc {
en: """Maximum number of subscriptions allowed.""" en: """Maximum number of subscriptions allowed per client."""
zh: """允许的每个客户端最大订阅数""" zh: """允许每个客户端建立的最大订阅数量。"""
}
label: {
en: """Max Subscriptions"""
zh: """最大订阅数量"""
} }
} }
@ -716,40 +774,65 @@ If the server returns a 'Server Keep Alive' in the CONNACK packet, the client MU
en: """Force upgrade of QoS level according to subscription.""" en: """Force upgrade of QoS level according to subscription."""
zh: """投递消息时,是否根据订阅主题时的 QoS 等级来强制提升派发的消息的 QoS 等级。""" zh: """投递消息时,是否根据订阅主题时的 QoS 等级来强制提升派发的消息的 QoS 等级。"""
} }
label: {
en: """Upgrade QoS"""
zh: """升级 QoS"""
}
} }
mqtt_max_inflight { mqtt_max_inflight {
desc { desc {
en: """Maximum size of the Inflight Window storing QoS1/2 messages delivered but un-acked.""" en: """Maximum number of QoS 1 and QoS 2 messages that are allowed to be delivered simultaneously before completing the acknowledgment."""
zh: """飞行窗口的最大值。""" zh: """允许在完成应答前同时投递的 QoS 1 和 QoS 2 消息的最大数量。"""
} }
label: {
en: """Max Inflight"""
zh: """最大飞行窗口"""
}
} }
mqtt_retry_interval { mqtt_retry_interval {
desc { desc {
en: """Retry interval for QoS1/2 message delivering.""" en: """Retry interval for QoS 1/2 message delivering."""
zh: """QoS1/2 消息的重新投递间隔。""" zh: """QoS 1/2 消息的重新投递间隔。"""
}
label: {
en: """Retry Interval"""
zh: """重试间隔"""
} }
} }
mqtt_max_awaiting_rel { mqtt_max_awaiting_rel {
desc { desc {
en: """Maximum QoS2 packets (Client -> Broker) awaiting PUBREL.""" en: """Maximum QoS 2 packets (Client -> Broker) awaiting PUBREL."""
zh: """PUBREL (Client -> Broker) 最大等待队列长度。""" zh: """PUBREL (Client -> Broker) 最大等待队列长度。"""
} }
label: {
en: """Max Awaiting PUBREL"""
zh: """Max Awaiting PUBREL"""
}
} }
mqtt_await_rel_timeout { mqtt_await_rel_timeout {
desc { desc {
en: """The QoS2 messages (Client -> Broker) will be dropped if awaiting PUBREL timeout.""" en: """The QoS 2 messages (Client -> Broker) will be dropped if awaiting PUBREL timeout."""
zh: """PUBREL (Client -> Broker) 最大等待时间,超时则会被丢弃。""" zh: """PUBREL (Client -> Broker) 最大等待时间,超时则会被丢弃。"""
} }
label: {
en: """Max Awaiting PUBREL TIMEOUT"""
zh: """Max Awaiting PUBREL TIMEOUT"""
}
} }
mqtt_session_expiry_interval { mqtt_session_expiry_interval {
desc { desc {
en: """Default session expiry interval for MQTT V3.1.1 connections.""" en: """Specifies how long the session will expire after the connection is disconnected, only for non-MQTT 5.0 connections."""
zh: """Session 默认超时时间。""" zh: """指定会话将在连接断开后多久过期,仅适用于非 MQTT 5.0 的连接。"""
}
label: {
en: """Session Expiry Interval"""
zh: """会话过期间隔"""
} }
} }
@ -758,6 +841,10 @@ If the server returns a 'Server Keep Alive' in the CONNACK packet, the client MU
en: """Maximum queue length. Enqueued messages when persistent client disconnected, or inflight window is full.""" en: """Maximum queue length. Enqueued messages when persistent client disconnected, or inflight window is full."""
zh: """消息队列最大长度。持久客户端断开连接或飞行窗口已满时排队的消息长度。""" zh: """消息队列最大长度。持久客户端断开连接或飞行窗口已满时排队的消息长度。"""
} }
label: {
en: """Max Message Queue Length"""
zh: """最大消息队列长度"""
}
} }
mqtt_mqueue_priorities { mqtt_mqueue_priorities {
@ -783,42 +870,96 @@ To configure <code>\"topic/1\" > \"topic/2\"</code>:
<code>mqueue_priorities: {\"topic/1\": 10, \"topic/2\": 8}</code> <code>mqueue_priorities: {\"topic/1\": 10, \"topic/2\": 8}</code>
""" """
} }
label: {
en: """Topic Priorities"""
zh: """主题优先级"""
}
} }
mqtt_mqueue_default_priority { mqtt_mqueue_default_priority {
desc { desc {
en: """Default to the highest priority for topics not matching priority table.""" en: """Default topic priority, which will be used by topics not in <code>Topic Priorities</code> (<code>mqueue_priorities</code>)."""
zh: """主题默认的优先级,不在 <code>mqtt.mqueue_priorities</code> 中的主题将会使用该优先级。""" zh: """默认的主题优先级,不在 <code>主题优先级</code><code>mqueue_priorities</code> 中的主题将会使用该优先级。"""
}
label: {
en: """Default Topic Priorities"""
zh: """默认主题优先级"""
} }
} }
mqtt_mqueue_store_qos0 { mqtt_mqueue_store_qos0 {
desc { desc {
en: """Support enqueue QoS0 messages.""" en: """Specifies whether to store QoS 0 messages in the message queue while the connection is down but the session remains."""
zh: """消息队列是否存储 QoS0 消息。""" zh: """指定在连接断开但会话保持期间,是否需要在消息队列中存储 QoS 0 消息。"""
}
label: {
en: """Store QoS 0 Message"""
zh: """存储 QoS 0 消息"""
} }
} }
mqtt_use_username_as_clientid { mqtt_use_username_as_clientid {
desc { desc {
en: """Replace client ID with the username.""" en: """Whether to user Client ID as Username.
zh: """是否使用 Username 替换 Client ID。""" This setting takes effect later than <code>Use Peer Certificate as Username</code> (<code>peer_cert_as_username</code>) and <code>Use peer certificate as Client ID</code> (<code>peer_cert_as_clientid</code>).
"""
zh: """是否使用用户名作为客户端 ID。
此设置的作用时间晚于 <code>使用对端证书作为用户名</code><code>peer_cert_as_username</code> 和 <code>使用对端证书作为客户端 ID</code><code>peer_cert_as_clientid</code>)。
"""
}
label: {
en: """Use Username as Client ID"""
zh: """使用用户名作为客户端 ID"""
} }
} }
mqtt_peer_cert_as_username { mqtt_peer_cert_as_username {
desc { desc {
en: """Use the CN, DN or CRT field from the client certificate as a username. en: """Use the CN, DN field in the peer certificate or the entire certificate content as Username. Only works for the TLS connection.
Only works for the TLS connection.""" Supported configurations are the following:
zh: """使用客户端证书中的 CN, DN 字段或整个证书来作为客户端用户名。""" - <code>cn</code>: Take the CN field of the certificate as Username
- <code>dn</code>: Take the DN field of the certificate as Username
- <code>crt</code>: Take the content of the <code>DER</code> or <code>PEM</code> certificate as Username
- <code>pem</code>: Convert <code>DER</code> certificate content to <code>PEM</code> format as Username
- <code>md5</code>: Take the MD5 value of the content of the <code>DER</code> or <code>PEM</code> certificate as Username
"""
zh: """使用对端证书中的 CN, DN 字段或整个证书内容来作为用户名。仅适用于 TLS 连接。
目前支持配置为以下内容:
- <code>cn</code>: 取证书的 CN 字段作为 Username
- <code>dn</code>: 取证书的 DN 字段作为 Username
- <code>crt</code>: 取 <code>DER</code> 或 <code>PEM</code> 证书的内容作为 Username
- <code>pem</code>: 将 <code>DER</code> 证书内容转换为 <code>PEM</code> 格式后作为 Username
- <code>md5</code>: 取 <code>DER</code> 或 <code>PEM</code> 证书的内容的 MD5 值作为 Username
"""
}
label: {
en: """Use Peer Certificate as Username"""
zh: """使用对端证书作为用户名"""
} }
} }
mqtt_peer_cert_as_clientid { mqtt_peer_cert_as_clientid {
desc { desc {
en: """Use the CN, DN or CRT field from the client certificate as a clientid. en: """Use the CN, DN field in the peer certificate or the entire certificate content as Client ID. Only works for the TLS connection.
Only works for the TLS connection.""" Supported configurations are the following:
zh: """使用客户端证书中的 CN, DN 字段或整个证书来作为客户端 ID。""" - <code>cn</code>: Take the CN field of the certificate as Client ID
- <code>dn</code>: Take the DN field of the certificate as Client ID
- <code>crt</code>: Take the content of the <code>DER</code> or <code>PEM</code> certificate as Client ID
- <code>pem</code>: Convert <code>DER</code> certificate content to <code>PEM</code> format as Client ID
- <code>md5</code>: Take the MD5 value of the content of the <code>DER</code> or <code>PEM</code> certificate as Client ID
"""
zh: """使用对端证书中的 CN, DN 字段或整个证书内容来作为客户端 ID。仅适用于 TLS 连接。
目前支持配置为以下内容:
- <code>cn</code>: 取证书的 CN 字段作为 Client ID
- <code>dn</code>: 取证书的 DN 字段作为 Client ID
- <code>crt</code>: 取 <code>DER</code> 或 <code>PEM</code> 证书的内容作为 Client ID
- <code>pem</code>: 将 <code>DER</code> 证书内容转换为 <code>PEM</code> 格式后作为 Client ID
- <code>md5</code>: 取 <code>DER</code> 或 <code>PEM</code> 证书的内容的 MD5 值作为 Client ID
"""
}
label: {
en: """Use Peer Certificate as Client ID"""
zh: """使用对端证书作为客户端 ID"""
} }
} }
@ -874,11 +1015,11 @@ Only works for the TLS connection."""
broker_shared_dispatch_ack_enabled { broker_shared_dispatch_ack_enabled {
desc { desc {
en: """Enable/disable shared dispatch acknowledgement for QoS1 and QoS2 messages. en: """Enable/disable shared dispatch acknowledgement for QoS 1 and QoS 2 messages.
This should allow messages to be dispatched to a different subscriber in the group in case the picked (based on `shared_subscription_strategy`) subscriber is offline. This should allow messages to be dispatched to a different subscriber in the group in case the picked (based on `shared_subscription_strategy`) subscriber is offline.
""" """
zh: """启用/禁用 QoS1 和 QoS2 消息的共享派发确认。 zh: """启用/禁用 QoS 1 和 QoS 2 消息的共享派发确认。
开启后,允许将消息从未及时回复 ACK 的订阅者 (例如,客户端离线)重新派发给另外一个订阅者。 开启后,允许将消息从未及时回复 ACK 的订阅者 (例如,客户端离线)重新派发给另外一个订阅者。
""" """
} }

View File

@ -29,7 +29,7 @@
{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.1"}}}, {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.1"}}},
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.12.4"}}}, {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.12.4"}}},
{gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}}, {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}},
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.27.3"}}}, {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.27.4"}}},
{pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}}, {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},
{recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}}, {recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}},
{snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.0"}}} {snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.0"}}}

View File

@ -57,6 +57,7 @@
validate_heap_size/1, validate_heap_size/1,
parse_user_lookup_fun/1, parse_user_lookup_fun/1,
validate_alarm_actions/1, validate_alarm_actions/1,
non_empty_string/1,
validations/0 validations/0
]). ]).
@ -1515,7 +1516,7 @@ base_listener() ->
)}, )},
{"acceptors", {"acceptors",
sc( sc(
integer(), pos_integer(),
#{ #{
default => 16, default => 16,
desc => ?DESC(base_listener_acceptors) desc => ?DESC(base_listener_acceptors)
@ -1523,7 +1524,7 @@ base_listener() ->
)}, )},
{"max_connections", {"max_connections",
sc( sc(
hoconsc:union([infinity, integer()]), hoconsc:union([infinity, pos_integer()]),
#{ #{
default => infinity, default => infinity,
desc => ?DESC(base_listener_max_connections) desc => ?DESC(base_listener_max_connections)
@ -1823,13 +1824,7 @@ common_ssl_opts_schema(Defaults) ->
%% @doc Make schema for SSL listener options. %% @doc Make schema for SSL listener options.
%% When it's for ranch listener, an extra field `handshake_timeout' is added. %% When it's for ranch listener, an extra field `handshake_timeout' is added.
-spec server_ssl_opts_schema(map(), boolean()) -> hocon_schema:field_schema(). -spec server_ssl_opts_schema(map(), boolean()) -> hocon_schema:field_schema().
server_ssl_opts_schema(Defaults1, IsRanchListener) -> server_ssl_opts_schema(Defaults, IsRanchListener) ->
Defaults0 = #{
cacertfile => emqx:cert_file("cacert.pem"),
certfile => emqx:cert_file("cert.pem"),
keyfile => emqx:cert_file("key.pem")
},
Defaults = maps:merge(Defaults0, Defaults1),
D = fun(Field) -> maps:get(to_atom(Field), Defaults, undefined) end, D = fun(Field) -> maps:get(to_atom(Field), Defaults, undefined) end,
Df = fun(Field, Default) -> maps:get(to_atom(Field), Defaults, Default) end, Df = fun(Field, Default) -> maps:get(to_atom(Field), Defaults, Default) end,
common_ssl_opts_schema(Defaults) ++ common_ssl_opts_schema(Defaults) ++
@ -1882,15 +1877,7 @@ server_ssl_opts_schema(Defaults1, IsRanchListener) ->
%% @doc Make schema for SSL client. %% @doc Make schema for SSL client.
-spec client_ssl_opts_schema(map()) -> hocon_schema:field_schema(). -spec client_ssl_opts_schema(map()) -> hocon_schema:field_schema().
client_ssl_opts_schema(Defaults1) -> client_ssl_opts_schema(Defaults) ->
%% assert
true = lists:all(fun(K) -> is_atom(K) end, maps:keys(Defaults1)),
Defaults0 = #{
cacertfile => emqx:cert_file("cacert.pem"),
certfile => emqx:cert_file("client-cert.pem"),
keyfile => emqx:cert_file("client-key.pem")
},
Defaults = maps:merge(Defaults0, Defaults1),
common_ssl_opts_schema(Defaults) ++ common_ssl_opts_schema(Defaults) ++
[ [
{"server_name_indication", {"server_name_indication",
@ -1898,6 +1885,7 @@ client_ssl_opts_schema(Defaults1) ->
hoconsc:union([disable, string()]), hoconsc:union([disable, string()]),
#{ #{
required => false, required => false,
validator => fun emqx_schema:non_empty_string/1,
desc => ?DESC(client_ssl_opts_schema_server_name_indication) desc => ?DESC(client_ssl_opts_schema_server_name_indication)
} }
)} )}
@ -2177,3 +2165,8 @@ authentication(Type) ->
-spec qos() -> typerefl:type(). -spec qos() -> typerefl:type().
qos() -> qos() ->
typerefl:alias("qos", typerefl:union([0, 1, 2])). typerefl:alias("qos", typerefl:union([0, 1, 2])).
non_empty_string(<<>>) -> {error, empty_string_not_allowed};
non_empty_string("") -> {error, empty_string_not_allowed};
non_empty_string(S) when is_binary(S); is_list(S) -> ok;
non_empty_string(_) -> {error, invalid_string}.

View File

@ -476,7 +476,7 @@ to_client_opts(Opts) ->
CertFile = ensure_str(Get(certfile)), CertFile = ensure_str(Get(certfile)),
CAFile = ensure_str(Get(cacertfile)), CAFile = ensure_str(Get(cacertfile)),
Verify = GetD(verify, verify_none), Verify = GetD(verify, verify_none),
SNI = ensure_str(Get(server_name_indication)), SNI = ensure_sni(Get(server_name_indication)),
Versions = integral_versions(Get(versions)), Versions = integral_versions(Get(versions)),
Ciphers = integral_ciphers(Versions, Get(ciphers)), Ciphers = integral_ciphers(Versions, Get(ciphers)),
filter([ filter([
@ -505,6 +505,11 @@ fuzzy_map_get(Key, Options, Default) ->
Default Default
end. end.
ensure_sni(disable) -> disable;
ensure_sni(undefined) -> undefined;
ensure_sni(L) when is_list(L) -> L;
ensure_sni(B) when is_binary(B) -> unicode:characters_to_list(B, utf8).
ensure_str(undefined) -> undefined; ensure_str(undefined) -> undefined;
ensure_str(L) when is_list(L) -> L; ensure_str(L) when is_list(L) -> L;
ensure_str(B) when is_binary(B) -> unicode:characters_to_list(B, utf8). ensure_str(B) when is_binary(B) -> unicode:characters_to_list(B, utf8).

View File

@ -272,78 +272,65 @@ check_origin_header(Req, #{listener := {Type, Listener}} = Opts) ->
false -> ok false -> ok
end. end.
websocket_init([ websocket_init([Req, Opts]) ->
Req, #{zone := Zone, limiter := LimiterCfg, listener := {Type, Listener}} = Opts,
#{zone := Zone, limiter := LimiterCfg, listener := {Type, Listener}} = Opts case check_max_connection(Type, Listener) of
]) -> allow ->
{Peername, Peercert} = {Peername, PeerCert} = get_peer_info(Type, Listener, Req, Opts),
case Sockname = cowboy_req:sock(Req),
emqx_config:get_listener_conf(Type, Listener, [proxy_protocol]) andalso WsCookie = get_ws_cookie(Req),
maps:get(proxy_header, Req) ConnInfo = #{
of socktype => ws,
#{src_address := SrcAddr, src_port := SrcPort, ssl := SSL} -> peername => Peername,
SourceName = {SrcAddr, SrcPort}, sockname => Sockname,
%% Notice: Only CN is available in Proxy Protocol V2 additional info peercert => PeerCert,
SourceSSL = ws_cookie => WsCookie,
case maps:get(cn, SSL, undefined) of conn_mod => ?MODULE
undeined -> nossl; },
CN -> [{pp2_ssl_cn, CN}] Limiter = emqx_limiter_container:get_limiter_by_names(
end, [?LIMITER_BYTES_IN, ?LIMITER_MESSAGE_IN], LimiterCfg
{SourceName, SourceSSL}; ),
#{src_address := SrcAddr, src_port := SrcPort} -> MQTTPiggyback = get_ws_opts(Type, Listener, mqtt_piggyback),
SourceName = {SrcAddr, SrcPort}, FrameOpts = #{
{SourceName, nossl}; strict_mode => emqx_config:get_zone_conf(Zone, [mqtt, strict_mode]),
_ -> max_size => emqx_config:get_zone_conf(Zone, [mqtt, max_packet_size])
{get_peer(Req, Opts), cowboy_req:cert(Req)} },
end, ParseState = emqx_frame:initial_parse_state(FrameOpts),
Sockname = cowboy_req:sock(Req), Serialize = emqx_frame:serialize_opts(),
WsCookie = Channel = emqx_channel:init(ConnInfo, Opts),
try GcState = get_force_gc(Zone),
cowboy_req:parse_cookies(Req) StatsTimer = get_stats_enable(Zone),
catch %% MQTT Idle Timeout
error:badarg -> IdleTimeout = emqx_channel:get_mqtt_conf(Zone, idle_timeout),
?SLOG(error, #{msg => "bad_cookie"}), IdleTimer = start_timer(IdleTimeout, idle_timeout),
undefined; tune_heap_size(Channel),
Error:Reason -> emqx_logger:set_metadata_peername(esockd:format(Peername)),
?SLOG(error, #{ {ok,
msg => "failed_to_parse_cookie", #state{
exception => Error, peername = Peername,
reason => Reason sockname = Sockname,
}), sockstate = running,
undefined mqtt_piggyback = MQTTPiggyback,
end, limiter = Limiter,
ConnInfo = #{ parse_state = ParseState,
socktype => ws, serialize = Serialize,
peername => Peername, channel = Channel,
sockname => Sockname, gc_state = GcState,
peercert => Peercert, postponed = [],
ws_cookie => WsCookie, stats_timer = StatsTimer,
conn_mod => ?MODULE idle_timeout = IdleTimeout,
}, idle_timer = IdleTimer,
Limiter = emqx_limiter_container:get_limiter_by_names( zone = Zone,
[?LIMITER_BYTES_IN, ?LIMITER_MESSAGE_IN], LimiterCfg listener = {Type, Listener},
), limiter_timer = undefined,
MQTTPiggyback = get_ws_opts(Type, Listener, mqtt_piggyback), limiter_cache = queue:new()
FrameOpts = #{ },
strict_mode => emqx_config:get_zone_conf(Zone, [mqtt, strict_mode]), hibernate};
max_size => emqx_config:get_zone_conf(Zone, [mqtt, max_packet_size]) {denny, Reason} ->
}, {stop, Reason}
ParseState = emqx_frame:initial_parse_state(FrameOpts), end.
Serialize = emqx_frame:serialize_opts(),
Channel = emqx_channel:init(ConnInfo, Opts), tune_heap_size(Channel) ->
GcState =
case emqx_config:get_zone_conf(Zone, [force_gc]) of
#{enable := false} -> undefined;
GcPolicy -> emqx_gc:init(GcPolicy)
end,
StatsTimer =
case emqx_config:get_zone_conf(Zone, [stats, enable]) of
true -> undefined;
false -> disabled
end,
%% MQTT Idle Timeout
IdleTimeout = emqx_channel:get_mqtt_conf(Zone, idle_timeout),
IdleTimer = start_timer(IdleTimeout, idle_timeout),
case case
emqx_config:get_zone_conf( emqx_config:get_zone_conf(
emqx_channel:info(zone, Channel), emqx_channel:info(zone, Channel),
@ -352,29 +339,56 @@ websocket_init([
of of
#{enable := false} -> ok; #{enable := false} -> ok;
ShutdownPolicy -> emqx_misc:tune_heap_size(ShutdownPolicy) ShutdownPolicy -> emqx_misc:tune_heap_size(ShutdownPolicy)
end, end.
emqx_logger:set_metadata_peername(esockd:format(Peername)),
{ok, get_stats_enable(Zone) ->
#state{ case emqx_config:get_zone_conf(Zone, [stats, enable]) of
peername = Peername, true -> undefined;
sockname = Sockname, false -> disabled
sockstate = running, end.
mqtt_piggyback = MQTTPiggyback,
limiter = Limiter, get_force_gc(Zone) ->
parse_state = ParseState, case emqx_config:get_zone_conf(Zone, [force_gc]) of
serialize = Serialize, #{enable := false} -> undefined;
channel = Channel, GcPolicy -> emqx_gc:init(GcPolicy)
gc_state = GcState, end.
postponed = [],
stats_timer = StatsTimer, get_ws_cookie(Req) ->
idle_timeout = IdleTimeout, try
idle_timer = IdleTimer, cowboy_req:parse_cookies(Req)
zone = Zone, catch
listener = {Type, Listener}, error:badarg ->
limiter_timer = undefined, ?SLOG(error, #{msg => "bad_cookie"}),
limiter_cache = queue:new() undefined;
}, Error:Reason ->
hibernate}. ?SLOG(error, #{
msg => "failed_to_parse_cookie",
exception => Error,
reason => Reason
}),
undefined
end.
get_peer_info(Type, Listener, Req, Opts) ->
case
emqx_config:get_listener_conf(Type, Listener, [proxy_protocol]) andalso
maps:get(proxy_header, Req)
of
#{src_address := SrcAddr, src_port := SrcPort, ssl := SSL} ->
SourceName = {SrcAddr, SrcPort},
%% Notice: Only CN is available in Proxy Protocol V2 additional info
SourceSSL =
case maps:get(cn, SSL, undefined) of
undeined -> nossl;
CN -> [{pp2_ssl_cn, CN}]
end,
{SourceName, SourceSSL};
#{src_address := SrcAddr, src_port := SrcPort} ->
SourceName = {SrcAddr, SrcPort},
{SourceName, nossl};
_ ->
{get_peer(Req, Opts), cowboy_req:cert(Req)}
end.
websocket_handle({binary, Data}, State) when is_list(Data) -> websocket_handle({binary, Data}, State) when is_list(Data) ->
websocket_handle({binary, iolist_to_binary(Data)}, State); websocket_handle({binary, iolist_to_binary(Data)}, State);
@ -1000,6 +1014,26 @@ get_peer(Req, #{listener := {Type, Listener}}) ->
_:_ -> {Addr, PeerPort} _:_ -> {Addr, PeerPort}
end. end.
check_max_connection(Type, Listener) ->
case emqx_config:get_listener_conf(Type, Listener, [max_connections]) of
infinity ->
allow;
Max ->
MatchSpec = [{{'_', emqx_ws_connection}, [], [true]}],
Curr = ets:select_count(emqx_channel_conn, MatchSpec),
case Curr >= Max of
false ->
allow;
true ->
Reason = #{
max => Max,
current => Curr,
msg => "websocket_max_connections_limited"
},
?SLOG(warning, Reason),
{denny, Reason}
end
end.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% For CT tests %% For CT tests
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------

View File

@ -351,7 +351,7 @@ Filter supports the following placeholders:
jwt { jwt {
desc { desc {
en: """Authorization using ACL rules from authentication JWT.""" en: """Authorization using ACL rules from authentication JWT."""
zh: """Authorization using ACL rules from authentication JWT.""" zh: """使用 JWT 登录认证中携带的 ACL 规则来进行发布和订阅的授权。"""
} }
label { label {
en: """jwt""" en: """jwt"""

View File

@ -180,7 +180,6 @@ do_post_config_update({{?CMD_REPLACE, Type}, RawNewSource}, Sources) ->
{OldSource, Front, Rear} = take(Type, OldSources), {OldSource, Front, Rear} = take(Type, OldSources),
NewSource = get_source_by_type(type(RawNewSource), Sources), NewSource = get_source_by_type(type(RawNewSource), Sources),
ok = ensure_resource_deleted(OldSource), ok = ensure_resource_deleted(OldSource),
clear_certs(OldSource),
InitedSources = init_source(NewSource), InitedSources = init_source(NewSource),
Front ++ [InitedSources] ++ Rear; Front ++ [InitedSources] ++ Rear;
do_post_config_update({{?CMD_DELETE, Type}, _RawNewSource}, _Sources) -> do_post_config_update({{?CMD_DELETE, Type}, _RawNewSource}, _Sources) ->

View File

@ -1,8 +1,9 @@
{erl_opts, [debug_info]}. {erl_opts, [debug_info]}.
{deps, [ {emqx, {path, "../emqx"}} {deps, [{emqx, {path, "../emqx"}}]}.
]}.
{shell, [ {shell, [
% {config, "config/sys.config"}, % {config, "config/sys.config"},
{apps, [emqx_bridge]} {apps, [emqx_bridge]}
]}. ]}.
{project_plugins, [erlfmt]}.

View File

@ -1,18 +1,18 @@
%% -*- mode: erlang -*- %% -*- mode: erlang -*-
{application, emqx_bridge, {application, emqx_bridge, [
[{description, "An OTP application"}, {description, "An OTP application"},
{vsn, "0.1.0"}, {vsn, "0.1.0"},
{registered, []}, {registered, []},
{mod, {emqx_bridge_app, []}}, {mod, {emqx_bridge_app, []}},
{applications, {applications, [
[kernel, kernel,
stdlib, stdlib,
emqx, emqx,
emqx_connector emqx_connector
]}, ]},
{env,[]}, {env, []},
{modules, []}, {modules, []},
{licenses, ["Apache 2.0"]}, {licenses, ["Apache 2.0"]},
{links, []} {links, []}
]}. ]}.

View File

@ -18,48 +18,48 @@
-include_lib("emqx/include/emqx.hrl"). -include_lib("emqx/include/emqx.hrl").
-include_lib("emqx/include/logger.hrl"). -include_lib("emqx/include/logger.hrl").
-export([ post_config_update/5 -export([post_config_update/5]).
]).
-export([ load_hook/0 -export([
, unload_hook/0 load_hook/0,
]). unload_hook/0
]).
-export([on_message_publish/1]). -export([on_message_publish/1]).
-export([ resource_type/1 -export([
, bridge_type/1 resource_type/1,
, resource_id/1 bridge_type/1,
, resource_id/2 resource_id/1,
, bridge_id/2 resource_id/2,
, parse_bridge_id/1 bridge_id/2,
]). parse_bridge_id/1
]).
-export([ load/0 -export([
, lookup/1 load/0,
, lookup/2 lookup/1,
, lookup/3 lookup/2,
, list/0 lookup/3,
, list_bridges_by_connector/1 list/0,
, create/2 list_bridges_by_connector/1,
, create/3 create/2,
, recreate/2 create/3,
, recreate/3 recreate/2,
, create_dry_run/2 recreate/3,
, remove/1 create_dry_run/2,
, remove/2 remove/1,
, update/2 remove/2,
, update/3 update/2,
, stop/2 update/3,
, restart/2 stop/2,
, reset_metrics/1 restart/2,
]). reset_metrics/1
]).
-export([ send_message/2 -export([send_message/2]).
]).
-export([ config_key_path/0 -export([config_key_path/0]).
]).
%% exported for `emqx_telemetry' %% exported for `emqx_telemetry'
-export([get_basic_usage_info/0]). -export([get_basic_usage_info/0]).
@ -69,18 +69,25 @@ load_hook() ->
load_hook(Bridges). load_hook(Bridges).
load_hook(Bridges) -> load_hook(Bridges) ->
lists:foreach(fun({_Type, Bridge}) -> lists:foreach(
lists:foreach(fun({_Name, BridgeConf}) -> fun({_Type, Bridge}) ->
lists:foreach(
fun({_Name, BridgeConf}) ->
do_load_hook(BridgeConf) do_load_hook(BridgeConf)
end, maps:to_list(Bridge)) end,
end, maps:to_list(Bridges)). maps:to_list(Bridge)
)
end,
maps:to_list(Bridges)
).
do_load_hook(#{local_topic := _} = Conf) -> do_load_hook(#{local_topic := _} = Conf) ->
case maps:get(direction, Conf, egress) of case maps:get(direction, Conf, egress) of
egress -> emqx_hooks:put('message.publish', {?MODULE, on_message_publish, []}); egress -> emqx_hooks:put('message.publish', {?MODULE, on_message_publish, []});
ingress -> ok ingress -> ok
end; end;
do_load_hook(_Conf) -> ok. do_load_hook(_Conf) ->
ok.
unload_hook() -> unload_hook() ->
ok = emqx_hooks:del('message.publish', {?MODULE, on_message_publish}). ok = emqx_hooks:del('message.publish', {?MODULE, on_message_publish}).
@ -90,23 +97,36 @@ on_message_publish(Message = #message{topic = Topic, flags = Flags}) ->
false -> false ->
Msg = emqx_rule_events:eventmsg_publish(Message), Msg = emqx_rule_events:eventmsg_publish(Message),
send_to_matched_egress_bridges(Topic, Msg); send_to_matched_egress_bridges(Topic, Msg);
true -> ok true ->
ok
end, end,
{ok, Message}. {ok, Message}.
send_to_matched_egress_bridges(Topic, Msg) -> send_to_matched_egress_bridges(Topic, Msg) ->
lists:foreach(fun (Id) -> lists:foreach(
try send_message(Id, Msg) of fun(Id) ->
{error, Reason} -> try send_message(Id, Msg) of
?SLOG(error, #{msg => "send_message_to_bridge_failed", {error, Reason} ->
bridge => Id, error => Reason}); ?SLOG(error, #{
_ -> ok msg => "send_message_to_bridge_failed",
catch Err:Reason:ST -> bridge => Id,
?SLOG(error, #{msg => "send_message_to_bridge_exception", error => Reason
bridge => Id, error => Err, reason => Reason, });
stacktrace => ST}) _ ->
end ok
end, get_matched_bridges(Topic)). catch
Err:Reason:ST ->
?SLOG(error, #{
msg => "send_message_to_bridge_exception",
bridge => Id,
error => Err,
reason => Reason,
stacktrace => ST
})
end
end,
get_matched_bridges(Topic)
).
send_message(BridgeId, Message) -> send_message(BridgeId, Message) ->
{BridgeType, BridgeName} = parse_bridge_id(BridgeId), {BridgeType, BridgeName} = parse_bridge_id(BridgeId),
@ -132,8 +152,8 @@ bridge_type(emqx_connector_mqtt) -> mqtt;
bridge_type(emqx_connector_http) -> http. bridge_type(emqx_connector_http) -> http.
post_config_update(_, _Req, NewConf, OldConf, _AppEnv) -> post_config_update(_, _Req, NewConf, OldConf, _AppEnv) ->
#{added := Added, removed := Removed, changed := Updated} #{added := Added, removed := Removed, changed := Updated} =
= diff_confs(NewConf, OldConf), diff_confs(NewConf, OldConf),
%% The config update will be failed if any task in `perform_bridge_changes` failed. %% The config update will be failed if any task in `perform_bridge_changes` failed.
Result = perform_bridge_changes([ Result = perform_bridge_changes([
{fun remove/3, Removed}, {fun remove/3, Removed},
@ -150,15 +170,19 @@ perform_bridge_changes(Tasks) ->
perform_bridge_changes([], Result) -> perform_bridge_changes([], Result) ->
Result; Result;
perform_bridge_changes([{Action, MapConfs} | Tasks], Result0) -> perform_bridge_changes([{Action, MapConfs} | Tasks], Result0) ->
Result = maps:fold(fun Result = maps:fold(
({_Type, _Name}, _Conf, {error, Reason}) -> fun
{error, Reason}; ({_Type, _Name}, _Conf, {error, Reason}) ->
({Type, Name}, Conf, _) -> {error, Reason};
case Action(Type, Name, Conf) of ({Type, Name}, Conf, _) ->
{error, Reason} -> {error, Reason}; case Action(Type, Name, Conf) of
Return -> Return {error, Reason} -> {error, Reason};
end Return -> Return
end, Result0, MapConfs), end
end,
Result0,
MapConfs
),
perform_bridge_changes(Tasks, Result). perform_bridge_changes(Tasks, Result).
load() -> load() ->
@ -184,18 +208,29 @@ parse_bridge_id(BridgeId) ->
end. end.
list() -> list() ->
lists:foldl(fun({Type, NameAndConf}, Bridges) -> lists:foldl(
lists:foldl(fun({Name, RawConf}, Acc) -> fun({Type, NameAndConf}, Bridges) ->
lists:foldl(
fun({Name, RawConf}, Acc) ->
case lookup(Type, Name, RawConf) of case lookup(Type, Name, RawConf) of
{error, not_found} -> Acc; {error, not_found} -> Acc;
{ok, Res} -> [Res | Acc] {ok, Res} -> [Res | Acc]
end end
end, Bridges, maps:to_list(NameAndConf)) end,
end, [], maps:to_list(emqx:get_raw_config([bridges], #{}))). Bridges,
maps:to_list(NameAndConf)
)
end,
[],
maps:to_list(emqx:get_raw_config([bridges], #{}))
).
list_bridges_by_connector(ConnectorId) -> list_bridges_by_connector(ConnectorId) ->
[B || B = #{raw_config := #{<<"connector">> := Id}} <- list(), [
ConnectorId =:= Id]. B
|| B = #{raw_config := #{<<"connector">> := Id}} <- list(),
ConnectorId =:= Id
].
lookup(Id) -> lookup(Id) ->
{Type, Name} = parse_bridge_id(Id), {Type, Name} = parse_bridge_id(Id),
@ -206,10 +241,15 @@ lookup(Type, Name) ->
lookup(Type, Name, RawConf). lookup(Type, Name, RawConf).
lookup(Type, Name, RawConf) -> lookup(Type, Name, RawConf) ->
case emqx_resource:get_instance(resource_id(Type, Name)) of case emqx_resource:get_instance(resource_id(Type, Name)) of
{error, not_found} -> {error, not_found}; {error, not_found} ->
{error, not_found};
{ok, _, Data} -> {ok, _, Data} ->
{ok, #{type => Type, name => Name, resource_data => Data, {ok, #{
raw_config => RawConf}} type => Type,
name => Name,
resource_data => Data,
raw_config => RawConf
}}
end. end.
reset_metrics(ResourceId) -> reset_metrics(ResourceId) ->
@ -227,13 +267,21 @@ create(BridgeId, Conf) ->
create(BridgeType, BridgeName, Conf). create(BridgeType, BridgeName, Conf).
create(Type, Name, Conf) -> create(Type, Name, Conf) ->
?SLOG(info, #{msg => "create bridge", type => Type, name => Name, ?SLOG(info, #{
config => Conf}), msg => "create bridge",
case emqx_resource:create_local(resource_id(Type, Name), type => Type,
<<"emqx_bridge">>, name => Name,
emqx_bridge:resource_type(Type), config => Conf
parse_confs(Type, Name, Conf), }),
#{}) of case
emqx_resource:create_local(
resource_id(Type, Name),
<<"emqx_bridge">>,
emqx_bridge:resource_type(Type),
parse_confs(Type, Name, Conf),
#{}
)
of
{ok, already_created} -> maybe_disable_bridge(Type, Name, Conf); {ok, already_created} -> maybe_disable_bridge(Type, Name, Conf);
{ok, _} -> maybe_disable_bridge(Type, Name, Conf); {ok, _} -> maybe_disable_bridge(Type, Name, Conf);
{error, Reason} -> {error, Reason} {error, Reason} -> {error, Reason}
@ -254,15 +302,25 @@ update(Type, Name, {OldConf, Conf}) ->
%% %%
case if_only_to_toggle_enable(OldConf, Conf) of case if_only_to_toggle_enable(OldConf, Conf) of
false -> false ->
?SLOG(info, #{msg => "update bridge", type => Type, name => Name, ?SLOG(info, #{
config => Conf}), msg => "update bridge",
type => Type,
name => Name,
config => Conf
}),
case recreate(Type, Name, Conf) of case recreate(Type, Name, Conf) of
{ok, _} -> maybe_disable_bridge(Type, Name, Conf); {ok, _} ->
maybe_disable_bridge(Type, Name, Conf);
{error, not_found} -> {error, not_found} ->
?SLOG(warning, #{ msg => "updating_a_non-exist_bridge_need_create_a_new_one" ?SLOG(warning, #{
, type => Type, name => Name, config => Conf}), msg => "updating_a_non-exist_bridge_need_create_a_new_one",
type => Type,
name => Name,
config => Conf
}),
create(Type, Name, Conf); create(Type, Name, Conf);
{error, Reason} -> {error, {update_bridge_failed, Reason}} {error, Reason} ->
{error, {update_bridge_failed, Reason}}
end; end;
true -> true ->
%% we don't need to recreate the bridge if this config change is only to %% we don't need to recreate the bridge if this config change is only to
@ -277,22 +335,25 @@ recreate(Type, Name) ->
recreate(Type, Name, emqx:get_config([bridges, Type, Name])). recreate(Type, Name, emqx:get_config([bridges, Type, Name])).
recreate(Type, Name, Conf) -> recreate(Type, Name, Conf) ->
emqx_resource:recreate_local(resource_id(Type, Name), emqx_resource:recreate_local(
resource_id(Type, Name),
emqx_bridge:resource_type(Type), emqx_bridge:resource_type(Type),
parse_confs(Type, Name, Conf), parse_confs(Type, Name, Conf),
#{}). #{}
).
create_dry_run(Type, Conf) -> create_dry_run(Type, Conf) ->
Conf0 = Conf#{
Conf0 = Conf#{<<"egress">> => <<"egress">> =>
#{ <<"remote_topic">> => <<"t">> #{
, <<"remote_qos">> => 0 <<"remote_topic">> => <<"t">>,
, <<"retain">> => true <<"remote_qos">> => 0,
, <<"payload">> => <<"val">> <<"retain">> => true,
}, <<"payload">> => <<"val">>
<<"ingress">> => },
#{ <<"remote_topic">> => <<"t">> <<"ingress">> =>
}}, #{<<"remote_topic">> => <<"t">>}
},
case emqx_resource:check_config(emqx_bridge:resource_type(Type), Conf0) of case emqx_resource:check_config(emqx_bridge:resource_type(Type), Conf0) of
{ok, Conf1} -> {ok, Conf1} ->
emqx_resource:create_dry_run_local(emqx_bridge:resource_type(Type), Conf1); emqx_resource:create_dry_run_local(emqx_bridge:resource_type(Type), Conf1);
@ -313,35 +374,48 @@ remove(Type, Name, _Conf) ->
case emqx_resource:remove_local(resource_id(Type, Name)) of case emqx_resource:remove_local(resource_id(Type, Name)) of
ok -> ok; ok -> ok;
{error, not_found} -> ok; {error, not_found} -> ok;
{error, Reason} -> {error, Reason} -> {error, Reason}
{error, Reason}
end. end.
diff_confs(NewConfs, OldConfs) -> diff_confs(NewConfs, OldConfs) ->
emqx_map_lib:diff_maps(flatten_confs(NewConfs), emqx_map_lib:diff_maps(
flatten_confs(OldConfs)). flatten_confs(NewConfs),
flatten_confs(OldConfs)
).
flatten_confs(Conf0) -> flatten_confs(Conf0) ->
maps:from_list( maps:from_list(
lists:flatmap(fun({Type, Conf}) -> lists:flatmap(
fun({Type, Conf}) ->
do_flatten_confs(Type, Conf) do_flatten_confs(Type, Conf)
end, maps:to_list(Conf0))). end,
maps:to_list(Conf0)
)
).
do_flatten_confs(Type, Conf0) -> do_flatten_confs(Type, Conf0) ->
[{{Type, Name}, Conf} || {Name, Conf} <- maps:to_list(Conf0)]. [{{Type, Name}, Conf} || {Name, Conf} <- maps:to_list(Conf0)].
get_matched_bridges(Topic) -> get_matched_bridges(Topic) ->
Bridges = emqx:get_config([bridges], #{}), Bridges = emqx:get_config([bridges], #{}),
maps:fold(fun (BType, Conf, Acc0) -> maps:fold(
maps:fold(fun fun(BType, Conf, Acc0) ->
%% Confs for MQTT, Kafka bridges have the `direction` flag maps:fold(
(_BName, #{direction := ingress}, Acc1) -> fun
Acc1; %% Confs for MQTT, Kafka bridges have the `direction` flag
(BName, #{direction := egress} = Egress, Acc1) -> (_BName, #{direction := ingress}, Acc1) ->
%% HTTP, MySQL bridges only have egress direction Acc1;
get_matched_bridge_id(Egress, Topic, BType, BName, Acc1) (BName, #{direction := egress} = Egress, Acc1) ->
end, Acc0, Conf) %% HTTP, MySQL bridges only have egress direction
end, [], Bridges). get_matched_bridge_id(Egress, Topic, BType, BName, Acc1)
end,
Acc0,
Conf
)
end,
[],
Bridges
).
get_matched_bridge_id(#{enable := false}, _Topic, _BType, _BName, Acc) -> get_matched_bridge_id(#{enable := false}, _Topic, _BType, _BName, Acc) ->
Acc; Acc;
@ -351,38 +425,56 @@ get_matched_bridge_id(#{local_topic := Filter}, Topic, BType, BName, Acc) ->
false -> Acc false -> Acc
end. end.
parse_confs(http, _Name, parse_confs(
#{ url := Url http,
, method := Method _Name,
, body := Body #{
, headers := Headers url := Url,
, request_timeout := ReqTimeout method := Method,
} = Conf) -> body := Body,
headers := Headers,
request_timeout := ReqTimeout
} = Conf
) ->
{BaseUrl, Path} = parse_url(Url), {BaseUrl, Path} = parse_url(Url),
{ok, BaseUrl2} = emqx_http_lib:uri_parse(BaseUrl), {ok, BaseUrl2} = emqx_http_lib:uri_parse(BaseUrl),
Conf#{ base_url => BaseUrl2 Conf#{
, request => base_url => BaseUrl2,
#{ path => Path request =>
, method => Method #{
, body => Body path => Path,
, headers => Headers method => Method,
, request_timeout => ReqTimeout body => Body,
} headers => Headers,
}; request_timeout => ReqTimeout
parse_confs(Type, Name, #{connector := ConnId, direction := Direction} = Conf) }
when is_binary(ConnId) -> };
parse_confs(Type, Name, #{connector := ConnId, direction := Direction} = Conf) when
is_binary(ConnId)
->
case emqx_connector:parse_connector_id(ConnId) of case emqx_connector:parse_connector_id(ConnId) of
{Type, ConnName} -> {Type, ConnName} ->
ConnectorConfs = emqx:get_config([connectors, Type, ConnName]), ConnectorConfs = emqx:get_config([connectors, Type, ConnName]),
make_resource_confs(Direction, ConnectorConfs, make_resource_confs(
maps:without([connector, direction], Conf), Type, Name); Direction,
ConnectorConfs,
maps:without([connector, direction], Conf),
Type,
Name
);
{_ConnType, _ConnName} -> {_ConnType, _ConnName} ->
error({cannot_use_connector_with_different_type, ConnId}) error({cannot_use_connector_with_different_type, ConnId})
end; end;
parse_confs(Type, Name, #{connector := ConnectorConfs, direction := Direction} = Conf) parse_confs(Type, Name, #{connector := ConnectorConfs, direction := Direction} = Conf) when
when is_map(ConnectorConfs) -> is_map(ConnectorConfs)
make_resource_confs(Direction, ConnectorConfs, ->
maps:without([connector, direction], Conf), Type, Name). make_resource_confs(
Direction,
ConnectorConfs,
maps:without([connector, direction], Conf),
Type,
Name
).
make_resource_confs(ingress, ConnectorConfs, BridgeConf, Type, Name) -> make_resource_confs(ingress, ConnectorConfs, BridgeConf, Type, Name) ->
BName = bridge_id(Type, Name), BName = bridge_id(Type, Name),
@ -417,39 +509,48 @@ if_only_to_toggle_enable(OldConf, Conf) ->
#{added := Added, removed := Removed, changed := Updated} = #{added := Added, removed := Removed, changed := Updated} =
emqx_map_lib:diff_maps(OldConf, Conf), emqx_map_lib:diff_maps(OldConf, Conf),
case {Added, Removed, Updated} of case {Added, Removed, Updated} of
{Added, Removed, #{enable := _}= Updated} {Added, Removed, #{enable := _} = Updated} when
when map_size(Added) =:= 0, map_size(Added) =:= 0,
map_size(Removed) =:= 0, map_size(Removed) =:= 0,
map_size(Updated) =:= 1 -> true; map_size(Updated) =:= 1
{_, _, _} -> false ->
true;
{_, _, _} ->
false
end. end.
-spec get_basic_usage_info() -> -spec get_basic_usage_info() ->
#{ num_bridges => non_neg_integer() #{
, count_by_type => num_bridges => non_neg_integer(),
#{ BridgeType => non_neg_integer() count_by_type =>
} #{BridgeType => non_neg_integer()}
} when BridgeType :: atom(). }
when
BridgeType :: atom().
get_basic_usage_info() -> get_basic_usage_info() ->
InitialAcc = #{num_bridges => 0, count_by_type => #{}}, InitialAcc = #{num_bridges => 0, count_by_type => #{}},
try try
lists:foldl( lists:foldl(
fun(#{resource_data := #{config := #{enable := false}}}, Acc) -> fun
Acc; (#{resource_data := #{config := #{enable := false}}}, Acc) ->
(#{type := BridgeType}, Acc) -> Acc;
NumBridges = maps:get(num_bridges, Acc), (#{type := BridgeType}, Acc) ->
CountByType0 = maps:get(count_by_type, Acc), NumBridges = maps:get(num_bridges, Acc),
CountByType = maps:update_with( CountByType0 = maps:get(count_by_type, Acc),
binary_to_atom(BridgeType, utf8), CountByType = maps:update_with(
fun(X) -> X + 1 end, binary_to_atom(BridgeType, utf8),
1, fun(X) -> X + 1 end,
CountByType0), 1,
Acc#{ num_bridges => NumBridges + 1 CountByType0
, count_by_type => CountByType ),
} Acc#{
end, num_bridges => NumBridges + 1,
InitialAcc, count_by_type => CountByType
list()) }
end,
InitialAcc,
list()
)
catch catch
%% for instance, when the bridge app is not ready yet. %% for instance, when the bridge app is not ready yet.
_:_ -> _:_ ->

View File

@ -24,22 +24,23 @@
-import(hoconsc, [mk/2, array/1, enum/1]). -import(hoconsc, [mk/2, array/1, enum/1]).
%% Swagger specs from hocon schema %% Swagger specs from hocon schema
-export([ api_spec/0 -export([
, paths/0 api_spec/0,
, schema/1 paths/0,
, namespace/0 schema/1,
]). namespace/0
]).
%% API callbacks %% API callbacks
-export([ '/bridges'/2 -export([
, '/bridges/:id'/2 '/bridges'/2,
, '/bridges/:id/operation/:operation'/2 '/bridges/:id'/2,
, '/nodes/:node/bridges/:id/operation/:operation'/2 '/bridges/:id/operation/:operation'/2,
, '/bridges/:id/reset_metrics'/2 '/nodes/:node/bridges/:id/operation/:operation'/2,
]). '/bridges/:id/reset_metrics'/2
]).
-export([ lookup_from_local_node/2 -export([lookup_from_local_node/2]).
]).
-define(TYPES, [mqtt, http]). -define(TYPES, [mqtt, http]).
@ -51,35 +52,45 @@
EXPR EXPR
catch catch
error:{invalid_bridge_id, Id0} -> error:{invalid_bridge_id, Id0} ->
{400, error_msg('INVALID_ID', <<"invalid_bridge_id: ", Id0/binary, {400,
". Bridge Ids must be of format {type}:{name}">>)} error_msg(
end). 'INVALID_ID',
<<"invalid_bridge_id: ", Id0/binary,
". Bridge Ids must be of format {type}:{name}">>
)}
end
).
-define(METRICS(MATCH, SUCC, FAILED, RATE, RATE_5, RATE_MAX), -define(METRICS(MATCH, SUCC, FAILED, RATE, RATE_5, RATE_MAX), #{
#{ matched => MATCH, matched => MATCH,
success => SUCC, success => SUCC,
failed => FAILED, failed => FAILED,
rate => RATE, rate => RATE,
rate_last5m => RATE_5, rate_last5m => RATE_5,
rate_max => RATE_MAX rate_max => RATE_MAX
}). }).
-define(metrics(MATCH, SUCC, FAILED, RATE, RATE_5, RATE_MAX), -define(metrics(MATCH, SUCC, FAILED, RATE, RATE_5, RATE_MAX), #{
#{ matched := MATCH, matched := MATCH,
success := SUCC, success := SUCC,
failed := FAILED, failed := FAILED,
rate := RATE, rate := RATE,
rate_last5m := RATE_5, rate_last5m := RATE_5,
rate_max := RATE_MAX rate_max := RATE_MAX
}). }).
namespace() -> "bridge". namespace() -> "bridge".
api_spec() -> api_spec() ->
emqx_dashboard_swagger:spec(?MODULE, #{check_schema => false}). emqx_dashboard_swagger:spec(?MODULE, #{check_schema => false}).
paths() -> ["/bridges", "/bridges/:id", "/bridges/:id/operation/:operation", paths() ->
"/nodes/:node/bridges/:id/operation/:operation", [
"/bridges/:id/reset_metrics"]. "/bridges",
"/bridges/:id",
"/bridges/:id/operation/:operation",
"/nodes/:node/bridges/:id/operation/:operation",
"/bridges/:id/reset_metrics"
].
error_schema(Code, Message) when is_atom(Code) -> error_schema(Code, Message) when is_atom(Code) ->
error_schema([Code], Message); error_schema([Code], Message);
@ -89,40 +100,58 @@ error_schema(Codes, Message) when is_list(Codes) andalso is_binary(Message) ->
emqx_dashboard_swagger:error_codes(Codes, Message). emqx_dashboard_swagger:error_codes(Codes, Message).
get_response_body_schema() -> get_response_body_schema() ->
emqx_dashboard_swagger:schema_with_examples(emqx_bridge_schema:get_response(), emqx_dashboard_swagger:schema_with_examples(
bridge_info_examples(get)). emqx_bridge_schema:get_response(),
bridge_info_examples(get)
).
param_path_operation_cluster() -> param_path_operation_cluster() ->
{operation, mk(enum([enable, disable, stop, restart]), {operation,
#{ in => path mk(
, required => true enum([enable, disable, stop, restart]),
, example => <<"start">> #{
, desc => ?DESC("desc_param_path_operation_cluster") in => path,
})}. required => true,
example => <<"start">>,
desc => ?DESC("desc_param_path_operation_cluster")
}
)}.
param_path_operation_on_node() -> param_path_operation_on_node() ->
{operation, mk(enum([stop, restart]), {operation,
#{ in => path mk(
, required => true enum([stop, restart]),
, example => <<"start">> #{
, desc => ?DESC("desc_param_path_operation_on_node") in => path,
})}. required => true,
example => <<"start">>,
desc => ?DESC("desc_param_path_operation_on_node")
}
)}.
param_path_node() -> param_path_node() ->
{node, mk(binary(), {node,
#{ in => path mk(
, required => true binary(),
, example => <<"emqx@127.0.0.1">> #{
, desc => ?DESC("desc_param_path_node") in => path,
})}. required => true,
example => <<"emqx@127.0.0.1">>,
desc => ?DESC("desc_param_path_node")
}
)}.
param_path_id() -> param_path_id() ->
{id, mk(binary(), {id,
#{ in => path mk(
, required => true binary(),
, example => <<"http:my_http_bridge">> #{
, desc => ?DESC("desc_param_path_id") in => path,
})}. required => true,
example => <<"http:my_http_bridge">>,
desc => ?DESC("desc_param_path_id")
}
)}.
bridge_info_array_example(Method) -> bridge_info_array_example(Method) ->
[Config || #{value := Config} <- maps:values(bridge_info_examples(Method))]. [Config || #{value := Config} <- maps:values(bridge_info_examples(Method))].
@ -136,7 +165,8 @@ bridge_info_examples(Method) ->
}). }).
conn_bridge_examples(Method) -> conn_bridge_examples(Method) ->
lists:foldl(fun(Type, Acc) -> lists:foldl(
fun(Type, Acc) ->
SType = atom_to_list(Type), SType = atom_to_list(Type),
KeyIngress = bin(SType ++ "_ingress"), KeyIngress = bin(SType ++ "_ingress"),
KeyEgress = bin(SType ++ "_egress"), KeyEgress = bin(SType ++ "_egress"),
@ -150,19 +180,25 @@ conn_bridge_examples(Method) ->
value => info_example(Type, egress, Method) value => info_example(Type, egress, Method)
} }
}) })
end, #{}, ?CONN_TYPES). end,
#{},
?CONN_TYPES
).
info_example(Type, Direction, Method) -> info_example(Type, Direction, Method) ->
maps:merge(info_example_basic(Type, Direction), maps:merge(
method_example(Type, Direction, Method)). info_example_basic(Type, Direction),
method_example(Type, Direction, Method)
).
method_example(Type, Direction, Method) when Method == get; Method == post -> method_example(Type, Direction, Method) when Method == get; Method == post ->
SType = atom_to_list(Type), SType = atom_to_list(Type),
SDir = atom_to_list(Direction), SDir = atom_to_list(Direction),
SName = case Type of SName =
http -> "my_" ++ SType ++ "_bridge"; case Type of
_ -> "my_" ++ SDir ++ "_" ++ SType ++ "_bridge" http -> "my_" ++ SType ++ "_bridge";
end, _ -> "my_" ++ SDir ++ "_" ++ SType ++ "_bridge"
end,
TypeNameExamp = #{ TypeNameExamp = #{
type => bin(SType), type => bin(SType),
name => bin(SName) name => bin(SName)
@ -175,8 +211,10 @@ maybe_with_metrics_example(TypeNameExamp, get) ->
TypeNameExamp#{ TypeNameExamp#{
metrics => ?METRICS(0, 0, 0, 0, 0, 0), metrics => ?METRICS(0, 0, 0, 0, 0, 0),
node_metrics => [ node_metrics => [
#{node => node(), #{
metrics => ?METRICS(0, 0, 0, 0, 0, 0)} node => node(),
metrics => ?METRICS(0, 0, 0, 0, 0, 0)
}
] ]
}; };
maybe_with_metrics_example(TypeNameExamp, _) -> maybe_with_metrics_example(TypeNameExamp, _) ->
@ -231,8 +269,9 @@ schema("/bridges") ->
description => ?DESC("desc_api1"), description => ?DESC("desc_api1"),
responses => #{ responses => #{
200 => emqx_dashboard_swagger:schema_with_example( 200 => emqx_dashboard_swagger:schema_with_example(
array(emqx_bridge_schema:get_response()), array(emqx_bridge_schema:get_response()),
bridge_info_array_example(get)) bridge_info_array_example(get)
)
} }
}, },
post => #{ post => #{
@ -240,15 +279,15 @@ schema("/bridges") ->
summary => <<"Create Bridge">>, summary => <<"Create Bridge">>,
description => ?DESC("desc_api2"), description => ?DESC("desc_api2"),
'requestBody' => emqx_dashboard_swagger:schema_with_examples( 'requestBody' => emqx_dashboard_swagger:schema_with_examples(
emqx_bridge_schema:post_request(), emqx_bridge_schema:post_request(),
bridge_info_examples(post)), bridge_info_examples(post)
),
responses => #{ responses => #{
201 => get_response_body_schema(), 201 => get_response_body_schema(),
400 => error_schema('ALREADY_EXISTS', "Bridge already exists") 400 => error_schema('ALREADY_EXISTS', "Bridge already exists")
} }
} }
}; };
schema("/bridges/:id") -> schema("/bridges/:id") ->
#{ #{
'operationId' => '/bridges/:id', 'operationId' => '/bridges/:id',
@ -268,8 +307,9 @@ schema("/bridges/:id") ->
description => ?DESC("desc_api4"), description => ?DESC("desc_api4"),
parameters => [param_path_id()], parameters => [param_path_id()],
'requestBody' => emqx_dashboard_swagger:schema_with_examples( 'requestBody' => emqx_dashboard_swagger:schema_with_examples(
emqx_bridge_schema:put_request(), emqx_bridge_schema:put_request(),
bridge_info_examples(put)), bridge_info_examples(put)
),
responses => #{ responses => #{
200 => get_response_body_schema(), 200 => get_response_body_schema(),
404 => error_schema('NOT_FOUND', "Bridge not found"), 404 => error_schema('NOT_FOUND', "Bridge not found"),
@ -287,7 +327,6 @@ schema("/bridges/:id") ->
} }
} }
}; };
schema("/bridges/:id/reset_metrics") -> schema("/bridges/:id/reset_metrics") ->
#{ #{
'operationId' => '/bridges/:id/reset_metrics', 'operationId' => '/bridges/:id/reset_metrics',
@ -319,7 +358,6 @@ schema("/bridges/:id/operation/:operation") ->
} }
} }
}; };
schema("/nodes/:node/bridges/:id/operation/:operation") -> schema("/nodes/:node/bridges/:id/operation/:operation") ->
#{ #{
'operationId' => '/nodes/:node/bridges/:id/operation/:operation', 'operationId' => '/nodes/:node/bridges/:id/operation/:operation',
@ -336,7 +374,6 @@ schema("/nodes/:node/bridges/:id/operation/:operation") ->
200 => <<"Operation success">>, 200 => <<"Operation success">>,
400 => error_schema('INVALID_ID', "Bad bridge ID"), 400 => error_schema('INVALID_ID', "Bad bridge ID"),
403 => error_schema('FORBIDDEN_REQUEST', "forbidden operation") 403 => error_schema('FORBIDDEN_REQUEST', "forbidden operation")
} }
} }
}. }.
@ -353,15 +390,18 @@ schema("/nodes/:node/bridges/:id/operation/:operation") ->
end end
end; end;
'/bridges'(get, _Params) -> '/bridges'(get, _Params) ->
{200, zip_bridges([[format_resp(Data) || Data <- emqx_bridge_proto_v1:list_bridges(Node)] {200,
|| Node <- mria_mnesia:running_nodes()])}. zip_bridges([
[format_resp(Data) || Data <- emqx_bridge_proto_v1:list_bridges(Node)]
|| Node <- mria_mnesia:running_nodes()
])}.
'/bridges/:id'(get, #{bindings := #{id := Id}}) -> '/bridges/:id'(get, #{bindings := #{id := Id}}) ->
?TRY_PARSE_ID(Id, lookup_from_all_nodes(BridgeType, BridgeName, 200)); ?TRY_PARSE_ID(Id, lookup_from_all_nodes(BridgeType, BridgeName, 200));
'/bridges/:id'(put, #{bindings := #{id := Id}, body := Conf0}) -> '/bridges/:id'(put, #{bindings := #{id := Id}, body := Conf0}) ->
Conf = filter_out_request_body(Conf0), Conf = filter_out_request_body(Conf0),
?TRY_PARSE_ID(Id, ?TRY_PARSE_ID(
Id,
case emqx_bridge:lookup(BridgeType, BridgeName) of case emqx_bridge:lookup(BridgeType, BridgeName) of
{ok, _} -> {ok, _} ->
case ensure_bridge_created(BridgeType, BridgeName, Conf) of case ensure_bridge_created(BridgeType, BridgeName, Conf) of
@ -371,24 +411,31 @@ schema("/nodes/:node/bridges/:id/operation/:operation") ->
{400, Error} {400, Error}
end; end;
{error, not_found} -> {error, not_found} ->
{404, error_msg('NOT_FOUND',<<"bridge not found">>)} {404, error_msg('NOT_FOUND', <<"bridge not found">>)}
end); end
);
'/bridges/:id'(delete, #{bindings := #{id := Id}}) -> '/bridges/:id'(delete, #{bindings := #{id := Id}}) ->
?TRY_PARSE_ID(Id, ?TRY_PARSE_ID(
case emqx_conf:remove(emqx_bridge:config_key_path() ++ [BridgeType, BridgeName], Id,
#{override_to => cluster}) of case
emqx_conf:remove(
emqx_bridge:config_key_path() ++ [BridgeType, BridgeName],
#{override_to => cluster}
)
of
{ok, _} -> {204}; {ok, _} -> {204};
{error, Reason} -> {error, Reason} -> {500, error_msg('INTERNAL_ERROR', Reason)}
{500, error_msg('INTERNAL_ERROR', Reason)} end
end). ).
'/bridges/:id/reset_metrics'(put, #{bindings := #{id := Id}}) -> '/bridges/:id/reset_metrics'(put, #{bindings := #{id := Id}}) ->
?TRY_PARSE_ID(Id, ?TRY_PARSE_ID(
Id,
case emqx_bridge:reset_metrics(emqx_bridge:resource_id(BridgeType, BridgeName)) of case emqx_bridge:reset_metrics(emqx_bridge:resource_id(BridgeType, BridgeName)) of
ok -> {200, <<"Reset success">>}; ok -> {200, <<"Reset success">>};
Reason -> {400, error_msg('BAD_REQUEST', Reason)} Reason -> {400, error_msg('BAD_REQUEST', Reason)}
end). end
).
lookup_from_all_nodes(BridgeType, BridgeName, SuccCode) -> lookup_from_all_nodes(BridgeType, BridgeName, SuccCode) ->
Nodes = mria_mnesia:running_nodes(), Nodes = mria_mnesia:running_nodes(),
@ -407,40 +454,58 @@ lookup_from_local_node(BridgeType, BridgeName) ->
Error -> Error Error -> Error
end. end.
'/bridges/:id/operation/:operation'(post, #{bindings := '/bridges/:id/operation/:operation'(post, #{
#{id := Id, operation := Op}}) -> bindings :=
?TRY_PARSE_ID(Id, case operation_func(Op) of #{id := Id, operation := Op}
invalid -> {400, error_msg('BAD_REQUEST', <<"invalid operation">>)}; }) ->
OperFunc when OperFunc == enable; OperFunc == disable -> ?TRY_PARSE_ID(
case emqx_conf:update(emqx_bridge:config_key_path() ++ [BridgeType, BridgeName], Id,
{OperFunc, BridgeType, BridgeName}, #{override_to => cluster}) of case operation_func(Op) of
{ok, _} -> {200}; invalid ->
{error, {pre_config_update, _, bridge_not_found}} -> {400, error_msg('BAD_REQUEST', <<"invalid operation">>)};
{404, error_msg('NOT_FOUND', <<"bridge not found">>)}; OperFunc when OperFunc == enable; OperFunc == disable ->
{error, Reason} -> case
{500, error_msg('INTERNAL_ERROR', Reason)} emqx_conf:update(
end; emqx_bridge:config_key_path() ++ [BridgeType, BridgeName],
OperFunc -> {OperFunc, BridgeType, BridgeName},
Nodes = mria_mnesia:running_nodes(), #{override_to => cluster}
operation_to_all_nodes(Nodes, OperFunc, BridgeType, BridgeName) )
end). of
{ok, _} ->
{200};
{error, {pre_config_update, _, bridge_not_found}} ->
{404, error_msg('NOT_FOUND', <<"bridge not found">>)};
{error, Reason} ->
{500, error_msg('INTERNAL_ERROR', Reason)}
end;
OperFunc ->
Nodes = mria_mnesia:running_nodes(),
operation_to_all_nodes(Nodes, OperFunc, BridgeType, BridgeName)
end
).
'/nodes/:node/bridges/:id/operation/:operation'(post, #{bindings := '/nodes/:node/bridges/:id/operation/:operation'(post, #{
#{id := Id, operation := Op}}) -> bindings :=
?TRY_PARSE_ID(Id, case operation_func(Op) of #{id := Id, operation := Op}
invalid -> {400, error_msg('BAD_REQUEST', <<"invalid operation">>)}; }) ->
OperFunc when OperFunc == restart; OperFunc == stop -> ?TRY_PARSE_ID(
ConfMap = emqx:get_config([bridges, BridgeType, BridgeName]), Id,
case maps:get(enable, ConfMap, false) of case operation_func(Op) of
false -> {403, error_msg('FORBIDDEN_REQUEST', <<"forbidden operation">>)}; invalid ->
true -> {400, error_msg('BAD_REQUEST', <<"invalid operation">>)};
case emqx_bridge:OperFunc(BridgeType, BridgeName) of OperFunc when OperFunc == restart; OperFunc == stop ->
ok -> {200}; ConfMap = emqx:get_config([bridges, BridgeType, BridgeName]),
{error, Reason} -> case maps:get(enable, ConfMap, false) of
{500, error_msg('INTERNAL_ERROR', Reason)} false ->
end {403, error_msg('FORBIDDEN_REQUEST', <<"forbidden operation">>)};
end true ->
end). case emqx_bridge:OperFunc(BridgeType, BridgeName) of
ok -> {200};
{error, Reason} -> {500, error_msg('INTERNAL_ERROR', Reason)}
end
end
end
).
operation_func(<<"stop">>) -> stop; operation_func(<<"stop">>) -> stop;
operation_func(<<"restart">>) -> restart; operation_func(<<"restart">>) -> restart;
@ -449,10 +514,11 @@ operation_func(<<"disable">>) -> disable;
operation_func(_) -> invalid. operation_func(_) -> invalid.
operation_to_all_nodes(Nodes, OperFunc, BridgeType, BridgeName) -> operation_to_all_nodes(Nodes, OperFunc, BridgeType, BridgeName) ->
RpcFunc = case OperFunc of RpcFunc =
restart -> restart_bridges_to_all_nodes; case OperFunc of
stop -> stop_bridges_to_all_nodes restart -> restart_bridges_to_all_nodes;
end, stop -> stop_bridges_to_all_nodes
end,
case is_ok(emqx_bridge_proto_v1:RpcFunc(Nodes, BridgeType, BridgeName)) of case is_ok(emqx_bridge_proto_v1:RpcFunc(Nodes, BridgeType, BridgeName)) of
{ok, _} -> {ok, _} ->
{200}; {200};
@ -461,48 +527,70 @@ operation_to_all_nodes(Nodes, OperFunc, BridgeType, BridgeName) ->
end. end.
ensure_bridge_created(BridgeType, BridgeName, Conf) -> ensure_bridge_created(BridgeType, BridgeName, Conf) ->
case emqx_conf:update(emqx_bridge:config_key_path() ++ [BridgeType, BridgeName], case
Conf, #{override_to => cluster}) of emqx_conf:update(
emqx_bridge:config_key_path() ++ [BridgeType, BridgeName],
Conf,
#{override_to => cluster}
)
of
{ok, _} -> ok; {ok, _} -> ok;
{error, Reason} -> {error, Reason} -> {error, error_msg('BAD_REQUEST', Reason)}
{error, error_msg('BAD_REQUEST', Reason)}
end. end.
zip_bridges([BridgesFirstNode | _] = BridgesAllNodes) -> zip_bridges([BridgesFirstNode | _] = BridgesAllNodes) ->
lists:foldl(fun(#{type := Type, name := Name}, Acc) -> lists:foldl(
fun(#{type := Type, name := Name}, Acc) ->
Bridges = pick_bridges_by_id(Type, Name, BridgesAllNodes), Bridges = pick_bridges_by_id(Type, Name, BridgesAllNodes),
[format_bridge_info(Bridges) | Acc] [format_bridge_info(Bridges) | Acc]
end, [], BridgesFirstNode). end,
[],
BridgesFirstNode
).
pick_bridges_by_id(Type, Name, BridgesAllNodes) -> pick_bridges_by_id(Type, Name, BridgesAllNodes) ->
lists:foldl(fun(BridgesOneNode, Acc) -> lists:foldl(
case [Bridge || Bridge = #{type := Type0, name := Name0} <- BridgesOneNode, fun(BridgesOneNode, Acc) ->
Type0 == Type, Name0 == Name] of case
[BridgeInfo] -> [BridgeInfo | Acc]; [
Bridge
|| Bridge = #{type := Type0, name := Name0} <- BridgesOneNode,
Type0 == Type,
Name0 == Name
]
of
[BridgeInfo] ->
[BridgeInfo | Acc];
[] -> [] ->
?SLOG(warning, #{msg => "bridge_inconsistent_in_cluster", ?SLOG(warning, #{
bridge => emqx_bridge:bridge_id(Type, Name)}), msg => "bridge_inconsistent_in_cluster",
bridge => emqx_bridge:bridge_id(Type, Name)
}),
Acc Acc
end end
end, [], BridgesAllNodes). end,
[],
BridgesAllNodes
).
format_bridge_info([FirstBridge | _] = Bridges) -> format_bridge_info([FirstBridge | _] = Bridges) ->
Res = maps:remove(node, FirstBridge), Res = maps:remove(node, FirstBridge),
NodeStatus = collect_status(Bridges), NodeStatus = collect_status(Bridges),
NodeMetrics = collect_metrics(Bridges), NodeMetrics = collect_metrics(Bridges),
Res#{ status => aggregate_status(NodeStatus) Res#{
, node_status => NodeStatus status => aggregate_status(NodeStatus),
, metrics => aggregate_metrics(NodeMetrics) node_status => NodeStatus,
, node_metrics => NodeMetrics metrics => aggregate_metrics(NodeMetrics),
}. node_metrics => NodeMetrics
}.
collect_status(Bridges) -> collect_status(Bridges) ->
[maps:with([node, status], B) || B <- Bridges]. [maps:with([node, status], B) || B <- Bridges].
aggregate_status(AllStatus) -> aggregate_status(AllStatus) ->
Head = fun ([A | _]) -> A end, Head = fun([A | _]) -> A end,
HeadVal = maps:get(status, Head(AllStatus), connecting), HeadVal = maps:get(status, Head(AllStatus), connecting),
AllRes = lists:all(fun (#{status := Val}) -> Val == HeadVal end, AllStatus), AllRes = lists:all(fun(#{status := Val}) -> Val == HeadVal end, AllStatus),
case AllRes of case AllRes of
true -> HeadVal; true -> HeadVal;
false -> inconsistent false -> inconsistent
@ -512,15 +600,31 @@ collect_metrics(Bridges) ->
[maps:with([node, metrics], B) || B <- Bridges]. [maps:with([node, metrics], B) || B <- Bridges].
aggregate_metrics(AllMetrics) -> aggregate_metrics(AllMetrics) ->
InitMetrics = ?METRICS(0,0,0,0,0,0), InitMetrics = ?METRICS(0, 0, 0, 0, 0, 0),
lists:foldl(fun(#{metrics := ?metrics(Match1, Succ1, Failed1, Rate1, Rate5m1, RateMax1)}, lists:foldl(
?metrics(Match0, Succ0, Failed0, Rate0, Rate5m0, RateMax0)) -> fun(
?METRICS(Match1 + Match0, Succ1 + Succ0, Failed1 + Failed0, #{metrics := ?metrics(Match1, Succ1, Failed1, Rate1, Rate5m1, RateMax1)},
Rate1 + Rate0, Rate5m1 + Rate5m0, RateMax1 + RateMax0) ?metrics(Match0, Succ0, Failed0, Rate0, Rate5m0, RateMax0)
end, InitMetrics, AllMetrics). ) ->
?METRICS(
Match1 + Match0,
Succ1 + Succ0,
Failed1 + Failed0,
Rate1 + Rate0,
Rate5m1 + Rate5m0,
RateMax1 + RateMax0
)
end,
InitMetrics,
AllMetrics
).
format_resp(#{type := Type, name := BridgeName, raw_config := RawConf, format_resp(#{
resource_data := #{status := Status, metrics := Metrics}}) -> type := Type,
name := BridgeName,
raw_config := RawConf,
resource_data := #{status := Status, metrics := Metrics}
}) ->
RawConfFull = fill_defaults(Type, RawConf), RawConfFull = fill_defaults(Type, RawConf),
RawConfFull#{ RawConfFull#{
type => Type, type => Type,
@ -531,10 +635,11 @@ format_resp(#{type := Type, name := BridgeName, raw_config := RawConf,
}. }.
format_metrics(#{ format_metrics(#{
counters := #{failed := Failed, exception := Ex, matched := Match, success := Succ}, counters := #{failed := Failed, exception := Ex, matched := Match, success := Succ},
rate := #{ rate := #{
matched := #{current := Rate, last5m := Rate5m, max := RateMax} matched := #{current := Rate, last5m := Rate5m, max := RateMax}
} }) -> }
}) ->
?METRICS(Match, Succ, Failed + Ex, Rate, Rate5m, RateMax). ?METRICS(Match, Succ, Failed + Ex, Rate, Rate5m, RateMax).
fill_defaults(Type, RawConf) -> fill_defaults(Type, RawConf) ->
@ -551,14 +656,31 @@ unpack_bridge_conf(Type, PackedConf) ->
RawConf. RawConf.
is_ok(ResL) -> is_ok(ResL) ->
case lists:filter(fun({ok, _}) -> false; (ok) -> false; (_) -> true end, ResL) of case
lists:filter(
fun
({ok, _}) -> false;
(ok) -> false;
(_) -> true
end,
ResL
)
of
[] -> {ok, [Res || {ok, Res} <- ResL]}; [] -> {ok, [Res || {ok, Res} <- ResL]};
ErrL -> {error, ErrL} ErrL -> {error, ErrL}
end. end.
filter_out_request_body(Conf) -> filter_out_request_body(Conf) ->
ExtraConfs = [<<"id">>, <<"type">>, <<"name">>, <<"status">>, <<"node_status">>, ExtraConfs = [
<<"node_metrics">>, <<"metrics">>, <<"node">>], <<"id">>,
<<"type">>,
<<"name">>,
<<"status">>,
<<"node_status">>,
<<"node_metrics">>,
<<"metrics">>,
<<"node">>
],
maps:without(ExtraConfs, Conf). maps:without(ExtraConfs, Conf).
error_msg(Code, Msg) when is_binary(Msg) -> error_msg(Code, Msg) when is_binary(Msg) ->

View File

@ -19,8 +19,10 @@
-export([start/2, stop/1]). -export([start/2, stop/1]).
-export([ pre_config_update/3 -export([
]). pre_config_update/3,
post_config_update/5
]).
-define(TOP_LELVE_HDLR_PATH, (emqx_bridge:config_key_path())). -define(TOP_LELVE_HDLR_PATH, (emqx_bridge:config_key_path())).
-define(LEAF_NODE_HDLR_PATH, (emqx_bridge:config_key_path() ++ ['?', '?'])). -define(LEAF_NODE_HDLR_PATH, (emqx_bridge:config_key_path() ++ ['?', '?'])).
@ -46,8 +48,18 @@ pre_config_update(_, {_Oper, _, _}, undefined) ->
pre_config_update(_, {Oper, _Type, _Name}, OldConfig) -> pre_config_update(_, {Oper, _Type, _Name}, OldConfig) ->
%% to save the 'enable' to the config files %% to save the 'enable' to the config files
{ok, OldConfig#{<<"enable">> => operation_to_enable(Oper)}}; {ok, OldConfig#{<<"enable">> => operation_to_enable(Oper)}};
pre_config_update(_, Conf, _OldConfig) when is_map(Conf) -> pre_config_update(Path, Conf, _OldConfig) when is_map(Conf) ->
{ok, Conf}. case emqx_connector_ssl:convert_certs(filename:join(Path), Conf) of
{error, Reason} ->
{error, Reason};
{ok, ConfNew} ->
{ok, ConfNew}
end.
post_config_update(Path, '$remove', _, OldConf, _AppEnvs) ->
_ = emqx_connector_ssl:clear_certs(filename:join(Path), OldConf);
post_config_update(_Path, _Req, _, _OldConf, _AppEnvs) ->
ok.
%% internal functions %% internal functions
operation_to_enable(disable) -> false; operation_to_enable(disable) -> false;

View File

@ -15,45 +15,66 @@ roots() -> [].
fields("config") -> fields("config") ->
basic_config() ++ basic_config() ++
[ {url, mk(binary(), [
#{ required => true {url,
, desc => ?DESC("config_url") mk(
})} binary(),
, {local_topic, mk(binary(), #{
#{ desc => ?DESC("config_local_topic") required => true,
})} desc => ?DESC("config_url")
, {method, mk(method(), }
#{ default => post )},
, desc => ?DESC("config_method") {local_topic,
})} mk(
, {headers, mk(map(), binary(),
#{ default => #{ #{desc => ?DESC("config_local_topic")}
<<"accept">> => <<"application/json">>, )},
<<"cache-control">> => <<"no-cache">>, {method,
<<"connection">> => <<"keep-alive">>, mk(
<<"content-type">> => <<"application/json">>, method(),
<<"keep-alive">> => <<"timeout=5">>} #{
, desc => ?DESC("config_headers") default => post,
}) desc => ?DESC("config_method")
} }
, {body, mk(binary(), )},
#{ default => <<"${payload}">> {headers,
, desc => ?DESC("config_body") mk(
})} map(),
, {request_timeout, mk(emqx_schema:duration_ms(), #{
#{ default => <<"15s">> default => #{
, desc => ?DESC("config_request_timeout") <<"accept">> => <<"application/json">>,
})} <<"cache-control">> => <<"no-cache">>,
]; <<"connection">> => <<"keep-alive">>,
<<"content-type">> => <<"application/json">>,
<<"keep-alive">> => <<"timeout=5">>
},
desc => ?DESC("config_headers")
}
)},
{body,
mk(
binary(),
#{
default => <<"${payload}">>,
desc => ?DESC("config_body")
}
)},
{request_timeout,
mk(
emqx_schema:duration_ms(),
#{
default => <<"15s">>,
desc => ?DESC("config_request_timeout")
}
)}
];
fields("post") -> fields("post") ->
[ type_field() [
, name_field() type_field(),
name_field()
] ++ fields("config"); ] ++ fields("config");
fields("put") -> fields("put") ->
fields("config"); fields("config");
fields("get") -> fields("get") ->
emqx_bridge_schema:metrics_status_fields() ++ fields("post"). emqx_bridge_schema:metrics_status_fields() ++ fields("post").
@ -65,32 +86,47 @@ desc(_) ->
undefined. undefined.
basic_config() -> basic_config() ->
[ {enable, [
mk(boolean(), {enable,
#{ desc => ?DESC("config_enable") mk(
, default => true boolean(),
})} #{
, {direction, desc => ?DESC("config_enable"),
mk(egress, default => true
#{ desc => ?DESC("config_direction") }
, default => egress )},
})} {direction,
] mk(
++ proplists:delete(base_url, emqx_connector_http:fields(config)). egress,
#{
desc => ?DESC("config_direction"),
default => egress
}
)}
] ++
proplists:delete(base_url, emqx_connector_http:fields(config)).
%%====================================================================================== %%======================================================================================
type_field() -> type_field() ->
{type, mk(http, {type,
#{ required => true mk(
, desc => ?DESC("desc_type") http,
})}. #{
required => true,
desc => ?DESC("desc_type")
}
)}.
name_field() -> name_field() ->
{name, mk(binary(), {name,
#{ required => true mk(
, desc => ?DESC("desc_name") binary(),
})}. #{
required => true,
desc => ?DESC("desc_name")
}
)}.
method() -> method() ->
enum([post, put, get, delete]). enum([post, put, get, delete]).

View File

@ -22,17 +22,20 @@
-include_lib("snabbkaffe/include/snabbkaffe.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl").
%% API functions %% API functions
-export([ start_link/0 -export([
, ensure_all_started/1 start_link/0,
]). ensure_all_started/1
]).
%% gen_server callbacks %% gen_server callbacks
-export([init/1, -export([
handle_call/3, init/1,
handle_cast/2, handle_call/3,
handle_info/2, handle_cast/2,
terminate/2, handle_info/2,
code_change/3]). terminate/2,
code_change/3
]).
-record(state, {}). -record(state, {}).
@ -52,7 +55,6 @@ handle_call(_Request, _From, State) ->
handle_cast({start_and_monitor, Configs}, State) -> handle_cast({start_and_monitor, Configs}, State) ->
ok = load_bridges(Configs), ok = load_bridges(Configs),
{noreply, State}; {noreply, State};
handle_cast(_Msg, State) -> handle_cast(_Msg, State) ->
{noreply, State}. {noreply, State}.
@ -67,13 +69,22 @@ code_change(_OldVsn, State, _Extra) ->
%%============================================================================ %%============================================================================
load_bridges(Configs) -> load_bridges(Configs) ->
lists:foreach(fun({Type, NamedConf}) -> lists:foreach(
lists:foreach(fun({Name, Conf}) -> fun({Type, NamedConf}) ->
lists:foreach(
fun({Name, Conf}) ->
_Res = emqx_bridge:create(Type, Name, Conf), _Res = emqx_bridge:create(Type, Name, Conf),
?tp(emqx_bridge_monitor_loaded_bridge, ?tp(
#{ type => Type emqx_bridge_monitor_loaded_bridge,
, name => Name #{
, res => _Res type => Type,
}) name => Name,
end, maps:to_list(NamedConf)) res => _Res
end, maps:to_list(Configs)). }
)
end,
maps:to_list(NamedConf)
)
end,
maps:to_list(Configs)
).

View File

@ -12,31 +12,27 @@
roots() -> []. roots() -> [].
fields("ingress") -> fields("ingress") ->
[ emqx_bridge_schema:direction_field(ingress, emqx_connector_mqtt_schema:ingress_desc()) [emqx_bridge_schema:direction_field(ingress, emqx_connector_mqtt_schema:ingress_desc())] ++
] emqx_bridge_schema:common_bridge_fields() ++
++ emqx_bridge_schema:common_bridge_fields() proplists:delete(hookpoint, emqx_connector_mqtt_schema:fields("ingress"));
++ proplists:delete(hookpoint, emqx_connector_mqtt_schema:fields("ingress"));
fields("egress") -> fields("egress") ->
[ emqx_bridge_schema:direction_field(egress, emqx_connector_mqtt_schema:egress_desc()) [emqx_bridge_schema:direction_field(egress, emqx_connector_mqtt_schema:egress_desc())] ++
] emqx_bridge_schema:common_bridge_fields() ++
++ emqx_bridge_schema:common_bridge_fields() emqx_connector_mqtt_schema:fields("egress");
++ emqx_connector_mqtt_schema:fields("egress");
fields("post_ingress") -> fields("post_ingress") ->
[ type_field() [
, name_field() type_field(),
name_field()
] ++ proplists:delete(enable, fields("ingress")); ] ++ proplists:delete(enable, fields("ingress"));
fields("post_egress") -> fields("post_egress") ->
[ type_field() [
, name_field() type_field(),
name_field()
] ++ proplists:delete(enable, fields("egress")); ] ++ proplists:delete(enable, fields("egress"));
fields("put_ingress") -> fields("put_ingress") ->
proplists:delete(enable, fields("ingress")); proplists:delete(enable, fields("ingress"));
fields("put_egress") -> fields("put_egress") ->
proplists:delete(enable, fields("egress")); proplists:delete(enable, fields("egress"));
fields("get_ingress") -> fields("get_ingress") ->
emqx_bridge_schema:metrics_status_fields() ++ fields("post_ingress"); emqx_bridge_schema:metrics_status_fields() ++ fields("post_ingress");
fields("get_egress") -> fields("get_egress") ->
@ -49,13 +45,21 @@ desc(_) ->
%%====================================================================================== %%======================================================================================
type_field() -> type_field() ->
{type, mk(mqtt, {type,
#{ required => true mk(
, desc => ?DESC("desc_type") mqtt,
})}. #{
required => true,
desc => ?DESC("desc_type")
}
)}.
name_field() -> name_field() ->
{name, mk(binary(), {name,
#{ required => true mk(
, desc => ?DESC("desc_name") binary(),
})}. #{
required => true,
desc => ?DESC("desc_name")
}
)}.

View File

@ -7,15 +7,17 @@
-export([roots/0, fields/1, desc/1, namespace/0]). -export([roots/0, fields/1, desc/1, namespace/0]).
-export([ get_response/0 -export([
, put_request/0 get_response/0,
, post_request/0 put_request/0,
]). post_request/0
]).
-export([ common_bridge_fields/0 -export([
, metrics_status_fields/0 common_bridge_fields/0,
, direction_field/2 metrics_status_fields/0,
]). direction_field/2
]).
%%====================================================================================== %%======================================================================================
%% Hocon Schema Definitions %% Hocon Schema Definitions
@ -34,43 +36,68 @@ post_request() ->
http_schema("post"). http_schema("post").
http_schema(Method) -> http_schema(Method) ->
Schemas = lists:flatmap(fun(Type) -> Schemas = lists:flatmap(
[ref(schema_mod(Type), Method ++ "_ingress"), fun(Type) ->
ref(schema_mod(Type), Method ++ "_egress")] [
end, ?CONN_TYPES), ref(schema_mod(Type), Method ++ "_ingress"),
hoconsc:union([ref(emqx_bridge_http_schema, Method) ref(schema_mod(Type), Method ++ "_egress")
| Schemas]). ]
end,
?CONN_TYPES
),
hoconsc:union([
ref(emqx_bridge_http_schema, Method)
| Schemas
]).
common_bridge_fields() -> common_bridge_fields() ->
[ {enable, [
mk(boolean(), {enable,
#{ desc => ?DESC("desc_enable") mk(
, default => true boolean(),
})} #{
, {connector, desc => ?DESC("desc_enable"),
mk(binary(), default => true
#{ required => true }
, example => <<"mqtt:my_mqtt_connector">> )},
, desc => ?DESC("desc_connector") {connector,
})} mk(
binary(),
#{
required => true,
example => <<"mqtt:my_mqtt_connector">>,
desc => ?DESC("desc_connector")
}
)}
]. ].
metrics_status_fields() -> metrics_status_fields() ->
[ {"metrics", mk(ref(?MODULE, "metrics"), #{desc => ?DESC("desc_metrics")})} [
, {"node_metrics", mk(hoconsc:array(ref(?MODULE, "node_metrics")), {"metrics", mk(ref(?MODULE, "metrics"), #{desc => ?DESC("desc_metrics")})},
#{ desc => ?DESC("desc_node_metrics")})} {"node_metrics",
, {"status", mk(status(), #{desc => ?DESC("desc_status")})} mk(
, {"node_status", mk(hoconsc:array(ref(?MODULE, "node_status")), hoconsc:array(ref(?MODULE, "node_metrics")),
#{ desc => ?DESC("desc_node_status")})} #{desc => ?DESC("desc_node_metrics")}
)},
{"status", mk(status(), #{desc => ?DESC("desc_status")})},
{"node_status",
mk(
hoconsc:array(ref(?MODULE, "node_status")),
#{desc => ?DESC("desc_node_status")}
)}
]. ].
direction_field(Dir, Desc) -> direction_field(Dir, Desc) ->
{direction, mk(Dir, {direction,
#{ required => true mk(
, default => egress Dir,
, desc => "The direction of the bridge. Can be one of 'ingress' or 'egress'.</br>" #{
++ Desc required => true,
})}. default => egress,
desc => "The direction of the bridge. Can be one of 'ingress' or 'egress'.</br>" ++
Desc
}
)}.
%%====================================================================================== %%======================================================================================
%% For config files %% For config files
@ -80,31 +107,49 @@ namespace() -> "bridge".
roots() -> [bridges]. roots() -> [bridges].
fields(bridges) -> fields(bridges) ->
[{http, mk(hoconsc:map(name, ref(emqx_bridge_http_schema, "config")), [
#{desc => ?DESC("bridges_http")})}] {http,
++ [{T, mk(hoconsc:map(name, hoconsc:union([ ref(schema_mod(T), "ingress") mk(
, ref(schema_mod(T), "egress") hoconsc:map(name, ref(emqx_bridge_http_schema, "config")),
])), #{desc => ?DESC("bridges_http")}
#{desc => ?DESC("bridges_name")})} || T <- ?CONN_TYPES]; )}
] ++
[
{T,
mk(
hoconsc:map(
name,
hoconsc:union([
ref(schema_mod(T), "ingress"),
ref(schema_mod(T), "egress")
])
),
#{desc => ?DESC("bridges_name")}
)}
|| T <- ?CONN_TYPES
];
fields("metrics") -> fields("metrics") ->
[ {"matched", mk(integer(), #{desc => ?DESC("metric_matched")})} [
, {"success", mk(integer(), #{desc => ?DESC("metric_success")})} {"matched", mk(integer(), #{desc => ?DESC("metric_matched")})},
, {"failed", mk(integer(), #{desc => ?DESC("metric_failed")})} {"success", mk(integer(), #{desc => ?DESC("metric_success")})},
, {"rate", mk(float(), #{desc => ?DESC("metric_rate")})} {"failed", mk(integer(), #{desc => ?DESC("metric_failed")})},
, {"rate_max", mk(float(), #{desc => ?DESC("metric_rate_max")})} {"rate", mk(float(), #{desc => ?DESC("metric_rate")})},
, {"rate_last5m", mk(float(), {"rate_max", mk(float(), #{desc => ?DESC("metric_rate_max")})},
#{desc => ?DESC("metric_rate_last5m")})} {"rate_last5m",
mk(
float(),
#{desc => ?DESC("metric_rate_last5m")}
)}
]; ];
fields("node_metrics") -> fields("node_metrics") ->
[ node_name() [
, {"metrics", mk(ref(?MODULE, "metrics"), #{})} node_name(),
{"metrics", mk(ref(?MODULE, "metrics"), #{})}
]; ];
fields("node_status") -> fields("node_status") ->
[ node_name() [
, {"status", mk(status(), #{})} node_name(),
{"status", mk(status(), #{})}
]. ].
desc(bridges) -> desc(bridges) ->

View File

@ -27,15 +27,19 @@ start_link() ->
supervisor:start_link({local, ?SERVER}, ?MODULE, []). supervisor:start_link({local, ?SERVER}, ?MODULE, []).
init([]) -> init([]) ->
SupFlags = #{strategy => one_for_one, SupFlags = #{
intensity => 10, strategy => one_for_one,
period => 10}, intensity => 10,
period => 10
},
ChildSpecs = [ ChildSpecs = [
#{id => emqx_bridge_monitor, #{
start => {emqx_bridge_monitor, start_link, []}, id => emqx_bridge_monitor,
restart => permanent, start => {emqx_bridge_monitor, start_link, []},
type => worker, restart => permanent,
modules => [emqx_bridge_monitor]} type => worker,
modules => [emqx_bridge_monitor]
}
], ],
{ok, {SupFlags, ChildSpecs}}. {ok, {SupFlags, ChildSpecs}}.

View File

@ -18,13 +18,14 @@
-behaviour(emqx_bpapi). -behaviour(emqx_bpapi).
-export([ introduced_in/0 -export([
introduced_in/0,
, list_bridges/1 list_bridges/1,
, lookup_from_all_nodes/3 lookup_from_all_nodes/3,
, restart_bridges_to_all_nodes/3 restart_bridges_to_all_nodes/3,
, stop_bridges_to_all_nodes/3 stop_bridges_to_all_nodes/3
]). ]).
-include_lib("emqx/include/bpapi.hrl"). -include_lib("emqx/include/bpapi.hrl").
@ -40,19 +41,34 @@ list_bridges(Node) ->
-type key() :: atom() | binary() | [byte()]. -type key() :: atom() | binary() | [byte()].
-spec restart_bridges_to_all_nodes([node()], key(), key()) -> -spec restart_bridges_to_all_nodes([node()], key(), key()) ->
emqx_rpc:erpc_multicall(). emqx_rpc:erpc_multicall().
restart_bridges_to_all_nodes(Nodes, BridgeType, BridgeName) -> restart_bridges_to_all_nodes(Nodes, BridgeType, BridgeName) ->
erpc:multicall(Nodes, emqx_bridge, restart, erpc:multicall(
[BridgeType, BridgeName], ?TIMEOUT). Nodes,
emqx_bridge,
restart,
[BridgeType, BridgeName],
?TIMEOUT
).
-spec stop_bridges_to_all_nodes([node()], key(), key()) -> -spec stop_bridges_to_all_nodes([node()], key(), key()) ->
emqx_rpc:erpc_multicall(). emqx_rpc:erpc_multicall().
stop_bridges_to_all_nodes(Nodes, BridgeType, BridgeName) -> stop_bridges_to_all_nodes(Nodes, BridgeType, BridgeName) ->
erpc:multicall(Nodes, emqx_bridge, stop, erpc:multicall(
[BridgeType, BridgeName], ?TIMEOUT). Nodes,
emqx_bridge,
stop,
[BridgeType, BridgeName],
?TIMEOUT
).
-spec lookup_from_all_nodes([node()], key(), key()) -> -spec lookup_from_all_nodes([node()], key(), key()) ->
emqx_rpc:erpc_multicall(). emqx_rpc:erpc_multicall().
lookup_from_all_nodes(Nodes, BridgeType, BridgeName) -> lookup_from_all_nodes(Nodes, BridgeType, BridgeName) ->
erpc:multicall(Nodes, emqx_bridge_api, lookup_from_local_node, erpc:multicall(
[BridgeType, BridgeName], ?TIMEOUT). Nodes,
emqx_bridge_api,
lookup_from_local_node,
[BridgeType, BridgeName],
?TIMEOUT
).

View File

@ -23,7 +23,7 @@
-include_lib("snabbkaffe/include/snabbkaffe.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl").
all() -> all() ->
emqx_common_test_helpers:all(?MODULE). emqx_common_test_helpers:all(?MODULE).
init_per_suite(Config) -> init_per_suite(Config) ->
%% to avoid inter-suite dependencies %% to avoid inter-suite dependencies
@ -32,8 +32,12 @@ init_per_suite(Config) ->
Config. Config.
end_per_suite(_Config) -> end_per_suite(_Config) ->
emqx_common_test_helpers:stop_apps([emqx, emqx_bridge, emqx_common_test_helpers:stop_apps([
emqx_resource, emqx_connector]). emqx,
emqx_bridge,
emqx_resource,
emqx_connector
]).
init_per_testcase(t_get_basic_usage_info_1, Config) -> init_per_testcase(t_get_basic_usage_info_1, Config) ->
setup_fake_telemetry_data(), setup_fake_telemetry_data(),
@ -43,13 +47,15 @@ init_per_testcase(_TestCase, Config) ->
end_per_testcase(t_get_basic_usage_info_1, _Config) -> end_per_testcase(t_get_basic_usage_info_1, _Config) ->
lists:foreach( lists:foreach(
fun({BridgeType, BridgeName}) -> fun({BridgeType, BridgeName}) ->
ok = emqx_bridge:remove(BridgeType, BridgeName) ok = emqx_bridge:remove(BridgeType, BridgeName)
end, end,
[ {http, <<"basic_usage_info_http">>} [
, {http, <<"basic_usage_info_http_disabled">>} {http, <<"basic_usage_info_http">>},
, {mqtt, <<"basic_usage_info_mqtt">>} {http, <<"basic_usage_info_http_disabled">>},
]), {mqtt, <<"basic_usage_info_mqtt">>}
]
),
ok = emqx_config:delete_override_conf_files(), ok = emqx_config:delete_override_conf_files(),
ok = emqx_config:put([bridges], #{}), ok = emqx_config:put([bridges], #{}),
ok = emqx_config:put_raw([bridges], #{}), ok = emqx_config:put_raw([bridges], #{}),
@ -59,53 +65,68 @@ end_per_testcase(_TestCase, _Config) ->
t_get_basic_usage_info_0(_Config) -> t_get_basic_usage_info_0(_Config) ->
?assertEqual( ?assertEqual(
#{ num_bridges => 0 #{
, count_by_type => #{} num_bridges => 0,
count_by_type => #{}
}, },
emqx_bridge:get_basic_usage_info()). emqx_bridge:get_basic_usage_info()
).
t_get_basic_usage_info_1(_Config) -> t_get_basic_usage_info_1(_Config) ->
BasicUsageInfo = emqx_bridge:get_basic_usage_info(), BasicUsageInfo = emqx_bridge:get_basic_usage_info(),
?assertEqual( ?assertEqual(
#{ num_bridges => 2 #{
, count_by_type => #{ http => 1 num_bridges => 2,
, mqtt => 1 count_by_type => #{
} http => 1,
mqtt => 1
}
}, },
BasicUsageInfo). BasicUsageInfo
).
setup_fake_telemetry_data() -> setup_fake_telemetry_data() ->
ConnectorConf = ConnectorConf =
#{<<"connectors">> => #{
#{<<"mqtt">> => #{<<"my_mqtt_connector">> => <<"connectors">> =>
#{ server => "127.0.0.1:1883" }}}}, #{
MQTTConfig = #{ connector => <<"mqtt:my_mqtt_connector">> <<"mqtt">> => #{
, enable => true <<"my_mqtt_connector">> =>
, direction => ingress #{server => "127.0.0.1:1883"}
, remote_topic => <<"aws/#">> }
, remote_qos => 1
},
HTTPConfig = #{ url => <<"http://localhost:9901/messages/${topic}">>
, enable => true
, direction => egress
, local_topic => "emqx_http/#"
, method => post
, body => <<"${payload}">>
, headers => #{}
, request_timeout => "15s"
},
Conf =
#{ <<"bridges">> =>
#{ <<"http">> =>
#{ <<"basic_usage_info_http">> => HTTPConfig
, <<"basic_usage_info_http_disabled">> =>
HTTPConfig#{enable => false}
}
, <<"mqtt">> =>
#{ <<"basic_usage_info_mqtt">> => MQTTConfig
}
} }
}, },
MQTTConfig = #{
connector => <<"mqtt:my_mqtt_connector">>,
enable => true,
direction => ingress,
remote_topic => <<"aws/#">>,
remote_qos => 1
},
HTTPConfig = #{
url => <<"http://localhost:9901/messages/${topic}">>,
enable => true,
direction => egress,
local_topic => "emqx_http/#",
method => post,
body => <<"${payload}">>,
headers => #{},
request_timeout => "15s"
},
Conf =
#{
<<"bridges">> =>
#{
<<"http">> =>
#{
<<"basic_usage_info_http">> => HTTPConfig,
<<"basic_usage_info_http_disabled">> =>
HTTPConfig#{enable => false}
},
<<"mqtt">> =>
#{<<"basic_usage_info_mqtt">> => MQTTConfig}
}
},
ok = emqx_common_test_helpers:load_config(emqx_connector_schema, ConnectorConf), ok = emqx_common_test_helpers:load_config(emqx_connector_schema, ConnectorConf),
ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, Conf), ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, Conf),

View File

@ -25,11 +25,15 @@
-define(CONF_DEFAULT, <<"bridges: {}">>). -define(CONF_DEFAULT, <<"bridges: {}">>).
-define(BRIDGE_TYPE, <<"http">>). -define(BRIDGE_TYPE, <<"http">>).
-define(BRIDGE_NAME, <<"test_bridge">>). -define(BRIDGE_NAME, <<"test_bridge">>).
-define(URL(PORT, PATH), list_to_binary( -define(URL(PORT, PATH),
io_lib:format("http://localhost:~s/~s", list_to_binary(
[integer_to_list(PORT), PATH]))). io_lib:format(
-define(HTTP_BRIDGE(URL, TYPE, NAME), "http://localhost:~s/~s",
#{ [integer_to_list(PORT), PATH]
)
)
).
-define(HTTP_BRIDGE(URL, TYPE, NAME), #{
<<"type">> => TYPE, <<"type">> => TYPE,
<<"name">> => NAME, <<"name">> => NAME,
<<"url">> => URL, <<"url">> => URL,
@ -40,7 +44,6 @@
<<"headers">> => #{ <<"headers">> => #{
<<"content-type">> => <<"application/json">> <<"content-type">> => <<"application/json">>
} }
}). }).
all() -> all() ->
@ -50,15 +53,17 @@ groups() ->
[]. [].
suite() -> suite() ->
[{timetrap,{seconds,60}}]. [{timetrap, {seconds, 60}}].
init_per_suite(Config) -> init_per_suite(Config) ->
_ = application:load(emqx_conf), _ = application:load(emqx_conf),
%% some testcases (may from other app) already get emqx_connector started %% some testcases (may from other app) already get emqx_connector started
_ = application:stop(emqx_resource), _ = application:stop(emqx_resource),
_ = application:stop(emqx_connector), _ = application:stop(emqx_connector),
ok = emqx_common_test_helpers:start_apps([emqx_bridge, emqx_dashboard], ok = emqx_common_test_helpers:start_apps(
fun set_special_configs/1), [emqx_bridge, emqx_dashboard],
fun set_special_configs/1
),
ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, ?CONF_DEFAULT), ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, ?CONF_DEFAULT),
Config. Config.
@ -79,9 +84,12 @@ end_per_testcase(_, _Config) ->
ok. ok.
clear_resources() -> clear_resources() ->
lists:foreach(fun(#{type := Type, name := Name}) -> lists:foreach(
fun(#{type := Type, name := Name}) ->
ok = emqx_bridge:remove(Type, Name) ok = emqx_bridge:remove(Type, Name)
end, emqx_bridge:list()). end,
emqx_bridge:list()
).
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
%% HTTP server for testing %% HTTP server for testing
@ -95,12 +103,12 @@ start_http_server(HandleFun) ->
end), end),
receive receive
{port, Port} -> Port {port, Port} -> Port
after after 2000 -> error({timeout, start_http_server})
2000 -> error({timeout, start_http_server})
end. end.
listen_on_random_port() -> listen_on_random_port() ->
Min = 1024, Max = 65000, Min = 1024,
Max = 65000,
Port = rand:uniform(Max - Min) + Min, Port = rand:uniform(Max - Min) + Min,
case gen_tcp:listen(Port, [{active, false}, {reuseaddr, true}, binary]) of case gen_tcp:listen(Port, [{active, false}, {reuseaddr, true}, binary]) of
{ok, Sock} -> {Port, Sock}; {ok, Sock} -> {Port, Sock};
@ -109,16 +117,18 @@ listen_on_random_port() ->
loop(Sock, HandleFun, Parent) -> loop(Sock, HandleFun, Parent) ->
{ok, Conn} = gen_tcp:accept(Sock), {ok, Conn} = gen_tcp:accept(Sock),
Handler = spawn(fun () -> HandleFun(Conn, Parent) end), Handler = spawn(fun() -> HandleFun(Conn, Parent) end),
gen_tcp:controlling_process(Conn, Handler), gen_tcp:controlling_process(Conn, Handler),
loop(Sock, HandleFun, Parent). loop(Sock, HandleFun, Parent).
make_response(CodeStr, Str) -> make_response(CodeStr, Str) ->
B = iolist_to_binary(Str), B = iolist_to_binary(Str),
iolist_to_binary( iolist_to_binary(
io_lib:fwrite( io_lib:fwrite(
"HTTP/1.0 ~s\r\nContent-Type: text/html\r\nContent-Length: ~p\r\n\r\n~s", "HTTP/1.0 ~s\r\nContent-Type: text/html\r\nContent-Length: ~p\r\n\r\n~s",
[CodeStr, size(B), B])). [CodeStr, size(B), B]
)
).
handle_fun_200_ok(Conn, Parent) -> handle_fun_200_ok(Conn, Parent) ->
case gen_tcp:recv(Conn, 0) of case gen_tcp:recv(Conn, 0) of
@ -151,18 +161,22 @@ t_http_crud_apis(_) ->
%% then we add a http bridge, using POST %% then we add a http bridge, using POST
%% POST /bridges/ will create a bridge %% POST /bridges/ will create a bridge
URL1 = ?URL(Port, "path1"), URL1 = ?URL(Port, "path1"),
{ok, 201, Bridge} = request(post, uri(["bridges"]), {ok, 201, Bridge} = request(
?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, ?BRIDGE_NAME)), post,
uri(["bridges"]),
?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, ?BRIDGE_NAME)
),
%ct:pal("---bridge: ~p", [Bridge]), %ct:pal("---bridge: ~p", [Bridge]),
#{ <<"type">> := ?BRIDGE_TYPE #{
, <<"name">> := ?BRIDGE_NAME <<"type">> := ?BRIDGE_TYPE,
, <<"status">> := _ <<"name">> := ?BRIDGE_NAME,
, <<"node_status">> := [_|_] <<"status">> := _,
, <<"metrics">> := _ <<"node_status">> := [_ | _],
, <<"node_metrics">> := [_|_] <<"metrics">> := _,
, <<"url">> := URL1 <<"node_metrics">> := [_ | _],
} = jsx:decode(Bridge), <<"url">> := URL1
} = jsx:decode(Bridge),
BridgeID = emqx_bridge:bridge_id(?BRIDGE_TYPE, ?BRIDGE_NAME), BridgeID = emqx_bridge:bridge_id(?BRIDGE_TYPE, ?BRIDGE_NAME),
%% send an message to emqx and the message should be forwarded to the HTTP server %% send an message to emqx and the message should be forwarded to the HTTP server
@ -170,49 +184,70 @@ t_http_crud_apis(_) ->
emqx:publish(emqx_message:make(<<"emqx_http/1">>, Body)), emqx:publish(emqx_message:make(<<"emqx_http/1">>, Body)),
?assert( ?assert(
receive receive
{http_server, received, #{method := <<"POST">>, path := <<"/path1">>, {http_server, received, #{
body := Body}} -> method := <<"POST">>,
path := <<"/path1">>,
body := Body
}} ->
true; true;
Msg -> Msg ->
ct:pal("error: http got unexpected request: ~p", [Msg]), ct:pal("error: http got unexpected request: ~p", [Msg]),
false false
after 100 -> after 100 ->
false false
end), end
),
%% update the request-path of the bridge %% update the request-path of the bridge
URL2 = ?URL(Port, "path2"), URL2 = ?URL(Port, "path2"),
{ok, 200, Bridge2} = request(put, uri(["bridges", BridgeID]), {ok, 200, Bridge2} = request(
?HTTP_BRIDGE(URL2, ?BRIDGE_TYPE, ?BRIDGE_NAME)), put,
?assertMatch(#{ <<"type">> := ?BRIDGE_TYPE uri(["bridges", BridgeID]),
, <<"name">> := ?BRIDGE_NAME ?HTTP_BRIDGE(URL2, ?BRIDGE_TYPE, ?BRIDGE_NAME)
, <<"status">> := _ ),
, <<"node_status">> := [_|_] ?assertMatch(
, <<"metrics">> := _ #{
, <<"node_metrics">> := [_|_] <<"type">> := ?BRIDGE_TYPE,
, <<"url">> := URL2 <<"name">> := ?BRIDGE_NAME,
}, jsx:decode(Bridge2)), <<"status">> := _,
<<"node_status">> := [_ | _],
<<"metrics">> := _,
<<"node_metrics">> := [_ | _],
<<"url">> := URL2
},
jsx:decode(Bridge2)
),
%% list all bridges again, assert Bridge2 is in it %% list all bridges again, assert Bridge2 is in it
{ok, 200, Bridge2Str} = request(get, uri(["bridges"]), []), {ok, 200, Bridge2Str} = request(get, uri(["bridges"]), []),
?assertMatch([#{ <<"type">> := ?BRIDGE_TYPE ?assertMatch(
, <<"name">> := ?BRIDGE_NAME [
, <<"status">> := _ #{
, <<"node_status">> := [_|_] <<"type">> := ?BRIDGE_TYPE,
, <<"metrics">> := _ <<"name">> := ?BRIDGE_NAME,
, <<"node_metrics">> := [_|_] <<"status">> := _,
, <<"url">> := URL2 <<"node_status">> := [_ | _],
}], jsx:decode(Bridge2Str)), <<"metrics">> := _,
<<"node_metrics">> := [_ | _],
<<"url">> := URL2
}
],
jsx:decode(Bridge2Str)
),
%% get the bridge by id %% get the bridge by id
{ok, 200, Bridge3Str} = request(get, uri(["bridges", BridgeID]), []), {ok, 200, Bridge3Str} = request(get, uri(["bridges", BridgeID]), []),
?assertMatch(#{ <<"type">> := ?BRIDGE_TYPE ?assertMatch(
, <<"name">> := ?BRIDGE_NAME #{
, <<"status">> := _ <<"type">> := ?BRIDGE_TYPE,
, <<"node_status">> := [_|_] <<"name">> := ?BRIDGE_NAME,
, <<"metrics">> := _ <<"status">> := _,
, <<"node_metrics">> := [_|_] <<"node_status">> := [_ | _],
, <<"url">> := URL2 <<"metrics">> := _,
}, jsx:decode(Bridge3Str)), <<"node_metrics">> := [_ | _],
<<"url">> := URL2
},
jsx:decode(Bridge3Str)
),
%% send an message to emqx again, check the path has been changed %% send an message to emqx again, check the path has been changed
emqx:publish(emqx_message:make(<<"emqx_http/1">>, Body)), emqx:publish(emqx_message:make(<<"emqx_http/1">>, Body)),
@ -225,25 +260,35 @@ t_http_crud_apis(_) ->
false false
after 100 -> after 100 ->
false false
end), end
),
%% delete the bridge %% delete the bridge
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), []), {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), []),
{ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
%% update a deleted bridge returns an error %% update a deleted bridge returns an error
{ok, 404, ErrMsg2} = request(put, uri(["bridges", BridgeID]), {ok, 404, ErrMsg2} = request(
?HTTP_BRIDGE(URL2, ?BRIDGE_TYPE, ?BRIDGE_NAME)), put,
uri(["bridges", BridgeID]),
?HTTP_BRIDGE(URL2, ?BRIDGE_TYPE, ?BRIDGE_NAME)
),
?assertMatch( ?assertMatch(
#{ <<"code">> := _ #{
, <<"message">> := <<"bridge not found">> <<"code">> := _,
}, jsx:decode(ErrMsg2)), <<"message">> := <<"bridge not found">>
},
jsx:decode(ErrMsg2)
),
ok. ok.
t_start_stop_bridges(_) -> t_start_stop_bridges(_) ->
lists:foreach(fun(Type) -> lists:foreach(
fun(Type) ->
do_start_stop_bridges(Type) do_start_stop_bridges(Type)
end, [node, cluster]). end,
[node, cluster]
).
do_start_stop_bridges(Type) -> do_start_stop_bridges(Type) ->
%% assert we there's no bridges at first %% assert we there's no bridges at first
@ -251,40 +296,40 @@ do_start_stop_bridges(Type) ->
Port = start_http_server(fun handle_fun_200_ok/2), Port = start_http_server(fun handle_fun_200_ok/2),
URL1 = ?URL(Port, "abc"), URL1 = ?URL(Port, "abc"),
{ok, 201, Bridge} = request(post, uri(["bridges"]), {ok, 201, Bridge} = request(
?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, ?BRIDGE_NAME)), post,
uri(["bridges"]),
?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, ?BRIDGE_NAME)
),
%ct:pal("the bridge ==== ~p", [Bridge]), %ct:pal("the bridge ==== ~p", [Bridge]),
#{ <<"type">> := ?BRIDGE_TYPE #{
, <<"name">> := ?BRIDGE_NAME <<"type">> := ?BRIDGE_TYPE,
, <<"status">> := <<"connected">> <<"name">> := ?BRIDGE_NAME,
, <<"node_status">> := [_|_] <<"status">> := <<"connected">>,
, <<"metrics">> := _ <<"node_status">> := [_ | _],
, <<"node_metrics">> := [_|_] <<"metrics">> := _,
, <<"url">> := URL1 <<"node_metrics">> := [_ | _],
} = jsx:decode(Bridge), <<"url">> := URL1
} = jsx:decode(Bridge),
BridgeID = emqx_bridge:bridge_id(?BRIDGE_TYPE, ?BRIDGE_NAME), BridgeID = emqx_bridge:bridge_id(?BRIDGE_TYPE, ?BRIDGE_NAME),
%% stop it %% stop it
{ok, 200, <<>>} = request(post, operation_path(Type, stop, BridgeID), <<"">>), {ok, 200, <<>>} = request(post, operation_path(Type, stop, BridgeID), <<"">>),
{ok, 200, Bridge2} = request(get, uri(["bridges", BridgeID]), []), {ok, 200, Bridge2} = request(get, uri(["bridges", BridgeID]), []),
?assertMatch(#{ <<"status">> := <<"disconnected">> ?assertMatch(#{<<"status">> := <<"disconnected">>}, jsx:decode(Bridge2)),
}, jsx:decode(Bridge2)),
%% start again %% start again
{ok, 200, <<>>} = request(post, operation_path(Type, restart, BridgeID), <<"">>), {ok, 200, <<>>} = request(post, operation_path(Type, restart, BridgeID), <<"">>),
{ok, 200, Bridge3} = request(get, uri(["bridges", BridgeID]), []), {ok, 200, Bridge3} = request(get, uri(["bridges", BridgeID]), []),
?assertMatch(#{ <<"status">> := <<"connected">> ?assertMatch(#{<<"status">> := <<"connected">>}, jsx:decode(Bridge3)),
}, jsx:decode(Bridge3)),
%% restart an already started bridge %% restart an already started bridge
{ok, 200, <<>>} = request(post, operation_path(Type, restart, BridgeID), <<"">>), {ok, 200, <<>>} = request(post, operation_path(Type, restart, BridgeID), <<"">>),
{ok, 200, Bridge3} = request(get, uri(["bridges", BridgeID]), []), {ok, 200, Bridge3} = request(get, uri(["bridges", BridgeID]), []),
?assertMatch(#{ <<"status">> := <<"connected">> ?assertMatch(#{<<"status">> := <<"connected">>}, jsx:decode(Bridge3)),
}, jsx:decode(Bridge3)),
%% stop it again %% stop it again
{ok, 200, <<>>} = request(post, operation_path(Type, stop, BridgeID), <<"">>), {ok, 200, <<>>} = request(post, operation_path(Type, stop, BridgeID), <<"">>),
%% restart a stopped bridge %% restart a stopped bridge
{ok, 200, <<>>} = request(post, operation_path(Type, restart, BridgeID), <<"">>), {ok, 200, <<>>} = request(post, operation_path(Type, restart, BridgeID), <<"">>),
{ok, 200, Bridge4} = request(get, uri(["bridges", BridgeID]), []), {ok, 200, Bridge4} = request(get, uri(["bridges", BridgeID]), []),
?assertMatch(#{ <<"status">> := <<"connected">> ?assertMatch(#{<<"status">> := <<"connected">>}, jsx:decode(Bridge4)),
}, jsx:decode(Bridge4)),
%% delete the bridge %% delete the bridge
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), []), {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), []),
{ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []). {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []).
@ -295,33 +340,34 @@ t_enable_disable_bridges(_) ->
Port = start_http_server(fun handle_fun_200_ok/2), Port = start_http_server(fun handle_fun_200_ok/2),
URL1 = ?URL(Port, "abc"), URL1 = ?URL(Port, "abc"),
{ok, 201, Bridge} = request(post, uri(["bridges"]), {ok, 201, Bridge} = request(
?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, ?BRIDGE_NAME)), post,
uri(["bridges"]),
?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, ?BRIDGE_NAME)
),
%ct:pal("the bridge ==== ~p", [Bridge]), %ct:pal("the bridge ==== ~p", [Bridge]),
#{ <<"type">> := ?BRIDGE_TYPE #{
, <<"name">> := ?BRIDGE_NAME <<"type">> := ?BRIDGE_TYPE,
, <<"status">> := <<"connected">> <<"name">> := ?BRIDGE_NAME,
, <<"node_status">> := [_|_] <<"status">> := <<"connected">>,
, <<"metrics">> := _ <<"node_status">> := [_ | _],
, <<"node_metrics">> := [_|_] <<"metrics">> := _,
, <<"url">> := URL1 <<"node_metrics">> := [_ | _],
} = jsx:decode(Bridge), <<"url">> := URL1
} = jsx:decode(Bridge),
BridgeID = emqx_bridge:bridge_id(?BRIDGE_TYPE, ?BRIDGE_NAME), BridgeID = emqx_bridge:bridge_id(?BRIDGE_TYPE, ?BRIDGE_NAME),
%% disable it %% disable it
{ok, 200, <<>>} = request(post, operation_path(cluster, disable, BridgeID), <<"">>), {ok, 200, <<>>} = request(post, operation_path(cluster, disable, BridgeID), <<"">>),
{ok, 200, Bridge2} = request(get, uri(["bridges", BridgeID]), []), {ok, 200, Bridge2} = request(get, uri(["bridges", BridgeID]), []),
?assertMatch(#{ <<"status">> := <<"disconnected">> ?assertMatch(#{<<"status">> := <<"disconnected">>}, jsx:decode(Bridge2)),
}, jsx:decode(Bridge2)),
%% enable again %% enable again
{ok, 200, <<>>} = request(post, operation_path(cluster, enable, BridgeID), <<"">>), {ok, 200, <<>>} = request(post, operation_path(cluster, enable, BridgeID), <<"">>),
{ok, 200, Bridge3} = request(get, uri(["bridges", BridgeID]), []), {ok, 200, Bridge3} = request(get, uri(["bridges", BridgeID]), []),
?assertMatch(#{ <<"status">> := <<"connected">> ?assertMatch(#{<<"status">> := <<"connected">>}, jsx:decode(Bridge3)),
}, jsx:decode(Bridge3)),
%% enable an already started bridge %% enable an already started bridge
{ok, 200, <<>>} = request(post, operation_path(cluster, enable, BridgeID), <<"">>), {ok, 200, <<>>} = request(post, operation_path(cluster, enable, BridgeID), <<"">>),
{ok, 200, Bridge3} = request(get, uri(["bridges", BridgeID]), []), {ok, 200, Bridge3} = request(get, uri(["bridges", BridgeID]), []),
?assertMatch(#{ <<"status">> := <<"connected">> ?assertMatch(#{<<"status">> := <<"connected">>}, jsx:decode(Bridge3)),
}, jsx:decode(Bridge3)),
%% disable it again %% disable it again
{ok, 200, <<>>} = request(post, operation_path(cluster, disable, BridgeID), <<"">>), {ok, 200, <<>>} = request(post, operation_path(cluster, disable, BridgeID), <<"">>),
@ -331,8 +377,7 @@ t_enable_disable_bridges(_) ->
%% enable a stopped bridge %% enable a stopped bridge
{ok, 200, <<>>} = request(post, operation_path(cluster, enable, BridgeID), <<"">>), {ok, 200, <<>>} = request(post, operation_path(cluster, enable, BridgeID), <<"">>),
{ok, 200, Bridge4} = request(get, uri(["bridges", BridgeID]), []), {ok, 200, Bridge4} = request(get, uri(["bridges", BridgeID]), []),
?assertMatch(#{ <<"status">> := <<"connected">> ?assertMatch(#{<<"status">> := <<"connected">>}, jsx:decode(Bridge4)),
}, jsx:decode(Bridge4)),
%% delete the bridge %% delete the bridge
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), []), {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), []),
{ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []). {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []).
@ -343,17 +388,21 @@ t_reset_bridges(_) ->
Port = start_http_server(fun handle_fun_200_ok/2), Port = start_http_server(fun handle_fun_200_ok/2),
URL1 = ?URL(Port, "abc"), URL1 = ?URL(Port, "abc"),
{ok, 201, Bridge} = request(post, uri(["bridges"]), {ok, 201, Bridge} = request(
?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, ?BRIDGE_NAME)), post,
uri(["bridges"]),
?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, ?BRIDGE_NAME)
),
%ct:pal("the bridge ==== ~p", [Bridge]), %ct:pal("the bridge ==== ~p", [Bridge]),
#{ <<"type">> := ?BRIDGE_TYPE #{
, <<"name">> := ?BRIDGE_NAME <<"type">> := ?BRIDGE_TYPE,
, <<"status">> := <<"connected">> <<"name">> := ?BRIDGE_NAME,
, <<"node_status">> := [_|_] <<"status">> := <<"connected">>,
, <<"metrics">> := _ <<"node_status">> := [_ | _],
, <<"node_metrics">> := [_|_] <<"metrics">> := _,
, <<"url">> := URL1 <<"node_metrics">> := [_ | _],
} = jsx:decode(Bridge), <<"url">> := URL1
} = jsx:decode(Bridge),
BridgeID = emqx_bridge:bridge_id(?BRIDGE_TYPE, ?BRIDGE_NAME), BridgeID = emqx_bridge:bridge_id(?BRIDGE_TYPE, ?BRIDGE_NAME),
{ok, 200, <<"Reset success">>} = request(put, uri(["bridges", BridgeID, "reset_metrics"]), []), {ok, 200, <<"Reset success">>} = request(put, uri(["bridges", BridgeID, "reset_metrics"]), []),

View File

@ -941,26 +941,15 @@ until the RPC connection is considered lost."""
log_file_handlers { log_file_handlers {
desc { desc {
en: """Key-value list of file-based log handlers.""" en: """Key-value list of file-based log handlers."""
zh: """基于文件的日志处理进程的键值列表。""" zh: """需要持久化到文件的日志处理进程列表。默认只有 default 一个处理进程。"""
} }
label { label {
en: "Log Handlers Key Val List" en: "Log Handlers Key Val List"
zh: "日志处理进程键值列表" zh: "日志 Handler 列表"
} }
} }
log_error_logger { common_handler_enable {
desc {
en: """Deprecated."""
zh: """该配置已弃用。"""
}
label {
en: "Deprecate"
zh: "配置已弃用"
}
}
console_handler_enable {
desc { desc {
en: """Enable this log handler.""" en: """Enable this log handler."""
zh: """启用此日志处理进程。""" zh: """启用此日志处理进程。"""
@ -971,21 +960,23 @@ until the RPC connection is considered lost."""
} }
} }
console_handler_level { common_handler_level {
desc { desc {
en: """Global log level. This includes the primary log level and all log handlers.""" en: """Global log level. This includes the primary log level and all log handlers."""
zh: """全局日志级别。 这包括主日志级别和所有日志处理进程。""" zh: """设置日志级别。 默认为warning。"""
} }
label { label {
en: "Global Log Level" en: "Global Log Level"
zh: "全局日志级别" zh: "日志级别"
} }
} }
console_handler_time_offset { common_handler_time_offset {
desc { desc {
en: """The time offset to be used when formatting the timestamp.""" en: """The time offset to be used when formatting the timestamp."""
zh: """格式化时间戳时,使用的时间偏移量。""" zh: """日志格式中的时间戳使用的时间偏移量。默认使用系统时区system当为utc为无时间偏移量
为具体的N(1-24)数字时,则代表时间偏移量+N。
"""
} }
label { label {
en: "Time Offset" en: "Time Offset"
@ -993,10 +984,10 @@ until the RPC connection is considered lost."""
} }
} }
console_handler_chars_limit { common_handler_chars_limit {
desc { desc {
en: """Set the maximum length of a single log message. If this length is exceeded, the log message will be truncated.""" en: """Set the maximum length of a single log message. If this length is exceeded, the log message will be truncated."""
zh: """设置单个日志消息的最大长度。 如果超过此长度,则日志消息将被截断。""" zh: """设置单个日志消息的最大长度。 如果超过此长度,则日志消息将被截断。最小可设置的长度为100。"""
} }
label { label {
en: "Single Log Max Length" en: "Single Log Max Length"
@ -1004,10 +995,10 @@ until the RPC connection is considered lost."""
} }
} }
console_handler_formatter { common_handler_formatter {
desc { desc {
en: """Choose log format. <code>text</code> for free text, and <code>json</code> for structured logging.""" en: """Choose log format. <code>text</code> for free text, and <code>json</code> for structured logging."""
zh: """选择日志格式。 <code>text</code> 用于自由文本,<code>json</code> 用于结构化日志记录。""" zh: """选择日志格式。 <code>text</code> 用于文本,<code>json</code> 用于结构化日志记录。"""
} }
label { label {
en: "Log Format" en: "Log Format"
@ -1015,10 +1006,10 @@ until the RPC connection is considered lost."""
} }
} }
console_handler_single_line { common_handler_single_line {
desc { desc {
en: """Print logs in a single line if set to true. Otherwise, log messages may span multiple lines.""" en: """Print logs in a single line if set to true. Otherwise, log messages may span multiple lines."""
zh: """如果设置为 true在一行中打印日志。 否则,日志消息可能跨越多行。""" zh: """如果设置为 true单行打印日志。 否则,日志消息可能跨越多行。"""
} }
label { label {
en: "Single Line Mode" en: "Single Line Mode"
@ -1026,10 +1017,24 @@ until the RPC connection is considered lost."""
} }
} }
console_handler_sync_mode_qlen { common_handler_sync_mode_qlen {
desc { desc {
en: """As long as the number of buffered log events is lower than this value, all log events are handled asynchronously.""" en: """As long as the number of buffered log events is lower than this value,
zh: """只要缓冲的日志事件的数量低于这个值,所有的日志事件都会被异步处理。""" all log events are handled asynchronously. This means that the client process sending the log event,
by calling a log function in the Logger API, does not wait for a response from the handler
but continues executing immediately after the event is sent.
It is not affected by the time it takes the handler to print the event to the log device.
If the message queue grows larger than this value,
the handler starts handling log events synchronously instead,
meaning that the client process sending the event must wait for a response.
When the handler reduces the message queue to a level below the sync_mode_qlen threshold,
asynchronous operation is resumed.
"""
zh: """只要缓冲的日志事件的数量低于这个值,所有的日志事件都会被异步处理。
这意味着,日志落地速度不会影响正常的业务进程,因为它们不需要等待日志处理进程的响应。
如果消息队列的增长超过了这个值,处理程序开始同步处理日志事件。也就是说,发送事件的客户进程必须等待响应。
当处理程序将消息队列减少到低于sync_mode_qlen阈值的水平时异步操作就会恢复。
默认为100条信息当等待的日志事件大于100条时就开始同步处理日志。"""
} }
label { label {
en: "Sync Mode Max Log Events" en: "Sync Mode Max Log Events"
@ -1037,10 +1042,17 @@ until the RPC connection is considered lost."""
} }
} }
console_handler_drop_mode_qlen { common_handler_drop_mode_qlen {
desc { desc {
en: """When the number of buffered log events is larger than this value, the new log events are dropped.</br>When drop mode is activated or deactivated, a message is printed in the logs.""" en: """When the number of buffered log events is larger than this value, the new log events are dropped.
zh: """当缓冲的日志事件数大于此值时,新的日志事件将被丢弃。</br>启用或停用丢弃模式时,会在日志中打印一条消息。""" When drop mode is activated or deactivated, a message is printed in the logs."""
zh: """当缓冲的日志事件数大于此值时,新的日志事件将被丢弃。起到过载保护的功能。
为了使过载保护算法正常工作必须要:<code> sync_mode_qlen =< drop_mode_qlen =< flush_qlen <\code> 且 drop_mode_qlen > 1
要禁用某些模式,请执行以下操作。
- 如果sync_mode_qlen被设置为0所有的日志事件都被同步处理。也就是说异步日志被禁用。
- 如果sync_mode_qlen被设置为与drop_mode_qlen相同的值同步模式被禁用。也就是说处理程序总是以异步模式运行除非调用drop或flushing。
- 如果drop_mode_qlen被设置为与flush_qlen相同的值则drop模式被禁用永远不会发生。
"""
} }
label { label {
en: "Drop Mode Max Log Events" en: "Drop Mode Max Log Events"
@ -1048,10 +1060,11 @@ until the RPC connection is considered lost."""
} }
} }
console_handler_flush_qlen { common_handler_flush_qlen {
desc { desc {
en: """If the number of buffered log events grows larger than this threshold, a flush (delete) operation takes place. To flush events, the handler discards the buffered log messages without logging.""" en: """If the number of buffered log events grows larger than this threshold, a flush (delete) operation takes place. To flush events, the handler discards the buffered log messages without logging."""
zh: """如果缓冲日志事件的数量增长大于此阈值,则会发生刷新(删除)操作。 为了完成刷新事件,处理进程丢弃缓冲的日志消息。""" zh: """如果缓冲日志事件的数量增长大于此阈值,则会发生刷新(删除)操作。 日志处理进程会丢弃缓冲的日志消息。
来缓解自身不会由于内存瀑涨而影响其它业务进程。日志内容会提醒有多少事件被删除。"""
} }
label { label {
en: "Flush Threshold" en: "Flush Threshold"
@ -1059,14 +1072,14 @@ until the RPC connection is considered lost."""
} }
} }
console_handler_supervisor_reports { common_handler_supervisor_reports {
desc { desc {
en: """Type of supervisor reports that are logged. en: """Type of supervisor reports that are logged.
- `error`: only log errors in the Erlang processes. - `error`: only log errors in the Erlang processes.
- `progress`: log process startup.""" - `progress`: log process startup."""
zh: """ supervisor 报告的类型。 zh: """ supervisor 报告的类型。默认为 error 类型。
- `error`:仅记录 Erlang 进程中的错误。 - `error`:仅记录 Erlang 进程中的错误。
- `progress`:记录进程启动。""" - `progress`除了 error 信息外,还需要记录进程启动的详细信息。"""
} }
label { label {
en: "Report Type" en: "Report Type"
@ -1074,7 +1087,7 @@ until the RPC connection is considered lost."""
} }
} }
console_handler_max_depth { common_handler_max_depth {
desc { desc {
en: """Maximum depth for Erlang term log formatting and Erlang process message queue inspection.""" en: """Maximum depth for Erlang term log formatting and Erlang process message queue inspection."""
zh: """Erlang 内部格式日志格式化和 Erlang 进程消息队列检查的最大深度。""" zh: """Erlang 内部格式日志格式化和 Erlang 进程消息队列检查的最大深度。"""
@ -1088,7 +1101,7 @@ until the RPC connection is considered lost."""
log_file_handler_file { log_file_handler_file {
desc { desc {
en: """Name the log file.""" en: """Name the log file."""
zh: """日志文件名字。""" zh: """日志文件路径及名字。"""
} }
label { label {
en: "Log File Name" en: "Log File Name"
@ -1099,7 +1112,9 @@ until the RPC connection is considered lost."""
log_file_handler_max_size { log_file_handler_max_size {
desc { desc {
en: """This parameter controls log file rotation. The value `infinity` means the log file will grow indefinitely, otherwise the log file will be rotated once it reaches `max_size` in bytes.""" en: """This parameter controls log file rotation. The value `infinity` means the log file will grow indefinitely, otherwise the log file will be rotated once it reaches `max_size` in bytes."""
zh: """此参数控制日志文件轮换。 `infinity` 意味着日志文件将无限增长,否则日志文件将在达到 `max_size`(以字节为单位)时进行轮换。""" zh: """此参数控制日志文件轮换。 `infinity` 意味着日志文件将无限增长,否则日志文件将在达到 `max_size`(以字节为单位)时进行轮换。
与 rotation count配合使用。如果 counter 为 10则是10个文件轮换。
"""
} }
label { label {
en: "Rotation Size" en: "Rotation Size"
@ -1107,128 +1122,14 @@ until the RPC connection is considered lost."""
} }
} }
log_file_handler_enable { log_error_logger {
desc { desc {
en: """Enable this log handler.""" en: """Keep error_logger silent."""
zh: """启用此日志处理进程。""" zh: """让 error_logger 日志处理进程关闭,防止一条异常信息被记录多次。"""
} }
label { label {
en: "Enable Log Handler" en: "error_logger"
zh: "启用此日志处理进程" zh: "error_logger"
}
}
log_file_handler_level {
desc {
en: """Global log level. This includes the primary log level and all log handlers."""
zh: """全局日志级别。 这包括主日志级别和所有日志处理进程。"""
}
label {
en: "Global Level"
zh: "全局日志级别"
}
}
log_file_handler_time_offset {
desc {
en: """The time offset to be used when formatting the timestamp."""
zh: """格式化时间戳时要使用的时间偏移量。"""
}
label {
en: "Time Offset"
zh: "时间偏移"
}
}
log_file_handler_chars_limit {
desc {
en: """Set the maximum length of a single log message. If this length is exceeded, the log message will be truncated."""
zh: """设置单个日志消息的最大长度。 如果超过此长度,则日志消息将被截断。"""
}
label {
en: "Single Log Max Length"
zh: "单个日志消息最大长度"
}
}
log_file_handler_formatter {
desc {
en: """Choose log format. <code>text</code> for free text, and <code>json</code> for structured logging."""
zh: """选择日志格式。 <code>text</code> 用于自由文本,<code>json</code> 用于结构化日志记录。"""
}
label {
en: "Log Format"
zh: "日志格式"
}
}
log_file_handler_single_line {
desc {
en: """Print logs in a single line if set to true. Otherwise, log messages may span multiple lines."""
zh: """如果设置为 true则在一行中打印日志。 否则,日志消息可能跨越多行。"""
}
label {
en: "Single Line Mode"
zh: "单行模式"
}
}
log_file_handler_sync_mode_qlen {
desc {
en: """As long as the number of buffered log events is lower than this value, all log events are handled asynchronously."""
zh: """只要缓冲的日志事件的数量低于这个值,所有的日志事件都会被异步处理。"""
}
label {
en: "Sync Mode Max Log Events"
zh: "异步模式最大事件数"
}
}
log_file_handler_drop_mode_qlen {
desc {
en: """When the number of buffered log events is larger than this value, the new log events are dropped.</br>When drop mode is activated or deactivated, a message is printed in the logs."""
zh: """当缓冲的日志事件数大于此值时,新的日志事件将被丢弃。</br>启用或停用丢弃模式时,会在日志中打印一条消息。"""
}
label {
en: "Drop Mode Max Log Events"
zh: "缓存最大日志事件数"
}
}
log_file_handler_flush_qlen {
desc {
en: """If the number of buffered log events grows larger than this threshold, a flush (delete) operation takes place. To flush events, the handler discards the buffered log messages without logging."""
zh: """如果缓冲日志事件的数量增长大于此阈值,则会发生刷新(删除)操作。 为了完成刷新事件,处理进程丢弃缓冲的日志消息。"""
}
label {
en: "Flush Threshold"
zh: "刷新阈值"
}
}
log_file_handler_supervisor_reports {
desc {
en: """Type of supervisor reports that are logged.
- `error`: only log errors in the Erlang processes.
- `progress`: log process startup."""
zh: """ supervisor 报告的类型。
- `error`:仅记录 Erlang 进程中的错误。
- `progress`:记录进程启动。"""
}
label {
en: "Report Type"
zh: "报告类型"
}
}
log_file_handler_max_depth {
desc {
en: """Maximum depth for Erlang term log formatting and Erlang process message queue inspection."""
zh: """Erlang 内部格式日志格式化和 Erlang 进程消息队列检查的最大深度。"""
}
label {
en: "Max Depth"
zh: "最大深度"
} }
} }
@ -1257,11 +1158,11 @@ until the RPC connection is considered lost."""
log_overload_kill_enable { log_overload_kill_enable {
desc { desc {
en: """Enable log handler overload kill feature.""" en: """Enable log handler overload kill feature."""
zh: """启用日志处理进程过载终止功能。""" zh: """日志处理进程过载时为保护自己节点其它的业务能正常,强制杀死日志处理进程。"""
} }
label { label {
en: "Log Handler Overload Kill" en: "Log Handler Overload Kill"
zh: "日志处理进程过载终止" zh: "日志处理进程过载保护"
} }
} }
@ -1290,22 +1191,22 @@ until the RPC connection is considered lost."""
log_overload_kill_restart_after { log_overload_kill_restart_after {
desc { desc {
en: """If the handler is terminated, it restarts automatically after a delay specified in milliseconds. The value `infinity` prevents restarts.""" en: """If the handler is terminated, it restarts automatically after a delay specified in milliseconds. The value `infinity` prevents restarts."""
zh: """如果处理进程终止,它会在以毫秒为单位指定的延迟后自动重新启动。 `infinity` 防止重新启动。""" zh: """如果处理进程终止,它会在以指定的时间后后自动重新启动。 `infinity` 不自动重启。"""
} }
label { label {
en: "Handler Restart Delay" en: "Handler Restart Timer"
zh: "处理进程重启延迟" zh: "处理进程重启机制"
} }
} }
log_burst_limit_enable { log_burst_limit_enable {
desc { desc {
en: """Enable log burst control feature.""" en: """Enable log burst control feature."""
zh: """启用日志突发控制功能。""" zh: """启用日志限流保护机制。"""
} }
label { label {
en: "Enable Burst" en: "Enable Burst"
zh: "启用日志突发控制" zh: "日志限流保护"
} }
} }
@ -1509,10 +1410,10 @@ By default, the logs are stored in `./log` directory (for installation from zip
This section of the configuration controls the number of files kept for each log handler. This section of the configuration controls the number of files kept for each log handler.
""" """
zh: zh:
""" """
默认情况下,日志存储在 `./log` 目录(用于从 zip 文件安装)或 `/var/log/emqx`(用于二进制安装)。</br> 默认情况下,日志存储在 `./log` 目录(用于从 zip 文件安装)或 `/var/log/emqx`(用于二进制安装)。</br>
这部分配置,控制每个日志处理进程保留的文件数量。 这部分配置,控制每个日志处理进程保留的文件数量。
""" """
} }
label { label {
en: "Log Rotation" en: "Log Rotation"
@ -1568,5 +1469,4 @@ Log burst limit feature can temporarily disable logging to avoid these issues.""
zh: "授权" zh: "授权"
} }
} }
} }

View File

@ -42,6 +42,8 @@
code_change/3 code_change/3
]). ]).
-export([get_tables_status/0]).
-export_type([txn_id/0, succeed_num/0, multicall_return/1, multicall_return/0]). -export_type([txn_id/0, succeed_num/0, multicall_return/1, multicall_return/0]).
-ifdef(TEST). -ifdef(TEST).
@ -172,6 +174,29 @@ get_node_tnx_id(Node) ->
[#cluster_rpc_commit{tnx_id = TnxId}] -> TnxId [#cluster_rpc_commit{tnx_id = TnxId}] -> TnxId
end. end.
%% Checks whether the Mnesia tables used by this module are waiting to
%% be loaded and from where.
-spec get_tables_status() -> #{atom() => {waiting, [node()]} | {disc | network, node()}}.
get_tables_status() ->
maps:from_list([
{Tab, do_get_tables_status(Tab)}
|| Tab <- [?CLUSTER_COMMIT, ?CLUSTER_MFA]
]).
do_get_tables_status(Tab) ->
Props = mnesia:table_info(Tab, all),
TabNodes = proplists:get_value(all_nodes, Props),
KnownDown = mnesia_recover:get_mnesia_downs(),
LocalNode = node(),
case proplists:get_value(load_node, Props) of
unknown ->
{waiting, TabNodes -- [LocalNode | KnownDown]};
LocalNode ->
{disc, LocalNode};
Node ->
{network, Node}
end.
%% Regardless of what MFA is returned, consider it a success), %% Regardless of what MFA is returned, consider it a success),
%% then move to the next tnxId. %% then move to the next tnxId.
%% if the next TnxId failed, need call the function again to skip. %% if the next TnxId failed, need call the function again to skip.

View File

@ -24,6 +24,8 @@
-include_lib("emqx/include/logger.hrl"). -include_lib("emqx/include/logger.hrl").
-include("emqx_conf.hrl"). -include("emqx_conf.hrl").
-define(DEFAULT_INIT_TXN_ID, -1).
start(_StartType, _StartArgs) -> start(_StartType, _StartArgs) ->
init_conf(), init_conf(),
emqx_conf_sup:start_link(). emqx_conf_sup:start_link().
@ -31,19 +33,48 @@ start(_StartType, _StartArgs) ->
stop(_State) -> stop(_State) ->
ok. ok.
%% internal functions get_override_config_file() ->
Node = node(),
case emqx_app:get_init_config_load_done() of
false ->
{error, #{node => Node, msg => "init_conf_load_not_done"}};
true ->
case erlang:whereis(emqx_config_handler) of
undefined ->
{error, #{node => Node, msg => "emqx_config_handler_not_ready"}};
_ ->
Fun = fun() ->
TnxId = emqx_cluster_rpc:get_node_tnx_id(Node),
WallClock = erlang:statistics(wall_clock),
Conf = emqx_config_handler:get_raw_cluster_override_conf(),
#{wall_clock => WallClock, conf => Conf, tnx_id => TnxId, node => Node}
end,
case mria:ro_transaction(?CLUSTER_RPC_SHARD, Fun) of
{atomic, Res} -> {ok, Res};
{aborted, Reason} -> {error, #{node => Node, msg => Reason}}
end
end
end.
%% ------------------------------------------------------------------------------
%% Internal functions
%% ------------------------------------------------------------------------------
init_conf() -> init_conf() ->
{ok, TnxId} = copy_override_conf_from_core_node(), {ok, TnxId} = copy_override_conf_from_core_node(),
emqx_app:set_init_tnx_id(TnxId), emqx_app:set_init_tnx_id(TnxId),
emqx_config:init_load(emqx_conf:schema_module()), emqx_config:init_load(emqx_conf:schema_module()),
emqx_app:set_init_config_load_done(). emqx_app:set_init_config_load_done().
cluster_nodes() ->
maps:get(running_nodes, ekka_cluster:info()) -- [node()].
copy_override_conf_from_core_node() -> copy_override_conf_from_core_node() ->
case mria_mnesia:running_nodes() -- [node()] of case cluster_nodes() of
%% The first core nodes is self. %% The first core nodes is self.
[] -> [] ->
?SLOG(debug, #{msg => "skip_copy_overide_conf_from_core_node"}), ?SLOG(debug, #{msg => "skip_copy_overide_conf_from_core_node"}),
{ok, -1}; {ok, ?DEFAULT_INIT_TXN_ID};
Nodes -> Nodes ->
{Results, Failed} = emqx_conf_proto_v1:get_override_config_file(Nodes), {Results, Failed} = emqx_conf_proto_v1:get_override_config_file(Nodes),
{Ready, NotReady0} = lists:partition(fun(Res) -> element(1, Res) =:= ok end, Results), {Ready, NotReady0} = lists:partition(fun(Res) -> element(1, Res) =:= ok end, Results),
@ -64,12 +95,39 @@ copy_override_conf_from_core_node() ->
[] -> [] ->
%% Other core nodes running but no one replicated it successfully. %% Other core nodes running but no one replicated it successfully.
?SLOG(error, #{ ?SLOG(error, #{
msg => "copy_overide_conf_from_core_node_failed", msg => "copy_override_conf_from_core_node_failed",
nodes => Nodes, nodes => Nodes,
failed => Failed, failed => Failed,
not_ready => NotReady not_ready => NotReady
}), }),
{error, "core node not ready"};
case should_proceed_with_boot() of
true ->
%% Act as if this node is alone, so it can
%% finish the boot sequence and load the
%% config for other nodes to copy it.
?SLOG(info, #{
msg => "skip_copy_overide_conf_from_core_node",
loading_from_disk => true,
nodes => Nodes,
failed => Failed,
not_ready => NotReady
}),
{ok, ?DEFAULT_INIT_TXN_ID};
false ->
%% retry in some time
Jitter = rand:uniform(2_000),
Timeout = 10_000 + Jitter,
?SLOG(info, #{
msg => "copy_overide_conf_from_core_node_retry",
timeout => Timeout,
nodes => Nodes,
failed => Failed,
not_ready => NotReady
}),
timer:sleep(Timeout),
copy_override_conf_from_core_node()
end;
_ -> _ ->
SortFun = fun( SortFun = fun(
{ok, #{wall_clock := W1}}, {ok, #{wall_clock := W1}},
@ -79,7 +137,10 @@ copy_override_conf_from_core_node() ->
end, end,
[{ok, Info} | _] = lists:sort(SortFun, Ready), [{ok, Info} | _] = lists:sort(SortFun, Ready),
#{node := Node, conf := RawOverrideConf, tnx_id := TnxId} = Info, #{node := Node, conf := RawOverrideConf, tnx_id := TnxId} = Info,
Msg = #{msg => "copy_overide_conf_from_core_node_success", node => Node}, Msg = #{
msg => "copy_overide_conf_from_core_node_success",
node => Node
},
?SLOG(debug, Msg), ?SLOG(debug, Msg),
ok = emqx_config:save_to_override_conf( ok = emqx_config:save_to_override_conf(
RawOverrideConf, RawOverrideConf,
@ -89,28 +150,16 @@ copy_override_conf_from_core_node() ->
end end
end. end.
get_override_config_file() -> should_proceed_with_boot() ->
Node = node(), TablesStatus = emqx_cluster_rpc:get_tables_status(),
Role = mria_rlog:role(), LocalNode = node(),
case emqx_app:get_init_config_load_done() of case maps:get(?CLUSTER_COMMIT, TablesStatus) of
false -> {disc, LocalNode} ->
{error, #{node => Node, msg => "init_conf_load_not_done"}}; %% Loading locally; let this node finish its boot sequence
true when Role =:= core -> %% so others can copy the config from this one.
case erlang:whereis(emqx_config_handler) of true;
undefined -> _ ->
{error, #{node => Node, msg => "emqx_config_handler_not_ready"}}; %% Loading from another node or still waiting for nodes to
_ -> %% be up. Try again.
Fun = fun() -> false
TnxId = emqx_cluster_rpc:get_node_tnx_id(Node),
WallClock = erlang:statistics(wall_clock),
Conf = emqx_config_handler:get_raw_cluster_override_conf(),
#{wall_clock => WallClock, conf => Conf, tnx_id => TnxId, node => Node}
end,
case mria:ro_transaction(?CLUSTER_RPC_SHARD, Fun) of
{atomic, Res} -> {ok, Res};
{aborted, Reason} -> {error, #{node => Node, msg => Reason}}
end
end;
true when Role =:= replicant ->
{ignore, #{node => Node}}
end. end.

View File

@ -800,6 +800,7 @@ fields("log") ->
#{ #{
mapping => "kernel.error_logger", mapping => "kernel.error_logger",
default => silent, default => silent,
'readOnly' => true,
desc => ?DESC("log_error_logger") desc => ?DESC("log_error_logger")
} }
)} )}
@ -811,7 +812,10 @@ fields("log_file_handler") ->
{"file", {"file",
sc( sc(
file(), file(),
#{desc => ?DESC("log_file_handler_file")} #{
desc => ?DESC("log_file_handler_file"),
validator => fun file_location/1
}
)}, )},
{"rotation", {"rotation",
sc( sc(
@ -822,7 +826,7 @@ fields("log_file_handler") ->
sc( sc(
hoconsc:union([infinity, emqx_schema:bytesize()]), hoconsc:union([infinity, emqx_schema:bytesize()]),
#{ #{
default => "10MB", default => "50MB",
desc => ?DESC("log_file_handler_max_size") desc => ?DESC("log_file_handler_max_size")
} }
)} )}
@ -866,7 +870,7 @@ fields("log_overload_kill") ->
)}, )},
{"qlen", {"qlen",
sc( sc(
integer(), pos_integer(),
#{ #{
default => 20000, default => 20000,
desc => ?DESC("log_overload_kill_qlen") desc => ?DESC("log_overload_kill_qlen")
@ -874,7 +878,7 @@ fields("log_overload_kill") ->
)}, )},
{"restart_after", {"restart_after",
sc( sc(
hoconsc:union([emqx_schema:duration(), infinity]), hoconsc:union([emqx_schema:duration_ms(), infinity]),
#{ #{
default => "5s", default => "5s",
desc => ?DESC("log_overload_kill_restart_after") desc => ?DESC("log_overload_kill_restart_after")
@ -893,7 +897,7 @@ fields("log_burst_limit") ->
)}, )},
{"max_count", {"max_count",
sc( sc(
integer(), pos_integer(),
#{ #{
default => 10000, default => 10000,
desc => ?DESC("log_burst_limit_max_count") desc => ?DESC("log_burst_limit_max_count")
@ -1073,7 +1077,7 @@ log_handler_common_confs() ->
boolean(), boolean(),
#{ #{
default => false, default => false,
desc => ?DESC("log_file_handler_enable") desc => ?DESC("common_handler_enable")
} }
)}, )},
{"level", {"level",
@ -1081,7 +1085,7 @@ log_handler_common_confs() ->
log_level(), log_level(),
#{ #{
default => warning, default => warning,
desc => ?DESC("log_file_handler_level") desc => ?DESC("common_handler_level")
} }
)}, )},
{"time_offset", {"time_offset",
@ -1089,15 +1093,15 @@ log_handler_common_confs() ->
string(), string(),
#{ #{
default => "system", default => "system",
desc => ?DESC("log_file_handler_time_offset") desc => ?DESC("common_handler_time_offset")
} }
)}, )},
{"chars_limit", {"chars_limit",
sc( sc(
hoconsc:union([unlimited, range(1, inf)]), hoconsc:union([unlimited, range(100, inf)]),
#{ #{
default => unlimited, default => unlimited,
desc => ?DESC("log_file_handler_chars_limit") desc => ?DESC("common_handler_chars_limit")
} }
)}, )},
{"formatter", {"formatter",
@ -1105,7 +1109,7 @@ log_handler_common_confs() ->
hoconsc:enum([text, json]), hoconsc:enum([text, json]),
#{ #{
default => text, default => text,
desc => ?DESC("log_file_handler_formatter") desc => ?DESC("common_handler_formatter")
} }
)}, )},
{"single_line", {"single_line",
@ -1113,31 +1117,31 @@ log_handler_common_confs() ->
boolean(), boolean(),
#{ #{
default => true, default => true,
desc => ?DESC("log_file_handler_single_line") desc => ?DESC("common_handler_single_line")
} }
)}, )},
{"sync_mode_qlen", {"sync_mode_qlen",
sc( sc(
integer(), non_neg_integer(),
#{ #{
default => 100, default => 100,
desc => ?DESC("log_file_handler_sync_mode_qlen") desc => ?DESC("common_handler_sync_mode_qlen")
} }
)}, )},
{"drop_mode_qlen", {"drop_mode_qlen",
sc( sc(
integer(), pos_integer(),
#{ #{
default => 3000, default => 3000,
desc => ?DESC("log_file_handler_drop_mode_qlen") desc => ?DESC("common_handler_drop_mode_qlen")
} }
)}, )},
{"flush_qlen", {"flush_qlen",
sc( sc(
integer(), pos_integer(),
#{ #{
default => 8000, default => 8000,
desc => ?DESC("log_file_handler_flush_qlen") desc => ?DESC("common_handler_flush_qlen")
} }
)}, )},
{"overload_kill", sc(ref("log_overload_kill"), #{})}, {"overload_kill", sc(ref("log_overload_kill"), #{})},
@ -1147,7 +1151,7 @@ log_handler_common_confs() ->
hoconsc:enum([error, progress]), hoconsc:enum([error, progress]),
#{ #{
default => error, default => error,
desc => ?DESC("log_file_handler_supervisor_reports") desc => ?DESC("common_handler_supervisor_reports")
} }
)}, )},
{"max_depth", {"max_depth",
@ -1155,7 +1159,7 @@ log_handler_common_confs() ->
hoconsc:union([unlimited, non_neg_integer()]), hoconsc:union([unlimited, non_neg_integer()]),
#{ #{
default => 100, default => 100,
desc => ?DESC("log_file_handler_max_depth") desc => ?DESC("common_handler_max_depth")
} }
)} )}
]. ].
@ -1328,3 +1332,15 @@ emqx_schema_high_prio_roots() ->
#{desc => ?DESC(authorization)} #{desc => ?DESC(authorization)}
)}, )},
lists:keyreplace("authorization", 1, Roots, Authz). lists:keyreplace("authorization", 1, Roots, Authz).
%% Whitelist of characters allowed in a log file path: slash,
%% underscore, alphanumerics, dot and dash. The previous literal
%% spelled this with bogus escapes ("\_", "\.", "\-") that resolve to
%% the very same characters but provoke compiler warnings and obscure
%% the pattern; the runtime value is unchanged.
-define(VALID_FILE, "^[/_a-zA-Z0-9.-]*$").

%% HOCON validator for the log handler `file' field.
%% Returns `ok' when File consists only of whitelisted path characters,
%% `{error, Reason}' otherwise.
file_location(File) ->
    Error = {error, "Invalid file name: " ++ ?VALID_FILE},
    try
        case re:run(File, ?VALID_FILE) of
            %% A character outside the whitelist breaks the anchored match.
            nomatch -> Error;
            _ -> ok
        end
    catch
        %% `re:run' raises `badarg' when File is not iodata (e.g. an
        %% atom); report "invalid" instead of crashing config validation.
        _:_ -> Error
    end.

View File

@ -0,0 +1,222 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_conf_app_SUITE).
-compile(export_all).
-compile(nowarn_export_all).
-include("emqx_conf.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
%% Common Test entry point: discover and run every `t_*' testcase
%% exported from this module.
all() ->
    emqx_common_test_helpers:all(?MODULE).
%% Verify that the cluster-override configuration survives a full
%% cluster restart: boot a 3-core cluster, stop every node, boot them
%% again in the same order, and check that each node completes its
%% initial config load both times.
t_copy_conf_override_on_restarts(_Config) ->
    ct:timetrap({seconds, 120}),
    snabbkaffe:fix_ct_logging(),
    Cluster = cluster([core, core, core]),
    try
        %% 1. Start all nodes
        Nodes = start_cluster(Cluster),
        [join_cluster(Spec) || Spec <- Cluster],
        assert_config_load_done(Nodes),
        %% 2. Stop each in order.
        lists:foreach(fun stop_slave/1, Nodes),
        %% 3. Restart nodes in the same order. This should not
        %% crash and eventually all nodes should be ready.
        start_cluster_async(Cluster),
        %% NOTE(review): fixed sleep to let the async boots settle;
        %% potentially flaky on slow CI — a readiness poll would be
        %% more robust. Confirm before tightening the timetrap.
        timer:sleep(15_000),
        assert_config_load_done(Nodes),
        ok
    after
        %% Always tear the peer VMs down, even when an assertion failed.
        teardown_cluster(Cluster)
    end.
%%------------------------------------------------------------------------------
%% Helper functions
%%------------------------------------------------------------------------------
%% Assert, on every node, that the initial configuration load has
%% finished (via `emqx_app:get_init_config_load_done/0' over RPC).
assert_config_load_done(Nodes) ->
    CheckNode = fun(Node) ->
        IsDone = rpc:call(Node, emqx_app, get_init_config_load_done, []),
        ?assert(IsDone, #{node => Node})
    end,
    lists:foreach(CheckNode, Nodes).
%% Boot every node in the spec list sequentially; returns the node names.
start_cluster(Specs) ->
    lists:map(fun start_slave/1, Specs).
%% Kick off every node boot in its own linked process, pausing 7s
%% between launches so the boots are staggered. Does not wait for the
%% nodes to be ready.
start_cluster_async(Specs) ->
    LaunchOne = fun(Spec) ->
        spawn_link(fun() -> start_slave(Spec) end),
        timer:sleep(7_000)
    end,
    lists:map(LaunchOne, Specs).
%% Build full node specs (name, node id, env, role, number) from a list
%% of short specs such as `[core, core, replicant]', with no extra
%% common environment.
cluster(Specs) ->
    cluster(Specs, []).

%% Same as `cluster/1' but prepends CommonEnv to every node's
%% application environment. Each node gets a deterministic gen_rpc TCP
%% port (9000 + its 1-based position) and, when there is at least one
%% core node, a `join_to' key pointing at the first core node.
cluster(Specs0, CommonEnv) ->
    %% Number the specs 1..N; the number seeds node names and ports.
    Specs1 = lists:zip(Specs0, lists:seq(1, length(Specs0))),
    Specs = expand_node_specs(Specs1, CommonEnv),
    CoreNodes = [node_id(Name) || {{core, Name, _}, _} <- Specs],
    %% Assign grpc ports:
    BaseGenRpcPort = 9000,
    GenRpcPorts = maps:from_list([
        {node_id(Name), {tcp, BaseGenRpcPort + Num}}
     || {{_, Name, _}, Num} <- Specs
    ]),
    %% Set the default node of the cluster:
    JoinTo =
        case CoreNodes of
            [First | _] -> #{join_to => First};
            _ -> #{}
        end,
    [
        JoinTo#{
            name => Name,
            node => node_id(Name),
            env => [
                {mria, core_nodes, CoreNodes},
                {mria, node_role, Role},
                {gen_rpc, tcp_server_port, BaseGenRpcPort + Number},
                {gen_rpc, client_config_per_node, {internal, GenRpcPorts}}
                | Env
            ],
            number => Number,
            role => Role
        }
     || {{Role, Name, Env}, Number} <- Specs
    ].
%% Start `emqx_conf' (and deps) on Node. The env handler disables the
%% emqx boot modules so no listeners etc. come up in the peer VM.
start_apps(Node) ->
    Handler = fun(App) ->
        case App of
            emqx ->
                application:set_env(emqx, boot_modules, []),
                ok;
            _ ->
                ok
        end
    end,
    %% Match on {Node, ok} so a badmatch report names the failing node.
    {Node, ok} =
        {Node, rpc:call(Node, emqx_common_test_helpers, start_apps, [[emqx_conf], Handler])},
    ok.
%% Stop the `emqx_conf' application on Node; crashes unless the remote
%% helper reports `ok'.
stop_apps(Node) ->
    ok = rpc:call(Node, emqx_common_test_helpers, stop_apps, [[emqx_conf]]).
%% Ask Node to join the cluster at JoinTo. Both `ok' and `ignore'
%% (already joined / single node) count as success; anything else
%% crashes with a descriptive reason.
join_cluster(#{node := Node, join_to := JoinTo}) ->
    Result = rpc:call(Node, ekka, join, [JoinTo]),
    case Result of
        Ok when Ok =:= ok; Ok =:= ignore ->
            ok;
        Err ->
            error({failed_to_join_cluster, #{node => Node, error => Err}})
    end.
%% Boot a peer VM for Node with `ct_slave', set its application
%% environment, and start `emqx_conf' on it. Returns the node name.
start_slave(#{node := Node, env := Env}) ->
    %% We want VMs to only occupy a single core
    CommonBeamOpts =
        "+S 1:1 " ++
            %% redirect logs to the master test node
            " -master " ++ atom_to_list(node()) ++ " ",
    %% We use `ct_slave' instead of `slave' because, in
    %% `t_copy_conf_override_on_restarts', the nodes might be stuck
    %% some time during boot up, and `slave' has a hard-coded boot
    %% timeout.
    {ok, Node} = ct_slave:start(
        Node,
        [
            {erl_flags, CommonBeamOpts ++ ebin_path()},
            {kill_if_fail, true},
            {monitor_master, true},
            {init_timeout, 30_000},
            {startup_timeout, 30_000}
        ]
    ),
    %% Load apps before setting the environment variables to avoid
    %% overriding the environment during app start:
    [rpc:call(Node, application, load, [App]) || App <- [gen_rpc]],
    %% Disable gen_rpc listener by default:
    Env1 = [{gen_rpc, tcp_server_port, false} | Env],
    setenv(Node, Env1),
    ok = start_apps(Node),
    Node.
%% Normalize every numbered short spec to the canonical
%% {Role, Name, Env} form, generating node names where absent and
%% prepending CommonEnv to any per-node environment.
expand_node_specs(Specs, CommonEnv) ->
    Normalize = fun
        (core, Num) ->
            {core, gen_node_name(Num), CommonEnv};
        (replicant, Num) ->
            {replicant, gen_node_name(Num), CommonEnv};
        ({Role, Name}, _Num) when is_atom(Name) ->
            {Role, Name, CommonEnv};
        ({Role, Env}, Num) when is_list(Env) ->
            {Role, gen_node_name(Num), CommonEnv ++ Env};
        ({Role, Name, Env}, _Num) ->
            {Role, Name, CommonEnv ++ Env}
    end,
    [{Normalize(Spec, Num), Num} || {Spec, Num} <- Specs].
%% Apply each {App, Key, Val} tuple as an application env var on the
%% remote Node; returns the list of per-call RPC results.
setenv(Node, Env) ->
    lists:map(
        fun({App, Key, Val}) ->
            rpc:call(Node, application, set_env, [App, Key, Val])
        end,
        Env
    ).
%% Best-effort cluster teardown: stop `emqx_conf' on every node, then
%% shut the peer VMs down. RPC results are deliberately ignored so
%% teardown proceeds even if a node is already dead.
teardown_cluster(Specs) ->
    Nodes = [I || #{node := I} <- Specs],
    %% BUG FIX: `emqx_common_test_helpers:stop_apps/1' takes a *list*
    %% of applications (see the sibling `stop_apps/1' helper above,
    %% which passes `[[emqx_conf]]`); the previous args list
    %% `[emqx_conf]' handed it the bare atom, so the RPC failed
    %% silently and the apps were never stopped.
    _ = [rpc:call(I, emqx_common_test_helpers, stop_apps, [[emqx_conf]]) || I <- Nodes],
    _ = [stop_slave(I) || I <- Nodes],
    ok.
%% Shut down a peer node previously started with `ct_slave:start/2'.
stop_slave(Node) ->
    ct_slave:stop(Node).
%% Host part of the current (master) node name, i.e. the text after
%% the "@" in `node()'.
host() ->
    NodeStr = atom_to_list(node()),
    [_Name, Host] = string:split(NodeStr, "@"),
    Host.
%% Fully-qualified node name for a short Name, e.g. 'n1@127.0.0.1'.
%% (Test-only helper, so creating atoms dynamically is acceptable.)
node_id(Name) ->
    FullName = io_lib:format("~s@~s", [Name, host()]),
    list_to_atom(lists:flatten(FullName)).
%% Deterministic short node name for the N-th spec: n1, n2, ...
gen_node_name(N) ->
    list_to_atom(lists:concat([n, N])).
%% erl flag string "-pa Dir1 Dir2 ..." adding our code paths to a peer VM.
ebin_path() ->
    lists:flatten(lists:join(" ", ["-pa" | paths()])).
%% Code-path directories worth exporting to the peer VMs: everything
%% except the OTP installation itself and the rebar3 plugins dir.
paths() ->
    LibDir = code:lib_dir(),
    IsWanted = fun(Path) ->
        string:prefix(Path, LibDir) =:= nomatch andalso
            string:str(Path, "_build/default/plugins") =:= 0
    end,
    lists:filter(IsWanted, code:get_path()).

View File

@ -24,13 +24,16 @@
-define(REDIS_DEFAULT_PORT, 6379). -define(REDIS_DEFAULT_PORT, 6379).
-define(PGSQL_DEFAULT_PORT, 5432). -define(PGSQL_DEFAULT_PORT, 5432).
-define(SERVERS_DESC, "A Node list for Cluster to connect to. The nodes should be separated with commas, such as: `Node[,Node].` -define(SERVERS_DESC,
For each Node should be: "). "A Node list for Cluster to connect to. The nodes should be separated with commas, such as: `Node[,Node].`\n"
"For each Node should be: "
).
-define(SERVER_DESC(TYPE, DEFAULT_PORT), " -define(SERVER_DESC(TYPE, DEFAULT_PORT),
The IPv4 or IPv6 address or the hostname to connect to.</br> "\n"
A host entry has the following form: `Host[:Port]`.</br> "The IPv4 or IPv6 address or the hostname to connect to.</br>\n"
The " ++ TYPE ++ " default port " ++ DEFAULT_PORT ++ " is used if `[:Port]` is not specified." "A host entry has the following form: `Host[:Port]`.</br>\n"
"The " ++ TYPE ++ " default port " ++ DEFAULT_PORT ++ " is used if `[:Port]` is not specified."
). ).
-define(THROW_ERROR(Str), erlang:throw({error, Str})). -define(THROW_ERROR(Str), erlang:throw({error, Str})).

View File

@ -1,30 +1,32 @@
%% -*- mode: erlang -*- %% -*- mode: erlang -*-
{erl_opts, [ {erl_opts, [
nowarn_unused_import, nowarn_unused_import,
debug_info debug_info
]}. ]}.
{deps, [ {deps, [
{emqx, {path, "../emqx"}}, {emqx, {path, "../emqx"}},
{eldap2, {git, "https://github.com/emqx/eldap2", {tag, "v0.2.2"}}}, {eldap2, {git, "https://github.com/emqx/eldap2", {tag, "v0.2.2"}}},
{mysql, {git, "https://github.com/emqx/mysql-otp", {tag, "1.7.1"}}}, {mysql, {git, "https://github.com/emqx/mysql-otp", {tag, "1.7.1"}}},
{epgsql, {git, "https://github.com/emqx/epgsql", {tag, "4.7-emqx.2"}}}, {epgsql, {git, "https://github.com/emqx/epgsql", {tag, "4.7-emqx.2"}}},
%% NOTE: mind poolboy version when updating mongodb-erlang version %% NOTE: mind poolboy version when updating mongodb-erlang version
{mongodb, {git,"https://github.com/emqx/mongodb-erlang", {tag, "v3.0.13"}}}, {mongodb, {git, "https://github.com/emqx/mongodb-erlang", {tag, "v3.0.13"}}},
%% NOTE: mind poolboy version when updating eredis_cluster version %% NOTE: mind poolboy version when updating eredis_cluster version
{eredis_cluster, {git, "https://github.com/emqx/eredis_cluster", {tag, "0.7.1"}}}, {eredis_cluster, {git, "https://github.com/emqx/eredis_cluster", {tag, "0.7.1"}}},
%% mongodb-erlang uses a special fork https://github.com/comtihon/poolboy.git %% mongodb-erlang uses a special fork https://github.com/comtihon/poolboy.git
%% (which has overflow_ttl feature added). %% (which has overflow_ttl feature added).
%% However, it references `{branch, "master}` (commit 9c06a9a on 2021-04-07). %% However, it references `{branch, "master}` (commit 9c06a9a on 2021-04-07).
%% By accident, We have always been using the upstream fork due to %% By accident, We have always been using the upstream fork due to
%% eredis_cluster's dependency getting resolved earlier. %% eredis_cluster's dependency getting resolved earlier.
%% Here we pin 1.5.2 to avoid surprises in the future. %% Here we pin 1.5.2 to avoid surprises in the future.
{poolboy, {git, "https://github.com/emqx/poolboy.git", {tag, "1.5.2"}}}, {poolboy, {git, "https://github.com/emqx/poolboy.git", {tag, "1.5.2"}}},
{emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.5.0"}}} {emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.5.0"}}}
]}. ]}.
{shell, [ {shell, [
% {config, "config/sys.config"}, % {config, "config/sys.config"},
{apps, [emqx_connector]} {apps, [emqx_connector]}
]}. ]}.
{project_plugins, [erlfmt]}.

View File

@ -1,27 +1,27 @@
%% -*- mode: erlang -*- %% -*- mode: erlang -*-
{application, emqx_connector, {application, emqx_connector, [
[{description, "An OTP application"}, {description, "An OTP application"},
{vsn, "0.1.1"}, {vsn, "0.1.1"},
{registered, []}, {registered, []},
{mod, {emqx_connector_app, []}}, {mod, {emqx_connector_app, []}},
{applications, {applications, [
[kernel, kernel,
stdlib, stdlib,
ecpool, ecpool,
emqx_resource, emqx_resource,
eredis_cluster, eredis_cluster,
eredis, eredis,
epgsql, epgsql,
eldap2, eldap2,
mysql, mysql,
mongodb, mongodb,
ehttpc, ehttpc,
emqx, emqx,
emqtt emqtt
]}, ]},
{env,[]}, {env, []},
{modules, []}, {modules, []},
{licenses, ["Apache 2.0"]}, {licenses, ["Apache 2.0"]},
{links, []} {links, []}
]}. ]}.

View File

@ -15,47 +15,68 @@
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
-module(emqx_connector). -module(emqx_connector).
-export([config_key_path/0]). -export([
config_key_path/0,
pre_config_update/3,
post_config_update/5
]).
-export([ parse_connector_id/1 -export([
, connector_id/2 parse_connector_id/1,
]). connector_id/2
]).
-export([ list_raw/0 -export([
, lookup_raw/1 list_raw/0,
, lookup_raw/2 lookup_raw/1,
, create_dry_run/2 lookup_raw/2,
, update/2 create_dry_run/2,
, update/3 update/2,
, delete/1 update/3,
, delete/2 delete/1,
]). delete/2
]).
-export([ post_config_update/5
]).
config_key_path() -> config_key_path() ->
[connectors]. [connectors].
pre_config_update(Path, Conf, _OldConfig) when is_map(Conf) ->
case emqx_connector_ssl:convert_certs(filename:join(Path), Conf) of
{error, Reason} ->
{error, Reason};
{ok, ConfNew} ->
{ok, ConfNew}
end.
-dialyzer([{nowarn_function, [post_config_update/5]}, error_handling]). -dialyzer([{nowarn_function, [post_config_update/5]}, error_handling]).
post_config_update([connectors, Type, Name], '$remove', _, _OldConf, _AppEnvs) -> post_config_update([connectors, Type, Name] = Path, '$remove', _, OldConf, _AppEnvs) ->
ConnId = connector_id(Type, Name), ConnId = connector_id(Type, Name),
try foreach_linked_bridges(ConnId, fun(#{type := BType, name := BName}) -> try
foreach_linked_bridges(ConnId, fun(#{type := BType, name := BName}) ->
throw({dependency_bridges_exist, emqx_bridge:bridge_id(BType, BName)}) throw({dependency_bridges_exist, emqx_bridge:bridge_id(BType, BName)})
end) end),
catch throw:Error -> {error, Error} _ = emqx_connector_ssl:clear_certs(filename:join(Path), OldConf)
catch
throw:Error -> {error, Error}
end; end;
post_config_update([connectors, Type, Name], _Req, NewConf, OldConf, _AppEnvs) -> post_config_update([connectors, Type, Name], _Req, NewConf, OldConf, _AppEnvs) ->
ConnId = connector_id(Type, Name), ConnId = connector_id(Type, Name),
foreach_linked_bridges(ConnId, foreach_linked_bridges(
ConnId,
fun(#{type := BType, name := BName}) -> fun(#{type := BType, name := BName}) ->
BridgeConf = emqx:get_config([bridges, BType, BName]), BridgeConf = emqx:get_config([bridges, BType, BName]),
case emqx_bridge:update(BType, BName, {BridgeConf#{connector => OldConf}, case
BridgeConf#{connector => NewConf}}) of emqx_bridge:update(
BType,
BName,
{BridgeConf#{connector => OldConf}, BridgeConf#{connector => NewConf}}
)
of
ok -> ok; ok -> ok;
{error, Reason} -> error({update_bridge_error, Reason}) {error, Reason} -> error({update_bridge_error, Reason})
end end
end). end
).
connector_id(Type0, Name0) -> connector_id(Type0, Name0) ->
Type = bin(Type0), Type = bin(Type0),
@ -70,13 +91,22 @@ parse_connector_id(ConnectorId) ->
list_raw() -> list_raw() ->
case get_raw_connector_conf() of case get_raw_connector_conf() of
not_found -> []; not_found ->
[];
Config -> Config ->
lists:foldl(fun({Type, NameAndConf}, Connectors) -> lists:foldl(
lists:foldl(fun({Name, RawConf}, Acc) -> fun({Type, NameAndConf}, Connectors) ->
[RawConf#{<<"type">> => Type, <<"name">> => Name} | Acc] lists:foldl(
end, Connectors, maps:to_list(NameAndConf)) fun({Name, RawConf}, Acc) ->
end, [], maps:to_list(Config)) [RawConf#{<<"type">> => Type, <<"name">> => Name} | Acc]
end,
Connectors,
maps:to_list(NameAndConf)
)
end,
[],
maps:to_list(Config)
)
end. end.
lookup_raw(Id) when is_binary(Id) -> lookup_raw(Id) when is_binary(Id) ->
@ -86,7 +116,8 @@ lookup_raw(Id) when is_binary(Id) ->
lookup_raw(Type, Name) -> lookup_raw(Type, Name) ->
Path = [bin(P) || P <- [Type, Name]], Path = [bin(P) || P <- [Type, Name]],
case get_raw_connector_conf() of case get_raw_connector_conf() of
not_found -> {error, not_found}; not_found ->
{error, not_found};
Conf -> Conf ->
case emqx_map_lib:deep_get(Path, Conf, not_found) of case emqx_map_lib:deep_get(Path, Conf, not_found) of
not_found -> {error, not_found}; not_found -> {error, not_found};
@ -113,7 +144,8 @@ delete(Type, Name) ->
get_raw_connector_conf() -> get_raw_connector_conf() ->
case emqx:get_raw_config(config_key_path(), not_found) of case emqx:get_raw_config(config_key_path(), not_found) of
not_found -> not_found; not_found ->
not_found;
RawConf -> RawConf ->
#{<<"connectors">> := Conf} = #{<<"connectors">> := Conf} =
emqx_config:fill_defaults(#{<<"connectors">> => RawConf}), emqx_config:fill_defaults(#{<<"connectors">> => RawConf}),
@ -125,8 +157,12 @@ bin(Str) when is_list(Str) -> list_to_binary(Str);
bin(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8). bin(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8).
foreach_linked_bridges(ConnId, Do) -> foreach_linked_bridges(ConnId, Do) ->
lists:foreach(fun lists:foreach(
(#{raw_config := #{<<"connector">> := ConnId0}} = Bridge) when ConnId0 == ConnId -> fun
Do(Bridge); (#{raw_config := #{<<"connector">> := ConnId0}} = Bridge) when ConnId0 == ConnId ->
(_) -> ok Do(Bridge);
end, emqx_bridge:list()). (_) ->
ok
end,
emqx_bridge:list()
).

View File

@ -40,9 +40,14 @@
EXPR EXPR
catch catch
error:{invalid_connector_id, Id0} -> error:{invalid_connector_id, Id0} ->
{400, #{code => 'INVALID_ID', message => <<"invalid_connector_id: ", Id0/binary, {400, #{
". Connector Ids must be of format {type}:{name}">>}} code => 'INVALID_ID',
end). message =>
<<"invalid_connector_id: ", Id0/binary,
". Connector Ids must be of format {type}:{name}">>
}}
end
).
namespace() -> "connector". namespace() -> "connector".
@ -58,21 +63,25 @@ error_schema(Codes, Message) when is_binary(Message) ->
put_request_body_schema() -> put_request_body_schema() ->
emqx_dashboard_swagger:schema_with_examples( emqx_dashboard_swagger:schema_with_examples(
emqx_connector_schema:put_request(), connector_info_examples(put)). emqx_connector_schema:put_request(), connector_info_examples(put)
).
post_request_body_schema() -> post_request_body_schema() ->
emqx_dashboard_swagger:schema_with_examples( emqx_dashboard_swagger:schema_with_examples(
emqx_connector_schema:post_request(), connector_info_examples(post)). emqx_connector_schema:post_request(), connector_info_examples(post)
).
get_response_body_schema() -> get_response_body_schema() ->
emqx_dashboard_swagger:schema_with_examples( emqx_dashboard_swagger:schema_with_examples(
emqx_connector_schema:get_response(), connector_info_examples(get)). emqx_connector_schema:get_response(), connector_info_examples(get)
).
connector_info_array_example(Method) -> connector_info_array_example(Method) ->
[Config || #{value := Config} <- maps:values(connector_info_examples(Method))]. [Config || #{value := Config} <- maps:values(connector_info_examples(Method))].
connector_info_examples(Method) -> connector_info_examples(Method) ->
lists:foldl(fun(Type, Acc) -> lists:foldl(
fun(Type, Acc) ->
SType = atom_to_list(Type), SType = atom_to_list(Type),
maps:merge(Acc, #{ maps:merge(Acc, #{
Type => #{ Type => #{
@ -80,11 +89,16 @@ connector_info_examples(Method) ->
value => info_example(Type, Method) value => info_example(Type, Method)
} }
}) })
end, #{}, ?CONN_TYPES). end,
#{},
?CONN_TYPES
).
info_example(Type, Method) -> info_example(Type, Method) ->
maps:merge(info_example_basic(Type), maps:merge(
method_example(Type, Method)). info_example_basic(Type),
method_example(Type, Method)
).
method_example(Type, Method) when Method == get; Method == post -> method_example(Type, Method) when Method == get; Method == post ->
SType = atom_to_list(Type), SType = atom_to_list(Type),
@ -115,11 +129,17 @@ info_example_basic(mqtt) ->
}. }.
param_path_id() -> param_path_id() ->
[{id, mk(binary(), [
#{ in => path {id,
, example => <<"mqtt:my_mqtt_connector">> mk(
, desc => ?DESC("id") binary(),
})}]. #{
in => path,
example => <<"mqtt:my_mqtt_connector">>,
desc => ?DESC("id")
}
)}
].
schema("/connectors_test") -> schema("/connectors_test") ->
#{ #{
@ -135,7 +155,6 @@ schema("/connectors_test") ->
} }
} }
}; };
schema("/connectors") -> schema("/connectors") ->
#{ #{
'operationId' => '/connectors', 'operationId' => '/connectors',
@ -145,8 +164,9 @@ schema("/connectors") ->
summary => <<"List connectors">>, summary => <<"List connectors">>,
responses => #{ responses => #{
200 => emqx_dashboard_swagger:schema_with_example( 200 => emqx_dashboard_swagger:schema_with_example(
array(emqx_connector_schema:get_response()), array(emqx_connector_schema:get_response()),
connector_info_array_example(get)) connector_info_array_example(get)
)
} }
}, },
post => #{ post => #{
@ -160,7 +180,6 @@ schema("/connectors") ->
} }
} }
}; };
schema("/connectors/:id") -> schema("/connectors/:id") ->
#{ #{
'operationId' => '/connectors/:id', 'operationId' => '/connectors/:id',
@ -185,7 +204,8 @@ schema("/connectors/:id") ->
200 => get_response_body_schema(), 200 => get_response_body_schema(),
404 => error_schema(['NOT_FOUND'], "Connector not found"), 404 => error_schema(['NOT_FOUND'], "Connector not found"),
400 => error_schema(['INVALID_ID'], "Bad connector ID") 400 => error_schema(['INVALID_ID'], "Bad connector ID")
}}, }
},
delete => #{ delete => #{
tags => [<<"connectors">>], tags => [<<"connectors">>],
desc => ?DESC("conn_id_delete"), desc => ?DESC("conn_id_delete"),
@ -196,7 +216,8 @@ schema("/connectors/:id") ->
403 => error_schema(['DEPENDENCY_EXISTS'], "Cannot remove dependent connector"), 403 => error_schema(['DEPENDENCY_EXISTS'], "Cannot remove dependent connector"),
404 => error_schema(['NOT_FOUND'], "Delete failed, not found"), 404 => error_schema(['NOT_FOUND'], "Delete failed, not found"),
400 => error_schema(['INVALID_ID'], "Bad connector ID") 400 => error_schema(['INVALID_ID'], "Bad connector ID")
}} }
}
}. }.
'/connectors_test'(post, #{body := #{<<"type">> := ConnType} = Params}) -> '/connectors_test'(post, #{body := #{<<"type">> := ConnType} = Params}) ->
@ -209,67 +230,83 @@ schema("/connectors/:id") ->
'/connectors'(get, _Request) -> '/connectors'(get, _Request) ->
{200, [format_resp(Conn) || Conn <- emqx_connector:list_raw()]}; {200, [format_resp(Conn) || Conn <- emqx_connector:list_raw()]};
'/connectors'(post, #{body := #{<<"type">> := ConnType, <<"name">> := ConnName} = Params}) -> '/connectors'(post, #{body := #{<<"type">> := ConnType, <<"name">> := ConnName} = Params}) ->
case emqx_connector:lookup_raw(ConnType, ConnName) of case emqx_connector:lookup_raw(ConnType, ConnName) of
{ok, _} -> {ok, _} ->
{400, error_msg('ALREADY_EXISTS', <<"connector already exists">>)}; {400, error_msg('ALREADY_EXISTS', <<"connector already exists">>)};
{error, not_found} -> {error, not_found} ->
case emqx_connector:update(ConnType, ConnName, case
filter_out_request_body(Params)) of emqx_connector:update(
ConnType,
ConnName,
filter_out_request_body(Params)
)
of
{ok, #{raw_config := RawConf}} -> {ok, #{raw_config := RawConf}} ->
{201, format_resp(RawConf#{<<"type">> => ConnType, {201,
<<"name">> => ConnName})}; format_resp(RawConf#{
<<"type">> => ConnType,
<<"name">> => ConnName
})};
{error, Error} -> {error, Error} ->
{400, error_msg('BAD_REQUEST', Error)} {400, error_msg('BAD_REQUEST', Error)}
end end
end; end;
'/connectors'(post, _) -> '/connectors'(post, _) ->
{400, error_msg('BAD_REQUEST', <<"missing some required fields: [name, type]">>)}. {400, error_msg('BAD_REQUEST', <<"missing some required fields: [name, type]">>)}.
'/connectors/:id'(get, #{bindings := #{id := Id}}) -> '/connectors/:id'(get, #{bindings := #{id := Id}}) ->
?TRY_PARSE_ID(Id, ?TRY_PARSE_ID(
Id,
case emqx_connector:lookup_raw(ConnType, ConnName) of case emqx_connector:lookup_raw(ConnType, ConnName) of
{ok, Conf} -> {ok, Conf} ->
{200, format_resp(Conf)}; {200, format_resp(Conf)};
{error, not_found} -> {error, not_found} ->
{404, error_msg('NOT_FOUND', <<"connector not found">>)} {404, error_msg('NOT_FOUND', <<"connector not found">>)}
end); end
);
'/connectors/:id'(put, #{bindings := #{id := Id}, body := Params0}) -> '/connectors/:id'(put, #{bindings := #{id := Id}, body := Params0}) ->
Params = filter_out_request_body(Params0), Params = filter_out_request_body(Params0),
?TRY_PARSE_ID(Id, ?TRY_PARSE_ID(
Id,
case emqx_connector:lookup_raw(ConnType, ConnName) of case emqx_connector:lookup_raw(ConnType, ConnName) of
{ok, _} -> {ok, _} ->
case emqx_connector:update(ConnType, ConnName, Params) of case emqx_connector:update(ConnType, ConnName, Params) of
{ok, #{raw_config := RawConf}} -> {ok, #{raw_config := RawConf}} ->
{200, format_resp(RawConf#{<<"type">> => ConnType, {200,
<<"name">> => ConnName})}; format_resp(RawConf#{
<<"type">> => ConnType,
<<"name">> => ConnName
})};
{error, Error} -> {error, Error} ->
{500, error_msg('INTERNAL_ERROR', Error)} {500, error_msg('INTERNAL_ERROR', Error)}
end; end;
{error, not_found} -> {error, not_found} ->
{404, error_msg('NOT_FOUND', <<"connector not found">>)} {404, error_msg('NOT_FOUND', <<"connector not found">>)}
end); end
);
'/connectors/:id'(delete, #{bindings := #{id := Id}}) -> '/connectors/:id'(delete, #{bindings := #{id := Id}}) ->
?TRY_PARSE_ID(Id, ?TRY_PARSE_ID(
Id,
case emqx_connector:lookup_raw(ConnType, ConnName) of case emqx_connector:lookup_raw(ConnType, ConnName) of
{ok, _} -> {ok, _} ->
case emqx_connector:delete(ConnType, ConnName) of case emqx_connector:delete(ConnType, ConnName) of
{ok, _} -> {ok, _} ->
{204}; {204};
{error, {post_config_update, _, {dependency_bridges_exist, BridgeID}}} -> {error, {post_config_update, _, {dependency_bridges_exist, BridgeID}}} ->
{403, error_msg('DEPENDENCY_EXISTS', {403,
<<"Cannot remove the connector as it's in use by a bridge: ", error_msg(
BridgeID/binary>>)}; 'DEPENDENCY_EXISTS',
<<"Cannot remove the connector as it's in use by a bridge: ",
BridgeID/binary>>
)};
{error, Error} -> {error, Error} ->
{500, error_msg('INTERNAL_ERROR', Error)} {500, error_msg('INTERNAL_ERROR', Error)}
end; end;
{error, not_found} -> {error, not_found} ->
{404, error_msg('NOT_FOUND', <<"connector not found">>)} {404, error_msg('NOT_FOUND', <<"connector not found">>)}
end). end
).
error_msg(Code, Msg) when is_binary(Msg) -> error_msg(Code, Msg) when is_binary(Msg) ->
#{code => Code, message => Msg}; #{code => Code, message => Msg};
@ -277,8 +314,11 @@ error_msg(Code, Msg) ->
#{code => Code, message => bin(io_lib:format("~p", [Msg]))}. #{code => Code, message => bin(io_lib:format("~p", [Msg]))}.
format_resp(#{<<"type">> := ConnType, <<"name">> := ConnName} = RawConf) -> format_resp(#{<<"type">> := ConnType, <<"name">> := ConnName} = RawConf) ->
NumOfBridges = length(emqx_bridge:list_bridges_by_connector( NumOfBridges = length(
emqx_connector:connector_id(ConnType, ConnName))), emqx_bridge:list_bridges_by_connector(
emqx_connector:connector_id(ConnType, ConnName)
)
),
RawConf#{ RawConf#{
<<"type">> => ConnType, <<"type">> => ConnType,
<<"name">> => ConnName, <<"name">> => ConnName,

View File

@ -25,32 +25,34 @@
-behaviour(emqx_resource). -behaviour(emqx_resource).
%% callbacks of behaviour emqx_resource %% callbacks of behaviour emqx_resource
-export([ on_start/2 -export([
, on_stop/2 on_start/2,
, on_query/4 on_stop/2,
, on_health_check/2 on_query/4,
]). on_health_check/2
]).
-type url() :: emqx_http_lib:uri_map(). -type url() :: emqx_http_lib:uri_map().
-reflect_type([url/0]). -reflect_type([url/0]).
-typerefl_from_string({url/0, emqx_http_lib, uri_parse}). -typerefl_from_string({url/0, emqx_http_lib, uri_parse}).
-export([ roots/0 -export([
, fields/1 roots/0,
, desc/1 fields/1,
, validations/0 desc/1,
, namespace/0 validations/0,
]). namespace/0
]).
-export([ check_ssl_opts/2 -export([check_ssl_opts/2]).
]).
-type connect_timeout() :: emqx_schema:duration() | infinity. -type connect_timeout() :: emqx_schema:duration() | infinity.
-type pool_type() :: random | hash. -type pool_type() :: random | hash.
-reflect_type([ connect_timeout/0 -reflect_type([
, pool_type/0 connect_timeout/0,
]). pool_type/0
]).
%%===================================================================== %%=====================================================================
%% Hocon schema %% Hocon schema
@ -61,63 +63,96 @@ roots() ->
fields(config). fields(config).
fields(config) -> fields(config) ->
[ {base_url, [
sc(url(), {base_url,
#{ required => true sc(
, validator => fun(#{query := _Query}) -> url(),
#{
required => true,
validator => fun
(#{query := _Query}) ->
{error, "There must be no query in the base_url"}; {error, "There must be no query in the base_url"};
(_) -> ok (_) ->
end ok
, desc => ?DESC("base_url") end,
})} desc => ?DESC("base_url")
, {connect_timeout, }
sc(emqx_schema:duration_ms(), )},
#{ default => "15s" {connect_timeout,
, desc => ?DESC("connect_timeout") sc(
})} emqx_schema:duration_ms(),
, {max_retries, #{
sc(non_neg_integer(), default => "15s",
#{ default => 5 desc => ?DESC("connect_timeout")
, desc => ?DESC("max_retries") }
})} )},
, {retry_interval, {max_retries,
sc(emqx_schema:duration(), sc(
#{ default => "1s" non_neg_integer(),
, desc => ?DESC("retry_interval") #{
})} default => 5,
, {pool_type, desc => ?DESC("max_retries")
sc(pool_type(), }
#{ default => random )},
, desc => ?DESC("pool_type") {retry_interval,
})} sc(
, {pool_size, emqx_schema:duration(),
sc(pos_integer(), #{
#{ default => 8 default => "1s",
, desc => ?DESC("pool_size") desc => ?DESC("retry_interval")
})} }
, {enable_pipelining, )},
sc(boolean(), {pool_type,
#{ default => true sc(
, desc => ?DESC("enable_pipelining") pool_type(),
})} #{
, {request, hoconsc:mk( default => random,
ref("request"), desc => ?DESC("pool_type")
#{ default => undefined }
, required => false )},
, desc => ?DESC("request") {pool_size,
})} sc(
pos_integer(),
#{
default => 8,
desc => ?DESC("pool_size")
}
)},
{enable_pipelining,
sc(
boolean(),
#{
default => true,
desc => ?DESC("enable_pipelining")
}
)},
{request,
hoconsc:mk(
ref("request"),
#{
default => undefined,
required => false,
desc => ?DESC("request")
}
)}
] ++ emqx_connector_schema_lib:ssl_fields(); ] ++ emqx_connector_schema_lib:ssl_fields();
fields("request") -> fields("request") ->
[ {method, hoconsc:mk(hoconsc:enum([post, put, get, delete]), #{required => false, desc => ?DESC("method")})} [
, {path, hoconsc:mk(binary(), #{required => false, desc => ?DESC("path")})} {method,
, {body, hoconsc:mk(binary(), #{required => false, desc => ?DESC("body")})} hoconsc:mk(hoconsc:enum([post, put, get, delete]), #{
, {headers, hoconsc:mk(map(), #{required => false, desc => ?DESC("headers")})} required => false, desc => ?DESC("method")
, {request_timeout, })},
sc(emqx_schema:duration_ms(), {path, hoconsc:mk(binary(), #{required => false, desc => ?DESC("path")})},
#{ required => false {body, hoconsc:mk(binary(), #{required => false, desc => ?DESC("body")})},
, desc => ?DESC("request_timeout") {headers, hoconsc:mk(map(), #{required => false, desc => ?DESC("headers")})},
})} {request_timeout,
sc(
emqx_schema:duration_ms(),
#{
required => false,
desc => ?DESC("request_timeout")
}
)}
]. ].
desc(config) -> desc(config) ->
@ -128,24 +163,34 @@ desc(_) ->
undefined. undefined.
validations() -> validations() ->
[ {check_ssl_opts, fun check_ssl_opts/1} ]. [{check_ssl_opts, fun check_ssl_opts/1}].
sc(Type, Meta) -> hoconsc:mk(Type, Meta). sc(Type, Meta) -> hoconsc:mk(Type, Meta).
ref(Field) -> hoconsc:ref(?MODULE, Field). ref(Field) -> hoconsc:ref(?MODULE, Field).
%% =================================================================== %% ===================================================================
on_start(InstId, #{base_url := #{scheme := Scheme, on_start(
host := Host, InstId,
port := Port, #{
path := BasePath}, base_url := #{
connect_timeout := ConnectTimeout, scheme := Scheme,
max_retries := MaxRetries, host := Host,
retry_interval := RetryInterval, port := Port,
pool_type := PoolType, path := BasePath
pool_size := PoolSize} = Config) -> },
?SLOG(info, #{msg => "starting_http_connector", connect_timeout := ConnectTimeout,
connector => InstId, config => Config}), max_retries := MaxRetries,
retry_interval := RetryInterval,
pool_type := PoolType,
pool_size := PoolSize
} = Config
) ->
?SLOG(info, #{
msg => "starting_http_connector",
connector => InstId,
config => Config
}),
{Transport, TransportOpts} = {Transport, TransportOpts} =
case Scheme of case Scheme of
http -> http ->
@ -155,16 +200,18 @@ on_start(InstId, #{base_url := #{scheme := Scheme,
{tls, SSLOpts} {tls, SSLOpts}
end, end,
NTransportOpts = emqx_misc:ipv6_probe(TransportOpts), NTransportOpts = emqx_misc:ipv6_probe(TransportOpts),
PoolOpts = [ {host, Host} PoolOpts = [
, {port, Port} {host, Host},
, {connect_timeout, ConnectTimeout} {port, Port},
, {retry, MaxRetries} {connect_timeout, ConnectTimeout},
, {retry_timeout, RetryInterval} {retry, MaxRetries},
, {keepalive, 30000} {retry_timeout, RetryInterval},
, {pool_type, PoolType} {keepalive, 30000},
, {pool_size, PoolSize} {pool_type, PoolType},
, {transport, Transport} {pool_size, PoolSize},
, {transport_opts, NTransportOpts}], {transport, Transport},
{transport_opts, NTransportOpts}
],
PoolName = emqx_plugin_libs_pool:pool_name(InstId), PoolName = emqx_plugin_libs_pool:pool_name(InstId),
State = #{ State = #{
pool_name => PoolName, pool_name => PoolName,
@ -177,54 +224,84 @@ on_start(InstId, #{base_url := #{scheme := Scheme,
case ehttpc_sup:start_pool(PoolName, PoolOpts) of case ehttpc_sup:start_pool(PoolName, PoolOpts) of
{ok, _} -> {ok, State}; {ok, _} -> {ok, State};
{error, {already_started, _}} -> {ok, State}; {error, {already_started, _}} -> {ok, State};
{error, Reason} -> {error, Reason} -> {error, Reason}
{error, Reason}
end. end.
on_stop(InstId, #{pool_name := PoolName}) -> on_stop(InstId, #{pool_name := PoolName}) ->
?SLOG(info, #{msg => "stopping_http_connector", ?SLOG(info, #{
connector => InstId}), msg => "stopping_http_connector",
connector => InstId
}),
ehttpc_sup:stop_pool(PoolName). ehttpc_sup:stop_pool(PoolName).
on_query(InstId, {send_message, Msg}, AfterQuery, State) -> on_query(InstId, {send_message, Msg}, AfterQuery, State) ->
case maps:get(request, State, undefined) of case maps:get(request, State, undefined) of
undefined -> ?SLOG(error, #{msg => "request_not_found", connector => InstId}); undefined ->
?SLOG(error, #{msg => "request_not_found", connector => InstId});
Request -> Request ->
#{method := Method, path := Path, body := Body, headers := Headers, #{
request_timeout := Timeout} = process_request(Request, Msg), method := Method,
path := Path,
body := Body,
headers := Headers,
request_timeout := Timeout
} = process_request(Request, Msg),
on_query(InstId, {Method, {Path, Headers, Body}, Timeout}, AfterQuery, State) on_query(InstId, {Method, {Path, Headers, Body}, Timeout}, AfterQuery, State)
end; end;
on_query(InstId, {Method, Request}, AfterQuery, State) -> on_query(InstId, {Method, Request}, AfterQuery, State) ->
on_query(InstId, {undefined, Method, Request, 5000}, AfterQuery, State); on_query(InstId, {undefined, Method, Request, 5000}, AfterQuery, State);
on_query(InstId, {Method, Request, Timeout}, AfterQuery, State) -> on_query(InstId, {Method, Request, Timeout}, AfterQuery, State) ->
on_query(InstId, {undefined, Method, Request, Timeout}, AfterQuery, State); on_query(InstId, {undefined, Method, Request, Timeout}, AfterQuery, State);
on_query(InstId, {KeyOrNum, Method, Request, Timeout}, AfterQuery, on_query(
#{pool_name := PoolName, base_path := BasePath} = State) -> InstId,
?TRACE("QUERY", "http_connector_received", {KeyOrNum, Method, Request, Timeout},
#{request => Request, connector => InstId, state => State}), AfterQuery,
#{pool_name := PoolName, base_path := BasePath} = State
) ->
?TRACE(
"QUERY",
"http_connector_received",
#{request => Request, connector => InstId, state => State}
),
NRequest = formalize_request(Method, BasePath, Request), NRequest = formalize_request(Method, BasePath, Request),
case Result = ehttpc:request(case KeyOrNum of case
undefined -> PoolName; Result = ehttpc:request(
_ -> {PoolName, KeyOrNum} case KeyOrNum of
end, Method, NRequest, Timeout) of undefined -> PoolName;
_ -> {PoolName, KeyOrNum}
end,
Method,
NRequest,
Timeout
)
of
{error, Reason} -> {error, Reason} ->
?SLOG(error, #{msg => "http_connector_do_reqeust_failed", ?SLOG(error, #{
request => NRequest, reason => Reason, msg => "http_connector_do_reqeust_failed",
connector => InstId}), request => NRequest,
reason => Reason,
connector => InstId
}),
emqx_resource:query_failed(AfterQuery); emqx_resource:query_failed(AfterQuery);
{ok, StatusCode, _} when StatusCode >= 200 andalso StatusCode < 300 -> {ok, StatusCode, _} when StatusCode >= 200 andalso StatusCode < 300 ->
emqx_resource:query_success(AfterQuery); emqx_resource:query_success(AfterQuery);
{ok, StatusCode, _, _} when StatusCode >= 200 andalso StatusCode < 300 -> {ok, StatusCode, _, _} when StatusCode >= 200 andalso StatusCode < 300 ->
emqx_resource:query_success(AfterQuery); emqx_resource:query_success(AfterQuery);
{ok, StatusCode, _} -> {ok, StatusCode, _} ->
?SLOG(error, #{msg => "http connector do request, received error response", ?SLOG(error, #{
request => NRequest, connector => InstId, msg => "http connector do request, received error response",
status_code => StatusCode}), request => NRequest,
connector => InstId,
status_code => StatusCode
}),
emqx_resource:query_failed(AfterQuery); emqx_resource:query_failed(AfterQuery);
{ok, StatusCode, _, _} -> {ok, StatusCode, _, _} ->
?SLOG(error, #{msg => "http connector do request, received error response", ?SLOG(error, #{
request => NRequest, connector => InstId, msg => "http connector do request, received error response",
status_code => StatusCode}), request => NRequest,
connector => InstId,
status_code => StatusCode
}),
emqx_resource:query_failed(AfterQuery) emqx_resource:query_failed(AfterQuery)
end, end,
Result. Result.
@ -232,14 +309,16 @@ on_query(InstId, {KeyOrNum, Method, Request, Timeout}, AfterQuery,
on_health_check(_InstId, #{host := Host, port := Port, connect_timeout := Timeout} = State) -> on_health_check(_InstId, #{host := Host, port := Port, connect_timeout := Timeout} = State) ->
case do_health_check(Host, Port, Timeout) of case do_health_check(Host, Port, Timeout) of
ok -> {ok, State}; ok -> {ok, State};
{error, Reason} -> {error, Reason} -> {error, {http_health_check_failed, Reason}, State}
{error, {http_health_check_failed, Reason}, State}
end. end.
do_health_check(Host, Port, Timeout) -> do_health_check(Host, Port, Timeout) ->
case gen_tcp:connect(Host, Port, emqx_misc:ipv6_probe([]), Timeout) of case gen_tcp:connect(Host, Port, emqx_misc:ipv6_probe([]), Timeout) of
{ok, Sock} -> gen_tcp:close(Sock), ok; {ok, Sock} ->
{error, Reason} -> {error, Reason} gen_tcp:close(Sock),
ok;
{error, Reason} ->
{error, Reason}
end. end.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
@ -250,47 +329,64 @@ preprocess_request(undefined) ->
undefined; undefined;
preprocess_request(Req) when map_size(Req) == 0 -> preprocess_request(Req) when map_size(Req) == 0 ->
undefined; undefined;
preprocess_request(#{ preprocess_request(
method := Method, #{
path := Path, method := Method,
body := Body, path := Path,
headers := Headers body := Body,
} = Req) -> headers := Headers
#{ method => emqx_plugin_libs_rule:preproc_tmpl(bin(Method)) } = Req
, path => emqx_plugin_libs_rule:preproc_tmpl(Path) ) ->
, body => emqx_plugin_libs_rule:preproc_tmpl(Body) #{
, headers => preproc_headers(Headers) method => emqx_plugin_libs_rule:preproc_tmpl(bin(Method)),
, request_timeout => maps:get(request_timeout, Req, 30000) path => emqx_plugin_libs_rule:preproc_tmpl(Path),
}. body => emqx_plugin_libs_rule:preproc_tmpl(Body),
headers => preproc_headers(Headers),
request_timeout => maps:get(request_timeout, Req, 30000)
}.
preproc_headers(Headers) when is_map(Headers) -> preproc_headers(Headers) when is_map(Headers) ->
maps:fold(fun(K, V, Acc) -> maps:fold(
[{ fun(K, V, Acc) ->
[
{
emqx_plugin_libs_rule:preproc_tmpl(bin(K)),
emqx_plugin_libs_rule:preproc_tmpl(bin(V))
}
| Acc
]
end,
[],
Headers
);
preproc_headers(Headers) when is_list(Headers) ->
lists:map(
fun({K, V}) ->
{
emqx_plugin_libs_rule:preproc_tmpl(bin(K)), emqx_plugin_libs_rule:preproc_tmpl(bin(K)),
emqx_plugin_libs_rule:preproc_tmpl(bin(V)) emqx_plugin_libs_rule:preproc_tmpl(bin(V))
} | Acc] }
end, [], Headers); end,
preproc_headers(Headers) when is_list(Headers) -> Headers
lists:map(fun({K, V}) -> ).
{
emqx_plugin_libs_rule:preproc_tmpl(bin(K)),
emqx_plugin_libs_rule:preproc_tmpl(bin(V))
}
end, Headers).
process_request(#{ process_request(
method := MethodTks, #{
path := PathTks, method := MethodTks,
body := BodyTks, path := PathTks,
headers := HeadersTks, body := BodyTks,
request_timeout := ReqTimeout headers := HeadersTks,
} = Conf, Msg) -> request_timeout := ReqTimeout
Conf#{ method => make_method(emqx_plugin_libs_rule:proc_tmpl(MethodTks, Msg)) } = Conf,
, path => emqx_plugin_libs_rule:proc_tmpl(PathTks, Msg) Msg
, body => process_request_body(BodyTks, Msg) ) ->
, headers => proc_headers(HeadersTks, Msg) Conf#{
, request_timeout => ReqTimeout method => make_method(emqx_plugin_libs_rule:proc_tmpl(MethodTks, Msg)),
}. path => emqx_plugin_libs_rule:proc_tmpl(PathTks, Msg),
body => process_request_body(BodyTks, Msg),
headers => proc_headers(HeadersTks, Msg),
request_timeout => ReqTimeout
}.
process_request_body([], Msg) -> process_request_body([], Msg) ->
emqx_json:encode(Msg); emqx_json:encode(Msg);
@ -298,12 +394,15 @@ process_request_body(BodyTks, Msg) ->
emqx_plugin_libs_rule:proc_tmpl(BodyTks, Msg). emqx_plugin_libs_rule:proc_tmpl(BodyTks, Msg).
proc_headers(HeaderTks, Msg) -> proc_headers(HeaderTks, Msg) ->
lists:map(fun({K, V}) -> lists:map(
fun({K, V}) ->
{ {
emqx_plugin_libs_rule:proc_tmpl(K, Msg), emqx_plugin_libs_rule:proc_tmpl(K, Msg),
emqx_plugin_libs_rule:proc_tmpl(V, Msg) emqx_plugin_libs_rule:proc_tmpl(V, Msg)
} }
end, HeaderTks). end,
HeaderTks
).
make_method(M) when M == <<"POST">>; M == <<"post">> -> post; make_method(M) when M == <<"POST">>; M == <<"post">> -> post;
make_method(M) when M == <<"PUT">>; M == <<"put">> -> put; make_method(M) when M == <<"PUT">>; M == <<"put">> -> put;
@ -315,19 +414,19 @@ check_ssl_opts(Conf) ->
check_ssl_opts(URLFrom, Conf) -> check_ssl_opts(URLFrom, Conf) ->
#{scheme := Scheme} = hocon_maps:get(URLFrom, Conf), #{scheme := Scheme} = hocon_maps:get(URLFrom, Conf),
SSL= hocon_maps:get("ssl", Conf), SSL = hocon_maps:get("ssl", Conf),
case {Scheme, maps:get(enable, SSL, false)} of case {Scheme, maps:get(enable, SSL, false)} of
{http, false} -> true; {http, false} -> true;
{https, true} -> true; {https, true} -> true;
{_, _} -> false {_, _} -> false
end. end.
formalize_request(Method, BasePath, {Path, Headers, _Body}) formalize_request(Method, BasePath, {Path, Headers, _Body}) when
when Method =:= get; Method =:= delete -> Method =:= get; Method =:= delete
->
formalize_request(Method, BasePath, {Path, Headers}); formalize_request(Method, BasePath, {Path, Headers});
formalize_request(_Method, BasePath, {Path, Headers, Body}) -> formalize_request(_Method, BasePath, {Path, Headers, Body}) ->
{filename:join(BasePath, Path), Headers, Body}; {filename:join(BasePath, Path), Headers, Body};
formalize_request(_Method, BasePath, {Path, Headers}) -> formalize_request(_Method, BasePath, {Path, Headers}) ->
{filename:join(BasePath, Path), Headers}. {filename:join(BasePath, Path), Headers}.

View File

@ -24,11 +24,12 @@
-behaviour(emqx_resource). -behaviour(emqx_resource).
%% callbacks of behaviour emqx_resource %% callbacks of behaviour emqx_resource
-export([ on_start/2 -export([
, on_stop/2 on_start/2,
, on_query/4 on_stop/2,
, on_health_check/2 on_query/4,
]). on_health_check/2
]).
-export([do_health_check/1]). -export([do_health_check/1]).
@ -43,54 +44,84 @@ roots() ->
fields(_) -> []. fields(_) -> [].
%% =================================================================== %% ===================================================================
on_start(InstId, #{servers := Servers0, on_start(
port := Port, InstId,
bind_dn := BindDn, #{
bind_password := BindPassword, servers := Servers0,
timeout := Timeout, port := Port,
pool_size := PoolSize, bind_dn := BindDn,
auto_reconnect := AutoReconn, bind_password := BindPassword,
ssl := SSL} = Config) -> timeout := Timeout,
?SLOG(info, #{msg => "starting_ldap_connector", pool_size := PoolSize,
connector => InstId, config => Config}), auto_reconnect := AutoReconn,
Servers = [begin proplists:get_value(host, S) end || S <- Servers0], ssl := SSL
SslOpts = case maps:get(enable, SSL) of } = Config
true -> ) ->
[{ssl, true}, ?SLOG(info, #{
{sslopts, emqx_tls_lib:to_client_opts(SSL)} msg => "starting_ldap_connector",
]; connector => InstId,
false -> [{ssl, false}] config => Config
end, }),
Opts = [{servers, Servers}, Servers = [
{port, Port}, begin
{bind_dn, BindDn}, proplists:get_value(host, S)
{bind_password, BindPassword}, end
{timeout, Timeout}, || S <- Servers0
{pool_size, PoolSize}, ],
{auto_reconnect, reconn_interval(AutoReconn)}, SslOpts =
{servers, Servers}], case maps:get(enable, SSL) of
true ->
[
{ssl, true},
{sslopts, emqx_tls_lib:to_client_opts(SSL)}
];
false ->
[{ssl, false}]
end,
Opts = [
{servers, Servers},
{port, Port},
{bind_dn, BindDn},
{bind_password, BindPassword},
{timeout, Timeout},
{pool_size, PoolSize},
{auto_reconnect, reconn_interval(AutoReconn)},
{servers, Servers}
],
PoolName = emqx_plugin_libs_pool:pool_name(InstId), PoolName = emqx_plugin_libs_pool:pool_name(InstId),
case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Opts ++ SslOpts) of case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Opts ++ SslOpts) of
ok -> {ok, #{poolname => PoolName}}; ok -> {ok, #{poolname => PoolName}};
{error, Reason} -> {error, Reason} {error, Reason} -> {error, Reason}
end. end.
on_stop(InstId, #{poolname := PoolName}) -> on_stop(InstId, #{poolname := PoolName}) ->
?SLOG(info, #{msg => "stopping_ldap_connector", ?SLOG(info, #{
connector => InstId}), msg => "stopping_ldap_connector",
connector => InstId
}),
emqx_plugin_libs_pool:stop_pool(PoolName). emqx_plugin_libs_pool:stop_pool(PoolName).
on_query(InstId, {search, Base, Filter, Attributes}, AfterQuery, #{poolname := PoolName} = State) -> on_query(InstId, {search, Base, Filter, Attributes}, AfterQuery, #{poolname := PoolName} = State) ->
Request = {Base, Filter, Attributes}, Request = {Base, Filter, Attributes},
?TRACE("QUERY", "ldap_connector_received", ?TRACE(
#{request => Request, connector => InstId, state => State}), "QUERY",
case Result = ecpool:pick_and_do( "ldap_connector_received",
PoolName, #{request => Request, connector => InstId, state => State}
{?MODULE, search, [Base, Filter, Attributes]}, ),
no_handover) of case
Result = ecpool:pick_and_do(
PoolName,
{?MODULE, search, [Base, Filter, Attributes]},
no_handover
)
of
{error, Reason} -> {error, Reason} ->
?SLOG(error, #{msg => "ldap_connector_do_request_failed", ?SLOG(error, #{
request => Request, connector => InstId, reason => Reason}), msg => "ldap_connector_do_request_failed",
request => Request,
connector => InstId,
reason => Reason
}),
emqx_resource:query_failed(AfterQuery); emqx_resource:query_failed(AfterQuery);
_ -> _ ->
emqx_resource:query_success(AfterQuery) emqx_resource:query_success(AfterQuery)
@ -107,38 +138,45 @@ reconn_interval(true) -> 15;
reconn_interval(false) -> false. reconn_interval(false) -> false.
search(Conn, Base, Filter, Attributes) -> search(Conn, Base, Filter, Attributes) ->
eldap2:search(Conn, [{base, Base}, eldap2:search(Conn, [
{filter, Filter}, {base, Base},
{attributes, Attributes}, {filter, Filter},
{deref, eldap2:'derefFindingBaseObj'()}]). {attributes, Attributes},
{deref, eldap2:'derefFindingBaseObj'()}
]).
%% =================================================================== %% ===================================================================
connect(Opts) -> connect(Opts) ->
Servers = proplists:get_value(servers, Opts, ["localhost"]), Servers = proplists:get_value(servers, Opts, ["localhost"]),
Port = proplists:get_value(port, Opts, 389), Port = proplists:get_value(port, Opts, 389),
Timeout = proplists:get_value(timeout, Opts, 30), Timeout = proplists:get_value(timeout, Opts, 30),
BindDn = proplists:get_value(bind_dn, Opts), BindDn = proplists:get_value(bind_dn, Opts),
BindPassword = proplists:get_value(bind_password, Opts), BindPassword = proplists:get_value(bind_password, Opts),
SslOpts = case proplists:get_value(ssl, Opts, false) of SslOpts =
true -> case proplists:get_value(ssl, Opts, false) of
[{sslopts, proplists:get_value(sslopts, Opts, [])}, {ssl, true}]; true ->
false -> [{sslopts, proplists:get_value(sslopts, Opts, [])}, {ssl, true}];
[{ssl, false}] false ->
end, [{ssl, false}]
LdapOpts = [{port, Port}, end,
{timeout, Timeout}] ++ SslOpts, LdapOpts =
[
{port, Port},
{timeout, Timeout}
] ++ SslOpts,
{ok, LDAP} = eldap2:open(Servers, LdapOpts), {ok, LDAP} = eldap2:open(Servers, LdapOpts),
ok = eldap2:simple_bind(LDAP, BindDn, BindPassword), ok = eldap2:simple_bind(LDAP, BindDn, BindPassword),
{ok, LDAP}. {ok, LDAP}.
ldap_fields() -> ldap_fields() ->
[ {servers, fun servers/1} [
, {port, fun port/1} {servers, fun servers/1},
, {pool_size, fun emqx_connector_schema_lib:pool_size/1} {port, fun port/1},
, {bind_dn, fun bind_dn/1} {pool_size, fun emqx_connector_schema_lib:pool_size/1},
, {bind_password, fun emqx_connector_schema_lib:password/1} {bind_dn, fun bind_dn/1},
, {timeout, fun duration/1} {bind_password, fun emqx_connector_schema_lib:password/1},
, {auto_reconnect, fun emqx_connector_schema_lib:auto_reconnect/1} {timeout, fun duration/1},
{auto_reconnect, fun emqx_connector_schema_lib:auto_reconnect/1}
]. ].
servers(type) -> list(); servers(type) -> list();
@ -159,14 +197,18 @@ duration(type) -> emqx_schema:duration_ms();
duration(_) -> undefined. duration(_) -> undefined.
to_servers_raw(Servers) -> to_servers_raw(Servers) ->
{ok, lists:map( fun(Server) -> {ok,
case string:tokens(Server, ": ") of lists:map(
[Ip] -> fun(Server) ->
[{host, Ip}]; case string:tokens(Server, ": ") of
[Ip, Port] -> [Ip] ->
[{host, Ip}, {port, list_to_integer(Port)}] [{host, Ip}];
end [Ip, Port] ->
end, string:tokens(str(Servers), ", "))}. [{host, Ip}, {port, list_to_integer(Port)}]
end
end,
string:tokens(str(Servers), ", ")
)}.
str(A) when is_atom(A) -> str(A) when is_atom(A) ->
atom_to_list(A); atom_to_list(A);

View File

@ -24,11 +24,12 @@
-behaviour(emqx_resource). -behaviour(emqx_resource).
%% callbacks of behaviour emqx_resource %% callbacks of behaviour emqx_resource
-export([ on_start/2 -export([
, on_stop/2 on_start/2,
, on_query/4 on_stop/2,
, on_health_check/2 on_query/4,
]). on_health_check/2
]).
%% ecpool callback %% ecpool callback
-export([connect/1]). -export([connect/1]).
@ -40,57 +41,73 @@
-define(HEALTH_CHECK_TIMEOUT, 10000). -define(HEALTH_CHECK_TIMEOUT, 10000).
%% mongo servers don't need parse %% mongo servers don't need parse
-define( MONGO_HOST_OPTIONS -define(MONGO_HOST_OPTIONS, #{
, #{ host_type => hostname host_type => hostname,
, default_port => ?MONGO_DEFAULT_PORT}). default_port => ?MONGO_DEFAULT_PORT
}).
%%===================================================================== %%=====================================================================
roots() -> roots() ->
[ {config, #{type => hoconsc:union( [
[ hoconsc:ref(?MODULE, single) {config, #{
, hoconsc:ref(?MODULE, rs) type => hoconsc:union(
, hoconsc:ref(?MODULE, sharded) [
])}} hoconsc:ref(?MODULE, single),
hoconsc:ref(?MODULE, rs),
hoconsc:ref(?MODULE, sharded)
]
)
}}
]. ].
fields(single) -> fields(single) ->
[ {mongo_type, #{type => single, [
default => single, {mongo_type, #{
required => true, type => single,
desc => ?DESC("single_mongo_type")}} default => single,
, {server, fun server/1} required => true,
, {w_mode, fun w_mode/1} desc => ?DESC("single_mongo_type")
}},
{server, fun server/1},
{w_mode, fun w_mode/1}
] ++ mongo_fields(); ] ++ mongo_fields();
fields(rs) -> fields(rs) ->
[ {mongo_type, #{type => rs, [
default => rs, {mongo_type, #{
required => true, type => rs,
desc => ?DESC("rs_mongo_type")}} default => rs,
, {servers, fun servers/1} required => true,
, {w_mode, fun w_mode/1} desc => ?DESC("rs_mongo_type")
, {r_mode, fun r_mode/1} }},
, {replica_set_name, fun replica_set_name/1} {servers, fun servers/1},
{w_mode, fun w_mode/1},
{r_mode, fun r_mode/1},
{replica_set_name, fun replica_set_name/1}
] ++ mongo_fields(); ] ++ mongo_fields();
fields(sharded) -> fields(sharded) ->
[ {mongo_type, #{type => sharded, [
default => sharded, {mongo_type, #{
required => true, type => sharded,
desc => ?DESC("sharded_mongo_type")}} default => sharded,
, {servers, fun servers/1} required => true,
, {w_mode, fun w_mode/1} desc => ?DESC("sharded_mongo_type")
}},
{servers, fun servers/1},
{w_mode, fun w_mode/1}
] ++ mongo_fields(); ] ++ mongo_fields();
fields(topology) -> fields(topology) ->
[ {pool_size, fun emqx_connector_schema_lib:pool_size/1} [
, {max_overflow, fun max_overflow/1} {pool_size, fun emqx_connector_schema_lib:pool_size/1},
, {overflow_ttl, fun duration/1} {max_overflow, fun max_overflow/1},
, {overflow_check_period, fun duration/1} {overflow_ttl, fun duration/1},
, {local_threshold_ms, fun duration/1} {overflow_check_period, fun duration/1},
, {connect_timeout_ms, fun duration/1} {local_threshold_ms, fun duration/1},
, {socket_timeout_ms, fun duration/1} {connect_timeout_ms, fun duration/1},
, {server_selection_timeout_ms, fun duration/1} {socket_timeout_ms, fun duration/1},
, {wait_queue_timeout_ms, fun duration/1} {server_selection_timeout_ms, fun duration/1},
, {heartbeat_frequency_ms, fun duration/1} {wait_queue_timeout_ms, fun duration/1},
, {min_heartbeat_frequency_ms, fun duration/1} {heartbeat_frequency_ms, fun duration/1},
{min_heartbeat_frequency_ms, fun duration/1}
]. ].
desc(single) -> desc(single) ->
@ -105,53 +122,68 @@ desc(_) ->
undefined. undefined.
mongo_fields() -> mongo_fields() ->
[ {srv_record, fun srv_record/1} [
, {pool_size, fun emqx_connector_schema_lib:pool_size/1} {srv_record, fun srv_record/1},
, {username, fun emqx_connector_schema_lib:username/1} {pool_size, fun emqx_connector_schema_lib:pool_size/1},
, {password, fun emqx_connector_schema_lib:password/1} {username, fun emqx_connector_schema_lib:username/1},
, {auth_source, #{ type => binary() {password, fun emqx_connector_schema_lib:password/1},
, required => false {auth_source, #{
, desc => ?DESC("auth_source") type => binary(),
}} required => false,
, {database, fun emqx_connector_schema_lib:database/1} desc => ?DESC("auth_source")
, {topology, #{type => hoconsc:ref(?MODULE, topology), required => false}} }},
{database, fun emqx_connector_schema_lib:database/1},
{topology, #{type => hoconsc:ref(?MODULE, topology), required => false}}
] ++ ] ++
emqx_connector_schema_lib:ssl_fields(). emqx_connector_schema_lib:ssl_fields().
%% =================================================================== %% ===================================================================
on_start(InstId, Config = #{mongo_type := Type, on_start(
pool_size := PoolSize, InstId,
ssl := SSL}) -> Config = #{
Msg = case Type of mongo_type := Type,
single -> "starting_mongodb_single_connector"; pool_size := PoolSize,
rs -> "starting_mongodb_replica_set_connector"; ssl := SSL
sharded -> "starting_mongodb_sharded_connector" }
end, ) ->
Msg =
case Type of
single -> "starting_mongodb_single_connector";
rs -> "starting_mongodb_replica_set_connector";
sharded -> "starting_mongodb_sharded_connector"
end,
?SLOG(info, #{msg => Msg, connector => InstId, config => Config}), ?SLOG(info, #{msg => Msg, connector => InstId, config => Config}),
NConfig = #{hosts := Hosts} = may_parse_srv_and_txt_records(Config), NConfig = #{hosts := Hosts} = may_parse_srv_and_txt_records(Config),
SslOpts = case maps:get(enable, SSL) of SslOpts =
true -> case maps:get(enable, SSL) of
[{ssl, true}, true ->
{ssl_opts, emqx_tls_lib:to_client_opts(SSL)} [
]; {ssl, true},
false -> [{ssl, false}] {ssl_opts, emqx_tls_lib:to_client_opts(SSL)}
end, ];
false ->
[{ssl, false}]
end,
Topology = maps:get(topology, NConfig, #{}), Topology = maps:get(topology, NConfig, #{}),
Opts = [{mongo_type, init_type(NConfig)}, Opts = [
{hosts, Hosts}, {mongo_type, init_type(NConfig)},
{pool_size, PoolSize}, {hosts, Hosts},
{options, init_topology_options(maps:to_list(Topology), [])}, {pool_size, PoolSize},
{worker_options, init_worker_options(maps:to_list(NConfig), SslOpts)}], {options, init_topology_options(maps:to_list(Topology), [])},
{worker_options, init_worker_options(maps:to_list(NConfig), SslOpts)}
],
PoolName = emqx_plugin_libs_pool:pool_name(InstId), PoolName = emqx_plugin_libs_pool:pool_name(InstId),
case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Opts) of case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Opts) of
ok -> {ok, #{poolname => PoolName, type => Type}}; ok -> {ok, #{poolname => PoolName, type => Type}};
{error, Reason} -> {error, Reason} {error, Reason} -> {error, Reason}
end. end.
on_stop(InstId, #{poolname := PoolName}) -> on_stop(InstId, #{poolname := PoolName}) ->
?SLOG(info, #{msg => "stopping_mongodb_connector", ?SLOG(info, #{
connector => InstId}), msg => "stopping_mongodb_connector",
connector => InstId
}),
emqx_plugin_libs_pool:stop_pool(PoolName). emqx_plugin_libs_pool:stop_pool(PoolName).
on_query(InstId, on_query(InstId,
@ -164,10 +196,32 @@ on_query(InstId,
case ecpool:pick_and_do(PoolName, case ecpool:pick_and_do(PoolName,
{?MODULE, mongo_query, [Action, Collection, Filter, Projector]}, {?MODULE, mongo_query, [Action, Collection, Filter, Projector]},
no_handover) of no_handover) of
on_query(
InstId,
{Action, Collection, Filter, Projector},
AfterQuery,
#{poolname := PoolName} = State
) ->
Request = {Action, Collection, Selector, Projector},
?TRACE(
"QUERY",
"mongodb_connector_received",
#{request => Request, connector => InstId, state => State}
),
case
ecpool:pick_and_do(
PoolName,
{?MODULE, mongo_query, [Action, Collection, Selector, Projector]},
no_handover
)
of
{error, Reason} -> {error, Reason} ->
?SLOG(error, #{msg => "mongodb_connector_do_query_failed", ?SLOG(error, #{
request => Request, reason => Reason, msg => "mongodb_connector_do_query_failed",
connector => InstId}), request => Request,
reason => Reason,
connector => InstId
}),
emqx_resource:query_failed(AfterQuery), emqx_resource:query_failed(AfterQuery),
{error, Reason}; {error, Reason};
{ok, Cursor} when is_pid(Cursor) -> {ok, Cursor} when is_pid(Cursor) ->
@ -182,12 +236,16 @@ on_query(InstId,
on_health_check(InstId, #{poolname := PoolName} = State) -> on_health_check(InstId, #{poolname := PoolName} = State) ->
case health_check(PoolName) of case health_check(PoolName) of
true -> true ->
?tp(debug, emqx_connector_mongo_health_check, #{instance_id => InstId, ?tp(debug, emqx_connector_mongo_health_check, #{
status => ok}), instance_id => InstId,
status => ok
}),
{ok, State}; {ok, State};
false -> false ->
?tp(warning, emqx_connector_mongo_health_check, #{instance_id => InstId, ?tp(warning, emqx_connector_mongo_health_check, #{
status => failed}), instance_id => InstId,
status => failed
}),
{error, health_check_failed, State} {error, health_check_failed, State}
end. end.
@ -204,36 +262,43 @@ check_worker_health(Worker) ->
%% we don't care if this returns something or not, we just to test the connection %% we don't care if this returns something or not, we just to test the connection
try do_test_query(Conn) of try do_test_query(Conn) of
{error, Reason} -> {error, Reason} ->
?SLOG(warning, #{msg => "mongo_connection_health_check_error", ?SLOG(warning, #{
worker => Worker, msg => "mongo_connection_health_check_error",
reason => Reason}), worker => Worker,
reason => Reason
}),
false; false;
_ -> _ ->
true true
catch catch
Class:Error -> Class:Error ->
?SLOG(warning, #{msg => "mongo_connection_health_check_exception", ?SLOG(warning, #{
worker => Worker, msg => "mongo_connection_health_check_exception",
class => Class, worker => Worker,
error => Error}), class => Class,
error => Error
}),
false false
end; end;
_ -> _ ->
?SLOG(warning, #{msg => "mongo_connection_health_check_error", ?SLOG(warning, #{
worker => Worker, msg => "mongo_connection_health_check_error",
reason => worker_not_found}), worker => Worker,
reason => worker_not_found
}),
false false
end. end.
do_test_query(Conn) -> do_test_query(Conn) ->
mongoc:transaction_query( mongoc:transaction_query(
Conn, Conn,
fun(Conf = #{pool := Worker}) -> fun(Conf = #{pool := Worker}) ->
Query = mongoc:find_one_query(Conf, <<"foo">>, #{}, #{}, 0), Query = mongoc:find_one_query(Conf, <<"foo">>, #{}, #{}, 0),
mc_worker_api:find_one(Worker, Query) mc_worker_api:find_one(Worker, Query)
end, end,
#{}, #{},
?HEALTH_CHECK_TIMEOUT). ?HEALTH_CHECK_TIMEOUT
).
connect(Opts) -> connect(Opts) ->
Type = proplists:get_value(mongo_type, Opts, single), Type = proplists:get_value(mongo_type, Opts, single),
@ -244,10 +309,8 @@ connect(Opts) ->
mongo_query(Conn, find, Collection, Filter, Projector) -> mongo_query(Conn, find, Collection, Filter, Projector) ->
mongo_api:find(Conn, Collection, Filter, Projector); mongo_api:find(Conn, Collection, Filter, Projector);
mongo_query(Conn, find_one, Collection, Filter, Projector) -> mongo_query(Conn, find_one, Collection, Filter, Projector) ->
mongo_api:find_one(Conn, Collection, Filter, Projector); mongo_api:find_one(Conn, Collection, Filter, Projector);
%% Todo xxx %% Todo xxx
mongo_query(_Conn, _Action, _Collection, _Filter, _Projector) -> mongo_query(_Conn, _Action, _Collection, _Filter, _Projector) ->
ok. ok.
@ -298,7 +361,8 @@ init_worker_options([{r_mode, V} | R], Acc) ->
init_worker_options(R, [{r_mode, V} | Acc]); init_worker_options(R, [{r_mode, V} | Acc]);
init_worker_options([_ | R], Acc) -> init_worker_options([_ | R], Acc) ->
init_worker_options(R, Acc); init_worker_options(R, Acc);
init_worker_options([], Acc) -> Acc. init_worker_options([], Acc) ->
Acc.
%% =================================================================== %% ===================================================================
%% Schema funcs %% Schema funcs
@ -356,59 +420,76 @@ may_parse_srv_and_txt_records(#{server := Server} = Config) ->
may_parse_srv_and_txt_records(Config) -> may_parse_srv_and_txt_records(Config) ->
may_parse_srv_and_txt_records_(Config). may_parse_srv_and_txt_records_(Config).
may_parse_srv_and_txt_records_(#{mongo_type := Type, may_parse_srv_and_txt_records_(
srv_record := false, #{
servers := Servers} = Config) -> mongo_type := Type,
srv_record := false,
servers := Servers
} = Config
) ->
case Type =:= rs andalso maps:is_key(replica_set_name, Config) =:= false of case Type =:= rs andalso maps:is_key(replica_set_name, Config) =:= false of
true -> true ->
error({missing_parameter, replica_set_name}); error({missing_parameter, replica_set_name});
false -> false ->
Config#{hosts => servers_to_bin(Servers)} Config#{hosts => servers_to_bin(Servers)}
end; end;
may_parse_srv_and_txt_records_(#{mongo_type := Type, may_parse_srv_and_txt_records_(
srv_record := true, #{
servers := Servers} = Config) -> mongo_type := Type,
srv_record := true,
servers := Servers
} = Config
) ->
Hosts = parse_srv_records(Type, Servers), Hosts = parse_srv_records(Type, Servers),
ExtraOpts = parse_txt_records(Type, Servers), ExtraOpts = parse_txt_records(Type, Servers),
maps:merge(Config#{hosts => Hosts}, ExtraOpts). maps:merge(Config#{hosts => Hosts}, ExtraOpts).
parse_srv_records(Type, Servers) -> parse_srv_records(Type, Servers) ->
Fun = fun(AccIn, {IpOrHost, _Port}) -> Fun = fun(AccIn, {IpOrHost, _Port}) ->
case inet_res:lookup("_mongodb._tcp." case
++ ip_or_host_to_string(IpOrHost), in, srv) of inet_res:lookup(
[] -> "_mongodb._tcp." ++
error(service_not_found); ip_or_host_to_string(IpOrHost),
Services -> in,
[ [server_to_bin({Host, Port}) || {_, _, Port, Host} <- Services] srv
| AccIn] )
end of
end, [] ->
error(service_not_found);
Services ->
[
[server_to_bin({Host, Port}) || {_, _, Port, Host} <- Services]
| AccIn
]
end
end,
Res = lists:foldl(Fun, [], Servers), Res = lists:foldl(Fun, [], Servers),
case Type of case Type of
single -> lists:nth(1, Res); single -> lists:nth(1, Res);
_ -> Res _ -> Res
end. end.
parse_txt_records(Type, Servers) -> parse_txt_records(Type, Servers) ->
Fields = case Type of Fields =
rs -> ["authSource", "replicaSet"]; case Type of
_ -> ["authSource"] rs -> ["authSource", "replicaSet"];
end, _ -> ["authSource"]
end,
Fun = fun(AccIn, {IpOrHost, _Port}) -> Fun = fun(AccIn, {IpOrHost, _Port}) ->
case inet_res:lookup(IpOrHost, in, txt) of case inet_res:lookup(IpOrHost, in, txt) of
[] -> [] ->
#{}; #{};
[[QueryString]] -> [[QueryString]] ->
case uri_string:dissect_query(QueryString) of case uri_string:dissect_query(QueryString) of
{error, _, _} -> {error, _, _} ->
error({invalid_txt_record, invalid_query_string}); error({invalid_txt_record, invalid_query_string});
Options -> Options ->
maps:merge(AccIn, take_and_convert(Fields, Options)) maps:merge(AccIn, take_and_convert(Fields, Options))
end; end;
_ -> _ ->
error({invalid_txt_record, multiple_records}) error({invalid_txt_record, multiple_records})
end end
end, end,
lists:foldl(Fun, #{}, Servers). lists:foldl(Fun, #{}, Servers).
take_and_convert(Fields, Options) -> take_and_convert(Fields, Options) ->
@ -430,8 +511,8 @@ take_and_convert([Field | More], Options, Acc) ->
take_and_convert(More, Options, Acc) take_and_convert(More, Options, Acc)
end. end.
-spec ip_or_host_to_string(binary() | string() | tuple()) -spec ip_or_host_to_string(binary() | string() | tuple()) ->
-> string(). string().
ip_or_host_to_string(Ip) when is_tuple(Ip) -> ip_or_host_to_string(Ip) when is_tuple(Ip) ->
inet:ntoa(Ip); inet:ntoa(Ip);
ip_or_host_to_string(Host) -> ip_or_host_to_string(Host) ->
@ -448,18 +529,20 @@ server_to_bin({IpOrHost, Port}) ->
%% =================================================================== %% ===================================================================
%% typereflt funcs %% typereflt funcs
-spec to_server_raw(string()) -spec to_server_raw(string()) ->
-> {string(), pos_integer()}. {string(), pos_integer()}.
to_server_raw(Server) -> to_server_raw(Server) ->
emqx_connector_schema_lib:parse_server(Server, ?MONGO_HOST_OPTIONS). emqx_connector_schema_lib:parse_server(Server, ?MONGO_HOST_OPTIONS).
-spec to_servers_raw(string()) -spec to_servers_raw(string()) ->
-> [{string(), pos_integer()}]. [{string(), pos_integer()}].
to_servers_raw(Servers) -> to_servers_raw(Servers) ->
lists:map( fun(Server) -> lists:map(
emqx_connector_schema_lib:parse_server(Server, ?MONGO_HOST_OPTIONS) fun(Server) ->
end emqx_connector_schema_lib:parse_server(Server, ?MONGO_HOST_OPTIONS)
, string:tokens(str(Servers), ", ")). end,
string:tokens(str(Servers), ", ")
).
str(A) when is_atom(A) -> str(A) when is_atom(A) ->
atom_to_list(A); atom_to_list(A);

View File

@ -23,28 +23,32 @@
-behaviour(emqx_resource). -behaviour(emqx_resource).
%% API and callbacks for supervisor %% API and callbacks for supervisor
-export([ start_link/0 -export([
, init/1 start_link/0,
, create_bridge/1 init/1,
, drop_bridge/1 create_bridge/1,
, bridges/0 drop_bridge/1,
]). bridges/0
]).
-export([on_message_received/3]). -export([on_message_received/3]).
%% callbacks of behaviour emqx_resource %% callbacks of behaviour emqx_resource
-export([ on_start/2 -export([
, on_stop/2 on_start/2,
, on_query/4 on_stop/2,
, on_health_check/2 on_query/4,
]). on_health_check/2
]).
-behaviour(hocon_schema). -behaviour(hocon_schema).
-import(hoconsc, [mk/2]). -import(hoconsc, [mk/2]).
-export([ roots/0 -export([
, fields/1]). roots/0,
fields/1
]).
%%===================================================================== %%=====================================================================
%% Hocon schema %% Hocon schema
@ -53,25 +57,34 @@ roots() ->
fields("config") -> fields("config") ->
emqx_connector_mqtt_schema:fields("config"); emqx_connector_mqtt_schema:fields("config");
fields("get") -> fields("get") ->
[ {num_of_bridges, mk(integer(), [
#{ desc => ?DESC("num_of_bridges") {num_of_bridges,
})} mk(
integer(),
#{desc => ?DESC("num_of_bridges")}
)}
] ++ fields("post"); ] ++ fields("post");
fields("put") -> fields("put") ->
emqx_connector_mqtt_schema:fields("connector"); emqx_connector_mqtt_schema:fields("connector");
fields("post") -> fields("post") ->
[ {type, mk(mqtt, [
#{ required => true {type,
, desc => ?DESC("type") mk(
})} mqtt,
, {name, mk(binary(), #{
#{ required => true required => true,
, desc => ?DESC("name") desc => ?DESC("type")
})} }
)},
{name,
mk(
binary(),
#{
required => true,
desc => ?DESC("name")
}
)}
] ++ fields("put"). ] ++ fields("put").
%% =================================================================== %% ===================================================================
@ -80,23 +93,29 @@ start_link() ->
supervisor:start_link({local, ?MODULE}, ?MODULE, []). supervisor:start_link({local, ?MODULE}, ?MODULE, []).
init([]) -> init([]) ->
SupFlag = #{strategy => one_for_one, SupFlag = #{
intensity => 100, strategy => one_for_one,
period => 10}, intensity => 100,
period => 10
},
{ok, {SupFlag, []}}. {ok, {SupFlag, []}}.
bridge_spec(Config) -> bridge_spec(Config) ->
#{id => maps:get(name, Config), #{
start => {emqx_connector_mqtt_worker, start_link, [Config]}, id => maps:get(name, Config),
restart => permanent, start => {emqx_connector_mqtt_worker, start_link, [Config]},
shutdown => 5000, restart => permanent,
type => worker, shutdown => 5000,
modules => [emqx_connector_mqtt_worker]}. type => worker,
modules => [emqx_connector_mqtt_worker]
}.
-spec(bridges() -> [{node(), map()}]). -spec bridges() -> [{node(), map()}].
bridges() -> bridges() ->
[{Name, emqx_connector_mqtt_worker:status(Name)} [
|| {Name, _Pid, _, _} <- supervisor:which_children(?MODULE)]. {Name, emqx_connector_mqtt_worker:status(Name)}
|| {Name, _Pid, _, _} <- supervisor:which_children(?MODULE)
].
create_bridge(Config) -> create_bridge(Config) ->
supervisor:start_child(?MODULE, bridge_spec(Config)). supervisor:start_child(?MODULE, bridge_spec(Config)).
@ -121,8 +140,11 @@ on_message_received(Msg, HookPoint, InstId) ->
%% =================================================================== %% ===================================================================
on_start(InstId, Conf) -> on_start(InstId, Conf) ->
InstanceId = binary_to_atom(InstId, utf8), InstanceId = binary_to_atom(InstId, utf8),
?SLOG(info, #{msg => "starting_mqtt_connector", ?SLOG(info, #{
connector => InstanceId, config => Conf}), msg => "starting_mqtt_connector",
connector => InstanceId,
config => Conf
}),
BasicConf = basic_config(Conf), BasicConf = basic_config(Conf),
BridgeConf = BasicConf#{ BridgeConf = BasicConf#{
name => InstanceId, name => InstanceId,
@ -142,19 +164,25 @@ on_start(InstId, Conf) ->
end. end.
on_stop(_InstId, #{name := InstanceId}) -> on_stop(_InstId, #{name := InstanceId}) ->
?SLOG(info, #{msg => "stopping_mqtt_connector", ?SLOG(info, #{
connector => InstanceId}), msg => "stopping_mqtt_connector",
connector => InstanceId
}),
case ?MODULE:drop_bridge(InstanceId) of case ?MODULE:drop_bridge(InstanceId) of
ok -> ok; ok ->
{error, not_found} -> ok; ok;
{error, not_found} ->
ok;
{error, Reason} -> {error, Reason} ->
?SLOG(error, #{msg => "stop_mqtt_connector", ?SLOG(error, #{
connector => InstanceId, reason => Reason}) msg => "stop_mqtt_connector",
connector => InstanceId,
reason => Reason
})
end. end.
on_query(_InstId, {message_received, _Msg}, AfterQuery, _State) -> on_query(_InstId, {message_received, _Msg}, AfterQuery, _State) ->
emqx_resource:query_success(AfterQuery); emqx_resource:query_success(AfterQuery);
on_query(_InstId, {send_message, Msg}, AfterQuery, #{name := InstanceId}) -> on_query(_InstId, {send_message, Msg}, AfterQuery, #{name := InstanceId}) ->
?TRACE("QUERY", "send_msg_to_remote_node", #{message => Msg, connector => InstanceId}), ?TRACE("QUERY", "send_msg_to_remote_node", #{message => Msg, connector => InstanceId}),
emqx_connector_mqtt_worker:send_to_remote(InstanceId, Msg), emqx_connector_mqtt_worker:send_to_remote(InstanceId, Msg),
@ -178,7 +206,8 @@ make_sub_confs(undefined, _) ->
undefined; undefined;
make_sub_confs(SubRemoteConf, InstId) -> make_sub_confs(SubRemoteConf, InstId) ->
case maps:take(hookpoint, SubRemoteConf) of case maps:take(hookpoint, SubRemoteConf) of
error -> SubRemoteConf; error ->
SubRemoteConf;
{HookPoint, SubConf} -> {HookPoint, SubConf} ->
MFA = {?MODULE, on_message_received, [HookPoint, InstId]}, MFA = {?MODULE, on_message_received, [HookPoint, InstId]},
SubConf#{on_message_received => MFA} SubConf#{on_message_received => MFA}
@ -192,22 +221,24 @@ make_forward_confs(FrowardConf) ->
FrowardConf. FrowardConf.
basic_config(#{ basic_config(#{
server := Server, server := Server,
reconnect_interval := ReconnIntv, reconnect_interval := ReconnIntv,
proto_ver := ProtoVer, proto_ver := ProtoVer,
username := User, username := User,
password := Password, password := Password,
clean_start := CleanStart, clean_start := CleanStart,
keepalive := KeepAlive, keepalive := KeepAlive,
retry_interval := RetryIntv, retry_interval := RetryIntv,
max_inflight := MaxInflight, max_inflight := MaxInflight,
replayq := ReplayQ, replayq := ReplayQ,
ssl := #{enable := EnableSsl} = Ssl}) -> ssl := #{enable := EnableSsl} = Ssl
}) ->
#{ #{
replayq => ReplayQ, replayq => ReplayQ,
%% connection opts %% connection opts
server => Server, server => Server,
connect_timeout => 30, %% 30s %% 30s
connect_timeout => 30,
reconnect_interval => ReconnIntv, reconnect_interval => ReconnIntv,
proto_ver => ProtoVer, proto_ver => ProtoVer,
bridge_mode => true, bridge_mode => true,

View File

@ -23,11 +23,12 @@
-behaviour(emqx_resource). -behaviour(emqx_resource).
%% callbacks of behaviour emqx_resource %% callbacks of behaviour emqx_resource
-export([ on_start/2 -export([
, on_stop/2 on_start/2,
, on_query/4 on_stop/2,
, on_health_check/2 on_query/4,
]). on_health_check/2
]).
%% ecpool connect & reconnect %% ecpool connect & reconnect
-export([connect/1, prepare_sql_to_conn/2]). -export([connect/1, prepare_sql_to_conn/2]).
@ -38,9 +39,10 @@
-export([do_health_check/1]). -export([do_health_check/1]).
-define( MYSQL_HOST_OPTIONS -define(MYSQL_HOST_OPTIONS, #{
, #{ host_type => inet_addr host_type => inet_addr,
, default_port => ?MYSQL_DEFAULT_PORT}). default_port => ?MYSQL_DEFAULT_PORT
}).
%%===================================================================== %%=====================================================================
%% Hocon schema %% Hocon schema
@ -48,11 +50,10 @@ roots() ->
[{config, #{type => hoconsc:ref(?MODULE, config)}}]. [{config, #{type => hoconsc:ref(?MODULE, config)}}].
fields(config) -> fields(config) ->
[ {server, fun server/1} [{server, fun server/1}] ++
] ++ emqx_connector_schema_lib:relational_db_fields() ++
emqx_connector_schema_lib:relational_db_fields() ++ emqx_connector_schema_lib:ssl_fields() ++
emqx_connector_schema_lib:ssl_fields() ++ emqx_connector_schema_lib:prepare_statement_fields().
emqx_connector_schema_lib:prepare_statement_fields().
server(type) -> emqx_schema:ip_port(); server(type) -> emqx_schema:ip_port();
server(required) -> true; server(required) -> true;
@ -62,47 +63,64 @@ server(desc) -> ?DESC("server");
server(_) -> undefined. server(_) -> undefined.
%% =================================================================== %% ===================================================================
on_start(InstId, #{server := {Host, Port}, on_start(
database := DB, InstId,
username := User, #{
password := Password, server := {Host, Port},
auto_reconnect := AutoReconn, database := DB,
pool_size := PoolSize, username := User,
ssl := SSL } = Config) -> password := Password,
?SLOG(info, #{msg => "starting_mysql_connector", auto_reconnect := AutoReconn,
connector => InstId, config => Config}), pool_size := PoolSize,
SslOpts = case maps:get(enable, SSL) of ssl := SSL
true -> } = Config
[{ssl, emqx_tls_lib:to_client_opts(SSL)}]; ) ->
false -> ?SLOG(info, #{
[] msg => "starting_mysql_connector",
end, connector => InstId,
Options = [{host, Host}, config => Config
{port, Port}, }),
{user, User}, SslOpts =
{password, Password}, case maps:get(enable, SSL) of
{database, DB}, true ->
{auto_reconnect, reconn_interval(AutoReconn)}, [{ssl, emqx_tls_lib:to_client_opts(SSL)}];
{pool_size, PoolSize}], false ->
[]
end,
Options = [
{host, Host},
{port, Port},
{user, User},
{password, Password},
{database, DB},
{auto_reconnect, reconn_interval(AutoReconn)},
{pool_size, PoolSize}
],
PoolName = emqx_plugin_libs_pool:pool_name(InstId), PoolName = emqx_plugin_libs_pool:pool_name(InstId),
Prepares = maps:get(prepare_statement, Config, #{}), Prepares = maps:get(prepare_statement, Config, #{}),
State = init_prepare(#{poolname => PoolName, prepare_statement => Prepares}), State = init_prepare(#{poolname => PoolName, prepare_statement => Prepares}),
case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Options ++ SslOpts) of case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Options ++ SslOpts) of
ok -> {ok, State}; ok -> {ok, State};
{error, Reason} -> {error, Reason} {error, Reason} -> {error, Reason}
end. end.
on_stop(InstId, #{poolname := PoolName}) -> on_stop(InstId, #{poolname := PoolName}) ->
?SLOG(info, #{msg => "stopping_mysql_connector", ?SLOG(info, #{
connector => InstId}), msg => "stopping_mysql_connector",
connector => InstId
}),
emqx_plugin_libs_pool:stop_pool(PoolName). emqx_plugin_libs_pool:stop_pool(PoolName).
on_query(InstId, {Type, SQLOrKey}, AfterQuery, State) -> on_query(InstId, {Type, SQLOrKey}, AfterQuery, State) ->
on_query(InstId, {Type, SQLOrKey, [], default_timeout}, AfterQuery, State); on_query(InstId, {Type, SQLOrKey, [], default_timeout}, AfterQuery, State);
on_query(InstId, {Type, SQLOrKey, Params}, AfterQuery, State) -> on_query(InstId, {Type, SQLOrKey, Params}, AfterQuery, State) ->
on_query(InstId, {Type, SQLOrKey, Params, default_timeout}, AfterQuery, State); on_query(InstId, {Type, SQLOrKey, Params, default_timeout}, AfterQuery, State);
on_query(InstId, {Type, SQLOrKey, Params, Timeout}, AfterQuery, on_query(
#{poolname := PoolName, prepare_statement := Prepares} = State) -> InstId,
{Type, SQLOrKey, Params, Timeout},
AfterQuery,
#{poolname := PoolName, prepare_statement := Prepares} = State
) ->
LogMeta = #{connector => InstId, sql => SQLOrKey, state => State}, LogMeta = #{connector => InstId, sql => SQLOrKey, state => State},
?TRACE("QUERY", "mysql_connector_received", LogMeta), ?TRACE("QUERY", "mysql_connector_received", LogMeta),
Worker = ecpool:get_client(PoolName), Worker = ecpool:get_client(PoolName),
@ -111,28 +129,36 @@ on_query(InstId, {Type, SQLOrKey, Params, Timeout}, AfterQuery,
Result = erlang:apply(mysql, MySqlFunction, [Conn, SQLOrKey, Params, Timeout]), Result = erlang:apply(mysql, MySqlFunction, [Conn, SQLOrKey, Params, Timeout]),
case Result of case Result of
{error, disconnected} -> {error, disconnected} ->
?SLOG(error, ?SLOG(
LogMeta#{msg => "mysql_connector_do_sql_query_failed", reason => disconnected}), error,
LogMeta#{msg => "mysql_connector_do_sql_query_failed", reason => disconnected}
),
%% kill the poll worker to trigger reconnection %% kill the poll worker to trigger reconnection
_ = exit(Conn, restart), _ = exit(Conn, restart),
emqx_resource:query_failed(AfterQuery), emqx_resource:query_failed(AfterQuery),
Result; Result;
{error, not_prepared} -> {error, not_prepared} ->
?SLOG(warning, ?SLOG(
LogMeta#{msg => "mysql_connector_prepare_query_failed", reason => not_prepared}), warning,
LogMeta#{msg => "mysql_connector_prepare_query_failed", reason => not_prepared}
),
case prepare_sql(Prepares, PoolName) of case prepare_sql(Prepares, PoolName) of
ok -> ok ->
%% not return result, next loop will try again %% not return result, next loop will try again
on_query(InstId, {Type, SQLOrKey, Params, Timeout}, AfterQuery, State); on_query(InstId, {Type, SQLOrKey, Params, Timeout}, AfterQuery, State);
{error, Reason} -> {error, Reason} ->
?SLOG(error, ?SLOG(
LogMeta#{msg => "mysql_connector_do_prepare_failed", reason => Reason}), error,
LogMeta#{msg => "mysql_connector_do_prepare_failed", reason => Reason}
),
emqx_resource:query_failed(AfterQuery), emqx_resource:query_failed(AfterQuery),
{error, Reason} {error, Reason}
end; end;
{error, Reason} -> {error, Reason} ->
?SLOG(error, ?SLOG(
LogMeta#{msg => "mysql_connector_do_sql_query_failed", reason => Reason}), error,
LogMeta#{msg => "mysql_connector_do_sql_query_failed", reason => Reason}
),
emqx_resource:query_failed(AfterQuery), emqx_resource:query_failed(AfterQuery),
Result; Result;
_ -> _ ->
@ -147,7 +173,7 @@ on_health_check(_InstId, #{poolname := PoolName} = State) ->
case emqx_plugin_libs_pool:health_check(PoolName, fun ?MODULE:do_health_check/1, State) of case emqx_plugin_libs_pool:health_check(PoolName, fun ?MODULE:do_health_check/1, State) of
{ok, State} -> {ok, State} ->
case do_health_check_prepares(State) of case do_health_check_prepares(State) of
ok-> ok ->
{ok, State}; {ok, State};
{ok, NState} -> {ok, NState} ->
{ok, NState}; {ok, NState};
@ -161,7 +187,7 @@ on_health_check(_InstId, #{poolname := PoolName} = State) ->
do_health_check(Conn) -> do_health_check(Conn) ->
ok == element(1, mysql:query(Conn, <<"SELECT count(1) AS T">>)). ok == element(1, mysql:query(Conn, <<"SELECT count(1) AS T">>)).
do_health_check_prepares(#{prepare_statement := Prepares})when is_map(Prepares) -> do_health_check_prepares(#{prepare_statement := Prepares}) when is_map(Prepares) ->
ok; ok;
do_health_check_prepares(State = #{poolname := PoolName, prepare_statement := {error, Prepares}}) -> do_health_check_prepares(State = #{poolname := PoolName, prepare_statement := {error, Prepares}}) ->
%% retry to prepare %% retry to prepare
@ -180,8 +206,8 @@ reconn_interval(false) -> false.
connect(Options) -> connect(Options) ->
mysql:start_link(Options). mysql:start_link(Options).
-spec to_server(string()) -spec to_server(string()) ->
-> {inet:ip_address() | inet:hostname(), pos_integer()}. {inet:ip_address() | inet:hostname(), pos_integer()}.
to_server(Str) -> to_server(Str) ->
emqx_connector_schema_lib:parse_server(Str, ?MYSQL_HOST_OPTIONS). emqx_connector_schema_lib:parse_server(Str, ?MYSQL_HOST_OPTIONS).
@ -215,20 +241,27 @@ prepare_sql(Prepares, PoolName) ->
do_prepare_sql(Prepares, PoolName) -> do_prepare_sql(Prepares, PoolName) ->
Conns = Conns =
[begin [
{ok, Conn} = ecpool_worker:client(Worker), begin
Conn {ok, Conn} = ecpool_worker:client(Worker),
end || {_Name, Worker} <- ecpool:workers(PoolName)], Conn
end
|| {_Name, Worker} <- ecpool:workers(PoolName)
],
prepare_sql_to_conn_list(Conns, Prepares). prepare_sql_to_conn_list(Conns, Prepares).
prepare_sql_to_conn_list([], _PrepareList) -> ok; prepare_sql_to_conn_list([], _PrepareList) ->
ok;
prepare_sql_to_conn_list([Conn | ConnList], PrepareList) -> prepare_sql_to_conn_list([Conn | ConnList], PrepareList) ->
case prepare_sql_to_conn(Conn, PrepareList) of case prepare_sql_to_conn(Conn, PrepareList) of
ok -> ok ->
prepare_sql_to_conn_list(ConnList, PrepareList); prepare_sql_to_conn_list(ConnList, PrepareList);
{error, R} -> {error, R} ->
%% rollback %% rollback
Fun = fun({Key, _}) -> _ = unprepare_sql_to_conn(Conn, Key), ok end, Fun = fun({Key, _}) ->
_ = unprepare_sql_to_conn(Conn, Key),
ok
end,
lists:foreach(Fun, PrepareList), lists:foreach(Fun, PrepareList),
{error, R} {error, R}
end. end.

View File

@ -26,24 +26,26 @@
-behaviour(emqx_resource). -behaviour(emqx_resource).
%% callbacks of behaviour emqx_resource %% callbacks of behaviour emqx_resource
-export([ on_start/2 -export([
, on_stop/2 on_start/2,
, on_query/4 on_stop/2,
, on_health_check/2 on_query/4,
]). on_health_check/2
]).
-export([connect/1]). -export([connect/1]).
-export([ query/3 -export([
, prepared_query/3 query/3,
]). prepared_query/3
]).
-export([do_health_check/1]). -export([do_health_check/1]).
-define( PGSQL_HOST_OPTIONS -define(PGSQL_HOST_OPTIONS, #{
, #{ host_type => inet_addr host_type => inet_addr,
, default_port => ?PGSQL_DEFAULT_PORT}). default_port => ?PGSQL_DEFAULT_PORT
}).
%%===================================================================== %%=====================================================================
@ -52,9 +54,9 @@ roots() ->
fields(config) -> fields(config) ->
[{server, fun server/1}] ++ [{server, fun server/1}] ++
emqx_connector_schema_lib:relational_db_fields() ++ emqx_connector_schema_lib:relational_db_fields() ++
emqx_connector_schema_lib:ssl_fields() ++ emqx_connector_schema_lib:ssl_fields() ++
emqx_connector_schema_lib:prepare_statement_fields(). emqx_connector_schema_lib:prepare_statement_fields().
server(type) -> emqx_schema:ip_port(); server(type) -> emqx_schema:ip_port();
server(required) -> true; server(required) -> true;
@ -64,52 +66,73 @@ server(desc) -> ?DESC("server");
server(_) -> undefined. server(_) -> undefined.
%% =================================================================== %% ===================================================================
on_start(InstId, #{server := {Host, Port}, on_start(
database := DB, InstId,
username := User, #{
password := Password, server := {Host, Port},
auto_reconnect := AutoReconn, database := DB,
pool_size := PoolSize, username := User,
ssl := SSL} = Config) -> password := Password,
?SLOG(info, #{msg => "starting_postgresql_connector", auto_reconnect := AutoReconn,
connector => InstId, config => Config}), pool_size := PoolSize,
SslOpts = case maps:get(enable, SSL) of ssl := SSL
true -> } = Config
[{ssl, true}, ) ->
{ssl_opts, emqx_tls_lib:to_client_opts(SSL)}]; ?SLOG(info, #{
false -> msg => "starting_postgresql_connector",
[{ssl, false}] connector => InstId,
end, config => Config
Options = [{host, Host}, }),
{port, Port}, SslOpts =
{username, User}, case maps:get(enable, SSL) of
{password, Password}, true ->
{database, DB}, [
{auto_reconnect, reconn_interval(AutoReconn)}, {ssl, true},
{pool_size, PoolSize}, {ssl_opts, emqx_tls_lib:to_client_opts(SSL)}
{prepare_statement, maps:to_list(maps:get(prepare_statement, Config, #{}))}], ];
false ->
[{ssl, false}]
end,
Options = [
{host, Host},
{port, Port},
{username, User},
{password, Password},
{database, DB},
{auto_reconnect, reconn_interval(AutoReconn)},
{pool_size, PoolSize},
{prepare_statement, maps:to_list(maps:get(prepare_statement, Config, #{}))}
],
PoolName = emqx_plugin_libs_pool:pool_name(InstId), PoolName = emqx_plugin_libs_pool:pool_name(InstId),
case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Options ++ SslOpts) of case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Options ++ SslOpts) of
ok -> {ok, #{poolname => PoolName}}; ok -> {ok, #{poolname => PoolName}};
{error, Reason} -> {error, Reason} {error, Reason} -> {error, Reason}
end. end.
on_stop(InstId, #{poolname := PoolName}) -> on_stop(InstId, #{poolname := PoolName}) ->
?SLOG(info, #{msg => "stopping postgresql connector", ?SLOG(info, #{
connector => InstId}), msg => "stopping postgresql connector",
connector => InstId
}),
emqx_plugin_libs_pool:stop_pool(PoolName). emqx_plugin_libs_pool:stop_pool(PoolName).
on_query(InstId, {Type, NameOrSQL}, AfterQuery, #{poolname := _PoolName} = State) -> on_query(InstId, {Type, NameOrSQL}, AfterQuery, #{poolname := _PoolName} = State) ->
on_query(InstId, {Type, NameOrSQL, []}, AfterQuery, State); on_query(InstId, {Type, NameOrSQL, []}, AfterQuery, State);
on_query(InstId, {Type, NameOrSQL, Params}, AfterQuery, #{poolname := PoolName} = State) -> on_query(InstId, {Type, NameOrSQL, Params}, AfterQuery, #{poolname := PoolName} = State) ->
?SLOG(debug, #{msg => "postgresql connector received sql query", ?SLOG(debug, #{
connector => InstId, sql => NameOrSQL, state => State}), msg => "postgresql connector received sql query",
connector => InstId,
sql => NameOrSQL,
state => State
}),
case Result = ecpool:pick_and_do(PoolName, {?MODULE, Type, [NameOrSQL, Params]}, no_handover) of case Result = ecpool:pick_and_do(PoolName, {?MODULE, Type, [NameOrSQL, Params]}, no_handover) of
{error, Reason} -> {error, Reason} ->
?SLOG(error, #{ ?SLOG(error, #{
msg => "postgresql connector do sql query failed", msg => "postgresql connector do sql query failed",
connector => InstId, sql => NameOrSQL, reason => Reason}), connector => InstId,
sql => NameOrSQL,
reason => Reason
}),
emqx_resource:query_failed(AfterQuery); emqx_resource:query_failed(AfterQuery);
_ -> _ ->
emqx_resource:query_success(AfterQuery) emqx_resource:query_success(AfterQuery)
@ -127,7 +150,7 @@ reconn_interval(true) -> 15;
reconn_interval(false) -> false. reconn_interval(false) -> false.
connect(Opts) -> connect(Opts) ->
Host = proplists:get_value(host, Opts), Host = proplists:get_value(host, Opts),
Username = proplists:get_value(username, Opts), Username = proplists:get_value(username, Opts),
Password = proplists:get_value(password, Opts), Password = proplists:get_value(password, Opts),
PrepareStatement = proplists:get_value(prepare_statement, Opts), PrepareStatement = proplists:get_value(prepare_statement, Opts),
@ -177,7 +200,7 @@ conn_opts([_Opt | Opts], Acc) ->
%% =================================================================== %% ===================================================================
%% typereflt funcs %% typereflt funcs
-spec to_server(string()) -spec to_server(string()) ->
-> {inet:ip_address() | inet:hostname(), pos_integer()}. {inet:ip_address() | inet:hostname(), pos_integer()}.
to_server(Str) -> to_server(Str) ->
emqx_connector_schema_lib:parse_server(Str, ?PGSQL_HOST_OPTIONS). emqx_connector_schema_lib:parse_server(Str, ?PGSQL_HOST_OPTIONS).

View File

@ -25,11 +25,12 @@
-behaviour(emqx_resource). -behaviour(emqx_resource).
%% callbacks of behaviour emqx_resource %% callbacks of behaviour emqx_resource
-export([ on_start/2 -export([
, on_stop/2 on_start/2,
, on_query/4 on_stop/2,
, on_health_check/2 on_query/4,
]). on_health_check/2
]).
-export([do_health_check/1]). -export([do_health_check/1]).
@ -38,50 +39,59 @@
-export([cmd/3]). -export([cmd/3]).
%% redis host don't need parse %% redis host don't need parse
-define( REDIS_HOST_OPTIONS -define(REDIS_HOST_OPTIONS, #{
, #{ host_type => hostname host_type => hostname,
, default_port => ?REDIS_DEFAULT_PORT}). default_port => ?REDIS_DEFAULT_PORT
}).
%%===================================================================== %%=====================================================================
roots() -> roots() ->
[ {config, #{type => hoconsc:union( [
[ hoconsc:ref(?MODULE, cluster) {config, #{
, hoconsc:ref(?MODULE, single) type => hoconsc:union(
, hoconsc:ref(?MODULE, sentinel) [
])} hoconsc:ref(?MODULE, cluster),
} hoconsc:ref(?MODULE, single),
hoconsc:ref(?MODULE, sentinel)
]
)
}}
]. ].
fields(single) -> fields(single) ->
[ {server, fun server/1} [
, {redis_type, #{type => hoconsc:enum([single]), {server, fun server/1},
required => true, {redis_type, #{
desc => ?DESC("single") type => hoconsc:enum([single]),
}} required => true,
desc => ?DESC("single")
}}
] ++ ] ++
redis_fields() ++ redis_fields() ++
emqx_connector_schema_lib:ssl_fields(); emqx_connector_schema_lib:ssl_fields();
fields(cluster) -> fields(cluster) ->
[ {servers, fun servers/1} [
, {redis_type, #{type => hoconsc:enum([cluster]), {servers, fun servers/1},
required => true, {redis_type, #{
desc => ?DESC("cluster") type => hoconsc:enum([cluster]),
}} required => true,
desc => ?DESC("cluster")
}}
] ++ ] ++
redis_fields() ++ redis_fields() ++
emqx_connector_schema_lib:ssl_fields(); emqx_connector_schema_lib:ssl_fields();
fields(sentinel) -> fields(sentinel) ->
[ {servers, fun servers/1} [
, {redis_type, #{type => hoconsc:enum([sentinel]), {servers, fun servers/1},
required => true, {redis_type, #{
desc => ?DESC("sentinel") type => hoconsc:enum([sentinel]),
}} required => true,
, {sentinel, #{type => string(), desc => ?DESC("sentinel_desc") desc => ?DESC("sentinel")
}} }},
{sentinel, #{type => string(), desc => ?DESC("sentinel_desc")}}
] ++ ] ++
redis_fields() ++ redis_fields() ++
emqx_connector_schema_lib:ssl_fields(). emqx_connector_schema_lib:ssl_fields().
server(type) -> emqx_schema:ip_port(); server(type) -> emqx_schema:ip_port();
server(required) -> true; server(required) -> true;
@ -98,62 +108,89 @@ servers(desc) -> ?DESC("servers");
servers(_) -> undefined. servers(_) -> undefined.
%% =================================================================== %% ===================================================================
on_start(InstId, #{redis_type := Type, on_start(
database := Database, InstId,
pool_size := PoolSize, #{
auto_reconnect := AutoReconn, redis_type := Type,
ssl := SSL } = Config) -> database := Database,
?SLOG(info, #{msg => "starting_redis_connector", pool_size := PoolSize,
connector => InstId, config => Config}), auto_reconnect := AutoReconn,
Servers = case Type of ssl := SSL
single -> [{servers, [maps:get(server, Config)]}]; } = Config
_ ->[{servers, maps:get(servers, Config)}] ) ->
end, ?SLOG(info, #{
Opts = [{pool_size, PoolSize}, msg => "starting_redis_connector",
connector => InstId,
config => Config
}),
Servers =
case Type of
single -> [{servers, [maps:get(server, Config)]}];
_ -> [{servers, maps:get(servers, Config)}]
end,
Opts =
[
{pool_size, PoolSize},
{database, Database}, {database, Database},
{password, maps:get(password, Config, "")}, {password, maps:get(password, Config, "")},
{auto_reconnect, reconn_interval(AutoReconn)} {auto_reconnect, reconn_interval(AutoReconn)}
] ++ Servers, ] ++ Servers,
Options = case maps:get(enable, SSL) of Options =
true -> case maps:get(enable, SSL) of
[{ssl, true}, true ->
{ssl_options, emqx_tls_lib:to_client_opts(SSL)}]; [
false -> [{ssl, false}] {ssl, true},
end ++ [{sentinel, maps:get(sentinel, Config, undefined)}], {ssl_options, emqx_tls_lib:to_client_opts(SSL)}
];
false ->
[{ssl, false}]
end ++ [{sentinel, maps:get(sentinel, Config, undefined)}],
PoolName = emqx_plugin_libs_pool:pool_name(InstId), PoolName = emqx_plugin_libs_pool:pool_name(InstId),
case Type of case Type of
cluster -> cluster ->
case eredis_cluster:start_pool(PoolName, Opts ++ [{options, Options}]) of case eredis_cluster:start_pool(PoolName, Opts ++ [{options, Options}]) of
{ok, _} -> {ok, #{poolname => PoolName, type => Type}}; {ok, _} -> {ok, #{poolname => PoolName, type => Type}};
{ok, _, _} -> {ok, #{poolname => PoolName, type => Type}}; {ok, _, _} -> {ok, #{poolname => PoolName, type => Type}};
{error, Reason} -> {error, Reason} {error, Reason} -> {error, Reason}
end; end;
_ -> _ ->
case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Opts ++ [{options, Options}]) of case
ok -> {ok, #{poolname => PoolName, type => Type}}; emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Opts ++ [{options, Options}])
of
ok -> {ok, #{poolname => PoolName, type => Type}};
{error, Reason} -> {error, Reason} {error, Reason} -> {error, Reason}
end end
end. end.
on_stop(InstId, #{poolname := PoolName, type := Type}) -> on_stop(InstId, #{poolname := PoolName, type := Type}) ->
?SLOG(info, #{msg => "stopping_redis_connector", ?SLOG(info, #{
connector => InstId}), msg => "stopping_redis_connector",
connector => InstId
}),
case Type of case Type of
cluster -> eredis_cluster:stop_pool(PoolName); cluster -> eredis_cluster:stop_pool(PoolName);
_ -> emqx_plugin_libs_pool:stop_pool(PoolName) _ -> emqx_plugin_libs_pool:stop_pool(PoolName)
end. end.
on_query(InstId, {cmd, Command}, AfterCommand, #{poolname := PoolName, type := Type} = State) -> on_query(InstId, {cmd, Command}, AfterCommand, #{poolname := PoolName, type := Type} = State) ->
?TRACE("QUERY", "redis_connector_received", ?TRACE(
#{connector => InstId, sql => Command, state => State}), "QUERY",
Result = case Type of "redis_connector_received",
cluster -> eredis_cluster:q(PoolName, Command); #{connector => InstId, sql => Command, state => State}
_ -> ecpool:pick_and_do(PoolName, {?MODULE, cmd, [Type, Command]}, no_handover) ),
end, Result =
case Type of
cluster -> eredis_cluster:q(PoolName, Command);
_ -> ecpool:pick_and_do(PoolName, {?MODULE, cmd, [Type, Command]}, no_handover)
end,
case Result of case Result of
{error, Reason} -> {error, Reason} ->
?SLOG(error, #{msg => "redis_connector_do_cmd_query_failed", ?SLOG(error, #{
connector => InstId, sql => Command, reason => Reason}), msg => "redis_connector_do_cmd_query_failed",
connector => InstId,
sql => Command,
reason => Reason
}),
emqx_resource:query_failed(AfterCommand); emqx_resource:query_failed(AfterCommand);
_ -> _ ->
emqx_resource:query_success(AfterCommand) emqx_resource:query_success(AfterCommand)
@ -161,14 +198,19 @@ on_query(InstId, {cmd, Command}, AfterCommand, #{poolname := PoolName, type := T
Result. Result.
extract_eredis_cluster_workers(PoolName) -> extract_eredis_cluster_workers(PoolName) ->
lists:flatten([gen_server:call(PoolPid, get_all_workers) || lists:flatten([
PoolPid <- eredis_cluster_monitor:get_all_pools(PoolName)]). gen_server:call(PoolPid, get_all_workers)
|| PoolPid <- eredis_cluster_monitor:get_all_pools(PoolName)
]).
eredis_cluster_workers_exist_and_are_connected(Workers) -> eredis_cluster_workers_exist_and_are_connected(Workers) ->
length(Workers) > 0 andalso lists:all( length(Workers) > 0 andalso
fun({_, Pid, _, _}) -> lists:all(
eredis_cluster_pool_worker:is_connected(Pid) =:= true fun({_, Pid, _, _}) ->
end, Workers). eredis_cluster_pool_worker:is_connected(Pid) =:= true
end,
Workers
).
on_health_check(_InstId, #{type := cluster, poolname := PoolName} = State) -> on_health_check(_InstId, #{type := cluster, poolname := PoolName} = State) ->
case eredis_cluster:pool_exists(PoolName) of case eredis_cluster:pool_exists(PoolName) of
@ -178,12 +220,9 @@ on_health_check(_InstId, #{type := cluster, poolname := PoolName} = State) ->
true -> {ok, State}; true -> {ok, State};
false -> {error, health_check_failed, State} false -> {error, health_check_failed, State}
end; end;
false -> false ->
{error, health_check_failed, State} {error, health_check_failed, State}
end; end;
on_health_check(_InstId, #{poolname := PoolName} = State) -> on_health_check(_InstId, #{poolname := PoolName} = State) ->
emqx_plugin_libs_pool:health_check(PoolName, fun ?MODULE:do_health_check/1, State). emqx_plugin_libs_pool:health_check(PoolName, fun ?MODULE:do_health_check/1, State).
@ -206,28 +245,32 @@ connect(Opts) ->
eredis:start_link(Opts). eredis:start_link(Opts).
redis_fields() -> redis_fields() ->
[ {pool_size, fun emqx_connector_schema_lib:pool_size/1} [
, {password, fun emqx_connector_schema_lib:password/1} {pool_size, fun emqx_connector_schema_lib:pool_size/1},
, {database, #{type => integer(), {password, fun emqx_connector_schema_lib:password/1},
default => 0, {database, #{
required => true, type => integer(),
desc => ?DESC("database") default => 0,
}} required => true,
, {auto_reconnect, fun emqx_connector_schema_lib:auto_reconnect/1} desc => ?DESC("database")
}},
{auto_reconnect, fun emqx_connector_schema_lib:auto_reconnect/1}
]. ].
-spec to_server_raw(string()) -spec to_server_raw(string()) ->
-> {string(), pos_integer()}. {string(), pos_integer()}.
to_server_raw(Server) -> to_server_raw(Server) ->
emqx_connector_schema_lib:parse_server(Server, ?REDIS_HOST_OPTIONS). emqx_connector_schema_lib:parse_server(Server, ?REDIS_HOST_OPTIONS).
-spec to_servers_raw(string()) -spec to_servers_raw(string()) ->
-> [{string(), pos_integer()}]. [{string(), pos_integer()}].
to_servers_raw(Servers) -> to_servers_raw(Servers) ->
lists:map( fun(Server) -> lists:map(
emqx_connector_schema_lib:parse_server(Server, ?REDIS_HOST_OPTIONS) fun(Server) ->
end emqx_connector_schema_lib:parse_server(Server, ?REDIS_HOST_OPTIONS)
, string:tokens(str(Servers), ", ")). end,
string:tokens(str(Servers), ", ")
).
str(A) when is_atom(A) -> str(A) when is_atom(A) ->
atom_to_list(A); atom_to_list(A);

View File

@ -24,10 +24,11 @@
-export([namespace/0, roots/0, fields/1, desc/1]). -export([namespace/0, roots/0, fields/1, desc/1]).
-export([ get_response/0 -export([
, put_request/0 get_response/0,
, post_request/0 put_request/0,
]). post_request/0
]).
%% the config for http bridges do not need connectors %% the config for http bridges do not need connectors
-define(CONN_TYPES, [mqtt]). -define(CONN_TYPES, [mqtt]).
@ -55,18 +56,25 @@ namespace() -> connector.
roots() -> ["connectors"]. roots() -> ["connectors"].
fields(connectors) -> fields("connectors"); fields(connectors) ->
fields("connectors");
fields("connectors") -> fields("connectors") ->
[ {mqtt, [
mk(hoconsc:map(name, {mqtt,
hoconsc:union([ ref(emqx_connector_mqtt_schema, "connector") mk(
])), hoconsc:map(
#{ desc => ?DESC("mqtt") name,
})} hoconsc:union([ref(emqx_connector_mqtt_schema, "connector")])
),
#{desc => ?DESC("mqtt")}
)}
]. ].
desc(Record) when Record =:= connectors; desc(Record) when
Record =:= "connectors" -> ?DESC("desc_connector"); Record =:= connectors;
Record =:= "connectors"
->
?DESC("desc_connector");
desc(_) -> desc(_) ->
undefined. undefined.

View File

@ -19,32 +19,36 @@
-include_lib("typerefl/include/types.hrl"). -include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl"). -include_lib("hocon/include/hoconsc.hrl").
-export([ relational_db_fields/0 -export([
, ssl_fields/0 relational_db_fields/0,
, prepare_statement_fields/0 ssl_fields/0,
]). prepare_statement_fields/0
]).
-export([ ip_port_to_string/1 -export([
, parse_server/2 ip_port_to_string/1,
]). parse_server/2
]).
-export([ pool_size/1 -export([
, database/1 pool_size/1,
, username/1 database/1,
, password/1 username/1,
, auto_reconnect/1 password/1,
]). auto_reconnect/1
]).
-type database() :: binary(). -type database() :: binary().
-type pool_size() :: pos_integer(). -type pool_size() :: pos_integer().
-type username() :: binary(). -type username() :: binary().
-type password() :: binary(). -type password() :: binary().
-reflect_type([ database/0 -reflect_type([
, pool_size/0 database/0,
, username/0 pool_size/0,
, password/0 username/0,
]). password/0
]).
-export([roots/0, fields/1]). -export([roots/0, fields/1]).
@ -53,24 +57,25 @@ roots() -> [].
fields(_) -> []. fields(_) -> [].
ssl_fields() -> ssl_fields() ->
[ {ssl, #{type => hoconsc:ref(emqx_schema, "ssl_client_opts"), [
default => #{<<"enable">> => false}, {ssl, #{
desc => ?DESC("ssl") type => hoconsc:ref(emqx_schema, "ssl_client_opts"),
} default => #{<<"enable">> => false},
} desc => ?DESC("ssl")
}}
]. ].
relational_db_fields() -> relational_db_fields() ->
[ {database, fun database/1} [
, {pool_size, fun pool_size/1} {database, fun database/1},
, {username, fun username/1} {pool_size, fun pool_size/1},
, {password, fun password/1} {username, fun username/1},
, {auto_reconnect, fun auto_reconnect/1} {password, fun password/1},
{auto_reconnect, fun auto_reconnect/1}
]. ].
prepare_statement_fields() -> prepare_statement_fields() ->
[ {prepare_statement, fun prepare_statement/1} [{prepare_statement, fun prepare_statement/1}].
].
prepare_statement(type) -> map(); prepare_statement(type) -> map();
prepare_statement(desc) -> ?DESC("prepare_statement"); prepare_statement(desc) -> ?DESC("prepare_statement");
@ -113,16 +118,16 @@ parse_server(Str, #{host_type := inet_addr, default_port := DefaultPort}) ->
try string:tokens(str(Str), ": ") of try string:tokens(str(Str), ": ") of
[Ip, Port] -> [Ip, Port] ->
case parse_ip(Ip) of case parse_ip(Ip) of
{ok, R} -> {R, list_to_integer(Port)} {ok, R} -> {R, list_to_integer(Port)}
end; end;
[Ip] -> [Ip] ->
case parse_ip(Ip) of case parse_ip(Ip) of
{ok, R} -> {R, DefaultPort} {ok, R} -> {R, DefaultPort}
end; end;
_ -> _ ->
?THROW_ERROR("Bad server schema.") ?THROW_ERROR("Bad server schema.")
catch catch
error : Reason -> error:Reason ->
?THROW_ERROR(Reason) ?THROW_ERROR(Reason)
end; end;
parse_server(Str, #{host_type := hostname, default_port := DefaultPort}) -> parse_server(Str, #{host_type := hostname, default_port := DefaultPort}) ->
@ -134,7 +139,7 @@ parse_server(Str, #{host_type := hostname, default_port := DefaultPort}) ->
_ -> _ ->
?THROW_ERROR("Bad server schema.") ?THROW_ERROR("Bad server schema.")
catch catch
error : Reason -> error:Reason ->
?THROW_ERROR(Reason) ?THROW_ERROR(Reason)
end; end;
parse_server(_, _) -> parse_server(_, _) ->

View File

@ -1,4 +1,3 @@
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Copyright (c) 2020-2022 EMQ Technologies Co., Ltd. All Rights Reserved. %% Copyright (c) 2020-2022 EMQ Technologies Co., Ltd. All Rights Reserved.
%% %%
@ -15,37 +14,38 @@
%% limitations under the License. %% limitations under the License.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
-module(emqx_resource_ssl). -module(emqx_connector_ssl).
-export([ convert_certs/2 -export([
, convert_certs/3 convert_certs/2,
, clear_certs/2 clear_certs/2
]). ]).
convert_certs(ResId, NewConfig) -> convert_certs(RltvDir, NewConfig) ->
convert_certs(ResId, NewConfig, #{}). NewSSL = drop_invalid_certs(maps:get(<<"ssl">>, NewConfig, undefined)),
case emqx_tls_lib:ensure_ssl_files(RltvDir, NewSSL) of
convert_certs(ResId, NewConfig, OldConfig) ->
OldSSL = drop_invalid_certs(maps:get(ssl, OldConfig, undefined)),
NewSSL = drop_invalid_certs(maps:get(ssl, NewConfig, undefined)),
CertsDir = cert_dir(ResId),
case emqx_tls_lib:ensure_ssl_files(CertsDir, NewSSL) of
{ok, NewSSL1} -> {ok, NewSSL1} ->
ok = emqx_tls_lib:delete_ssl_files(CertsDir, NewSSL1, OldSSL),
{ok, new_ssl_config(NewConfig, NewSSL1)}; {ok, new_ssl_config(NewConfig, NewSSL1)};
{error, Reason} -> {error, Reason} ->
{error, {bad_ssl_config, Reason}} {error, {bad_ssl_config, Reason}}
end. end.
clear_certs(ResId, Config) -> clear_certs(RltvDir, Config) ->
OldSSL = drop_invalid_certs(maps:get(ssl, Config, undefined)), OldSSL = drop_invalid_certs(map_get_oneof([<<"ssl">>, ssl], Config, undefined)),
ok = emqx_tls_lib:delete_ssl_files(cert_dir(ResId), undefined, OldSSL). ok = emqx_tls_lib:delete_ssl_files(RltvDir, undefined, OldSSL).
cert_dir(ResId) ->
filename:join(["resources", ResId]).
new_ssl_config(Config, undefined) -> Config; new_ssl_config(Config, undefined) -> Config;
new_ssl_config(Config, SSL) -> Config#{ssl => SSL}. new_ssl_config(Config, SSL) -> Config#{<<"ssl">> => SSL}.
drop_invalid_certs(undefined) -> undefined; drop_invalid_certs(undefined) -> undefined;
drop_invalid_certs(SSL) -> emqx_tls_lib:drop_invalid_certs(SSL). drop_invalid_certs(SSL) -> emqx_tls_lib:drop_invalid_certs(SSL).
map_get_oneof([], _Map, Default) ->
Default;
map_get_oneof([Key | Keys], Map, Default) ->
case maps:find(Key, Map) of
error ->
map_get_oneof(Keys, Map, Default);
{ok, Value} ->
Value
end.

View File

@ -27,20 +27,24 @@ start_link() ->
supervisor:start_link({local, ?SERVER}, ?MODULE, []). supervisor:start_link({local, ?SERVER}, ?MODULE, []).
init([]) -> init([]) ->
SupFlags = #{strategy => one_for_all, SupFlags = #{
intensity => 5, strategy => one_for_all,
period => 20}, intensity => 5,
period => 20
},
ChildSpecs = [ ChildSpecs = [
child_spec(emqx_connector_mqtt) child_spec(emqx_connector_mqtt)
], ],
{ok, {SupFlags, ChildSpecs}}. {ok, {SupFlags, ChildSpecs}}.
child_spec(Mod) -> child_spec(Mod) ->
#{id => Mod, #{
start => {Mod, start_link, []}, id => Mod,
restart => permanent, start => {Mod, start_link, []},
shutdown => 3000, restart => permanent,
type => supervisor, shutdown => 3000,
modules => [Mod]}. type => supervisor,
modules => [Mod]
}.
%% internal functions %% internal functions

View File

@ -18,21 +18,24 @@
-module(emqx_connector_mqtt_mod). -module(emqx_connector_mqtt_mod).
-export([ start/1 -export([
, send/2 start/1,
, stop/1 send/2,
, ping/1 stop/1,
]). ping/1
]).
-export([ ensure_subscribed/3 -export([
, ensure_unsubscribed/2 ensure_subscribed/3,
]). ensure_unsubscribed/2
]).
%% callbacks for emqtt %% callbacks for emqtt
-export([ handle_puback/2 -export([
, handle_publish/3 handle_puback/2,
, handle_disconnected/2 handle_publish/3,
]). handle_disconnected/2
]).
-include_lib("emqx/include/logger.hrl"). -include_lib("emqx/include/logger.hrl").
-include_lib("emqx/include/emqx_mqtt.hrl"). -include_lib("emqx/include/emqx_mqtt.hrl").
@ -69,7 +72,7 @@ start(Config) ->
ok = sub_remote_topics(Pid, Subscriptions), ok = sub_remote_topics(Pid, Subscriptions),
{ok, #{client_pid => Pid, subscriptions => Subscriptions}} {ok, #{client_pid => Pid, subscriptions => Subscriptions}}
catch catch
throw : Reason -> throw:Reason ->
ok = stop(#{client_pid => Pid}), ok = stop(#{client_pid => Pid}),
{error, error_reason(Reason, ServerStr)} {error, error_reason(Reason, ServerStr)}
end; end;
@ -90,13 +93,14 @@ stop(#{client_pid := Pid}) ->
ping(undefined) -> ping(undefined) ->
pang; pang;
ping(#{client_pid := Pid}) -> ping(#{client_pid := Pid}) ->
emqtt:ping(Pid). emqtt:ping(Pid).
ensure_subscribed(#{client_pid := Pid, subscriptions := Subs} = Conn, Topic, QoS) when is_pid(Pid) -> ensure_subscribed(#{client_pid := Pid, subscriptions := Subs} = Conn, Topic, QoS) when
is_pid(Pid)
->
case emqtt:subscribe(Pid, Topic, QoS) of case emqtt:subscribe(Pid, Topic, QoS) of
{ok, _, _} -> Conn#{subscriptions => [{Topic, QoS}|Subs]}; {ok, _, _} -> Conn#{subscriptions => [{Topic, QoS} | Subs]};
Error -> {error, Error} Error -> {error, Error}
end; end;
ensure_subscribed(_Conn, _Topic, _QoS) -> ensure_subscribed(_Conn, _Topic, _QoS) ->
@ -120,15 +124,14 @@ safe_stop(Pid, StopF, Timeout) ->
try try
StopF() StopF()
catch catch
_ : _ -> _:_ ->
ok ok
end, end,
receive receive
{'DOWN', MRef, _, _, _} -> {'DOWN', MRef, _, _, _} ->
ok ok
after after Timeout ->
Timeout -> exit(Pid, kill)
exit(Pid, kill)
end. end.
send(Conn, Msgs) -> send(Conn, Msgs) ->
@ -157,26 +160,38 @@ send(#{client_pid := ClientPid} = Conn, [Msg | Rest], PktIds) ->
{error, Reason} {error, Reason}
end. end.
handle_puback(#{packet_id := PktId, reason_code := RC}, Parent) handle_puback(#{packet_id := PktId, reason_code := RC}, Parent) when
when RC =:= ?RC_SUCCESS; RC =:= ?RC_SUCCESS;
RC =:= ?RC_NO_MATCHING_SUBSCRIBERS -> RC =:= ?RC_NO_MATCHING_SUBSCRIBERS
Parent ! {batch_ack, PktId}, ok; ->
Parent ! {batch_ack, PktId},
ok;
handle_puback(#{packet_id := PktId, reason_code := RC}, _Parent) -> handle_puback(#{packet_id := PktId, reason_code := RC}, _Parent) ->
?SLOG(warning, #{msg => "publish_to_remote_node_falied", ?SLOG(warning, #{
packet_id => PktId, reason_code => RC}). msg => "publish_to_remote_node_falied",
packet_id => PktId,
reason_code => RC
}).
handle_publish(Msg, undefined, _Opts) -> handle_publish(Msg, undefined, _Opts) ->
?SLOG(error, #{msg => "cannot_publish_to_local_broker_as" ?SLOG(error, #{
"_'ingress'_is_not_configured", msg =>
message => Msg}); "cannot_publish_to_local_broker_as"
"_'ingress'_is_not_configured",
message => Msg
});
handle_publish(#{properties := Props} = Msg0, Vars, Opts) -> handle_publish(#{properties := Props} = Msg0, Vars, Opts) ->
Msg = format_msg_received(Msg0, Opts), Msg = format_msg_received(Msg0, Opts),
?SLOG(debug, #{msg => "publish_to_local_broker", ?SLOG(debug, #{
message => Msg, vars => Vars}), msg => "publish_to_local_broker",
message => Msg,
vars => Vars
}),
case Vars of case Vars of
#{on_message_received := {Mod, Func, Args}} -> #{on_message_received := {Mod, Func, Args}} ->
_ = erlang:apply(Mod, Func, [Msg | Args]); _ = erlang:apply(Mod, Func, [Msg | Args]);
_ -> ok _ ->
ok
end, end,
maybe_publish_to_local_broker(Msg, Vars, Props). maybe_publish_to_local_broker(Msg, Vars, Props).
@ -184,12 +199,14 @@ handle_disconnected(Reason, Parent) ->
Parent ! {disconnected, self(), Reason}. Parent ! {disconnected, self(), Reason}.
make_hdlr(Parent, Vars, Opts) -> make_hdlr(Parent, Vars, Opts) ->
#{puback => {fun ?MODULE:handle_puback/2, [Parent]}, #{
publish => {fun ?MODULE:handle_publish/3, [Vars, Opts]}, puback => {fun ?MODULE:handle_puback/2, [Parent]},
disconnected => {fun ?MODULE:handle_disconnected/2, [Parent]} publish => {fun ?MODULE:handle_publish/3, [Vars, Opts]},
}. disconnected => {fun ?MODULE:handle_disconnected/2, [Parent]}
}.
sub_remote_topics(_ClientPid, undefined) -> ok; sub_remote_topics(_ClientPid, undefined) ->
ok;
sub_remote_topics(ClientPid, #{remote_topic := FromTopic, remote_qos := QoS}) -> sub_remote_topics(ClientPid, #{remote_topic := FromTopic, remote_qos := QoS}) ->
case emqtt:subscribe(ClientPid, FromTopic, QoS) of case emqtt:subscribe(ClientPid, FromTopic, QoS) of
{ok, _, _} -> ok; {ok, _, _} -> ok;
@ -199,52 +216,82 @@ sub_remote_topics(ClientPid, #{remote_topic := FromTopic, remote_qos := QoS}) ->
process_config(Config) -> process_config(Config) ->
maps:without([conn_type, address, receive_mountpoint, subscriptions, name], Config). maps:without([conn_type, address, receive_mountpoint, subscriptions, name], Config).
maybe_publish_to_local_broker(#{topic := Topic} = Msg, #{remote_topic := SubTopic} = Vars, maybe_publish_to_local_broker(
Props) -> #{topic := Topic} = Msg,
#{remote_topic := SubTopic} = Vars,
Props
) ->
case maps:get(local_topic, Vars, undefined) of case maps:get(local_topic, Vars, undefined) of
undefined -> undefined ->
ok; %% local topic is not set, discard it %% local topic is not set, discard it
ok;
_ -> _ ->
case emqx_topic:match(Topic, SubTopic) of case emqx_topic:match(Topic, SubTopic) of
true -> true ->
_ = emqx_broker:publish(emqx_connector_mqtt_msg:to_broker_msg(Msg, Vars, Props)), _ = emqx_broker:publish(
emqx_connector_mqtt_msg:to_broker_msg(Msg, Vars, Props)
),
ok; ok;
false -> false ->
?SLOG(warning, #{msg => "discard_message_as_topic_not_matched", ?SLOG(warning, #{
message => Msg, subscribed => SubTopic, got_topic => Topic}) msg => "discard_message_as_topic_not_matched",
message => Msg,
subscribed => SubTopic,
got_topic => Topic
})
end end
end. end.
format_msg_received(#{dup := Dup, payload := Payload, properties := Props, format_msg_received(
qos := QoS, retain := Retain, topic := Topic}, #{server := Server}) -> #{
#{ id => emqx_guid:to_hexstr(emqx_guid:gen()) dup := Dup,
, server => Server payload := Payload,
, payload => Payload properties := Props,
, topic => Topic qos := QoS,
, qos => QoS retain := Retain,
, dup => Dup topic := Topic
, retain => Retain },
, pub_props => printable_maps(Props) #{server := Server}
, message_received_at => erlang:system_time(millisecond) ) ->
}. #{
id => emqx_guid:to_hexstr(emqx_guid:gen()),
server => Server,
payload => Payload,
topic => Topic,
qos => QoS,
dup => Dup,
retain => Retain,
pub_props => printable_maps(Props),
message_received_at => erlang:system_time(millisecond)
}.
printable_maps(undefined) -> #{}; printable_maps(undefined) ->
#{};
printable_maps(Headers) -> printable_maps(Headers) ->
maps:fold( maps:fold(
fun ('User-Property', V0, AccIn) when is_list(V0) -> fun
('User-Property', V0, AccIn) when is_list(V0) ->
AccIn#{ AccIn#{
'User-Property' => maps:from_list(V0), 'User-Property' => maps:from_list(V0),
'User-Property-Pairs' => [#{ 'User-Property-Pairs' => [
key => Key, #{
value => Value key => Key,
} || {Key, Value} <- V0] value => Value
}
|| {Key, Value} <- V0
]
}; };
(K, V0, AccIn) -> AccIn#{K => V0} (K, V0, AccIn) ->
end, #{}, Headers). AccIn#{K => V0}
end,
#{},
Headers
).
ip_port_to_server_str(Host, Port) -> ip_port_to_server_str(Host, Port) ->
HostStr = case inet:ntoa(Host) of HostStr =
{error, einval} -> Host; case inet:ntoa(Host) of
IPStr -> IPStr {error, einval} -> Host;
end, IPStr -> IPStr
end,
list_to_binary(io_lib:format("~s:~w", [HostStr, Port])). list_to_binary(io_lib:format("~s:~w", [HostStr, Port])).

View File

@ -16,17 +16,19 @@
-module(emqx_connector_mqtt_msg). -module(emqx_connector_mqtt_msg).
-export([ to_binary/1 -export([
, from_binary/1 to_binary/1,
, make_pub_vars/2 from_binary/1,
, to_remote_msg/2 make_pub_vars/2,
, to_broker_msg/3 to_remote_msg/2,
, estimate_size/1 to_broker_msg/3,
]). estimate_size/1
]).
-export([ replace_vars_in_str/2 -export([
, replace_simple_var/2 replace_vars_in_str/2,
]). replace_simple_var/2
]).
-export_type([msg/0]). -export_type([msg/0]).
@ -34,7 +36,6 @@
-include_lib("emqtt/include/emqtt.hrl"). -include_lib("emqtt/include/emqtt.hrl").
-type msg() :: emqx_types:message(). -type msg() :: emqx_types:message().
-type exp_msg() :: emqx_types:message() | #mqtt_msg{}. -type exp_msg() :: emqx_types:message() | #mqtt_msg{}.
@ -46,7 +47,8 @@
payload := binary() payload := binary()
}. }.
make_pub_vars(_, undefined) -> undefined; make_pub_vars(_, undefined) ->
undefined;
make_pub_vars(Mountpoint, Conf) when is_map(Conf) -> make_pub_vars(Mountpoint, Conf) when is_map(Conf) ->
Conf#{mountpoint => Mountpoint}. Conf#{mountpoint => Mountpoint}.
@ -57,37 +59,56 @@ make_pub_vars(Mountpoint, Conf) when is_map(Conf) ->
%% Shame that we have to know the callback module here %% Shame that we have to know the callback module here
%% would be great if we can get rid of #mqtt_msg{} record %% would be great if we can get rid of #mqtt_msg{} record
%% and use #message{} in all places. %% and use #message{} in all places.
-spec to_remote_msg(msg() | map(), variables()) -spec to_remote_msg(msg() | map(), variables()) ->
-> exp_msg(). exp_msg().
to_remote_msg(#message{flags = Flags0} = Msg, Vars) -> to_remote_msg(#message{flags = Flags0} = Msg, Vars) ->
Retain0 = maps:get(retain, Flags0, false), Retain0 = maps:get(retain, Flags0, false),
MapMsg = maps:put(retain, Retain0, emqx_rule_events:eventmsg_publish(Msg)), MapMsg = maps:put(retain, Retain0, emqx_rule_events:eventmsg_publish(Msg)),
to_remote_msg(MapMsg, Vars); to_remote_msg(MapMsg, Vars);
to_remote_msg(MapMsg, #{remote_topic := TopicToken, payload := PayloadToken, to_remote_msg(MapMsg, #{
remote_qos := QoSToken, retain := RetainToken, mountpoint := Mountpoint}) when is_map(MapMsg) -> remote_topic := TopicToken,
payload := PayloadToken,
remote_qos := QoSToken,
retain := RetainToken,
mountpoint := Mountpoint
}) when is_map(MapMsg) ->
Topic = replace_vars_in_str(TopicToken, MapMsg), Topic = replace_vars_in_str(TopicToken, MapMsg),
Payload = process_payload(PayloadToken, MapMsg), Payload = process_payload(PayloadToken, MapMsg),
QoS = replace_simple_var(QoSToken, MapMsg), QoS = replace_simple_var(QoSToken, MapMsg),
Retain = replace_simple_var(RetainToken, MapMsg), Retain = replace_simple_var(RetainToken, MapMsg),
#mqtt_msg{qos = QoS, #mqtt_msg{
retain = Retain, qos = QoS,
topic = topic(Mountpoint, Topic), retain = Retain,
props = #{}, topic = topic(Mountpoint, Topic),
payload = Payload}; props = #{},
payload = Payload
};
to_remote_msg(#message{topic = Topic} = Msg, #{mountpoint := Mountpoint}) -> to_remote_msg(#message{topic = Topic} = Msg, #{mountpoint := Mountpoint}) ->
Msg#message{topic = topic(Mountpoint, Topic)}. Msg#message{topic = topic(Mountpoint, Topic)}.
%% published from remote node over a MQTT connection %% published from remote node over a MQTT connection
to_broker_msg(#{dup := Dup} = MapMsg, to_broker_msg(
#{local_topic := TopicToken, payload := PayloadToken, #{dup := Dup} = MapMsg,
local_qos := QoSToken, retain := RetainToken, mountpoint := Mountpoint}, Props) -> #{
local_topic := TopicToken,
payload := PayloadToken,
local_qos := QoSToken,
retain := RetainToken,
mountpoint := Mountpoint
},
Props
) ->
Topic = replace_vars_in_str(TopicToken, MapMsg), Topic = replace_vars_in_str(TopicToken, MapMsg),
Payload = process_payload(PayloadToken, MapMsg), Payload = process_payload(PayloadToken, MapMsg),
QoS = replace_simple_var(QoSToken, MapMsg), QoS = replace_simple_var(QoSToken, MapMsg),
Retain = replace_simple_var(RetainToken, MapMsg), Retain = replace_simple_var(RetainToken, MapMsg),
set_headers(Props, set_headers(
emqx_message:set_flags(#{dup => Dup, retain => Retain}, Props,
emqx_message:make(bridge, QoS, topic(Mountpoint, Topic), Payload))). emqx_message:set_flags(
#{dup => Dup, retain => Retain},
emqx_message:make(bridge, QoS, topic(Mountpoint, Topic), Payload)
)
).
process_payload([], Msg) -> process_payload([], Msg) ->
emqx_json:encode(Msg); emqx_json:encode(Msg);

View File

@ -21,17 +21,17 @@
-behaviour(hocon_schema). -behaviour(hocon_schema).
-export([ namespace/0 -export([
, roots/0 namespace/0,
, fields/1 roots/0,
, desc/1 fields/1,
]). desc/1
]).
-export([ ingress_desc/0 -export([
, egress_desc/0 ingress_desc/0,
]). egress_desc/0
]).
-export([non_empty_string/1]).
-import(emqx_schema, [mk_duration/2]). -import(emqx_schema, [mk_duration/2]).
@ -42,146 +42,210 @@ roots() ->
fields("config") -> fields("config") ->
fields("connector") ++ fields("connector") ++
topic_mappings(); topic_mappings();
fields("connector") -> fields("connector") ->
[ {mode, [
sc(hoconsc:enum([cluster_shareload]), {mode,
#{ default => cluster_shareload sc(
, desc => ?DESC("mode") hoconsc:enum([cluster_shareload]),
})} #{
, {server, default => cluster_shareload,
sc(emqx_schema:ip_port(), desc => ?DESC("mode")
#{ required => true }
, desc => ?DESC("server") )},
})} {server,
, {reconnect_interval, mk_duration( sc(
"Reconnect interval. Delay for the MQTT bridge to retry establishing the connection " emqx_schema:ip_port(),
"in case of transportation failure.", #{
#{default => "15s"})} required => true,
, {proto_ver, desc => ?DESC("server")
sc(hoconsc:enum([v3, v4, v5]), }
#{ default => v4 )},
, desc => ?DESC("proto_ver") {reconnect_interval,
})} mk_duration(
, {username, "Reconnect interval. Delay for the MQTT bridge to retry establishing the connection "
sc(binary(), "in case of transportation failure.",
#{ default => "emqx" #{default => "15s"}
, desc => ?DESC("username") )},
})} {proto_ver,
, {password, sc(
sc(binary(), hoconsc:enum([v3, v4, v5]),
#{ default => "emqx" #{
, desc => ?DESC("password") default => v4,
})} desc => ?DESC("proto_ver")
, {clean_start, }
sc(boolean(), )},
#{ default => true {username,
, desc => ?DESC("clean_start") sc(
})} binary(),
, {keepalive, mk_duration("MQTT Keepalive.", #{default => "300s"})} #{
, {retry_interval, mk_duration( default => "emqx",
"Message retry interval. Delay for the MQTT bridge to retry sending the QoS1/QoS2 " desc => ?DESC("username")
"messages in case of ACK not received.", }
#{default => "15s"})} )},
, {max_inflight, {password,
sc(non_neg_integer(), sc(
#{ default => 32 binary(),
, desc => ?DESC("max_inflight") #{
})} default => "emqx",
, {replayq, desc => ?DESC("password")
sc(ref("replayq"), #{})} }
)},
{clean_start,
sc(
boolean(),
#{
default => true,
desc => ?DESC("clean_start")
}
)},
{keepalive, mk_duration("MQTT Keepalive.", #{default => "300s"})},
{retry_interval,
mk_duration(
"Message retry interval. Delay for the MQTT bridge to retry sending the QoS1/QoS2 "
"messages in case of ACK not received.",
#{default => "15s"}
)},
{max_inflight,
sc(
non_neg_integer(),
#{
default => 32,
desc => ?DESC("max_inflight")
}
)},
{replayq, sc(ref("replayq"), #{})}
] ++ emqx_connector_schema_lib:ssl_fields(); ] ++ emqx_connector_schema_lib:ssl_fields();
fields("ingress") -> fields("ingress") ->
%% the message maybe subscribed by rules, in this case 'local_topic' is not necessary %% the message maybe subscribed by rules, in this case 'local_topic' is not necessary
[ {remote_topic, [
sc(binary(), {remote_topic,
#{ required => true sc(
, validator => fun ?MODULE:non_empty_string/1 binary(),
, desc => ?DESC("ingress_remote_topic") #{
})} required => true,
, {remote_qos, validator => fun emqx_schema:non_empty_string/1,
sc(qos(), desc => ?DESC("ingress_remote_topic")
#{ default => 1 }
, desc => ?DESC("ingress_remote_qos") )},
})} {remote_qos,
, {local_topic, sc(
sc(binary(), qos(),
#{ validator => fun ?MODULE:non_empty_string/1 #{
, desc => ?DESC("ingress_local_topic") default => 1,
})} desc => ?DESC("ingress_remote_qos")
, {local_qos, }
sc(qos(), )},
#{ default => <<"${qos}">> {local_topic,
, desc => ?DESC("ingress_local_qos") sc(
})} binary(),
, {hookpoint, #{
sc(binary(), validator => fun emqx_schema:non_empty_string/1,
#{ desc => ?DESC("ingress_hookpoint") desc => ?DESC("ingress_local_topic")
})} }
)},
{local_qos,
sc(
qos(),
#{
default => <<"${qos}">>,
desc => ?DESC("ingress_local_qos")
}
)},
{hookpoint,
sc(
binary(),
#{desc => ?DESC("ingress_hookpoint")}
)},
, {retain, {retain,
sc(hoconsc:union([boolean(), binary()]), sc(
#{ default => <<"${retain}">> hoconsc:union([boolean(), binary()]),
, desc => ?DESC("retain") #{
})} default => <<"${retain}">>,
desc => ?DESC("retain")
}
)},
, {payload, {payload,
sc(binary(), sc(
#{ default => <<"${payload}">> binary(),
, desc => ?DESC("payload") #{
})} default => <<"${payload}">>,
desc => ?DESC("payload")
}
)}
]; ];
fields("egress") -> fields("egress") ->
%% the message maybe sent from rules, in this case 'local_topic' is not necessary %% the message maybe sent from rules, in this case 'local_topic' is not necessary
[ {local_topic, [
sc(binary(), {local_topic,
#{ desc => ?DESC("egress_local_topic") sc(
, validator => fun ?MODULE:non_empty_string/1 binary(),
})} #{
, {remote_topic, desc => ?DESC("egress_local_topic"),
sc(binary(), validator => fun emqx_schema:non_empty_string/1
#{ required => true }
, validator => fun ?MODULE:non_empty_string/1 )},
, desc => ?DESC("egress_remote_topic") {remote_topic,
})} sc(
, {remote_qos, binary(),
sc(qos(), #{
#{ required => true required => true,
, desc => ?DESC("egress_remote_qos") validator => fun emqx_schema:non_empty_string/1,
})} desc => ?DESC("egress_remote_topic")
}
)},
{remote_qos,
sc(
qos(),
#{
required => true,
desc => ?DESC("egress_remote_qos")
}
)},
, {retain, {retain,
sc(hoconsc:union([boolean(), binary()]), sc(
#{ required => true hoconsc:union([boolean(), binary()]),
, desc => ?DESC("retain") #{
})} required => true,
desc => ?DESC("retain")
}
)},
, {payload, {payload,
sc(binary(), sc(
#{ required => true binary(),
, desc => ?DESC("payload") #{
})} required => true,
desc => ?DESC("payload")
}
)}
]; ];
fields("replayq") -> fields("replayq") ->
[ {dir, [
sc(hoconsc:union([boolean(), string()]), {dir,
#{ desc => ?DESC("dir") sc(
})} hoconsc:union([boolean(), string()]),
, {seg_bytes, #{desc => ?DESC("dir")}
sc(emqx_schema:bytesize(), )},
#{ default => "100MB" {seg_bytes,
, desc => ?DESC("seg_bytes") sc(
})} emqx_schema:bytesize(),
, {offload, #{
sc(boolean(), default => "100MB",
#{ default => false desc => ?DESC("seg_bytes")
, desc => ?DESC("offload") }
})} )},
{offload,
sc(
boolean(),
#{
default => false,
desc => ?DESC("offload")
}
)}
]. ].
desc("connector") -> desc("connector") ->
@ -196,42 +260,40 @@ desc(_) ->
undefined. undefined.
topic_mappings() -> topic_mappings() ->
[ {ingress, [
sc(ref("ingress"), {ingress,
#{ default => #{} sc(
})} ref("ingress"),
, {egress, #{default => #{}}
sc(ref("egress"), )},
#{ default => #{} {egress,
})} sc(
ref("egress"),
#{default => #{}}
)}
]. ].
ingress_desc() -> " ingress_desc() ->
The ingress config defines how this bridge receive messages from the remote MQTT broker, and then "\n"
send them to the local broker.</br> "The ingress config defines how this bridge receive messages from the remote MQTT broker, and then\n"
Template with variables is allowed in 'local_topic', 'remote_qos', 'qos', 'retain', "send them to the local broker.</br>\n"
'payload'.</br> "Template with variables is allowed in 'local_topic', 'remote_qos', 'qos', 'retain',\n"
NOTE: if this bridge is used as the input of a rule (emqx rule engine), and also local_topic is "'payload'.</br>\n"
configured, then messages got from the remote broker will be sent to both the 'local_topic' and "NOTE: if this bridge is used as the input of a rule (emqx rule engine), and also local_topic is\n"
the rule. "configured, then messages got from the remote broker will be sent to both the 'local_topic' and\n"
". "the rule.\n".
egress_desc() -> " egress_desc() ->
The egress config defines how this bridge forwards messages from the local broker to the remote "\n"
broker.</br> "The egress config defines how this bridge forwards messages from the local broker to the remote\n"
Template with variables is allowed in 'remote_topic', 'qos', 'retain', 'payload'.</br> "broker.</br>\n"
NOTE: if this bridge is used as the output of a rule (emqx rule engine), and also local_topic "Template with variables is allowed in 'remote_topic', 'qos', 'retain', 'payload'.</br>\n"
is configured, then both the data got from the rule and the MQTT messages that matches "NOTE: if this bridge is used as the output of a rule (emqx rule engine), and also local_topic\n"
local_topic will be forwarded. "is configured, then both the data got from the rule and the MQTT messages that matches\n"
". "local_topic will be forwarded.\n".
qos() -> qos() ->
hoconsc:union([emqx_schema:qos(), binary()]). hoconsc:union([emqx_schema:qos(), binary()]).
non_empty_string(<<>>) -> {error, empty_string_not_allowed};
non_empty_string("") -> {error, empty_string_not_allowed};
non_empty_string(S) when is_binary(S); is_list(S) -> ok;
non_empty_string(_) -> {error, invalid_string}.
sc(Type, Meta) -> hoconsc:mk(Type, Meta). sc(Type, Meta) -> hoconsc:mk(Type, Meta).
ref(Field) -> hoconsc:ref(?MODULE, Field). ref(Field) -> hoconsc:ref(?MODULE, Field).

View File

@ -66,43 +66,46 @@
-include_lib("emqx/include/logger.hrl"). -include_lib("emqx/include/logger.hrl").
%% APIs %% APIs
-export([ start_link/1 -export([
, register_metrics/0 start_link/1,
, stop/1 register_metrics/0,
]). stop/1
]).
%% gen_statem callbacks %% gen_statem callbacks
-export([ terminate/3 -export([
, code_change/4 terminate/3,
, init/1 code_change/4,
, callback_mode/0 init/1,
]). callback_mode/0
]).
%% state functions %% state functions
-export([ idle/3 -export([
, connected/3 idle/3,
]). connected/3
]).
%% management APIs %% management APIs
-export([ ensure_started/1 -export([
, ensure_stopped/1 ensure_started/1,
, status/1 ensure_stopped/1,
, ping/1 status/1,
, send_to_remote/2 ping/1,
]). send_to_remote/2
]).
-export([ get_forwards/1 -export([get_forwards/1]).
]).
-export([ get_subscriptions/1 -export([get_subscriptions/1]).
]).
%% Internal %% Internal
-export([msg_marshaller/1]). -export([msg_marshaller/1]).
-export_type([ config/0 -export_type([
, ack_ref/0 config/0,
]). ack_ref/0
]).
-type id() :: atom() | string() | pid(). -type id() :: atom() | string() | pid().
-type qos() :: emqx_types:qos(). -type qos() :: emqx_types:qos().
@ -113,7 +116,6 @@
-include_lib("emqx/include/logger.hrl"). -include_lib("emqx/include/logger.hrl").
-include_lib("emqx/include/emqx_mqtt.hrl"). -include_lib("emqx/include/emqx_mqtt.hrl").
%% same as default in-flight limit for emqtt %% same as default in-flight limit for emqtt
-define(DEFAULT_INFLIGHT_SIZE, 32). -define(DEFAULT_INFLIGHT_SIZE, 32).
-define(DEFAULT_RECONNECT_DELAY_MS, timer:seconds(5)). -define(DEFAULT_RECONNECT_DELAY_MS, timer:seconds(5)).
@ -188,8 +190,10 @@ callback_mode() -> [state_functions].
%% @doc Config should be a map(). %% @doc Config should be a map().
init(#{name := Name} = ConnectOpts) -> init(#{name := Name} = ConnectOpts) ->
?SLOG(debug, #{msg => "starting_bridge_worker", ?SLOG(debug, #{
name => Name}), msg => "starting_bridge_worker",
name => Name
}),
erlang:process_flag(trap_exit, true), erlang:process_flag(trap_exit, true),
Queue = open_replayq(Name, maps:get(replayq, ConnectOpts, #{})), Queue = open_replayq(Name, maps:get(replayq, ConnectOpts, #{})),
State = init_state(ConnectOpts), State = init_state(ConnectOpts),
@ -205,31 +209,44 @@ init_state(Opts) ->
Mountpoint = maps:get(forward_mountpoint, Opts, undefined), Mountpoint = maps:get(forward_mountpoint, Opts, undefined),
MaxInflightSize = maps:get(max_inflight, Opts, ?DEFAULT_INFLIGHT_SIZE), MaxInflightSize = maps:get(max_inflight, Opts, ?DEFAULT_INFLIGHT_SIZE),
Name = maps:get(name, Opts, undefined), Name = maps:get(name, Opts, undefined),
#{start_type => StartType, #{
reconnect_interval => ReconnDelayMs, start_type => StartType,
mountpoint => format_mountpoint(Mountpoint), reconnect_interval => ReconnDelayMs,
inflight => [], mountpoint => format_mountpoint(Mountpoint),
max_inflight => MaxInflightSize, inflight => [],
connection => undefined, max_inflight => MaxInflightSize,
name => Name}. connection => undefined,
name => Name
}.
open_replayq(Name, QCfg) -> open_replayq(Name, QCfg) ->
Dir = maps:get(dir, QCfg, undefined), Dir = maps:get(dir, QCfg, undefined),
SegBytes = maps:get(seg_bytes, QCfg, ?DEFAULT_SEG_BYTES), SegBytes = maps:get(seg_bytes, QCfg, ?DEFAULT_SEG_BYTES),
MaxTotalSize = maps:get(max_total_size, QCfg, ?DEFAULT_MAX_TOTAL_SIZE), MaxTotalSize = maps:get(max_total_size, QCfg, ?DEFAULT_MAX_TOTAL_SIZE),
QueueConfig = case Dir =:= undefined orelse Dir =:= "" of QueueConfig =
true -> #{mem_only => true}; case Dir =:= undefined orelse Dir =:= "" of
false -> #{dir => filename:join([Dir, node(), Name]), true ->
seg_bytes => SegBytes, max_total_size => MaxTotalSize} #{mem_only => true};
end, false ->
replayq:open(QueueConfig#{sizer => fun emqx_connector_mqtt_msg:estimate_size/1, #{
marshaller => fun ?MODULE:msg_marshaller/1}). dir => filename:join([Dir, node(), Name]),
seg_bytes => SegBytes,
max_total_size => MaxTotalSize
}
end,
replayq:open(QueueConfig#{
sizer => fun emqx_connector_mqtt_msg:estimate_size/1,
marshaller => fun ?MODULE:msg_marshaller/1
}).
pre_process_opts(#{subscriptions := InConf, forwards := OutConf} = ConnectOpts) -> pre_process_opts(#{subscriptions := InConf, forwards := OutConf} = ConnectOpts) ->
ConnectOpts#{subscriptions => pre_process_in_out(in, InConf), ConnectOpts#{
forwards => pre_process_in_out(out, OutConf)}. subscriptions => pre_process_in_out(in, InConf),
forwards => pre_process_in_out(out, OutConf)
}.
pre_process_in_out(_, undefined) -> undefined; pre_process_in_out(_, undefined) ->
undefined;
pre_process_in_out(in, Conf) when is_map(Conf) -> pre_process_in_out(in, Conf) when is_map(Conf) ->
Conf1 = pre_process_conf(local_topic, Conf), Conf1 = pre_process_conf(local_topic, Conf),
Conf2 = pre_process_conf(local_qos, Conf1), Conf2 = pre_process_conf(local_qos, Conf1),
@ -245,7 +262,8 @@ pre_process_in_out_common(Conf) ->
pre_process_conf(Key, Conf) -> pre_process_conf(Key, Conf) ->
case maps:find(Key, Conf) of case maps:find(Key, Conf) of
error -> Conf; error ->
Conf;
{ok, Val} when is_binary(Val) -> {ok, Val} when is_binary(Val) ->
Conf#{Key => emqx_plugin_libs_rule:preproc_tmpl(Val)}; Conf#{Key => emqx_plugin_libs_rule:preproc_tmpl(Val)};
{ok, Val} -> {ok, Val} ->
@ -276,7 +294,6 @@ idle(info, idle, #{start_type := auto} = State) ->
connecting(State); connecting(State);
idle(state_timeout, reconnect, State) -> idle(state_timeout, reconnect, State) ->
connecting(State); connecting(State);
idle(Type, Content, State) -> idle(Type, Content, State) ->
common(idle, Type, Content, State). common(idle, Type, Content, State).
@ -298,13 +315,16 @@ connected(state_timeout, connected, #{inflight := Inflight} = State) ->
connected(internal, maybe_send, State) -> connected(internal, maybe_send, State) ->
{_, NewState} = pop_and_send(State), {_, NewState} = pop_and_send(State),
{keep_state, NewState}; {keep_state, NewState};
connected(
connected(info, {disconnected, Conn, Reason}, info,
#{connection := Connection, name := Name, reconnect_interval := ReconnectDelayMs} = State) -> {disconnected, Conn, Reason},
#{connection := Connection, name := Name, reconnect_interval := ReconnectDelayMs} = State
) ->
?tp(info, disconnected, #{name => Name, reason => Reason}), ?tp(info, disconnected, #{name => Name, reason => Reason}),
case Conn =:= maps:get(client_pid, Connection, undefined) of case Conn =:= maps:get(client_pid, Connection, undefined) of
true -> true ->
{next_state, idle, State#{connection => undefined}, {state_timeout, ReconnectDelayMs, reconnect}}; {next_state, idle, State#{connection => undefined},
{state_timeout, ReconnectDelayMs, reconnect}};
false -> false ->
keep_state_and_data keep_state_and_data
end; end;
@ -317,7 +337,7 @@ connected(Type, Content, State) ->
%% Common handlers %% Common handlers
common(StateName, {call, From}, status, _State) -> common(StateName, {call, From}, status, _State) ->
{keep_state_and_data, [{reply, From, StateName}]}; {keep_state_and_data, [{reply, From, StateName}]};
common(_StateName, {call, From}, ping, #{connection := Conn} =_State) -> common(_StateName, {call, From}, ping, #{connection := Conn} = _State) ->
Reply = emqx_connector_mqtt_mod:ping(Conn), Reply = emqx_connector_mqtt_mod:ping(Conn),
{keep_state_and_data, [{reply, From, Reply}]}; {keep_state_and_data, [{reply, From, Reply}]};
common(_StateName, {call, From}, ensure_stopped, #{connection := undefined} = _State) -> common(_StateName, {call, From}, ensure_stopped, #{connection := undefined} = _State) ->
@ -335,27 +355,39 @@ common(_StateName, cast, {send_to_remote, Msg}, #{replayq := Q} = State) ->
NewQ = replayq:append(Q, [Msg]), NewQ = replayq:append(Q, [Msg]),
{keep_state, State#{replayq => NewQ}, {next_event, internal, maybe_send}}; {keep_state, State#{replayq => NewQ}, {next_event, internal, maybe_send}};
common(StateName, Type, Content, #{name := Name} = State) -> common(StateName, Type, Content, #{name := Name} = State) ->
?SLOG(notice, #{msg => "bridge_discarded_event", ?SLOG(notice, #{
name => Name, type => Type, state_name => StateName, msg => "bridge_discarded_event",
content => Content}), name => Name,
type => Type,
state_name => StateName,
content => Content
}),
{keep_state, State}. {keep_state, State}.
do_connect(#{connect_opts := ConnectOpts, do_connect(
inflight := Inflight, #{
name := Name} = State) -> connect_opts := ConnectOpts,
inflight := Inflight,
name := Name
} = State
) ->
case emqx_connector_mqtt_mod:start(ConnectOpts) of case emqx_connector_mqtt_mod:start(ConnectOpts) of
{ok, Conn} -> {ok, Conn} ->
?tp(info, connected, #{name => Name, inflight => length(Inflight)}), ?tp(info, connected, #{name => Name, inflight => length(Inflight)}),
{ok, State#{connection => Conn}}; {ok, State#{connection => Conn}};
{error, Reason} -> {error, Reason} ->
ConnectOpts1 = obfuscate(ConnectOpts), ConnectOpts1 = obfuscate(ConnectOpts),
?SLOG(error, #{msg => "failed_to_connect", ?SLOG(error, #{
config => ConnectOpts1, reason => Reason}), msg => "failed_to_connect",
config => ConnectOpts1,
reason => Reason
}),
{error, Reason, State} {error, Reason, State}
end. end.
%% Retry all inflight (previously sent but not acked) batches. %% Retry all inflight (previously sent but not acked) batches.
retry_inflight(State, []) -> {ok, State}; retry_inflight(State, []) ->
{ok, State};
retry_inflight(State, [#{q_ack_ref := QAckRef, msg := Msg} | Rest] = OldInf) -> retry_inflight(State, [#{q_ack_ref := QAckRef, msg := Msg} | Rest] = OldInf) ->
case do_send(State, QAckRef, Msg) of case do_send(State, QAckRef, Msg) of
{ok, State1} -> {ok, State1} ->
@ -386,28 +418,49 @@ pop_and_send_loop(#{replayq := Q} = State, N) ->
end. end.
do_send(#{connect_opts := #{forwards := undefined}}, _QAckRef, Msg) -> do_send(#{connect_opts := #{forwards := undefined}}, _QAckRef, Msg) ->
?SLOG(error, #{msg => "cannot_forward_messages_to_remote_broker" ?SLOG(error, #{
"_as_'egress'_is_not_configured", msg =>
messages => Msg}); "cannot_forward_messages_to_remote_broker"
do_send(#{inflight := Inflight, "_as_'egress'_is_not_configured",
connection := Connection, messages => Msg
mountpoint := Mountpoint, });
connect_opts := #{forwards := Forwards}} = State, QAckRef, Msg) -> do_send(
#{
inflight := Inflight,
connection := Connection,
mountpoint := Mountpoint,
connect_opts := #{forwards := Forwards}
} = State,
QAckRef,
Msg
) ->
Vars = emqx_connector_mqtt_msg:make_pub_vars(Mountpoint, Forwards), Vars = emqx_connector_mqtt_msg:make_pub_vars(Mountpoint, Forwards),
ExportMsg = fun(Message) -> ExportMsg = fun(Message) ->
emqx_metrics:inc('bridge.mqtt.message_sent_to_remote'), emqx_metrics:inc('bridge.mqtt.message_sent_to_remote'),
emqx_connector_mqtt_msg:to_remote_msg(Message, Vars) emqx_connector_mqtt_msg:to_remote_msg(Message, Vars)
end, end,
?SLOG(debug, #{msg => "publish_to_remote_broker", ?SLOG(debug, #{
message => Msg, vars => Vars}), msg => "publish_to_remote_broker",
message => Msg,
vars => Vars
}),
case emqx_connector_mqtt_mod:send(Connection, [ExportMsg(Msg)]) of case emqx_connector_mqtt_mod:send(Connection, [ExportMsg(Msg)]) of
{ok, Refs} -> {ok, Refs} ->
{ok, State#{inflight := Inflight ++ [#{q_ack_ref => QAckRef, {ok, State#{
send_ack_ref => map_set(Refs), inflight := Inflight ++
msg => Msg}]}}; [
#{
q_ack_ref => QAckRef,
send_ack_ref => map_set(Refs),
msg => Msg
}
]
}};
{error, Reason} -> {error, Reason} ->
?SLOG(info, #{msg => "mqtt_bridge_produce_failed", ?SLOG(info, #{
reason => Reason}), msg => "mqtt_bridge_produce_failed",
reason => Reason
}),
{error, State} {error, State}
end. end.
@ -427,8 +480,10 @@ handle_batch_ack(#{inflight := Inflight0, replayq := Q} = State, Ref) ->
State#{inflight := Inflight}. State#{inflight := Inflight}.
do_ack([], Ref) -> do_ack([], Ref) ->
?SLOG(debug, #{msg => "stale_batch_ack_reference", ?SLOG(debug, #{
ref => Ref}), msg => "stale_batch_ack_reference",
ref => Ref
}),
[]; [];
do_ack([#{send_ack_ref := Refs} = First | Rest], Ref) -> do_ack([#{send_ack_ref := Refs} = First | Rest], Ref) ->
case maps:is_key(Ref, Refs) of case maps:is_key(Ref, Refs) of
@ -443,8 +498,16 @@ do_ack([#{send_ack_ref := Refs} = First | Rest], Ref) ->
drop_acked_batches(_Q, []) -> drop_acked_batches(_Q, []) ->
?tp(debug, inflight_drained, #{}), ?tp(debug, inflight_drained, #{}),
[]; [];
drop_acked_batches(Q, [#{send_ack_ref := Refs, drop_acked_batches(
q_ack_ref := QAckRef} | Rest] = All) -> Q,
[
#{
send_ack_ref := Refs,
q_ack_ref := QAckRef
}
| Rest
] = All
) ->
case maps:size(Refs) of case maps:size(Refs) of
0 -> 0 ->
%% all messages are acked by bridge target %% all messages are acked by bridge target
@ -475,18 +538,25 @@ format_mountpoint(Prefix) ->
name(Id) -> list_to_atom(str(Id)). name(Id) -> list_to_atom(str(Id)).
register_metrics() -> register_metrics() ->
lists:foreach(fun emqx_metrics:ensure/1, lists:foreach(
['bridge.mqtt.message_sent_to_remote', fun emqx_metrics:ensure/1,
'bridge.mqtt.message_received_from_remote' [
]). 'bridge.mqtt.message_sent_to_remote',
'bridge.mqtt.message_received_from_remote'
]
).
obfuscate(Map) -> obfuscate(Map) ->
maps:fold(fun(K, V, Acc) -> maps:fold(
case is_sensitive(K) of fun(K, V, Acc) ->
true -> [{K, '***'} | Acc]; case is_sensitive(K) of
false -> [{K, V} | Acc] true -> [{K, '***'} | Acc];
end false -> [{K, V} | Acc]
end, [], Map). end
end,
[],
Map
).
is_sensitive(password) -> true; is_sensitive(password) -> true;
is_sensitive(_) -> false. is_sensitive(_) -> false.

View File

@ -26,27 +26,23 @@
-include("emqx_dashboard/include/emqx_dashboard.hrl"). -include("emqx_dashboard/include/emqx_dashboard.hrl").
%% output functions %% output functions
-export([ inspect/3 -export([inspect/3]).
]).
-define(BRIDGE_CONF_DEFAULT, <<"bridges: {}">>). -define(BRIDGE_CONF_DEFAULT, <<"bridges: {}">>).
-define(CONNECTR_TYPE, <<"mqtt">>). -define(CONNECTR_TYPE, <<"mqtt">>).
-define(CONNECTR_NAME, <<"test_connector">>). -define(CONNECTR_NAME, <<"test_connector">>).
-define(BRIDGE_NAME_INGRESS, <<"ingress_test_bridge">>). -define(BRIDGE_NAME_INGRESS, <<"ingress_test_bridge">>).
-define(BRIDGE_NAME_EGRESS, <<"egress_test_bridge">>). -define(BRIDGE_NAME_EGRESS, <<"egress_test_bridge">>).
-define(MQTT_CONNECTOR(Username), -define(MQTT_CONNECTOR(Username), #{
#{
<<"server">> => <<"127.0.0.1:1883">>, <<"server">> => <<"127.0.0.1:1883">>,
<<"username">> => Username, <<"username">> => Username,
<<"password">> => <<"">>, <<"password">> => <<"">>,
<<"proto_ver">> => <<"v4">>, <<"proto_ver">> => <<"v4">>,
<<"ssl">> => #{<<"enable">> => false} <<"ssl">> => #{<<"enable">> => false}
}). }).
-define(MQTT_CONNECTOR2(Server), -define(MQTT_CONNECTOR2(Server), ?MQTT_CONNECTOR(<<"user1">>)#{<<"server">> => Server}).
?MQTT_CONNECTOR(<<"user1">>)#{<<"server">> => Server}).
-define(MQTT_BRIDGE_INGRESS(ID), -define(MQTT_BRIDGE_INGRESS(ID), #{
#{
<<"connector">> => ID, <<"connector">> => ID,
<<"direction">> => <<"ingress">>, <<"direction">> => <<"ingress">>,
<<"remote_topic">> => <<"remote_topic/#">>, <<"remote_topic">> => <<"remote_topic/#">>,
@ -57,8 +53,7 @@
<<"retain">> => <<"${retain}">> <<"retain">> => <<"${retain}">>
}). }).
-define(MQTT_BRIDGE_EGRESS(ID), -define(MQTT_BRIDGE_EGRESS(ID), #{
#{
<<"connector">> => ID, <<"connector">> => ID,
<<"direction">> => <<"egress">>, <<"direction">> => <<"egress">>,
<<"local_topic">> => <<"local_topic/#">>, <<"local_topic">> => <<"local_topic/#">>,
@ -68,10 +63,14 @@
<<"retain">> => <<"${retain}">> <<"retain">> => <<"${retain}">>
}). }).
-define(metrics(MATCH, SUCC, FAILED, SPEED, SPEED5M, SPEEDMAX), -define(metrics(MATCH, SUCC, FAILED, SPEED, SPEED5M, SPEEDMAX), #{
#{<<"matched">> := MATCH, <<"success">> := SUCC, <<"matched">> := MATCH,
<<"failed">> := FAILED, <<"rate">> := SPEED, <<"success">> := SUCC,
<<"rate_last5m">> := SPEED5M, <<"rate_max">> := SPEEDMAX}). <<"failed">> := FAILED,
<<"rate">> := SPEED,
<<"rate_last5m">> := SPEED5M,
<<"rate_max">> := SPEEDMAX
}).
inspect(Selected, _Envs, _Args) -> inspect(Selected, _Envs, _Args) ->
persistent_term:put(?MODULE, #{inspect => Selected}). persistent_term:put(?MODULE, #{inspect => Selected}).
@ -83,24 +82,37 @@ groups() ->
[]. [].
suite() -> suite() ->
[{timetrap,{seconds,30}}]. [{timetrap, {seconds, 30}}].
init_per_suite(Config) -> init_per_suite(Config) ->
_ = application:load(emqx_conf), _ = application:load(emqx_conf),
%% some testcases (may from other app) already get emqx_connector started %% some testcases (may from other app) already get emqx_connector started
_ = application:stop(emqx_resource), _ = application:stop(emqx_resource),
_ = application:stop(emqx_connector), _ = application:stop(emqx_connector),
ok = emqx_common_test_helpers:start_apps([emqx_rule_engine, emqx_connector, ok = emqx_common_test_helpers:start_apps(
emqx_bridge, emqx_dashboard], fun set_special_configs/1), [
emqx_rule_engine,
emqx_connector,
emqx_bridge,
emqx_dashboard
],
fun set_special_configs/1
),
ok = emqx_common_test_helpers:load_config(emqx_connector_schema, <<"connectors: {}">>), ok = emqx_common_test_helpers:load_config(emqx_connector_schema, <<"connectors: {}">>),
ok = emqx_common_test_helpers:load_config(emqx_rule_engine_schema, ok = emqx_common_test_helpers:load_config(
<<"rule_engine {rules {}}">>), emqx_rule_engine_schema,
<<"rule_engine {rules {}}">>
),
ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, ?BRIDGE_CONF_DEFAULT), ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, ?BRIDGE_CONF_DEFAULT),
Config. Config.
end_per_suite(_Config) -> end_per_suite(_Config) ->
emqx_common_test_helpers:stop_apps([emqx_rule_engine, emqx_connector, emqx_bridge, emqx_common_test_helpers:stop_apps([
emqx_dashboard]), emqx_rule_engine,
emqx_connector,
emqx_bridge,
emqx_dashboard
]),
ok. ok.
set_special_configs(emqx_dashboard) -> set_special_configs(emqx_dashboard) ->
@ -116,15 +128,24 @@ end_per_testcase(_, _Config) ->
ok. ok.
clear_resources() -> clear_resources() ->
lists:foreach(fun(#{id := Id}) -> lists:foreach(
fun(#{id := Id}) ->
ok = emqx_rule_engine:delete_rule(Id) ok = emqx_rule_engine:delete_rule(Id)
end, emqx_rule_engine:get_rules()), end,
lists:foreach(fun(#{type := Type, name := Name}) -> emqx_rule_engine:get_rules()
),
lists:foreach(
fun(#{type := Type, name := Name}) ->
ok = emqx_bridge:remove(Type, Name) ok = emqx_bridge:remove(Type, Name)
end, emqx_bridge:list()), end,
lists:foreach(fun(#{<<"type">> := Type, <<"name">> := Name}) -> emqx_bridge:list()
),
lists:foreach(
fun(#{<<"type">> := Type, <<"name">> := Name}) ->
ok = emqx_connector:delete(Type, Name) ok = emqx_connector:delete(Type, Name)
end, emqx_connector:list_raw()). end,
emqx_connector:list_raw()
).
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
%% Testcases %% Testcases
@ -137,103 +158,144 @@ t_mqtt_crud_apis(_) ->
%% then we add a mqtt connector, using POST %% then we add a mqtt connector, using POST
%% POST /connectors/ will create a connector %% POST /connectors/ will create a connector
User1 = <<"user1">>, User1 = <<"user1">>,
{ok, 400, <<"{\"code\":\"BAD_REQUEST\",\"message\"" {ok, 400, <<
":\"missing some required fields: [name, type]\"}">>} "{\"code\":\"BAD_REQUEST\",\"message\""
= request(post, uri(["connectors"]), ":\"missing some required fields: [name, type]\"}"
?MQTT_CONNECTOR(User1)#{ <<"type">> => ?CONNECTR_TYPE >>} =
}), request(
{ok, 201, Connector} = request(post, uri(["connectors"]), post,
?MQTT_CONNECTOR(User1)#{ <<"type">> => ?CONNECTR_TYPE uri(["connectors"]),
, <<"name">> => ?CONNECTR_NAME ?MQTT_CONNECTOR(User1)#{<<"type">> => ?CONNECTR_TYPE}
}), ),
{ok, 201, Connector} = request(
post,
uri(["connectors"]),
?MQTT_CONNECTOR(User1)#{
<<"type">> => ?CONNECTR_TYPE,
<<"name">> => ?CONNECTR_NAME
}
),
#{ <<"type">> := ?CONNECTR_TYPE #{
, <<"name">> := ?CONNECTR_NAME <<"type">> := ?CONNECTR_TYPE,
, <<"server">> := <<"127.0.0.1:1883">> <<"name">> := ?CONNECTR_NAME,
, <<"username">> := User1 <<"server">> := <<"127.0.0.1:1883">>,
, <<"password">> := <<"">> <<"username">> := User1,
, <<"proto_ver">> := <<"v4">> <<"password">> := <<"">>,
, <<"ssl">> := #{<<"enable">> := false} <<"proto_ver">> := <<"v4">>,
} = jsx:decode(Connector), <<"ssl">> := #{<<"enable">> := false}
} = jsx:decode(Connector),
ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME), ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME),
%% update the request-path of the connector %% update the request-path of the connector
User2 = <<"user2">>, User2 = <<"user2">>,
{ok, 200, Connector2} = request(put, uri(["connectors", ConnctorID]), {ok, 200, Connector2} = request(
?MQTT_CONNECTOR(User2)), put,
?assertMatch(#{ <<"type">> := ?CONNECTR_TYPE uri(["connectors", ConnctorID]),
, <<"name">> := ?CONNECTR_NAME ?MQTT_CONNECTOR(User2)
, <<"server">> := <<"127.0.0.1:1883">> ),
, <<"username">> := User2 ?assertMatch(
, <<"password">> := <<"">> #{
, <<"proto_ver">> := <<"v4">> <<"type">> := ?CONNECTR_TYPE,
, <<"ssl">> := #{<<"enable">> := false} <<"name">> := ?CONNECTR_NAME,
}, jsx:decode(Connector2)), <<"server">> := <<"127.0.0.1:1883">>,
<<"username">> := User2,
<<"password">> := <<"">>,
<<"proto_ver">> := <<"v4">>,
<<"ssl">> := #{<<"enable">> := false}
},
jsx:decode(Connector2)
),
%% list all connectors again, assert Connector2 is in it %% list all connectors again, assert Connector2 is in it
{ok, 200, Connector2Str} = request(get, uri(["connectors"]), []), {ok, 200, Connector2Str} = request(get, uri(["connectors"]), []),
?assertMatch([#{ <<"type">> := ?CONNECTR_TYPE ?assertMatch(
, <<"name">> := ?CONNECTR_NAME [
, <<"server">> := <<"127.0.0.1:1883">> #{
, <<"username">> := User2 <<"type">> := ?CONNECTR_TYPE,
, <<"password">> := <<"">> <<"name">> := ?CONNECTR_NAME,
, <<"proto_ver">> := <<"v4">> <<"server">> := <<"127.0.0.1:1883">>,
, <<"ssl">> := #{<<"enable">> := false} <<"username">> := User2,
}], jsx:decode(Connector2Str)), <<"password">> := <<"">>,
<<"proto_ver">> := <<"v4">>,
<<"ssl">> := #{<<"enable">> := false}
}
],
jsx:decode(Connector2Str)
),
%% get the connector by id %% get the connector by id
{ok, 200, Connector3Str} = request(get, uri(["connectors", ConnctorID]), []), {ok, 200, Connector3Str} = request(get, uri(["connectors", ConnctorID]), []),
?assertMatch(#{ <<"type">> := ?CONNECTR_TYPE ?assertMatch(
, <<"name">> := ?CONNECTR_NAME #{
, <<"server">> := <<"127.0.0.1:1883">> <<"type">> := ?CONNECTR_TYPE,
, <<"username">> := User2 <<"name">> := ?CONNECTR_NAME,
, <<"password">> := <<"">> <<"server">> := <<"127.0.0.1:1883">>,
, <<"proto_ver">> := <<"v4">> <<"username">> := User2,
, <<"ssl">> := #{<<"enable">> := false} <<"password">> := <<"">>,
}, jsx:decode(Connector3Str)), <<"proto_ver">> := <<"v4">>,
<<"ssl">> := #{<<"enable">> := false}
},
jsx:decode(Connector3Str)
),
%% delete the connector %% delete the connector
{ok, 204, <<>>} = request(delete, uri(["connectors", ConnctorID]), []), {ok, 204, <<>>} = request(delete, uri(["connectors", ConnctorID]), []),
{ok, 200, <<"[]">>} = request(get, uri(["connectors"]), []), {ok, 200, <<"[]">>} = request(get, uri(["connectors"]), []),
%% update a deleted connector returns an error %% update a deleted connector returns an error
{ok, 404, ErrMsg2} = request(put, uri(["connectors", ConnctorID]), {ok, 404, ErrMsg2} = request(
?MQTT_CONNECTOR(User2)), put,
uri(["connectors", ConnctorID]),
?MQTT_CONNECTOR(User2)
),
?assertMatch( ?assertMatch(
#{ <<"code">> := _ #{
, <<"message">> := <<"connector not found">> <<"code">> := _,
}, jsx:decode(ErrMsg2)), <<"message">> := <<"connector not found">>
},
jsx:decode(ErrMsg2)
),
ok. ok.
t_mqtt_conn_bridge_ingress(_) -> t_mqtt_conn_bridge_ingress(_) ->
%% then we add a mqtt connector, using POST %% then we add a mqtt connector, using POST
User1 = <<"user1">>, User1 = <<"user1">>,
{ok, 201, Connector} = request(post, uri(["connectors"]), {ok, 201, Connector} = request(
?MQTT_CONNECTOR(User1)#{ <<"type">> => ?CONNECTR_TYPE post,
, <<"name">> => ?CONNECTR_NAME uri(["connectors"]),
}), ?MQTT_CONNECTOR(User1)#{
<<"type">> => ?CONNECTR_TYPE,
<<"name">> => ?CONNECTR_NAME
}
),
#{ <<"type">> := ?CONNECTR_TYPE #{
, <<"name">> := ?CONNECTR_NAME <<"type">> := ?CONNECTR_TYPE,
, <<"server">> := <<"127.0.0.1:1883">> <<"name">> := ?CONNECTR_NAME,
, <<"num_of_bridges">> := 0 <<"server">> := <<"127.0.0.1:1883">>,
, <<"username">> := User1 <<"num_of_bridges">> := 0,
, <<"password">> := <<"">> <<"username">> := User1,
, <<"proto_ver">> := <<"v4">> <<"password">> := <<"">>,
, <<"ssl">> := #{<<"enable">> := false} <<"proto_ver">> := <<"v4">>,
} = jsx:decode(Connector), <<"ssl">> := #{<<"enable">> := false}
} = jsx:decode(Connector),
ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME), ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME),
%% ... and a MQTT bridge, using POST %% ... and a MQTT bridge, using POST
%% we bind this bridge to the connector created just now %% we bind this bridge to the connector created just now
timer:sleep(50), timer:sleep(50),
{ok, 201, Bridge} = request(post, uri(["bridges"]), {ok, 201, Bridge} = request(
post,
uri(["bridges"]),
?MQTT_BRIDGE_INGRESS(ConnctorID)#{ ?MQTT_BRIDGE_INGRESS(ConnctorID)#{
<<"type">> => ?CONNECTR_TYPE, <<"type">> => ?CONNECTR_TYPE,
<<"name">> => ?BRIDGE_NAME_INGRESS <<"name">> => ?BRIDGE_NAME_INGRESS
}), }
#{ <<"type">> := ?CONNECTR_TYPE ),
, <<"name">> := ?BRIDGE_NAME_INGRESS #{
, <<"connector">> := ConnctorID <<"type">> := ?CONNECTR_TYPE,
} = jsx:decode(Bridge), <<"name">> := ?BRIDGE_NAME_INGRESS,
<<"connector">> := ConnctorID
} = jsx:decode(Bridge),
BridgeIDIngress = emqx_bridge:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_INGRESS), BridgeIDIngress = emqx_bridge:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_INGRESS),
wait_for_resource_ready(BridgeIDIngress, 5), wait_for_resource_ready(BridgeIDIngress, 5),
@ -257,12 +319,12 @@ t_mqtt_conn_bridge_ingress(_) ->
false false
after 100 -> after 100 ->
false false
end), end
),
%% get the connector by id, verify the num_of_bridges now is 1 %% get the connector by id, verify the num_of_bridges now is 1
{ok, 200, Connector1Str} = request(get, uri(["connectors", ConnctorID]), []), {ok, 200, Connector1Str} = request(get, uri(["connectors", ConnctorID]), []),
?assertMatch(#{ <<"num_of_bridges">> := 1 ?assertMatch(#{<<"num_of_bridges">> := 1}, jsx:decode(Connector1Str)),
}, jsx:decode(Connector1Str)),
%% delete the bridge %% delete the bridge
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDIngress]), []), {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDIngress]), []),
@ -276,30 +338,39 @@ t_mqtt_conn_bridge_ingress(_) ->
t_mqtt_conn_bridge_egress(_) -> t_mqtt_conn_bridge_egress(_) ->
%% then we add a mqtt connector, using POST %% then we add a mqtt connector, using POST
User1 = <<"user1">>, User1 = <<"user1">>,
{ok, 201, Connector} = request(post, uri(["connectors"]), {ok, 201, Connector} = request(
?MQTT_CONNECTOR(User1)#{ <<"type">> => ?CONNECTR_TYPE post,
, <<"name">> => ?CONNECTR_NAME uri(["connectors"]),
}), ?MQTT_CONNECTOR(User1)#{
<<"type">> => ?CONNECTR_TYPE,
<<"name">> => ?CONNECTR_NAME
}
),
%ct:pal("---connector: ~p", [Connector]), %ct:pal("---connector: ~p", [Connector]),
#{ <<"server">> := <<"127.0.0.1:1883">> #{
, <<"username">> := User1 <<"server">> := <<"127.0.0.1:1883">>,
, <<"password">> := <<"">> <<"username">> := User1,
, <<"proto_ver">> := <<"v4">> <<"password">> := <<"">>,
, <<"ssl">> := #{<<"enable">> := false} <<"proto_ver">> := <<"v4">>,
} = jsx:decode(Connector), <<"ssl">> := #{<<"enable">> := false}
} = jsx:decode(Connector),
ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME), ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME),
%% ... and a MQTT bridge, using POST %% ... and a MQTT bridge, using POST
%% we bind this bridge to the connector created just now %% we bind this bridge to the connector created just now
{ok, 201, Bridge} = request(post, uri(["bridges"]), {ok, 201, Bridge} = request(
post,
uri(["bridges"]),
?MQTT_BRIDGE_EGRESS(ConnctorID)#{ ?MQTT_BRIDGE_EGRESS(ConnctorID)#{
<<"type">> => ?CONNECTR_TYPE, <<"type">> => ?CONNECTR_TYPE,
<<"name">> => ?BRIDGE_NAME_EGRESS <<"name">> => ?BRIDGE_NAME_EGRESS
}), }
#{ <<"type">> := ?CONNECTR_TYPE ),
, <<"name">> := ?BRIDGE_NAME_EGRESS #{
, <<"connector">> := ConnctorID <<"type">> := ?CONNECTR_TYPE,
} = jsx:decode(Bridge), <<"name">> := ?BRIDGE_NAME_EGRESS,
<<"connector">> := ConnctorID
} = jsx:decode(Bridge),
BridgeIDEgress = emqx_bridge:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_EGRESS), BridgeIDEgress = emqx_bridge:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_EGRESS),
wait_for_resource_ready(BridgeIDEgress, 5), wait_for_resource_ready(BridgeIDEgress, 5),
@ -324,14 +395,19 @@ t_mqtt_conn_bridge_egress(_) ->
false false
after 100 -> after 100 ->
false false
end), end
),
%% verify the metrics of the bridge %% verify the metrics of the bridge
{ok, 200, BridgeStr} = request(get, uri(["bridges", BridgeIDEgress]), []), {ok, 200, BridgeStr} = request(get, uri(["bridges", BridgeIDEgress]), []),
?assertMatch(#{ <<"metrics">> := ?metrics(1, 1, 0, _, _, _) ?assertMatch(
, <<"node_metrics">> := #{
[#{<<"node">> := _, <<"metrics">> := ?metrics(1, 1, 0, _, _, _)}] <<"metrics">> := ?metrics(1, 1, 0, _, _, _),
}, jsx:decode(BridgeStr)), <<"node_metrics">> :=
[#{<<"node">> := _, <<"metrics">> := ?metrics(1, 1, 0, _, _, _)}]
},
jsx:decode(BridgeStr)
),
%% delete the bridge %% delete the bridge
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []), {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []),
@ -347,38 +423,50 @@ t_mqtt_conn_bridge_egress(_) ->
%% - cannot delete a connector that is used by at least one bridge %% - cannot delete a connector that is used by at least one bridge
t_mqtt_conn_update(_) -> t_mqtt_conn_update(_) ->
%% then we add a mqtt connector, using POST %% then we add a mqtt connector, using POST
{ok, 201, Connector} = request(post, uri(["connectors"]), {ok, 201, Connector} = request(
?MQTT_CONNECTOR2(<<"127.0.0.1:1883">>) post,
#{ <<"type">> => ?CONNECTR_TYPE uri(["connectors"]),
, <<"name">> => ?CONNECTR_NAME ?MQTT_CONNECTOR2(<<"127.0.0.1:1883">>)#{
}), <<"type">> => ?CONNECTR_TYPE,
<<"name">> => ?CONNECTR_NAME
}
),
%ct:pal("---connector: ~p", [Connector]), %ct:pal("---connector: ~p", [Connector]),
#{ <<"server">> := <<"127.0.0.1:1883">> #{<<"server">> := <<"127.0.0.1:1883">>} = jsx:decode(Connector),
} = jsx:decode(Connector),
ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME), ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME),
%% ... and a MQTT bridge, using POST %% ... and a MQTT bridge, using POST
%% we bind this bridge to the connector created just now %% we bind this bridge to the connector created just now
{ok, 201, Bridge} = request(post, uri(["bridges"]), {ok, 201, Bridge} = request(
post,
uri(["bridges"]),
?MQTT_BRIDGE_EGRESS(ConnctorID)#{ ?MQTT_BRIDGE_EGRESS(ConnctorID)#{
<<"type">> => ?CONNECTR_TYPE, <<"type">> => ?CONNECTR_TYPE,
<<"name">> => ?BRIDGE_NAME_EGRESS <<"name">> => ?BRIDGE_NAME_EGRESS
}), }
#{ <<"type">> := ?CONNECTR_TYPE ),
, <<"name">> := ?BRIDGE_NAME_EGRESS #{
, <<"connector">> := ConnctorID <<"type">> := ?CONNECTR_TYPE,
} = jsx:decode(Bridge), <<"name">> := ?BRIDGE_NAME_EGRESS,
<<"connector">> := ConnctorID
} = jsx:decode(Bridge),
BridgeIDEgress = emqx_bridge:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_EGRESS), BridgeIDEgress = emqx_bridge:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_EGRESS),
wait_for_resource_ready(BridgeIDEgress, 5), wait_for_resource_ready(BridgeIDEgress, 5),
%% Then we try to update 'server' of the connector, to an unavailable IP address %% Then we try to update 'server' of the connector, to an unavailable IP address
%% The update OK, we recreate the resource even if the resource is current connected, %% The update OK, we recreate the resource even if the resource is current connected,
%% and the target resource we're going to update is unavailable. %% and the target resource we're going to update is unavailable.
{ok, 200, _} = request(put, uri(["connectors", ConnctorID]), {ok, 200, _} = request(
?MQTT_CONNECTOR2(<<"127.0.0.1:2603">>)), put,
uri(["connectors", ConnctorID]),
?MQTT_CONNECTOR2(<<"127.0.0.1:2603">>)
),
%% we fix the 'server' parameter to a normal one, it should work %% we fix the 'server' parameter to a normal one, it should work
{ok, 200, _} = request(put, uri(["connectors", ConnctorID]), {ok, 200, _} = request(
?MQTT_CONNECTOR2(<<"127.0.0.1 : 1883">>)), put,
uri(["connectors", ConnctorID]),
?MQTT_CONNECTOR2(<<"127.0.0.1 : 1883">>)
),
%% delete the bridge %% delete the bridge
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []), {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []),
{ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
@ -390,40 +478,51 @@ t_mqtt_conn_update(_) ->
t_mqtt_conn_update2(_) -> t_mqtt_conn_update2(_) ->
%% then we add a mqtt connector, using POST %% then we add a mqtt connector, using POST
%% but this connector is point to a unreachable server "2603" %% but this connector is point to a unreachable server "2603"
{ok, 201, Connector} = request(post, uri(["connectors"]), {ok, 201, Connector} = request(
?MQTT_CONNECTOR2(<<"127.0.0.1:2603">>) post,
#{ <<"type">> => ?CONNECTR_TYPE uri(["connectors"]),
, <<"name">> => ?CONNECTR_NAME ?MQTT_CONNECTOR2(<<"127.0.0.1:2603">>)#{
}), <<"type">> => ?CONNECTR_TYPE,
<<"name">> => ?CONNECTR_NAME
}
),
#{ <<"server">> := <<"127.0.0.1:2603">> #{<<"server">> := <<"127.0.0.1:2603">>} = jsx:decode(Connector),
} = jsx:decode(Connector),
ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME), ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME),
%% ... and a MQTT bridge, using POST %% ... and a MQTT bridge, using POST
%% we bind this bridge to the connector created just now %% we bind this bridge to the connector created just now
{ok, 201, Bridge} = request(post, uri(["bridges"]), {ok, 201, Bridge} = request(
post,
uri(["bridges"]),
?MQTT_BRIDGE_EGRESS(ConnctorID)#{ ?MQTT_BRIDGE_EGRESS(ConnctorID)#{
<<"type">> => ?CONNECTR_TYPE, <<"type">> => ?CONNECTR_TYPE,
<<"name">> => ?BRIDGE_NAME_EGRESS <<"name">> => ?BRIDGE_NAME_EGRESS
}), }
#{ <<"type">> := ?CONNECTR_TYPE ),
, <<"name">> := ?BRIDGE_NAME_EGRESS #{
, <<"status">> := <<"disconnected">> <<"type">> := ?CONNECTR_TYPE,
, <<"connector">> := ConnctorID <<"name">> := ?BRIDGE_NAME_EGRESS,
} = jsx:decode(Bridge), <<"status">> := <<"disconnected">>,
<<"connector">> := ConnctorID
} = jsx:decode(Bridge),
BridgeIDEgress = emqx_bridge:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_EGRESS), BridgeIDEgress = emqx_bridge:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_EGRESS),
%% We try to fix the 'server' parameter, to another unavailable server.. %% We try to fix the 'server' parameter, to another unavailable server..
%% The update should success: we don't check the connectivity of the new config %% The update should success: we don't check the connectivity of the new config
%% if the resource is now disconnected. %% if the resource is now disconnected.
{ok, 200, _} = request(put, uri(["connectors", ConnctorID]), {ok, 200, _} = request(
?MQTT_CONNECTOR2(<<"127.0.0.1:2604">>)), put,
uri(["connectors", ConnctorID]),
?MQTT_CONNECTOR2(<<"127.0.0.1:2604">>)
),
%% we fix the 'server' parameter to a normal one, it should work %% we fix the 'server' parameter to a normal one, it should work
{ok, 200, _} = request(put, uri(["connectors", ConnctorID]), {ok, 200, _} = request(
?MQTT_CONNECTOR2(<<"127.0.0.1:1883">>)), put,
uri(["connectors", ConnctorID]),
?MQTT_CONNECTOR2(<<"127.0.0.1:1883">>)
),
wait_for_resource_ready(BridgeIDEgress, 5), wait_for_resource_ready(BridgeIDEgress, 5),
{ok, 200, BridgeStr} = request(get, uri(["bridges", BridgeIDEgress]), []), {ok, 200, BridgeStr} = request(get, uri(["bridges", BridgeIDEgress]), []),
?assertMatch(#{ <<"status">> := <<"connected">> ?assertMatch(#{<<"status">> := <<"connected">>}, jsx:decode(BridgeStr)),
}, jsx:decode(BridgeStr)),
%% delete the bridge %% delete the bridge
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []), {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []),
{ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
@ -434,21 +533,26 @@ t_mqtt_conn_update2(_) ->
t_mqtt_conn_update3(_) -> t_mqtt_conn_update3(_) ->
%% we add a mqtt connector, using POST %% we add a mqtt connector, using POST
{ok, 201, _} = request(post, uri(["connectors"]), {ok, 201, _} = request(
?MQTT_CONNECTOR2(<<"127.0.0.1:1883">>) post,
#{ <<"type">> => ?CONNECTR_TYPE uri(["connectors"]),
, <<"name">> => ?CONNECTR_NAME ?MQTT_CONNECTOR2(<<"127.0.0.1:1883">>)#{
}), <<"type">> => ?CONNECTR_TYPE,
<<"name">> => ?CONNECTR_NAME
}
),
ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME), ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME),
%% ... and a MQTT bridge, using POST %% ... and a MQTT bridge, using POST
%% we bind this bridge to the connector created just now %% we bind this bridge to the connector created just now
{ok, 201, Bridge} = request(post, uri(["bridges"]), {ok, 201, Bridge} = request(
post,
uri(["bridges"]),
?MQTT_BRIDGE_EGRESS(ConnctorID)#{ ?MQTT_BRIDGE_EGRESS(ConnctorID)#{
<<"type">> => ?CONNECTR_TYPE, <<"type">> => ?CONNECTR_TYPE,
<<"name">> => ?BRIDGE_NAME_EGRESS <<"name">> => ?BRIDGE_NAME_EGRESS
}), }
#{ <<"connector">> := ConnctorID ),
} = jsx:decode(Bridge), #{<<"connector">> := ConnctorID} = jsx:decode(Bridge),
BridgeIDEgress = emqx_bridge:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_EGRESS), BridgeIDEgress = emqx_bridge:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_EGRESS),
wait_for_resource_ready(BridgeIDEgress, 5), wait_for_resource_ready(BridgeIDEgress, 5),
@ -462,37 +566,54 @@ t_mqtt_conn_update3(_) ->
t_mqtt_conn_testing(_) -> t_mqtt_conn_testing(_) ->
%% APIs for testing the connectivity %% APIs for testing the connectivity
%% then we add a mqtt connector, using POST %% then we add a mqtt connector, using POST
{ok, 204, <<>>} = request(post, uri(["connectors_test"]), {ok, 204, <<>>} = request(
post,
uri(["connectors_test"]),
?MQTT_CONNECTOR2(<<"127.0.0.1:1883">>)#{ ?MQTT_CONNECTOR2(<<"127.0.0.1:1883">>)#{
<<"type">> => ?CONNECTR_TYPE, <<"type">> => ?CONNECTR_TYPE,
<<"name">> => ?BRIDGE_NAME_EGRESS <<"name">> => ?BRIDGE_NAME_EGRESS
}), }
{ok, 400, _} = request(post, uri(["connectors_test"]), ),
{ok, 400, _} = request(
post,
uri(["connectors_test"]),
?MQTT_CONNECTOR2(<<"127.0.0.1:2883">>)#{ ?MQTT_CONNECTOR2(<<"127.0.0.1:2883">>)#{
<<"type">> => ?CONNECTR_TYPE, <<"type">> => ?CONNECTR_TYPE,
<<"name">> => ?BRIDGE_NAME_EGRESS <<"name">> => ?BRIDGE_NAME_EGRESS
}). }
).
t_ingress_mqtt_bridge_with_rules(_) -> t_ingress_mqtt_bridge_with_rules(_) ->
{ok, 201, _} = request(post, uri(["connectors"]), {ok, 201, _} = request(
?MQTT_CONNECTOR(<<"user1">>)#{ <<"type">> => ?CONNECTR_TYPE post,
, <<"name">> => ?CONNECTR_NAME uri(["connectors"]),
}), ?MQTT_CONNECTOR(<<"user1">>)#{
<<"type">> => ?CONNECTR_TYPE,
<<"name">> => ?CONNECTR_NAME
}
),
ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME), ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME),
{ok, 201, _} = request(post, uri(["bridges"]), {ok, 201, _} = request(
post,
uri(["bridges"]),
?MQTT_BRIDGE_INGRESS(ConnctorID)#{ ?MQTT_BRIDGE_INGRESS(ConnctorID)#{
<<"type">> => ?CONNECTR_TYPE, <<"type">> => ?CONNECTR_TYPE,
<<"name">> => ?BRIDGE_NAME_INGRESS <<"name">> => ?BRIDGE_NAME_INGRESS
}), }
),
BridgeIDIngress = emqx_bridge:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_INGRESS), BridgeIDIngress = emqx_bridge:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_INGRESS),
{ok, 201, Rule} = request(post, uri(["rules"]), {ok, 201, Rule} = request(
#{<<"name">> => <<"A rule get messages from a source mqtt bridge">>, post,
<<"enable">> => true, uri(["rules"]),
<<"outputs">> => [#{<<"function">> => "emqx_connector_api_SUITE:inspect"}], #{
<<"sql">> => <<"SELECT * from \"$bridges/", BridgeIDIngress/binary, "\"">> <<"name">> => <<"A rule get messages from a source mqtt bridge">>,
}), <<"enable">> => true,
<<"outputs">> => [#{<<"function">> => "emqx_connector_api_SUITE:inspect"}],
<<"sql">> => <<"SELECT * from \"$bridges/", BridgeIDIngress/binary, "\"">>
}
),
#{<<"id">> := RuleId} = jsx:decode(Rule), #{<<"id">> := RuleId} = jsx:decode(Rule),
%% we now test if the bridge works as expected %% we now test if the bridge works as expected
@ -517,63 +638,81 @@ t_ingress_mqtt_bridge_with_rules(_) ->
false false
after 100 -> after 100 ->
false false
end), end
),
%% and also the rule should be matched, with matched + 1: %% and also the rule should be matched, with matched + 1:
{ok, 200, Rule1} = request(get, uri(["rules", RuleId]), []), {ok, 200, Rule1} = request(get, uri(["rules", RuleId]), []),
#{ <<"id">> := RuleId #{
, <<"metrics">> := #{ <<"id">> := RuleId,
<<"sql.matched">> := 1, <<"metrics">> := #{
<<"sql.passed">> := 1, <<"sql.matched">> := 1,
<<"sql.failed">> := 0, <<"sql.passed">> := 1,
<<"sql.failed.exception">> := 0, <<"sql.failed">> := 0,
<<"sql.failed.no_result">> := 0, <<"sql.failed.exception">> := 0,
<<"sql.matched.rate">> := _, <<"sql.failed.no_result">> := 0,
<<"sql.matched.rate.max">> := _, <<"sql.matched.rate">> := _,
<<"sql.matched.rate.last5m">> := _, <<"sql.matched.rate.max">> := _,
<<"outputs.total">> := 1, <<"sql.matched.rate.last5m">> := _,
<<"outputs.success">> := 1, <<"outputs.total">> := 1,
<<"outputs.failed">> := 0, <<"outputs.success">> := 1,
<<"outputs.failed.out_of_service">> := 0, <<"outputs.failed">> := 0,
<<"outputs.failed.unknown">> := 0 <<"outputs.failed.out_of_service">> := 0,
} <<"outputs.failed.unknown">> := 0
} = jsx:decode(Rule1), }
} = jsx:decode(Rule1),
%% we also check if the outputs of the rule is triggered %% we also check if the outputs of the rule is triggered
?assertMatch(#{inspect := #{ ?assertMatch(
event := <<"$bridges/mqtt", _/binary>>, #{
id := MsgId, inspect := #{
payload := Payload, event := <<"$bridges/mqtt", _/binary>>,
topic := RemoteTopic, id := MsgId,
qos := 0, payload := Payload,
dup := false, topic := RemoteTopic,
retain := false, qos := 0,
pub_props := #{}, dup := false,
timestamp := _ retain := false,
}} when is_binary(MsgId), persistent_term:get(?MODULE)), pub_props := #{},
timestamp := _
}
} when is_binary(MsgId),
persistent_term:get(?MODULE)
),
{ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), []), {ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), []),
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDIngress]), []), {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDIngress]), []),
{ok, 204, <<>>} = request(delete, uri(["connectors", ConnctorID]), []). {ok, 204, <<>>} = request(delete, uri(["connectors", ConnctorID]), []).
t_egress_mqtt_bridge_with_rules(_) -> t_egress_mqtt_bridge_with_rules(_) ->
{ok, 201, _} = request(post, uri(["connectors"]), {ok, 201, _} = request(
?MQTT_CONNECTOR(<<"user1">>)#{ <<"type">> => ?CONNECTR_TYPE post,
, <<"name">> => ?CONNECTR_NAME uri(["connectors"]),
}), ?MQTT_CONNECTOR(<<"user1">>)#{
<<"type">> => ?CONNECTR_TYPE,
<<"name">> => ?CONNECTR_NAME
}
),
ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME), ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME),
{ok, 201, Bridge} = request(post, uri(["bridges"]), {ok, 201, Bridge} = request(
post,
uri(["bridges"]),
?MQTT_BRIDGE_EGRESS(ConnctorID)#{ ?MQTT_BRIDGE_EGRESS(ConnctorID)#{
<<"type">> => ?CONNECTR_TYPE, <<"type">> => ?CONNECTR_TYPE,
<<"name">> => ?BRIDGE_NAME_EGRESS <<"name">> => ?BRIDGE_NAME_EGRESS
}), }
#{ <<"type">> := ?CONNECTR_TYPE, <<"name">> := ?BRIDGE_NAME_EGRESS } = jsx:decode(Bridge), ),
#{<<"type">> := ?CONNECTR_TYPE, <<"name">> := ?BRIDGE_NAME_EGRESS} = jsx:decode(Bridge),
BridgeIDEgress = emqx_bridge:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_EGRESS), BridgeIDEgress = emqx_bridge:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_EGRESS),
{ok, 201, Rule} = request(post, uri(["rules"]), {ok, 201, Rule} = request(
#{<<"name">> => <<"A rule send messages to a sink mqtt bridge">>, post,
<<"enable">> => true, uri(["rules"]),
<<"outputs">> => [BridgeIDEgress], #{
<<"sql">> => <<"SELECT * from \"t/1\"">> <<"name">> => <<"A rule send messages to a sink mqtt bridge">>,
}), <<"enable">> => true,
<<"outputs">> => [BridgeIDEgress],
<<"sql">> => <<"SELECT * from \"t/1\"">>
}
),
#{<<"id">> := RuleId} = jsx:decode(Rule), #{<<"id">> := RuleId} = jsx:decode(Rule),
%% we now test if the bridge works as expected %% we now test if the bridge works as expected
@ -597,7 +736,8 @@ t_egress_mqtt_bridge_with_rules(_) ->
false false
after 100 -> after 100 ->
false false
end), end
),
emqx:unsubscribe(RemoteTopic), emqx:unsubscribe(RemoteTopic),
%% PUBLISH a message to the rule. %% PUBLISH a message to the rule.
@ -609,23 +749,24 @@ t_egress_mqtt_bridge_with_rules(_) ->
wait_for_resource_ready(BridgeIDEgress, 5), wait_for_resource_ready(BridgeIDEgress, 5),
emqx:publish(emqx_message:make(RuleTopic, Payload2)), emqx:publish(emqx_message:make(RuleTopic, Payload2)),
{ok, 200, Rule1} = request(get, uri(["rules", RuleId]), []), {ok, 200, Rule1} = request(get, uri(["rules", RuleId]), []),
#{ <<"id">> := RuleId #{
, <<"metrics">> := #{ <<"id">> := RuleId,
<<"sql.matched">> := 1, <<"metrics">> := #{
<<"sql.passed">> := 1, <<"sql.matched">> := 1,
<<"sql.failed">> := 0, <<"sql.passed">> := 1,
<<"sql.failed.exception">> := 0, <<"sql.failed">> := 0,
<<"sql.failed.no_result">> := 0, <<"sql.failed.exception">> := 0,
<<"sql.matched.rate">> := _, <<"sql.failed.no_result">> := 0,
<<"sql.matched.rate.max">> := _, <<"sql.matched.rate">> := _,
<<"sql.matched.rate.last5m">> := _, <<"sql.matched.rate.max">> := _,
<<"outputs.total">> := 1, <<"sql.matched.rate.last5m">> := _,
<<"outputs.success">> := 1, <<"outputs.total">> := 1,
<<"outputs.failed">> := 0, <<"outputs.success">> := 1,
<<"outputs.failed.out_of_service">> := 0, <<"outputs.failed">> := 0,
<<"outputs.failed.unknown">> := 0 <<"outputs.failed.out_of_service">> := 0,
} <<"outputs.failed.unknown">> := 0
} = jsx:decode(Rule1), }
} = jsx:decode(Rule1),
%% we should receive a message on the "remote" broker, with specified topic %% we should receive a message on the "remote" broker, with specified topic
?assert( ?assert(
receive receive
@ -637,14 +778,19 @@ t_egress_mqtt_bridge_with_rules(_) ->
false false
after 100 -> after 100 ->
false false
end), end
),
%% verify the metrics of the bridge %% verify the metrics of the bridge
{ok, 200, BridgeStr} = request(get, uri(["bridges", BridgeIDEgress]), []), {ok, 200, BridgeStr} = request(get, uri(["bridges", BridgeIDEgress]), []),
?assertMatch(#{ <<"metrics">> := ?metrics(2, 2, 0, _, _, _) ?assertMatch(
, <<"node_metrics">> := #{
[#{<<"node">> := _, <<"metrics">> := ?metrics(2, 2, 0, _, _, _)}] <<"metrics">> := ?metrics(2, 2, 0, _, _, _),
}, jsx:decode(BridgeStr)), <<"node_metrics">> :=
[#{<<"node">> := _, <<"metrics">> := ?metrics(2, 2, 0, _, _, _)}]
},
jsx:decode(BridgeStr)
),
{ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), []), {ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), []),
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []), {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []),
@ -658,8 +804,9 @@ wait_for_resource_ready(InstId, 0) ->
ct:fail(wait_resource_timeout); ct:fail(wait_resource_timeout);
wait_for_resource_ready(InstId, Retry) -> wait_for_resource_ready(InstId, Retry) ->
case emqx_bridge:lookup(InstId) of case emqx_bridge:lookup(InstId) of
{ok, #{resource_data := #{status := connected}}} -> ok; {ok, #{resource_data := #{status := connected}}} ->
ok;
_ -> _ ->
timer:sleep(100), timer:sleep(100),
wait_for_resource_ready(InstId, Retry-1) wait_for_resource_ready(InstId, Retry - 1)
end. end.

View File

@ -65,20 +65,24 @@ t_lifecycle(_Config) ->
perform_lifecycle_check(PoolName, InitialConfig) -> perform_lifecycle_check(PoolName, InitialConfig) ->
{ok, #{config := CheckedConfig}} = {ok, #{config := CheckedConfig}} =
emqx_resource:check_config(?MONGO_RESOURCE_MOD, InitialConfig), emqx_resource:check_config(?MONGO_RESOURCE_MOD, InitialConfig),
{ok, #{state := #{poolname := ReturnedPoolName} = State, {ok, #{
status := InitialStatus}} state := #{poolname := ReturnedPoolName} = State,
= emqx_resource:create_local( status := InitialStatus
PoolName, }} =
?CONNECTOR_RESOURCE_GROUP, emqx_resource:create_local(
?MONGO_RESOURCE_MOD, PoolName,
CheckedConfig, ?CONNECTOR_RESOURCE_GROUP,
#{} ?MONGO_RESOURCE_MOD,
), CheckedConfig,
#{}
),
?assertEqual(InitialStatus, connected), ?assertEqual(InitialStatus, connected),
% Instance should match the state and status of the just started resource % Instance should match the state and status of the just started resource
{ok, ?CONNECTOR_RESOURCE_GROUP, #{state := State, {ok, ?CONNECTOR_RESOURCE_GROUP, #{
status := InitialStatus}} state := State,
= emqx_resource:get_instance(PoolName), status := InitialStatus
}} =
emqx_resource:get_instance(PoolName),
?assertEqual(ok, emqx_resource:health_check(PoolName)), ?assertEqual(ok, emqx_resource:health_check(PoolName)),
% % Perform query as further check that the resource is working as expected % % Perform query as further check that the resource is working as expected
?assertMatch([], emqx_resource:query(PoolName, test_query_find())), ?assertMatch([], emqx_resource:query(PoolName, test_query_find())),
@ -86,11 +90,13 @@ perform_lifecycle_check(PoolName, InitialConfig) ->
?assertEqual(ok, emqx_resource:stop(PoolName)), ?assertEqual(ok, emqx_resource:stop(PoolName)),
% Resource will be listed still, but state will be changed and healthcheck will fail % Resource will be listed still, but state will be changed and healthcheck will fail
% as the worker no longer exists. % as the worker no longer exists.
{ok, ?CONNECTOR_RESOURCE_GROUP, #{state := State, {ok, ?CONNECTOR_RESOURCE_GROUP, #{
status := StoppedStatus}} state := State,
= emqx_resource:get_instance(PoolName), status := StoppedStatus
}} =
emqx_resource:get_instance(PoolName),
?assertEqual(StoppedStatus, disconnected), ?assertEqual(StoppedStatus, disconnected),
?assertEqual({error,health_check_failed}, emqx_resource:health_check(PoolName)), ?assertEqual({error, health_check_failed}, emqx_resource:health_check(PoolName)),
% Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself. % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself.
?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)),
% Can call stop/1 again on an already stopped instance % Can call stop/1 again on an already stopped instance
@ -99,8 +105,8 @@ perform_lifecycle_check(PoolName, InitialConfig) ->
?assertEqual(ok, emqx_resource:restart(PoolName)), ?assertEqual(ok, emqx_resource:restart(PoolName)),
% async restart, need to wait resource % async restart, need to wait resource
timer:sleep(500), timer:sleep(500),
{ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} =
= emqx_resource:get_instance(PoolName), emqx_resource:get_instance(PoolName),
?assertEqual(ok, emqx_resource:health_check(PoolName)), ?assertEqual(ok, emqx_resource:health_check(PoolName)),
?assertMatch([], emqx_resource:query(PoolName, test_query_find())), ?assertMatch([], emqx_resource:query(PoolName, test_query_find())),
?assertMatch(undefined, emqx_resource:query(PoolName, test_query_find_one())), ?assertMatch(undefined, emqx_resource:query(PoolName, test_query_find_one())),
@ -115,12 +121,19 @@ perform_lifecycle_check(PoolName, InitialConfig) ->
% %%------------------------------------------------------------------------------ % %%------------------------------------------------------------------------------
mongo_config() -> mongo_config() ->
RawConfig = list_to_binary(io_lib:format(""" RawConfig = list_to_binary(
mongo_type = single io_lib:format(
database = mqtt ""
pool_size = 8 "\n"
server = \"~s:~b\" " mongo_type = single\n"
""", [?MONGO_HOST, ?MONGO_DEFAULT_PORT])), " database = mqtt\n"
" pool_size = 8\n"
" server = \"~s:~b\"\n"
" "
"",
[?MONGO_HOST, ?MONGO_DEFAULT_PORT]
)
),
{ok, Config} = hocon:binary(RawConfig), {ok, Config} = hocon:binary(RawConfig),
#{<<"config">> => Config}. #{<<"config">> => Config}.

View File

@ -22,23 +22,36 @@
send_and_ack_test() -> send_and_ack_test() ->
%% delegate from gen_rpc to rpc for unit test %% delegate from gen_rpc to rpc for unit test
meck:new(emqtt, [passthrough, no_history]), meck:new(emqtt, [passthrough, no_history]),
meck:expect(emqtt, start_link, 1, meck:expect(
fun(_) -> emqtt,
{ok, spawn_link(fun() -> ok end)} start_link,
end), 1,
fun(_) ->
{ok, spawn_link(fun() -> ok end)}
end
),
meck:expect(emqtt, connect, 1, {ok, dummy}), meck:expect(emqtt, connect, 1, {ok, dummy}),
meck:expect(emqtt, stop, 1, meck:expect(
fun(Pid) -> Pid ! stop end), emqtt,
meck:expect(emqtt, publish, 2, stop,
fun(Client, Msg) -> 1,
Client ! {publish, Msg}, fun(Pid) -> Pid ! stop end
{ok, Msg} %% as packet id ),
end), meck:expect(
emqtt,
publish,
2,
fun(Client, Msg) ->
Client ! {publish, Msg},
%% as packet id
{ok, Msg}
end
),
try try
Max = 1, Max = 1,
Batch = lists:seq(1, Max), Batch = lists:seq(1, Max),
{ok, Conn} = emqx_connector_mqtt_mod:start(#{server => {{127,0,0,1}, 1883}}), {ok, Conn} = emqx_connector_mqtt_mod:start(#{server => {{127, 0, 0, 1}, 1883}}),
% %% return last packet id as batch reference % %% return last packet id as batch reference
{ok, _AckRef} = emqx_connector_mqtt_mod:send(Conn, Batch), {ok, _AckRef} = emqx_connector_mqtt_mod:send(Conn, Batch),
ok = emqx_connector_mqtt_mod:stop(Conn) ok = emqx_connector_mqtt_mod:stop(Conn)

View File

@ -23,13 +23,13 @@
-define(BRIDGE_NAME, test). -define(BRIDGE_NAME, test).
-define(BRIDGE_REG_NAME, emqx_connector_mqtt_worker_test). -define(BRIDGE_REG_NAME, emqx_connector_mqtt_worker_test).
-define(WAIT(PATTERN, TIMEOUT), -define(WAIT(PATTERN, TIMEOUT),
receive receive
PATTERN -> PATTERN ->
ok ok
after after TIMEOUT ->
TIMEOUT -> error(timeout)
error(timeout) end
end). ).
-export([start/1, send/2, stop/1]). -export([start/1, send/2, stop/1]).
@ -125,7 +125,7 @@ manual_start_stop_test() ->
Ref = make_ref(), Ref = make_ref(),
TestPid = self(), TestPid = self(),
BridgeName = manual_start_stop, BridgeName = manual_start_stop,
Config0 = make_config(Ref, TestPid, {ok, #{client_pid => TestPid}}), Config0 = make_config(Ref, TestPid, {ok, #{client_pid => TestPid}}),
Config = Config0#{start_type := manual}, Config = Config0#{start_type := manual},
{ok, Pid} = emqx_connector_mqtt_worker:start_link(Config#{name => BridgeName}), {ok, Pid} = emqx_connector_mqtt_worker:start_link(Config#{name => BridgeName}),
%% call ensure_started again should yield the same result %% call ensure_started again should yield the same result

View File

@ -64,9 +64,11 @@ t_lifecycle(_Config) ->
perform_lifecycle_check(PoolName, InitialConfig) -> perform_lifecycle_check(PoolName, InitialConfig) ->
{ok, #{config := CheckedConfig}} = {ok, #{config := CheckedConfig}} =
emqx_resource:check_config(?MYSQL_RESOURCE_MOD, InitialConfig), emqx_resource:check_config(?MYSQL_RESOURCE_MOD, InitialConfig),
{ok, #{state := #{poolname := ReturnedPoolName} = State, {ok, #{
status := InitialStatus}} = emqx_resource:create_local( state := #{poolname := ReturnedPoolName} = State,
status := InitialStatus
}} = emqx_resource:create_local(
PoolName, PoolName,
?CONNECTOR_RESOURCE_GROUP, ?CONNECTOR_RESOURCE_GROUP,
?MYSQL_RESOURCE_MOD, ?MYSQL_RESOURCE_MOD,
@ -75,23 +77,32 @@ perform_lifecycle_check(PoolName, InitialConfig) ->
), ),
?assertEqual(InitialStatus, connected), ?assertEqual(InitialStatus, connected),
% Instance should match the state and status of the just started resource % Instance should match the state and status of the just started resource
{ok, ?CONNECTOR_RESOURCE_GROUP, #{state := State, {ok, ?CONNECTOR_RESOURCE_GROUP, #{
status := InitialStatus}} state := State,
= emqx_resource:get_instance(PoolName), status := InitialStatus
}} =
emqx_resource:get_instance(PoolName),
?assertEqual(ok, emqx_resource:health_check(PoolName)), ?assertEqual(ok, emqx_resource:health_check(PoolName)),
% % Perform query as further check that the resource is working as expected % % Perform query as further check that the resource is working as expected
?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, test_query_no_params())), ?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, test_query_no_params())),
?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, test_query_with_params())), ?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, test_query_with_params())),
?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, ?assertMatch(
test_query_with_params_and_timeout())), {ok, _, [[1]]},
emqx_resource:query(
PoolName,
test_query_with_params_and_timeout()
)
),
?assertEqual(ok, emqx_resource:stop(PoolName)), ?assertEqual(ok, emqx_resource:stop(PoolName)),
% Resource will be listed still, but state will be changed and healthcheck will fail % Resource will be listed still, but state will be changed and healthcheck will fail
% as the worker no longer exists. % as the worker no longer exists.
{ok, ?CONNECTOR_RESOURCE_GROUP, #{state := State, {ok, ?CONNECTOR_RESOURCE_GROUP, #{
status := StoppedStatus}} state := State,
= emqx_resource:get_instance(PoolName), status := StoppedStatus
}} =
emqx_resource:get_instance(PoolName),
?assertEqual(StoppedStatus, disconnected), ?assertEqual(StoppedStatus, disconnected),
?assertEqual({error,health_check_failed}, emqx_resource:health_check(PoolName)), ?assertEqual({error, health_check_failed}, emqx_resource:health_check(PoolName)),
% Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself. % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself.
?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)),
% Can call stop/1 again on an already stopped instance % Can call stop/1 again on an already stopped instance
@ -105,8 +116,13 @@ perform_lifecycle_check(PoolName, InitialConfig) ->
?assertEqual(ok, emqx_resource:health_check(PoolName)), ?assertEqual(ok, emqx_resource:health_check(PoolName)),
?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, test_query_no_params())), ?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, test_query_no_params())),
?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, test_query_with_params())), ?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, test_query_with_params())),
?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, ?assertMatch(
test_query_with_params_and_timeout())), {ok, _, [[1]]},
emqx_resource:query(
PoolName,
test_query_with_params_and_timeout()
)
),
% Stop and remove the resource in one go. % Stop and remove the resource in one go.
?assertEqual(ok, emqx_resource:remove_local(PoolName)), ?assertEqual(ok, emqx_resource:remove_local(PoolName)),
?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)),
@ -118,14 +134,21 @@ perform_lifecycle_check(PoolName, InitialConfig) ->
% %%------------------------------------------------------------------------------ % %%------------------------------------------------------------------------------
mysql_config() -> mysql_config() ->
RawConfig = list_to_binary(io_lib:format(""" RawConfig = list_to_binary(
auto_reconnect = true io_lib:format(
database = mqtt ""
username= root "\n"
password = public " auto_reconnect = true\n"
pool_size = 8 " database = mqtt\n"
server = \"~s:~b\" " username= root\n"
""", [?MYSQL_HOST, ?MYSQL_DEFAULT_PORT])), " password = public\n"
" pool_size = 8\n"
" server = \"~s:~b\"\n"
" "
"",
[?MYSQL_HOST, ?MYSQL_DEFAULT_PORT]
)
),
{ok, Config} = hocon:binary(RawConfig), {ok, Config} = hocon:binary(RawConfig),
#{<<"config">> => Config}. #{<<"config">> => Config}.

View File

@ -65,20 +65,24 @@ t_lifecycle(_Config) ->
perform_lifecycle_check(PoolName, InitialConfig) -> perform_lifecycle_check(PoolName, InitialConfig) ->
{ok, #{config := CheckedConfig}} = {ok, #{config := CheckedConfig}} =
emqx_resource:check_config(?PGSQL_RESOURCE_MOD, InitialConfig), emqx_resource:check_config(?PGSQL_RESOURCE_MOD, InitialConfig),
{ok, #{state := #{poolname := ReturnedPoolName} = State, {ok, #{
status := InitialStatus}} state := #{poolname := ReturnedPoolName} = State,
= emqx_resource:create_local( status := InitialStatus
PoolName, }} =
?CONNECTOR_RESOURCE_GROUP, emqx_resource:create_local(
?PGSQL_RESOURCE_MOD, PoolName,
CheckedConfig, ?CONNECTOR_RESOURCE_GROUP,
#{} ?PGSQL_RESOURCE_MOD,
), CheckedConfig,
#{}
),
?assertEqual(InitialStatus, connected), ?assertEqual(InitialStatus, connected),
% Instance should match the state and status of the just started resource % Instance should match the state and status of the just started resource
{ok, ?CONNECTOR_RESOURCE_GROUP, #{state := State, {ok, ?CONNECTOR_RESOURCE_GROUP, #{
status := InitialStatus}} state := State,
= emqx_resource:get_instance(PoolName), status := InitialStatus
}} =
emqx_resource:get_instance(PoolName),
?assertEqual(ok, emqx_resource:health_check(PoolName)), ?assertEqual(ok, emqx_resource:health_check(PoolName)),
% % Perform query as further check that the resource is working as expected % % Perform query as further check that the resource is working as expected
?assertMatch({ok, _, [{1}]}, emqx_resource:query(PoolName, test_query_no_params())), ?assertMatch({ok, _, [{1}]}, emqx_resource:query(PoolName, test_query_no_params())),
@ -86,11 +90,13 @@ perform_lifecycle_check(PoolName, InitialConfig) ->
?assertEqual(ok, emqx_resource:stop(PoolName)), ?assertEqual(ok, emqx_resource:stop(PoolName)),
% Resource will be listed still, but state will be changed and healthcheck will fail % Resource will be listed still, but state will be changed and healthcheck will fail
% as the worker no longer exists. % as the worker no longer exists.
{ok, ?CONNECTOR_RESOURCE_GROUP, #{state := State, {ok, ?CONNECTOR_RESOURCE_GROUP, #{
status := StoppedStatus}} state := State,
= emqx_resource:get_instance(PoolName), status := StoppedStatus
}} =
emqx_resource:get_instance(PoolName),
?assertEqual(StoppedStatus, disconnected), ?assertEqual(StoppedStatus, disconnected),
?assertEqual({error,health_check_failed}, emqx_resource:health_check(PoolName)), ?assertEqual({error, health_check_failed}, emqx_resource:health_check(PoolName)),
% Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself. % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself.
?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)),
% Can call stop/1 again on an already stopped instance % Can call stop/1 again on an already stopped instance
@ -99,8 +105,8 @@ perform_lifecycle_check(PoolName, InitialConfig) ->
?assertEqual(ok, emqx_resource:restart(PoolName)), ?assertEqual(ok, emqx_resource:restart(PoolName)),
% async restart, need to wait resource % async restart, need to wait resource
timer:sleep(500), timer:sleep(500),
{ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} =
= emqx_resource:get_instance(PoolName), emqx_resource:get_instance(PoolName),
?assertEqual(ok, emqx_resource:health_check(PoolName)), ?assertEqual(ok, emqx_resource:health_check(PoolName)),
?assertMatch({ok, _, [{1}]}, emqx_resource:query(PoolName, test_query_no_params())), ?assertMatch({ok, _, [{1}]}, emqx_resource:query(PoolName, test_query_no_params())),
?assertMatch({ok, _, [{1}]}, emqx_resource:query(PoolName, test_query_with_params())), ?assertMatch({ok, _, [{1}]}, emqx_resource:query(PoolName, test_query_with_params())),
@ -115,14 +121,21 @@ perform_lifecycle_check(PoolName, InitialConfig) ->
% %%------------------------------------------------------------------------------ % %%------------------------------------------------------------------------------
pgsql_config() -> pgsql_config() ->
RawConfig = list_to_binary(io_lib:format(""" RawConfig = list_to_binary(
auto_reconnect = true io_lib:format(
database = mqtt ""
username= root "\n"
password = public " auto_reconnect = true\n"
pool_size = 8 " database = mqtt\n"
server = \"~s:~b\" " username= root\n"
""", [?PGSQL_HOST, ?PGSQL_DEFAULT_PORT])), " password = public\n"
" pool_size = 8\n"
" server = \"~s:~b\"\n"
" "
"",
[?PGSQL_HOST, ?PGSQL_DEFAULT_PORT]
)
),
{ok, Config} = hocon:binary(RawConfig), {ok, Config} = hocon:binary(RawConfig),
#{<<"config">> => Config}. #{<<"config">> => Config}.

View File

@ -80,8 +80,10 @@ t_sentinel_lifecycle(_Config) ->
perform_lifecycle_check(PoolName, InitialConfig, RedisCommand) -> perform_lifecycle_check(PoolName, InitialConfig, RedisCommand) ->
{ok, #{config := CheckedConfig}} = {ok, #{config := CheckedConfig}} =
emqx_resource:check_config(?REDIS_RESOURCE_MOD, InitialConfig), emqx_resource:check_config(?REDIS_RESOURCE_MOD, InitialConfig),
{ok, #{state := #{poolname := ReturnedPoolName} = State, {ok, #{
status := InitialStatus}} = emqx_resource:create_local( state := #{poolname := ReturnedPoolName} = State,
status := InitialStatus
}} = emqx_resource:create_local(
PoolName, PoolName,
?CONNECTOR_RESOURCE_GROUP, ?CONNECTOR_RESOURCE_GROUP,
?REDIS_RESOURCE_MOD, ?REDIS_RESOURCE_MOD,
@ -90,20 +92,24 @@ perform_lifecycle_check(PoolName, InitialConfig, RedisCommand) ->
), ),
?assertEqual(InitialStatus, connected), ?assertEqual(InitialStatus, connected),
% Instance should match the state and status of the just started resource % Instance should match the state and status of the just started resource
{ok, ?CONNECTOR_RESOURCE_GROUP, #{state := State, {ok, ?CONNECTOR_RESOURCE_GROUP, #{
status := InitialStatus}} state := State,
= emqx_resource:get_instance(PoolName), status := InitialStatus
}} =
emqx_resource:get_instance(PoolName),
?assertEqual(ok, emqx_resource:health_check(PoolName)), ?assertEqual(ok, emqx_resource:health_check(PoolName)),
% Perform query as further check that the resource is working as expected % Perform query as further check that the resource is working as expected
?assertEqual({ok, <<"PONG">>}, emqx_resource:query(PoolName, {cmd, RedisCommand})), ?assertEqual({ok, <<"PONG">>}, emqx_resource:query(PoolName, {cmd, RedisCommand})),
?assertEqual(ok, emqx_resource:stop(PoolName)), ?assertEqual(ok, emqx_resource:stop(PoolName)),
% Resource will be listed still, but state will be changed and healthcheck will fail % Resource will be listed still, but state will be changed and healthcheck will fail
% as the worker no longer exists. % as the worker no longer exists.
{ok, ?CONNECTOR_RESOURCE_GROUP, #{state := State, {ok, ?CONNECTOR_RESOURCE_GROUP, #{
status := StoppedStatus}} state := State,
= emqx_resource:get_instance(PoolName), status := StoppedStatus
}} =
emqx_resource:get_instance(PoolName),
?assertEqual(StoppedStatus, disconnected), ?assertEqual(StoppedStatus, disconnected),
?assertEqual({error,health_check_failed}, emqx_resource:health_check(PoolName)), ?assertEqual({error, health_check_failed}, emqx_resource:health_check(PoolName)),
% Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself. % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself.
?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)),
% Can call stop/1 again on an already stopped instance % Can call stop/1 again on an already stopped instance
@ -112,8 +118,8 @@ perform_lifecycle_check(PoolName, InitialConfig, RedisCommand) ->
?assertEqual(ok, emqx_resource:restart(PoolName)), ?assertEqual(ok, emqx_resource:restart(PoolName)),
% async restart, need to wait resource % async restart, need to wait resource
timer:sleep(500), timer:sleep(500),
{ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} =
= emqx_resource:get_instance(PoolName), emqx_resource:get_instance(PoolName),
?assertEqual(ok, emqx_resource:health_check(PoolName)), ?assertEqual(ok, emqx_resource:health_check(PoolName)),
?assertEqual({ok, <<"PONG">>}, emqx_resource:query(PoolName, {cmd, RedisCommand})), ?assertEqual({ok, <<"PONG">>}, emqx_resource:query(PoolName, {cmd, RedisCommand})),
% Stop and remove the resource in one go. % Stop and remove the resource in one go.
@ -136,14 +142,21 @@ redis_config_sentinel() ->
redis_config_base("sentinel", "servers"). redis_config_base("sentinel", "servers").
redis_config_base(Type, ServerKey) -> redis_config_base(Type, ServerKey) ->
RawConfig = list_to_binary(io_lib:format(""" RawConfig = list_to_binary(
auto_reconnect = true io_lib:format(
database = 1 ""
pool_size = 8 "\n"
redis_type = ~s " auto_reconnect = true\n"
password = public " database = 1\n"
~s = \"~s:~b\" " pool_size = 8\n"
""", [Type, ServerKey, ?REDIS_HOST, ?REDIS_PORT])), " redis_type = ~s\n"
" password = public\n"
" ~s = \"~s:~b\"\n"
" "
"",
[Type, ServerKey, ?REDIS_HOST, ?REDIS_PORT]
)
),
{ok, Config} = hocon:binary(RawConfig), {ok, Config} = hocon:binary(RawConfig),
#{<<"config">> => Config}. #{<<"config">> => Config}.

View File

@ -19,10 +19,11 @@
-include_lib("common_test/include/ct.hrl"). -include_lib("common_test/include/ct.hrl").
-include_lib("eunit/include/eunit.hrl"). -include_lib("eunit/include/eunit.hrl").
-export([ check_fields/1 -export([
, start_apps/1 check_fields/1,
, stop_apps/1 start_apps/1,
]). stop_apps/1
]).
check_fields({FieldName, FieldValue}) -> check_fields({FieldName, FieldValue}) ->
?assert(is_atom(FieldName)), ?assert(is_atom(FieldName)),
@ -30,10 +31,10 @@ check_fields({FieldName, FieldValue}) ->
is_map(FieldValue) -> is_map(FieldValue) ->
ct:pal("~p~n", [{FieldName, FieldValue}]), ct:pal("~p~n", [{FieldName, FieldValue}]),
?assert( ?assert(
(maps:is_key(type, FieldValue) (maps:is_key(type, FieldValue) andalso
andalso maps:is_key(default, FieldValue)) maps:is_key(default, FieldValue)) orelse
orelse ((maps:is_key(required, FieldValue) (maps:is_key(required, FieldValue) andalso
andalso maps:get(required, FieldValue) =:= false)) maps:get(required, FieldValue) =:= false)
); );
true -> true ->
?assert(is_function(FieldValue)) ?assert(is_function(FieldValue))

View File

@ -74,8 +74,8 @@ Note: `sample_interval` should be a divisor of 60."""
} }
inet6 { inet6 {
desc { desc {
en: "Enable IPv6 support." en: "Enable IPv6 support, default is false, which means IPv4 only."
zh: "启用IPv6" zh: "启用IPv6 如果机器不支持IPv6请关闭此选项否则会导致仪表盘无法使用。"
} }
label { label {
en: "IPv6" en: "IPv6"
@ -85,7 +85,7 @@ Note: `sample_interval` should be a divisor of 60."""
ipv6_v6only { ipv6_v6only {
desc { desc {
en: "Disable IPv4-to-IPv6 mapping for the listener." en: "Disable IPv4-to-IPv6 mapping for the listener."
zh: "禁用IPv4-to-IPv6映射" zh: "当开启 inet6 功能的同时禁用 IPv4-to-IPv6 映射。该配置仅在 inet6 功能开启时有效。"
} }
label { label {
en: "IPv6 only" en: "IPv6 only"
@ -132,6 +132,16 @@ Note: `sample_interval` should be a divisor of 60."""
zh: "HTTPS" zh: "HTTPS"
} }
} }
listener_enable {
desc {
en: "Ignore or enable this listener"
zh: "忽略或启用该监听器配置"
}
label {
en: "Enable"
zh: "启用"
}
}
bind { bind {
desc { desc {
en: "Port without IP(18083) or port with specified IP(127.0.0.1:18083)." en: "Port without IP(18083) or port with specified IP(127.0.0.1:18083)."

View File

@ -153,10 +153,13 @@ apps() ->
]. ].
listeners(Listeners) -> listeners(Listeners) ->
lists:map( lists:filtermap(
fun({Protocol, Conf}) -> fun({Protocol, Conf}) ->
{Conf1, Bind} = ip_port(Conf), maps:get(enable, Conf) andalso
{listener_name(Protocol, Conf1), Protocol, Bind, ranch_opts(Conf1)} begin
{Conf1, Bind} = ip_port(Conf),
{true, {listener_name(Protocol, Conf1), Protocol, Bind, ranch_opts(Conf1)}}
end
end, end,
maps:to_list(Listeners) maps:to_list(Listeners)
). ).
@ -172,34 +175,34 @@ init_i18n() ->
Lang = emqx_conf:get([dashboard, i18n_lang], en), Lang = emqx_conf:get([dashboard, i18n_lang], en),
init_i18n(File, Lang). init_i18n(File, Lang).
ranch_opts(RanchOptions) -> ranch_opts(Options) ->
Keys = [ Keys = [
{ack_timeout, handshake_timeout}, handshake_timeout,
connection_type, connection_type,
max_connections, max_connections,
num_acceptors, num_acceptors,
shutdown, shutdown,
socket socket
], ],
{S, R} = lists:foldl(fun key_take/2, {RanchOptions, #{}}, Keys), RanchOpts = maps:with(Keys, Options),
R#{socket_opts => maps:fold(fun key_only/3, [], S)}. SocketOpts = maps:fold(
fun filter_false/3,
key_take(Key, {All, R}) -> [],
{K, KX} = maps:without([enable, inet6, ipv6_v6only | Keys], Options)
case Key of ),
{K1, K2} -> {K1, K2}; InetOpts =
_ -> {Key, Key} case Options of
#{inet6 := true, ipv6_v6only := true} ->
[inet6, {ipv6_v6only, true}];
#{inet6 := true, ipv6_v6only := false} ->
[inet6];
_ ->
[inet]
end, end,
case maps:get(K, All, undefined) of RanchOpts#{socket_opts => InetOpts ++ SocketOpts}.
undefined ->
{All, R};
V ->
{maps:remove(K, All), R#{KX => V}}
end.
key_only(K, true, S) -> [K | S]; filter_false(_K, false, S) -> S;
key_only(_K, false, S) -> S; filter_false(K, V, S) -> [{K, V} | S].
key_only(K, V, S) -> [{K, V} | S].
listener_name(Protocol, #{port := Port, ip := IP}) -> listener_name(Protocol, #{port := Port, ip := IP}) ->
Name = Name =

View File

@ -63,22 +63,42 @@ remove_handler() ->
ok. ok.
pre_config_update(_Path, UpdateConf0, RawConf) -> pre_config_update(_Path, UpdateConf0, RawConf) ->
UpdateConf = UpdateConf = remove_sensitive_data(UpdateConf0),
case UpdateConf0 of
#{<<"default_password">> := <<"******">>} ->
maps:remove(<<"default_password">>, UpdateConf0);
_ ->
UpdateConf0
end,
NewConf = emqx_map_lib:deep_merge(RawConf, UpdateConf), NewConf = emqx_map_lib:deep_merge(RawConf, UpdateConf),
{ok, NewConf}. {ok, NewConf}.
-define(SENSITIVE_PASSWORD, <<"******">>).
remove_sensitive_data(Conf0) ->
Conf1 =
case Conf0 of
#{<<"default_password">> := ?SENSITIVE_PASSWORD} ->
maps:remove(<<"default_password">>, Conf0);
_ ->
Conf0
end,
case Conf1 of
#{<<"listeners">> := #{<<"https">> := #{<<"password">> := ?SENSITIVE_PASSWORD}}} ->
emqx_map_lib:deep_remove([<<"listeners">>, <<"https">>, <<"password">>], Conf1);
_ ->
Conf1
end.
post_config_update(_, _Req, NewConf, OldConf, _AppEnvs) -> post_config_update(_, _Req, NewConf, OldConf, _AppEnvs) ->
#{listeners := NewListeners} = NewConf, #{listeners := #{http := NewHttp, https := NewHttps}} = NewConf,
#{listeners := OldListeners} = OldConf, #{listeners := #{http := OldHttp, https := OldHttps}} = OldConf,
_ = _ =
case NewListeners =:= OldListeners of case diff_listeners(OldHttp, NewHttp, OldHttps, NewHttps) of
true -> ok; identical -> ok;
false -> erlang:send_after(500, ?MODULE, {update_listeners, OldListeners, NewListeners}) {Stop, Start} -> erlang:send_after(500, ?MODULE, {update_listeners, Stop, Start})
end, end,
ok. ok.
diff_listeners(Http, Http, Https, Https) ->
identical;
diff_listeners(OldHttp, NewHttp, Https, Https) ->
{#{http => OldHttp}, #{http => NewHttp}};
diff_listeners(Http, Http, OldHttps, NewHttps) ->
{#{https => OldHttps}, #{https => NewHttps}};
diff_listeners(OldHttp, NewHttp, OldHttps, NewHttps) ->
{#{http => OldHttp, https => OldHttps}, #{http => NewHttp, https => NewHttps}}.

View File

@ -77,7 +77,32 @@ fields("listeners") ->
]; ];
fields("http") -> fields("http") ->
[ [
{"bind", fun bind/1}, enable(true),
bind(18803)
| common_listener_fields()
];
fields("https") ->
[
enable(false),
bind(18804)
| common_listener_fields() ++
exclude_fields(
["enable", "fail_if_no_peer_cert"],
emqx_schema:server_ssl_opts_schema(#{}, true)
)
].
exclude_fields([], Fields) ->
Fields;
exclude_fields([FieldName | Rest], Fields) ->
%% assert field exists
case lists:keytake(FieldName, 1, Fields) of
{value, _, New} -> exclude_fields(Rest, New);
false -> error({FieldName, Fields})
end.
common_listener_fields() ->
[
{"num_acceptors", {"num_acceptors",
sc( sc(
integer(), integer(),
@ -126,25 +151,40 @@ fields("http") ->
desc => ?DESC(ipv6_v6only) desc => ?DESC(ipv6_v6only)
} }
)} )}
]; ].
fields("https") ->
fields("http") ++
proplists:delete(
"fail_if_no_peer_cert",
emqx_schema:server_ssl_opts_schema(#{}, true)
).
desc("dashboard") -> ?DESC(desc_dashboard); enable(Bool) ->
desc("listeners") -> ?DESC(desc_listeners); {"enable",
desc("http") -> ?DESC(desc_http); sc(
desc("https") -> ?DESC(desc_https); boolean(),
desc(_) -> undefined. #{
default => Bool,
required => true,
desc => ?DESC(listener_enable)
}
)}.
bind(type) -> hoconsc:union([non_neg_integer(), emqx_schema:ip_port()]); bind(Port) ->
bind(default) -> 18083; {"bind",
bind(required) -> true; sc(
bind(desc) -> ?DESC(bind); hoconsc:union([non_neg_integer(), emqx_schema:ip_port()]),
bind(_) -> undefined. #{
default => Port,
required => true,
desc => ?DESC(bind)
}
)}.
desc("dashboard") ->
?DESC(desc_dashboard);
desc("listeners") ->
?DESC(desc_listeners);
desc("http") ->
?DESC(desc_http);
desc("https") ->
?DESC(desc_https);
desc(_) ->
undefined.
default_username(type) -> binary(); default_username(type) -> binary();
default_username(default) -> "admin"; default_username(default) -> "admin";

View File

@ -37,6 +37,7 @@ set_default_config(DefaultUsername) ->
Config = #{ Config = #{
listeners => #{ listeners => #{
http => #{ http => #{
enable => true,
port => 18083 port => 18083
} }
}, },

View File

@ -90,4 +90,12 @@ emqx_gateway_api_authn {
zh: """Client ID 模糊搜索""" zh: """Client ID 模糊搜索"""
} }
} }
is_superuser {
desc {
en: """Is superuser"""
zh: """是否是超级用户"""
}
}
} }

View File

@ -195,7 +195,8 @@ parse_qstring(Qs) ->
<<"page">>, <<"page">>,
<<"limit">>, <<"limit">>,
<<"like_username">>, <<"like_username">>,
<<"like_clientid">> <<"like_clientid">>,
<<"is_superuser">>
], ],
Qs Qs
). ).
@ -397,6 +398,15 @@ params_fuzzy_in_qs() ->
desc => ?DESC(like_clientid), desc => ?DESC(like_clientid),
example => <<"clientid">> example => <<"clientid">>
} }
)},
{is_superuser,
mk(
boolean(),
#{
in => query,
required => false,
desc => ?DESC(is_superuser)
}
)} )}
]. ].

View File

@ -415,6 +415,53 @@ t_listeners_authn_data_mgmt(_) ->
), ),
{204, _} = request(delete, "/gateway/stomp"). {204, _} = request(delete, "/gateway/stomp").
t_authn_fuzzy_search(_) ->
GwConf = #{name => <<"stomp">>},
{201, _} = request(post, "/gateway", GwConf),
{204, _} = request(get, "/gateway/stomp/authentication"),
AuthConf = #{
mechanism => <<"password_based">>,
backend => <<"built_in_database">>,
user_id_type => <<"clientid">>
},
{201, _} = request(post, "/gateway/stomp/authentication", AuthConf),
{200, ConfResp} = request(get, "/gateway/stomp/authentication"),
assert_confs(AuthConf, ConfResp),
Checker = fun({User, Fuzzy}) ->
{200, #{data := [UserRespd]}} = request(
get, "/gateway/stomp/authentication/users", Fuzzy
),
assert_confs(UserRespd, User)
end,
Create = fun(User) ->
{201, _} = request(post, "/gateway/stomp/authentication/users", User)
end,
UserDatas = [
#{
user_id => <<"test">>,
password => <<"123456">>,
is_superuser => false
},
#{
user_id => <<"foo">>,
password => <<"123456">>,
is_superuser => true
}
],
FuzzyDatas = [[{<<"like_username">>, <<"test">>}], [{<<"is_superuser">>, <<"true">>}]],
lists:foreach(Create, UserDatas),
lists:foreach(Checker, lists:zip(UserDatas, FuzzyDatas)),
{204, _} = request(delete, "/gateway/stomp/authentication"),
{204, _} = request(get, "/gateway/stomp/authentication"),
{204, _} = request(delete, "/gateway/stomp").
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Asserts %% Asserts

View File

@ -141,7 +141,8 @@ fields(app) ->
binary(), binary(),
#{example => <<"Note">>, required => false} #{example => <<"Note">>, required => false}
)}, )},
{enable, hoconsc:mk(boolean(), #{desc => "Enable/Disable", required => false})} {enable, hoconsc:mk(boolean(), #{desc => "Enable/Disable", required => false})},
{expired, hoconsc:mk(boolean(), #{desc => "Expired", required => false})}
]; ];
fields(name) -> fields(name) ->
[ [

View File

@ -513,7 +513,10 @@ fields(keepalive) ->
fields(subscribe) -> fields(subscribe) ->
[ [
{topic, hoconsc:mk(binary(), #{desc => <<"Topic">>})}, {topic, hoconsc:mk(binary(), #{desc => <<"Topic">>})},
{qos, hoconsc:mk(emqx_schema:qos(), #{desc => <<"QoS">>})} {qos, hoconsc:mk(emqx_schema:qos(), #{desc => <<"QoS">>})},
{nl, hoconsc:mk(integer(), #{default => 0, desc => <<"No Local">>})},
{rap, hoconsc:mk(integer(), #{default => 0, desc => <<"Retain as Published">>})},
{rh, hoconsc:mk(integer(), #{default => 0, desc => <<"Retain Handling">>})}
]; ];
fields(unsubscribe) -> fields(unsubscribe) ->
[ [
@ -536,9 +539,8 @@ authz_cache(delete, #{bindings := Bindings}) ->
clean_authz_cache(Bindings). clean_authz_cache(Bindings).
subscribe(post, #{bindings := #{clientid := ClientID}, body := TopicInfo}) -> subscribe(post, #{bindings := #{clientid := ClientID}, body := TopicInfo}) ->
Topic = maps:get(<<"topic">>, TopicInfo), Opts = emqx_map_lib:unsafe_atom_key_map(TopicInfo),
Qos = maps:get(<<"qos">>, TopicInfo, 0), subscribe(Opts#{clientid => ClientID}).
subscribe(#{clientid => ClientID, topic => Topic, qos => Qos}).
unsubscribe(post, #{bindings := #{clientid := ClientID}, body := TopicInfo}) -> unsubscribe(post, #{bindings := #{clientid := ClientID}, body := TopicInfo}) ->
Topic = maps:get(<<"topic">>, TopicInfo), Topic = maps:get(<<"topic">>, TopicInfo),
@ -548,11 +550,7 @@ unsubscribe(post, #{bindings := #{clientid := ClientID}, body := TopicInfo}) ->
subscribe_batch(post, #{bindings := #{clientid := ClientID}, body := TopicInfos}) -> subscribe_batch(post, #{bindings := #{clientid := ClientID}, body := TopicInfos}) ->
Topics = Topics =
[ [
begin emqx_map_lib:unsafe_atom_key_map(TopicInfo)
Topic = maps:get(<<"topic">>, TopicInfo),
Qos = maps:get(<<"qos">>, TopicInfo, 0),
#{topic => Topic, qos => Qos}
end
|| TopicInfo <- TopicInfos || TopicInfo <- TopicInfos
], ],
subscribe_batch(#{clientid => ClientID, topics => Topics}). subscribe_batch(#{clientid => ClientID, topics => Topics}).
@ -564,12 +562,14 @@ subscriptions(get, #{bindings := #{clientid := ClientID}}) ->
{Node, Subs} -> {Node, Subs} ->
Formatter = Formatter =
fun({Topic, SubOpts}) -> fun({Topic, SubOpts}) ->
#{ maps:merge(
node => Node, #{
clientid => ClientID, node => Node,
topic => Topic, clientid => ClientID,
qos => maps:get(qos, SubOpts) topic => Topic
} },
maps:with([qos, nl, rap, rh], SubOpts)
)
end, end,
{200, lists:map(Formatter, Subs)} {200, lists:map(Formatter, Subs)}
end. end.
@ -659,21 +659,16 @@ clean_authz_cache(#{clientid := ClientID}) ->
{500, #{code => <<"UNKNOW_ERROR">>, message => Message}} {500, #{code => <<"UNKNOW_ERROR">>, message => Message}}
end. end.
subscribe(#{clientid := ClientID, topic := Topic, qos := Qos}) -> subscribe(#{clientid := ClientID, topic := Topic} = Sub) ->
case do_subscribe(ClientID, Topic, Qos) of Opts = maps:with([qos, nl, rap, rh], Sub),
case do_subscribe(ClientID, Topic, Opts) of
{error, channel_not_found} -> {error, channel_not_found} ->
{404, ?CLIENT_ID_NOT_FOUND}; {404, ?CLIENT_ID_NOT_FOUND};
{error, Reason} -> {error, Reason} ->
Message = list_to_binary(io_lib:format("~p", [Reason])), Message = list_to_binary(io_lib:format("~p", [Reason])),
{500, #{code => <<"UNKNOW_ERROR">>, message => Message}}; {500, #{code => <<"UNKNOW_ERROR">>, message => Message}};
{ok, Node} -> {ok, Node} ->
Response = Response = Sub#{node => Node},
#{
clientid => ClientID,
topic => Topic,
qos => Qos,
node => Node
},
{200, Response} {200, Response}
end. end.
@ -686,15 +681,18 @@ unsubscribe(#{clientid := ClientID, topic := Topic}) ->
end. end.
subscribe_batch(#{clientid := ClientID, topics := Topics}) -> subscribe_batch(#{clientid := ClientID, topics := Topics}) ->
ArgList = [[ClientID, Topic, Qos] || #{topic := Topic, qos := Qos} <- Topics], ArgList = [
[ClientID, Topic, maps:with([qos, nl, rap, rh], Sub)]
|| #{topic := Topic} = Sub <- Topics
],
emqx_mgmt_util:batch_operation(?MODULE, do_subscribe, ArgList). emqx_mgmt_util:batch_operation(?MODULE, do_subscribe, ArgList).
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% internal function %% internal function
do_subscribe(ClientID, Topic0, Qos) -> do_subscribe(ClientID, Topic0, Options) ->
{Topic, Opts} = emqx_topic:parse(Topic0), {Topic, Opts} = emqx_topic:parse(Topic0, Options),
TopicTable = [{Topic, Opts#{qos => Qos}}], TopicTable = [{Topic, Opts}],
case emqx_mgmt:subscribe(ClientID, TopicTable) of case emqx_mgmt:subscribe(ClientID, TopicTable) of
{error, Reason} -> {error, Reason} ->
{error, Reason}; {error, Reason};

View File

@ -43,7 +43,8 @@ schema("/cluster") ->
responses => #{ responses => #{
200 => [ 200 => [
{name, ?HOCON(string(), #{desc => "Cluster name"})}, {name, ?HOCON(string(), #{desc => "Cluster name"})},
{nodes, ?HOCON(?ARRAY(string()), #{desc => "Node name"})} {nodes, ?HOCON(?ARRAY(string()), #{desc => "Node name"})},
{self, ?HOCON(string(), #{desc => "Self node name"})}
] ]
} }
} }
@ -97,7 +98,8 @@ cluster_info(get, _) ->
ClusterName = application:get_env(ekka, cluster_name, emqxcl), ClusterName = application:get_env(ekka, cluster_name, emqxcl),
Info = #{ Info = #{
name => ClusterName, name => ClusterName,
nodes => mria_mnesia:running_nodes() nodes => mria_mnesia:running_nodes(),
self => node()
}, },
{200, Info}. {200, Info}.

View File

@ -72,7 +72,10 @@ fields(subscription) ->
{node, hoconsc:mk(binary(), #{desc => <<"Access type">>})}, {node, hoconsc:mk(binary(), #{desc => <<"Access type">>})},
{topic, hoconsc:mk(binary(), #{desc => <<"Topic name">>})}, {topic, hoconsc:mk(binary(), #{desc => <<"Topic name">>})},
{clientid, hoconsc:mk(binary(), #{desc => <<"Client identifier">>})}, {clientid, hoconsc:mk(binary(), #{desc => <<"Client identifier">>})},
{qos, hoconsc:mk(emqx_schema:qos(), #{desc => <<"QoS">>})} {qos, hoconsc:mk(emqx_schema:qos(), #{desc => <<"QoS">>})},
{nl, hoconsc:mk(integer(), #{desc => <<"No Local">>})},
{rap, hoconsc:mk(integer(), #{desc => <<"Retain as Published">>})},
{rh, hoconsc:mk(integer(), #{desc => <<"Retain Handling">>})}
]. ].
parameters() -> parameters() ->
@ -163,22 +166,20 @@ format(Items) when is_list(Items) ->
[format(Item) || Item <- Items]; [format(Item) || Item <- Items];
format({{Subscriber, Topic}, Options}) -> format({{Subscriber, Topic}, Options}) ->
format({Subscriber, Topic, Options}); format({Subscriber, Topic, Options});
format({_Subscriber, Topic, Options = #{share := Group}}) ->
QoS = maps:get(qos, Options),
#{
topic => filename:join([<<"$share">>, Group, Topic]),
clientid => maps:get(subid, Options),
qos => QoS,
node => node()
};
format({_Subscriber, Topic, Options}) -> format({_Subscriber, Topic, Options}) ->
QoS = maps:get(qos, Options), maps:merge(
#{ #{
topic => Topic, topic => get_topic(Topic, Options),
clientid => maps:get(subid, Options), clientid => maps:get(subid, Options),
qos => QoS, node => node()
node => node() },
}. maps:with([qos, nl, rap, rh], Options)
).
get_topic(Topic, #{share := Group}) ->
filename:join([<<"$share">>, Group, Topic]);
get_topic(Topic, _) ->
Topic.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Query Function %% Query Function

View File

@ -129,17 +129,20 @@ ensure_not_undefined(undefined, Old) -> Old;
ensure_not_undefined(New, _Old) -> New. ensure_not_undefined(New, _Old) -> New.
to_map(Apps) when is_list(Apps) -> to_map(Apps) when is_list(Apps) ->
Fields = record_info(fields, ?APP), [to_map(App) || App <- Apps];
lists:map( to_map(#?APP{name = N, api_key = K, enable = E, expired_at = ET, created_at = CT, desc = D}) ->
fun(Trace0 = #?APP{}) -> #{
[_ | Values] = tuple_to_list(Trace0), name => N,
maps:remove(api_secret_hash, maps:from_list(lists:zip(Fields, Values))) api_key => K,
end, enable => E,
Apps expired_at => ET,
); created_at => CT,
to_map(App0) -> desc => D,
[App] = to_map([App0]), expired => is_expired(ET)
App. }.
is_expired(undefined) -> false;
is_expired(ExpiredTime) -> ExpiredTime < erlang:system_time(second).
create_app(Name, Enable, ExpiredAt, Desc) -> create_app(Name, Enable, ExpiredAt, Desc) ->
ApiSecret = generate_api_secret(), ApiSecret = generate_api_secret(),

View File

@ -43,7 +43,9 @@ t_clients(_) ->
AuthHeader = emqx_mgmt_api_test_util:auth_header_(), AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
{ok, C1} = emqtt:start_link(#{username => Username1, clientid => ClientId1}), {ok, C1} = emqtt:start_link(#{
username => Username1, clientid => ClientId1, proto_ver => v5
}),
{ok, _} = emqtt:connect(C1), {ok, _} = emqtt:connect(C1),
{ok, C2} = emqtt:start_link(#{username => Username2, clientid => ClientId2}), {ok, C2} = emqtt:start_link(#{username => Username2, clientid => ClientId2}),
{ok, _} = emqtt:connect(C2), {ok, _} = emqtt:connect(C2),
@ -87,7 +89,7 @@ t_clients(_) ->
?assertEqual("[]", Client1AuthzCache), ?assertEqual("[]", Client1AuthzCache),
%% post /clients/:clientid/subscribe %% post /clients/:clientid/subscribe
SubscribeBody = #{topic => Topic, qos => Qos}, SubscribeBody = #{topic => Topic, qos => Qos, nl => 1, rh => 1},
SubscribePath = emqx_mgmt_api_test_util:api_path([ SubscribePath = emqx_mgmt_api_test_util:api_path([
"clients", "clients",
binary_to_list(ClientId1), binary_to_list(ClientId1),
@ -105,6 +107,32 @@ t_clients(_) ->
?assertEqual(AfterSubTopic, Topic), ?assertEqual(AfterSubTopic, Topic),
?assertEqual(AfterSubQos, Qos), ?assertEqual(AfterSubQos, Qos),
%% get /clients/:clientid/subscriptions
SubscriptionsPath = emqx_mgmt_api_test_util:api_path([
"clients",
binary_to_list(ClientId1),
"subscriptions"
]),
{ok, SubscriptionsRes} = emqx_mgmt_api_test_util:request_api(
get,
SubscriptionsPath,
"",
AuthHeader
),
[SubscriptionsData] = emqx_json:decode(SubscriptionsRes, [return_maps]),
?assertMatch(
#{
<<"clientid">> := ClientId1,
<<"nl">> := 1,
<<"rap">> := 0,
<<"rh">> := 1,
<<"node">> := _,
<<"qos">> := Qos,
<<"topic">> := Topic
},
SubscriptionsData
),
%% post /clients/:clientid/unsubscribe %% post /clients/:clientid/unsubscribe
UnSubscribePath = emqx_mgmt_api_test_util:api_path([ UnSubscribePath = emqx_mgmt_api_test_util:api_path([
"clients", "clients",

View File

@ -72,19 +72,19 @@ t_wss_crud_listeners_by_id(_) ->
crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type). crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type).
crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type) -> crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type) ->
TcpPath = emqx_mgmt_api_test_util:api_path(["listeners", ListenerId]), OriginPath = emqx_mgmt_api_test_util:api_path(["listeners", ListenerId]),
NewPath = emqx_mgmt_api_test_util:api_path(["listeners", NewListenerId]), NewPath = emqx_mgmt_api_test_util:api_path(["listeners", NewListenerId]),
TcpListener = request(get, TcpPath, [], []), OriginListener = request(get, OriginPath, [], []),
%% create with full options %% create with full options
?assertEqual({error, not_found}, is_running(NewListenerId)), ?assertEqual({error, not_found}, is_running(NewListenerId)),
?assertMatch({error, {"HTTP/1.1", 404, _}}, request(get, NewPath, [], [])), ?assertMatch({error, {"HTTP/1.1", 404, _}}, request(get, NewPath, [], [])),
NewConf = TcpListener#{ NewConf = OriginListener#{
<<"id">> => NewListenerId, <<"id">> => NewListenerId,
<<"bind">> => <<"0.0.0.0:2883">> <<"bind">> => <<"0.0.0.0:2883">>
}, },
Create = request(post, NewPath, [], NewConf), Create = request(post, NewPath, [], NewConf),
?assertEqual(lists:sort(maps:keys(TcpListener)), lists:sort(maps:keys(Create))), ?assertEqual(lists:sort(maps:keys(OriginListener)), lists:sort(maps:keys(Create))),
Get1 = request(get, NewPath, [], []), Get1 = request(get, NewPath, [], []),
?assertMatch(Create, Get1), ?assertMatch(Create, Get1),
?assert(is_running(NewListenerId)), ?assert(is_running(NewListenerId)),
@ -93,20 +93,42 @@ crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type) ->
MinPath = emqx_mgmt_api_test_util:api_path(["listeners", MinListenerId]), MinPath = emqx_mgmt_api_test_util:api_path(["listeners", MinListenerId]),
?assertEqual({error, not_found}, is_running(MinListenerId)), ?assertEqual({error, not_found}, is_running(MinListenerId)),
?assertMatch({error, {"HTTP/1.1", 404, _}}, request(get, MinPath, [], [])), ?assertMatch({error, {"HTTP/1.1", 404, _}}, request(get, MinPath, [], [])),
MinConf = #{ MinConf =
<<"id">> => MinListenerId, case OriginListener of
<<"bind">> => <<"0.0.0.0:3883">>, #{
<<"type">> => Type <<"ssl">> :=
}, #{
<<"cacertfile">> := CaCertFile,
<<"certfile">> := CertFile,
<<"keyfile">> := KeyFile
}
} ->
#{
<<"id">> => MinListenerId,
<<"bind">> => <<"0.0.0.0:3883">>,
<<"type">> => Type,
<<"ssl">> => #{
<<"cacertfile">> => CaCertFile,
<<"certfile">> => CertFile,
<<"keyfile">> => KeyFile
}
};
_ ->
#{
<<"id">> => MinListenerId,
<<"bind">> => <<"0.0.0.0:3883">>,
<<"type">> => Type
}
end,
MinCreate = request(post, MinPath, [], MinConf), MinCreate = request(post, MinPath, [], MinConf),
?assertEqual(lists:sort(maps:keys(TcpListener)), lists:sort(maps:keys(MinCreate))), ?assertEqual(lists:sort(maps:keys(OriginListener)), lists:sort(maps:keys(MinCreate))),
MinGet = request(get, MinPath, [], []), MinGet = request(get, MinPath, [], []),
?assertMatch(MinCreate, MinGet), ?assertMatch(MinCreate, MinGet),
?assert(is_running(MinListenerId)), ?assert(is_running(MinListenerId)),
%% bad create(same port) %% bad create(same port)
BadPath = emqx_mgmt_api_test_util:api_path(["listeners", BadId]), BadPath = emqx_mgmt_api_test_util:api_path(["listeners", BadId]),
BadConf = TcpListener#{ BadConf = OriginListener#{
<<"id">> => BadId, <<"id">> => BadId,
<<"bind">> => <<"0.0.0.0:2883">> <<"bind">> => <<"0.0.0.0:2883">>
}, },

View File

@ -25,6 +25,10 @@
%% notice: integer topic for sort response %% notice: integer topic for sort response
-define(TOPIC1, <<"t/0000">>). -define(TOPIC1, <<"t/0000">>).
-define(TOPIC1RH, 1).
-define(TOPIC1RAP, false).
-define(TOPIC1NL, false).
-define(TOPIC1QOS, 1).
-define(TOPIC2, <<"$share/test_group/t/0001">>). -define(TOPIC2, <<"$share/test_group/t/0001">>).
-define(TOPIC2_TOPIC_ONLY, <<"t/0001">>). -define(TOPIC2_TOPIC_ONLY, <<"t/0001">>).
@ -41,9 +45,13 @@ end_per_suite(_) ->
emqx_mgmt_api_test_util:end_suite(). emqx_mgmt_api_test_util:end_suite().
t_subscription_api(_) -> t_subscription_api(_) ->
{ok, Client} = emqtt:start_link(#{username => ?USERNAME, clientid => ?CLIENTID}), {ok, Client} = emqtt:start_link(#{username => ?USERNAME, clientid => ?CLIENTID, proto_ver => v5}),
{ok, _} = emqtt:connect(Client), {ok, _} = emqtt:connect(Client),
{ok, _, _} = emqtt:subscribe(Client, ?TOPIC1), {ok, _, _} = emqtt:subscribe(
Client, [
{?TOPIC1, [{rh, ?TOPIC1RH}, {rap, ?TOPIC1RAP}, {nl, ?TOPIC1NL}, {qos, ?TOPIC1QOS}]}
]
),
{ok, _, _} = emqtt:subscribe(Client, ?TOPIC2), {ok, _, _} = emqtt:subscribe(Client, ?TOPIC2),
Path = emqx_mgmt_api_test_util:api_path(["subscriptions"]), Path = emqx_mgmt_api_test_util:api_path(["subscriptions"]),
{ok, Response} = emqx_mgmt_api_test_util:request_api(get, Path), {ok, Response} = emqx_mgmt_api_test_util:request_api(get, Path),
@ -59,9 +67,21 @@ t_subscription_api(_) ->
maps:get(T1, ?TOPIC_SORT) =< maps:get(T2, ?TOPIC_SORT) maps:get(T1, ?TOPIC_SORT) =< maps:get(T2, ?TOPIC_SORT)
end, end,
[Subscriptions1, Subscriptions2] = lists:sort(Sort, Subscriptions), [Subscriptions1, Subscriptions2] = lists:sort(Sort, Subscriptions),
?assertEqual(maps:get(<<"topic">>, Subscriptions1), ?TOPIC1),
?assertMatch(
#{
<<"topic">> := ?TOPIC1,
<<"qos">> := ?TOPIC1QOS,
<<"nl">> := _,
<<"rap">> := _,
<<"rh">> := ?TOPIC1RH,
<<"clientid">> := ?CLIENTID,
<<"node">> := _
},
Subscriptions1
),
?assertEqual(maps:get(<<"topic">>, Subscriptions2), ?TOPIC2), ?assertEqual(maps:get(<<"topic">>, Subscriptions2), ?TOPIC2),
?assertEqual(maps:get(<<"clientid">>, Subscriptions1), ?CLIENTID),
?assertEqual(maps:get(<<"clientid">>, Subscriptions2), ?CLIENTID), ?assertEqual(maps:get(<<"clientid">>, Subscriptions2), ?CLIENTID),
QS = uri_string:compose_query([ QS = uri_string:compose_query([
@ -94,8 +114,8 @@ t_subscription_api(_) ->
MatchMeta = maps:get(<<"meta">>, MatchData), MatchMeta = maps:get(<<"meta">>, MatchData),
?assertEqual(1, maps:get(<<"page">>, MatchMeta)), ?assertEqual(1, maps:get(<<"page">>, MatchMeta)),
?assertEqual(emqx_mgmt:max_row_limit(), maps:get(<<"limit">>, MatchMeta)), ?assertEqual(emqx_mgmt:max_row_limit(), maps:get(<<"limit">>, MatchMeta)),
?assertEqual(2, maps:get(<<"count">>, MatchMeta)), ?assertEqual(1, maps:get(<<"count">>, MatchMeta)),
MatchSubs = maps:get(<<"data">>, MatchData), MatchSubs = maps:get(<<"data">>, MatchData),
?assertEqual(length(MatchSubs), 2), ?assertEqual(1, length(MatchSubs)),
emqtt:disconnect(Client). emqtt:disconnect(Client).

View File

@ -143,10 +143,10 @@ store(DelayedMsg) ->
gen_server:call(?SERVER, {store, DelayedMsg}, infinity). gen_server:call(?SERVER, {store, DelayedMsg}, infinity).
enable() -> enable() ->
gen_server:call(?SERVER, enable). enable(true).
disable() -> disable() ->
gen_server:call(?SERVER, disable). enable(false).
set_max_delayed_messages(Max) -> set_max_delayed_messages(Max) ->
gen_server:call(?SERVER, {set_max_delayed_messages, Max}). gen_server:call(?SERVER, {set_max_delayed_messages, Max}).
@ -238,21 +238,7 @@ update_config(Config) ->
emqx_conf:update([delayed], Config, #{rawconf_with_defaults => true, override_to => cluster}). emqx_conf:update([delayed], Config, #{rawconf_with_defaults => true, override_to => cluster}).
post_config_update(_KeyPath, Config, _NewConf, _OldConf, _AppEnvs) -> post_config_update(_KeyPath, Config, _NewConf, _OldConf, _AppEnvs) ->
case maps:get(<<"enable">>, Config, undefined) of gen_server:call(?SERVER, {update_config, Config}).
undefined ->
ignore;
true ->
emqx_delayed:enable();
false ->
emqx_delayed:disable()
end,
case maps:get(<<"max_delayed_messages">>, Config, undefined) of
undefined ->
ignore;
Max ->
ok = emqx_delayed:set_max_delayed_messages(Max)
end,
ok.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% gen_server callback %% gen_server callback
@ -262,7 +248,7 @@ init([Opts]) ->
erlang:process_flag(trap_exit, true), erlang:process_flag(trap_exit, true),
emqx_conf:add_handler([delayed], ?MODULE), emqx_conf:add_handler([delayed], ?MODULE),
MaxDelayedMessages = maps:get(max_delayed_messages, Opts, 0), MaxDelayedMessages = maps:get(max_delayed_messages, Opts, 0),
{ok, State =
ensure_stats_event( ensure_stats_event(
ensure_publish_timer(#{ ensure_publish_timer(#{
publish_timer => undefined, publish_timer => undefined,
@ -271,7 +257,8 @@ init([Opts]) ->
stats_fun => undefined, stats_fun => undefined,
max_delayed_messages => MaxDelayedMessages max_delayed_messages => MaxDelayedMessages
}) })
)}. ),
{ok, ensure_enable(emqx:get_config([delayed, enable]), State)}.
handle_call({set_max_delayed_messages, Max}, _From, State) -> handle_call({set_max_delayed_messages, Max}, _From, State) ->
{reply, ok, State#{max_delayed_messages => Max}}; {reply, ok, State#{max_delayed_messages => Max}};
@ -293,12 +280,11 @@ handle_call(
emqx_metrics:inc('messages.delayed'), emqx_metrics:inc('messages.delayed'),
{reply, ok, ensure_publish_timer(Key, State)} {reply, ok, ensure_publish_timer(Key, State)}
end; end;
handle_call(enable, _From, State) -> handle_call({update_config, Config}, _From, #{max_delayed_messages := Max} = State) ->
emqx_hooks:put('message.publish', {?MODULE, on_message_publish, []}), Max2 = maps:get(<<"max_delayed_messages">>, Config, Max),
{reply, ok, State}; State2 = State#{max_delayed_messages := Max2},
handle_call(disable, _From, State) -> State3 = ensure_enable(maps:get(<<"enable">>, Config, undefined), State2),
emqx_hooks:del('message.publish', {?MODULE, on_message_publish}), {reply, ok, State3};
{reply, ok, State};
handle_call(Req, _From, State) -> handle_call(Req, _From, State) ->
?tp(error, emqx_delayed_unexpected_call, #{call => Req}), ?tp(error, emqx_delayed_unexpected_call, #{call => Req}),
{reply, ignored, State}. {reply, ignored, State}.
@ -320,10 +306,10 @@ handle_info(Info, State) ->
?tp(error, emqx_delayed_unexpected_info, #{info => Info}), ?tp(error, emqx_delayed_unexpected_info, #{info => Info}),
{noreply, State}. {noreply, State}.
terminate(_Reason, #{publish_timer := PublishTimer, stats_timer := StatsTimer}) -> terminate(_Reason, #{stats_timer := StatsTimer} = State) ->
emqx_conf:remove_handler([delayed]), emqx_conf:remove_handler([delayed]),
emqx_misc:cancel_timer(PublishTimer), emqx_misc:cancel_timer(StatsTimer),
emqx_misc:cancel_timer(StatsTimer). ensure_enable(false, State).
code_change(_Vsn, State, _Extra) -> code_change(_Vsn, State, _Extra) ->
{ok, State}. {ok, State}.
@ -378,3 +364,23 @@ do_publish(Key = {Ts, _Id}, Now, Acc) when Ts =< Now ->
-spec delayed_count() -> non_neg_integer(). -spec delayed_count() -> non_neg_integer().
delayed_count() -> mnesia:table_info(?TAB, size). delayed_count() -> mnesia:table_info(?TAB, size).
enable(Enable) ->
case emqx:get_raw_config([delayed]) of
#{<<"enable">> := Enable} ->
ok;
Cfg ->
{ok, _} = update_config(Cfg#{<<"enable">> := Enable}),
ok
end.
ensure_enable(true, State) ->
emqx_hooks:put('message.publish', {?MODULE, on_message_publish, []}),
State;
ensure_enable(false, #{publish_timer := PubTimer} = State) ->
emqx_hooks:del('message.publish', {?MODULE, on_message_publish}),
emqx_misc:cancel_timer(PubTimer),
ets:delete_all_objects(?TAB),
State#{publish_timer := undefined, publish_at := 0};
ensure_enable(_, State) ->
State.

View File

@ -55,13 +55,26 @@ end_per_testcase(_Case, _Config) ->
%% Test cases %% Test cases
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
t_load_case(_) -> t_enable_disable_case(_) ->
emqx_delayed:disable(),
Hooks = emqx_hooks:lookup('message.publish'), Hooks = emqx_hooks:lookup('message.publish'),
MFA = {emqx_delayed, on_message_publish, []}, MFA = {emqx_delayed, on_message_publish, []},
?assertEqual(false, lists:keyfind(MFA, 2, Hooks)), ?assertEqual(false, lists:keyfind(MFA, 2, Hooks)),
ok = emqx_delayed:enable(), ok = emqx_delayed:enable(),
Hooks1 = emqx_hooks:lookup('message.publish'), Hooks1 = emqx_hooks:lookup('message.publish'),
?assertNotEqual(false, lists:keyfind(MFA, 2, Hooks1)), ?assertNotEqual(false, lists:keyfind(MFA, 2, Hooks1)),
Ts0 = integer_to_binary(erlang:system_time(second) + 10),
DelayedMsg0 = emqx_message:make(
?MODULE, 1, <<"$delayed/", Ts0/binary, "/publish">>, <<"delayed_abs">>
),
_ = on_message_publish(DelayedMsg0),
?assertMatch(#{data := Datas} when Datas =/= [], emqx_delayed:list(#{})),
emqx_delayed:disable(),
?assertEqual(false, lists:keyfind(MFA, 2, Hooks)),
?assertMatch(#{data := []}, emqx_delayed:list(#{})),
ok. ok.
t_delayed_message(_) -> t_delayed_message(_) ->
@ -76,7 +89,7 @@ t_delayed_message(_) ->
[#delayed_message{msg = #message{payload = Payload}}] = ets:tab2list(emqx_delayed), [#delayed_message{msg = #message{payload = Payload}}] = ets:tab2list(emqx_delayed),
?assertEqual(<<"delayed_m">>, Payload), ?assertEqual(<<"delayed_m">>, Payload),
ct:sleep(2000), ct:sleep(2500),
EmptyKey = mnesia:dirty_all_keys(emqx_delayed), EmptyKey = mnesia:dirty_all_keys(emqx_delayed),
?assertEqual([], EmptyKey). ?assertEqual([], EmptyKey).

View File

@ -98,6 +98,7 @@ t_status(_Config) ->
t_messages(_) -> t_messages(_) ->
clear_all_record(), clear_all_record(),
emqx_delayed:enable(),
{ok, C1} = emqtt:start_link([{clean_start, true}]), {ok, C1} = emqtt:start_link([{clean_start, true}]),
{ok, _} = emqtt:connect(C1), {ok, _} = emqtt:connect(C1),
@ -114,7 +115,7 @@ t_messages(_) ->
end, end,
lists:foreach(Each, lists:seq(1, 5)), lists:foreach(Each, lists:seq(1, 5)),
timer:sleep(500), timer:sleep(1000),
Msgs = get_messages(5), Msgs = get_messages(5),
[First | _] = Msgs, [First | _] = Msgs,
@ -197,6 +198,7 @@ t_messages(_) ->
t_large_payload(_) -> t_large_payload(_) ->
clear_all_record(), clear_all_record(),
emqx_delayed:enable(),
{ok, C1} = emqtt:start_link([{clean_start, true}]), {ok, C1} = emqtt:start_link([{clean_start, true}]),
{ok, _} = emqtt:connect(C1), {ok, _} = emqtt:connect(C1),
@ -209,7 +211,7 @@ t_large_payload(_) ->
[{qos, 0}, {retain, true}] [{qos, 0}, {retain, true}]
), ),
timer:sleep(500), timer:sleep(1000),
[#{msgid := MsgId}] = get_messages(1), [#{msgid := MsgId}] = get_messages(1),
@ -241,8 +243,13 @@ get_messages(Len) ->
{ok, 200, MsgsJson} = request(get, uri(["mqtt", "delayed", "messages"])), {ok, 200, MsgsJson} = request(get, uri(["mqtt", "delayed", "messages"])),
#{data := Msgs} = decode_json(MsgsJson), #{data := Msgs} = decode_json(MsgsJson),
MsgLen = erlang:length(Msgs), MsgLen = erlang:length(Msgs),
?assert( ?assertEqual(
MsgLen =:= Len, Len,
lists:flatten(io_lib:format("message length is:~p~n", [MsgLen])) MsgLen,
lists:flatten(
io_lib:format("message length is:~p~nWhere:~p~nHooks:~p~n", [
MsgLen, erlang:whereis(emqx_delayed), ets:tab2list(emqx_hooks)
])
)
), ),
Msgs. Msgs.

View File

@ -1,4 +1,5 @@
%% -*- mode: erlang -*- %% -*- mode: erlang -*-
{deps, [ {emqx, {path, "../emqx"}} {deps, [{emqx, {path, "../emqx"}}]}.
]}.
{project_plugins, [erlfmt]}.

View File

@ -1,9 +1,9 @@
%% -*- mode: erlang -*- %% -*- mode: erlang -*-
{application, emqx_plugins, {application, emqx_plugins, [
[{description, "EMQX Plugin Management"}, {description, "EMQX Plugin Management"},
{vsn, "0.1.0"}, {vsn, "0.1.0"},
{modules, []}, {modules, []},
{mod, {emqx_plugins_app,[]}}, {mod, {emqx_plugins_app, []}},
{applications, [kernel,stdlib,emqx]}, {applications, [kernel, stdlib, emqx]},
{env, []} {env, []}
]}. ]}.

View File

@ -19,35 +19,37 @@
-include_lib("emqx/include/emqx.hrl"). -include_lib("emqx/include/emqx.hrl").
-include_lib("emqx/include/logger.hrl"). -include_lib("emqx/include/logger.hrl").
-export([ ensure_installed/1 -export([
, ensure_uninstalled/1 ensure_installed/1,
, ensure_enabled/1 ensure_uninstalled/1,
, ensure_enabled/2 ensure_enabled/1,
, ensure_disabled/1 ensure_enabled/2,
, purge/1 ensure_disabled/1,
, delete_package/1 purge/1,
]). delete_package/1
]).
-export([ ensure_started/0 -export([
, ensure_started/1 ensure_started/0,
, ensure_stopped/0 ensure_started/1,
, ensure_stopped/1 ensure_stopped/0,
, restart/1 ensure_stopped/1,
, list/0 restart/1,
, describe/1 list/0,
, parse_name_vsn/1 describe/1,
]). parse_name_vsn/1
]).
-export([ get_config/2 -export([
, put_config/2 get_config/2,
]). put_config/2
]).
%% internal %% internal
-export([ do_ensure_started/1 -export([do_ensure_started/1]).
]).
-export([ -export([
install_dir/0 install_dir/0
]). ]).
-ifdef(TEST). -ifdef(TEST).
-compile(export_all). -compile(export_all).
@ -58,8 +60,10 @@
-include_lib("emqx/include/logger.hrl"). -include_lib("emqx/include/logger.hrl").
-include("emqx_plugins.hrl"). -include("emqx_plugins.hrl").
-type name_vsn() :: binary() | string(). %% "my_plugin-0.1.0" %% "my_plugin-0.1.0"
-type plugin() :: map(). %% the parse result of the JSON info file -type name_vsn() :: binary() | string().
%% the parse result of the JSON info file
-type plugin() :: map().
-type position() :: no_move | front | rear | {before, name_vsn()} | {behind, name_vsn()}. -type position() :: no_move | front | rear | {before, name_vsn()} | {behind, name_vsn()}.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
@ -86,22 +90,25 @@ do_ensure_installed(NameVsn) ->
case erl_tar:extract(TarGz, [{cwd, install_dir()}, compressed]) of case erl_tar:extract(TarGz, [{cwd, install_dir()}, compressed]) of
ok -> ok ->
case read_plugin(NameVsn, #{}) of case read_plugin(NameVsn, #{}) of
{ok, _} -> ok; {ok, _} ->
ok;
{error, Reason} -> {error, Reason} ->
?SLOG(warning, Reason#{msg => "failed_to_read_after_install"}), ?SLOG(warning, Reason#{msg => "failed_to_read_after_install"}),
_ = ensure_uninstalled(NameVsn), _ = ensure_uninstalled(NameVsn),
{error, Reason} {error, Reason}
end; end;
{error, {_, enoent}} -> {error, {_, enoent}} ->
{error, #{ reason => "failed_to_extract_plugin_package" {error, #{
, path => TarGz reason => "failed_to_extract_plugin_package",
, return => not_found path => TarGz,
}}; return => not_found
}};
{error, Reason} -> {error, Reason} ->
{error, #{ reason => "bad_plugin_package" {error, #{
, path => TarGz reason => "bad_plugin_package",
, return => Reason path => TarGz,
}} return => Reason
}}
end. end.
%% @doc Ensure files and directories for the given plugin are delete. %% @doc Ensure files and directories for the given plugin are delete.
@ -110,13 +117,15 @@ do_ensure_installed(NameVsn) ->
ensure_uninstalled(NameVsn) -> ensure_uninstalled(NameVsn) ->
case read_plugin(NameVsn, #{}) of case read_plugin(NameVsn, #{}) of
{ok, #{running_status := RunningSt}} when RunningSt =/= stopped -> {ok, #{running_status := RunningSt}} when RunningSt =/= stopped ->
{error, #{reason => "bad_plugin_running_status", {error, #{
hint => "stop_the_plugin_first" reason => "bad_plugin_running_status",
}}; hint => "stop_the_plugin_first"
}};
{ok, #{config_status := enabled}} -> {ok, #{config_status := enabled}} ->
{error, #{reason => "bad_plugin_config_status", {error, #{
hint => "disable_the_plugin_first" reason => "bad_plugin_config_status",
}}; hint => "disable_the_plugin_first"
}};
_ -> _ ->
purge(NameVsn) purge(NameVsn)
end. end.
@ -141,9 +150,10 @@ ensure_state(NameVsn, Position, State) when is_binary(NameVsn) ->
ensure_state(NameVsn, Position, State) -> ensure_state(NameVsn, Position, State) ->
case read_plugin(NameVsn, #{}) of case read_plugin(NameVsn, #{}) of
{ok, _} -> {ok, _} ->
Item = #{ name_vsn => NameVsn Item = #{
, enable => State name_vsn => NameVsn,
}, enable => State
},
tryit("ensure_state", fun() -> ensure_configured(Item, Position) end); tryit("ensure_state", fun() -> ensure_configured(Item, Position) end);
{error, Reason} -> {error, Reason} ->
{error, Reason} {error, Reason}
@ -175,18 +185,19 @@ add_new_configured(Configured, {Action, NameVsn}, Item) ->
SplitFun = fun(#{name_vsn := Nv}) -> bin(Nv) =/= bin(NameVsn) end, SplitFun = fun(#{name_vsn := Nv}) -> bin(Nv) =/= bin(NameVsn) end,
{Front, Rear} = lists:splitwith(SplitFun, Configured), {Front, Rear} = lists:splitwith(SplitFun, Configured),
Rear =:= [] andalso Rear =:= [] andalso
throw(#{error => "position_anchor_plugin_not_configured", throw(#{
hint => "maybe_install_and_configure", error => "position_anchor_plugin_not_configured",
name_vsn => NameVsn hint => "maybe_install_and_configure",
}), name_vsn => NameVsn
}),
case Action of case Action of
before -> Front ++ [Item | Rear]; before ->
Front ++ [Item | Rear];
behind -> behind ->
[Anchor | Rear0] = Rear, [Anchor | Rear0] = Rear,
Front ++ [Anchor, Item | Rear0] Front ++ [Anchor, Item | Rear0]
end. end.
%% @doc Delete the package file. %% @doc Delete the package file.
-spec delete_package(name_vsn()) -> ok. -spec delete_package(name_vsn()) -> ok.
delete_package(NameVsn) -> delete_package(NameVsn) ->
@ -198,9 +209,11 @@ delete_package(NameVsn) ->
{error, enoent} -> {error, enoent} ->
ok; ok;
{error, Reason} -> {error, Reason} ->
?SLOG(error, #{msg => "failed_to_delete_package_file", ?SLOG(error, #{
path => File, msg => "failed_to_delete_package_file",
reason => Reason}), path => File,
reason => Reason
}),
{error, Reason} {error, Reason}
end. end.
@ -219,9 +232,11 @@ purge(NameVsn) ->
{error, enoent} -> {error, enoent} ->
ok; ok;
{error, Reason} -> {error, Reason} ->
?SLOG(error, #{msg => "failed_to_purge_plugin_dir", ?SLOG(error, #{
dir => Dir, msg => "failed_to_purge_plugin_dir",
reason => Reason}), dir => Dir,
reason => Reason
}),
{error, Reason} {error, Reason}
end. end.
@ -235,10 +250,13 @@ ensure_started() ->
-spec ensure_started(name_vsn()) -> ok | {error, term()}. -spec ensure_started(name_vsn()) -> ok | {error, term()}.
ensure_started(NameVsn) -> ensure_started(NameVsn) ->
case do_ensure_started(NameVsn) of case do_ensure_started(NameVsn) of
ok -> ok; ok ->
ok;
{error, Reason} -> {error, Reason} ->
?SLOG(alert, #{msg => "failed_to_start_plugin", ?SLOG(alert, #{
reason => Reason}), msg => "failed_to_start_plugin",
reason => Reason
}),
{error, Reason} {error, Reason}
end. end.
@ -250,11 +268,13 @@ ensure_stopped() ->
%% @doc Stop a plugin from Management API or CLI. %% @doc Stop a plugin from Management API or CLI.
-spec ensure_stopped(name_vsn()) -> ok | {error, term()}. -spec ensure_stopped(name_vsn()) -> ok | {error, term()}.
ensure_stopped(NameVsn) -> ensure_stopped(NameVsn) ->
tryit("stop_plugin", tryit(
fun() -> "stop_plugin",
Plugin = do_read_plugin(NameVsn), fun() ->
ensure_apps_stopped(Plugin) Plugin = do_read_plugin(NameVsn),
end). ensure_apps_stopped(Plugin)
end
).
%% @doc Stop and then start the plugin. %% @doc Stop and then start the plugin.
restart(NameVsn) -> restart(NameVsn) ->
@ -269,39 +289,45 @@ restart(NameVsn) ->
list() -> list() ->
Pattern = filename:join([install_dir(), "*", "release.json"]), Pattern = filename:join([install_dir(), "*", "release.json"]),
All = lists:filtermap( All = lists:filtermap(
fun(JsonFile) -> fun(JsonFile) ->
case read_plugin({file, JsonFile}, #{}) of case read_plugin({file, JsonFile}, #{}) of
{ok, Info} -> {ok, Info} ->
{true, Info}; {true, Info};
{error, Reason} -> {error, Reason} ->
?SLOG(warning, Reason), ?SLOG(warning, Reason),
false false
end end
end, filelib:wildcard(Pattern)), end,
filelib:wildcard(Pattern)
),
list(configured(), All). list(configured(), All).
%% Make sure configured ones are ordered in front. %% Make sure configured ones are ordered in front.
list([], All) -> All; list([], All) ->
All;
list([#{name_vsn := NameVsn} | Rest], All) -> list([#{name_vsn := NameVsn} | Rest], All) ->
SplitF = fun(#{<<"name">> := Name, <<"rel_vsn">> := Vsn}) -> SplitF = fun(#{<<"name">> := Name, <<"rel_vsn">> := Vsn}) ->
bin([Name, "-", Vsn]) =/= bin(NameVsn) bin([Name, "-", Vsn]) =/= bin(NameVsn)
end, end,
case lists:splitwith(SplitF, All) of case lists:splitwith(SplitF, All) of
{_, []} -> {_, []} ->
?SLOG(warning, #{msg => "configured_plugin_not_installed", ?SLOG(warning, #{
name_vsn => NameVsn msg => "configured_plugin_not_installed",
}), name_vsn => NameVsn
}),
list(Rest, All); list(Rest, All);
{Front, [I | Rear]} -> {Front, [I | Rear]} ->
[I | list(Rest, Front ++ Rear)] [I | list(Rest, Front ++ Rear)]
end. end.
do_ensure_started(NameVsn) -> do_ensure_started(NameVsn) ->
tryit("start_plugins", tryit(
fun() -> "start_plugins",
Plugin = do_read_plugin(NameVsn), fun() ->
ok = load_code_start_apps(NameVsn, Plugin) Plugin = do_read_plugin(NameVsn),
end). ok = load_code_start_apps(NameVsn, Plugin)
end
).
%% try the function, catch 'throw' exceptions as normal 'error' return %% try the function, catch 'throw' exceptions as normal 'error' return
%% other exceptions with stacktrace returned. %% other exceptions with stacktrace returned.
@ -309,25 +335,28 @@ tryit(WhichOp, F) ->
try try
F() F()
catch catch
throw : Reason -> throw:Reason ->
%% thrown exceptions are known errors %% thrown exceptions are known errors
%% translate to a return value without stacktrace %% translate to a return value without stacktrace
{error, Reason}; {error, Reason};
error : Reason : Stacktrace -> error:Reason:Stacktrace ->
%% unexpected errors, log stacktrace %% unexpected errors, log stacktrace
?SLOG(warning, #{ msg => "plugin_op_failed" ?SLOG(warning, #{
, which_op => WhichOp msg => "plugin_op_failed",
, exception => Reason which_op => WhichOp,
, stacktrace => Stacktrace exception => Reason,
}), stacktrace => Stacktrace
}),
{error, {failed, WhichOp}} {error, {failed, WhichOp}}
end. end.
%% read plugin info from the JSON file %% read plugin info from the JSON file
%% returns {ok, Info} or {error, Reason} %% returns {ok, Info} or {error, Reason}
read_plugin(NameVsn, Options) -> read_plugin(NameVsn, Options) ->
tryit("read_plugin_info", tryit(
fun() -> {ok, do_read_plugin(NameVsn, Options)} end). "read_plugin_info",
fun() -> {ok, do_read_plugin(NameVsn, Options)} end
).
do_read_plugin(Plugin) -> do_read_plugin(Plugin, #{}). do_read_plugin(Plugin) -> do_read_plugin(Plugin, #{}).
@ -339,10 +368,11 @@ do_read_plugin({file, InfoFile}, Options) ->
Info1 = plugins_readme(NameVsn, Options, Info0), Info1 = plugins_readme(NameVsn, Options, Info0),
plugin_status(NameVsn, Info1); plugin_status(NameVsn, Info1);
{error, Reason} -> {error, Reason} ->
throw(#{error => "bad_info_file", throw(#{
path => InfoFile, error => "bad_info_file",
return => Reason path => InfoFile,
}) return => Reason
})
end; end;
do_read_plugin(NameVsn, Options) -> do_read_plugin(NameVsn, Options) ->
do_read_plugin({file, info_file(NameVsn)}, Options). do_read_plugin({file, info_file(NameVsn)}, Options).
@ -352,7 +382,8 @@ plugins_readme(NameVsn, #{fill_readme := true}, Info) ->
{ok, Bin} -> Info#{readme => Bin}; {ok, Bin} -> Info#{readme => Bin};
_ -> Info#{readme => <<>>} _ -> Info#{readme => <<>>}
end; end;
plugins_readme(_NameVsn, _Options, Info) -> Info. plugins_readme(_NameVsn, _Options, Info) ->
Info.
plugin_status(NameVsn, Info) -> plugin_status(NameVsn, Info) ->
{AppName, _AppVsn} = parse_name_vsn(NameVsn), {AppName, _AppVsn} = parse_name_vsn(NameVsn),
@ -368,74 +399,91 @@ plugin_status(NameVsn, Info) ->
end, end,
Configured = lists:filtermap( Configured = lists:filtermap(
fun(#{name_vsn := Nv, enable := St}) -> fun(#{name_vsn := Nv, enable := St}) ->
case bin(Nv) =:= bin(NameVsn) of case bin(Nv) =:= bin(NameVsn) of
true -> {true, St}; true -> {true, St};
false -> false false -> false
end end
end, configured()), end,
ConfSt = case Configured of configured()
[] -> not_configured; ),
[true] -> enabled; ConfSt =
[false] -> disabled case Configured of
end, [] -> not_configured;
Info#{ running_status => RunningSt [true] -> enabled;
, config_status => ConfSt [false] -> disabled
end,
Info#{
running_status => RunningSt,
config_status => ConfSt
}. }.
bin(A) when is_atom(A) -> atom_to_binary(A, utf8); bin(A) when is_atom(A) -> atom_to_binary(A, utf8);
bin(L) when is_list(L) -> unicode:characters_to_binary(L, utf8); bin(L) when is_list(L) -> unicode:characters_to_binary(L, utf8);
bin(B) when is_binary(B) -> B. bin(B) when is_binary(B) -> B.
check_plugin(#{ <<"name">> := Name check_plugin(
, <<"rel_vsn">> := Vsn #{
, <<"rel_apps">> := Apps <<"name">> := Name,
, <<"description">> := _ <<"rel_vsn">> := Vsn,
} = Info, NameVsn, File) -> <<"rel_apps">> := Apps,
<<"description">> := _
} = Info,
NameVsn,
File
) ->
case bin(NameVsn) =:= bin([Name, "-", Vsn]) of case bin(NameVsn) =:= bin([Name, "-", Vsn]) of
true -> true ->
try try
[_ | _ ] = Apps, %% assert %% assert
[_ | _] = Apps,
%% validate if the list is all <app>-<vsn> strings %% validate if the list is all <app>-<vsn> strings
lists:foreach(fun parse_name_vsn/1, Apps) lists:foreach(fun parse_name_vsn/1, Apps)
catch catch
_ : _ -> _:_ ->
throw(#{ error => "bad_rel_apps" throw(#{
, rel_apps => Apps error => "bad_rel_apps",
, hint => "A non-empty string list of app_name-app_vsn format" rel_apps => Apps,
}) hint => "A non-empty string list of app_name-app_vsn format"
})
end, end,
Info; Info;
false -> false ->
throw(#{ error => "name_vsn_mismatch" throw(#{
, name_vsn => NameVsn error => "name_vsn_mismatch",
, path => File name_vsn => NameVsn,
, name => Name path => File,
, rel_vsn => Vsn name => Name,
}) rel_vsn => Vsn
})
end; end;
check_plugin(_What, NameVsn, File) -> check_plugin(_What, NameVsn, File) ->
throw(#{ error => "bad_info_file_content" throw(#{
, mandatory_fields => [rel_vsn, name, rel_apps, description] error => "bad_info_file_content",
, name_vsn => NameVsn mandatory_fields => [rel_vsn, name, rel_apps, description],
, path => File name_vsn => NameVsn,
}). path => File
}).
load_code_start_apps(RelNameVsn, #{<<"rel_apps">> := Apps}) -> load_code_start_apps(RelNameVsn, #{<<"rel_apps">> := Apps}) ->
LibDir = filename:join([install_dir(), RelNameVsn]), LibDir = filename:join([install_dir(), RelNameVsn]),
RunningApps = running_apps(), RunningApps = running_apps(),
%% load plugin apps and beam code %% load plugin apps and beam code
AppNames = AppNames =
lists:map(fun(AppNameVsn) -> lists:map(
{AppName, AppVsn} = parse_name_vsn(AppNameVsn), fun(AppNameVsn) ->
EbinDir = filename:join([LibDir, AppNameVsn, "ebin"]), {AppName, AppVsn} = parse_name_vsn(AppNameVsn),
ok = load_plugin_app(AppName, AppVsn, EbinDir, RunningApps), EbinDir = filename:join([LibDir, AppNameVsn, "ebin"]),
AppName ok = load_plugin_app(AppName, AppVsn, EbinDir, RunningApps),
end, Apps), AppName
end,
Apps
),
lists:foreach(fun start_app/1, AppNames). lists:foreach(fun start_app/1, AppNames).
load_plugin_app(AppName, AppVsn, Ebin, RunningApps) -> load_plugin_app(AppName, AppVsn, Ebin, RunningApps) ->
case lists:keyfind(AppName, 1, RunningApps) of case lists:keyfind(AppName, 1, RunningApps) of
false -> do_load_plugin_app(AppName, Ebin); false ->
do_load_plugin_app(AppName, Ebin);
{_, Vsn} -> {_, Vsn} ->
case bin(Vsn) =:= bin(AppVsn) of case bin(Vsn) =:= bin(AppVsn) of
true -> true ->
@ -443,10 +491,12 @@ load_plugin_app(AppName, AppVsn, Ebin, RunningApps) ->
ok; ok;
false -> false ->
%% running but a different version %% running but a different version
?SLOG(warning, #{msg => "plugin_app_already_running", name => AppName, ?SLOG(warning, #{
running_vsn => Vsn, msg => "plugin_app_already_running",
loading_vsn => AppVsn name => AppName,
}) running_vsn => Vsn,
loading_vsn => AppVsn
})
end end
end. end.
@ -457,21 +507,31 @@ do_load_plugin_app(AppName, Ebin) ->
Modules = filelib:wildcard(filename:join([Ebin, "*.beam"])), Modules = filelib:wildcard(filename:join([Ebin, "*.beam"])),
lists:foreach( lists:foreach(
fun(BeamFile) -> fun(BeamFile) ->
Module = list_to_atom(filename:basename(BeamFile, ".beam")), Module = list_to_atom(filename:basename(BeamFile, ".beam")),
case code:load_file(Module) of case code:load_file(Module) of
{module, _} -> ok; {module, _} ->
{error, Reason} -> throw(#{error => "failed_to_load_plugin_beam", ok;
path => BeamFile, {error, Reason} ->
reason => Reason throw(#{
}) error => "failed_to_load_plugin_beam",
end path => BeamFile,
end, Modules), reason => Reason
})
end
end,
Modules
),
case application:load(AppName) of case application:load(AppName) of
ok -> ok; ok ->
{error, {already_loaded, _}} -> ok; ok;
{error, Reason} -> throw(#{error => "failed_to_load_plugin_app", {error, {already_loaded, _}} ->
name => AppName, ok;
reason => Reason}) {error, Reason} ->
throw(#{
error => "failed_to_load_plugin_app",
name => AppName,
reason => Reason
})
end. end.
start_app(App) -> start_app(App) ->
@ -484,11 +544,12 @@ start_app(App) ->
?SLOG(debug, #{msg => "started_plugin_app", app => App}), ?SLOG(debug, #{msg => "started_plugin_app", app => App}),
ok; ok;
{error, {ErrApp, Reason}} -> {error, {ErrApp, Reason}} ->
throw(#{error => "failed_to_start_plugin_app", throw(#{
app => App, error => "failed_to_start_plugin_app",
err_app => ErrApp, app => App,
reason => Reason err_app => ErrApp,
}) reason => Reason
})
end. end.
%% Stop all apps installed by the plugin package, %% Stop all apps installed by the plugin package,
@ -496,18 +557,22 @@ start_app(App) ->
ensure_apps_stopped(#{<<"rel_apps">> := Apps}) -> ensure_apps_stopped(#{<<"rel_apps">> := Apps}) ->
%% load plugin apps and beam code %% load plugin apps and beam code
AppsToStop = AppsToStop =
lists:map(fun(NameVsn) -> lists:map(
{AppName, _AppVsn} = parse_name_vsn(NameVsn), fun(NameVsn) ->
AppName {AppName, _AppVsn} = parse_name_vsn(NameVsn),
end, Apps), AppName
end,
Apps
),
case tryit("stop_apps", fun() -> stop_apps(AppsToStop) end) of case tryit("stop_apps", fun() -> stop_apps(AppsToStop) end) of
{ok, []} -> {ok, []} ->
%% all apps stopped %% all apps stopped
ok; ok;
{ok, Left} -> {ok, Left} ->
?SLOG(warning, #{msg => "unabled_to_stop_plugin_apps", ?SLOG(warning, #{
apps => Left msg => "unabled_to_stop_plugin_apps",
}), apps => Left
}),
ok; ok;
{error, Reason} -> {error, Reason} ->
{error, Reason} {error, Reason}
@ -516,9 +581,12 @@ ensure_apps_stopped(#{<<"rel_apps">> := Apps}) ->
stop_apps(Apps) -> stop_apps(Apps) ->
RunningApps = running_apps(), RunningApps = running_apps(),
case do_stop_apps(Apps, [], RunningApps) of case do_stop_apps(Apps, [], RunningApps) of
{ok, []} -> {ok, []}; %% all stopped %% all stopped
{ok, Remain} when Remain =:= Apps -> {ok, Apps}; %% no progress {ok, []} -> {ok, []};
{ok, Remain} -> stop_apps(Remain) %% try again %% no progress
{ok, Remain} when Remain =:= Apps -> {ok, Apps};
%% try again
{ok, Remain} -> stop_apps(Remain)
end. end.
do_stop_apps([], Remain, _AllApps) -> do_stop_apps([], Remain, _AllApps) ->
@ -553,11 +621,15 @@ unload_moudle_and_app(App) ->
ok. ok.
is_needed_by_any(AppToStop, RunningApps) -> is_needed_by_any(AppToStop, RunningApps) ->
lists:any(fun({RunningApp, _RunningAppVsn}) -> lists:any(
is_needed_by(AppToStop, RunningApp) fun({RunningApp, _RunningAppVsn}) ->
end, RunningApps). is_needed_by(AppToStop, RunningApp)
end,
RunningApps
).
is_needed_by(AppToStop, AppToStop) -> false; is_needed_by(AppToStop, AppToStop) ->
false;
is_needed_by(AppToStop, RunningApp) -> is_needed_by(AppToStop, RunningApp) ->
case application:get_key(RunningApp, applications) of case application:get_key(RunningApp, applications) of
{ok, Deps} -> lists:member(AppToStop, Deps); {ok, Deps} -> lists:member(AppToStop, Deps);
@ -577,7 +649,8 @@ bin_key(Map) when is_map(Map) ->
maps:fold(fun(K, V, Acc) -> Acc#{bin(K) => V} end, #{}, Map); maps:fold(fun(K, V, Acc) -> Acc#{bin(K) => V} end, #{}, Map);
bin_key(List = [#{} | _]) -> bin_key(List = [#{} | _]) ->
lists:map(fun(M) -> bin_key(M) end, List); lists:map(fun(M) -> bin_key(M) end, List);
bin_key(Term) -> Term. bin_key(Term) ->
Term.
get_config(Key, Default) when is_atom(Key) -> get_config(Key, Default) when is_atom(Key) ->
get_config([Key], Default); get_config([Key], Default);
@ -604,8 +677,10 @@ for_plugin(#{name_vsn := NameVsn, enable := true}, Fun) ->
{error, Reason} -> [{NameVsn, Reason}] {error, Reason} -> [{NameVsn, Reason}]
end; end;
for_plugin(#{name_vsn := NameVsn, enable := false}, _Fun) -> for_plugin(#{name_vsn := NameVsn, enable := false}, _Fun) ->
?SLOG(debug, #{msg => "plugin_disabled", ?SLOG(debug, #{
name_vsn => NameVsn}), msg => "plugin_disabled",
name_vsn => NameVsn
}),
[]. [].
parse_name_vsn(NameVsn) when is_binary(NameVsn) -> parse_name_vsn(NameVsn) when is_binary(NameVsn) ->
@ -627,6 +702,9 @@ readme_file(NameVsn) ->
filename:join([dir(NameVsn), "README.md"]). filename:join([dir(NameVsn), "README.md"]).
running_apps() -> running_apps() ->
lists:map(fun({N, _, V}) -> lists:map(
{N, V} fun({N, _, V}) ->
end, application:which_applications(infinity)). {N, V}
end,
application:which_applications(infinity)
).

View File

@ -18,12 +18,14 @@
-behaviour(application). -behaviour(application).
-export([ start/2 -export([
, stop/1 start/2,
]). stop/1
]).
start(_Type, _Args) -> start(_Type, _Args) ->
ok = emqx_plugins:ensure_started(), %% load all pre-configured %% load all pre-configured
ok = emqx_plugins:ensure_started(),
{ok, Sup} = emqx_plugins_sup:start_link(), {ok, Sup} = emqx_plugins_sup:start_link(),
{ok, Sup}. {ok, Sup}.

View File

@ -16,21 +16,23 @@
-module(emqx_plugins_cli). -module(emqx_plugins_cli).
-export([ list/1 -export([
, describe/2 list/1,
, ensure_installed/2 describe/2,
, ensure_uninstalled/2 ensure_installed/2,
, ensure_started/2 ensure_uninstalled/2,
, ensure_stopped/2 ensure_started/2,
, restart/2 ensure_stopped/2,
, ensure_disabled/2 restart/2,
, ensure_enabled/3 ensure_disabled/2,
]). ensure_enabled/3
]).
-include_lib("emqx/include/logger.hrl"). -include_lib("emqx/include/logger.hrl").
-define(PRINT(EXPR, LOG_FUN), -define(PRINT(EXPR, LOG_FUN),
print(NameVsn, fun()-> EXPR end(), LOG_FUN, ?FUNCTION_NAME)). print(NameVsn, fun() -> EXPR end(), LOG_FUN, ?FUNCTION_NAME)
).
list(LogFun) -> list(LogFun) ->
LogFun("~ts~n", [to_json(emqx_plugins:list())]). LogFun("~ts~n", [to_json(emqx_plugins:list())]).
@ -43,9 +45,11 @@ describe(NameVsn, LogFun) ->
%% this should not happen unless the package is manually installed %% this should not happen unless the package is manually installed
%% corrupted packages installed from emqx_plugins:ensure_installed %% corrupted packages installed from emqx_plugins:ensure_installed
%% should not leave behind corrupted files %% should not leave behind corrupted files
?SLOG(error, #{msg => "failed_to_describe_plugin", ?SLOG(error, #{
name_vsn => NameVsn, msg => "failed_to_describe_plugin",
cause => Reason}), name_vsn => NameVsn,
cause => Reason
}),
%% do nothing to the CLI console %% do nothing to the CLI console
ok ok
end. end.
@ -75,14 +79,18 @@ to_json(Input) ->
emqx_logger_jsonfmt:best_effort_json(Input). emqx_logger_jsonfmt:best_effort_json(Input).
print(NameVsn, Res, LogFun, Action) -> print(NameVsn, Res, LogFun, Action) ->
Obj = #{action => Action, Obj = #{
name_vsn => NameVsn}, action => Action,
name_vsn => NameVsn
},
JsonReady = JsonReady =
case Res of case Res of
ok -> ok ->
Obj#{result => ok}; Obj#{result => ok};
{error, Reason} -> {error, Reason} ->
Obj#{result => not_ok, Obj#{
cause => Reason} result => not_ok,
cause => Reason
}
end, end,
LogFun("~ts~n", [to_json(JsonReady)]). LogFun("~ts~n", [to_json(JsonReady)]).

View File

@ -18,10 +18,11 @@
-behaviour(hocon_schema). -behaviour(hocon_schema).
-export([ roots/0 -export([
, fields/1 roots/0,
, namespace/0 fields/1,
]). namespace/0
]).
-include_lib("hocon/include/hoconsc.hrl"). -include_lib("hocon/include/hoconsc.hrl").
-include("emqx_plugins.hrl"). -include("emqx_plugins.hrl").
@ -31,31 +32,41 @@ namespace() -> "plugin".
roots() -> [?CONF_ROOT]. roots() -> [?CONF_ROOT].
fields(?CONF_ROOT) -> fields(?CONF_ROOT) ->
#{fields => root_fields(), #{
desc => ?DESC(?CONF_ROOT) fields => root_fields(),
}; desc => ?DESC(?CONF_ROOT)
};
fields(state) -> fields(state) ->
#{ fields => state_fields(), #{
desc => ?DESC(state) fields => state_fields(),
}. desc => ?DESC(state)
}.
state_fields() -> state_fields() ->
[ {name_vsn, [
hoconsc:mk(string(), {name_vsn,
#{ desc => ?DESC(name_vsn) hoconsc:mk(
, required => true string(),
})} #{
, {enable, desc => ?DESC(name_vsn),
hoconsc:mk(boolean(), required => true
#{ desc => ?DESC(enable) }
, required => true )},
})} {enable,
hoconsc:mk(
boolean(),
#{
desc => ?DESC(enable),
required => true
}
)}
]. ].
root_fields() -> root_fields() ->
[ {states, fun states/1} [
, {install_dir, fun install_dir/1} {states, fun states/1},
, {check_interval, fun check_interval/1} {install_dir, fun install_dir/1},
{check_interval, fun check_interval/1}
]. ].
states(type) -> hoconsc:array(hoconsc:ref(?MODULE, state)); states(type) -> hoconsc:array(hoconsc:ref(?MODULE, state));
@ -66,7 +77,8 @@ states(_) -> undefined.
install_dir(type) -> string(); install_dir(type) -> string();
install_dir(required) -> false; install_dir(required) -> false;
install_dir(default) -> "plugins"; %% runner's root dir %% runner's root dir
install_dir(default) -> "plugins";
install_dir(T) when T =/= desc -> undefined; install_dir(T) when T =/= desc -> undefined;
install_dir(desc) -> ?DESC(install_dir). install_dir(desc) -> ?DESC(install_dir).

View File

@ -29,7 +29,8 @@ init([]) ->
%% TODO: Add monitor plugins change. %% TODO: Add monitor plugins change.
Monitor = emqx_plugins_monitor, Monitor = emqx_plugins_monitor,
_Children = [ _Children = [
#{id => Monitor, #{
id => Monitor,
start => {Monitor, start_link, []}, start => {Monitor, start_link, []},
restart => permanent, restart => permanent,
shutdown => brutal_kill, shutdown => brutal_kill,

View File

@ -48,9 +48,12 @@ end_per_suite(Config) ->
init_per_testcase(TestCase, Config) -> init_per_testcase(TestCase, Config) ->
emqx_plugins:put_configured([]), emqx_plugins:put_configured([]),
lists:foreach(fun(#{<<"name">> := Name, <<"rel_vsn">> := Vsn}) -> lists:foreach(
emqx_plugins:purge(bin([Name, "-", Vsn])) fun(#{<<"name">> := Name, <<"rel_vsn">> := Vsn}) ->
end, emqx_plugins:list()), emqx_plugins:purge(bin([Name, "-", Vsn]))
end,
emqx_plugins:list()
),
?MODULE:TestCase({init, Config}). ?MODULE:TestCase({init, Config}).
end_per_testcase(TestCase, Config) -> end_per_testcase(TestCase, Config) ->
@ -59,35 +62,46 @@ end_per_testcase(TestCase, Config) ->
build_demo_plugin_package() -> build_demo_plugin_package() ->
build_demo_plugin_package( build_demo_plugin_package(
#{ target_path => "_build/default/emqx_plugrel" #{
, release_name => "emqx_plugin_template" target_path => "_build/default/emqx_plugrel",
, git_url => "https://github.com/emqx/emqx-plugin-template.git" release_name => "emqx_plugin_template",
, vsn => ?EMQX_PLUGIN_TEMPLATE_VSN git_url => "https://github.com/emqx/emqx-plugin-template.git",
, workdir => "demo_src" vsn => ?EMQX_PLUGIN_TEMPLATE_VSN,
, shdir => emqx_plugins:install_dir() workdir => "demo_src",
}). shdir => emqx_plugins:install_dir()
}
).
build_demo_plugin_package(#{ target_path := TargetPath build_demo_plugin_package(
, release_name := ReleaseName #{
, git_url := GitUrl target_path := TargetPath,
, vsn := PluginVsn release_name := ReleaseName,
, workdir := DemoWorkDir git_url := GitUrl,
, shdir := WorkDir vsn := PluginVsn,
} = Opts) -> workdir := DemoWorkDir,
shdir := WorkDir
} = Opts
) ->
BuildSh = filename:join([WorkDir, "build-demo-plugin.sh"]), BuildSh = filename:join([WorkDir, "build-demo-plugin.sh"]),
Cmd = string:join([ BuildSh Cmd = string:join(
, PluginVsn [
, TargetPath BuildSh,
, ReleaseName PluginVsn,
, GitUrl TargetPath,
, DemoWorkDir ReleaseName,
], GitUrl,
" "), DemoWorkDir
],
" "
),
case emqx_run_sh:do(Cmd, [{cd, WorkDir}]) of case emqx_run_sh:do(Cmd, [{cd, WorkDir}]) of
{ok, _} -> {ok, _} ->
Pkg = filename:join([WorkDir, ReleaseName ++ "-" ++ Pkg = filename:join([
PluginVsn ++ WorkDir,
?PACKAGE_SUFFIX]), ReleaseName ++ "-" ++
PluginVsn ++
?PACKAGE_SUFFIX
]),
case filelib:is_regular(Pkg) of case filelib:is_regular(Pkg) of
true -> Opts#{package => Pkg}; true -> Opts#{package => Pkg};
false -> error(#{reason => unexpected_build_result, not_found => Pkg}) false -> error(#{reason => unexpected_build_result, not_found => Pkg})
@ -104,16 +118,19 @@ bin(B) when is_binary(B) -> B.
t_demo_install_start_stop_uninstall({init, Config}) -> t_demo_install_start_stop_uninstall({init, Config}) ->
Opts = #{package := Package} = build_demo_plugin_package(), Opts = #{package := Package} = build_demo_plugin_package(),
NameVsn = filename:basename(Package, ?PACKAGE_SUFFIX), NameVsn = filename:basename(Package, ?PACKAGE_SUFFIX),
[ {name_vsn, NameVsn} [
, {plugin_opts, Opts} {name_vsn, NameVsn},
| Config {plugin_opts, Opts}
| Config
]; ];
t_demo_install_start_stop_uninstall({'end', _Config}) -> ok; t_demo_install_start_stop_uninstall({'end', _Config}) ->
ok;
t_demo_install_start_stop_uninstall(Config) -> t_demo_install_start_stop_uninstall(Config) ->
NameVsn = proplists:get_value(name_vsn, Config), NameVsn = proplists:get_value(name_vsn, Config),
#{ release_name := ReleaseName #{
, vsn := PluginVsn release_name := ReleaseName,
} = proplists:get_value(plugin_opts, Config), vsn := PluginVsn
} = proplists:get_value(plugin_opts, Config),
ok = emqx_plugins:ensure_installed(NameVsn), ok = emqx_plugins:ensure_installed(NameVsn),
%% idempotent %% idempotent
ok = emqx_plugins:ensure_installed(NameVsn), ok = emqx_plugins:ensure_installed(NameVsn),
@ -129,8 +146,10 @@ t_demo_install_start_stop_uninstall(Config) ->
ok = assert_app_running(map_sets, true), ok = assert_app_running(map_sets, true),
%% running app can not be un-installed %% running app can not be un-installed
?assertMatch({error, _}, ?assertMatch(
emqx_plugins:ensure_uninstalled(NameVsn)), {error, _},
emqx_plugins:ensure_uninstalled(NameVsn)
),
%% stop %% stop
ok = emqx_plugins:ensure_stopped(NameVsn), ok = emqx_plugins:ensure_stopped(NameVsn),
@ -143,9 +162,15 @@ t_demo_install_start_stop_uninstall(Config) ->
%% still listed after stopped %% still listed after stopped
ReleaseNameBin = list_to_binary(ReleaseName), ReleaseNameBin = list_to_binary(ReleaseName),
PluginVsnBin = list_to_binary(PluginVsn), PluginVsnBin = list_to_binary(PluginVsn),
?assertMatch([#{<<"name">> := ReleaseNameBin, ?assertMatch(
<<"rel_vsn">> := PluginVsnBin [
}], emqx_plugins:list()), #{
<<"name">> := ReleaseNameBin,
<<"rel_vsn">> := PluginVsnBin
}
],
emqx_plugins:list()
),
ok = emqx_plugins:ensure_uninstalled(NameVsn), ok = emqx_plugins:ensure_uninstalled(NameVsn),
?assertEqual([], emqx_plugins:list()), ?assertEqual([], emqx_plugins:list()),
ok. ok.
@ -164,23 +189,29 @@ t_position({init, Config}) ->
#{package := Package} = build_demo_plugin_package(), #{package := Package} = build_demo_plugin_package(),
NameVsn = filename:basename(Package, ?PACKAGE_SUFFIX), NameVsn = filename:basename(Package, ?PACKAGE_SUFFIX),
[{name_vsn, NameVsn} | Config]; [{name_vsn, NameVsn} | Config];
t_position({'end', _Config}) -> ok; t_position({'end', _Config}) ->
ok;
t_position(Config) -> t_position(Config) ->
NameVsn = proplists:get_value(name_vsn, Config), NameVsn = proplists:get_value(name_vsn, Config),
ok = emqx_plugins:ensure_installed(NameVsn), ok = emqx_plugins:ensure_installed(NameVsn),
ok = emqx_plugins:ensure_enabled(NameVsn), ok = emqx_plugins:ensure_enabled(NameVsn),
FakeInfo = "name=position, rel_vsn=\"2\", rel_apps=[\"position-9\"]," FakeInfo =
"description=\"desc fake position app\"", "name=position, rel_vsn=\"2\", rel_apps=[\"position-9\"],"
"description=\"desc fake position app\"",
PosApp2 = <<"position-2">>, PosApp2 = <<"position-2">>,
ok = write_info_file(Config, PosApp2, FakeInfo), ok = write_info_file(Config, PosApp2, FakeInfo),
%% fake a disabled plugin in config %% fake a disabled plugin in config
ok = emqx_plugins:ensure_state(PosApp2, {before, NameVsn}, false), ok = emqx_plugins:ensure_state(PosApp2, {before, NameVsn}, false),
ListFun = fun() -> ListFun = fun() ->
lists:map(fun( lists:map(
#{<<"name">> := Name, <<"rel_vsn">> := Vsn}) -> fun(
<<Name/binary, "-", Vsn/binary>> #{<<"name">> := Name, <<"rel_vsn">> := Vsn}
end, emqx_plugins:list()) ) ->
end, <<Name/binary, "-", Vsn/binary>>
end,
emqx_plugins:list()
)
end,
?assertEqual([PosApp2, list_to_binary(NameVsn)], ListFun()), ?assertEqual([PosApp2, list_to_binary(NameVsn)], ListFun()),
emqx_plugins:ensure_enabled(PosApp2, {behind, NameVsn}), emqx_plugins:ensure_enabled(PosApp2, {behind, NameVsn}),
?assertEqual([list_to_binary(NameVsn), PosApp2], ListFun()), ?assertEqual([list_to_binary(NameVsn), PosApp2], ListFun()),
@ -197,13 +228,15 @@ t_start_restart_and_stop({init, Config}) ->
#{package := Package} = build_demo_plugin_package(), #{package := Package} = build_demo_plugin_package(),
NameVsn = filename:basename(Package, ?PACKAGE_SUFFIX), NameVsn = filename:basename(Package, ?PACKAGE_SUFFIX),
[{name_vsn, NameVsn} | Config]; [{name_vsn, NameVsn} | Config];
t_start_restart_and_stop({'end', _Config}) -> ok; t_start_restart_and_stop({'end', _Config}) ->
ok;
t_start_restart_and_stop(Config) -> t_start_restart_and_stop(Config) ->
NameVsn = proplists:get_value(name_vsn, Config), NameVsn = proplists:get_value(name_vsn, Config),
ok = emqx_plugins:ensure_installed(NameVsn), ok = emqx_plugins:ensure_installed(NameVsn),
ok = emqx_plugins:ensure_enabled(NameVsn), ok = emqx_plugins:ensure_enabled(NameVsn),
FakeInfo = "name=bar, rel_vsn=\"2\", rel_apps=[\"bar-9\"]," FakeInfo =
"description=\"desc bar\"", "name=bar, rel_vsn=\"2\", rel_apps=[\"bar-9\"],"
"description=\"desc bar\"",
Bar2 = <<"bar-2">>, Bar2 = <<"bar-2">>,
ok = write_info_file(Config, Bar2, FakeInfo), ok = write_info_file(Config, Bar2, FakeInfo),
%% fake a disabled plugin in config %% fake a disabled plugin in config
@ -216,8 +249,10 @@ t_start_restart_and_stop(Config) ->
%% fake enable bar-2 %% fake enable bar-2
ok = emqx_plugins:ensure_state(Bar2, rear, true), ok = emqx_plugins:ensure_state(Bar2, rear, true),
%% should cause an error %% should cause an error
?assertError(#{function := _, errors := [_ | _]}, ?assertError(
emqx_plugins:ensure_started()), #{function := _, errors := [_ | _]},
emqx_plugins:ensure_started()
),
%% but demo plugin should still be running %% but demo plugin should still be running
assert_app_running(emqx_plugin_template, true), assert_app_running(emqx_plugin_template, true),
@ -255,9 +290,13 @@ t_enable_disable(Config) ->
?assertEqual([#{name_vsn => NameVsn, enable => false}], emqx_plugins:configured()), ?assertEqual([#{name_vsn => NameVsn, enable => false}], emqx_plugins:configured()),
ok = emqx_plugins:ensure_enabled(bin(NameVsn)), ok = emqx_plugins:ensure_enabled(bin(NameVsn)),
?assertEqual([#{name_vsn => NameVsn, enable => true}], emqx_plugins:configured()), ?assertEqual([#{name_vsn => NameVsn, enable => true}], emqx_plugins:configured()),
?assertMatch({error, #{reason := "bad_plugin_config_status", ?assertMatch(
hint := "disable_the_plugin_first" {error, #{
}}, emqx_plugins:ensure_uninstalled(NameVsn)), reason := "bad_plugin_config_status",
hint := "disable_the_plugin_first"
}},
emqx_plugins:ensure_uninstalled(NameVsn)
),
ok = emqx_plugins:ensure_disabled(bin(NameVsn)), ok = emqx_plugins:ensure_disabled(bin(NameVsn)),
ok = emqx_plugins:ensure_uninstalled(NameVsn), ok = emqx_plugins:ensure_uninstalled(NameVsn),
?assertMatch({error, _}, emqx_plugins:ensure_enabled(NameVsn)), ?assertMatch({error, _}, emqx_plugins:ensure_enabled(NameVsn)),
@ -271,20 +310,28 @@ assert_app_running(Name, false) ->
AllApps = application:which_applications(), AllApps = application:which_applications(),
?assertEqual(false, lists:keyfind(Name, 1, AllApps)). ?assertEqual(false, lists:keyfind(Name, 1, AllApps)).
t_bad_tar_gz({init, Config}) -> Config; t_bad_tar_gz({init, Config}) ->
t_bad_tar_gz({'end', _Config}) -> ok; Config;
t_bad_tar_gz({'end', _Config}) ->
ok;
t_bad_tar_gz(Config) -> t_bad_tar_gz(Config) ->
WorkDir = proplists:get_value(data_dir, Config), WorkDir = proplists:get_value(data_dir, Config),
FakeTarTz = filename:join([WorkDir, "fake-vsn.tar.gz"]), FakeTarTz = filename:join([WorkDir, "fake-vsn.tar.gz"]),
ok = file:write_file(FakeTarTz, "a\n"), ok = file:write_file(FakeTarTz, "a\n"),
?assertMatch({error, #{reason := "bad_plugin_package", ?assertMatch(
return := eof {error, #{
}}, reason := "bad_plugin_package",
emqx_plugins:ensure_installed("fake-vsn")), return := eof
?assertMatch({error, #{reason := "failed_to_extract_plugin_package", }},
return := not_found emqx_plugins:ensure_installed("fake-vsn")
}}, ),
emqx_plugins:ensure_installed("nonexisting")), ?assertMatch(
{error, #{
reason := "failed_to_extract_plugin_package",
return := not_found
}},
emqx_plugins:ensure_installed("nonexisting")
),
?assertEqual([], emqx_plugins:list()), ?assertEqual([], emqx_plugins:list()),
ok = emqx_plugins:delete_package("fake-vsn"), ok = emqx_plugins:delete_package("fake-vsn"),
%% idempotent %% idempotent
@ -292,8 +339,10 @@ t_bad_tar_gz(Config) ->
%% create a corrupted .tar.gz %% create a corrupted .tar.gz
%% failed install attempts should not leave behind extracted dir %% failed install attempts should not leave behind extracted dir
t_bad_tar_gz2({init, Config}) -> Config; t_bad_tar_gz2({init, Config}) ->
t_bad_tar_gz2({'end', _Config}) -> ok; Config;
t_bad_tar_gz2({'end', _Config}) ->
ok;
t_bad_tar_gz2(Config) -> t_bad_tar_gz2(Config) ->
WorkDir = proplists:get_value(data_dir, Config), WorkDir = proplists:get_value(data_dir, Config),
NameVsn = "foo-0.2", NameVsn = "foo-0.2",
@ -310,45 +359,57 @@ t_bad_tar_gz2(Config) ->
?assertEqual({error, enoent}, file:read_file_info(emqx_plugins:dir(NameVsn))), ?assertEqual({error, enoent}, file:read_file_info(emqx_plugins:dir(NameVsn))),
ok = emqx_plugins:delete_package(NameVsn). ok = emqx_plugins:delete_package(NameVsn).
t_bad_info_json({init, Config}) -> Config; t_bad_info_json({init, Config}) ->
t_bad_info_json({'end', _}) -> ok; Config;
t_bad_info_json({'end', _}) ->
ok;
t_bad_info_json(Config) -> t_bad_info_json(Config) ->
NameVsn = "test-2", NameVsn = "test-2",
ok = write_info_file(Config, NameVsn, "bad-syntax"), ok = write_info_file(Config, NameVsn, "bad-syntax"),
?assertMatch({error, #{error := "bad_info_file", ?assertMatch(
return := {parse_error, _} {error, #{
}}, error := "bad_info_file",
emqx_plugins:describe(NameVsn)), return := {parse_error, _}
}},
emqx_plugins:describe(NameVsn)
),
ok = write_info_file(Config, NameVsn, "{\"bad\": \"obj\"}"), ok = write_info_file(Config, NameVsn, "{\"bad\": \"obj\"}"),
?assertMatch({error, #{error := "bad_info_file_content", ?assertMatch(
mandatory_fields := _ {error, #{
}}, error := "bad_info_file_content",
emqx_plugins:describe(NameVsn)), mandatory_fields := _
}},
emqx_plugins:describe(NameVsn)
),
?assertEqual([], emqx_plugins:list()), ?assertEqual([], emqx_plugins:list()),
emqx_plugins:purge(NameVsn), emqx_plugins:purge(NameVsn),
ok. ok.
t_elixir_plugin({init, Config}) -> t_elixir_plugin({init, Config}) ->
Opts0 = Opts0 =
#{ target_path => "_build/prod/plugrelex/elixir_plugin_template" #{
, release_name => "elixir_plugin_template" target_path => "_build/prod/plugrelex/elixir_plugin_template",
, git_url => "https://github.com/emqx/emqx-elixir-plugin.git" release_name => "elixir_plugin_template",
, vsn => ?EMQX_ELIXIR_PLUGIN_TEMPLATE_VSN git_url => "https://github.com/emqx/emqx-elixir-plugin.git",
, workdir => "demo_src_elixir" vsn => ?EMQX_ELIXIR_PLUGIN_TEMPLATE_VSN,
, shdir => emqx_plugins:install_dir() workdir => "demo_src_elixir",
}, shdir => emqx_plugins:install_dir()
},
Opts = #{package := Package} = build_demo_plugin_package(Opts0), Opts = #{package := Package} = build_demo_plugin_package(Opts0),
NameVsn = filename:basename(Package, ?PACKAGE_SUFFIX), NameVsn = filename:basename(Package, ?PACKAGE_SUFFIX),
[ {name_vsn, NameVsn} [
, {plugin_opts, Opts} {name_vsn, NameVsn},
| Config {plugin_opts, Opts}
| Config
]; ];
t_elixir_plugin({'end', _Config}) -> ok; t_elixir_plugin({'end', _Config}) ->
ok;
t_elixir_plugin(Config) -> t_elixir_plugin(Config) ->
NameVsn = proplists:get_value(name_vsn, Config), NameVsn = proplists:get_value(name_vsn, Config),
#{ release_name := ReleaseName #{
, vsn := PluginVsn release_name := ReleaseName,
} = proplists:get_value(plugin_opts, Config), vsn := PluginVsn
} = proplists:get_value(plugin_opts, Config),
ok = emqx_plugins:ensure_installed(NameVsn), ok = emqx_plugins:ensure_installed(NameVsn),
%% idempotent %% idempotent
ok = emqx_plugins:ensure_installed(NameVsn), ok = emqx_plugins:ensure_installed(NameVsn),
@ -368,8 +429,10 @@ t_elixir_plugin(Config) ->
3 = 'Elixir.Kernel':'+'(1, 2), 3 = 'Elixir.Kernel':'+'(1, 2),
%% running app can not be un-installed %% running app can not be un-installed
?assertMatch({error, _}, ?assertMatch(
emqx_plugins:ensure_uninstalled(NameVsn)), {error, _},
emqx_plugins:ensure_uninstalled(NameVsn)
),
%% stop %% stop
ok = emqx_plugins:ensure_stopped(NameVsn), ok = emqx_plugins:ensure_stopped(NameVsn),
@ -382,9 +445,15 @@ t_elixir_plugin(Config) ->
%% still listed after stopped %% still listed after stopped
ReleaseNameBin = list_to_binary(ReleaseName), ReleaseNameBin = list_to_binary(ReleaseName),
PluginVsnBin = list_to_binary(PluginVsn), PluginVsnBin = list_to_binary(PluginVsn),
?assertMatch([#{<<"name">> := ReleaseNameBin, ?assertMatch(
<<"rel_vsn">> := PluginVsnBin [
}], emqx_plugins:list()), #{
<<"name">> := ReleaseNameBin,
<<"rel_vsn">> := PluginVsnBin
}
],
emqx_plugins:list()
),
ok = emqx_plugins:ensure_uninstalled(NameVsn), ok = emqx_plugins:ensure_uninstalled(NameVsn),
?assertEqual([], emqx_plugins:list()), ?assertEqual([], emqx_plugins:list()),
ok. ok.

View File

@ -23,23 +23,26 @@
ensure_configured_test_todo() -> ensure_configured_test_todo() ->
meck_emqx(), meck_emqx(),
try test_ensure_configured() try
after emqx_plugins:put_configured([]) test_ensure_configured()
after
emqx_plugins:put_configured([])
end, end,
meck:unload(emqx). meck:unload(emqx).
test_ensure_configured() -> test_ensure_configured() ->
ok = emqx_plugins:put_configured([]), ok = emqx_plugins:put_configured([]),
P1 =#{name_vsn => "p-1", enable => true}, P1 = #{name_vsn => "p-1", enable => true},
P2 =#{name_vsn => "p-2", enable => true}, P2 = #{name_vsn => "p-2", enable => true},
P3 =#{name_vsn => "p-3", enable => false}, P3 = #{name_vsn => "p-3", enable => false},
emqx_plugins:ensure_configured(P1, front), emqx_plugins:ensure_configured(P1, front),
emqx_plugins:ensure_configured(P2, {before, <<"p-1">>}), emqx_plugins:ensure_configured(P2, {before, <<"p-1">>}),
emqx_plugins:ensure_configured(P3, {before, <<"p-1">>}), emqx_plugins:ensure_configured(P3, {before, <<"p-1">>}),
?assertEqual([P2, P3, P1], emqx_plugins:configured()), ?assertEqual([P2, P3, P1], emqx_plugins:configured()),
?assertThrow(#{error := "position_anchor_plugin_not_configured"}, ?assertThrow(
emqx_plugins:ensure_configured(P3, {before, <<"unknown-x">>})). #{error := "position_anchor_plugin_not_configured"},
emqx_plugins:ensure_configured(P3, {before, <<"unknown-x">>})
).
read_plugin_test() -> read_plugin_test() ->
meck_emqx(), meck_emqx(),
@ -47,16 +50,20 @@ read_plugin_test() ->
fun(_Dir) -> fun(_Dir) ->
NameVsn = "bar-5", NameVsn = "bar-5",
InfoFile = emqx_plugins:info_file(NameVsn), InfoFile = emqx_plugins:info_file(NameVsn),
FakeInfo = "name=bar, rel_vsn=\"5\", rel_apps=[justname_no_vsn]," FakeInfo =
"description=\"desc bar\"", "name=bar, rel_vsn=\"5\", rel_apps=[justname_no_vsn],"
"description=\"desc bar\"",
try try
ok = write_file(InfoFile, FakeInfo), ok = write_file(InfoFile, FakeInfo),
?assertMatch({error, #{error := "bad_rel_apps"}}, ?assertMatch(
emqx_plugins:read_plugin(NameVsn, #{})) {error, #{error := "bad_rel_apps"}},
emqx_plugins:read_plugin(NameVsn, #{})
)
after after
emqx_plugins:purge(NameVsn) emqx_plugins:purge(NameVsn)
end end
end), end
),
meck:unload(emqx). meck:unload(emqx).
with_rand_install_dir(F) -> with_rand_install_dir(F) ->
@ -91,7 +98,8 @@ delete_package_test() ->
Dir = File, Dir = File,
ok = filelib:ensure_dir(filename:join([Dir, "foo"])), ok = filelib:ensure_dir(filename:join([Dir, "foo"])),
?assertMatch({error, _}, emqx_plugins:delete_package("a-1")) ?assertMatch({error, _}, emqx_plugins:delete_package("a-1"))
end), end
),
meck:unload(emqx). meck:unload(emqx).
%% purge plugin's install dir should mostly work and return ok %% purge plugin's install dir should mostly work and return ok
@ -110,15 +118,19 @@ purge_test() ->
%% write a file for the dir path %% write a file for the dir path
ok = file:write_file(Dir, "a"), ok = file:write_file(Dir, "a"),
?assertEqual(ok, emqx_plugins:purge("a-1")) ?assertEqual(ok, emqx_plugins:purge("a-1"))
end), end
),
meck:unload(emqx). meck:unload(emqx).
meck_emqx() -> meck_emqx() ->
meck:new(emqx, [unstick, passthrough]), meck:new(emqx, [unstick, passthrough]),
meck:expect(emqx, update_config, meck:expect(
emqx,
update_config,
fun(Path, Values, _Opts) -> fun(Path, Values, _Opts) ->
emqx_config:put(Path, Values) emqx_config:put(Path, Values)
end), end
),
%meck:expect(emqx, get_config, %meck:expect(emqx, get_config,
% fun(KeyPath, Default) -> % fun(KeyPath, Default) ->
% Map = emqx:get_raw_config(KeyPath, Default), % Map = emqx:get_raw_config(KeyPath, Default),

View File

@ -1,23 +1,32 @@
%% -*- mode: erlang -*- %% -*- mode: erlang -*-
{deps, {deps, [
[ {emqx, {path, "../emqx"}}, {emqx, {path, "../emqx"}},
%% FIXME: tag this as v3.1.3 %% FIXME: tag this as v3.1.3
{prometheus, {git, "https://github.com/deadtrickster/prometheus.erl", {tag, "v4.8.1"}}}, {prometheus, {git, "https://github.com/deadtrickster/prometheus.erl", {tag, "v4.8.1"}}},
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.27.3"}}} {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.27.4"}}}
]}. ]}.
{edoc_opts, [{preprocess, true}]}. {edoc_opts, [{preprocess, true}]}.
{erl_opts, [warn_unused_vars, {erl_opts, [
warn_shadow_vars, warn_unused_vars,
warn_unused_import, warn_shadow_vars,
warn_obsolete_guard, warn_unused_import,
debug_info, warn_obsolete_guard,
{parse_transform}]}. debug_info,
{parse_transform}
]}.
{xref_checks, [undefined_function_calls, undefined_functions, {xref_checks, [
locals_not_used, deprecated_function_calls, undefined_function_calls,
warnings_as_errors, deprecated_functions]}. undefined_functions,
locals_not_used,
deprecated_function_calls,
warnings_as_errors,
deprecated_functions
]}.
{cover_enabled, true}. {cover_enabled, true}.
{cover_opts, [verbose]}. {cover_opts, [verbose]}.
{cover_export_enabled, true}. {cover_export_enabled, true}.
{project_plugins, [erlfmt]}.

View File

@ -1,15 +1,17 @@
%% -*- mode: erlang -*- %% -*- mode: erlang -*-
{application, emqx_prometheus, {application, emqx_prometheus, [
[{description, "Prometheus for EMQX"}, {description, "Prometheus for EMQX"},
{vsn, "5.0.0"}, % strict semver, bump manually! % strict semver, bump manually!
{modules, []}, {vsn, "5.0.0"},
{registered, [emqx_prometheus_sup]}, {modules, []},
{applications, [kernel,stdlib,prometheus,emqx]}, {registered, [emqx_prometheus_sup]},
{mod, {emqx_prometheus_app,[]}}, {applications, [kernel, stdlib, prometheus, emqx]},
{env, []}, {mod, {emqx_prometheus_app, []}},
{licenses, ["Apache-2.0"]}, {env, []},
{maintainers, ["EMQX Team <contact@emqx.io>"]}, {licenses, ["Apache-2.0"]},
{links, [{"Homepage", "https://emqx.io/"}, {maintainers, ["EMQX Team <contact@emqx.io>"]},
{"Github", "https://github.com/emqx/emqx-prometheus"} {links, [
]} {"Homepage", "https://emqx.io/"},
]}. {"Github", "https://github.com/emqx/emqx-prometheus"}
]}
]}.

View File

@ -28,38 +28,44 @@
-include_lib("prometheus/include/prometheus_model.hrl"). -include_lib("prometheus/include/prometheus_model.hrl").
-include_lib("emqx/include/logger.hrl"). -include_lib("emqx/include/logger.hrl").
-import(prometheus_model_helpers, -import(
[ create_mf/5 prometheus_model_helpers,
, gauge_metric/1 [
, counter_metric/1 create_mf/5,
]). gauge_metric/1,
counter_metric/1
]
).
-export([ update/1 -export([
, start/0 update/1,
, stop/0 start/0,
, restart/0 stop/0,
% for rpc restart/0,
, do_start/0 % for rpc
, do_stop/0 do_start/0,
]). do_stop/0
]).
%% APIs %% APIs
-export([start_link/1]). -export([start_link/1]).
%% gen_server callbacks %% gen_server callbacks
-export([ init/1 -export([
, handle_call/3 init/1,
, handle_cast/2 handle_call/3,
, handle_info/2 handle_cast/2,
, code_change/3 handle_info/2,
, terminate/2 code_change/3,
]). terminate/2
]).
%% prometheus_collector callback %% prometheus_collector callback
-export([ deregister_cleanup/1 -export([
, collect_mf/2 deregister_cleanup/1,
, collect_metrics/2 collect_mf/2,
]). collect_metrics/2
]).
-export([collect/1]). -export([collect/1]).
@ -72,8 +78,13 @@
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% update new config %% update new config
update(Config) -> update(Config) ->
case emqx_conf:update([prometheus], Config, case
#{rawconf_with_defaults => true, override_to => cluster}) of emqx_conf:update(
[prometheus],
Config,
#{rawconf_with_defaults => true, override_to => cluster}
)
of
{ok, #{raw_config := NewConfigRows}} -> {ok, #{raw_config := NewConfigRows}} ->
case maps:get(<<"enable">>, Config, true) of case maps:get(<<"enable">>, Config, true) of
true -> true ->
@ -131,13 +142,12 @@ handle_call(_Msg, _From, State) ->
handle_cast(_Msg, State) -> handle_cast(_Msg, State) ->
{noreply, State}. {noreply, State}.
handle_info({timeout, R, ?TIMER_MSG}, State = #state{timer=R, push_gateway=Uri}) -> handle_info({timeout, R, ?TIMER_MSG}, State = #state{timer = R, push_gateway = Uri}) ->
[Name, Ip] = string:tokens(atom_to_list(node()), "@"), [Name, Ip] = string:tokens(atom_to_list(node()), "@"),
Url = lists:concat([Uri, "/metrics/job/", Name, "/instance/",Name, "~", Ip]), Url = lists:concat([Uri, "/metrics/job/", Name, "/instance/", Name, "~", Ip]),
Data = prometheus_text_format:format(), Data = prometheus_text_format:format(),
httpc:request(post, {Url, [], "text/plain", Data}, [{autoredirect, true}], []), httpc:request(post, {Url, [], "text/plain", Data}, [{autoredirect, true}], []),
{noreply, ensure_timer(State)}; {noreply, ensure_timer(State)};
handle_info(_Msg, State) -> handle_info(_Msg, State) ->
{noreply, State}. {noreply, State}.
@ -176,14 +186,15 @@ collect(<<"json">>) ->
Metrics = emqx_metrics:all(), Metrics = emqx_metrics:all(),
Stats = emqx_stats:getstats(), Stats = emqx_stats:getstats(),
VMData = emqx_vm_data(), VMData = emqx_vm_data(),
#{stats => maps:from_list([collect_stats(Name, Stats) || Name <- emqx_stats()]), #{
metrics => maps:from_list([collect_stats(Name, VMData) || Name <- emqx_vm()]), stats => maps:from_list([collect_stats(Name, Stats) || Name <- emqx_stats()]),
packets => maps:from_list([collect_stats(Name, Metrics) || Name <- emqx_metrics_packets()]), metrics => maps:from_list([collect_stats(Name, VMData) || Name <- emqx_vm()]),
messages => maps:from_list([collect_stats(Name, Metrics) || Name <- emqx_metrics_messages()]), packets => maps:from_list([collect_stats(Name, Metrics) || Name <- emqx_metrics_packets()]),
delivery => maps:from_list([collect_stats(Name, Metrics) || Name <- emqx_metrics_delivery()]), messages => maps:from_list([collect_stats(Name, Metrics) || Name <- emqx_metrics_messages()]),
client => maps:from_list([collect_stats(Name, Metrics) || Name <- emqx_metrics_client()]), delivery => maps:from_list([collect_stats(Name, Metrics) || Name <- emqx_metrics_delivery()]),
session => maps:from_list([collect_stats(Name, Metrics) || Name <- emqx_metrics_session()])}; client => maps:from_list([collect_stats(Name, Metrics) || Name <- emqx_metrics_client()]),
session => maps:from_list([collect_stats(Name, Metrics) || Name <- emqx_metrics_session()])
};
collect(<<"prometheus">>) -> collect(<<"prometheus">>) ->
prometheus_text_format:format(). prometheus_text_format:format().
@ -219,13 +230,11 @@ emqx_collect(emqx_connections_count, Stats) ->
gauge_metric(?C('connections.count', Stats)); gauge_metric(?C('connections.count', Stats));
emqx_collect(emqx_connections_max, Stats) -> emqx_collect(emqx_connections_max, Stats) ->
gauge_metric(?C('connections.max', Stats)); gauge_metric(?C('connections.max', Stats));
%% sessions %% sessions
emqx_collect(emqx_sessions_count, Stats) -> emqx_collect(emqx_sessions_count, Stats) ->
gauge_metric(?C('sessions.count', Stats)); gauge_metric(?C('sessions.count', Stats));
emqx_collect(emqx_sessions_max, Stats) -> emqx_collect(emqx_sessions_max, Stats) ->
gauge_metric(?C('sessions.max', Stats)); gauge_metric(?C('sessions.max', Stats));
%% pub/sub stats %% pub/sub stats
emqx_collect(emqx_topics_count, Stats) -> emqx_collect(emqx_topics_count, Stats) ->
gauge_metric(?C('topics.count', Stats)); gauge_metric(?C('topics.count', Stats));
@ -247,13 +256,11 @@ emqx_collect(emqx_subscriptions_shared_count, Stats) ->
gauge_metric(?C('subscriptions.shared.count', Stats)); gauge_metric(?C('subscriptions.shared.count', Stats));
emqx_collect(emqx_subscriptions_shared_max, Stats) -> emqx_collect(emqx_subscriptions_shared_max, Stats) ->
gauge_metric(?C('subscriptions.shared.max', Stats)); gauge_metric(?C('subscriptions.shared.max', Stats));
%% retained %% retained
emqx_collect(emqx_retained_count, Stats) -> emqx_collect(emqx_retained_count, Stats) ->
gauge_metric(?C('retained.count', Stats)); gauge_metric(?C('retained.count', Stats));
emqx_collect(emqx_retained_max, Stats) -> emqx_collect(emqx_retained_max, Stats) ->
gauge_metric(?C('retained.max', Stats)); gauge_metric(?C('retained.max', Stats));
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Metrics - packets & bytes %% Metrics - packets & bytes
@ -262,13 +269,11 @@ emqx_collect(emqx_bytes_received, Metrics) ->
counter_metric(?C('bytes.received', Metrics)); counter_metric(?C('bytes.received', Metrics));
emqx_collect(emqx_bytes_sent, Metrics) -> emqx_collect(emqx_bytes_sent, Metrics) ->
counter_metric(?C('bytes.sent', Metrics)); counter_metric(?C('bytes.sent', Metrics));
%% received.sent %% received.sent
emqx_collect(emqx_packets_received, Metrics) -> emqx_collect(emqx_packets_received, Metrics) ->
counter_metric(?C('packets.received', Metrics)); counter_metric(?C('packets.received', Metrics));
emqx_collect(emqx_packets_sent, Metrics) -> emqx_collect(emqx_packets_sent, Metrics) ->
counter_metric(?C('packets.sent', Metrics)); counter_metric(?C('packets.sent', Metrics));
%% connect %% connect
emqx_collect(emqx_packets_connect, Metrics) -> emqx_collect(emqx_packets_connect, Metrics) ->
counter_metric(?C('packets.connect.received', Metrics)); counter_metric(?C('packets.connect.received', Metrics));
@ -278,7 +283,6 @@ emqx_collect(emqx_packets_connack_error, Metrics) ->
counter_metric(?C('packets.connack.error', Metrics)); counter_metric(?C('packets.connack.error', Metrics));
emqx_collect(emqx_packets_connack_auth_error, Metrics) -> emqx_collect(emqx_packets_connack_auth_error, Metrics) ->
counter_metric(?C('packets.connack.auth_error', Metrics)); counter_metric(?C('packets.connack.auth_error', Metrics));
%% sub.unsub %% sub.unsub
emqx_collect(emqx_packets_subscribe_received, Metrics) -> emqx_collect(emqx_packets_subscribe_received, Metrics) ->
counter_metric(?C('packets.subscribe.received', Metrics)); counter_metric(?C('packets.subscribe.received', Metrics));
@ -294,7 +298,6 @@ emqx_collect(emqx_packets_unsubscribe_error, Metrics) ->
counter_metric(?C('packets.unsubscribe.error', Metrics)); counter_metric(?C('packets.unsubscribe.error', Metrics));
emqx_collect(emqx_packets_unsuback_sent, Metrics) -> emqx_collect(emqx_packets_unsuback_sent, Metrics) ->
counter_metric(?C('packets.unsuback.sent', Metrics)); counter_metric(?C('packets.unsuback.sent', Metrics));
%% publish.puback %% publish.puback
emqx_collect(emqx_packets_publish_received, Metrics) -> emqx_collect(emqx_packets_publish_received, Metrics) ->
counter_metric(?C('packets.publish.received', Metrics)); counter_metric(?C('packets.publish.received', Metrics));
@ -308,7 +311,6 @@ emqx_collect(emqx_packets_publish_auth_error, Metrics) ->
counter_metric(?C('packets.publish.auth_error', Metrics)); counter_metric(?C('packets.publish.auth_error', Metrics));
emqx_collect(emqx_packets_publish_dropped, Metrics) -> emqx_collect(emqx_packets_publish_dropped, Metrics) ->
counter_metric(?C('packets.publish.dropped', Metrics)); counter_metric(?C('packets.publish.dropped', Metrics));
%% puback %% puback
emqx_collect(emqx_packets_puback_received, Metrics) -> emqx_collect(emqx_packets_puback_received, Metrics) ->
counter_metric(?C('packets.puback.received', Metrics)); counter_metric(?C('packets.puback.received', Metrics));
@ -318,7 +320,6 @@ emqx_collect(emqx_packets_puback_inuse, Metrics) ->
counter_metric(?C('packets.puback.inuse', Metrics)); counter_metric(?C('packets.puback.inuse', Metrics));
emqx_collect(emqx_packets_puback_missed, Metrics) -> emqx_collect(emqx_packets_puback_missed, Metrics) ->
counter_metric(?C('packets.puback.missed', Metrics)); counter_metric(?C('packets.puback.missed', Metrics));
%% pubrec %% pubrec
emqx_collect(emqx_packets_pubrec_received, Metrics) -> emqx_collect(emqx_packets_pubrec_received, Metrics) ->
counter_metric(?C('packets.pubrec.received', Metrics)); counter_metric(?C('packets.pubrec.received', Metrics));
@ -328,7 +329,6 @@ emqx_collect(emqx_packets_pubrec_inuse, Metrics) ->
counter_metric(?C('packets.pubrec.inuse', Metrics)); counter_metric(?C('packets.pubrec.inuse', Metrics));
emqx_collect(emqx_packets_pubrec_missed, Metrics) -> emqx_collect(emqx_packets_pubrec_missed, Metrics) ->
counter_metric(?C('packets.pubrec.missed', Metrics)); counter_metric(?C('packets.pubrec.missed', Metrics));
%% pubrel %% pubrel
emqx_collect(emqx_packets_pubrel_received, Metrics) -> emqx_collect(emqx_packets_pubrel_received, Metrics) ->
counter_metric(?C('packets.pubrel.received', Metrics)); counter_metric(?C('packets.pubrel.received', Metrics));
@ -336,7 +336,6 @@ emqx_collect(emqx_packets_pubrel_sent, Metrics) ->
counter_metric(?C('packets.pubrel.sent', Metrics)); counter_metric(?C('packets.pubrel.sent', Metrics));
emqx_collect(emqx_packets_pubrel_missed, Metrics) -> emqx_collect(emqx_packets_pubrel_missed, Metrics) ->
counter_metric(?C('packets.pubrel.missed', Metrics)); counter_metric(?C('packets.pubrel.missed', Metrics));
%% pubcomp %% pubcomp
emqx_collect(emqx_packets_pubcomp_received, Metrics) -> emqx_collect(emqx_packets_pubcomp_received, Metrics) ->
counter_metric(?C('packets.pubcomp.received', Metrics)); counter_metric(?C('packets.pubcomp.received', Metrics));
@ -346,77 +345,59 @@ emqx_collect(emqx_packets_pubcomp_inuse, Metrics) ->
counter_metric(?C('packets.pubcomp.inuse', Metrics)); counter_metric(?C('packets.pubcomp.inuse', Metrics));
emqx_collect(emqx_packets_pubcomp_missed, Metrics) -> emqx_collect(emqx_packets_pubcomp_missed, Metrics) ->
counter_metric(?C('packets.pubcomp.missed', Metrics)); counter_metric(?C('packets.pubcomp.missed', Metrics));
%% pingreq %% pingreq
emqx_collect(emqx_packets_pingreq_received, Metrics) -> emqx_collect(emqx_packets_pingreq_received, Metrics) ->
counter_metric(?C('packets.pingreq.received', Metrics)); counter_metric(?C('packets.pingreq.received', Metrics));
emqx_collect(emqx_packets_pingresp_sent, Metrics) -> emqx_collect(emqx_packets_pingresp_sent, Metrics) ->
counter_metric(?C('packets.pingresp.sent', Metrics)); counter_metric(?C('packets.pingresp.sent', Metrics));
%% disconnect %% disconnect
emqx_collect(emqx_packets_disconnect_received, Metrics) -> emqx_collect(emqx_packets_disconnect_received, Metrics) ->
counter_metric(?C('packets.disconnect.received', Metrics)); counter_metric(?C('packets.disconnect.received', Metrics));
emqx_collect(emqx_packets_disconnect_sent, Metrics) -> emqx_collect(emqx_packets_disconnect_sent, Metrics) ->
counter_metric(?C('packets.disconnect.sent', Metrics)); counter_metric(?C('packets.disconnect.sent', Metrics));
%% auth %% auth
emqx_collect(emqx_packets_auth_received, Metrics) -> emqx_collect(emqx_packets_auth_received, Metrics) ->
counter_metric(?C('packets.auth.received', Metrics)); counter_metric(?C('packets.auth.received', Metrics));
emqx_collect(emqx_packets_auth_sent, Metrics) -> emqx_collect(emqx_packets_auth_sent, Metrics) ->
counter_metric(?C('packets.auth.sent', Metrics)); counter_metric(?C('packets.auth.sent', Metrics));
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Metrics - messages %% Metrics - messages
%% messages %% messages
emqx_collect(emqx_messages_received, Metrics) -> emqx_collect(emqx_messages_received, Metrics) ->
counter_metric(?C('messages.received', Metrics)); counter_metric(?C('messages.received', Metrics));
emqx_collect(emqx_messages_sent, Metrics) -> emqx_collect(emqx_messages_sent, Metrics) ->
counter_metric(?C('messages.sent', Metrics)); counter_metric(?C('messages.sent', Metrics));
emqx_collect(emqx_messages_qos0_received, Metrics) -> emqx_collect(emqx_messages_qos0_received, Metrics) ->
counter_metric(?C('messages.qos0.received', Metrics)); counter_metric(?C('messages.qos0.received', Metrics));
emqx_collect(emqx_messages_qos0_sent, Metrics) -> emqx_collect(emqx_messages_qos0_sent, Metrics) ->
counter_metric(?C('messages.qos0.sent', Metrics)); counter_metric(?C('messages.qos0.sent', Metrics));
emqx_collect(emqx_messages_qos1_received, Metrics) -> emqx_collect(emqx_messages_qos1_received, Metrics) ->
counter_metric(?C('messages.qos1.received', Metrics)); counter_metric(?C('messages.qos1.received', Metrics));
emqx_collect(emqx_messages_qos1_sent, Metrics) -> emqx_collect(emqx_messages_qos1_sent, Metrics) ->
counter_metric(?C('messages.qos1.sent', Metrics)); counter_metric(?C('messages.qos1.sent', Metrics));
emqx_collect(emqx_messages_qos2_received, Metrics) -> emqx_collect(emqx_messages_qos2_received, Metrics) ->
counter_metric(?C('messages.qos2.received', Metrics)); counter_metric(?C('messages.qos2.received', Metrics));
emqx_collect(emqx_messages_qos2_sent, Metrics) -> emqx_collect(emqx_messages_qos2_sent, Metrics) ->
counter_metric(?C('messages.qos2.sent', Metrics)); counter_metric(?C('messages.qos2.sent', Metrics));
emqx_collect(emqx_messages_publish, Metrics) -> emqx_collect(emqx_messages_publish, Metrics) ->
counter_metric(?C('messages.publish', Metrics)); counter_metric(?C('messages.publish', Metrics));
emqx_collect(emqx_messages_dropped, Metrics) -> emqx_collect(emqx_messages_dropped, Metrics) ->
counter_metric(?C('messages.dropped', Metrics)); counter_metric(?C('messages.dropped', Metrics));
emqx_collect(emqx_messages_dropped_expired, Metrics) -> emqx_collect(emqx_messages_dropped_expired, Metrics) ->
counter_metric(?C('messages.dropped.await_pubrel_timeout', Metrics)); counter_metric(?C('messages.dropped.await_pubrel_timeout', Metrics));
emqx_collect(emqx_messages_dropped_no_subscribers, Metrics) -> emqx_collect(emqx_messages_dropped_no_subscribers, Metrics) ->
counter_metric(?C('messages.dropped.no_subscribers', Metrics)); counter_metric(?C('messages.dropped.no_subscribers', Metrics));
emqx_collect(emqx_messages_forward, Metrics) -> emqx_collect(emqx_messages_forward, Metrics) ->
counter_metric(?C('messages.forward', Metrics)); counter_metric(?C('messages.forward', Metrics));
emqx_collect(emqx_messages_retained, Metrics) -> emqx_collect(emqx_messages_retained, Metrics) ->
counter_metric(?C('messages.retained', Metrics)); counter_metric(?C('messages.retained', Metrics));
emqx_collect(emqx_messages_delayed, Stats) -> emqx_collect(emqx_messages_delayed, Stats) ->
counter_metric(?C('messages.delayed', Stats)); counter_metric(?C('messages.delayed', Stats));
emqx_collect(emqx_messages_delivered, Stats) -> emqx_collect(emqx_messages_delivered, Stats) ->
counter_metric(?C('messages.delivered', Stats)); counter_metric(?C('messages.delivered', Stats));
emqx_collect(emqx_messages_acked, Stats) -> emqx_collect(emqx_messages_acked, Stats) ->
counter_metric(?C('messages.acked', Stats)); counter_metric(?C('messages.acked', Stats));
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Metrics - delivery %% Metrics - delivery
@ -432,7 +413,6 @@ emqx_collect(emqx_delivery_dropped_queue_full, Stats) ->
counter_metric(?C('delivery.dropped.queue_full', Stats)); counter_metric(?C('delivery.dropped.queue_full', Stats));
emqx_collect(emqx_delivery_dropped_expired, Stats) -> emqx_collect(emqx_delivery_dropped_expired, Stats) ->
counter_metric(?C('delivery.dropped.expired', Stats)); counter_metric(?C('delivery.dropped.expired', Stats));
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Metrics - client %% Metrics - client
@ -450,7 +430,6 @@ emqx_collect(emqx_client_unsubscribe, Stats) ->
counter_metric(?C('client.unsubscribe', Stats)); counter_metric(?C('client.unsubscribe', Stats));
emqx_collect(emqx_client_disconnected, Stats) -> emqx_collect(emqx_client_disconnected, Stats) ->
counter_metric(?C('client.disconnected', Stats)); counter_metric(?C('client.disconnected', Stats));
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Metrics - session %% Metrics - session
@ -464,31 +443,23 @@ emqx_collect(emqx_session_discarded, Stats) ->
counter_metric(?C('session.discarded', Stats)); counter_metric(?C('session.discarded', Stats));
emqx_collect(emqx_session_terminated, Stats) -> emqx_collect(emqx_session_terminated, Stats) ->
counter_metric(?C('session.terminated', Stats)); counter_metric(?C('session.terminated', Stats));
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% VM %% VM
emqx_collect(emqx_vm_cpu_use, VMData) -> emqx_collect(emqx_vm_cpu_use, VMData) ->
gauge_metric(?C(cpu_use, VMData)); gauge_metric(?C(cpu_use, VMData));
emqx_collect(emqx_vm_cpu_idle, VMData) -> emqx_collect(emqx_vm_cpu_idle, VMData) ->
gauge_metric(?C(cpu_idle, VMData)); gauge_metric(?C(cpu_idle, VMData));
emqx_collect(emqx_vm_run_queue, VMData) -> emqx_collect(emqx_vm_run_queue, VMData) ->
gauge_metric(?C(run_queue, VMData)); gauge_metric(?C(run_queue, VMData));
emqx_collect(emqx_vm_process_messages_in_queues, VMData) -> emqx_collect(emqx_vm_process_messages_in_queues, VMData) ->
gauge_metric(?C(process_total_messages, VMData)); gauge_metric(?C(process_total_messages, VMData));
emqx_collect(emqx_vm_total_memory, VMData) -> emqx_collect(emqx_vm_total_memory, VMData) ->
gauge_metric(?C(total_memory, VMData)); gauge_metric(?C(total_memory, VMData));
emqx_collect(emqx_vm_used_memory, VMData) -> emqx_collect(emqx_vm_used_memory, VMData) ->
gauge_metric(?C(used_memory, VMData)); gauge_metric(?C(used_memory, VMData));
emqx_collect(emqx_cluster_nodes_running, ClusterData) -> emqx_collect(emqx_cluster_nodes_running, ClusterData) ->
gauge_metric(?C(nodes_running, ClusterData)); gauge_metric(?C(nodes_running, ClusterData));
emqx_collect(emqx_cluster_nodes_stopped, ClusterData) -> emqx_collect(emqx_cluster_nodes_stopped, ClusterData) ->
gauge_metric(?C(nodes_stopped, ClusterData)). gauge_metric(?C(nodes_stopped, ClusterData)).
@ -497,142 +468,157 @@ emqx_collect(emqx_cluster_nodes_stopped, ClusterData) ->
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
emqx_stats() -> emqx_stats() ->
[ emqx_connections_count [
, emqx_connections_max emqx_connections_count,
, emqx_sessions_count emqx_connections_max,
, emqx_sessions_max emqx_sessions_count,
, emqx_topics_count emqx_sessions_max,
, emqx_topics_max emqx_topics_count,
, emqx_suboptions_count emqx_topics_max,
, emqx_suboptions_max emqx_suboptions_count,
, emqx_subscribers_count emqx_suboptions_max,
, emqx_subscribers_max emqx_subscribers_count,
, emqx_subscriptions_count emqx_subscribers_max,
, emqx_subscriptions_max emqx_subscriptions_count,
, emqx_subscriptions_shared_count emqx_subscriptions_max,
, emqx_subscriptions_shared_max emqx_subscriptions_shared_count,
, emqx_retained_count emqx_subscriptions_shared_max,
, emqx_retained_max emqx_retained_count,
emqx_retained_max
]. ].
emqx_metrics_packets() -> emqx_metrics_packets() ->
[ emqx_bytes_received [
, emqx_bytes_sent emqx_bytes_received,
, emqx_packets_received emqx_bytes_sent,
, emqx_packets_sent emqx_packets_received,
, emqx_packets_connect emqx_packets_sent,
, emqx_packets_connack_sent emqx_packets_connect,
, emqx_packets_connack_error emqx_packets_connack_sent,
, emqx_packets_connack_auth_error emqx_packets_connack_error,
, emqx_packets_publish_received emqx_packets_connack_auth_error,
, emqx_packets_publish_sent emqx_packets_publish_received,
, emqx_packets_publish_inuse emqx_packets_publish_sent,
, emqx_packets_publish_error emqx_packets_publish_inuse,
, emqx_packets_publish_auth_error emqx_packets_publish_error,
, emqx_packets_publish_dropped emqx_packets_publish_auth_error,
, emqx_packets_puback_received emqx_packets_publish_dropped,
, emqx_packets_puback_sent emqx_packets_puback_received,
, emqx_packets_puback_inuse emqx_packets_puback_sent,
, emqx_packets_puback_missed emqx_packets_puback_inuse,
, emqx_packets_pubrec_received emqx_packets_puback_missed,
, emqx_packets_pubrec_sent emqx_packets_pubrec_received,
, emqx_packets_pubrec_inuse emqx_packets_pubrec_sent,
, emqx_packets_pubrec_missed emqx_packets_pubrec_inuse,
, emqx_packets_pubrel_received emqx_packets_pubrec_missed,
, emqx_packets_pubrel_sent emqx_packets_pubrel_received,
, emqx_packets_pubrel_missed emqx_packets_pubrel_sent,
, emqx_packets_pubcomp_received emqx_packets_pubrel_missed,
, emqx_packets_pubcomp_sent emqx_packets_pubcomp_received,
, emqx_packets_pubcomp_inuse emqx_packets_pubcomp_sent,
, emqx_packets_pubcomp_missed emqx_packets_pubcomp_inuse,
, emqx_packets_subscribe_received emqx_packets_pubcomp_missed,
, emqx_packets_subscribe_error emqx_packets_subscribe_received,
, emqx_packets_subscribe_auth_error emqx_packets_subscribe_error,
, emqx_packets_suback_sent emqx_packets_subscribe_auth_error,
, emqx_packets_unsubscribe_received emqx_packets_suback_sent,
, emqx_packets_unsubscribe_error emqx_packets_unsubscribe_received,
, emqx_packets_unsuback_sent emqx_packets_unsubscribe_error,
, emqx_packets_pingreq_received emqx_packets_unsuback_sent,
, emqx_packets_pingresp_sent emqx_packets_pingreq_received,
, emqx_packets_disconnect_received emqx_packets_pingresp_sent,
, emqx_packets_disconnect_sent emqx_packets_disconnect_received,
, emqx_packets_auth_received emqx_packets_disconnect_sent,
, emqx_packets_auth_sent emqx_packets_auth_received,
emqx_packets_auth_sent
]. ].
emqx_metrics_messages() -> emqx_metrics_messages() ->
[ emqx_messages_received [
, emqx_messages_sent emqx_messages_received,
, emqx_messages_qos0_received emqx_messages_sent,
, emqx_messages_qos0_sent emqx_messages_qos0_received,
, emqx_messages_qos1_received emqx_messages_qos0_sent,
, emqx_messages_qos1_sent emqx_messages_qos1_received,
, emqx_messages_qos2_received emqx_messages_qos1_sent,
, emqx_messages_qos2_sent emqx_messages_qos2_received,
, emqx_messages_publish emqx_messages_qos2_sent,
, emqx_messages_dropped emqx_messages_publish,
, emqx_messages_dropped_expired emqx_messages_dropped,
, emqx_messages_dropped_no_subscribers emqx_messages_dropped_expired,
, emqx_messages_forward emqx_messages_dropped_no_subscribers,
, emqx_messages_retained emqx_messages_forward,
, emqx_messages_delayed emqx_messages_retained,
, emqx_messages_delivered emqx_messages_delayed,
, emqx_messages_acked emqx_messages_delivered,
emqx_messages_acked
]. ].
emqx_metrics_delivery() -> emqx_metrics_delivery() ->
[ emqx_delivery_dropped [
, emqx_delivery_dropped_no_local emqx_delivery_dropped,
, emqx_delivery_dropped_too_large emqx_delivery_dropped_no_local,
, emqx_delivery_dropped_qos0_msg emqx_delivery_dropped_too_large,
, emqx_delivery_dropped_queue_full emqx_delivery_dropped_qos0_msg,
, emqx_delivery_dropped_expired emqx_delivery_dropped_queue_full,
emqx_delivery_dropped_expired
]. ].
emqx_metrics_client() -> emqx_metrics_client() ->
[ emqx_client_connected [
, emqx_client_authenticate emqx_client_connected,
, emqx_client_auth_anonymous emqx_client_authenticate,
, emqx_client_authorize emqx_client_auth_anonymous,
, emqx_client_subscribe emqx_client_authorize,
, emqx_client_unsubscribe emqx_client_subscribe,
, emqx_client_disconnected emqx_client_unsubscribe,
emqx_client_disconnected
]. ].
emqx_metrics_session() -> emqx_metrics_session() ->
[ emqx_session_created [
, emqx_session_resumed emqx_session_created,
, emqx_session_takenover emqx_session_resumed,
, emqx_session_discarded emqx_session_takenover,
, emqx_session_terminated emqx_session_discarded,
emqx_session_terminated
]. ].
emqx_vm() -> emqx_vm() ->
[ emqx_vm_cpu_use [
, emqx_vm_cpu_idle emqx_vm_cpu_use,
, emqx_vm_run_queue emqx_vm_cpu_idle,
, emqx_vm_process_messages_in_queues emqx_vm_run_queue,
, emqx_vm_total_memory emqx_vm_process_messages_in_queues,
, emqx_vm_used_memory emqx_vm_total_memory,
emqx_vm_used_memory
]. ].
emqx_vm_data() -> emqx_vm_data() ->
Idle = case cpu_sup:util([detailed]) of Idle =
{_, 0, 0, _} -> 0; %% Not support for Windows case cpu_sup:util([detailed]) of
{_Num, _Use, IdleList, _} -> ?C(idle, IdleList) %% Not support for Windows
end, {_, 0, 0, _} -> 0;
{_Num, _Use, IdleList, _} -> ?C(idle, IdleList)
end,
RunQueue = erlang:statistics(run_queue), RunQueue = erlang:statistics(run_queue),
[{run_queue, RunQueue}, [
{process_total_messages, 0}, %% XXX: Plan removed at v5.0 {run_queue, RunQueue},
{cpu_idle, Idle}, %% XXX: Plan removed at v5.0
{cpu_use, 100 - Idle}] ++ emqx_vm:mem_info(). {process_total_messages, 0},
{cpu_idle, Idle},
{cpu_use, 100 - Idle}
] ++ emqx_vm:mem_info().
emqx_cluster() -> emqx_cluster() ->
[ emqx_cluster_nodes_running [
, emqx_cluster_nodes_stopped emqx_cluster_nodes_running,
emqx_cluster_nodes_stopped
]. ].
emqx_cluster_data() -> emqx_cluster_data() ->
#{running_nodes := Running, stopped_nodes := Stopped} = mria_mnesia:cluster_info(), #{running_nodes := Running, stopped_nodes := Stopped} = mria_mnesia:cluster_info(),
[{nodes_running, length(Running)}, [
{nodes_stopped, length(Stopped)}]. {nodes_running, length(Running)},
{nodes_stopped, length(Stopped)}
].

View File

@ -22,14 +22,16 @@
-import(hoconsc, [ref/2]). -import(hoconsc, [ref/2]).
-export([ api_spec/0 -export([
, paths/0 api_spec/0,
, schema/1 paths/0,
]). schema/1
]).
-export([ prometheus/2 -export([
, stats/2 prometheus/2,
]). stats/2
]).
-define(SCHEMA_MODULE, emqx_prometheus_schema). -define(SCHEMA_MODULE, emqx_prometheus_schema).
@ -37,32 +39,38 @@ api_spec() ->
emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true}). emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true}).
paths() -> paths() ->
[ "/prometheus" [
, "/prometheus/stats" "/prometheus",
"/prometheus/stats"
]. ].
schema("/prometheus") -> schema("/prometheus") ->
#{ 'operationId' => prometheus #{
, get => 'operationId' => prometheus,
#{ description => <<"Get Prometheus config info">> get =>
, responses => #{
#{200 => prometheus_config_schema()} description => <<"Get Prometheus config info">>,
responses =>
#{200 => prometheus_config_schema()}
},
put =>
#{
description => <<"Update Prometheus config">>,
'requestBody' => prometheus_config_schema(),
responses =>
#{200 => prometheus_config_schema()}
} }
, put => };
#{ description => <<"Update Prometheus config">>
, 'requestBody' => prometheus_config_schema()
, responses =>
#{200 => prometheus_config_schema()}
}
};
schema("/prometheus/stats") -> schema("/prometheus/stats") ->
#{ 'operationId' => stats #{
, get => 'operationId' => stats,
#{ description => <<"Get Prometheus Data">> get =>
, responses => #{
#{200 => prometheus_data_schema()} description => <<"Get Prometheus Data">>,
responses =>
#{200 => prometheus_data_schema()}
} }
}. }.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% API Handler funcs %% API Handler funcs
@ -70,7 +78,6 @@ schema("/prometheus/stats") ->
prometheus(get, _Params) -> prometheus(get, _Params) ->
{200, emqx:get_raw_config([<<"prometheus">>], #{})}; {200, emqx:get_raw_config([<<"prometheus">>], #{})};
prometheus(put, #{body := Body}) -> prometheus(put, #{body := Body}) ->
case emqx_prometheus:update(Body) of case emqx_prometheus:update(Body) of
{ok, NewConfig} -> {ok, NewConfig} ->
@ -100,21 +107,25 @@ stats(get, #{headers := Headers}) ->
prometheus_config_schema() -> prometheus_config_schema() ->
emqx_dashboard_swagger:schema_with_example( emqx_dashboard_swagger:schema_with_example(
ref(?SCHEMA_MODULE, "prometheus"), ref(?SCHEMA_MODULE, "prometheus"),
prometheus_config_example()). prometheus_config_example()
).
prometheus_config_example() -> prometheus_config_example() ->
#{ enable => true #{
, interval => "15s" enable => true,
, push_gateway_server => <<"http://127.0.0.1:9091">> interval => "15s",
}. push_gateway_server => <<"http://127.0.0.1:9091">>
}.
prometheus_data_schema() -> prometheus_data_schema() ->
#{ description => <<"Get Prometheus Data">> #{
, content => description => <<"Get Prometheus Data">>,
#{ 'application/json' => content =>
#{schema => #{type => object}} #{
, 'text/plain' => 'application/json' =>
#{schema => #{type => string}} #{schema => #{type => object}},
'text/plain' =>
#{schema => #{type => string}}
} }
}. }.

View File

@ -21,9 +21,10 @@
-include("emqx_prometheus.hrl"). -include("emqx_prometheus.hrl").
%% Application callbacks %% Application callbacks
-export([ start/2 -export([
, stop/1 start/2,
]). stop/1
]).
start(_StartType, _StartArgs) -> start(_StartType, _StartArgs) ->
{ok, Sup} = emqx_prometheus_sup:start_link(), {ok, Sup} = emqx_prometheus_sup:start_link(),

View File

@ -15,9 +15,10 @@
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
-module(emqx_prometheus_mria). -module(emqx_prometheus_mria).
-export([deregister_cleanup/1, -export([
collect_mf/2 deregister_cleanup/1,
]). collect_mf/2
]).
-include_lib("prometheus/include/prometheus.hrl"). -include_lib("prometheus/include/prometheus.hrl").
@ -43,39 +44,45 @@ deregister_cleanup(_) -> ok.
_Registry :: prometheus_registry:registry(), _Registry :: prometheus_registry:registry(),
Callback :: prometheus_collector:callback(). Callback :: prometheus_collector:callback().
collect_mf(_Registry, Callback) -> collect_mf(_Registry, Callback) ->
case mria_rlog:backend() of case mria_rlog:backend() of
rlog -> rlog ->
Metrics = metrics(), Metrics = metrics(),
_ = [add_metric_family(Metric, Callback) || Metric <- Metrics], _ = [add_metric_family(Metric, Callback) || Metric <- Metrics],
ok; ok;
mnesia -> mnesia ->
ok ok
end. end.
add_metric_family({Name, Metrics}, Callback) -> add_metric_family({Name, Metrics}, Callback) ->
Callback(prometheus_model_helpers:create_mf( ?METRIC_NAME(Name) Callback(
, <<"">> prometheus_model_helpers:create_mf(
, gauge ?METRIC_NAME(Name),
, catch_all(Metrics) <<"">>,
)). gauge,
catch_all(Metrics)
)
).
%%==================================================================== %%====================================================================
%% Internal functions %% Internal functions
%%==================================================================== %%====================================================================
metrics() -> metrics() ->
Metrics = case mria_rlog:role() of Metrics =
replicant -> case mria_rlog:role() of
[lag, bootstrap_time, bootstrap_num_keys, message_queue_len, replayq_len]; replicant ->
core -> [lag, bootstrap_time, bootstrap_num_keys, message_queue_len, replayq_len];
[last_intercepted_trans, weight, replicants, server_mql] core ->
end, [last_intercepted_trans, weight, replicants, server_mql]
end,
[{MetricId, fun() -> get_shard_metric(MetricId) end} || MetricId <- Metrics]. [{MetricId, fun() -> get_shard_metric(MetricId) end} || MetricId <- Metrics].
get_shard_metric(Metric) -> get_shard_metric(Metric) ->
%% TODO: only report shards that are up %% TODO: only report shards that are up
[{[{shard, Shard}], get_shard_metric(Metric, Shard)} || [
Shard <- mria_schema:shards(), Shard =/= undefined]. {[{shard, Shard}], get_shard_metric(Metric, Shard)}
|| Shard <- mria_schema:shards(), Shard =/= undefined
].
get_shard_metric(replicants, Shard) -> get_shard_metric(replicants, Shard) ->
length(mria_status:agents(Shard)); length(mria_status:agents(Shard));
@ -88,6 +95,8 @@ get_shard_metric(Metric, Shard) ->
end. end.
catch_all(DataFun) -> catch_all(DataFun) ->
try DataFun() try
catch _:_ -> undefined DataFun()
catch
_:_ -> undefined
end. end.

View File

@ -20,11 +20,12 @@
-behaviour(hocon_schema). -behaviour(hocon_schema).
-export([ namespace/0 -export([
, roots/0 namespace/0,
, fields/1 roots/0,
, desc/1 fields/1,
]). desc/1
]).
namespace() -> "prometheus". namespace() -> "prometheus".
@ -32,25 +33,36 @@ roots() -> ["prometheus"].
fields("prometheus") -> fields("prometheus") ->
[ [
{push_gateway_server, sc(string(), {push_gateway_server,
#{ default => "http://127.0.0.1:9091" sc(
, required => true string(),
, desc => ?DESC(push_gateway_server) #{
})}, default => "http://127.0.0.1:9091",
{interval, sc(emqx_schema:duration_ms(), required => true,
#{ default => "15s" desc => ?DESC(push_gateway_server)
, required => true }
, desc => ?DESC(interval) )},
})}, {interval,
{enable, sc(boolean(), sc(
#{ default => false emqx_schema:duration_ms(),
, required => true #{
, desc => ?DESC(enable) default => "15s",
})} required => true,
desc => ?DESC(interval)
}
)},
{enable,
sc(
boolean(),
#{
default => false,
required => true,
desc => ?DESC(enable)
}
)}
]. ].
desc("prometheus") -> ?DESC(prometheus); desc("prometheus") -> ?DESC(prometheus);
desc(_) -> desc(_) -> undefined.
undefined.
sc(Type, Meta) -> hoconsc:mk(Type, Meta). sc(Type, Meta) -> hoconsc:mk(Type, Meta).

View File

@ -18,21 +18,24 @@
-behaviour(supervisor). -behaviour(supervisor).
-export([ start_link/0 -export([
, start_child/1 start_link/0,
, start_child/2 start_child/1,
, stop_child/1 start_child/2,
]). stop_child/1
]).
-export([init/1]). -export([init/1]).
%% Helper macro for declaring children of supervisor %% Helper macro for declaring children of supervisor
-define(CHILD(Mod, Opts), #{id => Mod, -define(CHILD(Mod, Opts), #{
start => {Mod, start_link, [Opts]}, id => Mod,
restart => permanent, start => {Mod, start_link, [Opts]},
shutdown => 5000, restart => permanent,
type => worker, shutdown => 5000,
modules => [Mod]}). type => worker,
modules => [Mod]
}).
start_link() -> start_link() ->
supervisor:start_link({local, ?MODULE}, ?MODULE, []). supervisor:start_link({local, ?MODULE}, ?MODULE, []).
@ -45,7 +48,7 @@ start_child(ChildSpec) when is_map(ChildSpec) ->
start_child(Mod, Opts) when is_atom(Mod) andalso is_map(Opts) -> start_child(Mod, Opts) when is_atom(Mod) andalso is_map(Opts) ->
assert_started(supervisor:start_child(?MODULE, ?CHILD(Mod, Opts))). assert_started(supervisor:start_child(?MODULE, ?CHILD(Mod, Opts))).
-spec(stop_child(any()) -> ok | {error, term()}). -spec stop_child(any()) -> ok | {error, term()}.
stop_child(ChildId) -> stop_child(ChildId) ->
case supervisor:terminate_child(?MODULE, ChildId) of case supervisor:terminate_child(?MODULE, ChildId) of
ok -> supervisor:delete_child(?MODULE, ChildId); ok -> supervisor:delete_child(?MODULE, ChildId);

View File

@ -18,11 +18,12 @@
-behaviour(emqx_bpapi). -behaviour(emqx_bpapi).
-export([ introduced_in/0 -export([
introduced_in/0,
, start/1 start/1,
, stop/1 stop/1
]). ]).
-include_lib("emqx/include/bpapi.hrl"). -include_lib("emqx/include/bpapi.hrl").

View File

@ -22,13 +22,14 @@
-compile(export_all). -compile(export_all).
-define(CLUSTER_RPC_SHARD, emqx_cluster_rpc_shard). -define(CLUSTER_RPC_SHARD, emqx_cluster_rpc_shard).
-define(CONF_DEFAULT, <<" -define(CONF_DEFAULT, <<
prometheus { "\n"
push_gateway_server = \"http://127.0.0.1:9091\" "prometheus {\n"
interval = \"1s\" " push_gateway_server = \"http://127.0.0.1:9091\"\n"
enable = true " interval = \"1s\"\n"
} " enable = true\n"
">>). "}\n"
>>).
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Setups %% Setups

View File

@ -67,9 +67,14 @@ t_prometheus_api(_) ->
{ok, Response} = emqx_mgmt_api_test_util:request_api(get, Path, "", Auth), {ok, Response} = emqx_mgmt_api_test_util:request_api(get, Path, "", Auth),
Conf = emqx_json:decode(Response, [return_maps]), Conf = emqx_json:decode(Response, [return_maps]),
?assertMatch(#{<<"push_gateway_server">> := _, ?assertMatch(
<<"interval">> := _, #{
<<"enable">> := _}, Conf), <<"push_gateway_server">> := _,
<<"interval">> := _,
<<"enable">> := _
},
Conf
),
NewConf = Conf#{<<"interval">> := <<"2s">>}, NewConf = Conf#{<<"interval">> := <<"2s">>},
{ok, Response2} = emqx_mgmt_api_test_util:request_api(put, Path, "", Auth, NewConf), {ok, Response2} = emqx_mgmt_api_test_util:request_api(put, Path, "", Auth, NewConf),

View File

@ -30,12 +30,13 @@
}. }.
-type resource_group() :: binary(). -type resource_group() :: binary().
-type create_opts() :: #{ -type create_opts() :: #{
health_check_interval => integer(), health_check_interval => integer(),
health_check_timeout => integer(), health_check_timeout => integer(),
waiting_connect_complete => integer() waiting_connect_complete => integer()
}. }.
-type after_query() :: {[OnSuccess :: after_query_fun()], [OnFailed :: after_query_fun()]} | -type after_query() ::
undefined. {[OnSuccess :: after_query_fun()], [OnFailed :: after_query_fun()]}
| undefined.
%% the `after_query_fun()` is mainly for callbacks that increment counters or do some fallback %% the `after_query_fun()` is mainly for callbacks that increment counters or do some fallback
%% actions upon query failure %% actions upon query failure

View File

@ -15,13 +15,17 @@
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
-define(SAFE_CALL(_EXP_), -define(SAFE_CALL(_EXP_),
?SAFE_CALL(_EXP_, ok)). ?SAFE_CALL(_EXP_, ok)
).
-define(SAFE_CALL(_EXP_, _EXP_ON_FAIL_), -define(SAFE_CALL(_EXP_, _EXP_ON_FAIL_),
fun() -> fun() ->
try (_EXP_) try
catch _EXCLASS_:_EXCPTION_:_ST_ -> (_EXP_)
catch
_EXCLASS_:_EXCPTION_:_ST_ ->
_EXP_ON_FAIL_, _EXP_ON_FAIL_,
{error, {_EXCLASS_, _EXCPTION_, _ST_}} {error, {_EXCLASS_, _EXCPTION_, _ST_}}
end end
end()). end()
).

View File

@ -1,9 +1,10 @@
%% -*- mode: erlang -*- %% -*- mode: erlang -*-
{erl_opts, [ debug_info {erl_opts, [
, nowarn_unused_import debug_info,
%, {d, 'RESOURCE_DEBUG'} nowarn_unused_import
]}. %, {d, 'RESOURCE_DEBUG'}
]}.
{erl_first_files, ["src/emqx_resource_transform.erl"]}. {erl_first_files, ["src/emqx_resource_transform.erl"]}.
@ -11,9 +12,11 @@
%% try to override the dialyzer 'race_conditions' defined in the top-level dir, %% try to override the dialyzer 'race_conditions' defined in the top-level dir,
%% but it doesn't work %% but it doesn't work
{dialyzer, [{warnings, [unmatched_returns, error_handling]} {dialyzer, [{warnings, [unmatched_returns, error_handling]}]}.
]}.
{deps, [ {jsx, {git, "https://github.com/talentdeficit/jsx", {tag, "v3.1.0"}}} {deps, [
, {emqx, {path, "../emqx"}} {jsx, {git, "https://github.com/talentdeficit/jsx", {tag, "v3.1.0"}}},
]}. {emqx, {path, "../emqx"}}
]}.
{project_plugins, [erlfmt]}.

View File

@ -1,19 +1,19 @@
%% -*- mode: erlang -*- %% -*- mode: erlang -*-
{application, emqx_resource, {application, emqx_resource, [
[{description, "An OTP application"}, {description, "An OTP application"},
{vsn, "0.1.0"}, {vsn, "0.1.0"},
{registered, []}, {registered, []},
{mod, {emqx_resource_app, []}}, {mod, {emqx_resource_app, []}},
{applications, {applications, [
[kernel, kernel,
stdlib, stdlib,
gproc, gproc,
jsx, jsx,
emqx emqx
]}, ]},
{env,[]}, {env, []},
{modules, []}, {modules, []},
{licenses, ["Apache 2.0"]}, {licenses, ["Apache 2.0"]},
{links, []} {links, []}
]}. ]}.

View File

@ -25,66 +25,93 @@
%% APIs for behaviour implementations %% APIs for behaviour implementations
-export([ query_success/1 -export([
, query_failed/1 query_success/1,
]). query_failed/1
]).
%% APIs for instances %% APIs for instances
-export([ check_config/2 -export([
, check_and_create/4 check_config/2,
, check_and_create/5 check_and_create/4,
, check_and_create_local/4 check_and_create/5,
, check_and_create_local/5 check_and_create_local/4,
, check_and_recreate/4 check_and_create_local/5,
, check_and_recreate_local/4 check_and_recreate/4,
]). check_and_recreate_local/4
]).
%% Sync resource instances and files %% Sync resource instances and files
%% provisional solution: rpc:multicall to all the nodes for creating/updating/removing %% provisional solution: rpc:multicall to all the nodes for creating/updating/removing
%% todo: replicate operations %% todo: replicate operations
-export([ create/4 %% store the config and start the instance
, create/5 %% store the config and start the instance
, create_local/4 -export([
, create_local/5 create/4,
, create_dry_run/2 %% run start/2, health_check/2 and stop/1 sequentially create/5,
, create_dry_run_local/2 create_local/4,
, recreate/4 %% this will do create_dry_run, stop the old instance and start a new one create_local/5,
, recreate_local/4 %% run start/2, health_check/2 and stop/1 sequentially
, remove/1 %% remove the config and stop the instance create_dry_run/2,
, remove_local/1 create_dry_run_local/2,
, reset_metrics/1 %% this will do create_dry_run, stop the old instance and start a new one
, reset_metrics_local/1 recreate/4,
]). recreate_local/4,
%% remove the config and stop the instance
remove/1,
remove_local/1,
reset_metrics/1,
reset_metrics_local/1
]).
%% Calls to the callback module with current resource state %% Calls to the callback module with current resource state
%% They also save the state after the call finished (except query/2,3). %% They also save the state after the call finished (except query/2,3).
-export([ restart/1 %% restart the instance.
, restart/2 %% restart the instance.
, health_check/1 %% verify if the resource is working normally -export([
, set_resource_status_connecting/1 %% set resource status to disconnected restart/1,
, stop/1 %% stop the instance restart/2,
, query/2 %% query the instance %% verify if the resource is working normally
, query/3 %% query the instance with after_query() health_check/1,
]). %% set resource status to disconnected
set_resource_status_connecting/1,
%% stop the instance
stop/1,
%% query the instance
query/2,
%% query the instance with after_query()
query/3
]).
%% Direct calls to the callback module %% Direct calls to the callback module
-export([ call_start/3 %% start the instance
, call_health_check/3 %% verify if the resource is working normally
, call_stop/3 %% stop the instance
]).
-export([ list_instances/0 %% list all the instances, id only. %% start the instance
, list_instances_verbose/0 %% list all the instances -export([
, get_instance/1 %% return the data of the instance call_start/3,
, list_instances_by_type/1 %% return all the instances of the same resource type %% verify if the resource is working normally
, generate_id/1 call_health_check/3,
, list_group_instances/1 %% stop the instance
]). call_stop/3
]).
-optional_callbacks([ on_query/4 %% list all the instances, id only.
, on_health_check/2 -export([
]). list_instances/0,
%% list all the instances
list_instances_verbose/0,
%% return the data of the instance
get_instance/1,
%% return all the instances of the same resource type
list_instances_by_type/1,
generate_id/1,
list_group_instances/1
]).
-optional_callbacks([
on_query/4,
on_health_check/2
]).
%% when calling emqx_resource:start/1 %% when calling emqx_resource:start/1
-callback on_start(instance_id(), resource_config()) -> -callback on_start(instance_id(), resource_config()) ->
@ -98,7 +125,7 @@
%% when calling emqx_resource:health_check/2 %% when calling emqx_resource:health_check/2
-callback on_health_check(instance_id(), resource_state()) -> -callback on_health_check(instance_id(), resource_state()) ->
{ok, resource_state()} | {error, Reason:: term(), resource_state()}. {ok, resource_state()} | {error, Reason :: term(), resource_state()}.
-spec list_types() -> [module()]. -spec list_types() -> [module()].
list_types() -> list_types() ->
@ -111,24 +138,26 @@ discover_resource_mods() ->
-spec is_resource_mod(module()) -> boolean(). -spec is_resource_mod(module()) -> boolean().
is_resource_mod(Module) -> is_resource_mod(Module) ->
Info = Module:module_info(attributes), Info = Module:module_info(attributes),
Behaviour = proplists:get_value(behavior, Info, []) ++ Behaviour =
proplists:get_value(behaviour, Info, []), proplists:get_value(behavior, Info, []) ++
proplists:get_value(behaviour, Info, []),
lists:member(?MODULE, Behaviour). lists:member(?MODULE, Behaviour).
-spec query_success(after_query()) -> ok. -spec query_success(after_query()) -> ok.
query_success(undefined) -> ok; query_success(undefined) -> ok;
query_success({OnSucc, _}) -> query_success({OnSucc, _}) -> apply_query_after_calls(OnSucc).
apply_query_after_calls(OnSucc).
-spec query_failed(after_query()) -> ok. -spec query_failed(after_query()) -> ok.
query_failed(undefined) -> ok; query_failed(undefined) -> ok;
query_failed({_, OnFailed}) -> query_failed({_, OnFailed}) -> apply_query_after_calls(OnFailed).
apply_query_after_calls(OnFailed).
apply_query_after_calls(Funcs) -> apply_query_after_calls(Funcs) ->
lists:foreach(fun({Fun, Args}) -> lists:foreach(
fun({Fun, Args}) ->
safe_apply(Fun, Args) safe_apply(Fun, Args)
end, Funcs). end,
Funcs
).
%% ================================================================================= %% =================================================================================
%% APIs for resource instances %% APIs for resource instances
@ -149,11 +178,13 @@ create(InstId, Group, ResourceType, Config, Opts) ->
create_local(InstId, Group, ResourceType, Config) -> create_local(InstId, Group, ResourceType, Config) ->
create_local(InstId, Group, ResourceType, Config, #{}). create_local(InstId, Group, ResourceType, Config, #{}).
-spec create_local(instance_id(), -spec create_local(
resource_group(), instance_id(),
resource_type(), resource_group(),
resource_config(), resource_type(),
create_opts()) -> resource_config(),
create_opts()
) ->
{ok, resource_data() | 'already_created'} | {error, Reason :: term()}. {ok, resource_data() | 'already_created'} | {error, Reason :: term()}.
create_local(InstId, Group, ResourceType, Config, Opts) -> create_local(InstId, Group, ResourceType, Config, Opts) ->
call_instance(InstId, {create, InstId, Group, ResourceType, Config, Opts}). call_instance(InstId, {create, InstId, Group, ResourceType, Config, Opts}).
@ -206,19 +237,25 @@ query(InstId, Request) ->
query(InstId, Request, AfterQuery) -> query(InstId, Request, AfterQuery) ->
case get_instance(InstId) of case get_instance(InstId) of
{ok, _Group, #{status := connecting}} -> {ok, _Group, #{status := connecting}} ->
query_error(connecting, <<"cannot serve query when the resource " query_error(connecting, <<
"instance is still connecting">>); "cannot serve query when the resource "
"instance is still connecting"
>>);
{ok, _Group, #{status := disconnected}} -> {ok, _Group, #{status := disconnected}} ->
query_error(disconnected, <<"cannot serve query when the resource " query_error(disconnected, <<
"instance is disconnected">>); "cannot serve query when the resource "
"instance is disconnected"
>>);
{ok, _Group, #{mod := Mod, state := ResourceState, status := connected}} -> {ok, _Group, #{mod := Mod, state := ResourceState, status := connected}} ->
%% the resource state is readonly to Module:on_query/4 %% the resource state is readonly to Module:on_query/4
%% and the `after_query()` functions should be thread safe %% and the `after_query()` functions should be thread safe
ok = emqx_plugin_libs_metrics:inc(resource_metrics, InstId, matched), ok = emqx_plugin_libs_metrics:inc(resource_metrics, InstId, matched),
try Mod:on_query(InstId, Request, AfterQuery, ResourceState) try
catch Err:Reason:ST -> Mod:on_query(InstId, Request, AfterQuery, ResourceState)
emqx_plugin_libs_metrics:inc(resource_metrics, InstId, exception), catch
erlang:raise(Err, Reason, ST) Err:Reason:ST ->
emqx_plugin_libs_metrics:inc(resource_metrics, InstId, exception),
erlang:raise(Err, Reason, ST)
end; end;
{error, not_found} -> {error, not_found} ->
query_error(not_found, <<"the resource id not exists">>) query_error(not_found, <<"the resource id not exists">>)
@ -258,9 +295,10 @@ list_instances_verbose() ->
-spec list_instances_by_type(module()) -> [instance_id()]. -spec list_instances_by_type(module()) -> [instance_id()].
list_instances_by_type(ResourceType) -> list_instances_by_type(ResourceType) ->
filter_instances(fun(_, RT) when RT =:= ResourceType -> true; filter_instances(fun
(_, _) -> false (_, RT) when RT =:= ResourceType -> true;
end). (_, _) -> false
end).
-spec generate_id(term()) -> instance_id(). -spec generate_id(term()) -> instance_id().
generate_id(Name) when is_binary(Name) -> generate_id(Name) when is_binary(Name) ->
@ -276,7 +314,9 @@ call_start(InstId, Mod, Config) ->
?SAFE_CALL(Mod:on_start(InstId, Config)). ?SAFE_CALL(Mod:on_start(InstId, Config)).
-spec call_health_check(instance_id(), module(), resource_state()) -> -spec call_health_check(instance_id(), module(), resource_state()) ->
{ok, resource_state()} | {error, Reason:: term()} | {error, Reason:: term(), resource_state()}. {ok, resource_state()}
| {error, Reason :: term()}
| {error, Reason :: term(), resource_state()}.
call_health_check(InstId, Mod, ResourceState) -> call_health_check(InstId, Mod, ResourceState) ->
?SAFE_CALL(Mod:on_health_check(InstId, ResourceState)). ?SAFE_CALL(Mod:on_health_check(InstId, ResourceState)).
@ -289,58 +329,82 @@ call_stop(InstId, Mod, ResourceState) ->
check_config(ResourceType, Conf) -> check_config(ResourceType, Conf) ->
emqx_hocon:check(ResourceType, Conf). emqx_hocon:check(ResourceType, Conf).
-spec check_and_create(instance_id(), -spec check_and_create(
resource_group(), instance_id(),
resource_type(), resource_group(),
raw_resource_config()) -> resource_type(),
raw_resource_config()
) ->
{ok, resource_data() | 'already_created'} | {error, term()}. {ok, resource_data() | 'already_created'} | {error, term()}.
check_and_create(InstId, Group, ResourceType, RawConfig) -> check_and_create(InstId, Group, ResourceType, RawConfig) ->
check_and_create(InstId, Group, ResourceType, RawConfig, #{}). check_and_create(InstId, Group, ResourceType, RawConfig, #{}).
-spec check_and_create(instance_id(), -spec check_and_create(
resource_group(), instance_id(),
resource_type(), resource_group(),
raw_resource_config(), resource_type(),
create_opts()) -> raw_resource_config(),
create_opts()
) ->
{ok, resource_data() | 'already_created'} | {error, term()}. {ok, resource_data() | 'already_created'} | {error, term()}.
check_and_create(InstId, Group, ResourceType, RawConfig, Opts) -> check_and_create(InstId, Group, ResourceType, RawConfig, Opts) ->
check_and_do(ResourceType, RawConfig, check_and_do(
fun(InstConf) -> create(InstId, Group, ResourceType, InstConf, Opts) end). ResourceType,
RawConfig,
fun(InstConf) -> create(InstId, Group, ResourceType, InstConf, Opts) end
).
-spec check_and_create_local(instance_id(), -spec check_and_create_local(
resource_group(), instance_id(),
resource_type(), resource_group(),
raw_resource_config()) -> resource_type(),
raw_resource_config()
) ->
{ok, resource_data()} | {error, term()}. {ok, resource_data()} | {error, term()}.
check_and_create_local(InstId, Group, ResourceType, RawConfig) -> check_and_create_local(InstId, Group, ResourceType, RawConfig) ->
check_and_create_local(InstId, Group, ResourceType, RawConfig, #{}). check_and_create_local(InstId, Group, ResourceType, RawConfig, #{}).
-spec check_and_create_local(instance_id(), -spec check_and_create_local(
resource_group(), instance_id(),
resource_type(), resource_group(),
raw_resource_config(), resource_type(),
create_opts()) -> {ok, resource_data()} | {error, term()}. raw_resource_config(),
create_opts()
) -> {ok, resource_data()} | {error, term()}.
check_and_create_local(InstId, Group, ResourceType, RawConfig, Opts) -> check_and_create_local(InstId, Group, ResourceType, RawConfig, Opts) ->
check_and_do(ResourceType, RawConfig, check_and_do(
fun(InstConf) -> create_local(InstId, Group, ResourceType, InstConf, Opts) end). ResourceType,
RawConfig,
fun(InstConf) -> create_local(InstId, Group, ResourceType, InstConf, Opts) end
).
-spec check_and_recreate(instance_id(), -spec check_and_recreate(
resource_type(), instance_id(),
raw_resource_config(), resource_type(),
create_opts()) -> raw_resource_config(),
create_opts()
) ->
{ok, resource_data()} | {error, term()}. {ok, resource_data()} | {error, term()}.
check_and_recreate(InstId, ResourceType, RawConfig, Opts) -> check_and_recreate(InstId, ResourceType, RawConfig, Opts) ->
check_and_do(ResourceType, RawConfig, check_and_do(
fun(InstConf) -> recreate(InstId, ResourceType, InstConf, Opts) end). ResourceType,
RawConfig,
fun(InstConf) -> recreate(InstId, ResourceType, InstConf, Opts) end
).
-spec check_and_recreate_local(instance_id(), -spec check_and_recreate_local(
resource_type(), instance_id(),
raw_resource_config(), resource_type(),
create_opts()) -> raw_resource_config(),
create_opts()
) ->
{ok, resource_data()} | {error, term()}. {ok, resource_data()} | {error, term()}.
check_and_recreate_local(InstId, ResourceType, RawConfig, Opts) -> check_and_recreate_local(InstId, ResourceType, RawConfig, Opts) ->
check_and_do(ResourceType, RawConfig, check_and_do(
fun(InstConf) -> recreate_local(InstId, ResourceType, InstConf, Opts) end). ResourceType,
RawConfig,
fun(InstConf) -> recreate_local(InstId, ResourceType, InstConf, Opts) end
).
check_and_do(ResourceType, RawConfig, Do) when is_function(Do) -> check_and_do(ResourceType, RawConfig, Do) when is_function(Do) ->
case check_config(ResourceType, RawConfig) of case check_config(ResourceType, RawConfig) of
@ -355,8 +419,7 @@ filter_instances(Filter) ->
inc_metrics_funcs(InstId) -> inc_metrics_funcs(InstId) ->
OnFailed = [{fun emqx_plugin_libs_metrics:inc/3, [resource_metrics, InstId, failed]}], OnFailed = [{fun emqx_plugin_libs_metrics:inc/3, [resource_metrics, InstId, failed]}],
OnSucc = [ {fun emqx_plugin_libs_metrics:inc/3, [resource_metrics, InstId, success]} OnSucc = [{fun emqx_plugin_libs_metrics:inc/3, [resource_metrics, InstId, success]}],
],
{OnSucc, OnFailed}. {OnSucc, OnFailed}.
call_instance(InstId, Query) -> call_instance(InstId, Query) ->

View File

@ -15,23 +15,29 @@
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
-module(emqx_resource_health_check). -module(emqx_resource_health_check).
-export([ start_link/3 -export([
, create_checker/3 start_link/3,
, delete_checker/1 create_checker/3,
]). delete_checker/1
]).
-export([ start_health_check/3 -export([
, health_check_timeout_checker/4 start_health_check/3,
]). health_check_timeout_checker/4
]).
-define(SUP, emqx_resource_health_check_sup). -define(SUP, emqx_resource_health_check_sup).
-define(ID(NAME), {resource_health_check, NAME}). -define(ID(NAME), {resource_health_check, NAME}).
child_spec(Name, Sleep, Timeout) -> child_spec(Name, Sleep, Timeout) ->
#{id => ?ID(Name), #{
start => {?MODULE, start_link, [Name, Sleep, Timeout]}, id => ?ID(Name),
restart => transient, start => {?MODULE, start_link, [Name, Sleep, Timeout]},
shutdown => 5000, type => worker, modules => [?MODULE]}. restart => transient,
shutdown => 5000,
type => worker,
modules => [?MODULE]
}.
start_link(Name, Sleep, Timeout) -> start_link(Name, Sleep, Timeout) ->
Pid = proc_lib:spawn_link(?MODULE, start_health_check, [Name, Sleep, Timeout]), Pid = proc_lib:spawn_link(?MODULE, start_health_check, [Name, Sleep, Timeout]),
@ -42,19 +48,22 @@ create_checker(Name, Sleep, Timeout) ->
create_checker(Name, Sleep, Retry, Timeout) -> create_checker(Name, Sleep, Retry, Timeout) ->
case supervisor:start_child(?SUP, child_spec(Name, Sleep, Timeout)) of case supervisor:start_child(?SUP, child_spec(Name, Sleep, Timeout)) of
{ok, _} -> ok; {ok, _} ->
{error, already_present} -> ok; ok;
{error, already_present} ->
ok;
{error, {already_started, _}} when Retry == false -> {error, {already_started, _}} when Retry == false ->
ok = delete_checker(Name), ok = delete_checker(Name),
create_checker(Name, Sleep, true, Timeout); create_checker(Name, Sleep, true, Timeout);
Error -> Error Error ->
Error
end. end.
delete_checker(Name) -> delete_checker(Name) ->
case supervisor:terminate_child(?SUP, ?ID(Name)) of case supervisor:terminate_child(?SUP, ?ID(Name)) of
ok -> supervisor:delete_child(?SUP, ?ID(Name)); ok -> supervisor:delete_child(?SUP, ?ID(Name));
Error -> Error Error -> Error
end. end.
start_health_check(Name, Sleep, Timeout) -> start_health_check(Name, Sleep, Timeout) ->
Pid = self(), Pid = self(),
@ -63,13 +72,16 @@ start_health_check(Name, Sleep, Timeout) ->
health_check(Name) -> health_check(Name) ->
receive receive
{Pid, begin_health_check} -> {Pid, begin_health_check} ->
case emqx_resource:health_check(Name) of case emqx_resource:health_check(Name) of
ok -> ok ->
emqx_alarm:deactivate(Name); emqx_alarm:deactivate(Name);
{error, _} -> {error, _} ->
emqx_alarm:activate(Name, #{name => Name}, emqx_alarm:activate(
<<Name/binary, " health check failed">>) Name,
#{name => Name},
<<Name/binary, " health check failed">>
)
end, end,
Pid ! health_check_finish Pid ! health_check_finish
end, end,
@ -81,8 +93,11 @@ health_check_timeout_checker(Pid, Name, SleepTime, Timeout) ->
receive receive
health_check_finish -> timer:sleep(SleepTime) health_check_finish -> timer:sleep(SleepTime)
after Timeout -> after Timeout ->
emqx_alarm:activate(Name, #{name => Name}, emqx_alarm:activate(
<<Name/binary, " health check timeout">>), Name,
#{name => Name},
<<Name/binary, " health check timeout">>
),
emqx_resource:set_resource_status_connecting(Name), emqx_resource:set_resource_status_connecting(Name),
receive receive
health_check_finish -> timer:sleep(SleepTime) health_check_finish -> timer:sleep(SleepTime)

Some files were not shown because too many files have changed in this diff Show More