diff --git a/apps/emqx/include/emqx.hrl b/apps/emqx/include/emqx.hrl index f23fb8812..88e61f9b3 100644 --- a/apps/emqx/include/emqx.hrl +++ b/apps/emqx/include/emqx.hrl @@ -25,11 +25,12 @@ -define(ROUTE_SHARD, route_shard). -define(PERSISTENT_SESSION_SHARD, emqx_persistent_session_shard). --define(BOOT_SHARDS, [ ?ROUTE_SHARD - , ?COMMON_SHARD - , ?SHARED_SUB_SHARD - , ?PERSISTENT_SESSION_SHARD - ]). +-define(BOOT_SHARDS, [ + ?ROUTE_SHARD, + ?COMMON_SHARD, + ?SHARED_SUB_SHARD, + ?PERSISTENT_SESSION_SHARD +]). %% Banner %%-------------------------------------------------------------------- @@ -63,96 +64,99 @@ %% See 'Application Message' in MQTT Version 5.0 -record(message, { - %% Global unique message ID - id :: binary(), - %% Message QoS - qos = 0, - %% Message from - from :: atom() | binary(), - %% Message flags - flags = #{} :: emqx_types:flags(), - %% Message headers. May contain any metadata. e.g. the - %% protocol version number, username, peerhost or - %% the PUBLISH properties (MQTT 5.0). - headers = #{} :: emqx_types:headers(), - %% Topic that the message is published to - topic :: emqx_types:topic(), - %% Message Payload - payload :: emqx_types:payload(), - %% Timestamp (Unit: millisecond) - timestamp :: integer(), - %% not used so far, for future extension - extra = [] :: term() - }). + %% Global unique message ID + id :: binary(), + %% Message QoS + qos = 0, + %% Message from + from :: atom() | binary(), + %% Message flags + flags = #{} :: emqx_types:flags(), + %% Message headers. May contain any metadata. e.g. the + %% protocol version number, username, peerhost or + %% the PUBLISH properties (MQTT 5.0). + headers = #{} :: emqx_types:headers(), + %% Topic that the message is published to + topic :: emqx_types:topic(), + %% Message Payload + payload :: emqx_types:payload(), + %% Timestamp (Unit: millisecond) + timestamp :: integer(), + %% not used so far, for future extension + extra = [] :: term() +}). -record(delivery, { - sender :: pid(), %% Sender of the delivery - message :: #message{} %% The message delivered - }). + %% Sender of the delivery + sender :: pid(), + %% The message delivered + message :: #message{} +}). %%-------------------------------------------------------------------- %% Route %%-------------------------------------------------------------------- -record(route, { - topic :: binary(), - dest :: node() | {binary(), node()} | emqx_session:sessionID() - }). + topic :: binary(), + dest :: node() | {binary(), node()} | emqx_session:sessionID() +}). %%-------------------------------------------------------------------- %% Plugin %%-------------------------------------------------------------------- -record(plugin, { - name :: atom(), - dir :: string() | undefined, - descr :: string(), - vendor :: string() | undefined, - active = false :: boolean(), - info = #{} :: map() - }). + name :: atom(), + dir :: string() | undefined, + descr :: string(), + vendor :: string() | undefined, + active = false :: boolean(), + info = #{} :: map() +}). %%-------------------------------------------------------------------- %% Command %%-------------------------------------------------------------------- -record(command, { - name :: atom(), - action :: atom(), - args = [] :: list(), - opts = [] :: list(), - usage :: string(), - descr :: string() - }). + name :: atom(), + action :: atom(), + args = [] :: list(), + opts = [] :: list(), + usage :: string(), + descr :: string() +}). 
%%-------------------------------------------------------------------- %% Banned %%-------------------------------------------------------------------- -record(banned, { - who :: {clientid, binary()} - | {peerhost, inet:ip_address()} - | {username, binary()}, - by :: binary(), - reason :: binary(), - at :: integer(), - until :: integer() - }). + who :: + {clientid, binary()} + | {peerhost, inet:ip_address()} + | {username, binary()}, + by :: binary(), + reason :: binary(), + at :: integer(), + until :: integer() +}). %%-------------------------------------------------------------------- %% Authentication %%-------------------------------------------------------------------- --record(authenticator, - { id :: binary() - , provider :: module() - , enable :: boolean() - , state :: map() - }). +-record(authenticator, { + id :: binary(), + provider :: module(), + enable :: boolean(), + state :: map() +}). --record(chain, - { name :: atom() - , authenticators :: [#authenticator{}] - }). +-record(chain, { + name :: atom(), + authenticators :: [#authenticator{}] +}). -endif. diff --git a/apps/emqx/include/emqx_mqtt.hrl b/apps/emqx/include/emqx_mqtt.hrl index 2e30cf31c..6c546c789 100644 --- a/apps/emqx/include/emqx_mqtt.hrl +++ b/apps/emqx/include/emqx_mqtt.hrl @@ -23,8 +23,13 @@ %% MQTT SockOpts %%-------------------------------------------------------------------- --define(MQTT_SOCKOPTS, [binary, {packet, raw}, {reuseaddr, true}, - {backlog, 512}, {nodelay, true}]). +-define(MQTT_SOCKOPTS, [ + binary, + {packet, raw}, + {reuseaddr, true}, + {backlog, 512}, + {nodelay, true} +]). %%-------------------------------------------------------------------- %% MQTT Protocol Version and Names @@ -36,40 +41,45 @@ -define(MQTT_PROTO_V5, 5). -define(PROTOCOL_NAMES, [ - {?MQTT_SN_PROTO_V1, <<"MQTT-SN">>}, %% XXX:Compatible with emqx-sn plug-in + %% XXX:Compatible with emqx-sn plug-in + {?MQTT_SN_PROTO_V1, <<"MQTT-SN">>}, {?MQTT_PROTO_V3, <<"MQIsdp">>}, {?MQTT_PROTO_V4, <<"MQTT">>}, - {?MQTT_PROTO_V5, <<"MQTT">>}]). + {?MQTT_PROTO_V5, <<"MQTT">>} +]). %%-------------------------------------------------------------------- %% MQTT QoS Levels %%-------------------------------------------------------------------- --define(QOS_0, 0). %% At most once --define(QOS_1, 1). %% At least once --define(QOS_2, 2). %% Exactly once +%% At most once +-define(QOS_0, 0). +%% At least once +-define(QOS_1, 1). +%% Exactly once +-define(QOS_2, 2). -define(IS_QOS(I), (I >= ?QOS_0 andalso I =< ?QOS_2)). --define(QOS_I(Name), - begin - (case Name of - ?QOS_0 -> ?QOS_0; - qos0 -> ?QOS_0; - at_most_once -> ?QOS_0; - ?QOS_1 -> ?QOS_1; - qos1 -> ?QOS_1; - at_least_once -> ?QOS_1; - ?QOS_2 -> ?QOS_2; - qos2 -> ?QOS_2; - exactly_once -> ?QOS_2 - end) - end). +-define(QOS_I(Name), begin + (case Name of + ?QOS_0 -> ?QOS_0; + qos0 -> ?QOS_0; + at_most_once -> ?QOS_0; + ?QOS_1 -> ?QOS_1; + qos1 -> ?QOS_1; + at_least_once -> ?QOS_1; + ?QOS_2 -> ?QOS_2; + qos2 -> ?QOS_2; + exactly_once -> ?QOS_2 + end) +end). -define(IS_QOS_NAME(I), - (I =:= qos0 orelse I =:= at_most_once orelse - I =:= qos1 orelse I =:= at_least_once orelse - I =:= qos2 orelse I =:= exactly_once)). + (I =:= qos0 orelse I =:= at_most_once orelse + I =:= qos1 orelse I =:= at_least_once orelse + I =:= qos2 orelse I =:= exactly_once) +). %%-------------------------------------------------------------------- %% Maximum ClientId Length. @@ -81,83 +91,105 @@ %% MQTT Control Packet Types %%-------------------------------------------------------------------- --define(RESERVED, 0). 
%% Reserved --define(CONNECT, 1). %% Client request to connect to Server --define(CONNACK, 2). %% Server to Client: Connect acknowledgment --define(PUBLISH, 3). %% Publish message --define(PUBACK, 4). %% Publish acknowledgment --define(PUBREC, 5). %% Publish received (assured delivery part 1) --define(PUBREL, 6). %% Publish release (assured delivery part 2) --define(PUBCOMP, 7). %% Publish complete (assured delivery part 3) --define(SUBSCRIBE, 8). %% Client subscribe request --define(SUBACK, 9). %% Server Subscribe acknowledgment --define(UNSUBSCRIBE, 10). %% Unsubscribe request --define(UNSUBACK, 11). %% Unsubscribe acknowledgment --define(PINGREQ, 12). %% PING request --define(PINGRESP, 13). %% PING response --define(DISCONNECT, 14). %% Client or Server is disconnecting --define(AUTH, 15). %% Authentication exchange +%% Reserved +-define(RESERVED, 0). +%% Client request to connect to Server +-define(CONNECT, 1). +%% Server to Client: Connect acknowledgment +-define(CONNACK, 2). +%% Publish message +-define(PUBLISH, 3). +%% Publish acknowledgment +-define(PUBACK, 4). +%% Publish received (assured delivery part 1) +-define(PUBREC, 5). +%% Publish release (assured delivery part 2) +-define(PUBREL, 6). +%% Publish complete (assured delivery part 3) +-define(PUBCOMP, 7). +%% Client subscribe request +-define(SUBSCRIBE, 8). +%% Server Subscribe acknowledgment +-define(SUBACK, 9). +%% Unsubscribe request +-define(UNSUBSCRIBE, 10). +%% Unsubscribe acknowledgment +-define(UNSUBACK, 11). +%% PING request +-define(PINGREQ, 12). +%% PING response +-define(PINGRESP, 13). +%% Client or Server is disconnecting +-define(DISCONNECT, 14). +%% Authentication exchange +-define(AUTH, 15). %%-------------------------------------------------------------------- %% MQTT V3.1.1 Connect Return Codes %%-------------------------------------------------------------------- --define(CONNACK_ACCEPT, 0). %% Connection accepted --define(CONNACK_PROTO_VER, 1). %% Unacceptable protocol version --define(CONNACK_INVALID_ID, 2). %% Client Identifier is correct UTF-8 but not allowed by the Server --define(CONNACK_SERVER, 3). %% Server unavailable --define(CONNACK_CREDENTIALS, 4). %% Username or password is malformed --define(CONNACK_AUTH, 5). %% Client is not authorized to connect +%% Connection accepted +-define(CONNACK_ACCEPT, 0). +%% Unacceptable protocol version +-define(CONNACK_PROTO_VER, 1). +%% Client Identifier is correct UTF-8 but not allowed by the Server +-define(CONNACK_INVALID_ID, 2). +%% Server unavailable +-define(CONNACK_SERVER, 3). +%% Username or password is malformed +-define(CONNACK_CREDENTIALS, 4). +%% Client is not authorized to connect +-define(CONNACK_AUTH, 5). %%-------------------------------------------------------------------- %% MQTT V5.0 Reason Codes %%-------------------------------------------------------------------- --define(RC_SUCCESS, 16#00). --define(RC_NORMAL_DISCONNECTION, 16#00). --define(RC_GRANTED_QOS_0, 16#00). --define(RC_GRANTED_QOS_1, 16#01). --define(RC_GRANTED_QOS_2, 16#02). --define(RC_DISCONNECT_WITH_WILL_MESSAGE, 16#04). --define(RC_NO_MATCHING_SUBSCRIBERS, 16#10). --define(RC_NO_SUBSCRIPTION_EXISTED, 16#11). --define(RC_CONTINUE_AUTHENTICATION, 16#18). --define(RC_RE_AUTHENTICATE, 16#19). --define(RC_UNSPECIFIED_ERROR, 16#80). --define(RC_MALFORMED_PACKET, 16#81). --define(RC_PROTOCOL_ERROR, 16#82). --define(RC_IMPLEMENTATION_SPECIFIC_ERROR, 16#83). --define(RC_UNSUPPORTED_PROTOCOL_VERSION, 16#84). --define(RC_CLIENT_IDENTIFIER_NOT_VALID, 16#85). 
--define(RC_BAD_USER_NAME_OR_PASSWORD, 16#86). --define(RC_NOT_AUTHORIZED, 16#87). --define(RC_SERVER_UNAVAILABLE, 16#88). --define(RC_SERVER_BUSY, 16#89). --define(RC_BANNED, 16#8A). --define(RC_SERVER_SHUTTING_DOWN, 16#8B). --define(RC_BAD_AUTHENTICATION_METHOD, 16#8C). --define(RC_KEEP_ALIVE_TIMEOUT, 16#8D). --define(RC_SESSION_TAKEN_OVER, 16#8E). --define(RC_TOPIC_FILTER_INVALID, 16#8F). --define(RC_TOPIC_NAME_INVALID, 16#90). --define(RC_PACKET_IDENTIFIER_IN_USE, 16#91). --define(RC_PACKET_IDENTIFIER_NOT_FOUND, 16#92). --define(RC_RECEIVE_MAXIMUM_EXCEEDED, 16#93). --define(RC_TOPIC_ALIAS_INVALID, 16#94). --define(RC_PACKET_TOO_LARGE, 16#95). --define(RC_MESSAGE_RATE_TOO_HIGH, 16#96). --define(RC_QUOTA_EXCEEDED, 16#97). --define(RC_ADMINISTRATIVE_ACTION, 16#98). --define(RC_PAYLOAD_FORMAT_INVALID, 16#99). --define(RC_RETAIN_NOT_SUPPORTED, 16#9A). --define(RC_QOS_NOT_SUPPORTED, 16#9B). --define(RC_USE_ANOTHER_SERVER, 16#9C). --define(RC_SERVER_MOVED, 16#9D). --define(RC_SHARED_SUBSCRIPTIONS_NOT_SUPPORTED, 16#9E). --define(RC_CONNECTION_RATE_EXCEEDED, 16#9F). --define(RC_MAXIMUM_CONNECT_TIME, 16#A0). +-define(RC_SUCCESS, 16#00). +-define(RC_NORMAL_DISCONNECTION, 16#00). +-define(RC_GRANTED_QOS_0, 16#00). +-define(RC_GRANTED_QOS_1, 16#01). +-define(RC_GRANTED_QOS_2, 16#02). +-define(RC_DISCONNECT_WITH_WILL_MESSAGE, 16#04). +-define(RC_NO_MATCHING_SUBSCRIBERS, 16#10). +-define(RC_NO_SUBSCRIPTION_EXISTED, 16#11). +-define(RC_CONTINUE_AUTHENTICATION, 16#18). +-define(RC_RE_AUTHENTICATE, 16#19). +-define(RC_UNSPECIFIED_ERROR, 16#80). +-define(RC_MALFORMED_PACKET, 16#81). +-define(RC_PROTOCOL_ERROR, 16#82). +-define(RC_IMPLEMENTATION_SPECIFIC_ERROR, 16#83). +-define(RC_UNSUPPORTED_PROTOCOL_VERSION, 16#84). +-define(RC_CLIENT_IDENTIFIER_NOT_VALID, 16#85). +-define(RC_BAD_USER_NAME_OR_PASSWORD, 16#86). +-define(RC_NOT_AUTHORIZED, 16#87). +-define(RC_SERVER_UNAVAILABLE, 16#88). +-define(RC_SERVER_BUSY, 16#89). +-define(RC_BANNED, 16#8A). +-define(RC_SERVER_SHUTTING_DOWN, 16#8B). +-define(RC_BAD_AUTHENTICATION_METHOD, 16#8C). +-define(RC_KEEP_ALIVE_TIMEOUT, 16#8D). +-define(RC_SESSION_TAKEN_OVER, 16#8E). +-define(RC_TOPIC_FILTER_INVALID, 16#8F). +-define(RC_TOPIC_NAME_INVALID, 16#90). +-define(RC_PACKET_IDENTIFIER_IN_USE, 16#91). +-define(RC_PACKET_IDENTIFIER_NOT_FOUND, 16#92). +-define(RC_RECEIVE_MAXIMUM_EXCEEDED, 16#93). +-define(RC_TOPIC_ALIAS_INVALID, 16#94). +-define(RC_PACKET_TOO_LARGE, 16#95). +-define(RC_MESSAGE_RATE_TOO_HIGH, 16#96). +-define(RC_QUOTA_EXCEEDED, 16#97). +-define(RC_ADMINISTRATIVE_ACTION, 16#98). +-define(RC_PAYLOAD_FORMAT_INVALID, 16#99). +-define(RC_RETAIN_NOT_SUPPORTED, 16#9A). +-define(RC_QOS_NOT_SUPPORTED, 16#9B). +-define(RC_USE_ANOTHER_SERVER, 16#9C). +-define(RC_SERVER_MOVED, 16#9D). +-define(RC_SHARED_SUBSCRIPTIONS_NOT_SUPPORTED, 16#9E). +-define(RC_CONNECTION_RATE_EXCEEDED, 16#9F). +-define(RC_MAXIMUM_CONNECT_TIME, 16#A0). -define(RC_SUBSCRIPTION_IDENTIFIERS_NOT_SUPPORTED, 16#A1). --define(RC_WILDCARD_SUBSCRIPTIONS_NOT_SUPPORTED, 16#A2). +-define(RC_WILDCARD_SUBSCRIPTIONS_NOT_SUPPORTED, 16#A2). %%-------------------------------------------------------------------- %% Maximum MQTT Packet ID and Length @@ -180,367 +212,455 @@ %%-------------------------------------------------------------------- -record(mqtt_packet_header, { - type = ?RESERVED, - dup = false, - qos = ?QOS_0, - retain = false - }). + type = ?RESERVED, + dup = false, + qos = ?QOS_0, + retain = false +}). 
%%-------------------------------------------------------------------- %% MQTT Packets %%-------------------------------------------------------------------- --define(DEFAULT_SUBOPTS, #{rh => 0, %% Retain Handling - rap => 0, %% Retain as Publish - nl => 0, %% No Local - qos => 0 %% QoS - }). +%% Retain Handling +-define(DEFAULT_SUBOPTS, #{ + rh => 0, + %% Retain as Publish + rap => 0, + %% No Local + nl => 0, + %% QoS + qos => 0 +}). -record(mqtt_packet_connect, { - proto_name = <<"MQTT">>, - proto_ver = ?MQTT_PROTO_V4, - is_bridge = false, - clean_start = true, - will_flag = false, - will_qos = ?QOS_0, - will_retain = false, - keepalive = 0, - properties = #{}, - clientid = <<>>, - will_props = #{}, - will_topic = undefined, - will_payload = undefined, - username = undefined, - password = undefined - }). + proto_name = <<"MQTT">>, + proto_ver = ?MQTT_PROTO_V4, + is_bridge = false, + clean_start = true, + will_flag = false, + will_qos = ?QOS_0, + will_retain = false, + keepalive = 0, + properties = #{}, + clientid = <<>>, + will_props = #{}, + will_topic = undefined, + will_payload = undefined, + username = undefined, + password = undefined +}). -record(mqtt_packet_connack, { - ack_flags, - reason_code, - properties = #{} - }). + ack_flags, + reason_code, + properties = #{} +}). -record(mqtt_packet_publish, { - topic_name, - packet_id, - properties = #{} - }). + topic_name, + packet_id, + properties = #{} +}). -record(mqtt_packet_puback, { - packet_id, - reason_code, - properties = #{} - }). + packet_id, + reason_code, + properties = #{} +}). -record(mqtt_packet_subscribe, { - packet_id, - properties = #{}, - topic_filters - }). + packet_id, + properties = #{}, + topic_filters +}). -record(mqtt_packet_suback, { - packet_id, - properties = #{}, - reason_codes - }). + packet_id, + properties = #{}, + reason_codes +}). -record(mqtt_packet_unsubscribe, { - packet_id, - properties = #{}, - topic_filters - }). + packet_id, + properties = #{}, + topic_filters +}). -record(mqtt_packet_unsuback, { - packet_id, - properties = #{}, - reason_codes - }). + packet_id, + properties = #{}, + reason_codes +}). -record(mqtt_packet_disconnect, { - reason_code, - properties = #{} - }). + reason_code, + properties = #{} +}). -record(mqtt_packet_auth, { - reason_code, - properties = #{} - }). + reason_code, + properties = #{} +}). %%-------------------------------------------------------------------- %% MQTT Control Packet %%-------------------------------------------------------------------- -record(mqtt_packet, { - header :: #mqtt_packet_header{}, - variable :: #mqtt_packet_connect{} - | #mqtt_packet_connack{} - | #mqtt_packet_publish{} - | #mqtt_packet_puback{} - | #mqtt_packet_subscribe{} - | #mqtt_packet_suback{} - | #mqtt_packet_unsubscribe{} - | #mqtt_packet_unsuback{} - | #mqtt_packet_disconnect{} - | #mqtt_packet_auth{} - | pos_integer() - | undefined, - payload :: binary() | undefined - }). + header :: #mqtt_packet_header{}, + variable :: + #mqtt_packet_connect{} + | #mqtt_packet_connack{} + | #mqtt_packet_publish{} + | #mqtt_packet_puback{} + | #mqtt_packet_subscribe{} + | #mqtt_packet_suback{} + | #mqtt_packet_unsubscribe{} + | #mqtt_packet_unsuback{} + | #mqtt_packet_disconnect{} + | #mqtt_packet_auth{} + | pos_integer() + | undefined, + payload :: binary() | undefined +}). 
%%-------------------------------------------------------------------- %% MQTT Message Internal %%-------------------------------------------------------------------- -record(mqtt_msg, { - qos = ?QOS_0, - retain = false, - dup = false, - packet_id, - topic, - props, - payload - }). + qos = ?QOS_0, + retain = false, + dup = false, + packet_id, + topic, + props, + payload +}). %%-------------------------------------------------------------------- %% MQTT Packet Match %%-------------------------------------------------------------------- --define(CONNECT_PACKET(), - #mqtt_packet{header = #mqtt_packet_header{type = ?CONNECT}}). +-define(CONNECT_PACKET(), #mqtt_packet{header = #mqtt_packet_header{type = ?CONNECT}}). --define(CONNECT_PACKET(Var), - #mqtt_packet{header = #mqtt_packet_header{type = ?CONNECT}, - variable = Var}). +-define(CONNECT_PACKET(Var), #mqtt_packet{ + header = #mqtt_packet_header{type = ?CONNECT}, + variable = Var +}). --define(CONNACK_PACKET(ReasonCode), - #mqtt_packet{header = #mqtt_packet_header{type = ?CONNACK}, - variable = #mqtt_packet_connack{ack_flags = 0, - reason_code = ReasonCode} - }). +-define(CONNACK_PACKET(ReasonCode), #mqtt_packet{ + header = #mqtt_packet_header{type = ?CONNACK}, + variable = #mqtt_packet_connack{ + ack_flags = 0, + reason_code = ReasonCode + } +}). --define(CONNACK_PACKET(ReasonCode, SessPresent), - #mqtt_packet{header = #mqtt_packet_header{type = ?CONNACK}, - variable = #mqtt_packet_connack{ack_flags = SessPresent, - reason_code = ReasonCode} - }). +-define(CONNACK_PACKET(ReasonCode, SessPresent), #mqtt_packet{ + header = #mqtt_packet_header{type = ?CONNACK}, + variable = #mqtt_packet_connack{ + ack_flags = SessPresent, + reason_code = ReasonCode + } +}). --define(CONNACK_PACKET(ReasonCode, SessPresent, Properties), - #mqtt_packet{header = #mqtt_packet_header{type = ?CONNACK}, - variable = #mqtt_packet_connack{ack_flags = SessPresent, - reason_code = ReasonCode, - properties = Properties} - }). +-define(CONNACK_PACKET(ReasonCode, SessPresent, Properties), #mqtt_packet{ + header = #mqtt_packet_header{type = ?CONNACK}, + variable = #mqtt_packet_connack{ + ack_flags = SessPresent, + reason_code = ReasonCode, + properties = Properties + } +}). --define(AUTH_PACKET(), - #mqtt_packet{header = #mqtt_packet_header{type = ?AUTH}, - variable = #mqtt_packet_auth{reason_code = 0} - }). +-define(AUTH_PACKET(), #mqtt_packet{ + header = #mqtt_packet_header{type = ?AUTH}, + variable = #mqtt_packet_auth{reason_code = 0} +}). --define(AUTH_PACKET(ReasonCode), - #mqtt_packet{header = #mqtt_packet_header{type = ?AUTH}, - variable = #mqtt_packet_auth{reason_code = ReasonCode} - }). +-define(AUTH_PACKET(ReasonCode), #mqtt_packet{ + header = #mqtt_packet_header{type = ?AUTH}, + variable = #mqtt_packet_auth{reason_code = ReasonCode} +}). --define(AUTH_PACKET(ReasonCode, Properties), - #mqtt_packet{header = #mqtt_packet_header{type = ?AUTH}, - variable = #mqtt_packet_auth{reason_code = ReasonCode, - properties = Properties} - }). +-define(AUTH_PACKET(ReasonCode, Properties), #mqtt_packet{ + header = #mqtt_packet_header{type = ?AUTH}, + variable = #mqtt_packet_auth{ + reason_code = ReasonCode, + properties = Properties + } +}). --define(PUBLISH_PACKET(QoS), - #mqtt_packet{header = #mqtt_packet_header{type = ?PUBLISH, qos = QoS}}). +-define(PUBLISH_PACKET(QoS), #mqtt_packet{header = #mqtt_packet_header{type = ?PUBLISH, qos = QoS}}). 
--define(PUBLISH_PACKET(QoS, PacketId), - #mqtt_packet{header = #mqtt_packet_header{type = ?PUBLISH, - qos = QoS}, - variable = #mqtt_packet_publish{packet_id = PacketId} - }). +-define(PUBLISH_PACKET(QoS, PacketId), #mqtt_packet{ + header = #mqtt_packet_header{ + type = ?PUBLISH, + qos = QoS + }, + variable = #mqtt_packet_publish{packet_id = PacketId} +}). --define(PUBLISH_PACKET(QoS, Topic, PacketId), - #mqtt_packet{header = #mqtt_packet_header{type = ?PUBLISH, - qos = QoS}, - variable = #mqtt_packet_publish{topic_name = Topic, - packet_id = PacketId} - }). +-define(PUBLISH_PACKET(QoS, Topic, PacketId), #mqtt_packet{ + header = #mqtt_packet_header{ + type = ?PUBLISH, + qos = QoS + }, + variable = #mqtt_packet_publish{ + topic_name = Topic, + packet_id = PacketId + } +}). --define(PUBLISH_PACKET(QoS, Topic, PacketId, Payload), - #mqtt_packet{header = #mqtt_packet_header{type = ?PUBLISH, - qos = QoS}, - variable = #mqtt_packet_publish{topic_name = Topic, - packet_id = PacketId}, - payload = Payload - }). +-define(PUBLISH_PACKET(QoS, Topic, PacketId, Payload), #mqtt_packet{ + header = #mqtt_packet_header{ + type = ?PUBLISH, + qos = QoS + }, + variable = #mqtt_packet_publish{ + topic_name = Topic, + packet_id = PacketId + }, + payload = Payload +}). --define(PUBLISH_PACKET(QoS, Topic, PacketId, Properties, Payload), - #mqtt_packet{header = #mqtt_packet_header{type = ?PUBLISH, - qos = QoS}, - variable = #mqtt_packet_publish{topic_name = Topic, - packet_id = PacketId, - properties = Properties}, - payload = Payload - }). +-define(PUBLISH_PACKET(QoS, Topic, PacketId, Properties, Payload), #mqtt_packet{ + header = #mqtt_packet_header{ + type = ?PUBLISH, + qos = QoS + }, + variable = #mqtt_packet_publish{ + topic_name = Topic, + packet_id = PacketId, + properties = Properties + }, + payload = Payload +}). --define(PUBACK_PACKET(PacketId), - #mqtt_packet{header = #mqtt_packet_header{type = ?PUBACK}, - variable = #mqtt_packet_puback{packet_id = PacketId, - reason_code = 0} - }). +-define(PUBACK_PACKET(PacketId), #mqtt_packet{ + header = #mqtt_packet_header{type = ?PUBACK}, + variable = #mqtt_packet_puback{ + packet_id = PacketId, + reason_code = 0 + } +}). --define(PUBACK_PACKET(PacketId, ReasonCode), - #mqtt_packet{header = #mqtt_packet_header{type = ?PUBACK}, - variable = #mqtt_packet_puback{packet_id = PacketId, - reason_code = ReasonCode} - }). +-define(PUBACK_PACKET(PacketId, ReasonCode), #mqtt_packet{ + header = #mqtt_packet_header{type = ?PUBACK}, + variable = #mqtt_packet_puback{ + packet_id = PacketId, + reason_code = ReasonCode + } +}). --define(PUBACK_PACKET(PacketId, ReasonCode, Properties), - #mqtt_packet{header = #mqtt_packet_header{type = ?PUBACK}, - variable = #mqtt_packet_puback{packet_id = PacketId, - reason_code = ReasonCode, - properties = Properties} - }). +-define(PUBACK_PACKET(PacketId, ReasonCode, Properties), #mqtt_packet{ + header = #mqtt_packet_header{type = ?PUBACK}, + variable = #mqtt_packet_puback{ + packet_id = PacketId, + reason_code = ReasonCode, + properties = Properties + } +}). --define(PUBREC_PACKET(PacketId), - #mqtt_packet{header = #mqtt_packet_header{type = ?PUBREC}, - variable = #mqtt_packet_puback{packet_id = PacketId, - reason_code = 0} - }). +-define(PUBREC_PACKET(PacketId), #mqtt_packet{ + header = #mqtt_packet_header{type = ?PUBREC}, + variable = #mqtt_packet_puback{ + packet_id = PacketId, + reason_code = 0 + } +}). 
--define(PUBREC_PACKET(PacketId, ReasonCode), - #mqtt_packet{header = #mqtt_packet_header{type = ?PUBREC}, - variable = #mqtt_packet_puback{packet_id = PacketId, - reason_code = ReasonCode} - }). +-define(PUBREC_PACKET(PacketId, ReasonCode), #mqtt_packet{ + header = #mqtt_packet_header{type = ?PUBREC}, + variable = #mqtt_packet_puback{ + packet_id = PacketId, + reason_code = ReasonCode + } +}). --define(PUBREC_PACKET(PacketId, ReasonCode, Properties), - #mqtt_packet{header = #mqtt_packet_header{type = ?PUBREC}, - variable = #mqtt_packet_puback{packet_id = PacketId, - reason_code = ReasonCode, - properties = Properties} - }). +-define(PUBREC_PACKET(PacketId, ReasonCode, Properties), #mqtt_packet{ + header = #mqtt_packet_header{type = ?PUBREC}, + variable = #mqtt_packet_puback{ + packet_id = PacketId, + reason_code = ReasonCode, + properties = Properties + } +}). --define(PUBREL_PACKET(PacketId), - #mqtt_packet{header = #mqtt_packet_header{type = ?PUBREL, - qos = ?QOS_1}, - variable = #mqtt_packet_puback{packet_id = PacketId, - reason_code = 0} - }). +-define(PUBREL_PACKET(PacketId), #mqtt_packet{ + header = #mqtt_packet_header{ + type = ?PUBREL, + qos = ?QOS_1 + }, + variable = #mqtt_packet_puback{ + packet_id = PacketId, + reason_code = 0 + } +}). --define(PUBREL_PACKET(PacketId, ReasonCode), - #mqtt_packet{header = #mqtt_packet_header{type = ?PUBREL, - qos = ?QOS_1}, - variable = #mqtt_packet_puback{packet_id = PacketId, - reason_code = ReasonCode} - }). +-define(PUBREL_PACKET(PacketId, ReasonCode), #mqtt_packet{ + header = #mqtt_packet_header{ + type = ?PUBREL, + qos = ?QOS_1 + }, + variable = #mqtt_packet_puback{ + packet_id = PacketId, + reason_code = ReasonCode + } +}). --define(PUBREL_PACKET(PacketId, ReasonCode, Properties), - #mqtt_packet{header = #mqtt_packet_header{type = ?PUBREL, - qos = ?QOS_1}, - variable = #mqtt_packet_puback{packet_id = PacketId, - reason_code = ReasonCode, - properties = Properties} - }). +-define(PUBREL_PACKET(PacketId, ReasonCode, Properties), #mqtt_packet{ + header = #mqtt_packet_header{ + type = ?PUBREL, + qos = ?QOS_1 + }, + variable = #mqtt_packet_puback{ + packet_id = PacketId, + reason_code = ReasonCode, + properties = Properties + } +}). --define(PUBCOMP_PACKET(PacketId), - #mqtt_packet{header = #mqtt_packet_header{type = ?PUBCOMP}, - variable = #mqtt_packet_puback{packet_id = PacketId, - reason_code = 0} - }). +-define(PUBCOMP_PACKET(PacketId), #mqtt_packet{ + header = #mqtt_packet_header{type = ?PUBCOMP}, + variable = #mqtt_packet_puback{ + packet_id = PacketId, + reason_code = 0 + } +}). --define(PUBCOMP_PACKET(PacketId, ReasonCode), - #mqtt_packet{header = #mqtt_packet_header{type = ?PUBCOMP}, - variable = #mqtt_packet_puback{packet_id = PacketId, - reason_code = ReasonCode} - }). +-define(PUBCOMP_PACKET(PacketId, ReasonCode), #mqtt_packet{ + header = #mqtt_packet_header{type = ?PUBCOMP}, + variable = #mqtt_packet_puback{ + packet_id = PacketId, + reason_code = ReasonCode + } +}). --define(PUBCOMP_PACKET(PacketId, ReasonCode, Properties), - #mqtt_packet{header = #mqtt_packet_header{type = ?PUBCOMP}, - variable = #mqtt_packet_puback{packet_id = PacketId, - reason_code = ReasonCode, - properties = Properties} - }). +-define(PUBCOMP_PACKET(PacketId, ReasonCode, Properties), #mqtt_packet{ + header = #mqtt_packet_header{type = ?PUBCOMP}, + variable = #mqtt_packet_puback{ + packet_id = PacketId, + reason_code = ReasonCode, + properties = Properties + } +}). 
--define(SUBSCRIBE_PACKET(PacketId, TopicFilters), - #mqtt_packet{header = #mqtt_packet_header{type = ?SUBSCRIBE, - qos = ?QOS_1}, - variable = #mqtt_packet_subscribe{packet_id = PacketId, - topic_filters = TopicFilters} - }). +-define(SUBSCRIBE_PACKET(PacketId, TopicFilters), #mqtt_packet{ + header = #mqtt_packet_header{ + type = ?SUBSCRIBE, + qos = ?QOS_1 + }, + variable = #mqtt_packet_subscribe{ + packet_id = PacketId, + topic_filters = TopicFilters + } +}). --define(SUBSCRIBE_PACKET(PacketId, Properties, TopicFilters), - #mqtt_packet{header = #mqtt_packet_header{type = ?SUBSCRIBE, - qos = ?QOS_1}, - variable = #mqtt_packet_subscribe{packet_id = PacketId, - properties = Properties, - topic_filters = TopicFilters} - }). +-define(SUBSCRIBE_PACKET(PacketId, Properties, TopicFilters), #mqtt_packet{ + header = #mqtt_packet_header{ + type = ?SUBSCRIBE, + qos = ?QOS_1 + }, + variable = #mqtt_packet_subscribe{ + packet_id = PacketId, + properties = Properties, + topic_filters = TopicFilters + } +}). --define(SUBACK_PACKET(PacketId, ReasonCodes), - #mqtt_packet{header = #mqtt_packet_header{type = ?SUBACK}, - variable = #mqtt_packet_suback{packet_id = PacketId, - reason_codes = ReasonCodes} - }). +-define(SUBACK_PACKET(PacketId, ReasonCodes), #mqtt_packet{ + header = #mqtt_packet_header{type = ?SUBACK}, + variable = #mqtt_packet_suback{ + packet_id = PacketId, + reason_codes = ReasonCodes + } +}). --define(SUBACK_PACKET(PacketId, Properties, ReasonCodes), - #mqtt_packet{header = #mqtt_packet_header{type = ?SUBACK}, - variable = #mqtt_packet_suback{packet_id = PacketId, - properties = Properties, - reason_codes = ReasonCodes} - }). +-define(SUBACK_PACKET(PacketId, Properties, ReasonCodes), #mqtt_packet{ + header = #mqtt_packet_header{type = ?SUBACK}, + variable = #mqtt_packet_suback{ + packet_id = PacketId, + properties = Properties, + reason_codes = ReasonCodes + } +}). --define(UNSUBSCRIBE_PACKET(PacketId, TopicFilters), - #mqtt_packet{header = #mqtt_packet_header{type = ?UNSUBSCRIBE, - qos = ?QOS_1}, - variable = #mqtt_packet_unsubscribe{packet_id = PacketId, - topic_filters = TopicFilters} - }). +-define(UNSUBSCRIBE_PACKET(PacketId, TopicFilters), #mqtt_packet{ + header = #mqtt_packet_header{ + type = ?UNSUBSCRIBE, + qos = ?QOS_1 + }, + variable = #mqtt_packet_unsubscribe{ + packet_id = PacketId, + topic_filters = TopicFilters + } +}). --define(UNSUBSCRIBE_PACKET(PacketId, Properties, TopicFilters), - #mqtt_packet{header = #mqtt_packet_header{type = ?UNSUBSCRIBE, - qos = ?QOS_1}, - variable = #mqtt_packet_unsubscribe{packet_id = PacketId, - properties = Properties, - topic_filters = TopicFilters} - }). +-define(UNSUBSCRIBE_PACKET(PacketId, Properties, TopicFilters), #mqtt_packet{ + header = #mqtt_packet_header{ + type = ?UNSUBSCRIBE, + qos = ?QOS_1 + }, + variable = #mqtt_packet_unsubscribe{ + packet_id = PacketId, + properties = Properties, + topic_filters = TopicFilters + } +}). --define(UNSUBACK_PACKET(PacketId), - #mqtt_packet{header = #mqtt_packet_header{type = ?UNSUBACK}, - variable = #mqtt_packet_unsuback{packet_id = PacketId} - }). +-define(UNSUBACK_PACKET(PacketId), #mqtt_packet{ + header = #mqtt_packet_header{type = ?UNSUBACK}, + variable = #mqtt_packet_unsuback{packet_id = PacketId} +}). --define(UNSUBACK_PACKET(PacketId, ReasonCodes), - #mqtt_packet{header = #mqtt_packet_header{type = ?UNSUBACK}, - variable = #mqtt_packet_unsuback{packet_id = PacketId, - reason_codes = ReasonCodes} - }). 
+-define(UNSUBACK_PACKET(PacketId, ReasonCodes), #mqtt_packet{ + header = #mqtt_packet_header{type = ?UNSUBACK}, + variable = #mqtt_packet_unsuback{ + packet_id = PacketId, + reason_codes = ReasonCodes + } +}). --define(UNSUBACK_PACKET(PacketId, Properties, ReasonCodes), - #mqtt_packet{header = #mqtt_packet_header{type = ?UNSUBACK}, - variable = #mqtt_packet_unsuback{packet_id = PacketId, - properties = Properties, - reason_codes = ReasonCodes} - }). +-define(UNSUBACK_PACKET(PacketId, Properties, ReasonCodes), #mqtt_packet{ + header = #mqtt_packet_header{type = ?UNSUBACK}, + variable = #mqtt_packet_unsuback{ + packet_id = PacketId, + properties = Properties, + reason_codes = ReasonCodes + } +}). --define(DISCONNECT_PACKET(), - #mqtt_packet{header = #mqtt_packet_header{type = ?DISCONNECT}, - variable = #mqtt_packet_disconnect{reason_code = 0} - }). +-define(DISCONNECT_PACKET(), #mqtt_packet{ + header = #mqtt_packet_header{type = ?DISCONNECT}, + variable = #mqtt_packet_disconnect{reason_code = 0} +}). --define(DISCONNECT_PACKET(ReasonCode), - #mqtt_packet{header = #mqtt_packet_header{type = ?DISCONNECT}, - variable = #mqtt_packet_disconnect{reason_code = ReasonCode} - }). +-define(DISCONNECT_PACKET(ReasonCode), #mqtt_packet{ + header = #mqtt_packet_header{type = ?DISCONNECT}, + variable = #mqtt_packet_disconnect{reason_code = ReasonCode} +}). --define(DISCONNECT_PACKET(ReasonCode, Properties), - #mqtt_packet{header = #mqtt_packet_header{type = ?DISCONNECT}, - variable = #mqtt_packet_disconnect{reason_code = ReasonCode, - properties = Properties} - }). +-define(DISCONNECT_PACKET(ReasonCode, Properties), #mqtt_packet{ + header = #mqtt_packet_header{type = ?DISCONNECT}, + variable = #mqtt_packet_disconnect{ + reason_code = ReasonCode, + properties = Properties + } +}). -define(PACKET(Type), #mqtt_packet{header = #mqtt_packet_header{type = Type}}). -define(SHARE, "$share"). -define(SHARE(Group, Topic), emqx_topic:join([<<?SHARE>>, Group, Topic])). --define(IS_SHARE(Topic), case Topic of <<?SHARE, "/", _/binary>> -> true; _ -> false end). +-define(IS_SHARE(Topic), + case Topic of + <<?SHARE, "/", _/binary>> -> true; + _ -> false + end +). -define(FRAME_PARSE_ERROR(Reason), {frame_parse_error, Reason}). -define(FRAME_SERIALIZE_ERROR(Reason), {frame_serialize_error, Reason}). diff --git a/apps/emqx/include/emqx_placeholder.hrl b/apps/emqx/include/emqx_placeholder.hrl index 0d743565b..db7576b67 100644 --- a/apps/emqx/include/emqx_placeholder.hrl +++ b/apps/emqx/include/emqx_placeholder.hrl @@ -17,99 +17,99 @@ -ifndef(EMQX_PLACEHOLDER_HRL). -define(EMQX_PLACEHOLDER_HRL, true). --define(PH(Type), <<"${", Type/binary, "}">> ). +-define(PH(Type), <<"${", Type/binary, "}">>). %% action: publish/subscribe/all --define(PH_ACTION, <<"${action}">> ). +-define(PH_ACTION, <<"${action}">>). %% cert --define(PH_CERT_SUBJECT, <<"${cert_subject}">> ). --define(PH_CERT_CN_NAME, <<"${cert_common_name}">> ). +-define(PH_CERT_SUBJECT, <<"${cert_subject}">>). +-define(PH_CERT_CN_NAME, <<"${cert_common_name}">>). %% MQTT --define(PH_PASSWORD, <<"${password}">> ). --define(PH_CLIENTID, <<"${clientid}">> ). --define(PH_FROM_CLIENTID, <<"${from_clientid}">> ). --define(PH_USERNAME, <<"${username}">> ). --define(PH_FROM_USERNAME, <<"${from_username}">> ). --define(PH_TOPIC, <<"${topic}">> ). +-define(PH_PASSWORD, <<"${password}">>). +-define(PH_CLIENTID, <<"${clientid}">>). +-define(PH_FROM_CLIENTID, <<"${from_clientid}">>). +-define(PH_USERNAME, <<"${username}">>). +-define(PH_FROM_USERNAME, <<"${from_username}">>). +-define(PH_TOPIC, <<"${topic}">>).
%% MQTT payload --define(PH_PAYLOAD, <<"${payload}">> ). +-define(PH_PAYLOAD, <<"${payload}">>). %% client IPAddress --define(PH_PEERHOST, <<"${peerhost}">> ). +-define(PH_PEERHOST, <<"${peerhost}">>). %% ip & port --define(PH_HOST, <<"${host}">> ). --define(PH_PORT, <<"${port}">> ). +-define(PH_HOST, <<"${host}">>). +-define(PH_PORT, <<"${port}">>). %% Enumeration of message QoS 0,1,2 --define(PH_QOS, <<"${qos}">> ). --define(PH_FLAGS, <<"${flags}">> ). +-define(PH_QOS, <<"${qos}">>). +-define(PH_FLAGS, <<"${flags}">>). %% Additional data related to process within the MQTT message --define(PH_HEADERS, <<"${headers}">> ). +-define(PH_HEADERS, <<"${headers}">>). %% protocol name --define(PH_PROTONAME, <<"${proto_name}">> ). +-define(PH_PROTONAME, <<"${proto_name}">>). %% protocol version --define(PH_PROTOVER, <<"${proto_ver}">> ). +-define(PH_PROTOVER, <<"${proto_ver}">>). %% MQTT keepalive interval --define(PH_KEEPALIVE, <<"${keepalive}">> ). +-define(PH_KEEPALIVE, <<"${keepalive}">>). %% MQTT clean_start --define(PH_CLEAR_START, <<"${clean_start}">> ). +-define(PH_CLEAR_START, <<"${clean_start}">>). %% MQTT Session Expiration time --define(PH_EXPIRY_INTERVAL, <<"${expiry_interval}">> ). +-define(PH_EXPIRY_INTERVAL, <<"${expiry_interval}">>). %% Time when PUBLISH message reaches Broker (ms) --define(PH_PUBLISH_RECEIVED_AT, <<"${publish_received_at}">>). +-define(PH_PUBLISH_RECEIVED_AT, <<"${publish_received_at}">>). %% Mountpoint for bridging messages --define(PH_MOUNTPOINT, <<"${mountpoint}">> ). +-define(PH_MOUNTPOINT, <<"${mountpoint}">>). %% IPAddress and Port of terminal --define(PH_PEERNAME, <<"${peername}">> ). +-define(PH_PEERNAME, <<"${peername}">>). %% IPAddress and Port listened by emqx --define(PH_SOCKNAME, <<"${sockname}">> ). +-define(PH_SOCKNAME, <<"${sockname}">>). %% whether it is MQTT bridge connection --define(PH_IS_BRIDGE, <<"${is_bridge}">> ). +-define(PH_IS_BRIDGE, <<"${is_bridge}">>). %% Terminal connection completion time (s) --define(PH_CONNECTED_AT, <<"${connected_at}">> ). +-define(PH_CONNECTED_AT, <<"${connected_at}">>). %% Event trigger time(millisecond) --define(PH_TIMESTAMP, <<"${timestamp}">> ). +-define(PH_TIMESTAMP, <<"${timestamp}">>). %% Terminal disconnection completion time (s) --define(PH_DISCONNECTED_AT, <<"${disconnected_at}">> ). +-define(PH_DISCONNECTED_AT, <<"${disconnected_at}">>). --define(PH_NODE, <<"${node}">> ). --define(PH_REASON, <<"${reason}">> ). +-define(PH_NODE, <<"${node}">>). +-define(PH_REASON, <<"${reason}">>). --define(PH_ENDPOINT_NAME, <<"${endpoint_name}">> ). +-define(PH_ENDPOINT_NAME, <<"${endpoint_name}">>). %% sync change these place holder with binary def. --define(PH_S_ACTION, "${action}" ). --define(PH_S_CERT_SUBJECT, "${cert_subject}" ). --define(PH_S_CERT_CN_NAME, "${cert_common_name}" ). --define(PH_S_PASSWORD, "${password}" ). --define(PH_S_CLIENTID, "${clientid}" ). --define(PH_S_FROM_CLIENTID, "${from_clientid}" ). --define(PH_S_USERNAME, "${username}" ). --define(PH_S_FROM_USERNAME, "${from_username}" ). --define(PH_S_TOPIC, "${topic}" ). --define(PH_S_PAYLOAD, "${payload}" ). --define(PH_S_PEERHOST, "${peerhost}" ). --define(PH_S_HOST, "${host}" ). --define(PH_S_PORT, "${port}" ). --define(PH_S_QOS, "${qos}" ). --define(PH_S_FLAGS, "${flags}" ). --define(PH_S_HEADERS, "${headers}" ). --define(PH_S_PROTONAME, "${proto_name}" ). --define(PH_S_PROTOVER, "${proto_ver}" ). --define(PH_S_KEEPALIVE, "${keepalive}" ). --define(PH_S_CLEAR_START, "${clean_start}" ). --define(PH_S_EXPIRY_INTERVAL, "${expiry_interval}" ). 
--define(PH_S_PUBLISH_RECEIVED_AT, "${publish_received_at}" ). --define(PH_S_MOUNTPOINT, "${mountpoint}" ). --define(PH_S_PEERNAME, "${peername}" ). --define(PH_S_SOCKNAME, "${sockname}" ). --define(PH_S_IS_BRIDGE, "${is_bridge}" ). --define(PH_S_CONNECTED_AT, "${connected_at}" ). --define(PH_S_TIMESTAMP, "${timestamp}" ). --define(PH_S_DISCONNECTED_AT, "${disconnected_at}" ). --define(PH_S_NODE, "${node}" ). --define(PH_S_REASON, "${reason}" ). --define(PH_S_ENDPOINT_NAME, "${endpoint_name}" ). +-define(PH_S_ACTION, "${action}"). +-define(PH_S_CERT_SUBJECT, "${cert_subject}"). +-define(PH_S_CERT_CN_NAME, "${cert_common_name}"). +-define(PH_S_PASSWORD, "${password}"). +-define(PH_S_CLIENTID, "${clientid}"). +-define(PH_S_FROM_CLIENTID, "${from_clientid}"). +-define(PH_S_USERNAME, "${username}"). +-define(PH_S_FROM_USERNAME, "${from_username}"). +-define(PH_S_TOPIC, "${topic}"). +-define(PH_S_PAYLOAD, "${payload}"). +-define(PH_S_PEERHOST, "${peerhost}"). +-define(PH_S_HOST, "${host}"). +-define(PH_S_PORT, "${port}"). +-define(PH_S_QOS, "${qos}"). +-define(PH_S_FLAGS, "${flags}"). +-define(PH_S_HEADERS, "${headers}"). +-define(PH_S_PROTONAME, "${proto_name}"). +-define(PH_S_PROTOVER, "${proto_ver}"). +-define(PH_S_KEEPALIVE, "${keepalive}"). +-define(PH_S_CLEAR_START, "${clean_start}"). +-define(PH_S_EXPIRY_INTERVAL, "${expiry_interval}"). +-define(PH_S_PUBLISH_RECEIVED_AT, "${publish_received_at}"). +-define(PH_S_MOUNTPOINT, "${mountpoint}"). +-define(PH_S_PEERNAME, "${peername}"). +-define(PH_S_SOCKNAME, "${sockname}"). +-define(PH_S_IS_BRIDGE, "${is_bridge}"). +-define(PH_S_CONNECTED_AT, "${connected_at}"). +-define(PH_S_TIMESTAMP, "${timestamp}"). +-define(PH_S_DISCONNECTED_AT, "${disconnected_at}"). +-define(PH_S_NODE, "${node}"). +-define(PH_S_REASON, "${reason}"). +-define(PH_S_ENDPOINT_NAME, "${endpoint_name}"). -endif. diff --git a/apps/emqx/include/http_api.hrl b/apps/emqx/include/http_api.hrl index 1664ed951..870f25c18 100644 --- a/apps/emqx/include/http_api.hrl +++ b/apps/emqx/include/http_api.hrl @@ -15,66 +15,66 @@ %%-------------------------------------------------------------------- %% Bad Request --define(BAD_REQUEST, 'BAD_REQUEST'). --define(NOT_MATCH, 'NOT_MATCH'). +-define(BAD_REQUEST, 'BAD_REQUEST'). +-define(NOT_MATCH, 'NOT_MATCH'). --define(ALREADY_EXISTS, 'ALREADY_EXISTS'). --define(BAD_CONFIG_SCHEMA, 'BAD_CONFIG_SCHEMA'). --define(BAD_LISTENER_ID, 'BAD_LISTENER_ID'). --define(BAD_NODE_NAME, 'BAD_NODE_NAME'). --define(BAD_RPC, 'BAD_RPC'). --define(BAD_TOPIC, 'BAD_TOPIC'). --define(EXCEED_LIMIT, 'EXCEED_LIMIT'). --define(INVALID_PARAMETER, 'INVALID_PARAMETER'). --define(CONFLICT, 'CONFLICT'). --define(NO_DEFAULT_VALUE, 'NO_DEFAULT_VALUE'). --define(DEPENDENCY_EXISTS, 'DEPENDENCY_EXISTS'). --define(MESSAGE_ID_SCHEMA_ERROR, 'MESSAGE_ID_SCHEMA_ERROR'). --define(INVALID_ID, 'INVALID_ID'). +-define(ALREADY_EXISTS, 'ALREADY_EXISTS'). +-define(BAD_CONFIG_SCHEMA, 'BAD_CONFIG_SCHEMA'). +-define(BAD_LISTENER_ID, 'BAD_LISTENER_ID'). +-define(BAD_NODE_NAME, 'BAD_NODE_NAME'). +-define(BAD_RPC, 'BAD_RPC'). +-define(BAD_TOPIC, 'BAD_TOPIC'). +-define(EXCEED_LIMIT, 'EXCEED_LIMIT'). +-define(INVALID_PARAMETER, 'INVALID_PARAMETER'). +-define(CONFLICT, 'CONFLICT'). +-define(NO_DEFAULT_VALUE, 'NO_DEFAULT_VALUE'). +-define(DEPENDENCY_EXISTS, 'DEPENDENCY_EXISTS'). +-define(MESSAGE_ID_SCHEMA_ERROR, 'MESSAGE_ID_SCHEMA_ERROR'). +-define(INVALID_ID, 'INVALID_ID'). %% Resource Not Found --define(NOT_FOUND, 'NOT_FOUND'). --define(CLIENTID_NOT_FOUND, 'CLIENTID_NOT_FOUND'). 
--define(CLIENT_NOT_FOUND, 'CLIENT_NOT_FOUND'). --define(MESSAGE_ID_NOT_FOUND, 'MESSAGE_ID_NOT_FOUND'). --define(RESOURCE_NOT_FOUND, 'RESOURCE_NOT_FOUND'). --define(TOPIC_NOT_FOUND, 'TOPIC_NOT_FOUND'). --define(USER_NOT_FOUND, 'USER_NOT_FOUND'). +-define(NOT_FOUND, 'NOT_FOUND'). +-define(CLIENTID_NOT_FOUND, 'CLIENTID_NOT_FOUND'). +-define(CLIENT_NOT_FOUND, 'CLIENT_NOT_FOUND'). +-define(MESSAGE_ID_NOT_FOUND, 'MESSAGE_ID_NOT_FOUND'). +-define(RESOURCE_NOT_FOUND, 'RESOURCE_NOT_FOUND'). +-define(TOPIC_NOT_FOUND, 'TOPIC_NOT_FOUND'). +-define(USER_NOT_FOUND, 'USER_NOT_FOUND'). %% Internal error --define(INTERNAL_ERROR, 'INTERNAL_ERROR'). --define(SOURCE_ERROR, 'SOURCE_ERROR'). --define(UPDATE_FAILED, 'UPDATE_FAILED'). --define(REST_FAILED, 'REST_FAILED'). --define(CLIENT_NOT_RESPONSE, 'CLIENT_NOT_RESPONSE'). +-define(INTERNAL_ERROR, 'INTERNAL_ERROR'). +-define(SOURCE_ERROR, 'SOURCE_ERROR'). +-define(UPDATE_FAILED, 'UPDATE_FAILED'). +-define(REST_FAILED, 'REST_FAILED'). +-define(CLIENT_NOT_RESPONSE, 'CLIENT_NOT_RESPONSE'). %% All codes --define(ERROR_CODES, - [ {'BAD_REQUEST', <<"Request parameters are not legal">>} - , {'NOT_MATCH', <<"Conditions are not matched">>} - , {'ALREADY_EXISTS', <<"Resource already existed">>} - , {'BAD_CONFIG_SCHEMA', <<"Configuration data is not legal">>} - , {'BAD_LISTENER_ID', <<"Bad listener ID">>} - , {'BAD_NODE_NAME', <<"Bad Node Name">>} - , {'BAD_RPC', <<"RPC Failed. Check the cluster status and the requested node status">>} - , {'BAD_TOPIC', <<"Topic syntax error, Topic needs to comply with the MQTT protocol standard">>} - , {'EXCEED_LIMIT', <<"Create resources that exceed the maximum limit or minimum limit">>} - , {'INVALID_PARAMETER', <<"Request parameters is not legal and exceeds the boundary value">>} - , {'CONFLICT', <<"Conflicting request resources">>} - , {'NO_DEFAULT_VALUE', <<"Request parameters do not use default values">>} - , {'DEPENDENCY_EXISTS', <<"Resource is dependent by another resource">>} - , {'MESSAGE_ID_SCHEMA_ERROR', <<"Message ID parsing error">>} - , {'INVALID_ID', <<"Bad ID schema">>} - , {'MESSAGE_ID_NOT_FOUND', <<"Message ID does not exist">>} - , {'NOT_FOUND', <<"Resource was not found or does not exist">>} - , {'CLIENTID_NOT_FOUND', <<"Client ID was not found or does not exist">>} - , {'CLIENT_NOT_FOUND', <<"Client was not found or does not exist(usually not a MQTT client)">>} - , {'RESOURCE_NOT_FOUND', <<"Resource not found">>} - , {'TOPIC_NOT_FOUND', <<"Topic not found">>} - , {'USER_NOT_FOUND', <<"User not found">>} - , {'INTERNAL_ERROR', <<"Server inter error">>} - , {'SOURCE_ERROR', <<"Source error">>} - , {'UPDATE_FAILED', <<"Update failed">>} - , {'REST_FAILED', <<"Reset source or config failed">>} - , {'CLIENT_NOT_RESPONSE', <<"Client not responding">>} - ]). +-define(ERROR_CODES, [ + {'BAD_REQUEST', <<"Request parameters are not legal">>}, + {'NOT_MATCH', <<"Conditions are not matched">>}, + {'ALREADY_EXISTS', <<"Resource already existed">>}, + {'BAD_CONFIG_SCHEMA', <<"Configuration data is not legal">>}, + {'BAD_LISTENER_ID', <<"Bad listener ID">>}, + {'BAD_NODE_NAME', <<"Bad Node Name">>}, + {'BAD_RPC', <<"RPC Failed. 
Check the cluster status and the requested node status">>}, + {'BAD_TOPIC', <<"Topic syntax error, Topic needs to comply with the MQTT protocol standard">>}, + {'EXCEED_LIMIT', <<"Create resources that exceed the maximum limit or minimum limit">>}, + {'INVALID_PARAMETER', <<"Request parameters is not legal and exceeds the boundary value">>}, + {'CONFLICT', <<"Conflicting request resources">>}, + {'NO_DEFAULT_VALUE', <<"Request parameters do not use default values">>}, + {'DEPENDENCY_EXISTS', <<"Resource is dependent by another resource">>}, + {'MESSAGE_ID_SCHEMA_ERROR', <<"Message ID parsing error">>}, + {'INVALID_ID', <<"Bad ID schema">>}, + {'MESSAGE_ID_NOT_FOUND', <<"Message ID does not exist">>}, + {'NOT_FOUND', <<"Resource was not found or does not exist">>}, + {'CLIENTID_NOT_FOUND', <<"Client ID was not found or does not exist">>}, + {'CLIENT_NOT_FOUND', <<"Client was not found or does not exist(usually not a MQTT client)">>}, + {'RESOURCE_NOT_FOUND', <<"Resource not found">>}, + {'TOPIC_NOT_FOUND', <<"Topic not found">>}, + {'USER_NOT_FOUND', <<"User not found">>}, + {'INTERNAL_ERROR', <<"Server inter error">>}, + {'SOURCE_ERROR', <<"Source error">>}, + {'UPDATE_FAILED', <<"Update failed">>}, + {'REST_FAILED', <<"Reset source or config failed">>}, + {'CLIENT_NOT_RESPONSE', <<"Client not responding">>} +]). diff --git a/apps/emqx/include/logger.hrl b/apps/emqx/include/logger.hrl index 2d039c4e1..05e5359b5 100644 --- a/apps/emqx/include/logger.hrl +++ b/apps/emqx/include/logger.hrl @@ -19,34 +19,43 @@ %% structured logging -define(SLOG(Level, Data), - ?SLOG(Level, Data, #{})). + ?SLOG(Level, Data, #{}) +). %% structured logging, meta is for handler's filter. -define(SLOG(Level, Data, Meta), -%% check 'allow' here, only evaluate Data and Meta when necessary + %% check 'allow' here, only evaluate Data and Meta when necessary case logger:allow(Level, ?MODULE) of true -> - logger:log(Level, (Data), (Meta#{ mfa => {?MODULE, ?FUNCTION_NAME, ?FUNCTION_ARITY} - , line => ?LINE - })); + logger:log( + Level, + (Data), + (Meta#{ + mfa => {?MODULE, ?FUNCTION_NAME, ?FUNCTION_ARITY}, + line => ?LINE + }) + ); false -> ok - end). + end +). -define(TRACE_FILTER, emqx_trace_filter). %% Only evaluate when necessary %% Always debug the trace events. --define(TRACE(Tag, Msg, Meta), - begin +-define(TRACE(Tag, Msg, Meta), begin case persistent_term:get(?TRACE_FILTER, undefined) of undefined -> ok; [] -> ok; List -> emqx_trace:log(List, Msg, Meta#{trace_tag => Tag}) end, - ?SLOG(debug, (emqx_trace_formatter:format_meta(Meta))#{msg => Msg, tag => Tag}, - #{is_trace => false}) - end). + ?SLOG( + debug, + (emqx_trace_formatter:format_meta(Meta))#{msg => Msg, tag => Tag}, + #{is_trace => false} + ) +end). %% print to 'user' group leader -define(ULOG(Fmt, Args), io:format(user, Fmt, Args)). diff --git a/apps/emqx/include/types.hrl b/apps/emqx/include/types.hrl index 066696b0a..a36ad28b2 100644 --- a/apps/emqx/include/types.hrl +++ b/apps/emqx/include/types.hrl @@ -14,13 +14,12 @@ %% limitations under the License. %%-------------------------------------------------------------------- --type(maybe(T) :: undefined | T). +-type maybe(T) :: undefined | T. --type(startlink_ret() :: {ok, pid()} | ignore | {error, term()}). +-type startlink_ret() :: {ok, pid()} | ignore | {error, term()}. --type(ok_or_error(Reason) :: ok | {error, Reason}). +-type ok_or_error(Reason) :: ok | {error, Reason}. --type(ok_or_error(Value, Reason) :: {ok, Value} | {error, Reason}). - --type(mfargs() :: {module(), atom(), [term()]}). 
+-type ok_or_error(Value, Reason) :: {ok, Value} | {error, Reason}. +-type mfargs() :: {module(), atom(), [term()]}. diff --git a/apps/emqx/rebar.config b/apps/emqx/rebar.config index 96190f7fb..fdea25e7e 100644 --- a/apps/emqx/rebar.config +++ b/apps/emqx/rebar.config @@ -1,42 +1,53 @@ %% -*- mode: erlang -*- -{erl_opts, [warn_unused_vars,warn_shadow_vars,warn_unused_import, - warn_obsolete_guard,compressed]}. +{erl_opts, [ + warn_unused_vars, + warn_shadow_vars, + warn_unused_import, + warn_obsolete_guard, + compressed +]}. -{xref_checks,[undefined_function_calls,undefined_functions,locals_not_used, - deprecated_function_calls,warnings_as_errors,deprecated_functions]}. +{xref_checks, [ + undefined_function_calls, + undefined_functions, + locals_not_used, + deprecated_function_calls, + warnings_as_errors, + deprecated_functions +]}. %% Deps here may duplicate with emqx.git root level rebar.config %% but there not be any descrpancy. %% This rebar.config is necessary because the app may be used as a %% `git_subdir` dependency in other projects. -{deps, - [ {lc, {git, "https://github.com/qzhuyan/lc.git", {tag, "0.1.2"}}} - , {gproc, {git, "https://github.com/uwiger/gproc", {tag, "0.8.0"}}} - , {typerefl, {git, "https://github.com/ieQu1/typerefl", {tag, "0.8.6"}}} - , {jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}} - , {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}} - , {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.1"}}} - , {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.12.2"}}} - , {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}} - , {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.26.3"}}} - , {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}} - , {recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}} - , {snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "0.18.0"}}} - ]}. +{deps, [ + {lc, {git, "https://github.com/qzhuyan/lc.git", {tag, "0.1.2"}}}, + {gproc, {git, "https://github.com/uwiger/gproc", {tag, "0.8.0"}}}, + {typerefl, {git, "https://github.com/ieQu1/typerefl", {tag, "0.8.6"}}}, + {jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}}, + {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}}, + {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.1"}}}, + {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.12.2"}}}, + {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}}, + {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.26.3"}}}, + {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}}, + {recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}}, + {snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "0.18.0"}}} +]}. {plugins, [{rebar3_proper, "0.12.1"}]}. {extra_src_dirs, [{"etc", [recursive]}]}. {profiles, [ - {test, - [{deps, - [ {meck, "0.9.2"} - , {proper, "1.4.0"} - , {bbmustache,"1.10.0"} - , {emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.4.8"}}} - ]}, - {extra_src_dirs, [{"test",[recursive]}]} - ]} + {test, [ + {deps, [ + {meck, "0.9.2"}, + {proper, "1.4.0"}, + {bbmustache, "1.10.0"}, + {emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.4.8"}}} + ]}, + {extra_src_dirs, [{"test", [recursive]}]} + ]} ]}. {dialyzer, [ @@ -46,5 +57,6 @@ {plt_apps, all_apps}, {plt_extra_apps, [hocon]}, {statistics, true} - ] -}. +]}. + +{project_plugins, [erlfmt]}. 
diff --git a/apps/emqx/src/emqx.app.src b/apps/emqx/src/emqx.app.src index 41fc14352..3621b3f0f 100644 --- a/apps/emqx/src/emqx.app.src +++ b/apps/emqx/src/emqx.app.src @@ -1,28 +1,31 @@ %% -*- mode: erlang -*- -{application, emqx, - [{id, "emqx"}, - {description, "EMQX Core"}, - {vsn, "5.0.0"}, % strict semver, bump manually! - {modules, []}, - {registered, []}, - {applications, [ kernel - , stdlib - , gproc - , gen_rpc - , mria - , esockd - , cowboy - , sasl - , os_mon - , jiffy - , lc - , hocon - ]}, - {mod, {emqx_app,[]}}, - {env, []}, - {licenses, ["Apache-2.0"]}, - {maintainers, ["EMQX Team "]}, - {links, [{"Homepage", "https://emqx.io/"}, - {"Github", "https://github.com/emqx/emqx"} - ]} +{application, emqx, [ + {id, "emqx"}, + {description, "EMQX Core"}, + % strict semver, bump manually! + {vsn, "5.0.0"}, + {modules, []}, + {registered, []}, + {applications, [ + kernel, + stdlib, + gproc, + gen_rpc, + mria, + esockd, + cowboy, + sasl, + os_mon, + jiffy, + lc, + hocon + ]}, + {mod, {emqx_app, []}}, + {env, []}, + {licenses, ["Apache-2.0"]}, + {maintainers, ["EMQX Team "]}, + {links, [ + {"Homepage", "https://emqx.io/"}, + {"Github", "https://github.com/emqx/emqx"} + ]} ]}. diff --git a/apps/emqx/src/emqx.erl b/apps/emqx/src/emqx.erl index ca46eadc4..c31ba3d6d 100644 --- a/apps/emqx/src/emqx.erl +++ b/apps/emqx/src/emqx.erl @@ -23,49 +23,54 @@ -elvis([{elvis_style, god_modules, disable}]). %% Start/Stop the application --export([ start/0 - , is_running/0 - , is_running/1 - , stop/0 - ]). +-export([ + start/0, + is_running/0, + is_running/1, + stop/0 +]). %% PubSub API --export([ subscribe/1 - , subscribe/2 - , subscribe/3 - , publish/1 - , unsubscribe/1 - ]). +-export([ + subscribe/1, + subscribe/2, + subscribe/3, + publish/1, + unsubscribe/1 +]). %% PubSub management API --export([ topics/0 - , subscriptions/1 - , subscribers/1 - , subscribed/2 - ]). +-export([ + topics/0, + subscriptions/1, + subscribers/1, + subscribed/2 +]). %% Hooks API --export([ hook/2 - , hook/3 - , hook/4 - , unhook/2 - , run_hook/2 - , run_fold_hook/3 - ]). +-export([ + hook/2, + hook/3, + hook/4, + unhook/2, + run_hook/2, + run_fold_hook/3 +]). %% Configs APIs --export([ get_config/1 - , get_config/2 - , get_raw_config/1 - , get_raw_config/2 - , update_config/2 - , update_config/3 - , remove_config/1 - , remove_config/2 - , reset_config/2 - , data_dir/0 - , certs_dir/0 - ]). +-export([ + get_config/1, + get_config/2, + get_raw_config/1, + get_raw_config/2, + update_config/2, + update_config/3, + remove_config/1, + remove_config/2, + reset_config/2, + data_dir/0, + certs_dir/0 +]). -define(APP, ?MODULE). @@ -74,55 +79,58 @@ %%-------------------------------------------------------------------- %% @doc Start emqx application --spec(start() -> {ok, list(atom())} | {error, term()}). +-spec start() -> {ok, list(atom())} | {error, term()}. start() -> application:ensure_all_started(?APP). %% @doc Stop emqx application. --spec(stop() -> ok | {error, term()}). +-spec stop() -> ok | {error, term()}. stop() -> application:stop(?APP). %% @doc Is emqx running? --spec(is_running(node()) -> boolean()). +-spec is_running(node()) -> boolean(). is_running(Node) -> case emqx_proto_v1:is_running(Node) of - {badrpc, _} -> false; - Result -> Result + {badrpc, _} -> false; + Result -> Result end. %% @doc Is emqx running on this node? --spec(is_running() -> boolean()). +-spec is_running() -> boolean(). is_running() -> case whereis(?APP) of undefined -> false; - _ -> true + _ -> true end. 
%%-------------------------------------------------------------------- %% PubSub API %%-------------------------------------------------------------------- --spec(subscribe(emqx_types:topic() | string()) -> ok). +-spec subscribe(emqx_types:topic() | string()) -> ok. subscribe(Topic) -> emqx_broker:subscribe(iolist_to_binary(Topic)). --spec(subscribe(emqx_types:topic() | string(), emqx_types:subid() | emqx_types:subopts()) -> ok). -subscribe(Topic, SubId) when is_atom(SubId); is_binary(SubId)-> +-spec subscribe(emqx_types:topic() | string(), emqx_types:subid() | emqx_types:subopts()) -> ok. +subscribe(Topic, SubId) when is_atom(SubId); is_binary(SubId) -> emqx_broker:subscribe(iolist_to_binary(Topic), SubId); subscribe(Topic, SubOpts) when is_map(SubOpts) -> emqx_broker:subscribe(iolist_to_binary(Topic), SubOpts). --spec(subscribe(emqx_types:topic() | string(), - emqx_types:subid() | pid(), emqx_types:subopts()) -> ok). +-spec subscribe( + emqx_types:topic() | string(), + emqx_types:subid() | pid(), + emqx_types:subopts() +) -> ok. subscribe(Topic, SubId, SubOpts) when (is_atom(SubId) orelse is_binary(SubId)), is_map(SubOpts) -> emqx_broker:subscribe(iolist_to_binary(Topic), SubId, SubOpts). --spec(publish(emqx_types:message()) -> emqx_types:publish_result()). +-spec publish(emqx_types:message()) -> emqx_types:publish_result(). publish(Msg) -> emqx_broker:publish(Msg). --spec(unsubscribe(emqx_types:topic() | string()) -> ok). +-spec unsubscribe(emqx_types:topic() | string()) -> ok. unsubscribe(Topic) -> emqx_broker:unsubscribe(iolist_to_binary(Topic)). @@ -130,18 +138,18 @@ unsubscribe(Topic) -> %% PubSub management API %%-------------------------------------------------------------------- --spec(topics() -> list(emqx_types:topic())). +-spec topics() -> list(emqx_types:topic()). topics() -> emqx_router:topics(). --spec(subscribers(emqx_types:topic() | string()) -> [pid()]). +-spec subscribers(emqx_types:topic() | string()) -> [pid()]. subscribers(Topic) -> emqx_broker:subscribers(iolist_to_binary(Topic)). --spec(subscriptions(pid()) -> [{emqx_types:topic(), emqx_types:subopts()}]). +-spec subscriptions(pid()) -> [{emqx_types:topic(), emqx_types:subopts()}]. subscriptions(SubPid) when is_pid(SubPid) -> emqx_broker:subscriptions(SubPid). --spec(subscribed(pid() | emqx_types:subid(), emqx_types:topic() | string()) -> boolean()). +-spec subscribed(pid() | emqx_types:subid(), emqx_types:topic() | string()) -> boolean(). subscribed(SubPid, Topic) when is_pid(SubPid) -> emqx_broker:subscribed(SubPid, iolist_to_binary(Topic)); subscribed(SubId, Topic) when is_atom(SubId); is_binary(SubId) -> @@ -151,33 +159,35 @@ subscribed(SubId, Topic) when is_atom(SubId); is_binary(SubId) -> %% Hooks API %%-------------------------------------------------------------------- --spec(hook(emqx_hooks:hookpoint(), emqx_hooks:action()) -> ok | {error, already_exists}). +-spec hook(emqx_hooks:hookpoint(), emqx_hooks:action()) -> ok | {error, already_exists}. hook(HookPoint, Action) -> emqx_hooks:add(HookPoint, Action). --spec(hook(emqx_hooks:hookpoint(), - emqx_hooks:action(), - emqx_hooks:filter() | integer() | list()) - -> ok | {error, already_exists}). +-spec hook( + emqx_hooks:hookpoint(), + emqx_hooks:action(), + emqx_hooks:filter() | integer() | list() +) -> + ok | {error, already_exists}. 
hook(HookPoint, Action, Priority) when is_integer(Priority) -> emqx_hooks:add(HookPoint, Action, Priority); -hook(HookPoint, Action, {_M, _F, _A} = Filter ) -> +hook(HookPoint, Action, {_M, _F, _A} = Filter) -> emqx_hooks:add(HookPoint, Action, Filter). --spec(hook(emqx_hooks:hookpoint(), emqx_hooks:action(), emqx_hooks:filter(), integer()) - -> ok | {error, already_exists}). +-spec hook(emqx_hooks:hookpoint(), emqx_hooks:action(), emqx_hooks:filter(), integer()) -> + ok | {error, already_exists}. hook(HookPoint, Action, Filter, Priority) -> emqx_hooks:add(HookPoint, Action, Filter, Priority). --spec(unhook(emqx_hooks:hookpoint(), emqx_hooks:action() | {module(), atom()}) -> ok). +-spec unhook(emqx_hooks:hookpoint(), emqx_hooks:action() | {module(), atom()}) -> ok. unhook(HookPoint, Action) -> emqx_hooks:del(HookPoint, Action). --spec(run_hook(emqx_hooks:hookpoint(), list(any())) -> ok | stop). +-spec run_hook(emqx_hooks:hookpoint(), list(any())) -> ok | stop. run_hook(HookPoint, Args) -> emqx_hooks:run(HookPoint, Args). --spec(run_fold_hook(emqx_hooks:hookpoint(), list(any()), any()) -> any()). +-spec run_fold_hook(emqx_hooks:hookpoint(), list(any()), any()) -> any(). run_fold_hook(HookPoint, Args, Acc) -> emqx_hooks:run_fold(HookPoint, Args, Acc). @@ -202,12 +212,18 @@ get_raw_config(KeyPath, Default) -> update_config(KeyPath, UpdateReq) -> update_config(KeyPath, UpdateReq, #{}). --spec update_config(emqx_map_lib:config_key_path(), emqx_config:update_request(), - emqx_config:update_opts()) -> +-spec update_config( + emqx_map_lib:config_key_path(), + emqx_config:update_request(), + emqx_config:update_opts() +) -> {ok, emqx_config:update_result()} | {error, emqx_config:update_error()}. update_config([RootName | _] = KeyPath, UpdateReq, Opts) -> - emqx_config_handler:update_config(emqx_config:get_schema_mod(RootName), KeyPath, - {{update, UpdateReq}, Opts}). + emqx_config_handler:update_config( + emqx_config:get_schema_mod(RootName), + KeyPath, + {{update, UpdateReq}, Opts} + ). -spec remove_config(emqx_map_lib:config_key_path()) -> {ok, emqx_config:update_result()} | {error, emqx_config:update_error()}. @@ -217,16 +233,22 @@ remove_config(KeyPath) -> -spec remove_config(emqx_map_lib:config_key_path(), emqx_config:update_opts()) -> {ok, emqx_config:update_result()} | {error, emqx_config:update_error()}. remove_config([RootName | _] = KeyPath, Opts) -> - emqx_config_handler:update_config(emqx_config:get_schema_mod(RootName), - KeyPath, {remove, Opts}). + emqx_config_handler:update_config( + emqx_config:get_schema_mod(RootName), + KeyPath, + {remove, Opts} + ). -spec reset_config(emqx_map_lib:config_key_path(), emqx_config:update_opts()) -> {ok, emqx_config:update_result()} | {error, emqx_config:update_error()}. reset_config([RootName | _] = KeyPath, Opts) -> case emqx_config:get_default_value(KeyPath) of {ok, Default} -> - emqx_config_handler:update_config(emqx_config:get_schema_mod(RootName), KeyPath, - {{update, Default}, Opts}); + emqx_config_handler:update_config( + emqx_config:get_schema_mod(RootName), + KeyPath, + {{update, Default}, Opts} + ); {error, _} = Error -> Error end. diff --git a/apps/emqx/src/emqx_access_control.erl b/apps/emqx/src/emqx_access_control.erl index 6306503d7..565644cf8 100644 --- a/apps/emqx/src/emqx_access_control.erl +++ b/apps/emqx/src/emqx_access_control.erl @@ -18,16 +18,21 @@ -include("emqx.hrl"). --export([ authenticate/1 - , authorize/3 - ]). +-export([ + authenticate/1, + authorize/3 +]). 
%%-------------------------------------------------------------------- %% APIs %%-------------------------------------------------------------------- --spec(authenticate(emqx_types:clientinfo()) -> - {ok, map()} | {ok, map(), binary()} | {continue, map()} | {continue, binary(), map()} | {error, term()}). +-spec authenticate(emqx_types:clientinfo()) -> + {ok, map()} + | {ok, map(), binary()} + | {continue, map()} + | {continue, binary(), map()} + | {error, term()}. authenticate(Credential) -> case run_hooks('client.authenticate', [Credential], {ok, #{is_superuser => false}}) of ok -> @@ -37,14 +42,16 @@ authenticate(Credential) -> end. %% @doc Check Authorization --spec authorize(emqx_types:clientinfo(), emqx_types:pubsub(), emqx_types:topic()) - -> allow | deny. +-spec authorize(emqx_types:clientinfo(), emqx_types:pubsub(), emqx_types:topic()) -> + allow | deny. authorize(ClientInfo, PubSub, Topic) -> - Result = case emqx_authz_cache:is_enabled() of - true -> check_authorization_cache(ClientInfo, PubSub, Topic); - false -> do_authorize(ClientInfo, PubSub, Topic) - end, - inc_acl_metrics(Result), Result. + Result = + case emqx_authz_cache:is_enabled() of + true -> check_authorization_cache(ClientInfo, PubSub, Topic); + false -> do_authorize(ClientInfo, PubSub, Topic) + end, + inc_acl_metrics(Result), + Result. check_authorization_cache(ClientInfo, PubSub, Topic) -> case emqx_authz_cache:get_authz_cache(PubSub, Topic) of @@ -60,7 +67,7 @@ check_authorization_cache(ClientInfo, PubSub, Topic) -> do_authorize(ClientInfo, PubSub, Topic) -> NoMatch = emqx:get_config([authorization, no_match], allow), case run_hooks('client.authorize', [ClientInfo, PubSub, Topic], NoMatch) of - allow -> allow; + allow -> allow; _Other -> deny end. diff --git a/apps/emqx/src/emqx_alarm.erl b/apps/emqx/src/emqx_alarm.erl index 291c10572..9ef410299 100644 --- a/apps/emqx/src/emqx_alarm.erl +++ b/apps/emqx/src/emqx_alarm.erl @@ -26,44 +26,45 @@ -boot_mnesia({mnesia, [boot]}). --export([start_link/0 - ]). +-export([start_link/0]). %% API --export([ activate/1 - , activate/2 - , activate/3 - , deactivate/1 - , deactivate/2 - , deactivate/3 - , delete_all_deactivated_alarms/0 - , get_alarms/0 - , get_alarms/1 - , format/1 - ]). +-export([ + activate/1, + activate/2, + activate/3, + deactivate/1, + deactivate/2, + deactivate/3, + delete_all_deactivated_alarms/0, + get_alarms/0, + get_alarms/1, + format/1 +]). %% gen_server callbacks --export([ init/1 - , handle_call/3 - , handle_cast/2 - , handle_info/2 - , terminate/2 - , code_change/3 - ]). +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2, + code_change/3 +]). -record(activated_alarm, { - name :: binary() | atom(), - details :: map() | list(), - message :: binary(), - activate_at :: integer() - }). + name :: binary() | atom(), + details :: map() | list(), + message :: binary(), + activate_at :: integer() +}). -record(deactivated_alarm, { - activate_at :: integer(), - name :: binary() | atom(), - details :: map() | list(), - message :: binary(), - deactivate_at :: integer() | infinity - }). + activate_at :: integer(), + name :: binary() | atom(), + details :: map() | list(), + message :: binary(), + deactivate_at :: integer() | infinity +}). -ifdef(TEST). -compile(export_all). 
@@ -75,18 +76,26 @@ %%-------------------------------------------------------------------- mnesia(boot) -> - ok = mria:create_table(?ACTIVATED_ALARM, - [{type, set}, - {storage, disc_copies}, - {local_content, true}, - {record_name, activated_alarm}, - {attributes, record_info(fields, activated_alarm)}]), - ok = mria:create_table(?DEACTIVATED_ALARM, - [{type, ordered_set}, - {storage, disc_copies}, - {local_content, true}, - {record_name, deactivated_alarm}, - {attributes, record_info(fields, deactivated_alarm)}]). + ok = mria:create_table( + ?ACTIVATED_ALARM, + [ + {type, set}, + {storage, disc_copies}, + {local_content, true}, + {record_name, activated_alarm}, + {attributes, record_info(fields, activated_alarm)} + ] + ), + ok = mria:create_table( + ?DEACTIVATED_ALARM, + [ + {type, ordered_set}, + {storage, disc_copies}, + {local_content, true}, + {record_name, deactivated_alarm}, + {attributes, record_info(fields, deactivated_alarm)} + ] + ). %%-------------------------------------------------------------------- %% API @@ -124,10 +133,8 @@ get_alarms() -> -spec get_alarms(all | activated | deactivated) -> [map()]. get_alarms(all) -> gen_server:call(?MODULE, {get_alarms, all}); - get_alarms(activated) -> gen_server:call(?MODULE, {get_alarms, activated}); - get_alarms(deactivated) -> gen_server:call(?MODULE, {get_alarms, deactivated}). @@ -139,17 +146,24 @@ format(#activated_alarm{name = Name, message = Message, activate_at = At, detail node => node(), name => Name, message => Message, - duration => (Now - At) div 1000, %% to millisecond + %% to millisecond + duration => (Now - At) div 1000, activate_at => to_rfc3339(At), details => Details }; -format(#deactivated_alarm{name = Name, message = Message, activate_at = At, details = Details, - deactivate_at = DAt}) -> +format(#deactivated_alarm{ + name = Name, + message = Message, + activate_at = At, + details = Details, + deactivate_at = DAt +}) -> #{ node => node(), name => Name, message => Message, - duration => (DAt - At) div 1000, %% to millisecond + %% to millisecond + duration => (DAt - At) div 1000, activate_at => to_rfc3339(At), deactivate_at => to_rfc3339(DAt), details => Details @@ -169,9 +183,11 @@ init([]) -> {ok, #{}, get_validity_period()}. 
handle_call({activate_alarm, Name, Details, Message}, _From, State) -> - Res = mria:transaction(mria:local_content_shard(), + Res = mria:transaction( + mria:local_content_shard(), fun create_activate_alarm/3, - [Name, Details, Message]), + [Name, Details, Message] + ), case Res of {atomic, Alarm} -> do_actions(activate, Alarm, emqx:get_config([alarm, actions])), @@ -179,7 +195,6 @@ handle_call({activate_alarm, Name, Details, Message}, _From, State) -> {aborted, Reason} -> {reply, Reason, State, get_validity_period()} end; - handle_call({deactivate_alarm, Name, Details, Message}, _From, State) -> case mnesia:dirty_read(?ACTIVATED_ALARM, Name) of [] -> @@ -188,30 +203,29 @@ handle_call({deactivate_alarm, Name, Details, Message}, _From, State) -> deactivate_alarm(Alarm, Details, Message), {reply, ok, State, get_validity_period()} end; - handle_call(delete_all_deactivated_alarms, _From, State) -> clear_table(?DEACTIVATED_ALARM), {reply, ok, State, get_validity_period()}; - handle_call({get_alarms, all}, _From, State) -> {atomic, Alarms} = mria:ro_transaction( - mria:local_content_shard(), - fun() -> - [normalize(Alarm) || - Alarm <- ets:tab2list(?ACTIVATED_ALARM) - ++ ets:tab2list(?DEACTIVATED_ALARM)] - end), + mria:local_content_shard(), + fun() -> + [ + normalize(Alarm) + || Alarm <- + ets:tab2list(?ACTIVATED_ALARM) ++ + ets:tab2list(?DEACTIVATED_ALARM) + ] + end + ), {reply, Alarms, State, get_validity_period()}; - handle_call({get_alarms, activated}, _From, State) -> Alarms = [normalize(Alarm) || Alarm <- ets:tab2list(?ACTIVATED_ALARM)], {reply, Alarms, State, get_validity_period()}; - handle_call({get_alarms, deactivated}, _From, State) -> Alarms = [normalize(Alarm) || Alarm <- ets:tab2list(?DEACTIVATED_ALARM)], {reply, Alarms, State, get_validity_period()}; - handle_call(Req, From, State) -> ?SLOG(error, #{msg => "unexpected_call", call_req => Req, from => From}), {reply, ignored, State, get_validity_period()}. @@ -224,7 +238,6 @@ handle_info(timeout, State) -> Period = get_validity_period(), delete_expired_deactivated_alarms(erlang:system_time(microsecond) - Period * 1000), {noreply, State, Period}; - handle_info(Info, State) -> ?SLOG(error, #{msg => "unexpected_info", info_req => Info}), {noreply, State, get_validity_period()}. @@ -247,31 +260,50 @@ create_activate_alarm(Name, Details, Message) -> [#activated_alarm{name = Name}] -> mnesia:abort({error, already_existed}); [] -> - Alarm = #activated_alarm{name = Name, + Alarm = #activated_alarm{ + name = Name, details = Details, message = normalize_message(Name, iolist_to_binary(Message)), - activate_at = erlang:system_time(microsecond)}, + activate_at = erlang:system_time(microsecond) + }, ok = mnesia:write(?ACTIVATED_ALARM, Alarm, write), Alarm end. 
-deactivate_alarm(#activated_alarm{activate_at = ActivateAt, name = Name, - details = Details0, message = Msg0}, Details, Message) -> +deactivate_alarm( + #activated_alarm{ + activate_at = ActivateAt, + name = Name, + details = Details0, + message = Msg0 + }, + Details, + Message +) -> SizeLimit = emqx:get_config([alarm, size_limit]), case SizeLimit > 0 andalso (mnesia:table_info(?DEACTIVATED_ALARM, size) >= SizeLimit) of true -> case mnesia:dirty_first(?DEACTIVATED_ALARM) of '$end_of_table' -> ok; - ActivateAt2 -> - mria:dirty_delete(?DEACTIVATED_ALARM, ActivateAt2) + ActivateAt2 -> mria:dirty_delete(?DEACTIVATED_ALARM, ActivateAt2) end; - false -> ok + false -> + ok end, - HistoryAlarm = make_deactivated_alarm(ActivateAt, Name, Details0, Msg0, - erlang:system_time(microsecond)), - DeActAlarm = make_deactivated_alarm(ActivateAt, Name, Details, - normalize_message(Name, iolist_to_binary(Message)), - erlang:system_time(microsecond)), + HistoryAlarm = make_deactivated_alarm( + ActivateAt, + Name, + Details0, + Msg0, + erlang:system_time(microsecond) + ), + DeActAlarm = make_deactivated_alarm( + ActivateAt, + Name, + Details, + normalize_message(Name, iolist_to_binary(Message)), + erlang:system_time(microsecond) + ), mria:dirty_write(?DEACTIVATED_ALARM, HistoryAlarm), mria:dirty_delete(?ACTIVATED_ALARM, Name), do_actions(deactivate, DeActAlarm, emqx:get_config([alarm, actions])). @@ -282,22 +314,32 @@ make_deactivated_alarm(ActivateAt, Name, Details, Message, DeActivateAt) -> name = Name, details = Details, message = Message, - deactivate_at = DeActivateAt}. + deactivate_at = DeActivateAt + }. deactivate_all_alarms() -> lists:foreach( - fun(#activated_alarm{name = Name, - details = Details, - message = Message, - activate_at = ActivateAt}) -> - mria:dirty_write(?DEACTIVATED_ALARM, + fun( + #activated_alarm{ + name = Name, + details = Details, + message = Message, + activate_at = ActivateAt + } + ) -> + mria:dirty_write( + ?DEACTIVATED_ALARM, #deactivated_alarm{ activate_at = ActivateAt, name = Name, details = Details, message = Message, - deactivate_at = erlang:system_time(microsecond)}) - end, ets:tab2list(?ACTIVATED_ALARM)), + deactivate_at = erlang:system_time(microsecond) + } + ) + end, + ets:tab2list(?ACTIVATED_ALARM) + ), clear_table(?ACTIVATED_ALARM). %% Delete all records from the given table, ignore result. @@ -346,8 +388,14 @@ do_actions(deactivate, Alarm = #deactivated_alarm{name = Name}, [log | More]) -> do_actions(Operation, Alarm, [publish | More]) -> Topic = topic(Operation), {ok, Payload} = emqx_json:safe_encode(normalize(Alarm)), - Message = emqx_message:make(?MODULE, 0, Topic, Payload, #{sys => true}, - #{properties => #{'Content-Type' => <<"application/json">>}}), + Message = emqx_message:make( + ?MODULE, + 0, + Topic, + Payload, + #{sys => true}, + #{properties => #{'Content-Type' => <<"application/json">>}} + ), _ = emqx_broker:safe_publish(Message), do_actions(Operation, Alarm, More). @@ -356,28 +404,37 @@ topic(activate) -> topic(deactivate) -> emqx_topic:systop(<<"alarms/deactivate">>). 
-normalize(#activated_alarm{name = Name, - details = Details, - message = Message, - activate_at = ActivateAt}) -> - #{name => Name, - details => Details, - message => Message, - activate_at => ActivateAt, - deactivate_at => infinity, - activated => true}; -normalize(#deactivated_alarm{activate_at = ActivateAt, - name = Name, - details = Details, - message = Message, - deactivate_at = DeactivateAt}) -> - #{name => Name, - details => Details, - message => Message, - activate_at => ActivateAt, - deactivate_at => DeactivateAt, - activated => false}. +normalize(#activated_alarm{ + name = Name, + details = Details, + message = Message, + activate_at = ActivateAt +}) -> + #{ + name => Name, + details => Details, + message => Message, + activate_at => ActivateAt, + deactivate_at => infinity, + activated => true + }; +normalize(#deactivated_alarm{ + activate_at = ActivateAt, + name = Name, + details = Details, + message = Message, + deactivate_at = DeactivateAt +}) -> + #{ + name => Name, + details => Details, + message => Message, + activate_at => ActivateAt, + deactivate_at => DeactivateAt, + activated => false + }. normalize_message(Name, <<"">>) -> list_to_binary(io_lib:format("~p", [Name])); -normalize_message(_Name, Message) -> Message. +normalize_message(_Name, Message) -> + Message. diff --git a/apps/emqx/src/emqx_alarm_handler.erl b/apps/emqx/src/emqx_alarm_handler.erl index e19084cde..1897556ab 100644 --- a/apps/emqx/src/emqx_alarm_handler.erl +++ b/apps/emqx/src/emqx_alarm_handler.erl @@ -22,18 +22,19 @@ -include("logger.hrl"). -include_lib("lc/include/lc.hrl"). - %% gen_event callbacks --export([ init/1 - , handle_event/2 - , handle_call/2 - , handle_info/2 - , terminate/2 - ]). +-export([ + init/1, + handle_event/2, + handle_call/2, + handle_info/2, + terminate/2 +]). --export([ load/0 - , unload/0 - ]). +-export([ + load/0, + unload/0 +]). %%-------------------------------------------------------------------- %% API @@ -52,43 +53,44 @@ unload() -> init({_Args, {alarm_handler, _ExistingAlarms}}) -> {ok, []}; - init(_) -> {ok, []}. 
handle_event({set_alarm, {system_memory_high_watermark, []}}, State) -> HighWatermark = emqx_os_mon:get_sysmem_high_watermark(), Message = to_bin("System memory usage is higher than ~p%", [HighWatermark]), - emqx_alarm:activate(high_system_memory_usage, - #{high_watermark => HighWatermark}, Message), + emqx_alarm:activate( + high_system_memory_usage, + #{high_watermark => HighWatermark}, + Message + ), {ok, State}; - handle_event({set_alarm, {process_memory_high_watermark, Pid}}, State) -> HighWatermark = emqx_os_mon:get_procmem_high_watermark(), Message = to_bin("Process memory usage is higher than ~p%", [HighWatermark]), - emqx_alarm:activate(high_process_memory_usage, - #{pid => list_to_binary(pid_to_list(Pid)), - high_watermark => HighWatermark}, Message), + emqx_alarm:activate( + high_process_memory_usage, + #{ + pid => list_to_binary(pid_to_list(Pid)), + high_watermark => HighWatermark + }, + Message + ), {ok, State}; - handle_event({clear_alarm, system_memory_high_watermark}, State) -> _ = emqx_alarm:deactivate(high_system_memory_usage), {ok, State}; - handle_event({clear_alarm, process_memory_high_watermark}, State) -> _ = emqx_alarm:deactivate(high_process_memory_usage), {ok, State}; - handle_event({set_alarm, {?LC_ALARM_ID_RUNQ, Info}}, State) -> #{node := Node, runq_length := Len} = Info, Message = to_bin("VM is overloaded on node: ~p: ~p", [Node, Len]), emqx_alarm:activate(runq_overload, Info, Message), {ok, State}; - handle_event({clear_alarm, ?LC_ALARM_ID_RUNQ}, State) -> _ = emqx_alarm:deactivate(runq_overload), {ok, State}; - handle_event(_, State) -> {ok, State}. diff --git a/apps/emqx/src/emqx_app.erl b/apps/emqx/src/emqx_app.erl index 9e038cfa9..25a6195be 100644 --- a/apps/emqx/src/emqx_app.erl +++ b/apps/emqx/src/emqx_app.erl @@ -18,16 +18,17 @@ -behaviour(application). --export([ start/2 - , prep_stop/1 - , stop/1 - , get_description/0 - , get_release/0 - , set_init_config_load_done/0 - , get_init_config_load_done/0 - , set_init_tnx_id/1 - , get_init_tnx_id/0 - ]). +-export([ + start/2, + prep_stop/1, + stop/1, + get_description/0, + get_release/0, + set_init_config_load_done/0, + get_init_config_load_done/0, + set_init_tnx_id/1, + get_init_tnx_id/0 +]). -include("emqx.hrl"). -include("logger.hrl"). @@ -54,8 +55,8 @@ start(_Type, _Args) -> prep_stop(_State) -> ok = emqx_alarm_handler:unload(), emqx_config:remove_handlers(), - emqx_boot:is_enabled(listeners) - andalso emqx_listeners:stop(). + emqx_boot:is_enabled(listeners) andalso + emqx_listeners:stop(). stop(_State) -> ok. @@ -93,14 +94,19 @@ maybe_start_listeners() -> maybe_start_quicer() -> case is_quicer_app_present() andalso is_quic_listener_configured() of - true -> {ok, _} = application:ensure_all_started(quicer), ok; - false -> ok + true -> + {ok, _} = application:ensure_all_started(quicer), + ok; + false -> + ok end. is_quicer_app_present() -> case application:load(quicer) of - ok -> true; - {error, {already_loaded, _}} -> true; + ok -> + true; + {error, {already_loaded, _}} -> + true; _ -> ?SLOG(info, #{msg => "quicer_app_not_found"}), false diff --git a/apps/emqx/src/emqx_authentication.erl b/apps/emqx/src/emqx_authentication.erl index 5df50a427..71bf18183 100644 --- a/apps/emqx/src/emqx_authentication.erl +++ b/apps/emqx/src/emqx_authentication.erl @@ -31,65 +31,69 @@ -define(CONF_ROOT, ?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME_ATOM). %% The authentication entrypoint. --export([ authenticate/2 - ]). +-export([authenticate/2]). 
%% Authenticator manager process start/stop --export([ start_link/0 - , stop/0 - , get_providers/0 - ]). +-export([ + start_link/0, + stop/0, + get_providers/0 +]). %% Authenticator management APIs --export([ initialize_authentication/2 - , register_provider/2 - , register_providers/1 - , deregister_provider/1 - , deregister_providers/1 - , create_chain/1 - , delete_chain/1 - , lookup_chain/1 - , list_chains/0 - , list_chain_names/0 - , create_authenticator/2 - , delete_authenticator/2 - , update_authenticator/3 - , lookup_authenticator/2 - , list_authenticators/1 - , move_authenticator/3 - ]). +-export([ + initialize_authentication/2, + register_provider/2, + register_providers/1, + deregister_provider/1, + deregister_providers/1, + create_chain/1, + delete_chain/1, + lookup_chain/1, + list_chains/0, + list_chain_names/0, + create_authenticator/2, + delete_authenticator/2, + update_authenticator/3, + lookup_authenticator/2, + list_authenticators/1, + move_authenticator/3 +]). %% APIs for observer built_in_database --export([ import_users/3 - , add_user/3 - , delete_user/3 - , update_user/4 - , lookup_user/3 - , list_users/3 - ]). +-export([ + import_users/3, + add_user/3, + delete_user/3, + update_user/4, + lookup_user/3, + list_users/3 +]). %% gen_server callbacks --export([ init/1 - , handle_call/3 - , handle_cast/2 - , handle_info/2 - , terminate/2 - , code_change/3 - ]). +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2, + code_change/3 +]). %% utility functions --export([ authenticator_id/1 - ]). +-export([authenticator_id/1]). %% proxy callback --export([ pre_config_update/3 - , post_config_update/5 - ]). +-export([ + pre_config_update/3, + post_config_update/5 +]). --export_type([ authenticator_id/0 - , position/0 - , chain_name/0 - ]). +-export_type([ + authenticator_id/0, + position/0, + chain_name/0 +]). -ifdef(TEST). -compile(export_all). @@ -104,85 +108,108 @@ -type authn_type() :: atom() | {atom(), atom()}. -type provider() :: module(). --type chain() :: #{name := chain_name(), - authenticators := [authenticator()]}. +-type chain() :: #{ + name := chain_name(), + authenticators := [authenticator()] +}. --type authenticator() :: #{id := authenticator_id(), - provider := provider(), - enable := boolean(), - state := map()}. +-type authenticator() :: #{ + id := authenticator_id(), + provider := provider(), + enable := boolean(), + state := map() +}. -type config() :: emqx_authentication_config:config(). -type state() :: #{atom() => term()}. --type extra() :: #{is_superuser := boolean(), - atom() => term()}. --type user_info() :: #{user_id := binary(), - atom() => term()}. +-type extra() :: #{ + is_superuser := boolean(), + atom() => term() +}. +-type user_info() :: #{ + user_id := binary(), + atom() => term() +}. %% @doc check_config takes raw config from config file, %% parse and validate it, and return parsed result. -callback check_config(config()) -> config(). --callback create(AuthenticatorID, Config) - -> {ok, State} - | {error, term()} - when AuthenticatorID::authenticator_id(), Config::config(), State::state(). +-callback create(AuthenticatorID, Config) -> + {ok, State} + | {error, term()} +when + AuthenticatorID :: authenticator_id(), Config :: config(), State :: state(). --callback update(Config, State) - -> {ok, NewState} - | {error, term()} - when Config::config(), State::state(), NewState::state(). 
+-callback update(Config, State) -> + {ok, NewState} + | {error, term()} +when + Config :: config(), State :: state(), NewState :: state(). --callback authenticate(Credential, State) - -> ignore - | {ok, Extra} - | {ok, Extra, AuthData} - | {continue, AuthCache} - | {continue, AuthData, AuthCache} - | {error, term()} - when Credential::map(), State::state(), Extra::extra(), AuthData::binary(), AuthCache::map(). +-callback authenticate(Credential, State) -> + ignore + | {ok, Extra} + | {ok, Extra, AuthData} + | {continue, AuthCache} + | {continue, AuthData, AuthCache} + | {error, term()} +when + Credential :: map(), + State :: state(), + Extra :: extra(), + AuthData :: binary(), + AuthCache :: map(). --callback destroy(State) - -> ok - when State::state(). +-callback destroy(State) -> + ok +when + State :: state(). --callback import_users(Filename, State) - -> ok - | {error, term()} - when Filename::binary(), State::state(). +-callback import_users(Filename, State) -> + ok + | {error, term()} +when + Filename :: binary(), State :: state(). --callback add_user(UserInfo, State) - -> {ok, User} - | {error, term()} - when UserInfo::user_info(), State::state(), User::user_info(). +-callback add_user(UserInfo, State) -> + {ok, User} + | {error, term()} +when + UserInfo :: user_info(), State :: state(), User :: user_info(). --callback delete_user(UserID, State) - -> ok - | {error, term()} - when UserID::binary(), State::state(). +-callback delete_user(UserID, State) -> + ok + | {error, term()} +when + UserID :: binary(), State :: state(). --callback update_user(UserID, UserInfo, State) - -> {ok, User} - | {error, term()} - when UserID::binary(), UserInfo::map(), State::state(), User::user_info(). +-callback update_user(UserID, UserInfo, State) -> + {ok, User} + | {error, term()} +when + UserID :: binary(), UserInfo :: map(), State :: state(), User :: user_info(). --callback lookup_user(UserID, UserInfo, State) - -> {ok, User} - | {error, term()} - when UserID::binary(), UserInfo::map(), State::state(), User::user_info(). +-callback lookup_user(UserID, UserInfo, State) -> + {ok, User} + | {error, term()} +when + UserID :: binary(), UserInfo :: map(), State :: state(), User :: user_info(). --callback list_users(State) - -> {ok, Users} - when State::state(), Users::[user_info()]. +-callback list_users(State) -> + {ok, Users} +when + State :: state(), Users :: [user_info()]. --optional_callbacks([ import_users/2 - , add_user/2 - , delete_user/2 - , update_user/3 - , lookup_user/3 - , list_users/1 - , check_config/1 - ]). +-optional_callbacks([ + import_users/2, + add_user/2, + delete_user/2, + update_user/3, + lookup_user/3, + list_users/1, + check_config/1 +]). %%------------------------------------------------------------------------------ %% Authenticate @@ -235,22 +262,26 @@ authenticator_id(Config) -> %% @doc Call this API to initialize authenticators implemented in another APP. -spec initialize_authentication(chain_name(), [config()]) -> ok. -initialize_authentication(_, []) -> ok; +initialize_authentication(_, []) -> + ok; initialize_authentication(ChainName, AuthenticatorsConfig) -> _ = create_chain(ChainName), CheckedConfig = to_list(AuthenticatorsConfig), - lists:foreach(fun(AuthenticatorConfig) -> - case create_authenticator(ChainName, AuthenticatorConfig) of - {ok, _} -> - ok; - {error, Reason} -> - ?SLOG(error, #{ - msg => "failed_to_create_authenticator", - authenticator => authenticator_id(AuthenticatorConfig), - reason => Reason - }) - end - end, CheckedConfig). 
+ lists:foreach( + fun(AuthenticatorConfig) -> + case create_authenticator(ChainName, AuthenticatorConfig) of + {ok, _} -> + ok; + {error, Reason} -> + ?SLOG(error, #{ + msg => "failed_to_create_authenticator", + authenticator => authenticator_id(AuthenticatorConfig), + reason => Reason + }) + end + end, + CheckedConfig + ). -spec start_link() -> {ok, pid()} | ignore | {error, term()}. start_link() -> @@ -392,32 +423,38 @@ init(_Opts) -> handle_call(get_providers, _From, #{providers := Providers} = State) -> reply(Providers, State); -handle_call({register_providers, Providers}, _From, - #{providers := Reg0} = State) -> +handle_call( + {register_providers, Providers}, + _From, + #{providers := Reg0} = State +) -> case lists:filter(fun({T, _}) -> maps:is_key(T, Reg0) end, Providers) of [] -> - Reg = lists:foldl(fun({AuthNType, Module}, Pin) -> - Pin#{AuthNType => Module} - end, Reg0, Providers), + Reg = lists:foldl( + fun({AuthNType, Module}, Pin) -> + Pin#{AuthNType => Module} + end, + Reg0, + Providers + ), reply(ok, State#{providers := Reg}); Clashes -> reply({error, {authentication_type_clash, Clashes}}, State) end; - handle_call({deregister_providers, AuthNTypes}, _From, #{providers := Providers} = State) -> reply(ok, State#{providers := maps:without(AuthNTypes, Providers)}); - handle_call({create_chain, Name}, _From, State) -> case ets:member(?CHAINS_TAB, Name) of true -> reply({error, {already_exists, {chain, Name}}}, State); false -> - Chain = #chain{name = Name, - authenticators = []}, + Chain = #chain{ + name = Name, + authenticators = [] + }, true = ets:insert(?CHAINS_TAB, Chain), reply({ok, serialize_chain(Chain)}, State) end; - handle_call({delete_chain, Name}, _From, State) -> case ets:lookup(?CHAINS_TAB, Name) of [] -> @@ -427,59 +464,48 @@ handle_call({delete_chain, Name}, _From, State) -> true = ets:delete(?CHAINS_TAB, Name), reply(ok, maybe_unhook(State)) end; - handle_call({create_authenticator, ChainName, Config}, _From, #{providers := Providers} = State) -> UpdateFun = fun(Chain) -> - handle_create_authenticator(Chain, Config, Providers) - end, + handle_create_authenticator(Chain, Config, Providers) + end, Reply = update_chain(ChainName, UpdateFun), reply(Reply, maybe_hook(State)); - handle_call({delete_authenticator, ChainName, AuthenticatorID}, _From, State) -> UpdateFun = fun(Chain) -> - handle_delete_authenticator(Chain, AuthenticatorID) - end, + handle_delete_authenticator(Chain, AuthenticatorID) + end, Reply = update_chain(ChainName, UpdateFun), reply(Reply, maybe_unhook(State)); - handle_call({update_authenticator, ChainName, AuthenticatorID, Config}, _From, State) -> UpdateFun = fun(Chain) -> - handle_update_authenticator(Chain, AuthenticatorID, Config) - end, + handle_update_authenticator(Chain, AuthenticatorID, Config) + end, Reply = update_chain(ChainName, UpdateFun), reply(Reply, State); - handle_call({move_authenticator, ChainName, AuthenticatorID, Position}, _From, State) -> UpdateFun = fun(Chain) -> - handle_move_authenticator(Chain, AuthenticatorID, Position) - end, + handle_move_authenticator(Chain, AuthenticatorID, Position) + end, Reply = update_chain(ChainName, UpdateFun), reply(Reply, State); - handle_call({import_users, ChainName, AuthenticatorID, Filename}, _From, State) -> Reply = call_authenticator(ChainName, AuthenticatorID, import_users, [Filename]), reply(Reply, State); - handle_call({add_user, ChainName, AuthenticatorID, UserInfo}, _From, State) -> Reply = call_authenticator(ChainName, AuthenticatorID, add_user, [UserInfo]), reply(Reply, 
State); - handle_call({delete_user, ChainName, AuthenticatorID, UserID}, _From, State) -> Reply = call_authenticator(ChainName, AuthenticatorID, delete_user, [UserID]), reply(Reply, State); - handle_call({update_user, ChainName, AuthenticatorID, UserID, NewUserInfo}, _From, State) -> Reply = call_authenticator(ChainName, AuthenticatorID, update_user, [UserID, NewUserInfo]), reply(Reply, State); - handle_call({lookup_user, ChainName, AuthenticatorID, UserID}, _From, State) -> Reply = call_authenticator(ChainName, AuthenticatorID, lookup_user, [UserID]), reply(Reply, State); - handle_call({list_users, ChainName, AuthenticatorID, FuzzyParams}, _From, State) -> Reply = call_authenticator(ChainName, AuthenticatorID, list_users, [FuzzyParams]), reply(Reply, State); - handle_call(Req, _From, State) -> ?SLOG(error, #{msg => "unexpected_call", call => Req}), {reply, ignored, State}. @@ -494,10 +520,15 @@ handle_info(Info, State) -> terminate(Reason, _State) -> case Reason of - normal -> ok; - {shutdown, _} -> ok; - Other -> ?SLOG(error, #{msg => "emqx_authentication_terminating", - reason => Other}) + normal -> + ok; + {shutdown, _} -> + ok; + Other -> + ?SLOG(error, #{ + msg => "emqx_authentication_terminating", + reason => Other + }) end, emqx_config_handler:remove_handler([?CONF_ROOT]), emqx_config_handler:remove_handler([listeners, '?', '?', ?CONF_ROOT]), @@ -521,15 +552,18 @@ handle_update_authenticator(Chain, AuthenticatorID, Config) -> case Provider:update(Config, ST) of {ok, NewST} -> NewAuthenticator = Authenticator#authenticator{ - state = NewST, - enable = maps:get(enable, Config)}, + state = NewST, + enable = maps:get(enable, Config) + }, NewAuthenticators = replace_authenticator( - AuthenticatorID, - NewAuthenticator, - Authenticators), + AuthenticatorID, + NewAuthenticator, + Authenticators + ), true = ets:insert( - ?CHAINS_TAB, - Chain#chain{authenticators = NewAuthenticators}), + ?CHAINS_TAB, + Chain#chain{authenticators = NewAuthenticators} + ), {ok, serialize_authenticator(NewAuthenticator)}; {error, Reason} -> {error, Reason} @@ -541,8 +575,8 @@ handle_update_authenticator(Chain, AuthenticatorID, Config) -> handle_delete_authenticator(Chain, AuthenticatorID) -> MatchFun = fun(#authenticator{id = ID}) -> - ID =:= AuthenticatorID - end, + ID =:= AuthenticatorID + end, case do_delete_authenticators(MatchFun, Chain) of [] -> {error, {not_found, {authenticator, AuthenticatorID}}}; [AuthenticatorID] -> ok @@ -569,9 +603,11 @@ handle_create_authenticator(Chain, Config, Providers) -> {ok, Authenticator} -> NAuthenticators = Authenticators ++ - [Authenticator#authenticator{enable = maps:get(enable, Config)}], - true = ets:insert(?CHAINS_TAB, - Chain#chain{authenticators = NAuthenticators}), + [Authenticator#authenticator{enable = maps:get(enable, Config)}], + true = ets:insert( + ?CHAINS_TAB, + Chain#chain{authenticators = NAuthenticators} + ), {ok, serialize_authenticator(Authenticator)}; {error, Reason} -> {error, Reason} @@ -593,23 +629,28 @@ do_authenticate([#authenticator{id = ID, provider = Provider, state = State} | M {stop, Result} catch Class:Reason:Stacktrace -> - ?SLOG(warning, #{msg => "unexpected_error_in_authentication", - exception => Class, - reason => Reason, - stacktrace => Stacktrace, - authenticator => ID}), + ?SLOG(warning, #{ + msg => "unexpected_error_in_authentication", + exception => Class, + reason => Reason, + stacktrace => Stacktrace, + authenticator => ID + }), do_authenticate(More, Credential) end. - reply(Reply, State) -> {reply, Reply, State}. 
create_chain_table() -> try - _ = ets:new(?CHAINS_TAB, [named_table, set, public, - {keypos, #chain.name}, - {read_concurrency, true}]), + _ = ets:new(?CHAINS_TAB, [ + named_table, + set, + public, + {keypos, #chain.name}, + {read_concurrency, true} + ]), ok catch error:badarg -> ok @@ -629,9 +670,15 @@ global_chain(_) -> 'unknown:global'. maybe_hook(#{hooked := false} = State) -> - case lists:any(fun(#chain{authenticators = []}) -> false; - (_) -> true - end, ets:tab2list(?CHAINS_TAB)) of + case + lists:any( + fun + (#chain{authenticators = []}) -> false; + (_) -> true + end, + ets:tab2list(?CHAINS_TAB) + ) + of true -> _ = emqx:hook('client.authenticate', {?MODULE, authenticate, []}), State#{hooked => true}; @@ -642,9 +689,15 @@ maybe_hook(State) -> State. maybe_unhook(#{hooked := true} = State) -> - case lists:all(fun(#chain{authenticators = []}) -> true; - (_) -> false - end, ets:tab2list(?CHAINS_TAB)) of + case + lists:all( + fun + (#chain{authenticators = []}) -> true; + (_) -> false + end, + ets:tab2list(?CHAINS_TAB) + ) + of true -> _ = emqx:unhook('client.authenticate', {?MODULE, authenticate, []}), State#{hooked => false}; @@ -661,10 +714,12 @@ do_create_authenticator(AuthenticatorID, #{enable := Enable} = Config, Providers Provider -> case Provider:create(AuthenticatorID, Config) of {ok, State} -> - Authenticator = #authenticator{id = AuthenticatorID, - provider = Provider, - enable = Enable, - state = State}, + Authenticator = #authenticator{ + id = AuthenticatorID, + provider = Provider, + enable = Enable, + state = State + }, {ok, Authenticator}; {error, Reason} -> {error, Reason} @@ -675,8 +730,9 @@ do_delete_authenticators(MatchFun, #chain{authenticators = Authenticators} = Cha {Matching, Others} = lists:partition(MatchFun, Authenticators), MatchingIDs = lists:map( - fun(#authenticator{id = ID}) -> ID end, - Matching), + fun(#authenticator{id = ID}) -> ID end, + Matching + ), ok = lists:foreach(fun do_destroy_authenticator/1, Matching), true = ets:insert(?CHAINS_TAB, Chain#chain{authenticators = Others}), @@ -708,8 +764,12 @@ do_move_authenticator(ID, Authenticators, Position) -> insert(_, [], {_, RelatedID}, _) -> {error, {not_found, {authenticator, RelatedID}}}; -insert(Authenticator, [#authenticator{id = RelatedID} = Related | Rest], - {Relative, RelatedID}, Acc) -> +insert( + Authenticator, + [#authenticator{id = RelatedID} = Related | Rest], + {Relative, RelatedID}, + Acc +) -> case Relative of before -> {ok, lists:reverse(Acc) ++ [Authenticator, Related | Rest]}; @@ -744,24 +804,30 @@ call_authenticator(ChainName, AuthenticatorID, Func, Args) -> end, update_chain(ChainName, UpdateFun). -serialize_chain(#chain{name = Name, - authenticators = Authenticators}) -> - #{ name => Name - , authenticators => serialize_authenticators(Authenticators) - }. +serialize_chain(#chain{ + name = Name, + authenticators = Authenticators +}) -> + #{ + name => Name, + authenticators => serialize_authenticators(Authenticators) + }. serialize_authenticators(Authenticators) -> [serialize_authenticator(Authenticator) || Authenticator <- Authenticators]. -serialize_authenticator(#authenticator{id = ID, - provider = Provider, - enable = Enable, - state = State}) -> - #{ id => ID - , provider => Provider - , enable => Enable - , state => State - }. +serialize_authenticator(#authenticator{ + id = ID, + provider = Provider, + enable = Enable, + state = State +}) -> + #{ + id => ID, + provider => Provider, + enable => Enable, + state => State + }. 
authn_type(#{mechanism := Mechanism, backend := Backend}) -> {Mechanism, Backend}; diff --git a/apps/emqx/src/emqx_authentication_config.erl b/apps/emqx/src/emqx_authentication_config.erl index 66f96fc76..046068f6a 100644 --- a/apps/emqx/src/emqx_authentication_config.erl +++ b/apps/emqx/src/emqx_authentication_config.erl @@ -19,13 +19,15 @@ -behaviour(emqx_config_handler). --export([ pre_config_update/3 - , post_config_update/5 - ]). +-export([ + pre_config_update/3, + post_config_update/5 +]). --export([ authenticator_id/1 - , authn_type/1 - ]). +-export([ + authenticator_id/1, + authn_type/1 +]). -ifdef(TEST). -export([convert_certs/2, convert_certs/3, clear_certs/2]). @@ -36,32 +38,35 @@ -include("logger.hrl"). -include("emqx_authentication.hrl"). --type parsed_config() :: #{mechanism := atom(), - backend => atom(), - atom() => term()}. +-type parsed_config() :: #{ + mechanism := atom(), + backend => atom(), + atom() => term() +}. -type raw_config() :: #{binary() => term()}. -type config() :: parsed_config() | raw_config(). -type authenticator_id() :: emqx_authentication:authenticator_id(). -type position() :: emqx_authentication:position(). -type chain_name() :: emqx_authentication:chain_name(). --type update_request() :: {create_authenticator, chain_name(), map()} - | {delete_authenticator, chain_name(), authenticator_id()} - | {update_authenticator, chain_name(), authenticator_id(), map()} - | {move_authenticator, chain_name(), authenticator_id(), position()}. +-type update_request() :: + {create_authenticator, chain_name(), map()} + | {delete_authenticator, chain_name(), authenticator_id()} + | {update_authenticator, chain_name(), authenticator_id(), map()} + | {move_authenticator, chain_name(), authenticator_id(), position()}. %%------------------------------------------------------------------------------ %% Callbacks of config handler %%------------------------------------------------------------------------------ --spec pre_config_update(list(atom()), update_request(), emqx_config:raw_config()) - -> {ok, map() | list()} | {error, term()}. +-spec pre_config_update(list(atom()), update_request(), emqx_config:raw_config()) -> + {ok, map() | list()} | {error, term()}. pre_config_update(_, UpdateReq, OldConfig) -> try do_pre_config_update(UpdateReq, to_list(OldConfig)) of {error, Reason} -> {error, Reason}; {ok, NewConfig} -> {ok, return_map(NewConfig)} catch - throw : Reason -> + throw:Reason -> {error, Reason} end. 
@@ -70,23 +75,29 @@ do_pre_config_update({create_authenticator, ChainName, Config}, OldConfig) -> NConfig = convert_certs(CertsDir, Config), {ok, OldConfig ++ [NConfig]}; do_pre_config_update({delete_authenticator, _ChainName, AuthenticatorID}, OldConfig) -> - NewConfig = lists:filter(fun(OldConfig0) -> - AuthenticatorID =/= authenticator_id(OldConfig0) - end, OldConfig), + NewConfig = lists:filter( + fun(OldConfig0) -> + AuthenticatorID =/= authenticator_id(OldConfig0) + end, + OldConfig + ), {ok, NewConfig}; do_pre_config_update({update_authenticator, ChainName, AuthenticatorID, Config}, OldConfig) -> CertsDir = certs_dir(ChainName, AuthenticatorID), NewConfig = lists:map( - fun(OldConfig0) -> - case AuthenticatorID =:= authenticator_id(OldConfig0) of - true -> convert_certs(CertsDir, Config, OldConfig0); - false -> OldConfig0 - end - end, OldConfig), + fun(OldConfig0) -> + case AuthenticatorID =:= authenticator_id(OldConfig0) of + true -> convert_certs(CertsDir, Config, OldConfig0); + false -> OldConfig0 + end + end, + OldConfig + ), {ok, NewConfig}; do_pre_config_update({move_authenticator, _ChainName, AuthenticatorID, Position}, OldConfig) -> case split_by_id(AuthenticatorID, OldConfig) of - {error, Reason} -> {error, Reason}; + {error, Reason} -> + {error, Reason}; {ok, BeforeFound, [Found | AfterFound]} -> case Position of ?CMD_MOVE_FRONT -> @@ -110,13 +121,14 @@ do_pre_config_update({move_authenticator, _ChainName, AuthenticatorID, Position} end end. --spec post_config_update(list(atom()), - update_request(), - map() | list(), - emqx_config:raw_config(), - emqx_config:app_envs() - ) - -> ok | {ok, map()} | {error, term()}. +-spec post_config_update( + list(atom()), + update_request(), + map() | list(), + emqx_config:raw_config(), + emqx_config:app_envs() +) -> + ok | {ok, map()} | {error, term()}. post_config_update(_, UpdateReq, NewConfig, OldConfig, AppEnvs) -> do_post_config_update(UpdateReq, check_configs(to_list(NewConfig)), OldConfig, AppEnvs). 
@@ -124,8 +136,12 @@ do_post_config_update({create_authenticator, ChainName, Config}, NewConfig, _Old NConfig = get_authenticator_config(authenticator_id(Config), NewConfig), _ = emqx_authentication:create_chain(ChainName), emqx_authentication:create_authenticator(ChainName, NConfig); -do_post_config_update({delete_authenticator, ChainName, AuthenticatorID}, - _NewConfig, OldConfig, _AppEnvs) -> +do_post_config_update( + {delete_authenticator, ChainName, AuthenticatorID}, + _NewConfig, + OldConfig, + _AppEnvs +) -> case emqx_authentication:delete_authenticator(ChainName, AuthenticatorID) of ok -> Config = get_authenticator_config(AuthenticatorID, to_list(OldConfig)), @@ -134,16 +150,24 @@ do_post_config_update({delete_authenticator, ChainName, AuthenticatorID}, {error, Reason} -> {error, Reason} end; -do_post_config_update({update_authenticator, ChainName, AuthenticatorID, Config}, - NewConfig, _OldConfig, _AppEnvs) -> +do_post_config_update( + {update_authenticator, ChainName, AuthenticatorID, Config}, + NewConfig, + _OldConfig, + _AppEnvs +) -> case get_authenticator_config(authenticator_id(Config), NewConfig) of {error, not_found} -> {error, {not_found, {authenticator, AuthenticatorID}}}; NConfig -> emqx_authentication:update_authenticator(ChainName, AuthenticatorID, NConfig) end; -do_post_config_update({move_authenticator, ChainName, AuthenticatorID, Position}, - _NewConfig, _OldConfig, _AppEnvs) -> +do_post_config_update( + {move_authenticator, ChainName, AuthenticatorID, Position}, + _NewConfig, + _OldConfig, + _AppEnvs +) -> emqx_authentication:move_authenticator(ChainName, AuthenticatorID, Position). check_configs(Configs) -> @@ -154,38 +178,45 @@ do_check_config(Config, Providers) -> Type = authn_type(Config), case maps:get(Type, Providers, false) of false -> - ?SLOG(warning, #{msg => "unknown_authn_type", - type => Type, - providers => Providers}), + ?SLOG(warning, #{ + msg => "unknown_authn_type", + type => Type, + providers => Providers + }), throw({unknown_authn_type, Type}); Module -> do_check_config(Type, Config, Module) end. do_check_config(Type, Config, Module) -> - F = case erlang:function_exported(Module, check_config, 1) of + F = + case erlang:function_exported(Module, check_config, 1) of true -> fun Module:check_config/1; false -> fun(C) -> - Key = list_to_binary(?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME), - AtomKey = list_to_atom(?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME), - R = hocon_tconf:check_plain(Module, #{Key => C}, - #{atom_key => true}), - maps:get(AtomKey, R) + Key = list_to_binary(?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME), + AtomKey = list_to_atom(?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME), + R = hocon_tconf:check_plain( + Module, + #{Key => C}, + #{atom_key => true} + ), + maps:get(AtomKey, R) end end, try F(Config) catch - C : E : S -> - ?SLOG(warning, #{msg => "failed_to_check_config", - config => Config, - type => Type, - exception => C, - reason => E, - stacktrace => S - }), + C:E:S -> + ?SLOG(warning, #{ + msg => "failed_to_check_config", + config => Config, + type => Type, + exception => C, + reason => E, + stacktrace => S + }), throw({bad_authenticator_config, #{type => Type, reason => E}}) end. @@ -232,17 +263,23 @@ get_authenticator_config(AuthenticatorID, AuthenticatorsConfig) -> end. 
split_by_id(ID, AuthenticatorsConfig) -> - case lists:foldl( - fun(C, {P1, P2, F0}) -> - F = case ID =:= authenticator_id(C) of - true -> true; - false -> F0 - end, - case F of - false -> {[C | P1], P2, F}; - true -> {P1, [C | P2], F} - end - end, {[], [], false}, AuthenticatorsConfig) of + case + lists:foldl( + fun(C, {P1, P2, F0}) -> + F = + case ID =:= authenticator_id(C) of + true -> true; + false -> F0 + end, + case F of + false -> {[C | P1], P2, F}; + true -> {P1, [C | P2], F} + end + end, + {[], [], false}, + AuthenticatorsConfig + ) + of {_, _, false} -> {error, {not_found, {authenticator, ID}}}; {Part1, Part2, true} -> @@ -273,7 +310,7 @@ authenticator_id(_C) -> throw({missing_parameter, #{name => mechanism}}). %% @doc Make the authentication type. -authn_type(#{mechanism := M, backend := B}) -> {atom(M), atom(B)}; +authn_type(#{mechanism := M, backend := B}) -> {atom(M), atom(B)}; authn_type(#{mechanism := M}) -> atom(M); authn_type(#{<<"mechanism">> := M, <<"backend">> := B}) -> {atom(M), atom(B)}; authn_type(#{<<"mechanism">> := M}) -> atom(M). diff --git a/apps/emqx/src/emqx_authentication_sup.erl b/apps/emqx/src/emqx_authentication_sup.erl index 2e1e18b06..5c2a554ec 100644 --- a/apps/emqx/src/emqx_authentication_sup.erl +++ b/apps/emqx/src/emqx_authentication_sup.erl @@ -34,15 +34,19 @@ start_link() -> %%-------------------------------------------------------------------- init([]) -> - SupFlags = #{strategy => one_for_one, - intensity => 100, - period => 10}, + SupFlags = #{ + strategy => one_for_one, + intensity => 100, + period => 10 + }, - AuthN = #{id => emqx_authentication, - start => {emqx_authentication, start_link, []}, - restart => permanent, - shutdown => 1000, - type => worker, - modules => [emqx_authentication]}, + AuthN = #{ + id => emqx_authentication, + start => {emqx_authentication, start_link, []}, + restart => permanent, + shutdown => 1000, + type => worker, + modules => [emqx_authentication] + }, {ok, {SupFlags, [AuthN]}}. diff --git a/apps/emqx/src/emqx_authz_cache.erl b/apps/emqx/src/emqx_authz_cache.erl index 41e4831f5..668435d36 100644 --- a/apps/emqx/src/emqx_authz_cache.erl +++ b/apps/emqx/src/emqx_authz_cache.erl @@ -18,52 +18,54 @@ -include("emqx.hrl"). --export([ list_authz_cache/0 - , get_authz_cache/2 - , put_authz_cache/3 - , cleanup_authz_cache/0 - , empty_authz_cache/0 - , dump_authz_cache/0 - , get_cache_max_size/0 - , get_cache_ttl/0 - , is_enabled/0 - , drain_cache/0 - , drain_cache/1 - ]). +-export([ + list_authz_cache/0, + get_authz_cache/2, + put_authz_cache/3, + cleanup_authz_cache/0, + empty_authz_cache/0, + dump_authz_cache/0, + get_cache_max_size/0, + get_cache_ttl/0, + is_enabled/0, + drain_cache/0, + drain_cache/1 +]). %% export for test --export([ cache_k/2 - , cache_v/1 - , get_cache_size/0 - , get_newest_key/0 - , get_oldest_key/0 - ]). +-export([ + cache_k/2, + cache_v/1, + get_cache_size/0, + get_newest_key/0, + get_oldest_key/0 +]). --type(authz_result() :: allow | deny). --type(system_time() :: integer()). --type(cache_key() :: {emqx_types:pubsub(), emqx_types:topic()}). --type(cache_val() :: {authz_result(), system_time()}). +-type authz_result() :: allow | deny. +-type system_time() :: integer(). +-type cache_key() :: {emqx_types:pubsub(), emqx_types:topic()}. +-type cache_val() :: {authz_result(), system_time()}. --type(authz_cache_entry() :: {cache_key(), cache_val()}). +-type authz_cache_entry() :: {cache_key(), cache_val()}. %% Wrappers for key and value -cache_k(PubSub, Topic)-> {PubSub, Topic}. 
-cache_v(AuthzResult)-> {AuthzResult, time_now()}. +cache_k(PubSub, Topic) -> {PubSub, Topic}. +cache_v(AuthzResult) -> {AuthzResult, time_now()}. drain_k() -> {?MODULE, drain_timestamp}. --spec(is_enabled() -> boolean()). +-spec is_enabled() -> boolean(). is_enabled() -> emqx:get_config([authorization, cache, enable], false). --spec(get_cache_max_size() -> integer()). +-spec get_cache_max_size() -> integer(). get_cache_max_size() -> emqx:get_config([authorization, cache, max_size]). --spec(get_cache_ttl() -> integer()). +-spec get_cache_ttl() -> integer(). get_cache_ttl() -> emqx:get_config([authorization, cache, ttl]). --spec(list_authz_cache() -> [authz_cache_entry()]). +-spec list_authz_cache() -> [authz_cache_entry()]. list_authz_cache() -> cleanup_authz_cache(), map_authz_cache(fun(Cache) -> Cache end). @@ -73,23 +75,29 @@ list_authz_cache() -> authz_result() | not_found. get_authz_cache(PubSub, Topic) -> case erlang:get(cache_k(PubSub, Topic)) of - undefined -> not_found; + undefined -> + not_found; {AuthzResult, CachedAt} -> - if_expired(get_cache_ttl(), CachedAt, - fun(false) -> - AuthzResult; - (true) -> - cleanup_authz_cache(), - not_found - end) + if_expired( + get_cache_ttl(), + CachedAt, + fun + (false) -> + AuthzResult; + (true) -> + cleanup_authz_cache(), + not_found + end + ) end. %% If the cache get full, and also the latest one %% is expired, then delete all the cache entries --spec put_authz_cache(emqx_types:pubsub(), emqx_types:topic(), authz_result()) - -> ok. +-spec put_authz_cache(emqx_types:pubsub(), emqx_types:topic(), authz_result()) -> + ok. put_authz_cache(PubSub, Topic, AuthzResult) -> - MaxSize = get_cache_max_size(), true = (MaxSize =/= 0), + MaxSize = get_cache_max_size(), + true = (MaxSize =/= 0), Size = get_cache_size(), case Size < MaxSize of true -> @@ -97,37 +105,42 @@ put_authz_cache(PubSub, Topic, AuthzResult) -> false -> NewestK = get_newest_key(), {_AuthzResult, CachedAt} = erlang:get(NewestK), - if_expired(get_cache_ttl(), CachedAt, - fun(true) -> - % all cache expired, cleanup first - empty_authz_cache(), - add_authz(PubSub, Topic, AuthzResult); - (false) -> - % cache full, perform cache replacement - evict_authz_cache(), - add_authz(PubSub, Topic, AuthzResult) - end) + if_expired( + get_cache_ttl(), + CachedAt, + fun + (true) -> + % all cache expired, cleanup first + empty_authz_cache(), + add_authz(PubSub, Topic, AuthzResult); + (false) -> + % cache full, perform cache replacement + evict_authz_cache(), + add_authz(PubSub, Topic, AuthzResult) + end + ) end. %% delete all the authz entries --spec(empty_authz_cache() -> ok). +-spec empty_authz_cache() -> ok. empty_authz_cache() -> foreach_authz_cache(fun({CacheK, _CacheV}) -> erlang:erase(CacheK) end), set_cache_size(0), keys_queue_set(queue:new()). %% delete the oldest authz entry --spec(evict_authz_cache() -> ok). +-spec evict_authz_cache() -> ok. evict_authz_cache() -> OldestK = keys_queue_out(), erlang:erase(OldestK), decr_cache_size(). %% cleanup all the expired cache entries --spec(cleanup_authz_cache() -> ok). +-spec cleanup_authz_cache() -> ok. cleanup_authz_cache() -> keys_queue_set( - cleanup_authz(get_cache_ttl(), keys_queue_get())). + cleanup_authz(get_cache_ttl(), keys_queue_get()) + ). get_oldest_key() -> keys_queue_pick(queue_front()). @@ -144,8 +157,11 @@ dump_authz_cache() -> map_authz_cache(fun(Cache) -> Cache end). map_authz_cache(Fun) -> - [Fun(R) || R = {{SubPub, _T}, _Authz} <- erlang:get(), - SubPub =:= publish orelse SubPub =:= subscribe]. 
+ [ + Fun(R) + || R = {{SubPub, _T}, _Authz} <- erlang:get(), + SubPub =:= publish orelse SubPub =:= subscribe + ]. foreach_authz_cache(Fun) -> _ = map_authz_cache(Fun), ok. @@ -174,8 +190,7 @@ add_authz(PubSub, Topic, AuthzResult) -> V = cache_v(AuthzResult), case erlang:get(K) of undefined -> add_new_authz(K, V); - {_AuthzResult, _CachedAt} -> - update_authz(K, V) + {_AuthzResult, _CachedAt} -> update_authz(K, V) end. add_new_authz(K, V) -> @@ -191,30 +206,38 @@ cleanup_authz(TTL, KeysQ) -> case queue:out(KeysQ) of {{value, OldestK}, KeysQ2} -> {_AuthzResult, CachedAt} = erlang:get(OldestK), - if_expired(TTL, CachedAt, - fun(false) -> KeysQ; - (true) -> - erlang:erase(OldestK), - decr_cache_size(), - cleanup_authz(TTL, KeysQ2) - end); - {empty, KeysQ} -> KeysQ + if_expired( + TTL, + CachedAt, + fun + (false) -> + KeysQ; + (true) -> + erlang:erase(OldestK), + decr_cache_size(), + cleanup_authz(TTL, KeysQ2) + end + ); + {empty, KeysQ} -> + KeysQ end. incr_cache_size() -> - erlang:put(authz_cache_size, get_cache_size() + 1), ok. + erlang:put(authz_cache_size, get_cache_size() + 1), + ok. decr_cache_size() -> Size = get_cache_size(), case Size > 1 of true -> - erlang:put(authz_cache_size, Size-1); + erlang:put(authz_cache_size, Size - 1); false -> - erlang:put(authz_cache_size, 0) + erlang:put(authz_cache_size, 0) end, ok. set_cache_size(N) -> - erlang:put(authz_cache_size, N), ok. + erlang:put(authz_cache_size, N), + ok. %%% Ordered Keys Q %%% keys_queue_in(Key) -> @@ -225,7 +248,8 @@ keys_queue_in(Key) -> keys_queue_out() -> case queue:out(keys_queue_get()) of {{value, OldestK}, Q2} -> - keys_queue_set(Q2), OldestK; + keys_queue_set(Q2), + OldestK; {empty, _Q} -> undefined end. @@ -242,12 +266,17 @@ keys_queue_pick(Pick) -> end. keys_queue_remove(Key, KeysQ) -> - queue:filter(fun - (K) when K =:= Key -> false; (_) -> true - end, KeysQ). + queue:filter( + fun + (K) when K =:= Key -> false; + (_) -> true + end, + KeysQ + ). keys_queue_set(KeysQ) -> - erlang:put(authz_keys_q, KeysQ), ok. + erlang:put(authz_keys_q, KeysQ), + ok. keys_queue_get() -> case erlang:get(authz_keys_q) of undefined -> queue:new(); diff --git a/apps/emqx/src/emqx_banned.erl b/apps/emqx/src/emqx_banned.erl index fce9b29cd..67fa283b0 100644 --- a/apps/emqx/src/emqx_banned.erl +++ b/apps/emqx/src/emqx_banned.erl @@ -22,7 +22,6 @@ -include("logger.hrl"). -include("types.hrl"). - %% Mnesia bootstrap -export([mnesia/1]). @@ -30,23 +29,25 @@ -export([start_link/0, stop/0]). --export([ check/1 - , create/1 - , look_up/1 - , delete/1 - , info/1 - , format/1 - , parse/1 - ]). +-export([ + check/1, + create/1, + look_up/1, + delete/1, + info/1, + format/1, + parse/1 +]). %% gen_server callbacks --export([ init/1 - , handle_call/3 - , handle_cast/2 - , handle_info/2 - , terminate/2 - , code_change/3 - ]). +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2, + code_change/3 +]). -elvis([{elvis_style, state_record_and_type, disable}]). @@ -63,68 +64,71 @@ mnesia(boot) -> ok = mria:create_table(?BANNED_TAB, [ - {type, set}, - {rlog_shard, ?COMMON_SHARD}, - {storage, disc_copies}, - {record_name, banned}, - {attributes, record_info(fields, banned)}, - {storage_properties, [{ets, [{read_concurrency, true}]}]}]). + {type, set}, + {rlog_shard, ?COMMON_SHARD}, + {storage, disc_copies}, + {record_name, banned}, + {attributes, record_info(fields, banned)}, + {storage_properties, [{ets, [{read_concurrency, true}]}]} + ]). %% @doc Start the banned server. --spec(start_link() -> startlink_ret()). 
+-spec start_link() -> startlink_ret(). start_link() -> gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). %% for tests --spec(stop() -> ok). +-spec stop() -> ok. stop() -> gen_server:stop(?MODULE). --spec(check(emqx_types:clientinfo()) -> boolean()). +-spec check(emqx_types:clientinfo()) -> boolean(). check(ClientInfo) -> - do_check({clientid, maps:get(clientid, ClientInfo, undefined)}) - orelse do_check({username, maps:get(username, ClientInfo, undefined)}) - orelse do_check({peerhost, maps:get(peerhost, ClientInfo, undefined)}). + do_check({clientid, maps:get(clientid, ClientInfo, undefined)}) orelse + do_check({username, maps:get(username, ClientInfo, undefined)}) orelse + do_check({peerhost, maps:get(peerhost, ClientInfo, undefined)}). do_check({_, undefined}) -> false; do_check(Who) when is_tuple(Who) -> case mnesia:dirty_read(?BANNED_TAB, Who) of [] -> false; - [#banned{until = Until}] -> - Until > erlang:system_time(second) + [#banned{until = Until}] -> Until > erlang:system_time(second) end. -format(#banned{who = Who0, - by = By, - reason = Reason, - at = At, - until = Until}) -> +format(#banned{ + who = Who0, + by = By, + reason = Reason, + at = At, + until = Until +}) -> {As, Who} = maybe_format_host(Who0), #{ - as => As, - who => Who, - by => By, + as => As, + who => Who, + by => By, reason => Reason, - at => to_rfc3339(At), - until => to_rfc3339(Until) + at => to_rfc3339(At), + until => to_rfc3339(Until) }. parse(Params) -> case pares_who(Params) of - {error, Reason} -> {error, Reason}; - Who -> - By = maps:get(<<"by">>, Params, <<"mgmt_api">>), + {error, Reason} -> + {error, Reason}; + Who -> + By = maps:get(<<"by">>, Params, <<"mgmt_api">>), Reason = maps:get(<<"reason">>, Params, <<"">>), - At = maps:get(<<"at">>, Params, erlang:system_time(second)), - Until = maps:get(<<"until">>, Params, At + 5 * 60), + At = maps:get(<<"at">>, Params, erlang:system_time(second)), + Until = maps:get(<<"until">>, Params, At + 5 * 60), case Until > erlang:system_time(second) of true -> #banned{ - who = Who, - by = By, + who = Who, + by = By, reason = Reason, - at = At, - until = Until + at = At, + until = Until }; false -> ErrorReason = @@ -151,13 +155,15 @@ maybe_format_host({As, Who}) -> to_rfc3339(Timestamp) -> list_to_binary(calendar:system_time_to_rfc3339(Timestamp, [{unit, second}])). --spec(create(emqx_types:banned() | map()) -> - {ok, emqx_types:banned()} | {error, {already_exist, emqx_types:banned()}}). -create(#{who := Who, - by := By, - reason := Reason, - at := At, - until := Until}) -> +-spec create(emqx_types:banned() | map()) -> + {ok, emqx_types:banned()} | {error, {already_exist, emqx_types:banned()}}. +create(#{ + who := Who, + by := By, + reason := Reason, + at := At, + until := Until +}) -> Banned = #banned{ who = Who, by = By, @@ -166,8 +172,7 @@ create(#{who := Who, until = Until }, create(Banned); - -create(Banned = #banned{who = Who}) -> +create(Banned = #banned{who = Who}) -> case look_up(Who) of [] -> mria:dirty_write(?BANNED_TAB, Banned), @@ -176,8 +181,10 @@ create(Banned = #banned{who = Who}) -> %% Don't support shorten or extend the until time by overwrite. %% We don't support update api yet, user must delete then create new one. case Until > erlang:system_time(second) of - true -> {error, {already_exist, OldBanned}}; - false -> %% overwrite expired one is ok. + true -> + {error, {already_exist, OldBanned}}; + %% overwrite expired one is ok. 
+ false -> mria:dirty_write(?BANNED_TAB, Banned), {ok, Banned} end @@ -188,10 +195,12 @@ look_up(Who) when is_map(Who) -> look_up(Who) -> mnesia:dirty_read(?BANNED_TAB, Who). --spec(delete({clientid, emqx_types:clientid()} - | {username, emqx_types:username()} - | {peerhost, emqx_types:peerhost()}) -> ok). -delete(Who) when is_map(Who)-> +-spec delete( + {clientid, emqx_types:clientid()} + | {username, emqx_types:username()} + | {peerhost, emqx_types:peerhost()} +) -> ok. +delete(Who) when is_map(Who) -> delete(pares_who(Who)); delete(Who) -> mria:dirty_delete(?BANNED_TAB, Who). @@ -217,7 +226,6 @@ handle_cast(Msg, State) -> handle_info({timeout, TRef, expire}, State = #{expiry_timer := TRef}) -> _ = mria:transaction(?COMMON_SHARD, fun expire_banned_items/1, [erlang:system_time(second)]), {noreply, ensure_expiry_timer(State), hibernate}; - handle_info(Info, State) -> ?SLOG(error, #{msg => "unexpected_info", info => Info}), {noreply, State}. @@ -242,7 +250,12 @@ ensure_expiry_timer(State) -> expire_banned_items(Now) -> mnesia:foldl( - fun(B = #banned{until = Until}, _Acc) when Until < Now -> - mnesia:delete_object(?BANNED_TAB, B, sticky_write); - (_, _Acc) -> ok - end, ok, ?BANNED_TAB). + fun + (B = #banned{until = Until}, _Acc) when Until < Now -> + mnesia:delete_object(?BANNED_TAB, B, sticky_write); + (_, _Acc) -> + ok + end, + ok, + ?BANNED_TAB + ). diff --git a/apps/emqx/src/emqx_base62.erl b/apps/emqx/src/emqx_base62.erl index 73caed36b..059b20d52 100644 --- a/apps/emqx/src/emqx_base62.erl +++ b/apps/emqx/src/emqx_base62.erl @@ -17,9 +17,10 @@ -module(emqx_base62). %% APIs --export([ encode/1 - , decode/1 - ]). +-export([ + encode/1, + decode/1 +]). %%-------------------------------------------------------------------- %% APIs @@ -29,7 +30,7 @@ -spec encode(string() | integer() | binary()) -> binary(). encode(I) when is_integer(I) -> encode(integer_to_binary(I)); -encode(S) when is_list(S)-> +encode(S) when is_list(S) -> encode(unicode:characters_to_binary(S)); encode(B) when is_binary(B) -> encode(B, <<>>). @@ -47,21 +48,22 @@ decode(B) when is_binary(B) -> encode(<>, Acc) -> CharList = [encode_char(Index1), encode_char(Index2), encode_char(Index3), encode_char(Index4)], - NewAcc = <>, + NewAcc = <>, encode(Rest, NewAcc); encode(<>, Acc) -> CharList = [encode_char(Index1), encode_char(Index2), encode_char(Index3)], - NewAcc = <>, + NewAcc = <>, encode(<<>>, NewAcc); encode(<>, Acc) -> CharList = [encode_char(Index1), encode_char(Index2)], - NewAcc = <>, + NewAcc = <>, encode(<<>>, NewAcc); encode(<<>>, Acc) -> Acc. -decode(<>, Acc) - when bit_size(Rest) >= 8-> +decode(<>, Acc) when + bit_size(Rest) >= 8 +-> case Head == $9 of true -> <> = Rest, @@ -85,7 +87,6 @@ decode(<>, Acc) -> decode(<<>>, Acc) -> Acc. - encode_char(I) when I < 26 -> $A + I; encode_char(I) when I < 52 -> @@ -97,9 +98,9 @@ encode_char(I) -> decode_char(I) when I >= $a andalso I =< $z -> I + 26 - $a; -decode_char(I) when I >= $0 andalso I =< $8-> +decode_char(I) when I >= $0 andalso I =< $8 -> I + 52 - $0; -decode_char(I) when I >= $A andalso I =< $Z-> +decode_char(I) when I >= $A andalso I =< $Z -> I - $A. decode_char(9, I) -> diff --git a/apps/emqx/src/emqx_batch.erl b/apps/emqx/src/emqx_batch.erl index fc4c68938..a287604c4 100644 --- a/apps/emqx/src/emqx_batch.erl +++ b/apps/emqx/src/emqx_batch.erl @@ -17,62 +17,69 @@ -module(emqx_batch). %% APIs --export([ init/1 - , push/2 - , commit/1 - , size/1 - , items/1 - ]). +-export([ + init/1, + push/2, + commit/1, + size/1, + items/1 +]). 
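The emqx_base62 hunks above only touch formatting; the interesting part is the alphabet in encode_char/decode_char (indexes 0..25 map to A-Z, 26..51 to a-z, 52 and up to digits, with $9 handled specially in decode). As a rough illustration of that alphabet order, here is a standalone integer-to-base62 codec. It is a sketch only: it skips the module's chunked 6-bit binary scheme and the $9 escape, and all names are illustrative.

-module(base62_int_sketch).
-export([encode/1, decode/1]).

%% Encode a non-negative integer with the same alphabet order as
%% encode_char/1 above: 0..25 -> $A.., 26..51 -> $a.., 52..61 -> $0..$9.
encode(0) ->
    %% index 0 is $A in this alphabet
    <<"A">>;
encode(N) when is_integer(N), N > 0 ->
    encode(N, []).

encode(0, Acc) -> list_to_binary(Acc);
encode(N, Acc) -> encode(N div 62, [char(N rem 62) | Acc]).

decode(Bin) when is_binary(Bin) ->
    lists:foldl(fun(C, Acc) -> Acc * 62 + index(C) end, 0, binary_to_list(Bin)).

char(I) when I < 26 -> $A + I;
char(I) when I < 52 -> $a + I - 26;
char(I) -> $0 + I - 52.

index(C) when C >= $A, C =< $Z -> C - $A;
index(C) when C >= $a, C =< $z -> C - $a + 26;
index(C) when C >= $0, C =< $9 -> C - $0 + 52.

A quick check: encode(125) is <<"CB">> (2*62 + 1), and decode(<<"CB">>) returns 125.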
-export_type([options/0, batch/0]). -record(batch, { - batch_size :: non_neg_integer(), - batch_q :: list(any()), - linger_ms :: pos_integer(), - linger_timer :: reference() | undefined, - commit_fun :: function() - }). + batch_size :: non_neg_integer(), + batch_q :: list(any()), + linger_ms :: pos_integer(), + linger_timer :: reference() | undefined, + commit_fun :: function() +}). --type(options() :: #{ - batch_size => non_neg_integer(), - linger_ms => pos_integer(), - commit_fun := function() - }). +-type options() :: #{ + batch_size => non_neg_integer(), + linger_ms => pos_integer(), + commit_fun := function() +}. --opaque(batch() :: #batch{}). +-opaque batch() :: #batch{}. %%-------------------------------------------------------------------- %% APIs %%-------------------------------------------------------------------- --spec(init(options()) -> batch()). +-spec init(options()) -> batch(). init(Opts) when is_map(Opts) -> - #batch{batch_size = maps:get(batch_size, Opts, 1000), - batch_q = [], - linger_ms = maps:get(linger_ms, Opts, 1000), - commit_fun = maps:get(commit_fun, Opts)}. + #batch{ + batch_size = maps:get(batch_size, Opts, 1000), + batch_q = [], + linger_ms = maps:get(linger_ms, Opts, 1000), + commit_fun = maps:get(commit_fun, Opts) + }. --spec(push(any(), batch()) -> batch()). -push(El, Batch = #batch{batch_q = Q, - linger_ms = Ms, - linger_timer = undefined}) - when length(Q) == 0 -> +-spec push(any(), batch()) -> batch(). +push( + El, + Batch = #batch{ + batch_q = Q, + linger_ms = Ms, + linger_timer = undefined + } +) when + length(Q) == 0 +-> TRef = erlang:send_after(Ms, self(), batch_linger_expired), Batch#batch{batch_q = [El], linger_timer = TRef}; - %% no limit. push(El, Batch = #batch{batch_size = 0, batch_q = Q}) -> - Batch#batch{batch_q = [El|Q]}; - -push(El, Batch = #batch{batch_size = MaxSize, batch_q = Q}) - when length(Q) >= MaxSize -> - commit(Batch#batch{batch_q = [El|Q]}); - + Batch#batch{batch_q = [El | Q]}; +push(El, Batch = #batch{batch_size = MaxSize, batch_q = Q}) when + length(Q) >= MaxSize +-> + commit(Batch#batch{batch_q = [El | Q]}); push(El, Batch = #batch{batch_q = Q}) -> - Batch#batch{batch_q = [El|Q]}. + Batch#batch{batch_q = [El | Q]}. --spec(commit(batch()) -> batch()). +-spec commit(batch()) -> batch(). commit(Batch = #batch{batch_q = Q, commit_fun = Commit}) -> _ = Commit(lists:reverse(Q)), reset(Batch). @@ -81,11 +88,10 @@ reset(Batch = #batch{linger_timer = TRef}) -> _ = emqx_misc:cancel_timer(TRef), Batch#batch{batch_q = [], linger_timer = undefined}. --spec(size(batch()) -> non_neg_integer()). +-spec size(batch()) -> non_neg_integer(). size(#batch{batch_q = Q}) -> length(Q). --spec(items(batch()) -> list(any())). +-spec items(batch()) -> list(any()). items(#batch{batch_q = Q}) -> lists:reverse(Q). - diff --git a/apps/emqx/src/emqx_boot.erl b/apps/emqx/src/emqx_boot.erl index 97985a14c..7bd18e22f 100644 --- a/apps/emqx/src/emqx_boot.erl +++ b/apps/emqx/src/emqx_boot.erl @@ -20,10 +20,9 @@ -define(BOOT_MODULES, [router, broker, listeners]). --spec(is_enabled(all|router|broker|listeners) -> boolean()). +-spec is_enabled(all | router | broker | listeners) -> boolean(). is_enabled(Mod) -> (BootMods = boot_modules()) =:= all orelse lists:member(Mod, BootMods). boot_modules() -> application:get_env(emqx, boot_modules, ?BOOT_MODULES). 
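The emqx_batch API above is meant to be driven by an owner process: init/1 takes #{batch_size, linger_ms, commit_fun}, push/2 returns an updated batch (and commits by itself once batch_size is reached), and the linger timer delivers a batch_linger_expired message to the process that called push/2. A hedged usage sketch follows; the owner module, its names, and the logging inside commit_fun are illustrative, only the emqx_batch calls come from the code above.

%% A hypothetical owner process that accumulates writes through emqx_batch.
-module(batch_owner_sketch).
-behaviour(gen_server).
-export([start_link/0, add/1]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2]).

start_link() -> gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

add(Item) -> gen_server:cast(?MODULE, {add, Item}).

init([]) ->
    Batch = emqx_batch:init(#{
        batch_size => 500,
        linger_ms => 200,
        %% commit_fun receives the queued items in insertion order
        commit_fun => fun(Items) -> io:format("committing ~p items~n", [length(Items)]) end
    }),
    {ok, #{batch => Batch}}.

handle_call(_Req, _From, State) -> {reply, ignored, State}.

handle_cast({add, Item}, State = #{batch := Batch}) ->
    %% push/2 commits on its own once batch_size is reached
    {noreply, State#{batch := emqx_batch:push(Item, Batch)}}.

%% The linger timer set by emqx_batch fires in the owner process.
handle_info(batch_linger_expired, State = #{batch := Batch}) ->
    {noreply, State#{batch := emqx_batch:commit(Batch)}};
handle_info(_Info, State) ->
    {noreply, State}.

commit/1 then hands the items to commit_fun in insertion order and resets the queue, cancelling any pending linger timer.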
- diff --git a/apps/emqx/src/emqx_broker.erl b/apps/emqx/src/emqx_broker.erl index 4afc4f99d..4ea83673a 100644 --- a/apps/emqx/src/emqx_broker.erl +++ b/apps/emqx/src/emqx_broker.erl @@ -23,35 +23,38 @@ -include("types.hrl"). -include("emqx_mqtt.hrl"). - -export([start_link/2]). %% PubSub --export([ subscribe/1 - , subscribe/2 - , subscribe/3 - ]). +-export([ + subscribe/1, + subscribe/2, + subscribe/3 +]). -export([unsubscribe/1]). -export([subscriber_down/1]). --export([ publish/1 - , safe_publish/1 - ]). +-export([ + publish/1, + safe_publish/1 +]). -export([dispatch/2]). %% PubSub Infos --export([ subscriptions/1 - , subscriptions_via_topic/1 - , subscribers/1 - , subscribed/2 - ]). +-export([ + subscriptions/1, + subscriptions_via_topic/1, + subscribers/1, + subscribed/2 +]). --export([ get_subopts/2 - , set_subopts/2 - ]). +-export([ + get_subopts/2, + set_subopts/2 +]). -export([topics/0]). @@ -59,13 +62,14 @@ -export([stats_fun/0]). %% gen_server callbacks --export([ init/1 - , handle_call/3 - , handle_cast/2 - , handle_info/2 - , terminate/2 - , code_change/3 - ]). +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2, + code_change/3 +]). -import(emqx_tables, [lookup_value/2, lookup_value/3]). @@ -84,17 +88,21 @@ %% Guards -define(IS_SUBID(Id), (is_binary(Id) orelse is_atom(Id))). --spec(start_link(atom(), pos_integer()) -> startlink_ret()). +-spec start_link(atom(), pos_integer()) -> startlink_ret(). start_link(Pool, Id) -> ok = create_tabs(), - gen_server:start_link({local, emqx_misc:proc_name(?BROKER, Id)}, - ?MODULE, [Pool, Id], []). + gen_server:start_link( + {local, emqx_misc:proc_name(?BROKER, Id)}, + ?MODULE, + [Pool, Id], + [] + ). %%------------------------------------------------------------------------------ %% Create tabs %%------------------------------------------------------------------------------ --spec(create_tabs() -> ok). +-spec create_tabs() -> ok. create_tabs() -> TabOpts = [public, {read_concurrency, true}, {write_concurrency, true}], @@ -113,28 +121,31 @@ create_tabs() -> %% Subscribe API %%------------------------------------------------------------------------------ --spec(subscribe(emqx_types:topic()) -> ok). +-spec subscribe(emqx_types:topic()) -> ok. subscribe(Topic) when is_binary(Topic) -> subscribe(Topic, undefined). --spec(subscribe(emqx_types:topic(), emqx_types:subid() | emqx_types:subopts()) -> ok). +-spec subscribe(emqx_types:topic(), emqx_types:subid() | emqx_types:subopts()) -> ok. subscribe(Topic, SubId) when is_binary(Topic), ?IS_SUBID(SubId) -> subscribe(Topic, SubId, ?DEFAULT_SUBOPTS); subscribe(Topic, SubOpts) when is_binary(Topic), is_map(SubOpts) -> subscribe(Topic, undefined, SubOpts). --spec(subscribe(emqx_types:topic(), emqx_types:subid(), emqx_types:subopts()) -> ok). +-spec subscribe(emqx_types:topic(), emqx_types:subid(), emqx_types:subopts()) -> ok. subscribe(Topic, SubId, SubOpts0) when is_binary(Topic), ?IS_SUBID(SubId), is_map(SubOpts0) -> SubOpts = maps:merge(?DEFAULT_SUBOPTS, SubOpts0), _ = emqx_trace:subscribe(Topic, SubId, SubOpts), SubPid = self(), case ets:member(?SUBOPTION, {SubPid, Topic}) of - false -> %% New + %% New + false -> ok = emqx_broker_helper:register_sub(SubPid, SubId), do_subscribe(Topic, SubPid, with_subid(SubId, SubOpts)); - true -> %% Existed + %% Existed + true -> set_subopts(SubPid, Topic, with_subid(SubId, SubOpts)), - ok %% ensure to return 'ok' + %% ensure to return 'ok' + ok end. -compile({inline, [with_subid/2]}). 
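subscribe/1 above registers the calling process as the subscriber; deliveries later arrive in that process's mailbox as {deliver, Topic, Msg} tuples (see do_dispatch further down). A minimal round-trip sketch, assuming a running emqx node and that emqx_message:make/2 and emqx_message:payload/1 are available to build and read a #message{}:

-module(broker_roundtrip_sketch).
-export([demo/0]).

%% Run inside a node where the emqx application is started.
demo() ->
    ok = emqx_broker:subscribe(<<"demo/topic">>),
    %% emqx_message:make/2 is assumed to build a #message{} for Topic/Payload
    _ = emqx_broker:publish(emqx_message:make(<<"demo/topic">>, <<"hello">>)),
    receive
        {deliver, <<"demo/topic">>, Msg} ->
            {got, emqx_message:payload(Msg)}
    after 1000 ->
        timeout
    end.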
@@ -151,14 +162,15 @@ do_subscribe(Topic, SubPid, SubOpts) -> do_subscribe(undefined, Topic, SubPid, SubOpts) -> case emqx_broker_helper:get_sub_shard(SubPid, Topic) of - 0 -> true = ets:insert(?SUBSCRIBER, {Topic, SubPid}), - true = ets:insert(?SUBOPTION, {{SubPid, Topic}, SubOpts}), - call(pick(Topic), {subscribe, Topic}); - I -> true = ets:insert(?SUBSCRIBER, {{shard, Topic, I}, SubPid}), - true = ets:insert(?SUBOPTION, {{SubPid, Topic}, maps:put(shard, I, SubOpts)}), - call(pick({Topic, I}), {subscribe, Topic, I}) + 0 -> + true = ets:insert(?SUBSCRIBER, {Topic, SubPid}), + true = ets:insert(?SUBOPTION, {{SubPid, Topic}, SubOpts}), + call(pick(Topic), {subscribe, Topic}); + I -> + true = ets:insert(?SUBSCRIBER, {{shard, Topic, I}, SubPid}), + true = ets:insert(?SUBOPTION, {{SubPid, Topic}, maps:put(shard, I, SubOpts)}), + call(pick({Topic, I}), {subscribe, Topic, I}) end; - %% Shared subscription do_subscribe(Group, Topic, SubPid, SubOpts) -> true = ets:insert(?SUBOPTION, {{SubPid, Topic}, SubOpts}), @@ -168,7 +180,7 @@ do_subscribe(Group, Topic, SubPid, SubOpts) -> %% Unsubscribe API %%-------------------------------------------------------------------- --spec(unsubscribe(emqx_types:topic()) -> ok). +-spec unsubscribe(emqx_types:topic()) -> ok. unsubscribe(Topic) when is_binary(Topic) -> SubPid = self(), case ets:lookup(?SUBOPTION, {SubPid, Topic}) of @@ -176,7 +188,8 @@ unsubscribe(Topic) when is_binary(Topic) -> _ = emqx_broker_helper:reclaim_seq(Topic), _ = emqx_trace:unsubscribe(Topic, SubOpts), do_unsubscribe(Topic, SubPid, SubOpts); - [] -> ok + [] -> + ok end. do_unsubscribe(Topic, SubPid, SubOpts) -> @@ -187,12 +200,13 @@ do_unsubscribe(Topic, SubPid, SubOpts) -> do_unsubscribe(undefined, Topic, SubPid, SubOpts) -> case maps:get(shard, SubOpts, 0) of - 0 -> true = ets:delete_object(?SUBSCRIBER, {Topic, SubPid}), - cast(pick(Topic), {unsubscribed, Topic}); - I -> true = ets:delete_object(?SUBSCRIBER, {{shard, Topic, I}, SubPid}), - cast(pick({Topic, I}), {unsubscribed, Topic, I}) + 0 -> + true = ets:delete_object(?SUBSCRIBER, {Topic, SubPid}), + cast(pick(Topic), {unsubscribed, Topic}); + I -> + true = ets:delete_object(?SUBSCRIBER, {{shard, Topic, I}, SubPid}), + cast(pick({Topic, I}), {unsubscribed, Topic, I}) end; - do_unsubscribe(Group, Topic, SubPid, _SubOpts) -> emqx_shared_sub:unsubscribe(Group, Topic, SubPid). @@ -200,14 +214,16 @@ do_unsubscribe(Group, Topic, SubPid, _SubOpts) -> %% Publish %%-------------------------------------------------------------------- --spec(publish(emqx_types:message()) -> emqx_types:publish_result()). +-spec publish(emqx_types:message()) -> emqx_types:publish_result(). publish(Msg) when is_record(Msg, message) -> _ = emqx_trace:publish(Msg), emqx_message:is_sys(Msg) orelse emqx_metrics:inc('messages.publish'), case emqx_hooks:run_fold('message.publish', [], emqx_message:clean_dup(Msg)) of #message{headers = #{allow_publish := false}, topic = Topic} -> - ?TRACE("MQTT", "msg_publish_not_allowed", #{message => emqx_message:to_log_map(Msg), - topic => Topic}), + ?TRACE("MQTT", "msg_publish_not_allowed", #{ + message => emqx_message:to_log_map(Msg), + topic => Topic + }), []; Msg1 = #message{topic = Topic} -> emqx_persistent_session:persist_message(Msg1), @@ -215,19 +231,21 @@ publish(Msg) when is_record(Msg, message) -> end. %% Called internally --spec(safe_publish(emqx_types:message()) -> emqx_types:publish_result()). +-spec safe_publish(emqx_types:message()) -> emqx_types:publish_result(). 
safe_publish(Msg) when is_record(Msg, message) -> try publish(Msg) catch - Error : Reason : Stk-> - ?SLOG(error,#{ - msg => "publishing_error", - exception => Error, - reason => Reason, - payload => emqx_message:to_log_map(Msg), - stacktrace => Stk - }, + Error:Reason:Stk -> + ?SLOG( + error, + #{ + msg => "publishing_error", + exception => Error, + reason => Reason, + payload => emqx_message:to_log_map(Msg), + stacktrace => Stk + }, #{topic => Msg#message.topic} ), [] @@ -240,17 +258,20 @@ delivery(Msg) -> #delivery{sender = self(), message = Msg}. %% Route %%-------------------------------------------------------------------- --spec(route([emqx_types:route_entry()], emqx_types:delivery()) - -> emqx_types:publish_result()). +-spec route([emqx_types:route_entry()], emqx_types:delivery()) -> + emqx_types:publish_result(). route([], #delivery{message = Msg}) -> ok = emqx_hooks:run('message.dropped', [Msg, #{node => node()}, no_subscribers]), ok = inc_dropped_cnt(Msg), []; - route(Routes, Delivery) -> - lists:foldl(fun(Route, Acc) -> - [do_route(Route, Delivery) | Acc] - end, [], Routes). + lists:foldl( + fun(Route, Acc) -> + [do_route(Route, Delivery) | Acc] + end, + [], + Routes + ). do_route({To, Node}, Delivery) when Node =:= node() -> {Node, To, dispatch(To, Delivery)}; @@ -259,43 +280,52 @@ do_route({To, Node}, Delivery) when is_atom(Node) -> do_route({To, Group}, Delivery) when is_tuple(Group); is_binary(Group) -> {share, To, emqx_shared_sub:dispatch(Group, To, Delivery)}. -aggre([]) -> []; +aggre([]) -> + []; aggre([#route{topic = To, dest = Node}]) when is_atom(Node) -> [{To, Node}]; aggre([#route{topic = To, dest = {Group, _Node}}]) -> [{To, Group}]; aggre(Routes) -> lists:foldl( - fun(#route{topic = To, dest = Node}, Acc) when is_atom(Node) -> - [{To, Node} | Acc]; - (#route{topic = To, dest = {Group, _Node}}, Acc) -> - lists:usort([{To, Group} | Acc]) - end, [], Routes). + fun + (#route{topic = To, dest = Node}, Acc) when is_atom(Node) -> + [{To, Node} | Acc]; + (#route{topic = To, dest = {Group, _Node}}, Acc) -> + lists:usort([{To, Group} | Acc]) + end, + [], + Routes + ). %% @doc Forward message to another node. --spec(forward(node(), emqx_types:topic(), emqx_types:delivery(), RpcMode::sync | async) - -> emqx_types:deliver_result()). +-spec forward(node(), emqx_types:topic(), emqx_types:delivery(), RpcMode :: sync | async) -> + emqx_types:deliver_result(). forward(Node, To, Delivery, async) -> true = emqx_broker_proto_v1:forward_async(Node, To, Delivery), emqx_metrics:inc('messages.forward'); forward(Node, To, Delivery, sync) -> case emqx_broker_proto_v1:forward(Node, To, Delivery) of {Err, Reason} when Err =:= badrpc; Err =:= badtcp -> - ?SLOG(error, #{ - msg => "sync_forward_msg_to_node_failed", - node => Node, - Err => Reason - }, #{topic => To}), + ?SLOG( + error, + #{ + msg => "sync_forward_msg_to_node_failed", + node => Node, + Err => Reason + }, + #{topic => To} + ), {error, badrpc}; Result -> emqx_metrics:inc('messages.forward'), Result end. --spec(dispatch(emqx_types:topic(), emqx_types:delivery()) -> emqx_types:deliver_result()). +-spec dispatch(emqx_types:topic(), emqx_types:delivery()) -> emqx_types:deliver_result(). dispatch(Topic, Delivery = #delivery{}) when is_binary(Topic) -> case emqx:is_running() of - true -> + true -> do_dispatch(Topic, Delivery); false -> %% In a rare case emqx_router_helper process may delay @@ -308,81 +338,92 @@ dispatch(Topic, Delivery = #delivery{}) when is_binary(Topic) -> -compile({inline, [inc_dropped_cnt/1]}). 
inc_dropped_cnt(Msg) -> case emqx_message:is_sys(Msg) of - true -> ok; - false -> ok = emqx_metrics:inc('messages.dropped'), - emqx_metrics:inc('messages.dropped.no_subscribers') + true -> + ok; + false -> + ok = emqx_metrics:inc('messages.dropped'), + emqx_metrics:inc('messages.dropped.no_subscribers') end. -compile({inline, [subscribers/1]}). --spec(subscribers(emqx_types:topic() | {shard, emqx_types:topic(), non_neg_integer()}) - -> [pid()]). +-spec subscribers(emqx_types:topic() | {shard, emqx_types:topic(), non_neg_integer()}) -> + [pid()]. subscribers(Topic) when is_binary(Topic) -> lookup_value(?SUBSCRIBER, Topic, []); -subscribers(Shard = {shard, _Topic, _I}) -> +subscribers(Shard = {shard, _Topic, _I}) -> lookup_value(?SUBSCRIBER, Shard, []). %%-------------------------------------------------------------------- %% Subscriber is down %%-------------------------------------------------------------------- --spec(subscriber_down(pid()) -> true). +-spec subscriber_down(pid()) -> true. subscriber_down(SubPid) -> lists:foreach( - fun(Topic) -> - case lookup_value(?SUBOPTION, {SubPid, Topic}) of - SubOpts when is_map(SubOpts) -> - _ = emqx_broker_helper:reclaim_seq(Topic), - true = ets:delete(?SUBOPTION, {SubPid, Topic}), - case maps:get(shard, SubOpts, 0) of - 0 -> true = ets:delete_object(?SUBSCRIBER, {Topic, SubPid}), - ok = cast(pick(Topic), {unsubscribed, Topic}); - I -> true = ets:delete_object(?SUBSCRIBER, {{shard, Topic, I}, SubPid}), - ok = cast(pick({Topic, I}), {unsubscribed, Topic, I}) - end; - undefined -> ok - end - end, lookup_value(?SUBSCRIPTION, SubPid, [])), + fun(Topic) -> + case lookup_value(?SUBOPTION, {SubPid, Topic}) of + SubOpts when is_map(SubOpts) -> + _ = emqx_broker_helper:reclaim_seq(Topic), + true = ets:delete(?SUBOPTION, {SubPid, Topic}), + case maps:get(shard, SubOpts, 0) of + 0 -> + true = ets:delete_object(?SUBSCRIBER, {Topic, SubPid}), + ok = cast(pick(Topic), {unsubscribed, Topic}); + I -> + true = ets:delete_object(?SUBSCRIBER, {{shard, Topic, I}, SubPid}), + ok = cast(pick({Topic, I}), {unsubscribed, Topic, I}) + end; + undefined -> + ok + end + end, + lookup_value(?SUBSCRIPTION, SubPid, []) + ), ets:delete(?SUBSCRIPTION, SubPid). %%-------------------------------------------------------------------- %% Management APIs %%-------------------------------------------------------------------- --spec(subscriptions(pid() | emqx_types:subid()) - -> [{emqx_types:topic(), emqx_types:subopts()}]). +-spec subscriptions(pid() | emqx_types:subid()) -> + [{emqx_types:topic(), emqx_types:subopts()}]. subscriptions(SubPid) when is_pid(SubPid) -> - [{Topic, lookup_value(?SUBOPTION, {SubPid, Topic}, #{})} - || Topic <- lookup_value(?SUBSCRIPTION, SubPid, [])]; + [ + {Topic, lookup_value(?SUBOPTION, {SubPid, Topic}, #{})} + || Topic <- lookup_value(?SUBSCRIPTION, SubPid, []) + ]; subscriptions(SubId) -> case emqx_broker_helper:lookup_subpid(SubId) of SubPid when is_pid(SubPid) -> subscriptions(SubPid); - undefined -> [] + undefined -> + [] end. --spec(subscriptions_via_topic(emqx_types:topic()) -> [emqx_types:subopts()]). +-spec subscriptions_via_topic(emqx_types:topic()) -> [emqx_types:subopts()]. subscriptions_via_topic(Topic) -> MatchSpec = [{{{'_', '$1'}, '_'}, [{'=:=', '$1', Topic}], ['$_']}], ets:select(?SUBOPTION, MatchSpec). --spec(subscribed(pid() | emqx_types:subid(), emqx_types:topic()) -> boolean()). +-spec subscribed(pid() | emqx_types:subid(), emqx_types:topic()) -> boolean(). 
subscribed(SubPid, Topic) when is_pid(SubPid) -> ets:member(?SUBOPTION, {SubPid, Topic}); subscribed(SubId, Topic) when ?IS_SUBID(SubId) -> SubPid = emqx_broker_helper:lookup_subpid(SubId), ets:member(?SUBOPTION, {SubPid, Topic}). --spec(get_subopts(pid(), emqx_types:topic()) -> maybe(emqx_types:subopts())). +-spec get_subopts(pid(), emqx_types:topic()) -> maybe(emqx_types:subopts()). get_subopts(SubPid, Topic) when is_pid(SubPid), is_binary(Topic) -> lookup_value(?SUBOPTION, {SubPid, Topic}); get_subopts(SubId, Topic) when ?IS_SUBID(SubId) -> case emqx_broker_helper:lookup_subpid(SubId) of SubPid when is_pid(SubPid) -> get_subopts(SubPid, Topic); - undefined -> undefined + undefined -> + undefined end. --spec(set_subopts(emqx_types:topic(), emqx_types:subopts()) -> boolean()). +-spec set_subopts(emqx_types:topic(), emqx_types:subopts()) -> boolean(). set_subopts(Topic, NewOpts) when is_binary(Topic), is_map(NewOpts) -> set_subopts(self(), Topic, NewOpts). @@ -392,10 +433,11 @@ set_subopts(SubPid, Topic, NewOpts) -> case ets:lookup(?SUBOPTION, Sub) of [{_, OldOpts}] -> ets:insert(?SUBOPTION, {Sub, maps:merge(OldOpts, NewOpts)}); - [] -> false + [] -> + false end. --spec(topics() -> [emqx_types:topic()]). +-spec topics() -> [emqx_types:topic()]. topics() -> emqx_router:topics(). @@ -441,18 +483,18 @@ init([Pool, Id]) -> handle_call({subscribe, Topic}, _From, State) -> Ok = emqx_router:do_add_route(Topic), {reply, Ok, State}; - handle_call({subscribe, Topic, I}, _From, State) -> Shard = {Topic, I}, - Ok = case get(Shard) of - undefined -> - _ = put(Shard, true), - true = ets:insert(?SUBSCRIBER, {Topic, {shard, I}}), - cast(pick(Topic), {subscribe, Topic}); - true -> ok - end, + Ok = + case get(Shard) of + undefined -> + _ = put(Shard, true), + true = ets:insert(?SUBSCRIBER, {Topic, {shard, I}}), + cast(pick(Topic), {subscribe, Topic}); + true -> + ok + end, {reply, Ok, State}; - handle_call(Req, _From, State) -> ?SLOG(error, #{msg => "unexpected_call", call => Req}), {reply, ignored, State}. @@ -460,30 +502,28 @@ handle_call(Req, _From, State) -> handle_cast({subscribe, Topic}, State) -> case emqx_router:do_add_route(Topic) of ok -> ok; - {error, Reason} -> - ?SLOG(error, #{msg => "failed_to_add_route", reason => Reason}) + {error, Reason} -> ?SLOG(error, #{msg => "failed_to_add_route", reason => Reason}) end, {noreply, State}; - handle_cast({unsubscribed, Topic}, State) -> case ets:member(?SUBSCRIBER, Topic) of false -> _ = emqx_router:do_delete_route(Topic), ok; - true -> ok + true -> + ok end, {noreply, State}; - handle_cast({unsubscribed, Topic, I}, State) -> case ets:member(?SUBSCRIBER, {shard, Topic, I}) of false -> _ = erase({Topic, I}), true = ets:delete_object(?SUBSCRIBER, {Topic, {shard, I}}), cast(pick(Topic), {unsubscribed, Topic}); - true -> ok + true -> + ok end, {noreply, State}; - handle_cast(Msg, State) -> ?SLOG(error, #{msg => "unexpected_cast", cast => Msg}), {noreply, State}. @@ -502,12 +542,15 @@ code_change(_OldVsn, State, _Extra) -> %% Internal functions %%-------------------------------------------------------------------- --spec(do_dispatch(emqx_types:topic(), emqx_types:delivery()) -> emqx_types:deliver_result()). +-spec do_dispatch(emqx_types:topic(), emqx_types:delivery()) -> emqx_types:deliver_result(). 
do_dispatch(Topic, #delivery{message = Msg}) -> DispN = lists:foldl( - fun(Sub, N) -> - N + do_dispatch(Sub, Topic, Msg) - end, 0, subscribers(Topic)), + fun(Sub, N) -> + N + do_dispatch(Sub, Topic, Msg) + end, + 0, + subscribers(Topic) + ), case DispN of 0 -> ok = emqx_hooks:run('message.dropped', [Msg, #{node => node()}, no_subscribers]), @@ -520,11 +563,16 @@ do_dispatch(Topic, #delivery{message = Msg}) -> do_dispatch(SubPid, Topic, Msg) when is_pid(SubPid) -> case erlang:is_process_alive(SubPid) of true -> - SubPid ! {deliver, Topic, Msg}, 1; - false -> 0 + SubPid ! {deliver, Topic, Msg}, + 1; + false -> + 0 end; do_dispatch({shard, I}, Topic, Msg) -> lists:foldl( fun(SubPid, N) -> N + do_dispatch(SubPid, Topic, Msg) - end, 0, subscribers({shard, Topic, I})). + end, + 0, + subscribers({shard, Topic, I}) + ). diff --git a/apps/emqx/src/emqx_broker_bench.erl b/apps/emqx/src/emqx_broker_bench.erl index 25fa390f8..18f74a523 100644 --- a/apps/emqx/src/emqx_broker_bench.erl +++ b/apps/emqx/src/emqx_broker_bench.erl @@ -25,13 +25,14 @@ run1() -> run1(80, 1000, 80, 10000). run1(Subs, SubOps, Pubs, PubOps) -> - run(#{subscribers => Subs, - publishers => Pubs, - sub_ops => SubOps, - pub_ops => PubOps, - sub_ptn => <<"device/{{id}}/+/{{num}}/#">>, - pub_ptn => <<"device/{{id}}/foo/{{num}}/bar/1/2/3/4/5">> - }). + run(#{ + subscribers => Subs, + publishers => Pubs, + sub_ops => SubOps, + pub_ops => PubOps, + sub_ptn => <<"device/{{id}}/+/{{num}}/#">>, + pub_ptn => <<"device/{{id}}/foo/{{num}}/bar/1/2/3/4/5">> + }). %% setting fields: %% - subscribers: spawn this number of subscriber workers @@ -43,20 +44,23 @@ run1(Subs, SubOps, Pubs, PubOps) -> %% replaced by worker id and {{num}} replaced by topic number. %% - pub_ptn: topic pattern used to benchmark publish (match) performance %% e.g. a/x/{{id}}/{{num}}/foo/bar -run(#{subscribers := Subs, - publishers := Pubs, - sub_ops := SubOps, - pub_ops := PubOps - } = Settings) -> - SubsPids = start_callers(Subs, fun start_subscriber/1, Settings), +run( + #{ + subscribers := Subs, + publishers := Pubs, + sub_ops := SubOps, + pub_ops := PubOps + } = Settings +) -> + SubsPids = start_callers(Subs, fun start_subscriber/1, Settings), PubsPids = start_callers(Pubs, fun start_publisher/1, Settings), _ = collect_results(SubsPids, subscriber_ready), io:format(user, "subscribe ...~n", []), {T1, SubsTime} = ?T(begin - lists:foreach(fun(Pid) -> Pid ! start_subscribe end, SubsPids), - collect_results(SubsPids, subscribe_time) - end), + lists:foreach(fun(Pid) -> Pid ! start_subscribe end, SubsPids), + collect_results(SubsPids, subscribe_time) + end), io:format(user, "InsertTotalTime: ~ts~n", [ns(T1)]), io:format(user, "InsertTimeAverage: ~ts~n", [ns(SubsTime / Subs)]), io:format(user, "InsertRps: ~p~n", [rps(Subs * SubOps, T1)]), @@ -64,9 +68,9 @@ run(#{subscribers := Subs, io:format(user, "lookup ...~n", []), {T2, PubsTime} = ?T(begin - lists:foreach(fun(Pid) -> Pid ! start_lookup end, PubsPids), - collect_results(PubsPids, lookup_time) - end), + lists:foreach(fun(Pid) -> Pid ! start_lookup end, PubsPids), + collect_results(PubsPids, lookup_time) + end), io:format(user, "LookupTotalTime: ~ts~n", [ns(T2)]), io:format(user, "LookupTimeAverage: ~ts~n", [ns(PubsTime / Pubs)]), io:format(user, "LookupRps: ~p~n", [rps(Pubs * PubOps, T2)]), @@ -76,14 +80,15 @@ run(#{subscribers := Subs, io:format(user, "unsubscribe ...~n", []), {T3, ok} = ?T(begin - lists:foreach(fun(Pid) -> Pid ! stop end, SubsPids), - wait_until_empty() - end), + lists:foreach(fun(Pid) -> Pid ! 
stop end, SubsPids), + wait_until_empty() + end), io:format(user, "TimeToUnsubscribeAll: ~ts~n", [ns(T3)]). wait_until_empty() -> case emqx_trie:empty() of - true -> ok; + true -> + ok; false -> timer:sleep(5), wait_until_empty() @@ -98,13 +103,13 @@ ns(T) -> io_lib:format("~p(ns)", [T]). ram_bytes() -> Wordsize = erlang:system_info(wordsize), mnesia:table_info(emqx_trie, memory) * Wordsize + - case lists:member(emqx_trie_node, ets:all()) of - true -> - %% before 4.3 - mnesia:table_info(emqx_trie_node, memory) * Wordsize; - false -> - 0 - end. + case lists:member(emqx_trie_node, ets:all()) of + true -> + %% before 4.3 + mnesia:table_info(emqx_trie_node, memory) * Wordsize; + false -> + 0 + end. start_callers(N, F, Settings) -> start_callers(N, F, Settings, []). @@ -117,7 +122,8 @@ start_callers(N, F, Settings, Acc) -> collect_results(Pids, Tag) -> collect_results(Pids, Tag, 0). -collect_results([], _Tag, R) -> R; +collect_results([], _Tag, R) -> + R; collect_results([Pid | Pids], Tag, R) -> receive {Pid, Tag, N} -> @@ -128,40 +134,43 @@ start_subscriber(#{id := Id, sub_ops := N, sub_ptn := SubPtn}) -> Parent = self(), proc_lib:spawn_link( fun() -> - SubTopics = make_topics(SubPtn, Id, N), - Parent ! {self(), subscriber_ready, 0}, - receive - start_subscribe -> - ok - end, - {Ts, _} = ?T(subscribe(SubTopics)), - _ = erlang:send(Parent, {self(), subscribe_time, Ts/ N}), - %% subscribers should not exit before publish test is done - receive - stop -> - ok - end - end). + SubTopics = make_topics(SubPtn, Id, N), + Parent ! {self(), subscriber_ready, 0}, + receive + start_subscribe -> + ok + end, + {Ts, _} = ?T(subscribe(SubTopics)), + _ = erlang:send(Parent, {self(), subscribe_time, Ts / N}), + %% subscribers should not exit before publish test is done + receive + stop -> + ok + end + end + ). start_publisher(#{id := Id, pub_ops := N, pub_ptn := PubPtn, subscribers := Subs}) -> Parent = self(), proc_lib:spawn_link( - fun() -> - L = lists:seq(1, N), - [Topic] = make_topics(PubPtn, (Id rem Subs) + 1, 1), - receive - start_lookup -> - ok - end, - {Tm, ok} = ?T(lists:foreach(fun(_) -> match(Topic) end, L)), - _ = erlang:send(Parent, {self(), lookup_time, Tm / N}), - ok - end). + fun() -> + L = lists:seq(1, N), + [Topic] = make_topics(PubPtn, (Id rem Subs) + 1, 1), + receive + start_lookup -> + ok + end, + {Tm, ok} = ?T(lists:foreach(fun(_) -> match(Topic) end, L)), + _ = erlang:send(Parent, {self(), lookup_time, Tm / N}), + ok + end + ). match(Topic) -> [_] = emqx_router:match_routes(Topic). -subscribe([]) -> ok; +subscribe([]) -> + ok; subscribe([Topic | Rest]) -> ok = emqx_broker:subscribe(Topic), subscribe(Rest). diff --git a/apps/emqx/src/emqx_broker_helper.erl b/apps/emqx/src/emqx_broker_helper.erl index 63e2292dc..72a44f711 100644 --- a/apps/emqx/src/emqx_broker_helper.erl +++ b/apps/emqx/src/emqx_broker_helper.erl @@ -21,26 +21,27 @@ -include("logger.hrl"). -include("types.hrl"). - -export([start_link/0]). %% APIs --export([ register_sub/2 - , lookup_subid/1 - , lookup_subpid/1 - , get_sub_shard/2 - , create_seq/1 - , reclaim_seq/1 - ]). +-export([ + register_sub/2, + lookup_subid/1, + lookup_subpid/1, + get_sub_shard/2, + create_seq/1, + reclaim_seq/1 +]). %% gen_server callbacks --export([ init/1 - , handle_call/3 - , handle_cast/2 - , handle_info/2 - , terminate/2 - , code_change/3 - ]). +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2, + code_change/3 +]). -ifdef(TEST). -compile(export_all). @@ -55,11 +56,11 @@ -define(BATCH_SIZE, 100000). 
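The benchmark above leans on a simple fan-out pattern: spawn_link one worker per id, let each worker send {Pid, Tag, Result} back to the parent, and collect the results in spawn order (collect_results/3). A standalone sketch of that pattern, with illustrative names:

-module(fanout_collect_sketch).
-export([run/2]).

%% Spawn N workers, each runs Fun(Id) and reports {Pid, done, Result};
%% results are collected in spawn order, same shape as collect_results/2 above.
run(N, Fun) ->
    Parent = self(),
    Pids = [
        proc_lib:spawn_link(fun() -> Parent ! {self(), done, Fun(Id)} end)
     || Id <- lists:seq(1, N)
    ],
    collect(Pids, []).

collect([], Acc) ->
    lists:reverse(Acc);
collect([Pid | Pids], Acc) ->
    receive
        {Pid, done, Result} -> collect(Pids, [Result | Acc])
    end.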
--spec(start_link() -> startlink_ret()). +-spec start_link() -> startlink_ret(). start_link() -> gen_server:start_link({local, ?HELPER}, ?MODULE, [], []). --spec(register_sub(pid(), emqx_types:subid()) -> ok). +-spec register_sub(pid(), emqx_types:subid()) -> ok. register_sub(SubPid, SubId) when is_pid(SubPid) -> case ets:lookup(?SUBMON, SubPid) of [] -> @@ -70,31 +71,31 @@ register_sub(SubPid, SubId) when is_pid(SubPid) -> error(subid_conflict) end. --spec(lookup_subid(pid()) -> maybe(emqx_types:subid())). +-spec lookup_subid(pid()) -> maybe(emqx_types:subid()). lookup_subid(SubPid) when is_pid(SubPid) -> emqx_tables:lookup_value(?SUBMON, SubPid). --spec(lookup_subpid(emqx_types:subid()) -> maybe(pid())). +-spec lookup_subpid(emqx_types:subid()) -> maybe(pid()). lookup_subpid(SubId) -> emqx_tables:lookup_value(?SUBID, SubId). --spec(get_sub_shard(pid(), emqx_types:topic()) -> non_neg_integer()). +-spec get_sub_shard(pid(), emqx_types:topic()) -> non_neg_integer(). get_sub_shard(SubPid, Topic) -> case create_seq(Topic) of Seq when Seq =< ?SHARD -> 0; _ -> erlang:phash2(SubPid, shards_num()) + 1 end. --spec(shards_num() -> pos_integer()). +-spec shards_num() -> pos_integer(). shards_num() -> %% Dynamic sharding later... ets:lookup_element(?HELPER, shards, 2). --spec(create_seq(emqx_types:topic()) -> emqx_sequence:seqid()). +-spec create_seq(emqx_types:topic()) -> emqx_sequence:seqid(). create_seq(Topic) -> emqx_sequence:nextval(?SUBSEQ, Topic). --spec(reclaim_seq(emqx_types:topic()) -> emqx_sequence:seqid()). +-spec reclaim_seq(emqx_types:topic()) -> emqx_sequence:seqid(). reclaim_seq(Topic) -> emqx_sequence:reclaim(?SUBSEQ, Topic). @@ -125,7 +126,6 @@ handle_cast({register_sub, SubPid, SubId}, State = #{pmon := PMon}) -> true = (SubId =:= undefined) orelse ets:insert(?SUBID, {SubId, SubPid}), true = ets:insert(?SUBMON, {SubPid, SubId}), {noreply, State#{pmon := emqx_pmon:monitor(SubPid, PMon)}}; - handle_cast(Msg, State) -> ?SLOG(error, #{msg => "unexpected_cast", cast => Msg}), {noreply, State}. @@ -133,10 +133,10 @@ handle_cast(Msg, State) -> handle_info({'DOWN', _MRef, process, SubPid, _Reason}, State = #{pmon := PMon}) -> SubPids = [SubPid | emqx_misc:drain_down(?BATCH_SIZE)], ok = emqx_pool:async_submit( - fun lists:foreach/2, [fun clean_down/1, SubPids]), + fun lists:foreach/2, [fun clean_down/1, SubPids] + ), {_, PMon1} = emqx_pmon:erase_all(SubPids, PMon), {noreply, State#{pmon := PMon1}}; - handle_info(Info, State) -> ?SLOG(error, #{msg => "unexpected_info", info => Info}), {noreply, State}. @@ -156,9 +156,10 @@ clean_down(SubPid) -> case ets:lookup(?SUBMON, SubPid) of [{_, SubId}] -> true = ets:delete(?SUBMON, SubPid), - true = (SubId =:= undefined) - orelse ets:delete_object(?SUBID, {SubId, SubPid}), + true = + (SubId =:= undefined) orelse + ets:delete_object(?SUBID, {SubId, SubPid}), emqx_broker:subscriber_down(SubPid); - [] -> ok + [] -> + ok end. 
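get_sub_shard/2 above keeps a topic's first ?SHARD subscribers in shard 0 and hashes later subscribers into one of shards_num() extra shards via erlang:phash2/2. The same decision as a pure function, with the threshold and shard count passed in (names illustrative):

-module(shard_pick_sketch).
-export([sub_shard/3]).

%% Seq is the per-topic subscriber sequence number; Threshold plays the
%% role of ?SHARD and NShards the role of shards_num() above.
sub_shard(_SubPid, Seq, {Threshold, _NShards}) when Seq =< Threshold ->
    0;
sub_shard(SubPid, _Seq, {_Threshold, NShards}) ->
    erlang:phash2(SubPid, NShards) + 1.

For example, sub_shard(self(), 1500, {1024, 8}) lands in one of shards 1..8, while sub_shard(self(), 10, {1024, 8}) stays in shard 0.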
- diff --git a/apps/emqx/src/emqx_broker_sup.erl b/apps/emqx/src/emqx_broker_sup.erl index 3c872bf56..63c39d229 100644 --- a/apps/emqx/src/emqx_broker_sup.erl +++ b/apps/emqx/src/emqx_broker_sup.erl @@ -32,32 +32,41 @@ start_link() -> init([]) -> %% Broker pool PoolSize = emqx_vm:schedulers() * 2, - BrokerPool = emqx_pool_sup:spec([broker_pool, hash, PoolSize, - {emqx_broker, start_link, []}]), + BrokerPool = emqx_pool_sup:spec([ + broker_pool, + hash, + PoolSize, + {emqx_broker, start_link, []} + ]), %% Shared subscription - SharedSub = #{id => shared_sub, - start => {emqx_shared_sub, start_link, []}, - restart => permanent, - shutdown => 2000, - type => worker, - modules => [emqx_shared_sub]}, + SharedSub = #{ + id => shared_sub, + start => {emqx_shared_sub, start_link, []}, + restart => permanent, + shutdown => 2000, + type => worker, + modules => [emqx_shared_sub] + }, %% Authentication - AuthNSup = #{id => emqx_authentication_sup, - start => {emqx_authentication_sup, start_link, []}, - restart => permanent, - shutdown => infinity, - type => supervisor, - modules => [emqx_authentication_sup]}, + AuthNSup = #{ + id => emqx_authentication_sup, + start => {emqx_authentication_sup, start_link, []}, + restart => permanent, + shutdown => infinity, + type => supervisor, + modules => [emqx_authentication_sup] + }, %% Broker helper - Helper = #{id => helper, - start => {emqx_broker_helper, start_link, []}, - restart => permanent, - shutdown => 2000, - type => worker, - modules => [emqx_broker_helper]}, + Helper = #{ + id => helper, + start => {emqx_broker_helper, start_link, []}, + restart => permanent, + shutdown => 2000, + type => worker, + modules => [emqx_broker_helper] + }, {ok, {{one_for_all, 0, 1}, [BrokerPool, SharedSub, AuthNSup, Helper]}}. - diff --git a/apps/emqx/src/emqx_channel.erl b/apps/emqx/src/emqx_channel.erl index 521091d1c..860f99300 100644 --- a/apps/emqx/src/emqx_channel.erl +++ b/apps/emqx/src/emqx_channel.erl @@ -22,129 +22,135 @@ -include("logger.hrl"). -include("types.hrl"). - -ifdef(TEST). -compile(export_all). -compile(nowarn_export_all). -endif. --export([ info/1 - , info/2 - , get_mqtt_conf/2 - , get_mqtt_conf/3 - , set_conn_state/2 - , stats/1 - , caps/1 - ]). +-export([ + info/1, + info/2, + get_mqtt_conf/2, + get_mqtt_conf/3, + set_conn_state/2, + stats/1, + caps/1 +]). --export([ init/2 - , handle_in/2 - , handle_deliver/2 - , handle_out/3 - , handle_timeout/3 - , handle_call/2 - , handle_info/2 - , terminate/2 - ]). +-export([ + init/2, + handle_in/2, + handle_deliver/2, + handle_out/3, + handle_timeout/3, + handle_call/2, + handle_info/2, + terminate/2 +]). %% Export for emqx_sn --export([ do_deliver/2 - , ensure_keepalive/2 - , clear_keepalive/1 - ]). +-export([ + do_deliver/2, + ensure_keepalive/2, + clear_keepalive/1 +]). %% Exports for CT -export([set_field/3]). --import(emqx_misc, - [ run_fold/3 - , pipeline/3 - , maybe_apply/2 - ]). +-import( + emqx_misc, + [ + run_fold/3, + pipeline/3, + maybe_apply/2 + ] +). -export_type([channel/0, opts/0]). 
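emqx_broker_sup:init/1 above now uses map-style child specs under a one_for_all strategy. A minimal runnable sketch of the same shape; pg (OTP 23+) is used only as a convenient stdlib worker so the tree actually starts, everything else mirrors the specs above:

-module(sup_spec_sketch).
-behaviour(supervisor).
-export([start_link/0, init/1]).

start_link() ->
    supervisor:start_link({local, ?MODULE}, ?MODULE, []).

init([]) ->
    %% pg stands in for a real worker such as emqx_shared_sub
    Worker = #{
        id => demo_worker,
        start => {pg, start_link, []},
        restart => permanent,
        shutdown => 2000,
        type => worker,
        modules => [pg]
    },
    {ok, {{one_for_all, 0, 1}, [Worker]}}.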
-record(channel, { - %% MQTT ConnInfo - conninfo :: emqx_types:conninfo(), - %% MQTT ClientInfo - clientinfo :: emqx_types:clientinfo(), - %% MQTT Session - session :: maybe(emqx_session:session()), - %% Keepalive - keepalive :: maybe(emqx_keepalive:keepalive()), - %% MQTT Will Msg - will_msg :: maybe(emqx_types:message()), - %% MQTT Topic Aliases - topic_aliases :: emqx_types:topic_aliases(), - %% MQTT Topic Alias Maximum - alias_maximum :: maybe(map()), - %% Authentication Data Cache - auth_cache :: maybe(map()), - %% Quota checkers - quota :: maybe(emqx_limiter_container:limiter()), - %% Timers - timers :: #{atom() => disabled | maybe(reference())}, - %% Conn State - conn_state :: conn_state(), - %% Takeover - takeover :: boolean(), - %% Resume - resuming :: boolean(), - %% Pending delivers when takeovering - pendings :: list() - }). + %% MQTT ConnInfo + conninfo :: emqx_types:conninfo(), + %% MQTT ClientInfo + clientinfo :: emqx_types:clientinfo(), + %% MQTT Session + session :: maybe(emqx_session:session()), + %% Keepalive + keepalive :: maybe(emqx_keepalive:keepalive()), + %% MQTT Will Msg + will_msg :: maybe(emqx_types:message()), + %% MQTT Topic Aliases + topic_aliases :: emqx_types:topic_aliases(), + %% MQTT Topic Alias Maximum + alias_maximum :: maybe(map()), + %% Authentication Data Cache + auth_cache :: maybe(map()), + %% Quota checkers + quota :: maybe(emqx_limiter_container:limiter()), + %% Timers + timers :: #{atom() => disabled | maybe(reference())}, + %% Conn State + conn_state :: conn_state(), + %% Takeover + takeover :: boolean(), + %% Resume + resuming :: boolean(), + %% Pending delivers when takeovering + pendings :: list() +}). --type(channel() :: #channel{}). +-type channel() :: #channel{}. --type(opts() :: #{zone := atom(), listener := {Type::atom(), Name::atom()}, atom() => term()}). +-type opts() :: #{zone := atom(), listener := {Type :: atom(), Name :: atom()}, atom() => term()}. --type(conn_state() :: idle | connecting | connected | reauthenticating | disconnected). +-type conn_state() :: idle | connecting | connected | reauthenticating | disconnected. --type(reply() :: {outgoing, emqx_types:packet()} - | {outgoing, [emqx_types:packet()]} - | {event, conn_state() | updated} - | {close, Reason :: atom()}). +-type reply() :: + {outgoing, emqx_types:packet()} + | {outgoing, [emqx_types:packet()]} + | {event, conn_state() | updated} + | {close, Reason :: atom()}. --type(replies() :: emqx_types:packet() | reply() | [reply()]). +-type replies() :: emqx_types:packet() | reply() | [reply()]. -define(IS_MQTT_V5, #channel{conninfo = #{proto_ver := ?MQTT_PROTO_V5}}). -define(TIMER_TABLE, #{ - alive_timer => keepalive, - retry_timer => retry_delivery, - await_timer => expire_awaiting_rel, - expire_timer => expire_session, - will_timer => will_message, - quota_timer => expire_quota_limit - }). + alive_timer => keepalive, + retry_timer => retry_delivery, + await_timer => expire_awaiting_rel, + expire_timer => expire_session, + will_timer => will_message, + quota_timer => expire_quota_limit +}). --define(CHANNEL_METRICS, - [ recv_pkt - , recv_msg - , 'recv_msg.qos0' - , 'recv_msg.qos1' - , 'recv_msg.qos2' - , 'recv_msg.dropped' - , 'recv_msg.dropped.await_pubrel_timeout' - , send_pkt - , send_msg - , 'send_msg.qos0' - , 'send_msg.qos1' - , 'send_msg.qos2' - , 'send_msg.dropped' - , 'send_msg.dropped.expired' - , 'send_msg.dropped.queue_full' - , 'send_msg.dropped.too_large' - ]). 
+-define(CHANNEL_METRICS, [ + recv_pkt, + recv_msg, + 'recv_msg.qos0', + 'recv_msg.qos1', + 'recv_msg.qos2', + 'recv_msg.dropped', + 'recv_msg.dropped.await_pubrel_timeout', + send_pkt, + send_msg, + 'send_msg.qos0', + 'send_msg.qos1', + 'send_msg.qos2', + 'send_msg.dropped', + 'send_msg.dropped.expired', + 'send_msg.dropped.queue_full', + 'send_msg.dropped.too_large' +]). --define(INFO_KEYS, - [ conninfo - , conn_state - , clientinfo - , session - , will_msg - ]). +-define(INFO_KEYS, [ + conninfo, + conn_state, + clientinfo, + session, + will_msg +]). -define(LIMITER_ROUTING, message_routing). @@ -155,11 +161,11 @@ %%-------------------------------------------------------------------- %% @doc Get infos of the channel. --spec(info(channel()) -> emqx_types:infos()). +-spec info(channel()) -> emqx_types:infos(). info(Channel) -> maps:from_list(info(?INFO_KEYS, Channel)). --spec(info(list(atom()) | atom(), channel()) -> term()). +-spec info(list(atom()) | atom(), channel()) -> term(). info(Keys, Channel) when is_list(Keys) -> [{Key, info(Key, Channel)} || Key <- Keys]; info(conninfo, #channel{conninfo = ConnInfo}) -> @@ -200,7 +206,8 @@ info(topic_aliases, #channel{topic_aliases = Aliases}) -> Aliases; info(alias_maximum, #channel{alias_maximum = Limits}) -> Limits; -info(timers, #channel{timers = Timers}) -> Timers. +info(timers, #channel{timers = Timers}) -> + Timers. set_conn_state(ConnState, Channel) -> Channel#channel{conn_state = ConnState}. @@ -210,73 +217,81 @@ set_session(Session, Channel = #channel{conninfo = ConnInfo, clientinfo = Client Session1 = emqx_persistent_session:persist(ClientInfo, ConnInfo, Session), Channel#channel{session = Session1}. --spec(stats(channel()) -> emqx_types:stats()). -stats(#channel{session = Session})-> +-spec stats(channel()) -> emqx_types:stats(). +stats(#channel{session = Session}) -> lists:append(emqx_session:stats(Session), emqx_pd:get_counters(?CHANNEL_METRICS)). --spec(caps(channel()) -> emqx_types:caps()). +-spec caps(channel()) -> emqx_types:caps(). caps(#channel{clientinfo = #{zone := Zone}}) -> emqx_mqtt_caps:get_caps(Zone). - %%-------------------------------------------------------------------- %% Init the channel %%-------------------------------------------------------------------- --spec(init(emqx_types:conninfo(), opts()) -> channel()). -init(ConnInfo = #{peername := {PeerHost, _Port}, - sockname := {_Host, SockPort}}, - #{zone := Zone, limiter := LimiterCfg, listener := {Type, Listener}}) -> +-spec init(emqx_types:conninfo(), opts()) -> channel(). 
+init( + ConnInfo = #{ + peername := {PeerHost, _Port}, + sockname := {_Host, SockPort} + }, + #{zone := Zone, limiter := LimiterCfg, listener := {Type, Listener}} +) -> Peercert = maps:get(peercert, ConnInfo, undefined), Protocol = maps:get(protocol, ConnInfo, mqtt), - MountPoint = case emqx_config:get_listener_conf(Type, Listener, [mountpoint]) of - <<>> -> undefined; - MP -> MP - end, + MountPoint = + case emqx_config:get_listener_conf(Type, Listener, [mountpoint]) of + <<>> -> undefined; + MP -> MP + end, ClientInfo = set_peercert_infos( - Peercert, - #{zone => Zone, - listener => emqx_listeners:listener_id(Type, Listener), - protocol => Protocol, - peerhost => PeerHost, - sockport => SockPort, - clientid => undefined, - username => undefined, - mountpoint => MountPoint, - is_bridge => false, - is_superuser => false - }, Zone), + Peercert, + #{ + zone => Zone, + listener => emqx_listeners:listener_id(Type, Listener), + protocol => Protocol, + peerhost => PeerHost, + sockport => SockPort, + clientid => undefined, + username => undefined, + mountpoint => MountPoint, + is_bridge => false, + is_superuser => false + }, + Zone + ), {NClientInfo, NConnInfo} = take_ws_cookie(ClientInfo, ConnInfo), - #channel{conninfo = NConnInfo, - clientinfo = NClientInfo, - topic_aliases = #{inbound => #{}, - outbound => #{} - }, - auth_cache = #{}, - quota = emqx_limiter_container:get_limiter_by_names([?LIMITER_ROUTING], LimiterCfg), - timers = #{}, - conn_state = idle, - takeover = false, - resuming = false, - pendings = [] - }. + #channel{ + conninfo = NConnInfo, + clientinfo = NClientInfo, + topic_aliases = #{ + inbound => #{}, + outbound => #{} + }, + auth_cache = #{}, + quota = emqx_limiter_container:get_limiter_by_names([?LIMITER_ROUTING], LimiterCfg), + timers = #{}, + conn_state = idle, + takeover = false, + resuming = false, + pendings = [] + }. -set_peercert_infos(NoSSL, ClientInfo, _) - when NoSSL =:= nossl; - NoSSL =:= undefined -> +set_peercert_infos(NoSSL, ClientInfo, _) when + NoSSL =:= nossl; + NoSSL =:= undefined +-> ClientInfo#{username => undefined}; - set_peercert_infos(Peercert, ClientInfo, Zone) -> - {DN, CN} = {esockd_peercert:subject(Peercert), - esockd_peercert:common_name(Peercert)}, + {DN, CN} = {esockd_peercert:subject(Peercert), esockd_peercert:common_name(Peercert)}, PeercetAs = fun(Key) -> case get_mqtt_conf(Zone, Key) of - cn -> CN; - dn -> DN; - crt -> Peercert; - pem when is_binary(Peercert) -> base64:encode(Peercert); - md5 when is_binary(Peercert) -> emqx_passwd:hash_data(md5, Peercert); - _ -> undefined + cn -> CN; + dn -> DN; + crt -> Peercert; + pem when is_binary(Peercert) -> base64:encode(Peercert); + md5 when is_binary(Peercert) -> emqx_passwd:hash_data(md5, Peercert); + _ -> undefined end end, Username = PeercetAs(peer_cert_as_username), @@ -295,33 +310,39 @@ take_ws_cookie(ClientInfo, ConnInfo) -> %% Handle incoming packet %%-------------------------------------------------------------------- --spec(handle_in(emqx_types:packet(), channel()) - -> {ok, channel()} - | {ok, replies(), channel()} - | {shutdown, Reason :: term(), channel()} - | {shutdown, Reason :: term(), replies(), channel()}). -handle_in(?CONNECT_PACKET(), Channel = #channel{conn_state = ConnState}) - when ConnState =:= connected orelse ConnState =:= reauthenticating -> +-spec handle_in(emqx_types:packet(), channel()) -> + {ok, channel()} + | {ok, replies(), channel()} + | {shutdown, Reason :: term(), channel()} + | {shutdown, Reason :: term(), replies(), channel()}. 
+handle_in(?CONNECT_PACKET(), Channel = #channel{conn_state = ConnState}) when + ConnState =:= connected orelse ConnState =:= reauthenticating +-> handle_out(disconnect, ?RC_PROTOCOL_ERROR, Channel); - handle_in(?CONNECT_PACKET(), Channel = #channel{conn_state = connecting}) -> handle_out(connack, ?RC_PROTOCOL_ERROR, Channel); - handle_in(?CONNECT_PACKET(ConnPkt) = Packet, Channel) -> - case pipeline([fun overload_protection/2, - fun enrich_conninfo/2, - fun run_conn_hooks/2, - fun check_connect/2, - fun enrich_client/2, - fun set_log_meta/2, - fun check_banned/2 - ], ConnPkt, Channel#channel{conn_state = connecting}) of + case + pipeline( + [ + fun overload_protection/2, + fun enrich_conninfo/2, + fun run_conn_hooks/2, + fun check_connect/2, + fun enrich_client/2, + fun set_log_meta/2, + fun check_banned/2 + ], + ConnPkt, + Channel#channel{conn_state = connecting} + ) + of {ok, NConnPkt, NChannel = #channel{clientinfo = ClientInfo}} -> ?TRACE("MQTT", "mqtt_packet_received", #{packet => Packet}), NChannel1 = NChannel#channel{ - will_msg = emqx_packet:will_msg(NConnPkt), - alias_maximum = init_alias_maximum(NConnPkt, ClientInfo) - }, + will_msg = emqx_packet:will_msg(NConnPkt), + alias_maximum = init_alias_maximum(NConnPkt, ClientInfo) + }, case authenticate(?CONNECT_PACKET(NConnPkt), NChannel1) of {ok, Properties, NChannel2} -> process_connect(Properties, ensure_connected(NChannel2)); @@ -333,9 +354,10 @@ handle_in(?CONNECT_PACKET(ConnPkt) = Packet, Channel) -> {error, ReasonCode, NChannel} -> handle_out(connack, ReasonCode, NChannel) end; - -handle_in(Packet = ?AUTH_PACKET(ReasonCode, _Properties), - Channel = #channel{conn_state = ConnState}) -> +handle_in( + Packet = ?AUTH_PACKET(ReasonCode, _Properties), + Channel = #channel{conn_state = ConnState} +) -> try case {ReasonCode, ConnState} of {?RC_CONTINUE_AUTHENTICATION, connecting} -> ok; @@ -349,16 +371,18 @@ handle_in(Packet = ?AUTH_PACKET(ReasonCode, _Properties), connecting -> process_connect(NProperties, ensure_connected(NChannel)); _ -> - handle_out( auth - , {?RC_SUCCESS, NProperties} - , NChannel#channel{conn_state = connected} - ) + handle_out( + auth, + {?RC_SUCCESS, NProperties}, + NChannel#channel{conn_state = connected} + ) end; {continue, NProperties, NChannel} -> - handle_out( auth - , {?RC_CONTINUE_AUTHENTICATION, NProperties} - , NChannel#channel{conn_state = reauthenticating} - ); + handle_out( + auth, + {?RC_CONTINUE_AUTHENTICATION, NProperties}, + NChannel#channel{conn_state = reauthenticating} + ); {error, NReasonCode} -> case ConnState of connecting -> @@ -376,20 +400,20 @@ handle_in(Packet = ?AUTH_PACKET(ReasonCode, _Properties), handle_out(disconnect, ?RC_PROTOCOL_ERROR, Channel) end end; - -handle_in(?PACKET(_), Channel = #channel{conn_state = ConnState}) - when ConnState =/= connected andalso ConnState =/= reauthenticating -> +handle_in(?PACKET(_), Channel = #channel{conn_state = ConnState}) when + ConnState =/= connected andalso ConnState =/= reauthenticating +-> handle_out(disconnect, ?RC_PROTOCOL_ERROR, Channel); - handle_in(Packet = ?PUBLISH_PACKET(_QoS), Channel) -> case emqx_packet:check(Packet) of ok -> process_publish(Packet, Channel); - {error, ReasonCode} -> - handle_out(disconnect, ReasonCode, Channel) + {error, ReasonCode} -> handle_out(disconnect, ReasonCode, Channel) end; - -handle_in(?PUBACK_PACKET(PacketId, _ReasonCode, Properties), Channel - = #channel{clientinfo = ClientInfo, session = Session}) -> +handle_in( + ?PUBACK_PACKET(PacketId, _ReasonCode, Properties), + Channel = + 
#channel{clientinfo = ClientInfo, session = Session} +) -> case emqx_session:puback(ClientInfo, PacketId, Session) of {ok, Msg, NSession} -> ok = after_message_acked(ClientInfo, Msg, Properties), @@ -406,9 +430,11 @@ handle_in(?PUBACK_PACKET(PacketId, _ReasonCode, Properties), Channel ok = emqx_metrics:inc('packets.puback.missed'), {ok, Channel} end; - -handle_in(?PUBREC_PACKET(PacketId, _ReasonCode, Properties), Channel - = #channel{clientinfo = ClientInfo, session = Session}) -> +handle_in( + ?PUBREC_PACKET(PacketId, _ReasonCode, Properties), + Channel = + #channel{clientinfo = ClientInfo, session = Session} +) -> case emqx_session:pubrec(ClientInfo, PacketId, Session) of {ok, Msg, NSession} -> ok = after_message_acked(ClientInfo, Msg, Properties), @@ -423,9 +449,13 @@ handle_in(?PUBREC_PACKET(PacketId, _ReasonCode, Properties), Channel ok = emqx_metrics:inc('packets.pubrec.missed'), handle_out(pubrel, {PacketId, RC}, Channel) end; - -handle_in(?PUBREL_PACKET(PacketId, _ReasonCode), Channel = #channel{clientinfo = ClientInfo, - session = Session}) -> +handle_in( + ?PUBREL_PACKET(PacketId, _ReasonCode), + Channel = #channel{ + clientinfo = ClientInfo, + session = Session + } +) -> case emqx_session:pubrel(ClientInfo, PacketId, Session) of {ok, NSession} -> NChannel = set_session(NSession, Channel), @@ -435,9 +465,12 @@ handle_in(?PUBREL_PACKET(PacketId, _ReasonCode), Channel = #channel{clientinfo = ok = emqx_metrics:inc('packets.pubrel.missed'), handle_out(pubcomp, {PacketId, RC}, Channel) end; - -handle_in(?PUBCOMP_PACKET(PacketId, _ReasonCode), Channel = #channel{ - clientinfo = ClientInfo, session = Session}) -> +handle_in( + ?PUBCOMP_PACKET(PacketId, _ReasonCode), + Channel = #channel{ + clientinfo = ClientInfo, session = Session + } +) -> case emqx_session:pubcomp(ClientInfo, PacketId, Session) of {ok, NSession} -> {ok, set_session(NSession, Channel)}; @@ -451,86 +484,97 @@ handle_in(?PUBCOMP_PACKET(PacketId, _ReasonCode), Channel = #channel{ ok = emqx_metrics:inc('packets.pubcomp.missed'), {ok, Channel} end; - -handle_in(Packet = ?SUBSCRIBE_PACKET(PacketId, Properties, TopicFilters), - Channel = #channel{clientinfo = ClientInfo}) -> +handle_in( + Packet = ?SUBSCRIBE_PACKET(PacketId, Properties, TopicFilters), + Channel = #channel{clientinfo = ClientInfo} +) -> case emqx_packet:check(Packet) of ok -> TopicFilters0 = parse_topic_filters(TopicFilters), TopicFilters1 = put_subid_in_subopts(Properties, TopicFilters0), TupleTopicFilters0 = check_sub_authzs(TopicFilters1, Channel), - HasAuthzDeny = lists:any(fun({_TopicFilter, ReasonCode}) -> + HasAuthzDeny = lists:any( + fun({_TopicFilter, ReasonCode}) -> ReasonCode =:= ?RC_NOT_AUTHORIZED - end, TupleTopicFilters0), + end, + TupleTopicFilters0 + ), DenyAction = emqx:get_config([authorization, deny_action], ignore), case DenyAction =:= disconnect andalso HasAuthzDeny of - true -> handle_out(disconnect, ?RC_NOT_AUTHORIZED, Channel); + true -> + handle_out(disconnect, ?RC_NOT_AUTHORIZED, Channel); false -> Replace = fun - _Fun(TupleList, [ Tuple = {Key, _Value} | More]) -> - _Fun(lists:keyreplace(Key, 1, TupleList, Tuple), More); - _Fun(TupleList, []) -> TupleList - end, - TopicFilters2 = [ TopicFilter || {TopicFilter, 0} <- TupleTopicFilters0], - TopicFilters3 = run_hooks('client.subscribe', - [ClientInfo, Properties], - TopicFilters2), - {TupleTopicFilters1, NChannel} = process_subscribe(TopicFilters3, - Properties, - Channel), + _Fun(TupleList, [Tuple = {Key, _Value} | More]) -> + _Fun(lists:keyreplace(Key, 1, TupleList, Tuple), More); + 
_Fun(TupleList, []) -> + TupleList + end, + TopicFilters2 = [TopicFilter || {TopicFilter, 0} <- TupleTopicFilters0], + TopicFilters3 = run_hooks( + 'client.subscribe', + [ClientInfo, Properties], + TopicFilters2 + ), + {TupleTopicFilters1, NChannel} = process_subscribe( + TopicFilters3, + Properties, + Channel + ), TupleTopicFilters2 = Replace(TupleTopicFilters0, TupleTopicFilters1), - ReasonCodes2 = [ ReasonCode - || {_TopicFilter, ReasonCode} <- TupleTopicFilters2], + ReasonCodes2 = [ + ReasonCode + || {_TopicFilter, ReasonCode} <- TupleTopicFilters2 + ], handle_out(suback, {PacketId, ReasonCodes2}, NChannel) end; {error, ReasonCode} -> handle_out(disconnect, ReasonCode, Channel) end; - -handle_in(Packet = ?UNSUBSCRIBE_PACKET(PacketId, Properties, TopicFilters), - Channel = #channel{clientinfo = ClientInfo}) -> +handle_in( + Packet = ?UNSUBSCRIBE_PACKET(PacketId, Properties, TopicFilters), + Channel = #channel{clientinfo = ClientInfo} +) -> case emqx_packet:check(Packet) of - ok -> TopicFilters1 = run_hooks('client.unsubscribe', - [ClientInfo, Properties], - parse_topic_filters(TopicFilters) - ), - {ReasonCodes, NChannel} = process_unsubscribe(TopicFilters1, Properties, Channel), - handle_out(unsuback, {PacketId, ReasonCodes}, NChannel); + ok -> + TopicFilters1 = run_hooks( + 'client.unsubscribe', + [ClientInfo, Properties], + parse_topic_filters(TopicFilters) + ), + {ReasonCodes, NChannel} = process_unsubscribe(TopicFilters1, Properties, Channel), + handle_out(unsuback, {PacketId, ReasonCodes}, NChannel); {error, ReasonCode} -> handle_out(disconnect, ReasonCode, Channel) end; - handle_in(?PACKET(?PINGREQ), Channel) -> {ok, ?PACKET(?PINGRESP), Channel}; - -handle_in(?DISCONNECT_PACKET(ReasonCode, Properties), - Channel = #channel{conninfo = ConnInfo}) -> +handle_in( + ?DISCONNECT_PACKET(ReasonCode, Properties), + Channel = #channel{conninfo = ConnInfo} +) -> NConnInfo = ConnInfo#{disconn_props => Properties}, NChannel = maybe_clean_will_msg(ReasonCode, Channel#channel{conninfo = NConnInfo}), process_disconnect(ReasonCode, Properties, NChannel); - handle_in(?AUTH_PACKET(), Channel) -> handle_out(disconnect, ?RC_IMPLEMENTATION_SPECIFIC_ERROR, Channel); - handle_in({frame_error, Reason}, Channel = #channel{conn_state = idle}) -> shutdown(Reason, Channel); - handle_in({frame_error, frame_too_large}, Channel = #channel{conn_state = connecting}) -> shutdown(frame_too_large, ?CONNACK_PACKET(?RC_PACKET_TOO_LARGE), Channel); handle_in({frame_error, Reason}, Channel = #channel{conn_state = connecting}) -> shutdown(Reason, ?CONNACK_PACKET(?RC_MALFORMED_PACKET), Channel); - -handle_in({frame_error, frame_too_large}, Channel = #channel{conn_state = ConnState}) - when ConnState =:= connected orelse ConnState =:= reauthenticating -> +handle_in({frame_error, frame_too_large}, Channel = #channel{conn_state = ConnState}) when + ConnState =:= connected orelse ConnState =:= reauthenticating +-> handle_out(disconnect, {?RC_PACKET_TOO_LARGE, frame_too_large}, Channel); -handle_in({frame_error, Reason}, Channel = #channel{conn_state = ConnState}) - when ConnState =:= connected orelse ConnState =:= reauthenticating -> +handle_in({frame_error, Reason}, Channel = #channel{conn_state = ConnState}) when + ConnState =:= connected orelse ConnState =:= reauthenticating +-> handle_out(disconnect, {?RC_MALFORMED_PACKET, Reason}, Channel); - handle_in({frame_error, Reason}, Channel = #channel{conn_state = disconnected}) -> ?SLOG(error, #{msg => "malformed_mqtt_message", reason => Reason}), {ok, Channel}; - 
handle_in(Packet, Channel) -> ?SLOG(error, #{msg => "disconnecting_due_to_unexpected_message", packet => Packet}), handle_out(disconnect, ?RC_PROTOCOL_ERROR, Channel). @@ -539,8 +583,13 @@ handle_in(Packet, Channel) -> %% Process Connect %%-------------------------------------------------------------------- -process_connect(AckProps, Channel = #channel{conninfo = ConnInfo, - clientinfo = ClientInfo}) -> +process_connect( + AckProps, + Channel = #channel{ + conninfo = ConnInfo, + clientinfo = ClientInfo + } +) -> #{clean_start := CleanStart} = ConnInfo, case emqx_cm:open_session(CleanStart, ClientInfo, ConnInfo) of {ok, #{session := Session, present := false}} -> @@ -548,10 +597,11 @@ process_connect(AckProps, Channel = #channel{conninfo = ConnInfo, handle_out(connack, {?RC_SUCCESS, sp(false), AckProps}, NChannel); {ok, #{session := Session, present := true, pendings := Pendings}} -> Pendings1 = lists:usort(lists:append(Pendings, emqx_misc:drain_deliver())), - NChannel = Channel#channel{session = Session, - resuming = true, - pendings = Pendings1 - }, + NChannel = Channel#channel{ + session = Session, + resuming = true, + pendings = Pendings1 + }, handle_out(connack, {?RC_SUCCESS, sp(true), AckProps}, NChannel); {error, client_id_unavailable} -> handle_out(connack, ?RC_CLIENT_IDENTIFIER_NOT_VALID, Channel); @@ -565,37 +615,50 @@ process_connect(AckProps, Channel = #channel{conninfo = ConnInfo, %%-------------------------------------------------------------------- process_publish(Packet = ?PUBLISH_PACKET(QoS, Topic, PacketId), Channel) -> - case pipeline([fun check_quota_exceeded/2, - fun process_alias/2, - fun check_pub_alias/2, - fun check_pub_authz/2, - fun check_pub_caps/2 - ], Packet, Channel) of + case + pipeline( + [ + fun check_quota_exceeded/2, + fun process_alias/2, + fun check_pub_alias/2, + fun check_pub_authz/2, + fun check_pub_caps/2 + ], + Packet, + Channel + ) + of {ok, NPacket, NChannel} -> Msg = packet_to_message(NPacket, NChannel), do_publish(PacketId, Msg, NChannel); {error, Rc = ?RC_NOT_AUTHORIZED, NChannel} -> - ?SLOG(warning, #{ - msg => "cannot_publish_to_topic", - reason => emqx_reason_codes:name(Rc) - }, #{topic => Topic}), + ?SLOG( + warning, + #{ + msg => "cannot_publish_to_topic", + reason => emqx_reason_codes:name(Rc) + }, + #{topic => Topic} + ), case emqx:get_config([authorization, deny_action], ignore) of ignore -> case QoS of - ?QOS_0 -> {ok, NChannel}; - ?QOS_1 -> - handle_out(puback, {PacketId, Rc}, NChannel); - ?QOS_2 -> - handle_out(pubrec, {PacketId, Rc}, NChannel) + ?QOS_0 -> {ok, NChannel}; + ?QOS_1 -> handle_out(puback, {PacketId, Rc}, NChannel); + ?QOS_2 -> handle_out(pubrec, {PacketId, Rc}, NChannel) end; disconnect -> handle_out(disconnect, Rc, NChannel) end; {error, Rc = ?RC_QUOTA_EXCEEDED, NChannel} -> - ?SLOG(warning, #{ - msg => "cannot_publish_to_topic", - reason => emqx_reason_codes:name(Rc) - }, #{topic => Topic}), + ?SLOG( + warning, + #{ + msg => "cannot_publish_to_topic", + reason => emqx_reason_codes:name(Rc) + }, + #{topic => Topic} + ), case QoS of ?QOS_0 -> ok = emqx_metrics:inc('packets.publish.dropped'), @@ -606,45 +669,56 @@ process_publish(Packet = ?PUBLISH_PACKET(QoS, Topic, PacketId), Channel) -> handle_out(pubrec, {PacketId, Rc}, NChannel) end; {error, Rc, NChannel} -> - ?SLOG(warning, #{ - msg => "cannot_publish_to_topic", - topic => Topic, - reason => emqx_reason_codes:name(Rc) - }, #{topic => Topic}), + ?SLOG( + warning, + #{ + msg => "cannot_publish_to_topic", + topic => Topic, + reason => emqx_reason_codes:name(Rc) + }, + 
#{topic => Topic} + ), handle_out(disconnect, Rc, NChannel) end. packet_to_message(Packet, #channel{ - conninfo = #{proto_ver := ProtoVer}, - clientinfo = #{ - protocol := Protocol, - clientid := ClientId, - username := Username, - peerhost := PeerHost, - mountpoint := MountPoint - } - }) -> - emqx_mountpoint:mount(MountPoint, + conninfo = #{proto_ver := ProtoVer}, + clientinfo = #{ + protocol := Protocol, + clientid := ClientId, + username := Username, + peerhost := PeerHost, + mountpoint := MountPoint + } +}) -> + emqx_mountpoint:mount( + MountPoint, emqx_packet:to_message( - Packet, ClientId, - #{proto_ver => ProtoVer, - protocol => Protocol, - username => Username, - peerhost => PeerHost})). + Packet, + ClientId, + #{ + proto_ver => ProtoVer, + protocol => Protocol, + username => Username, + peerhost => PeerHost + } + ) + ). do_publish(_PacketId, Msg = #message{qos = ?QOS_0}, Channel) -> Result = emqx_broker:publish(Msg), NChannel = ensure_quota(Result, Channel), {ok, NChannel}; - do_publish(PacketId, Msg = #message{qos = ?QOS_1}, Channel) -> PubRes = emqx_broker:publish(Msg), RC = puback_reason_code(PubRes), NChannel = ensure_quota(PubRes, Channel), handle_out(puback, {PacketId, RC}, NChannel); - -do_publish(PacketId, Msg = #message{qos = ?QOS_2}, - Channel = #channel{clientinfo = ClientInfo, session = Session}) -> +do_publish( + PacketId, + Msg = #message{qos = ?QOS_2}, + Channel = #channel{clientinfo = ClientInfo, session = Session} +) -> case emqx_session:publish(ClientInfo, PacketId, Msg, Session) of {ok, PubRes, NSession} -> RC = puback_reason_code(PubRes), @@ -656,11 +730,15 @@ do_publish(PacketId, Msg = #message{qos = ?QOS_2}, ok = emqx_metrics:inc('packets.publish.inuse'), handle_out(pubrec, {PacketId, RC}, Channel); {error, RC = ?RC_RECEIVE_MAXIMUM_EXCEEDED} -> - ?SLOG(warning, #{ - msg => "dropped_qos2_packet", - reason => emqx_reason_codes:name(RC), - packet_id => PacketId - }, #{topic => Msg#message.topic}), + ?SLOG( + warning, + #{ + msg => "dropped_qos2_packet", + reason => emqx_reason_codes:name(RC), + packet_id => PacketId + }, + #{topic => Msg#message.topic} + ), ok = emqx_metrics:inc('packets.publish.dropped'), handle_out(disconnect, RC, Channel) end. @@ -669,10 +747,14 @@ ensure_quota(_, Channel = #channel{quota = undefined}) -> Channel; ensure_quota(PubRes, Channel = #channel{quota = Limiter}) -> Cnt = lists:foldl( - fun({_, _, ok}, N) -> N + 1; - ({_, _, {ok, I}}, N) -> N + I; - (_, N) -> N - end, 1, PubRes), + fun + ({_, _, ok}, N) -> N + 1; + ({_, _, {ok, I}}, N) -> N + I; + (_, N) -> N + end, + 1, + PubRes + ), case emqx_limiter_container:check(Cnt, ?LIMITER_ROUTING, Limiter) of {ok, NLimiter} -> Channel#channel{quota = NLimiter}; @@ -681,14 +763,16 @@ ensure_quota(PubRes, Channel = #channel{quota = Limiter}) -> end. -compile({inline, [puback_reason_code/1]}). -puback_reason_code([]) -> ?RC_NO_MATCHING_SUBSCRIBERS; +puback_reason_code([]) -> ?RC_NO_MATCHING_SUBSCRIBERS; puback_reason_code([_ | _]) -> ?RC_SUCCESS. -compile({inline, [after_message_acked/3]}). after_message_acked(ClientInfo, Msg, PubAckProps) -> ok = emqx_metrics:inc('messages.acked'), - emqx_hooks:run('message.acked', [ClientInfo, - emqx_message:set_header(puback_props, PubAckProps, Msg)]). + emqx_hooks:run('message.acked', [ + ClientInfo, + emqx_message:set_header(puback_props, PubAckProps, Msg) + ]). 
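The fold inside ensure_quota/2 above totals how many copies of a published message were actually routed before that count is charged against the routing limiter. Read in isolation it is just the sketch below; the module and function names are illustrative, only the tuple shapes are taken from the clauses above.

-module(quota_fold_sketch).
-export([count_routed/1]).

%% Start from 1, add one per plain 'ok' publish result and I per '{ok, I}';
%% anything else does not count towards the quota.
count_routed(PubRes) when is_list(PubRes) ->
    lists:foldl(
        fun
            ({_, _, ok}, N) -> N + 1;
            ({_, _, {ok, I}}, N) -> N + I;
            (_, N) -> N
        end,
        1,
        PubRes
    ).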
%%-------------------------------------------------------------------- %% Process Subscribe @@ -700,35 +784,50 @@ process_subscribe(TopicFilters, SubProps, Channel) -> process_subscribe([], _SubProps, Channel, Acc) -> {lists:reverse(Acc), Channel}; - process_subscribe([Topic = {TopicFilter, SubOpts} | More], SubProps, Channel, Acc) -> case check_sub_caps(TopicFilter, SubOpts, Channel) of - ok -> - {ReasonCode, NChannel} = do_subscribe(TopicFilter, - SubOpts#{sub_props => SubProps}, - Channel), + ok -> + {ReasonCode, NChannel} = do_subscribe( + TopicFilter, + SubOpts#{sub_props => SubProps}, + Channel + ), process_subscribe(More, SubProps, NChannel, [{Topic, ReasonCode} | Acc]); {error, ReasonCode} -> - ?SLOG(warning, #{ - msg => "cannot_subscribe_topic_filter", - reason => emqx_reason_codes:name(ReasonCode) - }, #{topic => TopicFilter}), + ?SLOG( + warning, + #{ + msg => "cannot_subscribe_topic_filter", + reason => emqx_reason_codes:name(ReasonCode) + }, + #{topic => TopicFilter} + ), process_subscribe(More, SubProps, Channel, [{Topic, ReasonCode} | Acc]) end. -do_subscribe(TopicFilter, SubOpts = #{qos := QoS}, Channel = - #channel{clientinfo = ClientInfo = #{mountpoint := MountPoint}, - session = Session}) -> +do_subscribe( + TopicFilter, + SubOpts = #{qos := QoS}, + Channel = + #channel{ + clientinfo = ClientInfo = #{mountpoint := MountPoint}, + session = Session + } +) -> NTopicFilter = emqx_mountpoint:mount(MountPoint, TopicFilter), NSubOpts = enrich_subopts(maps:merge(?DEFAULT_SUBOPTS, SubOpts), Channel), case emqx_session:subscribe(ClientInfo, NTopicFilter, NSubOpts, Session) of {ok, NSession} -> {QoS, set_session(NSession, Channel)}; {error, RC} -> - ?SLOG(warning, #{ - msg => "cannot_subscribe_topic_filter", - reason => emqx_reason_codes:text(RC) - }, #{topic => NTopicFilter}), + ?SLOG( + warning, + #{ + msg => "cannot_subscribe_topic_filter", + reason => emqx_reason_codes:text(RC) + }, + #{topic => NTopicFilter} + ), {RC, Channel} end. @@ -742,19 +841,25 @@ process_unsubscribe(TopicFilters, UnSubProps, Channel) -> process_unsubscribe([], _UnSubProps, Channel, Acc) -> {lists:reverse(Acc), Channel}; - process_unsubscribe([{TopicFilter, SubOpts} | More], UnSubProps, Channel, Acc) -> {RC, NChannel} = do_unsubscribe(TopicFilter, SubOpts#{unsub_props => UnSubProps}, Channel), process_unsubscribe(More, UnSubProps, NChannel, [RC | Acc]). -do_unsubscribe(TopicFilter, SubOpts, Channel = - #channel{clientinfo = ClientInfo = #{mountpoint := MountPoint}, - session = Session}) -> +do_unsubscribe( + TopicFilter, + SubOpts, + Channel = + #channel{ + clientinfo = ClientInfo = #{mountpoint := MountPoint}, + session = Session + } +) -> TopicFilter1 = emqx_mountpoint:mount(MountPoint, TopicFilter), case emqx_session:unsubscribe(ClientInfo, TopicFilter1, SubOpts, Session) of {ok, NSession} -> {?RC_SUCCESS, set_session(NSession, Channel)}; - {error, RC} -> {RC, Channel} + {error, RC} -> + {RC, Channel} end. %%-------------------------------------------------------------------- %% Process Disconnect @@ -767,21 +872,27 @@ maybe_clean_will_msg(_ReasonCode, Channel) -> Channel. 
%% MQTT-v5.0: 3.14.2.2.2 Session Expiry Interval -process_disconnect(_ReasonCode, #{'Session-Expiry-Interval' := Interval}, - Channel = #channel{conninfo = #{expiry_interval := 0}}) - when Interval > 0 -> +process_disconnect( + _ReasonCode, + #{'Session-Expiry-Interval' := Interval}, + Channel = #channel{conninfo = #{expiry_interval := 0}} +) when + Interval > 0 +-> handle_out(disconnect, ?RC_PROTOCOL_ERROR, Channel); - process_disconnect(ReasonCode, Properties, Channel) -> NChannel = maybe_update_expiry_interval(Properties, Channel), {ok, {close, disconnect_reason(ReasonCode)}, NChannel}. -maybe_update_expiry_interval(#{'Session-Expiry-Interval' := Interval}, - Channel = #channel{conninfo = ConnInfo, clientinfo = ClientInfo}) -> +maybe_update_expiry_interval( + #{'Session-Expiry-Interval' := Interval}, + Channel = #channel{conninfo = ConnInfo, clientinfo = ClientInfo} +) -> EI = timer:seconds(Interval), OldEI = maps:get(expiry_interval, ConnInfo, 0), case OldEI =:= EI of - true -> Channel; + true -> + Channel; false -> NChannel = Channel#channel{conninfo = ConnInfo#{expiry_interval => EI}}, ClientID = maps:get(clientid, ClientInfo, undefined), @@ -794,31 +905,42 @@ maybe_update_expiry_interval(#{'Session-Expiry-Interval' := Interval}, NChannel end end; -maybe_update_expiry_interval(_Properties, Channel) -> Channel. +maybe_update_expiry_interval(_Properties, Channel) -> + Channel. %%-------------------------------------------------------------------- %% Handle Delivers from broker to client %%-------------------------------------------------------------------- --spec(handle_deliver(list(emqx_types:deliver()), channel()) - -> {ok, channel()} | {ok, replies(), channel()}). +-spec handle_deliver(list(emqx_types:deliver()), channel()) -> + {ok, channel()} | {ok, replies(), channel()}. -handle_deliver(Delivers, Channel = #channel{takeover = true, - pendings = Pendings, - session = Session, - clientinfo = #{clientid := ClientId} = ClientInfo}) -> +handle_deliver( + Delivers, + Channel = #channel{ + takeover = true, + pendings = Pendings, + session = Session, + clientinfo = #{clientid := ClientId} = ClientInfo + } +) -> %% NOTE: Order is important here. While the takeover is in %% progress, the session cannot enqueue messages, since it already %% passed on the queue to the new connection in the session state. NPendings = lists:append( - Pendings, - emqx_session:ignore_local(ClientInfo, maybe_nack(Delivers), ClientId, Session)), + Pendings, + emqx_session:ignore_local(ClientInfo, maybe_nack(Delivers), ClientId, Session) + ), {ok, Channel#channel{pendings = NPendings}}; - -handle_deliver(Delivers, Channel = #channel{conn_state = disconnected, - takeover = false, - session = Session, - clientinfo = #{clientid := ClientId} = ClientInfo}) -> +handle_deliver( + Delivers, + Channel = #channel{ + conn_state = disconnected, + takeover = false, + session = Session, + clientinfo = #{clientid := ClientId} = ClientInfo + } +) -> Delivers1 = maybe_nack(Delivers), Delivers2 = emqx_session:ignore_local(ClientInfo, Delivers1, ClientId, Session), NSession = emqx_session:enqueue(ClientInfo, Delivers2, Session), @@ -826,13 +948,21 @@ handle_deliver(Delivers, Channel = #channel{conn_state = disconnected, %% We consider queued/dropped messages as delivered since they are now in the session state. 
maybe_mark_as_delivered(Session, Delivers), {ok, NChannel}; - -handle_deliver(Delivers, Channel = #channel{session = Session, - takeover = false, - clientinfo = #{clientid := ClientId} = ClientInfo - }) -> - case emqx_session:deliver(ClientInfo, - emqx_session:ignore_local(ClientInfo, Delivers, ClientId, Session), Session) of +handle_deliver( + Delivers, + Channel = #channel{ + session = Session, + takeover = false, + clientinfo = #{clientid := ClientId} = ClientInfo + } +) -> + case + emqx_session:deliver( + ClientInfo, + emqx_session:ignore_local(ClientInfo, Delivers, ClientId, Session), + Session + ) + of {ok, Publishes, NSession} -> NChannel = set_session(NSession, Channel), maybe_mark_as_delivered(NSession, Delivers), @@ -846,12 +976,13 @@ maybe_nack(Delivers) -> lists:filter(fun not_nacked/1, Delivers). not_nacked({deliver, _Topic, Msg}) -> - not (emqx_shared_sub:is_ack_required(Msg) - andalso (ok == emqx_shared_sub:nack_no_connection(Msg))). + not (emqx_shared_sub:is_ack_required(Msg) andalso + (ok == emqx_shared_sub:nack_no_connection(Msg))). maybe_mark_as_delivered(Session, Delivers) -> case emqx_session:info(is_persistent, Session) of - false -> skip; + false -> + skip; true -> SessionID = emqx_session:info(id, Session), emqx_persistent_session:mark_as_delivered(SessionID, Delivers) @@ -861,81 +992,77 @@ maybe_mark_as_delivered(Session, Delivers) -> %% Handle outgoing packet %%-------------------------------------------------------------------- --spec(handle_out(atom(), term(), channel()) - -> {ok, channel()} - | {ok, replies(), channel()} - | {shutdown, Reason :: term(), channel()} - | {shutdown, Reason :: term(), replies(), channel()}). +-spec handle_out(atom(), term(), channel()) -> + {ok, channel()} + | {ok, replies(), channel()} + | {shutdown, Reason :: term(), channel()} + | {shutdown, Reason :: term(), replies(), channel()}. handle_out(connack, {?RC_SUCCESS, SP, Props}, Channel = #channel{conninfo = ConnInfo}) -> - AckProps = run_fold([fun enrich_connack_caps/2, - fun enrich_server_keepalive/2, - fun enrich_response_information/2, - fun enrich_assigned_clientid/2 - ], Props, Channel), - NAckProps = run_hooks('client.connack', - [ConnInfo, emqx_reason_codes:name(?RC_SUCCESS)], - AckProps - ), - - return_connack(?CONNACK_PACKET(?RC_SUCCESS, SP, NAckProps), - ensure_keepalive(NAckProps, Channel)); + AckProps = run_fold( + [ + fun enrich_connack_caps/2, + fun enrich_server_keepalive/2, + fun enrich_response_information/2, + fun enrich_assigned_clientid/2 + ], + Props, + Channel + ), + NAckProps = run_hooks( + 'client.connack', + [ConnInfo, emqx_reason_codes:name(?RC_SUCCESS)], + AckProps + ), + return_connack( + ?CONNACK_PACKET(?RC_SUCCESS, SP, NAckProps), + ensure_keepalive(NAckProps, Channel) + ); handle_out(connack, ReasonCode, Channel = #channel{conninfo = ConnInfo}) -> Reason = emqx_reason_codes:name(ReasonCode), AckProps = run_hooks('client.connack', [ConnInfo, Reason], emqx_mqtt_props:new()), - AckPacket = ?CONNACK_PACKET(case maps:get(proto_ver, ConnInfo) of - ?MQTT_PROTO_V5 -> ReasonCode; - _ -> emqx_reason_codes:compat(connack, ReasonCode) - end, sp(false), AckProps), + AckPacket = ?CONNACK_PACKET( + case maps:get(proto_ver, ConnInfo) of + ?MQTT_PROTO_V5 -> ReasonCode; + _ -> emqx_reason_codes:compat(connack, ReasonCode) + end, + sp(false), + AckProps + ), shutdown(Reason, AckPacket, Channel); - %% Optimize? 
handle_out(publish, [], Channel) -> {ok, Channel}; - handle_out(publish, Publishes, Channel) -> {Packets, NChannel} = do_deliver(Publishes, Channel), {ok, {outgoing, Packets}, NChannel}; - handle_out(puback, {PacketId, ReasonCode}, Channel) -> {ok, ?PUBACK_PACKET(PacketId, ReasonCode), Channel}; - handle_out(pubrec, {PacketId, ReasonCode}, Channel) -> {ok, ?PUBREC_PACKET(PacketId, ReasonCode), Channel}; - handle_out(pubrel, {PacketId, ReasonCode}, Channel) -> {ok, ?PUBREL_PACKET(PacketId, ReasonCode), Channel}; - handle_out(pubcomp, {PacketId, ReasonCode}, Channel) -> {ok, ?PUBCOMP_PACKET(PacketId, ReasonCode), Channel}; - handle_out(suback, {PacketId, ReasonCodes}, Channel = ?IS_MQTT_V5) -> return_sub_unsub_ack(?SUBACK_PACKET(PacketId, ReasonCodes), Channel); - handle_out(suback, {PacketId, ReasonCodes}, Channel) -> ReasonCodes1 = [emqx_reason_codes:compat(suback, RC) || RC <- ReasonCodes], return_sub_unsub_ack(?SUBACK_PACKET(PacketId, ReasonCodes1), Channel); - handle_out(unsuback, {PacketId, ReasonCodes}, Channel = ?IS_MQTT_V5) -> return_sub_unsub_ack(?UNSUBACK_PACKET(PacketId, ReasonCodes), Channel); - handle_out(unsuback, {PacketId, _ReasonCodes}, Channel) -> return_sub_unsub_ack(?UNSUBACK_PACKET(PacketId), Channel); - handle_out(disconnect, ReasonCode, Channel) when is_integer(ReasonCode) -> ReasonName = disconnect_reason(ReasonCode), handle_out(disconnect, {ReasonCode, ReasonName}, Channel); - handle_out(disconnect, {ReasonCode, ReasonName}, Channel = ?IS_MQTT_V5) -> Packet = ?DISCONNECT_PACKET(ReasonCode), {ok, [{outgoing, Packet}, {close, ReasonName}], Channel}; - handle_out(disconnect, {_ReasonCode, ReasonName}, Channel) -> {ok, {close, ReasonName}, Channel}; - handle_out(auth, {ReasonCode, Properties}, Channel) -> {ok, ?AUTH_PACKET(ReasonCode, Properties), Channel}; - handle_out(Type, Data, Channel) -> ?SLOG(error, #{msg => "unexpected_outgoing", type => Type, data => Data}), {ok, Channel}. 
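Most clauses above change only in layout; the recurring syntactic change throughout the patch is dropping the wrapping parentheses from -spec and -type attributes (the layout matches erlfmt's output). Both spellings are accepted by the Erlang compiler, as in this illustrative module that is not part of EMQX:

-module(spec_style_sketch).
-export([double/1]).

%% Previous style in this codebase:  -spec(double(integer()) -> integer()).
%% Style after this patch:
-spec double(integer()) -> integer().
double(X) -> X * 2.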
@@ -947,11 +1074,13 @@ handle_out(Type, Data, Channel) -> return_connack(AckPacket, Channel) -> Replies = [{event, connected}, {connack, AckPacket}], case maybe_resume_session(Channel) of - ignore -> {ok, Replies, Channel}; + ignore -> + {ok, Replies, Channel}; {ok, Publishes, NSession} -> - NChannel0 = Channel#channel{resuming = false, - pendings = [] - }, + NChannel0 = Channel#channel{ + resuming = false, + pendings = [] + }, NChannel1 = set_session(NSession, NChannel0), {Packets, NChannel2} = do_deliver(Publishes, NChannel1), Outgoing = [{outgoing, Packets} || length(Packets) > 0], @@ -965,28 +1094,36 @@ return_connack(AckPacket, Channel) -> %% return list(emqx_types:packet()) do_deliver({pubrel, PacketId}, Channel) -> {[?PUBREL_PACKET(PacketId, ?RC_SUCCESS)], Channel}; - -do_deliver({PacketId, Msg}, Channel = #channel{clientinfo = ClientInfo = - #{mountpoint := MountPoint}}) -> +do_deliver( + {PacketId, Msg}, + Channel = #channel{ + clientinfo = + ClientInfo = + #{mountpoint := MountPoint} + } +) -> ok = emqx_metrics:inc('messages.delivered'), - Msg1 = emqx_hooks:run_fold('message.delivered', - [ClientInfo], - emqx_message:update_expiry(Msg) - ), + Msg1 = emqx_hooks:run_fold( + 'message.delivered', + [ClientInfo], + emqx_message:update_expiry(Msg) + ), Msg2 = emqx_mountpoint:unmount(MountPoint, Msg1), Packet = emqx_message:to_packet(PacketId, Msg2), {NPacket, NChannel} = packing_alias(Packet, Channel), {[NPacket], NChannel}; - do_deliver([Publish], Channel) -> do_deliver(Publish, Channel); - do_deliver(Publishes, Channel) when is_list(Publishes) -> {Packets, NChannel} = - lists:foldl(fun(Publish, {Acc, Chann}) -> - {Packets, NChann} = do_deliver(Publish, Chann), - {Packets ++ Acc, NChann} - end, {[], Channel}, Publishes), + lists:foldl( + fun(Publish, {Acc, Chann}) -> + {Packets, NChann} = do_deliver(Publish, Chann), + {Packets ++ Acc, NChann} + end, + {[], Channel}, + Publishes + ), {lists:reverse(Packets), NChannel}. %%-------------------------------------------------------------------- @@ -1000,38 +1137,42 @@ return_sub_unsub_ack(Packet, Channel) -> %% Handle call %%-------------------------------------------------------------------- --spec(handle_call(Req :: term(), channel()) - -> {reply, Reply :: term(), channel()} - | {shutdown, Reason :: term(), Reply :: term(), channel()} - | {shutdown, Reason :: term(), Reply :: term(), emqx_types:packet(), channel()}). +-spec handle_call(Req :: term(), channel()) -> + {reply, Reply :: term(), channel()} + | {shutdown, Reason :: term(), Reply :: term(), channel()} + | {shutdown, Reason :: term(), Reply :: term(), emqx_types:packet(), channel()}. 
handle_call(kick, Channel) -> Channel1 = ensure_disconnected(kicked, Channel), disconnect_and_shutdown(kicked, ok, Channel1); - handle_call(discard, Channel) -> disconnect_and_shutdown(discarded, ok, Channel); - %% Session Takeover handle_call({takeover, 'begin'}, Channel = #channel{session = Session}) -> reply(Session, Channel#channel{takeover = true}); - -handle_call({takeover, 'end'}, Channel = #channel{session = Session, - pendings = Pendings}) -> +handle_call( + {takeover, 'end'}, + Channel = #channel{ + session = Session, + pendings = Pendings + } +) -> ok = emqx_session:takeover(Session), %% TODO: Should not drain deliver here (side effect) Delivers = emqx_misc:drain_deliver(), AllPendings = lists:append(Delivers, Pendings), disconnect_and_shutdown(takenover, AllPendings, Channel); - handle_call(list_authz_cache, Channel) -> {reply, emqx_authz_cache:list_authz_cache(), Channel}; - handle_call({quota, Bucket}, #channel{quota = Quota} = Channel) -> Quota2 = emqx_limiter_container:update_by_name(message_routing, Bucket, Quota), reply(ok, Channel#channel{quota = Quota2}); - -handle_call({keepalive, Interval}, Channel = #channel{keepalive = KeepAlive, - conninfo = ConnInfo}) -> +handle_call( + {keepalive, Interval}, + Channel = #channel{ + keepalive = KeepAlive, + conninfo = ConnInfo + } +) -> ClientId = info(clientid, Channel), NKeepalive = emqx_keepalive:set(interval, Interval * 1000, KeepAlive), NConnInfo = maps:put(keepalive, Interval, ConnInfo), @@ -1040,7 +1181,6 @@ handle_call({keepalive, Interval}, Channel = #channel{keepalive = KeepAlive, ChanInfo1 = info(NChannel), emqx_cm:set_chan_info(ClientId, ChanInfo1#{sockinfo => SockInfo}), reply(ok, reset_timer(alive_timer, NChannel)); - handle_call(Req, Channel) -> ?SLOG(error, #{msg => "unexpected_call", call => Req}), reply(ignored, Channel). @@ -1049,51 +1189,52 @@ handle_call(Req, Channel) -> %% Handle Info %%-------------------------------------------------------------------- --spec(handle_info(Info :: term(), channel()) - -> ok | {ok, channel()} | {shutdown, Reason :: term(), channel()}). +-spec handle_info(Info :: term(), channel()) -> + ok | {ok, channel()} | {shutdown, Reason :: term(), channel()}. 
-handle_info({subscribe, TopicFilters}, Channel ) -> +handle_info({subscribe, TopicFilters}, Channel) -> {_, NChannel} = lists:foldl( fun({TopicFilter, SubOpts}, {_, ChannelAcc}) -> do_subscribe(TopicFilter, SubOpts, ChannelAcc) - end, {[], Channel}, parse_topic_filters(TopicFilters)), + end, + {[], Channel}, + parse_topic_filters(TopicFilters) + ), {ok, NChannel}; - handle_info({unsubscribe, TopicFilters}, Channel) -> {_RC, NChannel} = process_unsubscribe(TopicFilters, #{}, Channel), {ok, NChannel}; - handle_info({sock_closed, Reason}, Channel = #channel{conn_state = idle}) -> shutdown(Reason, Channel); - handle_info({sock_closed, Reason}, Channel = #channel{conn_state = connecting}) -> shutdown(Reason, Channel); - -handle_info({sock_closed, Reason}, Channel = - #channel{conn_state = ConnState, - clientinfo = ClientInfo = #{zone := Zone}}) - when ConnState =:= connected orelse ConnState =:= reauthenticating -> - emqx_config:get_zone_conf(Zone, [flapping_detect, enable]) - andalso emqx_flapping:detect(ClientInfo), +handle_info( + {sock_closed, Reason}, + Channel = + #channel{ + conn_state = ConnState, + clientinfo = ClientInfo = #{zone := Zone} + } +) when + ConnState =:= connected orelse ConnState =:= reauthenticating +-> + emqx_config:get_zone_conf(Zone, [flapping_detect, enable]) andalso + emqx_flapping:detect(ClientInfo), Channel1 = ensure_disconnected(Reason, mabye_publish_will_msg(Channel)), case maybe_shutdown(Reason, Channel1) of {ok, Channel2} -> {ok, {event, disconnected}, Channel2}; Shutdown -> Shutdown end; - handle_info({sock_closed, Reason}, Channel = #channel{conn_state = disconnected}) -> ?SLOG(error, #{msg => "unexpected_sock_close", reason => Reason}), {ok, Channel}; - handle_info(clean_authz_cache, Channel) -> ok = emqx_authz_cache:empty_authz_cache(), {ok, Channel}; - handle_info(die_if_test = Info, Channel) -> die_if_test_compiled(), ?SLOG(error, #{msg => "unexpected_info", info => Info}), {ok, Channel}; - handle_info(Info, Channel) -> ?SLOG(error, #{msg => "unexpected_info", info => Info}), {ok, Channel}. @@ -1115,18 +1256,27 @@ die_if_test_compiled() -> %% Handle timeout %%-------------------------------------------------------------------- --spec(handle_timeout(reference(), Msg :: term(), channel()) - -> {ok, channel()} - | {ok, replies(), channel()} - | {shutdown, Reason :: term(), channel()}). -handle_timeout(_TRef, {keepalive, _StatVal}, - Channel = #channel{keepalive = undefined}) -> +-spec handle_timeout(reference(), Msg :: term(), channel()) -> + {ok, channel()} + | {ok, replies(), channel()} + | {shutdown, Reason :: term(), channel()}. 
+handle_timeout( + _TRef, + {keepalive, _StatVal}, + Channel = #channel{keepalive = undefined} +) -> {ok, Channel}; -handle_timeout(_TRef, {keepalive, _StatVal}, - Channel = #channel{conn_state = disconnected}) -> +handle_timeout( + _TRef, + {keepalive, _StatVal}, + Channel = #channel{conn_state = disconnected} +) -> {ok, Channel}; -handle_timeout(_TRef, {keepalive, StatVal}, - Channel = #channel{keepalive = Keepalive}) -> +handle_timeout( + _TRef, + {keepalive, StatVal}, + Channel = #channel{keepalive = Keepalive} +) -> case emqx_keepalive:check(StatVal, Keepalive) of {ok, NKeepalive} -> NChannel = Channel#channel{keepalive = NKeepalive}, @@ -1134,12 +1284,17 @@ handle_timeout(_TRef, {keepalive, StatVal}, {error, timeout} -> handle_out(disconnect, ?RC_KEEP_ALIVE_TIMEOUT, Channel) end; - -handle_timeout(_TRef, retry_delivery, - Channel = #channel{conn_state = disconnected}) -> +handle_timeout( + _TRef, + retry_delivery, + Channel = #channel{conn_state = disconnected} +) -> {ok, Channel}; -handle_timeout(_TRef, retry_delivery, - Channel = #channel{session = Session, clientinfo = ClientInfo}) -> +handle_timeout( + _TRef, + retry_delivery, + Channel = #channel{session = Session, clientinfo = ClientInfo} +) -> case emqx_session:retry(ClientInfo, Session) of {ok, NSession} -> {ok, clean_timer(retry_timer, set_session(NSession, Channel))}; @@ -1147,28 +1302,33 @@ handle_timeout(_TRef, retry_delivery, NChannel = set_session(NSession, Channel), handle_out(publish, Publishes, reset_timer(retry_timer, Timeout, NChannel)) end; - -handle_timeout(_TRef, expire_awaiting_rel, - Channel = #channel{conn_state = disconnected}) -> +handle_timeout( + _TRef, + expire_awaiting_rel, + Channel = #channel{conn_state = disconnected} +) -> {ok, Channel}; -handle_timeout(_TRef, expire_awaiting_rel, - Channel = #channel{session = Session, clientinfo = ClientInfo}) -> +handle_timeout( + _TRef, + expire_awaiting_rel, + Channel = #channel{session = Session, clientinfo = ClientInfo} +) -> case emqx_session:expire(ClientInfo, awaiting_rel, Session) of {ok, NSession} -> {ok, clean_timer(await_timer, set_session(NSession, Channel))}; {ok, Timeout, NSession} -> {ok, reset_timer(await_timer, Timeout, set_session(NSession, Channel))} end; - handle_timeout(_TRef, expire_session, Channel) -> shutdown(expired, Channel); - handle_timeout(_TRef, will_message, Channel = #channel{will_msg = WillMsg}) -> (WillMsg =/= undefined) andalso publish_will_msg(WillMsg), {ok, clean_timer(will_timer, Channel#channel{will_msg = undefined})}; - -handle_timeout(_TRef, expire_quota_limit, - #channel{quota = Quota} = Channel) -> +handle_timeout( + _TRef, + expire_quota_limit, + #channel{quota = Quota} = Channel +) -> case emqx_limiter_container:retry(?LIMITER_ROUTING, Quota) of {_, Intv, Quota2} -> Channel2 = ensure_timer(quota_timer, Intv, Channel#channel{quota = Quota2}), @@ -1176,7 +1336,6 @@ handle_timeout(_TRef, expire_quota_limit, {_, Quota2} -> {ok, clean_timer(quota_timer, Channel#channel{quota = Quota2})} end; - handle_timeout(_TRef, Msg, Channel) -> ?SLOG(error, #{msg => "unexpected_timeout", timeout_msg => Msg}), {ok, Channel}. 
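The keepalive timeout clauses above boil down to comparing an activity counter between two timer ticks. A minimal sketch of that contract follows; the record here stands in for, and need not match, the real emqx_keepalive state.

-module(keepalive_sketch).
-export([check/2]).

-record(keepalive, {statval = 0 :: non_neg_integer()}).

%% Counter moved since the last tick: the peer is alive, remember the new value.
check(NewVal, KA = #keepalive{statval = OldVal}) when NewVal =/= OldVal ->
    {ok, KA#keepalive{statval = NewVal}};
%% Counter unchanged: report a keepalive timeout.
check(_NewVal, _KA) ->
    {error, timeout}.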
@@ -1189,13 +1348,13 @@ ensure_timer([Name], Channel) -> ensure_timer(Name, Channel); ensure_timer([Name | Rest], Channel) -> ensure_timer(Rest, ensure_timer(Name, Channel)); - ensure_timer(Name, Channel = #channel{timers = Timers}) -> TRef = maps:get(Name, Timers, undefined), Time = interval(Name, Channel), case TRef == undefined andalso Time > 0 of - true -> ensure_timer(Name, Time, Channel); - false -> Channel %% Timer disabled or exists + true -> ensure_timer(Name, Time, Channel); + %% Timer disabled or exists + false -> Channel end. ensure_timer(Name, Time, Channel = #channel{timers = Timers}) -> @@ -1227,14 +1386,17 @@ interval(will_timer, #channel{will_msg = WillMsg}) -> %% Terminate %%-------------------------------------------------------------------- --spec(terminate(any(), channel()) -> ok). -terminate(_, #channel{conn_state = idle}) -> ok; +-spec terminate(any(), channel()) -> ok. +terminate(_, #channel{conn_state = idle}) -> + ok; terminate(normal, Channel) -> run_terminate_hook(normal, Channel); terminate({shutdown, kicked}, Channel) -> run_terminate_hook(kicked, Channel); -terminate({shutdown, Reason}, Channel) when Reason =:= discarded; - Reason =:= takenover -> +terminate({shutdown, Reason}, Channel) when + Reason =:= discarded; + Reason =:= takenover +-> run_terminate_hook(Reason, Channel); terminate(Reason, Channel = #channel{will_msg = WillMsg}) -> (WillMsg =/= undefined) andalso publish_will_msg(WillMsg), @@ -1244,15 +1406,18 @@ terminate(Reason, Channel = #channel{will_msg = WillMsg}) -> persist_if_session(#channel{session = Session} = Channel) -> case emqx_session:is_session(Session) of true -> - _ = emqx_persistent_session:persist(Channel#channel.clientinfo, - Channel#channel.conninfo, - Channel#channel.session), + _ = emqx_persistent_session:persist( + Channel#channel.clientinfo, + Channel#channel.conninfo, + Channel#channel.session + ), ok; false -> ok end. -run_terminate_hook(_Reason, #channel{session = undefined}) -> ok; +run_terminate_hook(_Reason, #channel{session = undefined}) -> + ok; run_terminate_hook(Reason, #channel{clientinfo = ClientInfo, session = Session}) -> emqx_session:terminate(ClientInfo, Reason, Session). 
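ensure_timer/2 at the start of this hunk arms a named timer only when none is recorded under that name and the configured interval is positive. The same guard pattern over a plain map, using erlang:start_timer/3 in place of whatever helper the channel actually calls:

-module(timer_sketch).
-export([ensure_timer/3]).

%% Arm the timer only if it is absent and enabled; otherwise keep the map as-is.
ensure_timer(Name, Time, Timers) when Time > 0, not is_map_key(Name, Timers) ->
    Timers#{Name => erlang:start_timer(Time, self(), Name)};
ensure_timer(_Name, _Time, Timers) ->
    Timers.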
@@ -1266,34 +1431,40 @@ overload_protection(_, #channel{clientinfo = #{zone := Zone}}) -> %%-------------------------------------------------------------------- %% Enrich MQTT Connect Info -enrich_conninfo(ConnPkt = #mqtt_packet_connect{ - proto_name = ProtoName, - proto_ver = ProtoVer, - clean_start = CleanStart, - keepalive = Keepalive, - properties = ConnProps, - clientid = ClientId, - username = Username - }, - Channel = #channel{conninfo = ConnInfo, - clientinfo = #{zone := Zone} - }) -> +enrich_conninfo( + ConnPkt = #mqtt_packet_connect{ + proto_name = ProtoName, + proto_ver = ProtoVer, + clean_start = CleanStart, + keepalive = Keepalive, + properties = ConnProps, + clientid = ClientId, + username = Username + }, + Channel = #channel{ + conninfo = ConnInfo, + clientinfo = #{zone := Zone} + } +) -> ExpiryInterval = expiry_interval(Zone, ConnPkt), - NConnInfo = ConnInfo#{proto_name => ProtoName, - proto_ver => ProtoVer, - clean_start => CleanStart, - keepalive => Keepalive, - clientid => ClientId, - username => Username, - conn_props => ConnProps, - expiry_interval => ExpiryInterval, - receive_maximum => receive_maximum(Zone, ConnProps) - }, + NConnInfo = ConnInfo#{ + proto_name => ProtoName, + proto_ver => ProtoVer, + clean_start => CleanStart, + keepalive => Keepalive, + clientid => ClientId, + username => Username, + conn_props => ConnProps, + expiry_interval => ExpiryInterval, + receive_maximum => receive_maximum(Zone, ConnProps) + }, {ok, Channel#channel{conninfo = NConnInfo}}. %% If the Session Expiry Interval is absent the value 0 is used. -expiry_interval(_, #mqtt_packet_connect{proto_ver = ?MQTT_PROTO_V5, - properties = ConnProps}) -> +expiry_interval(_, #mqtt_packet_connect{ + proto_ver = ?MQTT_PROTO_V5, + properties = ConnProps +}) -> timer:seconds(emqx_mqtt_props:get('Session-Expiry-Interval', ConnProps, 0)); expiry_interval(Zone, #mqtt_packet_connect{clean_start = false}) -> get_mqtt_conf(Zone, session_expiry_interval); @@ -1301,10 +1472,11 @@ expiry_interval(_, #mqtt_packet_connect{clean_start = true}) -> 0. receive_maximum(Zone, ConnProps) -> - MaxInflightConfig = case get_mqtt_conf(Zone, max_inflight) of - 0 -> ?RECEIVE_MAXIMUM_LIMIT; - N -> N - end, + MaxInflightConfig = + case get_mqtt_conf(Zone, max_inflight) of + 0 -> ?RECEIVE_MAXIMUM_LIMIT; + N -> N + end, %% Received might be zero which should be a protocol error %% we do not validate MQTT properties here %% it is to be caught later @@ -1331,34 +1503,49 @@ check_connect(ConnPkt, #channel{clientinfo = #{zone := Zone}}) -> %% Enrich Client Info enrich_client(ConnPkt, Channel = #channel{clientinfo = ClientInfo}) -> - {ok, NConnPkt, NClientInfo} = pipeline([fun set_username/2, - fun set_bridge_mode/2, - fun maybe_username_as_clientid/2, - fun maybe_assign_clientid/2, - fun fix_mountpoint/2 - ], ConnPkt, ClientInfo), + {ok, NConnPkt, NClientInfo} = pipeline( + [ + fun set_username/2, + fun set_bridge_mode/2, + fun maybe_username_as_clientid/2, + fun maybe_assign_clientid/2, + fun fix_mountpoint/2 + ], + ConnPkt, + ClientInfo + ), {ok, NConnPkt, Channel#channel{clientinfo = NClientInfo}}. -set_username(#mqtt_packet_connect{username = Username}, - ClientInfo = #{username := undefined}) -> +set_username( + #mqtt_packet_connect{username = Username}, + ClientInfo = #{username := undefined} +) -> {ok, ClientInfo#{username => Username}}; -set_username(_ConnPkt, ClientInfo) -> {ok, ClientInfo}. +set_username(_ConnPkt, ClientInfo) -> + {ok, ClientInfo}. 
set_bridge_mode(#mqtt_packet_connect{is_bridge = true}, ClientInfo) -> {ok, ClientInfo#{is_bridge => true}}; -set_bridge_mode(_ConnPkt, _ClientInfo) -> ok. +set_bridge_mode(_ConnPkt, _ClientInfo) -> + ok. maybe_username_as_clientid(_ConnPkt, ClientInfo = #{username := undefined}) -> {ok, ClientInfo}; -maybe_username_as_clientid(_ConnPkt, ClientInfo = #{zone := Zone, - username := Username}) -> +maybe_username_as_clientid( + _ConnPkt, + ClientInfo = #{ + zone := Zone, + username := Username + } +) -> case get_mqtt_conf(Zone, use_username_as_clientid) of - true -> {ok, ClientInfo#{clientid => Username}}; + true -> {ok, ClientInfo#{clientid => Username}}; false -> ok end. -maybe_assign_clientid(_ConnPkt, ClientInfo = #{clientid := ClientId}) - when ClientId /= undefined -> +maybe_assign_clientid(_ConnPkt, ClientInfo = #{clientid := ClientId}) when + ClientId /= undefined +-> {ok, ClientInfo}; maybe_assign_clientid(#mqtt_packet_connect{clientid = <<>>}, ClientInfo) -> %% Generate a rand clientId @@ -1366,7 +1553,8 @@ maybe_assign_clientid(#mqtt_packet_connect{clientid = <<>>}, ClientInfo) -> maybe_assign_clientid(#mqtt_packet_connect{clientid = ClientId}, ClientInfo) -> {ok, ClientInfo#{clientid => ClientId}}. -fix_mountpoint(_ConnPkt, #{mountpoint := undefined}) -> ok; +fix_mountpoint(_ConnPkt, #{mountpoint := undefined}) -> + ok; fix_mountpoint(_ConnPkt, ClientInfo = #{mountpoint := MountPoint}) -> MountPoint1 = emqx_mountpoint:replvar(MountPoint, ClientInfo), {ok, ClientInfo#{mountpoint := MountPoint1}}. @@ -1382,65 +1570,87 @@ set_log_meta(_ConnPkt, #channel{clientinfo = #{clientid := ClientId}}) -> check_banned(_ConnPkt, #channel{clientinfo = ClientInfo}) -> case emqx_banned:check(ClientInfo) of - true -> {error, ?RC_BANNED}; + true -> {error, ?RC_BANNED}; false -> ok end. 
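enrich_client/2 above threads the CONNECT packet and the client info through a list of small check functions. Judging only from the clause shapes in this hunk, each check may return ok (keep the current value), {ok, New} (replace it) or {error, Reason} (abort); a reduced, single-accumulator sketch of that convention, not the real pipeline helper:

-module(pipeline_sketch).
-export([pipeline/2]).

%% Fold Value through Checks, stopping at the first error.
pipeline([], Value) ->
    {ok, Value};
pipeline([Check | More], Value) ->
    case Check(Value) of
        ok -> pipeline(More, Value);
        {ok, NewValue} -> pipeline(More, NewValue);
        {error, _} = Error -> Error
    end.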
%%-------------------------------------------------------------------- %% Authenticate -authenticate(?CONNECT_PACKET( - #mqtt_packet_connect{ - proto_ver = ?MQTT_PROTO_V5, - properties = #{'Authentication-Method' := AuthMethod} = Properties}), - #channel{clientinfo = ClientInfo, - auth_cache = AuthCache} = Channel) -> +authenticate( + ?CONNECT_PACKET( + #mqtt_packet_connect{ + proto_ver = ?MQTT_PROTO_V5, + properties = #{'Authentication-Method' := AuthMethod} = Properties + } + ), + #channel{ + clientinfo = ClientInfo, + auth_cache = AuthCache + } = Channel +) -> AuthData = emqx_mqtt_props:get('Authentication-Data', Properties, undefined), - do_authenticate(ClientInfo#{auth_method => AuthMethod, - auth_data => AuthData, - auth_cache => AuthCache}, Channel); - -authenticate(?CONNECT_PACKET(#mqtt_packet_connect{password = Password}), - #channel{clientinfo = ClientInfo} = Channel) -> + do_authenticate( + ClientInfo#{ + auth_method => AuthMethod, + auth_data => AuthData, + auth_cache => AuthCache + }, + Channel + ); +authenticate( + ?CONNECT_PACKET(#mqtt_packet_connect{password = Password}), + #channel{clientinfo = ClientInfo} = Channel +) -> do_authenticate(ClientInfo#{password => Password}, Channel); - -authenticate(?AUTH_PACKET(_, #{'Authentication-Method' := AuthMethod} = Properties), - #channel{clientinfo = ClientInfo, - conninfo = #{conn_props := ConnProps}, - auth_cache = AuthCache} = Channel) -> +authenticate( + ?AUTH_PACKET(_, #{'Authentication-Method' := AuthMethod} = Properties), + #channel{ + clientinfo = ClientInfo, + conninfo = #{conn_props := ConnProps}, + auth_cache = AuthCache + } = Channel +) -> case emqx_mqtt_props:get('Authentication-Method', ConnProps, undefined) of AuthMethod -> AuthData = emqx_mqtt_props:get('Authentication-Data', Properties, undefined), - do_authenticate(ClientInfo#{auth_method => AuthMethod, - auth_data => AuthData, - auth_cache => AuthCache}, Channel); + do_authenticate( + ClientInfo#{ + auth_method => AuthMethod, + auth_data => AuthData, + auth_cache => AuthCache + }, + Channel + ); _ -> {error, ?RC_BAD_AUTHENTICATION_METHOD} end. 
-do_authenticate(#{auth_method := AuthMethod} = Credential, - #channel{clientinfo = ClientInfo} = Channel) -> +do_authenticate( + #{auth_method := AuthMethod} = Credential, + #channel{clientinfo = ClientInfo} = Channel +) -> Properties = #{'Authentication-Method' => AuthMethod}, case emqx_access_control:authenticate(Credential) of {ok, Result} -> - {ok, Properties, - Channel#channel{ - clientinfo = ClientInfo#{is_superuser => maps:get(is_superuser, Result, false)}, - auth_cache = #{}}}; + {ok, Properties, Channel#channel{ + clientinfo = ClientInfo#{is_superuser => maps:get(is_superuser, Result, false)}, + auth_cache = #{} + }}; {ok, Result, AuthData} -> - {ok, Properties#{'Authentication-Data' => AuthData}, - Channel#channel{ - clientinfo = ClientInfo#{is_superuser => maps:get(is_superuser, Result, false)}, - auth_cache = #{}}}; + {ok, Properties#{'Authentication-Data' => AuthData}, Channel#channel{ + clientinfo = ClientInfo#{is_superuser => maps:get(is_superuser, Result, false)}, + auth_cache = #{} + }}; {continue, AuthCache} -> {continue, Properties, Channel#channel{auth_cache = AuthCache}}; {continue, AuthData, AuthCache} -> - {continue, Properties#{'Authentication-Data' => AuthData}, - Channel#channel{auth_cache = AuthCache}}; + {continue, Properties#{'Authentication-Data' => AuthData}, Channel#channel{ + auth_cache = AuthCache + }}; {error, Reason} -> {error, emqx_reason_codes:connack_error(Reason)} end; - do_authenticate(Credential, #channel{clientinfo = ClientInfo} = Channel) -> case emqx_access_control:authenticate(Credential) of {ok, #{is_superuser := IsSuperuser}} -> @@ -1452,65 +1662,82 @@ do_authenticate(Credential, #channel{clientinfo = ClientInfo} = Channel) -> %%-------------------------------------------------------------------- %% Process Topic Alias -process_alias(Packet = #mqtt_packet{ - variable = #mqtt_packet_publish{topic_name = <<>>, - properties = #{'Topic-Alias' := AliasId} - } = Publish - }, - Channel = ?IS_MQTT_V5 = #channel{topic_aliases = TopicAliases}) -> +process_alias( + Packet = #mqtt_packet{ + variable = + #mqtt_packet_publish{ + topic_name = <<>>, + properties = #{'Topic-Alias' := AliasId} + } = Publish + }, + Channel = ?IS_MQTT_V5 = #channel{topic_aliases = TopicAliases} +) -> case find_alias(inbound, AliasId, TopicAliases) of {ok, Topic} -> NPublish = Publish#mqtt_packet_publish{topic_name = Topic}, {ok, Packet#mqtt_packet{variable = NPublish}, Channel}; - error -> {error, ?RC_PROTOCOL_ERROR} + error -> + {error, ?RC_PROTOCOL_ERROR} end; - -process_alias(#mqtt_packet{ - variable = #mqtt_packet_publish{topic_name = Topic, - properties = #{'Topic-Alias' := AliasId} - } - }, - Channel = ?IS_MQTT_V5 = #channel{topic_aliases = TopicAliases}) -> +process_alias( + #mqtt_packet{ + variable = #mqtt_packet_publish{ + topic_name = Topic, + properties = #{'Topic-Alias' := AliasId} + } + }, + Channel = ?IS_MQTT_V5 = #channel{topic_aliases = TopicAliases} +) -> NTopicAliases = save_alias(inbound, AliasId, Topic, TopicAliases), {ok, Channel#channel{topic_aliases = NTopicAliases}}; - -process_alias(_Packet, Channel) -> {ok, Channel}. +process_alias(_Packet, Channel) -> + {ok, Channel}. 
%%-------------------------------------------------------------------- %% Packing Topic Alias -packing_alias(Packet = #mqtt_packet{ - variable = #mqtt_packet_publish{ - topic_name = Topic, - properties = Prop - } = Publish - }, - Channel = ?IS_MQTT_V5 = #channel{topic_aliases = TopicAliases, - alias_maximum = Limits}) -> +packing_alias( + Packet = #mqtt_packet{ + variable = + #mqtt_packet_publish{ + topic_name = Topic, + properties = Prop + } = Publish + }, + Channel = + ?IS_MQTT_V5 = #channel{ + topic_aliases = TopicAliases, + alias_maximum = Limits + } +) -> case find_alias(outbound, Topic, TopicAliases) of {ok, AliasId} -> NPublish = Publish#mqtt_packet_publish{ - topic_name = <<>>, - properties = maps:merge(Prop, #{'Topic-Alias' => AliasId}) - }, + topic_name = <<>>, + properties = maps:merge(Prop, #{'Topic-Alias' => AliasId}) + }, {Packet#mqtt_packet{variable = NPublish}, Channel}; error -> #{outbound := Aliases} = TopicAliases, AliasId = maps:size(Aliases) + 1, - case (Limits =:= undefined) orelse - (AliasId =< maps:get(outbound, Limits, 0)) of + case + (Limits =:= undefined) orelse + (AliasId =< maps:get(outbound, Limits, 0)) + of true -> NTopicAliases = save_alias(outbound, AliasId, Topic, TopicAliases), NChannel = Channel#channel{topic_aliases = NTopicAliases}, NPublish = Publish#mqtt_packet_publish{ - topic_name = Topic, - properties = maps:merge(Prop, #{'Topic-Alias' => AliasId}) - }, + topic_name = Topic, + properties = maps:merge(Prop, #{'Topic-Alias' => AliasId}) + }, {Packet#mqtt_packet{variable = NPublish}, NChannel}; - false -> {Packet, Channel} + false -> + {Packet, Channel} end end; -packing_alias(Packet, Channel) -> {Packet, Channel}. +packing_alias(Packet, Channel) -> + {Packet, Channel}. %%-------------------------------------------------------------------- %% Check quota state @@ -1524,37 +1751,49 @@ check_quota_exceeded(_, #channel{timers = Timers}) -> %%-------------------------------------------------------------------- %% Check Pub Alias -check_pub_alias(#mqtt_packet{ - variable = #mqtt_packet_publish{ - properties = #{'Topic-Alias' := AliasId} - } - }, - #channel{alias_maximum = Limits}) -> - case (Limits =:= undefined) orelse - (AliasId =< maps:get(inbound, Limits, ?MAX_TOPIC_AlIAS)) of - true -> ok; +check_pub_alias( + #mqtt_packet{ + variable = #mqtt_packet_publish{ + properties = #{'Topic-Alias' := AliasId} + } + }, + #channel{alias_maximum = Limits} +) -> + case + (Limits =:= undefined) orelse + (AliasId =< maps:get(inbound, Limits, ?MAX_TOPIC_AlIAS)) + of + true -> ok; false -> {error, ?RC_TOPIC_ALIAS_INVALID} end; -check_pub_alias(_Packet, _Channel) -> ok. +check_pub_alias(_Packet, _Channel) -> + ok. %%-------------------------------------------------------------------- %% Check Pub Authorization -check_pub_authz(#mqtt_packet{variable = #mqtt_packet_publish{topic_name = Topic}}, - #channel{clientinfo = ClientInfo}) -> +check_pub_authz( + #mqtt_packet{variable = #mqtt_packet_publish{topic_name = Topic}}, + #channel{clientinfo = ClientInfo} +) -> case emqx_access_control:authorize(ClientInfo, publish, Topic) of allow -> ok; - deny -> {error, ?RC_NOT_AUTHORIZED} + deny -> {error, ?RC_NOT_AUTHORIZED} end. 
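The topic-alias code above keeps two maps: process_alias/2 resolves or records inbound aliases, while packing_alias/2 reuses or allocates outbound ones as long as it stays under the peer's Topic-Alias-Maximum. Stripped down to bare maps (illustrative functions, not the EMQX data structures):

-module(topic_alias_sketch).
-export([resolve_inbound/2, assign_outbound/3]).

%% Inbound: a PUBLISH with an empty topic must refer to an alias seen earlier.
resolve_inbound(AliasId, Aliases) ->
    case maps:find(AliasId, Aliases) of
        {ok, Topic} -> {ok, Topic};
        error -> {error, protocol_error}
    end.

%% Outbound: reuse an existing alias, or take the next id while under Max.
assign_outbound(Topic, Aliases, Max) ->
    case maps:find(Topic, Aliases) of
        {ok, AliasId} ->
            {AliasId, Aliases};
        error ->
            AliasId = maps:size(Aliases) + 1,
            case AliasId =< Max of
                true -> {AliasId, Aliases#{Topic => AliasId}};
                false -> {no_alias, Aliases}
            end
    end.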
%%-------------------------------------------------------------------- %% Check Pub Caps -check_pub_caps(#mqtt_packet{header = #mqtt_packet_header{qos = QoS, - retain = Retain}, - variable = #mqtt_packet_publish{topic_name = Topic} - }, - #channel{clientinfo = #{zone := Zone}}) -> +check_pub_caps( + #mqtt_packet{ + header = #mqtt_packet_header{ + qos = QoS, + retain = Retain + }, + variable = #mqtt_packet_publish{topic_name = Topic} + }, + #channel{clientinfo = #{zone := Zone}} +) -> emqx_mqtt_caps:check_pub(Zone, #{qos => QoS, retain => Retain, topic => Topic}). %%-------------------------------------------------------------------- @@ -1563,14 +1802,16 @@ check_pub_caps(#mqtt_packet{header = #mqtt_packet_header{qos = QoS, check_sub_authzs(TopicFilters, Channel) -> check_sub_authzs(TopicFilters, Channel, []). -check_sub_authzs([ TopicFilter = {Topic, _} | More], - Channel = #channel{clientinfo = ClientInfo}, - Acc) -> +check_sub_authzs( + [TopicFilter = {Topic, _} | More], + Channel = #channel{clientinfo = ClientInfo}, + Acc +) -> case emqx_access_control:authorize(ClientInfo, subscribe, Topic) of allow -> - check_sub_authzs(More, Channel, [ {TopicFilter, 0} | Acc]); + check_sub_authzs(More, Channel, [{TopicFilter, 0} | Acc]); deny -> - check_sub_authzs(More, Channel, [ {TopicFilter, ?RC_NOT_AUTHORIZED} | Acc]) + check_sub_authzs(More, Channel, [{TopicFilter, ?RC_NOT_AUTHORIZED} | Acc]) end; check_sub_authzs([], _Channel, Acc) -> lists:reverse(Acc). @@ -1586,7 +1827,8 @@ check_sub_caps(TopicFilter, SubOpts, #channel{clientinfo = #{zone := Zone}}) -> put_subid_in_subopts(#{'Subscription-Identifier' := SubId}, TopicFilters) -> [{Topic, SubOpts#{subid => SubId}} || {Topic, SubOpts} <- TopicFilters]; -put_subid_in_subopts(_Properties, TopicFilters) -> TopicFilters. +put_subid_in_subopts(_Properties, TopicFilters) -> + TopicFilters. %%-------------------------------------------------------------------- %% Enrich SubOpts @@ -1600,22 +1842,30 @@ enrich_subopts(SubOpts, #channel{clientinfo = #{zone := Zone, is_bridge := IsBri %%-------------------------------------------------------------------- %% Enrich ConnAck Caps -enrich_connack_caps(AckProps, ?IS_MQTT_V5 = #channel{clientinfo = #{ - zone := Zone}}) -> - #{max_packet_size := MaxPktSize, - max_qos_allowed := MaxQoS, - retain_available := Retain, - max_topic_alias := MaxAlias, - shared_subscription := Shared, - wildcard_subscription := Wildcard - } = emqx_mqtt_caps:get_caps(Zone), - NAckProps = AckProps#{'Retain-Available' => flag(Retain), - 'Maximum-Packet-Size' => MaxPktSize, - 'Topic-Alias-Maximum' => MaxAlias, - 'Wildcard-Subscription-Available' => flag(Wildcard), - 'Subscription-Identifier-Available' => 1, - 'Shared-Subscription-Available' => flag(Shared) - }, +enrich_connack_caps( + AckProps, + ?IS_MQTT_V5 = #channel{ + clientinfo = #{ + zone := Zone + } + } +) -> + #{ + max_packet_size := MaxPktSize, + max_qos_allowed := MaxQoS, + retain_available := Retain, + max_topic_alias := MaxAlias, + shared_subscription := Shared, + wildcard_subscription := Wildcard + } = emqx_mqtt_caps:get_caps(Zone), + NAckProps = AckProps#{ + 'Retain-Available' => flag(Retain), + 'Maximum-Packet-Size' => MaxPktSize, + 'Topic-Alias-Maximum' => MaxAlias, + 'Wildcard-Subscription-Available' => flag(Wildcard), + 'Subscription-Identifier-Available' => 1, + 'Shared-Subscription-Available' => flag(Shared) + }, %% MQTT 5.0 - 3.2.2.3.4: %% It is a Protocol Error to include Maximum QoS more than once, %% or to have a value other than 0 or 1. 
If the Maximum QoS is absent, @@ -1624,8 +1874,8 @@ enrich_connack_caps(AckProps, ?IS_MQTT_V5 = #channel{clientinfo = #{ true -> NAckProps; _ -> NAckProps#{'Maximum-QoS' => MaxQoS} end; - -enrich_connack_caps(AckProps, _Channel) -> AckProps. +enrich_connack_caps(AckProps, _Channel) -> + AckProps. %%-------------------------------------------------------------------- %% Enrich server keepalive @@ -1635,55 +1885,76 @@ enrich_server_keepalive(AckProps, ?IS_MQTT_V5 = #channel{clientinfo = #{zone := disabled -> AckProps; Keepalive -> AckProps#{'Server-Keep-Alive' => Keepalive} end; - -enrich_server_keepalive(AckProps, _Channel) -> AckProps. +enrich_server_keepalive(AckProps, _Channel) -> + AckProps. %%-------------------------------------------------------------------- %% Enrich response information -enrich_response_information(AckProps, #channel{conninfo = #{conn_props := ConnProps}, - clientinfo = #{zone := Zone}}) -> +enrich_response_information(AckProps, #channel{ + conninfo = #{conn_props := ConnProps}, + clientinfo = #{zone := Zone} +}) -> case emqx_mqtt_props:get('Request-Response-Information', ConnProps, 0) of - 0 -> AckProps; - 1 -> AckProps#{'Response-Information' => - case get_mqtt_conf(Zone, response_information, "") of - "" -> undefined; - RspInfo -> RspInfo - end} + 0 -> + AckProps; + 1 -> + AckProps#{ + 'Response-Information' => + case get_mqtt_conf(Zone, response_information, "") of + "" -> undefined; + RspInfo -> RspInfo + end + } end. %%-------------------------------------------------------------------- %% Enrich Assigned ClientId -enrich_assigned_clientid(AckProps, #channel{conninfo = ConnInfo, - clientinfo = #{clientid := ClientId}}) -> +enrich_assigned_clientid(AckProps, #channel{ + conninfo = ConnInfo, + clientinfo = #{clientid := ClientId} +}) -> case maps:get(clientid, ConnInfo) of - <<>> -> %% Original ClientId is null. + %% Original ClientId is null. + <<>> -> AckProps#{'Assigned-Client-Identifier' => ClientId}; - _Origin -> AckProps + _Origin -> + AckProps end. %%-------------------------------------------------------------------- %% Ensure connected -ensure_connected(Channel = #channel{conninfo = ConnInfo, - clientinfo = ClientInfo}) -> +ensure_connected( + Channel = #channel{ + conninfo = ConnInfo, + clientinfo = ClientInfo + } +) -> NConnInfo = ConnInfo#{connected_at => erlang:system_time(millisecond)}, ok = run_hooks('client.connected', [ClientInfo, NConnInfo]), - Channel#channel{conninfo = NConnInfo, - conn_state = connected - }. + Channel#channel{ + conninfo = NConnInfo, + conn_state = connected + }. %%-------------------------------------------------------------------- %% Init Alias Maximum -init_alias_maximum(#mqtt_packet_connect{proto_ver = ?MQTT_PROTO_V5, - properties = Properties}, - #{zone := Zone} = _ClientInfo) -> - #{outbound => emqx_mqtt_props:get('Topic-Alias-Maximum', Properties, 0), - inbound => maps:get(max_topic_alias, emqx_mqtt_caps:get_caps(Zone)) - }; -init_alias_maximum(_ConnPkt, _ClientInfo) -> undefined. +init_alias_maximum( + #mqtt_packet_connect{ + proto_ver = ?MQTT_PROTO_V5, + properties = Properties + }, + #{zone := Zone} = _ClientInfo +) -> + #{ + outbound => emqx_mqtt_props:get('Topic-Alias-Maximum', Properties, 0), + inbound => maps:get(max_topic_alias, emqx_mqtt_caps:get_caps(Zone)) + }; +init_alias_maximum(_ConnPkt, _ClientInfo) -> + undefined. %%-------------------------------------------------------------------- %% Ensure Keepalive @@ -1691,13 +1962,14 @@ init_alias_maximum(_ConnPkt, _ClientInfo) -> undefined. 
%% MQTT 5 ensure_keepalive(#{'Server-Keep-Alive' := Interval}, Channel = #channel{conninfo = ConnInfo}) -> ensure_keepalive_timer(Interval, Channel#channel{conninfo = ConnInfo#{keepalive => Interval}}); - %% MQTT 3,4 ensure_keepalive(_AckProps, Channel = #channel{conninfo = ConnInfo}) -> ensure_keepalive_timer(maps:get(keepalive, ConnInfo), Channel). -ensure_keepalive_timer(0, Channel) -> Channel; -ensure_keepalive_timer(disabled, Channel) -> Channel; +ensure_keepalive_timer(0, Channel) -> + Channel; +ensure_keepalive_timer(disabled, Channel) -> + Channel; ensure_keepalive_timer(Interval, Channel = #channel{clientinfo = #{zone := Zone}}) -> Backoff = get_mqtt_conf(Zone, keepalive_backoff), RecvOct = emqx_pd:get_counter(incoming_bytes), @@ -1717,10 +1989,12 @@ clear_keepalive(Channel = #channel{timers = Timers}) -> maybe_resume_session(#channel{resuming = false}) -> ignore; -maybe_resume_session(#channel{session = Session, - resuming = true, - pendings = Pendings, - clientinfo = #{clientid := ClientId} = ClientInfo}) -> +maybe_resume_session(#channel{ + session = Session, + resuming = true, + pendings = Pendings, + clientinfo = #{clientid := ClientId} = ClientInfo +}) -> {ok, Publishes, Session1} = emqx_session:replay(ClientInfo, Session), %% We consider queued/dropped messages as delivered since they are now in the session state. emqx_persistent_session:mark_as_delivered(ClientId, Pendings), @@ -1736,10 +2010,12 @@ maybe_resume_session(#channel{session = Session, maybe_shutdown(Reason, Channel = #channel{conninfo = ConnInfo}) -> case maps:get(expiry_interval, ConnInfo) of - ?UINT_MAX -> {ok, Channel}; + ?UINT_MAX -> + {ok, Channel}; I when I > 0 -> {ok, ensure_timer(expire_timer, I, Channel)}; - _ -> shutdown(Reason, Channel) + _ -> + shutdown(Reason, Channel) end. %%-------------------------------------------------------------------- @@ -1752,8 +2028,13 @@ parse_topic_filters(TopicFilters) -> %%-------------------------------------------------------------------- %% Ensure disconnected -ensure_disconnected(Reason, Channel = #channel{conninfo = ConnInfo, - clientinfo = ClientInfo}) -> +ensure_disconnected( + Reason, + Channel = #channel{ + conninfo = ConnInfo, + clientinfo = ClientInfo + } +) -> NConnInfo = ConnInfo#{disconnected_at => erlang:system_time(millisecond)}, ok = run_hooks('client.disconnected', [ClientInfo, Reason, NConnInfo]), ChanPid = self(), @@ -1786,7 +2067,7 @@ publish_will_msg(Msg) -> %% Disconnect Reason disconnect_reason(?RC_SUCCESS) -> normal; -disconnect_reason(ReasonCode) -> emqx_reason_codes:name(ReasonCode). +disconnect_reason(ReasonCode) -> emqx_reason_codes:name(ReasonCode). reason_code(takenover) -> ?RC_SESSION_TAKEN_OVER; reason_code(discarded) -> ?RC_SESSION_TAKEN_OVER; @@ -1798,20 +2079,21 @@ reason_code(_) -> ?RC_NORMAL_DISCONNECTION. -compile({inline, [run_hooks/2, run_hooks/3]}). run_hooks(Name, Args) -> - ok = emqx_metrics:inc(Name), emqx_hooks:run(Name, Args). + ok = emqx_metrics:inc(Name), + emqx_hooks:run(Name, Args). run_hooks(Name, Args, Acc) -> - ok = emqx_metrics:inc(Name), emqx_hooks:run_fold(Name, Args, Acc). + ok = emqx_metrics:inc(Name), + emqx_hooks:run_fold(Name, Args, Acc). -compile({inline, [find_alias/3, save_alias/4]}). find_alias(_, _, undefined) -> error; -find_alias(inbound, AliasId, _TopicAliases = #{inbound := Aliases}) -> - maps:find(AliasId, Aliases); -find_alias(outbound, Topic, _TopicAliases = #{outbound := Aliases}) -> - maps:find(Topic, Aliases). 
+find_alias(inbound, AliasId, _TopicAliases = #{inbound := Aliases}) -> maps:find(AliasId, Aliases); +find_alias(outbound, Topic, _TopicAliases = #{outbound := Aliases}) -> maps:find(Topic, Aliases). -save_alias(_, _, _, undefined) -> false; +save_alias(_, _, _, undefined) -> + false; save_alias(inbound, AliasId, Topic, TopicAliases = #{inbound := Aliases}) -> NAliases = maps:put(AliasId, Topic, Aliases), TopicAliases#{inbound => NAliases}; @@ -1839,18 +2121,23 @@ shutdown(success, Reply, Packet, Channel) -> shutdown(Reason, Reply, Packet, Channel) -> {shutdown, Reason, Reply, Packet, Channel}. -disconnect_and_shutdown(Reason, Reply, Channel = ?IS_MQTT_V5 - = #channel{conn_state = ConnState}) - when ConnState =:= connected orelse ConnState =:= reauthenticating -> +disconnect_and_shutdown( + Reason, + Reply, + Channel = + ?IS_MQTT_V5 = + #channel{conn_state = ConnState} +) when + ConnState =:= connected orelse ConnState =:= reauthenticating +-> shutdown(Reason, Reply, ?DISCONNECT_PACKET(reason_code(Reason)), Channel); - disconnect_and_shutdown(Reason, Reply, Channel) -> shutdown(Reason, Reply, Channel). -sp(true) -> 1; +sp(true) -> 1; sp(false) -> 0. -flag(true) -> 1; +flag(true) -> 1; flag(false) -> 0. get_mqtt_conf(Zone, Key) -> @@ -1865,4 +2152,4 @@ get_mqtt_conf(Zone, Key, Default) -> set_field(Name, Value, Channel) -> Pos = emqx_misc:index_of(Name, record_info(fields, channel)), - setelement(Pos+1, Channel, Value). + setelement(Pos + 1, Channel, Value). diff --git a/apps/emqx/src/emqx_cm.erl b/apps/emqx/src/emqx_cm.erl index 1bb0d5005..d39f43686 100644 --- a/apps/emqx/src/emqx_cm.erl +++ b/apps/emqx/src/emqx_cm.erl @@ -23,80 +23,89 @@ -include("types.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). - -export([start_link/0]). --export([ register_channel/3 - , unregister_channel/1 - , insert_channel_info/3 - ]). +-export([ + register_channel/3, + unregister_channel/1, + insert_channel_info/3 +]). -export([connection_closed/1]). --export([ get_chan_info/1 - , get_chan_info/2 - , set_chan_info/2 - ]). +-export([ + get_chan_info/1, + get_chan_info/2, + set_chan_info/2 +]). --export([ get_chan_stats/1 - , get_chan_stats/2 - , set_chan_stats/2 - ]). +-export([ + get_chan_stats/1, + get_chan_stats/2, + set_chan_stats/2 +]). -export([get_chann_conn_mod/2]). --export([ open_session/3 - , discard_session/1 - , discard_session/2 - , takeover_session/1 - , takeover_session/2 - , kick_session/1 - , kick_session/2 - ]). +-export([ + open_session/3, + discard_session/1, + discard_session/2, + takeover_session/1, + takeover_session/2, + kick_session/1, + kick_session/2 +]). --export([ lookup_channels/1 - , lookup_channels/2 +-export([ + lookup_channels/1, + lookup_channels/2, - , lookup_client/1 - ]). + lookup_client/1 +]). %% Test/debug interface --export([ all_channels/0 - , all_client_ids/0 - ]). +-export([ + all_channels/0, + all_client_ids/0 +]). %% gen_server callbacks --export([ init/1 - , handle_call/3 - , handle_cast/2 - , handle_info/2 - , terminate/2 - , code_change/3 - ]). +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2, + code_change/3 +]). %% Internal export --export([ stats_fun/0 - , clean_down/1 - , mark_channel_connected/1 - , mark_channel_disconnected/1 - , get_connected_client_count/0 +-export([ + stats_fun/0, + clean_down/1, + mark_channel_connected/1, + mark_channel_disconnected/1, + get_connected_client_count/0, - , do_kick_session/3 - , do_get_chan_stats/2 - , do_get_chan_info/2 - , do_get_chann_conn_mod/2 - ]). 
+ do_kick_session/3, + do_get_chan_stats/2, + do_get_chan_info/2, + do_get_chann_conn_mod/2 +]). --export_type([ channel_info/0 - , chan_pid/0 - ]). +-export_type([ + channel_info/0, + chan_pid/0 +]). --type(chan_pid() :: pid()). +-type chan_pid() :: pid(). --type(channel_info() :: { _Chan :: {emqx_types:clientid(), pid()} - , _Info :: emqx_types:infos() - , _Stats :: emqx_types:stats() - }). +-type channel_info() :: { + _Chan :: {emqx_types:clientid(), pid()}, + _Info :: emqx_types:infos(), + _Stats :: emqx_types:stats() +}. -include("emqx_cm.hrl"). @@ -106,12 +115,12 @@ -define(CHAN_INFO_TAB, emqx_channel_info). -define(CHAN_LIVE_TAB, emqx_channel_live). --define(CHAN_STATS, - [{?CHAN_TAB, 'channels.count', 'channels.max'}, - {?CHAN_TAB, 'sessions.count', 'sessions.max'}, - {?CHAN_CONN_TAB, 'connections.count', 'connections.max'}, - {?CHAN_LIVE_TAB, 'live_connections.count', 'live_connections.max'} - ]). +-define(CHAN_STATS, [ + {?CHAN_TAB, 'channels.count', 'channels.max'}, + {?CHAN_TAB, 'sessions.count', 'sessions.max'}, + {?CHAN_CONN_TAB, 'connections.count', 'connections.max'}, + {?CHAN_LIVE_TAB, 'live_connections.count', 'live_connections.max'} +]). %% Batch drain -define(BATCH_SIZE, 100000). @@ -120,12 +129,13 @@ -define(CM, ?MODULE). %% linting overrides --elvis([ {elvis_style, invalid_dynamic_call, #{ignore => [emqx_cm]}} - , {elvis_style, god_modules, #{ignore => [emqx_cm]}} - ]). +-elvis([ + {elvis_style, invalid_dynamic_call, #{ignore => [emqx_cm]}}, + {elvis_style, god_modules, #{ignore => [emqx_cm]}} +]). %% @doc Start the channel manager. --spec(start_link() -> startlink_ret()). +-spec start_link() -> startlink_ret(). start_link() -> gen_server:start_link({local, ?CM}, ?MODULE, [], []). @@ -134,9 +144,11 @@ start_link() -> %%-------------------------------------------------------------------- %% @doc Insert/Update the channel info and stats to emqx_channel table --spec(insert_channel_info(emqx_types:clientid(), - emqx_types:infos(), - emqx_types:stats()) -> ok). +-spec insert_channel_info( + emqx_types:clientid(), + emqx_types:infos(), + emqx_types:stats() +) -> ok. insert_channel_info(ClientId, Info, Stats) -> Chan = {ClientId, self()}, true = ets:insert(?CHAN_INFO_TAB, {Chan, Info, Stats}), @@ -160,7 +172,7 @@ register_channel(ClientId, ChanPid, #{conn_mod := ConnMod}) when is_pid(ChanPid) cast({registered, Chan}). %% @doc Unregister a channel. --spec(unregister_channel(emqx_types:clientid()) -> ok). +-spec unregister_channel(emqx_types:clientid()) -> ok. unregister_channel(ClientId) when is_binary(ClientId) -> true = do_unregister_channel({ClientId, self()}), ok. @@ -172,143 +184,158 @@ do_unregister_channel(Chan) -> true = ets:delete(?CHAN_INFO_TAB, Chan), ets:delete_object(?CHAN_TAB, Chan). --spec(connection_closed(emqx_types:clientid()) -> true). +-spec connection_closed(emqx_types:clientid()) -> true. connection_closed(ClientId) -> connection_closed(ClientId, self()). --spec(connection_closed(emqx_types:clientid(), chan_pid()) -> true). +-spec connection_closed(emqx_types:clientid(), chan_pid()) -> true. connection_closed(ClientId, ChanPid) -> ets:delete_object(?CHAN_CONN_TAB, {ClientId, ChanPid}). %% @doc Get info of a channel. --spec(get_chan_info(emqx_types:clientid()) -> maybe(emqx_types:infos())). +-spec get_chan_info(emqx_types:clientid()) -> maybe(emqx_types:infos()). get_chan_info(ClientId) -> with_channel(ClientId, fun(ChanPid) -> get_chan_info(ClientId, ChanPid) end). 
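insert_channel_info/3 above stores one {Chan, Info, Stats} tuple per channel in ?CHAN_INFO_TAB, and the do_get_chan_info/do_get_chan_stats readers further down pull fields 2 and 3 back out with ets:lookup_element/3, which raises badarg once the key is gone (hence the try ... catch wrappers). A self-contained toy version of that read/write pair, with an invented table and invented values:

    -module(chan_info_demo).
    -export([demo/0]).

    demo() ->
        Tab = ets:new(chan_info_demo_tab, [set, public]),
        Chan = {<<"client-1">>, self()},
        true = ets:insert(Tab, {Chan, #{proto_ver => 5}, [{recv_cnt, 0}]}),
        %% position 2 holds the info map, position 3 the stats list;
        %% ets:lookup_element/3 raises badarg if Chan is not in the table
        Info = ets:lookup_element(Tab, Chan, 2),
        Stats =
            try
                ets:lookup_element(Tab, Chan, 3)
            catch
                error:badarg -> undefined
            end,
        {Info, Stats}.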
--spec(do_get_chan_info(emqx_types:clientid(), chan_pid()) - -> maybe(emqx_types:infos())). +-spec do_get_chan_info(emqx_types:clientid(), chan_pid()) -> + maybe(emqx_types:infos()). do_get_chan_info(ClientId, ChanPid) -> Chan = {ClientId, ChanPid}, - try ets:lookup_element(?CHAN_INFO_TAB, Chan, 2) + try + ets:lookup_element(?CHAN_INFO_TAB, Chan, 2) catch error:badarg -> undefined end. --spec(get_chan_info(emqx_types:clientid(), chan_pid()) - -> maybe(emqx_types:infos())). +-spec get_chan_info(emqx_types:clientid(), chan_pid()) -> + maybe(emqx_types:infos()). get_chan_info(ClientId, ChanPid) -> wrap_rpc(emqx_cm_proto_v1:get_chan_info(ClientId, ChanPid)). %% @doc Update infos of the channel. --spec(set_chan_info(emqx_types:clientid(), emqx_types:attrs()) -> boolean()). +-spec set_chan_info(emqx_types:clientid(), emqx_types:attrs()) -> boolean(). set_chan_info(ClientId, Info) when is_binary(ClientId) -> Chan = {ClientId, self()}, - try ets:update_element(?CHAN_INFO_TAB, Chan, {2, Info}) + try + ets:update_element(?CHAN_INFO_TAB, Chan, {2, Info}) catch error:badarg -> false end. %% @doc Get channel's stats. --spec(get_chan_stats(emqx_types:clientid()) -> maybe(emqx_types:stats())). +-spec get_chan_stats(emqx_types:clientid()) -> maybe(emqx_types:stats()). get_chan_stats(ClientId) -> with_channel(ClientId, fun(ChanPid) -> get_chan_stats(ClientId, ChanPid) end). --spec(do_get_chan_stats(emqx_types:clientid(), chan_pid()) - -> maybe(emqx_types:stats())). +-spec do_get_chan_stats(emqx_types:clientid(), chan_pid()) -> + maybe(emqx_types:stats()). do_get_chan_stats(ClientId, ChanPid) -> Chan = {ClientId, ChanPid}, - try ets:lookup_element(?CHAN_INFO_TAB, Chan, 3) + try + ets:lookup_element(?CHAN_INFO_TAB, Chan, 3) catch error:badarg -> undefined end. --spec(get_chan_stats(emqx_types:clientid(), chan_pid()) - -> maybe(emqx_types:stats())). +-spec get_chan_stats(emqx_types:clientid(), chan_pid()) -> + maybe(emqx_types:stats()). get_chan_stats(ClientId, ChanPid) -> wrap_rpc(emqx_cm_proto_v1:get_chan_stats(ClientId, ChanPid)). %% @doc Set channel's stats. --spec(set_chan_stats(emqx_types:clientid(), emqx_types:stats()) -> boolean()). +-spec set_chan_stats(emqx_types:clientid(), emqx_types:stats()) -> boolean(). set_chan_stats(ClientId, Stats) when is_binary(ClientId) -> set_chan_stats(ClientId, self(), Stats). --spec(set_chan_stats(emqx_types:clientid(), chan_pid(), emqx_types:stats()) - -> boolean()). +-spec set_chan_stats(emqx_types:clientid(), chan_pid(), emqx_types:stats()) -> + boolean(). set_chan_stats(ClientId, ChanPid, Stats) -> Chan = {ClientId, ChanPid}, - try ets:update_element(?CHAN_INFO_TAB, Chan, {3, Stats}) + try + ets:update_element(?CHAN_INFO_TAB, Chan, {3, Stats}) catch error:badarg -> false end. %% @doc Open a session. --spec(open_session(boolean(), emqx_types:clientinfo(), emqx_types:conninfo()) - -> {ok, #{session := emqx_session:session(), - present := boolean(), - pendings => list()}} - | {error, Reason :: term()}). +-spec open_session(boolean(), emqx_types:clientinfo(), emqx_types:conninfo()) -> + {ok, #{ + session := emqx_session:session(), + present := boolean(), + pendings => list() + }} + | {error, Reason :: term()}. 
open_session(true, ClientInfo = #{clientid := ClientId}, ConnInfo) -> Self = self(), CleanStart = fun(_) -> - ok = discard_session(ClientId), - ok = emqx_persistent_session:discard_if_present(ClientId), - Session = create_session(ClientInfo, ConnInfo), - Session1 = emqx_persistent_session:persist(ClientInfo, ConnInfo, Session), - register_channel(ClientId, Self, ConnInfo), - {ok, #{session => Session1, present => false}} - end, + ok = discard_session(ClientId), + ok = emqx_persistent_session:discard_if_present(ClientId), + Session = create_session(ClientInfo, ConnInfo), + Session1 = emqx_persistent_session:persist(ClientInfo, ConnInfo, Session), + register_channel(ClientId, Self, ConnInfo), + {ok, #{session => Session1, present => false}} + end, emqx_cm_locker:trans(ClientId, CleanStart); - open_session(false, ClientInfo = #{clientid := ClientId}, ConnInfo) -> Self = self(), ResumeStart = fun(_) -> - CreateSess = - fun() -> - Session = create_session(ClientInfo, ConnInfo), - Session1 = emqx_persistent_session:persist( - ClientInfo,ConnInfo, Session), - register_channel(ClientId, Self, ConnInfo), - {ok, #{session => Session1, present => false}} - end, - case takeover_session(ClientId) of - {persistent, Session} -> - %% This is a persistent session without a managing process. - {Session1, Pendings} = - emqx_persistent_session:resume(ClientInfo, ConnInfo, Session), - register_channel(ClientId, Self, ConnInfo), + CreateSess = + fun() -> + Session = create_session(ClientInfo, ConnInfo), + Session1 = emqx_persistent_session:persist( + ClientInfo, ConnInfo, Session + ), + register_channel(ClientId, Self, ConnInfo), + {ok, #{session => Session1, present => false}} + end, + case takeover_session(ClientId) of + {persistent, Session} -> + %% This is a persistent session without a managing process. 
+ {Session1, Pendings} = + emqx_persistent_session:resume(ClientInfo, ConnInfo, Session), + register_channel(ClientId, Self, ConnInfo), - {ok, #{session => Session1, - present => true, - pendings => Pendings}}; - {living, ConnMod, ChanPid, Session} -> - ok = emqx_session:resume(ClientInfo, Session), - case request_stepdown( - {takeover, 'end'}, - ConnMod, - ChanPid) of - {ok, Pendings} -> - Session1 = emqx_persistent_session:persist( - ClientInfo, ConnInfo, Session), - register_channel(ClientId, Self, ConnInfo), - {ok, #{session => Session1, - present => true, - pendings => Pendings}}; - {error, _} -> - CreateSess() - end; - {expired, OldSession} -> - _ = emqx_persistent_session:discard(ClientId, OldSession), - Session = create_session(ClientInfo, ConnInfo), - Session1 = emqx_persistent_session:persist( ClientInfo - , ConnInfo - , Session - ), - register_channel(ClientId, Self, ConnInfo), - {ok, #{session => Session1, present => false}}; - none -> - CreateSess() - end - end, + {ok, #{ + session => Session1, + present => true, + pendings => Pendings + }}; + {living, ConnMod, ChanPid, Session} -> + ok = emqx_session:resume(ClientInfo, Session), + case + request_stepdown( + {takeover, 'end'}, + ConnMod, + ChanPid + ) + of + {ok, Pendings} -> + Session1 = emqx_persistent_session:persist( + ClientInfo, ConnInfo, Session + ), + register_channel(ClientId, Self, ConnInfo), + {ok, #{ + session => Session1, + present => true, + pendings => Pendings + }}; + {error, _} -> + CreateSess() + end; + {expired, OldSession} -> + _ = emqx_persistent_session:discard(ClientId, OldSession), + Session = create_session(ClientInfo, ConnInfo), + Session1 = emqx_persistent_session:persist( + ClientInfo, + ConnInfo, + Session + ), + register_channel(ClientId, Self, ConnInfo), + {ok, #{session => Session1, present => false}}; + none -> + CreateSess() + end + end, emqx_cm_locker:trans(ClientId, ResumeStart). create_session(ClientInfo, ConnInfo) -> @@ -318,36 +345,40 @@ create_session(ClientInfo, ConnInfo) -> ok = emqx_hooks:run('session.created', [ClientInfo, emqx_session:info(Session)]), Session. -get_session_confs(#{zone := Zone, clientid := ClientId}, #{receive_maximum := MaxInflight, expiry_interval := EI}) -> - #{clientid => ClientId, - max_subscriptions => get_mqtt_conf(Zone, max_subscriptions), - upgrade_qos => get_mqtt_conf(Zone, upgrade_qos), - max_inflight => MaxInflight, - retry_interval => get_mqtt_conf(Zone, retry_interval), - await_rel_timeout => get_mqtt_conf(Zone, await_rel_timeout), - mqueue => mqueue_confs(Zone), - %% TODO: Add conf for allowing/disallowing persistent sessions. - %% Note that the connection info is already enriched to have - %% default config values for session expiry. - is_persistent => EI > 0 - }. +get_session_confs(#{zone := Zone, clientid := ClientId}, #{ + receive_maximum := MaxInflight, expiry_interval := EI +}) -> + #{ + clientid => ClientId, + max_subscriptions => get_mqtt_conf(Zone, max_subscriptions), + upgrade_qos => get_mqtt_conf(Zone, upgrade_qos), + max_inflight => MaxInflight, + retry_interval => get_mqtt_conf(Zone, retry_interval), + await_rel_timeout => get_mqtt_conf(Zone, await_rel_timeout), + mqueue => mqueue_confs(Zone), + %% TODO: Add conf for allowing/disallowing persistent sessions. + %% Note that the connection info is already enriched to have + %% default config values for session expiry. + is_persistent => EI > 0 + }. 
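get_session_confs/2 above folds the zone's [mqtt, ...] settings and the negotiated CONNECT properties into a single per-client map. Roughly, the result has the shape below; every value here is a placeholder for illustration, not a shipped default:

    example_session_confs() ->
        #{
            clientid => <<"c1">>,
            max_subscriptions => infinity,
            upgrade_qos => false,
            %% receive_maximum taken from the CONNECT properties
            max_inflight => 32,
            %% milliseconds (placeholder values)
            retry_interval => 30000,
            await_rel_timeout => 300000,
            mqueue => #{
                max_len => 1000,
                store_qos0 => true,
                priorities => disabled,
                default_priority => lowest
            },
            %% a non-zero session expiry interval makes the session persistent
            is_persistent => true
        }.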
mqueue_confs(Zone) -> - #{max_len => get_mqtt_conf(Zone, max_mqueue_len), - store_qos0 => get_mqtt_conf(Zone, mqueue_store_qos0), - priorities => get_mqtt_conf(Zone, mqueue_priorities), - default_priority => get_mqtt_conf(Zone, mqueue_default_priority) - }. + #{ + max_len => get_mqtt_conf(Zone, max_mqueue_len), + store_qos0 => get_mqtt_conf(Zone, mqueue_store_qos0), + priorities => get_mqtt_conf(Zone, mqueue_priorities), + default_priority => get_mqtt_conf(Zone, mqueue_default_priority) + }. get_mqtt_conf(Zone, Key) -> emqx_config:get_zone_conf(Zone, [mqtt, Key]). %% @doc Try to takeover a session. -spec takeover_session(emqx_types:clientid()) -> - none - | {living, atom(), pid(), emqx_session:session()} - | {persistent, emqx_session:session()} - | {expired, emqx_session:session()}. + none + | {living, atom(), pid(), emqx_session:session()} + | {persistent, emqx_session:session()} + | {expired, emqx_session:session()}. takeover_session(ClientId) -> case lookup_channels(ClientId) of [] -> @@ -357,20 +388,28 @@ takeover_session(ClientId) -> ChanPids -> [ChanPid | StalePids] = lists:reverse(ChanPids), ?SLOG(warning, #{msg => "more_than_one_channel_found", chan_pids => ChanPids}), - lists:foreach(fun(StalePid) -> - catch discard_session(ClientId, StalePid) - end, StalePids), + lists:foreach( + fun(StalePid) -> + catch discard_session(ClientId, StalePid) + end, + StalePids + ), takeover_session(ClientId, ChanPid) end. takeover_session(ClientId, Pid) -> - try do_takeover_session(ClientId, Pid) + try + do_takeover_session(ClientId, Pid) catch - _ : R when R == noproc; - R == timeout; - R == unexpected_exception -> %% request_stepdown/3 + _:R when + R == noproc; + R == timeout; + %% request_stepdown/3 + R == unexpected_exception + -> emqx_persistent_session:lookup(ClientId); - _ : {'EXIT', {noproc, _}} -> % rpc_call/3 + % rpc_call/3 + _:{'EXIT', {noproc, _}} -> emqx_persistent_session:lookup(ClientId) end. @@ -390,7 +429,7 @@ do_takeover_session(ClientId, ChanPid) -> wrap_rpc(emqx_cm_proto_v1:takeover_session(ClientId, ChanPid)). %% @doc Discard all the sessions identified by the ClientId. --spec(discard_session(emqx_types:clientid()) -> ok). +-spec discard_session(emqx_types:clientid()) -> ok. discard_session(ClientId) when is_binary(ClientId) -> case lookup_channels(ClientId) of [] -> ok; @@ -401,11 +440,12 @@ discard_session(ClientId) when is_binary(ClientId) -> %% If failed to kick (e.g. timeout) force a kill. %% Keeping the stale pid around, or returning error or raise an exception %% benefits nobody. --spec request_stepdown(Action, module(), pid()) - -> ok - | {ok, emqx_session:session() | list(emqx_type:deliver())} - | {error, term()} - when Action :: kick | discard | {takeover, 'begin'} | {takeover, 'end'}. +-spec request_stepdown(Action, module(), pid()) -> + ok + | {ok, emqx_session:session() | list(emqx_type:deliver())} + | {error, term()} +when + Action :: kick | discard | {takeover, 'begin'} | {takeover, 'end'}. 
request_stepdown(Action, ConnMod, Pid) -> Timeout = case Action == kick orelse Action == discard of @@ -420,27 +460,40 @@ request_stepdown(Action, ConnMod, Pid) -> ok -> ok; Reply -> {ok, Reply} catch - _ : noproc -> % emqx_ws_connection: call + % emqx_ws_connection: call + _:noproc -> ok = ?tp(debug, "session_already_gone", #{pid => Pid, action => Action}), {error, noproc}; - _ : {noproc, _} -> % emqx_connection: gen_server:call + % emqx_connection: gen_server:call + _:{noproc, _} -> ok = ?tp(debug, "session_already_gone", #{pid => Pid, action => Action}), {error, noproc}; - _ : {shutdown, _} -> + _:{shutdown, _} -> ok = ?tp(debug, "session_already_shutdown", #{pid => Pid, action => Action}), {error, noproc}; - _ : {{shutdown, _}, _} -> + _:{{shutdown, _}, _} -> ok = ?tp(debug, "session_already_shutdown", #{pid => Pid, action => Action}), {error, noproc}; - _ : {timeout, {gen_server, call, _}} -> - ?tp(warning, "session_stepdown_request_timeout", - #{pid => Pid, action => Action, stale_channel => stale_channel_info(Pid)}), + _:{timeout, {gen_server, call, _}} -> + ?tp( + warning, + "session_stepdown_request_timeout", + #{pid => Pid, action => Action, stale_channel => stale_channel_info(Pid)} + ), ok = force_kill(Pid), {error, timeout}; - _ : Error : St -> - ?tp(error, "session_stepdown_request_exception", - #{pid => Pid, action => Action, reason => Error, stacktrace => St, - stale_channel => stale_channel_info(Pid)}), + _:Error:St -> + ?tp( + error, + "session_stepdown_request_exception", + #{ + pid => Pid, + action => Action, + reason => Error, + stacktrace => St, + stale_channel => stale_channel_info(Pid) + } + ), ok = force_kill(Pid), {error, unexpected_exception} end, @@ -477,33 +530,46 @@ kick_session(Action, ClientId, ChanPid) -> try wrap_rpc(emqx_cm_proto_v1:kick_session(Action, ClientId, ChanPid)) catch - Error : Reason -> + Error:Reason -> %% This should mostly be RPC failures. %% However, if the node is still running the old version %% code (prior to emqx app 4.3.10) some of the RPC handler %% exceptions may get propagated to a new version node - ?SLOG(error, #{ msg => "failed_to_kick_session_on_remote_node" - , node => node(ChanPid) - , action => Action - , error => Error - , reason => Reason - }, - #{clientid => ClientId}) + ?SLOG( + error, + #{ + msg => "failed_to_kick_session_on_remote_node", + node => node(ChanPid), + action => Action, + error => Error, + reason => Reason + }, + #{clientid => ClientId} + ) end. kick_session(ClientId) -> case lookup_channels(ClientId) of [] -> - ?SLOG(warning, #{msg => "kicked_an_unknown_session"}, - #{clientid => ClientId}), + ?SLOG( + warning, + #{msg => "kicked_an_unknown_session"}, + #{clientid => ClientId} + ), ok; ChanPids -> case length(ChanPids) > 1 of true -> - ?SLOG(warning, #{msg => "more_than_one_channel_found", - chan_pids => ChanPids}, - #{clientid => ClientId}); - false -> ok + ?SLOG( + warning, + #{ + msg => "more_than_one_channel_found", + chan_pids => ChanPids + }, + #{clientid => ClientId} + ); + false -> + ok end, lists:foreach(fun(Pid) -> kick_session(ClientId, Pid) end, ChanPids) end. @@ -514,9 +580,9 @@ kick_session(ClientId) -> with_channel(ClientId, Fun) -> case lookup_channels(ClientId) of - [] -> undefined; + [] -> undefined; [Pid] -> Fun(Pid); - Pids -> Fun(lists:last(Pids)) + Pids -> Fun(lists:last(Pids)) end. %% @doc Get all registered channel pids. Debug/test interface @@ -529,14 +595,13 @@ all_client_ids() -> Pat = [{{'$1', '_'}, [], ['$1']}], ets:select(?CHAN_TAB, Pat). - %% @doc Lookup channels. 
--spec(lookup_channels(emqx_types:clientid()) -> list(chan_pid())). +-spec lookup_channels(emqx_types:clientid()) -> list(chan_pid()). lookup_channels(ClientId) -> lookup_channels(global, ClientId). %% @doc Lookup local or global channels. --spec(lookup_channels(local | global, emqx_types:clientid()) -> list(chan_pid())). +-spec lookup_channels(local | global, emqx_types:clientid()) -> list(chan_pid()). lookup_channels(global, ClientId) -> case emqx_cm_registry:is_enabled() of true -> @@ -544,21 +609,22 @@ lookup_channels(global, ClientId) -> false -> lookup_channels(local, ClientId) end; - lookup_channels(local, ClientId) -> [ChanPid || {_, ChanPid} <- ets:lookup(?CHAN_TAB, ClientId)]. -spec lookup_client({clientid, emqx_types:clientid()} | {username, emqx_types:username()}) -> - [channel_info()]. + [channel_info()]. lookup_client({username, Username}) -> - MatchSpec = [{ {'_', #{clientinfo => #{username => '$1'}}, '_'} - , [{'=:=','$1', Username}] - , ['$_'] - }], + MatchSpec = [ + {{'_', #{clientinfo => #{username => '$1'}}, '_'}, [{'=:=', '$1', Username}], ['$_']} + ], ets:select(emqx_channel_info, MatchSpec); lookup_client({clientid, ClientId}) -> - [Rec || Key <- ets:lookup(emqx_channel, ClientId) - , Rec <- ets:lookup(emqx_channel_info, Key)]. + [ + Rec + || Key <- ets:lookup(emqx_channel, ClientId), + Rec <- ets:lookup(emqx_channel_info, Key) + ]. %% @private wrap_rpc(Result) -> @@ -636,10 +702,12 @@ update_stats({Tab, Stat, MaxStat}) -> end. -spec do_get_chann_conn_mod(emqx_types:clientid(), chan_pid()) -> - module() | undefined. + module() | undefined. do_get_chann_conn_mod(ClientId, ChanPid) -> Chan = {ClientId, ChanPid}, - try [ConnMod] = ets:lookup_element(?CHAN_CONN_TAB, Chan, 2), ConnMod + try + [ConnMod] = ets:lookup_element(?CHAN_CONN_TAB, Chan, 2), + ConnMod catch error:badarg -> undefined end. diff --git a/apps/emqx/src/emqx_cm_locker.erl b/apps/emqx/src/emqx_cm_locker.erl index 283514847..57569fdee 100644 --- a/apps/emqx/src/emqx_cm_locker.erl +++ b/apps/emqx/src/emqx_cm_locker.erl @@ -21,46 +21,53 @@ -export([start_link/0]). --export([ trans/2 - , trans/3 - , lock/1 - , lock/2 - , unlock/1 - ]). +-export([ + trans/2, + trans/3, + lock/1, + lock/2, + unlock/1 +]). --spec(start_link() -> startlink_ret()). +-spec start_link() -> startlink_ret(). start_link() -> ekka_locker:start_link(?MODULE). --spec(trans(emqx_types:clientid(), fun(([node()]) -> any())) -> any()). +-spec trans(emqx_types:clientid(), fun(([node()]) -> any())) -> any(). trans(ClientId, Fun) -> trans(ClientId, Fun, undefined). --spec(trans(maybe(emqx_types:clientid()), - fun(([node()])-> any()), ekka_locker:piggyback()) -> any()). +-spec trans( + maybe(emqx_types:clientid()), + fun(([node()]) -> any()), + ekka_locker:piggyback() +) -> any(). trans(undefined, Fun, _Piggyback) -> Fun([]); trans(ClientId, Fun, Piggyback) -> case lock(ClientId, Piggyback) of {true, Nodes} -> - try Fun(Nodes) after unlock(ClientId) end; + try + Fun(Nodes) + after + unlock(ClientId) + end; {false, _Nodes} -> {error, client_id_unavailable} end. --spec(lock(emqx_types:clientid()) -> ekka_locker:lock_result()). +-spec lock(emqx_types:clientid()) -> ekka_locker:lock_result(). lock(ClientId) -> ekka_locker:acquire(?MODULE, ClientId, strategy()). --spec(lock(emqx_types:clientid(), ekka_locker:piggyback()) -> ekka_locker:lock_result()). +-spec lock(emqx_types:clientid(), ekka_locker:piggyback()) -> ekka_locker:lock_result(). lock(ClientId, Piggyback) -> ekka_locker:acquire(?MODULE, ClientId, strategy(), Piggyback). 
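trans/3 above is the per-clientid critical section used by open_session and friends: acquire the distributed lock, run the fun, and release the lock in an after clause so an exception cannot leak it. The same shape as a self-contained sketch, using a local process-registry stand-in for ekka_locker (module, names and helpers are invented for the example):

    -module(locker_demo).
    -export([trans/2]).

    trans(Key, Fun) when is_binary(Key), is_function(Fun, 0) ->
        case lock(Key) of
            true ->
                %% `after` runs on normal return and on exceptions alike,
                %% so the lock is always released
                try Fun() after unlock(Key) end;
            false ->
                {error, unavailable}
        end.

    lock(Key) ->
        %% register/2 fails with badarg when the name is already taken,
        %% which doubles as "someone else holds the lock"
        try
            register(lock_name(Key), self()),
            true
        catch
            error:badarg -> false
        end.

    unlock(Key) ->
        catch unregister(lock_name(Key)),
        ok.

    lock_name(Key) ->
        %% fine for a demo; do not create atoms from untrusted input
        binary_to_atom(<<"lock_", Key/binary>>, utf8).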
--spec(unlock(emqx_types:clientid()) -> {boolean(), [node()]}). +-spec unlock(emqx_types:clientid()) -> {boolean(), [node()]}. unlock(ClientId) -> ekka_locker:release(?MODULE, ClientId, strategy()). --spec(strategy() -> local | leader | quorum | all). +-spec strategy() -> local | leader | quorum | all. strategy() -> emqx:get_config([broker, session_locking_strategy]). - diff --git a/apps/emqx/src/emqx_cm_registry.erl b/apps/emqx/src/emqx_cm_registry.erl index be4bc1e0d..7049d31d5 100644 --- a/apps/emqx/src/emqx_cm_registry.erl +++ b/apps/emqx/src/emqx_cm_registry.erl @@ -23,25 +23,26 @@ -include("logger.hrl"). -include("types.hrl"). - -export([start_link/0]). -export([is_enabled/0]). --export([ register_channel/1 - , unregister_channel/1 - ]). +-export([ + register_channel/1, + unregister_channel/1 +]). -export([lookup_channels/1]). %% gen_server callbacks --export([ init/1 - , handle_call/3 - , handle_cast/2 - , handle_info/2 - , terminate/2 - , code_change/3 - ]). +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2, + code_change/3 +]). -define(REGISTRY, ?MODULE). -define(TAB, emqx_channel_registry). @@ -50,7 +51,7 @@ -record(channel, {chid, pid}). %% @doc Start the global channel registry. --spec(start_link() -> startlink_ret()). +-spec start_link() -> startlink_ret(). start_link() -> gen_server:start_link({local, ?REGISTRY}, ?MODULE, [], []). @@ -59,16 +60,17 @@ start_link() -> %%-------------------------------------------------------------------- %% @doc Is the global registry enabled? --spec(is_enabled() -> boolean()). +-spec is_enabled() -> boolean(). is_enabled() -> emqx:get_config([broker, enable_session_registry]). %% @doc Register a global channel. --spec(register_channel(emqx_types:clientid() - | {emqx_types:clientid(), pid()}) -> ok). +-spec register_channel( + emqx_types:clientid() + | {emqx_types:clientid(), pid()} +) -> ok. register_channel(ClientId) when is_binary(ClientId) -> register_channel({ClientId, self()}); - register_channel({ClientId, ChanPid}) when is_binary(ClientId), is_pid(ChanPid) -> case is_enabled() of true -> mria:dirty_write(?TAB, record(ClientId, ChanPid)); @@ -76,11 +78,12 @@ register_channel({ClientId, ChanPid}) when is_binary(ClientId), is_pid(ChanPid) end. %% @doc Unregister a global channel. --spec(unregister_channel(emqx_types:clientid() - | {emqx_types:clientid(), pid()}) -> ok). +-spec unregister_channel( + emqx_types:clientid() + | {emqx_types:clientid(), pid()} +) -> ok. unregister_channel(ClientId) when is_binary(ClientId) -> unregister_channel({ClientId, self()}); - unregister_channel({ClientId, ChanPid}) when is_binary(ClientId), is_pid(ChanPid) -> case is_enabled() of true -> mria:dirty_delete_object(?TAB, record(ClientId, ChanPid)); @@ -88,7 +91,7 @@ unregister_channel({ClientId, ChanPid}) when is_binary(ClientId), is_pid(ChanPid end. %% @doc Lookup the global channels. --spec(lookup_channels(emqx_types:clientid()) -> list(pid())). +-spec lookup_channels(emqx_types:clientid()) -> list(pid()). lookup_channels(ClientId) -> [ChanPid || #channel{pid = ChanPid} <- mnesia:dirty_read(?TAB, ClientId)]. 
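The registry table created in the next hunk is a bag keyed on the clientid, so a clientid can briefly map to several channel pids, and node-down cleanup selects records by the node that owns each pid. A toy ets version of that select (record and table are local to the example; the real table lives in mria/mnesia):

    -module(registry_demo).
    -export([demo/0]).

    -record(channel, {chid, pid}).

    demo() ->
        Tab = ets:new(registry_demo_tab, [bag, public, {keypos, #channel.chid}]),
        true = ets:insert(Tab, #channel{chid = <<"c1">>, pid = self()}),
        true = ets:insert(Tab, #channel{chid = <<"c1">>, pid = spawn(fun() -> ok end)}),
        %% same match-spec shape as do_cleanup_channels/1: guard on node(Pid)
        Pat = [{#channel{pid = '$1', _ = '_'}, [{'==', {node, '$1'}, node()}], ['$_']}],
        ets:select(Tab, Pat).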
@@ -102,13 +105,18 @@ record(ClientId, ChanPid) -> init([]) -> mria_config:set_dirty_shard(?CM_SHARD, true), ok = mria:create_table(?TAB, [ - {type, bag}, - {rlog_shard, ?CM_SHARD}, - {storage, ram_copies}, - {record_name, channel}, - {attributes, record_info(fields, channel)}, - {storage_properties, [{ets, [{read_concurrency, true}, - {write_concurrency, true}]}]}]), + {type, bag}, + {rlog_shard, ?CM_SHARD}, + {storage, ram_copies}, + {record_name, channel}, + {attributes, record_info(fields, channel)}, + {storage_properties, [ + {ets, [ + {read_concurrency, true}, + {write_concurrency, true} + ]} + ]} + ]), ok = mria_rlog:wait_for_shards([?CM_SHARD], infinity), ok = ekka:monitor(membership), {ok, #{}}. @@ -124,14 +132,11 @@ handle_cast(Msg, State) -> handle_info({membership, {mnesia, down, Node}}, State) -> cleanup_channels(Node), {noreply, State}; - handle_info({membership, {node, down, Node}}, State) -> cleanup_channels(Node), {noreply, State}; - handle_info({membership, _Event}, State) -> {noreply, State}; - handle_info(Info, State) -> ?SLOG(error, #{msg => "unexpected_info", info => Info}), {noreply, State}. @@ -147,10 +152,12 @@ code_change(_OldVsn, State, _Extra) -> %%-------------------------------------------------------------------- cleanup_channels(Node) -> - global:trans({?LOCK, self()}, - fun() -> - mria:transaction(?CM_SHARD, fun do_cleanup_channels/1, [Node]) - end). + global:trans( + {?LOCK, self()}, + fun() -> + mria:transaction(?CM_SHARD, fun do_cleanup_channels/1, [Node]) + end + ). do_cleanup_channels(Node) -> Pat = [{#channel{pid = '$1', _ = '_'}, [{'==', {node, '$1'}, Node}], ['$_']}], diff --git a/apps/emqx/src/emqx_cm_sup.erl b/apps/emqx/src/emqx_cm_sup.erl index 2fc504695..c1e945ed7 100644 --- a/apps/emqx/src/emqx_cm_sup.erl +++ b/apps/emqx/src/emqx_cm_sup.erl @@ -34,15 +34,16 @@ start_link() -> %%-------------------------------------------------------------------- init([]) -> - SupFlags = #{strategy => one_for_one, - intensity => 100, - period => 10 - }, - Banned = child_spec(emqx_banned, 1000, worker), + SupFlags = #{ + strategy => one_for_one, + intensity => 100, + period => 10 + }, + Banned = child_spec(emqx_banned, 1000, worker), Flapping = child_spec(emqx_flapping, 1000, worker), - Locker = child_spec(emqx_cm_locker, 5000, worker), + Locker = child_spec(emqx_cm_locker, 5000, worker), Registry = child_spec(emqx_cm_registry, 5000, worker), - Manager = child_spec(emqx_cm, 5000, worker), + Manager = child_spec(emqx_cm, 5000, worker), {ok, {SupFlags, [Banned, Flapping, Locker, Registry, Manager]}}. %%-------------------------------------------------------------------- @@ -50,10 +51,11 @@ init([]) -> %%-------------------------------------------------------------------- child_spec(Mod, Shutdown, Type) -> - #{id => Mod, - start => {Mod, start_link, []}, - restart => permanent, - shutdown => Shutdown, - type => Type, - modules => [Mod] - }. + #{ + id => Mod, + start => {Mod, start_link, []}, + restart => permanent, + shutdown => Shutdown, + type => Type, + modules => [Mod] + }. diff --git a/apps/emqx/src/emqx_config.erl b/apps/emqx/src/emqx_config.erl index 88e4e1b91..0ed0a2dcc 100644 --- a/apps/emqx/src/emqx_config.erl +++ b/apps/emqx/src/emqx_config.erl @@ -18,61 +18,68 @@ -compile({no_auto_import, [get/0, get/1, put/2, erase/1]}). -elvis([{elvis_style, god_modules, disable}]). 
--export([ init_load/1 - , init_load/2 - , read_override_conf/1 - , delete_override_conf_files/0 - , check_config/2 - , fill_defaults/1 - , fill_defaults/2 - , save_configs/5 - , save_to_app_env/1 - , save_to_config_map/2 - , save_to_override_conf/2 - ]). +-export([ + init_load/1, + init_load/2, + read_override_conf/1, + delete_override_conf_files/0, + check_config/2, + fill_defaults/1, + fill_defaults/2, + save_configs/5, + save_to_app_env/1, + save_to_config_map/2, + save_to_override_conf/2 +]). --export([ get_root/1 - , get_root_raw/1 - ]). +-export([ + get_root/1, + get_root_raw/1 +]). --export([ get_default_value/1 - ]). +-export([get_default_value/1]). --export([ get/1 - , get/2 - , find/1 - , find_raw/1 - , put/1 - , put/2 - , erase/1 - ]). +-export([ + get/1, + get/2, + find/1, + find_raw/1, + put/1, + put/2, + erase/1 +]). --export([ get_raw/1 - , get_raw/2 - , put_raw/1 - , put_raw/2 - ]). +-export([ + get_raw/1, + get_raw/2, + put_raw/1, + put_raw/2 +]). --export([ save_schema_mod_and_names/1 - , get_schema_mod/0 - , get_schema_mod/1 - , get_root_names/0 - ]). +-export([ + save_schema_mod_and_names/1, + get_schema_mod/0, + get_schema_mod/1, + get_root_names/0 +]). --export([ get_zone_conf/2 - , get_zone_conf/3 - , put_zone_conf/3 - ]). +-export([ + get_zone_conf/2, + get_zone_conf/3, + put_zone_conf/3 +]). --export([ get_listener_conf/3 - , get_listener_conf/4 - , put_listener_conf/4 - , find_listener_conf/3 - ]). +-export([ + get_listener_conf/3, + get_listener_conf/4, + put_listener_conf/4, + find_listener_conf/3 +]). --export([ add_handlers/0 - , remove_handlers/0 - ]). +-export([ + add_handlers/0, + remove_handlers/0 +]). -include("logger.hrl"). @@ -88,25 +95,34 @@ AtomKeyPath -> EXP catch error:badarg -> EXP_ON_FAIL - end). + end +). --export_type([update_request/0, raw_config/0, config/0, app_envs/0, - update_opts/0, update_cmd/0, update_args/0, - update_error/0, update_result/0]). +-export_type([ + update_request/0, + raw_config/0, + config/0, + app_envs/0, + update_opts/0, + update_cmd/0, + update_args/0, + update_error/0, + update_result/0 +]). -type update_request() :: term(). -type update_cmd() :: {update, update_request()} | remove. -type update_opts() :: #{ - %% rawconf_with_defaults: - %% fill the default values into the `raw_config` field of the return value - %% defaults to `false` - rawconf_with_defaults => boolean(), - %% persistent: - %% save the updated config to the emqx_override.conf file - %% defaults to `true` - persistent => boolean(), - override_to => local | cluster - }. + %% rawconf_with_defaults: + %% fill the default values into the `raw_config` field of the return value + %% defaults to `false` + rawconf_with_defaults => boolean(), + %% persistent: + %% save the updated config to the emqx_override.conf file + %% defaults to `true` + persistent => boolean(), + override_to => local | cluster +}. -type update_args() :: {update_cmd(), Opts :: update_opts()}. -type update_stage() :: pre_config_update | post_config_update. -type update_error() :: {update_stage(), module(), term()} | {save_configs, term()} | term(). @@ -149,8 +165,11 @@ find([]) -> Res -> {ok, Res} end; find(KeyPath) -> - ?ATOM_CONF_PATH(KeyPath, emqx_map_lib:deep_find(AtomKeyPath, get_root(KeyPath)), - {not_found, KeyPath}). + ?ATOM_CONF_PATH( + KeyPath, + emqx_map_lib:deep_find(AtomKeyPath, get_root(KeyPath)), + {not_found, KeyPath} + ). -spec find_raw(emqx_map_lib:config_key_path()) -> {ok, term()} | {not_found, emqx_map_lib:config_key_path(), term()}. 
@@ -166,17 +185,21 @@ find_raw(KeyPath) -> -spec get_zone_conf(atom(), emqx_map_lib:config_key_path()) -> term(). get_zone_conf(Zone, KeyPath) -> case find(?ZONE_CONF_PATH(Zone, KeyPath)) of - {not_found, _, _} -> %% not found in zones, try to find the global config + %% not found in zones, try to find the global config + {not_found, _, _} -> ?MODULE:get(KeyPath); - {ok, Value} -> Value + {ok, Value} -> + Value end. -spec get_zone_conf(atom(), emqx_map_lib:config_key_path(), term()) -> term(). get_zone_conf(Zone, KeyPath, Default) -> case find(?ZONE_CONF_PATH(Zone, KeyPath)) of - {not_found, _, _} -> %% not found in zones, try to find the global config + %% not found in zones, try to find the global config + {not_found, _, _} -> ?MODULE:get(KeyPath, Default); - {ok, Value} -> Value + {ok, Value} -> + Value end. -spec put_zone_conf(atom(), emqx_map_lib:config_key_path(), term()) -> ok. @@ -202,9 +225,13 @@ find_listener_conf(Type, Listener, KeyPath) -> -spec put(map()) -> ok. put(Config) -> - maps:fold(fun(RootName, RootValue, _) -> + maps:fold( + fun(RootName, RootValue, _) -> ?MODULE:put([RootName], RootValue) - end, ok, Config). + end, + ok, + Config + ). erase(RootName) -> persistent_term:erase(?PERSIS_KEY(?CONF, bin(RootName))), @@ -219,12 +246,14 @@ get_default_value([RootName | _] = KeyPath) -> case find_raw([RootName]) of {ok, RawConf} -> RawConf1 = emqx_map_lib:deep_remove(BinKeyPath, #{bin(RootName) => RawConf}), - try fill_defaults(get_schema_mod(RootName), RawConf1) of FullConf -> - case emqx_map_lib:deep_find(BinKeyPath, FullConf) of - {not_found, _, _} -> {error, no_default_value}; - {ok, Val} -> {ok, Val} - end - catch error : Reason -> {error, Reason} + try fill_defaults(get_schema_mod(RootName), RawConf1) of + FullConf -> + case emqx_map_lib:deep_find(BinKeyPath, FullConf) of + {not_found, _, _} -> {error, no_default_value}; + {ok, Val} -> {ok, Val} + end + catch + error:Reason -> {error, Reason} end; {not_found, _, _} -> {error, {rootname_not_found, RootName}} @@ -238,9 +267,13 @@ get_raw(KeyPath, Default) -> do_get(?RAW_CONF, KeyPath, Default). -spec put_raw(map()) -> ok. put_raw(Config) -> - maps:fold(fun(RootName, RootV, _) -> + maps:fold( + fun(RootName, RootV, _) -> ?MODULE:put_raw([RootName], RootV) - end, ok, hocon_maps:ensure_plain(Config)). + end, + ok, + hocon_maps:ensure_plain(Config) + ). -spec put_raw(emqx_map_lib:config_key_path(), term()) -> ok. put_raw(KeyPath, Config) -> do_put(?RAW_CONF, KeyPath, Config). @@ -268,10 +301,12 @@ init_load(SchemaMod, RawConf) when is_map(RawConf) -> RawConfWithOverrides = hocon:deep_merge(RawConfWithEnvs, Overrides), %% check configs against the schema {_AppEnvs, CheckedConf} = - check_config(SchemaMod, RawConfWithOverrides , #{}), + check_config(SchemaMod, RawConfWithOverrides, #{}), RootNames = get_root_names(), - ok = save_to_config_map(maps:with(get_atom_root_names(), CheckedConf), - maps:with(RootNames, RawConfWithOverrides)). + ok = save_to_config_map( + maps:with(get_atom_root_names(), CheckedConf), + maps:with(RootNames, RawConfWithOverrides) + ). 
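get_zone_conf/2,3 above implement a simple precedence rule: a value set under the zone wins, otherwise the global key path is consulted. A self-contained sketch of the same rule over a plain nested map (the real store is persistent_term-backed):

    -module(zone_conf_demo).
    -export([get_zone_conf/3]).

    get_zone_conf(Zone, KeyPath, Conf) ->
        case deep_find([zones, Zone | KeyPath], Conf) of
            {ok, Value} -> Value;
            %% not found under the zone: fall back to the global config
            error -> deep_get(KeyPath, Conf)
        end.

    deep_find([], Value) ->
        {ok, Value};
    deep_find([Key | Rest], Map) when is_map(Map) ->
        case maps:find(Key, Map) of
            {ok, Value} -> deep_find(Rest, Value);
            error -> error
        end;
    deep_find(_KeyPath, _Other) ->
        error.

    %% crashes when the global path is missing too, mirroring a strict get
    deep_get(KeyPath, Conf) ->
        {ok, Value} = deep_find(KeyPath, Conf),
        Value.

For instance, get_zone_conf(z1, [mqtt, max_inflight], #{mqtt => #{max_inflight => 32}, zones => #{z1 => #{}}}) returns 32, while an override stored under [zones, z1, mqtt, max_inflight] would take precedence.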
parse_hocon(Conf) -> IncDirs = include_dirs(), @@ -279,12 +314,13 @@ parse_hocon(Conf) -> {ok, HoconMap} -> HoconMap; {error, Reason} -> - ?SLOG(error, #{msg => "failed_to_load_hocon_conf", - reason => Reason, - pwd => file:get_cwd(), - include_dirs => IncDirs, - config_file => Conf - }), + ?SLOG(error, #{ + msg => "failed_to_load_hocon_conf", + reason => Reason, + pwd => file:get_cwd(), + include_dirs => IncDirs, + config_file => Conf + }), error(failed_to_load_hocon_conf) end. @@ -299,22 +335,26 @@ include_dirs() -> [filename:join(emqx:data_dir(), "configs")]. merge_envs(SchemaMod, RawConf) -> - Opts = #{required => false, %% TODO: evil, remove, required should be declared in schema - format => map, - apply_override_envs => true - }, + %% TODO: evil, remove, required should be declared in schema + Opts = #{ + required => false, + format => map, + apply_override_envs => true + }, hocon_tconf:merge_env_overrides(SchemaMod, RawConf, all, Opts). --spec check_config(hocon_schema:schema(), raw_config()) -> {AppEnvs, CheckedConf} - when AppEnvs :: app_envs(), CheckedConf :: config(). +-spec check_config(hocon_schema:schema(), raw_config()) -> {AppEnvs, CheckedConf} when + AppEnvs :: app_envs(), CheckedConf :: config(). check_config(SchemaMod, RawConf) -> check_config(SchemaMod, RawConf, #{}). check_config(SchemaMod, RawConf, Opts0) -> - Opts1 = #{return_plain => true, - required => false, %% TODO: evil, remove, required should be declared in schema - format => map - }, + Opts1 = #{ + return_plain => true, + %% TODO: evil, remove, required should be declared in schema + required => false, + format => map + }, Opts = maps:merge(Opts0, Opts1), {AppEnvs, CheckedConf} = hocon_tconf:map_translate(SchemaMod, RawConf, Opts), @@ -323,21 +363,28 @@ check_config(SchemaMod, RawConf, Opts0) -> -spec fill_defaults(raw_config()) -> map(). fill_defaults(RawConf) -> RootNames = get_root_names(), - maps:fold(fun(Key, Conf, Acc) -> + maps:fold( + fun(Key, Conf, Acc) -> SubMap = #{Key => Conf}, - WithDefaults = case lists:member(Key, RootNames) of - true -> fill_defaults(get_schema_mod(Key), SubMap); - false -> SubMap - end, + WithDefaults = + case lists:member(Key, RootNames) of + true -> fill_defaults(get_schema_mod(Key), SubMap); + false -> SubMap + end, maps:merge(Acc, WithDefaults) - end, #{}, RawConf). + end, + #{}, + RawConf + ). -spec fill_defaults(module(), raw_config()) -> map(). fill_defaults(SchemaMod, RawConf) -> - hocon_tconf:check_plain(SchemaMod, RawConf, + hocon_tconf:check_plain( + SchemaMod, + RawConf, #{required => false, only_fill_defaults => true}, - root_names_from_conf(RawConf)). - + root_names_from_conf(RawConf) + ). %% @doc Only for test cleanups. %% Delete override config files. @@ -408,9 +455,12 @@ save_configs(_AppEnvs, Conf, RawConf, OverrideConf, Opts) -> -spec save_to_app_env([tuple()]) -> ok. save_to_app_env(AppEnvs) -> - lists:foreach(fun({AppName, Envs}) -> + lists:foreach( + fun({AppName, Envs}) -> [application:set_env(AppName, Par, Val) || {Par, Val} <- Envs] - end, AppEnvs). + end, + AppEnvs + ). -spec save_to_config_map(config(), raw_config()) -> ok. 
save_to_config_map(Conf, RawConf) -> @@ -422,15 +472,19 @@ save_to_override_conf(undefined, _) -> ok; save_to_override_conf(RawConf, Opts) -> case override_conf_file(Opts) of - undefined -> ok; + undefined -> + ok; FileName -> ok = filelib:ensure_dir(FileName), case file:write_file(FileName, hocon_pp:do(RawConf, #{})) of - ok -> ok; + ok -> + ok; {error, Reason} -> - ?SLOG(error, #{msg => "failed_to_write_override_file", - filename => FileName, - reason => Reason}), + ?SLOG(error, #{ + msg => "failed_to_write_override_file", + filename => FileName, + reason => Reason + }), {error, Reason} end end. @@ -449,7 +503,8 @@ load_hocon_file(FileName, LoadType) -> Opts = #{include_dirs => include_dirs(), format => LoadType}, {ok, Raw0} = hocon:load(FileName, Opts), Raw0; - false -> #{} + false -> + #{} end. do_get(Type, KeyPath) -> @@ -461,11 +516,16 @@ do_get(Type, KeyPath) -> end. do_get(Type, [], Default) -> - AllConf = lists:foldl(fun + AllConf = lists:foldl( + fun ({?PERSIS_KEY(Type0, RootName), Conf}, AccIn) when Type0 == Type -> AccIn#{conf_key(Type0, RootName) => Conf}; - (_, AccIn) -> AccIn - end, #{}, persistent_term:get()), + (_, AccIn) -> + AccIn + end, + #{}, + persistent_term:get() + ), case AllConf =:= #{} of true -> Default; false -> AllConf @@ -477,23 +537,33 @@ do_get(Type, [RootName | KeyPath], Default) -> do_deep_get(Type, KeyPath, RootV, Default). do_put(Type, [], DeepValue) -> - maps:fold(fun(RootName, Value, _Res) -> + maps:fold( + fun(RootName, Value, _Res) -> do_put(Type, [RootName], Value) - end, ok, DeepValue); + end, + ok, + DeepValue + ); do_put(Type, [RootName | KeyPath], DeepValue) -> OldValue = do_get(Type, [RootName], #{}), NewValue = do_deep_put(Type, KeyPath, OldValue, DeepValue), persistent_term:put(?PERSIS_KEY(Type, bin(RootName)), NewValue). do_deep_get(?CONF, KeyPath, Map, Default) -> - ?ATOM_CONF_PATH(KeyPath, emqx_map_lib:deep_get(AtomKeyPath, Map, Default), - Default); + ?ATOM_CONF_PATH( + KeyPath, + emqx_map_lib:deep_get(AtomKeyPath, Map, Default), + Default + ); do_deep_get(?RAW_CONF, KeyPath, Map, Default) -> emqx_map_lib:deep_get([bin(Key) || Key <- KeyPath], Map, Default). do_deep_put(?CONF, KeyPath, Map, Value) -> - ?ATOM_CONF_PATH(KeyPath, emqx_map_lib:deep_put(AtomKeyPath, Map, Value), - error({not_found, KeyPath})); + ?ATOM_CONF_PATH( + KeyPath, + emqx_map_lib:deep_put(AtomKeyPath, Map, Value), + error({not_found, KeyPath}) + ); do_deep_put(?RAW_CONF, KeyPath, Map, Value) -> emqx_map_lib:deep_put([bin(Key) || Key <- KeyPath], Map, Value). diff --git a/apps/emqx/src/emqx_config_handler.erl b/apps/emqx/src/emqx_config_handler.erl index cfe29cecc..141eacdd1 100644 --- a/apps/emqx/src/emqx_config_handler.erl +++ b/apps/emqx/src/emqx_config_handler.erl @@ -23,23 +23,26 @@ -behaviour(gen_server). %% API functions --export([ start_link/0 - , stop/0 - , add_handler/2 - , remove_handler/1 - , update_config/3 - , get_raw_cluster_override_conf/0 - , info/0 - , merge_to_old_config/2 - ]). +-export([ + start_link/0, + stop/0, + add_handler/2, + remove_handler/1, + update_config/3, + get_raw_cluster_override_conf/0, + info/0, + merge_to_old_config/2 +]). %% gen_server callbacks --export([init/1, - handle_call/3, - handle_cast/2, - handle_info/2, - terminate/2, - code_change/3]). +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2, + code_change/3 +]). -define(MOD, {mod}). -define(WKEY, '?'). @@ -47,16 +50,22 @@ -type handler_name() :: module(). 
-type handlers() :: #{emqx_config:config_key() => handlers(), ?MOD => handler_name()}. --optional_callbacks([ pre_config_update/3 - , post_config_update/5 - ]). +-optional_callbacks([ + pre_config_update/3, + post_config_update/5 +]). -callback pre_config_update([atom()], emqx_config:update_request(), emqx_config:raw_config()) -> {ok, emqx_config:update_request()} | {error, term()}. --callback post_config_update([atom()], emqx_config:update_request(), emqx_config:config(), - emqx_config:config(), emqx_config:app_envs()) -> - ok | {ok, Result::any()} | {error, Reason::term()}. +-callback post_config_update( + [atom()], + emqx_config:update_request(), + emqx_config:config(), + emqx_config:config(), + emqx_config:app_envs() +) -> + ok | {ok, Result :: any()} | {error, Reason :: term()}. -type state() :: #{ handlers := handlers(), @@ -106,9 +115,11 @@ handle_call({add_handler, ConfKeyPath, HandlerName}, _From, State = #{handlers : {ok, NewHandlers} -> {reply, ok, State#{handlers => NewHandlers}}; {error, _Reason} = Error -> {reply, Error, State} end; - -handle_call({change_config, SchemaModule, ConfKeyPath, UpdateArgs}, _From, - #{handlers := Handlers} = State) -> +handle_call( + {change_config, SchemaModule, ConfKeyPath, UpdateArgs}, + _From, + #{handlers := Handlers} = State +) -> Result = handle_update_request(SchemaModule, ConfKeyPath, Handlers, UpdateArgs), {reply, Result, State}; handle_call(get_raw_cluster_override_conf, _From, State) -> @@ -140,13 +151,14 @@ deep_put_handler([], Handlers, Mod) -> deep_put_handler([Key | KeyPath], Handlers, Mod) -> SubHandlers = maps:get(Key, Handlers, #{}), case deep_put_handler(KeyPath, SubHandlers, Mod) of - {ok, NewSubHandlers} -> + {ok, NewSubHandlers} -> NewHandlers = Handlers#{Key => NewSubHandlers}, case check_handler_conflict(NewHandlers) of ok -> {ok, NewHandlers}; {error, Reason} -> {error, Reason} end; - {error, _Reason} = Error -> Error + {error, _Reason} = Error -> + Error end. %% Make sure that Specify Key and ?WKEY cannot be on the same level. @@ -165,34 +177,45 @@ check_handler_conflict(Handlers) -> filter_top_level_handlers(Handlers) -> maps:fold( fun - (K, #{?MOD := _}, Acc) -> [K | Acc]; + (K, #{?MOD := _}, Acc) -> [K | Acc]; (_K, #{}, Acc) -> Acc; (?MOD, _, Acc) -> Acc - end, [], Handlers). + end, + [], + Handlers + ). handle_update_request(SchemaModule, ConfKeyPath, Handlers, UpdateArgs) -> try do_handle_update_request(SchemaModule, ConfKeyPath, Handlers, UpdateArgs) catch - throw : Reason -> + throw:Reason -> {error, Reason}; - Error : Reason : ST -> - ?SLOG(error, #{msg => "change_config_crashed", - exception => Error, - reason => Reason, - update_req => UpdateArgs, - module => SchemaModule, - key_path => ConfKeyPath, - stacktrace => ST - }), + Error:Reason:ST -> + ?SLOG(error, #{ + msg => "change_config_crashed", + exception => Error, + reason => Reason, + update_req => UpdateArgs, + module => SchemaModule, + key_path => ConfKeyPath, + stacktrace => ST + }), {error, config_update_crashed} end. do_handle_update_request(SchemaModule, ConfKeyPath, Handlers, UpdateArgs) -> case process_update_request(ConfKeyPath, Handlers, UpdateArgs) of {ok, NewRawConf, OverrideConf, Opts} -> - check_and_save_configs(SchemaModule, ConfKeyPath, Handlers, NewRawConf, - OverrideConf, UpdateArgs, Opts); + check_and_save_configs( + SchemaModule, + ConfKeyPath, + Handlers, + NewRawConf, + OverrideConf, + UpdateArgs, + Opts + ); {error, Result} -> {error, Result} end. 
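The two callbacks declared above are what a config handler module provides: pre_config_update/3 massages or rejects the raw update request before schema checking, and post_config_update/5 reacts to the checked change. A hypothetical handler, only to show the shapes (module name, key handling and results are made up):

    -module(my_feature_config_handler).
    -behaviour(emqx_config_handler).

    -export([pre_config_update/3, post_config_update/5]).

    pre_config_update(_ConfKeyPath, UpdateReq, _OldRawConf) when is_map(UpdateReq) ->
        %% normalize the raw request before it is checked against the schema
        {ok, UpdateReq#{<<"enable">> => true}};
    pre_config_update(_ConfKeyPath, _UpdateReq, _OldRawConf) ->
        {error, expected_a_map}.

    post_config_update(_ConfKeyPath, _UpdateReq, NewConf, OldConf, _AppEnvs) ->
        %% react to the applied change, e.g. restart a worker or emit metrics
        case NewConf =:= OldConf of
            true -> ok;
            false -> {ok, #{changed => true}}
        end.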
@@ -211,7 +234,8 @@ process_update_request(ConfKeyPath, Handlers, {{update, UpdateReq}, Opts}) -> {ok, NewRawConf} -> OverrideConf = update_override_config(NewRawConf, Opts), {ok, NewRawConf, OverrideConf, Opts}; - Error -> Error + Error -> + Error end. do_update_config(ConfKeyPath, Handlers, OldRawConf, UpdateReq) -> @@ -219,8 +243,13 @@ do_update_config(ConfKeyPath, Handlers, OldRawConf, UpdateReq) -> do_update_config([], Handlers, OldRawConf, UpdateReq, ConfKeyPath) -> call_pre_config_update(Handlers, OldRawConf, UpdateReq, ConfKeyPath); -do_update_config([ConfKey | SubConfKeyPath], Handlers, OldRawConf, - UpdateReq, ConfKeyPath0) -> +do_update_config( + [ConfKey | SubConfKeyPath], + Handlers, + OldRawConf, + UpdateReq, + ConfKeyPath0 +) -> ConfKeyPath = ConfKeyPath0 ++ [ConfKey], ConfKeyBin = bin(ConfKey), SubOldRawConf = get_sub_config(ConfKeyBin, OldRawConf), @@ -230,43 +259,110 @@ do_update_config([ConfKey | SubConfKeyPath], Handlers, OldRawConf, Error -> Error end. -check_and_save_configs(SchemaModule, ConfKeyPath, Handlers, NewRawConf, OverrideConf, - UpdateArgs, Opts) -> +check_and_save_configs( + SchemaModule, + ConfKeyPath, + Handlers, + NewRawConf, + OverrideConf, + UpdateArgs, + Opts +) -> OldConf = emqx_config:get_root(ConfKeyPath), Schema = schema(SchemaModule, ConfKeyPath), {AppEnvs, NewConf} = emqx_config:check_config(Schema, NewRawConf), case do_post_config_update(ConfKeyPath, Handlers, OldConf, NewConf, AppEnvs, UpdateArgs, #{}) of {ok, Result0} -> remove_from_local_if_cluster_change(ConfKeyPath, Opts), - case save_configs(ConfKeyPath, AppEnvs, NewConf, NewRawConf, OverrideConf, - UpdateArgs, Opts) of + case + save_configs( + ConfKeyPath, + AppEnvs, + NewConf, + NewRawConf, + OverrideConf, + UpdateArgs, + Opts + ) + of {ok, Result1} -> {ok, Result1#{post_config_update => Result0}}; - Error -> Error + Error -> + Error end; - Error -> Error + Error -> + Error end. do_post_config_update(ConfKeyPath, Handlers, OldConf, NewConf, AppEnvs, UpdateArgs, Result) -> - do_post_config_update(ConfKeyPath, Handlers, OldConf, NewConf, AppEnvs, UpdateArgs, - Result, []). + do_post_config_update( + ConfKeyPath, + Handlers, + OldConf, + NewConf, + AppEnvs, + UpdateArgs, + Result, + [] + ). 
-do_post_config_update([], Handlers, OldConf, NewConf, AppEnvs, UpdateArgs, Result, - ConfKeyPath) -> - call_post_config_update(Handlers, OldConf, NewConf, AppEnvs, up_req(UpdateArgs), - Result, ConfKeyPath); -do_post_config_update([ConfKey | SubConfKeyPath], Handlers, OldConf, NewConf, AppEnvs, - UpdateArgs, Result, ConfKeyPath0) -> +do_post_config_update( + [], + Handlers, + OldConf, + NewConf, + AppEnvs, + UpdateArgs, + Result, + ConfKeyPath +) -> + call_post_config_update( + Handlers, + OldConf, + NewConf, + AppEnvs, + up_req(UpdateArgs), + Result, + ConfKeyPath + ); +do_post_config_update( + [ConfKey | SubConfKeyPath], + Handlers, + OldConf, + NewConf, + AppEnvs, + UpdateArgs, + Result, + ConfKeyPath0 +) -> ConfKeyPath = ConfKeyPath0 ++ [ConfKey], SubOldConf = get_sub_config(ConfKey, OldConf), SubNewConf = get_sub_config(ConfKey, NewConf), SubHandlers = get_sub_handlers(ConfKey, Handlers), - case do_post_config_update(SubConfKeyPath, SubHandlers, SubOldConf, SubNewConf, AppEnvs, - UpdateArgs, Result, ConfKeyPath) of + case + do_post_config_update( + SubConfKeyPath, + SubHandlers, + SubOldConf, + SubNewConf, + AppEnvs, + UpdateArgs, + Result, + ConfKeyPath + ) + of {ok, Result1} -> - call_post_config_update(Handlers, OldConf, NewConf, AppEnvs, up_req(UpdateArgs), - Result1, ConfKeyPath); - Error -> Error + call_post_config_update( + Handlers, + OldConf, + NewConf, + AppEnvs, + up_req(UpdateArgs), + Result1, + ConfKeyPath + ); + Error -> + Error end. get_sub_handlers(ConfKey, Handlers) -> @@ -277,7 +373,8 @@ get_sub_handlers(ConfKey, Handlers) -> get_sub_config(ConfKey, Conf) when is_map(Conf) -> maps:get(ConfKey, Conf, undefined); -get_sub_config(_, _Conf) -> %% the Conf is a primitive +%% the Conf is a primitive +get_sub_config(_, _Conf) -> undefined. call_pre_config_update(#{?MOD := HandlerName}, OldRawConf, UpdateReq, ConfKeyPath) -> @@ -287,25 +384,48 @@ call_pre_config_update(#{?MOD := HandlerName}, OldRawConf, UpdateReq, ConfKeyPat {ok, NewUpdateReq} -> {ok, NewUpdateReq}; {error, Reason} -> {error, {pre_config_update, HandlerName, Reason}} end; - false -> merge_to_old_config(UpdateReq, OldRawConf) + false -> + merge_to_old_config(UpdateReq, OldRawConf) end; call_pre_config_update(_Handlers, OldRawConf, UpdateReq, _ConfKeyPath) -> merge_to_old_config(UpdateReq, OldRawConf). -call_post_config_update(#{?MOD := HandlerName}, OldConf, NewConf, - AppEnvs, UpdateReq, Result, ConfKeyPath) -> +call_post_config_update( + #{?MOD := HandlerName}, + OldConf, + NewConf, + AppEnvs, + UpdateReq, + Result, + ConfKeyPath +) -> case erlang:function_exported(HandlerName, post_config_update, 5) of true -> - case HandlerName:post_config_update(ConfKeyPath, UpdateReq, - NewConf, OldConf, AppEnvs) of + case + HandlerName:post_config_update( + ConfKeyPath, + UpdateReq, + NewConf, + OldConf, + AppEnvs + ) + of ok -> {ok, Result}; {ok, Result1} -> {ok, Result#{HandlerName => Result1}}; {error, Reason} -> {error, {post_config_update, HandlerName, Reason}} end; - false -> {ok, Result} + false -> + {ok, Result} end; -call_post_config_update(_Handlers, _OldConf, _NewConf, _AppEnvs, - _UpdateReq, Result, _ConfKeyPath) -> +call_post_config_update( + _Handlers, + _OldConf, + _NewConf, + _AppEnvs, + _UpdateReq, + Result, + _ConfKeyPath +) -> {ok, Result}. save_configs(ConfKeyPath, AppEnvs, CheckedConf, NewRawConf, OverrideConf, UpdateArgs, Opts) -> @@ -350,8 +470,10 @@ up_req({remove, _Opts}) -> '$remove'; up_req({{update, Req}, _Opts}) -> Req. 
return_change_result(ConfKeyPath, {{update, _Req}, Opts}) -> - #{config => emqx_config:get(ConfKeyPath), - raw_config => return_rawconf(ConfKeyPath, Opts)}; + #{ + config => emqx_config:get(ConfKeyPath), + raw_config => return_rawconf(ConfKeyPath, Opts) + }; return_change_result(_ConfKeyPath, {remove, _Opts}) -> #{}. @@ -378,19 +500,24 @@ do_remove_handler(ConfKeyPath, Handlers) -> NewHandlers = emqx_map_lib:deep_remove(ConfKeyPath ++ [?MOD], Handlers), remove_empty_leaf(ConfKeyPath, NewHandlers). -remove_empty_leaf([], Handlers) -> Handlers; +remove_empty_leaf([], Handlers) -> + Handlers; remove_empty_leaf(KeyPath, Handlers) -> case emqx_map_lib:deep_find(KeyPath, Handlers) =:= {ok, #{}} of - true -> %% empty leaf + %% empty leaf + true -> Handlers1 = emqx_map_lib:deep_remove(KeyPath, Handlers), SubKeyPath = lists:sublist(KeyPath, length(KeyPath) - 1), remove_empty_leaf(SubKeyPath, Handlers1); - false -> Handlers + false -> + Handlers end. assert_callback_function(Mod) -> - case erlang:function_exported(Mod, pre_config_update, 3) orelse - erlang:function_exported(Mod, post_config_update, 5) of + case + erlang:function_exported(Mod, pre_config_update, 3) orelse + erlang:function_exported(Mod, post_config_update, 5) + of true -> ok; false -> error(#{msg => "bad_emqx_config_handler_callback", module => Mod}) end, @@ -410,12 +537,16 @@ schema(SchemaModule, [RootKey | _]) -> validations => hocon_schema:validations(SchemaModule) }. -parse_translations(#{translate_to := TRs } = Field, Name, SchemaModule) -> +parse_translations(#{translate_to := TRs} = Field, Name, SchemaModule) -> { {Name, maps:remove(translate_to, Field)}, - lists:foldl(fun(T, Acc) -> - Acc#{T => hocon_schema:translation(SchemaModule, T)} - end, #{}, TRs) + lists:foldl( + fun(T, Acc) -> + Acc#{T => hocon_schema:translation(SchemaModule, T)} + end, + #{}, + TRs + ) }; parse_translations(Field, Name, _SchemaModule) -> {{Name, Field}, #{}}. diff --git a/apps/emqx/src/emqx_congestion.erl b/apps/emqx/src/emqx_congestion.erl index ca94ded90..f8448b106 100644 --- a/apps/emqx/src/emqx_congestion.erl +++ b/apps/emqx/src/emqx_congestion.erl @@ -16,21 +16,41 @@ -module(emqx_congestion). --export([ maybe_alarm_conn_congestion/3 - , cancel_alarms/3 - ]). +-export([ + maybe_alarm_conn_congestion/3, + cancel_alarms/3 +]). -elvis([{elvis_style, invalid_dynamic_call, #{ignore => [emqx_congestion]}}]). -define(ALARM_CONN_CONGEST(Channel, Reason), - list_to_binary( - io_lib:format("~ts/~ts/~ts", - [Reason, emqx_channel:info(clientid, Channel), - maps:get(username, emqx_channel:info(clientinfo, Channel), - <<"unknown_user">>)]))). + list_to_binary( + io_lib:format( + "~ts/~ts/~ts", + [ + Reason, + emqx_channel:info(clientid, Channel), + maps:get( + username, + emqx_channel:info(clientinfo, Channel), + <<"unknown_user">> + ) + ] + ) + ) +). --define(ALARM_CONN_INFO_KEYS, [socktype, sockname, peername, clientid, username, - proto_name, proto_ver, connected_at, conn_state]). +-define(ALARM_CONN_INFO_KEYS, [ + socktype, + sockname, + peername, + clientid, + username, + proto_name, + proto_ver, + connected_at, + conn_state +]). -define(ALARM_SOCK_STATS_KEYS, [send_pend, recv_cnt, recv_oct, send_cnt, send_oct]). -define(ALARM_SOCK_OPTS_KEYS, [high_watermark, high_msgq_watermark, sndbuf, recbuf, buffer]). -define(PROC_INFO_KEYS, [message_queue_len, memory, reductions]). 
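The ?ALARM_CONN_CONGEST macro above names the alarm as Reason/ClientId/Username. For example, in a shell (client id and username invented):

    1> list_to_binary(
           io_lib:format("~ts/~ts/~ts", [conn_congestion, <<"client-1">>, <<"alice">>])).
    <<"conn_congestion/client-1/alice">>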
@@ -40,7 +60,8 @@ maybe_alarm_conn_congestion(Socket, Transport, Channel) -> case is_alarm_enabled(Channel) of - false -> ok; + false -> + ok; true -> case is_tcp_congested(Socket, Transport) of true -> alarm_congestion(Socket, Transport, Channel, conn_congestion); @@ -49,12 +70,15 @@ maybe_alarm_conn_congestion(Socket, Transport, Channel) -> end. cancel_alarms(Socket, Transport, Channel) -> - lists:foreach(fun(Reason) -> - case has_alarm_sent(Reason) of - true -> do_cancel_alarm_congestion(Socket, Transport, Channel, Reason); - false -> ok - end - end, ?ALL_ALARM_REASONS). + lists:foreach( + fun(Reason) -> + case has_alarm_sent(Reason) of + true -> do_cancel_alarm_congestion(Socket, Transport, Channel, Reason); + false -> ok + end + end, + ?ALL_ALARM_REASONS + ). is_alarm_enabled(Channel) -> Zone = emqx_channel:info(zone, Channel), @@ -62,7 +86,8 @@ is_alarm_enabled(Channel) -> alarm_congestion(Socket, Transport, Channel, Reason) -> case has_alarm_sent(Reason) of - false -> do_alarm_congestion(Socket, Transport, Channel, Reason); + false -> + do_alarm_congestion(Socket, Transport, Channel, Reason); true -> %% pretend we have sent an alarm again update_alarm_sent_at(Reason) @@ -70,8 +95,10 @@ alarm_congestion(Socket, Transport, Channel, Reason) -> cancel_alarm_congestion(Socket, Transport, Channel, Reason) -> Zone = emqx_channel:info(zone, Channel), - WontClearIn = emqx_config:get_zone_conf(Zone, [conn_congestion, - min_alarm_sustain_duration]), + WontClearIn = emqx_config:get_zone_conf(Zone, [ + conn_congestion, + min_alarm_sustain_duration + ]), case has_alarm_sent(Reason) andalso long_time_since_last_alarm(Reason, WontClearIn) of true -> do_cancel_alarm_congestion(Socket, Transport, Channel, Reason); false -> ok @@ -130,14 +157,16 @@ timenow() -> tcp_congestion_alarm_details(Socket, Transport, Channel) -> ProcInfo = process_info(self(), ?PROC_INFO_KEYS), BasicInfo = [{pid, list_to_binary(pid_to_list(self()))} | ProcInfo], - Stat = case Transport:getstat(Socket, ?ALARM_SOCK_STATS_KEYS) of - {ok, Stat0} -> Stat0; - {error, _} -> [] - end, - Opts = case Transport:getopts(Socket, ?ALARM_SOCK_OPTS_KEYS) of - {ok, Opts0} -> Opts0; - {error, _} -> [] - end, + Stat = + case Transport:getstat(Socket, ?ALARM_SOCK_STATS_KEYS) of + {ok, Stat0} -> Stat0; + {error, _} -> [] + end, + Opts = + case Transport:getopts(Socket, ?ALARM_SOCK_OPTS_KEYS) of + {ok, Opts0} -> Opts0; + {error, _} -> [] + end, SockInfo = Stat ++ Opts, ConnInfo = [conn_info(Key, Channel) || Key <- ?ALARM_CONN_INFO_KEYS], maps:from_list(BasicInfo ++ ConnInfo ++ SockInfo). diff --git a/apps/emqx/src/emqx_connection.erl b/apps/emqx/src/emqx_connection.erl index 3dce1440f..7d11d07aa 100644 --- a/apps/emqx/src/emqx_connection.erl +++ b/apps/emqx/src/emqx_connection.erl @@ -23,7 +23,6 @@ -include("types.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). - -ifdef(TEST). -compile(export_all). -compile(nowarn_export_all). @@ -32,33 +31,38 @@ -elvis([{elvis_style, invalid_dynamic_call, #{ignore => [emqx_connection]}}]). %% API --export([ start_link/3 - , stop/1 - ]). +-export([ + start_link/3, + stop/1 +]). --export([ info/1 - , stats/1 - ]). +-export([ + info/1, + stats/1 +]). --export([ async_set_keepalive/3 - , async_set_keepalive/4 - , async_set_socket_options/2 - ]). +-export([ + async_set_keepalive/3, + async_set_keepalive/4, + async_set_socket_options/2 +]). --export([ call/2 - , call/3 - , cast/2 - ]). +-export([ + call/2, + call/3, + cast/2 +]). %% Callback -export([init/4]). 
%% Sys callbacks --export([ system_continue/3 - , system_terminate/4 - , system_code_change/4 - , system_get_state/1 - ]). +-export([ + system_continue/3, + system_terminate/4, + system_code_change/4, + system_get_state/1 +]). %% Internal callback -export([wakeup_from_hib/2, recvloop/2, get_state/1]). @@ -66,89 +70,105 @@ %% Export for CT -export([set_field/3]). --import(emqx_misc, - [ start_timer/2 - ]). +-import( + emqx_misc, + [start_timer/2] +). -record(state, { - %% TCP/TLS Transport - transport :: esockd:transport(), - %% TCP/TLS Socket - socket :: esockd:socket(), - %% Peername of the connection - peername :: emqx_types:peername(), - %% Sockname of the connection - sockname :: emqx_types:peername(), - %% Sock State - sockstate :: emqx_types:sockstate(), - parse_state :: emqx_frame:parse_state(), - %% Serialize options - serialize :: emqx_frame:serialize_opts(), - %% Channel State - channel :: emqx_channel:channel(), - %% GC State - gc_state :: maybe(emqx_gc:gc_state()), - %% Stats Timer - stats_timer :: disabled | maybe(reference()), - %% Idle Timeout - idle_timeout :: integer() | infinity, - %% Idle Timer - idle_timer :: maybe(reference()), - %% Zone name - zone :: atom(), - %% Listener Type and Name - listener :: {Type::atom(), Name::atom()}, + %% TCP/TLS Transport + transport :: esockd:transport(), + %% TCP/TLS Socket + socket :: esockd:socket(), + %% Peername of the connection + peername :: emqx_types:peername(), + %% Sockname of the connection + sockname :: emqx_types:peername(), + %% Sock State + sockstate :: emqx_types:sockstate(), + parse_state :: emqx_frame:parse_state(), + %% Serialize options + serialize :: emqx_frame:serialize_opts(), + %% Channel State + channel :: emqx_channel:channel(), + %% GC State + gc_state :: maybe(emqx_gc:gc_state()), + %% Stats Timer + stats_timer :: disabled | maybe(reference()), + %% Idle Timeout + idle_timeout :: integer() | infinity, + %% Idle Timer + idle_timer :: maybe(reference()), + %% Zone name + zone :: atom(), + %% Listener Type and Name + listener :: {Type :: atom(), Name :: atom()}, - %% Limiter - limiter :: maybe(limiter()), + %% Limiter + limiter :: maybe(limiter()), - %% cache operation when overload - limiter_cache :: queue:queue(cache()), + %% cache operation when overload + limiter_cache :: queue:queue(cache()), - %% limiter timers - limiter_timer :: undefined | reference() - }). + %% limiter timers + limiter_timer :: undefined | reference() +}). --record(retry, { types :: list(limiter_type()) - , data :: any() - , next :: check_succ_handler() - }). +-record(retry, { + types :: list(limiter_type()), + data :: any(), + next :: check_succ_handler() +}). --record(cache, { need :: list({pos_integer(), limiter_type()}) - , data :: any() - , next :: check_succ_handler() - }). +-record(cache, { + need :: list({pos_integer(), limiter_type()}), + data :: any(), + next :: check_succ_handler() +}). --type(state() :: #state{}). +-type state() :: #state{}. -type cache() :: #cache{}. -define(ACTIVE_N, 100). --define(INFO_KEYS, - [ socktype - , peername - , sockname - , sockstate - ]). +-define(INFO_KEYS, [ + socktype, + peername, + sockname, + sockstate +]). --define(SOCK_STATS, - [ recv_oct - , recv_cnt - , send_oct - , send_cnt - , send_pend - ]). +-define(SOCK_STATS, [ + recv_oct, + recv_cnt, + send_oct, + send_cnt, + send_pend +]). -define(ENABLED(X), (X =/= undefined)). 
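Side note on ?SOCK_STATS above: these are the standard inet statistics keys, so the same counters can be read from any TCP socket with inet:getstat/2. A standalone sketch (hypothetical host and port, not part of the diff):

    %% Read the counters behind ?SOCK_STATS from a plain gen_tcp socket.
    {ok, Sock}  = gen_tcp:connect("127.0.0.1", 1883, [binary, {active, false}]),
    {ok, Stats} = inet:getstat(Sock, [recv_oct, recv_cnt, send_oct, send_cnt, send_pend]),
    io:format("socket stats: ~p~n", [Stats]),
    ok = gen_tcp:close(Sock).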
-define(ALARM_TCP_CONGEST(Channel), - list_to_binary(io_lib:format("mqtt_conn/congested/~ts/~ts", - [emqx_channel:info(clientid, Channel), - emqx_channel:info(username, Channel)]))). + list_to_binary( + io_lib:format( + "mqtt_conn/congested/~ts/~ts", + [ + emqx_channel:info(clientid, Channel), + emqx_channel:info(username, Channel) + ] + ) + ) +). -define(ALARM_CONN_INFO_KEYS, [ - socktype, sockname, peername, - clientid, username, proto_name, proto_ver, connected_at + socktype, + sockname, + peername, + clientid, + username, + proto_name, + proto_ver, + connected_at ]). -define(ALARM_SOCK_STATS_KEYS, [send_pend, recv_cnt, recv_oct, send_cnt, send_oct]). -define(ALARM_SOCK_OPTS_KEYS, [high_watermark, high_msgq_watermark, sndbuf, recbuf, buffer]). @@ -159,17 +179,22 @@ -define(EMPTY_QUEUE, {[], []}). -dialyzer({no_match, [info/2]}). --dialyzer({nowarn_function, [ init/4 - , init_state/3 - , run_loop/2 - , system_terminate/4 - , system_code_change/4 - ]}). +-dialyzer( + {nowarn_function, [ + init/4, + init_state/3, + run_loop/2, + system_terminate/4, + system_code_change/4 + ]} +). --spec(start_link(esockd:transport(), - esockd:socket() | {pid(), quicer:connection_handler()}, - emqx_channel:opts()) - -> {ok, pid()}). +-spec start_link( + esockd:transport(), + esockd:socket() | {pid(), quicer:connection_handler()}, + emqx_channel:opts() +) -> + {ok, pid()}. start_link(Transport, Socket, Options) -> Args = [self(), Transport, Socket, Options], CPid = proc_lib:spawn_link(?MODULE, init, Args), @@ -180,13 +205,14 @@ start_link(Transport, Socket, Options) -> %%-------------------------------------------------------------------- %% @doc Get infos of the connection/channel. --spec(info(pid() | state()) -> emqx_types:infos()). +-spec info(pid() | state()) -> emqx_types:infos(). info(CPid) when is_pid(CPid) -> call(CPid, info); info(State = #state{channel = Channel}) -> ChanInfo = emqx_channel:info(Channel), SockInfo = maps:from_list( - info(?INFO_KEYS, State)), + info(?INFO_KEYS, State) + ), ChanInfo#{sockinfo => SockInfo}. info(Keys, State) when is_list(Keys) -> @@ -207,16 +233,19 @@ info(limiter_timer, #state{limiter_timer = Timer}) -> Timer. %% @doc Get stats of the connection/channel. --spec(stats(pid() | state()) -> emqx_types:stats()). +-spec stats(pid() | state()) -> emqx_types:stats(). stats(CPid) when is_pid(CPid) -> call(CPid, stats); -stats(#state{transport = Transport, - socket = Socket, - channel = Channel}) -> - SockStats = case Transport:getstat(Socket, ?SOCK_STATS) of - {ok, Ss} -> Ss; - {error, _} -> [] - end, +stats(#state{ + transport = Transport, + socket = Socket, + channel = Channel +}) -> + SockStats = + case Transport:getstat(Socket, ?SOCK_STATS) of + {ok, Ss} -> Ss; + {error, _} -> [] + end, ChanStats = emqx_channel:stats(Channel), ProcStats = emqx_misc:proc_stats(), lists:append([SockStats, ChanStats, ProcStats]). @@ -236,11 +265,12 @@ async_set_keepalive(Idle, Interval, Probes) -> async_set_keepalive(self(), Idle, Interval, Probes). async_set_keepalive(Pid, Idle, Interval, Probes) -> - Options = [ {keepalive, true} - , {raw, 6, 4, <>} - , {raw, 6, 5, <>} - , {raw, 6, 6, <>} - ], + Options = [ + {keepalive, true}, + {raw, 6, 4, <>}, + {raw, 6, 5, <>}, + {raw, 6, 6, <>} + ], async_set_socket_options(Pid, Options). %% @doc Set custom socket options. @@ -274,17 +304,21 @@ init(Parent, Transport, RawSocket, Options) -> exit_on_sock_error(Reason) end. 
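Reader note for the async_set_keepalive/4 hunk above: the bare `<>` placeholders are a rendering artifact of this document; in the source they are 32-bit native-endian binaries carrying the three values, most likely `<<Idle:32/native>>`, `<<Interval:32/native>>` and `<<Probes:32/native>>`. The `{raw, 6, N, Value}` tuples are Linux setsockopt parameters: protocol level 6 is IPPROTO_TCP, and option numbers 4, 5 and 6 are TCP_KEEPIDLE, TCP_KEEPINTVL and TCP_KEEPCNT. A hedged usage sketch (assumes a Linux node; ConnPid is a hypothetical connection process):

    %% Tighten TCP keepalive probing for one connection process:
    %% 60 s idle before the first probe, 30 s between probes, 5 probes.
    ok = emqx_connection:async_set_keepalive(ConnPid, 60, 30, 5).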
-init_state(Transport, Socket, - #{zone := Zone, limiter := LimiterCfg, listener := Listener} = Opts) -> +init_state( + Transport, + Socket, + #{zone := Zone, limiter := LimiterCfg, listener := Listener} = Opts +) -> {ok, Peername} = Transport:ensure_ok_or_exit(peername, [Socket]), {ok, Sockname} = Transport:ensure_ok_or_exit(sockname, [Socket]), Peercert = Transport:ensure_ok_or_exit(peercert, [Socket]), - ConnInfo = #{socktype => Transport:type(Socket), - peername => Peername, - sockname => Sockname, - peercert => Peercert, - conn_mod => ?MODULE - }, + ConnInfo = #{ + socktype => Transport:type(Socket), + peername => Peername, + sockname => Sockname, + peercert => Peercert, + conn_mod => ?MODULE + }, LimiterTypes = [?LIMITER_BYTES_IN, ?LIMITER_MESSAGE_IN], Limiter = emqx_limiter_container:get_limiter_by_names(LimiterTypes, LimiterCfg), @@ -296,54 +330,67 @@ init_state(Transport, Socket, ParseState = emqx_frame:initial_parse_state(FrameOpts), Serialize = emqx_frame:serialize_opts(), Channel = emqx_channel:init(ConnInfo, Opts), - GcState = case emqx_config:get_zone_conf(Zone, [force_gc]) of - #{enable := false} -> undefined; - GcPolicy -> emqx_gc:init(GcPolicy) - end, - StatsTimer = case emqx_config:get_zone_conf(Zone, [stats, enable]) of - true -> undefined; - false -> disabled - end, + GcState = + case emqx_config:get_zone_conf(Zone, [force_gc]) of + #{enable := false} -> undefined; + GcPolicy -> emqx_gc:init(GcPolicy) + end, + StatsTimer = + case emqx_config:get_zone_conf(Zone, [stats, enable]) of + true -> undefined; + false -> disabled + end, IdleTimeout = emqx_channel:get_mqtt_conf(Zone, idle_timeout), IdleTimer = start_timer(IdleTimeout, idle_timeout), - #state{transport = Transport, - socket = Socket, - peername = Peername, - sockname = Sockname, - sockstate = idle, - limiter = Limiter, - parse_state = ParseState, - serialize = Serialize, - channel = Channel, - gc_state = GcState, - stats_timer = StatsTimer, - idle_timeout = IdleTimeout, - idle_timer = IdleTimer, - zone = Zone, - listener = Listener, - limiter_cache = queue:new(), - limiter_timer = undefined - }. + #state{ + transport = Transport, + socket = Socket, + peername = Peername, + sockname = Sockname, + sockstate = idle, + limiter = Limiter, + parse_state = ParseState, + serialize = Serialize, + channel = Channel, + gc_state = GcState, + stats_timer = StatsTimer, + idle_timeout = IdleTimeout, + idle_timer = IdleTimer, + zone = Zone, + listener = Listener, + limiter_cache = queue:new(), + limiter_timer = undefined + }. -run_loop(Parent, State = #state{transport = Transport, - socket = Socket, - peername = Peername, - channel = Channel}) -> +run_loop( + Parent, + State = #state{ + transport = Transport, + socket = Socket, + peername = Peername, + channel = Channel + } +) -> emqx_logger:set_metadata_peername(esockd:format(Peername)), - ShutdownPolicy = emqx_config:get_zone_conf(emqx_channel:info(zone, Channel), - [force_shutdown]), + ShutdownPolicy = emqx_config:get_zone_conf( + emqx_channel:info(zone, Channel), + [force_shutdown] + ), emqx_misc:tune_heap_size(ShutdownPolicy), case activate_socket(State) of - {ok, NState} -> hibernate(Parent, NState); + {ok, NState} -> + hibernate(Parent, NState); {error, Reason} -> ok = Transport:fast_close(Socket), exit_on_sock_error(Reason) end. -spec exit_on_sock_error(any()) -> no_return(). 
-exit_on_sock_error(Reason) when Reason =:= einval; - Reason =:= enotconn; - Reason =:= closed -> +exit_on_sock_error(Reason) when + Reason =:= einval; + Reason =:= enotconn; + Reason =:= closed +-> erlang:exit(normal); exit_on_sock_error(timeout) -> erlang:exit({shutdown, ssl_upgrade_timeout}); @@ -353,20 +400,23 @@ exit_on_sock_error(Reason) -> %%-------------------------------------------------------------------- %% Recv Loop -recvloop(Parent, State = #state{ idle_timeout = IdleTimeout - , zone = Zone - }) -> +recvloop( + Parent, + State = #state{ + idle_timeout = IdleTimeout, + zone = Zone + } +) -> receive Msg -> handle_recv(Msg, Parent, State) - after - IdleTimeout + 100 -> - case emqx_olp:backoff_hibernation(Zone) of - true -> - recvloop(Parent, State); - false -> - hibernate(Parent, cancel_stats_timer(State)) - end + after IdleTimeout + 100 -> + case emqx_olp:backoff_hibernation(Zone) of + true -> + recvloop(Parent, State); + false -> + hibernate(Parent, cancel_stats_timer(State)) + end end. handle_recv({system, From, Request}, Parent, State) -> @@ -395,14 +445,16 @@ wakeup_from_hib(Parent, State) -> -compile({inline, [ensure_stats_timer/2]}). ensure_stats_timer(Timeout, State = #state{stats_timer = undefined}) -> State#state{stats_timer = start_timer(Timeout, emit_stats)}; -ensure_stats_timer(_Timeout, State) -> State. +ensure_stats_timer(_Timeout, State) -> + State. -compile({inline, [cancel_stats_timer/1]}). cancel_stats_timer(State = #state{stats_timer = TRef}) when is_reference(TRef) -> ?tp(debug, cancel_stats_timer, #{}), ok = emqx_misc:cancel_timer(TRef), State#state{stats_timer = undefined}; -cancel_stats_timer(State) -> State. +cancel_stats_timer(State) -> + State. %%-------------------------------------------------------------------- %% Process next Msg @@ -422,22 +474,27 @@ process_msg([Msg | More], State) -> {stop, Reason, NState} end catch - exit : normal -> + exit:normal -> {stop, normal, State}; - exit : shutdown -> + exit:shutdown -> {stop, shutdown, State}; - exit : {shutdown, _} = Shutdown -> + exit:{shutdown, _} = Shutdown -> {stop, Shutdown, State}; - Exception : Context : Stack -> - {stop, #{exception => Exception, - context => Context, - stacktrace => Stack}, State} + Exception:Context:Stack -> + {stop, + #{ + exception => Exception, + context => Context, + stacktrace => Stack + }, + State} end. -compile({inline, [append_msg/2]}). 
append_msg([], Msgs) when is_list(Msgs) -> Msgs; -append_msg([], Msg) -> [Msg]; +append_msg([], Msg) -> + [Msg]; append_msg(Q, Msgs) when is_list(Msgs) -> lists:append(Q, Msgs); append_msg(Q, Msg) -> @@ -458,19 +515,16 @@ handle_msg({'$gen_call', From, Req}, State) -> handle_msg({'$gen_cast', Req}, State) -> NewState = handle_cast(Req, State), {ok, NewState}; - handle_msg({Inet, _Sock, Data}, State) when Inet == tcp; Inet == ssl -> Oct = iolist_size(Data), inc_counter(incoming_bytes, Oct), ok = emqx_metrics:inc('bytes.received', Oct), when_bytes_in(Oct, Data, State); - handle_msg({quic, Data, _Sock, _, _, _}, State) -> Oct = iolist_size(Data), inc_counter(incoming_bytes, Oct), ok = emqx_metrics:inc('bytes.received', Oct), when_bytes_in(Oct, Data, State); - handle_msg(check_cache, #state{limiter_cache = Cache} = State) -> case queue:peek(Cache) of empty -> @@ -479,32 +533,32 @@ handle_msg(check_cache, #state{limiter_cache = Cache} = State) -> State2 = State#state{limiter_cache = queue:drop(Cache)}, check_limiter(Needs, Data, Next, [check_cache], State2) end; - -handle_msg({incoming, Packet = ?CONNECT_PACKET(ConnPkt)}, - State = #state{idle_timer = IdleTimer}) -> +handle_msg( + {incoming, Packet = ?CONNECT_PACKET(ConnPkt)}, + State = #state{idle_timer = IdleTimer} +) -> ok = emqx_misc:cancel_timer(IdleTimer), Serialize = emqx_frame:serialize_opts(ConnPkt), - NState = State#state{serialize = Serialize, - idle_timer = undefined - }, + NState = State#state{ + serialize = Serialize, + idle_timer = undefined + }, handle_incoming(Packet, NState); - handle_msg({incoming, Packet}, State) -> handle_incoming(Packet, State); - handle_msg({outgoing, Packets}, State) -> handle_outgoing(Packets, State); - -handle_msg({Error, _Sock, Reason}, State) - when Error == tcp_error; Error == ssl_error -> +handle_msg({Error, _Sock, Reason}, State) when + Error == tcp_error; Error == ssl_error +-> handle_info({sock_error, Reason}, State); - -handle_msg({Closed, _Sock}, State) - when Closed == tcp_closed; Closed == ssl_closed -> +handle_msg({Closed, _Sock}, State) when + Closed == tcp_closed; Closed == ssl_closed +-> handle_info({sock_closed, Closed}, close_socket(State)); - -handle_msg({Passive, _Sock}, State) - when Passive == tcp_passive; Passive == ssl_passive; Passive =:= quic_passive -> +handle_msg({Passive, _Sock}, State) when + Passive == tcp_passive; Passive == ssl_passive; Passive =:= quic_passive +-> %% In Stats Pubs = emqx_pd:reset_counter(incoming_pubs), Bytes = emqx_pd:reset_counter(incoming_bytes), @@ -512,13 +566,13 @@ handle_msg({Passive, _Sock}, State) %% Run GC and Check OOM NState1 = check_oom(run_gc(InStats, State)), handle_info(activate_socket, NState1); - -handle_msg(Deliver = {deliver, _Topic, _Msg}, - #state{listener = {Type, Listener}} = State) -> +handle_msg( + Deliver = {deliver, _Topic, _Msg}, + #state{listener = {Type, Listener}} = State +) -> ActiveN = get_active_n(Type, Listener), Delivers = [Deliver | emqx_misc:drain_deliver(ActiveN)], with_channel(handle_deliver, [Delivers], State); - %% Something sent handle_msg({inet_reply, _Sock, ok}, State = #state{listener = {Type, Listener}}) -> case emqx_pd:get_counter(outgoing_pubs) > get_active_n(Type, Listener) of @@ -527,41 +581,33 @@ handle_msg({inet_reply, _Sock, ok}, State = #state{listener = {Type, Listener}}) Bytes = emqx_pd:reset_counter(outgoing_bytes), OutStats = #{cnt => Pubs, oct => Bytes}, {ok, check_oom(run_gc(OutStats, State))}; - false -> ok + false -> + ok end; - handle_msg({inet_reply, _Sock, {error, Reason}}, State) -> 
handle_info({sock_error, Reason}, State); - handle_msg({connack, ConnAck}, State) -> handle_outgoing(ConnAck, State); - handle_msg({close, Reason}, State) -> ?TRACE("SOCKET", "socket_force_closed", #{reason => Reason}), handle_info({sock_closed, Reason}, close_socket(State)); - handle_msg({event, connected}, State = #state{channel = Channel}) -> ClientId = emqx_channel:info(clientid, Channel), emqx_cm:insert_channel_info(ClientId, info(State), stats(State)); - handle_msg({event, disconnected}, State = #state{channel = Channel}) -> ClientId = emqx_channel:info(clientid, Channel), emqx_cm:set_chan_info(ClientId, info(State)), emqx_cm:connection_closed(ClientId), {ok, State}; - handle_msg({event, _Other}, State = #state{channel = Channel}) -> ClientId = emqx_channel:info(clientid, Channel), emqx_cm:set_chan_info(ClientId, info(State)), emqx_cm:set_chan_stats(ClientId, stats(State)), {ok, State}; - handle_msg({timeout, TRef, TMsg}, State) -> handle_timeout(TRef, TMsg, State); - handle_msg(Shutdown = {shutdown, _Reason}, State) -> stop(Shutdown, State); - handle_msg(Msg, State) -> handle_info(Msg, State). @@ -569,8 +615,14 @@ handle_msg(Msg, State) -> %% Terminate -spec terminate(any(), state()) -> no_return(). -terminate(Reason, State = #state{channel = Channel, transport = Transport, - socket = Socket}) -> +terminate( + Reason, + State = #state{ + channel = Channel, + transport = Transport, + socket = Socket + } +) -> try Channel1 = emqx_channel:set_conn_state(disconnected, Channel), emqx_congestion:cancel_alarms(Socket, Transport, Channel1), @@ -578,7 +630,7 @@ terminate(Reason, State = #state{channel = Channel, transport = Transport, close_socket_ok(State), ?TRACE("SOCKET", "tcp_socket_terminated", #{reason => Reason}) catch - E : C : S -> + E:C:S -> ?tp(warning, unclean_terminate, #{exception => E, context => C, stacktrace => S}) end, ?tp(info, terminate, #{reason => Reason}), @@ -590,10 +642,11 @@ close_socket_ok(State) -> ok. %% tell truth about the original exception -maybe_raise_exception(#{exception := Exception, - context := Context, - stacktrace := Stacktrace - }) -> +maybe_raise_exception(#{ + exception := Exception, + context := Context, + stacktrace := Stacktrace +}) -> erlang:raise(Exception, Context, Stacktrace); maybe_raise_exception(Reason) -> exit(Reason). @@ -617,17 +670,14 @@ system_get_state(State) -> {ok, State}. 
handle_call(_From, info, State) -> {reply, info(State), State}; - handle_call(_From, stats, State) -> {reply, stats(State), State}; - handle_call(_From, {ratelimit, Changes}, State = #state{limiter = Limiter}) -> Fun = fun({Type, Bucket}, Acc) -> - emqx_limiter_container:update_by_name(Type, Bucket, Acc) - end, + emqx_limiter_container:update_by_name(Type, Bucket, Acc) + end, Limiter2 = lists:foldl(Fun, Limiter, Changes), {reply, ok, State#state{limiter = Limiter2}}; - handle_call(_From, Req, State = #state{channel = Channel}) -> case emqx_channel:handle_call(Req, Channel) of {reply, Reply, NChannel} -> @@ -645,22 +695,33 @@ handle_call(_From, Req, State = #state{channel = Channel}) -> handle_timeout(_TRef, idle_timeout, State) -> shutdown(idle_timeout, State); - handle_timeout(_TRef, limit_timeout, State) -> retry_limiter(State); - -handle_timeout(_TRef, emit_stats, State = #state{channel = Channel, transport = Transport, - socket = Socket}) -> +handle_timeout( + _TRef, + emit_stats, + State = #state{ + channel = Channel, + transport = Transport, + socket = Socket + } +) -> emqx_congestion:maybe_alarm_conn_congestion(Socket, Transport, Channel), ClientId = emqx_channel:info(clientid, Channel), emqx_cm:set_chan_stats(ClientId, stats(State)), {ok, State#state{stats_timer = undefined}}; - -handle_timeout(TRef, keepalive, State = #state{transport = Transport, - socket = Socket, - channel = Channel})-> +handle_timeout( + TRef, + keepalive, + State = #state{ + transport = Transport, + socket = Socket, + channel = Channel + } +) -> case emqx_channel:info(conn_state, Channel) of - disconnected -> {ok, State}; + disconnected -> + {ok, State}; _ -> case Transport:getstat(Socket, [recv_oct]) of {ok, [{recv_oct, RecvOct}]} -> @@ -669,7 +730,6 @@ handle_timeout(TRef, keepalive, State = #state{transport = Transport, handle_info({sock_error, Reason}, State) end end; - handle_timeout(TRef, Msg, State) -> with_channel(handle_timeout, [TRef, Msg], State). @@ -679,11 +739,13 @@ handle_timeout(TRef, Msg, State) -> when_bytes_in(Oct, Data, State) -> {Packets, NState} = parse_incoming(Data, [], State), Len = erlang:length(Packets), - check_limiter([{Oct, ?LIMITER_BYTES_IN}, {Len, ?LIMITER_MESSAGE_IN}], - Packets, - fun next_incoming_msgs/3, - [], - NState). + check_limiter( + [{Oct, ?LIMITER_BYTES_IN}, {Len, ?LIMITER_MESSAGE_IN}], + Packets, + fun next_incoming_msgs/3, + [], + NState + ). -compile({inline, [next_incoming_msgs/3]}). 
next_incoming_msgs([Packet], Msgs, State) -> @@ -695,7 +757,6 @@ next_incoming_msgs(Packets, Msgs, State) -> parse_incoming(<<>>, Packets, State) -> {Packets, State}; - parse_incoming(Data, Packets, State = #state{parse_state = ParseState}) -> try emqx_frame:parse(Data, ParseState) of {more, NParseState} -> @@ -704,20 +765,22 @@ parse_incoming(Data, Packets, State = #state{parse_state = ParseState}) -> NState = State#state{parse_state = NParseState}, parse_incoming(Rest, [Packet | Packets], NState) catch - throw : ?FRAME_PARSE_ERROR(Reason) -> - ?SLOG(info, #{ reason => Reason - , at_state => emqx_frame:describe_state(ParseState) - , input_bytes => Data - , parsed_packets => Packets - }), + throw:?FRAME_PARSE_ERROR(Reason) -> + ?SLOG(info, #{ + reason => Reason, + at_state => emqx_frame:describe_state(ParseState), + input_bytes => Data, + parsed_packets => Packets + }), {[{frame_error, Reason} | Packets], State}; - error : Reason : Stacktrace -> - ?SLOG(error, #{ at_state => emqx_frame:describe_state(ParseState) - , input_bytes => Data - , parsed_packets => Packets - , reason => Reason - , stacktrace => Stacktrace - }), + error:Reason:Stacktrace -> + ?SLOG(error, #{ + at_state => emqx_frame:describe_state(ParseState), + input_bytes => Data, + parsed_packets => Packets, + reason => Reason, + stacktrace => Stacktrace + }), {[{frame_error, Reason} | Packets], State} end. @@ -728,7 +791,6 @@ handle_incoming(Packet, State) when is_record(Packet, mqtt_packet) -> ok = inc_incoming_stats(Packet), ?TRACE("MQTT", "mqtt_packet_received", #{packet => Packet}), with_channel(handle_in, [Packet], State); - handle_incoming(FrameError, State) -> with_channel(handle_in, [FrameError], State). @@ -737,7 +799,8 @@ handle_incoming(FrameError, State) -> with_channel(Fun, Args, State = #state{channel = Channel}) -> case erlang:apply(emqx_channel, Fun, Args ++ [Channel]) of - ok -> {ok, State}; + ok -> + {ok, State}; {ok, NChannel} -> {ok, State#state{channel = NChannel}}; {ok, Replies, NChannel} -> @@ -755,36 +818,40 @@ with_channel(Fun, Args, State = #state{channel = Channel}) -> handle_outgoing(Packets, State) when is_list(Packets) -> send(lists:map(serialize_and_inc_stats_fun(State), Packets), State); - handle_outgoing(Packet, State) -> send((serialize_and_inc_stats_fun(State))(Packet), State). serialize_and_inc_stats_fun(#state{serialize = Serialize}) -> fun(Packet) -> try emqx_frame:serialize_pkt(Packet, Serialize) of - <<>> -> ?SLOG(warning, #{ - msg => "packet_is_discarded", - reason => "frame_is_too_large", - packet => emqx_packet:format(Packet, hidden) - }), - ok = emqx_metrics:inc('delivery.dropped.too_large'), - ok = emqx_metrics:inc('delivery.dropped'), - ok = inc_outgoing_stats({error, message_too_large}), - <<>>; + <<>> -> + ?SLOG(warning, #{ + msg => "packet_is_discarded", + reason => "frame_is_too_large", + packet => emqx_packet:format(Packet, hidden) + }), + ok = emqx_metrics:inc('delivery.dropped.too_large'), + ok = emqx_metrics:inc('delivery.dropped'), + ok = inc_outgoing_stats({error, message_too_large}), + <<>>; Data -> - ?TRACE("MQTT", "mqtt_packet_sent", #{packet => Packet}), - ok = inc_outgoing_stats(Packet), - Data + ?TRACE("MQTT", "mqtt_packet_sent", #{packet => Packet}), + ok = inc_outgoing_stats(Packet), + Data catch %% Maybe Never happen. 
- throw : ?FRAME_SERIALIZE_ERROR(Reason) -> - ?SLOG(info, #{ reason => Reason - , input_packet => Packet}), + throw:?FRAME_SERIALIZE_ERROR(Reason) -> + ?SLOG(info, #{ + reason => Reason, + input_packet => Packet + }), erlang:error(?FRAME_SERIALIZE_ERROR(Reason)); - error : Reason : Stacktrace -> - ?SLOG(error, #{ input_packet => Packet - , exception => Reason - , stacktrace => Stacktrace}), + error:Reason:Stacktrace -> + ?SLOG(error, #{ + input_packet => Packet, + exception => Reason, + stacktrace => Stacktrace + }), erlang:error(frame_serialize_error) end end. @@ -792,14 +859,15 @@ serialize_and_inc_stats_fun(#state{serialize = Serialize}) -> %%-------------------------------------------------------------------- %% Send data --spec(send(iodata(), state()) -> ok). +-spec send(iodata(), state()) -> ok. send(IoData, #state{transport = Transport, socket = Socket, channel = Channel}) -> Oct = iolist_size(IoData), ok = emqx_metrics:inc('bytes.sent', Oct), inc_counter(outgoing_bytes, Oct), emqx_congestion:maybe_alarm_conn_congestion(Socket, Transport, Channel), case Transport:async_send(Socket, IoData, []) of - ok -> ok; + ok -> + ok; Error = {error, _Reason} -> %% Send an inet_reply to postpone handling the error self() ! {inet_reply, Socket, Error}, @@ -819,33 +887,31 @@ handle_info(activate_socket, State = #state{sockstate = OldSst}) -> {error, Reason} -> handle_info({sock_error, Reason}, State) end; - handle_info({sock_error, Reason}, State) -> case Reason =/= closed andalso Reason =/= einval of true -> ?SLOG(warning, #{msg => "socket_error", reason => Reason}); false -> ok end, handle_info({sock_closed, Reason}, close_socket(State)); - handle_info({quic, peer_send_shutdown, _Stream}, State) -> handle_info({sock_closed, force}, close_socket(State)); - handle_info({quic, closed, _Channel, ReasonFlag}, State) -> handle_info({sock_closed, ReasonFlag}, State); - handle_info({quic, closed, _Stream}, State) -> handle_info({sock_closed, force}, State); - handle_info(Info, State) -> with_channel(handle_info, [Info], State). %%-------------------------------------------------------------------- %% Handle Info -handle_cast({async_set_socket_options, Opts}, - State = #state{transport = Transport, - socket = Socket - }) -> +handle_cast( + {async_set_socket_options, Opts}, + State = #state{ + transport = Transport, + socket = Socket + } +) -> case Transport:setopts(Socket, Opts) of ok -> ?tp(info, "custom_socket_options_successfully", #{opts => Opts}); Err -> ?tp(error, "failed_to_set_custom_socket_optionn", #{reason => Err}) @@ -861,43 +927,55 @@ handle_cast(Req, State) -> -type limiter_type() :: emqx_limiter_container:limiter_type(). -type limiter() :: emqx_limiter_container:limiter(). -type check_succ_handler() :: - fun((any(), list(any()), state()) -> _). + fun((any(), list(any()), state()) -> _). %% check limiters, if succeeded call WhenOk with Data and Msgs %% Data is the data to be processed %% Msgs include the next msg which after Data processed --spec check_limiter(list({pos_integer(), limiter_type()}), - any(), - check_succ_handler(), - list(any()), - state()) -> _. -check_limiter(Needs, - Data, - WhenOk, - Msgs, - #state{limiter = Limiter, - limiter_timer = LimiterTimer, - limiter_cache = Cache} = State) when Limiter =/= undefined -> +-spec check_limiter( + list({pos_integer(), limiter_type()}), + any(), + check_succ_handler(), + list(any()), + state() +) -> _. 
+check_limiter( + Needs, + Data, + WhenOk, + Msgs, + #state{ + limiter = Limiter, + limiter_timer = LimiterTimer, + limiter_cache = Cache + } = State +) when Limiter =/= undefined -> case LimiterTimer of undefined -> case emqx_limiter_container:check_list(Needs, Limiter) of {ok, Limiter2} -> WhenOk(Data, Msgs, State#state{limiter = Limiter2}); {pause, Time, Limiter2} -> - ?SLOG(warning, #{msg => "pause_time_dueto_rate_limit", - needs => Needs, - time_in_ms => Time}), + ?SLOG(warning, #{ + msg => "pause_time_dueto_rate_limit", + needs => Needs, + time_in_ms => Time + }), - Retry = #retry{types = [Type || {_, Type} <- Needs], - data = Data, - next = WhenOk}, + Retry = #retry{ + types = [Type || {_, Type} <- Needs], + data = Data, + next = WhenOk + }, Limiter3 = emqx_limiter_container:set_retry_context(Retry, Limiter2), TRef = start_timer(Time, limit_timeout), - {ok, State#state{limiter = Limiter3, - limiter_timer = TRef}}; + {ok, State#state{ + limiter = Limiter3, + limiter_timer = TRef + }}; {drop, Limiter2} -> {ok, State#state{limiter = Limiter2}} end; @@ -909,54 +987,62 @@ check_limiter(Needs, New = #cache{need = Needs, data = Data, next = WhenOk}, {ok, State#state{limiter_cache = queue:in(New, Cache)}} end; - check_limiter(_, Data, WhenOk, Msgs, State) -> WhenOk(Data, Msgs, State). %% try to perform a retry -spec retry_limiter(state()) -> _. retry_limiter(#state{limiter = Limiter} = State) -> - #retry{types = Types, data = Data, next = Next} - = emqx_limiter_container:get_retry_context(Limiter), + #retry{types = Types, data = Data, next = Next} = + emqx_limiter_container:get_retry_context(Limiter), case emqx_limiter_container:retry_list(Types, Limiter) of - {ok, Limiter2} -> - Next(Data, - [check_cache], - State#state{ limiter = Limiter2 - , limiter_timer = undefined - }); - {pause, Time, Limiter2} -> - ?SLOG(warning, #{msg => "pause_time_dueto_rate_limit", - types => Types, - time_in_ms => Time}), + {ok, Limiter2} -> + Next( + Data, + [check_cache], + State#state{ + limiter = Limiter2, + limiter_timer = undefined + } + ); + {pause, Time, Limiter2} -> + ?SLOG(warning, #{ + msg => "pause_time_dueto_rate_limit", + types => Types, + time_in_ms => Time + }), - TRef = start_timer(Time, limit_timeout), + TRef = start_timer(Time, limit_timeout), - {ok, State#state{limiter = Limiter2, - limiter_timer = TRef}} + {ok, State#state{ + limiter = Limiter2, + limiter_timer = TRef + }} end. %%-------------------------------------------------------------------- %% Run GC and Check OOM run_gc(Stats, State = #state{gc_state = GcSt, zone = Zone}) -> - case ?ENABLED(GcSt) andalso not emqx_olp:backoff_gc(Zone) - andalso emqx_gc:run(Stats, GcSt) + case + ?ENABLED(GcSt) andalso not emqx_olp:backoff_gc(Zone) andalso + emqx_gc:run(Stats, GcSt) of false -> State; - {_IsGC, GcSt1} -> - State#state{gc_state = GcSt1} + {_IsGC, GcSt1} -> State#state{gc_state = GcSt1} end. check_oom(State = #state{channel = Channel}) -> ShutdownPolicy = emqx_config:get_zone_conf( - emqx_channel:info(zone, Channel), [force_shutdown]), + emqx_channel:info(zone, Channel), [force_shutdown] + ), ?tp(debug, check_oom, #{policy => ShutdownPolicy}), case emqx_misc:check_oom(ShutdownPolicy) of {shutdown, Reason} -> %% triggers terminate/2 callback immediately erlang:exit({shutdown, Reason}); - _ -> ok + _ -> + ok end, State. @@ -964,28 +1050,33 @@ check_oom(State = #state{channel = Channel}) -> %% Activate Socket %% TODO: maybe we could keep socket passive for receiving socket closed event. -compile({inline, [activate_socket/1]}). 
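The activate_socket/1 clauses in the next hunk re-arm the socket with `{active, ActiveN}`; together with the tcp_passive/ssl_passive branch of handle_msg/2 earlier in this file, that is the standard `{active, N}` backpressure loop. A reduced standalone sketch of that loop with plain gen_tcp (handle/1 is a hypothetical callback, not EMQX API):

    %% {active, N}: the VM delivers at most N messages, then sends
    %% {tcp_passive, Sock} so the owner can run GC/limit checks and re-arm.
    loop(Sock) ->
        ok = inet:setopts(Sock, [{active, 100}]),
        passive_loop(Sock).

    passive_loop(Sock) ->
        receive
            {tcp, Sock, Data}   -> handle(Data), passive_loop(Sock);
            {tcp_passive, Sock} -> loop(Sock);    %% re-arm after N messages
            {tcp_closed, Sock}  -> ok
        end.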
-activate_socket(#state{limiter_timer = Timer} = State) - when Timer =/= undefined -> +activate_socket(#state{limiter_timer = Timer} = State) when + Timer =/= undefined +-> {ok, State#state{sockstate = blocked}}; - -activate_socket(#state{transport = Transport, - sockstate = SockState, - socket = Socket, - listener = {Type, Listener}} = State) - when SockState =/= closed -> +activate_socket( + #state{ + transport = Transport, + sockstate = SockState, + socket = Socket, + listener = {Type, Listener} + } = State +) when + SockState =/= closed +-> ActiveN = get_active_n(Type, Listener), case Transport:setopts(Socket, [{active, ActiveN}]) of ok -> {ok, State#state{sockstate = running}}; Error -> Error end; - activate_socket(State) -> {ok, State}. %%-------------------------------------------------------------------- %% Close Socket -close_socket(State = #state{sockstate = closed}) -> State; +close_socket(State = #state{sockstate = closed}) -> + State; close_socket(State = #state{transport = Transport, socket = Socket}) -> ok = Transport:fast_close(Socket), State#state{sockstate = closed}. @@ -1033,7 +1124,6 @@ inc_qos_stats(Type, Packet) -> inc_qos_stats_key(send_msg, ?QOS_0) -> 'send_msg.qos0'; inc_qos_stats_key(send_msg, ?QOS_1) -> 'send_msg.qos1'; inc_qos_stats_key(send_msg, ?QOS_2) -> 'send_msg.qos2'; - inc_qos_stats_key(recv_msg, ?QOS_0) -> 'recv_msg.qos0'; inc_qos_stats_key(recv_msg, ?QOS_1) -> 'recv_msg.qos1'; inc_qos_stats_key(recv_msg, ?QOS_2) -> 'recv_msg.qos2'; @@ -1075,13 +1165,16 @@ inc_counter(Key, Inc) -> set_field(Name, Value, State) -> Pos = emqx_misc:index_of(Name, record_info(fields, state)), - setelement(Pos+1, State, Value). + setelement(Pos + 1, State, Value). get_state(Pid) -> State = sys:get_state(Pid), - maps:from_list(lists:zip(record_info(fields, state), - tl(tuple_to_list(State)))). + maps:from_list( + lists:zip( + record_info(fields, state), + tl(tuple_to_list(State)) + ) + ). get_active_n(quic, _Listener) -> ?ACTIVE_N; -get_active_n(Type, Listener) -> - emqx_config:get_listener_conf(Type, Listener, [tcp, active_n]). +get_active_n(Type, Listener) -> emqx_config:get_listener_conf(Type, Listener, [tcp, active_n]). diff --git a/apps/emqx/src/emqx_ctl.erl b/apps/emqx/src/emqx_ctl.erl index 0bfa17350..448c47d74 100644 --- a/apps/emqx/src/emqx_ctl.erl +++ b/apps/emqx/src/emqx_ctl.erl @@ -21,67 +21,71 @@ -include("types.hrl"). -include("logger.hrl"). - -export([start_link/0, stop/0]). --export([ register_command/2 - , register_command/3 - , unregister_command/1 - ]). +-export([ + register_command/2, + register_command/3, + unregister_command/1 +]). --export([ run_command/1 - , run_command/2 - , lookup_command/1 - , get_commands/0 - ]). +-export([ + run_command/1, + run_command/2, + lookup_command/1, + get_commands/0 +]). --export([ print/1 - , print/2 - , usage/1 - , usage/2 - ]). +-export([ + print/1, + print/2, + usage/1, + usage/2 +]). %% Exports mainly for test cases --export([ format/2 - , format_usage/1 - , format_usage/2 - ]). +-export([ + format/2, + format_usage/1, + format_usage/2 +]). %% gen_server callbacks --export([ init/1 - , handle_call/3 - , handle_cast/2 - , handle_info/2 - , terminate/2 - , code_change/3 - ]). +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2, + code_change/3 +]). -record(state, {seq = 0}). --type(cmd() :: atom()). --type(cmd_params() :: string()). --type(cmd_descr() :: string()). --type(cmd_usage() :: {cmd_params(), cmd_descr()}). +-type cmd() :: atom(). +-type cmd_params() :: string(). 
+-type cmd_descr() :: string(). +-type cmd_usage() :: {cmd_params(), cmd_descr()}. -define(SERVER, ?MODULE). -define(CMD_TAB, emqx_command). --spec(start_link() -> startlink_ret()). +-spec start_link() -> startlink_ret(). start_link() -> gen_server:start_link({local, ?SERVER}, ?MODULE, [], []). --spec(stop() -> ok). +-spec stop() -> ok. stop() -> gen_server:stop(?SERVER). --spec(register_command(cmd(), {module(), atom()}) -> ok). +-spec register_command(cmd(), {module(), atom()}) -> ok. register_command(Cmd, MF) when is_atom(Cmd) -> register_command(Cmd, MF, []). --spec(register_command(cmd(), {module(), atom()}, list()) -> ok). +-spec register_command(cmd(), {module(), atom()}, list()) -> ok. register_command(Cmd, MF, Opts) when is_atom(Cmd) -> call({register_command, Cmd, MF, Opts}). --spec(unregister_command(cmd()) -> ok). +-spec unregister_command(cmd()) -> ok. unregister_command(Cmd) when is_atom(Cmd) -> cast({unregister_command, Cmd}). @@ -89,14 +93,15 @@ call(Req) -> gen_server:call(?SERVER, Req). cast(Msg) -> gen_server:cast(?SERVER, Msg). --spec(run_command(list(string())) -> ok | {error, term()}). +-spec run_command(list(string())) -> ok | {error, term()}. run_command([]) -> run_command(help, []); run_command([Cmd | Args]) -> run_command(list_to_atom(Cmd), Args). --spec(run_command(cmd(), list(string())) -> ok | {error, term()}). -run_command(help, []) -> help(); +-spec run_command(cmd(), list(string())) -> ok | {error, term()}. +run_command(help, []) -> + help(); run_command(Cmd, Args) when is_atom(Cmd) -> case lookup_command(Cmd) of [{Mod, Fun}] -> @@ -104,24 +109,26 @@ run_command(Cmd, Args) when is_atom(Cmd) -> _ -> ok catch _:Reason:Stacktrace -> - ?SLOG(error, #{msg => "ctl_command_crashed", - stacktrace => Stacktrace, - reason => Reason - }), + ?SLOG(error, #{ + msg => "ctl_command_crashed", + stacktrace => Stacktrace, + reason => Reason + }), {error, Reason} end; [] -> - help(), {error, cmd_not_found} + help(), + {error, cmd_not_found} end. --spec(lookup_command(cmd()) -> [{module(), atom()}]). +-spec lookup_command(cmd()) -> [{module(), atom()}]. lookup_command(Cmd) when is_atom(Cmd) -> case ets:match(?CMD_TAB, {{'_', Cmd}, '$1', '_'}) of [El] -> El; - [] -> [] + [] -> [] end. --spec(get_commands() -> list({cmd(), module(), atom()})). +-spec get_commands() -> list({cmd(), module(), atom()}). get_commands() -> [{Cmd, M, F} || {{_Seq, Cmd}, {M, F}, _Opts} <- ets:tab2list(?CMD_TAB)]. @@ -131,42 +138,52 @@ help() -> print("No commands available.~n"); Cmds -> print("Usage: ~ts~n", [?MODULE]), - lists:foreach(fun({_, {Mod, Cmd}, _}) -> - print("~110..-s~n", [""]), Mod:Cmd(usage) - end, Cmds) + lists:foreach( + fun({_, {Mod, Cmd}, _}) -> + print("~110..-s~n", [""]), + Mod:Cmd(usage) + end, + Cmds + ) end. --spec(print(io:format()) -> ok). +-spec print(io:format()) -> ok. print(Msg) -> io:format("~ts", [format(Msg, [])]). --spec(print(io:format(), [term()]) -> ok). +-spec print(io:format(), [term()]) -> ok. print(Format, Args) -> io:format("~ts", [format(Format, Args)]). --spec(usage([cmd_usage()]) -> ok). +-spec usage([cmd_usage()]) -> ok. usage(UsageList) -> io:format(format_usage(UsageList)). --spec(usage(cmd_params(), cmd_descr()) -> ok). +-spec usage(cmd_params(), cmd_descr()) -> ok. usage(CmdParams, Desc) -> io:format(format_usage(CmdParams, Desc)). --spec(format(io:format(), [term()]) -> string()). +-spec format(io:format(), [term()]) -> string(). format(Format, Args) -> lists:flatten(io_lib:format(Format, Args)). --spec(format_usage([cmd_usage()]) -> [string()]). 
+-spec format_usage([cmd_usage()]) -> [string()]. format_usage(UsageList) -> - Width = lists:foldl(fun({CmdStr, _}, W) -> - max(iolist_size(CmdStr), W) - end, 0, UsageList), + Width = lists:foldl( + fun({CmdStr, _}, W) -> + max(iolist_size(CmdStr), W) + end, + 0, + UsageList + ), lists:map( fun({CmdParams, Desc}) -> format_usage(CmdParams, Desc, Width) - end, UsageList). + end, + UsageList + ). --spec(format_usage(cmd_params(), cmd_descr()) -> string()). +-spec format_usage(cmd_params(), cmd_descr()) -> string(). format_usage(CmdParams, Desc) -> format_usage(CmdParams, Desc, 0). @@ -177,9 +194,13 @@ format_usage(CmdParams, Desc, Width) -> DescLines = split_cmd(Desc), Zipped = zip_cmd(CmdLines, DescLines), Fmt = "~-" ++ integer_to_list(Width + 1) ++ "s# ~ts~n", - lists:foldl(fun({CmdStr, DescStr}, Usage) -> - Usage ++ format(Fmt, [CmdStr, DescStr]) - end, "", Zipped). + lists:foldl( + fun({CmdStr, DescStr}, Usage) -> + Usage ++ format(Fmt, [CmdStr, DescStr]) + end, + "", + Zipped + ). %%-------------------------------------------------------------------- %% gen_server callbacks @@ -191,13 +212,13 @@ init([]) -> handle_call({register_command, Cmd, MF, Opts}, _From, State = #state{seq = Seq}) -> case ets:match(?CMD_TAB, {{'$1', Cmd}, '_', '_'}) of - [] -> ets:insert(?CMD_TAB, {{Seq, Cmd}, MF, Opts}); + [] -> + ets:insert(?CMD_TAB, {{Seq, Cmd}, MF, Opts}); [[OriginSeq] | _] -> ?SLOG(warning, #{msg => "CMD_overidden", cmd => Cmd, mf => MF}), true = ets:insert(?CMD_TAB, {{OriginSeq, Cmd}, MF, Opts}) end, {reply, ok, next_seq(State)}; - handle_call(Req, _From, State) -> ?SLOG(error, #{msg => "unexpected_call", call => Req}), {reply, ignored, State}. @@ -205,7 +226,6 @@ handle_call(Req, _From, State) -> handle_cast({unregister_command, Cmd}, State) -> ets:match_delete(?CMD_TAB, {{'_', Cmd}, '_', '_'}), noreply(State); - handle_cast(Msg, State) -> ?SLOG(error, #{msg => "unexpected_cast", cast => Msg}), noreply(State). diff --git a/apps/emqx/src/emqx_datetime.erl b/apps/emqx/src/emqx_datetime.erl index 8754f5438..2ee403f14 100644 --- a/apps/emqx/src/emqx_datetime.erl +++ b/apps/emqx/src/emqx_datetime.erl @@ -18,16 +18,19 @@ -include_lib("typerefl/include/types.hrl"). %% API --export([ to_epoch_millisecond/1 - , to_epoch_second/1 - ]). --export([ epoch_to_rfc3339/1 - , epoch_to_rfc3339/2 - ]). +-export([ + to_epoch_millisecond/1, + to_epoch_second/1 +]). +-export([ + epoch_to_rfc3339/1, + epoch_to_rfc3339/2 +]). --reflect_type([ epoch_millisecond/0 - , epoch_second/0 - ]). +-reflect_type([ + epoch_millisecond/0, + epoch_second/0 +]). -type epoch_second() :: non_neg_integer(). -type epoch_millisecond() :: non_neg_integer(). @@ -47,8 +50,9 @@ to_epoch(DateTime, Unit) -> {_Epoch, []} -> {error, bad_epoch}; _ -> {ok, calendar:rfc3339_to_system_time(DateTime, [{unit, Unit}])} end - catch error: _ -> - {error, bad_rfc3339_timestamp} + catch + error:_ -> + {error, bad_rfc3339_timestamp} end. epoch_to_rfc3339(TimeStamp) -> @@ -69,8 +73,11 @@ fields(bar) -> {millisecond, ?MODULE:epoch_millisecond()} ]. --define(FORMAT(_Sec_, _Ms_), lists:flatten( - io_lib:format("bar={second=~w,millisecond=~w}", [_Sec_, _Ms_]))). +-define(FORMAT(_Sec_, _Ms_), + lists:flatten( + io_lib:format("bar={second=~w,millisecond=~w}", [_Sec_, _Ms_]) + ) +). 
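The conversions exercised by the tests that follow reduce to calendar:rfc3339_to_system_time/2 with an explicit unit, as called in to_epoch/2 above. A quick worked example using the values from the test fixture below:

    %% "2022-01-01T08:00:00+08:00" is 2022-01-01T00:00:00Z.
    1640995200 =
        calendar:rfc3339_to_system_time("2022-01-01T08:00:00+08:00", [{unit, second}]),
    1640995200000 =
        calendar:rfc3339_to_system_time("2022-01-01T08:00:00+08:00", [{unit, millisecond}]).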
epoch_ok_test() -> Args = [ @@ -78,31 +85,43 @@ epoch_ok_test() -> {1, 1, 1, 1}, {"2022-01-01T08:00:00+08:00", "2022-01-01T08:00:00+08:00", 1640995200, 1640995200000} ], - lists:foreach(fun({Sec, Ms, EpochSec, EpochMs}) -> - check_ok(?FORMAT(Sec, Ms), EpochSec, EpochMs) - end, Args), + lists:foreach( + fun({Sec, Ms, EpochSec, EpochMs}) -> + check_ok(?FORMAT(Sec, Ms), EpochSec, EpochMs) + end, + Args + ), ok. check_ok(Input, Sec, Ms) -> {ok, Data} = hocon:binary(Input, #{}), - ?assertMatch(#{bar := #{second := Sec, millisecond := Ms}}, - hocon_tconf:check_plain(?MODULE, Data, #{atom_key => true}, [bar])), + ?assertMatch( + #{bar := #{second := Sec, millisecond := Ms}}, + hocon_tconf:check_plain(?MODULE, Data, #{atom_key => true}, [bar]) + ), ok. epoch_failed_test() -> Args = [ {-1, -1}, {"1s", "1s"}, - {"2022-13-13T08:00:00+08:00", "2022-13-13T08:00:00+08:00"}], - lists:foreach(fun({Sec, Ms}) -> - check_failed(?FORMAT(Sec, Ms)) - end, Args), + {"2022-13-13T08:00:00+08:00", "2022-13-13T08:00:00+08:00"} + ], + lists:foreach( + fun({Sec, Ms}) -> + check_failed(?FORMAT(Sec, Ms)) + end, + Args + ), ok. check_failed(Input) -> {ok, Data} = hocon:binary(Input, #{}), - ?assertException(throw, _, - hocon_tconf:check_plain(?MODULE, Data, #{atom_key => true}, [bar])), + ?assertException( + throw, + _, + hocon_tconf:check_plain(?MODULE, Data, #{atom_key => true}, [bar]) + ), ok. -endif. diff --git a/apps/emqx/src/emqx_flapping.erl b/apps/emqx/src/emqx_flapping.erl index c69208c22..7e72c488f 100644 --- a/apps/emqx/src/emqx_flapping.erl +++ b/apps/emqx/src/emqx_flapping.erl @@ -28,13 +28,14 @@ -export([detect/1]). %% gen_server callbacks --export([ init/1 - , handle_call/3 - , handle_cast/2 - , handle_info/2 - , terminate/2 - , code_change/3 - ]). +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2, + code_change/3 +]). %% Tab -define(FLAPPING_TAB, ?MODULE). @@ -42,31 +43,31 @@ -define(FLAPPING_THRESHOLD, 30). -define(FLAPPING_DURATION, 60000). -define(FLAPPING_BANNED_INTERVAL, 300000). --define(DEFAULT_DETECT_POLICY, - #{max_count => ?FLAPPING_THRESHOLD, - window_time => ?FLAPPING_DURATION, - ban_time => ?FLAPPING_BANNED_INTERVAL - }). +-define(DEFAULT_DETECT_POLICY, #{ + max_count => ?FLAPPING_THRESHOLD, + window_time => ?FLAPPING_DURATION, + ban_time => ?FLAPPING_BANNED_INTERVAL +}). -record(flapping, { - clientid :: emqx_types:clientid(), - peerhost :: emqx_types:peerhost(), - started_at :: pos_integer(), - detect_cnt :: integer() - }). + clientid :: emqx_types:clientid(), + peerhost :: emqx_types:peerhost(), + started_at :: pos_integer(), + detect_cnt :: integer() +}). --opaque(flapping() :: #flapping{}). +-opaque flapping() :: #flapping{}. -export_type([flapping/0]). --spec(start_link() -> emqx_types:startlink_ret()). +-spec start_link() -> emqx_types:startlink_ret(). start_link() -> gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). stop() -> gen_server:stop(?MODULE). %% @doc Detect flapping when a MQTT client disconnected. --spec(detect(emqx_types:clientinfo()) -> boolean()). +-spec detect(emqx_types:clientinfo()) -> boolean(). detect(#{clientid := ClientId, peerhost := PeerHost, zone := Zone}) -> Policy = #{max_count := Threshold} = get_policy(Zone), %% The initial flapping record sets the detect_cnt to 0. @@ -83,7 +84,8 @@ detect(#{clientid := ClientId, peerhost := PeerHost, zone := Zone}) -> [Flapping] -> ok = gen_server:cast(?MODULE, {detected, Flapping, Policy}), true; - [] -> false + [] -> + false end end. 
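detect/1 above bumps a per-client counter in ?FLAPPING_TAB and compares it to max_count; the comment about the initial record's detect_cnt being 0 points at the usual pattern of ets:update_counter/4 with a default object, so the first disconnect both creates the row and increments it (the bump itself sits in unchanged lines between the two hunks). A reduced sketch of that pattern with a throwaway table, not the EMQX code:

    %% Count events per key and report when a threshold is crossed.
    Tab = ets:new(flap_demo, [set, public]),
    Bump = fun(Key, Threshold) ->
        Cnt = ets:update_counter(Tab, Key, {2, 1}, {Key, 0}),
        Cnt >= Threshold
    end,
    false = Bump(<<"c1">>, 3),
    false = Bump(<<"c1">>, 3),
    true  = Bump(<<"c1">>, 3).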
@@ -97,11 +99,13 @@ now_diff(TS) -> erlang:system_time(millisecond) - TS. %%-------------------------------------------------------------------- init([]) -> - ok = emqx_tables:new(?FLAPPING_TAB, [public, set, - {keypos, #flapping.clientid}, - {read_concurrency, true}, - {write_concurrency, true} - ]), + ok = emqx_tables:new(?FLAPPING_TAB, [ + public, + set, + {keypos, #flapping.clientid}, + {read_concurrency, true}, + {write_concurrency, true} + ]), start_timers(), {ok, #{}, hibernate}. @@ -109,49 +113,65 @@ handle_call(Req, _From, State) -> ?SLOG(error, #{msg => "unexpected_call", call => Req}), {reply, ignored, State}. -handle_cast({detected, #flapping{clientid = ClientId, - peerhost = PeerHost, - started_at = StartedAt, - detect_cnt = DetectCnt}, - #{window_time := WindTime, ban_time := Interval}}, State) -> +handle_cast( + {detected, + #flapping{ + clientid = ClientId, + peerhost = PeerHost, + started_at = StartedAt, + detect_cnt = DetectCnt + }, + #{window_time := WindTime, ban_time := Interval}}, + State +) -> case now_diff(StartedAt) < WindTime of - true -> %% Flapping happened:( - ?SLOG(warning, #{ - msg => "flapping_detected", - peer_host => fmt_host(PeerHost), - detect_cnt => DetectCnt, - wind_time_in_ms => WindTime - }, #{clientid => ClientId}), + %% Flapping happened:( + true -> + ?SLOG( + warning, + #{ + msg => "flapping_detected", + peer_host => fmt_host(PeerHost), + detect_cnt => DetectCnt, + wind_time_in_ms => WindTime + }, + #{clientid => ClientId} + ), Now = erlang:system_time(second), - Banned = #banned{who = {clientid, ClientId}, - by = <<"flapping detector">>, - reason = <<"flapping is detected">>, - at = Now, - until = Now + (Interval div 1000)}, + Banned = #banned{ + who = {clientid, ClientId}, + by = <<"flapping detector">>, + reason = <<"flapping is detected">>, + at = Now, + until = Now + (Interval div 1000) + }, {ok, _} = emqx_banned:create(Banned), ok; false -> - ?SLOG(warning, #{ - msg => "client_disconnected", - peer_host => fmt_host(PeerHost), - detect_cnt => DetectCnt, - interval => Interval - }, #{clientid => ClientId}) + ?SLOG( + warning, + #{ + msg => "client_disconnected", + peer_host => fmt_host(PeerHost), + detect_cnt => DetectCnt, + interval => Interval + }, + #{clientid => ClientId} + ) end, {noreply, State}; - handle_cast(Msg, State) -> ?SLOG(error, #{msg => "unexpected_cast", cast => Msg}), {noreply, State}. handle_info({timeout, _TRef, {garbage_collect, Zone}}, State) -> - Timestamp = erlang:system_time(millisecond) - - maps:get(window_time, get_policy(Zone)), - MatchSpec = [{{'_', '_', '_', '$1', '_'},[{'<', '$1', Timestamp}], [true]}], + Timestamp = + erlang:system_time(millisecond) - + maps:get(window_time, get_policy(Zone)), + MatchSpec = [{{'_', '_', '_', '$1', '_'}, [{'<', '$1', Timestamp}], [true]}], ets:select_delete(?FLAPPING_TAB, MatchSpec), _ = start_timer(Zone), {noreply, State, hibernate}; - handle_info(Info, State) -> ?SLOG(error, #{msg => "unexpected_info", info => Info}), {noreply, State}. @@ -167,11 +187,16 @@ start_timer(Zone) -> emqx_misc:start_timer(WindTime, {garbage_collect, Zone}). start_timers() -> - lists:foreach(fun({Zone, _ZoneConf}) -> + lists:foreach( + fun({Zone, _ZoneConf}) -> start_timer(Zone) - end, maps:to_list(emqx:get_config([zones], #{}))). + end, + maps:to_list(emqx:get_config([zones], #{})) + ). fmt_host(PeerHost) -> - try inet:ntoa(PeerHost) - catch _:_ -> PeerHost + try + inet:ntoa(PeerHost) + catch + _:_ -> PeerHost end. 
diff --git a/apps/emqx/src/emqx_frame.erl b/apps/emqx/src/emqx_frame.erl index 277787aaf..9b34152e7 100644 --- a/apps/emqx/src/emqx_frame.erl +++ b/apps/emqx/src/emqx_frame.erl @@ -19,59 +19,65 @@ -include("emqx.hrl"). -include("emqx_mqtt.hrl"). --export([ initial_parse_state/0 - , initial_parse_state/1 - ]). +-export([ + initial_parse_state/0, + initial_parse_state/1 +]). --export([ parse/1 - , parse/2 - , serialize_fun/0 - , serialize_fun/1 - , serialize_opts/0 - , serialize_opts/1 - , serialize_pkt/2 - , serialize/1 - , serialize/2 - ]). +-export([ + parse/1, + parse/2, + serialize_fun/0, + serialize_fun/1, + serialize_opts/0, + serialize_opts/1, + serialize_pkt/2, + serialize/1, + serialize/2 +]). +-export([describe_state/1]). --export([ describe_state/1 - ]). - --export_type([ options/0 - , parse_state/0 - , parse_result/0 - , serialize_opts/0 - ]). +-export_type([ + options/0, + parse_state/0, + parse_result/0, + serialize_opts/0 +]). -define(Q(BYTES, Q), {BYTES, Q}). --type(options() :: #{strict_mode => boolean(), - max_size => 1..?MAX_PACKET_SIZE, - version => emqx_types:proto_ver() - }). +-type options() :: #{ + strict_mode => boolean(), + max_size => 1..?MAX_PACKET_SIZE, + version => emqx_types:proto_ver() +}. -define(NONE(Options), {none, Options}). --type(parse_state() :: ?NONE(options()) | {cont_state(), options()}). +-type parse_state() :: ?NONE(options()) | {cont_state(), options()}. --type(parse_result() :: {more, parse_state()} - | {ok, emqx_types:packet(), binary(), parse_state()}). +-type parse_result() :: + {more, parse_state()} + | {ok, emqx_types:packet(), binary(), parse_state()}. --type(cont_state() :: - {Stage :: len | body, - State :: #{hdr := #mqtt_packet_header{}, - len := {pos_integer(), non_neg_integer()} | non_neg_integer(), - rest => binary() | ?Q(non_neg_integer(), queue:queue(binary())) - }}). +-type cont_state() :: + { + Stage :: len | body, + State :: #{ + hdr := #mqtt_packet_header{}, + len := {pos_integer(), non_neg_integer()} | non_neg_integer(), + rest => binary() | ?Q(non_neg_integer(), queue:queue(binary())) + } + }. --type(serialize_opts() :: options()). +-type serialize_opts() :: options(). --define(DEFAULT_OPTIONS, - #{strict_mode => false, - max_size => ?MAX_PACKET_SIZE, - version => ?MQTT_PROTO_V4 - }). +-define(DEFAULT_OPTIONS, #{ + strict_mode => false, + max_size => ?MAX_PACKET_SIZE, + version => ?MQTT_PROTO_V4 +}). -define(PARSE_ERR(Reason), ?THROW_FRAME_ERROR(Reason)). -define(SERIALIZE_ERR(Reason), ?THROW_SERIALIZE_ERROR(Reason)). @@ -81,15 +87,19 @@ -dialyzer({no_match, [serialize_utf8_string/2]}). %% @doc Describe state for logging. -describe_state(?NONE(_Opts)) -> <<"clean">>; -describe_state({{len, _}, _Opts}) -> <<"parsing_varint_length">>; +describe_state(?NONE(_Opts)) -> + <<"clean">>; +describe_state({{len, _}, _Opts}) -> + <<"parsing_varint_length">>; describe_state({{body, State}, _Opts}) -> - #{ hdr := Hdr - , len := Len - } = State, - Desc = #{ parsed_header => Hdr - , expected_bytes => Len - }, + #{ + hdr := Hdr, + len := Len + } = State, + Desc = #{ + parsed_header => Hdr, + expected_bytes => Len + }, case maps:get(rest, State, undefined) of undefined -> Desc; Body -> Desc#{received_bytes => body_bytes(Body)} @@ -99,11 +109,11 @@ describe_state({{body, State}, _Opts}) -> %% Init Parse State %%-------------------------------------------------------------------- --spec(initial_parse_state() -> ?NONE(options())). +-spec initial_parse_state() -> ?NONE(options()). initial_parse_state() -> initial_parse_state(#{}). 
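initial_parse_state/1 and parse/2 above form a streaming parser: feed it whatever bytes have arrived and it returns either {more, State} or {ok, Packet, LeftoverBytes, State}, per the parse_result() type. A minimal sketch with a PINGREQ split across two reads (the packet bytes are from the MQTT spec; the return shapes follow the types declared above, so treat this as an assumed usage example):

    PState0 = emqx_frame:initial_parse_state(#{strict_mode => true}),
    %% First read delivers only the fixed-header byte of a PINGREQ (16#C0).
    {more, PState1} = emqx_frame:parse(<<16#C0>>, PState0),
    %% Second read delivers the remaining-length byte (0); the packet completes.
    {ok, _PingReq, <<>>, _PState2} = emqx_frame:parse(<<16#00>>, PState1).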
--spec(initial_parse_state(options()) -> ?NONE(options())). +-spec initial_parse_state(options()) -> ?NONE(options()). initial_parse_state(Options) when is_map(Options) -> ?NONE(maps:merge(?DEFAULT_OPTIONS, Options)). @@ -111,32 +121,42 @@ initial_parse_state(Options) when is_map(Options) -> %% Parse MQTT Frame %%-------------------------------------------------------------------- --spec(parse(binary()) -> parse_result()). +-spec parse(binary()) -> parse_result(). parse(Bin) -> parse(Bin, initial_parse_state()). --spec(parse(binary(), parse_state()) -> parse_result()). +-spec parse(binary(), parse_state()) -> parse_result(). parse(<<>>, ?NONE(Options)) -> {more, ?NONE(Options)}; -parse(<>, - ?NONE(Options = #{strict_mode := StrictMode})) -> +parse( + <>, + ?NONE(Options = #{strict_mode := StrictMode}) +) -> %% Validate header if strict mode. StrictMode andalso validate_header(Type, Dup, QoS, Retain), - Header = #mqtt_packet_header{type = Type, - dup = bool(Dup), - qos = fixqos(Type, QoS), - retain = bool(Retain) - }, + Header = #mqtt_packet_header{ + type = Type, + dup = bool(Dup), + qos = fixqos(Type, QoS), + retain = bool(Retain) + }, parse_remaining_len(Rest, Header, Options); - -parse(Bin, {{len, #{hdr := Header, - len := {Multiplier, Length}} - }, Options}) when is_binary(Bin) -> +parse(Bin, { + {len, #{ + hdr := Header, + len := {Multiplier, Length} + }}, + Options +}) when is_binary(Bin) -> parse_remaining_len(Bin, Header, Multiplier, Length, Options); -parse(Bin, {{body, #{hdr := Header, - len := Length, - rest := Body} - }, Options}) when is_binary(Bin) -> +parse(Bin, { + {body, #{ + hdr := Header, + len := Length, + rest := Body + }}, + Options +}) when is_binary(Bin) -> NewBody = append_body(Body, Bin), parse_frame(NewBody, Header, Length, Options). @@ -145,14 +165,20 @@ parse_remaining_len(<<>>, Header, Options) -> parse_remaining_len(Rest, Header, Options) -> parse_remaining_len(Rest, Header, 1, 0, Options). -parse_remaining_len(_Bin, _Header, _Multiplier, Length, #{max_size := MaxSize}) - when Length > MaxSize -> +parse_remaining_len(_Bin, _Header, _Multiplier, Length, #{max_size := MaxSize}) when + Length > MaxSize +-> ?PARSE_ERR(frame_too_large); parse_remaining_len(<<>>, Header, Multiplier, Length, Options) -> {more, {{len, #{hdr => Header, len => {Multiplier, Length}}}, Options}}; %% Match DISCONNECT without payload -parse_remaining_len(<<0:8, Rest/binary>>, - Header = #mqtt_packet_header{type = ?DISCONNECT}, 1, 0, Options) -> +parse_remaining_len( + <<0:8, Rest/binary>>, + Header = #mqtt_packet_header{type = ?DISCONNECT}, + 1, + 0, + Options +) -> Packet = packet(Header, #mqtt_packet_disconnect{reason_code = ?RC_SUCCESS}), {ok, Packet, Rest, ?NONE(Options)}; %% Match PINGREQ. @@ -161,13 +187,19 @@ parse_remaining_len(<<0:8, Rest/binary>>, Header, 1, 0, Options) -> %% Match PUBACK, PUBREC, PUBREL, PUBCOMP, UNSUBACK... 
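Two notes on the parse/2 hunk above. First, the bare `<>` in the first clause is a rendering artifact; in the source it is the MQTT fixed-header bit pattern (packet type, DUP, QoS and RETAIN bits, followed by the rest of the buffer). Second, parse_remaining_len/5 below decodes the Remaining Length field, a variable byte integer: each byte contributes its low 7 bits scaled by a growing multiplier (?HIGHBIT is 128), and the high bit says more bytes follow. A worked example with an illustrative standalone helper (not part of emqx_frame):

    %% 16#C1 = 2#11000001: continuation bit set, low 7 bits = 65.
    %% 16#02 = 2:          no continuation,     low 7 bits = 2.
    %% Value = 65 * 1 + 2 * 128 = 321 bytes of remaining length.
    decode_varint(<<1:1, Len:7, Rest/binary>>, Mult, Acc) ->
        decode_varint(Rest, Mult * 128, Acc + Len * Mult);
    decode_varint(<<0:1, Len:7, _Rest/binary>>, Mult, Acc) ->
        Acc + Len * Mult.

    %% decode_varint(<<16#C1, 16#02>>, 1, 0) =:= 321.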
parse_remaining_len(<<0:1, 2:7, Rest/binary>>, Header, 1, 0, Options) -> parse_frame(Rest, Header, 2, Options); -parse_remaining_len(<<1:1, _Len:7, _Rest/binary>>, _Header, Multiplier, _Value, _Options) - when Multiplier > ?MULTIPLIER_MAX -> +parse_remaining_len(<<1:1, _Len:7, _Rest/binary>>, _Header, Multiplier, _Value, _Options) when + Multiplier > ?MULTIPLIER_MAX +-> ?PARSE_ERR(malformed_variable_byte_integer); parse_remaining_len(<<1:1, Len:7, Rest/binary>>, Header, Multiplier, Value, Options) -> parse_remaining_len(Rest, Header, Multiplier * ?HIGHBIT, Value + Len * Multiplier, Options); -parse_remaining_len(<<0:1, Len:7, Rest/binary>>, Header, Multiplier, Value, - Options = #{max_size := MaxSize}) -> +parse_remaining_len( + <<0:1, Len:7, Rest/binary>>, + Header, + Multiplier, + Value, + Options = #{max_size := MaxSize} +) -> FrameLen = Value + Len * Multiplier, case FrameLen > MaxSize of true -> ?PARSE_ERR(frame_too_large); @@ -177,7 +209,8 @@ parse_remaining_len(<<0:1, Len:7, Rest/binary>>, Header, Multiplier, Value, body_bytes(B) when is_binary(B) -> size(B); body_bytes(?Q(Bytes, _)) -> Bytes. -append_body(H, <<>>) -> H; +append_body(H, <<>>) -> + H; append_body(H, T) when is_binary(H) andalso size(H) < 1024 -> <>; append_body(H, T) when is_binary(H) -> @@ -204,10 +237,14 @@ parse_frame(Body, Header, Length, Options) -> {ok, packet(Header, Variable), Rest, ?NONE(Options)} end; false -> - {more, {{body, #{hdr => Header, - len => Length, - rest => Body - }}, Options}} + {more, { + {body, #{ + hdr => Header, + len => Length, + rest => Body + }}, + Options + }} end. -compile({inline, [packet/1, packet/2, packet/3]}). @@ -218,152 +255,185 @@ packet(Header, Variable) -> packet(Header, Variable, Payload) -> #mqtt_packet{header = Header, variable = Variable, payload = Payload}. -parse_packet(#mqtt_packet_header{type = ?CONNECT} - , FrameBin, - #{strict_mode := StrictMode}) -> +parse_packet( + #mqtt_packet_header{type = ?CONNECT}, + FrameBin, + #{strict_mode := StrictMode} +) -> {ProtoName, Rest} = parse_utf8_string(FrameBin, StrictMode), <> = Rest, % Note: Crash when reserved flag doesn't equal to 0, there is no strict % compliance with the MQTT5.0. 
- <> = Rest1, + <> = Rest1, {Properties, Rest3} = parse_properties(Rest2, ProtoVer, StrictMode), {ClientId, Rest4} = parse_utf8_string(Rest3, StrictMode), - ConnPacket = #mqtt_packet_connect{proto_name = ProtoName, - proto_ver = ProtoVer, - is_bridge = (BridgeTag =:= 8), - clean_start = bool(CleanStart), - will_flag = bool(WillFlag), - will_qos = WillQoS, - will_retain = bool(WillRetain), - keepalive = KeepAlive, - properties = Properties, - clientid = ClientId - }, + ConnPacket = #mqtt_packet_connect{ + proto_name = ProtoName, + proto_ver = ProtoVer, + is_bridge = (BridgeTag =:= 8), + clean_start = bool(CleanStart), + will_flag = bool(WillFlag), + will_qos = WillQoS, + will_retain = bool(WillRetain), + keepalive = KeepAlive, + properties = Properties, + clientid = ClientId + }, {ConnPacket1, Rest5} = parse_will_message(ConnPacket, Rest4, StrictMode), {Username, Rest6} = parse_utf8_string(Rest5, StrictMode, bool(UsernameFlag)), - {Password, <<>>} = parse_utf8_string(Rest6, StrictMode, bool(PasswordFlag)), + {Password, <<>>} = parse_utf8_string(Rest6, StrictMode, bool(PasswordFlag)), ConnPacket1#mqtt_packet_connect{username = Username, password = Password}; - -parse_packet(#mqtt_packet_header{type = ?CONNACK}, - <>, - #{version := Ver, strict_mode := StrictMode}) -> +parse_packet( + #mqtt_packet_header{type = ?CONNACK}, + <>, + #{version := Ver, strict_mode := StrictMode} +) -> {Properties, <<>>} = parse_properties(Rest, Ver, StrictMode), - #mqtt_packet_connack{ack_flags = AckFlags, - reason_code = ReasonCode, - properties = Properties - }; - -parse_packet(#mqtt_packet_header{type = ?PUBLISH, qos = QoS}, - Bin, - #{strict_mode := StrictMode, version := Ver}) -> + #mqtt_packet_connack{ + ack_flags = AckFlags, + reason_code = ReasonCode, + properties = Properties + }; +parse_packet( + #mqtt_packet_header{type = ?PUBLISH, qos = QoS}, + Bin, + #{strict_mode := StrictMode, version := Ver} +) -> {TopicName, Rest} = parse_utf8_string(Bin, StrictMode), - {PacketId, Rest1} = case QoS of - ?QOS_0 -> {undefined, Rest}; - _ -> parse_packet_id(Rest) - end, + {PacketId, Rest1} = + case QoS of + ?QOS_0 -> {undefined, Rest}; + _ -> parse_packet_id(Rest) + end, (PacketId =/= undefined) andalso - StrictMode andalso validate_packet_id(PacketId), + StrictMode andalso validate_packet_id(PacketId), {Properties, Payload} = parse_properties(Rest1, Ver, StrictMode), - Publish = #mqtt_packet_publish{topic_name = TopicName, - packet_id = PacketId, - properties = Properties - }, + Publish = #mqtt_packet_publish{ + topic_name = TopicName, + packet_id = PacketId, + properties = Properties + }, {Publish, Payload}; - -parse_packet(#mqtt_packet_header{type = PubAck}, <>, #{strict_mode := StrictMode}) - when ?PUBACK =< PubAck, PubAck =< ?PUBCOMP -> +parse_packet(#mqtt_packet_header{type = PubAck}, <>, #{strict_mode := StrictMode}) when + ?PUBACK =< PubAck, PubAck =< ?PUBCOMP +-> StrictMode andalso validate_packet_id(PacketId), #mqtt_packet_puback{packet_id = PacketId, reason_code = 0}; - -parse_packet(#mqtt_packet_header{type = PubAck}, <>, - #{strict_mode := StrictMode, version := Ver = ?MQTT_PROTO_V5}) - when ?PUBACK =< PubAck, PubAck =< ?PUBCOMP -> +parse_packet( + #mqtt_packet_header{type = PubAck}, + <>, + #{strict_mode := StrictMode, version := Ver = ?MQTT_PROTO_V5} +) when + ?PUBACK =< PubAck, PubAck =< ?PUBCOMP +-> StrictMode andalso validate_packet_id(PacketId), {Properties, <<>>} = parse_properties(Rest, Ver, StrictMode), - #mqtt_packet_puback{packet_id = PacketId, - reason_code = ReasonCode, - properties = 
Properties - }; - -parse_packet(#mqtt_packet_header{type = ?SUBSCRIBE}, <>, - #{strict_mode := StrictMode, version := Ver}) -> + #mqtt_packet_puback{ + packet_id = PacketId, + reason_code = ReasonCode, + properties = Properties + }; +parse_packet( + #mqtt_packet_header{type = ?SUBSCRIBE}, + <>, + #{strict_mode := StrictMode, version := Ver} +) -> StrictMode andalso validate_packet_id(PacketId), {Properties, Rest1} = parse_properties(Rest, Ver, StrictMode), TopicFilters = parse_topic_filters(subscribe, Rest1), ok = validate_subqos([QoS || {_, #{qos := QoS}} <- TopicFilters]), - #mqtt_packet_subscribe{packet_id = PacketId, - properties = Properties, - topic_filters = TopicFilters - }; - -parse_packet(#mqtt_packet_header{type = ?SUBACK}, <>, - #{strict_mode := StrictMode, version := Ver}) -> + #mqtt_packet_subscribe{ + packet_id = PacketId, + properties = Properties, + topic_filters = TopicFilters + }; +parse_packet( + #mqtt_packet_header{type = ?SUBACK}, + <>, + #{strict_mode := StrictMode, version := Ver} +) -> StrictMode andalso validate_packet_id(PacketId), {Properties, Rest1} = parse_properties(Rest, Ver, StrictMode), ReasonCodes = parse_reason_codes(Rest1), - #mqtt_packet_suback{packet_id = PacketId, - properties = Properties, - reason_codes = ReasonCodes - }; - -parse_packet(#mqtt_packet_header{type = ?UNSUBSCRIBE}, <>, - #{strict_mode := StrictMode, version := Ver}) -> + #mqtt_packet_suback{ + packet_id = PacketId, + properties = Properties, + reason_codes = ReasonCodes + }; +parse_packet( + #mqtt_packet_header{type = ?UNSUBSCRIBE}, + <>, + #{strict_mode := StrictMode, version := Ver} +) -> StrictMode andalso validate_packet_id(PacketId), {Properties, Rest1} = parse_properties(Rest, Ver, StrictMode), TopicFilters = parse_topic_filters(unsubscribe, Rest1), - #mqtt_packet_unsubscribe{packet_id = PacketId, - properties = Properties, - topic_filters = TopicFilters - }; - -parse_packet(#mqtt_packet_header{type = ?UNSUBACK}, <>, - #{strict_mode := StrictMode}) -> + #mqtt_packet_unsubscribe{ + packet_id = PacketId, + properties = Properties, + topic_filters = TopicFilters + }; +parse_packet( + #mqtt_packet_header{type = ?UNSUBACK}, + <>, + #{strict_mode := StrictMode} +) -> StrictMode andalso validate_packet_id(PacketId), #mqtt_packet_unsuback{packet_id = PacketId}; - -parse_packet(#mqtt_packet_header{type = ?UNSUBACK}, <>, - #{strict_mode := StrictMode, version := Ver}) -> +parse_packet( + #mqtt_packet_header{type = ?UNSUBACK}, + <>, + #{strict_mode := StrictMode, version := Ver} +) -> StrictMode andalso validate_packet_id(PacketId), {Properties, Rest1} = parse_properties(Rest, Ver, StrictMode), ReasonCodes = parse_reason_codes(Rest1), - #mqtt_packet_unsuback{packet_id = PacketId, - properties = Properties, - reason_codes = ReasonCodes - }; - -parse_packet(#mqtt_packet_header{type = ?DISCONNECT}, <>, - #{strict_mode := StrictMode, version := ?MQTT_PROTO_V5}) -> + #mqtt_packet_unsuback{ + packet_id = PacketId, + properties = Properties, + reason_codes = ReasonCodes + }; +parse_packet( + #mqtt_packet_header{type = ?DISCONNECT}, + <>, + #{strict_mode := StrictMode, version := ?MQTT_PROTO_V5} +) -> {Properties, <<>>} = parse_properties(Rest, ?MQTT_PROTO_V5, StrictMode), - #mqtt_packet_disconnect{reason_code = ReasonCode, - properties = Properties - }; - -parse_packet(#mqtt_packet_header{type = ?AUTH}, <>, - #{strict_mode := StrictMode, version := ?MQTT_PROTO_V5}) -> + #mqtt_packet_disconnect{ + reason_code = ReasonCode, + properties = Properties + }; +parse_packet( + #mqtt_packet_header{type = 
?AUTH}, + <>, + #{strict_mode := StrictMode, version := ?MQTT_PROTO_V5} +) -> {Properties, <<>>} = parse_properties(Rest, ?MQTT_PROTO_V5, StrictMode), #mqtt_packet_auth{reason_code = ReasonCode, properties = Properties}. -parse_will_message( Packet = #mqtt_packet_connect{will_flag = true, - proto_ver = Ver} - , Bin - , StrictMode) -> +parse_will_message( + Packet = #mqtt_packet_connect{ + will_flag = true, + proto_ver = Ver + }, + Bin, + StrictMode +) -> {Props, Rest} = parse_properties(Bin, Ver, StrictMode), {Topic, Rest1} = parse_utf8_string(Rest, StrictMode), {Payload, Rest2} = parse_binary_data(Rest1), - {Packet#mqtt_packet_connect{will_props = Props, - will_topic = Topic, - will_payload = Payload - }, Rest2}; -parse_will_message(Packet, Bin, _StrictMode) -> {Packet, Bin}. + { + Packet#mqtt_packet_connect{ + will_props = Props, + will_topic = Topic, + will_payload = Payload + }, + Rest2 + }; +parse_will_message(Packet, Bin, _StrictMode) -> + {Packet, Bin}. -compile({inline, [parse_packet_id/1]}). parse_packet_id(<>) -> @@ -458,8 +528,9 @@ parse_property(<>, _Props, _StrictMode) -> parse_variable_byte_integer(Bin) -> parse_variable_byte_integer(Bin, 1, 0). -parse_variable_byte_integer(<<1:1, _Len:7, _Rest/binary>>, Multiplier, _Value) - when Multiplier > ?MULTIPLIER_MAX -> +parse_variable_byte_integer(<<1:1, _Len:7, _Rest/binary>>, Multiplier, _Value) when + Multiplier > ?MULTIPLIER_MAX +-> ?PARSE_ERR(malformed_variable_byte_integer); parse_variable_byte_integer(<<1:1, Len:7, Rest/binary>>, Multiplier, Value) -> parse_variable_byte_integer(Rest, Multiplier * ?HIGHBIT, Value + Len * Multiplier); @@ -467,39 +538,51 @@ parse_variable_byte_integer(<<0:1, Len:7, Rest/binary>>, Multiplier, Value) -> {Value + Len * Multiplier, Rest}. parse_topic_filters(subscribe, Bin) -> - [{Topic, #{rh => Rh, rap => Rap, nl => Nl, qos => QoS}} - || <> <= Bin]; - + [ + {Topic, #{rh => Rh, rap => Rap, nl => Nl, qos => QoS}} + || <> <= Bin + ]; parse_topic_filters(unsubscribe, Bin) -> [Topic || <> <= Bin]. parse_reason_codes(Bin) -> [Code || <> <= Bin]. -parse_utf8_pair( <> - , true) -> +parse_utf8_pair( + <>, + true +) -> {{validate_utf8(Key), validate_utf8(Val)}, Rest}; -parse_utf8_pair( <> - , false) -> +parse_utf8_pair( + <>, + false +) -> {{Key, Val}, Rest}; -parse_utf8_pair(<>, _StrictMode) - when LenK > byte_size(Rest) -> - ?PARSE_ERR(#{ hint => user_property_not_enough_bytes - , parsed_key_length => LenK - , remaining_bytes_length => byte_size(Rest)}); -parse_utf8_pair(<>, _StrictMode) - when LenV > byte_size(Rest) -> - ?PARSE_ERR(#{ hint => malformed_user_property_value - , parsed_key_length => LenK - , parsed_value_length => LenV - , remaining_bytes_length => byte_size(Rest)}); -parse_utf8_pair(Bin, _StrictMode) - when 4 > byte_size(Bin) -> - ?PARSE_ERR(#{ hint => user_property_not_enough_bytes - , total_bytes => byte_size(Bin)}). +parse_utf8_pair(<>, _StrictMode) when + LenK > byte_size(Rest) +-> + ?PARSE_ERR(#{ + hint => user_property_not_enough_bytes, + parsed_key_length => LenK, + remaining_bytes_length => byte_size(Rest) + }); +%% key maybe malformed +parse_utf8_pair(<>, _StrictMode) when + LenV > byte_size(Rest) +-> + ?PARSE_ERR(#{ + hint => malformed_user_property_value, + parsed_key_length => LenK, + parsed_value_length => LenV, + remaining_bytes_length => byte_size(Rest) + }); +parse_utf8_pair(Bin, _StrictMode) when + 4 > byte_size(Bin) +-> + ?PARSE_ERR(#{ + hint => user_property_not_enough_bytes, + total_bytes => byte_size(Bin) + }). 
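The clauses just above parse an MQTT UTF-8 string pair (a user property): a 16-bit big-endian key length, the key bytes, then a 16-bit value length and the value bytes, with the guard clauses turning a buffer shorter than the advertised lengths into a descriptive parse error. A standalone sketch of the same wire layout, kept separate from the patch (module and function names are invented for illustration):

-module(utf8_pair_sketch).
-export([decode/1]).

%% Decode one length-prefixed key/value pair; fall through to an error atom
%% when the buffer is shorter than the lengths claim, instead of the
%% ?PARSE_ERR(...) exceptions used by emqx_frame.
decode(<<KLen:16/big, Key:KLen/binary, VLen:16/big, Val:VLen/binary, Rest/binary>>) ->
    {{Key, Val}, Rest};
decode(_Truncated) ->
    not_enough_bytes.

For example, decode(<<0, 1, "k", 0, 2, "vv", "tail">>) returns {{<<"k">>, <<"vv">>}, <<"tail">>}.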
parse_utf8_string(Bin, _StrictMode, false) -> {undefined, Bin}; @@ -510,24 +593,32 @@ parse_utf8_string(<>, true) -> {validate_utf8(Str), Rest}; parse_utf8_string(<>, false) -> {Str, Rest}; -parse_utf8_string(<>, _) - when Len > byte_size(Rest) -> - ?PARSE_ERR(#{ hint => malformed_utf8_string - , parsed_length => Len - , remaining_bytes_length => byte_size(Rest)}); -parse_utf8_string(Bin, _) - when 2 > byte_size(Bin) -> +parse_utf8_string(<>, _) when + Len > byte_size(Rest) +-> + ?PARSE_ERR(#{ + hint => malformed_utf8_string, + parsed_length => Len, + remaining_bytes_length => byte_size(Rest) + }); +parse_utf8_string(Bin, _) when + 2 > byte_size(Bin) +-> ?PARSE_ERR(malformed_utf8_string_length). parse_binary_data(<>) -> {Data, Rest}; -parse_binary_data(<>) - when Len > byte_size(Rest) -> - ?PARSE_ERR(#{ hint => malformed_binary_data - , parsed_length => Len - , remaining_bytes_length => byte_size(Rest)}); -parse_binary_data(Bin) - when 2 > byte_size(Bin) -> +parse_binary_data(<>) when + Len > byte_size(Rest) +-> + ?PARSE_ERR(#{ + hint => malformed_binary_data, + parsed_length => Len, + remaining_bytes_length => byte_size(Rest) + }); +parse_binary_data(Bin) when + 2 > byte_size(Bin) +-> ?PARSE_ERR(malformed_binary_data_length). %%-------------------------------------------------------------------- @@ -539,12 +630,11 @@ serialize_fun() -> serialize_fun(?DEFAULT_OPTIONS). serialize_fun(#mqtt_packet_connect{proto_ver = ProtoVer, properties = ConnProps}) -> MaxSize = get_property('Maximum-Packet-Size', ConnProps, ?MAX_PACKET_SIZE), serialize_fun(#{version => ProtoVer, max_size => MaxSize}); - serialize_fun(#{version := Ver, max_size := MaxSize}) -> fun(Packet) -> IoData = serialize(Packet, Ver), case is_too_large(IoData, MaxSize) of - true -> <<>>; + true -> <<>>; false -> IoData end end. @@ -563,131 +653,207 @@ serialize_pkt(Packet, #{version := Ver, max_size := MaxSize}) -> false -> IoData end. --spec(serialize(emqx_types:packet()) -> iodata()). +-spec serialize(emqx_types:packet()) -> iodata(). serialize(Packet) -> serialize(Packet, ?MQTT_PROTO_V4). --spec(serialize(emqx_types:packet(), emqx_types:proto_ver()) -> iodata()). -serialize(#mqtt_packet{header = Header, - variable = Variable, - payload = Payload}, Ver) -> +-spec serialize(emqx_types:packet(), emqx_types:proto_ver()) -> iodata(). +serialize( + #mqtt_packet{ + header = Header, + variable = Variable, + payload = Payload + }, + Ver +) -> serialize(Header, serialize_variable(Variable, Ver), serialize_payload(Payload)). -serialize(#mqtt_packet_header{type = Type, - dup = Dup, - qos = QoS, - retain = Retain - }, VariableBin, PayloadBin) - when ?CONNECT =< Type andalso Type =< ?AUTH -> +serialize( + #mqtt_packet_header{ + type = Type, + dup = Dup, + qos = QoS, + retain = Retain + }, + VariableBin, + PayloadBin +) when + ?CONNECT =< Type andalso Type =< ?AUTH +-> Len = iolist_size(VariableBin) + iolist_size(PayloadBin), - [<>, - serialize_remaining_len(Len), VariableBin, PayloadBin]. + [ + <>, + serialize_remaining_len(Len), + VariableBin, + PayloadBin + ]. 
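serialize/3 above emits the fixed header followed by the remaining length, which MQTT encodes as a variable byte integer: seven value bits per byte, high bit set while more bytes follow, at most four bytes (the same scheme the parser bounds with ?MULTIPLIER_MAX). A minimal round-trip sketch of that encoding, independent of the module's macros (names are illustrative):

-module(vbi_sketch).
-export([encode/1, decode/1]).

%% Encode a non-negative integer as an MQTT variable byte integer.
encode(N) when N >= 0, N < 16#80 ->
    <<0:1, N:7>>;
encode(N) when is_integer(N), N >= 16#80 ->
    <<1:1, (N rem 16#80):7, (encode(N div 16#80))/binary>>.

%% Decode it back, returning {Value, Rest}; the overflow guard used by
%% emqx_frame (malformed_variable_byte_integer) is omitted in this sketch.
decode(Bin) -> decode(Bin, 1, 0).

decode(<<1:1, Len:7, Rest/binary>>, Multiplier, Value) ->
    decode(Rest, Multiplier * 16#80, Value + Len * Multiplier);
decode(<<0:1, Len:7, Rest/binary>>, Multiplier, Value) ->
    {Value + Len * Multiplier, Rest}.

For instance, encode(321) yields <<16#C1, 16#02>> and decode(<<16#C1, 16#02>>) returns {321, <<>>}.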
-serialize_variable(#mqtt_packet_connect{ - proto_name = ProtoName, - proto_ver = ProtoVer, - is_bridge = IsBridge, - clean_start = CleanStart, - will_flag = WillFlag, - will_qos = WillQoS, - will_retain = WillRetain, - keepalive = KeepAlive, - properties = Properties, - clientid = ClientId, - will_props = WillProps, - will_topic = WillTopic, - will_payload = WillPayload, - username = Username, - password = Password}, _Ver) -> - [serialize_binary_data(ProtoName), - <<(case IsBridge of - true -> 16#80 + ProtoVer; - false -> ProtoVer - end):8, - (flag(Username)):1, - (flag(Password)):1, - (flag(WillRetain)):1, - WillQoS:2, - (flag(WillFlag)):1, - (flag(CleanStart)):1, - 0:1, - KeepAlive:16/big-unsigned-integer>>, - serialize_properties(Properties, ProtoVer), - serialize_utf8_string(ClientId), - case WillFlag of - true -> [serialize_properties(WillProps, ProtoVer), - serialize_utf8_string(WillTopic), - serialize_binary_data(WillPayload)]; - false -> <<>> - end, - serialize_utf8_string(Username, true), - serialize_utf8_string(Password, true)]; - -serialize_variable(#mqtt_packet_connack{ack_flags = AckFlags, - reason_code = ReasonCode, - properties = Properties}, Ver) -> +serialize_variable( + #mqtt_packet_connect{ + proto_name = ProtoName, + proto_ver = ProtoVer, + is_bridge = IsBridge, + clean_start = CleanStart, + will_flag = WillFlag, + will_qos = WillQoS, + will_retain = WillRetain, + keepalive = KeepAlive, + properties = Properties, + clientid = ClientId, + will_props = WillProps, + will_topic = WillTopic, + will_payload = WillPayload, + username = Username, + password = Password + }, + _Ver +) -> + [ + serialize_binary_data(ProtoName), + << + (case IsBridge of + true -> 16#80 + ProtoVer; + false -> ProtoVer + end):8, + (flag(Username)):1, + (flag(Password)):1, + (flag(WillRetain)):1, + WillQoS:2, + (flag(WillFlag)):1, + (flag(CleanStart)):1, + 0:1, + KeepAlive:16/big-unsigned-integer + >>, + serialize_properties(Properties, ProtoVer), + serialize_utf8_string(ClientId), + case WillFlag of + true -> + [ + serialize_properties(WillProps, ProtoVer), + serialize_utf8_string(WillTopic), + serialize_binary_data(WillPayload) + ]; + false -> + <<>> + end, + serialize_utf8_string(Username, true), + serialize_utf8_string(Password, true) + ]; +serialize_variable( + #mqtt_packet_connack{ + ack_flags = AckFlags, + reason_code = ReasonCode, + properties = Properties + }, + Ver +) -> [AckFlags, ReasonCode, serialize_properties(Properties, Ver)]; - -serialize_variable(#mqtt_packet_publish{topic_name = TopicName, - packet_id = PacketId, - properties = Properties}, Ver) -> - [serialize_utf8_string(TopicName), - if - PacketId =:= undefined -> <<>>; - true -> <> - end, - serialize_properties(Properties, Ver)]; - -serialize_variable(#mqtt_packet_puback{packet_id = PacketId}, Ver) - when Ver == ?MQTT_PROTO_V3; Ver == ?MQTT_PROTO_V4 -> +serialize_variable( + #mqtt_packet_publish{ + topic_name = TopicName, + packet_id = PacketId, + properties = Properties + }, + Ver +) -> + [ + serialize_utf8_string(TopicName), + if + PacketId =:= undefined -> <<>>; + true -> <> + end, + serialize_properties(Properties, Ver) + ]; +serialize_variable(#mqtt_packet_puback{packet_id = PacketId}, Ver) when + Ver == ?MQTT_PROTO_V3; Ver == ?MQTT_PROTO_V4 +-> <>; -serialize_variable(#mqtt_packet_puback{packet_id = PacketId, - reason_code = ReasonCode, - properties = Properties - }, - Ver = ?MQTT_PROTO_V5) -> - [<>, ReasonCode, - serialize_properties(Properties, Ver)]; - -serialize_variable(#mqtt_packet_subscribe{packet_id = PacketId, - 
properties = Properties, - topic_filters = TopicFilters}, Ver) -> - [<>, serialize_properties(Properties, Ver), - serialize_topic_filters(subscribe, TopicFilters, Ver)]; - -serialize_variable(#mqtt_packet_suback{packet_id = PacketId, - properties = Properties, - reason_codes = ReasonCodes}, Ver) -> - [<>, serialize_properties(Properties, Ver), - serialize_reason_codes(ReasonCodes)]; - -serialize_variable(#mqtt_packet_unsubscribe{packet_id = PacketId, - properties = Properties, - topic_filters = TopicFilters}, Ver) -> - [<>, serialize_properties(Properties, Ver), - serialize_topic_filters(unsubscribe, TopicFilters, Ver)]; - -serialize_variable(#mqtt_packet_unsuback{packet_id = PacketId, - properties = Properties, - reason_codes = ReasonCodes}, Ver) -> - [<>, serialize_properties(Properties, Ver), - serialize_reason_codes(ReasonCodes)]; - -serialize_variable(#mqtt_packet_disconnect{}, Ver) - when Ver == ?MQTT_PROTO_V3; Ver == ?MQTT_PROTO_V4 -> +serialize_variable( + #mqtt_packet_puback{ + packet_id = PacketId, + reason_code = ReasonCode, + properties = Properties + }, + Ver = ?MQTT_PROTO_V5 +) -> + [ + <>, + ReasonCode, + serialize_properties(Properties, Ver) + ]; +serialize_variable( + #mqtt_packet_subscribe{ + packet_id = PacketId, + properties = Properties, + topic_filters = TopicFilters + }, + Ver +) -> + [ + <>, + serialize_properties(Properties, Ver), + serialize_topic_filters(subscribe, TopicFilters, Ver) + ]; +serialize_variable( + #mqtt_packet_suback{ + packet_id = PacketId, + properties = Properties, + reason_codes = ReasonCodes + }, + Ver +) -> + [ + <>, + serialize_properties(Properties, Ver), + serialize_reason_codes(ReasonCodes) + ]; +serialize_variable( + #mqtt_packet_unsubscribe{ + packet_id = PacketId, + properties = Properties, + topic_filters = TopicFilters + }, + Ver +) -> + [ + <>, + serialize_properties(Properties, Ver), + serialize_topic_filters(unsubscribe, TopicFilters, Ver) + ]; +serialize_variable( + #mqtt_packet_unsuback{ + packet_id = PacketId, + properties = Properties, + reason_codes = ReasonCodes + }, + Ver +) -> + [ + <>, + serialize_properties(Properties, Ver), + serialize_reason_codes(ReasonCodes) + ]; +serialize_variable(#mqtt_packet_disconnect{}, Ver) when + Ver == ?MQTT_PROTO_V3; Ver == ?MQTT_PROTO_V4 +-> <<>>; - -serialize_variable(#mqtt_packet_disconnect{reason_code = ReasonCode, - properties = Properties}, - Ver = ?MQTT_PROTO_V5) -> +serialize_variable( + #mqtt_packet_disconnect{ + reason_code = ReasonCode, + properties = Properties + }, + Ver = ?MQTT_PROTO_V5 +) -> [ReasonCode, serialize_properties(Properties, Ver)]; serialize_variable(#mqtt_packet_disconnect{}, _Ver) -> <<>>; - -serialize_variable(#mqtt_packet_auth{reason_code = ReasonCode, - properties = Properties}, - Ver = ?MQTT_PROTO_V5) -> +serialize_variable( + #mqtt_packet_auth{ + reason_code = ReasonCode, + properties = Properties + }, + Ver = ?MQTT_PROTO_V5 +) -> [ReasonCode, serialize_properties(Properties, Ver)]; - serialize_variable(PacketId, ?MQTT_PROTO_V3) when is_integer(PacketId) -> <>; serialize_variable(PacketId, ?MQTT_PROTO_V4) when is_integer(PacketId) -> @@ -696,7 +862,7 @@ serialize_variable(undefined, _Ver) -> <<>>. serialize_payload(undefined) -> <<>>; -serialize_payload(Bin) -> Bin. +serialize_payload(Bin) -> Bin. 
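The puback clauses above make the protocol split concrete: for MQTT v3.1/v3.1.1 the variable header is only the 16-bit packet id, while v5 appends a reason code and a serialized properties block. A byte-level sketch of that difference, without the module's records (the names and the pre-serialized PropsBin argument are assumptions for illustration):

-module(puback_sketch).
-export([variable_header/4]).

%% Variable header of a PUBACK, by protocol version.
variable_header(PacketId, _ReasonCode, _PropsBin, Ver) when Ver =:= 3; Ver =:= 4 ->
    <<PacketId:16/big-unsigned-integer>>;
variable_header(PacketId, ReasonCode, PropsBin, 5) when is_binary(PropsBin) ->
    %% PropsBin stands in for what serialize_properties/2 above would return.
    [<<PacketId:16/big-unsigned-integer>>, ReasonCode, PropsBin].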
serialize_properties(_Props, Ver) when Ver =/= ?MQTT_PROTO_V5 -> <<>>; @@ -708,7 +874,7 @@ serialize_properties(undefined) -> serialize_properties(Props) when map_size(Props) == 0 -> <<0>>; serialize_properties(Props) when is_map(Props) -> - Bin = << <<(serialize_property(Prop, Val))/binary>> || {Prop, Val} <- maps:to_list(Props) >>, + Bin = <<<<(serialize_property(Prop, Val))/binary>> || {Prop, Val} <- maps:to_list(Props)>>, [serialize_variable_byte_integer(byte_size(Bin)), Bin]. serialize_property(_, Disabled) when Disabled =:= disabled; Disabled =:= undefined -> @@ -760,8 +926,10 @@ serialize_property('Retain-Available', Val) -> serialize_property('User-Property', {Key, Val}) -> <<16#26, (serialize_utf8_pair({Key, Val}))/binary>>; serialize_property('User-Property', Props) when is_list(Props) -> - << <<(serialize_property('User-Property', {Key, Val}))/binary>> - || {Key, Val} <- Props >>; + << + <<(serialize_property('User-Property', {Key, Val}))/binary>> + || {Key, Val} <- Props + >>; serialize_property('Maximum-Packet-Size', Val) -> <<16#27, Val:32/big>>; serialize_property('Wildcard-Subscription-Available', Val) -> @@ -772,24 +940,32 @@ serialize_property('Shared-Subscription-Available', Val) -> <<16#2A, Val>>. serialize_topic_filters(subscribe, TopicFilters, ?MQTT_PROTO_V5) -> - << <<(serialize_utf8_string(Topic))/binary, - ?RESERVED:2, Rh:2, (flag(Rap)):1,(flag(Nl)):1, QoS:2 >> - || {Topic, #{rh := Rh, rap := Rap, nl := Nl, qos := QoS}} <- TopicFilters >>; - + << + << + (serialize_utf8_string(Topic))/binary, + ?RESERVED:2, + Rh:2, + (flag(Rap)):1, + (flag(Nl)):1, + QoS:2 + >> + || {Topic, #{rh := Rh, rap := Rap, nl := Nl, qos := QoS}} <- TopicFilters + >>; serialize_topic_filters(subscribe, TopicFilters, _Ver) -> - << <<(serialize_utf8_string(Topic))/binary, ?RESERVED:6, QoS:2>> - || {Topic, #{qos := QoS}} <- TopicFilters >>; - + << + <<(serialize_utf8_string(Topic))/binary, ?RESERVED:6, QoS:2>> + || {Topic, #{qos := QoS}} <- TopicFilters + >>; serialize_topic_filters(unsubscribe, TopicFilters, _Ver) -> - << <<(serialize_utf8_string(Topic))/binary>> || Topic <- TopicFilters >>. + <<<<(serialize_utf8_string(Topic))/binary>> || Topic <- TopicFilters>>. serialize_reason_codes(undefined) -> <<>>; serialize_reason_codes(ReasonCodes) when is_list(ReasonCodes) -> - << <> || Code <- ReasonCodes >>. + <<<> || Code <- ReasonCodes>>. serialize_utf8_pair({Name, Value}) -> - << (serialize_utf8_string(Name))/binary, (serialize_utf8_string(Value))/binary >>. + <<(serialize_utf8_string(Name))/binary, (serialize_utf8_string(Value))/binary>>. serialize_binary_data(Bin) -> [<<(byte_size(Bin)):16/big-unsigned-integer>>, Bin]. @@ -816,7 +992,7 @@ serialize_variable_byte_integer(N) -> <<1:1, (N rem ?HIGHBIT):7, (serialize_variable_byte_integer(N div ?HIGHBIT))/binary>>. %% Is the frame too large? --spec(is_too_large(iodata(), pos_integer()) -> boolean()). +-spec is_too_large(iodata(), pos_integer()) -> boolean(). is_too_large(IoData, MaxSize) -> iolist_size(IoData) >= MaxSize. @@ -826,46 +1002,46 @@ get_property(Key, Props, Default) -> maps:get(Key, Props, Default). %% Validate header if sctrict mode. 
See: mqtt-v5.0: 2.1.3 Flags -validate_header(?CONNECT, 0, 0, 0) -> ok; -validate_header(?CONNACK, 0, 0, 0) -> ok; +validate_header(?CONNECT, 0, 0, 0) -> ok; +validate_header(?CONNACK, 0, 0, 0) -> ok; validate_header(?PUBLISH, 0, ?QOS_0, _) -> ok; validate_header(?PUBLISH, _, ?QOS_1, _) -> ok; validate_header(?PUBLISH, 0, ?QOS_2, _) -> ok; -validate_header(?PUBACK, 0, 0, 0) -> ok; -validate_header(?PUBREC, 0, 0, 0) -> ok; -validate_header(?PUBREL, 0, 1, 0) -> ok; -validate_header(?PUBCOMP, 0, 0, 0) -> ok; -validate_header(?SUBSCRIBE, 0, 1, 0) -> ok; -validate_header(?SUBACK, 0, 0, 0) -> ok; -validate_header(?UNSUBSCRIBE, 0, 1, 0) -> ok; -validate_header(?UNSUBACK, 0, 0, 0) -> ok; -validate_header(?PINGREQ, 0, 0, 0) -> ok; -validate_header(?PINGRESP, 0, 0, 0) -> ok; -validate_header(?DISCONNECT, 0, 0, 0) -> ok; -validate_header(?AUTH, 0, 0, 0) -> ok; +validate_header(?PUBACK, 0, 0, 0) -> ok; +validate_header(?PUBREC, 0, 0, 0) -> ok; +validate_header(?PUBREL, 0, 1, 0) -> ok; +validate_header(?PUBCOMP, 0, 0, 0) -> ok; +validate_header(?SUBSCRIBE, 0, 1, 0) -> ok; +validate_header(?SUBACK, 0, 0, 0) -> ok; +validate_header(?UNSUBSCRIBE, 0, 1, 0) -> ok; +validate_header(?UNSUBACK, 0, 0, 0) -> ok; +validate_header(?PINGREQ, 0, 0, 0) -> ok; +validate_header(?PINGRESP, 0, 0, 0) -> ok; +validate_header(?DISCONNECT, 0, 0, 0) -> ok; +validate_header(?AUTH, 0, 0, 0) -> ok; validate_header(_Type, _Dup, _QoS, _Rt) -> ?PARSE_ERR(bad_frame_header). -compile({inline, [validate_packet_id/1]}). validate_packet_id(0) -> ?PARSE_ERR(bad_packet_id); validate_packet_id(_) -> ok. -validate_subqos([3|_]) -> ?PARSE_ERR(bad_subqos); -validate_subqos([_|T]) -> validate_subqos(T); -validate_subqos([]) -> ok. +validate_subqos([3 | _]) -> ?PARSE_ERR(bad_subqos); +validate_subqos([_ | T]) -> validate_subqos(T); +validate_subqos([]) -> ok. bool(0) -> false; bool(1) -> true. -flag(undefined) -> ?RESERVED; -flag(false) -> 0; -flag(true) -> 1; +flag(undefined) -> ?RESERVED; +flag(false) -> 0; +flag(true) -> 1; flag(X) when is_integer(X) -> X; -flag(B) when is_binary(B) -> 1. +flag(B) when is_binary(B) -> 1. -fixqos(?PUBREL, 0) -> 1; -fixqos(?SUBSCRIBE, 0) -> 1; +fixqos(?PUBREL, 0) -> 1; +fixqos(?SUBSCRIBE, 0) -> 1; fixqos(?UNSUBSCRIBE, 0) -> 1; -fixqos(_Type, QoS) -> QoS. +fixqos(_Type, QoS) -> QoS. validate_utf8(Bin) -> case unicode:characters_to_binary(Bin) of @@ -885,33 +1061,39 @@ validate_utf8(Bin) -> validate_mqtt_utf8_char(<<>>) -> true; %% ==== 1-Byte UTF-8 invalid: [[U+0000 .. U+001F] && [U+007F]] -validate_mqtt_utf8_char(<>) - when B1 >= 16#20, B1 =< 16#7E -> +validate_mqtt_utf8_char(<>) when + B1 >= 16#20, B1 =< 16#7E +-> validate_mqtt_utf8_char(Bs); -validate_mqtt_utf8_char(<>) - when B1 >= 16#00, B1 =< 16#1F; - B1 =:= 16#7F -> +validate_mqtt_utf8_char(<>) when + B1 >= 16#00, B1 =< 16#1F; + B1 =:= 16#7F +-> %% [U+0000 .. U+001F] && [U+007F] false; %% ==== 2-Bytes UTF-8 invalid: [U+0080 .. U+009F] -validate_mqtt_utf8_char(<>) - when B1 =:= 16#C2; - B2 >= 16#A0, B2 =< 16#BF; - B1 > 16#C3, B1 =< 16#DE; - B2 >= 16#80, B2 =< 16#BF -> +validate_mqtt_utf8_char(<>) when + B1 =:= 16#C2; + B2 >= 16#A0, B2 =< 16#BF; + B1 > 16#C3, B1 =< 16#DE; + B2 >= 16#80, B2 =< 16#BF +-> validate_mqtt_utf8_char(Bs); -validate_mqtt_utf8_char(<<16#C2, B2, _Bs/binary>>) - when B2 >= 16#80, B2 =< 16#9F -> +validate_mqtt_utf8_char(<<16#C2, B2, _Bs/binary>>) when + B2 >= 16#80, B2 =< 16#9F +-> %% [U+0080 .. U+009F] false; %% ==== 3-Bytes UTF-8 invalid: [U+D800 .. 
U+DFFF] -validate_mqtt_utf8_char(<>) - when B1 >= 16#E0, B1 =< 16#EE; - B1 =:= 16#EF -> +validate_mqtt_utf8_char(<>) when + B1 >= 16#E0, B1 =< 16#EE; + B1 =:= 16#EF +-> validate_mqtt_utf8_char(Bs); validate_mqtt_utf8_char(<<16#ED, _B2, _B3, _Bs/binary>>) -> false; %% ==== 4-Bytes UTF-8 -validate_mqtt_utf8_char(<>) - when B1 =:= 16#0F -> +validate_mqtt_utf8_char(<>) when + B1 =:= 16#0F +-> validate_mqtt_utf8_char(Bs). diff --git a/apps/emqx/src/emqx_gc.erl b/apps/emqx/src/emqx_gc.erl index a7d538621..53ba53d22 100644 --- a/apps/emqx/src/emqx_gc.erl +++ b/apps/emqx/src/emqx_gc.erl @@ -28,22 +28,27 @@ -include("types.hrl"). --export([ init/1 - , run/2 - , run/3 - , info/1 - , reset/1 - ]). +-export([ + init/1, + run/2, + run/3, + info/1, + reset/1 +]). -export_type([opts/0, gc_state/0]). --type(opts() :: #{count => integer(), - bytes => integer()}). +-type opts() :: #{ + count => integer(), + bytes => integer() +}. --type(st() :: #{cnt => {integer(), integer()}, - oct => {integer(), integer()}}). +-type st() :: #{ + cnt => {integer(), integer()}, + oct => {integer(), integer()} +}. --opaque(gc_state() :: {gc_state, st()}). +-opaque gc_state() :: {gc_state, st()}. -define(GCS(St), {gc_state, St}). @@ -51,27 +56,27 @@ -define(ENABLED(X), (is_integer(X) andalso X > 0)). %% @doc Initialize force GC state. --spec(init(opts()) -> gc_state()). +-spec init(opts()) -> gc_state(). init(#{count := Count, bytes := Bytes}) -> Cnt = [{cnt, {Count, Count}} || ?ENABLED(Count)], Oct = [{oct, {Bytes, Bytes}} || ?ENABLED(Bytes)], ?GCS(maps:from_list(Cnt ++ Oct)). %% @doc Try to run GC based on reduntions of count or bytes. --spec(run(#{cnt := pos_integer(), oct := pos_integer()}, gc_state()) - -> {boolean(), gc_state()}). +-spec run(#{cnt := pos_integer(), oct := pos_integer()}, gc_state()) -> + {boolean(), gc_state()}. run(#{cnt := Cnt, oct := Oct}, GcSt) -> run(Cnt, Oct, GcSt). --spec(run(pos_integer(), pos_integer(), gc_state()) - -> {boolean(), gc_state()}). +-spec run(pos_integer(), pos_integer(), gc_state()) -> + {boolean(), gc_state()}. run(Cnt, Oct, ?GCS(St)) -> {Res, St1} = do_run([{cnt, Cnt}, {oct, Oct}], St), {Res, ?GCS(St1)}. do_run([], St) -> {false, St}; -do_run([{K, N}|T], St) -> +do_run([{K, N} | T], St) -> case dec(K, N, St) of {true, St1} -> erlang:garbage_collect(), @@ -81,11 +86,11 @@ do_run([{K, N}|T], St) -> end. %% @doc Info of GC state. --spec(info(maybe(gc_state())) -> maybe(map())). +-spec info(maybe(gc_state())) -> maybe(map()). info(?GCS(St)) -> St. %% @doc Reset counters to zero. --spec(reset(maybe(gc_state())) -> gc_state()). +-spec reset(maybe(gc_state())) -> gc_state(). reset(?GCS(St)) -> ?GCS(do_reset(St)). @@ -93,7 +98,7 @@ reset(?GCS(St)) -> %% Internal functions %%-------------------------------------------------------------------- --spec(dec(cnt | oct, pos_integer(), st()) -> {boolean(), st()}). +-spec dec(cnt | oct, pos_integer(), st()) -> {boolean(), st()}. dec(Key, Num, St) -> case maps:get(Key, St, ?disabled) of ?disabled -> @@ -113,4 +118,3 @@ do_reset(Key, St) -> ?disabled -> St; {Init, _} -> maps:put(Key, {Init, Init}, St) end. - diff --git a/apps/emqx/src/emqx_guid.erl b/apps/emqx/src/emqx_guid.erl index 63f3604ef..a156abac6 100644 --- a/apps/emqx/src/emqx_guid.erl +++ b/apps/emqx/src/emqx_guid.erl @@ -30,17 +30,17 @@ -module(emqx_guid). --export([ gen/0 - , new/0 - , timestamp/1 - , to_hexstr/1 - , from_hexstr/1 - , to_base62/1 - , from_base62/1 - ]). +-export([ + gen/0, + new/0, + timestamp/1, + to_hexstr/1, + from_hexstr/1, + to_base62/1, + from_base62/1 +]). 
--export_type([ guid/0 - ]). +-export_type([guid/0]). -define(TAG_VERSION, 131). -define(PID_EXT, 103). @@ -48,21 +48,23 @@ -define(MAX_SEQ, 16#FFFF). --type(guid() :: <<_:128>>). +-type guid() :: <<_:128>>. %% @doc Generate a global unique id. --spec(gen() -> guid()). +-spec gen() -> guid(). gen() -> - Guid = case get(guid) of - undefined -> new(); - {_Ts, NPid, Seq} -> next(NPid, Seq) - end, - put(guid, Guid), bin(Guid). + Guid = + case get(guid) of + undefined -> new(); + {_Ts, NPid, Seq} -> next(NPid, Seq) + end, + put(guid, Guid), + bin(Guid). new() -> {ts(), npid(), 0}. --spec(timestamp(guid()) -> integer()). +-spec timestamp(guid()) -> integer(). timestamp(<>) -> Ts. @@ -78,11 +80,10 @@ ts() -> erlang:system_time(micro_seconds). %% Copied from https://github.com/okeuday/uuid.git. npid() -> - <> = - crypto:hash(sha, erlang:list_to_binary(erlang:atom_to_list(node()))), + <> = + crypto:hash(sha, erlang:list_to_binary(erlang:atom_to_list(node()))), PidBin = case erlang:term_to_binary(self()) of @@ -95,38 +96,44 @@ npid() -> end, % 72/86 bits for the Erlang pid - <> = PidBin, - PidCR1 = case PidCreation of - <> -> - D1; - <> -> - D1 bxor D2 bxor D3 bxor D4 - end, + % ID (Node specific, 15 bits) + <> = PidBin, + + PidCR1 = + case PidCreation of + <> -> + D1; + <> -> + D1 bxor D2 bxor D3 bxor D4 + end, % reduce the 160 bit NodeData checksum to 16 bits - NodeByte1 = ((((((((NodeD01 bxor NodeD02) - bxor NodeD03) - bxor NodeD04) - bxor NodeD05) - bxor NodeD06) - bxor NodeD07) - bxor NodeD08) - bxor NodeD09) - bxor NodeD10, - NodeByte2 = (((((((((NodeD11 bxor NodeD12) - bxor NodeD13) - bxor NodeD14) - bxor NodeD15) - bxor NodeD16) - bxor NodeD17) - bxor NodeD18) - bxor NodeD19) - bxor NodeD20) - bxor PidCR1, + NodeByte1 = + ((((((((NodeD01 bxor NodeD02) bxor + NodeD03) bxor + NodeD04) bxor + NodeD05) bxor + NodeD06) bxor + NodeD07) bxor + NodeD08) bxor + NodeD09) bxor + NodeD10, + NodeByte2 = + (((((((((NodeD11 bxor NodeD12) bxor + NodeD13) bxor + NodeD14) bxor + NodeD15) bxor + NodeD16) bxor + NodeD17) bxor + NodeD18) bxor + NodeD19) bxor + NodeD20) bxor + PidCR1, % reduce the Erlang pid to 32 bits PidByte1 = PidID1 bxor PidSR4, @@ -134,9 +141,7 @@ npid() -> PidByte3 = PidID3 bxor PidSR2, PidByte4 = PidID4 bxor PidSR1, - <> = <>, + <> = <>, NPid. to_hexstr(I) when byte_size(I) =:= 16 -> @@ -149,5 +154,5 @@ to_base62(<>) -> emqx_base62:encode(I). from_base62(S) -> - I = binary_to_integer( emqx_base62:decode(S)), + I = binary_to_integer(emqx_base62:decode(S)), <>. diff --git a/apps/emqx/src/emqx_hocon.erl b/apps/emqx/src/emqx_hocon.erl index 9d9da03de..77e040721 100644 --- a/apps/emqx/src/emqx_hocon.erl +++ b/apps/emqx/src/emqx_hocon.erl @@ -17,22 +17,22 @@ %% @doc HOCON schema help module -module(emqx_hocon). --export([ format_path/1 - , check/2 - ]). +-export([ + format_path/1, + check/2 +]). %% @doc Format hocon config field path to dot-separated string in iolist format. -spec format_path([atom() | string() | binary()]) -> iolist(). format_path([]) -> ""; format_path([Name]) -> iol(Name); -format_path([Name | Rest]) -> - [iol(Name) , "." | format_path(Rest)]. +format_path([Name | Rest]) -> [iol(Name), "." | format_path(Rest)]. %% @doc Plain check the input config. %% The input can either be `richmap' or plain `map'. %% Always return plain map with atom keys. -spec check(module(), hocon:config() | iodata()) -> - {ok, hocon:config()} | {error, any()}. + {ok, hocon:config()} | {error, any()}. 
check(SchemaModule, Conf) when is_map(Conf) -> %% TODO: remove required %% fields should state required or not in their schema @@ -40,7 +40,7 @@ check(SchemaModule, Conf) when is_map(Conf) -> try {ok, hocon_tconf:check_plain(SchemaModule, Conf, Opts)} catch - throw : Reason -> + throw:Reason -> {error, Reason} end; check(SchemaModule, HoconText) -> diff --git a/apps/emqx/src/emqx_hooks.erl b/apps/emqx/src/emqx_hooks.erl index 2cc5a1740..72319bae0 100644 --- a/apps/emqx/src/emqx_hooks.erl +++ b/apps/emqx/src/emqx_hooks.erl @@ -22,42 +22,46 @@ -include("types.hrl"). -include_lib("stdlib/include/ms_transform.hrl"). - --export([ start_link/0 - , stop/0 - ]). +-export([ + start_link/0, + stop/0 +]). %% Hooks API --export([ add/2 - , add/3 - , add/4 - , put/2 - , put/3 - , put/4 - , del/2 - , run/2 - , run_fold/3 - , lookup/1 - ]). +-export([ + add/2, + add/3, + add/4, + put/2, + put/3, + put/4, + del/2, + run/2, + run_fold/3, + lookup/1 +]). --export([ callback_action/1 - , callback_filter/1 - , callback_priority/1 - ]). +-export([ + callback_action/1, + callback_filter/1, + callback_priority/1 +]). %% gen_server Function Exports --export([ init/1 - , handle_call/3 - , handle_cast/2 - , handle_info/2 - , terminate/2 - , code_change/3 - ]). +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2, + code_change/3 +]). --export_type([ hookpoint/0 - , action/0 - , filter/0 - ]). +-export_type([ + hookpoint/0, + action/0, + filter/0 +]). %% Multiple callbacks can be registered on a hookpoint. %% The execution order depends on the priority value: @@ -67,32 +71,36 @@ %% - The execution order is the adding order of callbacks if they have %% equal priority values. --type(hookpoint() :: atom() | binary()). --type(action() :: {module(), atom(), [term()] | undefined}). --type(filter() :: {module(), atom(), [term()] | undefined}). +-type hookpoint() :: atom() | binary(). +-type action() :: {module(), atom(), [term()] | undefined}. +-type filter() :: {module(), atom(), [term()] | undefined}. -record(callback, { - action :: action(), - filter :: maybe(filter()), - priority :: integer() - }). + action :: action(), + filter :: maybe(filter()), + priority :: integer() +}). --type(callback() :: #callback{}). +-type callback() :: #callback{}. -record(hook, { - name :: hookpoint(), - callbacks :: list(#callback{}) - }). + name :: hookpoint(), + callbacks :: list(#callback{}) +}). -define(TAB, ?MODULE). -define(SERVER, ?MODULE). --spec(start_link() -> startlink_ret()). +-spec start_link() -> startlink_ret(). start_link() -> - gen_server:start_link({local, ?SERVER}, - ?MODULE, [], [{hibernate_after, 1000}]). + gen_server:start_link( + {local, ?SERVER}, + ?MODULE, + [], + [{hibernate_after, 1000}] + ). --spec(stop() -> ok). +-spec stop() -> ok. stop() -> gen_server:stop(?SERVER, normal, infinity). @@ -107,65 +115,63 @@ callback_action(#callback{action = A}) -> A. callback_filter(#callback{filter = F}) -> F. %% @doc Get callback priority. -callback_priority(#callback{priority= P}) -> P. +callback_priority(#callback{priority = P}) -> P. %%-------------------------------------------------------------------- %% Hooks API %%-------------------------------------------------------------------- %% @doc Register a callback --spec(add(hookpoint(), action() | callback()) -> ok_or_error(already_exists)). +-spec add(hookpoint(), action() | callback()) -> ok_or_error(already_exists). 
add(HookPoint, Callback) when is_record(Callback, callback) -> gen_server:call(?SERVER, {add, HookPoint, Callback}, infinity); add(HookPoint, Action) when is_function(Action); is_tuple(Action) -> add(HookPoint, #callback{action = Action, priority = 0}). --spec(add(hookpoint(), action(), filter() | integer() | list()) - -> ok_or_error(already_exists)). +-spec add(hookpoint(), action(), filter() | integer() | list()) -> + ok_or_error(already_exists). add(HookPoint, Action, {_M, _F, _A} = Filter) -> add(HookPoint, #callback{action = Action, filter = Filter, priority = 0}); add(HookPoint, Action, Priority) when is_integer(Priority) -> add(HookPoint, #callback{action = Action, priority = Priority}). --spec(add(hookpoint(), action(), filter(), integer()) - -> ok_or_error(already_exists)). +-spec add(hookpoint(), action(), filter(), integer()) -> + ok_or_error(already_exists). add(HookPoint, Action, Filter, Priority) when is_integer(Priority) -> add(HookPoint, #callback{action = Action, filter = Filter, priority = Priority}). %% @doc Like add/2, it register a callback, discard 'already_exists' error. --spec(put(hookpoint(), action() | callback()) -> ok). +-spec put(hookpoint(), action() | callback()) -> ok. put(HookPoint, Callback) when is_record(Callback, callback) -> case add(HookPoint, Callback) of ok -> ok; - {error, already_exists} -> - gen_server:call(?SERVER, {put, HookPoint, Callback}, infinity) + {error, already_exists} -> gen_server:call(?SERVER, {put, HookPoint, Callback}, infinity) end; put(HookPoint, Action) when is_function(Action); is_tuple(Action) -> ?MODULE:put(HookPoint, #callback{action = Action, priority = 0}). --spec(put(hookpoint(), action(), filter() | integer() | list()) -> ok). +-spec put(hookpoint(), action(), filter() | integer() | list()) -> ok. put(HookPoint, Action, {_M, _F, _A} = Filter) -> ?MODULE:put(HookPoint, #callback{action = Action, filter = Filter, priority = 0}); put(HookPoint, Action, Priority) when is_integer(Priority) -> ?MODULE:put(HookPoint, #callback{action = Action, priority = Priority}). --spec(put(hookpoint(), action(), filter(), integer()) -> ok). +-spec put(hookpoint(), action(), filter(), integer()) -> ok. put(HookPoint, Action, Filter, Priority) when is_integer(Priority) -> ?MODULE:put(HookPoint, #callback{action = Action, filter = Filter, priority = Priority}). - %% @doc Unregister a callback. --spec(del(hookpoint(), action() | {module(), atom()}) -> ok). +-spec del(hookpoint(), action() | {module(), atom()}) -> ok. del(HookPoint, Action) -> gen_server:cast(?SERVER, {del, HookPoint, Action}). %% @doc Run hooks. --spec(run(hookpoint(), list(Arg::term())) -> ok). +-spec run(hookpoint(), list(Arg :: term())) -> ok. run(HookPoint, Args) -> do_run(lookup(HookPoint), Args). %% @doc Run hooks with Accumulator. --spec(run_fold(hookpoint(), list(Arg::term()), Acc::term()) -> Acc::term()). +-spec run_fold(hookpoint(), list(Arg :: term()), Acc :: term()) -> Acc :: term(). run_fold(HookPoint, Args, Acc) -> do_run_fold(lookup(HookPoint), Args, Acc). 
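The registration API above keys callbacks by hookpoint, orders them by an integer priority, and folds an accumulator through the chain with run_fold/3. A usage sketch, assuming a hypothetical my_mod:on_message_publish/1 that follows the fold contract:

-module(hooks_usage_sketch).
-export([demo/1]).

%% Register a callback with priority 10, fold the hookpoint over Acc0,
%% then unregister. Only the emqx_hooks calls mirror the API above; the
%% hookpoint payload and the callback module are placeholders.
demo(Acc0) ->
    ok = emqx_hooks:add('message.publish', {my_mod, on_message_publish, []}, 10),
    Acc1 = emqx_hooks:run_fold('message.publish', [], Acc0),
    ok = emqx_hooks:del('message.publish', {my_mod, on_message_publish}),
    Acc1.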
@@ -187,9 +193,9 @@ do_run_fold([#callback{action = Action, filter = Filter} | Callbacks], Args, Acc %% stop the hook chain stop -> Acc; %% stop the hook chain with NewAcc - {stop, NewAcc} -> NewAcc; + {stop, NewAcc} -> NewAcc; %% continue the hook chain with NewAcc - {ok, NewAcc} -> do_run_fold(Callbacks, Args, NewAcc); + {ok, NewAcc} -> do_run_fold(Callbacks, Args, NewAcc); %% continue the hook chain, in following cases: %% - the filter validation failed with 'false' %% - the callback returns any term other than 'stop' or {'stop', NewAcc} @@ -198,10 +204,9 @@ do_run_fold([#callback{action = Action, filter = Filter} | Callbacks], Args, Acc do_run_fold([], _Args, Acc) -> Acc. --spec(filter_passed(filter(), Args::term()) -> true | false). +-spec filter_passed(filter(), Args :: term()) -> true | false. filter_passed(undefined, _Args) -> true; -filter_passed(Filter, Args) -> - execute(Filter, Args). +filter_passed(Filter, Args) -> execute(Filter, Args). safe_execute({M, F, A}, Args) -> try execute({M, F, A}, Args) of @@ -222,12 +227,13 @@ execute({M, F, A}, Args) -> erlang:apply(M, F, Args ++ A). %% @doc Lookup callbacks. --spec(lookup(hookpoint()) -> [callback()]). +-spec lookup(hookpoint()) -> [callback()]. lookup(HookPoint) -> case ets:lookup(?TAB, HookPoint) of [#hook{callbacks = Callbacks}] -> Callbacks; - [] -> [] + [] -> + [] end. %%-------------------------------------------------------------------- @@ -239,19 +245,23 @@ init([]) -> {ok, #{}}. handle_call({add, HookPoint, Callback = #callback{action = {M, F, _}}}, _From, State) -> - Reply = case lists:any(fun (#callback{action = {M0, F0, _}}) -> - M0 =:= M andalso F0 =:= F - end, Callbacks = lookup(HookPoint)) of - true -> {error, already_exists}; - false -> insert_hook(HookPoint, add_callback(Callback, Callbacks)) - end, + Reply = + case + lists:any( + fun(#callback{action = {M0, F0, _}}) -> + M0 =:= M andalso F0 =:= F + end, + Callbacks = lookup(HookPoint) + ) + of + true -> {error, already_exists}; + false -> insert_hook(HookPoint, add_callback(Callback, Callbacks)) + end, {reply, Reply, State}; - handle_call({put, HookPoint, Callback = #callback{action = {M, F, _}}}, _From, State) -> Callbacks = del_callback({M, F}, lookup(HookPoint)), Reply = update_hook(HookPoint, add_callback(Callback, Callbacks)), {reply, Reply, State}; - handle_call(Req, _From, State) -> ?SLOG(error, #{msg => "unexpected_call", req => Req}), {reply, ignored, State}. @@ -264,7 +274,6 @@ handle_cast({del, HookPoint, Action}, State) -> insert_hook(HookPoint, Callbacks) end, {noreply, State}; - handle_cast(Msg, State) -> ?SLOG(error, #{msg => "unexpected_cast", req => Msg}), {noreply, State}. @@ -284,9 +293,10 @@ code_change(_OldVsn, State, _Extra) -> %%------------------------------------------------------------------------------ insert_hook(HookPoint, Callbacks) -> - ets:insert(?TAB, #hook{name = HookPoint, callbacks = Callbacks}), ok. + ets:insert(?TAB, #hook{name = HookPoint, callbacks = Callbacks}), + ok. update_hook(HookPoint, Callbacks) -> - Ms = ets:fun2ms(fun ({hook, K, V}) when K =:= HookPoint -> {hook, K, Callbacks} end), + Ms = ets:fun2ms(fun({hook, K, V}) when K =:= HookPoint -> {hook, K, Callbacks} end), ets:select_replace(emqx_hooks, Ms), ok. 
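do_run_fold/3 above gives each callback three meaningful returns: stop ends the chain keeping the current accumulator, {stop, NewAcc} ends it with a replacement, and {ok, NewAcc} continues with the new value; any other term continues with the accumulator unchanged. A callback sketch honoring that contract (the module name and the map-shaped accumulator are illustrative only):

-module(hook_cb_sketch).
-export([on_run_fold/1]).

%% Continue the chain with an updated accumulator when it is a map;
%% otherwise return a term that neither stops nor replaces anything,
%% which do_run_fold treats as "keep going with the old accumulator".
on_run_fold(Acc) when is_map(Acc) ->
    {ok, Acc#{seen => true}};
on_run_fold(_Acc) ->
    ok.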
@@ -295,11 +305,17 @@ add_callback(C, Callbacks) -> add_callback(C, [], Acc) -> lists:reverse([C | Acc]); -add_callback(C1 = #callback{priority = P1}, [C2 = #callback{priority = P2} | More], Acc) - when P1 < P2 -> +add_callback(C1 = #callback{priority = P1}, [C2 = #callback{priority = P2} | More], Acc) when + P1 < P2 +-> add_callback(C1, More, [C2 | Acc]); -add_callback(C1 = #callback{priority = P1, action = MFA1}, [C2 = #callback{priority = P2, action = MFA2} | More], Acc) - when P1 =:= P2 andalso MFA1 >= MFA2 -> +add_callback( + C1 = #callback{priority = P1, action = MFA1}, + [C2 = #callback{priority = P2, action = MFA2} | More], + Acc +) when + P1 =:= P2 andalso MFA1 >= MFA2 +-> add_callback(C1, More, [C2 | Acc]); add_callback(C1, More, Acc) -> lists:append(lists:reverse(Acc), [C1 | More]). diff --git a/apps/emqx/src/emqx_inflight.erl b/apps/emqx/src/emqx_inflight.erl index fc9853a8b..1aa0ed763 100644 --- a/apps/emqx/src/emqx_inflight.erl +++ b/apps/emqx/src/emqx_inflight.erl @@ -19,109 +19,109 @@ -compile(inline). %% APIs --export([ new/0 - , new/1 - , contain/2 - , lookup/2 - , insert/3 - , update/3 - , resize/2 - , delete/2 - , values/1 - , to_list/1 - , to_list/2 - , size/1 - , max_size/1 - , is_full/1 - , is_empty/1 - , window/1 - ]). +-export([ + new/0, + new/1, + contain/2, + lookup/2, + insert/3, + update/3, + resize/2, + delete/2, + values/1, + to_list/1, + to_list/2, + size/1, + max_size/1, + is_full/1, + is_empty/1, + window/1 +]). -export_type([inflight/0]). --type(key() :: term()). +-type key() :: term(). --type(max_size() :: pos_integer()). +-type max_size() :: pos_integer(). --opaque(inflight() :: {inflight, max_size(), gb_trees:tree()}). +-opaque inflight() :: {inflight, max_size(), gb_trees:tree()}. -define(INFLIGHT(Tree), {inflight, _MaxSize, Tree}). -define(INFLIGHT(MaxSize, Tree), {inflight, MaxSize, (Tree)}). --spec(new() -> inflight()). +-spec new() -> inflight(). new() -> new(0). --spec(new(non_neg_integer()) -> inflight()). +-spec new(non_neg_integer()) -> inflight(). new(MaxSize) when MaxSize >= 0 -> ?INFLIGHT(MaxSize, gb_trees:empty()). --spec(contain(key(), inflight()) -> boolean()). +-spec contain(key(), inflight()) -> boolean(). contain(Key, ?INFLIGHT(Tree)) -> gb_trees:is_defined(Key, Tree). --spec(lookup(key(), inflight()) -> {value, term()} | none). +-spec lookup(key(), inflight()) -> {value, term()} | none. lookup(Key, ?INFLIGHT(Tree)) -> gb_trees:lookup(Key, Tree). --spec(insert(key(), Val :: term(), inflight()) -> inflight()). +-spec insert(key(), Val :: term(), inflight()) -> inflight(). insert(Key, Val, ?INFLIGHT(MaxSize, Tree)) -> ?INFLIGHT(MaxSize, gb_trees:insert(Key, Val, Tree)). --spec(delete(key(), inflight()) -> inflight()). +-spec delete(key(), inflight()) -> inflight(). delete(Key, ?INFLIGHT(MaxSize, Tree)) -> ?INFLIGHT(MaxSize, gb_trees:delete(Key, Tree)). --spec(update(key(), Val :: term(), inflight()) -> inflight()). +-spec update(key(), Val :: term(), inflight()) -> inflight(). update(Key, Val, ?INFLIGHT(MaxSize, Tree)) -> ?INFLIGHT(MaxSize, gb_trees:update(Key, Val, Tree)). --spec(resize(integer(), inflight()) -> inflight()). +-spec resize(integer(), inflight()) -> inflight(). resize(MaxSize, ?INFLIGHT(Tree)) -> ?INFLIGHT(MaxSize, Tree). --spec(is_full(inflight()) -> boolean()). +-spec is_full(inflight()) -> boolean(). is_full(?INFLIGHT(0, _Tree)) -> false; is_full(?INFLIGHT(MaxSize, Tree)) -> MaxSize =< gb_trees:size(Tree). --spec(is_empty(inflight()) -> boolean()). +-spec is_empty(inflight()) -> boolean(). 
is_empty(?INFLIGHT(Tree)) -> gb_trees:is_empty(Tree). --spec(smallest(inflight()) -> {key(), term()}). +-spec smallest(inflight()) -> {key(), term()}. smallest(?INFLIGHT(Tree)) -> gb_trees:smallest(Tree). --spec(largest(inflight()) -> {key(), term()}). +-spec largest(inflight()) -> {key(), term()}. largest(?INFLIGHT(Tree)) -> gb_trees:largest(Tree). --spec(values(inflight()) -> list()). +-spec values(inflight()) -> list(). values(?INFLIGHT(Tree)) -> gb_trees:values(Tree). --spec(to_list(inflight()) -> list({key(), term()})). +-spec to_list(inflight()) -> list({key(), term()}). to_list(?INFLIGHT(Tree)) -> gb_trees:to_list(Tree). --spec(to_list(fun(), inflight()) -> list({key(), term()})). +-spec to_list(fun(), inflight()) -> list({key(), term()}). to_list(SortFun, ?INFLIGHT(Tree)) -> lists:sort(SortFun, gb_trees:to_list(Tree)). --spec(window(inflight()) -> list()). +-spec window(inflight()) -> list(). window(Inflight = ?INFLIGHT(Tree)) -> case gb_trees:is_empty(Tree) of true -> []; false -> [Key || {Key, _Val} <- [smallest(Inflight), largest(Inflight)]] end. --spec(size(inflight()) -> non_neg_integer()). +-spec size(inflight()) -> non_neg_integer(). size(?INFLIGHT(Tree)) -> gb_trees:size(Tree). --spec(max_size(inflight()) -> non_neg_integer()). +-spec max_size(inflight()) -> non_neg_integer(). max_size(?INFLIGHT(MaxSize, _Tree)) -> MaxSize. - diff --git a/apps/emqx/src/emqx_json.erl b/apps/emqx/src/emqx_json.erl index c8ee0a85a..4be39152c 100644 --- a/apps/emqx/src/emqx_json.erl +++ b/apps/emqx/src/emqx_json.erl @@ -18,52 +18,58 @@ -compile(inline). --export([ encode/1 - , encode/2 - , safe_encode/1 - , safe_encode/2 - ]). +-export([ + encode/1, + encode/2, + safe_encode/1, + safe_encode/2 +]). --compile({inline, - [ encode/1 - , encode/2 - ]}). +-compile( + {inline, [ + encode/1, + encode/2 + ]} +). --export([ decode/1 - , decode/2 - , safe_decode/1 - , safe_decode/2 - ]). +-export([ + decode/1, + decode/2, + safe_decode/1, + safe_decode/2 +]). --compile({inline, - [ decode/1 - , decode/2 - ]}). +-compile( + {inline, [ + decode/1, + decode/2 + ]} +). --type(encode_options() :: jiffy:encode_options()). --type(decode_options() :: jiffy:decode_options()). +-type encode_options() :: jiffy:encode_options(). +-type decode_options() :: jiffy:decode_options(). --type(json_text() :: iolist() | binary()). --type(json_term() :: jiffy:jiffy_decode_result()). +-type json_text() :: iolist() | binary(). +-type json_term() :: jiffy:jiffy_decode_result(). -export_type([json_text/0, json_term/0]). -export_type([decode_options/0, encode_options/0]). --spec(encode(json_term()) -> json_text()). +-spec encode(json_term()) -> json_text(). encode(Term) -> encode(Term, [force_utf8]). --spec(encode(json_term(), encode_options()) -> json_text()). +-spec encode(json_term(), encode_options()) -> json_text(). encode(Term, Opts) -> to_binary(jiffy:encode(to_ejson(Term), Opts)). --spec(safe_encode(json_term()) - -> {ok, json_text()} | {error, Reason :: term()}). +-spec safe_encode(json_term()) -> + {ok, json_text()} | {error, Reason :: term()}. safe_encode(Term) -> safe_encode(Term, [force_utf8]). --spec(safe_encode(json_term(), encode_options()) - -> {ok, json_text()} | {error, Reason :: term()}). +-spec safe_encode(json_term(), encode_options()) -> + {ok, json_text()} | {error, Reason :: term()}. safe_encode(Term, Opts) -> try encode(Term, Opts) of Json -> {ok, Json} @@ -72,20 +78,20 @@ safe_encode(Term, Opts) -> {error, Reason} end. --spec(decode(json_text()) -> json_term()). +-spec decode(json_text()) -> json_term(). 
decode(Json) -> decode(Json, []). --spec(decode(json_text(), decode_options()) -> json_term()). +-spec decode(json_text(), decode_options()) -> json_term(). decode(Json, Opts) -> from_ejson(jiffy:decode(Json, Opts)). --spec(safe_decode(json_text()) - -> {ok, json_term()} | {error, Reason :: term()}). +-spec safe_decode(json_text()) -> + {ok, json_term()} | {error, Reason :: term()}. safe_decode(Json) -> safe_decode(Json, []). --spec(safe_decode(json_text(), decode_options()) - -> {ok, json_term()} | {error, Reason :: term()}). +-spec safe_decode(json_text(), decode_options()) -> + {ok, json_term()} | {error, Reason :: term()}. safe_decode(Json, Opts) -> try decode(Json, Opts) of Term -> {ok, Term} @@ -98,18 +104,21 @@ safe_decode(Json, Opts) -> %% Helpers %%-------------------------------------------------------------------- --compile({inline, - [ to_ejson/1 - , from_ejson/1 - ]}). +-compile( + {inline, [ + to_ejson/1, + from_ejson/1 + ]} +). to_ejson([{}]) -> {[]}; -to_ejson([{_, _}|_] = L) -> - {[{K, to_ejson(V)} || {K, V} <- L ]}; +to_ejson([{_, _} | _] = L) -> + {[{K, to_ejson(V)} || {K, V} <- L]}; to_ejson(L) when is_list(L) -> [to_ejson(E) || E <- L]; -to_ejson(T) -> T. +to_ejson(T) -> + T. from_ejson(L) when is_list(L) -> [from_ejson(E) || E <- L]; @@ -117,7 +126,8 @@ from_ejson({[]}) -> [{}]; from_ejson({L}) -> [{Name, from_ejson(Value)} || {Name, Value} <- L]; -from_ejson(T) -> T. +from_ejson(T) -> + T. to_binary(B) when is_binary(B) -> B; to_binary(L) when is_list(L) -> diff --git a/apps/emqx/src/emqx_keepalive.erl b/apps/emqx/src/emqx_keepalive.erl index b19cd9277..3b3a3e46b 100644 --- a/apps/emqx/src/emqx_keepalive.erl +++ b/apps/emqx/src/emqx_keepalive.erl @@ -16,49 +16,55 @@ -module(emqx_keepalive). --export([ init/1 - , init/2 - , info/1 - , info/2 - , check/2 - , set/3 - ]). +-export([ + init/1, + init/2, + info/1, + info/2, + check/2, + set/3 +]). -elvis([{elvis_style, no_if_expression, disable}]). -export_type([keepalive/0]). -record(keepalive, { - interval :: pos_integer(), - statval :: non_neg_integer(), - repeat :: non_neg_integer() - }). + interval :: pos_integer(), + statval :: non_neg_integer(), + repeat :: non_neg_integer() +}). --opaque(keepalive() :: #keepalive{}). +-opaque keepalive() :: #keepalive{}. %% @doc Init keepalive. --spec(init(Interval :: non_neg_integer()) -> keepalive()). +-spec init(Interval :: non_neg_integer()) -> keepalive(). init(Interval) -> init(0, Interval). %% @doc Init keepalive. --spec(init(StatVal :: non_neg_integer(), Interval :: non_neg_integer()) -> keepalive()). +-spec init(StatVal :: non_neg_integer(), Interval :: non_neg_integer()) -> keepalive(). init(StatVal, Interval) when Interval > 0 -> - #keepalive{interval = Interval, - statval = StatVal, - repeat = 0}. + #keepalive{ + interval = Interval, + statval = StatVal, + repeat = 0 + }. %% @doc Get Info of the keepalive. --spec(info(keepalive()) -> emqx_types:infos()). -info(#keepalive{interval = Interval, - statval = StatVal, - repeat = Repeat}) -> - #{interval => Interval, - statval => StatVal, - repeat => Repeat - }. +-spec info(keepalive()) -> emqx_types:infos(). +info(#keepalive{ + interval = Interval, + statval = StatVal, + repeat = Repeat +}) -> + #{ + interval => Interval, + statval => StatVal, + repeat => Repeat + }. --spec(info(interval | statval | repeat, keepalive()) - -> non_neg_integer()). +-spec info(interval | statval | repeat, keepalive()) -> + non_neg_integer(). 
info(interval, #keepalive{interval = Interval}) -> Interval; info(statval, #keepalive{statval = StatVal}) -> @@ -67,16 +73,22 @@ info(repeat, #keepalive{repeat = Repeat}) -> Repeat. %% @doc Check keepalive. --spec(check(non_neg_integer(), keepalive()) - -> {ok, keepalive()} | {error, timeout}). -check(NewVal, KeepAlive = #keepalive{statval = OldVal, - repeat = Repeat}) -> +-spec check(non_neg_integer(), keepalive()) -> + {ok, keepalive()} | {error, timeout}. +check( + NewVal, + KeepAlive = #keepalive{ + statval = OldVal, + repeat = Repeat + } +) -> if NewVal =/= OldVal -> {ok, KeepAlive#keepalive{statval = NewVal, repeat = 0}}; Repeat < 1 -> {ok, KeepAlive#keepalive{repeat = Repeat + 1}}; - true -> {error, timeout} + true -> + {error, timeout} end. %% from mqtt-v3.1.1 specific @@ -91,6 +103,6 @@ check(NewVal, KeepAlive = #keepalive{statval = OldVal, %% typically this is a few minutes. %% The maximum value is (65535s) 18 hours 12 minutes and 15 seconds. %% @doc Update keepalive's interval --spec(set(interval, non_neg_integer(), keepalive()) -> keepalive()). +-spec set(interval, non_neg_integer(), keepalive()) -> keepalive(). set(interval, Interval, KeepAlive) when Interval >= 0 andalso Interval =< 65535000 -> KeepAlive#keepalive{interval = Interval}. diff --git a/apps/emqx/src/emqx_kernel_sup.erl b/apps/emqx/src/emqx_kernel_sup.erl index 1cf592dab..654c5e223 100644 --- a/apps/emqx/src/emqx_kernel_sup.erl +++ b/apps/emqx/src/emqx_kernel_sup.erl @@ -26,33 +26,37 @@ start_link() -> supervisor:start_link({local, ?MODULE}, ?MODULE, []). init([]) -> - {ok, {{one_for_one, 10, 100}, - %% always start emqx_config_handler first to load the emqx.conf to emqx_config - [ child_spec(emqx_config_handler, worker) - , child_spec(emqx_pool_sup, supervisor) - , child_spec(emqx_hooks, worker) - , child_spec(emqx_stats, worker) - , child_spec(emqx_metrics, worker) - , child_spec(emqx_ctl, worker) - ]}}. + {ok, { + {one_for_one, 10, 100}, + %% always start emqx_config_handler first to load the emqx.conf to emqx_config + [ + child_spec(emqx_config_handler, worker), + child_spec(emqx_pool_sup, supervisor), + child_spec(emqx_hooks, worker), + child_spec(emqx_stats, worker), + child_spec(emqx_metrics, worker), + child_spec(emqx_ctl, worker) + ] + }}. child_spec(M, Type) -> child_spec(M, Type, []). child_spec(M, worker, Args) -> - #{id => M, - start => {M, start_link, Args}, - restart => permanent, - shutdown => 5000, - type => worker, - modules => [M] - }; - + #{ + id => M, + start => {M, start_link, Args}, + restart => permanent, + shutdown => 5000, + type => worker, + modules => [M] + }; child_spec(M, supervisor, Args) -> - #{id => M, - start => {M, start_link, Args}, - restart => permanent, - shutdown => infinity, - type => supervisor, - modules => [M] - }. + #{ + id => M, + start => {M, start_link, Args}, + restart => permanent, + shutdown => infinity, + type => supervisor, + modules => [M] + }. diff --git a/apps/emqx/src/emqx_limiter.erl b/apps/emqx/src/emqx_limiter.erl index d20163c3f..f8874234a 100644 --- a/apps/emqx/src/emqx_limiter.erl +++ b/apps/emqx/src/emqx_limiter.erl @@ -19,39 +19,46 @@ -include("types.hrl"). --export([ init/2 - , init/4 %% XXX: Compatible with before 4.2 version - , info/1 - , check/2 - ]). +-export([ + init/2, + %% XXX: Compatible with before 4.2 version + init/4, + info/1, + check/2 +]). -record(limiter, { - %% Zone - zone :: atom(), - %% Checkers - checkers :: [checker()] - }). + %% Zone + zone :: atom(), + %% Checkers + checkers :: [checker()] +}). 
--type(checker() :: #{ name := name() - , capacity := non_neg_integer() - , interval := non_neg_integer() - , consumer := esockd_rate_limit:bucket() | atom() - }). +-type checker() :: #{ + name := name(), + capacity := non_neg_integer(), + interval := non_neg_integer(), + consumer := esockd_rate_limit:bucket() | atom() +}. --type(name() :: conn_bytes_in - | conn_messages_in - | conn_messages_routing - | overall_messages_routing - ). +-type name() :: + conn_bytes_in + | conn_messages_in + | conn_messages_routing + | overall_messages_routing. --type(policy() :: [{name(), esockd_rate_limit:config()}]). +-type policy() :: [{name(), esockd_rate_limit:config()}]. --type(info() :: #{name() := - #{tokens := non_neg_integer(), - capacity := non_neg_integer(), - interval := non_neg_integer()}}). +-type info() :: #{ + name() := + #{ + tokens := non_neg_integer(), + capacity := non_neg_integer(), + interval := non_neg_integer() + } +}. --type(limiter() :: #limiter{}). +-type limiter() :: #limiter{}. -dialyzer({nowarn_function, [consume/3]}). @@ -59,17 +66,25 @@ %% APIs %%-------------------------------------------------------------------- --spec(init(atom(), - maybe(esockd_rate_limit:config()), - maybe(esockd_rate_limit:config()), policy()) - -> maybe(limiter())). +-spec init( + atom(), + maybe(esockd_rate_limit:config()), + maybe(esockd_rate_limit:config()), + policy() +) -> + maybe(limiter()). init(Zone, PubLimit, BytesIn, Specs) -> - Merged = maps:merge(#{conn_messages_in => PubLimit, - conn_bytes_in => BytesIn}, maps:from_list(Specs)), + Merged = maps:merge( + #{ + conn_messages_in => PubLimit, + conn_bytes_in => BytesIn + }, + maps:from_list(Specs) + ), Filtered = maps:filter(fun(_, V) -> V /= undefined end, Merged), init(Zone, maps:to_list(Filtered)). --spec(init(atom(), policy()) -> maybe(limiter())). +-spec init(atom(), policy()) -> maybe(limiter()). init(_Zone, []) -> undefined; init(Zone, Specs) -> @@ -91,15 +106,19 @@ do_init_checker(Zone, {Name, {Capacity, Interval}}) -> Ck#{consumer => esockd_rate_limit:new(Capacity / Interval, Capacity)} end. --spec(info(limiter()) -> info()). +-spec info(limiter()) -> info(). info(#limiter{zone = Zone, checkers = Cks}) -> maps:from_list([get_info(Zone, Ck) || Ck <- Cks]). --spec(check(#{cnt := Cnt :: non_neg_integer(), - oct := Oct :: non_neg_integer()}, - Limiter :: limiter()) - -> {ok, NLimiter :: limiter()} - | {pause, MilliSecs :: non_neg_integer(), NLimiter :: limiter()}). +-spec check( + #{ + cnt := Cnt :: non_neg_integer(), + oct := Oct :: non_neg_integer() + }, + Limiter :: limiter() +) -> + {ok, NLimiter :: limiter()} + | {pause, MilliSecs :: non_neg_integer(), NLimiter :: limiter()}. check(#{cnt := Cnt, oct := Oct}, Limiter = #limiter{checkers = Cks}) -> {Pauses, NCks} = do_check(Cnt, Oct, Cks, [], []), case lists:max(Pauses) of @@ -112,16 +131,20 @@ check(#{cnt := Cnt, oct := Oct}, Limiter = #limiter{checkers = Cks}) -> %% @private do_check(_, _, [], Pauses, NCks) -> {Pauses, lists:reverse(NCks)}; -do_check(Pubs, Bytes, [Ck|More], Pauses, Acc) -> +do_check(Pubs, Bytes, [Ck | More], Pauses, Acc) -> {I, NConsumer} = consume(Pubs, Bytes, Ck), - do_check(Pubs, Bytes, More, [I|Pauses], [Ck#{consumer := NConsumer}|Acc]). + do_check(Pubs, Bytes, More, [I | Pauses], [Ck#{consumer := NConsumer} | Acc]). 
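%% Usage sketch for the emqx_limiter API above. The zone name and the
%% {Capacity, Interval} policy values are made-up examples that only mirror
%% the {Name, {Capacity, Interval}} shape matched by do_init_checker/2;
%% NumPubs and NumBytes stand for the caller's per-tick counters.
Limiter0 = emqx_limiter:init(default, [
    {conn_messages_in, {100, 1000}},
    {conn_bytes_in, {1048576, 1000}}
]),
case emqx_limiter:check(#{cnt => NumPubs, oct => NumBytes}, Limiter0) of
    {ok, Limiter1} -> Limiter1;
    {pause, Millis, Limiter1} -> {pause_for, Millis, Limiter1}
end.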
%%-------------------------------------------------------------------- %% Internal funcs %%-------------------------------------------------------------------- consume(Pubs, Bytes, #{name := Name, consumer := Cons}) -> - Tokens = case is_message_limiter(Name) of true -> Pubs; _ -> Bytes end, + Tokens = + case is_message_limiter(Name) of + true -> Pubs; + _ -> Bytes + end, case Tokens =:= 0 of true -> {0, Cons}; @@ -135,15 +158,22 @@ consume(Pubs, Bytes, #{name := Name, consumer := Cons}) -> end end. -get_info(Zone, #{name := Name, capacity := Cap, - interval := Intv, consumer := Cons}) -> - Info = case is_overall_limiter(Name) of - true -> esockd_limiter:lookup({Zone, Name}); - _ -> esockd_rate_limit:info(Cons) - end, - {Name, #{capacity => Cap, - interval => Intv, - tokens => maps:get(tokens, Info)}}. +get_info(Zone, #{ + name := Name, + capacity := Cap, + interval := Intv, + consumer := Cons +}) -> + Info = + case is_overall_limiter(Name) of + true -> esockd_limiter:lookup({Zone, Name}); + _ -> esockd_rate_limit:info(Cons) + end, + {Name, #{ + capacity => Cap, + interval => Intv, + tokens => maps:get(tokens, Info) + }}. is_overall_limiter(overall_messages_routing) -> true; is_overall_limiter(_) -> false. diff --git a/apps/emqx/src/emqx_listeners.erl b/apps/emqx/src/emqx_listeners.erl index 32fe1830d..6e1c1e151 100644 --- a/apps/emqx/src/emqx_listeners.erl +++ b/apps/emqx/src/emqx_listeners.erl @@ -23,37 +23,40 @@ -include("logger.hrl"). %% APIs --export([ list/0 - , start/0 - , restart/0 - , stop/0 - , is_running/1 - , current_conns/2 - , max_conns/2 - , id_example/0 - ]). +-export([ + list/0, + start/0, + restart/0, + stop/0, + is_running/1, + current_conns/2, + max_conns/2, + id_example/0 +]). --export([ start_listener/1 - , start_listener/3 - , stop_listener/1 - , stop_listener/3 - , restart_listener/1 - , restart_listener/3 - , has_enabled_listener_conf_by_type/1 - ]). +-export([ + start_listener/1, + start_listener/3, + stop_listener/1, + stop_listener/3, + restart_listener/1, + restart_listener/3, + has_enabled_listener_conf_by_type/1 +]). --export([ listener_id/2 - , parse_listener_id/1 - ]). +-export([ + listener_id/2, + parse_listener_id/1 +]). -export([post_config_update/5]). -export([format_addr/1]). -define(CONF_KEY_PATH, [listeners]). --define(TYPES_STRING, ["tcp","ssl","ws","wss","quic"]). +-define(TYPES_STRING, ["tcp", "ssl", "ws", "wss", "quic"]). --spec(id_example() -> atom()). +-spec id_example() -> atom(). id_example() -> id_example(list()). @@ -66,7 +69,7 @@ id_example([_ | Listeners]) -> id_example(Listeners). %% @doc List configured listeners. --spec(list() -> [{ListenerId :: atom(), ListenerConf :: map()}]). +-spec list() -> [{ListenerId :: atom(), ListenerConf :: map()}]. list() -> [{listener_id(Type, LName), LConf} || {Type, LName, LConf} <- do_list()]. @@ -75,37 +78,45 @@ do_list() -> lists:append([list(Type, maps:to_list(Conf)) || {Type, Conf} <- Listeners]). list(Type, Conf) -> - [begin - Running = is_running(Type, listener_id(Type, LName), LConf), - {Type, LName, maps:put(running, Running, LConf)} - end || {LName, LConf} <- Conf, is_map(LConf)]. + [ + begin + Running = is_running(Type, listener_id(Type, LName), LConf), + {Type, LName, maps:put(running, Running, LConf)} + end + || {LName, LConf} <- Conf, is_map(LConf) + ]. -spec is_running(ListenerId :: atom()) -> boolean() | {error, no_found}. 
is_running(ListenerId) -> - case lists:filtermap(fun({_Type, Id, #{running := IsRunning}}) -> - Id =:= ListenerId andalso {true, IsRunning} - end, do_list()) of + case + lists:filtermap( + fun({_Type, Id, #{running := IsRunning}}) -> + Id =:= ListenerId andalso {true, IsRunning} + end, + do_list() + ) + of [IsRunning] -> IsRunning; [] -> {error, not_found} end. is_running(Type, ListenerId, #{bind := ListenOn}) when Type =:= tcp; Type =:= ssl -> try esockd:listener({ListenerId, ListenOn}) of - Pid when is_pid(Pid)-> + Pid when is_pid(Pid) -> true - catch _:_ -> - false + catch + _:_ -> + false end; - is_running(Type, ListenerId, _Conf) when Type =:= ws; Type =:= wss -> try Info = ranch:info(ListenerId), proplists:get_value(status, Info) =:= running - catch _:_ -> - false + catch + _:_ -> + false end; - -is_running(quic, _ListenerId, _Conf)-> +is_running(quic, _ListenerId, _Conf) -> %% TODO: quic support {error, no_found}. @@ -132,7 +143,7 @@ max_conns(_, _, _) -> {error, not_support}. %% @doc Start all listeners. --spec(start() -> ok). +-spec start() -> ok. start() -> %% The ?MODULE:start/0 will be called by emqx_app when emqx get started, %% so we install the config handler here. @@ -146,31 +157,39 @@ start_listener(ListenerId) -> -spec start_listener(atom(), atom(), map()) -> ok | {error, term()}. start_listener(Type, ListenerName, #{bind := Bind} = Conf) -> case do_start_listener(Type, ListenerName, Conf) of - {ok, {skipped, Reason}} when Reason =:= listener_disabled; - Reason =:= quic_app_missing -> - console_print("Listener ~ts is NOT started due to: ~p~n.", - [listener_id(Type, ListenerName), Reason]); + {ok, {skipped, Reason}} when + Reason =:= listener_disabled; + Reason =:= quic_app_missing + -> + console_print( + "Listener ~ts is NOT started due to: ~p~n.", + [listener_id(Type, ListenerName), Reason] + ); {ok, _} -> - console_print("Listener ~ts on ~ts started.~n", - [listener_id(Type, ListenerName), format_addr(Bind)]); + console_print( + "Listener ~ts on ~ts started.~n", + [listener_id(Type, ListenerName), format_addr(Bind)] + ); {error, {already_started, Pid}} -> {error, {already_started, Pid}}; {error, Reason} -> - ?ELOG("Failed to start listener ~ts on ~ts: ~0p~n", - [listener_id(Type, ListenerName), format_addr(Bind), Reason]), + ?ELOG( + "Failed to start listener ~ts on ~ts: ~0p~n", + [listener_id(Type, ListenerName), format_addr(Bind), Reason] + ), error(Reason) end. %% @doc Restart all listeners --spec(restart() -> ok). +-spec restart() -> ok. restart() -> foreach_listeners(fun restart_listener/3). --spec(restart_listener(atom()) -> ok | {error, term()}). +-spec restart_listener(atom()) -> ok | {error, term()}. restart_listener(ListenerId) -> apply_on_listener(ListenerId, fun restart_listener/3). --spec(restart_listener(atom(), atom(), map()) -> ok | {error, term()}). +-spec restart_listener(atom(), atom(), map()) -> ok | {error, term()}. restart_listener(Type, ListenerName, {OldConf, NewConf}) -> restart_listener(Type, ListenerName, OldConf, NewConf); restart_listener(Type, ListenerName, Conf) -> @@ -184,30 +203,34 @@ restart_listener(Type, ListenerName, OldConf, NewConf) -> end. %% @doc Stop all listeners. --spec(stop() -> ok). +-spec stop() -> ok. stop() -> %% The ?MODULE:stop/0 will be called by emqx_app when emqx is going to shutdown, %% so we uninstall the config handler here. _ = emqx_config_handler:remove_handler(?CONF_KEY_PATH), foreach_listeners(fun stop_listener/3). --spec(stop_listener(atom()) -> ok | {error, term()}). 
+-spec stop_listener(atom()) -> ok | {error, term()}. stop_listener(ListenerId) -> apply_on_listener(ListenerId, fun stop_listener/3). stop_listener(Type, ListenerName, #{bind := Bind} = Conf) -> case do_stop_listener(Type, ListenerName, Conf) of ok -> - console_print("Listener ~ts on ~ts stopped.~n", - [listener_id(Type, ListenerName), format_addr(Bind)]), + console_print( + "Listener ~ts on ~ts stopped.~n", + [listener_id(Type, ListenerName), format_addr(Bind)] + ), ok; {error, Reason} -> - ?ELOG("Failed to stop listener ~ts on ~ts: ~0p~n", - [listener_id(Type, ListenerName), format_addr(Bind), Reason]), + ?ELOG( + "Failed to stop listener ~ts on ~ts: ~0p~n", + [listener_id(Type, ListenerName), format_addr(Bind), Reason] + ), {error, Reason} end. --spec(do_stop_listener(atom(), atom(), map()) -> ok | {error, term()}). +-spec do_stop_listener(atom(), atom(), map()) -> ok | {error, term()}. do_stop_listener(Type, ListenerName, #{bind := ListenOn}) when Type == tcp; Type == ssl -> esockd:close(listener_id(Type, ListenerName), ListenOn); do_stop_listener(Type, ListenerName, _Conf) when Type == ws; Type == wss -> @@ -222,21 +245,29 @@ console_print(_Fmt, _Args) -> ok. -endif. %% Start MQTT/TCP listener --spec(do_start_listener(atom(), atom(), map()) - -> {ok, pid() | {skipped, atom()}} | {error, term()}). +-spec do_start_listener(atom(), atom(), map()) -> + {ok, pid() | {skipped, atom()}} | {error, term()}. do_start_listener(_Type, _ListenerName, #{enabled := false}) -> {ok, {skipped, listener_disabled}}; -do_start_listener(Type, ListenerName, #{bind := ListenOn} = Opts) - when Type == tcp; Type == ssl -> - esockd:open(listener_id(Type, ListenerName), ListenOn, merge_default(esockd_opts(Type, Opts)), - {emqx_connection, start_link, - [#{listener => {Type, ListenerName}, - zone => zone(Opts), - limiter => limiter(Opts)}]}); - +do_start_listener(Type, ListenerName, #{bind := ListenOn} = Opts) when + Type == tcp; Type == ssl +-> + esockd:open( + listener_id(Type, ListenerName), + ListenOn, + merge_default(esockd_opts(Type, Opts)), + {emqx_connection, start_link, [ + #{ + listener => {Type, ListenerName}, + zone => zone(Opts), + limiter => limiter(Opts) + } + ]} + ); %% Start MQTT/WS listener -do_start_listener(Type, ListenerName, #{bind := ListenOn} = Opts) - when Type == ws; Type == wss -> +do_start_listener(Type, ListenerName, #{bind := ListenOn} = Opts) when + Type == ws; Type == wss +-> Id = listener_id(Type, ListenerName), RanchOpts = ranch_opts(Type, ListenOn, Opts), WsOpts = ws_opts(Type, ListenerName, Opts), @@ -244,31 +275,36 @@ do_start_listener(Type, ListenerName, #{bind := ListenOn} = Opts) ws -> cowboy:start_clear(Id, RanchOpts, WsOpts); wss -> cowboy:start_tls(Id, RanchOpts, WsOpts) end; - %% Start MQTT/QUIC listener do_start_listener(quic, ListenerName, #{bind := ListenOn} = Opts) -> - case [ A || {quicer, _, _} = A<-application:which_applications() ] of + case [A || {quicer, _, _} = A <- application:which_applications()] of [_] -> DefAcceptors = erlang:system_info(schedulers_online) * 8, - ListenOpts = [ {cert, maps:get(certfile, Opts)} - , {key, maps:get(keyfile, Opts)} - , {alpn, ["mqtt"]} - , {conn_acceptors, lists:max([DefAcceptors, maps:get(acceptors, Opts, 0)])} - , {idle_timeout_ms, - lists:max([ - emqx_config:get_zone_conf(zone(Opts), [mqtt, idle_timeout]) * 3, - timer:seconds(maps:get(idle_timeout, Opts))])} - ], - ConnectionOpts = #{ conn_callback => emqx_quic_connection - , peer_unidi_stream_count => 1 - , peer_bidi_stream_count => 10 - , zone => zone(Opts) - , 
listener => {quic, ListenerName} - , limiter => limiter(Opts) - }, + ListenOpts = [ + {cert, maps:get(certfile, Opts)}, + {key, maps:get(keyfile, Opts)}, + {alpn, ["mqtt"]}, + {conn_acceptors, lists:max([DefAcceptors, maps:get(acceptors, Opts, 0)])}, + {idle_timeout_ms, + lists:max([ + emqx_config:get_zone_conf(zone(Opts), [mqtt, idle_timeout]) * 3, + timer:seconds(maps:get(idle_timeout, Opts)) + ])} + ], + ConnectionOpts = #{ + conn_callback => emqx_quic_connection, + peer_unidi_stream_count => 1, + peer_bidi_stream_count => 10, + zone => zone(Opts), + listener => {quic, ListenerName}, + limiter => limiter(Opts) + }, StreamOpts = [{stream_callback, emqx_quic_stream}], - quicer:start_listener(listener_id(quic, ListenerName), - port(ListenOn), {ListenOpts, ConnectionOpts, StreamOpts}); + quicer:start_listener( + listener_id(quic, ListenerName), + port(ListenOn), + {ListenOpts, ConnectionOpts, StreamOpts} + ); [] -> {ok, {skipped, quic_app_missing}} end. @@ -278,54 +314,70 @@ delete_authentication(Type, ListenerName, _Conf) -> %% Update the listeners at runtime post_config_update(_, _Req, NewListeners, OldListeners, _AppEnvs) -> - #{added := Added, removed := Removed, changed := Updated} - = diff_listeners(NewListeners, OldListeners), + #{added := Added, removed := Removed, changed := Updated} = + diff_listeners(NewListeners, OldListeners), perform_listener_changes(fun stop_listener/3, Removed), perform_listener_changes(fun delete_authentication/3, Removed), perform_listener_changes(fun start_listener/3, Added), perform_listener_changes(fun restart_listener/3, Updated). perform_listener_changes(Action, MapConfs) -> - lists:foreach(fun - ({Id, Conf}) -> + lists:foreach( + fun({Id, Conf}) -> {Type, Name} = parse_listener_id(Id), Action(Type, Name, Conf) - end, maps:to_list(MapConfs)). + end, + maps:to_list(MapConfs) + ). diff_listeners(NewListeners, OldListeners) -> emqx_map_lib:diff_maps(flatten_listeners(NewListeners), flatten_listeners(OldListeners)). flatten_listeners(Conf0) -> maps:from_list( - lists:append([do_flatten_listeners(Type, Conf) - || {Type, Conf} <- maps:to_list(Conf0)])). + lists:append([ + do_flatten_listeners(Type, Conf) + || {Type, Conf} <- maps:to_list(Conf0) + ]) + ). do_flatten_listeners(Type, Conf0) -> - [{listener_id(Type, Name), maps:remove(authentication, Conf)} || - {Name, Conf} <- maps:to_list(Conf0)]. + [ + {listener_id(Type, Name), maps:remove(authentication, Conf)} + || {Name, Conf} <- maps:to_list(Conf0) + ]. esockd_opts(Type, Opts0) -> Opts1 = maps:with([acceptors, max_connections, proxy_protocol, proxy_protocol_timeout], Opts0), Limiter = limiter(Opts0), - Opts2 = case maps:get(connection, Limiter, undefined) of - undefined -> - Opts1; - BucketName -> - Opts1#{limiter => emqx_esockd_htb_limiter:new_create_options(connection, BucketName)} - end, - Opts3 = Opts2#{ access_rules => esockd_access_rules(maps:get(access_rules, Opts0, [])) - , tune_fun => {emqx_olp, backoff_new_conn, [zone(Opts0)]} - }, - maps:to_list(case Type of - tcp -> Opts3#{tcp_options => tcp_opts(Opts0)}; - ssl -> Opts3#{ssl_options => ssl_opts(Opts0), tcp_options => tcp_opts(Opts0)} - end). 
+ Opts2 = + case maps:get(connection, Limiter, undefined) of + undefined -> + Opts1; + BucketName -> + Opts1#{ + limiter => emqx_esockd_htb_limiter:new_create_options(connection, BucketName) + } + end, + Opts3 = Opts2#{ + access_rules => esockd_access_rules(maps:get(access_rules, Opts0, [])), + tune_fun => {emqx_olp, backoff_new_conn, [zone(Opts0)]} + }, + maps:to_list( + case Type of + tcp -> Opts3#{tcp_options => tcp_opts(Opts0)}; + ssl -> Opts3#{ssl_options => ssl_opts(Opts0), tcp_options => tcp_opts(Opts0)} + end + ). ws_opts(Type, ListenerName, Opts) -> - WsPaths = [{maps:get(mqtt_path, Opts, "/mqtt"), emqx_ws_connection, - #{zone => zone(Opts), - listener => {Type, ListenerName}, - limiter => limiter(Opts)}}], + WsPaths = [ + {maps:get(mqtt_path, Opts, "/mqtt"), emqx_ws_connection, #{ + zone => zone(Opts), + listener => {Type, ListenerName}, + limiter => limiter(Opts) + }} + ], Dispatch = cowboy_router:compile([{'_', WsPaths}]), ProxyProto = maps:get(proxy_protocol, Opts, false), #{env => #{dispatch => Dispatch}, proxy_header => ProxyProto}. @@ -333,16 +385,19 @@ ws_opts(Type, ListenerName, Opts) -> ranch_opts(Type, ListenOn, Opts) -> NumAcceptors = maps:get(acceptors, Opts, 4), MaxConnections = maps:get(max_connections, Opts, 1024), - SocketOpts = case Type of - wss -> tcp_opts(Opts) ++ proplists:delete(handshake_timeout, ssl_opts(Opts)); - ws -> tcp_opts(Opts) - end, - #{num_acceptors => NumAcceptors, - max_connections => MaxConnections, - handshake_timeout => maps:get(handshake_timeout, Opts, 15000), - socket_opts => ip_port(ListenOn) ++ + SocketOpts = + case Type of + wss -> tcp_opts(Opts) ++ proplists:delete(handshake_timeout, ssl_opts(Opts)); + ws -> tcp_opts(Opts) + end, + #{ + num_acceptors => NumAcceptors, + max_connections => MaxConnections, + handshake_timeout => maps:get(handshake_timeout, Opts, 15000), + socket_opts => ip_port(ListenOn) ++ %% cowboy don't allow us to set 'reuseaddr' - proplists:delete(reuseaddr, SocketOpts)}. + proplists:delete(reuseaddr, SocketOpts) + }. ip_port(Port) when is_integer(Port) -> [{port, Port}]; @@ -355,7 +410,13 @@ port({_Addr, Port}) when is_integer(Port) -> Port. esockd_access_rules(StrRules) -> Access = fun(S) -> [A, CIDR] = string:tokens(S, " "), - {list_to_atom(A), case CIDR of "all" -> all; _ -> CIDR end} + { + list_to_atom(A), + case CIDR of + "all" -> all; + _ -> CIDR + end + } end, [Access(R) || R <- StrRules]. @@ -389,7 +450,8 @@ parse_listener_id(Id) -> true -> {list_to_existing_atom(Type), list_to_atom(Name)}; false -> {error, {invalid_listener_id, Id}} end; - _ -> {error, {invalid_listener_id, Id}} + _ -> + {error, {invalid_listener_id, Id}} end. zone(Opts) -> @@ -401,25 +463,36 @@ limiter(Opts) -> ssl_opts(Opts) -> maps:to_list( emqx_tls_lib:drop_tls13_for_old_otp( - maps:without([enable], - maps:get(ssl, Opts, #{})))). + maps:without( + [enable], + maps:get(ssl, Opts, #{}) + ) + ) + ). tcp_opts(Opts) -> maps:to_list( - maps:without([active_n], - maps:get(tcp, Opts, #{}))). + maps:without( + [active_n], + maps:get(tcp, Opts, #{}) + ) + ). foreach_listeners(Do) -> lists:foreach( fun({Type, LName, LConf}) -> - Do(Type, LName, LConf) - end, do_list()). + Do(Type, LName, LConf) + end, + do_list() + ). has_enabled_listener_conf_by_type(Type) -> lists:any( fun({Type0, _LName, LConf}) when is_map(LConf) -> - Type =:= Type0 andalso maps:get(enabled, LConf, true) - end, do_list()). + Type =:= Type0 andalso maps:get(enabled, LConf, true) + end, + do_list() + ). 
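%% Usage sketch for the listener id helpers above. The 'type:name' atom format
%% and the tcp/default listener are assumptions for illustration; only
%% listener_id/2, parse_listener_id/1 and is_running/1 from the export lists
%% above are used.
Id = emqx_listeners:listener_id(tcp, default),
{tcp, default} = emqx_listeners:parse_listener_id(Id),
%% boolean(), or an error tuple when no such listener is configured
emqx_listeners:is_running(Id).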
apply_on_listener(ListenerId, Do) -> {Type, ListenerName} = parse_listener_id(ListenerId), diff --git a/apps/emqx/src/emqx_logger.erl b/apps/emqx/src/emqx_logger.erl index 7fcfb2d01..f780f2217 100644 --- a/apps/emqx/src/emqx_logger.erl +++ b/apps/emqx/src/emqx_logger.erl @@ -21,165 +21,169 @@ -elvis([{elvis_style, god_modules, disable}]). %% Logs --export([ debug/1 - , debug/2 - , debug/3 - , info/1 - , info/2 - , info/3 - , warning/1 - , warning/2 - , warning/3 - , error/1 - , error/2 - , error/3 - , critical/1 - , critical/2 - , critical/3 - ]). +-export([ + debug/1, + debug/2, + debug/3, + info/1, + info/2, + info/3, + warning/1, + warning/2, + warning/3, + error/1, + error/2, + error/3, + critical/1, + critical/2, + critical/3 +]). %% Configs --export([ set_metadata_peername/1 - , set_metadata_clientid/1 - , set_proc_metadata/1 - , set_primary_log_level/1 - , set_log_handler_level/2 - , set_log_level/1 - , set_all_log_handlers_level/1 - ]). +-export([ + set_metadata_peername/1, + set_metadata_clientid/1, + set_proc_metadata/1, + set_primary_log_level/1, + set_log_handler_level/2, + set_log_level/1, + set_all_log_handlers_level/1 +]). --export([ get_primary_log_level/0 - , tune_primary_log_level/0 - , get_log_handlers/0 - , get_log_handlers/1 - , get_log_handler/1 - ]). +-export([ + get_primary_log_level/0, + tune_primary_log_level/0, + get_log_handlers/0, + get_log_handlers/1, + get_log_handler/1 +]). --export([ start_log_handler/1 - , stop_log_handler/1 - ]). +-export([ + start_log_handler/1, + stop_log_handler/1 +]). --type(peername_str() :: list()). --type(logger_dst() :: file:filename() | console | unknown). --type(logger_handler_info() :: #{ - id := logger:handler_id(), - level := logger:level(), - dst := logger_dst(), - filters := [{logger:filter_id(), logger:filter()}], - status := started | stopped - }). +-type peername_str() :: list(). +-type logger_dst() :: file:filename() | console | unknown. +-type logger_handler_info() :: #{ + id := logger:handler_id(), + level := logger:level(), + dst := logger_dst(), + filters := [{logger:filter_id(), logger:filter()}], + status := started | stopped +}. -define(STOPPED_HANDLERS, {?MODULE, stopped_handlers}). %%-------------------------------------------------------------------- %% APIs %%-------------------------------------------------------------------- --spec(debug(unicode:chardata()) -> ok). +-spec debug(unicode:chardata()) -> ok. debug(Msg) -> logger:debug(Msg). --spec(debug(io:format(), [term()]) -> ok). +-spec debug(io:format(), [term()]) -> ok. debug(Format, Args) -> logger:debug(Format, Args). --spec(debug(logger:metadata(), io:format(), [term()]) -> ok). +-spec debug(logger:metadata(), io:format(), [term()]) -> ok. debug(Metadata, Format, Args) when is_map(Metadata) -> logger:debug(Format, Args, Metadata). - --spec(info(unicode:chardata()) -> ok). +-spec info(unicode:chardata()) -> ok. info(Msg) -> logger:info(Msg). --spec(info(io:format(), [term()]) -> ok). +-spec info(io:format(), [term()]) -> ok. info(Format, Args) -> logger:info(Format, Args). --spec(info(logger:metadata(), io:format(), [term()]) -> ok). +-spec info(logger:metadata(), io:format(), [term()]) -> ok. info(Metadata, Format, Args) when is_map(Metadata) -> logger:info(Format, Args, Metadata). - --spec(warning(unicode:chardata()) -> ok). +-spec warning(unicode:chardata()) -> ok. warning(Msg) -> logger:warning(Msg). --spec(warning(io:format(), [term()]) -> ok). +-spec warning(io:format(), [term()]) -> ok. warning(Format, Args) -> logger:warning(Format, Args). 
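%% Usage sketch for the wrapper API in this module: metadata set once on the
%% calling process (see set_metadata_clientid/1 further below) is attached to
%% every subsequent log call. The client id and peer string are placeholders.
ok = emqx_logger:set_metadata_clientid(<<"client-42">>),
emqx_logger:info("client connected from ~ts", ["127.0.0.1:52100"]).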
--spec(warning(logger:metadata(), io:format(), [term()]) -> ok). +-spec warning(logger:metadata(), io:format(), [term()]) -> ok. warning(Metadata, Format, Args) when is_map(Metadata) -> logger:warning(Format, Args, Metadata). - --spec(error(unicode:chardata()) -> ok). +-spec error(unicode:chardata()) -> ok. error(Msg) -> logger:error(Msg). --spec(error(io:format(), [term()]) -> ok). +-spec error(io:format(), [term()]) -> ok. error(Format, Args) -> logger:error(Format, Args). --spec(error(logger:metadata(), io:format(), [term()]) -> ok). +-spec error(logger:metadata(), io:format(), [term()]) -> ok. error(Metadata, Format, Args) when is_map(Metadata) -> logger:error(Format, Args, Metadata). - --spec(critical(unicode:chardata()) -> ok). +-spec critical(unicode:chardata()) -> ok. critical(Msg) -> logger:critical(Msg). --spec(critical(io:format(), [term()]) -> ok). +-spec critical(io:format(), [term()]) -> ok. critical(Format, Args) -> logger:critical(Format, Args). --spec(critical(logger:metadata(), io:format(), [term()]) -> ok). +-spec critical(logger:metadata(), io:format(), [term()]) -> ok. critical(Metadata, Format, Args) when is_map(Metadata) -> logger:critical(Format, Args, Metadata). --spec(set_metadata_clientid(emqx_types:clientid()) -> ok). +-spec set_metadata_clientid(emqx_types:clientid()) -> ok. set_metadata_clientid(<<>>) -> ok; set_metadata_clientid(ClientId) -> set_proc_metadata(#{clientid => ClientId}). --spec(set_metadata_peername(peername_str()) -> ok). +-spec set_metadata_peername(peername_str()) -> ok. set_metadata_peername(Peername) -> set_proc_metadata(#{peername => Peername}). --spec(set_proc_metadata(logger:metadata()) -> ok). +-spec set_proc_metadata(logger:metadata()) -> ok. set_proc_metadata(Meta) -> logger:update_process_metadata(Meta). --spec(get_primary_log_level() -> logger:level()). +-spec get_primary_log_level() -> logger:level(). get_primary_log_level() -> #{level := Level} = logger:get_primary_config(), Level. -spec tune_primary_log_level() -> ok. tune_primary_log_level() -> - LowestLevel = lists:foldl(fun(#{level := Level}, OldLevel) -> + LowestLevel = lists:foldl( + fun(#{level := Level}, OldLevel) -> case logger:compare_levels(Level, OldLevel) of lt -> Level; _ -> OldLevel end - end, get_primary_log_level(), get_log_handlers()), + end, + get_primary_log_level(), + get_log_handlers() + ), set_primary_log_level(LowestLevel). --spec(set_primary_log_level(logger:level()) -> ok | {error, term()}). +-spec set_primary_log_level(logger:level()) -> ok | {error, term()}. set_primary_log_level(Level) -> logger:set_primary_config(level, Level). --spec(get_log_handlers() -> [logger_handler_info()]). +-spec get_log_handlers() -> [logger_handler_info()]. get_log_handlers() -> get_log_handlers(started) ++ get_log_handlers(stopped). --spec(get_log_handlers(started | stopped) -> [logger_handler_info()]). +-spec get_log_handlers(started | stopped) -> [logger_handler_info()]. get_log_handlers(started) -> [log_handler_info(Conf, started) || Conf <- logger:get_handler_config()]; get_log_handlers(stopped) -> [log_handler_info(Conf, stopped) || Conf <- list_stopped_handler_config()]. --spec(get_log_handler(logger:handler_id()) -> logger_handler_info()). +-spec get_log_handler(logger:handler_id()) -> logger_handler_info(). get_log_handler(HandlerId) -> case logger:get_handler_config(HandlerId) of {ok, Conf} -> @@ -191,13 +195,15 @@ get_log_handler(HandlerId) -> end end. --spec(start_log_handler(logger:handler_id()) -> ok | {error, term()}). 
+-spec start_log_handler(logger:handler_id()) -> ok | {error, term()}. start_log_handler(HandlerId) -> case lists:member(HandlerId, logger:get_handler_ids()) of - true -> ok; + true -> + ok; false -> case read_stopped_handler_config(HandlerId) of - error -> {error, {not_found, HandlerId}}; + error -> + {error, {not_found, HandlerId}}; {ok, Conf = #{module := Mod}} -> case logger:add_handler(HandlerId, Mod, Conf) of ok -> remove_stopped_handler_config(HandlerId); @@ -206,7 +212,7 @@ start_log_handler(HandlerId) -> end end. --spec(stop_log_handler(logger:handler_id()) -> ok | {error, term()}). +-spec stop_log_handler(logger:handler_id()) -> ok | {error, term()}. stop_log_handler(HandlerId) -> case logger:get_handler_config(HandlerId) of {ok, Conf} -> @@ -218,20 +224,20 @@ stop_log_handler(HandlerId) -> {error, {not_started, HandlerId}} end. --spec(set_log_handler_level(logger:handler_id(), logger:level()) -> ok | {error, term()}). +-spec set_log_handler_level(logger:handler_id(), logger:level()) -> ok | {error, term()}. set_log_handler_level(HandlerId, Level) -> case logger:set_handler_config(HandlerId, level, Level) of - ok -> ok; + ok -> + ok; {error, _} -> case read_stopped_handler_config(HandlerId) of error -> {error, {not_found, HandlerId}}; - {ok, Conf} -> - save_stopped_handler_config(HandlerId, Conf#{level => Level}) + {ok, Conf} -> save_stopped_handler_config(HandlerId, Conf#{level => Level}) end end. %% @doc Set both the primary and all handlers level in one command --spec(set_log_level(logger:handler_id()) -> ok | {error, term()}). +-spec set_log_level(logger:handler_id()) -> ok | {error, term()}. set_log_level(Level) -> case set_primary_log_level(Level) of ok -> set_all_log_handlers_level(Level); @@ -242,18 +248,47 @@ set_log_level(Level) -> %% Internal Functions %%-------------------------------------------------------------------- -log_handler_info(#{id := Id, level := Level, module := logger_std_h, - filters := Filters, config := #{type := Type}}, Status) when - Type =:= standard_io; - Type =:= standard_error -> +log_handler_info( + #{ + id := Id, + level := Level, + module := logger_std_h, + filters := Filters, + config := #{type := Type} + }, + Status +) when + Type =:= standard_io; + Type =:= standard_error +-> #{id => Id, level => Level, dst => console, status => Status, filters => Filters}; -log_handler_info(#{id := Id, level := Level, module := logger_std_h, - filters := Filters, config := Config = #{type := file}}, Status) -> - #{id => Id, level => Level, status => Status, filters => Filters, - dst => maps:get(file, Config, atom_to_list(Id))}; - -log_handler_info(#{id := Id, level := Level, module := logger_disk_log_h, - filters := Filters, config := #{file := Filename}}, Status) -> +log_handler_info( + #{ + id := Id, + level := Level, + module := logger_std_h, + filters := Filters, + config := Config = #{type := file} + }, + Status +) -> + #{ + id => Id, + level => Level, + status => Status, + filters => Filters, + dst => maps:get(file, Config, atom_to_list(Id)) + }; +log_handler_info( + #{ + id := Id, + level := Level, + module := logger_disk_log_h, + filters := Filters, + config := #{file := Filename} + }, + Status +) -> #{id => Id, level => Level, dst => Filename, status => Status, filters => Filters}; log_handler_info(#{id := Id, level := Level, filters := Filters}, Status) -> #{id => Id, level => Level, dst => unknown, status => Status, filters => Filters}. 
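%% Usage sketch for the level and handler helpers above; the warning level is
%% just an example value.
_ = emqx_logger:set_log_level(warning),
%% one logger_handler_info() map per started handler
Handlers = emqx_logger:get_log_handlers(started).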
@@ -264,7 +299,8 @@ set_all_log_handlers_level(Level) -> set_all_log_handlers_level([#{id := ID, level := Level} | List], NewLevel, ChangeHistory) -> case set_log_handler_level(ID, NewLevel) of - ok -> set_all_log_handlers_level(List, NewLevel, [{ID, Level} | ChangeHistory]); + ok -> + set_all_log_handlers_level(List, NewLevel, [{ID, Level} | ChangeHistory]); {error, Error} -> rollback(ChangeHistory), {error, {handlers_logger_level, {ID, Error}}} @@ -275,7 +311,8 @@ set_all_log_handlers_level([], _NewLevel, _NewHanlder) -> rollback([{ID, Level} | List]) -> _ = set_log_handler_level(ID, Level), rollback(List); -rollback([]) -> ok. +rollback([]) -> + ok. save_stopped_handler_config(HandlerId, Config) -> case persistent_term:get(?STOPPED_HANDLERS, undefined) of @@ -291,12 +328,12 @@ read_stopped_handler_config(HandlerId) -> end. remove_stopped_handler_config(HandlerId) -> case persistent_term:get(?STOPPED_HANDLERS, undefined) of - undefined -> ok; + undefined -> + ok; ConfList -> case maps:find(HandlerId, ConfList) of error -> ok; - {ok, _} -> - persistent_term:put(?STOPPED_HANDLERS, maps:remove(HandlerId, ConfList)) + {ok, _} -> persistent_term:put(?STOPPED_HANDLERS, maps:remove(HandlerId, ConfList)) end end. list_stopped_handler_config() -> diff --git a/apps/emqx/src/emqx_logger_jsonfmt.erl b/apps/emqx/src/emqx_logger_jsonfmt.erl index 9e935787d..f1a9b8ee5 100644 --- a/apps/emqx/src/emqx_logger_jsonfmt.erl +++ b/apps/emqx/src/emqx_logger_jsonfmt.erl @@ -43,14 +43,16 @@ -export_type([config/0]). --elvis([{elvis_style, no_nested_try_catch, #{ ignore => [emqx_logger_jsonfmt]}}]). +-elvis([{elvis_style, no_nested_try_catch, #{ignore => [emqx_logger_jsonfmt]}}]). %% this is what used when calling logger:log(Level, Report, Meta). -define(DEFAULT_FORMATTER, fun logger:format_otp_report/1). --type config() :: #{depth => pos_integer() | unlimited, - report_cb => logger:report_cb(), - single_line => boolean()}. +-type config() :: #{ + depth => pos_integer() | unlimited, + report_cb => logger:report_cb(), + single_line => boolean() +}. -define(IS_STRING(String), (is_list(String) orelse is_binary(String))). @@ -67,7 +69,7 @@ best_effort_json(Input) -> -spec format(logger:log_event(), config()) -> iodata(). format(#{level := Level, msg := Msg, meta := Meta}, Config0) when is_map(Config0) -> Config = add_default_config(Config0), - [format(Msg, Meta#{level => Level}, Config) , "\n"]. + [format(Msg, Meta#{level => Level}, Config), "\n"]. format(Msg, Meta, Config) -> Data0 = @@ -78,12 +80,13 @@ format(Msg, Meta, Config) -> Meta#{msg => Bin} catch C:R:S -> - Meta#{ msg => "emqx_logger_jsonfmt_format_error" - , fmt_raw_input => Msg - , fmt_error => C - , fmt_reason => R - , fmt_stacktrace => S - } + Meta#{ + msg => "emqx_logger_jsonfmt_format_error", + fmt_raw_input => Msg, + fmt_error => C, + fmt_reason => R, + fmt_stacktrace => S + } end, Data = maps:without([report_cb], Data0), jiffy:encode(json_obj(Data, Config)). 
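%% Usage sketch: this module is an OTP logger formatter, so the format/2 above
%% is normally reached through a handler configuration along these lines. The
%% handler id and log file path are placeholders.
logger:add_handler(json_log, logger_std_h, #{
    config => #{type => file, file => "log/emqx.json.log"},
    formatter => {emqx_logger_jsonfmt, #{depth => 20, single_line => true}}
}).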
@@ -102,8 +105,9 @@ maybe_format_msg(Msg, Meta, Config) -> format_msg({string, Chardata}, Meta, Config) -> %% already formatted format_msg({"~ts", [Chardata]}, Meta, Config); -format_msg({report, _} = Msg, Meta, #{report_cb := Fun} = Config) - when is_function(Fun,1); is_function(Fun,2) -> +format_msg({report, _} = Msg, Meta, #{report_cb := Fun} = Config) when + is_function(Fun, 1); is_function(Fun, 2) +-> %% a format callback function in config, no idea when this happens, but leaving it format_msg(Msg, Meta#{report_cb => Fun}, maps:remove(report_cb, Config)); format_msg({report, Report}, #{report_cb := Fun} = Meta, Config) when is_function(Fun, 1) -> @@ -112,10 +116,11 @@ format_msg({report, Report}, #{report_cb := Fun} = Meta, Config) when is_functio {Format, Args} when is_list(Format), is_list(Args) -> format_msg({Format, Args}, maps:remove(report_cb, Meta), Config); Other -> - #{ msg => "report_cb_bad_return" - , report_cb_fun => Fun - , report_cb_return => Other - } + #{ + msg => "report_cb_bad_return", + report_cb_fun => Fun, + report_cb_return => Other + } end; format_msg({report, Report}, #{report_cb := Fun}, Config) when is_function(Fun, 2) -> %% a format callback function of arity 2 @@ -125,30 +130,34 @@ format_msg({report, Report}, #{report_cb := Fun}, Config) when is_function(Fun, unicode:characters_to_binary(Chardata, utf8) catch _:_ -> - #{ msg => "report_cb_bad_return" - , report_cb_fun => Fun - , report_cb_return => Chardata - } + #{ + msg => "report_cb_bad_return", + report_cb_fun => Fun, + report_cb_return => Chardata + } end; Other -> - #{ msg => "report_cb_bad_return" - , report_cb_fun => Fun - , report_cb_return => Other - } + #{ + msg => "report_cb_bad_return", + report_cb_fun => Fun, + report_cb_return => Other + } end; format_msg({Fmt, Args}, _Meta, Config) -> do_format_msg(Fmt, Args, Config). -do_format_msg(Format0, Args, #{depth := Depth, - single_line := SingleLine - }) -> +do_format_msg(Format0, Args, #{ + depth := Depth, + single_line := SingleLine +}) -> Format1 = io_lib:scan_format(Format0, Args), Format = reformat(Format1, Depth, SingleLine), Text0 = io_lib:build_text(Format, []), - Text = case SingleLine of - true -> re:replace(Text0, ",?\r?\n\s*",", ", [{return, list}, global, unicode]); - false -> Text0 - end, + Text = + case SingleLine of + true -> re:replace(Text0, ",?\r?\n\s*", ", ", [{return, list}, global, unicode]); + false -> Text0 + end, trim(unicode:characters_to_binary(Text, utf8)). %% Get rid of the leading spaces. @@ -162,16 +171,18 @@ reformat([#{control_char := C} = M | T], Depth, true) when C =:= $p -> [limit_depth(M#{width => 0}, Depth) | reformat(T, Depth, true)]; reformat([#{control_char := C} = M | T], Depth, true) when C =:= $P -> [M#{width => 0} | reformat(T, Depth, true)]; -reformat([#{control_char := C}=M | T], Depth, Single) when C =:= $p; C =:= $w -> +reformat([#{control_char := C} = M | T], Depth, Single) when C =:= $p; C =:= $w -> [limit_depth(M, Depth) | reformat(T, Depth, Single)]; reformat([H | T], Depth, Single) -> [H | reformat(T, Depth, Single)]; reformat([], _, _) -> []. -limit_depth(M0, unlimited) -> M0; -limit_depth(#{control_char:=C0, args:=Args}=M0, Depth) -> - C = C0 - ($a - $A), %To uppercase. +limit_depth(M0, unlimited) -> + M0; +limit_depth(#{control_char := C0, args := Args} = M0, Depth) -> + %To uppercase. + C = C0 - ($a - $A), M0#{control_char := C, args := Args ++ [Depth]}. 
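%% Usage sketch for best_effort_json/1 (named in the hunk context above),
%% assuming it is exported; it renders terms that are not strictly JSON-safe
%% into printable JSON text, falling back to "~p" formatting where needed.
JsonText = emqx_logger_jsonfmt:best_effort_json(#{clientid => <<"c1">>, peerport => 1883}).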
add_default_config(Config0) -> @@ -187,7 +198,7 @@ best_effort_unicode(Input, Config) -> B when is_binary(B) -> B; _ -> do_format_msg("~p", [Input], Config) catch - _ : _ -> + _:_ -> do_format_msg("~p", [Input], Config) end. @@ -195,19 +206,21 @@ best_effort_json_obj(List, Config) when is_list(List) -> try json_obj(maps:from_list(List), Config) catch - _ : _ -> + _:_ -> [json(I, Config) || I <- List] end; best_effort_json_obj(Map, Config) -> try json_obj(Map, Config) catch - _ : _ -> + _:_ -> do_format_msg("~p", [Map], Config) end. -json([], _) -> "[]"; -json(<<"">>, _) -> "\"\""; +json([], _) -> + "[]"; +json(<<"">>, _) -> + "\"\""; json(A, _) when is_atom(A) -> atom_to_binary(A, utf8); json(I, _) when is_integer(I) -> I; json(F, _) when is_float(F) -> F; @@ -216,7 +229,7 @@ json(P, C) when is_port(P) -> json(port_to_list(P), C); json(F, C) when is_function(F) -> json(erlang:fun_to_list(F), C); json(B, Config) when is_binary(B) -> best_effort_unicode(B, Config); -json(L, Config) when is_list(L), is_integer(hd(L))-> +json(L, Config) when is_list(L), is_integer(hd(L)) -> best_effort_unicode(L, Config); json(M, Config) when is_list(M), is_tuple(hd(M)), tuple_size(hd(M)) =:= 2 -> best_effort_json_obj(M, Config); @@ -228,15 +241,28 @@ json(Term, Config) -> do_format_msg("~p", [Term], Config). json_obj(Data, Config) -> - maps:fold(fun (K, V, D) -> - json_kv(K, V, D, Config) - end, maps:new(), Data). + maps:fold( + fun(K, V, D) -> + json_kv(K, V, D, Config) + end, + maps:new(), + Data + ). json_kv(mfa, {M, F, A}, Data, _Config) -> - maps:put(mfa, <<(atom_to_binary(M, utf8))/binary, $:, - (atom_to_binary(F, utf8))/binary, $/, - (integer_to_binary(A))/binary>>, Data); -json_kv('$kind', Kind, Data, Config) -> %% snabbkaffe + maps:put( + mfa, + << + (atom_to_binary(M, utf8))/binary, + $:, + (atom_to_binary(F, utf8))/binary, + $/, + (integer_to_binary(A))/binary + >>, + Data + ); +%% snabbkaffe +json_kv('$kind', Kind, Data, Config) -> maps:put(msg, json(Kind, Config), Data); json_kv(gl, _, Data, _Config) -> %% drop gl because it's not interesting @@ -267,44 +293,52 @@ json_key(Term) -> no_crash_test_() -> Opts = [{numtests, 1000}, {to_file, user}], - {timeout, 30, - fun() -> ?assert(proper:quickcheck(t_no_crash(), Opts)) end}. + {timeout, 30, fun() -> ?assert(proper:quickcheck(t_no_crash(), Opts)) end}. t_no_crash() -> - ?FORALL({Level, Report, Meta, Config}, - {p_level(), p_report(), p_meta(), p_config()}, - t_no_crash_run(Level, Report, Meta, Config)). + ?FORALL( + {Level, Report, Meta, Config}, + {p_level(), p_report(), p_meta(), p_config()}, + t_no_crash_run(Level, Report, Meta, Config) + ). t_no_crash_run(Level, Report, {undefined, Meta}, Config) -> t_no_crash_run(Level, Report, maps:from_list(Meta), Config); t_no_crash_run(Level, Report, {ReportCb, Meta}, Config) -> t_no_crash_run(Level, Report, maps:from_list([{report_cb, ReportCb} | Meta]), Config); t_no_crash_run(Level, Report, Meta, Config) -> - Input = #{ level => Level - , msg => {report, Report} - , meta => filter(Meta) - }, + Input = #{ + level => Level, + msg => {report, Report}, + meta => filter(Meta) + }, _ = format(Input, maps:from_list(Config)), true. %% assume top level Report and Meta are sane filter(Map) -> Keys = lists:filter( - fun(K) -> - try json_key(K), true - catch throw : {badkey, _} -> false - end - end, maps:keys(Map)), + fun(K) -> + try + json_key(K), + true + catch + throw:{badkey, _} -> false + end + end, + maps:keys(Map) + ), maps:with(Keys, Map). 
p_report_cb() -> - proper_types:oneof([ fun ?MODULE:report_cb_1/1 - , fun ?MODULE:report_cb_2/2 - , fun ?MODULE:report_cb_crash/2 - , fun logger:format_otp_report/1 - , fun logger:format_report/1 - , format_report_undefined - ]). + proper_types:oneof([ + fun ?MODULE:report_cb_1/1, + fun ?MODULE:report_cb_2/2, + fun ?MODULE:report_cb_crash/2, + fun logger:format_otp_report/1, + fun logger:format_report/1, + format_report_undefined + ]). report_cb_1(Input) -> {"~p", [Input]}. @@ -314,9 +348,12 @@ report_cb_crash(_Input, _Config) -> error(report_cb_crash). p_kvlist() -> proper_types:list({ - proper_types:oneof([proper_types:atom(), - proper_types:binary() - ]), proper_types:term()}). + proper_types:oneof([ + proper_types:atom(), + proper_types:binary() + ]), + proper_types:term() + }). %% meta type is 2-tuple, report_cb type, and some random key value pairs p_meta() -> @@ -330,8 +367,10 @@ p_level() -> proper_types:oneof([info, debug, error, warning, foobar]). p_config() -> proper_types:shrink_list( - [ {depth, p_limit()} - , {single_line, proper_types:boolean()} - ]). + [ + {depth, p_limit()}, + {single_line, proper_types:boolean()} + ] + ). -endif. diff --git a/apps/emqx/src/emqx_logger_textfmt.erl b/apps/emqx/src/emqx_logger_textfmt.erl index 64602ef90..43f805c74 100644 --- a/apps/emqx/src/emqx_logger_textfmt.erl +++ b/apps/emqx/src/emqx_logger_textfmt.erl @@ -44,8 +44,9 @@ try_format_unicode(Char) -> {incomplete, _, _} -> error; Binary -> Binary end - catch _:_ -> - error + catch + _:_ -> + error end, case List of error -> io_lib:format("~0p", [Char]); @@ -54,15 +55,18 @@ try_format_unicode(Char) -> enrich_report_mfa(Report, #{mfa := Mfa, line := Line}) -> Report#{mfa => mfa(Mfa), line => Line}; -enrich_report_mfa(Report, _) -> Report. +enrich_report_mfa(Report, _) -> + Report. enrich_report_clientid(Report, #{clientid := ClientId}) -> Report#{clientid => try_format_unicode(ClientId)}; -enrich_report_clientid(Report, _) -> Report. +enrich_report_clientid(Report, _) -> + Report. enrich_report_peername(Report, #{peername := Peername}) -> Report#{peername => Peername}; -enrich_report_peername(Report, _) -> Report. +enrich_report_peername(Report, _) -> + Report. %% clientid and peername always in emqx_conn's process metadata. %% topic can be put in meta using ?SLOG/3, or put in msg's report by ?SLOG/2 @@ -70,7 +74,8 @@ enrich_report_topic(Report, #{topic := Topic}) -> Report#{topic => try_format_unicode(Topic)}; enrich_report_topic(Report = #{topic := Topic}, _) -> Report#{topic => try_format_unicode(Topic)}; -enrich_report_topic(Report, _) -> Report. +enrich_report_topic(Report, _) -> + Report. enrich_mfa({Fmt, Args}, #{mfa := Mfa, line := Line}) when is_list(Fmt) -> {Fmt ++ " mfa: ~ts line: ~w", Args ++ [mfa(Mfa), Line]}; @@ -78,7 +83,7 @@ enrich_mfa(Msg, _) -> Msg. enrich_client_info({Fmt, Args}, #{clientid := ClientId, peername := Peer}) when is_list(Fmt) -> - {" ~ts@~ts " ++ Fmt, [ClientId, Peer | Args] }; + {" ~ts@~ts " ++ Fmt, [ClientId, Peer | Args]}; enrich_client_info({Fmt, Args}, #{clientid := ClientId}) when is_list(Fmt) -> {" ~ts " ++ Fmt, [ClientId | Args]}; enrich_client_info({Fmt, Args}, #{peername := Peer}) when is_list(Fmt) -> diff --git a/apps/emqx/src/emqx_map_lib.erl b/apps/emqx/src/emqx_map_lib.erl index c22a13e33..d666e315e 100644 --- a/apps/emqx/src/emqx_map_lib.erl +++ b/apps/emqx/src/emqx_map_lib.erl @@ -15,26 +15,27 @@ %%-------------------------------------------------------------------- -module(emqx_map_lib). 
--export([ deep_get/2 - , deep_get/3 - , deep_find/2 - , deep_put/3 - , deep_remove/2 - , deep_merge/2 - , safe_atom_key_map/1 - , unsafe_atom_key_map/1 - , jsonable_map/1 - , jsonable_map/2 - , binary_string/1 - , deep_convert/3 - , diff_maps/2 - , merge_with/3 - ]). +-export([ + deep_get/2, + deep_get/3, + deep_find/2, + deep_put/3, + deep_remove/2, + deep_merge/2, + safe_atom_key_map/1, + unsafe_atom_key_map/1, + jsonable_map/1, + jsonable_map/2, + binary_string/1, + deep_convert/3, + diff_maps/2, + merge_with/3 +]). -export_type([config_key/0, config_key_path/0]). -type config_key() :: atom() | binary() | [byte()]. -type config_key_path() :: [config_key()]. --type convert_fun() :: fun((...) -> {K1::any(), V1::any()} | drop). +-type convert_fun() :: fun((...) -> {K1 :: any(), V1 :: any()} | drop). %%----------------------------------------------------------------- -spec deep_get(config_key_path(), map()) -> term(). @@ -81,8 +82,10 @@ deep_remove([Key | KeyPath], Map) -> case maps:find(Key, Map) of {ok, SubMap} when is_map(SubMap) -> Map#{Key => deep_remove(KeyPath, SubMap)}; - {ok, _Val} -> Map; - error -> Map + {ok, _Val} -> + Map; + error -> + Map end. %% #{a => #{b => 3, c => 2}, d => 4} @@ -90,7 +93,8 @@ deep_remove([Key | KeyPath], Map) -> -spec deep_merge(map(), map()) -> map(). deep_merge(BaseMap, NewMap) -> NewKeys = maps:keys(NewMap) -- maps:keys(BaseMap), - MergedBase = maps:fold(fun(K, V, Acc) -> + MergedBase = maps:fold( + fun(K, V, Acc) -> case maps:find(K, NewMap) of error -> Acc#{K => V}; @@ -99,20 +103,28 @@ deep_merge(BaseMap, NewMap) -> {ok, NewV} -> Acc#{K => NewV} end - end, #{}, BaseMap), + end, + #{}, + BaseMap + ), maps:merge(MergedBase, maps:with(NewKeys, NewMap)). --spec deep_convert(map(), convert_fun(), Args::list()) -> map(). +-spec deep_convert(map(), convert_fun(), Args :: list()) -> map(). deep_convert(Map, ConvFun, Args) when is_map(Map) -> - maps:fold(fun(K, V, Acc) -> + maps:fold( + fun(K, V, Acc) -> case apply(ConvFun, [K, deep_convert(V, ConvFun, Args) | Args]) of drop -> Acc; {K1, V1} -> Acc#{K1 => V1} end - end, #{}, Map); + end, + #{}, + Map + ); deep_convert(ListV, ConvFun, Args) when is_list(ListV) -> [deep_convert(V, ConvFun, Args) || V <- ListV]; -deep_convert(Val, _, _Args) -> Val. +deep_convert(Val, _, _Args) -> + Val. -spec unsafe_atom_key_map(#{binary() | atom() => any()}) -> #{atom() => any()}. unsafe_atom_key_map(Map) -> @@ -130,33 +142,41 @@ jsonable_map(Map, JsonableFun) -> deep_convert(Map, fun binary_string_kv/3, [JsonableFun]). -spec diff_maps(map(), map()) -> - #{added := map(), identical := map(), removed := map(), - changed := #{any() => {OldValue::any(), NewValue::any()}}}. + #{ + added := map(), + identical := map(), + removed := map(), + changed := #{any() => {OldValue :: any(), NewValue :: any()}} + }. 
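%% Usage sketch for the nested-map helpers above, with throwaway keys and
%% values; deep_merge/2 recurses into maps on both sides, as the
%% "#{a => #{b => 3, c => 2}, d => 4}" comment above illustrates.
M0 = #{a => #{b => 1, c => 2}, d => 4},
1 = emqx_map_lib:deep_get([a, b], M0),
%% #{a => #{b => 1}, d => 4}
M1 = emqx_map_lib:deep_remove([a, c], M0),
%% #{a => #{b => 9}, d => 4}
#{a := #{b := 9}, d := 4} = emqx_map_lib:deep_merge(M1, #{a => #{b => 9}}).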
diff_maps(NewMap, OldMap) -> InitR = #{identical => #{}, changed => #{}, removed => #{}}, {Result, RemInNew} = - lists:foldl(fun({OldK, OldV}, {Result0 = #{identical := I, changed := U, removed := D}, - RemNewMap}) -> - Result1 = case maps:find(OldK, NewMap) of - error -> - Result0#{removed => D#{OldK => OldV}}; - {ok, NewV} when NewV == OldV -> - Result0#{identical => I#{OldK => OldV}}; - {ok, NewV} -> - Result0#{changed => U#{OldK => {OldV, NewV}}} + lists:foldl( + fun({OldK, OldV}, {Result0 = #{identical := I, changed := U, removed := D}, RemNewMap}) -> + Result1 = + case maps:find(OldK, NewMap) of + error -> + Result0#{removed => D#{OldK => OldV}}; + {ok, NewV} when NewV == OldV -> + Result0#{identical => I#{OldK => OldV}}; + {ok, NewV} -> + Result0#{changed => U#{OldK => {OldV, NewV}}} + end, + {Result1, maps:remove(OldK, RemNewMap)} end, - {Result1, maps:remove(OldK, RemNewMap)} - end, {InitR, NewMap}, maps:to_list(OldMap)), + {InitR, NewMap}, + maps:to_list(OldMap) + ), Result#{added => RemInNew}. - binary_string_kv(K, V, JsonableFun) -> case JsonableFun(K, V) of drop -> drop; {K1, V1} -> {binary_string(K1), V1} end. -binary_string([]) -> []; +binary_string([]) -> + []; binary_string(Val) when is_list(Val) -> case io_lib:printable_unicode_list(Val) of true -> unicode:characters_to_binary(Val); @@ -167,40 +187,52 @@ binary_string(Val) -> %%--------------------------------------------------------------------------- covert_keys_to_atom(BinKeyMap, Conv) -> - deep_convert(BinKeyMap, fun + deep_convert( + BinKeyMap, + fun (K, V) when is_atom(K) -> {K, V}; (K, V) when is_binary(K) -> {Conv(K), V} - end, []). + end, + [] + ). %% copy from maps.erl OTP24.0 -compile({inline, [error_with_info/2]}). -merge_with(Combiner, Map1, Map2) when is_map(Map1), - is_map(Map2), - is_function(Combiner, 3) -> +merge_with(Combiner, Map1, Map2) when + is_map(Map1), + is_map(Map2), + is_function(Combiner, 3) +-> case map_size(Map1) > map_size(Map2) of true -> Iterator = maps:iterator(Map2), - merge_with_t(maps:next(Iterator), - Map1, - Map2, - Combiner); + merge_with_t( + maps:next(Iterator), + Map1, + Map2, + Combiner + ); false -> Iterator = maps:iterator(Map1), - merge_with_t(maps:next(Iterator), - Map2, - Map1, - fun(K, V1, V2) -> Combiner(K, V2, V1) end) + merge_with_t( + maps:next(Iterator), + Map2, + Map1, + fun(K, V1, V2) -> Combiner(K, V2, V1) end + ) end; merge_with(Combiner, Map1, Map2) -> - error_with_info(error_type_merge_intersect(Map1, Map2, Combiner), - [Combiner, Map1, Map2]). + error_with_info( + error_type_merge_intersect(Map1, Map2, Combiner), + [Combiner, Map1, Map2] + ). merge_with_t({K, V2, Iterator}, Map1, Map2, Combiner) -> case Map1 of - #{ K := V1 } -> - NewMap1 = Map1#{ K := Combiner(K, V1, V2) }, + #{K := V1} -> + NewMap1 = Map1#{K := Combiner(K, V1, V2)}, merge_with_t(maps:next(Iterator), NewMap1, Map2, Combiner); - #{ } -> + #{} -> merge_with_t(maps:next(Iterator), maps:put(K, V2, Map1), Map2, Combiner) end; merge_with_t(none, Result, _, _) -> diff --git a/apps/emqx/src/emqx_message.erl b/apps/emqx/src/emqx_message.erl index 73a3e0c1e..ae74a614b 100644 --- a/apps/emqx/src/emqx_message.erl +++ b/apps/emqx/src/emqx_message.erl @@ -23,309 +23,361 @@ -include("types.hrl"). %% Create --export([ make/2 - , make/3 - , make/4 - , make/6 - , make/7 - ]). +-export([ + make/2, + make/3, + make/4, + make/6, + make/7 +]). %% Fields --export([ id/1 - , qos/1 - , from/1 - , topic/1 - , payload/1 - , timestamp/1 - ]). 
+-export([ + id/1, + qos/1, + from/1, + topic/1, + payload/1, + timestamp/1 +]). %% Flags --export([ is_sys/1 - , clean_dup/1 - , get_flag/2 - , get_flag/3 - , get_flags/1 - , set_flag/2 - , set_flag/3 - , set_flags/2 - , unset_flag/2 - ]). +-export([ + is_sys/1, + clean_dup/1, + get_flag/2, + get_flag/3, + get_flags/1, + set_flag/2, + set_flag/3, + set_flags/2, + unset_flag/2 +]). %% Headers --export([ get_headers/1 - , get_header/2 - , get_header/3 - , set_header/3 - , set_headers/2 - , remove_header/2 - ]). +-export([ + get_headers/1, + get_header/2, + get_header/3, + set_header/3, + set_headers/2, + remove_header/2 +]). --export([ is_expired/1 - , update_expiry/1 - ]). +-export([ + is_expired/1, + update_expiry/1 +]). --export([ to_packet/2 - , to_map/1 - , to_log_map/1 - , to_list/1 - , from_map/1 - ]). +-export([ + to_packet/2, + to_map/1, + to_log_map/1, + to_list/1, + from_map/1 +]). -export_type([message_map/0]). --type(message_map() :: #{id := binary(), - qos := 0 | 1 | 2, - from := atom() | binary(), - flags := emqx_types:flags(), - headers := emqx_types:headers(), - topic := emqx_types:topic(), - payload := emqx_types:payload(), - timestamp := integer(), - extra := _} - ). +-type message_map() :: #{ + id := binary(), + qos := 0 | 1 | 2, + from := atom() | binary(), + flags := emqx_types:flags(), + headers := emqx_types:headers(), + topic := emqx_types:topic(), + payload := emqx_types:payload(), + timestamp := integer(), + extra := _ +}. -elvis([{elvis_style, god_modules, disable}]). --spec(make(emqx_types:topic(), emqx_types:payload()) -> emqx_types:message()). +-spec make(emqx_types:topic(), emqx_types:payload()) -> emqx_types:message(). make(Topic, Payload) -> make(undefined, Topic, Payload). --spec(make(emqx_types:clientid(), - emqx_types:topic(), - emqx_types:payload()) -> emqx_types:message()). +-spec make( + emqx_types:clientid(), + emqx_types:topic(), + emqx_types:payload() +) -> emqx_types:message(). make(From, Topic, Payload) -> make(From, ?QOS_0, Topic, Payload). --spec(make(emqx_types:clientid(), - emqx_types:qos(), - emqx_types:topic(), - emqx_types:payload()) -> emqx_types:message()). +-spec make( + emqx_types:clientid(), + emqx_types:qos(), + emqx_types:topic(), + emqx_types:payload() +) -> emqx_types:message(). make(From, QoS, Topic, Payload) when ?QOS_0 =< QoS, QoS =< ?QOS_2 -> Now = erlang:system_time(millisecond), - #message{id = emqx_guid:gen(), - qos = QoS, - from = From, - topic = Topic, - payload = Payload, - timestamp = Now - }. + #message{ + id = emqx_guid:gen(), + qos = QoS, + from = From, + topic = Topic, + payload = Payload, + timestamp = Now + }. --spec(make(emqx_types:clientid(), - emqx_types:qos(), - emqx_types:topic(), - emqx_types:payload(), - emqx_types:flags(), - emqx_types:headers()) -> emqx_types:message()). -make(From, QoS, Topic, Payload, Flags, Headers) - when ?QOS_0 =< QoS, QoS =< ?QOS_2, - is_map(Flags), is_map(Headers) -> +-spec make( + emqx_types:clientid(), + emqx_types:qos(), + emqx_types:topic(), + emqx_types:payload(), + emqx_types:flags(), + emqx_types:headers() +) -> emqx_types:message(). +make(From, QoS, Topic, Payload, Flags, Headers) when + ?QOS_0 =< QoS, + QoS =< ?QOS_2, + is_map(Flags), + is_map(Headers) +-> Now = erlang:system_time(millisecond), - #message{id = emqx_guid:gen(), - qos = QoS, - from = From, - flags = Flags, - headers = Headers, - topic = Topic, - payload = Payload, - timestamp = Now - }. 
+ #message{ + id = emqx_guid:gen(), + qos = QoS, + from = From, + flags = Flags, + headers = Headers, + topic = Topic, + payload = Payload, + timestamp = Now + }. --spec(make(MsgId :: binary(), - emqx_types:clientid(), - emqx_types:qos(), - emqx_types:topic(), - emqx_types:payload(), - emqx_types:flags(), - emqx_types:headers()) -> emqx_types:message()). -make(MsgId, From, QoS, Topic, Payload, Flags, Headers) - when ?QOS_0 =< QoS, QoS =< ?QOS_2, - is_map(Flags), is_map(Headers) -> +-spec make( + MsgId :: binary(), + emqx_types:clientid(), + emqx_types:qos(), + emqx_types:topic(), + emqx_types:payload(), + emqx_types:flags(), + emqx_types:headers() +) -> emqx_types:message(). +make(MsgId, From, QoS, Topic, Payload, Flags, Headers) when + ?QOS_0 =< QoS, + QoS =< ?QOS_2, + is_map(Flags), + is_map(Headers) +-> Now = erlang:system_time(millisecond), - #message{id = MsgId, - qos = QoS, - from = From, - flags = Flags, - headers = Headers, - topic = Topic, - payload = Payload, - timestamp = Now - }. + #message{ + id = MsgId, + qos = QoS, + from = From, + flags = Flags, + headers = Headers, + topic = Topic, + payload = Payload, + timestamp = Now + }. --spec(id(emqx_types:message()) -> maybe(binary())). +-spec id(emqx_types:message()) -> maybe(binary()). id(#message{id = Id}) -> Id. --spec(qos(emqx_types:message()) -> emqx_types:qos()). +-spec qos(emqx_types:message()) -> emqx_types:qos(). qos(#message{qos = QoS}) -> QoS. --spec(from(emqx_types:message()) -> atom() | binary()). +-spec from(emqx_types:message()) -> atom() | binary(). from(#message{from = From}) -> From. --spec(topic(emqx_types:message()) -> emqx_types:topic()). +-spec topic(emqx_types:message()) -> emqx_types:topic(). topic(#message{topic = Topic}) -> Topic. --spec(payload(emqx_types:message()) -> emqx_types:payload()). +-spec payload(emqx_types:message()) -> emqx_types:payload(). payload(#message{payload = Payload}) -> Payload. --spec(timestamp(emqx_types:message()) -> integer()). +-spec timestamp(emqx_types:message()) -> integer(). timestamp(#message{timestamp = TS}) -> TS. --spec(is_sys(emqx_types:message()) -> boolean()). +-spec is_sys(emqx_types:message()) -> boolean(). is_sys(#message{flags = #{sys := true}}) -> true; is_sys(#message{topic = <<"$SYS/", _/binary>>}) -> true; -is_sys(_Msg) -> false. +is_sys(_Msg) -> + false. --spec(clean_dup(emqx_types:message()) -> emqx_types:message()). +-spec clean_dup(emqx_types:message()) -> emqx_types:message(). clean_dup(Msg = #message{flags = Flags = #{dup := true}}) -> Msg#message{flags = Flags#{dup => false}}; -clean_dup(Msg) -> Msg. +clean_dup(Msg) -> + Msg. --spec(set_flags(map(), emqx_types:message()) -> emqx_types:message()). +-spec set_flags(map(), emqx_types:message()) -> emqx_types:message(). set_flags(New, Msg = #message{flags = Old}) when is_map(New) -> Msg#message{flags = maps:merge(Old, New)}. --spec(get_flag(emqx_types:flag(), emqx_types:message()) -> boolean()). +-spec get_flag(emqx_types:flag(), emqx_types:message()) -> boolean(). get_flag(Flag, Msg) -> get_flag(Flag, Msg, false). get_flag(Flag, #message{flags = Flags}, Default) -> maps:get(Flag, Flags, Default). --spec(get_flags(emqx_types:message()) -> maybe(map())). +-spec get_flags(emqx_types:message()) -> maybe(map()). get_flags(#message{flags = Flags}) -> Flags. --spec(set_flag(emqx_types:flag(), emqx_types:message()) -> emqx_types:message()). +-spec set_flag(emqx_types:flag(), emqx_types:message()) -> emqx_types:message(). 
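%% Usage sketch for the constructors and flag helpers above; the client id,
%% topic and payload are placeholders.
Msg0 = emqx_message:make(<<"client1">>, 1, <<"t/1">>, <<"hello">>),
Msg1 = emqx_message:set_flag(retain, Msg0),
true = emqx_message:get_flag(retain, Msg1),
false = emqx_message:is_sys(Msg1).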
set_flag(Flag, Msg = #message{flags = Flags}) when is_atom(Flag) -> Msg#message{flags = maps:put(Flag, true, Flags)}. --spec(set_flag(emqx_types:flag(), boolean() | integer(), emqx_types:message()) - -> emqx_types:message()). +-spec set_flag(emqx_types:flag(), boolean() | integer(), emqx_types:message()) -> + emqx_types:message(). set_flag(Flag, Val, Msg = #message{flags = Flags}) when is_atom(Flag) -> Msg#message{flags = maps:put(Flag, Val, Flags)}. --spec(unset_flag(emqx_types:flag(), emqx_types:message()) -> emqx_types:message()). +-spec unset_flag(emqx_types:flag(), emqx_types:message()) -> emqx_types:message(). unset_flag(Flag, Msg = #message{flags = Flags}) -> case maps:is_key(Flag, Flags) of - true -> Msg#message{flags = maps:remove(Flag, Flags)}; + true -> Msg#message{flags = maps:remove(Flag, Flags)}; false -> Msg end. --spec(set_headers(map(), emqx_types:message()) -> emqx_types:message()). +-spec set_headers(map(), emqx_types:message()) -> emqx_types:message(). set_headers(New, Msg = #message{headers = Old}) when is_map(New) -> Msg#message{headers = maps:merge(Old, New)}. --spec(get_headers(emqx_types:message()) -> maybe(map())). +-spec get_headers(emqx_types:message()) -> maybe(map()). get_headers(Msg) -> Msg#message.headers. --spec(get_header(term(), emqx_types:message()) -> term()). +-spec get_header(term(), emqx_types:message()) -> term(). get_header(Hdr, Msg) -> get_header(Hdr, Msg, undefined). --spec(get_header(term(), emqx_types:message(), term()) -> term()). +-spec get_header(term(), emqx_types:message(), term()) -> term(). get_header(Hdr, #message{headers = Headers}, Default) -> maps:get(Hdr, Headers, Default). --spec(set_header(term(), term(), emqx_types:message()) -> emqx_types:message()). +-spec set_header(term(), term(), emqx_types:message()) -> emqx_types:message(). set_header(Hdr, Val, Msg = #message{headers = Headers}) -> Msg#message{headers = maps:put(Hdr, Val, Headers)}. --spec(remove_header(term(), emqx_types:message()) -> emqx_types:message()). +-spec remove_header(term(), emqx_types:message()) -> emqx_types:message(). remove_header(Hdr, Msg = #message{headers = Headers}) -> case maps:is_key(Hdr, Headers) of - true -> Msg#message{headers = maps:remove(Hdr, Headers)}; + true -> Msg#message{headers = maps:remove(Hdr, Headers)}; false -> Msg end. --spec(is_expired(emqx_types:message()) -> boolean()). -is_expired(#message{headers = #{properties := #{'Message-Expiry-Interval' := Interval}}, - timestamp = CreatedAt}) -> +-spec is_expired(emqx_types:message()) -> boolean(). +is_expired(#message{ + headers = #{properties := #{'Message-Expiry-Interval' := Interval}}, + timestamp = CreatedAt +}) -> elapsed(CreatedAt) > timer:seconds(Interval); -is_expired(_Msg) -> false. +is_expired(_Msg) -> + false. --spec(update_expiry(emqx_types:message()) -> emqx_types:message()). -update_expiry(Msg = #message{headers = #{properties := #{'Message-Expiry-Interval' := Interval}}, - timestamp = CreatedAt}) -> +-spec update_expiry(emqx_types:message()) -> emqx_types:message(). +update_expiry( + Msg = #message{ + headers = #{properties := #{'Message-Expiry-Interval' := Interval}}, + timestamp = CreatedAt + } +) -> Props = maps:get(properties, Msg#message.headers), case elapsed(CreatedAt) of Elapsed when Elapsed > 0 -> Interval1 = max(1, Interval - (Elapsed div 1000)), set_header(properties, Props#{'Message-Expiry-Interval' => Interval1}, Msg); - _ -> Msg + _ -> + Msg end; -update_expiry(Msg) -> Msg. +update_expiry(Msg) -> + Msg. %% @doc Message to PUBLISH Packet. 
--spec(to_packet(emqx_types:packet_id(), emqx_types:message()) - -> emqx_types:packet()). -to_packet(PacketId, Msg = #message{qos = QoS, headers = Headers, - topic = Topic, payload = Payload}) -> - #mqtt_packet{header = #mqtt_packet_header{type = ?PUBLISH, - dup = get_flag(dup, Msg), - qos = QoS, - retain = get_flag(retain, Msg) - }, - variable = #mqtt_packet_publish{topic_name = Topic, - packet_id = PacketId, - properties = filter_pub_props( - maps:get(properties, Headers, #{})) - }, - payload = Payload - }. +-spec to_packet(emqx_types:packet_id(), emqx_types:message()) -> + emqx_types:packet(). +to_packet( + PacketId, + Msg = #message{ + qos = QoS, + headers = Headers, + topic = Topic, + payload = Payload + } +) -> + #mqtt_packet{ + header = #mqtt_packet_header{ + type = ?PUBLISH, + dup = get_flag(dup, Msg), + qos = QoS, + retain = get_flag(retain, Msg) + }, + variable = #mqtt_packet_publish{ + topic_name = Topic, + packet_id = PacketId, + properties = filter_pub_props( + maps:get(properties, Headers, #{}) + ) + }, + payload = Payload + }. filter_pub_props(Props) -> - maps:with(['Payload-Format-Indicator', - 'Message-Expiry-Interval', - 'Response-Topic', - 'Correlation-Data', - 'User-Property', - 'Subscription-Identifier', - 'Content-Type' - ], Props). + maps:with( + [ + 'Payload-Format-Indicator', + 'Message-Expiry-Interval', + 'Response-Topic', + 'Correlation-Data', + 'User-Property', + 'Subscription-Identifier', + 'Content-Type' + ], + Props + ). %% @doc Message to map --spec(to_map(emqx_types:message()) -> message_map()). +-spec to_map(emqx_types:message()) -> message_map(). to_map(#message{ - id = Id, - qos = QoS, - from = From, - flags = Flags, - headers = Headers, - topic = Topic, - payload = Payload, - timestamp = Timestamp, - extra = Extra - }) -> - #{id => Id, - qos => QoS, - from => From, - flags => Flags, - headers => Headers, - topic => Topic, - payload => Payload, - timestamp => Timestamp, - extra => Extra - }. + id = Id, + qos = QoS, + from = From, + flags = Flags, + headers = Headers, + topic = Topic, + payload = Payload, + timestamp = Timestamp, + extra = Extra +}) -> + #{ + id => Id, + qos => QoS, + from => From, + flags => Flags, + headers => Headers, + topic => Topic, + payload => Payload, + timestamp => Timestamp, + extra => Extra + }. %% @doc To map for logging, with payload dropped. to_log_map(Msg) -> maps:without([payload], to_map(Msg)). %% @doc Message to tuple list --spec(to_list(emqx_types:message()) -> list()). +-spec to_list(emqx_types:message()) -> list(). to_list(Msg) -> lists:zip(record_info(fields, message), tl(tuple_to_list(Msg))). %% @doc Map to message --spec(from_map(message_map()) -> emqx_types:message()). -from_map(#{id := Id, - qos := QoS, - from := From, - flags := Flags, - headers := Headers, - topic := Topic, - payload := Payload, - timestamp := Timestamp, - extra := Extra - }) -> +-spec from_map(message_map()) -> emqx_types:message(). +from_map(#{ + id := Id, + qos := QoS, + from := From, + flags := Flags, + headers := Headers, + topic := Topic, + payload := Payload, + timestamp := Timestamp, + extra := Extra +}) -> #message{ id = Id, qos = QoS, diff --git a/apps/emqx/src/emqx_metrics.erl b/apps/emqx/src/emqx_metrics.erl index 6401f11e2..319506ffc 100644 --- a/apps/emqx/src/emqx_metrics.erl +++ b/apps/emqx/src/emqx_metrics.erl @@ -23,57 +23,62 @@ -include("types.hrl"). -include("emqx_mqtt.hrl"). --export([ start_link/0 - , stop/0 - ]). +-export([ + start_link/0, + stop/0 +]). --export([ new/1 - , new/2 - , ensure/1 - , ensure/2 - , all/0 - ]). 
+-export([ + new/1, + new/2, + ensure/1, + ensure/2, + all/0 +]). --export([ val/1 - , inc/1 - , inc/2 - , dec/1 - , dec/2 - , set/2 - ]). +-export([ + val/1, + inc/1, + inc/2, + dec/1, + dec/2, + set/2 +]). --export([ trans/2 - , trans/3 - , commit/0 - ]). +-export([ + trans/2, + trans/3, + commit/0 +]). %% Inc received/sent metrics --export([ inc_msg/1 - , inc_recv/1 - , inc_sent/1 - ]). +-export([ + inc_msg/1, + inc_recv/1, + inc_sent/1 +]). %% gen_server callbacks --export([ init/1 - , handle_call/3 - , handle_cast/2 - , handle_info/2 - , terminate/2 - , code_change/3 - ]). +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2, + code_change/3 +]). %% BACKW: v4.3.0 --export([ upgrade_retained_delayed_counter_type/0 - ]). +-export([upgrade_retained_delayed_counter_type/0]). -export_type([metric_idx/0]). -compile({inline, [inc/1, inc/2, dec/1, dec/2]}). -compile({inline, [inc_recv/1, inc_sent/1]}). --opaque(metric_idx() :: 1..1024). +-opaque metric_idx() :: 1..1024. --type(metric_name() :: atom() | string() | binary()). +-type metric_name() :: atom() | string() | binary(). -define(MAX_SIZE, 1024). -define(RESERVED_IDX, 512). @@ -82,133 +87,198 @@ %% Bytes sent and received -define(BYTES_METRICS, - [{counter, 'bytes.received'}, % Total bytes received - {counter, 'bytes.sent'} % Total bytes sent - ]). + % Total bytes received + [ + {counter, 'bytes.received'}, + % Total bytes sent + {counter, 'bytes.sent'} + ] +). %% Packets sent and received -define(PACKET_METRICS, - [{counter, 'packets.received'}, % All Packets received - {counter, 'packets.sent'}, % All Packets sent - {counter, 'packets.connect.received'}, % CONNECT Packets received - {counter, 'packets.connack.sent'}, % CONNACK Packets sent - {counter, 'packets.connack.error'}, % CONNACK error sent - {counter, 'packets.connack.auth_error'}, % CONNACK auth_error sent - {counter, 'packets.publish.received'}, % PUBLISH packets received - {counter, 'packets.publish.sent'}, % PUBLISH packets sent - {counter, 'packets.publish.inuse'}, % PUBLISH packet_id inuse - {counter, 'packets.publish.error'}, % PUBLISH failed for error - {counter, 'packets.publish.auth_error'}, % PUBLISH failed for auth error - {counter, 'packets.publish.dropped'}, % PUBLISH(QoS2) packets dropped - {counter, 'packets.puback.received'}, % PUBACK packets received - {counter, 'packets.puback.sent'}, % PUBACK packets sent - {counter, 'packets.puback.inuse'}, % PUBACK packet_id inuse - {counter, 'packets.puback.missed'}, % PUBACK packets missed - {counter, 'packets.pubrec.received'}, % PUBREC packets received - {counter, 'packets.pubrec.sent'}, % PUBREC packets sent - {counter, 'packets.pubrec.inuse'}, % PUBREC packet_id inuse - {counter, 'packets.pubrec.missed'}, % PUBREC packets missed - {counter, 'packets.pubrel.received'}, % PUBREL packets received - {counter, 'packets.pubrel.sent'}, % PUBREL packets sent - {counter, 'packets.pubrel.missed'}, % PUBREL packets missed - {counter, 'packets.pubcomp.received'}, % PUBCOMP packets received - {counter, 'packets.pubcomp.sent'}, % PUBCOMP packets sent - {counter, 'packets.pubcomp.inuse'}, % PUBCOMP packet_id inuse - {counter, 'packets.pubcomp.missed'}, % PUBCOMP packets missed - {counter, 'packets.subscribe.received'}, % SUBSCRIBE Packets received - {counter, 'packets.subscribe.error'}, % SUBSCRIBE error - {counter, 'packets.subscribe.auth_error'}, % SUBSCRIBE failed for not auth - {counter, 'packets.suback.sent'}, % SUBACK packets sent - {counter, 'packets.unsubscribe.received'}, % UNSUBSCRIBE 
Packets received - {counter, 'packets.unsubscribe.error'}, % UNSUBSCRIBE error - {counter, 'packets.unsuback.sent'}, % UNSUBACK Packets sent - {counter, 'packets.pingreq.received'}, % PINGREQ packets received - {counter, 'packets.pingresp.sent'}, % PINGRESP Packets sent - {counter, 'packets.disconnect.received'}, % DISCONNECT Packets received - {counter, 'packets.disconnect.sent'}, % DISCONNECT Packets sent - {counter, 'packets.auth.received'}, % Auth Packets received - {counter, 'packets.auth.sent'} % Auth Packets sent - ]). + % All Packets received + [ + {counter, 'packets.received'}, + % All Packets sent + {counter, 'packets.sent'}, + % CONNECT Packets received + {counter, 'packets.connect.received'}, + % CONNACK Packets sent + {counter, 'packets.connack.sent'}, + % CONNACK error sent + {counter, 'packets.connack.error'}, + % CONNACK auth_error sent + {counter, 'packets.connack.auth_error'}, + % PUBLISH packets received + {counter, 'packets.publish.received'}, + % PUBLISH packets sent + {counter, 'packets.publish.sent'}, + % PUBLISH packet_id inuse + {counter, 'packets.publish.inuse'}, + % PUBLISH failed for error + {counter, 'packets.publish.error'}, + % PUBLISH failed for auth error + {counter, 'packets.publish.auth_error'}, + % PUBLISH(QoS2) packets dropped + {counter, 'packets.publish.dropped'}, + % PUBACK packets received + {counter, 'packets.puback.received'}, + % PUBACK packets sent + {counter, 'packets.puback.sent'}, + % PUBACK packet_id inuse + {counter, 'packets.puback.inuse'}, + % PUBACK packets missed + {counter, 'packets.puback.missed'}, + % PUBREC packets received + {counter, 'packets.pubrec.received'}, + % PUBREC packets sent + {counter, 'packets.pubrec.sent'}, + % PUBREC packet_id inuse + {counter, 'packets.pubrec.inuse'}, + % PUBREC packets missed + {counter, 'packets.pubrec.missed'}, + % PUBREL packets received + {counter, 'packets.pubrel.received'}, + % PUBREL packets sent + {counter, 'packets.pubrel.sent'}, + % PUBREL packets missed + {counter, 'packets.pubrel.missed'}, + % PUBCOMP packets received + {counter, 'packets.pubcomp.received'}, + % PUBCOMP packets sent + {counter, 'packets.pubcomp.sent'}, + % PUBCOMP packet_id inuse + {counter, 'packets.pubcomp.inuse'}, + % PUBCOMP packets missed + {counter, 'packets.pubcomp.missed'}, + % SUBSCRIBE Packets received + {counter, 'packets.subscribe.received'}, + % SUBSCRIBE error + {counter, 'packets.subscribe.error'}, + % SUBSCRIBE failed for not auth + {counter, 'packets.subscribe.auth_error'}, + % SUBACK packets sent + {counter, 'packets.suback.sent'}, + % UNSUBSCRIBE Packets received + {counter, 'packets.unsubscribe.received'}, + % UNSUBSCRIBE error + {counter, 'packets.unsubscribe.error'}, + % UNSUBACK Packets sent + {counter, 'packets.unsuback.sent'}, + % PINGREQ packets received + {counter, 'packets.pingreq.received'}, + % PINGRESP Packets sent + {counter, 'packets.pingresp.sent'}, + % DISCONNECT Packets received + {counter, 'packets.disconnect.received'}, + % DISCONNECT Packets sent + {counter, 'packets.disconnect.sent'}, + % Auth Packets received + {counter, 'packets.auth.received'}, + % Auth Packets sent + {counter, 'packets.auth.sent'} + ] +). 
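The counter names listed above are the stable, user-facing metric keys; the rest of this module only maps them onto slots in a shared counters array. A minimal usage sketch (illustrative only, not part of this patch), assuming the emqx_metrics server is already running:

    %% Bump and read one of the predefined packet counters.
    ok = emqx_metrics:inc('packets.publish.received'),
    Count = emqx_metrics:val('packets.publish.received'),
    true = is_integer(Count) andalso Count >= 1.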
%% Messages sent/received and pubsub -define(MESSAGE_METRICS, - [{counter, 'messages.received'}, % All Messages received - {counter, 'messages.sent'}, % All Messages sent - {counter, 'messages.qos0.received'}, % QoS0 Messages received - {counter, 'messages.qos0.sent'}, % QoS0 Messages sent - {counter, 'messages.qos1.received'}, % QoS1 Messages received - {counter, 'messages.qos1.sent'}, % QoS1 Messages sent - {counter, 'messages.qos2.received'}, % QoS2 Messages received - {counter, 'messages.qos2.sent'}, % QoS2 Messages sent - %% PubSub Metrics - {counter, 'messages.publish'}, % Messages Publish - {counter, 'messages.dropped'}, % Messages dropped due to no subscribers - {counter, 'messages.dropped.await_pubrel_timeout'}, % QoS2 Messages expired - {counter, 'messages.dropped.no_subscribers'}, % Messages dropped - {counter, 'messages.forward'}, % Messages forward - {counter, 'messages.delayed'}, % Messages delayed - {counter, 'messages.delivered'}, % Messages delivered - {counter, 'messages.acked'} % Messages acked - ]). + % All Messages received + [ + {counter, 'messages.received'}, + % All Messages sent + {counter, 'messages.sent'}, + % QoS0 Messages received + {counter, 'messages.qos0.received'}, + % QoS0 Messages sent + {counter, 'messages.qos0.sent'}, + % QoS1 Messages received + {counter, 'messages.qos1.received'}, + % QoS1 Messages sent + {counter, 'messages.qos1.sent'}, + % QoS2 Messages received + {counter, 'messages.qos2.received'}, + % QoS2 Messages sent + {counter, 'messages.qos2.sent'}, + %% PubSub Metrics + + % Messages Publish + {counter, 'messages.publish'}, + % Messages dropped due to no subscribers + {counter, 'messages.dropped'}, + % QoS2 Messages expired + {counter, 'messages.dropped.await_pubrel_timeout'}, + % Messages dropped + {counter, 'messages.dropped.no_subscribers'}, + % Messages forward + {counter, 'messages.forward'}, + % Messages delayed + {counter, 'messages.delayed'}, + % Messages delivered + {counter, 'messages.delivered'}, + % Messages acked + {counter, 'messages.acked'} + ] +). %% Delivery metrics --define(DELIVERY_METRICS, - [{counter, 'delivery.dropped'}, - {counter, 'delivery.dropped.no_local'}, - {counter, 'delivery.dropped.too_large'}, - {counter, 'delivery.dropped.qos0_msg'}, - {counter, 'delivery.dropped.queue_full'}, - {counter, 'delivery.dropped.expired'} - ]). +-define(DELIVERY_METRICS, [ + {counter, 'delivery.dropped'}, + {counter, 'delivery.dropped.no_local'}, + {counter, 'delivery.dropped.too_large'}, + {counter, 'delivery.dropped.qos0_msg'}, + {counter, 'delivery.dropped.queue_full'}, + {counter, 'delivery.dropped.expired'} +]). %% Client Lifecircle metrics --define(CLIENT_METRICS, - [{counter, 'client.connect'}, - {counter, 'client.connack'}, - {counter, 'client.connected'}, - {counter, 'client.authenticate'}, - {counter, 'client.auth.anonymous'}, - {counter, 'client.authorize'}, - {counter, 'client.subscribe'}, - {counter, 'client.unsubscribe'}, - {counter, 'client.disconnected'} - ]). +-define(CLIENT_METRICS, [ + {counter, 'client.connect'}, + {counter, 'client.connack'}, + {counter, 'client.connected'}, + {counter, 'client.authenticate'}, + {counter, 'client.auth.anonymous'}, + {counter, 'client.authorize'}, + {counter, 'client.subscribe'}, + {counter, 'client.unsubscribe'}, + {counter, 'client.disconnected'} +]). %% Session Lifecircle metrics --define(SESSION_METRICS, - [{counter, 'session.created'}, - {counter, 'session.resumed'}, - {counter, 'session.takenover'}, - {counter, 'session.discarded'}, - {counter, 'session.terminated'} - ]). 
+-define(SESSION_METRICS, [ + {counter, 'session.created'}, + {counter, 'session.resumed'}, + {counter, 'session.takenover'}, + {counter, 'session.discarded'}, + {counter, 'session.terminated'} +]). %% Statistic metrics for ACL checking --define(STASTS_ACL_METRICS, - [ {counter, 'client.acl.allow'}, - {counter, 'client.acl.deny'}, - {counter, 'client.acl.cache_hit'} - ]). +-define(STASTS_ACL_METRICS, [ + {counter, 'client.acl.allow'}, + {counter, 'client.acl.deny'}, + {counter, 'client.acl.cache_hit'} +]). %% Overload protetion counters --define(OLP_METRICS, - [{counter, 'olp.delay.ok'}, - {counter, 'olp.delay.timeout'}, - {counter, 'olp.hbn'}, - {counter, 'olp.gc'}, - {counter, 'olp.new_conn'} - ]). +-define(OLP_METRICS, [ + {counter, 'olp.delay.ok'}, + {counter, 'olp.delay.timeout'}, + {counter, 'olp.hbn'}, + {counter, 'olp.gc'}, + {counter, 'olp.new_conn'} +]). -record(state, {next_idx = 1}). -record(metric, {name, type, idx}). %% @doc Start the metrics server. --spec(start_link() -> startlink_ret()). +-spec start_link() -> startlink_ret(). start_link() -> gen_server:start_link({local, ?SERVER}, ?MODULE, [], []). --spec(stop() -> ok). +-spec stop() -> ok. stop() -> gen_server:stop(?SERVER). %% BACKW: v4.3.0 @@ -220,21 +290,21 @@ upgrade_retained_delayed_counter_type() -> %% Metrics API %%-------------------------------------------------------------------- --spec(new(metric_name()) -> ok). +-spec new(metric_name()) -> ok. new(Name) -> new(counter, Name). --spec(new(gauge|counter, metric_name()) -> ok). +-spec new(gauge | counter, metric_name()) -> ok. new(gauge, Name) -> create(gauge, Name); new(counter, Name) -> create(counter, Name). --spec(ensure(metric_name()) -> ok). +-spec ensure(metric_name()) -> ok. ensure(Name) -> ensure(counter, Name). --spec(ensure(gauge|counter, metric_name()) -> ok). +-spec ensure(gauge | counter, metric_name()) -> ok. ensure(Type, Name) when Type =:= gauge; Type =:= counter -> case ets:lookup(?TAB, Name) of [] -> create(Type, Name); @@ -249,73 +319,80 @@ create(Type, Name) -> end. %% @doc Get all metrics --spec(all() -> [{metric_name(), non_neg_integer()}]). +-spec all() -> [{metric_name(), non_neg_integer()}]. all() -> CRef = persistent_term:get(?MODULE), - [{Name, counters:get(CRef, Idx)} - || #metric{name = Name, idx = Idx} <- ets:tab2list(?TAB)]. + [ + {Name, counters:get(CRef, Idx)} + || #metric{name = Name, idx = Idx} <- ets:tab2list(?TAB) + ]. %% @doc Get metric value --spec(val(metric_name()) -> maybe(non_neg_integer())). +-spec val(metric_name()) -> maybe(non_neg_integer()). val(Name) -> case ets:lookup(?TAB, Name) of [#metric{idx = Idx}] -> CRef = persistent_term:get(?MODULE), counters:get(CRef, Idx); - [] -> undefined + [] -> + undefined end. %% @doc Increase counter --spec(inc(metric_name()) -> ok). +-spec inc(metric_name()) -> ok. inc(Name) -> inc(Name, 1). %% @doc Increase metric value --spec(inc(metric_name(), pos_integer()) -> ok). +-spec inc(metric_name(), pos_integer()) -> ok. inc(Name, Value) -> update_counter(Name, Value). %% @doc Decrease metric value --spec(dec(metric_name()) -> ok). +-spec dec(metric_name()) -> ok. dec(Name) -> dec(Name, 1). %% @doc Decrease metric value --spec(dec(metric_name(), pos_integer()) -> ok). +-spec dec(metric_name(), pos_integer()) -> ok. dec(Name, Value) -> update_counter(Name, -Value). %% @doc Set metric value --spec(set(metric_name(), integer()) -> ok). +-spec set(metric_name(), integer()) -> ok. 
set(Name, Value) -> CRef = persistent_term:get(?MODULE), Idx = ets:lookup_element(?TAB, Name, 4), counters:put(CRef, Idx, Value). --spec(trans(inc | dec, metric_name()) -> ok). +-spec trans(inc | dec, metric_name()) -> ok. trans(Op, Name) when Op =:= inc; Op =:= dec -> trans(Op, Name, 1). --spec(trans(inc | dec, metric_name(), pos_integer()) -> ok). +-spec trans(inc | dec, metric_name(), pos_integer()) -> ok. trans(inc, Name, Value) -> cache(Name, Value); trans(dec, Name, Value) -> cache(Name, -Value). --spec(cache(metric_name(), integer()) -> ok). +-spec cache(metric_name(), integer()) -> ok. cache(Name, Value) -> - put('$metrics', case get('$metrics') of - undefined -> - #{Name => Value}; - Metrics -> - maps:update_with(Name, fun(Cnt) -> Cnt + Value end, Value, Metrics) - end), + put( + '$metrics', + case get('$metrics') of + undefined -> + #{Name => Value}; + Metrics -> + maps:update_with(Name, fun(Cnt) -> Cnt + Value end, Value, Metrics) + end + ), ok. --spec(commit() -> ok). +-spec commit() -> ok. commit() -> case get('$metrics') of - undefined -> ok; + undefined -> + ok; Metrics -> _ = erase('$metrics'), lists:foreach(fun update_counter/1, maps:to_list(Metrics)) @@ -326,18 +403,18 @@ update_counter({Name, Value}) -> update_counter(Name, Value) -> CRef = persistent_term:get(?MODULE), - CIdx = case reserved_idx(Name) of - Idx when is_integer(Idx) -> Idx; - undefined -> - ets:lookup_element(?TAB, Name, 4) - end, + CIdx = + case reserved_idx(Name) of + Idx when is_integer(Idx) -> Idx; + undefined -> ets:lookup_element(?TAB, Name, 4) + end, counters:add(CRef, CIdx, Value). %%-------------------------------------------------------------------- %% Inc received/sent metrics %%-------------------------------------------------------------------- --spec(inc_msg(emqx_types:massage()) -> ok). +-spec inc_msg(emqx_types:massage()) -> ok. inc_msg(Msg) -> case Msg#message.qos of 0 -> inc('messages.qos0.received'); @@ -347,7 +424,7 @@ inc_msg(Msg) -> inc('messages.received'). %% @doc Inc packets received. --spec(inc_recv(emqx_types:packet()) -> ok). +-spec inc_recv(emqx_types:packet()) -> ok. inc_recv(Packet) -> inc('packets.received'), do_inc_recv(Packet). @@ -381,10 +458,11 @@ do_inc_recv(?PACKET(?DISCONNECT)) -> inc('packets.disconnect.received'); do_inc_recv(?PACKET(?AUTH)) -> inc('packets.auth.received'); -do_inc_recv(_Packet) -> ok. +do_inc_recv(_Packet) -> + ok. %% @doc Inc packets sent. Will not count $SYS PUBLISH. --spec(inc_sent(emqx_types:packet()) -> ok). +-spec inc_sent(emqx_types:packet()) -> ok. inc_sent(?PUBLISH_PACKET(_QoS, <<"$SYS/", _/binary>>, _, _)) -> ok; inc_sent(Packet) -> @@ -396,7 +474,6 @@ do_inc_sent(?CONNACK_PACKET(ReasonCode)) -> (ReasonCode == ?RC_NOT_AUTHORIZED) andalso inc('packets.connack.auth_error'), (ReasonCode == ?RC_BAD_USER_NAME_OR_PASSWORD) andalso inc('packets.connack.auth_error'), inc('packets.connack.sent'); - do_inc_sent(?PUBLISH_PACKET(QoS)) -> inc('messages.sent'), case QoS of @@ -428,7 +505,8 @@ do_inc_sent(?PACKET(?DISCONNECT)) -> inc('packets.disconnect.sent'); do_inc_sent(?PACKET(?AUTH)) -> inc('packets.auth.sent'); -do_inc_sent(_Packet) -> ok. +do_inc_sent(_Packet) -> + ok. 
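Besides the direct inc/dec calls, the trans/2,3 and commit/0 functions reformatted above let a hot code path batch several metric updates: deltas are accumulated under the '$metrics' key in the caller's process dictionary and flushed into the counters in one pass. A hedged usage sketch, not part of this patch:

    %% Accumulate deltas locally, then flush them in a single commit.
    ok = emqx_metrics:trans(inc, 'messages.received'),
    ok = emqx_metrics:trans(inc, 'messages.qos1.received', 2),
    ok = emqx_metrics:trans(dec, 'messages.qos1.received'),
    ok = emqx_metrics:commit().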
%%-------------------------------------------------------------------- %% gen_server callbacks @@ -440,22 +518,26 @@ init([]) -> ok = persistent_term:put(?MODULE, CRef), % Create index mapping table ok = emqx_tables:new(?TAB, [{keypos, 2}, {read_concurrency, true}]), - Metrics = lists:append([?BYTES_METRICS, - ?PACKET_METRICS, - ?MESSAGE_METRICS, - ?DELIVERY_METRICS, - ?CLIENT_METRICS, - ?SESSION_METRICS, - ?STASTS_ACL_METRICS, - ?OLP_METRICS - ]), + Metrics = lists:append([ + ?BYTES_METRICS, + ?PACKET_METRICS, + ?MESSAGE_METRICS, + ?DELIVERY_METRICS, + ?CLIENT_METRICS, + ?SESSION_METRICS, + ?STASTS_ACL_METRICS, + ?OLP_METRICS + ]), % Store reserved indices - ok = lists:foreach(fun({Type, Name}) -> - Idx = reserved_idx(Name), - Metric = #metric{name = Name, type = Type, idx = Idx}, - true = ets:insert(?TAB, Metric), - ok = counters:put(CRef, Idx, 0) - end, Metrics), + ok = lists:foreach( + fun({Type, Name}) -> + Idx = reserved_idx(Name), + Metric = #metric{name = Name, type = Type, idx = Idx}, + true = ets:insert(?TAB, Metric), + ok = counters:put(CRef, Idx, 0) + end, + Metrics + ), {ok, #state{next_idx = ?RESERVED_IDX + 1}, hibernate}. handle_call({create, Type, Name}, _From, State = #state{next_idx = ?MAX_SIZE}) -> @@ -465,7 +547,6 @@ handle_call({create, Type, Name}, _From, State = #state{next_idx = ?MAX_SIZE}) - name => Name }), {reply, {error, metric_index_exceeded}, State}; - handle_call({create, Type, Name}, _From, State = #state{next_idx = NextIdx}) -> case ets:lookup(?TAB, Name) of [#metric{idx = Idx}] -> @@ -476,14 +557,14 @@ handle_call({create, Type, Name}, _From, State = #state{next_idx = NextIdx}) -> true = ets:insert(?TAB, Metric), {reply, {ok, NextIdx}, State#state{next_idx = NextIdx + 1}} end; - handle_call({set_type_to_counter, Keys}, _From, State) -> lists:foreach( - fun(K) -> - ets:update_element(?TAB, K, {#metric.type, counter}) - end, Keys), + fun(K) -> + ets:update_element(?TAB, K, {#metric.type, counter}) + end, + Keys + ), {reply, ok, State}; - handle_call(Req, _From, State) -> ?SLOG(error, #{msg => "unexpected_call", req => Req}), {reply, ignored, State}. 
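For context on the storage model visible in init/1 and handle_call/3 above: a single counters array is published via persistent_term, the first ?RESERVED_IDX slots are pre-assigned to the predefined metrics through reserved_idx/1, and metrics created at runtime receive the next free index (up to ?MAX_SIZE), recorded in the ?TAB ETS table. A stripped-down sketch of the same pattern using only standard OTP APIs (the array size and options are illustrative, and my_metrics_demo is a made-up persistent_term key):

    %% One shared counters array, addressed by index.
    CRef = counters:new(1024, [write_concurrency]),
    ok = persistent_term:put(my_metrics_demo, CRef),
    ok = counters:put(CRef, 10, 0),   %% reserve slot 10, e.g. for 'packets.received'
    ok = counters:add(CRef, 10, 1),   %% increment it
    1 = counters:get(CRef, 10).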
@@ -506,100 +587,95 @@ code_change(_OldVsn, State, _Extra) -> %% Internal functions %%-------------------------------------------------------------------- -reserved_idx('bytes.received') -> 01; -reserved_idx('bytes.sent') -> 02; +reserved_idx('bytes.received') -> 01; +reserved_idx('bytes.sent') -> 02; %% Reserved indices of packet's metrics -reserved_idx('packets.received') -> 10; -reserved_idx('packets.sent') -> 11; -reserved_idx('packets.connect.received') -> 12; -reserved_idx('packets.connack.sent') -> 13; -reserved_idx('packets.connack.error') -> 14; -reserved_idx('packets.connack.auth_error') -> 15; -reserved_idx('packets.publish.received') -> 16; -reserved_idx('packets.publish.sent') -> 17; -reserved_idx('packets.publish.inuse') -> 18; -reserved_idx('packets.publish.error') -> 19; -reserved_idx('packets.publish.auth_error') -> 20; -reserved_idx('packets.puback.received') -> 21; -reserved_idx('packets.puback.sent') -> 22; -reserved_idx('packets.puback.inuse') -> 23; -reserved_idx('packets.puback.missed') -> 24; -reserved_idx('packets.pubrec.received') -> 25; -reserved_idx('packets.pubrec.sent') -> 26; -reserved_idx('packets.pubrec.inuse') -> 27; -reserved_idx('packets.pubrec.missed') -> 28; -reserved_idx('packets.pubrel.received') -> 29; -reserved_idx('packets.pubrel.sent') -> 30; -reserved_idx('packets.pubrel.missed') -> 31; -reserved_idx('packets.pubcomp.received') -> 32; -reserved_idx('packets.pubcomp.sent') -> 33; -reserved_idx('packets.pubcomp.inuse') -> 34; -reserved_idx('packets.pubcomp.missed') -> 35; -reserved_idx('packets.subscribe.received') -> 36; -reserved_idx('packets.subscribe.error') -> 37; +reserved_idx('packets.received') -> 10; +reserved_idx('packets.sent') -> 11; +reserved_idx('packets.connect.received') -> 12; +reserved_idx('packets.connack.sent') -> 13; +reserved_idx('packets.connack.error') -> 14; +reserved_idx('packets.connack.auth_error') -> 15; +reserved_idx('packets.publish.received') -> 16; +reserved_idx('packets.publish.sent') -> 17; +reserved_idx('packets.publish.inuse') -> 18; +reserved_idx('packets.publish.error') -> 19; +reserved_idx('packets.publish.auth_error') -> 20; +reserved_idx('packets.puback.received') -> 21; +reserved_idx('packets.puback.sent') -> 22; +reserved_idx('packets.puback.inuse') -> 23; +reserved_idx('packets.puback.missed') -> 24; +reserved_idx('packets.pubrec.received') -> 25; +reserved_idx('packets.pubrec.sent') -> 26; +reserved_idx('packets.pubrec.inuse') -> 27; +reserved_idx('packets.pubrec.missed') -> 28; +reserved_idx('packets.pubrel.received') -> 29; +reserved_idx('packets.pubrel.sent') -> 30; +reserved_idx('packets.pubrel.missed') -> 31; +reserved_idx('packets.pubcomp.received') -> 32; +reserved_idx('packets.pubcomp.sent') -> 33; +reserved_idx('packets.pubcomp.inuse') -> 34; +reserved_idx('packets.pubcomp.missed') -> 35; +reserved_idx('packets.subscribe.received') -> 36; +reserved_idx('packets.subscribe.error') -> 37; reserved_idx('packets.subscribe.auth_error') -> 38; -reserved_idx('packets.suback.sent') -> 39; +reserved_idx('packets.suback.sent') -> 39; reserved_idx('packets.unsubscribe.received') -> 40; -reserved_idx('packets.unsubscribe.error') -> 41; -reserved_idx('packets.unsuback.sent') -> 42; -reserved_idx('packets.pingreq.received') -> 43; -reserved_idx('packets.pingresp.sent') -> 44; -reserved_idx('packets.disconnect.received') -> 45; -reserved_idx('packets.disconnect.sent') -> 46; -reserved_idx('packets.auth.received') -> 47; -reserved_idx('packets.auth.sent') -> 48; -reserved_idx('packets.publish.dropped') -> 49; 
+reserved_idx('packets.unsubscribe.error') -> 41; +reserved_idx('packets.unsuback.sent') -> 42; +reserved_idx('packets.pingreq.received') -> 43; +reserved_idx('packets.pingresp.sent') -> 44; +reserved_idx('packets.disconnect.received') -> 45; +reserved_idx('packets.disconnect.sent') -> 46; +reserved_idx('packets.auth.received') -> 47; +reserved_idx('packets.auth.sent') -> 48; +reserved_idx('packets.publish.dropped') -> 49; %% Reserved indices of message's metrics -reserved_idx('messages.received') -> 100; -reserved_idx('messages.sent') -> 101; -reserved_idx('messages.qos0.received') -> 102; -reserved_idx('messages.qos0.sent') -> 103; -reserved_idx('messages.qos1.received') -> 104; -reserved_idx('messages.qos1.sent') -> 105; -reserved_idx('messages.qos2.received') -> 106; -reserved_idx('messages.qos2.sent') -> 107; -reserved_idx('messages.publish') -> 108; -reserved_idx('messages.dropped') -> 109; -reserved_idx('messages.dropped.await_pubrel_timeout') -> 110; +reserved_idx('messages.received') -> 100; +reserved_idx('messages.sent') -> 101; +reserved_idx('messages.qos0.received') -> 102; +reserved_idx('messages.qos0.sent') -> 103; +reserved_idx('messages.qos1.received') -> 104; +reserved_idx('messages.qos1.sent') -> 105; +reserved_idx('messages.qos2.received') -> 106; +reserved_idx('messages.qos2.sent') -> 107; +reserved_idx('messages.publish') -> 108; +reserved_idx('messages.dropped') -> 109; +reserved_idx('messages.dropped.await_pubrel_timeout') -> 110; reserved_idx('messages.dropped.no_subscribers') -> 111; -reserved_idx('messages.forward') -> 112; +reserved_idx('messages.forward') -> 112; %%reserved_idx('messages.retained') -> 113; %% keep the index, new metrics can use this -reserved_idx('messages.delayed') -> 114; -reserved_idx('messages.delivered') -> 115; -reserved_idx('messages.acked') -> 116; -reserved_idx('delivery.expired') -> 117; -reserved_idx('delivery.dropped') -> 118; -reserved_idx('delivery.dropped.no_local') -> 119; -reserved_idx('delivery.dropped.too_large') -> 120; -reserved_idx('delivery.dropped.qos0_msg') -> 121; -reserved_idx('delivery.dropped.queue_full') -> 122; -reserved_idx('delivery.dropped.expired') -> 123; - -reserved_idx('client.connect') -> 200; -reserved_idx('client.connack') -> 201; -reserved_idx('client.connected') -> 202; -reserved_idx('client.authenticate') -> 203; +reserved_idx('messages.delayed') -> 114; +reserved_idx('messages.delivered') -> 115; +reserved_idx('messages.acked') -> 116; +reserved_idx('delivery.expired') -> 117; +reserved_idx('delivery.dropped') -> 118; +reserved_idx('delivery.dropped.no_local') -> 119; +reserved_idx('delivery.dropped.too_large') -> 120; +reserved_idx('delivery.dropped.qos0_msg') -> 121; +reserved_idx('delivery.dropped.queue_full') -> 122; +reserved_idx('delivery.dropped.expired') -> 123; +reserved_idx('client.connect') -> 200; +reserved_idx('client.connack') -> 201; +reserved_idx('client.connected') -> 202; +reserved_idx('client.authenticate') -> 203; reserved_idx('client.enhanced_authenticate') -> 204; -reserved_idx('client.auth.anonymous') -> 205; -reserved_idx('client.authorize') -> 206; -reserved_idx('client.subscribe') -> 207; -reserved_idx('client.unsubscribe') -> 208; -reserved_idx('client.disconnected') -> 209; - -reserved_idx('session.created') -> 220; -reserved_idx('session.resumed') -> 221; -reserved_idx('session.takenover') -> 222; -reserved_idx('session.discarded') -> 223; -reserved_idx('session.terminated') -> 224; - -reserved_idx('client.acl.allow') -> 300; -reserved_idx('client.acl.deny') -> 301; 
-reserved_idx('client.acl.cache_hit') -> 302; - -reserved_idx('olp.delay.ok') -> 400; -reserved_idx('olp.delay.timeout') -> 401; -reserved_idx('olp.hbn') -> 402; -reserved_idx('olp.gc') -> 403; -reserved_idx('olp.new_conn') -> 404; - -reserved_idx(_) -> undefined. +reserved_idx('client.auth.anonymous') -> 205; +reserved_idx('client.authorize') -> 206; +reserved_idx('client.subscribe') -> 207; +reserved_idx('client.unsubscribe') -> 208; +reserved_idx('client.disconnected') -> 209; +reserved_idx('session.created') -> 220; +reserved_idx('session.resumed') -> 221; +reserved_idx('session.takenover') -> 222; +reserved_idx('session.discarded') -> 223; +reserved_idx('session.terminated') -> 224; +reserved_idx('client.acl.allow') -> 300; +reserved_idx('client.acl.deny') -> 301; +reserved_idx('client.acl.cache_hit') -> 302; +reserved_idx('olp.delay.ok') -> 400; +reserved_idx('olp.delay.timeout') -> 401; +reserved_idx('olp.hbn') -> 402; +reserved_idx('olp.gc') -> 403; +reserved_idx('olp.new_conn') -> 404; +reserved_idx(_) -> undefined. diff --git a/apps/emqx/src/emqx_misc.erl b/apps/emqx/src/emqx_misc.erl index f3154cbbf..f2c4c6b79 100644 --- a/apps/emqx/src/emqx_misc.erl +++ b/apps/emqx/src/emqx_misc.erl @@ -22,39 +22,41 @@ -include("types.hrl"). -include("logger.hrl"). --export([ merge_opts/2 - , maybe_apply/2 - , compose/1 - , compose/2 - , run_fold/3 - , pipeline/3 - , start_timer/2 - , start_timer/3 - , cancel_timer/1 - , drain_deliver/0 - , drain_deliver/1 - , drain_down/1 - , check_oom/1 - , check_oom/2 - , tune_heap_size/1 - , proc_name/2 - , proc_stats/0 - , proc_stats/1 - , rand_seed/0 - , now_to_secs/1 - , now_to_ms/1 - , index_of/2 - , maybe_parse_ip/1 - , ipv6_probe/1 - , gen_id/0 - , gen_id/1 - , explain_posix/1 - ]). +-export([ + merge_opts/2, + maybe_apply/2, + compose/1, + compose/2, + run_fold/3, + pipeline/3, + start_timer/2, + start_timer/3, + cancel_timer/1, + drain_deliver/0, + drain_deliver/1, + drain_down/1, + check_oom/1, + check_oom/2, + tune_heap_size/1, + proc_name/2, + proc_stats/0, + proc_stats/1, + rand_seed/0, + now_to_secs/1, + now_to_ms/1, + index_of/2, + maybe_parse_ip/1, + ipv6_probe/1, + gen_id/0, + gen_id/1, + explain_posix/1 +]). --export([ bin2hexstr_A_F/1 - , bin2hexstr_a_f/1 - , hexstr2bin/1 - ]). +-export([ + bin2hexstr_A_F/1, + bin2hexstr_a_f/1, + hexstr2bin/1 +]). -export([clamp/3]). @@ -77,33 +79,43 @@ ipv6_probe(Opts) -> end. %% @doc Merge options --spec(merge_opts(Opts, Opts) -> Opts when Opts :: proplists:proplist()). +-spec merge_opts(Opts, Opts) -> Opts when Opts :: proplists:proplist(). merge_opts(Defaults, Options) -> lists:foldl( - fun({Opt, Val}, Acc) -> - lists:keystore(Opt, 1, Acc, {Opt, Val}); - (Opt, Acc) -> - lists:usort([Opt | Acc]) - end, Defaults, Options). + fun + ({Opt, Val}, Acc) -> + lists:keystore(Opt, 1, Acc, {Opt, Val}); + (Opt, Acc) -> + lists:usort([Opt | Acc]) + end, + Defaults, + Options + ). %% @doc Apply a function to a maybe argument. --spec(maybe_apply(fun((maybe(A)) -> maybe(A)), maybe(A)) - -> maybe(A) when A :: any()). -maybe_apply(_Fun, undefined) -> undefined; +-spec maybe_apply(fun((maybe(A)) -> maybe(A)), maybe(A)) -> + maybe(A) +when + A :: any(). +maybe_apply(_Fun, undefined) -> + undefined; maybe_apply(Fun, Arg) when is_function(Fun) -> erlang:apply(Fun, [Arg]). --spec(compose(list(F)) -> G - when F :: fun((any()) -> any()), - G :: fun((any()) -> any())). +-spec compose(list(F)) -> G when + F :: fun((any()) -> any()), + G :: fun((any()) -> any()). compose([F | More]) -> compose(F, More). 
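Because merge_opts/2 and compose/1 above are easy to misread in diff form, here is a small usage sketch (not part of this patch; the bindings are illustrative): merge_opts/2 lets the second argument override the defaults, and compose/1 applies the listed functions left to right.

    Opts = emqx_misc:merge_opts([{backlog, 128}, nodelay], [{backlog, 512}]),
    %% Opts now carries {backlog, 512} while keeping the bare nodelay flag.
    F = emqx_misc:compose([fun(X) -> X + 1 end, fun(X) -> X * 2 end]),
    8 = F(3).   %% applied left to right: (3 + 1) * 2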
--spec(compose(F, G | [Gs]) -> C - when F :: fun((X1) -> X2), - G :: fun((X2) -> X3), - Gs :: [fun((Xn) -> Xn1)], - C :: fun((X1) -> Xm), - X3 :: any(), Xn :: any(), Xn1 :: any(), Xm :: any()). +-spec compose(F, G | [Gs]) -> C when + F :: fun((X1) -> X2), + G :: fun((X2) -> X3), + Gs :: [fun((Xn) -> Xn1)], + C :: fun((X1) -> Xm), + X3 :: any(), + Xn :: any(), + Xn1 :: any(), + Xm :: any(). compose(F, G) when is_function(G) -> fun(X) -> G(F(X)) end; compose(F, [G]) -> compose(F, G); compose(F, [G | More]) -> compose(compose(F, G), More). @@ -117,18 +129,13 @@ run_fold([Fun | More], Acc, State) -> %% @doc Pipeline pipeline([], Input, State) -> {ok, Input, State}; - pipeline([Fun | More], Input, State) -> case apply_fun(Fun, Input, State) of ok -> pipeline(More, Input, State); - {ok, NState} -> - pipeline(More, Input, NState); - {ok, Output, NState} -> - pipeline(More, Output, NState); - {error, Reason} -> - {error, Reason, State}; - {error, Reason, NState} -> - {error, Reason, NState} + {ok, NState} -> pipeline(More, Input, NState); + {ok, Output, NState} -> pipeline(More, Output, NState); + {error, Reason} -> {error, Reason, State}; + {error, Reason, NState} -> {error, Reason, NState} end. -compile({inline, [apply_fun/3]}). @@ -138,23 +145,29 @@ apply_fun(Fun, Input, State) -> {arity, 2} -> Fun(Input, State) end. --spec(start_timer(integer() | atom(), term()) -> maybe(reference())). +-spec start_timer(integer() | atom(), term()) -> maybe(reference()). start_timer(Interval, Msg) -> start_timer(Interval, self(), Msg). --spec(start_timer(integer() | atom(), pid() | atom(), term()) -> maybe(reference())). +-spec start_timer(integer() | atom(), pid() | atom(), term()) -> maybe(reference()). start_timer(Interval, Dest, Msg) when is_number(Interval) -> erlang:start_timer(erlang:ceil(Interval), Dest, Msg); -start_timer(_Atom, _Dest, _Msg) -> undefined. +start_timer(_Atom, _Dest, _Msg) -> + undefined. --spec(cancel_timer(maybe(reference())) -> ok). +-spec cancel_timer(maybe(reference())) -> ok. cancel_timer(Timer) when is_reference(Timer) -> case erlang:cancel_timer(Timer) of false -> - receive {timeout, Timer, _} -> ok after 0 -> ok end; - _ -> ok + receive + {timeout, Timer, _} -> ok + after 0 -> ok + end; + _ -> + ok end; -cancel_timer(_) -> ok. +cancel_timer(_) -> + ok. %% @doc Drain delivers drain_deliver() -> @@ -168,13 +181,13 @@ drain_deliver(0, Acc) -> drain_deliver(N, Acc) -> receive Deliver = {deliver, _Topic, _Msg} -> - drain_deliver(N-1, [Deliver | Acc]) + drain_deliver(N - 1, [Deliver | Acc]) after 0 -> lists:reverse(Acc) end. %% @doc Drain process 'DOWN' events. --spec(drain_down(pos_integer()) -> list(pid())). +-spec drain_down(pos_integer()) -> list(pid()). drain_down(Cnt) when Cnt > 0 -> drain_down(Cnt, []). @@ -183,7 +196,7 @@ drain_down(0, Acc) -> drain_down(Cnt, Acc) -> receive {'DOWN', _MRef, process, Pid, _Reason} -> - drain_down(Cnt-1, [Pid | Acc]) + drain_down(Cnt - 1, [Pid | Acc]) after 0 -> lists:reverse(Acc) end. @@ -193,26 +206,32 @@ drain_down(Cnt, Acc) -> %% `ok': There is nothing out of the ordinary. %% `shutdown': Some numbers (message queue length hit the limit), %% hence shutdown for greater good (system stability). --spec(check_oom(emqx_types:oom_policy()) -> ok | {shutdown, term()}). +-spec check_oom(emqx_types:oom_policy()) -> ok | {shutdown, term()}. check_oom(Policy) -> check_oom(self(), Policy). --spec(check_oom(pid(), emqx_types:oom_policy()) -> ok | {shutdown, term()}). 
-check_oom(_Pid, #{enable := false}) -> ok; -check_oom(Pid, #{max_message_queue_len := MaxQLen, - max_heap_size := MaxHeapSize}) -> +-spec check_oom(pid(), emqx_types:oom_policy()) -> ok | {shutdown, term()}. +check_oom(_Pid, #{enable := false}) -> + ok; +check_oom(Pid, #{ + max_message_queue_len := MaxQLen, + max_heap_size := MaxHeapSize +}) -> case process_info(Pid, [message_queue_len, total_heap_size]) of - undefined -> ok; + undefined -> + ok; [{message_queue_len, QLen}, {total_heap_size, HeapSize}] -> - do_check_oom([{QLen, MaxQLen, message_queue_too_long}, - {HeapSize, MaxHeapSize, proc_heap_too_large} - ]) + do_check_oom([ + {QLen, MaxQLen, message_queue_too_long}, + {HeapSize, MaxHeapSize, proc_heap_too_large} + ]) end. -do_check_oom([]) -> ok; +do_check_oom([]) -> + ok; do_check_oom([{Val, Max, Reason} | Rest]) -> case is_integer(Max) andalso (0 < Max) andalso (Max < Val) of - true -> {shutdown, Reason}; + true -> {shutdown, Reason}; false -> do_check_oom(Rest) end. @@ -220,53 +239,59 @@ tune_heap_size(#{enable := false}) -> ok; %% If the max_heap_size is set to zero, the limit is disabled. tune_heap_size(#{max_heap_size := MaxHeapSize}) when MaxHeapSize > 0 -> - MaxSize = case erlang:system_info(wordsize) of - 8 -> % arch_64 - (1 bsl 59) - 1; - 4 -> % arch_32 - (1 bsl 27) - 1 - end, - OverflowedSize = case erlang:trunc(MaxHeapSize * 1.5) of - SZ when SZ > MaxSize -> MaxSize; - SZ -> SZ - end, + MaxSize = + case erlang:system_info(wordsize) of + % arch_64 + 8 -> + (1 bsl 59) - 1; + % arch_32 + 4 -> + (1 bsl 27) - 1 + end, + OverflowedSize = + case erlang:trunc(MaxHeapSize * 1.5) of + SZ when SZ > MaxSize -> MaxSize; + SZ -> SZ + end, erlang:process_flag(max_heap_size, #{ size => OverflowedSize, kill => true, error_logger => true }). - --spec(proc_name(atom(), pos_integer()) -> atom()). +-spec proc_name(atom(), pos_integer()) -> atom(). proc_name(Mod, Id) -> list_to_atom(lists:concat([Mod, "_", Id])). %% Get Proc's Stats. --spec(proc_stats() -> emqx_types:stats()). +-spec proc_stats() -> emqx_types:stats(). proc_stats() -> proc_stats(self()). --spec(proc_stats(pid()) -> emqx_types:stats()). +-spec proc_stats(pid()) -> emqx_types:stats(). proc_stats(Pid) -> - case process_info(Pid, [message_queue_len, - heap_size, - total_heap_size, - reductions, - memory]) of + case + process_info(Pid, [ + message_queue_len, + heap_size, + total_heap_size, + reductions, + memory + ]) + of undefined -> []; - [{message_queue_len, Len} | ProcStats] -> - [{mailbox_len, Len} | ProcStats] + [{message_queue_len, Len} | ProcStats] -> [{mailbox_len, Len} | ProcStats] end. rand_seed() -> rand:seed(exsplus, erlang:timestamp()). --spec(now_to_secs(erlang:timestamp()) -> pos_integer()). +-spec now_to_secs(erlang:timestamp()) -> pos_integer(). now_to_secs({MegaSecs, Secs, _MicroSecs}) -> MegaSecs * 1000000 + Secs. --spec(now_to_ms(erlang:timestamp()) -> pos_integer()). +-spec now_to_ms(erlang:timestamp()) -> pos_integer(). now_to_ms({MegaSecs, Secs, MicroSecs}) -> - (MegaSecs * 1000000 + Secs) * 1000 + round(MicroSecs/1000). + (MegaSecs * 1000000 + Secs) * 1000 + round(MicroSecs / 1000). %% lists:index_of/2 index_of(E, L) -> @@ -277,33 +302,33 @@ index_of(_E, _I, []) -> index_of(E, I, [E | _]) -> I; index_of(E, I, [_ | L]) -> - index_of(E, I+1, L). + index_of(E, I + 1, L). --spec(bin2hexstr_A_F(binary()) -> binary()). +-spec bin2hexstr_A_F(binary()) -> binary(). bin2hexstr_A_F(B) when is_binary(B) -> - << <<(int2hexchar(H, upper)), (int2hexchar(L, upper))>> || <> <= B>>. 
+ <<<<(int2hexchar(H, upper)), (int2hexchar(L, upper))>> || <> <= B>>. --spec(bin2hexstr_a_f(binary()) -> binary()). +-spec bin2hexstr_a_f(binary()) -> binary(). bin2hexstr_a_f(B) when is_binary(B) -> - << <<(int2hexchar(H, lower)), (int2hexchar(L, lower))>> || <> <= B>>. + <<<<(int2hexchar(H, lower)), (int2hexchar(L, lower))>> || <> <= B>>. int2hexchar(I, _) when I >= 0 andalso I < 10 -> I + $0; int2hexchar(I, upper) -> I - 10 + $A; int2hexchar(I, lower) -> I - 10 + $a. --spec(hexstr2bin(binary()) -> binary()). +-spec hexstr2bin(binary()) -> binary(). hexstr2bin(B) when is_binary(B) -> - << <<(hexchar2int(H)*16 + hexchar2int(L))>> || <> <= B>>. + <<<<(hexchar2int(H) * 16 + hexchar2int(L))>> || <> <= B>>. hexchar2int(I) when I >= $0 andalso I =< $9 -> I - $0; hexchar2int(I) when I >= $A andalso I =< $F -> I - $A + 10; hexchar2int(I) when I >= $a andalso I =< $f -> I - $a + 10. --spec(gen_id() -> list()). +-spec gen_id() -> list(). gen_id() -> gen_id(?SHORT). --spec(gen_id(integer()) -> list()). +-spec gen_id(integer()) -> list(). gen_id(Len) -> BitLen = Len * 4, <> = crypto:strong_rand_bytes(Len div 2), @@ -356,8 +381,9 @@ explain_posix(NotPosix) -> NotPosix. int_to_hex(I, N) when is_integer(I), I >= 0 -> int_to_hex([], I, 1, N). -int_to_hex(L, I, Count, N) - when I < 16 -> +int_to_hex(L, I, Count, N) when + I < 16 +-> pad([int_to_hex(I) | L], N - Count); int_to_hex(L, I, Count, N) -> int_to_hex([int_to_hex(I rem 16) | L], I div 16, Count + 1, N). @@ -380,7 +406,7 @@ ipv6_probe_test() -> true -> ?assertEqual([{ipv6_probe, true}], ipv6_probe([])) catch - _ : _ -> + _:_ -> ok end. diff --git a/apps/emqx/src/emqx_mountpoint.erl b/apps/emqx/src/emqx_mountpoint.erl index bb707de86..5c4c14a42 100644 --- a/apps/emqx/src/emqx_mountpoint.erl +++ b/apps/emqx/src/emqx_mountpoint.erl @@ -20,20 +20,22 @@ -include("emqx_placeholder.hrl"). -include("types.hrl"). --export([ mount/2 - , unmount/2 - ]). +-export([ + mount/2, + unmount/2 +]). -export([replvar/2]). -export_type([mountpoint/0]). --type(mountpoint() :: binary()). +-type mountpoint() :: binary(). --spec(mount(maybe(mountpoint()), Any) -> Any - when Any :: emqx_types:topic() - | emqx_types:message() - | emqx_types:topic_filters()). +-spec mount(maybe(mountpoint()), Any) -> Any when + Any :: + emqx_types:topic() + | emqx_types:message() + | emqx_types:topic_filters(). mount(undefined, Any) -> Any; mount(MountPoint, Topic) when is_binary(Topic) -> @@ -48,33 +50,35 @@ mount(MountPoint, TopicFilters) when is_list(TopicFilters) -> prefix(MountPoint, Topic) -> <>. --spec(unmount(maybe(mountpoint()), Any) -> Any - when Any :: emqx_types:topic() - | emqx_types:message()). +-spec unmount(maybe(mountpoint()), Any) -> Any when + Any :: + emqx_types:topic() + | emqx_types:message(). unmount(undefined, Any) -> Any; unmount(MountPoint, Topic) when is_binary(Topic) -> case string:prefix(Topic, MountPoint) of nomatch -> Topic; - Topic1 -> Topic1 + Topic1 -> Topic1 end; unmount(MountPoint, Msg = #message{topic = Topic}) -> case string:prefix(Topic, MountPoint) of nomatch -> Msg; - Topic1 -> Msg#message{topic = Topic1} + Topic1 -> Msg#message{topic = Topic1} end. --spec(replvar(maybe(mountpoint()), map()) -> maybe(mountpoint())). +-spec replvar(maybe(mountpoint()), map()) -> maybe(mountpoint()). 
replvar(undefined, _Vars) -> undefined; replvar(MountPoint, Vars) -> - ClientID = maps:get(clientid, Vars, undefined), - UserName = maps:get(username, Vars, undefined), + ClientID = maps:get(clientid, Vars, undefined), + UserName = maps:get(username, Vars, undefined), EndpointName = maps:get(endpoint_name, Vars, undefined), - List = [ {?PH_CLIENTID, ClientID} - , {?PH_USERNAME, UserName} - , {?PH_ENDPOINT_NAME, EndpointName} - ], + List = [ + {?PH_CLIENTID, ClientID}, + {?PH_USERNAME, UserName}, + {?PH_ENDPOINT_NAME, EndpointName} + ], lists:foldl(fun feed_var/2, MountPoint, List). feed_var({_PlaceHolder, undefined}, MountPoint) -> diff --git a/apps/emqx/src/emqx_mqtt_caps.erl b/apps/emqx/src/emqx_mqtt_caps.erl index 1c24dd3e4..e59bcfd0e 100644 --- a/apps/emqx/src/emqx_mqtt_caps.erl +++ b/apps/emqx/src/emqx_mqtt_caps.erl @@ -20,100 +20,128 @@ -include("emqx_mqtt.hrl"). -include("types.hrl"). --export([ check_pub/2 - , check_sub/3 - ]). +-export([ + check_pub/2, + check_sub/3 +]). --export([ get_caps/1 - ]). +-export([get_caps/1]). -export_type([caps/0]). --type(caps() :: #{max_packet_size => integer(), - max_clientid_len => integer(), - max_topic_alias => integer(), - max_topic_levels => integer(), - max_qos_allowed => emqx_types:qos(), - retain_available => boolean(), - wildcard_subscription => boolean(), - subscription_identifiers => boolean(), - shared_subscription => boolean() - }). +-type caps() :: #{ + max_packet_size => integer(), + max_clientid_len => integer(), + max_topic_alias => integer(), + max_topic_levels => integer(), + max_qos_allowed => emqx_types:qos(), + retain_available => boolean(), + wildcard_subscription => boolean(), + subscription_identifiers => boolean(), + shared_subscription => boolean() +}. -define(MAX_TOPIC_LEVELS, 65535). --define(PUBCAP_KEYS, [max_topic_levels, - max_qos_allowed, - retain_available - ]). +-define(PUBCAP_KEYS, [ + max_topic_levels, + max_qos_allowed, + retain_available +]). --define(SUBCAP_KEYS, [max_topic_levels, - max_qos_allowed, - wildcard_subscription, - shared_subscription - ]). +-define(SUBCAP_KEYS, [ + max_topic_levels, + max_qos_allowed, + wildcard_subscription, + shared_subscription +]). --define(DEFAULT_CAPS, #{max_packet_size => ?MAX_PACKET_SIZE, - max_clientid_len => ?MAX_CLIENTID_LEN, - max_topic_alias => ?MAX_TOPIC_AlIAS, - max_topic_levels => ?MAX_TOPIC_LEVELS, - max_qos_allowed => ?QOS_2, - retain_available => true, - wildcard_subscription => true, - subscription_identifiers => true, - shared_subscription => true - }). +-define(DEFAULT_CAPS, #{ + max_packet_size => ?MAX_PACKET_SIZE, + max_clientid_len => ?MAX_CLIENTID_LEN, + max_topic_alias => ?MAX_TOPIC_AlIAS, + max_topic_levels => ?MAX_TOPIC_LEVELS, + max_qos_allowed => ?QOS_2, + retain_available => true, + wildcard_subscription => true, + subscription_identifiers => true, + shared_subscription => true +}). --spec(check_pub(emqx_types:zone(), - #{qos := emqx_types:qos(), - retain := boolean(), - topic := emqx_types:topic()}) - -> ok_or_error(emqx_types:reason_code())). +-spec check_pub( + emqx_types:zone(), + #{ + qos := emqx_types:qos(), + retain := boolean(), + topic := emqx_types:topic() + } +) -> + ok_or_error(emqx_types:reason_code()). check_pub(Zone, Flags) when is_map(Flags) -> - do_check_pub(case maps:take(topic, Flags) of - {Topic, Flags1} -> - Flags1#{topic_levels => emqx_topic:levels(Topic)}; - error -> - Flags - end, maps:with(?PUBCAP_KEYS, get_caps(Zone))). 
+ do_check_pub( + case maps:take(topic, Flags) of + {Topic, Flags1} -> + Flags1#{topic_levels => emqx_topic:levels(Topic)}; + error -> + Flags + end, + maps:with(?PUBCAP_KEYS, get_caps(Zone)) + ). -do_check_pub(#{topic_levels := Levels}, #{max_topic_levels := Limit}) - when Limit > 0, Levels > Limit -> +do_check_pub(#{topic_levels := Levels}, #{max_topic_levels := Limit}) when + Limit > 0, Levels > Limit +-> {error, ?RC_TOPIC_NAME_INVALID}; -do_check_pub(#{qos := QoS}, #{max_qos_allowed := MaxQoS}) - when QoS > MaxQoS -> +do_check_pub(#{qos := QoS}, #{max_qos_allowed := MaxQoS}) when + QoS > MaxQoS +-> {error, ?RC_QOS_NOT_SUPPORTED}; do_check_pub(#{retain := true}, #{retain_available := false}) -> {error, ?RC_RETAIN_NOT_SUPPORTED}; -do_check_pub(_Flags, _Caps) -> ok. +do_check_pub(_Flags, _Caps) -> + ok. --spec(check_sub(emqx_types:zone(), - emqx_types:topic(), - emqx_types:subopts()) - -> ok_or_error(emqx_types:reason_code())). +-spec check_sub( + emqx_types:zone(), + emqx_types:topic(), + emqx_types:subopts() +) -> + ok_or_error(emqx_types:reason_code()). check_sub(Zone, Topic, SubOpts) -> Caps = maps:with(?SUBCAP_KEYS, get_caps(Zone)), Flags = lists:foldl( - fun(max_topic_levels, Map) -> - Map#{topic_levels => emqx_topic:levels(Topic)}; - (wildcard_subscription, Map) -> - Map#{is_wildcard => emqx_topic:wildcard(Topic)}; - (shared_subscription, Map) -> - Map#{is_shared => maps:is_key(share, SubOpts)}; - (_Key, Map) -> Map %% Ignore - end, #{}, maps:keys(Caps)), + fun + (max_topic_levels, Map) -> + Map#{topic_levels => emqx_topic:levels(Topic)}; + (wildcard_subscription, Map) -> + Map#{is_wildcard => emqx_topic:wildcard(Topic)}; + (shared_subscription, Map) -> + Map#{is_shared => maps:is_key(share, SubOpts)}; + %% Ignore + (_Key, Map) -> + Map + end, + #{}, + maps:keys(Caps) + ), do_check_sub(Flags, Caps). -do_check_sub(#{topic_levels := Levels}, #{max_topic_levels := Limit}) - when Limit > 0, Levels > Limit -> +do_check_sub(#{topic_levels := Levels}, #{max_topic_levels := Limit}) when + Limit > 0, Levels > Limit +-> {error, ?RC_TOPIC_FILTER_INVALID}; do_check_sub(#{is_wildcard := true}, #{wildcard_subscription := false}) -> {error, ?RC_WILDCARD_SUBSCRIPTIONS_NOT_SUPPORTED}; do_check_sub(#{is_shared := true}, #{shared_subscription := false}) -> {error, ?RC_SHARED_SUBSCRIPTIONS_NOT_SUPPORTED}; -do_check_sub(_Flags, _Caps) -> ok. +do_check_sub(_Flags, _Caps) -> + ok. get_caps(Zone) -> - lists:foldl(fun({K, V}, Acc) -> + lists:foldl( + fun({K, V}, Acc) -> Acc#{K => emqx_config:get_zone_conf(Zone, [mqtt, K], V)} - end, #{}, maps:to_list(?DEFAULT_CAPS)). + end, + #{}, + maps:to_list(?DEFAULT_CAPS) + ). diff --git a/apps/emqx/src/emqx_mqtt_props.erl b/apps/emqx/src/emqx_mqtt_props.erl index c0b982a2e..e58ed54a4 100644 --- a/apps/emqx/src/emqx_mqtt_props.erl +++ b/apps/emqx/src/emqx_mqtt_props.erl @@ -19,86 +19,97 @@ -include("emqx_mqtt.hrl"). --export([ id/1 - , name/1 - , filter/2 - , validate/1 - , new/0 - ]). +-export([ + id/1, + name/1, + filter/2, + validate/1, + new/0 +]). %% For tests -export([all/0]). --export([ set/3 - , get/3 - ]). +-export([ + set/3, + get/3 +]). --type(prop_name() :: atom()). --type(prop_id() :: pos_integer()). +-type prop_name() :: atom(). +-type prop_id() :: pos_integer(). 
--define(PROPS_TABLE, - #{16#01 => {'Payload-Format-Indicator', 'Byte', [?PUBLISH]}, - 16#02 => {'Message-Expiry-Interval', 'Four-Byte-Integer', [?PUBLISH]}, - 16#03 => {'Content-Type', 'UTF8-Encoded-String', [?PUBLISH]}, - 16#08 => {'Response-Topic', 'UTF8-Encoded-String', [?PUBLISH]}, - 16#09 => {'Correlation-Data', 'Binary-Data', [?PUBLISH]}, - 16#0B => {'Subscription-Identifier', 'Variable-Byte-Integer', [?PUBLISH, ?SUBSCRIBE]}, - 16#11 => {'Session-Expiry-Interval', 'Four-Byte-Integer', [?CONNECT, ?CONNACK, ?DISCONNECT]}, - 16#12 => {'Assigned-Client-Identifier', 'UTF8-Encoded-String', [?CONNACK]}, - 16#13 => {'Server-Keep-Alive', 'Two-Byte-Integer', [?CONNACK]}, - 16#15 => {'Authentication-Method', 'UTF8-Encoded-String', [?CONNECT, ?CONNACK, ?AUTH]}, - 16#16 => {'Authentication-Data', 'Binary-Data', [?CONNECT, ?CONNACK, ?AUTH]}, - 16#17 => {'Request-Problem-Information', 'Byte', [?CONNECT]}, - 16#18 => {'Will-Delay-Interval', 'Four-Byte-Integer', ['WILL']}, - 16#19 => {'Request-Response-Information', 'Byte', [?CONNECT]}, - 16#1A => {'Response-Information', 'UTF8-Encoded-String', [?CONNACK]}, - 16#1C => {'Server-Reference', 'UTF8-Encoded-String', [?CONNACK, ?DISCONNECT]}, - 16#1F => {'Reason-String', 'UTF8-Encoded-String', [?CONNACK, ?DISCONNECT, ?PUBACK, - ?PUBREC, ?PUBREL, ?PUBCOMP, - ?SUBACK, ?UNSUBACK, ?AUTH]}, - 16#21 => {'Receive-Maximum', 'Two-Byte-Integer', [?CONNECT, ?CONNACK]}, - 16#22 => {'Topic-Alias-Maximum', 'Two-Byte-Integer', [?CONNECT, ?CONNACK]}, - 16#23 => {'Topic-Alias', 'Two-Byte-Integer', [?PUBLISH]}, - 16#24 => {'Maximum-QoS', 'Byte', [?CONNACK]}, - 16#25 => {'Retain-Available', 'Byte', [?CONNACK]}, - 16#26 => {'User-Property', 'UTF8-String-Pair', 'ALL'}, - 16#27 => {'Maximum-Packet-Size', 'Four-Byte-Integer', [?CONNECT, ?CONNACK]}, - 16#28 => {'Wildcard-Subscription-Available', 'Byte', [?CONNACK]}, - 16#29 => {'Subscription-Identifier-Available', 'Byte', [?CONNACK]}, - 16#2A => {'Shared-Subscription-Available', 'Byte', [?CONNACK]} - }). 
+-define(PROPS_TABLE, #{ + 16#01 => {'Payload-Format-Indicator', 'Byte', [?PUBLISH]}, + 16#02 => {'Message-Expiry-Interval', 'Four-Byte-Integer', [?PUBLISH]}, + 16#03 => {'Content-Type', 'UTF8-Encoded-String', [?PUBLISH]}, + 16#08 => {'Response-Topic', 'UTF8-Encoded-String', [?PUBLISH]}, + 16#09 => {'Correlation-Data', 'Binary-Data', [?PUBLISH]}, + 16#0B => {'Subscription-Identifier', 'Variable-Byte-Integer', [?PUBLISH, ?SUBSCRIBE]}, + 16#11 => {'Session-Expiry-Interval', 'Four-Byte-Integer', [?CONNECT, ?CONNACK, ?DISCONNECT]}, + 16#12 => {'Assigned-Client-Identifier', 'UTF8-Encoded-String', [?CONNACK]}, + 16#13 => {'Server-Keep-Alive', 'Two-Byte-Integer', [?CONNACK]}, + 16#15 => {'Authentication-Method', 'UTF8-Encoded-String', [?CONNECT, ?CONNACK, ?AUTH]}, + 16#16 => {'Authentication-Data', 'Binary-Data', [?CONNECT, ?CONNACK, ?AUTH]}, + 16#17 => {'Request-Problem-Information', 'Byte', [?CONNECT]}, + 16#18 => {'Will-Delay-Interval', 'Four-Byte-Integer', ['WILL']}, + 16#19 => {'Request-Response-Information', 'Byte', [?CONNECT]}, + 16#1A => {'Response-Information', 'UTF8-Encoded-String', [?CONNACK]}, + 16#1C => {'Server-Reference', 'UTF8-Encoded-String', [?CONNACK, ?DISCONNECT]}, + 16#1F => + {'Reason-String', 'UTF8-Encoded-String', [ + ?CONNACK, + ?DISCONNECT, + ?PUBACK, + ?PUBREC, + ?PUBREL, + ?PUBCOMP, + ?SUBACK, + ?UNSUBACK, + ?AUTH + ]}, + 16#21 => {'Receive-Maximum', 'Two-Byte-Integer', [?CONNECT, ?CONNACK]}, + 16#22 => {'Topic-Alias-Maximum', 'Two-Byte-Integer', [?CONNECT, ?CONNACK]}, + 16#23 => {'Topic-Alias', 'Two-Byte-Integer', [?PUBLISH]}, + 16#24 => {'Maximum-QoS', 'Byte', [?CONNACK]}, + 16#25 => {'Retain-Available', 'Byte', [?CONNACK]}, + 16#26 => {'User-Property', 'UTF8-String-Pair', 'ALL'}, + 16#27 => {'Maximum-Packet-Size', 'Four-Byte-Integer', [?CONNECT, ?CONNACK]}, + 16#28 => {'Wildcard-Subscription-Available', 'Byte', [?CONNACK]}, + 16#29 => {'Subscription-Identifier-Available', 'Byte', [?CONNACK]}, + 16#2A => {'Shared-Subscription-Available', 'Byte', [?CONNACK]} +}). --spec(id(prop_name()) -> prop_id()). -id('Payload-Format-Indicator') -> 16#01; -id('Message-Expiry-Interval') -> 16#02; -id('Content-Type') -> 16#03; -id('Response-Topic') -> 16#08; -id('Correlation-Data') -> 16#09; -id('Subscription-Identifier') -> 16#0B; -id('Session-Expiry-Interval') -> 16#11; -id('Assigned-Client-Identifier') -> 16#12; -id('Server-Keep-Alive') -> 16#13; -id('Authentication-Method') -> 16#15; -id('Authentication-Data') -> 16#16; -id('Request-Problem-Information') -> 16#17; -id('Will-Delay-Interval') -> 16#18; -id('Request-Response-Information') -> 16#19; -id('Response-Information') -> 16#1A; -id('Server-Reference') -> 16#1C; -id('Reason-String') -> 16#1F; -id('Receive-Maximum') -> 16#21; -id('Topic-Alias-Maximum') -> 16#22; -id('Topic-Alias') -> 16#23; -id('Maximum-QoS') -> 16#24; -id('Retain-Available') -> 16#25; -id('User-Property') -> 16#26; -id('Maximum-Packet-Size') -> 16#27; -id('Wildcard-Subscription-Available') -> 16#28; +-spec id(prop_name()) -> prop_id(). 
+id('Payload-Format-Indicator') -> 16#01; +id('Message-Expiry-Interval') -> 16#02; +id('Content-Type') -> 16#03; +id('Response-Topic') -> 16#08; +id('Correlation-Data') -> 16#09; +id('Subscription-Identifier') -> 16#0B; +id('Session-Expiry-Interval') -> 16#11; +id('Assigned-Client-Identifier') -> 16#12; +id('Server-Keep-Alive') -> 16#13; +id('Authentication-Method') -> 16#15; +id('Authentication-Data') -> 16#16; +id('Request-Problem-Information') -> 16#17; +id('Will-Delay-Interval') -> 16#18; +id('Request-Response-Information') -> 16#19; +id('Response-Information') -> 16#1A; +id('Server-Reference') -> 16#1C; +id('Reason-String') -> 16#1F; +id('Receive-Maximum') -> 16#21; +id('Topic-Alias-Maximum') -> 16#22; +id('Topic-Alias') -> 16#23; +id('Maximum-QoS') -> 16#24; +id('Retain-Available') -> 16#25; +id('User-Property') -> 16#26; +id('Maximum-Packet-Size') -> 16#27; +id('Wildcard-Subscription-Available') -> 16#28; id('Subscription-Identifier-Available') -> 16#29; -id('Shared-Subscription-Available') -> 16#2A; -id(Name) -> error({bad_property, Name}). +id('Shared-Subscription-Available') -> 16#2A; +id(Name) -> error({bad_property, Name}). --spec(name(prop_id()) -> prop_name()). +-spec name(prop_id()) -> prop_name(). name(16#01) -> 'Payload-Format-Indicator'; name(16#02) -> 'Message-Expiry-Interval'; name(16#03) -> 'Content-Type'; @@ -126,33 +137,36 @@ name(16#27) -> 'Maximum-Packet-Size'; name(16#28) -> 'Wildcard-Subscription-Available'; name(16#29) -> 'Subscription-Identifier-Available'; name(16#2A) -> 'Shared-Subscription-Available'; -name(Id) -> error({unsupported_property, Id}). +name(Id) -> error({unsupported_property, Id}). --spec(filter(emqx_types:packet_type(), emqx_types:properties()) - -> emqx_types:properties()). -filter(PacketType, Props) when is_map(Props), - PacketType >= ?CONNECT, - PacketType =< ?AUTH -> +-spec filter(emqx_types:packet_type(), emqx_types:properties()) -> + emqx_types:properties(). +filter(PacketType, Props) when + is_map(Props), + PacketType >= ?CONNECT, + PacketType =< ?AUTH +-> F = fun(Name, _) -> - case maps:find(id(Name), ?PROPS_TABLE) of - {ok, {Name, _Type, 'ALL'}} -> - true; - {ok, {Name, _Type, AllowedTypes}} -> - lists:member(PacketType, AllowedTypes); - error -> false - end - end, + case maps:find(id(Name), ?PROPS_TABLE) of + {ok, {Name, _Type, 'ALL'}} -> + true; + {ok, {Name, _Type, AllowedTypes}} -> + lists:member(PacketType, AllowedTypes); + error -> + false + end + end, maps:filter(F, Props). --spec(validate(emqx_types:properties()) -> ok). +-spec validate(emqx_types:properties()) -> ok. validate(Props) when is_map(Props) -> lists:foreach(fun validate_prop/1, maps:to_list(Props)). validate_prop(Prop = {Name, Val}) -> case maps:find(id(Name), ?PROPS_TABLE) of {ok, {Name, Type, _}} -> - validate_value(Type, Val) - orelse error({bad_property_value, Prop}); + validate_value(Type, Val) orelse + error({bad_property_value, Prop}); error -> error({bad_property, Name}) end. 
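Editor's note: the property table above drives both filter/2 and validate/1. The sketch below shows the intended behaviour; it assumes these functions live in the emqx_mqtt_props module (as in upstream EMQX), that the ?PUBLISH macro comes from emqx_mqtt.hrl, and the example module name is made up.

    -module(props_filter_example).
    -include_lib("emqx/include/emqx_mqtt.hrl").
    -export([demo/0]).

    demo() ->
        Props = #{
            'Topic-Alias' => 1,                       %% allowed for PUBLISH
            'Reason-String' => <<"ok">>,              %% not allowed for PUBLISH
            'User-Property' => [{<<"k">>, <<"v">>}]   %% allowed for 'ALL' packet types
        },
        %% filter/2 drops properties not permitted for the given packet type
        Filtered = emqx_mqtt_props:filter(?PUBLISH, Props),
        false = maps:is_key('Reason-String', Filtered),
        true = maps:is_key('Topic-Alias', Filtered),
        true = maps:is_key('User-Property', Filtered),
        %% validate/1 returns ok for well-typed values, errors otherwise
        ok = emqx_mqtt_props:validate(Filtered).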
@@ -166,23 +180,28 @@ validate_value('Four-Byte-Integer', Val) -> validate_value('Variable-Byte-Integer', Val) -> is_integer(Val) andalso 0 =< Val andalso Val =< 16#7FFFFFFF; validate_value('UTF8-String-Pair', {Name, Val}) -> - validate_value('UTF8-Encoded-String', Name) - andalso validate_value('UTF8-Encoded-String', Val); + validate_value('UTF8-Encoded-String', Name) andalso + validate_value('UTF8-Encoded-String', Val); validate_value('UTF8-String-Pair', Pairs) when is_list(Pairs) -> - lists:foldl(fun(Pair, OK) -> - OK andalso validate_value('UTF8-String-Pair', Pair) - end, true, Pairs); -validate_value('UTF8-Encoded-String', Val) -> + lists:foldl( + fun(Pair, OK) -> + OK andalso validate_value('UTF8-String-Pair', Pair) + end, + true, + Pairs + ); +validate_value('UTF8-Encoded-String', Val) -> is_binary(Val); validate_value('Binary-Data', Val) -> is_binary(Val); -validate_value(_Type, _Val) -> false. +validate_value(_Type, _Val) -> + false. --spec(new() -> map()). +-spec new() -> map(). new() -> #{}. --spec(all() -> map()). +-spec all() -> map(). all() -> ?PROPS_TABLE. set(Name, Value, undefined) -> @@ -194,4 +213,3 @@ get(_Name, undefined, Default) -> Default; get(Name, Props, Default) -> maps:get(Name, Props, Default). - diff --git a/apps/emqx/src/emqx_mqueue.erl b/apps/emqx/src/emqx_mqueue.erl index 13f759015..f8556bc39 100644 --- a/apps/emqx/src/emqx_mqueue.erl +++ b/apps/emqx/src/emqx_mqueue.erl @@ -53,39 +53,43 @@ -include("types.hrl"). -include("emqx_mqtt.hrl"). --export([ init/1 - , info/1 - , info/2 - ]). +-export([ + init/1, + info/1, + info/2 +]). --export([ is_empty/1 - , len/1 - , max_len/1 - , in/2 - , out/1 - , stats/1 - , dropped/1 - ]). +-export([ + is_empty/1, + len/1, + max_len/1, + in/2, + out/1, + stats/1, + dropped/1 +]). -define(NO_PRIORITY_TABLE, disabled). -export_type([mqueue/0, options/0]). --type(topic() :: emqx_types:topic()). --type(priority() :: infinity | integer()). --type(pq() :: emqx_pqueue:q()). --type(count() :: non_neg_integer()). --type(p_table() :: ?NO_PRIORITY_TABLE | #{topic() := priority()}). --type(options() :: #{max_len := count(), - priorities => p_table(), - default_priority => highest | lowest, - store_qos0 => boolean() - }). --type(message() :: emqx_types:message()). +-type topic() :: emqx_types:topic(). +-type priority() :: infinity | integer(). +-type pq() :: emqx_pqueue:q(). +-type count() :: non_neg_integer(). +-type p_table() :: ?NO_PRIORITY_TABLE | #{topic() := priority()}. +-type options() :: #{ + max_len := count(), + priorities => p_table(), + default_priority => highest | lowest, + store_qos0 => boolean() +}. +-type message() :: emqx_types:message(). --type(stat() :: {len, non_neg_integer()} - | {max_len, non_neg_integer()} - | {dropped, non_neg_integer()}). +-type stat() :: + {len, non_neg_integer()} + | {max_len, non_neg_integer()} + | {dropped, non_neg_integer()}. -define(PQUEUE, emqx_pqueue). -define(LOWEST_PRIORITY, 0). @@ -94,43 +98,45 @@ -define(INFO_KEYS, [store_qos0, max_len, len, dropped]). -record(shift_opts, { - multiplier :: non_neg_integer(), - base :: integer() - }). + multiplier :: non_neg_integer(), + base :: integer() +}). -record(mqueue, { - store_qos0 = false :: boolean(), - max_len = ?MAX_LEN_INFINITY :: count(), - len = 0 :: count(), - dropped = 0 :: count(), - p_table = ?NO_PRIORITY_TABLE :: p_table(), - default_p = ?LOWEST_PRIORITY :: priority(), - q = ?PQUEUE:new() :: pq(), - shift_opts :: #shift_opts{}, - last_prio :: non_neg_integer() | undefined, - p_credit :: non_neg_integer() | undefined - }). 
+ store_qos0 = false :: boolean(), + max_len = ?MAX_LEN_INFINITY :: count(), + len = 0 :: count(), + dropped = 0 :: count(), + p_table = ?NO_PRIORITY_TABLE :: p_table(), + default_p = ?LOWEST_PRIORITY :: priority(), + q = ?PQUEUE:new() :: pq(), + shift_opts :: #shift_opts{}, + last_prio :: non_neg_integer() | undefined, + p_credit :: non_neg_integer() | undefined +}). --type(mqueue() :: #mqueue{}). +-type mqueue() :: #mqueue{}. --spec(init(options()) -> mqueue()). +-spec init(options()) -> mqueue(). init(Opts = #{max_len := MaxLen0, store_qos0 := QoS_0}) -> - MaxLen = case (is_integer(MaxLen0) andalso MaxLen0 > ?MAX_LEN_INFINITY) of - true -> MaxLen0; - false -> ?MAX_LEN_INFINITY - end, - #mqueue{max_len = MaxLen, - store_qos0 = QoS_0, - p_table = get_opt(priorities, Opts, ?NO_PRIORITY_TABLE), - default_p = get_priority_opt(Opts), - shift_opts = get_shift_opt(Opts) - }. + MaxLen = + case (is_integer(MaxLen0) andalso MaxLen0 > ?MAX_LEN_INFINITY) of + true -> MaxLen0; + false -> ?MAX_LEN_INFINITY + end, + #mqueue{ + max_len = MaxLen, + store_qos0 = QoS_0, + p_table = get_opt(priorities, Opts, ?NO_PRIORITY_TABLE), + default_p = get_priority_opt(Opts), + shift_opts = get_shift_opt(Opts) + }. --spec(info(mqueue()) -> emqx_types:infos()). +-spec info(mqueue()) -> emqx_types:infos(). info(MQ) -> maps:from_list([{Key, info(Key, MQ)} || Key <- ?INFO_KEYS]). --spec(info(atom(), mqueue()) -> term()). +-spec info(atom(), mqueue()) -> term(). info(store_qos0, #mqueue{store_qos0 = True}) -> True; info(max_len, #mqueue{max_len = MaxLen}) -> @@ -147,25 +153,30 @@ len(#mqueue{len = Len}) -> Len. max_len(#mqueue{max_len = MaxLen}) -> MaxLen. %% @doc Return number of dropped messages. --spec(dropped(mqueue()) -> count()). +-spec dropped(mqueue()) -> count(). dropped(#mqueue{dropped = Dropped}) -> Dropped. %% @doc Stats of the mqueue --spec(stats(mqueue()) -> [stat()]). +-spec stats(mqueue()) -> [stat()]. stats(#mqueue{max_len = MaxLen, dropped = Dropped} = MQ) -> [{len, len(MQ)}, {max_len, MaxLen}, {dropped, Dropped}]. %% @doc Enqueue a message. --spec(in(message(), mqueue()) -> {maybe(message()), mqueue()}). +-spec in(message(), mqueue()) -> {maybe(message()), mqueue()}. in(Msg = #message{qos = ?QOS_0}, MQ = #mqueue{store_qos0 = false}) -> {_Dropped = Msg, MQ}; -in(Msg = #message{topic = Topic}, MQ = #mqueue{default_p = Dp, - p_table = PTab, - q = Q, - len = Len, - max_len = MaxLen, - dropped = Dropped - } = MQ) -> +in( + Msg = #message{topic = Topic}, + MQ = + #mqueue{ + default_p = Dp, + p_table = PTab, + q = Q, + len = Len, + max_len = MaxLen, + dropped = Dropped + } = MQ +) -> Priority = get_priority(Topic, PTab, Dp), PLen = ?PQUEUE:plen(Priority, Q), case MaxLen =/= ?MAX_LEN_INFINITY andalso PLen =:= MaxLen of @@ -178,24 +189,26 @@ in(Msg = #message{topic = Topic}, MQ = #mqueue{default_p = Dp, {_DroppedMsg = undefined, MQ#mqueue{len = Len + 1, q = ?PQUEUE:in(Msg, Priority, Q)}} end. --spec(out(mqueue()) -> {empty | {value, message()}, mqueue()}). +-spec out(mqueue()) -> {empty | {value, message()}, mqueue()}. 
out(MQ = #mqueue{len = 0, q = Q}) -> - 0 = ?PQUEUE:len(Q), %% assert, in this case, ?PQUEUE:len should be very cheap + %% assert, in this case, ?PQUEUE:len should be very cheap + 0 = ?PQUEUE:len(Q), {empty, MQ}; out(MQ = #mqueue{q = Q, len = Len, last_prio = undefined, shift_opts = ShiftOpts}) -> - {{value, Val, Prio}, Q1} = ?PQUEUE:out_p(Q), %% Shouldn't fail, since we've checked the length + %% Shouldn't fail, since we've checked the length + {{value, Val, Prio}, Q1} = ?PQUEUE:out_p(Q), MQ1 = MQ#mqueue{ - q = Q1, - len = Len - 1, - last_prio = Prio, - p_credit = get_credits(Prio, ShiftOpts) - }, + q = Q1, + len = Len - 1, + last_prio = Prio, + p_credit = get_credits(Prio, ShiftOpts) + }, {{value, Val}, MQ1}; out(MQ = #mqueue{q = Q, p_credit = 0}) -> MQ1 = MQ#mqueue{ - q = ?PQUEUE:shift(Q), - last_prio = undefined - }, + q = ?PQUEUE:shift(Q), + last_prio = undefined + }, out(MQ1); out(MQ = #mqueue{q = Q, len = Len, p_credit = Cnt}) -> {R, Q1} = ?PQUEUE:out(Q), @@ -232,23 +245,25 @@ get_shift_opt(Opts) -> %% overhead of ?PQUEUE:rotate Mult = maps:get(shift_multiplier, Opts, 10), true = is_integer(Mult) andalso Mult > 0, - Min = case Opts of - #{p_table := PTab} -> - case maps:size(PTab) of - 0 -> 0; - _ -> lists:min(maps:values(PTab)) - end; - _ -> - ?LOWEST_PRIORITY - end, + Min = + case Opts of + #{p_table := PTab} -> + case maps:size(PTab) of + 0 -> 0; + _ -> lists:min(maps:values(PTab)) + end; + _ -> + ?LOWEST_PRIORITY + end, %% `mqueue' module supports negative priorities, but we don't want %% the counter to be negative, so all priorities should be shifted %% by a constant, if negative priorities are used: - Base = case Min < 0 of - true -> -Min; - false -> 0 - end, + Base = + case Min < 0 of + true -> -Min; + false -> 0 + end, #shift_opts{ - multiplier = Mult, - base = Base - }. + multiplier = Mult, + base = Base + }. diff --git a/apps/emqx/src/emqx_node_dump.erl b/apps/emqx/src/emqx_node_dump.erl index cc901615f..b6659e257 100644 --- a/apps/emqx/src/emqx_node_dump.erl +++ b/apps/emqx/src/emqx_node_dump.erl @@ -17,14 +17,16 @@ %% Collection of functions for creating node dumps -module(emqx_node_dump). --export([ sys_info/0 - , app_env_dump/0 - ]). +-export([ + sys_info/0, + app_env_dump/0 +]). sys_info() -> - #{ release => emqx_app:get_release() - , otp_version => emqx_vm:get_otp_version() - }. + #{ + release => emqx_app:get_release(), + otp_version => emqx_vm:get_otp_version() + }. app_env_dump() -> censor(ets:tab2list(ac_tab)). @@ -37,13 +39,13 @@ censor([_ | Rest]) -> censor(Rest). censor(Path, {Key, Val}) when is_atom(Key) -> - {Key, censor([Key|Path], Val)}; + {Key, censor([Key | Path], Val)}; censor(Path, M) when is_map(M) -> Fun = fun(Key, Val) -> - censor([Key|Path], Val) - end, + censor([Key | Path], Val) + end, maps:map(Fun, M); -censor(Path, L = [Fst|_]) when is_tuple(Fst) -> +censor(Path, L = [Fst | _]) when is_tuple(Fst) -> [censor(Path, I) || I <- L]; censor([Key | _], Val) -> case is_sensitive(Key) of @@ -58,12 +60,14 @@ is_sensitive(Key) when is_list(Key) -> Bin -> is_sensitive(Bin) catch - _ : _ -> + _:_ -> false end; is_sensitive(Key) when is_binary(Key) -> - lists:any(fun(Pattern) -> re:run(Key, Pattern) =/= nomatch end, - ["passwd", "password", "secret"]); + lists:any( + fun(Pattern) -> re:run(Key, Pattern) =/= nomatch end, + ["passwd", "password", "secret"] + ); is_sensitive(Key) when is_tuple(Key) -> false. @@ -77,11 +81,14 @@ obfuscate_value(_Val) -> -include_lib("eunit/include/eunit.hrl"). 
censor_test() -> - ?assertMatch( [{{env, emqx, listeners}, #{password := <<"********">>}}] - , censor([foo, {{env, emqx, listeners}, #{password => <<"secret">>}}, {app, bar}]) - ), - ?assertMatch( [{{env, emqx, listeners}, [{foo, 1}, {password, "********"}]}] - , censor([{{env, emqx, listeners}, [{foo, 1}, {password, "secret"}]}]) - ). + ?assertMatch( + [{{env, emqx, listeners}, #{password := <<"********">>}}], + censor([foo, {{env, emqx, listeners}, #{password => <<"secret">>}}, {app, bar}]) + ), + ?assertMatch( + [{{env, emqx, listeners}, [{foo, 1}, {password, "********"}]}], + censor([{{env, emqx, listeners}, [{foo, 1}, {password, "secret"}]}]) + ). --endif. %% TEST +%% TEST +-endif. diff --git a/apps/emqx/src/emqx_olp.erl b/apps/emqx/src/emqx_olp.erl index 9c1794815..26c5ed6bd 100644 --- a/apps/emqx/src/emqx_olp.erl +++ b/apps/emqx/src/emqx_olp.erl @@ -17,38 +17,39 @@ -include_lib("lc/include/lc.hrl"). --export([ is_overloaded/0 - , backoff/1 - , backoff_gc/1 - , backoff_hibernation/1 - , backoff_new_conn/1 - ]). - +-export([ + is_overloaded/0, + backoff/1, + backoff_gc/1, + backoff_hibernation/1, + backoff_new_conn/1 +]). %% exports for O&M --export([ status/0 - , enable/0 - , disable/0 - ]). +-export([ + status/0, + enable/0, + disable/0 +]). -type cfg_key() :: - backoff_gc | - backoff_hibernation | - backoff_new_conn. + backoff_gc + | backoff_hibernation + | backoff_new_conn. -type cnt_name() :: - 'olp.delay.ok' | - 'olp.delay.timeout' | - 'olp.hbn' | - 'olp.gc' | - 'olp.new_conn'. + 'olp.delay.ok' + | 'olp.delay.timeout' + | 'olp.hbn' + | 'olp.gc' + | 'olp.new_conn'. -define(overload_protection, overload_protection). %% @doc Light realtime check if system is overloaded. -spec is_overloaded() -> boolean(). is_overloaded() -> - load_ctl:is_overloaded(). + load_ctl:is_overloaded(). %% @doc Backoff with a delay if the system is overloaded, for tasks that could be deferred. %% returns `false' if backoff didn't happen, the system is cool. @@ -56,78 +57,79 @@ is_overloaded() -> %% returns `timeout' if backoff is triggered but get unblocked due to timeout as configured. -spec backoff(Zone :: atom()) -> ok | false | timeout. backoff(Zone) -> - case emqx_config:get_zone_conf(Zone, [?overload_protection]) of - #{enable := true, backoff_delay := Delay} -> - case load_ctl:maydelay(Delay) of - false -> false; - ok -> - emqx_metrics:inc('olp.delay.ok'), - ok; - timeout -> - emqx_metrics:inc('olp.delay.timeout'), - timeout - end; - _ -> - ok - end. + case emqx_config:get_zone_conf(Zone, [?overload_protection]) of + #{enable := true, backoff_delay := Delay} -> + case load_ctl:maydelay(Delay) of + false -> + false; + ok -> + emqx_metrics:inc('olp.delay.ok'), + ok; + timeout -> + emqx_metrics:inc('olp.delay.timeout'), + timeout + end; + _ -> + ok + end. %% @doc If forceful GC should be skipped when the system is overloaded. -spec backoff_gc(Zone :: atom()) -> boolean(). backoff_gc(Zone) -> - do_check(Zone, ?FUNCTION_NAME, 'olp.gc'). + do_check(Zone, ?FUNCTION_NAME, 'olp.gc'). %% @doc If hibernation should be skipped when the system is overloaded. -spec backoff_hibernation(Zone :: atom()) -> boolean(). backoff_hibernation(Zone) -> - do_check(Zone, ?FUNCTION_NAME, 'olp.hbn'). + do_check(Zone, ?FUNCTION_NAME, 'olp.hbn'). %% @doc Returns {error, overloaded} if new connection should be %% closed when system is overloaded. -spec backoff_new_conn(Zone :: atom()) -> ok | {error, overloaded}. 
backoff_new_conn(Zone) -> - case do_check(Zone, ?FUNCTION_NAME, 'olp.new_conn') of - true -> - {error, overloaded}; - false -> - ok - end. + case do_check(Zone, ?FUNCTION_NAME, 'olp.new_conn') of + true -> + {error, overloaded}; + false -> + ok + end. -spec status() -> any(). status() -> - is_overloaded(). + is_overloaded(). %% @doc turn off background runq check. -spec disable() -> ok | {error, timeout}. disable() -> - load_ctl:stop_runq_flagman(5000). + load_ctl:stop_runq_flagman(5000). %% @doc turn on background runq check. -spec enable() -> {ok, pid()} | {error, running | restarting | disabled}. enable() -> - case load_ctl:restart_runq_flagman() of - {error, disabled} -> - OldCfg = load_ctl:get_config(), - ok = load_ctl:put_config(OldCfg#{ ?RUNQ_MON_F0 => true }), - load_ctl:restart_runq_flagman(); - Other -> - Other - end. + case load_ctl:restart_runq_flagman() of + {error, disabled} -> + OldCfg = load_ctl:get_config(), + ok = load_ctl:put_config(OldCfg#{?RUNQ_MON_F0 => true}), + load_ctl:restart_runq_flagman(); + Other -> + Other + end. %%% Internals --spec do_check(Zone::atom(), cfg_key(), cnt_name()) -> boolean(). +-spec do_check(Zone :: atom(), cfg_key(), cnt_name()) -> boolean(). do_check(Zone, Key, CntName) -> - case load_ctl:is_overloaded() of - true -> - case emqx_config:get_zone_conf(Zone, [?overload_protection]) of - #{enable := true, Key := true} -> - emqx_metrics:inc(CntName), - true; - _ -> - false - end; - false -> false - end. - + case load_ctl:is_overloaded() of + true -> + case emqx_config:get_zone_conf(Zone, [?overload_protection]) of + #{enable := true, Key := true} -> + emqx_metrics:inc(CntName), + true; + _ -> + false + end; + false -> + false + end. %%%_* Emacs ==================================================================== %%% Local Variables: diff --git a/apps/emqx/src/emqx_os_mon.erl b/apps/emqx/src/emqx_os_mon.erl index f9d5fc95d..4ceb0ad64 100644 --- a/apps/emqx/src/emqx_os_mon.erl +++ b/apps/emqx/src/emqx_os_mon.erl @@ -20,25 +20,26 @@ -include("logger.hrl"). - -export([start_link/0]). --export([ get_mem_check_interval/0 - , set_mem_check_interval/1 - , get_sysmem_high_watermark/0 - , set_sysmem_high_watermark/1 - , get_procmem_high_watermark/0 - , set_procmem_high_watermark/1 - ]). +-export([ + get_mem_check_interval/0, + set_mem_check_interval/1, + get_sysmem_high_watermark/0, + set_sysmem_high_watermark/1, + get_procmem_high_watermark/0, + set_procmem_high_watermark/1 +]). %% gen_server callbacks --export([ init/1 - , handle_call/3 - , handle_cast/2 - , handle_info/2 - , terminate/2 - , code_change/3 - ]). +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2, + code_change/3 +]). -include("emqx.hrl"). @@ -93,41 +94,47 @@ handle_call(Req, _From, State) -> {reply, {error, {unexpected_call, Req}}, State}. handle_cast(Msg, State) -> - ?SLOG(error, #{msg => "unexpected_cast", cast=> Msg}), + ?SLOG(error, #{msg => "unexpected_cast", cast => Msg}), {noreply, State}. handle_info({timeout, _Timer, check}, State) -> CPUHighWatermark = emqx:get_config([sysmon, os, cpu_high_watermark]) * 100, CPULowWatermark = emqx:get_config([sysmon, os, cpu_low_watermark]) * 100, - _ = case emqx_vm:cpu_util() of %% TODO: should be improved? 
- 0 -> ok; - Busy when Busy > CPUHighWatermark -> - Usage = list_to_binary(io_lib:format("~.2f%", [Busy])), - Message = <>, - emqx_alarm:activate(high_cpu_usage, - #{ - usage => Usage, - high_watermark => CPUHighWatermark, - low_watermark => CPULowWatermark - }, - Message), - start_check_timer(); - Busy when Busy < CPULowWatermark -> - Usage = list_to_binary(io_lib:format("~.2f%", [Busy])), - Message = <>, - emqx_alarm:deactivate(high_cpu_usage, - #{ - usage => Usage, - high_watermark => CPUHighWatermark, - low_watermark => CPULowWatermark - }, - Message), - start_check_timer(); - _Busy -> - start_check_timer() - end, + %% TODO: should be improved? + _ = + case emqx_vm:cpu_util() of + 0 -> + ok; + Busy when Busy > CPUHighWatermark -> + Usage = list_to_binary(io_lib:format("~.2f%", [Busy])), + Message = <>, + emqx_alarm:activate( + high_cpu_usage, + #{ + usage => Usage, + high_watermark => CPUHighWatermark, + low_watermark => CPULowWatermark + }, + Message + ), + start_check_timer(); + Busy when Busy < CPULowWatermark -> + Usage = list_to_binary(io_lib:format("~.2f%", [Busy])), + Message = <>, + emqx_alarm:deactivate( + high_cpu_usage, + #{ + usage => Usage, + high_watermark => CPUHighWatermark, + low_watermark => CPULowWatermark + }, + Message + ), + start_check_timer(); + _Busy -> + start_check_timer() + end, {noreply, State}; - handle_info(Info, State) -> ?SLOG(error, #{msg => "unexpected_info", info => Info}), {noreply, State}. @@ -157,10 +164,11 @@ start_check_timer() -> %% so it can only be checked again at startup. ensure_system_memory_alarm(HW) -> case erlang:whereis(memsup) of - undefined -> ok; + undefined -> + ok; _Pid -> {Total, Allocated, _Worst} = memsup:get_memory_data(), - case Total =/= 0 andalso Allocated/Total * 100 > HW of + case Total =/= 0 andalso Allocated / Total * 100 > HW of true -> emqx_alarm:activate(high_system_memory_usage, #{high_watermark => HW}); false -> ok end diff --git a/apps/emqx/src/emqx_packet.erl b/apps/emqx/src/emqx_packet.erl index 41bf39f86..f15898494 100644 --- a/apps/emqx/src/emqx_packet.erl +++ b/apps/emqx/src/emqx_packet.erl @@ -20,89 +20,83 @@ -include("emqx_mqtt.hrl"). %% Header APIs --export([ type/1 - , type_name/1 - , dup/1 - , qos/1 - , retain/1 - ]). +-export([ + type/1, + type_name/1, + dup/1, + qos/1, + retain/1 +]). %% Field APIs --export([ proto_name/1 - , proto_ver/1 - , info/2 - , set_props/2 - ]). +-export([ + proto_name/1, + proto_ver/1, + info/2, + set_props/2 +]). %% Check API --export([ check/1 - , check/2 - ]). +-export([ + check/1, + check/2 +]). --export([ to_message/2 - , to_message/3 - , will_msg/1 - ]). +-export([ + to_message/2, + to_message/3, + will_msg/1 +]). --export([ format/1 - , format/2 - ]). +-export([ + format/1, + format/2 +]). -export([encode_hex/1]). -define(TYPE_NAMES, - { 'CONNECT' - , 'CONNACK' - , 'PUBLISH' - , 'PUBACK' - , 'PUBREC' - , 'PUBREL' - , 'PUBCOMP' - , 'SUBSCRIBE' - , 'SUBACK' - , 'UNSUBSCRIBE' - , 'UNSUBACK' - , 'PINGREQ' - , 'PINGRESP' - , 'DISCONNECT' - , 'AUTH' - }). + {'CONNECT', 'CONNACK', 'PUBLISH', 'PUBACK', 'PUBREC', 'PUBREL', 'PUBCOMP', 'SUBSCRIBE', + 'SUBACK', 'UNSUBSCRIBE', 'UNSUBACK', 'PINGREQ', 'PINGRESP', 'DISCONNECT', 'AUTH'} +). --type(connect() :: #mqtt_packet_connect{}). --type(publish() :: #mqtt_packet_publish{}). --type(subscribe() :: #mqtt_packet_subscribe{}). --type(unsubscribe() :: #mqtt_packet_unsubscribe{}). +-type connect() :: #mqtt_packet_connect{}. +-type publish() :: #mqtt_packet_publish{}. +-type subscribe() :: #mqtt_packet_subscribe{}. 
+-type unsubscribe() :: #mqtt_packet_unsubscribe{}. %%-------------------------------------------------------------------- %% MQTT Packet Type and Flags. %%-------------------------------------------------------------------- %% @doc MQTT packet type. --spec(type(emqx_types:packet()) -> emqx_types:packet_type()). +-spec type(emqx_types:packet()) -> emqx_types:packet_type(). type(#mqtt_packet{header = #mqtt_packet_header{type = Type}}) -> Type. %% @doc Name of MQTT packet type. --spec(type_name(emqx_types:packet() | non_neg_integer()) -> atom() | string()). +-spec type_name(emqx_types:packet() | non_neg_integer()) -> atom() | string(). type_name(#mqtt_packet{} = Packet) -> type_name(type(Packet)); -type_name(0) -> 'FORBIDDEN'; +type_name(0) -> + 'FORBIDDEN'; type_name(Type) when Type > 0 andalso Type =< tuple_size(?TYPE_NAMES) -> element(Type, ?TYPE_NAMES); -type_name(Type) -> "UNKNOWN("++ integer_to_list(Type) ++")". +type_name(Type) -> + "UNKNOWN(" ++ integer_to_list(Type) ++ ")". %% @doc Dup flag of MQTT packet. --spec(dup(emqx_types:packet()) -> boolean()). +-spec dup(emqx_types:packet()) -> boolean(). dup(#mqtt_packet{header = #mqtt_packet_header{dup = Dup}}) -> Dup. %% @doc QoS of MQTT packet type. --spec(qos(emqx_types:packet()) -> emqx_types:qos()). +-spec qos(emqx_types:packet()) -> emqx_types:qos(). qos(#mqtt_packet{header = #mqtt_packet_header{qos = QoS}}) -> QoS. %% @doc Retain flag of MQTT packet. --spec(retain(emqx_types:packet()) -> boolean()). +-spec retain(emqx_types:packet()) -> boolean(). retain(#mqtt_packet{header = #mqtt_packet_header{retain = Retain}}) -> Retain. @@ -111,14 +105,14 @@ retain(#mqtt_packet{header = #mqtt_packet_header{retain = Retain}}) -> %%-------------------------------------------------------------------- %% @doc Protocol name of the CONNECT Packet. --spec(proto_name(emqx_types:packet()|connect()) -> binary()). +-spec proto_name(emqx_types:packet() | connect()) -> binary(). proto_name(?CONNECT_PACKET(ConnPkt)) -> proto_name(ConnPkt); proto_name(#mqtt_packet_connect{proto_name = Name}) -> Name. %% @doc Protocol version of the CONNECT Packet. --spec(proto_ver(emqx_types:packet()|connect()) -> emqx_types:proto_ver()). +-spec proto_ver(emqx_types:packet() | connect()) -> emqx_types:proto_ver(). 
proto_ver(?CONNECT_PACKET(ConnPkt)) -> proto_ver(ConnPkt); proto_ver(#mqtt_packet_connect{proto_ver = Ver}) -> @@ -158,61 +152,52 @@ info(username, #mqtt_packet_connect{username = Username}) -> Username; info(password, #mqtt_packet_connect{password = Password}) -> Password; - info(ack_flags, #mqtt_packet_connack{ack_flags = Flags}) -> Flags; info(reason_code, #mqtt_packet_connack{reason_code = RC}) -> RC; info(properties, #mqtt_packet_connack{properties = Props}) -> Props; - info(topic_name, #mqtt_packet_publish{topic_name = Topic}) -> Topic; info(packet_id, #mqtt_packet_publish{packet_id = PacketId}) -> PacketId; info(properties, #mqtt_packet_publish{properties = Props}) -> Props; - info(packet_id, #mqtt_packet_puback{packet_id = PacketId}) -> PacketId; info(reason_code, #mqtt_packet_puback{reason_code = RC}) -> RC; info(properties, #mqtt_packet_puback{properties = Props}) -> Props; - info(packet_id, #mqtt_packet_subscribe{packet_id = PacketId}) -> PacketId; info(properties, #mqtt_packet_subscribe{properties = Props}) -> Props; info(topic_filters, #mqtt_packet_subscribe{topic_filters = Topics}) -> Topics; - info(packet_id, #mqtt_packet_suback{packet_id = PacketId}) -> PacketId; info(properties, #mqtt_packet_suback{properties = Props}) -> Props; info(reason_codes, #mqtt_packet_suback{reason_codes = RCs}) -> RCs; - info(packet_id, #mqtt_packet_unsubscribe{packet_id = PacketId}) -> PacketId; info(properties, #mqtt_packet_unsubscribe{properties = Props}) -> Props; info(topic_filters, #mqtt_packet_unsubscribe{topic_filters = Topics}) -> Topics; - info(packet_id, #mqtt_packet_unsuback{packet_id = PacketId}) -> PacketId; info(properties, #mqtt_packet_unsuback{properties = Props}) -> Props; info(reason_codes, #mqtt_packet_unsuback{reason_codes = RCs}) -> RCs; - info(reason_code, #mqtt_packet_disconnect{reason_code = RC}) -> RC; info(properties, #mqtt_packet_disconnect{properties = Props}) -> Props; - info(reason_code, #mqtt_packet_auth{reason_code = RC}) -> RC; info(properties, #mqtt_packet_auth{properties = Props}) -> @@ -220,31 +205,22 @@ info(properties, #mqtt_packet_auth{properties = Props}) -> set_props(Props, #mqtt_packet_connect{} = Pkt) -> Pkt#mqtt_packet_connect{properties = Props}; - set_props(Props, #mqtt_packet_connack{} = Pkt) -> Pkt#mqtt_packet_connack{properties = Props}; - set_props(Props, #mqtt_packet_publish{} = Pkt) -> Pkt#mqtt_packet_publish{properties = Props}; - set_props(Props, #mqtt_packet_puback{} = Pkt) -> Pkt#mqtt_packet_puback{properties = Props}; - set_props(Props, #mqtt_packet_subscribe{} = Pkt) -> Pkt#mqtt_packet_subscribe{properties = Props}; - set_props(Props, #mqtt_packet_suback{} = Pkt) -> Pkt#mqtt_packet_suback{properties = Props}; - set_props(Props, #mqtt_packet_unsubscribe{} = Pkt) -> Pkt#mqtt_packet_unsubscribe{properties = Props}; - set_props(Props, #mqtt_packet_unsuback{} = Pkt) -> Pkt#mqtt_packet_unsuback{properties = Props}; - set_props(Props, #mqtt_packet_disconnect{} = Pkt) -> Pkt#mqtt_packet_disconnect{properties = Props}; - set_props(Props, #mqtt_packet_auth{} = Pkt) -> Pkt#mqtt_packet_auth{properties = Props}. @@ -253,10 +229,12 @@ set_props(Props, #mqtt_packet_auth{} = Pkt) -> %%-------------------------------------------------------------------- %% @doc Check PubSub Packet. --spec(check(emqx_types:packet()|publish()|subscribe()|unsubscribe()) - -> ok | {error, emqx_types:reason_code()}). 
-check(#mqtt_packet{header = #mqtt_packet_header{type = ?PUBLISH}, - variable = PubPkt}) when not is_tuple(PubPkt) -> +-spec check(emqx_types:packet() | publish() | subscribe() | unsubscribe()) -> + ok | {error, emqx_types:reason_code()}. +check(#mqtt_packet{ + header = #mqtt_packet_header{type = ?PUBLISH}, + variable = PubPkt +}) when not is_tuple(PubPkt) -> %% publish without any data %% disconnect instead of crash {error, ?RC_PROTOCOL_ERROR}; @@ -266,11 +244,10 @@ check(#mqtt_packet{variable = #mqtt_packet_subscribe{} = SubPkt}) -> check(SubPkt); check(#mqtt_packet{variable = #mqtt_packet_unsubscribe{} = UnsubPkt}) -> check(UnsubPkt); - %% A Topic Alias of 0 is not permitted. -check(#mqtt_packet_publish{topic_name = <<>>, properties = #{'Topic-Alias':= 0}}) -> +check(#mqtt_packet_publish{topic_name = <<>>, properties = #{'Topic-Alias' := 0}}) -> {error, ?RC_PROTOCOL_ERROR}; -check(#mqtt_packet_publish{topic_name = <<>>, properties = #{'Topic-Alias':= _Alias}}) -> +check(#mqtt_packet_publish{topic_name = <<>>, properties = #{'Topic-Alias' := _Alias}}) -> ok; check(#mqtt_packet_publish{topic_name = <<>>, properties = #{}}) -> {error, ?RC_PROTOCOL_ERROR}; @@ -281,26 +258,24 @@ check(#mqtt_packet_publish{topic_name = TopicName, properties = Props}) -> error:_Error -> {error, ?RC_TOPIC_NAME_INVALID} end; - -check(#mqtt_packet_subscribe{properties = #{'Subscription-Identifier' := I}}) - when I =< 0; I >= 16#FFFFFFF -> +check(#mqtt_packet_subscribe{properties = #{'Subscription-Identifier' := I}}) when + I =< 0; I >= 16#FFFFFFF +-> {error, ?RC_SUBSCRIPTION_IDENTIFIERS_NOT_SUPPORTED}; - check(#mqtt_packet_subscribe{topic_filters = []}) -> {error, ?RC_TOPIC_FILTER_INVALID}; - check(#mqtt_packet_subscribe{topic_filters = TopicFilters}) -> - try validate_topic_filters(TopicFilters) + try + validate_topic_filters(TopicFilters) catch error:_Error -> {error, ?RC_TOPIC_FILTER_INVALID} end; - check(#mqtt_packet_unsubscribe{topic_filters = []}) -> {error, ?RC_TOPIC_FILTER_INVALID}; - check(#mqtt_packet_unsubscribe{topic_filters = TopicFilters}) -> - try validate_topic_filters(TopicFilters) + try + validate_topic_filters(TopicFilters) catch error:_Error -> {error, ?RC_TOPIC_FILTER_INVALID} @@ -308,10 +283,8 @@ check(#mqtt_packet_unsubscribe{topic_filters = TopicFilters}) -> check_pub_props(#{'Topic-Alias' := 0}) -> {error, ?RC_TOPIC_ALIAS_INVALID}; - check_pub_props(#{'Subscription-Identifier' := 0}) -> {error, ?RC_PROTOCOL_ERROR}; - check_pub_props(#{'Response-Topic' := ResponseTopic}) -> try emqx_topic:validate(name, ResponseTopic) of true -> ok @@ -319,41 +292,70 @@ check_pub_props(#{'Response-Topic' := ResponseTopic}) -> error:_Error -> {error, ?RC_PROTOCOL_ERROR} end; -check_pub_props(_Props) -> ok. +check_pub_props(_Props) -> + ok. %% @doc Check CONNECT Packet. --spec(check(emqx_types:packet()|connect(), Opts :: map()) - -> ok | {error, emqx_types:reason_code()}). +-spec check(emqx_types:packet() | connect(), Opts :: map()) -> + ok | {error, emqx_types:reason_code()}. check(?CONNECT_PACKET(ConnPkt), Opts) -> check(ConnPkt, Opts); check(ConnPkt, Opts) when is_record(ConnPkt, mqtt_packet_connect) -> - run_checks([fun check_proto_ver/2, - fun check_client_id/2, - fun check_conn_props/2, - fun check_will_msg/2], ConnPkt, Opts). + run_checks( + [ + fun check_proto_ver/2, + fun check_client_id/2, + fun check_conn_props/2, + fun check_will_msg/2 + ], + ConnPkt, + Opts + ). 
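Editor's note: check/1 is the guard used to reject malformed PUBLISH/SUBSCRIBE/UNSUBSCRIBE packets before they are processed further. A hedged sketch of the observable behaviour, based only on the clauses above (records and reason-code macros come from emqx_mqtt.hrl; the example module name is made up):

    -module(packet_check_example).
    -include_lib("emqx/include/emqx_mqtt.hrl").
    -export([demo/0]).

    demo() ->
        %% a SUBSCRIBE with no topic filters is rejected
        {error, ?RC_TOPIC_FILTER_INVALID} =
            emqx_packet:check(#mqtt_packet_subscribe{topic_filters = []}),
        %% a Topic Alias of 0 is never permitted
        {error, ?RC_PROTOCOL_ERROR} =
            emqx_packet:check(#mqtt_packet_publish{
                topic_name = <<>>,
                properties = #{'Topic-Alias' => 0}
            }),
        ok.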
-check_proto_ver(#mqtt_packet_connect{proto_ver = Ver, - proto_name = Name}, _Opts) -> +check_proto_ver( + #mqtt_packet_connect{ + proto_ver = Ver, + proto_name = Name + }, + _Opts +) -> case proplists:get_value(Ver, ?PROTOCOL_NAMES) of - Name -> ok; + Name -> ok; _Other -> {error, ?RC_UNSUPPORTED_PROTOCOL_VERSION} end. %% MQTT3.1 does not allow null clientId -check_client_id(#mqtt_packet_connect{proto_ver = ?MQTT_PROTO_V3, - clientid = <<>>}, _Opts) -> +check_client_id( + #mqtt_packet_connect{ + proto_ver = ?MQTT_PROTO_V3, + clientid = <<>> + }, + _Opts +) -> {error, ?RC_CLIENT_IDENTIFIER_NOT_VALID}; %% Issue#599: Null clientId and clean_start = false -check_client_id(#mqtt_packet_connect{clientid = <<>>, - clean_start = false}, _Opts) -> +check_client_id( + #mqtt_packet_connect{ + clientid = <<>>, + clean_start = false + }, + _Opts +) -> {error, ?RC_CLIENT_IDENTIFIER_NOT_VALID}; -check_client_id(#mqtt_packet_connect{clientid = <<>>, - clean_start = true}, _Opts) -> +check_client_id( + #mqtt_packet_connect{ + clientid = <<>>, + clean_start = true + }, + _Opts +) -> ok; -check_client_id(#mqtt_packet_connect{clientid = ClientId}, - #{max_clientid_len := MaxLen} = _Opts) -> +check_client_id( + #mqtt_packet_connect{clientid = ClientId}, + #{max_clientid_len := MaxLen} = _Opts +) -> case (1 =< (Len = byte_size(ClientId))) andalso (Len =< MaxLen) of - true -> ok; + true -> ok; false -> {error, ?RC_CLIENT_IDENTIFIER_NOT_VALID} end. @@ -361,32 +363,44 @@ check_conn_props(#mqtt_packet_connect{properties = undefined}, _Opts) -> ok; check_conn_props(#mqtt_packet_connect{properties = #{'Receive-Maximum' := 0}}, _Opts) -> {error, ?RC_PROTOCOL_ERROR}; -check_conn_props(#mqtt_packet_connect{properties = #{'Request-Response-Information' := ReqRespInfo}}, _Opts) - when ReqRespInfo =/= 0, ReqRespInfo =/= 1 -> +check_conn_props( + #mqtt_packet_connect{properties = #{'Request-Response-Information' := ReqRespInfo}}, _Opts +) when + ReqRespInfo =/= 0, ReqRespInfo =/= 1 +-> {error, ?RC_PROTOCOL_ERROR}; -check_conn_props(#mqtt_packet_connect{properties = #{'Request-Problem-Information' := ReqProInfo}}, _Opts) - when ReqProInfo =/= 0, ReqProInfo =/= 1 -> +check_conn_props( + #mqtt_packet_connect{properties = #{'Request-Problem-Information' := ReqProInfo}}, _Opts +) when + ReqProInfo =/= 0, ReqProInfo =/= 1 +-> {error, ?RC_PROTOCOL_ERROR}; -check_conn_props(_ConnPkt, _Opts) -> ok. +check_conn_props(_ConnPkt, _Opts) -> + ok. check_will_msg(#mqtt_packet_connect{will_flag = false}, _Caps) -> ok; -check_will_msg(#mqtt_packet_connect{will_retain = true}, - _Opts = #{mqtt_retain_available := false}) -> +check_will_msg( + #mqtt_packet_connect{will_retain = true}, + _Opts = #{mqtt_retain_available := false} +) -> {error, ?RC_RETAIN_NOT_SUPPORTED}; -check_will_msg(#mqtt_packet_connect{will_qos = WillQoS}, - _Opts = #{max_qos_allowed := MaxQoS}) when WillQoS > MaxQoS -> +check_will_msg( + #mqtt_packet_connect{will_qos = WillQoS}, + _Opts = #{max_qos_allowed := MaxQoS} +) when WillQoS > MaxQoS -> {error, ?RC_QOS_NOT_SUPPORTED}; check_will_msg(#mqtt_packet_connect{will_topic = WillTopic}, _Opts) -> try emqx_topic:validate(name, WillTopic) of true -> ok - catch error:_Error -> - {error, ?RC_TOPIC_NAME_INVALID} + catch + error:_Error -> + {error, ?RC_TOPIC_NAME_INVALID} end. 
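Editor's note: check/2 runs the CONNECT checks above in order and stops at the first failure. A sketch of how a caller might exercise the client-id rule for MQTT 3.1 (the options map key max_clientid_len is the one consumed by check_client_id/2 above; the example module name is made up and the record's remaining fields are left at their header defaults):

    -module(connect_check_example).
    -include_lib("emqx/include/emqx_mqtt.hrl").
    -export([demo/0]).

    demo() ->
        %% MQTT 3.1 (MQIsdp) does not allow an empty client id, so check/2
        %% passes check_proto_ver/2 and then stops at check_client_id/2.
        ConnPkt = #mqtt_packet_connect{
            proto_ver = ?MQTT_PROTO_V3,
            proto_name = <<"MQIsdp">>,
            clientid = <<>>
        },
        {error, ?RC_CLIENT_IDENTIFIER_NOT_VALID} =
            emqx_packet:check(ConnPkt, #{max_clientid_len => 23}),
        ok.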
run_checks([], _Packet, _Options) -> ok; -run_checks([Check|More], Packet, Options) -> +run_checks([Check | More], Packet, Options) -> case Check(Packet, Options) of ok -> run_checks(More, Packet, Options); Error = {error, _Reason} -> Error @@ -396,134 +410,192 @@ run_checks([Check|More], Packet, Options) -> %% @private validate_topic_filters(TopicFilters) -> lists:foreach( - fun({TopicFilter, _SubOpts}) -> - emqx_topic:validate(TopicFilter); - (TopicFilter) -> - emqx_topic:validate(TopicFilter) - end, TopicFilters). + fun + ({TopicFilter, _SubOpts}) -> + emqx_topic:validate(TopicFilter); + (TopicFilter) -> + emqx_topic:validate(TopicFilter) + end, + TopicFilters + ). --spec(to_message(emqx_types:packet(), emqx_types:clientid()) -> emqx_types:message()). +-spec to_message(emqx_types:packet(), emqx_types:clientid()) -> emqx_types:message(). to_message(Packet, ClientId) -> to_message(Packet, ClientId, #{}). %% @doc Transform Publish Packet to Message. --spec(to_message(emqx_types:packet(), emqx_types:clientid(), map()) -> emqx_types:message()). -to_message(#mqtt_packet{ - header = #mqtt_packet_header{ - type = ?PUBLISH, - retain = Retain, - qos = QoS, - dup = Dup}, - variable = #mqtt_packet_publish{ - topic_name = Topic, - properties = Props}, - payload = Payload - }, ClientId, Headers) -> +-spec to_message(emqx_types:packet(), emqx_types:clientid(), map()) -> emqx_types:message(). +to_message( + #mqtt_packet{ + header = #mqtt_packet_header{ + type = ?PUBLISH, + retain = Retain, + qos = QoS, + dup = Dup + }, + variable = #mqtt_packet_publish{ + topic_name = Topic, + properties = Props + }, + payload = Payload + }, + ClientId, + Headers +) -> Msg = emqx_message:make(ClientId, QoS, Topic, Payload), - Msg#message{flags = #{dup => Dup, retain => Retain}, - headers = Headers#{properties => Props}}. + Msg#message{ + flags = #{dup => Dup, retain => Retain}, + headers = Headers#{properties => Props} + }. --spec(will_msg(#mqtt_packet_connect{}) -> emqx_types:message()). +-spec will_msg(#mqtt_packet_connect{}) -> emqx_types:message(). will_msg(#mqtt_packet_connect{will_flag = false}) -> undefined; -will_msg(#mqtt_packet_connect{clientid = ClientId, - username = Username, - will_retain = Retain, - will_qos = QoS, - will_topic = Topic, - will_props = Props, - will_payload = Payload}) -> +will_msg(#mqtt_packet_connect{ + clientid = ClientId, + username = Username, + will_retain = Retain, + will_qos = QoS, + will_topic = Topic, + will_props = Props, + will_payload = Payload +}) -> Msg = emqx_message:make(ClientId, QoS, Topic, Payload), - Msg#message{flags = #{dup => false, retain => Retain}, - headers = #{username => Username, properties => Props}}. + Msg#message{ + flags = #{dup => false, retain => Retain}, + headers = #{username => Username, properties => Props} + }. %% @doc Format packet --spec(format(emqx_types:packet()) -> iolist()). +-spec format(emqx_types:packet()) -> iolist(). format(Packet) -> format(Packet, emqx_trace_handler:payload_encode()). %% @doc Format packet --spec(format(emqx_types:packet(), hex | text | hidden) -> iolist()). +-spec format(emqx_types:packet(), hex | text | hidden) -> iolist(). format(#mqtt_packet{header = Header, variable = Variable, payload = Payload}, PayloadEncode) -> HeaderIO = format_header(Header), case format_variable(Variable, Payload, PayloadEncode) of "" -> HeaderIO; - VarIO -> [HeaderIO,",", VarIO] + VarIO -> [HeaderIO, ",", VarIO] end. 
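Editor's note: format/1,2 is only used for trace/log output; the second argument selects how the payload is rendered (text, hex, or hidden, as implemented by format_payload/2 further below). A small usage sketch, assuming the header record defaults from emqx_mqtt.hrl and a made-up module name:

    -module(packet_format_example).
    -include_lib("emqx/include/emqx_mqtt.hrl").
    -export([demo/0]).

    demo() ->
        Packet = #mqtt_packet{
            header = #mqtt_packet_header{type = ?PUBLISH, qos = ?QOS_1},
            variable = #mqtt_packet_publish{topic_name = <<"t/1">>, packet_id = 1},
            payload = <<"secret payload">>
        },
        %% returns an iolist along the lines of "PUBLISH(Q1, R0, D0), Topic=t/1, PacketId=1 ..."
        %% with the payload masked when the 'hidden' encoding is chosen
        io:format("~ts~n", [emqx_packet:format(Packet, hidden)]).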
-format_header(#mqtt_packet_header{type = Type, - dup = Dup, - qos = QoS, - retain = Retain}) -> +format_header(#mqtt_packet_header{ + type = Type, + dup = Dup, + qos = QoS, + retain = Retain +}) -> io_lib:format("~ts(Q~p, R~p, D~p)", [type_name(Type), QoS, i(Retain), i(Dup)]). -format_variable(undefined, _, _) -> ""; +format_variable(undefined, _, _) -> + ""; format_variable(Variable, undefined, PayloadEncode) -> format_variable(Variable, PayloadEncode); format_variable(Variable, Payload, PayloadEncode) -> [format_variable(Variable, PayloadEncode), format_payload(Payload, PayloadEncode)]. -format_variable(#mqtt_packet_connect{ - proto_ver = ProtoVer, - proto_name = ProtoName, - will_retain = WillRetain, - will_qos = WillQoS, - will_flag = WillFlag, - clean_start = CleanStart, - keepalive = KeepAlive, - clientid = ClientId, - will_topic = WillTopic, - will_payload = WillPayload, - username = Username, - password = Password}, - PayloadEncode) -> +format_variable( + #mqtt_packet_connect{ + proto_ver = ProtoVer, + proto_name = ProtoName, + will_retain = WillRetain, + will_qos = WillQoS, + will_flag = WillFlag, + clean_start = CleanStart, + keepalive = KeepAlive, + clientid = ClientId, + will_topic = WillTopic, + will_payload = WillPayload, + username = Username, + password = Password + }, + PayloadEncode +) -> Base = io_lib:format( "ClientId=~ts, ProtoName=~ts, ProtoVsn=~p, CleanStart=~ts, KeepAlive=~p, Username=~ts, Password=~ts", - [ClientId, ProtoName, ProtoVer, CleanStart, KeepAlive, Username, format_password(Password)]), + [ClientId, ProtoName, ProtoVer, CleanStart, KeepAlive, Username, format_password(Password)] + ), case WillFlag of true -> - [Base, io_lib:format(", Will(Q~p, R~p, Topic=~ts ", - [WillQoS, i(WillRetain), WillTopic]), - format_payload(WillPayload, PayloadEncode), ")"]; + [ + Base, + io_lib:format( + ", Will(Q~p, R~p, Topic=~ts ", + [WillQoS, i(WillRetain), WillTopic] + ), + format_payload(WillPayload, PayloadEncode), + ")" + ]; false -> Base end; - -format_variable(#mqtt_packet_disconnect - {reason_code = ReasonCode}, _) -> +format_variable( + #mqtt_packet_disconnect{ + reason_code = ReasonCode + }, + _ +) -> io_lib:format("ReasonCode=~p", [ReasonCode]); - -format_variable(#mqtt_packet_connack{ack_flags = AckFlags, - reason_code = ReasonCode}, _) -> +format_variable( + #mqtt_packet_connack{ + ack_flags = AckFlags, + reason_code = ReasonCode + }, + _ +) -> io_lib:format("AckFlags=~p, ReasonCode=~p", [AckFlags, ReasonCode]); - -format_variable(#mqtt_packet_publish{topic_name = TopicName, - packet_id = PacketId}, _) -> +format_variable( + #mqtt_packet_publish{ + topic_name = TopicName, + packet_id = PacketId + }, + _ +) -> io_lib:format("Topic=~ts, PacketId=~p", [TopicName, PacketId]); - -format_variable(#mqtt_packet_puback{packet_id = PacketId, - reason_code = ReasonCode}, _) -> +format_variable( + #mqtt_packet_puback{ + packet_id = PacketId, + reason_code = ReasonCode + }, + _ +) -> io_lib:format("PacketId=~p, ReasonCode=~p", [PacketId, ReasonCode]); - -format_variable(#mqtt_packet_subscribe{packet_id = PacketId, - topic_filters = TopicFilters}, _) -> - [io_lib:format("PacketId=~p ", [PacketId]), "TopicFilters=", - format_topic_filters(TopicFilters)]; - -format_variable(#mqtt_packet_unsubscribe{packet_id = PacketId, - topic_filters = Topics}, _) -> - [io_lib:format("PacketId=~p ", [PacketId]), "TopicFilters=", - format_topic_filters(Topics)]; - -format_variable(#mqtt_packet_suback{packet_id = PacketId, - reason_codes = ReasonCodes}, _) -> +format_variable( + 
#mqtt_packet_subscribe{ + packet_id = PacketId, + topic_filters = TopicFilters + }, + _ +) -> + [ + io_lib:format("PacketId=~p ", [PacketId]), + "TopicFilters=", + format_topic_filters(TopicFilters) + ]; +format_variable( + #mqtt_packet_unsubscribe{ + packet_id = PacketId, + topic_filters = Topics + }, + _ +) -> + [ + io_lib:format("PacketId=~p ", [PacketId]), + "TopicFilters=", + format_topic_filters(Topics) + ]; +format_variable( + #mqtt_packet_suback{ + packet_id = PacketId, + reason_codes = ReasonCodes + }, + _ +) -> io_lib:format("PacketId=~p, ReasonCodes=~p", [PacketId, ReasonCodes]); - format_variable(#mqtt_packet_unsuback{packet_id = PacketId}, _) -> io_lib:format("PacketId=~p", [PacketId]); - format_variable(#mqtt_packet_auth{reason_code = ReasonCode}, _) -> io_lib:format("ReasonCode=~p", [ReasonCode]); - format_variable(PacketId, _) when is_integer(PacketId) -> io_lib:format("PacketId=~p", [PacketId]). @@ -534,80 +606,92 @@ format_payload(Payload, text) -> ["Payload=", io_lib:format("~ts", [Payload])]; format_payload(Payload, hex) -> ["Payload(hex)=", encode_hex(Payload)]; format_payload(_, hidden) -> "Payload=******". -i(true) -> 1; +i(true) -> 1; i(false) -> 0; i(I) when is_integer(I) -> I. format_topic_filters(Filters) -> - ["[", - lists:join(",", + [ + "[", + lists:join( + ",", lists:map( - fun({TopicFilter, SubOpts}) -> - io_lib:format("~ts(~p)", [TopicFilter, SubOpts]); + fun + ({TopicFilter, SubOpts}) -> + io_lib:format("~ts(~p)", [TopicFilter, SubOpts]); (TopicFilter) -> io_lib:format("~ts", [TopicFilter]) - end, Filters)), - "]"]. + end, + Filters + ) + ), + "]" + ]. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% Hex encoding functions %% Copy from binary:encode_hex/1 (was only introduced in OTP24). %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -define(HEX(X), (hex(X)):16). --compile({inline,[hex/1]}). +-compile({inline, [hex/1]}). -spec encode_hex(Bin) -> Bin2 when Bin :: binary(), Bin2 :: <<_:_*16>>. encode_hex(Data) when byte_size(Data) rem 8 =:= 0 -> - << <> || <> <= Data >>; + << + <> + || <> <= Data + >>; encode_hex(Data) when byte_size(Data) rem 7 =:= 0 -> - << <> || <> <= Data >>; + << + <> + || <> <= Data + >>; encode_hex(Data) when byte_size(Data) rem 6 =:= 0 -> - << <> || <> <= Data >>; + <<<> || <> <= Data>>; encode_hex(Data) when byte_size(Data) rem 5 =:= 0 -> - << <> || <> <= Data >>; + <<<> || <> <= Data>>; encode_hex(Data) when byte_size(Data) rem 4 =:= 0 -> - << <> || <> <= Data >>; + <<<> || <> <= Data>>; encode_hex(Data) when byte_size(Data) rem 3 =:= 0 -> - << <> || <> <= Data >>; + <<<> || <> <= Data>>; encode_hex(Data) when byte_size(Data) rem 2 =:= 0 -> - << <> || <> <= Data >>; + <<<> || <> <= Data>>; encode_hex(Data) when is_binary(Data) -> - << <> || <> <= Data >>; + <<<> || <> <= Data>>; encode_hex(Bin) -> erlang:error(badarg, [Bin]). 
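Editor's note: encode_hex/1 is, as the comment says, a copy of OTP 24's binary:encode_hex/1; each clause is a binary comprehension of the form << <<?HEX(A), ...>> || <<A, ...>> <= Data >>, sized to the input length, and every byte expands to two uppercase hex characters via the hex/1 lookup below. A quick sketch of the expected result (the example module name is made up):

    -module(encode_hex_example).
    -export([demo/0]).

    demo() ->
        %% every byte becomes two uppercase hex characters
        <<"ABCD">> = emqx_packet:encode_hex(<<16#AB, 16#CD>>),
        <<"00FF">> = emqx_packet:encode_hex(<<0, 255>>),
        ok.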
hex(X) -> element( - X+1, {16#3030, 16#3031, 16#3032, 16#3033, 16#3034, 16#3035, 16#3036, 16#3037, 16#3038, 16#3039, 16#3041, - 16#3042, 16#3043, 16#3044, 16#3045, 16#3046, - 16#3130, 16#3131, 16#3132, 16#3133, 16#3134, 16#3135, 16#3136, 16#3137, 16#3138, 16#3139, 16#3141, - 16#3142, 16#3143, 16#3144, 16#3145, 16#3146, - 16#3230, 16#3231, 16#3232, 16#3233, 16#3234, 16#3235, 16#3236, 16#3237, 16#3238, 16#3239, 16#3241, - 16#3242, 16#3243, 16#3244, 16#3245, 16#3246, - 16#3330, 16#3331, 16#3332, 16#3333, 16#3334, 16#3335, 16#3336, 16#3337, 16#3338, 16#3339, 16#3341, - 16#3342, 16#3343, 16#3344, 16#3345, 16#3346, - 16#3430, 16#3431, 16#3432, 16#3433, 16#3434, 16#3435, 16#3436, 16#3437, 16#3438, 16#3439, 16#3441, - 16#3442, 16#3443, 16#3444, 16#3445, 16#3446, - 16#3530, 16#3531, 16#3532, 16#3533, 16#3534, 16#3535, 16#3536, 16#3537, 16#3538, 16#3539, 16#3541, - 16#3542, 16#3543, 16#3544, 16#3545, 16#3546, - 16#3630, 16#3631, 16#3632, 16#3633, 16#3634, 16#3635, 16#3636, 16#3637, 16#3638, 16#3639, 16#3641, - 16#3642, 16#3643, 16#3644, 16#3645, 16#3646, - 16#3730, 16#3731, 16#3732, 16#3733, 16#3734, 16#3735, 16#3736, 16#3737, 16#3738, 16#3739, 16#3741, - 16#3742, 16#3743, 16#3744, 16#3745, 16#3746, - 16#3830, 16#3831, 16#3832, 16#3833, 16#3834, 16#3835, 16#3836, 16#3837, 16#3838, 16#3839, 16#3841, - 16#3842, 16#3843, 16#3844, 16#3845, 16#3846, - 16#3930, 16#3931, 16#3932, 16#3933, 16#3934, 16#3935, 16#3936, 16#3937, 16#3938, 16#3939, 16#3941, - 16#3942, 16#3943, 16#3944, 16#3945, 16#3946, - 16#4130, 16#4131, 16#4132, 16#4133, 16#4134, 16#4135, 16#4136, 16#4137, 16#4138, 16#4139, 16#4141, - 16#4142, 16#4143, 16#4144, 16#4145, 16#4146, - 16#4230, 16#4231, 16#4232, 16#4233, 16#4234, 16#4235, 16#4236, 16#4237, 16#4238, 16#4239, 16#4241, - 16#4242, 16#4243, 16#4244, 16#4245, 16#4246, - 16#4330, 16#4331, 16#4332, 16#4333, 16#4334, 16#4335, 16#4336, 16#4337, 16#4338, 16#4339, 16#4341, - 16#4342, 16#4343, 16#4344, 16#4345, 16#4346, - 16#4430, 16#4431, 16#4432, 16#4433, 16#4434, 16#4435, 16#4436, 16#4437, 16#4438, 16#4439, 16#4441, - 16#4442, 16#4443, 16#4444, 16#4445, 16#4446, - 16#4530, 16#4531, 16#4532, 16#4533, 16#4534, 16#4535, 16#4536, 16#4537, 16#4538, 16#4539, 16#4541, - 16#4542, 16#4543, 16#4544, 16#4545, 16#4546, - 16#4630, 16#4631, 16#4632, 16#4633, 16#4634, 16#4635, 16#4636, 16#4637, 16#4638, 16#4639, 16#4641, - 16#4642, 16#4643, 16#4644, 16#4645, 16#4646}). 
+ X + 1, + {16#3030, 16#3031, 16#3032, 16#3033, 16#3034, 16#3035, 16#3036, 16#3037, 16#3038, 16#3039, + 16#3041, 16#3042, 16#3043, 16#3044, 16#3045, 16#3046, 16#3130, 16#3131, 16#3132, + 16#3133, 16#3134, 16#3135, 16#3136, 16#3137, 16#3138, 16#3139, 16#3141, 16#3142, + 16#3143, 16#3144, 16#3145, 16#3146, 16#3230, 16#3231, 16#3232, 16#3233, 16#3234, + 16#3235, 16#3236, 16#3237, 16#3238, 16#3239, 16#3241, 16#3242, 16#3243, 16#3244, + 16#3245, 16#3246, 16#3330, 16#3331, 16#3332, 16#3333, 16#3334, 16#3335, 16#3336, + 16#3337, 16#3338, 16#3339, 16#3341, 16#3342, 16#3343, 16#3344, 16#3345, 16#3346, + 16#3430, 16#3431, 16#3432, 16#3433, 16#3434, 16#3435, 16#3436, 16#3437, 16#3438, + 16#3439, 16#3441, 16#3442, 16#3443, 16#3444, 16#3445, 16#3446, 16#3530, 16#3531, + 16#3532, 16#3533, 16#3534, 16#3535, 16#3536, 16#3537, 16#3538, 16#3539, 16#3541, + 16#3542, 16#3543, 16#3544, 16#3545, 16#3546, 16#3630, 16#3631, 16#3632, 16#3633, + 16#3634, 16#3635, 16#3636, 16#3637, 16#3638, 16#3639, 16#3641, 16#3642, 16#3643, + 16#3644, 16#3645, 16#3646, 16#3730, 16#3731, 16#3732, 16#3733, 16#3734, 16#3735, + 16#3736, 16#3737, 16#3738, 16#3739, 16#3741, 16#3742, 16#3743, 16#3744, 16#3745, + 16#3746, 16#3830, 16#3831, 16#3832, 16#3833, 16#3834, 16#3835, 16#3836, 16#3837, + 16#3838, 16#3839, 16#3841, 16#3842, 16#3843, 16#3844, 16#3845, 16#3846, 16#3930, + 16#3931, 16#3932, 16#3933, 16#3934, 16#3935, 16#3936, 16#3937, 16#3938, 16#3939, + 16#3941, 16#3942, 16#3943, 16#3944, 16#3945, 16#3946, 16#4130, 16#4131, 16#4132, + 16#4133, 16#4134, 16#4135, 16#4136, 16#4137, 16#4138, 16#4139, 16#4141, 16#4142, + 16#4143, 16#4144, 16#4145, 16#4146, 16#4230, 16#4231, 16#4232, 16#4233, 16#4234, + 16#4235, 16#4236, 16#4237, 16#4238, 16#4239, 16#4241, 16#4242, 16#4243, 16#4244, + 16#4245, 16#4246, 16#4330, 16#4331, 16#4332, 16#4333, 16#4334, 16#4335, 16#4336, + 16#4337, 16#4338, 16#4339, 16#4341, 16#4342, 16#4343, 16#4344, 16#4345, 16#4346, + 16#4430, 16#4431, 16#4432, 16#4433, 16#4434, 16#4435, 16#4436, 16#4437, 16#4438, + 16#4439, 16#4441, 16#4442, 16#4443, 16#4444, 16#4445, 16#4446, 16#4530, 16#4531, + 16#4532, 16#4533, 16#4534, 16#4535, 16#4536, 16#4537, 16#4538, 16#4539, 16#4541, + 16#4542, 16#4543, 16#4544, 16#4545, 16#4546, 16#4630, 16#4631, 16#4632, 16#4633, + 16#4634, 16#4635, 16#4636, 16#4637, 16#4638, 16#4639, 16#4641, 16#4642, 16#4643, + 16#4644, 16#4645, 16#4646} + ). diff --git a/apps/emqx/src/emqx_passwd.erl b/apps/emqx/src/emqx_passwd.erl index 792ee3d3b..c737e949e 100644 --- a/apps/emqx/src/emqx_passwd.erl +++ b/apps/emqx/src/emqx_passwd.erl @@ -16,37 +16,40 @@ -module(emqx_passwd). --export([ hash/2 - , hash_data/2 - , check_pass/3 - ]). +-export([ + hash/2, + hash_data/2, + check_pass/3 +]). --export_type([ password/0 - , password_hash/0 - , hash_type_simple/0 - , hash_type/0 - , salt_position/0 - , salt/0]). +-export_type([ + password/0, + password_hash/0, + hash_type_simple/0, + hash_type/0, + salt_position/0, + salt/0 +]). -include("logger.hrl"). --type(password() :: binary()). --type(password_hash() :: binary()). +-type password() :: binary(). +-type password_hash() :: binary(). --type(hash_type_simple() :: plain | md5 | sha | sha256 | sha512). --type(hash_type() :: hash_type_simple() | bcrypt | pbkdf2). +-type hash_type_simple() :: plain | md5 | sha | sha256 | sha512. +-type hash_type() :: hash_type_simple() | bcrypt | pbkdf2. --type(salt_position() :: prefix | suffix). --type(salt() :: binary()). +-type salt_position() :: prefix | suffix. +-type salt() :: binary(). 
--type(pbkdf2_mac_fun() :: md4 | md5 | ripemd160 | sha | sha224 | sha256 | sha384 | sha512). --type(pbkdf2_iterations() :: pos_integer()). --type(pbkdf2_dk_length() :: pos_integer() | undefined). +-type pbkdf2_mac_fun() :: md4 | md5 | ripemd160 | sha | sha224 | sha256 | sha384 | sha512. +-type pbkdf2_iterations() :: pos_integer(). +-type pbkdf2_dk_length() :: pos_integer() | undefined. --type(hash_params() :: - {bcrypt, salt()} | - {pbkdf2, pbkdf2_mac_fun(), salt(), pbkdf2_iterations(), pbkdf2_dk_length()} | - {hash_type_simple(), salt(), salt_position()}). +-type hash_params() :: + {bcrypt, salt()} + | {pbkdf2, pbkdf2_mac_fun(), salt(), pbkdf2_iterations(), pbkdf2_dk_length()} + | {hash_type_simple(), salt(), salt_position()}. -export_type([pbkdf2_mac_fun/0]). @@ -54,38 +57,38 @@ %% APIs %%-------------------------------------------------------------------- --spec(check_pass(hash_params(), password_hash(), password()) -> boolean()). +-spec check_pass(hash_params(), password_hash(), password()) -> boolean(). check_pass({pbkdf2, MacFun, Salt, Iterations, DKLength}, PasswordHash, Password) -> case pbkdf2(MacFun, Password, Salt, Iterations, DKLength) of {ok, HashPasswd} -> compare_secure(hex(HashPasswd), PasswordHash); - {error, _Reason}-> + {error, _Reason} -> false end; check_pass({bcrypt, Salt}, PasswordHash, Password) -> case bcrypt:hashpw(Password, Salt) of {ok, HashPasswd} -> compare_secure(list_to_binary(HashPasswd), PasswordHash); - {error, _Reason}-> + {error, _Reason} -> false end; check_pass({_SimpleHash, _Salt, _SaltPosition} = HashParams, PasswordHash, Password) -> Hash = hash(HashParams, Password), compare_secure(Hash, PasswordHash). --spec(hash(hash_params(), password()) -> password_hash()). +-spec hash(hash_params(), password()) -> password_hash(). hash({pbkdf2, MacFun, Salt, Iterations, DKLength}, Password) -> case pbkdf2(MacFun, Password, Salt, Iterations, DKLength) of {ok, HashPasswd} -> hex(HashPasswd); - {error, Reason}-> + {error, Reason} -> error(Reason) end; hash({bcrypt, Salt}, Password) -> case bcrypt:hashpw(Password, Salt) of {ok, HashPasswd} -> list_to_binary(HashPasswd); - {error, Reason}-> + {error, Reason} -> error(Reason) end; hash({SimpleHash, Salt, prefix}, Password) when is_binary(Password), is_binary(Salt) -> @@ -93,8 +96,7 @@ hash({SimpleHash, Salt, prefix}, Password) when is_binary(Password), is_binary(S hash({SimpleHash, Salt, suffix}, Password) when is_binary(Password), is_binary(Salt) -> hash_data(SimpleHash, <>). - --spec(hash_data(hash_type(), binary()) -> binary()). +-spec hash_data(hash_type(), binary()) -> binary(). hash_data(plain, Data) when is_binary(Data) -> Data; hash_data(md5, Data) when is_binary(Data) -> @@ -111,26 +113,24 @@ hash_data(sha512, Data) when is_binary(Data) -> %%-------------------------------------------------------------------- compare_secure(X, Y) when is_binary(X), is_binary(Y) -> - compare_secure(binary_to_list(X), binary_to_list(Y)); + compare_secure(binary_to_list(X), binary_to_list(Y)); compare_secure(X, Y) when is_list(X), is_list(Y) -> - case length(X) == length(Y) of - true -> - compare_secure(X, Y, 0); - false -> - false + case length(X) == length(Y) of + true -> + compare_secure(X, Y, 0); + false -> + false end. compare_secure([X | RestX], [Y | RestY], Result) -> - compare_secure(RestX, RestY, (X bxor Y) bor Result); + compare_secure(RestX, RestY, (X bxor Y) bor Result); compare_secure([], [], Result) -> - Result == 0. - + Result == 0. 
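Editor's note: for the simple hash types the salt is concatenated with the password (before it for prefix, after it for suffix) and the result is digested by hash_data/2; check_pass/3 then recomputes the hash with the same parameters and compares it against the stored value using the constant-time comparison above. A minimal round-trip sketch; the salt, password, and module name are made up for illustration:

    -module(passwd_example).
    -export([demo/0]).

    demo() ->
        Salt = <<"0123456789abcdef">>,
        Password = <<"my-password">>,
        HashParams = {sha256, Salt, prefix},
        %% hash/2 and check_pass/3 must agree on the hash parameters
        Stored = emqx_passwd:hash(HashParams, Password),
        true = emqx_passwd:check_pass(HashParams, Stored, Password),
        false = emqx_passwd:check_pass(HashParams, Stored, <<"wrong-password">>),
        ok.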
pbkdf2(MacFun, Password, Salt, Iterations, undefined) -> pbkdf2:pbkdf2(MacFun, Password, Salt, Iterations); pbkdf2(MacFun, Password, Salt, Iterations, DKLength) -> pbkdf2:pbkdf2(MacFun, Password, Salt, Iterations, DKLength). - hex(X) when is_binary(X) -> pbkdf2:to_hex(X). diff --git a/apps/emqx/src/emqx_pd.erl b/apps/emqx/src/emqx_pd.erl index fb122d420..fb2f848b6 100644 --- a/apps/emqx/src/emqx_pd.erl +++ b/apps/emqx/src/emqx_pd.erl @@ -19,34 +19,42 @@ -include("types.hrl"). --export([ get_counters/1 - , get_counter/1 - , inc_counter/2 - , reset_counter/1 - ]). +-export([ + get_counters/1, + get_counter/1, + inc_counter/2, + reset_counter/1 +]). --compile({inline, - [ get_counters/1 - , get_counter/1 - , inc_counter/2 - , reset_counter/1 - ]}). +-compile( + {inline, [ + get_counters/1, + get_counter/1, + inc_counter/2, + reset_counter/1 + ]} +). --type(key() :: term()). +-type key() :: term(). --spec(get_counters(list(key())) -> list({key(), number()})). +-spec get_counters(list(key())) -> list({key(), number()}). get_counters(Keys) when is_list(Keys) -> [{Key, emqx_pd:get_counter(Key)} || Key <- Keys]. --spec(get_counter(key()) -> number()). +-spec get_counter(key()) -> number(). get_counter(Key) -> - case get(Key) of undefined -> 0; Cnt -> Cnt end. + case get(Key) of + undefined -> 0; + Cnt -> Cnt + end. --spec(inc_counter(key(), number()) -> maybe(number())). +-spec inc_counter(key(), number()) -> maybe(number()). inc_counter(Key, Inc) -> put(Key, get_counter(Key) + Inc). --spec(reset_counter(key()) -> number()). +-spec reset_counter(key()) -> number(). reset_counter(Key) -> - case put(Key, 0) of undefined -> 0; Cnt -> Cnt end. - + case put(Key, 0) of + undefined -> 0; + Cnt -> Cnt + end. diff --git a/apps/emqx/src/emqx_persistent_session.erl b/apps/emqx/src/emqx_persistent_session.erl index 4d6798834..52cbbd3a9 100644 --- a/apps/emqx/src/emqx_persistent_session.erl +++ b/apps/emqx/src/emqx_persistent_session.erl @@ -16,42 +16,47 @@ -module(emqx_persistent_session). --export([ is_store_enabled/0 - , init_db_backend/0 - , storage_type/0 - ]). +-export([ + is_store_enabled/0, + init_db_backend/0, + storage_type/0 +]). --export([ discard/2 - , discard_if_present/1 - , lookup/1 - , persist/3 - , persist_message/1 - , pending/1 - , pending/2 - , resume/3 - ]). +-export([ + discard/2, + discard_if_present/1, + lookup/1, + persist/3, + persist_message/1, + pending/1, + pending/2, + resume/3 +]). --export([ add_subscription/3 - , remove_subscription/3 - ]). +-export([ + add_subscription/3, + remove_subscription/3 +]). --export([ mark_as_delivered/2 - , mark_resume_begin/1 - ]). +-export([ + mark_as_delivered/2, + mark_resume_begin/1 +]). --export([ pending_messages_in_db/2 - , delete_session_message/1 - , gc_session_messages/1 - , session_message_info/2 - ]). +-export([ + pending_messages_in_db/2, + delete_session_message/1, + gc_session_messages/1, + session_message_info/2 +]). --export([ delete_message/1 - , first_message_id/0 - , next_message_id/1 - ]). +-export([ + delete_message/1, + first_message_id/0, + next_message_id/1 +]). --export_type([ sess_msg_key/0 - ]). +-export_type([sess_msg_key/0]). -include("emqx.hrl"). -include("emqx_persistent_session.hrl"). @@ -59,7 +64,8 @@ -compile({inline, [is_store_enabled/0]}). --define(MAX_EXPIRY_INTERVAL, 4294967295000). %% 16#FFFFFFFF * 1000 +%% 16#FFFFFFFF * 1000 +-define(MAX_EXPIRY_INTERVAL, 4294967295000). %% NOTE: Order is significant because of traversal order of the table. -define(MARKER, 3). @@ -67,12 +73,11 @@ -define(UNDELIVERED, 1). 
-define(ABANDONED, 0). - -type bin_timestamp() :: <<_:64>>. -opaque sess_msg_key() :: - {emqx_guid:guid(), emqx_guid:guid(), emqx_types:topic(), ?UNDELIVERED | ?DELIVERED} - | {emqx_guid:guid(), emqx_guid:guid(), <<>> , ?MARKER} - | {emqx_guid:guid(), <<>> , bin_timestamp() , ?ABANDONED}. + {emqx_guid:guid(), emqx_guid:guid(), emqx_types:topic(), ?UNDELIVERED | ?DELIVERED} + | {emqx_guid:guid(), emqx_guid:guid(), <<>>, ?MARKER} + | {emqx_guid:guid(), <<>>, bin_timestamp(), ?ABANDONED}. -type gc_traverse_fun() :: fun(('delete' | 'marker' | 'abandoned', sess_msg_key()) -> 'ok'). @@ -82,14 +87,16 @@ init_db_backend() -> case is_store_enabled() of - true -> + true -> StorageType = storage_type(), ok = emqx_trie:create_session_trie(StorageType), ok = emqx_session_router:create_router_tab(StorageType), case StorageType of disc -> emqx_persistent_session_mnesia_disc_backend:create_tables(), - persistent_term:put(?db_backend_key, emqx_persistent_session_mnesia_disc_backend); + persistent_term:put( + ?db_backend_key, emqx_persistent_session_mnesia_disc_backend + ); ram -> emqx_persistent_session_mnesia_ram_backend:create_tables(), persistent_term:put(?db_backend_key, emqx_persistent_session_mnesia_ram_backend) @@ -112,7 +119,7 @@ storage_type() -> -spec session_message_info('timestamp' | 'sessionID', sess_msg_key()) -> term(). session_message_info(timestamp, {_, <<>>, <>, ?ABANDONED}) -> TS; -session_message_info(timestamp, {_, GUID, _ , _ }) -> emqx_guid:timestamp(GUID); +session_message_info(timestamp, {_, GUID, _, _}) -> emqx_guid:timestamp(GUID); session_message_info(sessionID, {SessionID, _, _, _}) -> SessionID. %%-------------------------------------------------------------------- @@ -147,7 +154,7 @@ lookup_session_store(ClientID) -> ?db_backend:lookup_session_store(ClientID). put_session_message({_, _, _, _} = Key) -> - ?db_backend:put_session_message(#session_msg{ key = Key }). + ?db_backend:put_session_message(#session_msg{key = Key}). put_message(Msg) -> ?db_backend:put_message(Msg). @@ -164,29 +171,37 @@ pending_messages_in_db(SessionID, MarkerIds) -> %% The timestamp (TS) is the last time a client interacted with the session, %% or when the client disconnected. --spec persist(emqx_types:clientinfo(), - emqx_types:conninfo(), - emqx_session:session()) -> emqx_session:session(). +-spec persist( + emqx_types:clientinfo(), + emqx_types:conninfo(), + emqx_session:session() +) -> emqx_session:session(). -persist(#{ clientid := ClientID }, ConnInfo, Session) -> +persist(#{clientid := ClientID}, ConnInfo, Session) -> case ClientID == undefined orelse not emqx_session:info(is_persistent, Session) of - true -> Session; + true -> + Session; false -> - SS = #session_store{ client_id = ClientID - , expiry_interval = maps:get(expiry_interval, ConnInfo) - , ts = timestamp_from_conninfo(ConnInfo) - , session = Session}, + SS = #session_store{ + client_id = ClientID, + expiry_interval = maps:get(expiry_interval, ConnInfo), + ts = timestamp_from_conninfo(ConnInfo), + session = Session + }, case persistent_session_status(SS) of - not_persistent -> Session; - expired -> discard(ClientID, Session); - persistent -> put_session_store(SS), - Session + not_persistent -> + Session; + expired -> + discard(ClientID, Session); + persistent -> + put_session_store(SS), + Session end end. timestamp_from_conninfo(ConnInfo) -> case maps:get(disconnected_at, ConnInfo, undefined) of - undefined -> erlang:system_time(millisecond); + undefined -> erlang:system_time(millisecond); Disconnect -> Disconnect end. 
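Sketch (not part of the patch): the expiry rule that persist/3 relies on, via persistent_session_status/1 further down in this module, written out as a hypothetical standalone predicate. Intervals and timestamps are in milliseconds; 4294967295000 is the ?MAX_EXPIRY_INTERVAL sentinel meaning the session never expires.

%% Hypothetical helper, equivalent in spirit to persistent_session_status/1:
is_expired(4294967295000, _DisconnectedAtMs) ->
    %% ?MAX_EXPIRY_INTERVAL: never expires
    false;
is_expired(ExpiryIntervalMs, DisconnectedAtMs) ->
    ExpiryIntervalMs + DisconnectedAtMs =< erlang:system_time(millisecond).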
@@ -196,11 +211,12 @@ lookup(ClientID) when is_binary(ClientID) -> none; true -> case lookup_session_store(ClientID) of - none -> none; + none -> + none; {value, #session_store{session = S} = SS} -> case persistent_session_status(SS) of - expired -> {expired, S}; - persistent -> {persistent, S} + expired -> {expired, S}; + persistent -> {persistent, S} end end end. @@ -208,7 +224,8 @@ lookup(ClientID) when is_binary(ClientID) -> -spec discard_if_present(binary()) -> 'ok'. discard_if_present(ClientID) -> case lookup(ClientID) of - none -> ok; + none -> + ok; {Tag, Session} when Tag =:= persistent; Tag =:= expired -> _ = discard(ClientID, Session), ok @@ -218,12 +235,12 @@ discard_if_present(ClientID) -> discard(ClientID, Session) -> discard_opt(is_store_enabled(), ClientID, Session). -discard_opt(false,_ClientID, Session) -> +discard_opt(false, _ClientID, Session) -> emqx_session:set_field(is_persistent, false, Session); discard_opt(true, ClientID, Session) -> delete_session_store(ClientID), SessionID = emqx_session:info(id, Session), - put_session_message({SessionID, <<>>, << (erlang:system_time(microsecond)) : 64>>, ?ABANDONED}), + put_session_message({SessionID, <<>>, <<(erlang:system_time(microsecond)):64>>, ?ABANDONED}), Subscriptions = emqx_session:info(subscriptions, Session), emqx_session_router:delete_routes(SessionID, Subscriptions), emqx_session:set_field(is_persistent, false, Session). @@ -236,7 +253,7 @@ mark_resume_begin(SessionID) -> add_subscription(TopicFilter, SessionID, true = _IsPersistent) -> case is_store_enabled() of - true -> emqx_session_router:do_add_route(TopicFilter, SessionID); + true -> emqx_session_router:do_add_route(TopicFilter, SessionID); false -> ok end; add_subscription(_TopicFilter, _SessionID, false = _IsPersistent) -> @@ -244,7 +261,7 @@ add_subscription(_TopicFilter, _SessionID, false = _IsPersistent) -> remove_subscription(TopicFilter, SessionID, true = _IsPersistent) -> case is_store_enabled() of - true -> emqx_session_router:do_delete_route(TopicFilter, SessionID); + true -> emqx_session_router:do_delete_route(TopicFilter, SessionID); false -> ok end; remove_subscription(_TopicFilter, _SessionID, false = _IsPersistent) -> @@ -255,8 +272,8 @@ remove_subscription(_TopicFilter, _SessionID, false = _IsPersistent) -> %%-------------------------------------------------------------------- %% Must be called inside a emqx_cm_locker transaction. --spec resume(emqx_types:clientinfo(), emqx_types:conninfo(), emqx_session:session() - ) -> {emqx_session:session(), [emqx_types:deliver()]}. +-spec resume(emqx_types:clientinfo(), emqx_types:conninfo(), emqx_session:session()) -> + {emqx_session:session(), [emqx_types:deliver()]}. resume(ClientInfo = #{clientid := ClientID}, ConnInfo, Session) -> SessionID = emqx_session:info(id, Session), ?tp(ps_resuming, #{from => db, sid => SessionID}), @@ -267,8 +284,10 @@ resume(ClientInfo = #{clientid := ClientID}, ConnInfo, Session) -> ?tp(ps_initial_pendings, #{sid => SessionID}), Pendings1 = pending(SessionID), Pendings2 = emqx_session:ignore_local(ClientInfo, Pendings1, ClientID, Session), - ?tp(ps_got_initial_pendings, #{ sid => SessionID - , msgs => Pendings1}), + ?tp(ps_got_initial_pendings, #{ + sid => SessionID, + msgs => Pendings1 + }), %% 2. Enqueue messages to mimic that the process was alive %% when the messages were delivered. 
@@ -276,8 +295,10 @@ resume(ClientInfo = #{clientid := ClientID}, ConnInfo, Session) -> Session1 = emqx_session:enqueue(ClientInfo, Pendings2, Session), Session2 = persist(ClientInfo, ConnInfo, Session1), mark_as_delivered(SessionID, Pendings2), - ?tp(ps_persist_pendings_msgs, #{ msgs => Pendings2 - , sid => SessionID}), + ?tp(ps_persist_pendings_msgs, #{ + msgs => Pendings2, + sid => SessionID + }), %% 3. Notify writers that we are resuming. %% They will buffer new messages. @@ -295,14 +316,18 @@ resume(ClientInfo = #{clientid := ClientID}, ConnInfo, Session) -> MarkerIDs = [Marker || {_, Marker} <- NodeMarkers], Pendings3 = pending(SessionID, MarkerIDs), Pendings4 = emqx_session:ignore_local(ClientInfo, Pendings3, ClientID, Session), - ?tp(ps_marker_pendings_msgs, #{ sid => SessionID - , msgs => Pendings4}), + ?tp(ps_marker_pendings_msgs, #{ + sid => SessionID, + msgs => Pendings4 + }), %% 6. Get pending messages from writers. ?tp(ps_resume_end, #{sid => SessionID}), WriterPendings = resume_end(Nodes, SessionID), - ?tp(ps_writer_pendings, #{ msgs => WriterPendings - , sid => SessionID}), + ?tp(ps_writer_pendings, #{ + msgs => WriterPendings, + sid => SessionID + }), %% 7. Drain the inbox and usort the messages %% with the pending messages. (Should be done by caller.) @@ -314,30 +339,32 @@ resume_begin(Nodes, SessionID) -> resume_end(Nodes, SessionID) -> Res = emqx_persistent_session_proto_v1:resume_end(Nodes, self(), SessionID), - ?tp(ps_erpc_multical_result, #{ res => Res, sid => SessionID }), + ?tp(ps_erpc_multical_result, #{res => Res, sid => SessionID}), %% TODO: Should handle the errors - [ {deliver, STopic, M} - || {ok, {ok, Messages}} <- Res, - {{M, STopic}} <- Messages + [ + {deliver, STopic, M} + || {ok, {ok, Messages}} <- Res, + {{M, STopic}} <- Messages ]. - %%-------------------------------------------------------------------- %% Messages API %%-------------------------------------------------------------------- persist_message(Msg) -> case is_store_enabled() of - true -> do_persist_message(Msg); + true -> do_persist_message(Msg); false -> ok end. do_persist_message(Msg) -> case emqx_message:get_flag(dup, Msg) orelse emqx_message:is_sys(Msg) of - true -> ok; + true -> + ok; false -> case emqx_session_router:match_routes(emqx_message:topic(Msg)) of - [] -> ok; + [] -> + ok; Routes -> put_message(Msg), MsgId = emqx_message:id(Msg), @@ -345,7 +372,7 @@ do_persist_message(Msg) -> end end. -persist_message_routes([#route{dest = SessionID, topic = STopic}|Left], MsgId, Msg) -> +persist_message_routes([#route{dest = SessionID, topic = STopic} | Left], MsgId, Msg) -> ?tp(ps_persist_msg, #{sid => SessionID, payload => emqx_message:payload(Msg)}), put_session_message({SessionID, MsgId, STopic, ?UNDELIVERED}), emqx_session_router:buffer(SessionID, STopic, Msg), @@ -355,11 +382,11 @@ persist_message_routes([], _MsgId, _Msg) -> mark_as_delivered(SessionID, List) -> case is_store_enabled() of - true -> do_mark_as_delivered(SessionID, List); + true -> do_mark_as_delivered(SessionID, List); false -> ok end. -do_mark_as_delivered(SessionID, [{deliver, STopic, Msg}|Left]) -> +do_mark_as_delivered(SessionID, [{deliver, STopic, Msg} | Left]) -> MsgID = emqx_message:id(Msg), case next_session_message({SessionID, MsgID, STopic, ?ABANDONED}) of {SessionID, MsgID, STopic, ?UNDELIVERED} = Key -> @@ -374,12 +401,12 @@ do_mark_as_delivered(_SessionID, []) -> ok. -spec pending(emqx_session:sessionID()) -> - [{emqx_types:message(), STopic :: binary()}]. + [{emqx_types:message(), STopic :: binary()}]. 
pending(SessionID) -> pending_messages_in_db(SessionID, []). -spec pending(emqx_session:sessionID(), MarkerIDs :: [emqx_guid:guid()]) -> - [{emqx_types:message(), STopic :: binary()}]. + [{emqx_types:message(), STopic :: binary()}]. pending(SessionID, MarkerIds) -> %% TODO: Handle lost MarkerIDs case emqx_session_router:pending(SessionID, MarkerIds) of @@ -401,7 +428,7 @@ persistent_session_status(#session_store{expiry_interval = ?MAX_EXPIRY_INTERVAL} persistent; persistent_session_status(#session_store{expiry_interval = E, ts = TS}) -> case E + TS > erlang:system_time(millisecond) of - true -> persistent; + true -> persistent; false -> expired end. @@ -413,20 +440,25 @@ pending_messages_fun(SessionID, MarkerIds) -> fun() -> case pending_messages({SessionID, <<>>, <<>>, ?DELIVERED}, [], MarkerIds) of {Pending, []} -> read_pending_msgs(Pending, []); - {_Pending, [_|_]} -> incomplete + {_Pending, [_ | _]} -> incomplete end end. -read_pending_msgs([{MsgId, STopic}|Left], Acc) -> - Acc1 = try [{deliver, STopic, get_message(MsgId)}|Acc] - catch error:{msg_not_found, _} -> - HighwaterMark = erlang:system_time(microsecond) - - emqx_config:get(?msg_retain) * 1000, - case emqx_guid:timestamp(MsgId) < HighwaterMark of - true -> Acc; %% Probably cleaned by GC - false -> error({msg_not_found, MsgId}) - end - end, +read_pending_msgs([{MsgId, STopic} | Left], Acc) -> + Acc1 = + try + [{deliver, STopic, get_message(MsgId)} | Acc] + catch + error:{msg_not_found, _} -> + HighwaterMark = + erlang:system_time(microsecond) - + emqx_config:get(?msg_retain) * 1000, + case emqx_guid:timestamp(MsgId) < HighwaterMark of + %% Probably cleaned by GC + true -> Acc; + false -> error({msg_not_found, MsgId}) + end + end, read_pending_msgs(Left, Acc1); read_pending_msgs([], Acc) -> lists:reverse(Acc). @@ -450,21 +482,24 @@ pending_messages({SessionID, PrevMsgId, PrevSTopic, PrevTag} = PrevKey, Acc, Mar MarkerIds1 = MarkerIds -- [MsgId], case PrevTag =:= ?UNDELIVERED of false -> pending_messages(Key, Acc, MarkerIds1); - true -> pending_messages(Key, [{PrevMsgId, PrevSTopic}|Acc], MarkerIds1) + true -> pending_messages(Key, [{PrevMsgId, PrevSTopic} | Acc], MarkerIds1) end; - {S, MsgId, STopic, ?DELIVERED} = Key when S =:= SessionID, - MsgId =:= PrevMsgId, - STopic =:= PrevSTopic -> + {S, MsgId, STopic, ?DELIVERED} = Key when + S =:= SessionID, + MsgId =:= PrevMsgId, + STopic =:= PrevSTopic + -> pending_messages(Key, Acc, MarkerIds); {S, _MsgId, _STopic, _Tag} = Key when S =:= SessionID -> case PrevTag =:= ?UNDELIVERED of false -> pending_messages(Key, Acc, MarkerIds); - true -> pending_messages(Key, [{PrevMsgId, PrevSTopic}|Acc], MarkerIds) + true -> pending_messages(Key, [{PrevMsgId, PrevSTopic} | Acc], MarkerIds) end; - _What -> %% Next sessionID or '$end_of_table' + %% Next sessionID or '$end_of_table' + _What -> case PrevTag =:= ?UNDELIVERED of false -> {lists:reverse(Acc), MarkerIds}; - true -> {lists:reverse([{PrevMsgId, PrevSTopic}|Acc]), MarkerIds} + true -> {lists:reverse([{PrevMsgId, PrevSTopic} | Acc]), MarkerIds} end end. 
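Note (illustration only): the sess_msg_key() shapes that pending_messages/3 above and the GC below traverse, using the tag values declared earlier in this module (?MARKER = 3, ?UNDELIVERED = 1, ?ABANDONED = 0; ?DELIVERED sits between them with value 2 in the EMQX tree). The abandoned key carries its timestamp as a 64-bit binary.

%% Hypothetical sample keys for one session:
example_sess_msg_keys(SessionID, MsgId, STopic) ->
    TS = erlang:system_time(microsecond),
    [
        {SessionID, <<>>, <<TS:64>>, 0},   %% ?ABANDONED: session was discarded at TS
        {SessionID, MsgId, STopic, 1},     %% ?UNDELIVERED: message persisted for the session
        {SessionID, MsgId, STopic, 2},     %% ?DELIVERED: delivery marked as done
        {SessionID, MsgId, <<>>, 3}        %% ?MARKER: resume-begin marker
    ].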
@@ -495,16 +530,20 @@ gc_traverse({S, _MsgID, <<>>, ?MARKER} = Key, SessionID, Abandoned, Fun) -> ok = Fun(marker, Key), NewAbandoned = S =:= SessionID andalso Abandoned, gc_traverse(next_session_message(Key), S, NewAbandoned, Fun); -gc_traverse({S, _MsgID, _STopic, _Tag} = Key, SessionID, Abandoned, Fun) when Abandoned andalso - S =:= SessionID -> +gc_traverse({S, _MsgID, _STopic, _Tag} = Key, SessionID, Abandoned, Fun) when + Abandoned andalso + S =:= SessionID +-> %% Delete all messages from an abandoned session. ok = Fun(delete, Key), gc_traverse(next_session_message(Key), S, Abandoned, Fun); gc_traverse({S, MsgID, STopic, ?UNDELIVERED} = Key, SessionID, Abandoned, Fun) -> case next_session_message(Key) of - {S1, M, ST, ?DELIVERED} = NextKey when S1 =:= S andalso - MsgID =:= M andalso - STopic =:= ST -> + {S1, M, ST, ?DELIVERED} = NextKey when + S1 =:= S andalso + MsgID =:= M andalso + STopic =:= ST + -> %% We have both markers for the same message/topic so it is safe to delete both. ok = Fun(delete, Key), ok = Fun(delete, NextKey), diff --git a/apps/emqx/src/emqx_persistent_session.hrl b/apps/emqx/src/emqx_persistent_session.hrl index e50841564..094963163 100644 --- a/apps/emqx/src/emqx_persistent_session.hrl +++ b/apps/emqx/src/emqx_persistent_session.hrl @@ -23,13 +23,17 @@ -define(MSG_TAB_DISC, emqx_persistent_msg_disc). -define(MSG_TAB_RAM, emqx_persistent_msg_ram). --record(session_store, { client_id :: binary() - , expiry_interval :: non_neg_integer() - , ts :: non_neg_integer() - , session :: emqx_session:session()}). +-record(session_store, { + client_id :: binary(), + expiry_interval :: non_neg_integer(), + ts :: non_neg_integer(), + session :: emqx_session:session() +}). --record(session_msg, {key :: emqx_persistent_session:sess_msg_key(), - val = [] :: []}). +-record(session_msg, { + key :: emqx_persistent_session:sess_msg_key(), + val = [] :: [] +}). -define(db_backend_key, [persistent_session_store, db_backend]). -define(is_enabled_key, [persistent_session_store, enabled]). diff --git a/apps/emqx/src/emqx_persistent_session_dummy_backend.erl b/apps/emqx/src/emqx_persistent_session_dummy_backend.erl index a29454e02..85b0b99b1 100644 --- a/apps/emqx/src/emqx_persistent_session_dummy_backend.erl +++ b/apps/emqx/src/emqx_persistent_session_dummy_backend.erl @@ -18,20 +18,21 @@ -include("emqx_persistent_session.hrl"). --export([ first_message_id/0 - , next_message_id/1 - , delete_message/1 - , first_session_message/0 - , next_session_message/1 - , delete_session_message/1 - , put_session_store/1 - , delete_session_store/1 - , lookup_session_store/1 - , put_session_message/1 - , put_message/1 - , get_message/1 - , ro_transaction/1 - ]). +-export([ + first_message_id/0, + next_message_id/1, + delete_message/1, + first_session_message/0, + next_session_message/1, + delete_session_message/1, + put_session_store/1, + delete_session_store/1, + lookup_session_store/1, + put_session_message/1, + put_message/1, + get_message/1, + ro_transaction/1 +]). first_message_id() -> '$end_of_table'. @@ -73,4 +74,3 @@ get_message(_MsgId) -> ro_transaction(Fun) -> Fun(). - diff --git a/apps/emqx/src/emqx_persistent_session_gc.erl b/apps/emqx/src/emqx_persistent_session_gc.erl index f2e57bbb2..54ba0adcc 100644 --- a/apps/emqx/src/emqx_persistent_session_gc.erl +++ b/apps/emqx/src/emqx_persistent_session_gc.erl @@ -23,17 +23,19 @@ -export([start_link/0]). %% gen_server callbacks --export([ init/1 - , handle_call/3 - , handle_cast/2 - , handle_info/2 - , terminate/2 - ]). 
+-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2 +]). -ifdef(TEST). --export([ session_gc_worker/2 - , message_gc_worker/0 - ]). +-export([ + session_gc_worker/2, + message_gc_worker/0 +]). -endif. -define(SERVER, ?MODULE). @@ -85,14 +87,18 @@ terminate(_Reason, _State) -> start_session_gc_timer(State) -> Interval = emqx_config:get([persistent_session_store, session_message_gc_interval]), - State#{ session_gc_timer => erlang:start_timer(Interval, self(), session_gc_timeout)}. + State#{session_gc_timer => erlang:start_timer(Interval, self(), session_gc_timeout)}. -session_gc_timeout(Ref, #{ session_gc_timer := R } = State) when R =:= Ref -> +session_gc_timeout(Ref, #{session_gc_timer := R} = State) when R =:= Ref -> %% Prevent overlapping processes. GCPid = maps:get(session_gc_pid, State, undefined), case GCPid =/= undefined andalso erlang:is_process_alive(GCPid) of - true -> start_session_gc_timer(State); - false -> start_session_gc_timer(State#{ session_gc_pid => proc_lib:spawn_link(fun session_gc_worker/0)}) + true -> + start_session_gc_timer(State); + false -> + start_session_gc_timer(State#{ + session_gc_pid => proc_lib:spawn_link(fun session_gc_worker/0) + }) end; session_gc_timeout(_Ref, State) -> State. @@ -105,13 +111,13 @@ session_gc_worker(delete, Key) -> session_gc_worker(marker, Key) -> TS = emqx_persistent_session:session_message_info(timestamp, Key), case TS + ?MARKER_GRACE_PERIOD < erlang:system_time(microsecond) of - true -> emqx_persistent_session:delete_session_message(Key); + true -> emqx_persistent_session:delete_session_message(Key); false -> ok end; session_gc_worker(abandoned, Key) -> TS = emqx_persistent_session:session_message_info(timestamp, Key), case TS + ?ABANDONED_GRACE_PERIOD < erlang:system_time(microsecond) of - true -> emqx_persistent_session:delete_session_message(Key); + true -> emqx_persistent_session:delete_session_message(Key); false -> ok end. @@ -124,14 +130,18 @@ session_gc_worker(abandoned, Key) -> %% We sacrifice space for simplicity at this point. start_message_gc_timer(State) -> Interval = emqx_config:get([persistent_session_store, session_message_gc_interval]), - State#{ message_gc_timer => erlang:start_timer(Interval, self(), message_gc_timeout)}. + State#{message_gc_timer => erlang:start_timer(Interval, self(), message_gc_timeout)}. -message_gc_timeout(Ref, #{ message_gc_timer := R } = State) when R =:= Ref -> +message_gc_timeout(Ref, #{message_gc_timer := R} = State) when R =:= Ref -> %% Prevent overlapping processes. GCPid = maps:get(message_gc_pid, State, undefined), case GCPid =/= undefined andalso erlang:is_process_alive(GCPid) of - true -> start_message_gc_timer(State); - false -> start_message_gc_timer(State#{ message_gc_pid => proc_lib:spawn_link(fun message_gc_worker/0)}) + true -> + start_message_gc_timer(State); + false -> + start_message_gc_timer(State#{ + message_gc_pid => proc_lib:spawn_link(fun message_gc_worker/0) + }) end; message_gc_timeout(_Ref, State) -> State. diff --git a/apps/emqx/src/emqx_persistent_session_mnesia_disc_backend.erl b/apps/emqx/src/emqx_persistent_session_mnesia_disc_backend.erl index 3273f9a40..3a4dd5b56 100644 --- a/apps/emqx/src/emqx_persistent_session_mnesia_disc_backend.erl +++ b/apps/emqx/src/emqx_persistent_session_mnesia_disc_backend.erl @@ -19,48 +19,60 @@ -include("emqx.hrl"). -include("emqx_persistent_session.hrl"). 
--export([ create_tables/0 - , first_message_id/0 - , next_message_id/1 - , delete_message/1 - , first_session_message/0 - , next_session_message/1 - , delete_session_message/1 - , put_session_store/1 - , delete_session_store/1 - , lookup_session_store/1 - , put_session_message/1 - , put_message/1 - , get_message/1 - , ro_transaction/1 - ]). +-export([ + create_tables/0, + first_message_id/0, + next_message_id/1, + delete_message/1, + first_session_message/0, + next_session_message/1, + delete_session_message/1, + put_session_store/1, + delete_session_store/1, + lookup_session_store/1, + put_session_message/1, + put_message/1, + get_message/1, + ro_transaction/1 +]). create_tables() -> ok = mria:create_table(?SESSION_STORE_DISC, [ - {type, set}, - {rlog_shard, ?PERSISTENT_SESSION_SHARD}, - {storage, disc_copies}, - {record_name, session_store}, - {attributes, record_info(fields, session_store)}, - {storage_properties, [{ets, [{read_concurrency, true}]}]}]), + {type, set}, + {rlog_shard, ?PERSISTENT_SESSION_SHARD}, + {storage, disc_copies}, + {record_name, session_store}, + {attributes, record_info(fields, session_store)}, + {storage_properties, [{ets, [{read_concurrency, true}]}]} + ]), ok = mria:create_table(?SESS_MSG_TAB_DISC, [ - {type, ordered_set}, - {rlog_shard, ?PERSISTENT_SESSION_SHARD}, - {storage, disc_copies}, - {record_name, session_msg}, - {attributes, record_info(fields, session_msg)}, - {storage_properties, [{ets, [{read_concurrency, true}, - {write_concurrency, true}]}]}]), + {type, ordered_set}, + {rlog_shard, ?PERSISTENT_SESSION_SHARD}, + {storage, disc_copies}, + {record_name, session_msg}, + {attributes, record_info(fields, session_msg)}, + {storage_properties, [ + {ets, [ + {read_concurrency, true}, + {write_concurrency, true} + ]} + ]} + ]), ok = mria:create_table(?MSG_TAB_DISC, [ - {type, ordered_set}, - {rlog_shard, ?PERSISTENT_SESSION_SHARD}, - {storage, disc_copies}, - {record_name, message}, - {attributes, record_info(fields, message)}, - {storage_properties, [{ets, [{read_concurrency, true}, - {write_concurrency, true}]}]}]). + {type, ordered_set}, + {rlog_shard, ?PERSISTENT_SESSION_SHARD}, + {storage, disc_copies}, + {record_name, message}, + {attributes, record_info(fields, message)}, + {storage_properties, [ + {ets, [ + {read_concurrency, true}, + {write_concurrency, true} + ]} + ]} + ]). first_session_message() -> mnesia:dirty_first(?SESS_MSG_TAB_DISC). @@ -107,4 +119,3 @@ get_message(MsgId) -> ro_transaction(Fun) -> {atomic, Res} = mria:ro_transaction(?PERSISTENT_SESSION_SHARD, Fun), Res. - diff --git a/apps/emqx/src/emqx_persistent_session_mnesia_ram_backend.erl b/apps/emqx/src/emqx_persistent_session_mnesia_ram_backend.erl index 7bcb1ae05..f6d956079 100644 --- a/apps/emqx/src/emqx_persistent_session_mnesia_ram_backend.erl +++ b/apps/emqx/src/emqx_persistent_session_mnesia_ram_backend.erl @@ -19,48 +19,60 @@ -include("emqx.hrl"). -include("emqx_persistent_session.hrl"). --export([ create_tables/0 - , first_message_id/0 - , next_message_id/1 - , delete_message/1 - , first_session_message/0 - , next_session_message/1 - , delete_session_message/1 - , put_session_store/1 - , delete_session_store/1 - , lookup_session_store/1 - , put_session_message/1 - , put_message/1 - , get_message/1 - , ro_transaction/1 - ]). 
+-export([ + create_tables/0, + first_message_id/0, + next_message_id/1, + delete_message/1, + first_session_message/0, + next_session_message/1, + delete_session_message/1, + put_session_store/1, + delete_session_store/1, + lookup_session_store/1, + put_session_message/1, + put_message/1, + get_message/1, + ro_transaction/1 +]). create_tables() -> ok = mria:create_table(?SESSION_STORE_RAM, [ - {type, set}, - {rlog_shard, ?PERSISTENT_SESSION_SHARD}, - {storage, ram_copies}, - {record_name, session_store}, - {attributes, record_info(fields, session_store)}, - {storage_properties, [{ets, [{read_concurrency, true}]}]}]), + {type, set}, + {rlog_shard, ?PERSISTENT_SESSION_SHARD}, + {storage, ram_copies}, + {record_name, session_store}, + {attributes, record_info(fields, session_store)}, + {storage_properties, [{ets, [{read_concurrency, true}]}]} + ]), ok = mria:create_table(?SESS_MSG_TAB_RAM, [ - {type, ordered_set}, - {rlog_shard, ?PERSISTENT_SESSION_SHARD}, - {storage, ram_copies}, - {record_name, session_msg}, - {attributes, record_info(fields, session_msg)}, - {storage_properties, [{ets, [{read_concurrency, true}, - {write_concurrency, true}]}]}]), + {type, ordered_set}, + {rlog_shard, ?PERSISTENT_SESSION_SHARD}, + {storage, ram_copies}, + {record_name, session_msg}, + {attributes, record_info(fields, session_msg)}, + {storage_properties, [ + {ets, [ + {read_concurrency, true}, + {write_concurrency, true} + ]} + ]} + ]), ok = mria:create_table(?MSG_TAB_RAM, [ - {type, ordered_set}, - {rlog_shard, ?PERSISTENT_SESSION_SHARD}, - {storage, ram_copies}, - {record_name, message}, - {attributes, record_info(fields, message)}, - {storage_properties, [{ets, [{read_concurrency, true}, - {write_concurrency, true}]}]}]). + {type, ordered_set}, + {rlog_shard, ?PERSISTENT_SESSION_SHARD}, + {storage, ram_copies}, + {record_name, message}, + {attributes, record_info(fields, message)}, + {storage_properties, [ + {ets, [ + {read_concurrency, true}, + {write_concurrency, true} + ]} + ]} + ]). first_session_message() -> mnesia:dirty_first(?SESS_MSG_TAB_RAM). @@ -107,4 +119,3 @@ get_message(MsgId) -> ro_transaction(Fun) -> {atomic, Res} = mria:ro_transaction(?PERSISTENT_SESSION_SHARD, Fun), Res. 
- diff --git a/apps/emqx/src/emqx_persistent_session_sup.erl b/apps/emqx/src/emqx_persistent_session_sup.erl index 1818bc486..d02640cbf 100644 --- a/apps/emqx/src/emqx_persistent_session_sup.erl +++ b/apps/emqx/src/emqx_persistent_session_sup.erl @@ -30,31 +30,40 @@ init([]) -> SessionTab = emqx_session_router:create_init_tab(), %% Resume worker sup - ResumeSup = #{id => router_worker_sup, - start => {emqx_session_router_worker_sup, start_link, [SessionTab]}, - restart => permanent, - shutdown => 2000, - type => supervisor, - modules => [emqx_session_router_worker_sup]}, + ResumeSup = #{ + id => router_worker_sup, + start => {emqx_session_router_worker_sup, start_link, [SessionTab]}, + restart => permanent, + shutdown => 2000, + type => supervisor, + modules => [emqx_session_router_worker_sup] + }, - SessionRouterPool = emqx_pool_sup:spec(session_router_pool, - [session_router_pool, hash, - {emqx_session_router, start_link, []}]), + SessionRouterPool = emqx_pool_sup:spec( + session_router_pool, + [ + session_router_pool, + hash, + {emqx_session_router, start_link, []} + ] + ), GCWorker = child_spec(emqx_persistent_session_gc, worker), - Spec = #{ strategy => one_for_all - , intensity => 0 - , period => 1 - }, + Spec = #{ + strategy => one_for_all, + intensity => 0, + period => 1 + }, {ok, {Spec, [ResumeSup, SessionRouterPool, GCWorker]}}. child_spec(Mod, worker) -> - #{id => Mod, - start => {Mod, start_link, []}, - restart => permanent, - shutdown => 15000, - type => worker, - modules => [Mod] - }. + #{ + id => Mod, + start => {Mod, start_link, []}, + restart => permanent, + shutdown => 15000, + type => worker, + modules => [Mod] + }. diff --git a/apps/emqx/src/emqx_pmon.erl b/apps/emqx/src/emqx_pmon.erl index 8185fdfda..99395d923 100644 --- a/apps/emqx/src/emqx_pmon.erl +++ b/apps/emqx/src/emqx_pmon.erl @@ -20,21 +20,23 @@ -export([new/0]). --export([ monitor/2 - , monitor/3 - , demonitor/2 - ]). +-export([ + monitor/2, + monitor/3, + demonitor/2 +]). --export([ find/2 - , erase/2 - , erase_all/2 - ]). +-export([ + find/2, + erase/2, + erase_all/2 +]). -export([count/1]). -export_type([pmon/0]). --opaque(pmon() :: {?MODULE, map()}). +-opaque pmon() :: {?MODULE, map()}. -define(PMON(Map), {?MODULE, Map}). @@ -42,55 +44,61 @@ %% APIs %%-------------------------------------------------------------------- --spec(new() -> pmon()). +-spec new() -> pmon(). new() -> ?PMON(maps:new()). --spec(monitor(pid(), pmon()) -> pmon()). +-spec monitor(pid(), pmon()) -> pmon(). monitor(Pid, PMon) -> ?MODULE:monitor(Pid, undefined, PMon). --spec(monitor(pid(), term(), pmon()) -> pmon()). +-spec monitor(pid(), term(), pmon()) -> pmon(). monitor(Pid, Val, PMon = ?PMON(Map)) -> case maps:is_key(Pid, Map) of - true -> PMon; + true -> + PMon; false -> Ref = erlang:monitor(process, Pid), ?PMON(maps:put(Pid, {Ref, Val}, Map)) end. --spec(demonitor(pid(), pmon()) -> pmon()). +-spec demonitor(pid(), pmon()) -> pmon(). demonitor(Pid, PMon = ?PMON(Map)) -> case maps:find(Pid, Map) of {ok, {Ref, _Val}} -> %% flush _ = erlang:demonitor(Ref, [flush]), ?PMON(maps:remove(Pid, Map)); - error -> PMon + error -> + PMon end. --spec(find(pid(), pmon()) -> error | {ok, term()}). +-spec find(pid(), pmon()) -> error | {ok, term()}. find(Pid, ?PMON(Map)) -> case maps:find(Pid, Map) of {ok, {_Ref, Val}} -> {ok, Val}; - error -> error + error -> + error end. --spec(erase(pid(), pmon()) -> pmon()). +-spec erase(pid(), pmon()) -> pmon(). erase(Pid, ?PMON(Map)) -> ?PMON(maps:remove(Pid, Map)). 
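Illustration (not part of the patch): typical use of the emqx_pmon API reformatted above. monitor/3 is idempotent per pid, and erase_all/2 (just below) returns the erased {Pid, Val} pairs together with the pruned monitor set.

%% Minimal sketch:
example_pmon() ->
    Pid = spawn(fun() -> receive stop -> ok end end),
    PMon0 = emqx_pmon:new(),
    PMon1 = emqx_pmon:monitor(Pid, some_value, PMon0),
    %% Second monitor of the same pid is a no-op:
    PMon1 = emqx_pmon:monitor(Pid, other_value, PMon1),
    {ok, some_value} = emqx_pmon:find(Pid, PMon1),
    1 = emqx_pmon:count(PMon1),
    {[{Pid, some_value}], PMon2} = emqx_pmon:erase_all([Pid], PMon1),
    0 = emqx_pmon:count(PMon2),
    Pid ! stop.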
--spec(erase_all([pid()], pmon()) -> {[{pid(), term()}], pmon()}). +-spec erase_all([pid()], pmon()) -> {[{pid(), term()}], pmon()}. erase_all(Pids, PMon0) -> lists:foldl( - fun(Pid, {Acc, PMon}) -> - case find(Pid, PMon) of - {ok, Val} -> - {[{Pid, Val}|Acc], erase(Pid, PMon)}; - error -> {Acc, PMon} - end - end, {[], PMon0}, Pids). + fun(Pid, {Acc, PMon}) -> + case find(Pid, PMon) of + {ok, Val} -> + {[{Pid, Val} | Acc], erase(Pid, PMon)}; + error -> + {Acc, PMon} + end + end, + {[], PMon0}, + Pids + ). --spec(count(pmon()) -> non_neg_integer()). +-spec count(pmon()) -> non_neg_integer(). count(?PMON(Map)) -> maps:size(Map). - diff --git a/apps/emqx/src/emqx_pool.erl b/apps/emqx/src/emqx_pool.erl index c4f52a93e..71459d928 100644 --- a/apps/emqx/src/emqx_pool.erl +++ b/apps/emqx/src/emqx_pool.erl @@ -21,49 +21,54 @@ -include("logger.hrl"). -include("types.hrl"). - %% APIs -export([start_link/2]). --export([ submit/1 - , submit/2 - , async_submit/1 - , async_submit/2 - ]). +-export([ + submit/1, + submit/2, + async_submit/1, + async_submit/2 +]). -ifdef(TEST). -export([worker/0, flush_async_tasks/0]). -endif. %% gen_server callbacks --export([ init/1 - , handle_call/3 - , handle_cast/2 - , handle_info/2 - , terminate/2 - , code_change/3 - ]). +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2, + code_change/3 +]). -define(POOL, ?MODULE). --type(task() :: fun() | mfa() | {fun(), Args :: list(any())}). +-type task() :: fun() | mfa() | {fun(), Args :: list(any())}. %%-------------------------------------------------------------------- %% APIs %%-------------------------------------------------------------------- %% @doc Start pool. --spec(start_link(atom(), pos_integer()) -> startlink_ret()). +-spec start_link(atom(), pos_integer()) -> startlink_ret(). start_link(Pool, Id) -> - gen_server:start_link({local, emqx_misc:proc_name(?MODULE, Id)}, - ?MODULE, [Pool, Id], [{hibernate_after, 1000}]). + gen_server:start_link( + {local, emqx_misc:proc_name(?MODULE, Id)}, + ?MODULE, + [Pool, Id], + [{hibernate_after, 1000}] + ). %% @doc Submit work to the pool. --spec(submit(task()) -> any()). +-spec submit(task()) -> any(). submit(Task) -> call({submit, Task}). --spec(submit(fun(), list(any())) -> any()). +-spec submit(fun(), list(any())) -> any(). submit(Fun, Args) -> call({submit, {Fun, Args}}). @@ -72,11 +77,11 @@ call(Req) -> gen_server:call(worker(), Req, infinity). %% @doc Submit work to the pool asynchronously. --spec(async_submit(task()) -> ok). +-spec async_submit(task()) -> ok. async_submit(Task) -> cast({async_submit, Task}). --spec(async_submit(fun(), list(any())) -> ok). +-spec async_submit(fun(), list(any())) -> ok. async_submit(Fun, Args) -> cast({async_submit, {Fun, Args}}). @@ -98,22 +103,23 @@ init([Pool, Id]) -> handle_call({submit, Task}, _From, State) -> {reply, catch run(Task), State}; - handle_call(Req, _From, State) -> ?SLOG(error, #{msg => "unexpected_call", call => Req}), {reply, ignored, State}. handle_cast({async_submit, Task}, State) -> - try run(Task) - catch Error:Reason:Stacktrace -> - ?SLOG(error, #{msg => "async_submit_error", - exception => Error, - reason => Reason, - stacktrace => Stacktrace - }) + try + run(Task) + catch + Error:Reason:Stacktrace -> + ?SLOG(error, #{ + msg => "async_submit_error", + exception => Error, + reason => Reason, + stacktrace => Stacktrace + }) end, {noreply, State}; - handle_cast(Msg, State) -> ?SLOG(error, #{msg => "unexpected_cast", cast => Msg}), {noreply, State}. 
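Sketch (not part of the patch): the task() shapes accepted by submit and async_submit above, assuming the default emqx_pool workers are running (they are started under the emqx supervision tree).

%% Minimal sketch of the accepted task() forms:
example_pool_tasks() ->
    %% a plain fun
    ok = emqx_pool:async_submit(fun() -> do_work end),
    %% an {M, F, Args} tuple
    ok = emqx_pool:async_submit({erlang, send, [self(), hello]}),
    %% a fun plus argument list, run synchronously; submit/2 wraps it as {Fun, Args}
    2 = emqx_pool:submit(fun erlang:max/2, [1, 2]).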
@@ -149,5 +155,12 @@ flush_async_tasks() -> Self = self(), L = lists:seq(1, 997), lists:foreach(fun(I) -> emqx_pool:async_submit(fun() -> Self ! {done, Ref, I} end, []) end, L), - lists:foreach(fun(I) -> receive {done, Ref, I} -> ok end end, L). + lists:foreach( + fun(I) -> + receive + {done, Ref, I} -> ok + end + end, + L + ). -endif. diff --git a/apps/emqx/src/emqx_pool_sup.erl b/apps/emqx/src/emqx_pool_sup.erl index 06c58568b..150e49983 100644 --- a/apps/emqx/src/emqx_pool_sup.erl +++ b/apps/emqx/src/emqx_pool_sup.erl @@ -22,64 +22,73 @@ -export([spec/1, spec/2]). --export([ start_link/0 - , start_link/3 - , start_link/4 - ]). +-export([ + start_link/0, + start_link/3, + start_link/4 +]). -export([init/1]). -define(POOL, emqx_pool). --spec(spec(list()) -> supervisor:child_spec()). +-spec spec(list()) -> supervisor:child_spec(). spec(Args) -> spec(pool_sup, Args). --spec(spec(any(), list()) -> supervisor:child_spec()). +-spec spec(any(), list()) -> supervisor:child_spec(). spec(ChildId, Args) -> - #{id => ChildId, - start => {?MODULE, start_link, Args}, - restart => transient, - shutdown => infinity, - type => supervisor, - modules => [?MODULE]}. + #{ + id => ChildId, + start => {?MODULE, start_link, Args}, + restart => transient, + shutdown => infinity, + type => supervisor, + modules => [?MODULE] + }. %% @doc Start the default pool supervisor. start_link() -> start_link(?POOL, random, {?POOL, start_link, []}). --spec(start_link(atom() | tuple(), atom(), mfargs()) - -> {ok, pid()} | {error, term()}). +-spec start_link(atom() | tuple(), atom(), mfargs()) -> + {ok, pid()} | {error, term()}. start_link(Pool, Type, MFA) -> start_link(Pool, Type, emqx_vm:schedulers(), MFA). --spec(start_link(atom() | tuple(), atom(), pos_integer(), mfargs()) - -> {ok, pid()} | {error, term()}). +-spec start_link(atom() | tuple(), atom(), pos_integer(), mfargs()) -> + {ok, pid()} | {error, term()}. start_link(Pool, Type, Size, MFA) -> supervisor:start_link(?MODULE, [Pool, Type, Size, MFA]). init([Pool, Type, Size, {M, F, Args}]) -> ok = ensure_pool(Pool, Type, [{size, Size}]), - {ok, {{one_for_one, 10, 3600}, [ - begin - ensure_pool_worker(Pool, {Pool, I}, I), - #{id => {M, I}, - start => {M, F, [Pool, I | Args]}, - restart => transient, - shutdown => 5000, - type => worker, - modules => [M]} - end || I <- lists:seq(1, Size)]}}. + {ok, + {{one_for_one, 10, 3600}, [ + begin + ensure_pool_worker(Pool, {Pool, I}, I), + #{ + id => {M, I}, + start => {M, F, [Pool, I | Args]}, + restart => transient, + shutdown => 5000, + type => worker, + modules => [M] + } + end + || I <- lists:seq(1, Size) + ]}}. ensure_pool(Pool, Type, Opts) -> - try gproc_pool:new(Pool, Type, Opts) + try + gproc_pool:new(Pool, Type, Opts) catch error:exists -> ok end. ensure_pool_worker(Pool, Name, Slot) -> - try gproc_pool:add_worker(Pool, Name, Slot) + try + gproc_pool:add_worker(Pool, Name, Slot) catch error:exists -> ok end. - diff --git a/apps/emqx/src/emqx_pqueue.erl b/apps/emqx/src/emqx_pqueue.erl index cd23ee8f8..1b3b2a463 100644 --- a/apps/emqx/src/emqx_pqueue.erl +++ b/apps/emqx/src/emqx_pqueue.erl @@ -39,63 +39,68 @@ -module(emqx_pqueue). --export([ new/0 - , is_queue/1 - , is_empty/1 - , len/1 - , plen/2 - , to_list/1 - , from_list/1 - , in/2 - , in/3 - , out/1 - , out/2 - , out_p/1 - , join/2 - , filter/2 - , fold/3 - , highest/1 - , shift/1 - ]). 
+-export([ + new/0, + is_queue/1, + is_empty/1, + len/1, + plen/2, + to_list/1, + from_list/1, + in/2, + in/3, + out/1, + out/2, + out_p/1, + join/2, + filter/2, + fold/3, + highest/1, + shift/1 +]). -export_type([q/0]). %%---------------------------------------------------------------------------- --type(priority() :: integer() | 'infinity'). --type(squeue() :: {queue, [any()], [any()], non_neg_integer()}). --type(pqueue() :: squeue() | {pqueue, [{priority(), squeue()}]}). --type(q() :: pqueue()). +-type priority() :: integer() | 'infinity'. +-type squeue() :: {queue, [any()], [any()], non_neg_integer()}. +-type pqueue() :: squeue() | {pqueue, [{priority(), squeue()}]}. +-type q() :: pqueue(). %%---------------------------------------------------------------------------- --spec(new() -> pqueue()). +-spec new() -> pqueue(). new() -> {queue, [], [], 0}. --spec(is_queue(any()) -> boolean()). +-spec is_queue(any()) -> boolean(). is_queue({queue, R, F, L}) when is_list(R), is_list(F), is_integer(L) -> true; is_queue({pqueue, Queues}) when is_list(Queues) -> - lists:all(fun ({infinity, Q}) -> is_queue(Q); - ({P, Q}) -> is_integer(P) andalso is_queue(Q) - end, Queues); + lists:all( + fun + ({infinity, Q}) -> is_queue(Q); + ({P, Q}) -> is_integer(P) andalso is_queue(Q) + end, + Queues + ); is_queue(_) -> false. --spec(is_empty(pqueue()) -> boolean()). +-spec is_empty(pqueue()) -> boolean(). is_empty({queue, [], [], 0}) -> true; is_empty(_) -> false. --spec(len(pqueue()) -> non_neg_integer()). +-spec len(pqueue()) -> non_neg_integer(). len({queue, _R, _F, L}) -> L; len({pqueue, Queues}) -> lists:sum([len(Q) || {_, Q} <- Queues]). --spec(plen(priority(), pqueue()) -> non_neg_integer()). +-spec plen(priority(), pqueue()) -> non_neg_integer(). plen(0, {queue, _R, _F, L}) -> L; plen(_, {queue, _R, _F, _}) -> @@ -103,84 +108,95 @@ plen(_, {queue, _R, _F, _}) -> plen(P, {pqueue, Queues}) -> case lists:keysearch(maybe_negate_priority(P), 1, Queues) of {value, {_, Q}} -> len(Q); - false -> 0 + false -> 0 end. --spec(to_list(pqueue()) -> [{priority(), any()}]). +-spec to_list(pqueue()) -> [{priority(), any()}]. to_list({queue, In, Out, _Len}) when is_list(In), is_list(Out) -> [{0, V} || V <- Out ++ lists:reverse(In, [])]; to_list({pqueue, Queues}) -> - [{maybe_negate_priority(P), V} || {P, Q} <- Queues, - {0, V} <- to_list(Q)]. + [ + {maybe_negate_priority(P), V} + || {P, Q} <- Queues, + {0, V} <- to_list(Q) + ]. --spec(from_list([{priority(), any()}]) -> pqueue()). +-spec from_list([{priority(), any()}]) -> pqueue(). from_list(L) -> - lists:foldl(fun ({P, E}, Q) -> in(E, P, Q) end, new(), L). + lists:foldl(fun({P, E}, Q) -> in(E, P, Q) end, new(), L). --spec(in(any(), pqueue()) -> pqueue()). +-spec in(any(), pqueue()) -> pqueue(). in(Item, Q) -> in(Item, 0, Q). --spec(in(any(), priority(), pqueue()) -> pqueue()). +-spec in(any(), priority(), pqueue()) -> pqueue(). 
in(X, 0, {queue, [_] = In, [], 1}) -> {queue, [X], In, 2}; in(X, 0, {queue, In, Out, Len}) when is_list(In), is_list(Out) -> - {queue, [X|In], Out, Len + 1}; + {queue, [X | In], Out, Len + 1}; in(X, Priority, _Q = {queue, [], [], 0}) -> in(X, Priority, {pqueue, []}); in(X, Priority, Q = {queue, _, _, _}) -> in(X, Priority, {pqueue, [{0, Q}]}); in(X, Priority, {pqueue, Queues}) -> P = maybe_negate_priority(Priority), - {pqueue, case lists:keysearch(P, 1, Queues) of - {value, {_, Q}} -> - lists:keyreplace(P, 1, Queues, {P, in(X, Q)}); - false when P == infinity -> - [{P, {queue, [X], [], 1}} | Queues]; - false -> - case Queues of - [{infinity, InfQueue} | Queues1] -> - [{infinity, InfQueue} | - lists:keysort(1, [{P, {queue, [X], [], 1}} | Queues1])]; - _ -> - lists:keysort(1, [{P, {queue, [X], [], 1}} | Queues]) - end - end}. + {pqueue, + case lists:keysearch(P, 1, Queues) of + {value, {_, Q}} -> + lists:keyreplace(P, 1, Queues, {P, in(X, Q)}); + false when P == infinity -> + [{P, {queue, [X], [], 1}} | Queues]; + false -> + case Queues of + [{infinity, InfQueue} | Queues1] -> + [ + {infinity, InfQueue} + | lists:keysort(1, [{P, {queue, [X], [], 1}} | Queues1]) + ]; + _ -> + lists:keysort(1, [{P, {queue, [X], [], 1}} | Queues]) + end + end}. --spec(out(pqueue()) -> {empty | {value, any()}, pqueue()}). +-spec out(pqueue()) -> {empty | {value, any()}, pqueue()}. out({queue, [], [], 0} = Q) -> {empty, Q}; out({queue, [V], [], 1}) -> {{value, V}, {queue, [], [], 0}}; -out({queue, [Y|In], [], Len}) -> - [V|Out] = lists:reverse(In, []), +out({queue, [Y | In], [], Len}) -> + [V | Out] = lists:reverse(In, []), {{value, V}, {queue, [Y], Out, Len - 1}}; out({queue, In, [V], Len}) when is_list(In) -> - {{value,V}, r2f(In, Len - 1)}; -out({queue, In,[V|Out], Len}) when is_list(In) -> + {{value, V}, r2f(In, Len - 1)}; +out({queue, In, [V | Out], Len}) when is_list(In) -> {{value, V}, {queue, In, Out, Len - 1}}; out({pqueue, [{P, Q} | Queues]}) -> {R, Q1} = out(Q), - NewQ = case is_empty(Q1) of - true -> case Queues of - [] -> {queue, [], [], 0}; - [{0, OnlyQ}] -> OnlyQ; - [_|_] -> {pqueue, Queues} - end; - false -> {pqueue, [{P, Q1} | Queues]} - end, + NewQ = + case is_empty(Q1) of + true -> + case Queues of + [] -> {queue, [], [], 0}; + [{0, OnlyQ}] -> OnlyQ; + [_ | _] -> {pqueue, Queues} + end; + false -> + {pqueue, [{P, Q1} | Queues]} + end, {R, NewQ}. --spec(shift(pqueue()) -> pqueue()). +-spec shift(pqueue()) -> pqueue(). shift(Q = {queue, _, _, _}) -> Q; shift({pqueue, []}) -> - {pqueue, []}; %% Shouldn't happen? -shift({pqueue, [Hd|Rest]}) -> - {pqueue, Rest ++ [Hd]}. %% Let's hope there are not many priorities. + %% Shouldn't happen? + {pqueue, []}; +shift({pqueue, [Hd | Rest]}) -> + %% Let's hope there are not many priorities. + {pqueue, Rest ++ [Hd]}. --spec(out_p(pqueue()) -> {empty | {value, any(), priority()}, pqueue()}). -out_p({queue, _, _, _} = Q) -> add_p(out(Q), 0); +-spec out_p(pqueue()) -> {empty | {value, any(), priority()}, pqueue()}. +out_p({queue, _, _, _} = Q) -> add_p(out(Q), 0); out_p({pqueue, [{P, _} | _]} = Q) -> add_p(out(Q), maybe_negate_priority(P)). 
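Illustration (not part of the patch): the priority semantics behind the clauses above. Integer priorities are negated internally so that larger numbers are dequeued first, and 'infinity' always goes to the front.

%% Minimal sketch:
example_pqueue() ->
    Q0 = emqx_pqueue:new(),
    Q1 = emqx_pqueue:in(low, 0, Q0),
    Q2 = emqx_pqueue:in(high, 10, Q1),
    Q3 = emqx_pqueue:in(urgent, infinity, Q2),
    infinity = emqx_pqueue:highest(Q3),
    {{value, urgent, infinity}, Q4} = emqx_pqueue:out_p(Q3),
    {{value, high, 10}, _Q5} = emqx_pqueue:out_p(Q4).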
out(0, {queue, _, _, _} = Q) -> @@ -192,25 +208,28 @@ out(Priority, {pqueue, Queues}) -> case lists:keysearch(P, 1, Queues) of {value, {_, Q}} -> {R, Q1} = out(Q), - Queues1 = case is_empty(Q1) of - true -> lists:keydelete(P, 1, Queues); - false -> lists:keyreplace(P, 1, Queues, {P, Q1}) - end, - {R, case Queues1 of - [] -> {queue, [], [], 0}; + Queues1 = + case is_empty(Q1) of + true -> lists:keydelete(P, 1, Queues); + false -> lists:keyreplace(P, 1, Queues, {P, Q1}) + end, + {R, + case Queues1 of + [] -> {queue, [], [], 0}; [{0, OnlyQ}] -> OnlyQ; - [_|_] -> {pqueue, Queues1} + [_ | _] -> {pqueue, Queues1} end}; false -> {empty, {pqueue, Queues}} end. -add_p(R, P) -> case R of - {empty, Q} -> {empty, Q}; - {{value, V}, Q} -> {{value, V, P}, Q} - end. +add_p(R, P) -> + case R of + {empty, Q} -> {empty, Q}; + {{value, V}, Q} -> {{value, V, P}, Q} + end. --spec(join(pqueue(), pqueue()) -> pqueue()). +-spec join(pqueue(), pqueue()) -> pqueue(). join(A, {queue, [], [], 0}) -> A; join({queue, [], [], 0}, B) -> @@ -219,21 +238,23 @@ join({queue, AIn, AOut, ALen}, {queue, BIn, BOut, BLen}) -> {queue, BIn, AOut ++ lists:reverse(AIn, BOut), ALen + BLen}; join(A = {queue, _, _, _}, {pqueue, BPQ}) -> {Pre, Post} = - lists:splitwith(fun ({P, _}) -> P < 0 orelse P == infinity end, BPQ), - Post1 = case Post of - [] -> [ {0, A} ]; - [ {0, ZeroQueue} | Rest ] -> [ {0, join(A, ZeroQueue)} | Rest ]; - _ -> [ {0, A} | Post ] - end, + lists:splitwith(fun({P, _}) -> P < 0 orelse P == infinity end, BPQ), + Post1 = + case Post of + [] -> [{0, A}]; + [{0, ZeroQueue} | Rest] -> [{0, join(A, ZeroQueue)} | Rest]; + _ -> [{0, A} | Post] + end, {pqueue, Pre ++ Post1}; join({pqueue, APQ}, B = {queue, _, _, _}) -> {Pre, Post} = - lists:splitwith(fun ({P, _}) -> P < 0 orelse P == infinity end, APQ), - Post1 = case Post of - [] -> [ {0, B} ]; - [ {0, ZeroQueue} | Rest ] -> [ {0, join(ZeroQueue, B)} | Rest ]; - _ -> [ {0, B} | Post ] - end, + lists:splitwith(fun({P, _}) -> P < 0 orelse P == infinity end, APQ), + Post1 = + case Post of + [] -> [{0, B}]; + [{0, ZeroQueue} | Rest] -> [{0, join(ZeroQueue, B)} | Rest]; + _ -> [{0, B} | Post] + end, {pqueue, Pre ++ Post1}; join({pqueue, APQ}, {pqueue, BPQ}) -> {pqueue, merge(APQ, BPQ, [])}. @@ -242,36 +263,42 @@ merge([], BPQ, Acc) -> lists:reverse(Acc, BPQ); merge(APQ, [], Acc) -> lists:reverse(Acc, APQ); -merge([{P, A}|As], [{P, B}|Bs], Acc) -> - merge(As, Bs, [ {P, join(A, B)} | Acc ]); -merge([{PA, A}|As], Bs = [{PB, _}|_], Acc) when PA < PB orelse PA == infinity -> - merge(As, Bs, [ {PA, A} | Acc ]); -merge(As = [{_, _}|_], [{PB, B}|Bs], Acc) -> - merge(As, Bs, [ {PB, B} | Acc ]). +merge([{P, A} | As], [{P, B} | Bs], Acc) -> + merge(As, Bs, [{P, join(A, B)} | Acc]); +merge([{PA, A} | As], Bs = [{PB, _} | _], Acc) when PA < PB orelse PA == infinity -> + merge(As, Bs, [{PA, A} | Acc]); +merge(As = [{_, _} | _], [{PB, B} | Bs], Acc) -> + merge(As, Bs, [{PB, B} | Acc]). --spec(filter(fun ((any()) -> boolean()), pqueue()) -> pqueue()). -filter(Pred, Q) -> fold(fun(V, P, Acc) -> - case Pred(V) of - true -> in(V, P, Acc); - false -> Acc - end - end, new(), Q). +-spec filter(fun((any()) -> boolean()), pqueue()) -> pqueue(). +filter(Pred, Q) -> + fold( + fun(V, P, Acc) -> + case Pred(V) of + true -> in(V, P, Acc); + false -> Acc + end + end, + new(), + Q + ). --spec(fold(fun ((any(), priority(), A) -> A), A, pqueue()) -> A). -fold(Fun, Init, Q) -> case out_p(Q) of - {empty, _Q} -> Init; - {{value, V, P}, Q1} -> fold(Fun, Fun(V, P, Init), Q1) - end. 
+-spec fold(fun((any(), priority(), A) -> A), A, pqueue()) -> A. +fold(Fun, Init, Q) -> + case out_p(Q) of + {empty, _Q} -> Init; + {{value, V, P}, Q1} -> fold(Fun, Fun(V, P, Init), Q1) + end. --spec(highest(pqueue()) -> priority() | 'empty'). -highest({queue, [], [], 0}) -> empty; -highest({queue, _, _, _}) -> 0; +-spec highest(pqueue()) -> priority() | 'empty'. +highest({queue, [], [], 0}) -> empty; +highest({queue, _, _, _}) -> 0; highest({pqueue, [{P, _} | _]}) -> maybe_negate_priority(P). r2f([], 0) -> {queue, [], [], 0}; r2f([_] = R, 1) -> {queue, [], R, 1}; -r2f([X,Y], 2) -> {queue, [X], [Y], 2}; -r2f([X,Y|R], L) -> {queue, [X,Y], lists:reverse(R, []), L}. +r2f([X, Y], 2) -> {queue, [X], [Y], 2}; +r2f([X, Y | R], L) -> {queue, [X, Y], lists:reverse(R, []), L}. maybe_negate_priority(infinity) -> infinity; -maybe_negate_priority(P) -> -P. +maybe_negate_priority(P) -> -P. diff --git a/apps/emqx/src/emqx_quic_connection.erl b/apps/emqx/src/emqx_quic_connection.erl index af5052bdf..f5c281242 100644 --- a/apps/emqx/src/emqx_quic_connection.erl +++ b/apps/emqx/src/emqx_quic_connection.erl @@ -17,15 +17,15 @@ -module(emqx_quic_connection). %% Callbacks --export([ init/1 - , new_conn/2 - , connected/2 - , shutdown/2 - ]). +-export([ + init/1, + new_conn/2, + connected/2, + shutdown/2 +]). -type cb_state() :: map() | proplists:proplist(). - -spec init(cb_state()) -> cb_state(). init(ConnOpts) when is_list(ConnOpts) -> init(maps:from_list(ConnOpts)); diff --git a/apps/emqx/src/emqx_quic_stream.erl b/apps/emqx/src/emqx_quic_stream.erl index 7dfb4b05c..e98ea0137 100644 --- a/apps/emqx/src/emqx_quic_stream.erl +++ b/apps/emqx/src/emqx_quic_stream.erl @@ -18,18 +18,19 @@ -module(emqx_quic_stream). %% emqx transport Callbacks --export([ type/1 - , wait/1 - , getstat/2 - , fast_close/1 - , ensure_ok_or_exit/2 - , async_send/3 - , setopts/2 - , getopts/2 - , peername/1 - , sockname/1 - , peercert/1 - ]). +-export([ + type/1, + wait/1, + getstat/2, + fast_close/1, + ensure_ok_or_exit/2, + async_send/3, + setopts/2, + getopts/2, + peername/1, + sockname/1, + peercert/1 +]). wait({ConnOwner, Conn}) -> {ok, Conn} = quicer:async_accept_stream(Conn, []), @@ -62,28 +63,34 @@ getstat(Socket, Stats) -> end. setopts(Socket, Opts) -> - lists:foreach(fun({Opt, V}) when is_atom(Opt) -> - quicer:setopt(Socket, Opt, V); - (Opt) when is_atom(Opt) -> - quicer:setopt(Socket, Opt, true) - end, Opts), + lists:foreach( + fun + ({Opt, V}) when is_atom(Opt) -> + quicer:setopt(Socket, Opt, V); + (Opt) when is_atom(Opt) -> + quicer:setopt(Socket, Opt, true) + end, + Opts + ), ok. getopts(_Socket, _Opts) -> %% @todo - {ok, [{high_watermark, 0}, - {high_msgq_watermark, 0}, - {sndbuf, 0}, - {recbuf, 0}, - {buffer,80000}]}. + {ok, [ + {high_watermark, 0}, + {high_msgq_watermark, 0}, + {sndbuf, 0}, + {recbuf, 0}, + {buffer, 80000} + ]}. fast_close(Stream) -> %% Stream might be closed already. _ = quicer:async_close_stream(Stream), ok. --spec(ensure_ok_or_exit(atom(), list(term())) -> term()). -ensure_ok_or_exit(Fun, Args = [Sock|_]) when is_atom(Fun), is_list(Args) -> +-spec ensure_ok_or_exit(atom(), list(term())) -> term(). +ensure_ok_or_exit(Fun, Args = [Sock | _]) when is_atom(Fun), is_list(Args) -> case erlang:apply(?MODULE, Fun, Args) of {error, Reason} when Reason =:= enotconn; Reason =:= closed -> fast_close(Sock), @@ -91,7 +98,8 @@ ensure_ok_or_exit(Fun, Args = [Sock|_]) when is_atom(Fun), is_list(Args) -> {error, Reason} -> fast_close(Sock), exit({shutdown, Reason}); - Result -> Result + Result -> + Result end. 
async_send(Stream, Data, Options) when is_list(Data) -> @@ -99,6 +107,5 @@ async_send(Stream, Data, Options) when is_list(Data) -> async_send(Stream, Data, _Options) when is_binary(Data) -> case quicer:send(Stream, Data) of {ok, _Len} -> ok; - Other -> - Other + Other -> Other end. diff --git a/apps/emqx/src/emqx_reason_codes.erl b/apps/emqx/src/emqx_reason_codes.erl index eaf7a7e89..e7e27f565 100644 --- a/apps/emqx/src/emqx_reason_codes.erl +++ b/apps/emqx/src/emqx_reason_codes.erl @@ -19,27 +19,36 @@ -include("emqx_mqtt.hrl"). --export([ name/1 - , name/2 - , text/1 - , text/2 - ]). +-export([ + name/1, + name/2, + text/1, + text/2 +]). --export([ frame_error/1 - , connack_error/1 - ]). +-export([ + frame_error/1, + connack_error/1 +]). -export([compat/2]). name(I, Ver) when Ver >= ?MQTT_PROTO_V5 -> name(I); -name(0, _Ver) -> connection_accepted; -name(1, _Ver) -> unacceptable_protocol_version; -name(2, _Ver) -> client_identifier_not_valid; -name(3, _Ver) -> server_unavaliable; -name(4, _Ver) -> malformed_username_or_password; -name(5, _Ver) -> unauthorized_client; -name(_, _Ver) -> unknown_error. +name(0, _Ver) -> + connection_accepted; +name(1, _Ver) -> + unacceptable_protocol_version; +name(2, _Ver) -> + client_identifier_not_valid; +name(3, _Ver) -> + server_unavaliable; +name(4, _Ver) -> + malformed_username_or_password; +name(5, _Ver) -> + unauthorized_client; +name(_, _Ver) -> + unknown_error. name(16#00) -> success; name(16#01) -> granted_qos1; @@ -88,13 +97,20 @@ name(_Code) -> unknown_error. text(I, Ver) when Ver >= ?MQTT_PROTO_V5 -> text(I); -text(0, _Ver) -> <<"Connection accepted">>; -text(1, _Ver) -> <<"unacceptable_protocol_version">>; -text(2, _Ver) -> <<"client_identifier_not_valid">>; -text(3, _Ver) -> <<"server_unavaliable">>; -text(4, _Ver) -> <<"malformed_username_or_password">>; -text(5, _Ver) -> <<"unauthorized_client">>; -text(_, _Ver) -> <<"unknown_error">>. +text(0, _Ver) -> + <<"Connection accepted">>; +text(1, _Ver) -> + <<"unacceptable_protocol_version">>; +text(2, _Ver) -> + <<"client_identifier_not_valid">>; +text(3, _Ver) -> + <<"server_unavaliable">>; +text(4, _Ver) -> + <<"malformed_username_or_password">>; +text(5, _Ver) -> + <<"unauthorized_client">>; +text(_, _Ver) -> + <<"unknown_error">>. text(16#00) -> <<"Success">>; text(16#01) -> <<"Granted QoS 1">>; @@ -159,10 +175,8 @@ compat(connack, 16#97) -> ?CONNACK_SERVER; compat(connack, 16#9C) -> ?CONNACK_SERVER; compat(connack, 16#9D) -> ?CONNACK_SERVER; compat(connack, 16#9F) -> ?CONNACK_SERVER; - compat(suback, Code) when Code =< ?QOS_2 -> Code; -compat(suback, Code) when Code >= 16#80 -> 16#80; - +compat(suback, Code) when Code >= 16#80 -> 16#80; compat(unsuback, _Code) -> undefined; compat(_Other, _Code) -> undefined. @@ -177,4 +191,3 @@ connack_error(server_busy) -> ?RC_SERVER_BUSY; connack_error(banned) -> ?RC_BANNED; connack_error(bad_authentication_method) -> ?RC_BAD_AUTHENTICATION_METHOD; connack_error(_) -> ?RC_UNSPECIFIED_ERROR. - diff --git a/apps/emqx/src/emqx_release.erl b/apps/emqx/src/emqx_release.erl index 1c9ada376..6bb46afd8 100644 --- a/apps/emqx/src/emqx_release.erl +++ b/apps/emqx/src/emqx_release.erl @@ -16,24 +16,25 @@ -module(emqx_release). --export([ edition/0 - , description/0 - , version/0 - ]). +-export([ + edition/0, + description/0, + version/0 +]). -include("emqx_release.hrl"). --define(EMQX_DESCS, - #{ee => "EMQX Enterprise", - ce => "EMQX", - edge => "EMQX Edge" - }). +-define(EMQX_DESCS, #{ + ee => "EMQX Enterprise", + ce => "EMQX", + edge => "EMQX Edge" +}). 
--define(EMQX_REL_VSNS, - #{ee => ?EMQX_RELEASE_EE, - ce => ?EMQX_RELEASE_CE, - edge => ?EMQX_RELEASE_CE - }). +-define(EMQX_REL_VSNS, #{ + ee => ?EMQX_RELEASE_EE, + ce => ?EMQX_RELEASE_CE, + edge => ?EMQX_RELEASE_CE +}). %% @doc Return EMQX description. description() -> @@ -52,17 +53,21 @@ edition() -> ce. %% @doc Return the release version. version() -> case lists:keyfind(emqx_vsn, 1, ?MODULE:module_info(compile)) of - false -> %% For TEST build or dependency build. + %% For TEST build or dependency build. + false -> build_vsn(); - {_, Vsn} -> %% For emqx release build + %% For emqx release build + {_, Vsn} -> VsnStr = build_vsn(), case string:str(Vsn, VsnStr) of - 1 -> ok; + 1 -> + ok; _ -> - erlang:error(#{ reason => version_mismatch - , source => VsnStr - , built_for => Vsn - }) + erlang:error(#{ + reason => version_mismatch, + source => VsnStr, + built_for => Vsn + }) end, Vsn end. diff --git a/apps/emqx/src/emqx_router.erl b/apps/emqx/src/emqx_router.erl index 45440d198..a17590a71 100644 --- a/apps/emqx/src/emqx_router.erl +++ b/apps/emqx/src/emqx_router.erl @@ -23,7 +23,6 @@ -include("types.hrl"). -include_lib("ekka/include/ekka.hrl"). - %% Mnesia bootstrap -export([mnesia/1]). @@ -32,39 +31,43 @@ -export([start_link/2]). %% Route APIs --export([ add_route/1 - , add_route/2 - , do_add_route/1 - , do_add_route/2 - ]). +-export([ + add_route/1, + add_route/2, + do_add_route/1, + do_add_route/2 +]). --export([ delete_route/1 - , delete_route/2 - , do_delete_route/1 - , do_delete_route/2 - ]). +-export([ + delete_route/1, + delete_route/2, + do_delete_route/1, + do_delete_route/2 +]). --export([ match_routes/1 - , lookup_routes/1 - , has_routes/1 - ]). +-export([ + match_routes/1, + lookup_routes/1, + has_routes/1 +]). -export([print_routes/1]). -export([topics/0]). %% gen_server callbacks --export([ init/1 - , handle_call/3 - , handle_cast/2 - , handle_info/2 - , terminate/2 - , code_change/3 - ]). +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2, + code_change/3 +]). --type(group() :: binary()). +-type group() :: binary(). --type(dest() :: node() | {group(), node()}). +-type dest() :: node() | {group(), node()}. -define(ROUTE_TAB, emqx_route). @@ -75,48 +78,58 @@ mnesia(boot) -> mria_config:set_dirty_shard(?ROUTE_SHARD, true), ok = mria:create_table(?ROUTE_TAB, [ - {type, bag}, - {rlog_shard, ?ROUTE_SHARD}, - {storage, ram_copies}, - {record_name, route}, - {attributes, record_info(fields, route)}, - {storage_properties, [{ets, [{read_concurrency, true}, - {write_concurrency, true}]}]}]). + {type, bag}, + {rlog_shard, ?ROUTE_SHARD}, + {storage, ram_copies}, + {record_name, route}, + {attributes, record_info(fields, route)}, + {storage_properties, [ + {ets, [ + {read_concurrency, true}, + {write_concurrency, true} + ]} + ]} + ]). %%-------------------------------------------------------------------- %% Start a router %%-------------------------------------------------------------------- --spec(start_link(atom(), pos_integer()) -> startlink_ret()). +-spec start_link(atom(), pos_integer()) -> startlink_ret(). start_link(Pool, Id) -> - gen_server:start_link({local, emqx_misc:proc_name(?MODULE, Id)}, - ?MODULE, [Pool, Id], [{hibernate_after, 1000}]). + gen_server:start_link( + {local, emqx_misc:proc_name(?MODULE, Id)}, + ?MODULE, + [Pool, Id], + [{hibernate_after, 1000}] + ). 
%%-------------------------------------------------------------------- %% Route APIs %%-------------------------------------------------------------------- --spec(add_route(emqx_types:topic()) -> ok | {error, term()}). +-spec add_route(emqx_types:topic()) -> ok | {error, term()}. add_route(Topic) when is_binary(Topic) -> add_route(Topic, node()). --spec(add_route(emqx_types:topic(), dest()) -> ok | {error, term()}). +-spec add_route(emqx_types:topic(), dest()) -> ok | {error, term()}. add_route(Topic, Dest) when is_binary(Topic) -> call(pick(Topic), {add_route, Topic, Dest}). --spec(do_add_route(emqx_types:topic()) -> ok | {error, term()}). +-spec do_add_route(emqx_types:topic()) -> ok | {error, term()}. do_add_route(Topic) when is_binary(Topic) -> do_add_route(Topic, node()). --spec(do_add_route(emqx_types:topic(), dest()) -> ok | {error, term()}). +-spec do_add_route(emqx_types:topic(), dest()) -> ok | {error, term()}. do_add_route(Topic, Dest) when is_binary(Topic) -> Route = #route{topic = Topic, dest = Dest}, case lists:member(Route, lookup_routes(Topic)) of - true -> ok; + true -> + ok; false -> ok = emqx_router_helper:monitor(Dest), case emqx_topic:wildcard(Topic) of - true -> + true -> Fun = fun emqx_router_utils:insert_trie_route/2, emqx_router_utils:maybe_trans(Fun, [?ROUTE_TAB, Route], ?ROUTE_SHARD); false -> @@ -125,12 +138,11 @@ do_add_route(Topic, Dest) when is_binary(Topic) -> end. %% @doc Match routes --spec(match_routes(emqx_types:topic()) -> [emqx_types:route()]). +-spec match_routes(emqx_types:topic()) -> [emqx_types:route()]. match_routes(Topic) when is_binary(Topic) -> case match_trie(Topic) of [] -> lookup_routes(Topic); - Matched -> - lists:append([lookup_routes(To) || To <- [Topic | Matched]]) + Matched -> lists:append([lookup_routes(To) || To <- [Topic | Matched]]) end. %% Optimize: routing table will be replicated to all router nodes. @@ -140,47 +152,50 @@ match_trie(Topic) -> false -> emqx_trie:match(Topic) end. --spec(lookup_routes(emqx_types:topic()) -> [emqx_types:route()]). +-spec lookup_routes(emqx_types:topic()) -> [emqx_types:route()]. lookup_routes(Topic) -> ets:lookup(?ROUTE_TAB, Topic). --spec(has_routes(emqx_types:topic()) -> boolean()). +-spec has_routes(emqx_types:topic()) -> boolean(). has_routes(Topic) when is_binary(Topic) -> ets:member(?ROUTE_TAB, Topic). --spec(delete_route(emqx_types:topic()) -> ok | {error, term()}). +-spec delete_route(emqx_types:topic()) -> ok | {error, term()}. delete_route(Topic) when is_binary(Topic) -> delete_route(Topic, node()). --spec(delete_route(emqx_types:topic(), dest()) -> ok | {error, term()}). +-spec delete_route(emqx_types:topic(), dest()) -> ok | {error, term()}. delete_route(Topic, Dest) when is_binary(Topic) -> call(pick(Topic), {delete_route, Topic, Dest}). --spec(do_delete_route(emqx_types:topic()) -> ok | {error, term()}). +-spec do_delete_route(emqx_types:topic()) -> ok | {error, term()}. do_delete_route(Topic) when is_binary(Topic) -> do_delete_route(Topic, node()). --spec(do_delete_route(emqx_types:topic(), dest()) -> ok | {error, term()}). +-spec do_delete_route(emqx_types:topic(), dest()) -> ok | {error, term()}. do_delete_route(Topic, Dest) -> Route = #route{topic = Topic, dest = Dest}, case emqx_topic:wildcard(Topic) of - true -> + true -> Fun = fun emqx_router_utils:delete_trie_route/2, emqx_router_utils:maybe_trans(Fun, [?ROUTE_TAB, Route], ?ROUTE_SHARD); false -> emqx_router_utils:delete_direct_route(?ROUTE_TAB, Route) end. --spec(topics() -> list(emqx_types:topic())). 
+-spec topics() -> list(emqx_types:topic()). topics() -> mnesia:dirty_all_keys(?ROUTE_TAB). %% @doc Print routes to a topic --spec(print_routes(emqx_types:topic()) -> ok). +-spec print_routes(emqx_types:topic()) -> ok. print_routes(Topic) -> - lists:foreach(fun(#route{topic = To, dest = Dest}) -> - io:format("~ts -> ~ts~n", [To, Dest]) - end, match_routes(Topic)). + lists:foreach( + fun(#route{topic = To, dest = Dest}) -> + io:format("~ts -> ~ts~n", [To, Dest]) + end, + match_routes(Topic) + ). call(Router, Msg) -> gen_server:call(Router, Msg, infinity). @@ -199,11 +214,9 @@ init([Pool, Id]) -> handle_call({add_route, Topic, Dest}, _From, State) -> Ok = do_add_route(Topic, Dest), {reply, Ok, State}; - handle_call({delete_route, Topic, Dest}, _From, State) -> Ok = do_delete_route(Topic, Dest), {reply, Ok, State}; - handle_call(Req, _From, State) -> ?SLOG(error, #{msg => "unexpected_call", call => Req}), {reply, ignored, State}. diff --git a/apps/emqx/src/emqx_router_helper.erl b/apps/emqx/src/emqx_router_helper.erl index 3c00ef3cc..fe7f5548c 100644 --- a/apps/emqx/src/emqx_router_helper.erl +++ b/apps/emqx/src/emqx_router_helper.erl @@ -23,28 +23,29 @@ -include("types.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). - %% Mnesia bootstrap -export([mnesia/1]). -boot_mnesia({mnesia, [boot]}). %% API --export([ start_link/0 - , monitor/1 - ]). +-export([ + start_link/0, + monitor/1 +]). %% Internal export -export([stats_fun/0]). %% gen_server callbacks --export([ init/1 - , handle_call/3 - , handle_cast/2 - , handle_info/2 - , terminate/2 - , code_change/3 - ]). +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2, + code_change/3 +]). -record(routing_node, {name, const = unused}). @@ -60,30 +61,33 @@ mnesia(boot) -> ok = mria:create_table(?ROUTING_NODE, [ - {type, set}, - {rlog_shard, ?ROUTE_SHARD}, - {storage, ram_copies}, - {record_name, routing_node}, - {attributes, record_info(fields, routing_node)}, - {storage_properties, [{ets, [{read_concurrency, true}]}]}]). + {type, set}, + {rlog_shard, ?ROUTE_SHARD}, + {storage, ram_copies}, + {record_name, routing_node}, + {attributes, record_info(fields, routing_node)}, + {storage_properties, [{ets, [{read_concurrency, true}]}]} + ]). %%-------------------------------------------------------------------- %% API %%-------------------------------------------------------------------- %% @doc Starts the router helper --spec(start_link() -> startlink_ret()). +-spec start_link() -> startlink_ret(). start_link() -> gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). %% @doc Monitor routing node --spec(monitor(node() | {binary(), node()}) -> ok). +-spec monitor(node() | {binary(), node()}) -> ok. monitor({_Group, Node}) -> monitor(Node); monitor(Node) when is_atom(Node) -> - case ekka:is_member(Node) - orelse ets:member(?ROUTING_NODE, Node) of - true -> ok; + case + ekka:is_member(Node) orelse + ets:member(?ROUTING_NODE, Node) + of + true -> ok; false -> mria:dirty_write(?ROUTING_NODE, #routing_node{name = Node}) end. 
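%% Editor's note: an illustrative usage sketch, not part of this patch. Both
%% destination shapes accepted by the -spec above are shown; the node name is
%% hypothetical. For a node that is not yet a cluster member the call
%% dirty-writes a #routing_node{} record, so this helper starts monitoring it
%% and can later clean up its routes on 'nodedown'.
example_monitor_dests() ->
    ok = emqx_router_helper:monitor('emqx@remote1'),
    ok = emqx_router_helper:monitor({<<"group1">>, 'emqx@remote1'}).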
@@ -97,13 +101,18 @@ init([]) -> _ = mria:wait_for_tables([?ROUTING_NODE]), {ok, _} = mnesia:subscribe({table, ?ROUTING_NODE, simple}), Nodes = lists:foldl( - fun(Node, Acc) -> - case ekka:is_member(Node) of - true -> Acc; - false -> true = erlang:monitor_node(Node, true), - [Node | Acc] - end - end, [], mnesia:dirty_all_keys(?ROUTING_NODE)), + fun(Node, Acc) -> + case ekka:is_member(Node) of + true -> + Acc; + false -> + true = erlang:monitor_node(Node, true), + [Node | Acc] + end + end, + [], + mnesia:dirty_all_keys(?ROUTING_NODE) + ), ok = emqx_stats:update_interval(route_stats, fun ?MODULE:stats_fun/0), {ok, #{nodes => Nodes}, hibernate}. @@ -115,41 +124,39 @@ handle_cast(Msg, State) -> ?SLOG(error, #{msg => "unexpected_cast", cast => Msg}), {noreply, State}. -handle_info({mnesia_table_event, {write, {?ROUTING_NODE, Node, _}, _}}, - State = #{nodes := Nodes}) -> +handle_info( + {mnesia_table_event, {write, {?ROUTING_NODE, Node, _}, _}}, + State = #{nodes := Nodes} +) -> case ekka:is_member(Node) orelse lists:member(Node, Nodes) of - true -> {noreply, State}; + true -> + {noreply, State}; false -> true = erlang:monitor_node(Node, true), {noreply, State#{nodes := [Node | Nodes]}} end; - handle_info({mnesia_table_event, {delete, {?ROUTING_NODE, _Node}, _}}, State) -> %% ignore {noreply, State}; - handle_info({mnesia_table_event, Event}, State) -> - ?SLOG(error,#{msg => "unexpected_mnesia_table_event", event => Event}), + ?SLOG(error, #{msg => "unexpected_mnesia_table_event", event => Event}), {noreply, State}; - handle_info({nodedown, Node}, State = #{nodes := Nodes}) -> - global:trans({?LOCK, self()}, - fun() -> - mria:transaction(?ROUTE_SHARD, fun cleanup_routes/1, [Node]) - end), + global:trans( + {?LOCK, self()}, + fun() -> + mria:transaction(?ROUTE_SHARD, fun cleanup_routes/1, [Node]) + end + ), ok = mria:dirty_delete(?ROUTING_NODE, Node), ?tp(emqx_router_helper_cleanup_done, #{node => Node}), {noreply, State#{nodes := lists:delete(Node, Nodes)}, hibernate}; - handle_info({membership, {mnesia, down, Node}}, State) -> handle_info({nodedown, Node}, State); - handle_info({membership, {node, down, Node}}, State) -> handle_info({nodedown, Node}, State); - handle_info({membership, _Event}, State) -> {noreply, State}; - handle_info(Info, State) -> ?SLOG(error, #{msg => "unexpected_info", info => Info}), {noreply, State}. @@ -168,14 +175,19 @@ code_change(_OldVsn, State, _Extra) -> stats_fun() -> case ets:info(?ROUTE, size) of - undefined -> ok; + undefined -> + ok; Size -> emqx_stats:setstat('routes.count', 'routes.max', Size), emqx_stats:setstat('topics.count', 'topics.max', Size) end. cleanup_routes(Node) -> - Patterns = [#route{_ = '_', dest = Node}, - #route{_ = '_', dest = {'_', Node}}], - [mnesia:delete_object(?ROUTE, Route, write) - || Pat <- Patterns, Route <- mnesia:match_object(?ROUTE, Pat, write)]. + Patterns = [ + #route{_ = '_', dest = Node}, + #route{_ = '_', dest = {'_', Node}} + ], + [ + mnesia:delete_object(?ROUTE, Route, write) + || Pat <- Patterns, Route <- mnesia:match_object(?ROUTE, Pat, write) + ]. 
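%% Editor's note: an illustrative end-to-end sketch of the emqx_router API
%% reformatted above; it is not part of this patch and the topic filter is
%% hypothetical. Wildcard filters go through the trie, so a concrete topic can
%% later be matched back to the filter by match_routes/1.
example_route_roundtrip() ->
    ok = emqx_router:add_route(<<"sensor/+/temp">>),
    true = emqx_router:has_routes(<<"sensor/+/temp">>),
    [_ | _] = emqx_router:match_routes(<<"sensor/1/temp">>),
    ok = emqx_router:print_routes(<<"sensor/1/temp">>),
    ok = emqx_router:delete_route(<<"sensor/+/temp">>).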
diff --git a/apps/emqx/src/emqx_router_sup.erl b/apps/emqx/src/emqx_router_sup.erl index ba4d2c47b..2e0f4a3b7 100644 --- a/apps/emqx/src/emqx_router_sup.erl +++ b/apps/emqx/src/emqx_router_sup.erl @@ -27,14 +27,18 @@ start_link() -> init([]) -> %% Router helper - Helper = #{id => helper, - start => {emqx_router_helper, start_link, []}, - restart => permanent, - shutdown => 5000, - type => worker, - modules => [emqx_router_helper]}, + Helper = #{ + id => helper, + start => {emqx_router_helper, start_link, []}, + restart => permanent, + shutdown => 5000, + type => worker, + modules => [emqx_router_helper] + }, %% Router pool - RouterPool = emqx_pool_sup:spec([router_pool, hash, - {emqx_router, start_link, []}]), + RouterPool = emqx_pool_sup:spec([ + router_pool, + hash, + {emqx_router, start_link, []} + ]), {ok, {{one_for_all, 0, 1}, [Helper, RouterPool]}}. - diff --git a/apps/emqx/src/emqx_router_utils.erl b/apps/emqx/src/emqx_router_utils.erl index 5cf7e5fc1..ccda44fcc 100644 --- a/apps/emqx/src/emqx_router_utils.erl +++ b/apps/emqx/src/emqx_router_utils.erl @@ -18,14 +18,15 @@ -include("emqx.hrl"). --export([ delete_direct_route/2 - , delete_trie_route/2 - , delete_session_trie_route/2 - , insert_direct_route/2 - , insert_trie_route/2 - , insert_session_trie_route/2 - , maybe_trans/3 - ]). +-export([ + delete_direct_route/2, + delete_trie_route/2, + delete_session_trie_route/2, + insert_direct_route/2, + insert_trie_route/2, + insert_session_trie_route/2, + maybe_trans/3 +]). insert_direct_route(Tab, Route) -> mria:dirty_write(Tab, Route). @@ -33,14 +34,14 @@ insert_direct_route(Tab, Route) -> insert_trie_route(RouteTab, Route = #route{topic = Topic}) -> case mnesia:wread({RouteTab, Topic}) of [] -> emqx_trie:insert(Topic); - _ -> ok + _ -> ok end, mnesia:write(RouteTab, Route, sticky_write). insert_session_trie_route(RouteTab, Route = #route{topic = Topic}) -> case mnesia:wread({RouteTab, Topic}) of - [] -> emqx_trie:insert_session(Topic); - _ -> ok + [] -> emqx_trie:insert_session(Topic); + _ -> ok end, mnesia:write(RouteTab, Route, sticky_write). @@ -59,10 +60,10 @@ delete_trie_route(RouteTab, Route = #route{topic = Topic}, Type) -> %% Remove route and trie ok = mnesia:delete_object(RouteTab, Route, sticky_write), case Type of - normal -> emqx_trie:delete(Topic); + normal -> emqx_trie:delete(Topic); session -> emqx_trie:delete_session(Topic) end; - [_|_] -> + [_ | _] -> %% Remove route only mnesia:delete_object(RouteTab, Route, sticky_write); [] -> @@ -70,30 +71,37 @@ delete_trie_route(RouteTab, Route = #route{topic = Topic}, Type) -> end. %% @private --spec(maybe_trans(function(), list(any()), Shard :: atom()) -> ok | {error, term()}). +-spec maybe_trans(function(), list(any()), Shard :: atom()) -> ok | {error, term()}. maybe_trans(Fun, Args, Shard) -> case emqx:get_config([broker, perf, route_lock_type]) of key -> trans(Fun, Args, Shard); global -> %% Assert: - mnesia = mria_rlog:backend(), %% TODO: do something smarter than just crash + + %% TODO: do something smarter than just crash + mnesia = mria_rlog:backend(), lock_router(Shard), - try mnesia:sync_dirty(Fun, Args) + try + mnesia:sync_dirty(Fun, Args) after unlock_router(Shard) end; tab -> - trans(fun() -> - emqx_trie:lock_tables(), - apply(Fun, Args) - end, [], Shard) + trans( + fun() -> + emqx_trie:lock_tables(), + apply(Fun, Args) + end, + [], + Shard + ) end. %% The created fun only terminates with explicit exception -dialyzer({nowarn_function, [trans/3]}). 
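%% Editor's note: an illustrative summary of maybe_trans/3 above, not part of
%% this patch. The broker.perf.route_lock_type setting (default `key') selects
%% how route/trie updates are serialized:
route_lock_type_summary() ->
    [
        {key, "run each update in its own mria transaction on the route shard"},
        {tab, "one transaction that first takes emqx_trie:lock_tables()"},
        {global,
            "take a global router lock, then mnesia:sync_dirty/2 "
            "(asserts the plain mnesia backend)"}
    ].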
--spec(trans(function(), list(any()), atom()) -> ok | {error, term()}). +-spec trans(function(), list(any()), atom()) -> ok | {error, term()}. trans(Fun, Args, Shard) -> {WPid, RefMon} = spawn_monitor( @@ -102,12 +110,14 @@ trans(Fun, Args, Shard) -> %% Future changes should keep in mind that this process %% always exit with database write result. fun() -> - Res = case mria:transaction(Shard, Fun, Args) of - {atomic, Ok} -> Ok; - {aborted, Reason} -> {error, Reason} - end, - exit({shutdown, Res}) - end), + Res = + case mria:transaction(Shard, Fun, Args) of + {atomic, Ok} -> Ok; + {aborted, Reason} -> {error, Reason} + end, + exit({shutdown, Res}) + end + ), %% Receive a 'shutdown' exit to pass result from the short-lived process. %% so the receive below can be receive-mark optimized by the compiler. %% diff --git a/apps/emqx/src/emqx_rpc.erl b/apps/emqx/src/emqx_rpc.erl index c542b460b..d5183cb1a 100644 --- a/apps/emqx/src/emqx_rpc.erl +++ b/apps/emqx/src/emqx_rpc.erl @@ -19,33 +19,37 @@ %% Note: please don't forget to add new API functions to %% `emqx_bpapi_trans:extract_mfa' --export([ call/4 - , call/5 - , cast/4 - , cast/5 - , multicall/4 - , multicall/5 +-export([ + call/4, + call/5, + cast/4, + cast/5, + multicall/4, + multicall/5, - , unwrap_erpc/1 - ]). + unwrap_erpc/1 +]). --export_type([ badrpc/0 - , call_result/0 - , cast_result/0 - , multicall_result/1 - , multicall_result/0 - , erpc/1 - , erpc_multicall/1 - ]). +-export_type([ + badrpc/0, + call_result/0, + cast_result/0, + multicall_result/1, + multicall_result/0, + erpc/1, + erpc_multicall/1 +]). --compile({inline, - [ rpc_node/1 - , rpc_nodes/1 - ]}). +-compile( + {inline, [ + rpc_node/1, + rpc_nodes/1 + ]} +). -define(DefaultClientNum, 1). --type badrpc() :: {badrpc, term()} | {badtcp, term()}. +-type badrpc() :: {badrpc, term()} | {badtcp, term()}. -type call_result() :: term() | badrpc(). @@ -55,11 +59,12 @@ -type multicall_result() :: multicall_result(term()). --type erpc(Ret) :: {ok, Ret} - | {throw, _Err} - | {exit, {exception | signal, _Reason}} - | {error, {exception, _Reason, _Stack :: list()}} - | {error, {erpc, _Reason}}. +-type erpc(Ret) :: + {ok, Ret} + | {throw, _Err} + | {exit, {exception | signal, _Reason}} + | {error, {exception, _Reason, _Stack :: list()}} + | {error, {erpc, _Reason}}. -type erpc_multicall(Ret) :: [erpc(Ret)]. @@ -100,8 +105,9 @@ rpc_nodes([], Acc) -> rpc_nodes([Node | Nodes], Acc) -> rpc_nodes(Nodes, [rpc_node(Node) | Acc]). -filter_result({Error, Reason}) - when Error =:= badrpc; Error =:= badtcp -> +filter_result({Error, Reason}) when + Error =:= badrpc; Error =:= badtcp +-> {badrpc, Reason}; filter_result(Delivery) -> Delivery. diff --git a/apps/emqx/src/emqx_schema.erl b/apps/emqx/src/emqx_schema.erl index c5f424946..022ab3664 100644 --- a/apps/emqx/src/emqx_schema.erl +++ b/apps/emqx/src/emqx_schema.erl @@ -51,31 +51,47 @@ -typerefl_from_string({cipher/0, emqx_schema, to_erl_cipher_suite}). -typerefl_from_string({comma_separated_atoms/0, emqx_schema, to_comma_separated_atoms}). --export([ validate_heap_size/1 - , parse_user_lookup_fun/1 - , validate_alarm_actions/1 - , validations/0 - ]). +-export([ + validate_heap_size/1, + parse_user_lookup_fun/1, + validate_alarm_actions/1, + validations/0 +]). --export([ qos/0]). +-export([qos/0]). 
% workaround: prevent being recognized as unused functions --export([to_duration/1, to_duration_s/1, to_duration_ms/1, - mk_duration/2, to_bytesize/1, to_wordsize/1, - to_percent/1, to_comma_separated_list/1, - to_bar_separated_list/1, to_ip_port/1, - to_erl_cipher_suite/1, - to_comma_separated_atoms/1 - ]). +-export([ + to_duration/1, + to_duration_s/1, + to_duration_ms/1, + mk_duration/2, + to_bytesize/1, + to_wordsize/1, + to_percent/1, + to_comma_separated_list/1, + to_bar_separated_list/1, + to_ip_port/1, + to_erl_cipher_suite/1, + to_comma_separated_atoms/1 +]). -behaviour(hocon_schema). --reflect_type([ duration/0, duration_s/0, duration_ms/0, - bytesize/0, wordsize/0, percent/0, file/0, - comma_separated_list/0, bar_separated_list/0, ip_port/0, - cipher/0, - comma_separated_atoms/0 - ]). +-reflect_type([ + duration/0, + duration_s/0, + duration_ms/0, + bytesize/0, + wordsize/0, + percent/0, + file/0, + comma_separated_list/0, + bar_separated_list/0, + ip_port/0, + cipher/0, + comma_separated_atoms/0 +]). -export([namespace/0, roots/0, roots/1, fields/1]). -export([conf_get/2, conf_get/3, keys/2, filter/1]). @@ -91,1117 +107,1415 @@ roots() -> roots(high) ++ roots(medium) ++ roots(low). roots(high) -> - [ {"listeners", - sc(ref("listeners"), - #{ desc => "MQTT listeners identified by their protocol type and assigned names" - }) - } - , {"zones", - sc(map("name", ref("zone")), - #{ desc => -"""A zone is a set of configs grouped by the zone name.
-For flexible configuration mapping, the name -can be set to a listener's zone config.
-NOTE: A built-in zone named default is auto created -and can not be deleted.""" - })} - , {"mqtt", - sc(ref("mqtt"), - #{ desc => -"""Global MQTT configuration.
-The configs here work as default values which can be overridden -in zone configs""" - })} - , {?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME, - authentication( -"""Default authentication configs for all MQTT listeners. -
-For per-listener overrides see authentication -in listener configs -
-
-EMQX can be configured with: -
-
    -
  • []: The default value, it allows *ALL* logins
  • -
  • one: For example {enable:true,backend:\"built_in_database\",mechanism=\"password_based\"} -
  • -
  • chain: An array of structs.
  • -
-
-When a chain is configured, the login credentials are checked against the backends -per the configured order, until an 'allow' or 'deny' decision can be made. -
-If there is no decision after a full chain exhaustion, the login is rejected. -""")} - %% NOTE: authorization schema here is only to keep emqx app prue - %% the full schema for EMQX node is injected in emqx_conf_schema. - , {"authorization", - sc(ref("authorization"), - #{})} + [ + {"listeners", + sc( + ref("listeners"), + #{desc => "MQTT listeners identified by their protocol type and assigned names"} + )}, + {"zones", + sc( + map("name", ref("zone")), + #{ + desc => + "" + "A zone is a set of configs grouped by the zone name.
\n" + "For flexible configuration mapping, the name\n" + "can be set to a listener's zone config.
\n" + "NOTE: A built-in zone named default is auto created\n" + "and can not be deleted." + "" + } + )}, + {"mqtt", + sc( + ref("mqtt"), + #{ + desc => + "" + "Global MQTT configuration.
\n" + "The configs here work as default values which can be overridden\n" + "in zone configs" + "" + } + )}, + {?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME, + authentication( + "" + "Default authentication configs for all MQTT listeners.\n" + "
\n" + "For per-listener overrides see authentication\n" + "in listener configs\n" + "
\n" + "
\n" + "EMQX can be configured with:\n" + "
\n" + "
    \n" + "
  • []: The default value, it allows *ALL* logins
  • \n" + "
  • one: For example {enable:true,backend:\"built_in_database\",mechanism=\"password_based\"}\n" + "
  • \n" + "
  • chain: An array of structs.
  • \n" + "
\n" + "
\n" + "When a chain is configured, the login credentials are checked against the backends\n" + "per the configured order, until an 'allow' or 'deny' decision can be made.\n" + "
\n" + "If there is no decision after a full chain exhaustion, the login is rejected.\n" + "" + )}, + %% NOTE: authorization schema here is only to keep emqx app prue + %% the full schema for EMQX node is injected in emqx_conf_schema. + {"authorization", + sc( + ref("authorization"), + #{} + )} ]; roots(medium) -> - [ {"broker", - sc(ref("broker"), - #{})} - , {"sys_topics", - sc(ref("sys_topics"), - #{})} - , {"rate_limit", - sc(ref("rate_limit"), - #{})} - , {"force_shutdown", - sc(ref("force_shutdown"), - #{})} - , {"overload_protection", - sc(ref("overload_protection"), - #{})} + [ + {"broker", + sc( + ref("broker"), + #{} + )}, + {"sys_topics", + sc( + ref("sys_topics"), + #{} + )}, + {"rate_limit", + sc( + ref("rate_limit"), + #{} + )}, + {"force_shutdown", + sc( + ref("force_shutdown"), + #{} + )}, + {"overload_protection", + sc( + ref("overload_protection"), + #{} + )} ]; roots(low) -> - [ {"force_gc", - sc(ref("force_gc"), - #{ desc => -"""Force the MQTT connection process garbage collection after -this number of messages or bytes have passed through.""" - })} - , {"conn_congestion", - sc(ref("conn_congestion"), - #{})} - , {"stats", - sc(ref("stats"), - #{})} - , {"sysmon", - sc(ref("sysmon"), - #{})} - , {"alarm", - sc(ref("alarm"), - #{})} - , {"flapping_detect", - sc(ref("flapping_detect"), - #{})} - , {"persistent_session_store", - sc(ref("persistent_session_store"), - #{})} - , {"trace", - sc(ref("trace"), - #{desc => """ -Real-time filtering logs for the ClientID or Topic or IP for debugging. -""" - })} + [ + {"force_gc", + sc( + ref("force_gc"), + #{ + desc => + "" + "Force the MQTT connection process garbage collection after\n" + "this number of messages or bytes have passed through." + "" + } + )}, + {"conn_congestion", + sc( + ref("conn_congestion"), + #{} + )}, + {"stats", + sc( + ref("stats"), + #{} + )}, + {"sysmon", + sc( + ref("sysmon"), + #{} + )}, + {"alarm", + sc( + ref("alarm"), + #{} + )}, + {"flapping_detect", + sc( + ref("flapping_detect"), + #{} + )}, + {"persistent_session_store", + sc( + ref("persistent_session_store"), + #{} + )}, + {"trace", + sc( + ref("trace"), + #{ + desc => + "" + "\n" + "Real-time filtering logs for the ClientID or Topic or IP for debugging.\n" + "" + } + )} ]. fields("persistent_session_store") -> - [ {"enabled", - sc(boolean(), - #{ default => false - , description => """ -Use the database to store information about persistent sessions. -This makes it possible to migrate a client connection to another -cluster node if a node is stopped. -""" - })}, - {"storage_type", - sc(hoconsc:union([ram, disc]), - #{ default => disc - , description => """ -Store information about persistent sessions on disc or in ram. -If ram is chosen, all information about persistent sessions remains -as long as at least one node in a cluster is alive to keep the information. -If disc is chosen, the information is persisted on disc and will survive -cluster restart, at the price of more disc usage and less throughput. -""" - })}, - {"max_retain_undelivered", - sc(duration(), - #{ default => "1h" - , description => """ -The time messages that was not delivered to a persistent session -is stored before being garbage collected if the node the previous -session was handled on restarts of is stopped. -""" - })}, - {"message_gc_interval", - sc(duration(), - #{ default => "1h" - , description => """ -The starting interval for garbage collection of undelivered messages to -a persistent session. 
This affects how often the \"max_retain_undelivered\" -is checked for removal. -""" - })}, - {"session_message_gc_interval", - sc(duration(), - #{ default => "1m" - , description => """ -The starting interval for garbage collection of transient data for -persistent session messages. This does not affect the life time length -of persistent session messages. -""" - })} + [ + {"enabled", + sc( + boolean(), + #{ + default => false, + description => + "" + "\n" + "Use the database to store information about persistent sessions.\n" + "This makes it possible to migrate a client connection to another\n" + "cluster node if a node is stopped.\n" + "" + } + )}, + {"storage_type", + sc( + hoconsc:union([ram, disc]), + #{ + default => disc, + description => + "" + "\n" + "Store information about persistent sessions on disc or in ram.\n" + "If ram is chosen, all information about persistent sessions remains\n" + "as long as at least one node in a cluster is alive to keep the information.\n" + "If disc is chosen, the information is persisted on disc and will survive\n" + "cluster restart, at the price of more disc usage and less throughput.\n" + "" + } + )}, + {"max_retain_undelivered", + sc( + duration(), + #{ + default => "1h", + description => + "" + "\n" + "The time messages that was not delivered to a persistent session\n" + "is stored before being garbage collected if the node the previous\n" + "session was handled on restarts of is stopped.\n" + "" + } + )}, + {"message_gc_interval", + sc( + duration(), + #{ + default => "1h", + description => + "" + "\n" + "The starting interval for garbage collection of undelivered messages to\n" + "a persistent session. This affects how often the \"max_retain_undelivered\"\n" + "is checked for removal.\n" + "" + } + )}, + {"session_message_gc_interval", + sc( + duration(), + #{ + default => "1m", + description => + "" + "\n" + "The starting interval for garbage collection of transient data for\n" + "persistent session messages. This does not affect the life time length\n" + "of persistent session messages.\n" + "" + } + )} ]; - fields("stats") -> - [ {"enable", - sc(boolean(), - #{ default => true - })} + [ + {"enable", + sc( + boolean(), + #{default => true} + )} ]; - fields("authorization") -> - [ {"no_match", - sc(hoconsc:enum([allow, deny]), - #{ default => allow - %% TODO: make sources a reference link - , desc => """ -Default access control action if the user or client matches no ACL rules, -or if no such user or client is found by the configurable authorization -sources such as built_in_database, an HTTP API, or a query against PostgreSQL. -Find more details in 'authorization.sources' config. 
-""" - })} - , {"deny_action", - sc(hoconsc:enum([ignore, disconnect]), - #{ default => ignore - })} - , {"cache", - sc(ref(?MODULE, "cache"), - #{ - })} + [ + {"no_match", + sc( + hoconsc:enum([allow, deny]), + #{ + default => allow, + %% TODO: make sources a reference link + desc => + "" + "\n" + "Default access control action if the user or client matches no ACL rules,\n" + "or if no such user or client is found by the configurable authorization\n" + "sources such as built_in_database, an HTTP API, or a query against PostgreSQL.\n" + "Find more details in 'authorization.sources' config.\n" + "" + } + )}, + {"deny_action", + sc( + hoconsc:enum([ignore, disconnect]), + #{default => ignore} + )}, + {"cache", + sc( + ref(?MODULE, "cache"), + #{} + )} ]; - fields("cache") -> - [ {"enable", - sc(boolean(), - #{ default => true - }) - } - , {"max_size", - sc(range(1, 1048576), - #{ default => 32 - }) - } - , {"ttl", - sc(duration(), - #{ default => "1m" - }) - } + [ + {"enable", + sc( + boolean(), + #{default => true} + )}, + {"max_size", + sc( + range(1, 1048576), + #{default => 32} + )}, + {"ttl", + sc( + duration(), + #{default => "1m"} + )} ]; - fields("mqtt") -> - [ {"idle_timeout", - sc(hoconsc:union([infinity, duration()]), - #{ default => "15s", - desc => -"""Close TCP connections from the clients that have not sent MQTT CONNECT -message within this interval.""" - })} - , {"max_packet_size", - sc(bytesize(), - #{ default => "1MB", - desc => "Maximum MQTT packet size allowed." - })} - , {"max_clientid_len", - sc(range(23, 65535), - #{ default => 65535, - desc => "Maximum allowed length of MQTT clientId." - })} - , {"max_topic_levels", - sc(range(1, 65535), - #{ default => 65535, - desc => "Maximum topic levels allowed." - })} - , {"max_qos_allowed", - sc(qos(), - #{ default => 2, - desc => "Maximum QoS allowed." - })} - , {"max_topic_alias", - sc(range(0, 65535), - #{ default => 65535, - desc => "Maximum Topic Alias, 0 means no topic alias supported." - })} - , {"retain_available", - sc(boolean(), - #{ default => true, - desc => "Support MQTT retained messages." - })} - , {"wildcard_subscription", - sc(boolean(), - #{ default => true, - desc => "Support MQTT Wildcard Subscriptions." - })} - , {"shared_subscription", - sc(boolean(), - #{ default => true, - desc => "Support MQTT Shared Subscriptions." - })} - , {"ignore_loop_deliver", - sc(boolean(), - #{ default => false, - desc => "Ignore loop delivery of messages for MQTT v3.1.1." - })} - , {"strict_mode", - sc(boolean(), - #{default => false, - desc => "Parse MQTT messages in strict mode. " - "When set to true, invalid utf8 strings in for example " - "client ID, topic name, etc. will cause the client to be " - "disconnected" - }) - } - , {"response_information", - sc(string(), - #{default => "", - desc => -"""Specify the response information returned to the client -This feature is disabled if is set to \"\".""" - }) - } - , {"server_keepalive", - sc(hoconsc:union([integer(), disabled]), - #{ default => disabled, - desc => -"""'Server Keep Alive' of MQTT 5.0. -If the server returns a 'Server Keep Alive' in the CONNACK packet, -the client MUST use that value instead of the value it sent as the 'Keep Alive'.""" - }) - } - , {"keepalive_backoff", - sc(float(), - #{default => 0.75, - desc => -"""The backoff for MQTT keepalive timeout. 
The broker will close the connection -after idling for 'Keepalive * backoff * 2'.""" - }) - } - , {"max_subscriptions", - sc(hoconsc:union([range(1, inf), infinity]), - #{ default => infinity, - desc => "Maximum number of subscriptions allowed." - }) - } - , {"upgrade_qos", - sc(boolean(), - #{ default => false, - desc => "Force upgrade of QoS level according to subscription." - }) - } - , {"max_inflight", - sc(range(1, 65535), - #{ default => 32, - desc => "Maximum size of the Inflight Window storing QoS1/2" - " messages delivered but un-acked." - }) - } - , {"retry_interval", - sc(duration(), - #{ default => "30s", - desc => "Retry interval for QoS1/2 message delivering." - }) - } - , {"max_awaiting_rel", - sc(hoconsc:union([integer(), infinity]), - #{ default => 100, - desc => "Maximum QoS2 packets (Client -> Broker) awaiting PUBREL." - }) - } - , {"await_rel_timeout", - sc(duration(), - #{ default => "300s", - desc => "The QoS2 messages (Client -> Broker) will be dropped" - " if awaiting PUBREL timeout." - }) - } - , {"session_expiry_interval", - sc(duration(), - #{ default => "2h", - desc => "Default session expiry interval for MQTT V3.1.1 connections." - }) - } - , {"max_mqueue_len", - sc(hoconsc:union([non_neg_integer(), infinity]), - #{ default => 1000, - desc => -"""Maximum queue length. Enqueued messages when persistent client disconnected, -or inflight window is full.""" - }) - } - , {"mqueue_priorities", - sc(hoconsc:union([map(), disabled]), - #{ default => disabled, - desc => -"""Topic priorities.
-There's no priority table by default, hence all messages are treated equal.
-Priority number [1-255]
- -**NOTE**: Comma and equal signs are not allowed for priority topic names.
-**NOTE**: Messages for topics not in the priority table are treated as -either highest or lowest priority depending on the configured value for -mqtt.mqueue_default_priority. -

-**Examples**: -To configure \"topic/1\" > \"topic/2\":
-mqueue_priorities: {\"topic/1\": 10, \"topic/2\": 8}""" - }) - } - , {"mqueue_default_priority", - sc(hoconsc:enum([highest, lowest]), - #{ default => lowest, - desc => "Default to the highest priority for topics not matching priority table." - }) - } - , {"mqueue_store_qos0", - sc(boolean(), - #{ default => true, - desc => "Support enqueue QoS0 messages." - }) - } - , {"use_username_as_clientid", - sc(boolean(), - #{ default => false, - desc => "Replace client ID with the username." - }) - } - , {"peer_cert_as_username", - sc(hoconsc:enum([disabled, cn, dn, crt, pem, md5]), - #{ default => disabled, - desc => -"""Use the CN, DN or CRT field from the client certificate as a username. -Only works for the TLS connection.""" - })} - , {"peer_cert_as_clientid", - sc(hoconsc:enum([disabled, cn, dn, crt, pem, md5]), - #{ default => disabled, - desc => -"""Use the CN, DN or CRT field from the client certificate as a clientid. -Only works for the TLS connection.""" - })} + [ + {"idle_timeout", + sc( + hoconsc:union([infinity, duration()]), + #{ + default => "15s", + desc => + "" + "Close TCP connections from the clients that have not sent MQTT CONNECT\n" + "message within this interval." + "" + } + )}, + {"max_packet_size", + sc( + bytesize(), + #{ + default => "1MB", + desc => "Maximum MQTT packet size allowed." + } + )}, + {"max_clientid_len", + sc( + range(23, 65535), + #{ + default => 65535, + desc => "Maximum allowed length of MQTT clientId." + } + )}, + {"max_topic_levels", + sc( + range(1, 65535), + #{ + default => 65535, + desc => "Maximum topic levels allowed." + } + )}, + {"max_qos_allowed", + sc( + qos(), + #{ + default => 2, + desc => "Maximum QoS allowed." + } + )}, + {"max_topic_alias", + sc( + range(0, 65535), + #{ + default => 65535, + desc => "Maximum Topic Alias, 0 means no topic alias supported." + } + )}, + {"retain_available", + sc( + boolean(), + #{ + default => true, + desc => "Support MQTT retained messages." + } + )}, + {"wildcard_subscription", + sc( + boolean(), + #{ + default => true, + desc => "Support MQTT Wildcard Subscriptions." + } + )}, + {"shared_subscription", + sc( + boolean(), + #{ + default => true, + desc => "Support MQTT Shared Subscriptions." + } + )}, + {"ignore_loop_deliver", + sc( + boolean(), + #{ + default => false, + desc => "Ignore loop delivery of messages for MQTT v3.1.1." + } + )}, + {"strict_mode", + sc( + boolean(), + #{ + default => false, + desc => + "Parse MQTT messages in strict mode. " + "When set to true, invalid utf8 strings in for example " + "client ID, topic name, etc. will cause the client to be " + "disconnected" + } + )}, + {"response_information", + sc( + string(), + #{ + default => "", + desc => + "" + "Specify the response information returned to the client\n" + "This feature is disabled if is set to \"\"." + "" + } + )}, + {"server_keepalive", + sc( + hoconsc:union([integer(), disabled]), + #{ + default => disabled, + desc => + "" + "'Server Keep Alive' of MQTT 5.0.\n" + "If the server returns a 'Server Keep Alive' in the CONNACK packet,\n" + "the client MUST use that value instead of the value it sent as the 'Keep Alive'." + "" + } + )}, + {"keepalive_backoff", + sc( + float(), + #{ + default => 0.75, + desc => + "" + "The backoff for MQTT keepalive timeout. The broker will close the connection\n" + "after idling for 'Keepalive * backoff * 2'." + "" + } + )}, + {"max_subscriptions", + sc( + hoconsc:union([range(1, inf), infinity]), + #{ + default => infinity, + desc => "Maximum number of subscriptions allowed." 
+ } + )}, + {"upgrade_qos", + sc( + boolean(), + #{ + default => false, + desc => "Force upgrade of QoS level according to subscription." + } + )}, + {"max_inflight", + sc( + range(1, 65535), + #{ + default => 32, + desc => + "Maximum size of the Inflight Window storing QoS1/2" + " messages delivered but un-acked." + } + )}, + {"retry_interval", + sc( + duration(), + #{ + default => "30s", + desc => "Retry interval for QoS1/2 message delivering." + } + )}, + {"max_awaiting_rel", + sc( + hoconsc:union([integer(), infinity]), + #{ + default => 100, + desc => "Maximum QoS2 packets (Client -> Broker) awaiting PUBREL." + } + )}, + {"await_rel_timeout", + sc( + duration(), + #{ + default => "300s", + desc => + "The QoS2 messages (Client -> Broker) will be dropped" + " if awaiting PUBREL timeout." + } + )}, + {"session_expiry_interval", + sc( + duration(), + #{ + default => "2h", + desc => "Default session expiry interval for MQTT V3.1.1 connections." + } + )}, + {"max_mqueue_len", + sc( + hoconsc:union([non_neg_integer(), infinity]), + #{ + default => 1000, + desc => + "" + "Maximum queue length. Enqueued messages when persistent client disconnected,\n" + "or inflight window is full." + "" + } + )}, + {"mqueue_priorities", + sc( + hoconsc:union([map(), disabled]), + #{ + default => disabled, + desc => + "" + "Topic priorities.
\n" + "There's no priority table by default, hence all messages are treated equal.
\n" + "Priority number [1-255]
\n" + "\n" + "**NOTE**: Comma and equal signs are not allowed for priority topic names.
\n" + "**NOTE**: Messages for topics not in the priority table are treated as\n" + "either highest or lowest priority depending on the configured value for\n" + "mqtt.mqueue_default_priority.\n" + "

\n" + "**Examples**:\n" + "To configure \"topic/1\" > \"topic/2\":
\n" + "mqueue_priorities: {\"topic/1\": 10, \"topic/2\": 8}" + "" + } + )}, + {"mqueue_default_priority", + sc( + hoconsc:enum([highest, lowest]), + #{ + default => lowest, + desc => + "Default to the highest priority for topics not matching priority table." + } + )}, + {"mqueue_store_qos0", + sc( + boolean(), + #{ + default => true, + desc => "Support enqueue QoS0 messages." + } + )}, + {"use_username_as_clientid", + sc( + boolean(), + #{ + default => false, + desc => "Replace client ID with the username." + } + )}, + {"peer_cert_as_username", + sc( + hoconsc:enum([disabled, cn, dn, crt, pem, md5]), + #{ + default => disabled, + desc => + "" + "Use the CN, DN or CRT field from the client certificate as a username.\n" + "Only works for the TLS connection." + "" + } + )}, + {"peer_cert_as_clientid", + sc( + hoconsc:enum([disabled, cn, dn, crt, pem, md5]), + #{ + default => disabled, + desc => + "" + "Use the CN, DN or CRT field from the client certificate as a clientid.\n" + "Only works for the TLS connection." + "" + } + )} ]; - fields("zone") -> Fields = emqx_zone_schema:roots(), [{F, ref(emqx_zone_schema, F)} || F <- Fields]; - fields("rate_limit") -> - [ {"max_conn_rate", - sc(hoconsc:union([infinity, integer()]), - #{ default => 1000 - }) - } - , {"conn_messages_in", - sc(hoconsc:union([infinity, comma_separated_list()]), - #{ default => infinity - }) - } - , {"conn_bytes_in", - sc(hoconsc:union([infinity, comma_separated_list()]), - #{ default => infinity - }) - } + [ + {"max_conn_rate", + sc( + hoconsc:union([infinity, integer()]), + #{default => 1000} + )}, + {"conn_messages_in", + sc( + hoconsc:union([infinity, comma_separated_list()]), + #{default => infinity} + )}, + {"conn_bytes_in", + sc( + hoconsc:union([infinity, comma_separated_list()]), + #{default => infinity} + )} ]; - fields("flapping_detect") -> - [ {"enable", - sc(boolean(), - #{ default => false - })} - , {"max_count", - sc(integer(), - #{ default => 15 - })} - , {"window_time", - sc(duration(), - #{ default => "1m" - })} - , {"ban_time", - sc(duration(), - #{ default => "5m" - })} + [ + {"enable", + sc( + boolean(), + #{default => false} + )}, + {"max_count", + sc( + integer(), + #{default => 15} + )}, + {"window_time", + sc( + duration(), + #{default => "1m"} + )}, + {"ban_time", + sc( + duration(), + #{default => "5m"} + )} ]; - fields("force_shutdown") -> - [ {"enable", - sc(boolean(), - #{ default => true})} - , {"max_message_queue_len", - sc(range(0, inf), - #{ default => 1000 - })} - , {"max_heap_size", - sc(wordsize(), - #{ default => "32MB", - validator => fun ?MODULE:validate_heap_size/1 - })} + [ + {"enable", + sc( + boolean(), + #{default => true} + )}, + {"max_message_queue_len", + sc( + range(0, inf), + #{default => 1000} + )}, + {"max_heap_size", + sc( + wordsize(), + #{ + default => "32MB", + validator => fun ?MODULE:validate_heap_size/1 + } + )} ]; - fields("overload_protection") -> - [ {"enable", - sc(boolean(), - #{ desc => "React on system overload or not" - , default => false - })} - , {"backoff_delay", - sc(range(0, inf), - #{ desc => "Some unimportant tasks could be delayed " - "for execution, here set the delays in ms" - , default => 1 - })} - , {"backoff_gc", - sc(boolean(), - #{ desc => "Skip forceful GC if necessary" - , default => false - })} - , {"backoff_hibernation", - sc(boolean(), - #{ desc => "Skip process hibernation if necessary" - , default => true - })} - , {"backoff_new_conn", - sc(boolean(), - #{ desc => "Close new incoming connections if necessary" - , default => true - })} + [ 
+ {"enable", + sc( + boolean(), + #{ + desc => "React on system overload or not", + default => false + } + )}, + {"backoff_delay", + sc( + range(0, inf), + #{ + desc => + "Some unimportant tasks could be delayed " + "for execution, here set the delays in ms", + default => 1 + } + )}, + {"backoff_gc", + sc( + boolean(), + #{ + desc => "Skip forceful GC if necessary", + default => false + } + )}, + {"backoff_hibernation", + sc( + boolean(), + #{ + desc => "Skip process hibernation if necessary", + default => true + } + )}, + {"backoff_new_conn", + sc( + boolean(), + #{ + desc => "Close new incoming connections if necessary", + default => true + } + )} ]; - fields("conn_congestion") -> - [ {"enable_alarm", - sc(boolean(), - #{ default => false - })} - , {"min_alarm_sustain_duration", - sc(duration(), - #{ default => "1m" - })} + [ + {"enable_alarm", + sc( + boolean(), + #{default => false} + )}, + {"min_alarm_sustain_duration", + sc( + duration(), + #{default => "1m"} + )} ]; - fields("force_gc") -> - [ {"enable", - sc(boolean(), - #{ default => true - })} - , {"count", - sc(range(0, inf), - #{ default => 16000, - desc => "GC the process after this many received messages." - })} - , {"bytes", - sc(bytesize(), - #{ default => "16MB", - desc => "GC the process after specified number of bytes have passed through." - })} + [ + {"enable", + sc( + boolean(), + #{default => true} + )}, + {"count", + sc( + range(0, inf), + #{ + default => 16000, + desc => "GC the process after this many received messages." + } + )}, + {"bytes", + sc( + bytesize(), + #{ + default => "16MB", + desc => "GC the process after specified number of bytes have passed through." + } + )} ]; - fields("listeners") -> - [ {"tcp", - sc(map(name, ref("mqtt_tcp_listener")), - #{ desc => "TCP listeners" - , required => {false, recursively} - }) - } - , {"ssl", - sc(map(name, ref("mqtt_ssl_listener")), - #{ desc => "SSL listeners" - , required => {false, recursively} - }) - } - , {"ws", - sc(map(name, ref("mqtt_ws_listener")), - #{ desc => "HTTP websocket listeners" - , required => {false, recursively} - }) - } - , {"wss", - sc(map(name, ref("mqtt_wss_listener")), - #{ desc => "HTTPS websocket listeners" - , required => {false, recursively} - }) - } - , {"quic", - sc(map(name, ref("mqtt_quic_listener")), - #{ desc => "QUIC listeners" - , required => {false, recursively} - }) - } + [ + {"tcp", + sc( + map(name, ref("mqtt_tcp_listener")), + #{ + desc => "TCP listeners", + required => {false, recursively} + } + )}, + {"ssl", + sc( + map(name, ref("mqtt_ssl_listener")), + #{ + desc => "SSL listeners", + required => {false, recursively} + } + )}, + {"ws", + sc( + map(name, ref("mqtt_ws_listener")), + #{ + desc => "HTTP websocket listeners", + required => {false, recursively} + } + )}, + {"wss", + sc( + map(name, ref("mqtt_wss_listener")), + #{ + desc => "HTTPS websocket listeners", + required => {false, recursively} + } + )}, + {"quic", + sc( + map(name, ref("mqtt_quic_listener")), + #{ + desc => "QUIC listeners", + required => {false, recursively} + } + )} ]; - fields("mqtt_tcp_listener") -> - [ {"tcp", - sc(ref("tcp_opts"), - #{ desc => "TCP listener options" - }) - } + [ + {"tcp", + sc( + ref("tcp_opts"), + #{desc => "TCP listener options"} + )} ] ++ mqtt_listener(); - fields("mqtt_ssl_listener") -> - [ {"tcp", - sc(ref("tcp_opts"), - #{}) - } - , {"ssl", - sc(ref("listener_ssl_opts"), - #{}) - } + [ + {"tcp", + sc( + ref("tcp_opts"), + #{} + )}, + {"ssl", + sc( + ref("listener_ssl_opts"), + #{} + )} ] ++ mqtt_listener(); - 
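%% Editor's note: an illustrative sketch, not part of this patch. The listener
%% schemas above are keyed as listeners.<type>.<name>, so a runtime read of a
%% listener option follows that path; the listener name `default' is a
%% hypothetical example.
example_listener_bind() ->
    emqx:get_config([listeners, ssl, default, bind]).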
fields("mqtt_ws_listener") -> - [ {"tcp", - sc(ref("tcp_opts"), - #{}) - } - , {"websocket", - sc(ref("ws_opts"), - #{}) - } + [ + {"tcp", + sc( + ref("tcp_opts"), + #{} + )}, + {"websocket", + sc( + ref("ws_opts"), + #{} + )} ] ++ mqtt_listener(); - fields("mqtt_wss_listener") -> - [ {"tcp", - sc(ref("tcp_opts"), - #{}) - } - , {"ssl", - sc(ref("listener_wss_opts"), - #{}) - } - , {"websocket", - sc(ref("ws_opts"), - #{}) - } + [ + {"tcp", + sc( + ref("tcp_opts"), + #{} + )}, + {"ssl", + sc( + ref("listener_wss_opts"), + #{} + )}, + {"websocket", + sc( + ref("ws_opts"), + #{} + )} ] ++ mqtt_listener(); - fields("mqtt_quic_listener") -> - [ {"enabled", - sc(boolean(), - #{ default => true - }) - } - %% TODO: ensure cacertfile is configurable - , {"certfile", - sc(string(), - #{}) - } - , {"keyfile", - sc(string(), - #{}) - } - , {"ciphers", ciphers_schema(quic)} - , {"idle_timeout", - sc(duration(), - #{ default => "15s" - }) - } + [ + {"enabled", + sc( + boolean(), + #{default => true} + )}, + %% TODO: ensure cacertfile is configurable + {"certfile", + sc( + string(), + #{} + )}, + {"keyfile", + sc( + string(), + #{} + )}, + {"ciphers", ciphers_schema(quic)}, + {"idle_timeout", + sc( + duration(), + #{default => "15s"} + )} ] ++ base_listener(); - fields("ws_opts") -> - [ {"mqtt_path", - sc(string(), - #{ default => "/mqtt" - }) - } - , {"mqtt_piggyback", - sc(hoconsc:enum([single, multiple]), - #{ default => multiple - }) - } - , {"compress", - sc(boolean(), - #{ default => false - }) - } - , {"idle_timeout", - sc(duration(), - #{ default => "15s" - }) - } - , {"max_frame_size", - sc(hoconsc:union([infinity, integer()]), - #{ default => infinity - }) - } - , {"fail_if_no_subprotocol", - sc(boolean(), - #{ default => true - }) - } - , {"supported_subprotocols", - sc(comma_separated_list(), - #{ default => "mqtt, mqtt-v3, mqtt-v3.1.1, mqtt-v5" - }) - } - , {"check_origin_enable", - sc(boolean(), - #{ default => false - }) - } - , {"allow_origin_absence", - sc(boolean(), - #{ default => true - }) - } - , {"check_origins", - sc(hoconsc:array(binary()), - #{ default => [] - }) - } - , {"proxy_address_header", - sc(string(), - #{ default => "x-forwarded-for" - }) - } - , {"proxy_port_header", - sc(string(), - #{ default => "x-forwarded-port" - }) - } - , {"deflate_opts", - sc(ref("deflate_opts"), - #{}) - } + [ + {"mqtt_path", + sc( + string(), + #{default => "/mqtt"} + )}, + {"mqtt_piggyback", + sc( + hoconsc:enum([single, multiple]), + #{default => multiple} + )}, + {"compress", + sc( + boolean(), + #{default => false} + )}, + {"idle_timeout", + sc( + duration(), + #{default => "15s"} + )}, + {"max_frame_size", + sc( + hoconsc:union([infinity, integer()]), + #{default => infinity} + )}, + {"fail_if_no_subprotocol", + sc( + boolean(), + #{default => true} + )}, + {"supported_subprotocols", + sc( + comma_separated_list(), + #{default => "mqtt, mqtt-v3, mqtt-v3.1.1, mqtt-v5"} + )}, + {"check_origin_enable", + sc( + boolean(), + #{default => false} + )}, + {"allow_origin_absence", + sc( + boolean(), + #{default => true} + )}, + {"check_origins", + sc( + hoconsc:array(binary()), + #{default => []} + )}, + {"proxy_address_header", + sc( + string(), + #{default => "x-forwarded-for"} + )}, + {"proxy_port_header", + sc( + string(), + #{default => "x-forwarded-port"} + )}, + {"deflate_opts", + sc( + ref("deflate_opts"), + #{} + )} ]; - fields("tcp_opts") -> - [ {"active_n", - sc(integer(), - #{ default => 100 - }) - } - , {"backlog", - sc(integer(), - #{ default => 1024 - }) - } - , {"send_timeout", - 
sc(duration(), - #{ default => "15s" - }) - } - , {"send_timeout_close", - sc(boolean(), - #{ default => true - }) - } - , {"recbuf", - sc(bytesize(), - #{}) - } - , {"sndbuf", - sc(bytesize(), - #{}) - } - , {"buffer", - sc(bytesize(), - #{}) - } - , {"high_watermark", - sc(bytesize(), - #{ default => "1MB"}) - } - , {"nodelay", - sc(boolean(), - #{ default => false}) - } - , {"reuseaddr", - sc(boolean(), - #{ default => true - }) - } + [ + {"active_n", + sc( + integer(), + #{default => 100} + )}, + {"backlog", + sc( + integer(), + #{default => 1024} + )}, + {"send_timeout", + sc( + duration(), + #{default => "15s"} + )}, + {"send_timeout_close", + sc( + boolean(), + #{default => true} + )}, + {"recbuf", + sc( + bytesize(), + #{} + )}, + {"sndbuf", + sc( + bytesize(), + #{} + )}, + {"buffer", + sc( + bytesize(), + #{} + )}, + {"high_watermark", + sc( + bytesize(), + #{default => "1MB"} + )}, + {"nodelay", + sc( + boolean(), + #{default => false} + )}, + {"reuseaddr", + sc( + boolean(), + #{default => true} + )} ]; - fields("listener_ssl_opts") -> server_ssl_opts_schema( - #{ depth => 10 - , reuse_sessions => true - , versions => tls_all_available - , ciphers => tls_all_available - }, false); - + #{ + depth => 10, + reuse_sessions => true, + versions => tls_all_available, + ciphers => tls_all_available + }, + false + ); fields("listener_wss_opts") -> server_ssl_opts_schema( - #{ depth => 10 - , reuse_sessions => true - , versions => tls_all_available - , ciphers => tls_all_available - }, true); + #{ + depth => 10, + reuse_sessions => true, + versions => tls_all_available, + ciphers => tls_all_available + }, + true + ); fields(ssl_client_opts) -> client_ssl_opts_schema(#{}); - fields("deflate_opts") -> - [ {"level", - sc(hoconsc:enum([none, default, best_compression, best_speed]), - #{}) - } - , {"mem_level", - sc(range(1, 9), - #{ default => 8 - }) - } - , {"strategy", - sc(hoconsc:enum([default, filtered, huffman_only, rle]), - #{}) - } - , {"server_context_takeover", - sc(hoconsc:enum([takeover, no_takeover]), - #{}) - } - , {"client_context_takeover", - sc(hoconsc:enum([takeover, no_takeover]), - #{}) - } - , {"server_max_window_bits", - sc(range(8, 15), - #{ default => 15 - }) - } - , {"client_max_window_bits", - sc(range(8, 15), - #{ default => 15 - }) - } + [ + {"level", + sc( + hoconsc:enum([none, default, best_compression, best_speed]), + #{} + )}, + {"mem_level", + sc( + range(1, 9), + #{default => 8} + )}, + {"strategy", + sc( + hoconsc:enum([default, filtered, huffman_only, rle]), + #{} + )}, + {"server_context_takeover", + sc( + hoconsc:enum([takeover, no_takeover]), + #{} + )}, + {"client_context_takeover", + sc( + hoconsc:enum([takeover, no_takeover]), + #{} + )}, + {"server_max_window_bits", + sc( + range(8, 15), + #{default => 15} + )}, + {"client_max_window_bits", + sc( + range(8, 15), + #{default => 15} + )} ]; - fields("broker") -> - [ {"enable_session_registry", - sc(boolean(), - #{ default => true - }) - } - , {"session_locking_strategy", - sc(hoconsc:enum([local, leader, quorum, all]), - #{ default => quorum - }) - } - , {"shared_subscription_strategy", - sc(hoconsc:enum([random, round_robin, sticky, hash_topic, hash_clientid]), - #{ default => round_robin - }) - } - , {"shared_dispatch_ack_enabled", - sc(boolean(), - #{ default => false - }) - } - , {"route_batch_clean", - sc(boolean(), - #{ default => true - })} - , {"perf", - sc(ref("broker_perf"), - #{ desc => "Broker performance tuning parameters" - }) - } + [ + {"enable_session_registry", + sc( + boolean(), + 
#{default => true} + )}, + {"session_locking_strategy", + sc( + hoconsc:enum([local, leader, quorum, all]), + #{default => quorum} + )}, + {"shared_subscription_strategy", + sc( + hoconsc:enum([random, round_robin, sticky, hash_topic, hash_clientid]), + #{default => round_robin} + )}, + {"shared_dispatch_ack_enabled", + sc( + boolean(), + #{default => false} + )}, + {"route_batch_clean", + sc( + boolean(), + #{default => true} + )}, + {"perf", + sc( + ref("broker_perf"), + #{desc => "Broker performance tuning parameters"} + )} ]; - fields("broker_perf") -> - [ {"route_lock_type", - sc(hoconsc:enum([key, tab, global]), - #{ default => key - })} - , {"trie_compaction", - sc(boolean(), - #{ default => true - })} + [ + {"route_lock_type", + sc( + hoconsc:enum([key, tab, global]), + #{default => key} + )}, + {"trie_compaction", + sc( + boolean(), + #{default => true} + )} ]; - fields("sys_topics") -> - [ {"sys_msg_interval", - sc(hoconsc:union([disabled, duration()]), - #{ default => "1m" - }) - } - , {"sys_heartbeat_interval", - sc(hoconsc:union([disabled, duration()]), - #{ default => "30s" - }) - } - , {"sys_event_messages", - sc(ref("event_names"), - #{ desc => - """Whether to enable Client lifecycle event messages publish.
-The following options are not only for enabling MQTT client event messages -publish but also for Gateway clients. However, these kinds of clients type -are distinguished by the Topic prefix: -- For the MQTT client, its event topic format is:
- $SYS/broker//clients//
-- For the Gateway client, it is - $SYS/broker//gateway//clients//""" - }) - } + [ + {"sys_msg_interval", + sc( + hoconsc:union([disabled, duration()]), + #{default => "1m"} + )}, + {"sys_heartbeat_interval", + sc( + hoconsc:union([disabled, duration()]), + #{default => "30s"} + )}, + {"sys_event_messages", + sc( + ref("event_names"), + #{ + desc => + "" + "Whether to enable Client lifecycle event messages publish.
\n" + "The following options are not only for enabling MQTT client event messages\n" + "publish but also for Gateway clients. However, these kinds of clients type\n" + "are distinguished by the Topic prefix:\n" + "- For the MQTT client, its event topic format is:
\n" + " $SYS/broker//clients//
\n" + "- For the Gateway client, it is\n" + " $SYS/broker//gateway//clients//" + "" + } + )} ]; - fields("event_names") -> - [ {"client_connected", - sc(boolean(), - #{default => true - }) - } - , {"client_disconnected", - sc(boolean(), - #{default => true - }) - } - , {"client_subscribed", - sc(boolean(), - #{default => false - }) - } - , {"client_unsubscribed", - sc(boolean(), - #{default => false - }) - } + [ + {"client_connected", + sc( + boolean(), + #{default => true} + )}, + {"client_disconnected", + sc( + boolean(), + #{default => true} + )}, + {"client_subscribed", + sc( + boolean(), + #{default => false} + )}, + {"client_unsubscribed", + sc( + boolean(), + #{default => false} + )} ]; - fields("sysmon") -> - [ {"vm", - sc(ref("sysmon_vm"), - #{ desc => "This part of the configuration is responsible for collecting - BEAM VM events, such as long garbage collection, traffic congestion in the inter-broker - communication, etc." - }) - } - , {"os", - sc(ref("sysmon_os"), - #{ desc => "This part of the configuration is responsible for monitoring - the host OS health, such as free memory, disk space, CPU load, etc." - }) - } - , {"top", - sc(ref("sysmon_top"), - #{ desc => "This part of the configuration is responsible for monitoring - the Erlang processes in the VM. This information can be sent to an external - PostgreSQL database. This feature is inactive unless the PostgreSQL sink is configured." - }) - } + [ + {"vm", + sc( + ref("sysmon_vm"), + #{ + desc => + "This part of the configuration is responsible for collecting\n" + " BEAM VM events, such as long garbage collection, traffic congestion in the inter-broker\n" + " communication, etc." + } + )}, + {"os", + sc( + ref("sysmon_os"), + #{ + desc => + "This part of the configuration is responsible for monitoring\n" + " the host OS health, such as free memory, disk space, CPU load, etc." + } + )}, + {"top", + sc( + ref("sysmon_top"), + #{ + desc => + "This part of the configuration is responsible for monitoring\n" + " the Erlang processes in the VM. This information can be sent to an external\n" + " PostgreSQL database. This feature is inactive unless the PostgreSQL sink is configured." 
+ } + )} ]; - fields("sysmon_vm") -> - [ {"process_check_interval", - sc(duration(), - #{ default => "30s" - }) - } - , {"process_high_watermark", - sc(percent(), - #{ default => "80%" - }) - } - , {"process_low_watermark", - sc(percent(), - #{ default => "60%" - }) - } - , {"long_gc", - sc(hoconsc:union([disabled, duration()]), - #{}) - } - , {"long_schedule", - sc(hoconsc:union([disabled, duration()]), - #{ default => "240ms" - }) - } - , {"large_heap", - sc(hoconsc:union([disabled, bytesize()]), - #{default => "32MB"}) - } - , {"busy_dist_port", - sc(boolean(), - #{ default => true - }) - } - , {"busy_port", - sc(boolean(), - #{ default => true - })} + [ + {"process_check_interval", + sc( + duration(), + #{default => "30s"} + )}, + {"process_high_watermark", + sc( + percent(), + #{default => "80%"} + )}, + {"process_low_watermark", + sc( + percent(), + #{default => "60%"} + )}, + {"long_gc", + sc( + hoconsc:union([disabled, duration()]), + #{} + )}, + {"long_schedule", + sc( + hoconsc:union([disabled, duration()]), + #{default => "240ms"} + )}, + {"large_heap", + sc( + hoconsc:union([disabled, bytesize()]), + #{default => "32MB"} + )}, + {"busy_dist_port", + sc( + boolean(), + #{default => true} + )}, + {"busy_port", + sc( + boolean(), + #{default => true} + )} ]; - fields("sysmon_os") -> - [ {"cpu_check_interval", - sc(duration(), - #{ default => "60s"}) - } - , {"cpu_high_watermark", - sc(percent(), - #{ default => "80%" - }) - } - , {"cpu_low_watermark", - sc(percent(), - #{ default => "60%" - }) - } - , {"mem_check_interval", - sc(hoconsc:union([disabled, duration()]), - #{ default => "60s" - })} - , {"sysmem_high_watermark", - sc(percent(), - #{ default => "70%" - }) - } - , {"procmem_high_watermark", - sc(percent(), - #{ default => "5%" - }) - } + [ + {"cpu_check_interval", + sc( + duration(), + #{default => "60s"} + )}, + {"cpu_high_watermark", + sc( + percent(), + #{default => "80%"} + )}, + {"cpu_low_watermark", + sc( + percent(), + #{default => "60%"} + )}, + {"mem_check_interval", + sc( + hoconsc:union([disabled, duration()]), + #{default => "60s"} + )}, + {"sysmem_high_watermark", + sc( + percent(), + #{default => "70%"} + )}, + {"procmem_high_watermark", + sc( + percent(), + #{default => "5%"} + )} ]; - fields("sysmon_top") -> - [ {"num_items", - sc(non_neg_integer(), - #{ mapping => "system_monitor.top_num_items" - , default => 10 - , desc => "The number of top processes per monitoring group" - }) - } - , {"sample_interval", - sc(emqx_schema:duration(), - #{ mapping => "system_monitor.top_sample_interval" - , default => "2s" - , desc => "Specifies how often process top should be collected" - }) - } - , {"max_procs", - sc(non_neg_integer(), - #{ mapping => "system_monitor.top_max_procs" - , default => 1_000_000 - , desc => "Stop collecting data when the number of processes -in the VM exceeds this value" - }) - } - , {"db_hostname", - sc(string(), - #{ mapping => "system_monitor.db_hostname" - , desc => "Hostname of the PostgreSQL database that collects the data points" - , default => "" - }) - } - , {"db_port", - sc(integer(), - #{ mapping => "system_monitor.db_port" - , default => 5432 - , desc => "Port of the PostgreSQL database that collects the data points" - }) - } - , {"db_username", - sc(string(), - #{ mapping => "system_monitor.db_username" - , default => "system_monitor" - , desc => "EMQX username in the PostgreSQL database" - }) - } - , {"db_password", - sc(binary(), - #{ mapping => "system_monitor.db_password" - , default => "system_monitor_password" - , desc => 
"EMQX user password in the PostgreSQL database" - }) - } - , {"db_name", - sc(string(), - #{ mapping => "system_monitor.db_name" - , default => "postgres" - , desc => "PostgreSQL database name" - }) - } + [ + {"num_items", + sc( + non_neg_integer(), + #{ + mapping => "system_monitor.top_num_items", + default => 10, + desc => "The number of top processes per monitoring group" + } + )}, + {"sample_interval", + sc( + emqx_schema:duration(), + #{ + mapping => "system_monitor.top_sample_interval", + default => "2s", + desc => "Specifies how often process top should be collected" + } + )}, + {"max_procs", + sc( + non_neg_integer(), + #{ + mapping => "system_monitor.top_max_procs", + default => 1_000_000, + desc => + "Stop collecting data when the number of processes\n" + "in the VM exceeds this value" + } + )}, + {"db_hostname", + sc( + string(), + #{ + mapping => "system_monitor.db_hostname", + desc => "Hostname of the PostgreSQL database that collects the data points", + default => "" + } + )}, + {"db_port", + sc( + integer(), + #{ + mapping => "system_monitor.db_port", + default => 5432, + desc => "Port of the PostgreSQL database that collects the data points" + } + )}, + {"db_username", + sc( + string(), + #{ + mapping => "system_monitor.db_username", + default => "system_monitor", + desc => "EMQX username in the PostgreSQL database" + } + )}, + {"db_password", + sc( + binary(), + #{ + mapping => "system_monitor.db_password", + default => "system_monitor_password", + desc => "EMQX user password in the PostgreSQL database" + } + )}, + {"db_name", + sc( + string(), + #{ + mapping => "system_monitor.db_name", + default => "postgres", + desc => "PostgreSQL database name" + } + )} ]; - fields("alarm") -> - [ {"actions", - sc(hoconsc:array(atom()), - #{ default => [log, publish], - validator => fun ?MODULE:validate_alarm_actions/1, - example => [log, publish], - desc => - """The actions triggered when the alarm is activated.
-Currently, the following actions are supported: log and publish. -log is to write the alarm to log (console or file). -publish is to publish the alarm as an MQTT message to the system topics: -$SYS/brokers/emqx@xx.xx.xx.x/alarms/activate and -$SYS/brokers/emqx@xx.xx.xx.x/alarms/deactivate""" - }) - } - , {"size_limit", - sc(range(1, 3000), - #{ default => 1000, - example => 1000, - desc => - """The maximum total number of deactivated alarms to keep as history.
-When this limit is exceeded, the oldest deactivated alarms are deleted to cap the total number. -""" - }) - } - , {"validity_period", - sc(duration(), - #{ default => "24h", - example => "24h", - desc => -"""Retention time of deactivated alarms. Alarms are not deleted immediately -when deactivated, but after the retention time. -""" - }) - } + [ + {"actions", + sc( + hoconsc:array(atom()), + #{ + default => [log, publish], + validator => fun ?MODULE:validate_alarm_actions/1, + example => [log, publish], + desc => + "" + "The actions triggered when the alarm is activated.
\n" + "Currently, the following actions are supported: log and publish.\n" + "log is to write the alarm to log (console or file).\n" + "publish is to publish the alarm as an MQTT message to the system topics:\n" + "$SYS/brokers/emqx@xx.xx.xx.x/alarms/activate and\n" + "$SYS/brokers/emqx@xx.xx.xx.x/alarms/deactivate" + "" + } + )}, + {"size_limit", + sc( + range(1, 3000), + #{ + default => 1000, + example => 1000, + desc => + "" + "The maximum total number of deactivated alarms to keep as history.
\n" + "When this limit is exceeded, the oldest deactivated alarms are deleted to cap the total number.\n" + "" + } + )}, + {"validity_period", + sc( + duration(), + #{ + default => "24h", + example => "24h", + desc => + "" + "Retention time of deactivated alarms. Alarms are not deleted immediately\n" + "when deactivated, but after the retention time.\n" + "" + } + )} ]; - fields("trace") -> - [ {"payload_encode", sc(hoconsc:enum([hex, text, hidden]), #{ - default => text, - desc => """ -Determine the format of the payload format in the trace file.
-`text`: Text-based protocol or plain text protocol. - It is recommended when payload is JSON encoded.
-`hex`: Binary hexadecimal encode. It is recommended when payload is a custom binary protocol.
-`hidden`: payload is obfuscated as `******` - """ - })} + [ + {"payload_encode", + sc(hoconsc:enum([hex, text, hidden]), #{ + default => text, + desc => + "" + "\n" + "Determine the format of the payload in the trace file.

\n" + "`text`: Text-based protocol or plain text protocol.\n" + " It is recommended when payload is JSON encoded.
\n" + "`hex`: Binary hexadecimal encode. It is recommended when payload is a custom binary protocol.
\n" + "`hidden`: payload is obfuscated as `******`\n" + " " + "" + })} ]. mqtt_listener() -> base_listener() ++ - [ {"access_rules", - sc(hoconsc:array(string()), - #{}) - } - , {"proxy_protocol", - sc(boolean(), - #{ default => false - }) - } - , {"proxy_protocol_timeout", - sc(duration(), - #{}) - } - , {?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME, - authentication("Per-listener authentication override") - } - ]. + [ + {"access_rules", + sc( + hoconsc:array(string()), + #{} + )}, + {"proxy_protocol", + sc( + boolean(), + #{default => false} + )}, + {"proxy_protocol_timeout", + sc( + duration(), + #{} + )}, + {?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME, + authentication("Per-listener authentication override")} + ]. base_listener() -> - [ {"bind", - sc(hoconsc:union([ip_port(), integer()]), - #{ required => true - })} - , {"acceptors", - sc(integer(), - #{ default => 16 - })} - , {"max_connections", - sc(hoconsc:union([infinity, integer()]), - #{ default => infinity - })} - , {"mountpoint", - sc(binary(), - #{ default => <<>> - })} - , {"zone", - sc(atom(), - #{ default => 'default' - })} - , {"limiter", - sc(map("ratelimit's type", emqx_limiter_schema:bucket_name()), #{default => #{}})} + [ + {"bind", + sc( + hoconsc:union([ip_port(), integer()]), + #{required => true} + )}, + {"acceptors", + sc( + integer(), + #{default => 16} + )}, + {"max_connections", + sc( + hoconsc:union([infinity, integer()]), + #{default => infinity} + )}, + {"mountpoint", + sc( + binary(), + #{default => <<>>} + )}, + {"zone", + sc( + atom(), + #{default => 'default'} + )}, + {"limiter", + sc(map("ratelimit's type", emqx_limiter_schema:bucket_name()), #{default => #{}})} ]. %% utils --spec(conf_get(string() | [string()], hocon:config()) -> term()). +-spec conf_get(string() | [string()], hocon:config()) -> term(). conf_get(Key, Conf) -> V = hocon_maps:get(Key, Conf), case is_binary(V) of @@ -1227,252 +1541,307 @@ filter(Opts) -> %% SSL listener and client. -spec common_ssl_opts_schema(map()) -> hocon_schema:field_schema(). common_ssl_opts_schema(Defaults) -> - D = fun (Field) -> maps:get(to_atom(Field), Defaults, undefined) end, - Df = fun (Field, Default) -> maps:get(to_atom(Field), Defaults, Default) end, - [ {"enable", - sc(boolean(), - #{ default => Df("enable", false) - }) - } - , {"cacertfile", - sc(string(), - #{ default => D("cacertfile") - , required => false - , desc => -"""Trusted PEM format CA certificates bundle file.
-The certificates in this file are used to verify the TLS peer's certificates. -Append new certificates to the file if new CAs are to be trusted. -There is no need to restart EMQX to have the updated file loaded, because -the system regularly checks if file has been updated (and reload).
-NOTE: invalidating (deleting) a certificate from the file will not affect -already established connections. -""" - }) - } - , {"certfile", - sc(string(), - #{ default => D("certfile") - , required => false - , desc => -"""PEM format certificates chain file.
-The certificates in this file should be in reversed order of the certificate -issue chain. That is, the host's certificate should be placed in the beginning -of the file, followed by the immediate issuer certificate and so on. -Although the root CA certificate is optional, it should be placed at the end of -the file if it is to be added. -""" - }) - } - , {"keyfile", - sc(string(), - #{ default => D("keyfile") - , required => false - , desc => -"""PEM format private key file.
-""" - }) - } - , {"verify", - sc(hoconsc:enum([verify_peer, verify_none]), - #{ default => Df("verify", verify_none) - }) - } - , {"reuse_sessions", - sc(boolean(), - #{ default => Df("reuse_sessions", true) - }) - } - , {"depth", - sc(integer(), - #{default => Df("depth", 10) - }) - } - , {"password", - sc(string(), - #{ sensitive => true - , required => false - , desc => -"""String containing the user's password. Only used if the private -key file is password-protected.""" - }) - } - , {"versions", - sc(hoconsc:array(typerefl:atom()), - #{ default => default_tls_vsns(maps:get(versions, Defaults, tls_all_available)) - , desc => -"""All TLS/DTLS versions to be supported.
-NOTE: PSK ciphers are suppressed by 'tlsv1.3' version config
-In case PSK cipher suites are intended, make sure to configured -['tlsv1.2', 'tlsv1.1'] here. -""" - , validator => fun validate_tls_versions/1 - }) - } - , {"ciphers", ciphers_schema(D("ciphers"))} - , {user_lookup_fun, - sc(typerefl:alias("string", any()), - #{ default => <<"emqx_tls_psk:lookup">> - , converter => fun ?MODULE:parse_user_lookup_fun/1 - }) - } - , {"secure_renegotiate", - sc(boolean(), - #{ default => Df("secure_renegotiate", true) - , desc => """ -SSL parameter renegotiation is a feature that allows a client and a server -to renegotiate the parameters of the SSL connection on the fly. -RFC 5746 defines a more secure way of doing this. By enabling secure renegotiation, -you drop support for the insecure renegotiation, prone to MitM attacks. -""" - }) - } + D = fun(Field) -> maps:get(to_atom(Field), Defaults, undefined) end, + Df = fun(Field, Default) -> maps:get(to_atom(Field), Defaults, Default) end, + [ + {"enable", + sc( + boolean(), + #{default => Df("enable", false)} + )}, + {"cacertfile", + sc( + string(), + #{ + default => D("cacertfile"), + required => false, + desc => + "" + "Trusted PEM format CA certificates bundle file.
\n" + "The certificates in this file are used to verify the TLS peer's certificates.\n" + "Append new certificates to the file if new CAs are to be trusted.\n" + "There is no need to restart EMQX to have the updated file loaded, because\n" + "the system regularly checks if file has been updated (and reload).
\n" + "NOTE: invalidating (deleting) a certificate from the file will not affect\n" + "already established connections.\n" + "" + } + )}, + {"certfile", + sc( + string(), + #{ + default => D("certfile"), + required => false, + desc => + "" + "PEM format certificates chain file.
\n" + "The certificates in this file should be in reversed order of the certificate\n" + "issue chain. That is, the host's certificate should be placed in the beginning\n" + "of the file, followed by the immediate issuer certificate and so on.\n" + "Although the root CA certificate is optional, it should be placed at the end of\n" + "the file if it is to be added.\n" + "" + } + )}, + {"keyfile", + sc( + string(), + #{ + default => D("keyfile"), + required => false, + desc => + "" "PEM format private key file.
\n" "" + } + )}, + {"verify", + sc( + hoconsc:enum([verify_peer, verify_none]), + #{default => Df("verify", verify_none)} + )}, + {"reuse_sessions", + sc( + boolean(), + #{default => Df("reuse_sessions", true)} + )}, + {"depth", + sc( + integer(), + #{default => Df("depth", 10)} + )}, + {"password", + sc( + string(), + #{ + sensitive => true, + required => false, + desc => + "" + "String containing the user's password. Only used if the private\n" + "key file is password-protected." + "" + } + )}, + {"versions", + sc( + hoconsc:array(typerefl:atom()), + #{ + default => default_tls_vsns(maps:get(versions, Defaults, tls_all_available)), + desc => + "" + "All TLS/DTLS versions to be supported.
\n" + "NOTE: PSK ciphers are suppressed by 'tlsv1.3' version config
\n" + "In case PSK cipher suites are intended, make sure to configured\n" + "['tlsv1.2', 'tlsv1.1'] here.\n" + "", + validator => fun validate_tls_versions/1 + } + )}, + {"ciphers", ciphers_schema(D("ciphers"))}, + {user_lookup_fun, + sc( + typerefl:alias("string", any()), + #{ + default => <<"emqx_tls_psk:lookup">>, + converter => fun ?MODULE:parse_user_lookup_fun/1 + } + )}, + {"secure_renegotiate", + sc( + boolean(), + #{ + default => Df("secure_renegotiate", true), + desc => + "" + "\n" + "SSL parameter renegotiation is a feature that allows a client and a server\n" + "to renegotiate the parameters of the SSL connection on the fly.\n" + "RFC 5746 defines a more secure way of doing this. By enabling secure renegotiation,\n" + "you drop support for the insecure renegotiation, prone to MitM attacks.\n" + "" + } + )} ]. %% @doc Make schema for SSL listener options. %% When it's for ranch listener, an extra field `handshake_timeout' is added. -spec server_ssl_opts_schema(map(), boolean()) -> hocon_schema:field_schema(). server_ssl_opts_schema(Defaults, IsRanchListener) -> - D = fun (Field) -> maps:get(to_atom(Field), Defaults, undefined) end, - Df = fun (Field, Default) -> maps:get(to_atom(Field), Defaults, Default) end, + D = fun(Field) -> maps:get(to_atom(Field), Defaults, undefined) end, + Df = fun(Field, Default) -> maps:get(to_atom(Field), Defaults, Default) end, common_ssl_opts_schema(Defaults) ++ - [ {"dhfile", - sc(string(), - #{ default => D("dhfile") - , required => false - , desc => -"""Path to a file containing PEM-encoded Diffie Hellman parameters -to be used by the server if a cipher suite using Diffie Hellman -key exchange is negotiated. If not specified, default parameters -are used.
-NOTE: The dhfile option is not supported by TLS 1.3.""" - }) - } - , {"fail_if_no_peer_cert", - sc(boolean(), - #{ default => Df("fail_if_no_peer_cert", false) - , desc => -""" -Used together with {verify, verify_peer} by an TLS/DTLS server. -If set to true, the server fails if the client does not have a -certificate to send, that is, sends an empty certificate. -If set to false, it fails only if the client sends an invalid -certificate (an empty certificate is considered valid). -""" - }) - } - , {"honor_cipher_order", - sc(boolean(), - #{ default => Df("honor_cipher_order", true) - }) - } - , {"client_renegotiation", - sc(boolean(), - #{ default => Df("client_renegotiation", true) - , desc => """ -In protocols that support client-initiated renegotiation, -the cost of resources of such an operation is higher for the server than the client. -This can act as a vector for denial of service attacks. -The SSL application already takes measures to counter-act such attempts, -but client-initiated renegotiation can be strictly disabled by setting this option to false. -The default value is true. Note that disabling renegotiation can result in -long-lived connections becoming unusable due to limits on -the number of messages the underlying cipher suite can encipher. -""" - }) - } - | [ {"handshake_timeout", - sc(duration(), - #{ default => Df("handshake_timeout", "15s") - , desc => "Maximum time duration allowed for the handshake to complete" - })} - || IsRanchListener] - ]. + [ + {"dhfile", + sc( + string(), + #{ + default => D("dhfile"), + required => false, + desc => + "" + "Path to a file containing PEM-encoded Diffie Hellman parameters\n" + "to be used by the server if a cipher suite using Diffie Hellman\n" + "key exchange is negotiated. If not specified, default parameters\n" + "are used.
\n" + "NOTE: The dhfile option is not supported by TLS 1.3." + "" + } + )}, + {"fail_if_no_peer_cert", + sc( + boolean(), + #{ + default => Df("fail_if_no_peer_cert", false), + desc => + "" + "\n" + "Used together with {verify, verify_peer} by an TLS/DTLS server.\n" + "If set to true, the server fails if the client does not have a\n" + "certificate to send, that is, sends an empty certificate.\n" + "If set to false, it fails only if the client sends an invalid\n" + "certificate (an empty certificate is considered valid).\n" + "" + } + )}, + {"honor_cipher_order", + sc( + boolean(), + #{default => Df("honor_cipher_order", true)} + )}, + {"client_renegotiation", + sc( + boolean(), + #{ + default => Df("client_renegotiation", true), + desc => + "" + "\n" + "In protocols that support client-initiated renegotiation,\n" + "the cost of resources of such an operation is higher for the server than the client.\n" + "This can act as a vector for denial of service attacks.\n" + "The SSL application already takes measures to counter-act such attempts,\n" + "but client-initiated renegotiation can be strictly disabled by setting this option to false.\n" + "The default value is true. Note that disabling renegotiation can result in\n" + "long-lived connections becoming unusable due to limits on\n" + "the number of messages the underlying cipher suite can encipher.\n" + "" + } + )} + | [ + {"handshake_timeout", + sc( + duration(), + #{ + default => Df("handshake_timeout", "15s"), + desc => "Maximum time duration allowed for the handshake to complete" + } + )} + || IsRanchListener + ] + ]. %% @doc Make schema for SSL client. -spec client_ssl_opts_schema(map()) -> hocon_schema:field_schema(). client_ssl_opts_schema(Defaults) -> common_ssl_opts_schema(Defaults) ++ - [ { "server_name_indication", - sc(hoconsc:union([disable, string()]), - #{ required => false - , desc => -"""Specify the host name to be used in TLS Server Name Indication extension.
-For instance, when connecting to \"server.example.net\", the genuine server -which accepts the connection and performs TLS handshake may differ from the -host the TLS client initially connects to, e.g. when connecting to an IP address -or when the host has multiple resolvable DNS records
-If not specified, it will default to the host name string which is used -to establish the connection, unless it is IP addressed used.
-The host name is then also used in the host name verification of the peer -certificate.
The special value 'disable' prevents the Server Name -Indication extension from being sent and disables the hostname -verification check.""" - })} - ]. - + [ + {"server_name_indication", + sc( + hoconsc:union([disable, string()]), + #{ + required => false, + desc => + "" + "Specify the host name to be used in TLS Server Name Indication extension.
\n" + "For instance, when connecting to \"server.example.net\", the genuine server\n" + "which accepts the connection and performs TLS handshake may differ from the\n" + "host the TLS client initially connects to, e.g. when connecting to an IP address\n" + "or when the host has multiple resolvable DNS records
\n" + "If not specified, it will default to the host name string which is used\n" + "to establish the connection, unless it is IP addressed used.
\n" + "The host name is then also used in the host name verification of the peer\n" + "certificate.
The special value 'disable' prevents the Server Name\n" + "Indication extension from being sent and disables the hostname\n" + "verification check." + "" + } + )} + ]. default_tls_vsns(dtls_all_available) -> proplists:get_value(available_dtls, ssl:versions()); default_tls_vsns(tls_all_available) -> emqx_tls_lib:default_versions(). --spec ciphers_schema(quic | dtls_all_available | tls_all_available | undefined) - -> hocon_schema:field_schema(). +-spec ciphers_schema(quic | dtls_all_available | tls_all_available | undefined) -> + hocon_schema:field_schema(). ciphers_schema(Default) -> - sc(hoconsc:array(string()), - #{ default => default_ciphers(Default) - , converter => fun(Ciphers) when is_binary(Ciphers) -> - binary:split(Ciphers, <<",">>, [global]); - (Ciphers) when is_list(Ciphers) -> - Ciphers - end - , validator => case Default =:= quic of - true -> undefined; %% quic has openssl statically linked - false -> fun validate_ciphers/1 - end - , desc => -"""This config holds TLS cipher suite names separated by comma, -or as an array of strings. e.g. -\"TLS_AES_256_GCM_SHA384,TLS_AES_128_GCM_SHA256\" or -[\"TLS_AES_256_GCM_SHA384\",\"TLS_AES_128_GCM_SHA256\"]. -
-Ciphers (and their ordering) define the way in which the -client and server encrypts information over the network connection. -Selecting a good cipher suite is critical for the -application's data security, confidentiality and performance. - -The names should be in OpenSSL string format (not RFC format). -All default values and examples provided by EMQX config -documentation are all in OpenSSL format.
- -NOTE: Certain cipher suites are only compatible with -specific TLS versions ('tlsv1.1', 'tlsv1.2' or 'tlsv1.3') -incompatible cipher suites will be silently dropped. -For instance, if only 'tlsv1.3' is given in the versions, -configuring cipher suites for other versions will have no effect. -
- -NOTE: PSK ciphers are suppressed by 'tlsv1.3' version config
-If PSK cipher suites are intended, 'tlsv1.3' should be disabled from versions.
-PSK cipher suites: \"RSA-PSK-AES256-GCM-SHA384,RSA-PSK-AES256-CBC-SHA384, -RSA-PSK-AES128-GCM-SHA256,RSA-PSK-AES128-CBC-SHA256, -RSA-PSK-AES256-CBC-SHA,RSA-PSK-AES128-CBC-SHA, -RSA-PSK-DES-CBC3-SHA,RSA-PSK-RC4-SHA\"
-""" ++ case Default of - quic -> "NOTE: QUIC listener supports only 'tlsv1.3' ciphers
"; - _ -> "" - end}). + sc( + hoconsc:array(string()), + #{ + default => default_ciphers(Default), + converter => fun + (Ciphers) when is_binary(Ciphers) -> + binary:split(Ciphers, <<",">>, [global]); + (Ciphers) when is_list(Ciphers) -> + Ciphers + end, + validator => + case Default =:= quic of + %% quic has openssl statically linked + true -> undefined; + false -> fun validate_ciphers/1 + end, + desc => + "" + "This config holds TLS cipher suite names separated by comma,\n" + "or as an array of strings. e.g.\n" + "\"TLS_AES_256_GCM_SHA384,TLS_AES_128_GCM_SHA256\" or\n" + "[\"TLS_AES_256_GCM_SHA384\",\"TLS_AES_128_GCM_SHA256\"].\n" + "
\n" + "Ciphers (and their ordering) define the way in which the\n" + "client and server encrypts information over the network connection.\n" + "Selecting a good cipher suite is critical for the\n" + "application's data security, confidentiality and performance.\n" + "\n" + "The names should be in OpenSSL string format (not RFC format).\n" + "All default values and examples provided by EMQX config\n" + "documentation are all in OpenSSL format.
\n" + "\n" + "NOTE: Certain cipher suites are only compatible with\n" + "specific TLS versions ('tlsv1.1', 'tlsv1.2' or 'tlsv1.3')\n" + "incompatible cipher suites will be silently dropped.\n" + "For instance, if only 'tlsv1.3' is given in the versions,\n" + "configuring cipher suites for other versions will have no effect.\n" + "
\n" + "\n" + "NOTE: PSK ciphers are suppressed by 'tlsv1.3' version config
\n" + "If PSK cipher suites are intended, 'tlsv1.3' should be disabled from versions.
\n" + "PSK cipher suites: \"RSA-PSK-AES256-GCM-SHA384,RSA-PSK-AES256-CBC-SHA384,\n" + "RSA-PSK-AES128-GCM-SHA256,RSA-PSK-AES128-CBC-SHA256,\n" + "RSA-PSK-AES256-CBC-SHA,RSA-PSK-AES128-CBC-SHA,\n" + "RSA-PSK-DES-CBC3-SHA,RSA-PSK-RC4-SHA\"
\n" + "" ++ + case Default of + quic -> "NOTE: QUIC listener supports only 'tlsv1.3' ciphers
"; + _ -> "" + end + } + ). default_ciphers(Which) -> - lists:map(fun erlang:iolist_to_binary/1, - do_default_ciphers(Which)). + lists:map( + fun erlang:iolist_to_binary/1, + do_default_ciphers(Which) + ). do_default_ciphers(undefined) -> do_default_ciphers(tls_all_available); -do_default_ciphers(quic) -> [ - "TLS_AES_256_GCM_SHA384", - "TLS_AES_128_GCM_SHA256", - "TLS_CHACHA20_POLY1305_SHA256" +do_default_ciphers(quic) -> + [ + "TLS_AES_256_GCM_SHA384", + "TLS_AES_128_GCM_SHA256", + "TLS_CHACHA20_POLY1305_SHA256" ]; do_default_ciphers(dtls_all_available) -> %% as of now, dtls does not support tlsv1.3 ciphers @@ -1481,7 +1850,7 @@ do_default_ciphers(tls_all_available) -> emqx_tls_lib:default_ciphers(). %% @private return a list of keys in a parent field --spec(keys(string(), hocon:config()) -> [string()]). +-spec keys(string(), hocon:config()) -> [string()]. keys(Parent, Conf) -> [binary_to_list(B) || B <- maps:keys(conf_get(Parent, Conf, #{}))]. @@ -1505,12 +1874,15 @@ ref(Field) -> hoconsc:ref(?MODULE, Field). ref(Module, Field) -> hoconsc:ref(Module, Field). mk_duration(Desc, OverrideMeta) -> - DefaultMeta = #{desc => Desc ++ " time span. A text string with number followed by time units: - `ms` for milliseconds, - `s` for seconds, - `m` for minutes, - `h` for hours; - or combined representation like `1h5m0s`"}, + DefaultMeta = #{ + desc => Desc ++ + " time span. A text string with number followed by time units:\n" + " `ms` for milliseconds,\n" + " `s` for seconds,\n" + " `m` for minutes,\n" + " `h` for hours;\n" + " or combined representation like `1h5m0s`" + }, hoconsc:mk(typerefl:alias("string", duration()), maps:merge(DefaultMeta, OverrideMeta)). to_duration(Str) -> @@ -1525,8 +1897,8 @@ to_duration_s(Str) -> _ -> {error, Str} end. --spec to_duration_ms(Input) -> {ok, integer()} | {error, Input} - when Input :: string() | binary(). +-spec to_duration_ms(Input) -> {ok, integer()} | {error, Input} when + Input :: string() | binary(). to_duration_ms(Str) -> case hocon_postprocess:duration(Str) of I when is_number(I) -> {ok, ceiling(I)}; @@ -1574,7 +1946,8 @@ to_ip_port(Str) -> {error, Str} end end; - _ -> {error, Str} + _ -> + {error, Str} end. to_erl_cipher_suite(Str) -> @@ -1591,19 +1964,24 @@ to_atom(Bin) when is_binary(Bin) -> binary_to_atom(Bin, utf8). validate_heap_size(Siz) -> - MaxSiz = case erlang:system_info(wordsize) of - 8 -> % arch_64 - (1 bsl 59) - 1; - 4 -> % arch_32 - (1 bsl 27) - 1 - end, + MaxSiz = + case erlang:system_info(wordsize) of + % arch_64 + 8 -> + (1 bsl 59) - 1; + % arch_32 + 4 -> + (1 bsl 27) - 1 + end, case Siz > MaxSiz of true -> error(io_lib:format("force_shutdown_policy: heap-size ~ts is too large", [Siz])); false -> ok end. validate_alarm_actions(Actions) -> - UnSupported = lists:filter(fun(Action) -> Action =/= log andalso Action =/= publish end, Actions), + UnSupported = lists:filter( + fun(Action) -> Action =/= log andalso Action =/= publish end, Actions + ), case UnSupported of [] -> ok; Error -> {error, Error} @@ -1623,16 +2001,18 @@ validate_ciphers(Ciphers) -> end. validate_tls_versions(Versions) -> - AvailableVersions = proplists:get_value(available, ssl:versions()) ++ - proplists:get_value(available_dtls, ssl:versions()), + AvailableVersions = + proplists:get_value(available, ssl:versions()) ++ + proplists:get_value(available_dtls, ssl:versions()), case lists:filter(fun(V) -> not lists:member(V, AvailableVersions) end, Versions) of [] -> ok; Vs -> {error, {unsupported_ssl_versions, Vs}} end. 
validations() -> - [{check_process_watermark, fun check_process_watermark/1} - ,{check_cpu_watermark, fun check_cpu_watermark/1} + [ + {check_process_watermark, fun check_process_watermark/1}, + {check_cpu_watermark, fun check_cpu_watermark/1} ]. %% validations from emqx_conf_schema, we must filter other *_schema by undefined. @@ -1644,7 +2024,8 @@ check_cpu_watermark(Conf) -> check_watermark(LowKey, HighKey, Conf) -> case hocon_maps:get(LowKey, Conf) of - undefined -> true; + undefined -> + true; Low -> High = hocon_maps:get(HighKey, Conf), case Low < High of @@ -1672,16 +2053,22 @@ authentication(Desc) -> %% * document generation (at compile time) %% * type checks before boot (in bin/emqx config generation) %% * type checks at runtime (when changing configs via management API) - #{ type => case persistent_term:get(?EMQX_AUTHENTICATION_SCHEMA_MODULE_PT_KEY, undefined) of - undefined -> Default; - Module -> hoconsc:lazy(Module:root_type()) - end - , desc => iolist_to_binary([Desc, """ -Authentication can be one single authenticator instance or a chain of authenticators as an array. -When authenticating a login (username, client ID, etc.) the authenticators are checked -in the configured order.
-"""]) - }. + #{ + type => + case persistent_term:get(?EMQX_AUTHENTICATION_SCHEMA_MODULE_PT_KEY, undefined) of + undefined -> Default; + Module -> hoconsc:lazy(Module:root_type()) + end, + desc => iolist_to_binary([ + Desc, + "" + "\n" + "Authentication can be one single authenticator instance or a chain of authenticators as an array.\n" + "When authenticating a login (username, client ID, etc.) the authenticators are checked\n" + "in the configured order.
\n" + "" + ]) + }. -spec qos() -> typerefl:type(). qos() -> diff --git a/apps/emqx/src/emqx_sequence.erl b/apps/emqx/src/emqx_sequence.erl index 24534d4f8..cadfea89f 100644 --- a/apps/emqx/src/emqx_sequence.erl +++ b/apps/emqx/src/emqx_sequence.erl @@ -16,58 +16,62 @@ -module(emqx_sequence). --export([ create/1 - , nextval/2 - , currval/2 - , reclaim/2 - , delete/1 - ]). +-export([ + create/1, + nextval/2, + currval/2, + reclaim/2, + delete/1 +]). -export_type([seqid/0]). --type(key() :: term()). +-type key() :: term(). --type(name() :: atom()). +-type name() :: atom(). --type(seqid() :: non_neg_integer()). +-type seqid() :: non_neg_integer(). %%-------------------------------------------------------------------- %% APIs %%-------------------------------------------------------------------- %% @doc Create a sequence. --spec(create(name()) -> ok). +-spec create(name()) -> ok. create(Name) -> emqx_tables:new(Name, [public, set, {write_concurrency, true}]). %% @doc Next value of the sequence. --spec(nextval(name(), key()) -> seqid()). +-spec nextval(name(), key()) -> seqid(). nextval(Name, Key) -> ets:update_counter(Name, Key, {2, 1}, {Key, 0}). %% @doc Current value of the sequence. --spec(currval(name(), key()) -> seqid()). +-spec currval(name(), key()) -> seqid(). currval(Name, Key) -> - try ets:lookup_element(Name, Key, 2) + try + ets:lookup_element(Name, Key, 2) catch error:badarg -> 0 end. %% @doc Reclaim a sequence id. --spec(reclaim(name(), key()) -> seqid()). +-spec reclaim(name(), key()) -> seqid(). reclaim(Name, Key) -> try ets:update_counter(Name, Key, {2, -1, 0, 0}) of - 0 -> ets:delete_object(Name, {Key, 0}), 0; - I -> I + 0 -> + ets:delete_object(Name, {Key, 0}), + 0; + I -> + I catch error:badarg -> 0 end. %% @doc Delete the sequence. --spec(delete(name()) -> boolean()). +-spec delete(name()) -> boolean(). delete(Name) -> case ets:info(Name, name) of Name -> ets:delete(Name); undefined -> false end. - diff --git a/apps/emqx/src/emqx_session.erl b/apps/emqx/src/emqx_session.erl index 504b52c12..310d21c92 100644 --- a/apps/emqx/src/emqx_session.erl +++ b/apps/emqx/src/emqx_session.erl @@ -48,7 +48,6 @@ -include("logger.hrl"). -include("types.hrl"). - -ifdef(TEST). -compile(export_all). -compile(nowarn_export_all). @@ -56,36 +55,41 @@ -export([init/1]). --export([ info/1 - , info/2 - , is_session/1 - , stats/1 - , obtain_next_pkt_id/1 - ]). +-export([ + info/1, + info/2, + is_session/1, + stats/1, + obtain_next_pkt_id/1 +]). --export([ subscribe/4 - , unsubscribe/4 - ]). +-export([ + subscribe/4, + unsubscribe/4 +]). --export([ publish/4 - , puback/3 - , pubrec/3 - , pubrel/3 - , pubcomp/3 - ]). +-export([ + publish/4, + puback/3, + pubrec/3, + pubrel/3, + pubcomp/3 +]). --export([ deliver/3 - , enqueue/3 - , dequeue/2 - , ignore_local/4 - , retry/2 - , terminate/3 - ]). +-export([ + deliver/3, + enqueue/3, + dequeue/2, + ignore_local/4, + retry/2, + terminate/3 +]). --export([ takeover/1 - , resume/2 - , replay/2 - ]). +-export([ + takeover/1, + resume/2, + replay/2 +]). -export([expire/3]). @@ -94,124 +98,130 @@ -type sessionID() :: emqx_guid:guid(). --export_type([ session/0 - , sessionID/0 - ]). +-export_type([ + session/0, + sessionID/0 +]). -record(session, { - %% Client's id - clientid :: emqx_types:clientid(), - id :: sessionID(), - %% Is this session a persistent session i.e. was it started with Session-Expiry > 0 - is_persistent :: boolean(), - %% Client’s Subscriptions. 
- subscriptions :: map(), - %% Max subscriptions allowed - max_subscriptions :: non_neg_integer() | infinity, - %% Upgrade QoS? - upgrade_qos :: boolean(), - %% Client <- Broker: QoS1/2 messages sent to the client but - %% have not been unacked. - inflight :: emqx_inflight:inflight(), - %% All QoS1/2 messages published to when client is disconnected, - %% or QoS1/2 messages pending transmission to the Client. - %% - %% Optionally, QoS0 messages pending transmission to the Client. - mqueue :: emqx_mqueue:mqueue(), - %% Next packet id of the session - next_pkt_id = 1 :: emqx_types:packet_id(), - %% Retry interval for redelivering QoS1/2 messages (Unit: millisecond) - retry_interval :: timeout(), - %% Client -> Broker: QoS2 messages received from the client, but - %% have not been completely acknowledged - awaiting_rel :: map(), - %% Maximum number of awaiting QoS2 messages allowed - max_awaiting_rel :: non_neg_integer() | infinity, - %% Awaiting PUBREL Timeout (Unit: millisecond) - await_rel_timeout :: timeout(), - %% Created at - created_at :: pos_integer() - %% Message deliver latency stats - }). - + %% Client's id + clientid :: emqx_types:clientid(), + id :: sessionID(), + %% Is this session a persistent session i.e. was it started with Session-Expiry > 0 + is_persistent :: boolean(), + %% Client’s Subscriptions. + subscriptions :: map(), + %% Max subscriptions allowed + max_subscriptions :: non_neg_integer() | infinity, + %% Upgrade QoS? + upgrade_qos :: boolean(), + %% Client <- Broker: QoS1/2 messages sent to the client but + %% have not been unacked. + inflight :: emqx_inflight:inflight(), + %% All QoS1/2 messages published to when client is disconnected, + %% or QoS1/2 messages pending transmission to the Client. + %% + %% Optionally, QoS0 messages pending transmission to the Client. + mqueue :: emqx_mqueue:mqueue(), + %% Next packet id of the session + next_pkt_id = 1 :: emqx_types:packet_id(), + %% Retry interval for redelivering QoS1/2 messages (Unit: millisecond) + retry_interval :: timeout(), + %% Client -> Broker: QoS2 messages received from the client, but + %% have not been completely acknowledged + awaiting_rel :: map(), + %% Maximum number of awaiting QoS2 messages allowed + max_awaiting_rel :: non_neg_integer() | infinity, + %% Awaiting PUBREL Timeout (Unit: millisecond) + await_rel_timeout :: timeout(), + %% Created at + created_at :: pos_integer() + %% Message deliver latency stats +}). -type inflight_data_phase() :: wait_ack | wait_comp. --record(inflight_data, { phase :: inflight_data_phase() - , message :: emqx_types:message() - , timestamp :: non_neg_integer()}). +-record(inflight_data, { + phase :: inflight_data_phase(), + message :: emqx_types:message(), + timestamp :: non_neg_integer() +}). --type(session() :: #session{}). +-type session() :: #session{}. --type(publish() :: {maybe(emqx_types:packet_id()), emqx_types:message()}). +-type publish() :: {maybe(emqx_types:packet_id()), emqx_types:message()}. --type(pubrel() :: {pubrel, emqx_types:packet_id()}). +-type pubrel() :: {pubrel, emqx_types:packet_id()}. --type(replies() :: list(publish() | pubrel())). +-type replies() :: list(publish() | pubrel()). --define(INFO_KEYS, - [ id - , is_persistent - , subscriptions - , upgrade_qos - , retry_interval - , await_rel_timeout - , created_at - ]). +-define(INFO_KEYS, [ + id, + is_persistent, + subscriptions, + upgrade_qos, + retry_interval, + await_rel_timeout, + created_at +]). 
--define(STATS_KEYS, - [ subscriptions_cnt - , subscriptions_max - , inflight_cnt - , inflight_max - , mqueue_len - , mqueue_max - , mqueue_dropped - , next_pkt_id - , awaiting_rel_cnt - , awaiting_rel_max - ]). +-define(STATS_KEYS, [ + subscriptions_cnt, + subscriptions_max, + inflight_cnt, + inflight_max, + mqueue_len, + mqueue_max, + mqueue_dropped, + next_pkt_id, + awaiting_rel_cnt, + awaiting_rel_max +]). -define(DEFAULT_BATCH_N, 1000). --type options() :: #{ max_subscriptions => non_neg_integer() - , upgrade_qos => boolean() - , retry_interval => timeout() - , max_awaiting_rel => non_neg_integer() | infinity - , await_rel_timeout => timeout() - , max_inflight => integer() - , mqueue => emqx_mqueue:options() - , is_persistent => boolean() - , clientid => emqx_types:clientid() - }. +-type options() :: #{ + max_subscriptions => non_neg_integer(), + upgrade_qos => boolean(), + retry_interval => timeout(), + max_awaiting_rel => non_neg_integer() | infinity, + await_rel_timeout => timeout(), + max_inflight => integer(), + mqueue => emqx_mqueue:options(), + is_persistent => boolean(), + clientid => emqx_types:clientid() +}. %%-------------------------------------------------------------------- %% Init a Session %%-------------------------------------------------------------------- --spec(init(options()) -> session()). +-spec init(options()) -> session(). init(Opts) -> MaxInflight = maps:get(max_inflight, Opts, 1), QueueOpts = maps:merge( - #{max_len => 1000, - store_qos0 => true - }, maps:get(mqueue, Opts, #{})), + #{ + max_len => 1000, + store_qos0 => true + }, + maps:get(mqueue, Opts, #{}) + ), #session{ - id = emqx_guid:gen(), - clientid = maps:get(clientid, Opts, <<>>), - is_persistent = maps:get(is_persistent, Opts, false), - max_subscriptions = maps:get(max_subscriptions, Opts, infinity), - subscriptions = #{}, - upgrade_qos = maps:get(upgrade_qos, Opts, false), - inflight = emqx_inflight:new(MaxInflight), - mqueue = emqx_mqueue:init(QueueOpts), - next_pkt_id = 1, - retry_interval = maps:get(retry_interval, Opts, 30000), - awaiting_rel = #{}, - max_awaiting_rel = maps:get(max_awaiting_rel, Opts, 100), - await_rel_timeout = maps:get(await_rel_timeout, Opts, 300000), - created_at = erlang:system_time(millisecond) - }. + id = emqx_guid:gen(), + clientid = maps:get(clientid, Opts, <<>>), + is_persistent = maps:get(is_persistent, Opts, false), + max_subscriptions = maps:get(max_subscriptions, Opts, infinity), + subscriptions = #{}, + upgrade_qos = maps:get(upgrade_qos, Opts, false), + inflight = emqx_inflight:new(MaxInflight), + mqueue = emqx_mqueue:init(QueueOpts), + next_pkt_id = 1, + retry_interval = maps:get(retry_interval, Opts, 30000), + awaiting_rel = #{}, + max_awaiting_rel = maps:get(max_awaiting_rel, Opts, 100), + await_rel_timeout = maps:get(await_rel_timeout, Opts, 300000), + created_at = erlang:system_time(millisecond) + }. %%-------------------------------------------------------------------- %% Info, Stats @@ -221,7 +231,7 @@ is_session(#session{}) -> true; is_session(_) -> false. %% @doc Get infos of the session. --spec(info(session()) -> emqx_types:infos()). +-spec info(session()) -> emqx_types:infos(). info(Session) -> maps:from_list(info(?INFO_KEYS, Session)). @@ -269,7 +279,7 @@ info(created_at, #session{created_at = CreatedAt}) -> CreatedAt. %% @doc Get stats of the session. --spec(stats(session()) -> emqx_types:stats()). +-spec stats(session()) -> emqx_types:stats(). stats(Session) -> info(?STATS_KEYS, Session). 
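%% Usage sketch with illustrative values, not part of the patch above: the
%% reformatted emqx_session module keeps init/1, is_session/1, info/1 and
%% stats/1 as exported, so a session can still be created from an options map
%% and inspected. The module name session_usage_example and the chosen option
%% values are assumptions for demonstration only.
-module(session_usage_example).
-export([demo/0]).

demo() ->
    %% Create a session with a wider inflight window and a bounded queue,
    %% using option keys from the options() type shown above.
    Session = emqx_session:init(#{
        clientid => <<"example-client">>,
        max_inflight => 32,
        mqueue => #{max_len => 1000, store_qos0 => true}
    }),
    true = emqx_session:is_session(Session),
    %% info/1 returns the ?INFO_KEYS fields, stats/1 the ?STATS_KEYS counters.
    {emqx_session:info(Session), emqx_session:stats(Session)}.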
%%-------------------------------------------------------------------- @@ -278,58 +288,80 @@ stats(Session) -> info(?STATS_KEYS, Session). ignore_local(ClientInfo, Delivers, Subscriber, Session) -> Subs = info(subscriptions, Session), - lists:dropwhile(fun({deliver, Topic, #message{from = Publisher} = Msg}) -> - case maps:find(Topic, Subs) of - {ok, #{nl := 1}} when Subscriber =:= Publisher -> - ok = emqx_hooks:run('delivery.dropped', [ClientInfo, Msg, no_local]), - ok = emqx_metrics:inc('delivery.dropped'), - ok = emqx_metrics:inc('delivery.dropped.no_local'), - true; - _ -> - false - end - end, Delivers). + lists:dropwhile( + fun({deliver, Topic, #message{from = Publisher} = Msg}) -> + case maps:find(Topic, Subs) of + {ok, #{nl := 1}} when Subscriber =:= Publisher -> + ok = emqx_hooks:run('delivery.dropped', [ClientInfo, Msg, no_local]), + ok = emqx_metrics:inc('delivery.dropped'), + ok = emqx_metrics:inc('delivery.dropped.no_local'), + true; + _ -> + false + end + end, + Delivers + ). %%-------------------------------------------------------------------- %% Client -> Broker: SUBSCRIBE %%-------------------------------------------------------------------- --spec(subscribe(emqx_types:clientinfo(), emqx_types:topic(), - emqx_types:subopts(), session()) - -> {ok, session()} | {error, emqx_types:reason_code()}). -subscribe(ClientInfo = #{clientid := ClientId}, TopicFilter, SubOpts, - Session = #session{id = SessionID, is_persistent = IsPS, subscriptions = Subs}) -> +-spec subscribe( + emqx_types:clientinfo(), + emqx_types:topic(), + emqx_types:subopts(), + session() +) -> + {ok, session()} | {error, emqx_types:reason_code()}. +subscribe( + ClientInfo = #{clientid := ClientId}, + TopicFilter, + SubOpts, + Session = #session{id = SessionID, is_persistent = IsPS, subscriptions = Subs} +) -> IsNew = not maps:is_key(TopicFilter, Subs), case IsNew andalso is_subscriptions_full(Session) of false -> ok = emqx_broker:subscribe(TopicFilter, ClientId, SubOpts), ok = emqx_persistent_session:add_subscription(TopicFilter, SessionID, IsPS), - ok = emqx_hooks:run('session.subscribed', - [ClientInfo, TopicFilter, SubOpts#{is_new => IsNew}]), + ok = emqx_hooks:run( + 'session.subscribed', + [ClientInfo, TopicFilter, SubOpts#{is_new => IsNew}] + ), {ok, Session#session{subscriptions = maps:put(TopicFilter, SubOpts, Subs)}}; - true -> {error, ?RC_QUOTA_EXCEEDED} + true -> + {error, ?RC_QUOTA_EXCEEDED} end. is_subscriptions_full(#session{max_subscriptions = infinity}) -> false; -is_subscriptions_full(#session{subscriptions = Subs, - max_subscriptions = MaxLimit}) -> +is_subscriptions_full(#session{ + subscriptions = Subs, + max_subscriptions = MaxLimit +}) -> maps:size(Subs) >= MaxLimit. %%-------------------------------------------------------------------- %% Client -> Broker: UNSUBSCRIBE %%-------------------------------------------------------------------- --spec(unsubscribe(emqx_types:clientinfo(), emqx_types:topic(), emqx_types:subopts(), session()) - -> {ok, session()} | {error, emqx_types:reason_code()}). -unsubscribe(ClientInfo, TopicFilter, UnSubOpts, - Session = #session{id = SessionID, subscriptions = Subs, is_persistent = IsPS}) -> +-spec unsubscribe(emqx_types:clientinfo(), emqx_types:topic(), emqx_types:subopts(), session()) -> + {ok, session()} | {error, emqx_types:reason_code()}. 
+unsubscribe( + ClientInfo, + TopicFilter, + UnSubOpts, + Session = #session{id = SessionID, subscriptions = Subs, is_persistent = IsPS} +) -> case maps:find(TopicFilter, Subs) of {ok, SubOpts} -> ok = emqx_broker:unsubscribe(TopicFilter), ok = emqx_persistent_session:remove_subscription(TopicFilter, SessionID, IsPS), - ok = emqx_hooks:run('session.unsubscribed', - [ClientInfo, TopicFilter, maps:merge(SubOpts, UnSubOpts)]), + ok = emqx_hooks:run( + 'session.unsubscribed', + [ClientInfo, TopicFilter, maps:merge(SubOpts, UnSubOpts)] + ), {ok, Session#session{subscriptions = maps:remove(TopicFilter, Subs)}}; error -> {error, ?RC_NO_SUBSCRIPTION_EXISTED} @@ -339,11 +371,15 @@ unsubscribe(ClientInfo, TopicFilter, UnSubOpts, %% Client -> Broker: PUBLISH %%-------------------------------------------------------------------- --spec(publish(emqx_types:clientinfo(), emqx_types:packet_id(), emqx_types:message(), session()) - -> {ok, emqx_types:publish_result(), session()} - | {error, emqx_types:reason_code()}). -publish(_ClientInfo, PacketId, Msg = #message{qos = ?QOS_2, timestamp = Ts}, - Session = #session{awaiting_rel = AwaitingRel}) -> +-spec publish(emqx_types:clientinfo(), emqx_types:packet_id(), emqx_types:message(), session()) -> + {ok, emqx_types:publish_result(), session()} + | {error, emqx_types:reason_code()}. +publish( + _ClientInfo, + PacketId, + Msg = #message{qos = ?QOS_2, timestamp = Ts}, + Session = #session{awaiting_rel = AwaitingRel} +) -> case is_awaiting_full(Session) of false -> case maps:is_key(PacketId, AwaitingRel) of @@ -354,27 +390,29 @@ publish(_ClientInfo, PacketId, Msg = #message{qos = ?QOS_2, timestamp = Ts}, true -> {error, ?RC_PACKET_IDENTIFIER_IN_USE} end; - true -> {error, ?RC_RECEIVE_MAXIMUM_EXCEEDED} + true -> + {error, ?RC_RECEIVE_MAXIMUM_EXCEEDED} end; - %% Publish QoS0/1 directly publish(_ClientInfo, _PacketId, Msg, Session) -> {ok, emqx_broker:publish(Msg), Session}. is_awaiting_full(#session{max_awaiting_rel = infinity}) -> false; -is_awaiting_full(#session{awaiting_rel = AwaitingRel, - max_awaiting_rel = MaxLimit}) -> +is_awaiting_full(#session{ + awaiting_rel = AwaitingRel, + max_awaiting_rel = MaxLimit +}) -> maps:size(AwaitingRel) >= MaxLimit. %%-------------------------------------------------------------------- %% Client -> Broker: PUBACK %%-------------------------------------------------------------------- --spec(puback(emqx_types:clientinfo(), emqx_types:packet_id(), session()) - -> {ok, emqx_types:message(), session()} - | {ok, emqx_types:message(), replies(), session()} - | {error, emqx_types:reason_code()}). +-spec puback(emqx_types:clientinfo(), emqx_types:packet_id(), session()) -> + {ok, emqx_types:message(), session()} + | {ok, emqx_types:message(), replies(), session()} + | {error, emqx_types:reason_code()}. puback(ClientInfo, PacketId, Session = #session{inflight = Inflight}) -> case emqx_inflight:lookup(PacketId, Inflight) of {value, #inflight_data{phase = wait_ack, message = Msg}} -> @@ -396,9 +434,9 @@ return_with(Msg, {ok, Publishes, Session}) -> %% Client -> Broker: PUBREC %%-------------------------------------------------------------------- --spec(pubrec(emqx_types:clientinfo(), emqx_types:packet_id(), session()) - -> {ok, emqx_types:message(), session()} - | {error, emqx_types:reason_code()}). +-spec pubrec(emqx_types:clientinfo(), emqx_types:packet_id(), session()) -> + {ok, emqx_types:message(), session()} + | {error, emqx_types:reason_code()}. 
pubrec(_ClientInfo, PacketId, Session = #session{inflight = Inflight}) -> case emqx_inflight:lookup(PacketId, Inflight) of {value, #inflight_data{phase = wait_ack, message = Msg} = Data} -> @@ -415,8 +453,8 @@ pubrec(_ClientInfo, PacketId, Session = #session{inflight = Inflight}) -> %% Client -> Broker: PUBREL %%-------------------------------------------------------------------- --spec(pubrel(emqx_types:clientinfo(), emqx_types:packet_id(), session()) - -> {ok, session()} | {error, emqx_types:reason_code()}). +-spec pubrel(emqx_types:clientinfo(), emqx_types:packet_id(), session()) -> + {ok, session()} | {error, emqx_types:reason_code()}. pubrel(_ClientInfo, PacketId, Session = #session{awaiting_rel = AwaitingRel}) -> case maps:take(PacketId, AwaitingRel) of {_Ts, AwaitingRel1} -> @@ -429,9 +467,10 @@ pubrel(_ClientInfo, PacketId, Session = #session{awaiting_rel = AwaitingRel}) -> %% Client -> Broker: PUBCOMP %%-------------------------------------------------------------------- --spec(pubcomp(emqx_types:clientinfo(), emqx_types:packet_id(), session()) - -> {ok, session()} | {ok, replies(), session()} - | {error, emqx_types:reason_code()}). +-spec pubcomp(emqx_types:clientinfo(), emqx_types:packet_id(), session()) -> + {ok, session()} + | {ok, replies(), session()} + | {error, emqx_types:reason_code()}. pubcomp(ClientInfo, PacketId, Session = #session{inflight = Inflight}) -> case emqx_inflight:lookup(PacketId, Inflight) of {value, #inflight_data{phase = wait_comp, message = Msg}} -> @@ -450,7 +489,8 @@ pubcomp(ClientInfo, PacketId, Session = #session{inflight = Inflight}) -> dequeue(ClientInfo, Session = #session{inflight = Inflight, mqueue = Q}) -> case emqx_mqueue:is_empty(Q) of - true -> {ok, Session}; + true -> + {ok, Session}; false -> {Msgs, Q1} = dequeue(ClientInfo, batch_n(Inflight), [], Q), do_deliver(ClientInfo, Msgs, [], Session#session{mqueue = Q1}) @@ -458,17 +498,18 @@ dequeue(ClientInfo, Session = #session{inflight = Inflight, mqueue = Q}) -> dequeue(_ClientInfo, 0, Msgs, Q) -> {lists:reverse(Msgs), Q}; - dequeue(ClientInfo, Cnt, Msgs, Q) -> case emqx_mqueue:out(Q) of - {empty, _Q} -> dequeue(ClientInfo, 0, Msgs, Q); + {empty, _Q} -> + dequeue(ClientInfo, 0, Msgs, Q); {{value, Msg}, Q1} -> case emqx_message:is_expired(Msg) of true -> ok = emqx_hooks:run('delivery.dropped', [ClientInfo, Msg, expired]), ok = inc_delivery_expired_cnt(), dequeue(ClientInfo, Cnt, Msgs, Q1); - false -> dequeue(ClientInfo, acc_cnt(Msg, Cnt), [Msg|Msgs], Q1) + false -> + dequeue(ClientInfo, acc_cnt(Msg, Cnt), [Msg | Msgs], Q1) end end. @@ -479,40 +520,45 @@ acc_cnt(_Msg, Cnt) -> Cnt - 1. %% Broker -> Client: Deliver %%-------------------------------------------------------------------- --spec(deliver(emqx_types:clientinfo(), list(emqx_types:deliver()), session()) - -> {ok, session()} | {ok, replies(), session()}). -deliver(ClientInfo, [Deliver], Session) -> %% Optimize +-spec deliver(emqx_types:clientinfo(), list(emqx_types:deliver()), session()) -> + {ok, session()} | {ok, replies(), session()}. +%% Optimize +deliver(ClientInfo, [Deliver], Session) -> Msg = enrich_deliver(Deliver, Session), deliver_msg(ClientInfo, Msg, Session); - deliver(ClientInfo, Delivers, Session) -> Msgs = [enrich_deliver(D, Session) || D <- Delivers], do_deliver(ClientInfo, Msgs, [], Session). 
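%% Sketch of the dequeue loop above (hypothetical helper, not part of the
%% patch): take at most N messages from a plain list, skipping any that the
%% supplied predicate reports as expired. emqx_mqueue, the QoS0 counting rule
%% and the hook/metric side effects are intentionally left out.
-module(dequeue_example).
-export([take_batch/3]).

take_batch(IsExpired, N, Queue) ->
    take_batch(IsExpired, N, Queue, []).

take_batch(_IsExpired, 0, Rest, Acc) ->
    {lists:reverse(Acc), Rest};
take_batch(_IsExpired, _N, [], Acc) ->
    {lists:reverse(Acc), []};
take_batch(IsExpired, N, [Msg | Rest], Acc) ->
    case IsExpired(Msg) of
        %% Expired messages are dropped without consuming the batch budget.
        true -> take_batch(IsExpired, N, Rest, Acc);
        false -> take_batch(IsExpired, N - 1, Rest, [Msg | Acc])
    end.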
do_deliver(_ClientInfo, [], Publishes, Session) -> {ok, lists:reverse(Publishes), Session}; - do_deliver(ClientInfo, [Msg | More], Acc, Session) -> case deliver_msg(ClientInfo, Msg, Session) of {ok, Session1} -> do_deliver(ClientInfo, More, Acc, Session1); {ok, [Publish], Session1} -> - do_deliver(ClientInfo, More, [Publish|Acc], Session1) + do_deliver(ClientInfo, More, [Publish | Acc], Session1) end. deliver_msg(_ClientInfo, Msg = #message{qos = ?QOS_0}, Session) -> - on_delivery_completed(Msg, Session), % + % + on_delivery_completed(Msg, Session), {ok, [{undefined, maybe_ack(Msg)}], Session}; - -deliver_msg(ClientInfo, Msg = #message{qos = QoS}, Session = - #session{next_pkt_id = PacketId, inflight = Inflight}) - when QoS =:= ?QOS_1 orelse QoS =:= ?QOS_2 -> +deliver_msg( + ClientInfo, + Msg = #message{qos = QoS}, + Session = + #session{next_pkt_id = PacketId, inflight = Inflight} +) when + QoS =:= ?QOS_1 orelse QoS =:= ?QOS_2 +-> case emqx_inflight:is_full(Inflight) of true -> - Session1 = case maybe_nack(Msg) of - true -> Session; - false -> enqueue(ClientInfo, Msg, Session) - end, + Session1 = + case maybe_nack(Msg) of + true -> Session; + false -> enqueue(ClientInfo, Msg, Session) + end, {ok, Session1}; false -> Publish = {PacketId, maybe_ack(Msg)}, @@ -521,14 +567,20 @@ deliver_msg(ClientInfo, Msg = #message{qos = QoS}, Session = {ok, [Publish], next_pkt_id(Session1)} end. --spec(enqueue(emqx_types:clientinfo(), list(emqx_types:deliver())|emqx_types:message(), - session()) -> session()). +-spec enqueue( + emqx_types:clientinfo(), + list(emqx_types:deliver()) | emqx_types:message(), + session() +) -> session(). enqueue(ClientInfo, Delivers, Session) when is_list(Delivers) -> - lists:foldl(fun(Deliver, Session0) -> + lists:foldl( + fun(Deliver, Session0) -> Msg = enrich_deliver(Deliver, Session), enqueue(ClientInfo, Msg, Session0) - end, Session, Delivers); - + end, + Session, + Delivers + ); enqueue(ClientInfo, #message{} = Msg, Session = #session{mqueue = Q}) -> {Dropped, NewQ} = emqx_mqueue:in(Msg, Q), (Dropped =/= undefined) andalso handle_dropped(ClientInfo, Dropped, Session), @@ -536,25 +588,37 @@ enqueue(ClientInfo, #message{} = Msg, Session = #session{mqueue = Q}) -> handle_dropped(ClientInfo, Msg = #message{qos = QoS, topic = Topic}, #session{mqueue = Q}) -> Payload = emqx_message:to_log_map(Msg), - #{store_qos0 := StoreQos0} = QueueInfo = emqx_mqueue:info(Q), + #{store_qos0 := StoreQos0} = QueueInfo = emqx_mqueue:info(Q), case (QoS == ?QOS_0) andalso (not StoreQos0) of - true -> + true -> ok = emqx_hooks:run('delivery.dropped', [ClientInfo, Msg, qos0_msg]), ok = emqx_metrics:inc('delivery.dropped'), ok = emqx_metrics:inc('delivery.dropped.qos0_msg'), ok = inc_pd('send_msg.dropped'), - ?SLOG(warning, #{msg => "dropped_qos0_msg", - queue => QueueInfo, - payload => Payload}, #{topic => Topic}); + ?SLOG( + warning, + #{ + msg => "dropped_qos0_msg", + queue => QueueInfo, + payload => Payload + }, + #{topic => Topic} + ); false -> ok = emqx_hooks:run('delivery.dropped', [ClientInfo, Msg, queue_full]), ok = emqx_metrics:inc('delivery.dropped'), ok = emqx_metrics:inc('delivery.dropped.queue_full'), ok = inc_pd('send_msg.dropped'), ok = inc_pd('send_msg.dropped.queue_full'), - ?SLOG(warning, #{msg => "dropped_msg_due_to_mqueue_is_full", - queue => QueueInfo, - payload => Payload}, #{topic => Topic}) + ?SLOG( + warning, + #{ + msg => "dropped_msg_due_to_mqueue_is_full", + queue => QueueInfo, + payload => Payload + }, + #{topic => Topic} + ) end. 
enrich_deliver({deliver, Topic, Msg}, Session = #session{subscriptions = Subs}) -> @@ -562,13 +626,13 @@ enrich_deliver({deliver, Topic, Msg}, Session = #session{subscriptions = Subs}) maybe_ack(Msg) -> case emqx_shared_sub:is_ack_required(Msg) of - true -> emqx_shared_sub:maybe_ack(Msg); + true -> emqx_shared_sub:maybe_ack(Msg); false -> Msg end. maybe_nack(Msg) -> - emqx_shared_sub:is_ack_required(Msg) - andalso (ok == emqx_shared_sub:maybe_nack_dropped(Msg)). + emqx_shared_sub:is_ack_required(Msg) andalso + (ok == emqx_shared_sub:maybe_nack_dropped(Msg)). get_subopts(Topic, SubMap) -> case maps:find(Topic, SubMap) of @@ -576,27 +640,35 @@ get_subopts(Topic, SubMap) -> [{nl, Nl}, {qos, QoS}, {rap, Rap}, {subid, SubId}]; {ok, #{nl := Nl, qos := QoS, rap := Rap}} -> [{nl, Nl}, {qos, QoS}, {rap, Rap}]; - error -> [] + error -> + [] end. -enrich_subopts([], Msg, _Session) -> Msg; -enrich_subopts([{nl, 1}|Opts], Msg, Session) -> +enrich_subopts([], Msg, _Session) -> + Msg; +enrich_subopts([{nl, 1} | Opts], Msg, Session) -> enrich_subopts(Opts, emqx_message:set_flag(nl, Msg), Session); -enrich_subopts([{nl, 0}|Opts], Msg, Session) -> +enrich_subopts([{nl, 0} | Opts], Msg, Session) -> enrich_subopts(Opts, Msg, Session); -enrich_subopts([{qos, SubQoS}|Opts], Msg = #message{qos = PubQoS}, - Session = #session{upgrade_qos = true}) -> +enrich_subopts( + [{qos, SubQoS} | Opts], + Msg = #message{qos = PubQoS}, + Session = #session{upgrade_qos = true} +) -> enrich_subopts(Opts, Msg#message{qos = max(SubQoS, PubQoS)}, Session); -enrich_subopts([{qos, SubQoS}|Opts], Msg = #message{qos = PubQoS}, - Session = #session{upgrade_qos = false}) -> +enrich_subopts( + [{qos, SubQoS} | Opts], + Msg = #message{qos = PubQoS}, + Session = #session{upgrade_qos = false} +) -> enrich_subopts(Opts, Msg#message{qos = min(SubQoS, PubQoS)}, Session); -enrich_subopts([{rap, 1}|Opts], Msg, Session) -> +enrich_subopts([{rap, 1} | Opts], Msg, Session) -> enrich_subopts(Opts, Msg, Session); -enrich_subopts([{rap, 0}|Opts], Msg = #message{headers = #{retained := true}}, Session) -> +enrich_subopts([{rap, 0} | Opts], Msg = #message{headers = #{retained := true}}, Session) -> enrich_subopts(Opts, Msg, Session); -enrich_subopts([{rap, 0}|Opts], Msg, Session) -> +enrich_subopts([{rap, 0} | Opts], Msg, Session) -> enrich_subopts(Opts, emqx_message:set_flag(retain, false, Msg), Session); -enrich_subopts([{subid, SubId}|Opts], Msg, Session) -> +enrich_subopts([{subid, SubId} | Opts], Msg, Session) -> Props = emqx_message:get_header(properties, Msg, #{}), Msg1 = emqx_message:set_header(properties, Props#{'Subscription-Identifier' => SubId}, Msg), enrich_subopts(Opts, Msg1, Session). @@ -613,22 +685,32 @@ await(PacketId, Msg, Session = #session{inflight = Inflight}) -> %% Retry Delivery %%-------------------------------------------------------------------- --spec(retry(emqx_types:clientinfo(), session()) -> - {ok, session()} | {ok, replies(), timeout(), session()}). +-spec retry(emqx_types:clientinfo(), session()) -> + {ok, session()} | {ok, replies(), timeout(), session()}. retry(ClientInfo, Session = #session{inflight = Inflight}) -> case emqx_inflight:is_empty(Inflight) of - true -> {ok, Session}; + true -> + {ok, Session}; false -> Now = erlang:system_time(millisecond), - retry_delivery(emqx_inflight:to_list(fun sort_fun/2, Inflight), - [], Now, Session, ClientInfo) + retry_delivery( + emqx_inflight:to_list(fun sort_fun/2, Inflight), + [], + Now, + Session, + ClientInfo + ) end. 
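%% Sketch of the QoS rule implemented by enrich_subopts/3 above (hypothetical
%% helper, not part of the patch): with upgrade_qos enabled the delivered QoS
%% is the maximum of the subscription and publish QoS, otherwise the minimum.
-module(qos_rule_example).
-export([effective_qos/3]).

effective_qos(SubQoS, PubQoS, true) -> max(SubQoS, PubQoS);
effective_qos(SubQoS, PubQoS, false) -> min(SubQoS, PubQoS).

%% Example: effective_qos(1, 2, false) returns 1; effective_qos(1, 2, true) returns 2.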
retry_delivery([], Acc, _Now, Session = #session{retry_interval = Interval}, _ClientInfo) -> {ok, lists:reverse(Acc), Interval, Session}; - -retry_delivery([{PacketId, #inflight_data{timestamp = Ts} = Data} | More], - Acc, Now, Session = #session{retry_interval = Interval, inflight = Inflight}, ClientInfo) -> +retry_delivery( + [{PacketId, #inflight_data{timestamp = Ts} = Data} | More], + Acc, + Now, + Session = #session{retry_interval = Interval, inflight = Inflight}, + ClientInfo +) -> case (Age = age(Now, Ts)) >= Interval of true -> {Acc1, Inflight1} = do_retry_delivery(PacketId, Data, Now, Acc, Inflight, ClientInfo), @@ -637,8 +719,14 @@ retry_delivery([{PacketId, #inflight_data{timestamp = Ts} = Data} | More], {ok, lists:reverse(Acc), Interval - max(0, Age), Session} end. -do_retry_delivery(PacketId, #inflight_data{phase = wait_ack, message = Msg} = Data, - Now, Acc, Inflight, ClientInfo) -> +do_retry_delivery( + PacketId, + #inflight_data{phase = wait_ack, message = Msg} = Data, + Now, + Acc, + Inflight, + ClientInfo +) -> case emqx_message:is_expired(Msg) of true -> ok = emqx_hooks:run('delivery.dropped', [ClientInfo, Msg, expired]), @@ -650,7 +738,6 @@ do_retry_delivery(PacketId, #inflight_data{phase = wait_ack, message = Msg} = Da Inflight1 = emqx_inflight:update(PacketId, Update, Inflight), {[{PacketId, Msg1} | Acc], Inflight1} end; - do_retry_delivery(PacketId, Data, Now, Acc, Inflight, _) -> Update = Data#inflight_data{timestamp = Now}, Inflight1 = emqx_inflight:update(PacketId, Update, Inflight), @@ -660,16 +747,21 @@ do_retry_delivery(PacketId, Data, Now, Acc, Inflight, _) -> %% Expire Awaiting Rel %%-------------------------------------------------------------------- --spec(expire(emqx_types:clientinfo(), awaiting_rel, session()) -> - {ok, session()} | {ok, timeout(), session()}). +-spec expire(emqx_types:clientinfo(), awaiting_rel, session()) -> + {ok, session()} | {ok, timeout(), session()}. expire(_ClientInfo, awaiting_rel, Session = #session{awaiting_rel = AwaitingRel}) -> case maps:size(AwaitingRel) of 0 -> {ok, Session}; _ -> expire_awaiting_rel(erlang:system_time(millisecond), Session) end. -expire_awaiting_rel(Now, Session = #session{awaiting_rel = AwaitingRel, - await_rel_timeout = Timeout}) -> +expire_awaiting_rel( + Now, + Session = #session{ + awaiting_rel = AwaitingRel, + await_rel_timeout = Timeout + } +) -> NotExpired = fun(_PacketId, Ts) -> age(Now, Ts) < Timeout end, AwaitingRel1 = maps:filter(NotExpired, AwaitingRel), ExpiredCnt = maps:size(AwaitingRel) - maps:size(AwaitingRel1), @@ -684,32 +776,38 @@ expire_awaiting_rel(Now, Session = #session{awaiting_rel = AwaitingRel, %% Takeover, Resume and Replay %%-------------------------------------------------------------------- --spec(takeover(session()) -> ok). +-spec takeover(session()) -> ok. takeover(#session{subscriptions = Subs}) -> lists:foreach(fun emqx_broker:unsubscribe/1, maps:keys(Subs)). --spec(resume(emqx_types:clientinfo(), session()) -> ok). +-spec resume(emqx_types:clientinfo(), session()) -> ok. resume(ClientInfo = #{clientid := ClientId}, Session = #session{subscriptions = Subs}) -> - lists:foreach(fun({TopicFilter, SubOpts}) -> - ok = emqx_broker:subscribe(TopicFilter, ClientId, SubOpts) - end, maps:to_list(Subs)), + lists:foreach( + fun({TopicFilter, SubOpts}) -> + ok = emqx_broker:subscribe(TopicFilter, ClientId, SubOpts) + end, + maps:to_list(Subs) + ), ok = emqx_metrics:inc('session.resumed'), emqx_hooks:run('session.resumed', [ClientInfo, info(Session)]). 
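%% Sketch of the retry timer arithmetic used by retry_delivery/5 above
%% (hypothetical helper, not part of the patch): an inflight message is
%% redelivered once its age reaches the retry interval; otherwise the caller
%% waits the remaining Interval - Age, never a negative value.
-module(retry_timer_example).
-export([next_retry_after/3]).

next_retry_after(Now, SentAt, Interval) ->
    Age = Now - SentAt,
    case Age >= Interval of
        true -> 0;
        false -> Interval - max(0, Age)
    end.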
--spec(replay(emqx_types:clientinfo(), session()) -> {ok, replies(), session()}). +-spec replay(emqx_types:clientinfo(), session()) -> {ok, replies(), session()}. replay(ClientInfo, Session = #session{inflight = Inflight}) -> - Pubs = lists:map(fun({PacketId, #inflight_data{phase = wait_comp}}) -> - {pubrel, PacketId}; - ({PacketId, #inflight_data{message = Msg}}) -> - {PacketId, emqx_message:set_flag(dup, true, Msg)} - end, emqx_inflight:to_list(Inflight)), + Pubs = lists:map( + fun + ({PacketId, #inflight_data{phase = wait_comp}}) -> + {pubrel, PacketId}; + ({PacketId, #inflight_data{message = Msg}}) -> + {PacketId, emqx_message:set_flag(dup, true, Msg)} + end, + emqx_inflight:to_list(Inflight) + ), case dequeue(ClientInfo, Session) of {ok, NSession} -> {ok, Pubs, NSession}; - {ok, More, NSession} -> - {ok, lists:append(Pubs, More), NSession} + {ok, More, NSession} -> {ok, lists:append(Pubs, More), NSession} end. --spec(terminate(emqx_types:clientinfo(), Reason :: term(), session()) -> ok). +-spec terminate(emqx_types:clientinfo(), Reason :: term(), session()) -> ok. terminate(ClientInfo, discarded, Session) -> run_hook('session.discarded', [ClientInfo, info(Session)]); terminate(ClientInfo, takenover, Session) -> @@ -718,7 +816,8 @@ terminate(ClientInfo, Reason, Session) -> run_hook('session.terminated', [ClientInfo, Reason, info(Session)]). run_hook(Name, Args) -> - ok = emqx_metrics:inc(Name), emqx_hooks:run(Name, Args). + ok = emqx_metrics:inc(Name), + emqx_hooks:run(Name, Args). %%-------------------------------------------------------------------- %% Inc message/delivery expired counter @@ -753,18 +852,23 @@ obtain_next_pkt_id(Session) -> next_pkt_id(Session = #session{next_pkt_id = ?MAX_PACKET_ID}) -> Session#session{next_pkt_id = 1}; - next_pkt_id(Session = #session{next_pkt_id = Id}) -> Session#session{next_pkt_id = Id + 1}. %%-------------------------------------------------------------------- %% Message Latency Stats %%-------------------------------------------------------------------- -on_delivery_completed(Msg, - #session{created_at = CreateAt, clientid = ClientId}) -> - emqx:run_hook('delivery.completed', - [Msg, - #{session_birth_time => CreateAt, clientid => ClientId}]). +on_delivery_completed( + Msg, + #session{created_at = CreateAt, clientid = ClientId} +) -> + emqx:run_hook( + 'delivery.completed', + [ + Msg, + #{session_birth_time => CreateAt, clientid => ClientId} + ] + ). mark_begin_deliver(Msg) -> emqx_message:set_header(deliver_begin_at, erlang:system_time(second), Msg). @@ -785,9 +889,11 @@ batch_n(Inflight) -> end. with_ts(Msg) -> - #inflight_data{phase = wait_ack, - message = Msg, - timestamp = erlang:system_time(millisecond)}. + #inflight_data{ + phase = wait_ack, + message = Msg, + timestamp = erlang:system_time(millisecond) + }. age(Now, Ts) -> Now - Ts. @@ -797,4 +903,4 @@ age(Now, Ts) -> Now - Ts. set_field(Name, Value, Session) -> Pos = emqx_misc:index_of(Name, record_info(fields, session)), - setelement(Pos+1, Session, Value). + setelement(Pos + 1, Session, Value). diff --git a/apps/emqx/src/emqx_session_router.erl b/apps/emqx/src/emqx_session_router.erl index 67dd58c75..2473322bd 100644 --- a/apps/emqx/src/emqx_session_router.erl +++ b/apps/emqx/src/emqx_session_router.erl @@ -24,37 +24,42 @@ -include_lib("snabbkaffe/include/snabbkaffe.hrl"). --export([ create_init_tab/0 - , create_router_tab/1 - , start_link/2]). +-export([ + create_init_tab/0, + create_router_tab/1, + start_link/2 +]). 
%% Route APIs --export([ delete_routes/2 - , do_add_route/2 - , do_delete_route/2 - , match_routes/1 - ]). +-export([ + delete_routes/2, + do_add_route/2, + do_delete_route/2, + match_routes/1 +]). --export([ buffer/3 - , pending/2 - , resume_begin/2 - , resume_end/2 - ]). +-export([ + buffer/3, + pending/2, + resume_begin/2, + resume_end/2 +]). -export([print_routes/1]). %% gen_server callbacks --export([ init/1 - , handle_call/3 - , handle_cast/2 - , handle_info/2 - , terminate/2 - , code_change/3 - ]). +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2, + code_change/3 +]). --type(group() :: binary()). +-type group() :: binary(). --type(dest() :: node() | {group(), node()}). +-type dest() :: node() | {group(), node()}. -define(ROUTE_RAM_TAB, emqx_session_route_ram). -define(ROUTE_DISC_TAB, emqx_session_route_disc). @@ -67,63 +72,83 @@ create_router_tab(disc) -> ok = mria:create_table(?ROUTE_DISC_TAB, [ - {type, bag}, - {rlog_shard, ?ROUTE_SHARD}, - {storage, disc_copies}, - {record_name, route}, - {attributes, record_info(fields, route)}, - {storage_properties, [{ets, [{read_concurrency, true}, - {write_concurrency, true}]}]}]); + {type, bag}, + {rlog_shard, ?ROUTE_SHARD}, + {storage, disc_copies}, + {record_name, route}, + {attributes, record_info(fields, route)}, + {storage_properties, [ + {ets, [ + {read_concurrency, true}, + {write_concurrency, true} + ]} + ]} + ]); create_router_tab(ram) -> ok = mria:create_table(?ROUTE_RAM_TAB, [ - {type, bag}, - {rlog_shard, ?ROUTE_SHARD}, - {storage, ram_copies}, - {record_name, route}, - {attributes, record_info(fields, route)}, - {storage_properties, [{ets, [{read_concurrency, true}, - {write_concurrency, true}]}]}]). + {type, bag}, + {rlog_shard, ?ROUTE_SHARD}, + {storage, ram_copies}, + {record_name, route}, + {attributes, record_info(fields, route)}, + {storage_properties, [ + {ets, [ + {read_concurrency, true}, + {write_concurrency, true} + ]} + ]} + ]). %%-------------------------------------------------------------------- %% Start a router %%-------------------------------------------------------------------- create_init_tab() -> - emqx_tables:new(?SESSION_INIT_TAB, [public, {read_concurrency, true}, - {write_concurrency, true}]). + emqx_tables:new(?SESSION_INIT_TAB, [ + public, + {read_concurrency, true}, + {write_concurrency, true} + ]). --spec(start_link(atom(), pos_integer()) -> startlink_ret()). +-spec start_link(atom(), pos_integer()) -> startlink_ret(). start_link(Pool, Id) -> - gen_server:start_link({local, emqx_misc:proc_name(?MODULE, Id)}, - ?MODULE, [Pool, Id], [{hibernate_after, 1000}]). + gen_server:start_link( + {local, emqx_misc:proc_name(?MODULE, Id)}, + ?MODULE, + [Pool, Id], + [{hibernate_after, 1000}] + ). %%-------------------------------------------------------------------- %% Route APIs %%-------------------------------------------------------------------- --spec(do_add_route(emqx_topic:topic(), dest()) -> ok | {error, term()}). +-spec do_add_route(emqx_topic:topic(), dest()) -> ok | {error, term()}. 
do_add_route(Topic, SessionID) when is_binary(Topic) -> Route = #route{topic = Topic, dest = SessionID}, case lists:member(Route, lookup_routes(Topic)) of - true -> ok; + true -> + ok; false -> case emqx_topic:wildcard(Topic) of - true -> + true -> Fun = fun emqx_router_utils:insert_session_trie_route/2, - emqx_router_utils:maybe_trans(Fun, [route_tab(), Route], - ?PERSISTENT_SESSION_SHARD); + emqx_router_utils:maybe_trans( + Fun, + [route_tab(), Route], + ?PERSISTENT_SESSION_SHARD + ); false -> emqx_router_utils:insert_direct_route(route_tab(), Route) end end. %% @doc Match routes --spec(match_routes(emqx_topic:topic()) -> [emqx_types:route()]). +-spec match_routes(emqx_topic:topic()) -> [emqx_types:route()]. match_routes(Topic) when is_binary(Topic) -> case match_trie(Topic) of [] -> lookup_routes(Topic); - Matched -> - lists:append([lookup_routes(To) || To <- [Topic | Matched]]) + Matched -> lists:append([lookup_routes(To) || To <- [Topic | Matched]]) end. %% Optimize: routing table will be replicated to all router nodes. @@ -137,11 +162,11 @@ match_trie(Topic) -> delete_routes(SessionID, Subscriptions) -> cast(pick(SessionID), {delete_routes, SessionID, Subscriptions}). --spec(do_delete_route(emqx_topic:topic(), dest()) -> ok | {error, term()}). +-spec do_delete_route(emqx_topic:topic(), dest()) -> ok | {error, term()}. do_delete_route(Topic, SessionID) -> Route = #route{topic = Topic, dest = SessionID}, case emqx_topic:wildcard(Topic) of - true -> + true -> Fun = fun emqx_router_utils:delete_session_trie_route/2, emqx_router_utils:maybe_trans(Fun, [route_tab(), Route], ?PERSISTENT_SESSION_SHARD); false -> @@ -149,11 +174,14 @@ do_delete_route(Topic, SessionID) -> end. %% @doc Print routes to a topic --spec(print_routes(emqx_topic:topic()) -> ok). +-spec print_routes(emqx_topic:topic()) -> ok. print_routes(Topic) -> - lists:foreach(fun(#route{topic = To, dest = SessionID}) -> - io:format("~s -> ~p~n", [To, SessionID]) - end, match_routes(Topic)). + lists:foreach( + fun(#route{topic = To, dest = SessionID}) -> + io:format("~s -> ~p~n", [To, SessionID]) + end, + match_routes(Topic) + ). %%-------------------------------------------------------------------- %% Session APIs @@ -173,11 +201,11 @@ resume_begin(From, SessionID) when is_pid(From), is_binary(SessionID) -> call(pick(SessionID), {resume_begin, From, SessionID}). -spec resume_end(pid(), binary()) -> - {'ok', [emqx_types:message()]} | {'error', term()}. + {'ok', [emqx_types:message()]} | {'error', term()}. resume_end(From, SessionID) when is_pid(From), is_binary(SessionID) -> case emqx_tables:lookup_value(?SESSION_INIT_TAB, SessionID) of undefined -> - ?tp(ps_session_not_found, #{ sid => SessionID }), + ?tp(ps_session_not_found, #{sid => SessionID}), {error, not_found}; Pid -> Res = emqx_session_router_worker:resume_end(From, Pid, SessionID), @@ -237,7 +265,7 @@ handle_cast({resume_end, SessionID, Pid}, State) -> end, Pmon = emqx_pmon:demonitor(Pid, maps:get(pmon, State)), _ = emqx_session_router_worker_sup:abort_worker(Pid), - {noreply, State#{ pmon => Pmon }}; + {noreply, State#{pmon => Pmon}}; handle_cast(Msg, State) -> ?SLOG(error, #{msg => "unexpected_cast", cast => Msg}), {noreply, State}. @@ -257,7 +285,7 @@ code_change(_OldVsn, State, _Extra) -> %% initialisation of a resuming session. 
%%-------------------------------------------------------------------- -init_resume_worker(RemotePid, SessionID, #{ pmon := Pmon } = State) -> +init_resume_worker(RemotePid, SessionID, #{pmon := Pmon} = State) -> case emqx_session_router_worker_sup:start_worker(SessionID, RemotePid) of {error, What} -> ?SLOG(error, #{msg => "failed_to_start_resume_worker", reason => What}), @@ -266,11 +294,11 @@ init_resume_worker(RemotePid, SessionID, #{ pmon := Pmon } = State) -> Pmon1 = emqx_pmon:monitor(Pid, Pmon), case emqx_tables:lookup_value(?SESSION_INIT_TAB, SessionID) of undefined -> - {ok, Pid, State#{ pmon => Pmon1 }}; + {ok, Pid, State#{pmon => Pmon1}}; {_, OldPid} -> Pmon2 = emqx_pmon:demonitor(OldPid, Pmon1), _ = emqx_session_router_worker_sup:abort_worker(OldPid), - {ok, Pid, State#{ pmon => Pmon2 }} + {ok, Pid, State#{pmon => Pmon2}} end end. @@ -284,5 +312,5 @@ lookup_routes(Topic) -> route_tab() -> case emqx_persistent_session:storage_type() of disc -> ?ROUTE_DISC_TAB; - ram -> ?ROUTE_RAM_TAB + ram -> ?ROUTE_RAM_TAB end. diff --git a/apps/emqx/src/emqx_session_router_worker.erl b/apps/emqx/src/emqx_session_router_worker.erl index 40a5075b6..4c9397bc1 100644 --- a/apps/emqx/src/emqx_session_router_worker.erl +++ b/apps/emqx/src/emqx_session_router_worker.erl @@ -26,41 +26,43 @@ %% the initialization, the messages are delivered and the worker is %% terminated. - -module(emqx_session_router_worker). -behaviour(gen_server). %% API --export([ buffer/3 - , pendings/1 - , resume_end/3 - , start_link/2 - ]). +-export([ + buffer/3, + pendings/1, + resume_end/3, + start_link/2 +]). %% gen_server callbacks --export([ init/1 - , handle_call/3 - , handle_cast/2 - , handle_info/2 - , terminate/2 - ]). +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2 +]). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). --record(state, { remote_pid :: pid() - , session_id :: binary() - , session_tab :: ets:table() - , messages :: ets:table() - , buffering :: boolean() - }). +-record(state, { + remote_pid :: pid(), + session_id :: binary(), + session_tab :: ets:table(), + messages :: ets:table(), + buffering :: boolean() +}). %%%=================================================================== %%% API %%%=================================================================== start_link(SessionTab, #{} = Opts) -> - gen_server:start_link(?MODULE, Opts#{ session_tab => SessionTab}, []). + gen_server:start_link(?MODULE, Opts#{session_tab => SessionTab}, []). pendings(Pid) -> gen_server:call(Pid, pendings). @@ -68,15 +70,19 @@ pendings(Pid) -> resume_end(RemotePid, Pid, _SessionID) -> case gen_server:call(Pid, {resume_end, RemotePid}) of {ok, EtsHandle} -> - ?tp(ps_worker_call_ok, #{ pid => Pid - , remote_pid => RemotePid - , sid => _SessionID}), + ?tp(ps_worker_call_ok, #{ + pid => Pid, + remote_pid => RemotePid, + sid => _SessionID + }), {ok, ets:tab2list(EtsHandle)}; {error, _} = Err -> - ?tp(ps_worker_call_failed, #{ pid => Pid - , remote_pid => RemotePid - , sid => _SessionID - , reason => Err}), + ?tp(ps_worker_call_failed, #{ + pid => Pid, + remote_pid => RemotePid, + sid => _SessionID, + reason => Err + }), Err end. 
@@ -88,26 +94,31 @@ buffer(Worker, STopic, Msg) -> %%% gen_server callbacks %%%=================================================================== -init(#{ remote_pid := RemotePid - , session_id := SessionID - , session_tab := SessionTab}) -> +init(#{ + remote_pid := RemotePid, + session_id := SessionID, + session_tab := SessionTab +}) -> process_flag(trap_exit, true), erlang:monitor(process, RemotePid), - ?tp(ps_worker_started, #{ remote_pid => RemotePid - , sid => SessionID }), - {ok, #state{ remote_pid = RemotePid - , session_id = SessionID - , session_tab = SessionTab - , messages = ets:new(?MODULE, [protected, ordered_set]) - , buffering = true - }}. + ?tp(ps_worker_started, #{ + remote_pid => RemotePid, + sid => SessionID + }), + {ok, #state{ + remote_pid = RemotePid, + session_id = SessionID, + session_tab = SessionTab, + messages = ets:new(?MODULE, [protected, ordered_set]), + buffering = true + }}. handle_call(pendings, _From, State) -> %% Debug API {reply, {State#state.messages, State#state.remote_pid}, State}; handle_call({resume_end, RemotePid}, _From, #state{remote_pid = RemotePid} = State) -> ?tp(ps_worker_resume_end, #{sid => State#state.session_id}), - {reply, {ok, State#state.messages}, State#state{ buffering = false }}; + {reply, {ok, State#state.messages}, State#state{buffering = false}}; handle_call({resume_end, _RemotePid}, _From, State) -> ?tp(ps_worker_resume_end_error, #{sid => State#state.session_id}), {reply, {error, wrong_remote_pid}, State}; @@ -119,26 +130,30 @@ handle_cast(_Request, State) -> {noreply, State}. handle_info({buffer, _STopic, _Msg}, State) when not State#state.buffering -> - ?tp(ps_worker_drop_deliver, #{ sid => State#state.session_id - , msg_id => emqx_message:id(_Msg) - }), + ?tp(ps_worker_drop_deliver, #{ + sid => State#state.session_id, + msg_id => emqx_message:id(_Msg) + }), {noreply, State}; handle_info({buffer, STopic, Msg}, State) when State#state.buffering -> - ?tp(ps_worker_deliver, #{ sid => State#state.session_id - , msg_id => emqx_message:id(Msg) - }), + ?tp(ps_worker_deliver, #{ + sid => State#state.session_id, + msg_id => emqx_message:id(Msg) + }), ets:insert(State#state.messages, {{Msg, STopic}}), {noreply, State}; handle_info({'DOWN', _, process, RemotePid, _Reason}, #state{remote_pid = RemotePid} = State) -> - ?tp(warning, ps_worker, #{ event => worker_remote_died - , sid => State#state.session_id - , msg => "Remote pid died. Exiting." }), + ?tp(warning, ps_worker, #{ + event => worker_remote_died, + sid => State#state.session_id, + msg => "Remote pid died. Exiting." + }), {stop, normal, State}; handle_info(_Info, State) -> {noreply, State}. terminate(shutdown, _State) -> - ?tp(ps_worker_shutdown, #{ sid => _State#state.session_id }), + ?tp(ps_worker_shutdown, #{sid => _State#state.session_id}), ok; terminate(_, _State) -> ok. diff --git a/apps/emqx/src/emqx_session_router_worker_sup.erl b/apps/emqx/src/emqx_session_router_worker_sup.erl index 14437bb97..e326ad3e2 100644 --- a/apps/emqx/src/emqx_session_router_worker_sup.erl +++ b/apps/emqx/src/emqx_session_router_worker_sup.erl @@ -18,22 +18,25 @@ -behaviour(supervisor). --export([ start_link/1 - ]). +-export([start_link/1]). --export([ abort_worker/1 - , start_worker/2 - ]). +-export([ + abort_worker/1, + start_worker/2 +]). --export([ init/1 - ]). +-export([init/1]). start_link(SessionTab) -> supervisor:start_link({local, ?MODULE}, ?MODULE, SessionTab). 
start_worker(SessionID, RemotePid) -> - supervisor:start_child(?MODULE, [#{ session_id => SessionID - , remote_pid => RemotePid}]). + supervisor:start_child(?MODULE, [ + #{ + session_id => SessionID, + remote_pid => RemotePid + } + ]). abort_worker(Pid) -> supervisor:terminate_child(?MODULE, Pid). @@ -44,14 +47,18 @@ abort_worker(Pid) -> init(SessionTab) -> %% Resume worker - Worker = #{id => session_router_worker, - start => {emqx_session_router_worker, start_link, [SessionTab]}, - restart => transient, - shutdown => 2000, - type => worker, - modules => [emqx_session_router_worker]}, - Spec = #{ strategy => simple_one_for_one - , intensity => 1 - , period => 5}, + Worker = #{ + id => session_router_worker, + start => {emqx_session_router_worker, start_link, [SessionTab]}, + restart => transient, + shutdown => 2000, + type => worker, + modules => [emqx_session_router_worker] + }, + Spec = #{ + strategy => simple_one_for_one, + intensity => 1, + period => 5 + }, {ok, {Spec, [Worker]}}. diff --git a/apps/emqx/src/emqx_shared_sub.erl b/apps/emqx/src/emqx_shared_sub.erl index b28fd97e6..3d1d0d439 100644 --- a/apps/emqx/src/emqx_shared_sub.erl +++ b/apps/emqx/src/emqx_shared_sub.erl @@ -23,7 +23,6 @@ -include("logger.hrl"). -include("types.hrl"). - %% Mnesia bootstrap -export([mnesia/1]). @@ -32,38 +31,43 @@ %% APIs -export([start_link/0]). --export([ subscribe/3 - , unsubscribe/3 - ]). +-export([ + subscribe/3, + unsubscribe/3 +]). -export([dispatch/3]). --export([ maybe_ack/1 - , maybe_nack_dropped/1 - , nack_no_connection/1 - , is_ack_required/1 - ]). +-export([ + maybe_ack/1, + maybe_nack_dropped/1, + nack_no_connection/1, + is_ack_required/1 +]). %% for testing -export([subscribers/2]). %% gen_server callbacks --export([ init/1 - , handle_call/3 - , handle_cast/2 - , handle_info/2 - , terminate/2 - , code_change/3 - ]). +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2, + code_change/3 +]). -export_type([strategy/0]). --type strategy() :: random - | round_robin - | sticky - | hash %% same as hash_clientid, backward compatible - | hash_clientid - | hash_topic. +-type strategy() :: + random + | round_robin + | sticky + %% same as hash_clientid, backward compatible + | hash + | hash_clientid + | hash_topic. -define(SERVER, ?MODULE). -define(TAB, emqx_shared_subscription). @@ -85,33 +89,34 @@ mnesia(boot) -> ok = mria:create_table(?TAB, [ - {type, bag}, - {rlog_shard, ?SHARED_SUB_SHARD}, - {storage, ram_copies}, - {record_name, emqx_shared_subscription}, - {attributes, record_info(fields, emqx_shared_subscription)}]). + {type, bag}, + {rlog_shard, ?SHARED_SUB_SHARD}, + {storage, ram_copies}, + {record_name, emqx_shared_subscription}, + {attributes, record_info(fields, emqx_shared_subscription)} + ]). %%-------------------------------------------------------------------- %% API %%-------------------------------------------------------------------- --spec(start_link() -> startlink_ret()). +-spec start_link() -> startlink_ret(). start_link() -> gen_server:start_link({local, ?SERVER}, ?MODULE, [], []). --spec(subscribe(emqx_types:group(), emqx_types:topic(), pid()) -> ok). +-spec subscribe(emqx_types:group(), emqx_types:topic(), pid()) -> ok. subscribe(Group, Topic, SubPid) when is_pid(SubPid) -> gen_server:call(?SERVER, {subscribe, Group, Topic, SubPid}). --spec(unsubscribe(emqx_types:group(), emqx_types:topic(), pid()) -> ok). +-spec unsubscribe(emqx_types:group(), emqx_types:topic(), pid()) -> ok. 
unsubscribe(Group, Topic, SubPid) when is_pid(SubPid) -> gen_server:call(?SERVER, {unsubscribe, Group, Topic, SubPid}). record(Group, Topic, SubPid) -> #emqx_shared_subscription{group = Group, topic = Topic, subpid = SubPid}. --spec(dispatch(emqx_types:group(), emqx_types:topic(), emqx_types:delivery()) - -> emqx_types:deliver_result()). +-spec dispatch(emqx_types:group(), emqx_types:topic(), emqx_types:delivery()) -> + emqx_types:deliver_result(). dispatch(Group, Topic, Delivery) -> dispatch(Group, Topic, Delivery, _FailedSubs = []). @@ -122,18 +127,19 @@ dispatch(Group, Topic, Delivery = #delivery{message = Msg}, FailedSubs) -> {error, no_subscribers}; {Type, SubPid} -> case do_dispatch(SubPid, Topic, Msg, Type) of - ok -> {ok, 1}; + ok -> + {ok, 1}; {error, _Reason} -> %% Failed to dispatch to this sub, try next. dispatch(Group, Topic, Delivery, [SubPid | FailedSubs]) end end. --spec(strategy() -> strategy()). +-spec strategy() -> strategy(). strategy() -> emqx:get_config([broker, shared_subscription_strategy]). --spec(ack_enabled() -> boolean()). +-spec ack_enabled() -> boolean(). ack_enabled() -> emqx:get_config([broker, shared_dispatch_ack_enabled]). @@ -167,10 +173,11 @@ dispatch_with_ack(SubPid, Topic, Msg) -> Ref = erlang:monitor(process, SubPid), Sender = self(), _ = erlang:send(SubPid, {deliver, Topic, with_ack_ref(Msg, {Sender, Ref})}), - Timeout = case Msg#message.qos of - ?QOS_1 -> timer:seconds(?SHARED_SUB_QOS1_DISPATCH_TIMEOUT_SECONDS); - ?QOS_2 -> infinity - end, + Timeout = + case Msg#message.qos of + ?QOS_1 -> timer:seconds(?SHARED_SUB_QOS1_DISPATCH_TIMEOUT_SECONDS); + ?QOS_2 -> infinity + end, try receive {Ref, ?ACK} -> @@ -180,9 +187,8 @@ dispatch_with_ack(SubPid, Topic, Msg) -> {error, Reason}; {'DOWN', Ref, process, SubPid, Reason} -> {error, Reason} - after - Timeout -> - {error, timeout} + after Timeout -> + {error, timeout} end after _ = erlang:demonitor(Ref, [flush]) @@ -197,11 +203,11 @@ without_ack_ref(Msg) -> get_ack_ref(Msg) -> emqx_message:get_header(shared_dispatch_ack, Msg, ?NO_ACK). --spec(is_ack_required(emqx_types:message()) -> boolean()). +-spec is_ack_required(emqx_types:message()) -> boolean(). is_ack_required(Msg) -> ?NO_ACK =/= get_ack_ref(Msg). %% @doc Negative ack dropped message due to inflight window or message queue being full. --spec(maybe_nack_dropped(emqx_types:message()) -> ok). +-spec maybe_nack_dropped(emqx_types:message()) -> ok. maybe_nack_dropped(Msg) -> case get_ack_ref(Msg) of ?NO_ACK -> ok; @@ -211,17 +217,17 @@ maybe_nack_dropped(Msg) -> %% @doc Negative ack message due to connection down. %% Assuming this function is always called when ack is required %% i.e is_ack_required returned true. --spec(nack_no_connection(emqx_types:message()) -> ok). +-spec nack_no_connection(emqx_types:message()) -> ok. nack_no_connection(Msg) -> {Sender, Ref} = get_ack_ref(Msg), nack(Sender, Ref, no_connection). --spec(nack(pid(), reference(), dropped | no_connection) -> ok). +-spec nack(pid(), reference(), dropped | no_connection) -> ok. nack(Sender, Ref, Reason) -> erlang:send(Sender, {Ref, ?NACK(Reason)}), ok. --spec(maybe_ack(emqx_types:message()) -> emqx_types:message()). +-spec maybe_ack(emqx_types:message()) -> emqx_types:message(). maybe_ack(Msg) -> case get_ack_ref(Msg) of ?NO_ACK -> @@ -262,7 +268,8 @@ do_pick(Strategy, ClientId, SourceTopic, Group, Topic, FailedSubs) -> {fresh, pick_subscriber(Group, Topic, Strategy, ClientId, SourceTopic, Subs)} end. 
-pick_subscriber(_Group, _Topic, _Strategy, _ClientId, _SourceTopic, [Sub]) -> Sub; +pick_subscriber(_Group, _Topic, _Strategy, _ClientId, _SourceTopic, [Sub]) -> + Sub; pick_subscriber(Group, Topic, Strategy, ClientId, SourceTopic, Subs) -> Nth = do_pick_subscriber(Group, Topic, Strategy, ClientId, SourceTopic, length(Subs)), lists:nth(Nth, Subs). @@ -277,10 +284,11 @@ do_pick_subscriber(_Group, _Topic, hash_clientid, ClientId, _SourceTopic, Count) do_pick_subscriber(_Group, _Topic, hash_topic, _ClientId, SourceTopic, Count) -> 1 + erlang:phash2(SourceTopic) rem Count; do_pick_subscriber(Group, Topic, round_robin, _ClientId, _SourceTopic, Count) -> - Rem = case erlang:get({shared_sub_round_robin, Group, Topic}) of - undefined -> rand:uniform(Count) - 1; - N -> (N + 1) rem Count - end, + Rem = + case erlang:get({shared_sub_round_robin, Group, Topic}) of + undefined -> rand:uniform(Count) - 1; + N -> (N + 1) rem Count + end, _ = erlang:put({shared_sub_round_robin, Group, Topic}, Rem), Rem + 1. @@ -301,26 +309,27 @@ init([]) -> init_monitors() -> mnesia:foldl( - fun(#emqx_shared_subscription{subpid = SubPid}, Mon) -> - emqx_pmon:monitor(SubPid, Mon) - end, emqx_pmon:new(), ?TAB). + fun(#emqx_shared_subscription{subpid = SubPid}, Mon) -> + emqx_pmon:monitor(SubPid, Mon) + end, + emqx_pmon:new(), + ?TAB + ). handle_call({subscribe, Group, Topic, SubPid}, _From, State = #state{pmon = PMon}) -> mria:dirty_write(?TAB, record(Group, Topic, SubPid)), case ets:member(?SHARED_SUBS, {Group, Topic}) of - true -> ok; + true -> ok; false -> ok = emqx_router:do_add_route(Topic, {Group, node()}) end, ok = maybe_insert_alive_tab(SubPid), true = ets:insert(?SHARED_SUBS, {{Group, Topic}, SubPid}), {reply, ok, update_stats(State#state{pmon = emqx_pmon:monitor(SubPid, PMon)})}; - handle_call({unsubscribe, Group, Topic, SubPid}, _From, State) -> mria:dirty_delete_object(?TAB, record(Group, Topic, SubPid)), true = ets:delete_object(?SHARED_SUBS, {{Group, Topic}, SubPid}), delete_route_if_needed({Group, Topic}), {reply, ok, State}; - handle_call(Req, _From, State) -> ?SLOG(error, #{msg => "unexpected_call", req => Req}), {reply, ignored, State}. @@ -332,7 +341,6 @@ handle_cast(Msg, State) -> handle_info({mnesia_table_event, {write, NewRecord, _}}, State = #state{pmon = PMon}) -> #emqx_shared_subscription{subpid = SubPid} = NewRecord, {noreply, update_stats(State#state{pmon = emqx_pmon:monitor(SubPid, PMon)})}; - %% The subscriber may have subscribed multiple topics, so we need to keep monitoring the PID until %% it `unsubscribed` the last topic. %% The trick is we don't demonitor the subscriber here, and (after a long time) it will eventually @@ -343,12 +351,10 @@ handle_info({mnesia_table_event, {write, NewRecord, _}}, State = #state{pmon = P handle_info({mnesia_table_event, _Event}, State) -> {noreply, State}; - handle_info({'DOWN', _MRef, process, SubPid, Reason}, State = #state{pmon = PMon}) -> ?SLOG(info, #{msg => "shared_subscriber_down", sub_pid => SubPid, reason => Reason}), cleanup_down(SubPid), {noreply, update_stats(State#state{pmon = emqx_pmon:erase(SubPid, PMon)})}; - handle_info(_Info, State) -> {noreply, State}. @@ -364,7 +370,9 @@ code_change(_OldVsn, State, _Extra) -> %% keep track of alive remote pids maybe_insert_alive_tab(Pid) when ?IS_LOCAL_PID(Pid) -> ok; -maybe_insert_alive_tab(Pid) when is_pid(Pid) -> ets:insert(?ALIVE_SUBS, {Pid}), ok. +maybe_insert_alive_tab(Pid) when is_pid(Pid) -> + ets:insert(?ALIVE_SUBS, {Pid}), + ok. 
cleanup_down(SubPid) -> ?IS_LOCAL_PID(SubPid) orelse ets:delete(?ALIVE_SUBS, SubPid), @@ -373,13 +381,16 @@ cleanup_down(SubPid) -> ok = mria:dirty_delete_object(?TAB, Record), true = ets:delete_object(?SHARED_SUBS, {{Group, Topic}, SubPid}), delete_route_if_needed({Group, Topic}) - end, mnesia:dirty_match_object(#emqx_shared_subscription{_ = '_', subpid = SubPid})). + end, + mnesia:dirty_match_object(#emqx_shared_subscription{_ = '_', subpid = SubPid}) + ). update_stats(State) -> - emqx_stats:setstat('subscriptions.shared.count', - 'subscriptions.shared.max', - ets:info(?TAB, size) - ), + emqx_stats:setstat( + 'subscriptions.shared.count', + 'subscriptions.shared.max', + ets:info(?TAB, size) + ), State. %% Return 'true' if the subscriber process is alive AND not in the failed list diff --git a/apps/emqx/src/emqx_stats.erl b/apps/emqx/src/emqx_stats.erl index 895bb19fd..61d6e77d7 100644 --- a/apps/emqx/src/emqx_stats.erl +++ b/apps/emqx/src/emqx_stats.erl @@ -23,123 +23,140 @@ -include("types.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). - %% APIs --export([ start_link/0 - , start_link/1 - , stop/0 - ]). +-export([ + start_link/0, + start_link/1, + stop/0 +]). %% Stats API. --export([ getstats/0 - , getstat/1 - , setstat/2 - , setstat/3 - , statsfun/1 - , statsfun/2 - ]). +-export([ + getstats/0, + getstat/1, + setstat/2, + setstat/3, + statsfun/1, + statsfun/2 +]). --export([ update_interval/2 - , update_interval/3 - , cancel_update/1 - ]). +-export([ + update_interval/2, + update_interval/3, + cancel_update/1 +]). %% gen_server callbacks --export([ init/1 - , handle_call/3 - , handle_cast/2 - , handle_info/2 - , terminate/2 - , code_change/3 - ]). +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2, + code_change/3 +]). -export_type([stats/0]). -record(update, {name, countdown, interval, func}). -record(state, { - timer :: maybe(reference()), - updates :: [#update{}], - tick_ms :: timeout() - }). + timer :: maybe(reference()), + updates :: [#update{}], + tick_ms :: timeout() +}). --type(stats() :: list({atom(), non_neg_integer()})). +-type stats() :: list({atom(), non_neg_integer()}). %% Connection stats -define(CONNECTION_STATS, - [ 'connections.count' %% Count of Concurrent Connections - , 'connections.max' %% Maximum Number of Concurrent Connections - , 'live_connections.count' %% Count of connected clients - , 'live_connections.max' %% Maximum number of connected clients - ]). + %% Count of Concurrent Connections + [ + 'connections.count', + %% Maximum Number of Concurrent Connections + 'connections.max', + %% Count of connected clients + 'live_connections.count', + %% Maximum number of connected clients + 'live_connections.max' + ] +). %% Channel stats -define(CHANNEL_STATS, - ['channels.count', %% Count of Concurrent Channels - 'channels.max' %% Maximum Number of Concurrent Channels - ]). + %% Count of Concurrent Channels + [ + 'channels.count', + %% Maximum Number of Concurrent Channels + 'channels.max' + ] +). %% Session stats -define(SESSION_STATS, - ['sessions.count', %% Count of Concurrent Sessions - 'sessions.max' %% Maximum Number of Concurrent Sessions - ]). + %% Count of Concurrent Sessions + [ + 'sessions.count', + %% Maximum Number of Concurrent Sessions + 'sessions.max' + ] +). 
%% PubSub stats --define(PUBSUB_STATS, - ['topics.count', - 'topics.max', - 'suboptions.count', - 'suboptions.max', - 'subscribers.count', - 'subscribers.max', - 'subscriptions.count', - 'subscriptions.max', - 'subscriptions.shared.count', - 'subscriptions.shared.max' - ]). +-define(PUBSUB_STATS, [ + 'topics.count', + 'topics.max', + 'suboptions.count', + 'suboptions.max', + 'subscribers.count', + 'subscribers.max', + 'subscriptions.count', + 'subscriptions.max', + 'subscriptions.shared.count', + 'subscriptions.shared.max' +]). %% Route stats --define(ROUTE_STATS, - ['routes.count', - 'routes.max' - ]). +-define(ROUTE_STATS, [ + 'routes.count', + 'routes.max' +]). %% Retained stats --define(RETAINED_STATS, - ['retained.count', - 'retained.max' - ]). +-define(RETAINED_STATS, [ + 'retained.count', + 'retained.max' +]). -define(TAB, ?MODULE). -define(SERVER, ?MODULE). --type(opts() :: #{tick_ms := timeout()}). +-type opts() :: #{tick_ms := timeout()}. %% @doc Start stats server --spec(start_link() -> startlink_ret()). +-spec start_link() -> startlink_ret(). start_link() -> start_link(#{tick_ms => timer:seconds(1)}). --spec(start_link(opts()) -> startlink_ret()). +-spec start_link(opts()) -> startlink_ret(). start_link(Opts) -> gen_server:start_link({local, ?SERVER}, ?MODULE, Opts, []). --spec(stop() -> ok). +-spec stop() -> ok. stop() -> gen_server:call(?SERVER, stop, infinity). %% @doc Generate stats fun. --spec(statsfun(Stat :: atom()) -> fun()). +-spec statsfun(Stat :: atom()) -> fun(). statsfun(Stat) -> fun(Val) -> setstat(Stat, Val) end. --spec(statsfun(Stat :: atom(), MaxStat :: atom()) -> fun()). +-spec statsfun(Stat :: atom(), MaxStat :: atom()) -> fun(). statsfun(Stat, MaxStat) -> fun(Val) -> setstat(Stat, MaxStat, Val) end. %% @doc Get all statistics. --spec(getstats() -> stats()). +-spec getstats() -> stats(). getstats() -> case ets:info(?TAB, name) of undefined -> []; @@ -147,7 +164,7 @@ getstats() -> end. %% @doc Get stats by name. --spec(getstat(atom()) -> maybe(non_neg_integer())). +-spec getstat(atom()) -> maybe(non_neg_integer()). getstat(Name) -> case ets:lookup(?TAB, Name) of [{Name, Val}] -> Val; @@ -155,25 +172,28 @@ getstat(Name) -> end. %% @doc Set stats --spec(setstat(Stat :: atom(), Val :: pos_integer()) -> boolean()). +-spec setstat(Stat :: atom(), Val :: pos_integer()) -> boolean(). setstat(Stat, Val) when is_integer(Val) -> safe_update_element(Stat, Val). %% @doc Set stats with max value. --spec(setstat(Stat :: atom(), MaxStat :: atom(), - Val :: pos_integer()) -> ok). +-spec setstat( + Stat :: atom(), + MaxStat :: atom(), + Val :: pos_integer() +) -> ok. setstat(Stat, MaxStat, Val) when is_integer(Val) -> cast({setstat, Stat, MaxStat, Val}). --spec(update_interval(atom(), fun()) -> ok). +-spec update_interval(atom(), fun()) -> ok. update_interval(Name, UpFun) -> update_interval(Name, 1, UpFun). --spec(update_interval(atom(), pos_integer(), fun()) -> ok). +-spec update_interval(atom(), pos_integer(), fun()) -> ok. update_interval(Name, Secs, UpFun) when is_integer(Secs), Secs >= 1 -> cast({update_interval, rec(Name, Secs, UpFun)}). --spec(cancel_update(atom()) -> ok). +-spec cancel_update(atom()) -> ok. cancel_update(Name) -> cast({cancel_update, Name}). @@ -188,13 +208,14 @@ cast(Msg) -> gen_server:cast(?SERVER, Msg). 
init(#{tick_ms := TickMs}) -> ok = emqx_tables:new(?TAB, [public, set, {write_concurrency, true}]), - Stats = lists:append([?CONNECTION_STATS, - ?CHANNEL_STATS, - ?SESSION_STATS, - ?PUBSUB_STATS, - ?ROUTE_STATS, - ?RETAINED_STATS - ]), + Stats = lists:append([ + ?CONNECTION_STATS, + ?CHANNEL_STATS, + ?SESSION_STATS, + ?PUBSUB_STATS, + ?ROUTE_STATS, + ?RETAINED_STATS + ]), true = ets:insert(?TAB, [{Name, 0} || Name <- Stats]), {ok, start_timer(#state{updates = [], tick_ms = TickMs}), hibernate}. @@ -203,7 +224,6 @@ start_timer(#state{tick_ms = Ms} = State) -> handle_call(stop, _From, State) -> {stop, normal, ok, State}; - handle_call(Req, _From, State) -> ?SLOG(error, #{msg => "unexpected_call", call => Req}), {reply, ignored, State}. @@ -212,59 +232,77 @@ handle_cast({setstat, Stat, MaxStat, Val}, State) -> try ets:lookup_element(?TAB, MaxStat, 2) of MaxVal when Val > MaxVal -> ets:update_element(?TAB, MaxStat, {2, Val}); - _ -> ok + _ -> + ok catch error:badarg -> ets:insert(?TAB, {MaxStat, Val}) end, safe_update_element(Stat, Val), - ?tp(emqx_stats_setstat, - #{ count_stat => Stat - , max_stat => MaxStat - , value => Val - }), + ?tp( + emqx_stats_setstat, + #{ + count_stat => Stat, + max_stat => MaxStat, + value => Val + } + ), {noreply, State}; - -handle_cast({update_interval, Update = #update{name = Name}}, - State = #state{updates = Updates}) -> - NState = case lists:keyfind(Name, #update.name, Updates) of - #update{} -> - ?SLOG(warning, #{msg => "duplicated_update", - name => Name - }), - State; - false -> State#state{updates = [Update | Updates]} - end, +handle_cast( + {update_interval, Update = #update{name = Name}}, + State = #state{updates = Updates} +) -> + NState = + case lists:keyfind(Name, #update.name, Updates) of + #update{} -> + ?SLOG(warning, #{ + msg => "duplicated_update", + name => Name + }), + State; + false -> + State#state{updates = [Update | Updates]} + end, {noreply, NState}; - handle_cast({cancel_update, Name}, State = #state{updates = Updates}) -> Updates1 = lists:keydelete(Name, #update.name, Updates), {noreply, State#state{updates = Updates1}}; - handle_cast(Msg, State) -> ?SLOG(error, #{msg => "unexpected_cast", cast => Msg}), {noreply, State}. handle_info({timeout, TRef, tick}, State = #state{timer = TRef, updates = Updates}) -> Updates1 = lists:foldl( - fun(Update = #update{name = Name, countdown = C, interval = I, - func = UpFun}, Acc) when C =< 0 -> - try UpFun() - catch - Error : Reason : Stacktrace -> - ?SLOG(error, #{msg => "update_name_failed", - name => Name, - exception => Error, - reason => Reason, - stacktrace => Stacktrace - }) - end, - [Update#update{countdown = I} | Acc]; - (Update = #update{countdown = C}, Acc) -> - [Update#update{countdown = C - 1} | Acc] - end, [], Updates), + fun + ( + Update = #update{ + name = Name, + countdown = C, + interval = I, + func = UpFun + }, + Acc + ) when C =< 0 -> + try + UpFun() + catch + Error:Reason:Stacktrace -> + ?SLOG(error, #{ + msg => "update_name_failed", + name => Name, + exception => Error, + reason => Reason, + stacktrace => Stacktrace + }) + end, + [Update#update{countdown = I} | Acc]; + (Update = #update{countdown = C}, Acc) -> + [Update#update{countdown = C - 1} | Acc] + end, + [], + Updates + ), {noreply, start_timer(State#state{updates = Updates1}), hibernate}; - handle_info(Info, State) -> ?SLOG(error, #{msg => "unexpected_info", info => Info}), {noreply, State}. 
@@ -283,7 +321,8 @@ safe_update_element(Key, Val) -> try ets:update_element(?TAB, Key, {2, Val}) of false -> ets:insert_new(?TAB, {Key, Val}); - true -> true + true -> + true catch error:badarg -> ?SLOG(warning, #{ diff --git a/apps/emqx/src/emqx_sup.erl b/apps/emqx/src/emqx_sup.erl index 8417a735e..c6b802ae7 100644 --- a/apps/emqx/src/emqx_sup.erl +++ b/apps/emqx/src/emqx_sup.erl @@ -20,17 +20,19 @@ -include("types.hrl"). --export([ start_link/0 - , start_child/1 - , start_child/2 - , stop_child/1 - ]). +-export([ + start_link/0, + start_child/1, + start_child/2, + stop_child/1 +]). -export([init/1]). --type(startchild_ret() :: {ok, supervisor:child()} - | {ok, supervisor:child(), term()} - | {error, term()}). +-type startchild_ret() :: + {ok, supervisor:child()} + | {ok, supervisor:child(), term()} + | {error, term()}. -define(SUP, ?MODULE). @@ -38,19 +40,19 @@ %% API %%-------------------------------------------------------------------- --spec(start_link() -> startlink_ret()). +-spec start_link() -> startlink_ret(). start_link() -> supervisor:start_link({local, ?SUP}, ?MODULE, []). --spec(start_child(supervisor:child_spec()) -> startchild_ret()). +-spec start_child(supervisor:child_spec()) -> startchild_ret(). start_child(ChildSpec) when is_map(ChildSpec) -> supervisor:start_child(?SUP, ChildSpec). --spec(start_child(module(), worker | supervisor) -> startchild_ret()). +-spec start_child(module(), worker | supervisor) -> startchild_ret(). start_child(Mod, Type) -> start_child(child_spec(Mod, Type)). --spec(stop_child(supervisor:child_id()) -> ok | {error, term()}). +-spec stop_child(supervisor:child_id()) -> ok | {error, term()}. stop_child(ChildId) -> case supervisor:terminate_child(?SUP, ChildId) of ok -> supervisor:delete_child(?SUP, ChildId); @@ -69,16 +71,18 @@ init([]) -> CMSup = child_spec(emqx_cm_sup, supervisor), SysSup = child_spec(emqx_sys_sup, supervisor), Limiter = child_spec(emqx_limiter_sup, supervisor), - Children = [KernelSup] ++ - [SessionSup || emqx_persistent_session:is_store_enabled()] ++ - [RouterSup || emqx_boot:is_enabled(router)] ++ - [BrokerSup || emqx_boot:is_enabled(broker)] ++ - [CMSup || emqx_boot:is_enabled(broker)] ++ - [SysSup, Limiter], - SupFlags = #{strategy => one_for_all, - intensity => 0, - period => 1 - }, + Children = + [KernelSup] ++ + [SessionSup || emqx_persistent_session:is_store_enabled()] ++ + [RouterSup || emqx_boot:is_enabled(router)] ++ + [BrokerSup || emqx_boot:is_enabled(broker)] ++ + [CMSup || emqx_boot:is_enabled(broker)] ++ + [SysSup, Limiter], + SupFlags = #{ + strategy => one_for_all, + intensity => 0, + period => 1 + }, {ok, {SupFlags, Children}}. %%-------------------------------------------------------------------- @@ -86,20 +90,20 @@ init([]) -> %%-------------------------------------------------------------------- child_spec(Mod, supervisor) -> - #{id => Mod, - start => {Mod, start_link, []}, - restart => permanent, - shutdown => infinity, - type => supervisor, - modules => [Mod] - }; - + #{ + id => Mod, + start => {Mod, start_link, []}, + restart => permanent, + shutdown => infinity, + type => supervisor, + modules => [Mod] + }; child_spec(Mod, worker) -> - #{id => Mod, - start => {Mod, start_link, []}, - restart => permanent, - shutdown => 15000, - type => worker, - modules => [Mod] - }. - + #{ + id => Mod, + start => {Mod, start_link, []}, + restart => permanent, + shutdown => 15000, + type => worker, + modules => [Mod] + }. 
diff --git a/apps/emqx/src/emqx_sys.erl b/apps/emqx/src/emqx_sys.erl index 2deb250d8..9fa116313 100644 --- a/apps/emqx/src/emqx_sys.erl +++ b/apps/emqx/src/emqx_sys.erl @@ -22,32 +22,35 @@ -include("types.hrl"). -include("logger.hrl"). +-export([ + start_link/0, + stop/0 +]). --export([ start_link/0 - , stop/0 - ]). - --export([ version/0 - , uptime/0 - , datetime/0 - , sysdescr/0 - ]). +-export([ + version/0, + uptime/0, + datetime/0, + sysdescr/0 +]). -export([info/0]). %% gen_server callbacks --export([ init/1 - , handle_call/3 - , handle_cast/2 - , handle_info/2 - , terminate/2 - ]). +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2 +]). --export([ on_client_connected/2 - , on_client_disconnected/3 - , on_client_subscribed/3 - , on_client_unsubscribed/3 - ]). +-export([ + on_client_connected/2, + on_client_disconnected/3, + on_client_subscribed/3, + on_client_unsubscribed/3 +]). -ifdef(TEST). -compile(export_all). @@ -57,27 +60,33 @@ -import(emqx_topic, [systop/1]). -import(emqx_misc, [start_timer/2]). --record(state, - {heartbeat :: maybe(reference()) - , ticker :: maybe(reference()) - , sysdescr :: binary() - }). +-record(state, { + heartbeat :: maybe(reference()), + ticker :: maybe(reference()), + sysdescr :: binary() +}). -define(APP, emqx). -define(SYS, ?MODULE). -define(INFO_KEYS, - [ version % Broker version - , uptime % Broker uptime - , datetime % Broker local datetime - , sysdescr % Broker description - ]). + % Broker version + [ + version, + % Broker uptime + uptime, + % Broker local datetime + datetime, + % Broker description + sysdescr + ] +). %%-------------------------------------------------------------------- %% APIs %%-------------------------------------------------------------------- --spec(start_link() -> {ok, pid()} | ignore | {error, any()}). +-spec start_link() -> {ok, pid()} | ignore | {error, any()}. start_link() -> gen_server:start_link({local, ?SYS}, ?MODULE, [], []). @@ -85,26 +94,28 @@ stop() -> gen_server:stop(?SYS). %% @doc Get sys version --spec(version() -> string()). +-spec version() -> string(). version() -> emqx_app:get_release(). %% @doc Get sys description --spec(sysdescr() -> string()). +-spec sysdescr() -> string(). sysdescr() -> emqx_app:get_description(). %% @doc Get sys uptime --spec(uptime() -> Milliseconds :: integer()). +-spec uptime() -> Milliseconds :: integer(). uptime() -> {TotalWallClock, _} = erlang:statistics(wall_clock), TotalWallClock. %% @doc Get sys datetime --spec(datetime() -> string()). +-spec datetime() -> string(). datetime() -> {{Y, M, D}, {H, MM, S}} = calendar:local_time(), lists:flatten( io_lib:format( - "~4..0w-~2..0w-~2..0w ~2..0w:~2..0w:~2..0w", [Y, M, D, H, MM, S])). + "~4..0w-~2..0w-~2..0w ~2..0w:~2..0w:~2..0w", [Y, M, D, H, MM, S] + ) + ). sys_interval() -> emqx:get_config([sys_topics, sys_msg_interval]). @@ -116,19 +127,21 @@ sys_event_messages() -> emqx:get_config([sys_topics, sys_event_messages]). %% @doc Get sys info --spec(info() -> list(tuple())). +-spec info() -> list(tuple()). info() -> - [{version, version()}, - {sysdescr, sysdescr()}, - {uptime, uptime()}, - {datetime, datetime()}]. + [ + {version, version()}, + {sysdescr, sysdescr()}, + {uptime, uptime()}, + {datetime, datetime()} + ]. 
%%-------------------------------------------------------------------- %% gen_server callbacks %%-------------------------------------------------------------------- init([]) -> - State = #state{sysdescr = iolist_to_binary(sysdescr())}, + State = #state{sysdescr = iolist_to_binary(sysdescr())}, load_event_hooks(), {ok, heartbeat(tick(State))}. @@ -139,11 +152,15 @@ tick(State) -> load_event_hooks() -> lists:foreach( - fun({_, false}) -> ok; - ({K, true}) -> - {HookPoint, Fun} = hook_and_fun(K), - emqx_hooks:put(HookPoint, {?MODULE, Fun, []}) - end, maps:to_list(sys_event_messages())). + fun + ({_, false}) -> + ok; + ({K, true}) -> + {HookPoint, Fun} = hook_and_fun(K), + emqx_hooks:put(HookPoint, {?MODULE, Fun, []}) + end, + maps:to_list(sys_event_messages()) + ). handle_call(Req, _From, State) -> ?SLOG(error, #{msg => "unexpected_call", call => Req}), @@ -157,7 +174,6 @@ handle_info({timeout, TRef, heartbeat}, State = #state{heartbeat = TRef}) -> publish_any(uptime, integer_to_binary(uptime())), publish_any(datetime, iolist_to_binary(datetime())), {noreply, heartbeat(State)}; - handle_info({timeout, TRef, tick}, State = #state{ticker = TRef, sysdescr = Descr}) -> publish_any(version, version()), publish_any(sysdescr, Descr), @@ -165,7 +181,6 @@ handle_info({timeout, TRef, tick}, State = #state{ticker = TRef, sysdescr = Desc publish_any(stats, emqx_stats:getstats()), publish_any(metrics, emqx_metrics:all()), {noreply, tick(State), hibernate}; - handle_info(Info, State) -> ?SLOG(error, #{msg => "unexpected_info", info => Info}), {noreply, State}. @@ -175,10 +190,13 @@ terminate(_Reason, #state{heartbeat = TRef1, ticker = TRef2}) -> lists:foreach(fun emqx_misc:cancel_timer/1, [TRef1, TRef2]). unload_event_hooks() -> - lists:foreach(fun({K, _}) -> - {HookPoint, Fun} = hook_and_fun(K), - emqx_hooks:del(HookPoint, {?MODULE, Fun}) - end, maps:to_list(sys_event_messages())). + lists:foreach( + fun({K, _}) -> + {HookPoint, Fun} = hook_and_fun(K), + emqx_hooks:del(HookPoint, {?MODULE, Fun}) + end, + maps:to_list(sys_event_messages()) + ). %%-------------------------------------------------------------------- %% hook callbacks @@ -187,20 +205,22 @@ unload_event_hooks() -> on_client_connected(ClientInfo, ConnInfo) -> Payload0 = common_infos(ClientInfo, ConnInfo), Payload = Payload0#{ - keepalive => maps:get(keepalive, ConnInfo, 0), - clean_start => maps:get(clean_start, ConnInfo, true), - expiry_interval => maps:get(expiry_interval, ConnInfo, 0) - }, + keepalive => maps:get(keepalive, ConnInfo, 0), + clean_start => maps:get(clean_start, ConnInfo, true), + expiry_interval => maps:get(expiry_interval, ConnInfo, 0) + }, publish(connected, Payload). -on_client_disconnected(ClientInfo, Reason, - ConnInfo = #{disconnected_at := DisconnectedAt}) -> - +on_client_disconnected( + ClientInfo, + Reason, + ConnInfo = #{disconnected_at := DisconnectedAt} +) -> Payload0 = common_infos(ClientInfo, ConnInfo), Payload = Payload0#{ - reason => reason(Reason), - disconnected_at => DisconnectedAt - }, + reason => reason(Reason), + disconnected_at => DisconnectedAt + }, publish(disconnected, Payload). -compile({inline, [reason/1]}). @@ -209,29 +229,41 @@ reason({shutdown, Reason}) when is_atom(Reason) -> Reason; reason({Error, _}) when is_atom(Error) -> Error; reason(_) -> internal_error. 
-on_client_subscribed(_ClientInfo = #{clientid := ClientId, - username := Username, - protocol := Protocol}, - Topic, SubOpts) -> - Payload = #{clientid => ClientId, - username => Username, - protocol => Protocol, - topic => Topic, - subopts => SubOpts, - ts => erlang:system_time(millisecond) - }, +on_client_subscribed( + _ClientInfo = #{ + clientid := ClientId, + username := Username, + protocol := Protocol + }, + Topic, + SubOpts +) -> + Payload = #{ + clientid => ClientId, + username => Username, + protocol => Protocol, + topic => Topic, + subopts => SubOpts, + ts => erlang:system_time(millisecond) + }, publish(subscribed, Payload). -on_client_unsubscribed(_ClientInfo = #{clientid := ClientId, - username := Username, - protocol := Protocol}, - Topic, _SubOpts) -> - Payload = #{clientid => ClientId, - username => Username, - protocol => Protocol, - topic => Topic, - ts => erlang:system_time(millisecond) - }, +on_client_unsubscribed( + _ClientInfo = #{ + clientid := ClientId, + username := Username, + protocol := Protocol + }, + Topic, + _SubOpts +) -> + Payload = #{ + clientid => ClientId, + username => Username, + protocol => Protocol, + topic => Topic, + ts => erlang:system_time(millisecond) + }, publish(unsubscribed, Payload). %%-------------------------------------------------------------------- @@ -263,13 +295,21 @@ publish(brokers, Nodes) -> Payload = string:join([atom_to_list(N) || N <- Nodes], ","), safe_publish(<<"$SYS/brokers">>, #{retain => true}, Payload); publish(stats, Stats) -> - [safe_publish(systop(lists:concat(['stats/', Stat])), integer_to_binary(Val)) - || {Stat, Val} <- Stats, is_atom(Stat), is_integer(Val)]; + [ + safe_publish(systop(lists:concat(['stats/', Stat])), integer_to_binary(Val)) + || {Stat, Val} <- Stats, is_atom(Stat), is_integer(Val) + ]; publish(metrics, Metrics) -> - [safe_publish(systop(metric_topic(Name)), integer_to_binary(Val)) - || {Name, Val} <- Metrics, is_atom(Name), is_integer(Val)]; -publish(Event, Payload) when Event == connected; Event == disconnected; - Event == subscribed; Event == unsubscribed -> + [ + safe_publish(systop(metric_topic(Name)), integer_to_binary(Val)) + || {Name, Val} <- Metrics, is_atom(Name), is_integer(Val) + ]; +publish(Event, Payload) when + Event == connected; + Event == disconnected; + Event == subscribed; + Event == unsubscribed +-> Topic = event_topic(Event, Payload), safe_publish(Topic, emqx_json:encode(Payload)). @@ -280,42 +320,55 @@ safe_publish(Topic, Payload) -> safe_publish(Topic, #{}, Payload). safe_publish(Topic, Flags, Payload) -> emqx_broker:safe_publish( - emqx_message:set_flags( - maps:merge(#{sys => true}, Flags), - emqx_message:make(?SYS, Topic, iolist_to_binary(Payload)))). + emqx_message:set_flags( + maps:merge(#{sys => true}, Flags), + emqx_message:make(?SYS, Topic, iolist_to_binary(Payload)) + ) + ). common_infos( - _ClientInfo = #{clientid := ClientId, - username := Username, - peerhost := PeerHost, - sockport := SockPort, - protocol := Protocol - }, - _ConnInfo = #{proto_name := ProtoName, - proto_ver := ProtoVer, - connected_at := ConnectedAt - }) -> - #{clientid => ClientId, - username => Username, - ipaddress => ntoa(PeerHost), - sockport => SockPort, - protocol => Protocol, - proto_name => ProtoName, - proto_ver => ProtoVer, - connected_at => ConnectedAt, - ts => erlang:system_time(millisecond) - }. 
+ _ClientInfo = #{ + clientid := ClientId, + username := Username, + peerhost := PeerHost, + sockport := SockPort, + protocol := Protocol + }, + _ConnInfo = #{ + proto_name := ProtoName, + proto_ver := ProtoVer, + connected_at := ConnectedAt + } +) -> + #{ + clientid => ClientId, + username => Username, + ipaddress => ntoa(PeerHost), + sockport => SockPort, + protocol => Protocol, + proto_name => ProtoName, + proto_ver => ProtoVer, + connected_at => ConnectedAt, + ts => erlang:system_time(millisecond) + }. ntoa(undefined) -> undefined; -ntoa({IpAddr, Port}) -> - iolist_to_binary([inet:ntoa(IpAddr), ":", integer_to_list(Port)]); -ntoa(IpAddr) -> - iolist_to_binary(inet:ntoa(IpAddr)). +ntoa({IpAddr, Port}) -> iolist_to_binary([inet:ntoa(IpAddr), ":", integer_to_list(Port)]); +ntoa(IpAddr) -> iolist_to_binary(inet:ntoa(IpAddr)). event_topic(Event, #{clientid := ClientId, protocol := mqtt}) -> iolist_to_binary( - [systop("clients"), "/", ClientId, "/", atom_to_binary(Event)]); + [systop("clients"), "/", ClientId, "/", atom_to_binary(Event)] + ); event_topic(Event, #{clientid := ClientId, protocol := GwName}) -> iolist_to_binary( - [systop("gateway"), "/", atom_to_binary(GwName), - "/clients/", ClientId, "/", atom_to_binary(Event)]). + [ + systop("gateway"), + "/", + atom_to_binary(GwName), + "/clients/", + ClientId, + "/", + atom_to_binary(Event) + ] + ). diff --git a/apps/emqx/src/emqx_sys_mon.erl b/apps/emqx/src/emqx_sys_mon.erl index 1df9ec604..697ced06e 100644 --- a/apps/emqx/src/emqx_sys_mon.erl +++ b/apps/emqx/src/emqx_sys_mon.erl @@ -21,25 +21,25 @@ -include("types.hrl"). -include("logger.hrl"). - -export([start_link/0]). %% compress unused warning -export([procinfo/1]). %% gen_server callbacks --export([ init/1 - , handle_call/3 - , handle_cast/2 - , handle_info/2 - , terminate/2 - , code_change/3 - ]). +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2, + code_change/3 +]). -define(SYSMON, ?MODULE). %% @doc Start the system monitor. --spec(start_link() -> startlink_ret()). +-spec start_link() -> startlink_ret(). start_link() -> gen_server:start_link({local, ?SYSMON}, ?MODULE, [], []). @@ -63,23 +63,23 @@ sysm_opts() -> sysm_opts(maps:to_list(emqx:get_config([sysmon, vm])), []). 
sysm_opts([], Acc) -> Acc; -sysm_opts([{_, disabled}|Opts], Acc) -> +sysm_opts([{_, disabled} | Opts], Acc) -> sysm_opts(Opts, Acc); -sysm_opts([{long_gc, Ms}|Opts], Acc) when is_integer(Ms) -> - sysm_opts(Opts, [{long_gc, Ms}|Acc]); -sysm_opts([{long_schedule, Ms}|Opts], Acc) when is_integer(Ms) -> - sysm_opts(Opts, [{long_schedule, Ms}|Acc]); -sysm_opts([{large_heap, Size}|Opts], Acc) when is_integer(Size) -> - sysm_opts(Opts, [{large_heap, Size}|Acc]); -sysm_opts([{busy_port, true}|Opts], Acc) -> - sysm_opts(Opts, [busy_port|Acc]); -sysm_opts([{busy_port, false}|Opts], Acc) -> +sysm_opts([{long_gc, Ms} | Opts], Acc) when is_integer(Ms) -> + sysm_opts(Opts, [{long_gc, Ms} | Acc]); +sysm_opts([{long_schedule, Ms} | Opts], Acc) when is_integer(Ms) -> + sysm_opts(Opts, [{long_schedule, Ms} | Acc]); +sysm_opts([{large_heap, Size} | Opts], Acc) when is_integer(Size) -> + sysm_opts(Opts, [{large_heap, Size} | Acc]); +sysm_opts([{busy_port, true} | Opts], Acc) -> + sysm_opts(Opts, [busy_port | Acc]); +sysm_opts([{busy_port, false} | Opts], Acc) -> sysm_opts(Opts, Acc); -sysm_opts([{busy_dist_port, true}|Opts], Acc) -> - sysm_opts(Opts, [busy_dist_port|Acc]); -sysm_opts([{busy_dist_port, false}|Opts], Acc) -> +sysm_opts([{busy_dist_port, true} | Opts], Acc) -> + sysm_opts(Opts, [busy_dist_port | Acc]); +sysm_opts([{busy_dist_port, false} | Opts], Acc) -> sysm_opts(Opts, Acc); -sysm_opts([_Opt|Opts], Acc) -> +sysm_opts([_Opt | Opts], Acc) -> sysm_opts(Opts, Acc). handle_call(Req, _From, State) -> @@ -91,70 +91,91 @@ handle_cast(Msg, State) -> {noreply, State}. handle_info({monitor, Pid, long_gc, Info}, State) -> - suppress({long_gc, Pid}, - fun() -> - WarnMsg = io_lib:format("long_gc warning: pid = ~p", [Pid]), - ?SLOG(warning, #{msg => long_gc, - info => Info, - porcinfo => procinfo(Pid) - }), - safe_publish(long_gc, WarnMsg) - end, State); - + suppress( + {long_gc, Pid}, + fun() -> + WarnMsg = io_lib:format("long_gc warning: pid = ~p", [Pid]), + ?SLOG(warning, #{ + msg => long_gc, + info => Info, + porcinfo => procinfo(Pid) + }), + safe_publish(long_gc, WarnMsg) + end, + State + ); handle_info({monitor, Pid, long_schedule, Info}, State) when is_pid(Pid) -> - suppress({long_schedule, Pid}, - fun() -> - WarnMsg = io_lib:format("long_schedule warning: pid = ~p", [Pid]), - ?SLOG(warning, #{msg => long_schedule, - info => Info, - procinfo => procinfo(Pid)}), - safe_publish(long_schedule, WarnMsg) - end, State); - + suppress( + {long_schedule, Pid}, + fun() -> + WarnMsg = io_lib:format("long_schedule warning: pid = ~p", [Pid]), + ?SLOG(warning, #{ + msg => long_schedule, + info => Info, + procinfo => procinfo(Pid) + }), + safe_publish(long_schedule, WarnMsg) + end, + State + ); handle_info({monitor, Port, long_schedule, Info}, State) when is_port(Port) -> - suppress({long_schedule, Port}, - fun() -> - WarnMsg = io_lib:format("long_schedule warning: port = ~p", [Port]), - ?SLOG(warning, #{msg => long_schedule, - info => Info, - portinfo => portinfo(Port)}), - safe_publish(long_schedule, WarnMsg) - end, State); - + suppress( + {long_schedule, Port}, + fun() -> + WarnMsg = io_lib:format("long_schedule warning: port = ~p", [Port]), + ?SLOG(warning, #{ + msg => long_schedule, + info => Info, + portinfo => portinfo(Port) + }), + safe_publish(long_schedule, WarnMsg) + end, + State + ); handle_info({monitor, Pid, large_heap, Info}, State) -> - suppress({large_heap, Pid}, - fun() -> - WarnMsg = io_lib:format("large_heap warning: pid = ~p", [Pid]), - ?SLOG(warning, #{msg => large_heap, - info => Info, - 
procinfo => procinfo(Pid)}), - safe_publish(large_heap, WarnMsg) - end, State); - + suppress( + {large_heap, Pid}, + fun() -> + WarnMsg = io_lib:format("large_heap warning: pid = ~p", [Pid]), + ?SLOG(warning, #{ + msg => large_heap, + info => Info, + procinfo => procinfo(Pid) + }), + safe_publish(large_heap, WarnMsg) + end, + State + ); handle_info({monitor, SusPid, busy_port, Port}, State) -> - suppress({busy_port, Port}, - fun() -> - WarnMsg = io_lib:format("busy_port warning: suspid = ~p, port = ~p", [SusPid, Port]), - ?SLOG(warning, #{msg => busy_port, - portinfo => portinfo(Port), - procinfo => procinfo(SusPid) - }), - safe_publish(busy_port, WarnMsg) - end, State); - + suppress( + {busy_port, Port}, + fun() -> + WarnMsg = io_lib:format("busy_port warning: suspid = ~p, port = ~p", [SusPid, Port]), + ?SLOG(warning, #{ + msg => busy_port, + portinfo => portinfo(Port), + procinfo => procinfo(SusPid) + }), + safe_publish(busy_port, WarnMsg) + end, + State + ); handle_info({monitor, SusPid, busy_dist_port, Port}, State) -> - suppress({busy_dist_port, Port}, - fun() -> - WarnMsg = io_lib:format("busy_dist_port warning: suspid = ~p, port = ~p", [SusPid, Port]), - ?SLOG(warning, #{msg => busy_dist_port, - portinfo => portinfo(Port), - procinfo => procinfo(SusPid)}), - safe_publish(busy_dist_port, WarnMsg) - end, State); - + suppress( + {busy_dist_port, Port}, + fun() -> + WarnMsg = io_lib:format("busy_dist_port warning: suspid = ~p, port = ~p", [SusPid, Port]), + ?SLOG(warning, #{ + msg => busy_dist_port, + portinfo => portinfo(Port), + procinfo => procinfo(SusPid) + }), + safe_publish(busy_dist_port, WarnMsg) + end, + State + ); handle_info({timeout, _Ref, reset}, State) -> {noreply, State#{events := []}, hibernate}; - handle_info(Info, State) -> ?SLOG(error, #{msg => "unexpected_info", info => Info}), {noreply, State}. @@ -182,13 +203,13 @@ suppress(Key, SuccFun, State = #{events := Events}) -> {noreply, State}; false -> _ = SuccFun(), - {noreply, State#{events := [Key|Events]}} + {noreply, State#{events := [Key | Events]}} end. procinfo(Pid) -> [{pid, Pid} | procinfo_l(emqx_vm:get_process_gc_info(Pid))] ++ - get_proc_lib_initial_call(Pid) ++ - procinfo_l(emqx_vm:get_process_info(Pid)). + get_proc_lib_initial_call(Pid) ++ + procinfo_l(emqx_vm:get_process_info(Pid)). procinfo_l(undefined) -> []; procinfo_l(List) -> List. diff --git a/apps/emqx/src/emqx_sys_sup.erl b/apps/emqx/src/emqx_sys_sup.erl index e0054c162..fb50660f8 100644 --- a/apps/emqx/src/emqx_sys_sup.erl +++ b/apps/emqx/src/emqx_sys_sup.erl @@ -26,11 +26,13 @@ start_link() -> supervisor:start_link({local, ?MODULE}, ?MODULE, []). init([]) -> - Childs = [child_spec(emqx_sys), - child_spec(emqx_alarm), - child_spec(emqx_sys_mon), - child_spec(emqx_os_mon), - child_spec(emqx_vm_mon)], + Childs = [ + child_spec(emqx_sys), + child_spec(emqx_alarm), + child_spec(emqx_sys_mon), + child_spec(emqx_os_mon), + child_spec(emqx_vm_mon) + ], {ok, {{one_for_one, 10, 100}, Childs}}. %%-------------------------------------------------------------------- @@ -41,10 +43,11 @@ child_spec(Mod) -> child_spec(Mod, []). child_spec(Mod, Args) -> - #{id => Mod, - start => {Mod, start_link, Args}, - restart => permanent, - shutdown => 5000, - type => worker, - modules => [Mod] - }. + #{ + id => Mod, + start => {Mod, start_link, Args}, + restart => permanent, + shutdown => 5000, + type => worker, + modules => [Mod] + }. 
diff --git a/apps/emqx/src/emqx_tables.erl b/apps/emqx/src/emqx_tables.erl index b90aa06d3..d41d93f63 100644 --- a/apps/emqx/src/emqx_tables.erl +++ b/apps/emqx/src/emqx_tables.erl @@ -16,50 +16,54 @@ -module(emqx_tables). --export([ new/1 - , new/2 - ]). +-export([ + new/1, + new/2 +]). --export([ lookup_value/2 - , lookup_value/3 - ]). +-export([ + lookup_value/2, + lookup_value/3 +]). -export([delete/1]). %% Create an ets table. --spec(new(atom()) -> ok). +-spec new(atom()) -> ok. new(Tab) -> new(Tab, []). %% Create a named_table ets. --spec(new(atom(), list()) -> ok). +-spec new(atom(), list()) -> ok. new(Tab, Opts) -> case ets:info(Tab, name) of undefined -> _ = ets:new(Tab, lists:usort([named_table | Opts])), ok; - Tab -> ok + Tab -> + ok end. %% KV lookup --spec(lookup_value(ets:tab(), term()) -> any()). +-spec lookup_value(ets:tab(), term()) -> any(). lookup_value(Tab, Key) -> lookup_value(Tab, Key, undefined). --spec(lookup_value(ets:tab(), term(), any()) -> any()). +-spec lookup_value(ets:tab(), term(), any()) -> any(). lookup_value(Tab, Key, Def) -> - try ets:lookup_element(Tab, Key, 2) + try + ets:lookup_element(Tab, Key, 2) catch error:badarg -> Def end. %% Delete the ets table. --spec(delete(ets:tab()) -> ok). +-spec delete(ets:tab()) -> ok. delete(Tab) -> case ets:info(Tab, name) of - undefined -> ok; + undefined -> + ok; Tab -> ets:delete(Tab), ok end. - diff --git a/apps/emqx/src/emqx_tls_lib.erl b/apps/emqx/src/emqx_tls_lib.erl index 920508a85..f6fba3bfc 100644 --- a/apps/emqx/src/emqx_tls_lib.erl +++ b/apps/emqx/src/emqx_tls_lib.erl @@ -17,20 +17,22 @@ -module(emqx_tls_lib). %% version & cipher suites --export([ default_versions/0 - , integral_versions/1 - , default_ciphers/0 - , selected_ciphers/1 - , integral_ciphers/2 - , drop_tls13_for_old_otp/1 - , all_ciphers/0 - ]). +-export([ + default_versions/0, + integral_versions/1, + default_ciphers/0, + selected_ciphers/1, + integral_ciphers/2, + drop_tls13_for_old_otp/1, + all_ciphers/0 +]). %% SSL files --export([ ensure_ssl_files/2 - , delete_ssl_files/3 - , file_content_as_options/1 - ]). +-export([ + ensure_ssl_files/2, + delete_ssl_files/3, + file_content_as_options/1 +]). -include("logger.hrl"). @@ -52,7 +54,7 @@ default_versions() -> available_versions(). %% raise an error exception if non of them are available. %% The input list can be a string/binary of comma separated versions. -spec integral_versions(undefined | string() | binary() | [ssl:tls_version()]) -> - [ssl:tls_version()]. + [ssl:tls_version()]. integral_versions(undefined) -> integral_versions(default_versions()); integral_versions([]) -> @@ -66,10 +68,12 @@ integral_versions(Desired) when is_binary(Desired) -> integral_versions(Desired) -> Available = available_versions(), case lists:filter(fun(V) -> lists:member(V, Available) end, Desired) of - [] -> erlang:error(#{ reason => no_available_tls_version - , desired => Desired - , available => Available - }); + [] -> + erlang:error(#{ + reason => no_available_tls_version, + desired => Desired, + available => Available + }); Filtered -> Filtered end. @@ -89,7 +93,6 @@ all_ciphers(Versions) -> %% assert non-empty [_ | _] = dedup(lists:append([ssl:cipher_suites(all, V, openssl) || V <- Versions])). - %% @doc All Pre-selected TLS ciphers. default_ciphers() -> selected_ciphers(available_versions()). @@ -97,8 +100,12 @@ default_ciphers() -> %% @doc Pre-selected TLS ciphers for given versions.. 
selected_ciphers(Vsns) -> All = all_ciphers(Vsns), - dedup(lists:filter(fun(Cipher) -> lists:member(Cipher, All) end, - lists:flatmap(fun do_selected_ciphers/1, Vsns))). + dedup( + lists:filter( + fun(Cipher) -> lists:member(Cipher, All) end, + lists:flatmap(fun do_selected_ciphers/1, Vsns) + ) + ). do_selected_ciphers('tlsv1.3') -> case lists:member('tlsv1.3', proplists:get_value(available, ssl:versions())) of @@ -106,24 +113,49 @@ do_selected_ciphers('tlsv1.3') -> false -> [] end ++ do_selected_ciphers('tlsv1.2'); do_selected_ciphers(_) -> - [ "ECDHE-ECDSA-AES256-GCM-SHA384", - "ECDHE-RSA-AES256-GCM-SHA384", "ECDHE-ECDSA-AES256-SHA384", "ECDHE-RSA-AES256-SHA384", - "ECDH-ECDSA-AES256-GCM-SHA384", "ECDH-RSA-AES256-GCM-SHA384", - "ECDH-ECDSA-AES256-SHA384", "ECDH-RSA-AES256-SHA384", "DHE-DSS-AES256-GCM-SHA384", - "DHE-DSS-AES256-SHA256", "AES256-GCM-SHA384", "AES256-SHA256", - "ECDHE-ECDSA-AES128-GCM-SHA256", "ECDHE-RSA-AES128-GCM-SHA256", - "ECDHE-ECDSA-AES128-SHA256", "ECDHE-RSA-AES128-SHA256", "ECDH-ECDSA-AES128-GCM-SHA256", - "ECDH-RSA-AES128-GCM-SHA256", "ECDH-ECDSA-AES128-SHA256", "ECDH-RSA-AES128-SHA256", - "DHE-DSS-AES128-GCM-SHA256", "DHE-DSS-AES128-SHA256", "AES128-GCM-SHA256", "AES128-SHA256", - "ECDHE-ECDSA-AES256-SHA", "ECDHE-RSA-AES256-SHA", "DHE-DSS-AES256-SHA", - "ECDH-ECDSA-AES256-SHA", "ECDH-RSA-AES256-SHA", "ECDHE-ECDSA-AES128-SHA", - "ECDHE-RSA-AES128-SHA", "DHE-DSS-AES128-SHA", "ECDH-ECDSA-AES128-SHA", - "ECDH-RSA-AES128-SHA", + [ + "ECDHE-ECDSA-AES256-GCM-SHA384", + "ECDHE-RSA-AES256-GCM-SHA384", + "ECDHE-ECDSA-AES256-SHA384", + "ECDHE-RSA-AES256-SHA384", + "ECDH-ECDSA-AES256-GCM-SHA384", + "ECDH-RSA-AES256-GCM-SHA384", + "ECDH-ECDSA-AES256-SHA384", + "ECDH-RSA-AES256-SHA384", + "DHE-DSS-AES256-GCM-SHA384", + "DHE-DSS-AES256-SHA256", + "AES256-GCM-SHA384", + "AES256-SHA256", + "ECDHE-ECDSA-AES128-GCM-SHA256", + "ECDHE-RSA-AES128-GCM-SHA256", + "ECDHE-ECDSA-AES128-SHA256", + "ECDHE-RSA-AES128-SHA256", + "ECDH-ECDSA-AES128-GCM-SHA256", + "ECDH-RSA-AES128-GCM-SHA256", + "ECDH-ECDSA-AES128-SHA256", + "ECDH-RSA-AES128-SHA256", + "DHE-DSS-AES128-GCM-SHA256", + "DHE-DSS-AES128-SHA256", + "AES128-GCM-SHA256", + "AES128-SHA256", + "ECDHE-ECDSA-AES256-SHA", + "ECDHE-RSA-AES256-SHA", + "DHE-DSS-AES256-SHA", + "ECDH-ECDSA-AES256-SHA", + "ECDH-RSA-AES256-SHA", + "ECDHE-ECDSA-AES128-SHA", + "ECDHE-RSA-AES128-SHA", + "DHE-DSS-AES128-SHA", + "ECDH-ECDSA-AES128-SHA", + "ECDH-RSA-AES128-SHA", - %% psk - "RSA-PSK-AES256-GCM-SHA384","RSA-PSK-AES256-CBC-SHA384", - "RSA-PSK-AES128-GCM-SHA256","RSA-PSK-AES128-CBC-SHA256", - "RSA-PSK-AES256-CBC-SHA","RSA-PSK-AES128-CBC-SHA" + %% psk + "RSA-PSK-AES256-GCM-SHA384", + "RSA-PSK-AES256-CBC-SHA384", + "RSA-PSK-AES128-GCM-SHA256", + "RSA-PSK-AES128-CBC-SHA256", + "RSA-PSK-AES256-CBC-SHA", + "RSA-PSK-AES128-CBC-SHA" ]. %% @doc Ensure version & cipher-suites integrity. @@ -146,7 +178,7 @@ integral_ciphers(Versions, Ciphers) -> ensure_tls13_cipher(true, Ciphers) -> Tls13Ciphers = selected_ciphers(['tlsv1.3']), case lists:any(fun(C) -> lists:member(C, Tls13Ciphers) end, Ciphers) of - true -> Ciphers; + true -> Ciphers; false -> Tls13Ciphers ++ Ciphers end; ensure_tls13_cipher(false, Ciphers) -> @@ -172,7 +204,8 @@ dedup([H | T]) -> [H | dedup([I || I <- T, I =/= H])]. parse_versions(Versions) -> do_parse_versions(split_by_comma(Versions), []). 
-do_parse_versions([], Acc) -> lists:reverse(Acc); +do_parse_versions([], Acc) -> + lists:reverse(Acc); do_parse_versions([V | More], Acc) -> case parse_version(V) of unknown -> @@ -209,21 +242,22 @@ drop_tls13_for_old_otp(SslOpts) -> %% should return when running on otp 23. %% But we still have to hard-code them because tlsv1.3 on otp 22 is %% not trustworthy. --define(TLSV13_EXCLUSIVE_CIPHERS, [ "TLS_AES_256_GCM_SHA384" - , "TLS_AES_128_GCM_SHA256" - , "TLS_CHACHA20_POLY1305_SHA256" - , "TLS_AES_128_CCM_SHA256" - , "TLS_AES_128_CCM_8_SHA256" - ]). +-define(TLSV13_EXCLUSIVE_CIPHERS, [ + "TLS_AES_256_GCM_SHA384", + "TLS_AES_128_GCM_SHA256", + "TLS_CHACHA20_POLY1305_SHA256", + "TLS_AES_128_CCM_SHA256", + "TLS_AES_128_CCM_8_SHA256" +]). drop_tls13(SslOpts0) -> - SslOpts1 = case maps:find(versions, SslOpts0) of - error -> SslOpts0; - {ok, Vsns} -> SslOpts0#{versions => (Vsns -- ['tlsv1.3'])} - end, + SslOpts1 = + case maps:find(versions, SslOpts0) of + error -> SslOpts0; + {ok, Vsns} -> SslOpts0#{versions => (Vsns -- ['tlsv1.3'])} + end, case maps:find(ciphers, SslOpts1) of error -> SslOpts1; - {ok, Ciphers} -> - SslOpts1#{ciphers => Ciphers -- ?TLSV13_EXCLUSIVE_CIPHERS} + {ok, Ciphers} -> SslOpts1#{ciphers => Ciphers -- ?TLSV13_EXCLUSIVE_CIPHERS} end. %% @doc The input map is a HOCON decoded result of a struct defined as @@ -233,17 +267,19 @@ drop_tls13(SslOpts0) -> %% When PEM format key or certificate is given, it tries to to save them in the given %% sub-dir in emqx's data_dir, and replace saved file paths for SSL options. -spec ensure_ssl_files(file:name_all(), undefined | map()) -> - {ok, undefined | map()} | {error, map()}. + {ok, undefined | map()} | {error, map()}. ensure_ssl_files(Dir, Opts) -> ensure_ssl_files(Dir, Opts, _DryRun = false). -ensure_ssl_files(_Dir, undefined, _DryRun) -> {ok, undefined}; +ensure_ssl_files(_Dir, undefined, _DryRun) -> + {ok, undefined}; ensure_ssl_files(_Dir, #{<<"enable">> := False} = Opts, _DryRun) when ?IS_FALSE(False) -> {ok, Opts}; ensure_ssl_files(Dir, Opts, DryRun) -> ensure_ssl_files(Dir, Opts, ?SSL_FILE_OPT_NAMES, DryRun). -ensure_ssl_files(_Dir,Opts, [], _DryRun) -> {ok, Opts}; +ensure_ssl_files(_Dir, Opts, [], _DryRun) -> + {ok, Opts}; ensure_ssl_files(Dir, Opts, [Key | Keys], DryRun) -> case ensure_ssl_file(Dir, Key, Opts, maps:get(Key, Opts, undefined), DryRun) of {ok, NewOpts} -> @@ -258,18 +294,25 @@ delete_ssl_files(Dir, NewOpts0, OldOpts0) -> DryRun = true, {ok, NewOpts} = ensure_ssl_files(Dir, NewOpts0, DryRun), {ok, OldOpts} = ensure_ssl_files(Dir, OldOpts0, DryRun), - Get = fun(_K, undefined) -> undefined; - (K, Opts) -> maps:get(K, Opts, undefined) - end, - lists:foreach(fun(Key) -> delete_old_file(Get(Key, NewOpts), Get(Key, OldOpts)) end, - ?SSL_FILE_OPT_NAMES). + Get = fun + (_K, undefined) -> undefined; + (K, Opts) -> maps:get(K, Opts, undefined) + end, + lists:foreach( + fun(Key) -> delete_old_file(Get(Key, NewOpts), Get(Key, OldOpts)) end, + ?SSL_FILE_OPT_NAMES + ). delete_old_file(New, Old) when New =:= Old -> ok; -delete_old_file(_New, _Old = undefined) -> ok; +delete_old_file(_New, _Old = undefined) -> + ok; delete_old_file(_New, Old) -> case filelib:is_regular(Old) andalso file:delete(Old) of - ok -> ok; - false -> ok; %% already deleted + ok -> + ok; + %% already deleted + false -> + ok; {error, Reason} -> ?SLOG(error, #{msg => "failed_to_delete_ssl_file", file_path => Old, reason => Reason}) end. 
@@ -293,12 +336,14 @@ do_ensure_ssl_file(Dir, Key, Opts, MaybePem, DryRun) ->
             end;
         false ->
             case is_valid_pem_file(MaybePem) of
-                true -> {ok, Opts};
+                true ->
+                    {ok, Opts};
                 {error, enoent} when DryRun -> {ok, Opts};
                 {error, Reason} ->
-                    {error, #{pem_check => invalid_pem,
-                              file_read => Reason
-                             }}
+                    {error, #{
+                        pem_check => invalid_pem,
+                        file_read => Reason
+                    }}
             end
     end.
@@ -312,8 +357,10 @@ is_valid_string(Binary) when is_binary(Binary) ->
 %% Check if it is a valid PEM formatted key.
 is_pem(MaybePem) ->
-    try public_key:pem_decode(MaybePem) =/= []
-    catch _ : _ -> false
+    try
+        public_key:pem_decode(MaybePem) =/= []
+    catch
+        _:_ -> false
     end.
 %% Write the pem file to the given dir.
@@ -328,8 +375,7 @@ save_pem_file(Dir, Key, Pem, DryRun) ->
         ok ->
             case file:write_file(Path, Pem) of
                 ok -> {ok, Path};
-                {error, Reason} ->
-                    {error, #{failed_to_write_file => Reason, file_path => Path}}
+                {error, Reason} -> {error, #{failed_to_write_file => Reason, file_path => Path}}
             end;
         {error, Reason} ->
             {error, #{failed_to_create_dir_for => Path, reason => Reason}}
@@ -346,7 +392,7 @@ pem_file_name(Dir, Key, Pem) ->
     filename:join([emqx:certs_dir(), Dir, FileName]).
 hex_str(Bin) ->
-    iolist_to_binary([io_lib:format("~2.16.0b",[X]) || <<X:8>> <= Bin ]).
+    iolist_to_binary([io_lib:format("~2.16.0b", [X]) || <<X:8>> <= Bin]).
 is_valid_pem_file(Path) ->
     case file:read_file(Path) of
@@ -355,7 +401,8 @@ is_valid_pem_file(Path) ->
     end.
 %% @doc This is to return SSL file content in management APIs.
-file_content_as_options(undefined) -> undefined;
+file_content_as_options(undefined) ->
+    undefined;
 file_content_as_options(#{<<"enable">> := False} = SSL) when ?IS_FALSE(False) ->
     {ok, maps:without(?SSL_FILE_OPT_NAMES, SSL)};
 file_content_as_options(#{<<"enable">> := True} = SSL) when ?IS_TRUE(True) ->
@@ -365,15 +412,17 @@ file_content_as_options([], SSL) ->
     {ok, SSL};
 file_content_as_options([Key | Keys], SSL) ->
     case maps:get(Key, SSL, undefined) of
-        undefined -> file_content_as_options(Keys, SSL);
+        undefined ->
+            file_content_as_options(Keys, SSL);
         Path ->
             case file:read_file(Path) of
                 {ok, Bin} ->
                     file_content_as_options(Keys, SSL#{Key => Bin});
                 {error, Reason} ->
-                    {error, #{file_path => Path,
-                              reason => Reason
-                             }}
+                    {error, #{
+                        file_path => Path,
+                        reason => Reason
+                    }}
             end
     end.
diff --git a/apps/emqx/src/emqx_tls_psk.erl b/apps/emqx/src/emqx_tls_psk.erl
index 7be6e9f3f..f350f863e 100644
--- a/apps/emqx/src/emqx_tls_psk.erl
+++ b/apps/emqx/src/emqx_tls_psk.erl
@@ -30,19 +30,25 @@ lookup(psk, PSKIdentity, _UserState) ->
         {ok, SharedSecret} when is_binary(SharedSecret) ->
             {ok, SharedSecret};
         normal ->
-            ?SLOG(info, #{msg => "psk_identity_not_found",
-                          psk_identity => PSKIdentity}),
+            ?SLOG(info, #{
+                msg => "psk_identity_not_found",
+                psk_identity => PSKIdentity
+            }),
             error;
         {error, Reason} ->
-            ?SLOG(warning, #{msg => "psk_identity_not_found",
-                             psk_identity => PSKIdentity,
-                             reason => Reason}),
+            ?SLOG(warning, #{
+                msg => "psk_identity_not_found",
+                psk_identity => PSKIdentity,
+                reason => Reason
+            }),
             error
     catch
         Class:Reason:Stacktrace ->
-            ?SLOG(error, #{msg => "lookup_psk_failed",
-                           class => Class,
-                           reason => Reason,
-                           stacktrace => Stacktrace}),
-            error
+            ?SLOG(error, #{
+                msg => "lookup_psk_failed",
+                class => Class,
+                reason => Reason,
+                stacktrace => Stacktrace
+            }),
+            error
     end.
diff --git a/apps/emqx/src/emqx_topic.erl b/apps/emqx/src/emqx_topic.erl
index dbc32228c..35e69bc3e 100644
--- a/apps/emqx/src/emqx_topic.erl
+++ b/apps/emqx/src/emqx_topic.erl
@@ -17,30 +17,32 @@
 -module(emqx_topic).
 %% APIs
--export([ match/2
-        , validate/1
-        , validate/2
-        , levels/1
-        , tokens/1
-        , words/1
-        , wildcard/1
-        , join/1
-        , prepend/2
-        , feed_var/3
-        , systop/1
-        , parse/1
-        , parse/2
-        ]).
+-export([
+    match/2,
+    validate/1,
+    validate/2,
+    levels/1,
+    tokens/1,
+    words/1,
+    wildcard/1,
+    join/1,
+    prepend/2,
+    feed_var/3,
+    systop/1,
+    parse/1,
+    parse/2
+]).
--export_type([ group/0
-             , topic/0
-             , word/0
-             ]).
+-export_type([
+    group/0,
+    topic/0,
+    word/0
+]).
--type(group() :: binary()).
--type(topic() :: binary()).
--type(word() :: '' | '+' | '#' | binary()).
--type(words() :: list(word())).
+-type group() :: binary().
+-type topic() :: binary().
+-type word() :: '' | '+' | '#' | binary().
+-type words() :: list(word()).
 -define(MAX_TOPIC_LEN, 65535).
@@ -49,7 +51,7 @@
 %%--------------------------------------------------------------------
 %% @doc Is wildcard topic?
--spec(wildcard(topic() | words()) -> true | false).
+-spec wildcard(topic() | words()) -> true | false.
 wildcard(Topic) when is_binary(Topic) ->
     wildcard(words(Topic));
 wildcard([]) ->
@@ -62,9 +64,9 @@ wildcard([_H | T]) ->
     wildcard(T).
 %% @doc Match Topic name with filter.
--spec(match(Name, Filter) -> boolean() when
-      Name :: topic() | words(),
-      Filter :: topic() | words()).
+-spec match(Name, Filter) -> boolean() when
+    Name :: topic() | words(),
+    Filter :: topic() | words().
 match(<<$$, _/binary>>, <<$+, _/binary>>) ->
     false;
 match(<<$$, _/binary>>, <<$#, _/binary>>) ->
@@ -87,13 +89,13 @@ match([], [_H | _T2]) ->
     false.
 %% @doc Validate topic name or filter
--spec(validate(topic() | {name | filter, topic()}) -> true).
+-spec validate(topic() | {name | filter, topic()}) -> true.
 validate(Topic) when is_binary(Topic) ->
     validate(filter, Topic);
 validate({Type, Topic}) when Type =:= name; Type =:= filter ->
     validate(Type, Topic).
--spec(validate(name | filter, topic()) -> true).
+-spec validate(name | filter, topic()) -> true.
 validate(_, <<>>) ->
     error(empty_topic);
 validate(_, Topic) when is_binary(Topic) andalso (size(Topic) > ?MAX_TOPIC_LEN) ->
@@ -102,13 +104,14 @@ validate(filter, Topic) when is_binary(Topic) ->
     validate2(words(Topic));
 validate(name, Topic) when is_binary(Topic) ->
     Words = words(Topic),
-    validate2(Words)
-        andalso (not wildcard(Words))
-        orelse error(topic_name_error).
+    validate2(Words) andalso
+        (not wildcard(Words)) orelse
+        error(topic_name_error).
 validate2([]) ->
     true;
-validate2(['#']) -> % end with '#'
+% end with '#'
+validate2(['#']) ->
     true;
 validate2(['#' | Words]) when length(Words) > 0 ->
     error('topic_invalid_#');
@@ -128,8 +131,10 @@ validate3(<<_/utf8, Rest/binary>>) ->
 %% @doc Prepend a topic prefix.
 %% Ensured to have only one / between prefix and suffix.
-prepend(undefined, W) -> bin(W);
-prepend(<<>>, W) -> bin(W);
+prepend(undefined, W) ->
+    bin(W);
+prepend(<<>>, W) ->
+    bin(W);
 prepend(Parent0, W) ->
     Parent = bin(Parent0),
     case binary:last(Parent) of
@@ -137,40 +142,40 @@ prepend(Parent0, W) ->
         $/ -> <<Parent/binary, (bin(W))/binary>>;
         _ -> <<Parent/binary, $/, (bin(W))/binary>>
     end.
-bin('') -> <<>>;
+bin('') -> <<>>;
 bin('+') -> <<"+">>;
 bin('#') -> <<"#">>;
 bin(B) when is_binary(B) -> B;
 bin(L) when is_list(L) -> list_to_binary(L).
--spec(levels(topic()) -> pos_integer()).
+-spec levels(topic()) -> pos_integer().
 levels(Topic) when is_binary(Topic) ->
     length(tokens(Topic)).
 -compile({inline, [tokens/1]}).
 %% @doc Split topic to tokens.
--spec(tokens(topic()) -> list(binary())).
+-spec tokens(topic()) -> list(binary()).
 tokens(Topic) ->
     binary:split(Topic, <<"/">>, [global]).
 %% @doc Split Topic Path to Words
--spec(words(topic()) -> words()).
+-spec words(topic()) -> words().
 words(Topic) when is_binary(Topic) ->
     [word(W) || W <- tokens(Topic)].
-word(<<>>) -> '';
+word(<<>>) -> '';
 word(<<"+">>) -> '+';
 word(<<"#">>) -> '#';
-word(Bin) -> Bin.
+word(Bin) -> Bin.
 %% @doc '$SYS' Topic.
--spec(systop(atom() | string() | binary()) -> topic()).
+-spec systop(atom() | string() | binary()) -> topic().
 systop(Name) when is_atom(Name); is_list(Name) ->
     iolist_to_binary(lists:concat(["$SYS/brokers/", node(), "/", Name]));
 systop(Name) when is_binary(Name) ->
     iolist_to_binary(["$SYS/brokers/", atom_to_list(node()), "/", Name]).
--spec(feed_var(binary(), binary(), binary()) -> binary()).
+-spec feed_var(binary(), binary(), binary()) -> binary().
 feed_var(Var, Val, Topic) ->
     feed_var(Var, Val, words(Topic), []).
 feed_var(_Var, _Val, [], Acc) ->
@@ -180,27 +185,31 @@ feed_var(Var, Val, [Var | Words], Acc) ->
 feed_var(Var, Val, [W | Words], Acc) ->
     feed_var(Var, Val, Words, [W | Acc]).
--spec(join(list(binary())) -> binary()).
+-spec join(list(binary())) -> binary().
 join([]) -> <<>>;
 join([W]) -> bin(W);
 join(Words) ->
     {_, Bin} = lists:foldr(
-        fun(W, {true, Tail}) ->
-            {false, <<W/binary, Tail/binary>>};
-        (W, {false, Tail}) ->
-            {false, <<W/binary, "/", Tail/binary>>}
-        end, {true, <<>>}, [bin(W) || W <- Words]),
+        fun
+            (W, {true, Tail}) ->
+                {false, <<W/binary, Tail/binary>>};
+            (W, {false, Tail}) ->
+                {false, <<W/binary, "/", Tail/binary>>}
+        end,
+        {true, <<>>},
+        [bin(W) || W <- Words]
+    ),
     Bin.
--spec(parse(topic() | {topic(), map()}) -> {topic(), #{share => binary()}}).
+-spec parse(topic() | {topic(), map()}) -> {topic(), #{share => binary()}}.
 parse(TopicFilter) when is_binary(TopicFilter) ->
     parse(TopicFilter, #{});
 parse({TopicFilter, Options}) when is_binary(TopicFilter) ->
     parse(TopicFilter, Options).
--spec(parse(topic(), map()) -> {topic(), map()}).
+-spec parse(topic(), map()) -> {topic(), map()}.
 parse(TopicFilter = <<"$queue/", _/binary>>, #{share := _Group}) ->
     error({invalid_topic_filter, TopicFilter});
 parse(TopicFilter = <<"$share/", _/binary>>, #{share := _Group}) ->
@@ -209,7 +218,8 @@ parse(<<"$queue/", TopicFilter/binary>>, Options) ->
     parse(TopicFilter, Options#{share => <<"$queue">>});
 parse(TopicFilter = <<"$share/", Rest/binary>>, Options) ->
     case binary:split(Rest, <<"/">>) of
-        [_Any] -> error({invalid_topic_filter, TopicFilter});
+        [_Any] ->
+            error({invalid_topic_filter, TopicFilter});
         [ShareName, Filter] ->
             case binary:match(ShareName, [<<"+">>, <<"#">>]) of
                 nomatch -> parse(Filter, Options#{share => ShareName});
diff --git a/apps/emqx/src/emqx_trie.erl b/apps/emqx/src/emqx_trie.erl
index baec7c1cf..013cb8f9b 100644
--- a/apps/emqx/src/emqx_trie.erl
+++ b/apps/emqx/src/emqx_trie.erl
@@ -19,26 +19,29 @@
 -include("emqx.hrl").
 %% Mnesia bootstrap
--export([ mnesia/1
-        , create_session_trie/1
-        ]).
+-export([
+    mnesia/1,
+    create_session_trie/1
+]).
 -boot_mnesia({mnesia, [boot]}).
 %% Trie APIs
--export([ insert/1
-        , insert_session/1
-        , match/1
-        , match_session/1
-        , delete/1
-        , delete_session/1
-        ]).
+-export([
+    insert/1,
+    insert_session/1,
+    match/1,
+    match_session/1,
+    delete/1,
+    delete_session/1
+]).
--export([ empty/0
-        , empty_session/0
-        , lock_tables/0
-        , lock_session_tables/0
-        ]).
+-export([
+    empty/0,
+    empty_session/0,
+    lock_tables/0,
+    lock_session_tables/0
+]).
 -export([is_compact/0, set_compact/1]).
@@ -52,79 +55,98 @@
 -define(PREFIX(Prefix), {Prefix, 0}).
 -define(TOPIC(Topic), {Topic, 1}).
--record(?TRIE,
-        { key :: ?TOPIC(binary()) | ?PREFIX(binary())
-        , count = 0 :: non_neg_integer()
-        }).
+-record(?TRIE, { + key :: ?TOPIC(binary()) | ?PREFIX(binary()), + count = 0 :: non_neg_integer() +}). %%-------------------------------------------------------------------- %% Mnesia bootstrap %%-------------------------------------------------------------------- %% @doc Create or replicate topics table. --spec(mnesia(boot | copy) -> ok). +-spec mnesia(boot | copy) -> ok. mnesia(boot) -> %% Optimize storage - StoreProps = [{ets, [{read_concurrency, true}, - {write_concurrency, true} - ]}], + StoreProps = [ + {ets, [ + {read_concurrency, true}, + {write_concurrency, true} + ]} + ], ok = mria:create_table(?TRIE, [ - {rlog_shard, ?ROUTE_SHARD}, - {record_name, ?TRIE}, - {attributes, record_info(fields, ?TRIE)}, - {type, ordered_set}, - {storage_properties, StoreProps}]). + {rlog_shard, ?ROUTE_SHARD}, + {record_name, ?TRIE}, + {attributes, record_info(fields, ?TRIE)}, + {type, ordered_set}, + {storage_properties, StoreProps} + ]). create_session_trie(disc) -> - StoreProps = [{ets, [{read_concurrency, true}, - {write_concurrency, true} - ]}], - ok = mria:create_table(?SESSION_DISC_TRIE, - [{rlog_shard, ?ROUTE_SHARD}, - {storage, disc_copies}, - {record_name, ?TRIE}, - {attributes, record_info(fields, ?TRIE)}, - {type, ordered_set}, - {storage_properties, StoreProps}]); + StoreProps = [ + {ets, [ + {read_concurrency, true}, + {write_concurrency, true} + ]} + ], + ok = mria:create_table( + ?SESSION_DISC_TRIE, + [ + {rlog_shard, ?ROUTE_SHARD}, + {storage, disc_copies}, + {record_name, ?TRIE}, + {attributes, record_info(fields, ?TRIE)}, + {type, ordered_set}, + {storage_properties, StoreProps} + ] + ); create_session_trie(ram) -> - StoreProps = [{ets, [{read_concurrency, true}, - {write_concurrency, true} - ]}], - ok = mria:create_table(?SESSION_RAM_TRIE, - [{rlog_shard, ?ROUTE_SHARD}, - {storage, ram_copies}, - {record_name, ?TRIE}, - {attributes, record_info(fields, ?TRIE)}, - {type, ordered_set}, - {storage_properties, StoreProps}]). + StoreProps = [ + {ets, [ + {read_concurrency, true}, + {write_concurrency, true} + ]} + ], + ok = mria:create_table( + ?SESSION_RAM_TRIE, + [ + {rlog_shard, ?ROUTE_SHARD}, + {storage, ram_copies}, + {record_name, ?TRIE}, + {attributes, record_info(fields, ?TRIE)}, + {type, ordered_set}, + {storage_properties, StoreProps} + ] + ). %%-------------------------------------------------------------------- %% Topics APIs %%-------------------------------------------------------------------- %% @doc Insert a topic filter into the trie. --spec(insert(emqx_types:topic()) -> ok). +-spec insert(emqx_types:topic()) -> ok. insert(Topic) when is_binary(Topic) -> insert(Topic, ?TRIE). --spec(insert_session(emqx_topic:topic()) -> ok). +-spec insert_session(emqx_topic:topic()) -> ok. insert_session(Topic) when is_binary(Topic) -> insert(Topic, session_trie()). insert(Topic, Trie) when is_binary(Topic) -> {TopicKey, PrefixKeys} = make_keys(Topic), case mnesia:wread({Trie, TopicKey}) of - [_] -> ok; %% already inserted + %% already inserted + [_] -> ok; [] -> lists:foreach(fun(Key) -> insert_key(Key, Trie) end, [TopicKey | PrefixKeys]) end. %% @doc Delete a topic filter from the trie. --spec(delete(emqx_types:topic()) -> ok). +-spec delete(emqx_types:topic()) -> ok. delete(Topic) when is_binary(Topic) -> delete(Topic, ?TRIE). %% @doc Delete a topic filter from the trie. --spec(delete_session(emqx_topic:topic()) -> ok). +-spec delete_session(emqx_topic:topic()) -> ok. delete_session(Topic) when is_binary(Topic) -> delete(Topic, session_trie()). 
@@ -136,11 +158,11 @@ delete(Topic, Trie) when is_binary(Topic) -> end. %% @doc Find trie nodes that matches the topic name. --spec(match(emqx_types:topic()) -> list(emqx_types:topic())). +-spec match(emqx_types:topic()) -> list(emqx_types:topic()). match(Topic) when is_binary(Topic) -> match(Topic, ?TRIE). --spec(match_session(emqx_topic:topic()) -> list(emqx_topic:topic())). +-spec match_session(emqx_topic:topic()) -> list(emqx_topic:topic()). match_session(Topic) when is_binary(Topic) -> match(Topic, session_trie()). @@ -161,7 +183,7 @@ match(Topic, Trie) when is_binary(Topic) -> end. %% @doc Is the trie empty? --spec(empty() -> boolean()). +-spec empty() -> boolean(). empty() -> empty(?TRIE). empty_session() -> @@ -184,10 +206,9 @@ lock_session_tables() -> session_trie() -> case emqx_persistent_session:storage_type() of disc -> ?SESSION_DISC_TRIE; - ram -> ?SESSION_RAM_TRIE + ram -> ?SESSION_RAM_TRIE end. - make_keys(Topic) -> Words = emqx_topic:words(Topic), {?TOPIC(Topic), [?PREFIX(Prefix) || Prefix <- make_prefixes(Words)]}. @@ -207,8 +228,10 @@ compact(Words) -> do_compact(Words) -> do_compact(Words, empty, []). -do_compact([], empty, Acc) -> lists:reverse(Acc); -do_compact([], Seg, Acc) -> lists:reverse([Seg | Acc]); +do_compact([], empty, Acc) -> + lists:reverse(Acc); +do_compact([], Seg, Acc) -> + lists:reverse([Seg | Acc]); do_compact([Word | Words], Seg, Acc) when Word =:= '+' orelse Word =:= '#' -> do_compact(Words, empty, [join(Seg, Word) | Acc]); do_compact([Word | Words], Seg, Acc) -> @@ -221,8 +244,10 @@ join(empty, Word) -> Word; join(Prefix, Word) -> emqx_topic:join([Prefix, Word]). make_prefixes(Words) -> - lists:map(fun emqx_topic:join/1, - make_prefixes(compact(Words), [], [])). + lists:map( + fun emqx_topic:join/1, + make_prefixes(compact(Words), [], []) + ). make_prefixes([_LastWord], _Prefix, Acc) -> lists:map(fun lists:reverse/1, Acc); @@ -232,12 +257,13 @@ make_prefixes([H | T], Prefix0, Acc0) -> make_prefixes(T, Prefix, Acc). insert_key(Key, Trie) -> - T = case mnesia:wread({Trie, Key}) of + T = + case mnesia:wread({Trie, Key}) of [#?TRIE{count = C} = T1] -> T1#?TRIE{count = C + 1}; - [] -> + [] -> #?TRIE{key = Key, count = 1} - end, + end, ok = mnesia:write(Trie, T, write). delete_key(Key, Trie) -> @@ -252,7 +278,7 @@ delete_key(Key, Trie) -> %% micro-optimization: no need to lookup when topic is not wildcard %% because we only insert wildcards to emqx_trie -lookup_topic(_Topic,_Trie, false) -> []; +lookup_topic(_Topic, _Trie, false) -> []; lookup_topic(Topic, Trie, true) -> lookup_topic(Topic, Trie). lookup_topic(Topic, Trie) when is_binary(Topic) -> @@ -261,7 +287,9 @@ lookup_topic(Topic, Trie) when is_binary(Topic) -> [] -> [] end. -has_prefix(empty, _Trie) -> true; %% this is the virtual tree root +%% this is the virtual tree root +has_prefix(empty, _Trie) -> + true; has_prefix(Prefix, Trie) -> case ets:lookup(Trie, ?PREFIX(Prefix)) of [#?TRIE{count = C}] -> C > 0; @@ -286,9 +314,11 @@ do_match(Words, Prefix, Trie) -> end. match_no_compact([], Topic, Trie, IsWildcard, Acc) -> - 'match_#'(Topic, Trie) ++ %% try match foo/+/# or foo/bar/# - lookup_topic(Topic, Trie, IsWildcard) ++ %% e.g. foo/+ - Acc; + %% try match foo/+/# or foo/bar/# + 'match_#'(Topic, Trie) ++ + %% e.g. foo/+ + lookup_topic(Topic, Trie, IsWildcard) ++ + Acc; match_no_compact([Word | Words], Prefix, Trie, IsWildcard, Acc0) -> case has_prefix(Prefix, Trie) of true -> @@ -312,9 +342,11 @@ match_no_compact([Word | Words], Prefix, Trie, IsWildcard, Acc0) -> end. 
match_compact([], Topic, Trie, IsWildcard, Acc) -> - 'match_#'(Topic, Trie) ++ %% try match foo/bar/# - lookup_topic(Topic, Trie, IsWildcard) ++ %% try match foo/bar - Acc; + %% try match foo/bar/# + 'match_#'(Topic, Trie) ++ + %% try match foo/bar + lookup_topic(Topic, Trie, IsWildcard) ++ + Acc; match_compact([Word | Words], Prefix, Trie, IsWildcard, Acc0) -> Acc1 = 'match_#'(Prefix, Trie) ++ Acc0, Acc = match_compact(Words, join(Prefix, Word), Trie, IsWildcard, Acc1), @@ -342,22 +374,21 @@ set_compact(Bool) -> -include_lib("eunit/include/eunit.hrl"). make_keys_test_() -> - [{"no compact", fun() -> with_compact_flag(false, fun make_keys_no_compact/0) end}, - {"compact", fun() -> with_compact_flag(true, fun make_keys_compact/0) end} + [ + {"no compact", fun() -> with_compact_flag(false, fun make_keys_no_compact/0) end}, + {"compact", fun() -> with_compact_flag(true, fun make_keys_compact/0) end} ]. make_keys_no_compact() -> ?assertEqual({?TOPIC(<<"#">>), []}, make_keys(<<"#">>)), - ?assertEqual({?TOPIC(<<"a/+">>), - [?PREFIX(<<"a">>)]}, make_keys(<<"a/+">>)), + ?assertEqual({?TOPIC(<<"a/+">>), [?PREFIX(<<"a">>)]}, make_keys(<<"a/+">>)), ?assertEqual({?TOPIC(<<"+">>), []}, make_keys(<<"+">>)). make_keys_compact() -> ?assertEqual({?TOPIC(<<"#">>), []}, make_keys(<<"#">>)), ?assertEqual({?TOPIC(<<"a/+">>), []}, make_keys(<<"a/+">>)), ?assertEqual({?TOPIC(<<"+">>), []}, make_keys(<<"+">>)), - ?assertEqual({?TOPIC(<<"a/+/c">>), - [?PREFIX(<<"a/+">>)]}, make_keys(<<"a/+/c">>)). + ?assertEqual({?TOPIC(<<"a/+/c">>), [?PREFIX(<<"a/+">>)]}, make_keys(<<"a/+/c">>)). words(T) -> emqx_topic:words(T). @@ -366,19 +397,24 @@ make_prefixes_t(Topic) -> make_prefixes(words(Topic)). with_compact_flag(IsCompact, F) -> OldV = is_compact(), set_compact(IsCompact), - try F() - after set_compact(OldV) + try + F() + after + set_compact(OldV) end. make_prefixes_test_() -> - [{"no compact", fun() -> with_compact_flag(false, fun make_prefixes_no_compact/0) end}, - {"compact", fun() -> with_compact_flag(true, fun make_prefixes_compact/0) end} + [ + {"no compact", fun() -> with_compact_flag(false, fun make_prefixes_no_compact/0) end}, + {"compact", fun() -> with_compact_flag(true, fun make_prefixes_compact/0) end} ]. make_prefixes_no_compact() -> ?assertEqual([<<"a/b">>, <<"a">>], make_prefixes_t(<<"a/b/+">>)), - ?assertEqual([<<"a/b/+/c">>, <<"a/b/+">>, <<"a/b">>, <<"a">>], - make_prefixes_t(<<"a/b/+/c/#">>)). + ?assertEqual( + [<<"a/b/+/c">>, <<"a/b/+">>, <<"a/b">>, <<"a">>], + make_prefixes_t(<<"a/b/+/c/#">>) + ). make_prefixes_compact() -> ?assertEqual([], make_prefixes_t(<<"a/b/+">>)), @@ -389,10 +425,13 @@ do_compact_test() -> ?assertEqual([<<"/#">>], do_compact(words(<<"/#">>))), ?assertEqual([<<"a/b/+">>, <<"c">>], do_compact(words(<<"a/b/+/c">>))), ?assertEqual([<<"a/+">>, <<"+">>, <<"b">>], do_compact(words(<<"a/+/+/b">>))), - ?assertEqual([<<"a/+">>, <<"+">>, <<"+">>, <<"+">>, <<"b">>], - do_compact(words(<<"a/+/+/+/+/b">>))), + ?assertEqual( + [<<"a/+">>, <<"+">>, <<"+">>, <<"+">>, <<"b">>], + do_compact(words(<<"a/+/+/+/+/b">>)) + ), ok. clear_tables() -> mria:clear_table(?TRIE). --endif. % TEST +% TEST +-endif. diff --git a/apps/emqx/src/emqx_types.erl b/apps/emqx/src/emqx_types.erl index 9963aa4ee..9373f02a2 100644 --- a/apps/emqx/src/emqx_types.erl +++ b/apps/emqx/src/emqx_types.erl @@ -20,199 +20,227 @@ -include("emqx_mqtt.hrl"). -include("types.hrl"). --export_type([ proto_ver/0 - , qos/0 - , qos_name/0 - ]). +-export_type([ + proto_ver/0, + qos/0, + qos_name/0 +]). 
--export_type([ zone/0 - , pubsub/0 - , topic/0 - , subid/0 - ]). +-export_type([ + zone/0, + pubsub/0, + topic/0, + subid/0 +]). --export_type([ socktype/0 - , sockstate/0 - , conninfo/0 - , clientinfo/0 - , clientid/0 - , username/0 - , password/0 - , peerhost/0 - , peername/0 - , protocol/0 - ]). +-export_type([ + socktype/0, + sockstate/0, + conninfo/0, + clientinfo/0, + clientid/0, + username/0, + password/0, + peerhost/0, + peername/0, + protocol/0 +]). --export_type([ connack/0 - , subopts/0 - , reason_code/0 - , alias_id/0 - , topic_aliases/0 - , properties/0 - ]). +-export_type([ + connack/0, + subopts/0, + reason_code/0, + alias_id/0, + topic_aliases/0, + properties/0 +]). --export_type([ packet_id/0 - , packet_type/0 - , packet/0 - ]). +-export_type([ + packet_id/0, + packet_type/0, + packet/0 +]). --export_type([ subscription/0 - , subscriber/0 - , topic_filters/0 - ]). +-export_type([ + subscription/0, + subscriber/0, + topic_filters/0 +]). --export_type([ payload/0 - , message/0 - , flag/0 - , flags/0 - , headers/0 - ]). +-export_type([ + payload/0, + message/0, + flag/0, + flags/0, + headers/0 +]). --export_type([ deliver/0 - , delivery/0 - , publish_result/0 - , deliver_result/0 - ]). +-export_type([ + deliver/0, + delivery/0, + publish_result/0, + deliver_result/0 +]). --export_type([ route/0 - , route_entry/0 - ]). +-export_type([ + route/0, + route_entry/0 +]). --export_type([ plugin/0 - , banned/0 - , command/0 - ]). +-export_type([ + plugin/0, + banned/0, + command/0 +]). --export_type([ caps/0 - , attrs/0 - , infos/0 - , stats/0 - ]). +-export_type([ + caps/0, + attrs/0, + infos/0, + stats/0 +]). -export_type([oom_policy/0]). --type(proto_ver() :: ?MQTT_PROTO_V3 - | ?MQTT_PROTO_V4 - | ?MQTT_PROTO_V5 - | non_neg_integer() - | binary() % For lwm2m, mqtt-sn... - ). +-type proto_ver() :: + ?MQTT_PROTO_V3 + | ?MQTT_PROTO_V4 + | ?MQTT_PROTO_V5 + | non_neg_integer() + % For lwm2m, mqtt-sn... + | binary(). --type(qos() :: ?QOS_0 | ?QOS_1 | ?QOS_2). --type(qos_name() :: qos0 | at_most_once | - qos1 | at_least_once | - qos2 | exactly_once). +-type qos() :: ?QOS_0 | ?QOS_1 | ?QOS_2. +-type qos_name() :: + qos0 + | at_most_once + | qos1 + | at_least_once + | qos2 + | exactly_once. --type(zone() :: atom()). --type(pubsub() :: publish | subscribe). --type(topic() :: emqx_topic:topic()). --type(subid() :: binary() | atom()). +-type zone() :: atom(). +-type pubsub() :: publish | subscribe. +-type topic() :: emqx_topic:topic(). +-type subid() :: binary() | atom(). --type(socktype() :: tcp | udp | ssl | proxy | atom()). --type(sockstate() :: idle | running | blocked | closed). --type(conninfo() :: #{socktype := socktype(), - sockname := peername(), - peername := peername(), - peercert := nossl | undefined | esockd_peercert:peercert(), - conn_mod := module(), - proto_name => binary(), - proto_ver => proto_ver(), - clean_start => boolean(), - clientid => clientid(), - username => username(), - conn_props => properties(), - connected => boolean(), - connected_at => non_neg_integer(), - disconnected_at => non_neg_integer(), - keepalive => 0..16#FFFF, - receive_maximum => non_neg_integer(), - expiry_interval => non_neg_integer(), - atom() => term() - }). 
--type(clientinfo() :: #{zone := maybe(zone()), - protocol := protocol(), - peerhost := peerhost(), - sockport := non_neg_integer(), - clientid := clientid(), - username := username(), - is_bridge := boolean(), - is_superuser := boolean(), - mountpoint := maybe(binary()), - ws_cookie => maybe(list()), - password => maybe(binary()), - auth_result => auth_result(), - anonymous => boolean(), - cn => binary(), - dn => binary(), - atom() => term() - }). --type(clientid() :: binary() | atom()). --type(username() :: maybe(binary())). --type(password() :: maybe(binary())). --type(peerhost() :: inet:ip_address()). --type(peername() :: {inet:ip_address(), inet:port_number()} - | inet:returned_non_ip_address()). --type(protocol() :: mqtt | 'mqtt-sn' | coap | lwm2m | stomp | none | atom()). --type(auth_result() :: success - | client_identifier_not_valid - | bad_username_or_password - | bad_clientid_or_password - | not_authorized - | server_unavailable - | server_busy - | banned - | bad_authentication_method). +-type socktype() :: tcp | udp | ssl | proxy | atom(). +-type sockstate() :: idle | running | blocked | closed. +-type conninfo() :: #{ + socktype := socktype(), + sockname := peername(), + peername := peername(), + peercert := nossl | undefined | esockd_peercert:peercert(), + conn_mod := module(), + proto_name => binary(), + proto_ver => proto_ver(), + clean_start => boolean(), + clientid => clientid(), + username => username(), + conn_props => properties(), + connected => boolean(), + connected_at => non_neg_integer(), + disconnected_at => non_neg_integer(), + keepalive => 0..16#FFFF, + receive_maximum => non_neg_integer(), + expiry_interval => non_neg_integer(), + atom() => term() +}. +-type clientinfo() :: #{ + zone := maybe(zone()), + protocol := protocol(), + peerhost := peerhost(), + sockport := non_neg_integer(), + clientid := clientid(), + username := username(), + is_bridge := boolean(), + is_superuser := boolean(), + mountpoint := maybe(binary()), + ws_cookie => maybe(list()), + password => maybe(binary()), + auth_result => auth_result(), + anonymous => boolean(), + cn => binary(), + dn => binary(), + atom() => term() +}. +-type clientid() :: binary() | atom(). +-type username() :: maybe(binary()). +-type password() :: maybe(binary()). +-type peerhost() :: inet:ip_address(). +-type peername() :: + {inet:ip_address(), inet:port_number()} + | inet:returned_non_ip_address(). +-type protocol() :: mqtt | 'mqtt-sn' | coap | lwm2m | stomp | none | atom(). +-type auth_result() :: + success + | client_identifier_not_valid + | bad_username_or_password + | bad_clientid_or_password + | not_authorized + | server_unavailable + | server_busy + | banned + | bad_authentication_method. --type(packet_type() :: ?RESERVED..?AUTH). --type(connack() :: ?CONNACK_ACCEPT..?CONNACK_AUTH). --type(subopts() :: #{rh := 0 | 1 | 2, - rap := 0 | 1, - nl := 0 | 1, - qos := qos(), - share => binary(), - atom() => term() - }). --type(reason_code() :: 0..16#FF). --type(packet_id() :: 1..16#FFFF). --type(alias_id() :: 0..16#FFFF). --type(topic_aliases() :: #{inbound => maybe(map()), - outbound => maybe(map())}). --type(properties() :: #{atom() => term()}). --type(topic_filters() :: list({topic(), subopts()})). --type(packet() :: #mqtt_packet{}). +-type packet_type() :: ?RESERVED..?AUTH. +-type connack() :: ?CONNACK_ACCEPT..?CONNACK_AUTH. +-type subopts() :: #{ + rh := 0 | 1 | 2, + rap := 0 | 1, + nl := 0 | 1, + qos := qos(), + share => binary(), + atom() => term() +}. +-type reason_code() :: 0..16#FF. 
+-type packet_id() :: 1..16#FFFF. +-type alias_id() :: 0..16#FFFF. +-type topic_aliases() :: #{ + inbound => maybe(map()), + outbound => maybe(map()) +}. +-type properties() :: #{atom() => term()}. +-type topic_filters() :: list({topic(), subopts()}). +-type packet() :: #mqtt_packet{}. --type(subscription() :: #subscription{}). --type(subscriber() :: {pid(), subid()}). --type(payload() :: binary() | iodata()). --type(message() :: #message{}). --type(flag() :: sys | dup | retain | atom()). --type(flags() :: #{flag() := boolean()}). --type(headers() :: #{proto_ver => proto_ver(), - protocol => protocol(), - username => username(), - peerhost => peerhost(), - properties => properties(), - allow_publish => boolean(), - atom() => term()}). +-type subscription() :: #subscription{}. +-type subscriber() :: {pid(), subid()}. +-type payload() :: binary() | iodata(). +-type message() :: #message{}. +-type flag() :: sys | dup | retain | atom(). +-type flags() :: #{flag() := boolean()}. +-type headers() :: #{ + proto_ver => proto_ver(), + protocol => protocol(), + username => username(), + peerhost => peerhost(), + properties => properties(), + allow_publish => boolean(), + atom() => term() +}. --type(banned() :: #banned{}). --type(deliver() :: {deliver, topic(), message()}). --type(delivery() :: #delivery{}). --type(deliver_result() :: ok | {ok, non_neg_integer()} | {error, term()}). --type(publish_result() :: [{node(), topic(), deliver_result()} | - {share, topic(), deliver_result()}]). --type(route() :: #route{}). --type(group() :: emqx_topic:group()). --type(route_entry() :: {topic(), node()} | {topic, group()}). --type(plugin() :: #plugin{}). --type(command() :: #command{}). +-type banned() :: #banned{}. +-type deliver() :: {deliver, topic(), message()}. +-type delivery() :: #delivery{}. +-type deliver_result() :: ok | {ok, non_neg_integer()} | {error, term()}. +-type publish_result() :: [ + {node(), topic(), deliver_result()} + | {share, topic(), deliver_result()} +]. +-type route() :: #route{}. +-type group() :: emqx_topic:group(). +-type route_entry() :: {topic(), node()} | {topic, group()}. +-type plugin() :: #plugin{}. +-type command() :: #command{}. --type(caps() :: emqx_mqtt_caps:caps()). --type(attrs() :: #{atom() => term()}). --type(infos() :: #{atom() => term()}). --type(stats() :: [{atom(), term()}]). +-type caps() :: emqx_mqtt_caps:caps(). +-type attrs() :: #{atom() => term()}. +-type infos() :: #{atom() => term()}. +-type stats() :: [{atom(), term()}]. --type(oom_policy() :: #{max_message_queue_len => non_neg_integer(), - max_heap_size => non_neg_integer(), - enable => boolean() - }). +-type oom_policy() :: #{ + max_message_queue_len => non_neg_integer(), + max_heap_size => non_neg_integer(), + enable => boolean() +}. diff --git a/apps/emqx/src/emqx_vm.erl b/apps/emqx/src/emqx_vm.erl index 84f1852f1..61cc55e47 100644 --- a/apps/emqx/src/emqx_vm.erl +++ b/apps/emqx/src/emqx_vm.erl @@ -16,31 +16,34 @@ -module(emqx_vm). --export([ schedulers/0 - , scheduler_usage/1 - , system_info_keys/0 - , get_system_info/0 - , get_system_info/1 - , get_memory/0 - , get_memory/2 - , mem_info/0 - , loads/0 - ]). +-export([ + schedulers/0, + scheduler_usage/1, + system_info_keys/0, + get_system_info/0, + get_system_info/1, + get_memory/0, + get_memory/2, + mem_info/0, + loads/0 +]). --export([ process_info_keys/0 - , get_process_info/0 - , get_process_info/1 - , process_gc_info_keys/0 - , get_process_gc_info/0 - , get_process_gc_info/1 - , get_process_limit/0 - ]). 
+-export([ + process_info_keys/0, + get_process_info/0, + get_process_info/1, + process_gc_info_keys/0, + get_process_gc_info/0, + get_process_gc_info/1, + get_process_limit/0 +]). --export([ get_ets_list/0 - , get_ets_info/0 - , get_ets_info/1 - , get_otp_version/0 - ]). +-export([ + get_ets_list/0, + get_ets_info/0, + get_ets_info/1, + get_otp_version/0 +]). -export([cpu_util/0]). @@ -49,126 +52,132 @@ -compile(nowarn_export_all). -endif. --define(UTIL_ALLOCATORS, [temp_alloc, - eheap_alloc, - binary_alloc, - ets_alloc, - driver_alloc, - sl_alloc, - ll_alloc, - fix_alloc, - literal_alloc, - std_alloc - ]). +-define(UTIL_ALLOCATORS, [ + temp_alloc, + eheap_alloc, + binary_alloc, + ets_alloc, + driver_alloc, + sl_alloc, + ll_alloc, + fix_alloc, + literal_alloc, + std_alloc +]). --define(PROCESS_INFO_KEYS, [initial_call, - current_stacktrace, - registered_name, - status, - message_queue_len, - group_leader, - priority, - trap_exit, - reductions, - %%binary, - last_calls, - catchlevel, - trace, - suspending, - sequential_trace_token, - error_handler - ]). +-define(PROCESS_INFO_KEYS, [ + initial_call, + current_stacktrace, + registered_name, + status, + message_queue_len, + group_leader, + priority, + trap_exit, + reductions, + %%binary, + last_calls, + catchlevel, + trace, + suspending, + sequential_trace_token, + error_handler +]). --define(PROCESS_GC_KEYS, [memory, - total_heap_size, - heap_size, - stack_size, - min_heap_size - ]). +-define(PROCESS_GC_KEYS, [ + memory, + total_heap_size, + heap_size, + stack_size, + min_heap_size +]). --define(SYSTEM_INFO_KEYS, [allocated_areas, - allocator, - alloc_util_allocators, - build_type, - check_io, - compat_rel, - creation, - debug_compiled, - dist, - dist_ctrl, - driver_version, - elib_malloc, - dist_buf_busy_limit, - %fullsweep_after, % included in garbage_collection - garbage_collection, - %global_heaps_size, % deprecated - heap_sizes, - heap_type, - info, - kernel_poll, - loaded, - logical_processors, - logical_processors_available, - logical_processors_online, - machine, - %min_heap_size, % included in garbage_collection - %min_bin_vheap_size, % included in garbage_collection - modified_timing_level, - multi_scheduling, - multi_scheduling_blockers, - otp_release, - port_count, - process_count, - process_limit, - scheduler_bind_type, - scheduler_bindings, - scheduler_id, - schedulers, - schedulers_online, - smp_support, - system_version, - system_architecture, - threads, - thread_pool_size, - trace_control_word, - update_cpu_info, - version, - wordsize - ]). +-define(SYSTEM_INFO_KEYS, [ + allocated_areas, + allocator, + alloc_util_allocators, + build_type, + check_io, + compat_rel, + creation, + debug_compiled, + dist, + dist_ctrl, + driver_version, + elib_malloc, + dist_buf_busy_limit, + %fullsweep_after, % included in garbage_collection + garbage_collection, + %global_heaps_size, % deprecated + heap_sizes, + heap_type, + info, + kernel_poll, + loaded, + logical_processors, + logical_processors_available, + logical_processors_online, + machine, + %min_heap_size, % included in garbage_collection + %min_bin_vheap_size, % included in garbage_collection + modified_timing_level, + multi_scheduling, + multi_scheduling_blockers, + otp_release, + port_count, + process_count, + process_limit, + scheduler_bind_type, + scheduler_bindings, + scheduler_id, + schedulers, + schedulers_online, + smp_support, + system_version, + system_architecture, + threads, + thread_pool_size, + trace_control_word, + update_cpu_info, + version, + wordsize +]). 
--define(SOCKET_OPTS, [active, - broadcast, - buffer, - delay_send, - dontroute, - exit_on_close, - header, - high_watermark, - ipv6_v6only, - keepalive, - linger, - low_watermark, - mode, - nodelay, - packet, - packet_size, - priority, - read_packets, - recbuf, - reuseaddr, - send_timeout, - send_timeout_close, - sndbuf, - tos - ]). +-define(SOCKET_OPTS, [ + active, + broadcast, + buffer, + delay_send, + dontroute, + exit_on_close, + header, + high_watermark, + ipv6_v6only, + keepalive, + linger, + low_watermark, + mode, + nodelay, + packet, + packet_size, + priority, + read_packets, + recbuf, + reuseaddr, + send_timeout, + send_timeout_close, + sndbuf, + tos +]). schedulers() -> erlang:system_info(schedulers). loads() -> - [{load1, ftos(avg1()/256)}, - {load5, ftos(avg5()/256)}, - {load15, ftos(avg15()/256)} + [ + {load1, ftos(avg1() / 256)}, + {load5, ftos(avg5() / 256)}, + {load15, ftos(avg15() / 256)} ]. system_info_keys() -> ?SYSTEM_INFO_KEYS. @@ -177,16 +186,24 @@ get_system_info() -> [{Key, format_system_info(Key, get_system_info(Key))} || Key <- ?SYSTEM_INFO_KEYS]. get_system_info(Key) -> - try erlang:system_info(Key) catch error:badarg-> undefined end. + try + erlang:system_info(Key) + catch + error:badarg -> undefined + end. format_system_info(allocated_areas, List) -> [convert_allocated_areas(Value) || Value <- List]; format_system_info(allocator, {_, _, _, List}) -> List; format_system_info(dist_ctrl, List) -> - lists:map(fun({Node, Socket}) -> - {ok, Stats} = inet:getstat(Socket), {Node, Stats} - end, List); + lists:map( + fun({Node, Socket}) -> + {ok, Stats} = inet:getstat(Socket), + {Node, Stats} + end, + List + ); format_system_info(driver_version, Value) -> list_to_binary(Value); format_system_info(machine, Value) -> @@ -216,7 +233,8 @@ mem_info() -> [{total_memory, Total}, {used_memory, Total - Free}]. ftos(F) -> - S = io_lib:format("~.2f", [F]), S. + S = io_lib:format("~.2f", [F]), + S. %%%% erlang vm scheduler_usage fun copied from recon scheduler_usage(Interval) when is_integer(Interval) -> @@ -231,31 +249,40 @@ scheduler_usage(Interval) when is_integer(Interval) -> scheduler_usage_diff(First, Last). scheduler_usage_diff(First, Last) -> - lists:map(fun({{I, A0, T0}, {I, A1, T1}}) -> - {I, (A1 - A0)/(T1 - T0)} - end, lists:zip(lists:sort(First), lists:sort(Last))). + lists:map( + fun({{I, A0, T0}, {I, A1, T1}}) -> + {I, (A1 - A0) / (T1 - T0)} + end, + lists:zip(lists:sort(First), lists:sort(Last)) + ). -get_memory()-> +get_memory() -> get_memory_once(current) ++ erlang:memory(). get_memory(Ks, Keyword) when is_list(Ks) -> Ms = get_memory_once(Keyword) ++ erlang:memory(), [M || M = {K, _} <- Ms, lists:member(K, Ks)]; - get_memory(used, Keyword) -> - lists:sum(lists:map(fun({_, Prop}) -> - container_size(Prop, Keyword, blocks_size) - end, util_alloc())); - + lists:sum( + lists:map( + fun({_, Prop}) -> + container_size(Prop, Keyword, blocks_size) + end, + util_alloc() + ) + ); get_memory(allocated, Keyword) -> - lists:sum(lists:map(fun({_, Prop}) -> - container_size(Prop, Keyword, carriers_size) - end, util_alloc())); - + lists:sum( + lists:map( + fun({_, Prop}) -> + container_size(Prop, Keyword, carriers_size) + end, + util_alloc() + ) + ); get_memory(unused, Keyword) -> Ms = get_memory_once(Keyword), proplists:get_value(allocated, Ms) - proplists:get_value(used, Ms); - get_memory(usage, Keyword) -> Ms = get_memory_once(Keyword), proplists:get_value(used, Ms) / proplists:get_value(allocated, Ms). 
@@ -263,39 +290,45 @@ get_memory(usage, Keyword) -> %% @private A more quickly function to calculate memory get_memory_once(Keyword) -> Calc = fun({_, Prop}, {N1, N2}) -> - {N1 + container_size(Prop, Keyword, blocks_size), - N2 + container_size(Prop, Keyword, carriers_size)} - end, + { + N1 + container_size(Prop, Keyword, blocks_size), + N2 + container_size(Prop, Keyword, carriers_size) + } + end, {Used, Allocated} = lists:foldl(Calc, {0, 0}, util_alloc()), - [{used, Used}, - {allocated, Allocated}, - {unused, Allocated - Used}, - {usage, Used / Allocated}]. + [ + {used, Used}, + {allocated, Allocated}, + {unused, Allocated - Used}, + {usage, Used / Allocated} + ]. -util_alloc()-> +util_alloc() -> alloc(?UTIL_ALLOCATORS). alloc(Type) -> - [{{T, Instance}, Props} || - {{T, Instance}, Props} <- recon_alloc:allocators(), lists:member(T, Type)]. + [ + {{T, Instance}, Props} + || {{T, Instance}, Props} <- recon_alloc:allocators(), lists:member(T, Type) + ]. container_size(Prop, Keyword, Container) -> Sbcs = container_value(Prop, Keyword, sbcs, Container), Mbcs = container_value(Prop, Keyword, mbcs, Container), - Sbcs+Mbcs. + Sbcs + Mbcs. -container_value(Prop, Keyword, Type, Container) when is_atom(Keyword)-> +container_value(Prop, Keyword, Type, Container) when is_atom(Keyword) -> container_value(Prop, 2, Type, Container); -container_value(Props, Pos, mbcs = Type, Container) when is_integer(Pos)-> - Pool = case proplists:get_value(mbcs_pool, Props) of - PoolProps when PoolProps =/= undefined -> - element(Pos, lists:keyfind(Container, 1, PoolProps)); - _ -> - 0 - end, +container_value(Props, Pos, mbcs = Type, Container) when is_integer(Pos) -> + Pool = + case proplists:get_value(mbcs_pool, Props) of + PoolProps when PoolProps =/= undefined -> + element(Pos, lists:keyfind(Container, 1, PoolProps)); + _ -> + 0 + end, TypeProps = proplists:get_value(Type, Props), Pool + element(Pos, lists:keyfind(Container, 1, TypeProps)); - container_value(Props, Pos, Type, Container) -> TypeProps = proplists:get_value(Type, Props), element(Pos, lists:keyfind(Container, 1, TypeProps)). @@ -320,28 +353,29 @@ get_process_limit() -> erlang:system_info(process_limit). get_ets_list() -> - ets:all(). + ets:all(). get_ets_info() -> [get_ets_info(Tab) || Tab <- ets:all()]. get_ets_info(Tab) -> case ets:info(Tab) of - undefined -> - []; - Entries when is_list(Entries) -> - mapping(Entries) + undefined -> + []; + Entries when is_list(Entries) -> + mapping(Entries) end. mapping(Entries) -> mapping(Entries, []). -mapping([], Acc) -> Acc; -mapping([{owner, V}|Entries], Acc) when is_pid(V) -> +mapping([], Acc) -> + Acc; +mapping([{owner, V} | Entries], Acc) when is_pid(V) -> OwnerInfo = process_info(V), Owner = proplists:get_value(registered_name, OwnerInfo, undefined), - mapping(Entries, [{owner, Owner}|Acc]); -mapping([{Key, Value}|Entries], Acc) -> - mapping(Entries, [{Key, Value}|Acc]). + mapping(Entries, [{owner, Owner} | Acc]); +mapping([{Key, Value} | Entries], Acc) -> + mapping(Entries, [{Key, Value} | Acc]). avg1() -> compat_windows(fun cpu_sup:avg1/0). @@ -357,7 +391,8 @@ cpu_util() -> compat_windows(Fun) -> case os:type() of - {win32, nt} -> 0; + {win32, nt} -> + 0; _Type -> case catch Fun() of Val when is_number(Val) -> Val; diff --git a/apps/emqx/src/emqx_vm_mon.erl b/apps/emqx/src/emqx_vm_mon.erl index e763c7bfb..c7484c741 100644 --- a/apps/emqx/src/emqx_vm_mon.erl +++ b/apps/emqx/src/emqx_vm_mon.erl @@ -24,13 +24,14 @@ -export([start_link/0]). 
%% gen_server callbacks --export([ init/1 - , handle_call/3 - , handle_cast/2 - , handle_info/2 - , terminate/2 - , code_change/3 - ]). +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2, + code_change/3 +]). -define(VM_MON, ?MODULE). @@ -62,29 +63,34 @@ handle_info({timeout, _Timer, check}, State) -> ProcessCount = erlang:system_info(process_count), case ProcessCount / erlang:system_info(process_limit) of Percent when Percent > ProcHighWatermark -> - Usage = io_lib:format("~p%", [Percent*100]), + Usage = io_lib:format("~p%", [Percent * 100]), Message = [Usage, " process usage"], - emqx_alarm:activate(too_many_processes, + emqx_alarm:activate( + too_many_processes, #{ usage => Usage, high_watermark => ProcHighWatermark, - low_watermark => ProcLowWatermark}, - Message); + low_watermark => ProcLowWatermark + }, + Message + ); Percent when Percent < ProcLowWatermark -> - Usage = io_lib:format("~p%", [Percent*100]), + Usage = io_lib:format("~p%", [Percent * 100]), Message = [Usage, " process usage"], - emqx_alarm:deactivate(too_many_processes, + emqx_alarm:deactivate( + too_many_processes, #{ usage => Usage, high_watermark => ProcHighWatermark, - low_watermark => ProcLowWatermark}, - Message); + low_watermark => ProcLowWatermark + }, + Message + ); _Precent -> ok end, _ = start_check_timer(), {noreply, State}; - handle_info(Info, State) -> ?SLOG(error, #{msg => "unexpected_info", info => Info}), {noreply, State}. diff --git a/apps/emqx/src/emqx_ws_connection.erl b/apps/emqx/src/emqx_ws_connection.erl index cd5b7c515..4b964b9a1 100644 --- a/apps/emqx/src/emqx_ws_connection.erl +++ b/apps/emqx/src/emqx_ws_connection.erl @@ -22,92 +22,99 @@ -include("logger.hrl"). -include("types.hrl"). - -ifdef(TEST). -compile(export_all). -compile(nowarn_export_all). -endif. %% API --export([ info/1 - , stats/1 - ]). +-export([ + info/1, + stats/1 +]). --export([ call/2 - , call/3 - ]). +-export([ + call/2, + call/3 +]). %% WebSocket callbacks --export([ init/2 - , websocket_init/1 - , websocket_handle/2 - , websocket_info/2 - , websocket_close/2 - , terminate/3 - ]). +-export([ + init/2, + websocket_init/1, + websocket_handle/2, + websocket_info/2, + websocket_close/2, + terminate/3 +]). %% Export for CT -export([set_field/3]). --import(emqx_misc, - [ maybe_apply/2 - , start_timer/2 - ]). +-import( + emqx_misc, + [ + maybe_apply/2, + start_timer/2 + ] +). 
-record(state, { - %% Peername of the ws connection - peername :: emqx_types:peername(), - %% Sockname of the ws connection - sockname :: emqx_types:peername(), - %% Sock state - sockstate :: emqx_types:sockstate(), - %% MQTT Piggyback - mqtt_piggyback :: single | multiple, - %% Parse State - parse_state :: emqx_frame:parse_state(), - %% Serialize options - serialize :: emqx_frame:serialize_opts(), - %% Channel - channel :: emqx_channel:channel(), - %% GC State - gc_state :: maybe(emqx_gc:gc_state()), - %% Postponed Packets|Cmds|Events - postponed :: list(emqx_types:packet()|ws_cmd()|tuple()), - %% Stats Timer - stats_timer :: disabled | maybe(reference()), - %% Idle Timeout - idle_timeout :: timeout(), - %% Idle Timer - idle_timer :: maybe(reference()), - %% Zone name - zone :: atom(), - %% Listener Type and Name - listener :: {Type::atom(), Name::atom()}, + %% Peername of the ws connection + peername :: emqx_types:peername(), + %% Sockname of the ws connection + sockname :: emqx_types:peername(), + %% Sock state + sockstate :: emqx_types:sockstate(), + %% MQTT Piggyback + mqtt_piggyback :: single | multiple, + %% Parse State + parse_state :: emqx_frame:parse_state(), + %% Serialize options + serialize :: emqx_frame:serialize_opts(), + %% Channel + channel :: emqx_channel:channel(), + %% GC State + gc_state :: maybe(emqx_gc:gc_state()), + %% Postponed Packets|Cmds|Events + postponed :: list(emqx_types:packet() | ws_cmd() | tuple()), + %% Stats Timer + stats_timer :: disabled | maybe(reference()), + %% Idle Timeout + idle_timeout :: timeout(), + %% Idle Timer + idle_timer :: maybe(reference()), + %% Zone name + zone :: atom(), + %% Listener Type and Name + listener :: {Type :: atom(), Name :: atom()}, - %% Limiter - limiter :: maybe(container()), + %% Limiter + limiter :: maybe(container()), - %% cache operation when overload - limiter_cache :: queue:queue(cache()), + %% cache operation when overload + limiter_cache :: queue:queue(cache()), - %% limiter timers - limiter_timer :: undefined | reference() - }). + %% limiter timers + limiter_timer :: undefined | reference() +}). --record(retry, { types :: list(limiter_type()) - , data :: any() - , next :: check_succ_handler() - }). +-record(retry, { + types :: list(limiter_type()), + data :: any(), + next :: check_succ_handler() +}). --record(cache, { need :: list({pos_integer(), limiter_type()}) - , data :: any() - , next :: check_succ_handler() - }). +-record(cache, { + need :: list({pos_integer(), limiter_type()}), + data :: any(), + next :: check_succ_handler() +}). --type(state() :: #state{}). +-type state() :: #state{}. -type cache() :: #cache{}. --type(ws_cmd() :: {active, boolean()}|close). +-type ws_cmd() :: {active, boolean()} | close. -define(ACTIVE_N, 100). -define(INFO_KEYS, [socktype, peername, sockname, sockstate]). @@ -124,18 +131,20 @@ %% Info, Stats %%-------------------------------------------------------------------- --spec(info(pid()|state()) -> emqx_types:infos()). +-spec info(pid() | state()) -> emqx_types:infos(). info(WsPid) when is_pid(WsPid) -> call(WsPid, info); info(State = #state{channel = Channel}) -> ChanInfo = emqx_channel:info(Channel), SockInfo = maps:from_list( - info(?INFO_KEYS, State)), + info(?INFO_KEYS, State) + ), ChanInfo#{sockinfo => SockInfo}. 
info(Keys, State) when is_list(Keys) -> [{Key, info(Key, State)} || Key <- Keys]; -info(socktype, _State) -> ws; +info(socktype, _State) -> + ws; info(peername, #state{peername = Peername}) -> Peername; info(sockname, #state{sockname = Sockname}) -> @@ -157,7 +166,7 @@ info(idle_timeout, #state{idle_timeout = Timeout}) -> info(idle_timer, #state{idle_timer = TRef}) -> TRef. --spec(stats(pid()|state()) -> emqx_types:stats()). +-spec stats(pid() | state()) -> emqx_types:stats(). stats(WsPid) when is_pid(WsPid) -> call(WsPid, stats); stats(#state{channel = Channel}) -> @@ -167,7 +176,7 @@ stats(#state{channel = Channel}) -> lists:append([SockStats, ChanStats, ProcStats]). %% kick|discard|takeover --spec(call(pid(), Req :: term()) -> Reply :: term()). +-spec call(pid(), Req :: term()) -> Reply :: term(). call(WsPid, Req) -> call(WsPid, Req, 5000). @@ -191,11 +200,12 @@ call(WsPid, Req, Timeout) when is_pid(WsPid) -> init(Req, #{listener := {Type, Listener}} = Opts) -> %% WS Transport Idle Timeout - WsOpts = #{compress => get_ws_opts(Type, Listener, compress), - deflate_opts => get_ws_opts(Type, Listener, deflate_opts), - max_frame_size => get_ws_opts(Type, Listener, max_frame_size), - idle_timeout => get_ws_opts(Type, Listener, idle_timeout) - }, + WsOpts = #{ + compress => get_ws_opts(Type, Listener, compress), + deflate_opts => get_ws_opts(Type, Listener, deflate_opts), + max_frame_size => get_ws_opts(Type, Listener, max_frame_size), + idle_timeout => get_ws_opts(Type, Listener, idle_timeout) + }, case check_origin_header(Req, Opts) of {error, Reason} -> ?SLOG(error, #{msg => "invalid_origin_header", reason => Reason}), @@ -215,13 +225,17 @@ parse_sec_websocket_protocol(Req, #{listener := {Type, Listener}} = Opts, WsOpts end; Subprotocols -> SupportedSubprotocols = get_ws_opts(Type, Listener, supported_subprotocols), - NSupportedSubprotocols = [list_to_binary(Subprotocol) - || Subprotocol <- SupportedSubprotocols], + NSupportedSubprotocols = [ + list_to_binary(Subprotocol) + || Subprotocol <- SupportedSubprotocols + ], case pick_subprotocol(Subprotocols, NSupportedSubprotocols) of {ok, Subprotocol} -> - Resp = cowboy_req:set_resp_header(<<"sec-websocket-protocol">>, - Subprotocol, - Req), + Resp = cowboy_req:set_resp_header( + <<"sec-websocket-protocol">>, + Subprotocol, + Req + ), {cowboy_websocket, Resp, [Req, Opts], WsOpts}; {error, no_supported_subprotocol} -> {ok, cowboy_req:reply(400, Req), WsOpts} @@ -241,10 +255,10 @@ pick_subprotocol([Subprotocol | Rest], SupportedSubprotocols) -> parse_header_fun_origin(Req, #{listener := {Type, Listener}}) -> case cowboy_req:header(<<"origin">>, Req) of undefined -> - case get_ws_opts(Type, Listener, allow_origin_absence) of - true -> ok; - false -> {error, origin_header_cannot_be_absent} - end; + case get_ws_opts(Type, Listener, allow_origin_absence) of + true -> ok; + false -> {error, origin_header_cannot_be_absent} + end; Value -> case lists:member(Value, get_ws_opts(Type, Listener, check_origins)) of true -> ok; @@ -258,18 +272,23 @@ check_origin_header(Req, #{listener := {Type, Listener}} = Opts) -> false -> ok end. 
-websocket_init([Req, - #{zone := Zone, limiter := LimiterCfg, listener := {Type, Listener}} = Opts]) -> +websocket_init([ + Req, + #{zone := Zone, limiter := LimiterCfg, listener := {Type, Listener}} = Opts +]) -> {Peername, Peercert} = - case emqx_config:get_listener_conf(Type, Listener, [proxy_protocol]) andalso - maps:get(proxy_header, Req) of + case + emqx_config:get_listener_conf(Type, Listener, [proxy_protocol]) andalso + maps:get(proxy_header, Req) + of #{src_address := SrcAddr, src_port := SrcPort, ssl := SSL} -> SourceName = {SrcAddr, SrcPort}, %% Notice: Only CN is available in Proxy Protocol V2 additional info - SourceSSL = case maps:get(cn, SSL, undefined) of - undeined -> nossl; - CN -> [{pp2_ssl_cn, CN}] - end, + SourceSSL = + case maps:get(cn, SSL, undefined) of + undeined -> nossl; + CN -> [{pp2_ssl_cn, CN}] + end, {SourceName, SourceSSL}; #{src_address := SrcAddr, src_port := SrcPort} -> SourceName = {SrcAddr, SrcPort}, @@ -278,25 +297,32 @@ websocket_init([Req, {get_peer(Req, Opts), cowboy_req:cert(Req)} end, Sockname = cowboy_req:sock(Req), - WsCookie = try cowboy_req:parse_cookies(Req) - catch - error:badarg -> - ?SLOG(error, #{msg => "bad_cookie"}), - undefined; - Error:Reason -> - ?SLOG(error, #{msg => "failed_to_parse_cookie", - exception => Error, - reason => Reason}), - undefined - end, - ConnInfo = #{socktype => ws, - peername => Peername, - sockname => Sockname, - peercert => Peercert, - ws_cookie => WsCookie, - conn_mod => ?MODULE - }, - Limiter = emqx_limiter_container:get_limiter_by_names([?LIMITER_BYTES_IN, ?LIMITER_MESSAGE_IN], LimiterCfg), + WsCookie = + try + cowboy_req:parse_cookies(Req) + catch + error:badarg -> + ?SLOG(error, #{msg => "bad_cookie"}), + undefined; + Error:Reason -> + ?SLOG(error, #{ + msg => "failed_to_parse_cookie", + exception => Error, + reason => Reason + }), + undefined + end, + ConnInfo = #{ + socktype => ws, + peername => Peername, + sockname => Sockname, + peercert => Peercert, + ws_cookie => WsCookie, + conn_mod => ?MODULE + }, + Limiter = emqx_limiter_container:get_limiter_by_names( + [?LIMITER_BYTES_IN, ?LIMITER_MESSAGE_IN], LimiterCfg + ), MQTTPiggyback = get_ws_opts(Type, Listener, mqtt_piggyback), FrameOpts = #{ strict_mode => emqx_config:get_zone_conf(Zone, [mqtt, strict_mode]), @@ -305,106 +331,109 @@ websocket_init([Req, ParseState = emqx_frame:initial_parse_state(FrameOpts), Serialize = emqx_frame:serialize_opts(), Channel = emqx_channel:init(ConnInfo, Opts), - GcState = case emqx_config:get_zone_conf(Zone, [force_gc]) of - #{enable := false} -> undefined; - GcPolicy -> emqx_gc:init(GcPolicy) - end, - StatsTimer = case emqx_config:get_zone_conf(Zone, [stats, enable]) of - true -> undefined; - false -> disabled - end, + GcState = + case emqx_config:get_zone_conf(Zone, [force_gc]) of + #{enable := false} -> undefined; + GcPolicy -> emqx_gc:init(GcPolicy) + end, + StatsTimer = + case emqx_config:get_zone_conf(Zone, [stats, enable]) of + true -> undefined; + false -> disabled + end, %% MQTT Idle Timeout IdleTimeout = emqx_channel:get_mqtt_conf(Zone, idle_timeout), IdleTimer = start_timer(IdleTimeout, idle_timeout), - case emqx_config:get_zone_conf(emqx_channel:info(zone, Channel), - [force_shutdown]) of + case + emqx_config:get_zone_conf( + emqx_channel:info(zone, Channel), + [force_shutdown] + ) + of #{enable := false} -> ok; ShutdownPolicy -> emqx_misc:tune_heap_size(ShutdownPolicy) end, emqx_logger:set_metadata_peername(esockd:format(Peername)), - {ok, #state{peername = Peername, - sockname = Sockname, - sockstate = 
running, - mqtt_piggyback = MQTTPiggyback, - limiter = Limiter, - parse_state = ParseState, - serialize = Serialize, - channel = Channel, - gc_state = GcState, - postponed = [], - stats_timer = StatsTimer, - idle_timeout = IdleTimeout, - idle_timer = IdleTimer, - zone = Zone, - listener = {Type, Listener}, - limiter_timer = undefined, - limiter_cache = queue:new() - }, hibernate}. + {ok, + #state{ + peername = Peername, + sockname = Sockname, + sockstate = running, + mqtt_piggyback = MQTTPiggyback, + limiter = Limiter, + parse_state = ParseState, + serialize = Serialize, + channel = Channel, + gc_state = GcState, + postponed = [], + stats_timer = StatsTimer, + idle_timeout = IdleTimeout, + idle_timer = IdleTimer, + zone = Zone, + listener = {Type, Listener}, + limiter_timer = undefined, + limiter_cache = queue:new() + }, + hibernate}. websocket_handle({binary, Data}, State) when is_list(Data) -> websocket_handle({binary, iolist_to_binary(Data)}, State); - websocket_handle({binary, Data}, State) -> State2 = ensure_stats_timer(State), {Packets, State3} = parse_incoming(Data, [], State2), LenMsg = erlang:length(Packets), ByteSize = erlang:iolist_size(Data), inc_recv_stats(LenMsg, ByteSize), - State4 = check_limiter([{ByteSize, ?LIMITER_BYTES_IN}, {LenMsg, ?LIMITER_MESSAGE_IN}], - Packets, - fun when_msg_in/3, - [], - State3), + State4 = check_limiter( + [{ByteSize, ?LIMITER_BYTES_IN}, {LenMsg, ?LIMITER_MESSAGE_IN}], + Packets, + fun when_msg_in/3, + [], + State3 + ), return(State4); - %% Pings should be replied with pongs, cowboy does it automatically %% Pongs can be safely ignored. Clause here simply prevents crash. websocket_handle(Frame, State) when Frame =:= ping; Frame =:= pong -> return(State); - websocket_handle({Frame, _}, State) when Frame =:= ping; Frame =:= pong -> return(State); - websocket_handle({Frame, _}, State) -> %% TODO: should not close the ws connection ?SLOG(error, #{msg => "unexpected_frame", frame => Frame}), shutdown(unexpected_ws_frame, State). 
websocket_info({call, From, Req}, State) -> handle_call(From, Req, State); - websocket_info({cast, rate_limit}, State) -> - Stats = #{cnt => emqx_pd:reset_counter(incoming_pubs), - oct => emqx_pd:reset_counter(incoming_bytes) - }, + Stats = #{ + cnt => emqx_pd:reset_counter(incoming_pubs), + oct => emqx_pd:reset_counter(incoming_bytes) + }, return(postpone({check_gc, Stats}, State)); - websocket_info({cast, Msg}, State) -> handle_info(Msg, State); - websocket_info({incoming, Packet = ?CONNECT_PACKET(ConnPkt)}, State) -> Serialize = emqx_frame:serialize_opts(ConnPkt), NState = State#state{serialize = Serialize}, handle_incoming(Packet, cancel_idle_timer(NState)); - websocket_info({incoming, Packet}, State) -> handle_incoming(Packet, State); - websocket_info({outgoing, Packets}, State) -> return(enqueue(Packets, State)); - websocket_info({check_gc, Stats}, State) -> return(check_oom(run_gc(Stats, State))); - -websocket_info(Deliver = {deliver, _Topic, _Msg}, - State = #state{listener = {Type, Listener}}) -> +websocket_info( + Deliver = {deliver, _Topic, _Msg}, + State = #state{listener = {Type, Listener}} +) -> ActiveN = get_active_n(Type, Listener), - Delivers = [Deliver|emqx_misc:drain_deliver(ActiveN)], + Delivers = [Deliver | emqx_misc:drain_deliver(ActiveN)], with_channel(handle_deliver, [Delivers], State); - -websocket_info({timeout, _, limit_timeout}, - State) -> +websocket_info( + {timeout, _, limit_timeout}, + State +) -> return(retry_limiter(State)); - websocket_info(check_cache, #state{limiter_cache = Cache} = State) -> case queue:peek(Cache) of empty -> @@ -413,16 +442,12 @@ websocket_info(check_cache, #state{limiter_cache = Cache} = State) -> State2 = State#state{limiter_cache = queue:drop(Cache)}, return(check_limiter(Needs, Data, Next, [check_cache], State2)) end; - websocket_info({timeout, TRef, Msg}, State) when is_reference(TRef) -> handle_timeout(TRef, Msg, State); - websocket_info({shutdown, Reason}, State) -> shutdown(Reason, State); - websocket_info({stop, Reason}, State) -> shutdown(Reason, State); - websocket_info(Info, State) -> handle_info(Info, State). @@ -435,7 +460,6 @@ websocket_close(Reason, State) -> terminate(Reason, _Req, #state{channel = Channel}) -> ?TRACE("SOCKET", "websocket_terminated", #{reason => Reason}), emqx_channel:terminate(Reason, Channel); - terminate(_Reason, _Req, _UnExpectedState) -> ok. 
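For context on the deliver clause reformatted above: after one {deliver, _, _} message arrives, up to ActiveN further delivers already queued in the mailbox are pulled out and handled as a single batch. The sketch below is a simplified stand-in for emqx_misc:drain_deliver/1, written on the assumption that it behaves like a non-blocking selective receive; it is illustrative only and not part of the patch.

%% Minimal sketch (assumption: emqx_misc:drain_deliver/1 is essentially this
%% non-blocking selective receive over {deliver, Topic, Msg} messages).
-module(drain_deliver_sketch).
-export([drain_deliver/1]).

drain_deliver(0) ->
    [];
drain_deliver(N) when N > 0 ->
    receive
        Deliver = {deliver, _Topic, _Msg} ->
            %% Another deliver was already waiting: add it to the batch.
            [Deliver | drain_deliver(N - 1)]
    after 0 ->
        %% No more delivers in the mailbox right now: stop without blocking.
        []
    end.

Batching like this lets with_channel(handle_deliver, [Delivers], State) process many pending publishes in one pass instead of waking up once per message.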
@@ -446,15 +470,12 @@ terminate(_Reason, _Req, _UnExpectedState) -> handle_call(From, info, State) -> gen_server:reply(From, info(State)), return(State); - handle_call(From, stats, State) -> gen_server:reply(From, stats(State)), return(State); - handle_call(_From, {ratelimit, Type, Bucket}, State = #state{limiter = Limiter}) -> Limiter2 = emqx_limiter_container:update_by_name(Type, Bucket, Limiter), {reply, ok, State#state{limiter = Limiter2}}; - handle_call(From, Req, State = #state{channel = Channel}) -> case emqx_channel:handle_call(Req, Channel) of {reply, Reply, NChannel} -> @@ -475,28 +496,23 @@ handle_call(From, Req, State = #state{channel = Channel}) -> handle_info({connack, ConnAck}, State) -> return(enqueue(ConnAck, State)); - handle_info({close, Reason}, State) -> ?TRACE("SOCKET", "socket_force_closed", #{reason => Reason}), return(enqueue({close, Reason}, State)); - handle_info({event, connected}, State = #state{channel = Channel}) -> ClientId = emqx_channel:info(clientid, Channel), emqx_cm:insert_channel_info(ClientId, info(State), stats(State)), return(State); - handle_info({event, disconnected}, State = #state{channel = Channel}) -> ClientId = emqx_channel:info(clientid, Channel), emqx_cm:set_chan_info(ClientId, info(State)), emqx_cm:connection_closed(ClientId), return(State); - handle_info({event, _Other}, State = #state{channel = Channel}) -> ClientId = emqx_channel:info(clientid, Channel), emqx_cm:set_chan_info(ClientId, info(State)), emqx_cm:set_chan_stats(ClientId, stats(State)), return(State); - handle_info(Info, State) -> with_channel(handle_info, [Info], State). @@ -506,17 +522,20 @@ handle_info(Info, State) -> handle_timeout(TRef, idle_timeout, State = #state{idle_timer = TRef}) -> shutdown(idle_timeout, State); - handle_timeout(TRef, keepalive, State) when is_reference(TRef) -> RecvOct = emqx_pd:get_counter(recv_oct), handle_timeout(TRef, {keepalive, RecvOct}, State); - -handle_timeout(TRef, emit_stats, State = #state{channel = Channel, - stats_timer = TRef}) -> +handle_timeout( + TRef, + emit_stats, + State = #state{ + channel = Channel, + stats_timer = TRef + } +) -> ClientId = emqx_channel:info(clientid, Channel), emqx_cm:set_chan_stats(ClientId, stats(State)), return(State#state{stats_timer = undefined}); - handle_timeout(TRef, TMsg, State) -> with_channel(handle_timeout, [TRef, TMsg], State). @@ -527,42 +546,56 @@ handle_timeout(TRef, TMsg, State) -> -type limiter_type() :: emqx_limiter_container:limiter_type(). -type container() :: emqx_limiter_container:container(). -type check_succ_handler() :: - fun((any(), list(any()), state()) -> state()). + fun((any(), list(any()), state()) -> state()). --spec check_limiter(list({pos_integer(), limiter_type()}), - any(), - check_succ_handler(), - list(any()), - state()) -> state(). -check_limiter(Needs, - Data, - WhenOk, - Msgs, - #state{limiter = Limiter, - limiter_timer = LimiterTimer, - limiter_cache = Cache} = State) -> +-spec check_limiter( + list({pos_integer(), limiter_type()}), + any(), + check_succ_handler(), + list(any()), + state() +) -> state(). 
+check_limiter( + Needs, + Data, + WhenOk, + Msgs, + #state{ + limiter = Limiter, + limiter_timer = LimiterTimer, + limiter_cache = Cache + } = State +) -> case LimiterTimer of undefined -> case emqx_limiter_container:check_list(Needs, Limiter) of {ok, Limiter2} -> WhenOk(Data, Msgs, State#state{limiter = Limiter2}); {pause, Time, Limiter2} -> - ?SLOG(warning, #{msg => "pause_time_due_to_rate_limit", - needs => Needs, - time_in_ms => Time}), + ?SLOG(warning, #{ + msg => "pause_time_due_to_rate_limit", + needs => Needs, + time_in_ms => Time + }), - Retry = #retry{types = [Type || {_, Type} <- Needs], - data = Data, - next = WhenOk}, + Retry = #retry{ + types = [Type || {_, Type} <- Needs], + data = Data, + next = WhenOk + }, Limiter3 = emqx_limiter_container:set_retry_context(Retry, Limiter2), TRef = start_timer(Time, limit_timeout), - enqueue({active, false}, - State#state{sockstate = blocked, - limiter = Limiter3, - limiter_timer = TRef}); + enqueue( + {active, false}, + State#state{ + sockstate = blocked, + limiter = Limiter3, + limiter_timer = TRef + } + ); {drop, Limiter2} -> {ok, State#state{limiter = Limiter2}} end; @@ -571,21 +604,27 @@ check_limiter(Needs, State#state{limiter_cache = queue:in(New, Cache)} end. - -spec retry_limiter(state()) -> state(). retry_limiter(#state{limiter = Limiter} = State) -> - #retry{types = Types, data = Data, next = Next} = emqx_limiter_container:get_retry_context(Limiter), + #retry{types = Types, data = Data, next = Next} = emqx_limiter_container:get_retry_context( + Limiter + ), case emqx_limiter_container:retry_list(Types, Limiter) of {ok, Limiter2} -> - Next(Data, - [check_cache], - State#state{ limiter = Limiter2 - , limiter_timer = undefined - }); + Next( + Data, + [check_cache], + State#state{ + limiter = Limiter2, + limiter_timer = undefined + } + ); {pause, Time, Limiter2} -> - ?SLOG(warning, #{msg => "pause_time_due_to_rate_limit", - types => Types, - time_in_ms => Time}), + ?SLOG(warning, #{ + msg => "pause_time_due_to_rate_limit", + types => Types, + time_in_ms => Time + }), TRef = start_timer(Time, limit_timeout), @@ -594,7 +633,6 @@ retry_limiter(#state{limiter = Limiter} = State) -> when_msg_in(Packets, [], State) -> postpone(Packets, State); - when_msg_in(Packets, Msgs, State) -> postpone(Packets, enqueue(Msgs, State)). @@ -605,20 +643,22 @@ when_msg_in(Packets, Msgs, State) -> run_gc(Stats, State = #state{gc_state = GcSt}) -> case ?ENABLED(GcSt) andalso emqx_gc:run(Stats, GcSt) of false -> State; - {_IsGC, GcSt1} -> - State#state{gc_state = GcSt1} + {_IsGC, GcSt1} -> State#state{gc_state = GcSt1} end. check_oom(State = #state{channel = Channel}) -> ShutdownPolicy = emqx_config:get_zone_conf( - emqx_channel:info(zone, Channel), [force_shutdown]), + emqx_channel:info(zone, Channel), [force_shutdown] + ), case ShutdownPolicy of - #{enable := false} -> State; + #{enable := false} -> + State; #{enable := true} -> case emqx_misc:check_oom(ShutdownPolicy) of Shutdown = {shutdown, _Reason} -> postpone(Shutdown, State); - _Other -> State + _Other -> + State end end. 
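The check_limiter/5 and retry_limiter/1 clauses reformatted above implement a pause-and-retry pattern. The fragment below compresses that control flow into one illustrative function: it uses the same emqx_limiter_container calls and result shapes that appear in the diff, but the map-based state (with limiter, limiter_timer and sockstate keys), the continuation arity and the name limited_ingest/4 are simplifications, not the module's real API.

-module(limiter_flow_sketch).
-export([limited_ingest/4]).

%% Needs is a list of {Amount, LimiterType} pairs, mirroring check_limiter/5 above.
limited_ingest(Needs, Data, OnOk, #{limiter := Limiter} = State) ->
    case emqx_limiter_container:check_list(Needs, Limiter) of
        {ok, Limiter2} ->
            %% Enough tokens: run the continuation immediately.
            OnOk(Data, State#{limiter := Limiter2});
        {pause, Time, Limiter2} ->
            %% Out of tokens: remember what to retry, block the socket
            %% ({active, false} in the real code) and arm a limit_timeout
            %% timer; retry_limiter/1 picks this context up when it fires.
            Retry = #{types => [T || {_, T} <- Needs], data => Data, next => OnOk},
            Limiter3 = emqx_limiter_container:set_retry_context(Retry, Limiter2),
            TRef = erlang:start_timer(Time, self(), limit_timeout),
            State#{limiter := Limiter3, limiter_timer := TRef, sockstate := blocked};
        {drop, Limiter2} ->
            %% Hard limit exceeded: the data is dropped, only limiter state is kept.
            State#{limiter := Limiter2}
    end.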
@@ -628,7 +668,6 @@ check_oom(State = #state{channel = Channel}) -> parse_incoming(<<>>, Packets, State) -> {Packets, State}; - parse_incoming(Data, Packets, State = #state{parse_state = ParseState}) -> try emqx_frame:parse(Data, ParseState) of {more, NParseState} -> @@ -637,19 +676,21 @@ parse_incoming(Data, Packets, State = #state{parse_state = ParseState}) -> NState = State#state{parse_state = NParseState}, parse_incoming(Rest, [{incoming, Packet} | Packets], NState) catch - throw : ?FRAME_PARSE_ERROR(Reason) -> - ?SLOG(info, #{ reason => Reason - , at_state => emqx_frame:describe_state(ParseState) - , input_bytes => Data - }), + throw:?FRAME_PARSE_ERROR(Reason) -> + ?SLOG(info, #{ + reason => Reason, + at_state => emqx_frame:describe_state(ParseState), + input_bytes => Data + }), FrameError = {frame_error, Reason}, {[{incoming, FrameError} | Packets], State}; - error : Reason : Stacktrace -> - ?SLOG(error, #{ at_state => emqx_frame:describe_state(ParseState) - , input_bytes => Data - , exception => Reason - , stacktrace => Stacktrace - }), + error:Reason:Stacktrace -> + ?SLOG(error, #{ + at_state => emqx_frame:describe_state(ParseState), + input_bytes => Data, + exception => Reason, + stacktrace => Stacktrace + }), FrameError = {frame_error, Reason}, {[{incoming, FrameError} | Packets], State} end. @@ -658,17 +699,20 @@ parse_incoming(Data, Packets, State = #state{parse_state = ParseState}) -> %% Handle incoming packet %%-------------------------------------------------------------------- -handle_incoming(Packet, State = #state{listener = {Type, Listener}}) - when is_record(Packet, mqtt_packet) -> +handle_incoming(Packet, State = #state{listener = {Type, Listener}}) when + is_record(Packet, mqtt_packet) +-> ?TRACE("WS-MQTT", "mqtt_packet_received", #{packet => Packet}), ok = inc_incoming_stats(Packet), - NState = case emqx_pd:get_counter(incoming_pubs) > - get_active_n(Type, Listener) of - true -> postpone({cast, rate_limit}, State); - false -> State - end, + NState = + case + emqx_pd:get_counter(incoming_pubs) > + get_active_n(Type, Listener) + of + true -> postpone({cast, rate_limit}, State); + false -> State + end, with_channel(handle_in, [Packet], NState); - handle_incoming(FrameError, State) -> with_channel(handle_in, [FrameError], State). 
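parse_incoming/3, reformatted above, relies on the two success shapes of emqx_frame:parse/2 that the diff itself shows: {more, ParseState} when the buffer holds an incomplete packet, and {ok, Packet, Rest, ParseState} when one packet is complete. The loop below is a stripped-down restatement of that contract (error handling and the {incoming, _} wrapping are left out); it is illustrative, not part of the patch.

-module(parse_loop_sketch).
-export([parse_all/2]).

%% Returns all complete packets found in Data plus the parser state to carry
%% into the next websocket binary frame.
parse_all(Data, ParseState) ->
    parse_all(Data, ParseState, []).

parse_all(<<>>, ParseState, Acc) ->
    {lists:reverse(Acc), ParseState};
parse_all(Data, ParseState, Acc) ->
    case emqx_frame:parse(Data, ParseState) of
        {more, NParseState} ->
            %% Incomplete packet: keep the parser state, wait for more bytes.
            {lists:reverse(Acc), NParseState};
        {ok, Packet, Rest, NParseState} ->
            %% One packet decoded; continue on the remaining bytes, so a single
            %% websocket frame can yield zero, one or many MQTT packets.
            parse_all(Rest, NParseState, [Packet | Acc])
    end.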
@@ -678,11 +722,12 @@ handle_incoming(FrameError, State) -> with_channel(Fun, Args, State = #state{channel = Channel}) -> case erlang:apply(emqx_channel, Fun, Args ++ [Channel]) of - ok -> return(State); + ok -> + return(State); {ok, NChannel} -> return(State#state{channel = NChannel}); {ok, Replies, NChannel} -> - return(postpone(Replies, State#state{channel= NChannel})); + return(postpone(Replies, State#state{channel = NChannel})); {shutdown, Reason, NChannel} -> shutdown(Reason, State#state{channel = NChannel}); {shutdown, Reason, Packet, NChannel} -> @@ -694,50 +739,70 @@ with_channel(Fun, Args, State = #state{channel = Channel}) -> %% Handle outgoing packets %%-------------------------------------------------------------------- -handle_outgoing(Packets, State = #state{mqtt_piggyback = MQTTPiggyback, - listener = {Type, Listener}}) -> +handle_outgoing( + Packets, + State = #state{ + mqtt_piggyback = MQTTPiggyback, + listener = {Type, Listener} + } +) -> IoData = lists:map(serialize_and_inc_stats_fun(State), Packets), Oct = iolist_size(IoData), ok = inc_sent_stats(length(Packets), Oct), - NState = case emqx_pd:get_counter(outgoing_pubs) > - get_active_n(Type, Listener) of - true -> - Stats = #{cnt => emqx_pd:reset_counter(outgoing_pubs), - oct => emqx_pd:reset_counter(outgoing_bytes) - }, - postpone({check_gc, Stats}, State); - false -> State - end, + NState = + case + emqx_pd:get_counter(outgoing_pubs) > + get_active_n(Type, Listener) + of + true -> + Stats = #{ + cnt => emqx_pd:reset_counter(outgoing_pubs), + oct => emqx_pd:reset_counter(outgoing_bytes) + }, + postpone({check_gc, Stats}, State); + false -> + State + end, - {case MQTTPiggyback of - single -> [{binary, IoData}]; - multiple -> lists:map(fun(Bin) -> {binary, Bin} end, IoData) - end, - ensure_stats_timer(NState)}. + { + case MQTTPiggyback of + single -> [{binary, IoData}]; + multiple -> lists:map(fun(Bin) -> {binary, Bin} end, IoData) + end, + ensure_stats_timer(NState) + }. serialize_and_inc_stats_fun(#state{serialize = Serialize}) -> fun(Packet) -> try emqx_frame:serialize_pkt(Packet, Serialize) of - <<>> -> ?SLOG(warning, #{msg => "packet_discarded", - reason => "frame_too_large", - packet => emqx_packet:format(Packet)}), - ok = emqx_metrics:inc('delivery.dropped.too_large'), - ok = emqx_metrics:inc('delivery.dropped'), - ok = inc_outgoing_stats({error, message_too_large}), - <<>>; - Data -> ?TRACE("WS-MQTT", "mqtt_packet_sent", #{packet => Packet}), - ok = inc_outgoing_stats(Packet), - Data + <<>> -> + ?SLOG(warning, #{ + msg => "packet_discarded", + reason => "frame_too_large", + packet => emqx_packet:format(Packet) + }), + ok = emqx_metrics:inc('delivery.dropped.too_large'), + ok = emqx_metrics:inc('delivery.dropped'), + ok = inc_outgoing_stats({error, message_too_large}), + <<>>; + Data -> + ?TRACE("WS-MQTT", "mqtt_packet_sent", #{packet => Packet}), + ok = inc_outgoing_stats(Packet), + Data catch %% Maybe Never happen. - throw : ?FRAME_SERIALIZE_ERROR(Reason) -> - ?SLOG(info, #{ reason => Reason - , input_packet => Packet}), + throw:?FRAME_SERIALIZE_ERROR(Reason) -> + ?SLOG(info, #{ + reason => Reason, + input_packet => Packet + }), erlang:error(?FRAME_SERIALIZE_ERROR(Reason)); - error : Reason : Stacktrace -> - ?SLOG(error, #{ input_packet => Packet - , exception => Reason - , stacktrace => Stacktrace}), + error:Reason:Stacktrace -> + ?SLOG(error, #{ + input_packet => Packet, + exception => Reason, + stacktrace => Stacktrace + }), erlang:error(frame_serialize_error) end end. 
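One behavioural detail worth keeping in mind when reading handle_outgoing/2 above is the mqtt_piggyback option: single packs every serialized packet into one websocket binary frame, multiple sends one frame per packet. The tiny restatement below uses a hypothetical to_ws_frames/2 name; IoData is the list produced by mapping serialize_and_inc_stats_fun/1 over the outgoing packets, exactly as in the diff.

-module(piggyback_sketch).
-export([to_ws_frames/2]).

to_ws_frames(single, IoData) ->
    %% One websocket frame carrying all serialized packets back to back.
    [{binary, IoData}];
to_ws_frames(multiple, IoData) ->
    %% One websocket frame per serialized packet.
    [{binary, Bin} || Bin <- IoData].

Fewer frames means less websocket framing overhead, while one frame per packet keeps packet boundaries visible to intermediaries; the diff only reformats this branch, it does not change the behaviour.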
@@ -746,12 +811,14 @@ serialize_and_inc_stats_fun(#state{serialize = Serialize}) -> %% Inc incoming/outgoing stats %%-------------------------------------------------------------------- --compile({inline, - [ inc_recv_stats/2 - , inc_incoming_stats/1 - , inc_outgoing_stats/1 - , inc_sent_stats/2 - ]}). +-compile( + {inline, [ + inc_recv_stats/2, + inc_incoming_stats/1, + inc_outgoing_stats/1, + inc_sent_stats/2 + ]} +). inc_recv_stats(Cnt, Oct) -> inc_counter(incoming_bytes, Oct), @@ -807,7 +874,6 @@ inc_qos_stats(Type, Packet) -> inc_qos_stats_key(send_msg, ?QOS_0) -> 'send_msg.qos0'; inc_qos_stats_key(send_msg, ?QOS_1) -> 'send_msg.qos1'; inc_qos_stats_key(send_msg, ?QOS_2) -> 'send_msg.qos2'; - inc_qos_stats_key(recv_msg, ?QOS_0) -> 'recv_msg.qos0'; inc_qos_stats_key(recv_msg, ?QOS_1) -> 'recv_msg.qos1'; inc_qos_stats_key(recv_msg, ?QOS_2) -> 'recv_msg.qos2'; @@ -830,10 +896,15 @@ cancel_idle_timer(State = #state{idle_timer = IdleTimer}) -> %%-------------------------------------------------------------------- %% Ensure stats timer -ensure_stats_timer(State = #state{idle_timeout = Timeout, - stats_timer = undefined}) -> +ensure_stats_timer( + State = #state{ + idle_timeout = Timeout, + stats_timer = undefined + } +) -> State#state{stats_timer = start_timer(Timeout, emit_stats)}; -ensure_stats_timer(State) -> State. +ensure_stats_timer(State) -> + State. -compile({inline, [postpone/2, enqueue/2, return/1, shutdown/2]}). @@ -848,15 +919,16 @@ postpone(More, State) when is_list(More) -> lists:foldl(fun postpone/2, State, More). enqueue([Packet], State = #state{postponed = Postponed}) -> - State#state{postponed = [Packet|Postponed]}; -enqueue(Packets, State = #state{postponed = Postponed}) - when is_list(Packets) -> + State#state{postponed = [Packet | Postponed]}; +enqueue(Packets, State = #state{postponed = Postponed}) when + is_list(Packets) +-> State#state{postponed = lists:reverse(Packets) ++ Postponed}; enqueue(Other, State = #state{postponed = Postponed}) -> - State#state{postponed = [Other|Postponed]}. + State#state{postponed = [Other | Postponed]}. shutdown(Reason, State = #state{postponed = Postponed}) -> - return(State#state{postponed = [{shutdown, Reason}|Postponed]}). + return(State#state{postponed = [{shutdown, Reason} | Postponed]}). return(State = #state{postponed = []}) -> {ok, State}; @@ -865,8 +937,10 @@ return(State = #state{postponed = Postponed}) -> ok = lists:foreach(fun trigger/1, Events), State1 = State#state{postponed = []}, case {Packets, Cmds} of - {[], []} -> {ok, State1}; - {[], Cmds} -> {Cmds, State1}; + {[], []} -> + {ok, State1}; + {[], Cmds} -> + {Cmds, State1}; {Packets, Cmds} -> {Frames, State2} = handle_outgoing(Packets, State1), {Frames ++ Cmds, State2} @@ -874,50 +948,56 @@ return(State = #state{postponed = Postponed}) -> classify([], Packets, Cmds, Events) -> {Packets, Cmds, Events}; -classify([Packet|More], Packets, Cmds, Events) - when is_record(Packet, mqtt_packet) -> - classify(More, [Packet|Packets], Cmds, Events); -classify([Cmd = {active, _}|More], Packets, Cmds, Events) -> - classify(More, Packets, [Cmd|Cmds], Events); -classify([Cmd = {shutdown, _Reason}|More], Packets, Cmds, Events) -> - classify(More, Packets, [Cmd|Cmds], Events); -classify([Cmd = close|More], Packets, Cmds, Events) -> - classify(More, Packets, [Cmd|Cmds], Events); -classify([Cmd = {close, _Reason}|More], Packets, Cmds, Events) -> - classify(More, Packets, [Cmd|Cmds], Events); -classify([Event|More], Packets, Cmds, Events) -> - classify(More, Packets, Cmds, [Event|Events]). 
+classify([Packet | More], Packets, Cmds, Events) when + is_record(Packet, mqtt_packet) +-> + classify(More, [Packet | Packets], Cmds, Events); +classify([Cmd = {active, _} | More], Packets, Cmds, Events) -> + classify(More, Packets, [Cmd | Cmds], Events); +classify([Cmd = {shutdown, _Reason} | More], Packets, Cmds, Events) -> + classify(More, Packets, [Cmd | Cmds], Events); +classify([Cmd = close | More], Packets, Cmds, Events) -> + classify(More, Packets, [Cmd | Cmds], Events); +classify([Cmd = {close, _Reason} | More], Packets, Cmds, Events) -> + classify(More, Packets, [Cmd | Cmds], Events); +classify([Event | More], Packets, Cmds, Events) -> + classify(More, Packets, Cmds, [Event | Events]). trigger(Event) -> erlang:send(self(), Event). get_peer(Req, #{listener := {Type, Listener}}) -> {PeerAddr, PeerPort} = cowboy_req:peer(Req), AddrHeader = cowboy_req:header( - get_ws_opts(Type, Listener, proxy_address_header), Req, <<>>), - ClientAddr = case string:tokens(binary_to_list(AddrHeader), ", ") of - [] -> - undefined; - AddrList -> - hd(AddrList) - end, - Addr = case inet:parse_address(ClientAddr) of - {ok, A} -> - A; - _ -> - PeerAddr - end, + get_ws_opts(Type, Listener, proxy_address_header), Req, <<>> + ), + ClientAddr = + case string:tokens(binary_to_list(AddrHeader), ", ") of + [] -> + undefined; + AddrList -> + hd(AddrList) + end, + Addr = + case inet:parse_address(ClientAddr) of + {ok, A} -> + A; + _ -> + PeerAddr + end, PortHeader = cowboy_req:header( - get_ws_opts(Type, Listener, proxy_port_header), Req, <<>>), - ClientPort = case string:tokens(binary_to_list(PortHeader), ", ") of - [] -> - undefined; - PortList -> - hd(PortList) - end, + get_ws_opts(Type, Listener, proxy_port_header), Req, <<>> + ), + ClientPort = + case string:tokens(binary_to_list(PortHeader), ", ") of + [] -> + undefined; + PortList -> + hd(PortList) + end, try {Addr, list_to_integer(ClientPort)} catch - _:_ -> {Addr, PeerPort} + _:_ -> {Addr, PeerPort} end. %%-------------------------------------------------------------------- @@ -926,7 +1006,7 @@ get_peer(Req, #{listener := {Type, Listener}}) -> set_field(Name, Value, State) -> Pos = emqx_misc:index_of(Name, record_info(fields, state)), - setelement(Pos+1, State, Value). + setelement(Pos + 1, State, Value). get_ws_opts(Type, Listener, Key) -> emqx_config:get_listener_conf(Type, Listener, [websocket, Key]). diff --git a/apps/emqx/src/emqx_zone_schema.erl b/apps/emqx/src/emqx_zone_schema.erl index 44595573d..2a101c0b9 100644 --- a/apps/emqx/src/emqx_zone_schema.erl +++ b/apps/emqx/src/emqx_zone_schema.erl @@ -22,10 +22,16 @@ namespace() -> zone. %% this schema module is not used at root level. %% roots are added only for document generation. -roots() -> ["mqtt", "stats", "flapping_detect", "force_shutdown", - "conn_congestion", "force_gc", - "overload_protection" - ]. +roots() -> + [ + "mqtt", + "stats", + "flapping_detect", + "force_shutdown", + "conn_congestion", + "force_gc", + "overload_protection" + ]. %% zone schemas are clones from the same name from root level %% only not allowed to have default values. @@ -34,6 +40,7 @@ fields(Name) -> %% no default values for zone settings no_default(Sc) -> - fun(default) -> undefined; - (Other) -> hocon_schema:field_schema(Sc, Other) + fun + (default) -> undefined; + (Other) -> hocon_schema:field_schema(Sc, Other) end. 
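The no_default/1 change at the end of the emqx_zone_schema diff above relies on hocon accepting a field schema expressed as a fun from attribute name to value. The wrapper below restates that pattern on its own; wrap/1 is a hypothetical name, and the only real API it touches is hocon_schema:field_schema/2, which already appears in the diff.

-module(no_default_sketch).
-export([wrap/1]).

%% Wrap a root-level field schema so the zone-level clone reports no default
%% value but answers every other attribute (type, desc, ...) unchanged.
wrap(Sc) ->
    fun
        (default) -> undefined;
        (Other) -> hocon_schema:field_schema(Sc, Other)
    end.

Stripping the defaults this way presumably keeps zone settings falling back to the root-level value unless explicitly overridden, which matches the intent stated in the surrounding comment.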
diff --git a/apps/emqx/test/emqx_SUITE.erl b/apps/emqx/test/emqx_SUITE.erl index 8ef94b4f6..02e1c8f29 100644 --- a/apps/emqx/test/emqx_SUITE.erl +++ b/apps/emqx/test/emqx_SUITE.erl @@ -51,9 +51,14 @@ t_emqx_pubsub_api(_) -> ?assertEqual([self()], emqx:subscribers(Topic1)), ?assertEqual([self()], emqx:subscribers(Topic2)), - ?assertEqual([{Topic, #{nl => 0, qos => 0, rap => 0, rh => 0, subid => ClientId}}, - {Topic1, #{nl => 0, qos => 1, rap => 0, rh => 0, subid => ClientId}}, - {Topic2, #{nl => 0, qos => 2, rap => 0, rh => 0, subid => ClientId}}], emqx:subscriptions(self())), + ?assertEqual( + [ + {Topic, #{nl => 0, qos => 0, rap => 0, rh => 0, subid => ClientId}}, + {Topic1, #{nl => 0, qos => 1, rap => 0, rh => 0, subid => ClientId}}, + {Topic2, #{nl => 0, qos => 2, rap => 0, rh => 0, subid => ClientId}} + ], + emqx:subscriptions(self()) + ), ?assertEqual(true, emqx:subscribed(self(), Topic)), ?assertEqual(true, emqx:subscribed(ClientId, Topic)), ?assertEqual(true, emqx:subscribed(self(), Topic1)), @@ -92,8 +97,10 @@ t_emqx_pubsub_api(_) -> t_hook_unhook(_) -> ok = emqx:hook(test_hook, {?MODULE, hook_fun1, []}), ok = emqx:hook(test_hook, {?MODULE, hook_fun2, []}), - ?assertEqual({error, already_exists}, - emqx:hook(test_hook, {?MODULE, hook_fun2, []})), + ?assertEqual( + {error, already_exists}, + emqx:hook(test_hook, {?MODULE, hook_fun2, []}) + ), ok = emqx:unhook(test_hook, {?MODULE, hook_fun1}), ok = emqx:unhook(test_hook, {?MODULE, hook_fun2}), @@ -110,7 +117,7 @@ t_run_hook(_) -> ok = emqx:hook(foldl_hook, {?MODULE, hook_fun3, [init]}), ok = emqx:hook(foldl_hook, {?MODULE, hook_fun4, [init]}), ok = emqx:hook(foldl_hook, {?MODULE, hook_fun5, [init]}), - [r5,r4] = emqx:run_fold_hook(foldl_hook, [arg1, arg2], []), + [r5, r4] = emqx:run_fold_hook(foldl_hook, [arg1, arg2], []), [] = emqx:run_fold_hook(unknown_hook, [], []), ok = emqx:hook(foldl_hook2, {?MODULE, hook_fun9, []}), @@ -124,11 +131,17 @@ t_run_hook(_) -> ok = emqx:run_hook(foreach_hook, [arg]), ok = emqx:hook(foreach_filter1_hook, {?MODULE, hook_fun1, []}, {?MODULE, hook_filter1, []}, 0), - ?assertEqual(ok, emqx:run_hook(foreach_filter1_hook, [arg])), %% filter passed - ?assertEqual(ok, emqx:run_hook(foreach_filter1_hook, [arg1])), %% filter failed + %% filter passed + ?assertEqual(ok, emqx:run_hook(foreach_filter1_hook, [arg])), + %% filter failed + ?assertEqual(ok, emqx:run_hook(foreach_filter1_hook, [arg1])), - ok = emqx:hook(foldl_filter2_hook, {?MODULE, hook_fun2, []}, {?MODULE, hook_filter2, [init_arg]}), - ok = emqx:hook(foldl_filter2_hook, {?MODULE, hook_fun2_1, []}, {?MODULE, hook_filter2_1, [init_arg]}), + ok = emqx:hook( + foldl_filter2_hook, {?MODULE, hook_fun2, []}, {?MODULE, hook_filter2, [init_arg]} + ), + ok = emqx:hook( + foldl_filter2_hook, {?MODULE, hook_fun2_1, []}, {?MODULE, hook_filter2_1, [init_arg]} + ), ?assertEqual(3, emqx:run_fold_hook(foldl_filter2_hook, [arg], 1)), ?assertEqual(2, emqx:run_fold_hook(foldl_filter2_hook, [arg1], 1)). @@ -146,14 +159,14 @@ hook_fun2(_, Acc) -> {ok, Acc + 1}. hook_fun2_1(_, Acc) -> {ok, Acc + 1}. hook_fun3(arg1, arg2, _Acc, init) -> ok. -hook_fun4(arg1, arg2, Acc, init) -> {ok, [r4 | Acc]}. -hook_fun5(arg1, arg2, Acc, init) -> {ok, [r5 | Acc]}. +hook_fun4(arg1, arg2, Acc, init) -> {ok, [r4 | Acc]}. +hook_fun5(arg1, arg2, Acc, init) -> {ok, [r5 | Acc]}. hook_fun6(arg, initArg) -> ok. hook_fun7(arg, initArg) -> ok. hook_fun8(arg, initArg) -> ok. -hook_fun9(arg, Acc) -> {stop, [r9 | Acc]}. +hook_fun9(arg, Acc) -> {stop, [r9 | Acc]}. 
hook_fun10(arg, Acc) -> {stop, [r10 | Acc]}. hook_filter1(arg) -> true; @@ -162,6 +175,6 @@ hook_filter1(_) -> false. hook_filter2(arg, _Acc, init_arg) -> true; hook_filter2(_, _Acc, _IntArg) -> false. -hook_filter2_1(arg, _Acc, init_arg) -> true; +hook_filter2_1(arg, _Acc, init_arg) -> true; hook_filter2_1(arg1, _Acc, init_arg) -> true; -hook_filter2_1(_, _Acc, _IntArg) -> false. +hook_filter2_1(_, _Acc, _IntArg) -> false. diff --git a/apps/emqx/test/emqx_access_control_SUITE.erl b/apps/emqx/test/emqx_access_control_SUITE.erl index e640b16d7..23c43fa65 100644 --- a/apps/emqx/test/emqx_access_control_SUITE.erl +++ b/apps/emqx/test/emqx_access_control_SUITE.erl @@ -45,17 +45,21 @@ t_authorize(_) -> clientinfo() -> clientinfo(#{}). clientinfo(InitProps) -> - maps:merge(#{zone => default, - listener => {tcp, default}, - protocol => mqtt, - peerhost => {127,0,0,1}, - clientid => <<"clientid">>, - username => <<"username">>, - password => <<"passwd">>, - is_superuser => false, - peercert => undefined, - mountpoint => undefined - }, InitProps). + maps:merge( + #{ + zone => default, + listener => {tcp, default}, + protocol => mqtt, + peerhost => {127, 0, 0, 1}, + clientid => <<"clientid">>, + username => <<"username">>, + password => <<"passwd">>, + is_superuser => false, + peercert => undefined, + mountpoint => undefined + }, + InitProps + ). toggle_auth(Bool) when is_boolean(Bool) -> emqx_config:put_zone_conf(default, [auth, enable], Bool). diff --git a/apps/emqx/test/emqx_alarm_SUITE.erl b/apps/emqx/test/emqx_alarm_SUITE.erl index 556ba738f..a7596f457 100644 --- a/apps/emqx/test/emqx_alarm_SUITE.erl +++ b/apps/emqx/test/emqx_alarm_SUITE.erl @@ -29,15 +29,15 @@ init_per_testcase(t_size_limit, Config) -> emqx_common_test_helpers:boot_modules(all), emqx_common_test_helpers:start_apps([]), {ok, _} = emqx:update_config([alarm], #{ - <<"size_limit">> => 2 - }), + <<"size_limit">> => 2 + }), Config; init_per_testcase(_, Config) -> emqx_common_test_helpers:boot_modules(all), emqx_common_test_helpers:start_apps([]), {ok, _} = emqx:update_config([alarm], #{ - <<"validity_period">> => <<"1s">> - }), + <<"validity_period">> => <<"1s">> + }), Config. end_per_testcase(_, _Config) -> @@ -53,7 +53,9 @@ t_alarm(_) -> ok = emqx_alarm:deactivate(unknown_alarm), {error, not_found} = emqx_alarm:deactivate(unknown_alarm), ?assertEqual({error, not_found}, get_alarm(unknown_alarm, emqx_alarm:get_alarms(activated))), - ?assertNotEqual({error, not_found}, get_alarm(unknown_alarm, emqx_alarm:get_alarms(deactivated))), + ?assertNotEqual( + {error, not_found}, get_alarm(unknown_alarm, emqx_alarm:get_alarms(deactivated)) + ), emqx_alarm:delete_all_deactivated_alarms(), ?assertEqual({error, not_found}, get_alarm(unknown_alarm, emqx_alarm:get_alarms(deactivated))). @@ -64,7 +66,9 @@ t_deactivate_all_alarms(_) -> ?assertNotEqual({error, not_found}, get_alarm(unknown_alarm, emqx_alarm:get_alarms(activated))), emqx_alarm:deactivate_all_alarms(), - ?assertNotEqual({error, not_found}, get_alarm(unknown_alarm, emqx_alarm:get_alarms(deactivated))), + ?assertNotEqual( + {error, not_found}, get_alarm(unknown_alarm, emqx_alarm:get_alarms(deactivated)) + ), emqx_alarm:delete_all_deactivated_alarms(), ?assertEqual({error, not_found}, get_alarm(unknown_alarm, emqx_alarm:get_alarms(deactivated))). 
@@ -130,7 +134,9 @@ t_format(_Config) -> At = erlang:system_time(microsecond), Details = "test_details", Node = node(), - Activate = #activated_alarm{name = Name, message = Message, activate_at = At, details = Details}, + Activate = #activated_alarm{ + name = Name, message = Message, activate_at = At, details = Details + }, #{ node := Node, name := Name, @@ -138,8 +144,13 @@ t_format(_Config) -> duration := 0, details := Details } = emqx_alarm:format(Activate), - Deactivate = #deactivated_alarm{name = Name, message = Message, activate_at = At, details = Details, - deactivate_at = At}, + Deactivate = #deactivated_alarm{ + name = Name, + message = Message, + activate_at = At, + details = Details, + deactivate_at = At + }, #{ node := Node, name := Name, @@ -149,7 +160,6 @@ t_format(_Config) -> } = emqx_alarm:format(Deactivate), ok. - get_alarm(Name, [Alarm = #{name := Name} | _More]) -> Alarm; get_alarm(Name, [_Alarm | More]) -> diff --git a/apps/emqx/test/emqx_authentication_SUITE.erl b/apps/emqx/test/emqx_authentication_SUITE.erl index 1a3ebb5e4..616665def 100644 --- a/apps/emqx/test/emqx_authentication_SUITE.erl +++ b/apps/emqx/test/emqx_authentication_SUITE.erl @@ -28,27 +28,39 @@ -include("emqx_authentication.hrl"). -define(AUTHN, emqx_authentication). --define(config(KEY), (fun() -> {KEY, _V_} = lists:keyfind(KEY, 1, Config), _V_ end)()). +-define(config(KEY), + (fun() -> + {KEY, _V_} = lists:keyfind(KEY, 1, Config), + _V_ + end)() +). -define(CONF_ROOT, ?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME_ATOM). %%------------------------------------------------------------------------------ %% Hocon Schema %%------------------------------------------------------------------------------ -roots() -> [{config, #{type => hoconsc:union([ - hoconsc:ref(?MODULE, type1), - hoconsc:ref(?MODULE, type2)])}}]. +roots() -> + [ + {config, #{ + type => hoconsc:union([ + hoconsc:ref(?MODULE, type1), + hoconsc:ref(?MODULE, type2) + ]) + }} + ]. fields(type1) -> - [ {mechanism, {enum, ['password_based']}} - , {backend, {enum, ['built_in_database']}} - , {enable, fun enable/1} + [ + {mechanism, {enum, ['password_based']}}, + {backend, {enum, ['built_in_database']}}, + {enable, fun enable/1} ]; - fields(type2) -> - [ {mechanism, {enum, ['password_based']}} - , {backend, {enum, ['mysql']}} - , {enable, fun enable/1} + [ + {mechanism, {enum, ['password_based']}}, + {backend, {enum, ['mysql']}}, + {enable, fun enable/1} ]. enable(type) -> boolean(); @@ -61,8 +73,11 @@ enable(_) -> undefined. check_config(C) -> #{config := R} = - hocon_tconf:check_plain(?MODULE, #{<<"config">> => C}, - #{atom_key => true}), + hocon_tconf:check_plain( + ?MODULE, + #{<<"config">> => C}, + #{atom_key => true} + ), R. create(_AuthenticatorID, _Config) -> @@ -98,9 +113,8 @@ end_per_testcase(Case, Config) -> _ = ?MODULE:Case({'end', Config}), ok. - -t_chain({_, Config}) -> Config; - +t_chain({_, Config}) -> + Config; t_chain(Config) when is_list(Config) -> % CRUD of authentication chain ChainName = 'test', @@ -115,28 +129,33 @@ t_chain(Config) when is_list(Config) -> ?assertMatch({error, {not_found, {chain, ChainName}}}, ?AUTHN:lookup_chain(ChainName)), ok. 
- t_authenticator({'init', Config}) -> - [{"auth1", {'password_based', 'built_in_database'}}, - {"auth2", {'password_based', mysql}} | Config]; - + [ + {"auth1", {'password_based', 'built_in_database'}}, + {"auth2", {'password_based', mysql}} + | Config + ]; t_authenticator(Config) when is_list(Config) -> ChainName = 'test', - AuthenticatorConfig1 = #{mechanism => 'password_based', - backend => 'built_in_database', - enable => true}, + AuthenticatorConfig1 = #{ + mechanism => 'password_based', + backend => 'built_in_database', + enable => true + }, % Create an authenticator when the authentication chain does not exist ?assertEqual( - {error, {not_found, {chain, ChainName}}}, - ?AUTHN:create_authenticator(ChainName, AuthenticatorConfig1)), + {error, {not_found, {chain, ChainName}}}, + ?AUTHN:create_authenticator(ChainName, AuthenticatorConfig1) + ), ?AUTHN:create_chain(ChainName), % Create an authenticator when the provider does not exist ?assertEqual( - {error, no_available_provider}, - ?AUTHN:create_authenticator(ChainName, AuthenticatorConfig1)), + {error, no_available_provider}, + ?AUTHN:create_authenticator(ChainName, AuthenticatorConfig1) + ), AuthNType1 = ?config("auth1"), register_provider(AuthNType1, ?MODULE), @@ -144,25 +163,29 @@ t_authenticator(Config) when is_list(Config) -> % CRUD of authencaticator ?assertMatch( - {ok, #{id := ID1, state := #{mark := 1}}}, - ?AUTHN:create_authenticator(ChainName, AuthenticatorConfig1)), + {ok, #{id := ID1, state := #{mark := 1}}}, + ?AUTHN:create_authenticator(ChainName, AuthenticatorConfig1) + ), ?assertMatch({ok, #{id := ID1}}, ?AUTHN:lookup_authenticator(ChainName, ID1)), ?assertMatch({ok, [#{id := ID1}]}, ?AUTHN:list_authenticators(ChainName)), ?assertEqual( - {error, {already_exists, {authenticator, ID1}}}, - ?AUTHN:create_authenticator(ChainName, AuthenticatorConfig1)), + {error, {already_exists, {authenticator, ID1}}}, + ?AUTHN:create_authenticator(ChainName, AuthenticatorConfig1) + ), ?assertMatch( - {ok, #{id := ID1, state := #{mark := 2}}}, - ?AUTHN:update_authenticator(ChainName, ID1, AuthenticatorConfig1)), + {ok, #{id := ID1, state := #{mark := 2}}}, + ?AUTHN:update_authenticator(ChainName, ID1, AuthenticatorConfig1) + ), ?assertEqual(ok, ?AUTHN:delete_authenticator(ChainName, ID1)), ?assertEqual( - {error, {not_found, {authenticator, ID1}}}, - ?AUTHN:update_authenticator(ChainName, ID1, AuthenticatorConfig1)), + {error, {not_found, {authenticator, ID1}}}, + ?AUTHN:update_authenticator(ChainName, ID1, AuthenticatorConfig1) + ), ?assertMatch({ok, []}, ?AUTHN:list_authenticators(ChainName)), @@ -170,17 +193,21 @@ t_authenticator(Config) when is_list(Config) -> AuthNType2 = ?config("auth2"), register_provider(AuthNType2, ?MODULE), ID2 = <<"password_based:mysql">>, - AuthenticatorConfig2 = #{mechanism => 'password_based', - backend => mysql, - enable => true}, + AuthenticatorConfig2 = #{ + mechanism => 'password_based', + backend => mysql, + enable => true + }, ?assertMatch( - {ok, #{id := ID1}}, - ?AUTHN:create_authenticator(ChainName, AuthenticatorConfig1)), + {ok, #{id := ID1}}, + ?AUTHN:create_authenticator(ChainName, AuthenticatorConfig1) + ), ?assertMatch( - {ok, #{id := ID2}}, - ?AUTHN:create_authenticator(ChainName, AuthenticatorConfig2)), + {ok, #{id := ID2}}, + ?AUTHN:create_authenticator(ChainName, AuthenticatorConfig2) + ), % Move authenticator ?assertMatch({ok, [#{id := ID1}, #{id := ID2}]}, ?AUTHN:list_authenticators(ChainName)), @@ -196,179 +223,207 @@ t_authenticator(Config) when is_list(Config) -> ?assertEqual(ok, 
?AUTHN:move_authenticator(ChainName, ID2, ?CMD_MOVE_AFTER(ID1))), ?assertMatch({ok, [#{id := ID1}, #{id := ID2}]}, ?AUTHN:list_authenticators(ChainName)); - t_authenticator({'end', Config}) -> ?AUTHN:delete_chain(test), ?AUTHN:deregister_providers([?config("auth1"), ?config("auth2")]), ok. - t_authenticate({init, Config}) -> - [{listener_id, 'tcp:default'}, - {authn_type, {'password_based', 'built_in_database'}} | Config]; - + [ + {listener_id, 'tcp:default'}, + {authn_type, {'password_based', 'built_in_database'}} + | Config + ]; t_authenticate(Config) when is_list(Config) -> ListenerID = ?config(listener_id), AuthNType = ?config(authn_type), - ClientInfo = #{zone => default, - listener => ListenerID, - protocol => mqtt, - username => <<"good">>, - password => <<"any">>}, + ClientInfo = #{ + zone => default, + listener => ListenerID, + protocol => mqtt, + username => <<"good">>, + password => <<"any">> + }, ?assertEqual({ok, #{is_superuser => false}}, emqx_access_control:authenticate(ClientInfo)), register_provider(AuthNType, ?MODULE), - AuthenticatorConfig = #{mechanism => 'password_based', - backend => 'built_in_database', - enable => true}, + AuthenticatorConfig = #{ + mechanism => 'password_based', + backend => 'built_in_database', + enable => true + }, ?AUTHN:create_chain(ListenerID), ?assertMatch({ok, _}, ?AUTHN:create_authenticator(ListenerID, AuthenticatorConfig)), ?assertEqual( - {ok, #{is_superuser => true}}, - emqx_access_control:authenticate(ClientInfo)), + {ok, #{is_superuser => true}}, + emqx_access_control:authenticate(ClientInfo) + ), ?assertEqual( - {error, bad_username_or_password}, - emqx_access_control:authenticate(ClientInfo#{username => <<"bad">>})); - + {error, bad_username_or_password}, + emqx_access_control:authenticate(ClientInfo#{username => <<"bad">>}) + ); t_authenticate({'end', Config}) -> ?AUTHN:delete_chain(?config(listener_id)), ?AUTHN:deregister_provider(?config(authn_type)), ok. 
- t_update_config({init, Config}) -> Global = 'mqtt:global', AuthNType1 = {'password_based', 'built_in_database'}, AuthNType2 = {'password_based', mysql}, - [{global, Global}, - {"auth1", AuthNType1}, - {"auth2", AuthNType2} | Config]; - + [ + {global, Global}, + {"auth1", AuthNType1}, + {"auth2", AuthNType2} + | Config + ]; t_update_config(Config) when is_list(Config) -> emqx_config_handler:add_handler([?CONF_ROOT], emqx_authentication), ok = register_provider(?config("auth1"), ?MODULE), ok = register_provider(?config("auth2"), ?MODULE), Global = ?config(global), - AuthenticatorConfig1 = #{<<"mechanism">> => <<"password_based">>, - <<"backend">> => <<"built_in_database">>, - <<"enable">> => true}, - AuthenticatorConfig2 = #{<<"mechanism">> => <<"password_based">>, - <<"backend">> => <<"mysql">>, - <<"enable">> => true}, + AuthenticatorConfig1 = #{ + <<"mechanism">> => <<"password_based">>, + <<"backend">> => <<"built_in_database">>, + <<"enable">> => true + }, + AuthenticatorConfig2 = #{ + <<"mechanism">> => <<"password_based">>, + <<"backend">> => <<"mysql">>, + <<"enable">> => true + }, ID1 = <<"password_based:built_in_database">>, ID2 = <<"password_based:mysql">>, ?assertMatch({ok, []}, ?AUTHN:list_chains()), ?assertMatch( - {ok, _}, - update_config([?CONF_ROOT], {create_authenticator, Global, AuthenticatorConfig1})), + {ok, _}, + update_config([?CONF_ROOT], {create_authenticator, Global, AuthenticatorConfig1}) + ), ?assertMatch( - {ok, #{id := ID1, state := #{mark := 1}}}, - ?AUTHN:lookup_authenticator(Global, ID1)), + {ok, #{id := ID1, state := #{mark := 1}}}, + ?AUTHN:lookup_authenticator(Global, ID1) + ), ?assertMatch( - {ok, _}, - update_config([?CONF_ROOT], {create_authenticator, Global, AuthenticatorConfig2})), + {ok, _}, + update_config([?CONF_ROOT], {create_authenticator, Global, AuthenticatorConfig2}) + ), ?assertMatch( - {ok, #{id := ID2, state := #{mark := 1}}}, - ?AUTHN:lookup_authenticator(Global, ID2)), + {ok, #{id := ID2, state := #{mark := 1}}}, + ?AUTHN:lookup_authenticator(Global, ID2) + ), ?assertMatch( - {ok, _}, - update_config([?CONF_ROOT], - {update_authenticator, - Global, - ID1, - AuthenticatorConfig1#{<<"enable">> => false} - })), + {ok, _}, + update_config( + [?CONF_ROOT], + {update_authenticator, Global, ID1, AuthenticatorConfig1#{<<"enable">> => false}} + ) + ), ?assertMatch( - {ok, #{id := ID1, state := #{mark := 2}}}, - ?AUTHN:lookup_authenticator(Global, ID1)), + {ok, #{id := ID1, state := #{mark := 2}}}, + ?AUTHN:lookup_authenticator(Global, ID1) + ), ?assertMatch( - {ok, _}, - update_config([?CONF_ROOT], {move_authenticator, Global, ID2, ?CMD_MOVE_FRONT})), + {ok, _}, + update_config([?CONF_ROOT], {move_authenticator, Global, ID2, ?CMD_MOVE_FRONT}) + ), ?assertMatch({ok, [#{id := ID2}, #{id := ID1}]}, ?AUTHN:list_authenticators(Global)), ?assertMatch({ok, _}, update_config([?CONF_ROOT], {delete_authenticator, Global, ID1})), ?assertEqual( - {error, {not_found, {authenticator, ID1}}}, - ?AUTHN:lookup_authenticator(Global, ID1)), + {error, {not_found, {authenticator, ID1}}}, + ?AUTHN:lookup_authenticator(Global, ID1) + ), ?assertMatch( - {ok, _}, - update_config([?CONF_ROOT], {delete_authenticator, Global, ID2})), + {ok, _}, + update_config([?CONF_ROOT], {delete_authenticator, Global, ID2}) + ), ?assertEqual( - {error, {not_found, {authenticator, ID2}}}, - ?AUTHN:lookup_authenticator(Global, ID2)), + {error, {not_found, {authenticator, ID2}}}, + ?AUTHN:lookup_authenticator(Global, ID2) + ), ListenerID = 'tcp:default', ConfKeyPath = [listeners, tcp, 
default, ?CONF_ROOT], ?assertMatch( - {ok, _}, - update_config(ConfKeyPath, - {create_authenticator, ListenerID, AuthenticatorConfig1})), + {ok, _}, + update_config( + ConfKeyPath, + {create_authenticator, ListenerID, AuthenticatorConfig1} + ) + ), ?assertMatch( - {ok, #{id := ID1, state := #{mark := 1}}}, - ?AUTHN:lookup_authenticator(ListenerID, ID1)), + {ok, #{id := ID1, state := #{mark := 1}}}, + ?AUTHN:lookup_authenticator(ListenerID, ID1) + ), ?assertMatch( - {ok, _}, - update_config(ConfKeyPath, - {create_authenticator, ListenerID, AuthenticatorConfig2})), + {ok, _}, + update_config( + ConfKeyPath, + {create_authenticator, ListenerID, AuthenticatorConfig2} + ) + ), ?assertMatch( - {ok, #{id := ID2, state := #{mark := 1}}}, - ?AUTHN:lookup_authenticator(ListenerID, ID2)), + {ok, #{id := ID2, state := #{mark := 1}}}, + ?AUTHN:lookup_authenticator(ListenerID, ID2) + ), ?assertMatch( - {ok, _}, - update_config(ConfKeyPath, - {update_authenticator, - ListenerID, - ID1, - AuthenticatorConfig1#{<<"enable">> => false} - })), + {ok, _}, + update_config( + ConfKeyPath, + {update_authenticator, ListenerID, ID1, AuthenticatorConfig1#{<<"enable">> => false}} + ) + ), ?assertMatch( - {ok, #{id := ID1, state := #{mark := 2}}}, - ?AUTHN:lookup_authenticator(ListenerID, ID1)), + {ok, #{id := ID1, state := #{mark := 2}}}, + ?AUTHN:lookup_authenticator(ListenerID, ID1) + ), ?assertMatch( - {ok, _}, - update_config(ConfKeyPath, {move_authenticator, ListenerID, ID2, ?CMD_MOVE_FRONT})), + {ok, _}, + update_config(ConfKeyPath, {move_authenticator, ListenerID, ID2, ?CMD_MOVE_FRONT}) + ), ?assertMatch( - {ok, [#{id := ID2}, #{id := ID1}]}, - ?AUTHN:list_authenticators(ListenerID)), + {ok, [#{id := ID2}, #{id := ID1}]}, + ?AUTHN:list_authenticators(ListenerID) + ), ?assertMatch( - {ok, _}, - update_config(ConfKeyPath, {delete_authenticator, ListenerID, ID1})), + {ok, _}, + update_config(ConfKeyPath, {delete_authenticator, ListenerID, ID1}) + ), ?assertEqual( - {error, {not_found, {authenticator, ID1}}}, - ?AUTHN:lookup_authenticator(ListenerID, ID1)); - + {error, {not_found, {authenticator, ID1}}}, + ?AUTHN:lookup_authenticator(ListenerID, ID1) + ); t_update_config({'end', Config}) -> ?AUTHN:delete_chain(?config(global)), ?AUTHN:deregister_providers([?config("auth1"), ?config("auth2")]), ok. - -t_restart({'init', Config}) -> Config; - +t_restart({'init', Config}) -> + Config; t_restart(Config) when is_list(Config) -> ?assertEqual({ok, []}, ?AUTHN:list_chain_names()), @@ -379,43 +434,48 @@ t_restart(Config) when is_list(Config) -> {ok, _} = supervisor:restart_child(emqx_authentication_sup, ?AUTHN), ?assertEqual({ok, [test_chain]}, ?AUTHN:list_chain_names()); - t_restart({'end', _Config}) -> ?AUTHN:delete_chain(test_chain), ok. 
- -t_convert_certs({_, Config}) -> Config; - +t_convert_certs({_, Config}) -> + Config; t_convert_certs(Config) when is_list(Config) -> Global = <<"mqtt:global">>, - Certs = certs([ {<<"keyfile">>, "key.pem"} - , {<<"certfile">>, "cert.pem"} - , {<<"cacertfile">>, "cacert.pem"} - ]), + Certs = certs([ + {<<"keyfile">>, "key.pem"}, + {<<"certfile">>, "cert.pem"}, + {<<"cacertfile">>, "cacert.pem"} + ]), CertsDir = certs_dir(Config, [Global, <<"password_based:built_in_database">>]), #{<<"ssl">> := NCerts} = convert_certs(CertsDir, #{<<"ssl">> => Certs}), - Certs2 = certs([ {<<"keyfile">>, "key.pem"} - , {<<"certfile">>, "cert.pem"} - ]), + Certs2 = certs([ + {<<"keyfile">>, "key.pem"}, + {<<"certfile">>, "cert.pem"} + ]), #{<<"ssl">> := NCerts2} = convert_certs( - CertsDir, - #{<<"ssl">> => Certs2}, #{<<"ssl">> => NCerts}), + CertsDir, + #{<<"ssl">> => Certs2}, + #{<<"ssl">> => NCerts} + ), ?assertEqual(maps:get(<<"keyfile">>, NCerts), maps:get(<<"keyfile">>, NCerts2)), ?assertEqual(maps:get(<<"certfile">>, NCerts), maps:get(<<"certfile">>, NCerts2)), - Certs3 = certs([ {<<"keyfile">>, "client-key.pem"} - , {<<"certfile">>, "client-cert.pem"} - , {<<"cacertfile">>, "cacert.pem"} - ]), + Certs3 = certs([ + {<<"keyfile">>, "client-key.pem"}, + {<<"certfile">>, "client-cert.pem"}, + {<<"cacertfile">>, "cacert.pem"} + ]), #{<<"ssl">> := NCerts3} = convert_certs( - CertsDir, - #{<<"ssl">> => Certs3}, #{<<"ssl">> => NCerts2}), + CertsDir, + #{<<"ssl">> => Certs3}, + #{<<"ssl">> => NCerts2} + ), ?assertNotEqual(maps:get(<<"keyfile">>, NCerts2), maps:get(<<"keyfile">>, NCerts3)), ?assertNotEqual(maps:get(<<"certfile">>, NCerts2), maps:get(<<"certfile">>, NCerts3)), @@ -429,10 +489,14 @@ update_config(Path, ConfigRequest) -> certs(Certs) -> CertsPath = emqx_common_test_helpers:deps_path(emqx, "etc/certs"), - lists:foldl(fun({Key, Filename}, Acc) -> - {ok, Bin} = file:read_file(filename:join([CertsPath, Filename])), - Acc#{Key => Bin} - end, #{}, Certs). + lists:foldl( + fun({Key, Filename}, Acc) -> + {ok, Bin} = file:read_file(filename:join([CertsPath, Filename])), + Acc#{Key => Bin} + end, + #{}, + Certs + ). register_provider(Type, Module) -> ok = ?AUTHN:register_providers([{Type, Module}]). 
diff --git a/apps/emqx/test/emqx_authz_cache_SUITE.erl b/apps/emqx/test/emqx_authz_cache_SUITE.erl index 7512f6c10..ad06e5a6c 100644 --- a/apps/emqx/test/emqx_authz_cache_SUITE.erl +++ b/apps/emqx/test/emqx_authz_cache_SUITE.erl @@ -41,13 +41,15 @@ t_clean_authz_cache(_) -> {ok, _, _} = emqtt:subscribe(Client, <<"t2">>, 0), emqtt:publish(Client, <<"t1">>, <<"{\"x\":1}">>, 0), ct:sleep(100), - ClientPid = case emqx_cm:lookup_channels(<<"emqx_c">>) of - [Pid] when is_pid(Pid) -> - Pid; - Pids when is_list(Pids) -> - lists:last(Pids); - _ -> {error, not_found} - end, + ClientPid = + case emqx_cm:lookup_channels(<<"emqx_c">>) of + [Pid] when is_pid(Pid) -> + Pid; + Pids when is_list(Pids) -> + lists:last(Pids); + _ -> + {error, not_found} + end, Caches = gen_server:call(ClientPid, list_authz_cache), ct:log("authz caches: ~p", [Caches]), ?assert(length(Caches) > 0), @@ -61,13 +63,15 @@ t_drain_authz_cache(_) -> {ok, _, _} = emqtt:subscribe(Client, <<"t2">>, 0), emqtt:publish(Client, <<"t1">>, <<"{\"x\":1}">>, 0), ct:sleep(100), - ClientPid = case emqx_cm:lookup_channels(<<"emqx_c">>) of - [Pid] when is_pid(Pid) -> - Pid; - Pids when is_list(Pids) -> - lists:last(Pids); - _ -> {error, not_found} - end, + ClientPid = + case emqx_cm:lookup_channels(<<"emqx_c">>) of + [Pid] when is_pid(Pid) -> + Pid; + Pids when is_list(Pids) -> + lists:last(Pids); + _ -> + {error, not_found} + end, Caches = gen_server:call(ClientPid, list_authz_cache), ct:log("authz caches: ~p", [Caches]), ?assert(length(Caches) > 0), diff --git a/apps/emqx/test/emqx_authz_test_mod.erl b/apps/emqx/test/emqx_authz_test_mod.erl index 8a2317c25..79c8f865a 100644 --- a/apps/emqx/test/emqx_authz_test_mod.erl +++ b/apps/emqx/test/emqx_authz_test_mod.erl @@ -17,10 +17,11 @@ -module(emqx_authz_test_mod). %% Authorization callbacks --export([ init/1 - , authorize/2 - , description/0 - ]). +-export([ + init/1, + authorize/2, + description/0 +]). init(AuthzOpts) -> {ok, AuthzOpts}. @@ -30,4 +31,3 @@ authorize({_User, _PubSub, _Topic}, _State) -> description() -> "Test Authorization Mod". - diff --git a/apps/emqx/test/emqx_banned_SUITE.erl b/apps/emqx/test/emqx_banned_SUITE.erl index 8e595b928..e983edb42 100644 --- a/apps/emqx/test/emqx_banned_SUITE.erl +++ b/apps/emqx/test/emqx_banned_SUITE.erl @@ -35,12 +35,13 @@ end_per_suite(_Config) -> mria_mnesia:delete_schema(). 
t_add_delete(_) -> - Banned = #banned{who = {clientid, <<"TestClient">>}, - by = <<"banned suite">>, - reason = <<"test">>, - at = erlang:system_time(second), - until = erlang:system_time(second) + 1 - }, + Banned = #banned{ + who = {clientid, <<"TestClient">>}, + by = <<"banned suite">>, + reason = <<"test">>, + at = erlang:system_time(second), + until = erlang:system_time(second) + 1 + }, {ok, _} = emqx_banned:create(Banned), {error, {already_exist, Banned}} = emqx_banned:create(Banned), ?assertEqual(1, emqx_banned:info(size)), @@ -54,24 +55,28 @@ t_add_delete(_) -> t_check(_) -> {ok, _} = emqx_banned:create(#banned{who = {clientid, <<"BannedClient">>}}), {ok, _} = emqx_banned:create(#banned{who = {username, <<"BannedUser">>}}), - {ok, _} = emqx_banned:create(#banned{who = {peerhost, {192,168,0,1}}}), + {ok, _} = emqx_banned:create(#banned{who = {peerhost, {192, 168, 0, 1}}}), ?assertEqual(3, emqx_banned:info(size)), - ClientInfo1 = #{clientid => <<"BannedClient">>, - username => <<"user">>, - peerhost => {127,0,0,1} - }, - ClientInfo2 = #{clientid => <<"client">>, - username => <<"BannedUser">>, - peerhost => {127,0,0,1} - }, - ClientInfo3 = #{clientid => <<"client">>, - username => <<"user">>, - peerhost => {192,168,0,1} - }, - ClientInfo4 = #{clientid => <<"client">>, - username => <<"user">>, - peerhost => {127,0,0,1} - }, + ClientInfo1 = #{ + clientid => <<"BannedClient">>, + username => <<"user">>, + peerhost => {127, 0, 0, 1} + }, + ClientInfo2 = #{ + clientid => <<"client">>, + username => <<"BannedUser">>, + peerhost => {127, 0, 0, 1} + }, + ClientInfo3 = #{ + clientid => <<"client">>, + username => <<"user">>, + peerhost => {192, 168, 0, 1} + }, + ClientInfo4 = #{ + clientid => <<"client">>, + username => <<"user">>, + peerhost => {127, 0, 0, 1} + }, ClientInfo5 = #{}, ClientInfo6 = #{clientid => <<"client1">>}, ?assert(emqx_banned:check(ClientInfo1)), @@ -82,7 +87,7 @@ t_check(_) -> ?assertNot(emqx_banned:check(ClientInfo6)), ok = emqx_banned:delete({clientid, <<"BannedClient">>}), ok = emqx_banned:delete({username, <<"BannedUser">>}), - ok = emqx_banned:delete({peerhost, {192,168,0,1}}), + ok = emqx_banned:delete({peerhost, {192, 168, 0, 1}}), ?assertNot(emqx_banned:check(ClientInfo1)), ?assertNot(emqx_banned:check(ClientInfo2)), ?assertNot(emqx_banned:check(ClientInfo3)), @@ -91,12 +96,17 @@ t_check(_) -> t_unused(_) -> {ok, Banned} = emqx_banned:start_link(), - {ok, _} = emqx_banned:create(#banned{who = {clientid, <<"BannedClient1">>}, - until = erlang:system_time(second)}), - {ok, _} = emqx_banned:create(#banned{who = {clientid, <<"BannedClient2">>}, - until = erlang:system_time(second) - 1}), + {ok, _} = emqx_banned:create(#banned{ + who = {clientid, <<"BannedClient1">>}, + until = erlang:system_time(second) + }), + {ok, _} = emqx_banned:create(#banned{ + who = {clientid, <<"BannedClient2">>}, + until = erlang:system_time(second) - 1 + }), ?assertEqual(ignored, gen_server:call(Banned, unexpected_req)), ?assertEqual(ok, gen_server:cast(Banned, unexpected_msg)), ?assertEqual(ok, Banned ! ok), - timer:sleep(500), %% expiry timer + %% expiry timer + timer:sleep(500), ok = emqx_banned:stop(). diff --git a/apps/emqx/test/emqx_batch_SUITE.erl b/apps/emqx/test/emqx_batch_SUITE.erl index c458cd8b6..e0f6d733c 100644 --- a/apps/emqx/test/emqx_batch_SUITE.erl +++ b/apps/emqx/test/emqx_batch_SUITE.erl @@ -24,10 +24,11 @@ all() -> emqx_common_test_helpers:all(?MODULE). 
t_batch_full_commit(_) -> - B0 = emqx_batch:init(#{batch_size => 3, - linger_ms => 2000, - commit_fun => fun(_) -> ok end - }), + B0 = emqx_batch:init(#{ + batch_size => 3, + linger_ms => 2000, + commit_fun => fun(_) -> ok end + }), B3 = lists:foldl(fun(E, B) -> emqx_batch:push(E, B) end, B0, [a, b, c]), ?assertEqual(3, emqx_batch:size(B3)), ?assertEqual([a, b, c], emqx_batch:items(B3)), @@ -38,10 +39,11 @@ t_batch_full_commit(_) -> t_batch_linger_commit(_) -> CommitFun = fun(Q) -> ?assertEqual(3, length(Q)) end, - B0 = emqx_batch:init(#{batch_size => 3, - linger_ms => 500, - commit_fun => CommitFun - }), + B0 = emqx_batch:init(#{ + batch_size => 3, + linger_ms => 500, + commit_fun => CommitFun + }), B3 = lists:foldl(fun(E, B) -> emqx_batch:push(E, B) end, B0, [a, b, c]), ?assertEqual(3, emqx_batch:size(B3)), ?assertEqual([a, b, c], emqx_batch:items(B3)), @@ -50,8 +52,6 @@ t_batch_linger_commit(_) -> B4 = emqx_batch:commit(B3), ?assertEqual(0, emqx_batch:size(B4)), ?assertEqual([], emqx_batch:items(B4)) - after - 1000 -> - error(linger_timer_not_triggered) + after 1000 -> + error(linger_timer_not_triggered) end. - diff --git a/apps/emqx/test/emqx_boot_SUITE.erl b/apps/emqx/test/emqx_boot_SUITE.erl index 1521996dd..028b1d508 100644 --- a/apps/emqx/test/emqx_boot_SUITE.erl +++ b/apps/emqx/test/emqx_boot_SUITE.erl @@ -40,4 +40,3 @@ t_is_enabled(_) -> ?assert(emqx_boot:is_enabled(router)), ?assert(emqx_boot:is_enabled(broker)), ?assert(emqx_boot:is_enabled(listeners)). - diff --git a/apps/emqx/test/emqx_bpapi_SUITE.erl b/apps/emqx/test/emqx_bpapi_SUITE.erl index 7dffd730b..52f9be538 100644 --- a/apps/emqx/test/emqx_bpapi_SUITE.erl +++ b/apps/emqx/test/emqx_bpapi_SUITE.erl @@ -54,11 +54,12 @@ t_announce(Config) -> ?assertMatch(2, emqx_bpapi:supported_version(api1)). fake_records() -> - [ #?TAB{key = {'fake-node@localhost', api1}, version = 2} - , #?TAB{key = {'fake-node2@localhost', api1}, version = 2} - , #?TAB{key = {?multicall, api1}, version = 2} + [ + #?TAB{key = {'fake-node@localhost', api1}, version = 2}, + #?TAB{key = {'fake-node2@localhost', api1}, version = 2}, + #?TAB{key = {?multicall, api1}, version = 2}, - , #?TAB{key = {'fake-node@localhost', api2}, version = 2} - , #?TAB{key = {'fake-node2@localhost', api2}, version = 3} - , #?TAB{key = {?multicall, api2}, version = 2} + #?TAB{key = {'fake-node@localhost', api2}, version = 2}, + #?TAB{key = {'fake-node2@localhost', api2}, version = 3}, + #?TAB{key = {?multicall, api2}, version = 2} ]. diff --git a/apps/emqx/test/emqx_bpapi_static_checks.erl b/apps/emqx/test/emqx_bpapi_static_checks.erl index 3b2d8b2ba..124c06896 100644 --- a/apps/emqx/test/emqx_bpapi_static_checks.erl +++ b/apps/emqx/test/emqx_bpapi_static_checks.erl @@ -21,23 +21,28 @@ %% Using an undocumented API here :( -include_lib("dialyzer/src/dialyzer.hrl"). --type api_dump() :: #{{emqx_bpapi:api(), emqx_bpapi:api_version()} => - #{ calls := [emqx_bpapi:rpc()] - , casts := [emqx_bpapi:rpc()] - }}. +-type api_dump() :: #{ + {emqx_bpapi:api(), emqx_bpapi:api_version()} => + #{ + calls := [emqx_bpapi:rpc()], + casts := [emqx_bpapi:rpc()] + } +}. -type dialyzer_spec() :: {_Type, [_Type]}. -type dialyzer_dump() :: #{mfa() => dialyzer_spec()}. --type fulldump() :: #{ api => api_dump() - , signatures => dialyzer_dump() - , release => string() - }. +-type fulldump() :: #{ + api => api_dump(), + signatures => dialyzer_dump(), + release => string() +}. --type dump_options() :: #{ reldir := file:name() - , plt := file:name() - }. 
+-type dump_options() :: #{ + reldir := file:name(), + plt := file:name() +}. -type param_types() :: #{emqx_bpapi:var_name() => _Type}. @@ -47,18 +52,25 @@ %% List of known RPC backend modules: -define(RPC_MODULES, "gen_rpc, erpc, rpc, emqx_rpc"). %% List of known functions also known to do RPC: --define(RPC_FUNCTIONS, "emqx_cluster_rpc:multicall/3, emqx_cluster_rpc:multicall/5, " - "emqx_plugin_libs_rule:cluster_call/3"). +-define(RPC_FUNCTIONS, + "emqx_cluster_rpc:multicall/3, emqx_cluster_rpc:multicall/5, " + "emqx_plugin_libs_rule:cluster_call/3" +). %% List of functions in the RPC backend modules that we can ignore: --define(IGNORED_RPC_CALLS, "gen_rpc:nodes/0, emqx_rpc:unwrap_erpc/1, rpc:pmap/3"). % TODO: handle pmap + +% TODO: handle pmap +-define(IGNORED_RPC_CALLS, "gen_rpc:nodes/0, emqx_rpc:unwrap_erpc/1, rpc:pmap/3"). %% List of business-layer functions that are exempt from the checks: -define(EXEMPTIONS, - "emqx_mgmt_api:do_query/6," % Reason: legacy code. A fun and a QC query are - % passed in the args, it's futile to try to statically - % check it - "emqx_plugin_libs_rule:cluster_call/3" % Reason: some sort of external plugin API that we - % don't want to break? - ). + % Reason: legacy code. A fun and a QC query are + "emqx_mgmt_api:do_query/6," + % passed in the args, it's futile to try to statically + % check it + + % Reason: some sort of external plugin API that we + "emqx_plugin_libs_rule:cluster_call/3" + % don't want to break? +). -define(XREF, myxref). @@ -87,11 +99,13 @@ run() -> -spec check_compat([file:filename()]) -> boolean(). check_compat(DumpFilenames) -> put(bpapi_ok, true), - Dumps = lists:map(fun(FN) -> - {ok, [Dump]} = file:consult(FN), - Dump - end, - DumpFilenames), + Dumps = lists:map( + fun(FN) -> + {ok, [Dump]} = file:consult(FN), + Dump + end, + DumpFilenames + ), [check_compat(I, J) || I <- Dumps, J <- Dumps], erase(bpapi_ok). @@ -104,76 +118,98 @@ check_compat(Dump1, Dump2) -> %% It's not allowed to change BPAPI modules. Check that no changes %% have been made. (sets nok flag) -spec check_api_immutability(fulldump(), fulldump()) -> ok. -check_api_immutability(#{release := Rel1, api := APIs1}, #{release := Rel2, api := APIs2}) - when Rel2 >= Rel1 -> +check_api_immutability(#{release := Rel1, api := APIs1}, #{release := Rel2, api := APIs2}) when + Rel2 >= Rel1 +-> %% TODO: Handle API deprecation _ = maps:map( - fun(Key = {API, Version}, Val) -> - case maps:get(Key, APIs2, undefined) of - Val -> - ok; - undefined -> - setnok(), - logger:error("API ~p v~p was removed in release ~p without being deprecated.", - [API, Version, Rel2]); - _Val -> - setnok(), - logger:error("API ~p v~p was changed between ~p and ~p. Backplane API should be immutable.", - [API, Version, Rel1, Rel2]) - end - end, - APIs1), + fun(Key = {API, Version}, Val) -> + case maps:get(Key, APIs2, undefined) of + Val -> + ok; + undefined -> + setnok(), + logger:error( + "API ~p v~p was removed in release ~p without being deprecated.", + [API, Version, Rel2] + ); + _Val -> + setnok(), + logger:error( + "API ~p v~p was changed between ~p and ~p. Backplane API should be immutable.", + [API, Version, Rel1, Rel2] + ) + end + end, + APIs1 + ), ok; check_api_immutability(_, _) -> ok. %% Note: sets nok flag -spec typecheck_apis(fulldump(), fulldump()) -> ok. 
-typecheck_apis( #{release := CallerRelease, api := CallerAPIs, signatures := CallerSigs} - , #{release := CalleeRelease, signatures := CalleeSigs} - ) -> - AllCalls = lists:flatten([[Calls, Casts] - || #{calls := Calls, casts := Casts} <- maps:values(CallerAPIs)]), - lists:foreach(fun({From, To}) -> - Caller = get_param_types(CallerSigs, From), - Callee = get_param_types(CalleeSigs, To), - %% TODO: check return types - case typecheck_rpc(Caller, Callee) of - [] -> - ok; - TypeErrors -> - setnok(), - [logger:error( - "Incompatible RPC call: " - "type of the parameter ~p of RPC call ~s on release ~p " - "is not a subtype of the target function ~s on release ~p.~n" - "Caller type: ~s~nCallee type: ~s~n", - [Var, format_call(From), CallerRelease, - format_call(To), CalleeRelease, - erl_types:t_to_string(CallerType), - erl_types:t_to_string(CalleeType)]) - || {Var, CallerType, CalleeType} <- TypeErrors] - end - end, - AllCalls). +typecheck_apis( + #{release := CallerRelease, api := CallerAPIs, signatures := CallerSigs}, + #{release := CalleeRelease, signatures := CalleeSigs} +) -> + AllCalls = lists:flatten([ + [Calls, Casts] + || #{calls := Calls, casts := Casts} <- maps:values(CallerAPIs) + ]), + lists:foreach( + fun({From, To}) -> + Caller = get_param_types(CallerSigs, From), + Callee = get_param_types(CalleeSigs, To), + %% TODO: check return types + case typecheck_rpc(Caller, Callee) of + [] -> + ok; + TypeErrors -> + setnok(), + [ + logger:error( + "Incompatible RPC call: " + "type of the parameter ~p of RPC call ~s on release ~p " + "is not a subtype of the target function ~s on release ~p.~n" + "Caller type: ~s~nCallee type: ~s~n", + [ + Var, + format_call(From), + CallerRelease, + format_call(To), + CalleeRelease, + erl_types:t_to_string(CallerType), + erl_types:t_to_string(CalleeType) + ] + ) + || {Var, CallerType, CalleeType} <- TypeErrors + ] + end + end, + AllCalls + ). -spec typecheck_rpc(param_types(), param_types()) -> [{emqx_bpapi:var_name(), _Type, _Type}]. typecheck_rpc(Caller, Callee) -> - maps:fold(fun(Var, CalleeType, Acc) -> - #{Var := CallerType} = Caller, - case erl_types:t_is_subtype(CallerType, CalleeType) of - true -> Acc; - false -> [{Var, CallerType, CalleeType}|Acc] - end - end, - [], - Callee). + maps:fold( + fun(Var, CalleeType, Acc) -> + #{Var := CallerType} = Caller, + case erl_types:t_is_subtype(CallerType, CalleeType) of + true -> Acc; + false -> [{Var, CallerType, CalleeType} | Acc] + end + end, + [], + Callee + ). -spec get_param_types(dialyzer_dump(), emqx_bpapi:call()) -> param_types(). get_param_types(Signatures, {M, F, A}) -> Arity = length(A), #{{M, F, Arity} := {_RetType, AttrTypes}} = Signatures, - Arity = length(AttrTypes), % assert + % assert + Arity = length(AttrTypes), maps:from_list(lists:zip(A, AttrTypes)). %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% @@ -181,13 +217,17 @@ get_param_types(Signatures, {M, F, A}) -> %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% dump() -> - case { filelib:wildcard(project_root_dir() ++ "/*_plt") - , filelib:wildcard(project_root_dir() ++ "/_build/emqx*/lib") - } of - {[PLT|_], [RelDir|_]} -> - dump(#{ plt => PLT - , reldir => RelDir - }); + case + { + filelib:wildcard(project_root_dir() ++ "/*_plt"), + filelib:wildcard(project_root_dir() ++ "/_build/emqx*/lib") + } + of + {[PLT | _], [RelDir | _]} -> + dump(#{ + plt => PLT, + reldir => RelDir + }); _ -> error("failed to guess run options") end. 
@@ -203,7 +243,7 @@ dump(Opts) -> warn_nonbpapi_rpcs(NonBPAPICalls), APIDump = collect_bpapis(BPAPICalls), DialyzerDump = collect_signatures(PLT, APIDump), - [Release|_] = string:split(emqx_app:get_release(), "-"), + [Release | _] = string:split(emqx_app:get_release(), "-"), dump_api(#{api => APIDump, signatures => DialyzerDump, release => Release}), dump_versions(APIDump), xref:stop(?XREF), @@ -221,8 +261,21 @@ prepare(#{reldir := RelDir, plt := PLT}) -> dialyzer_plt:from_file(PLT). find_remote_calls(_Opts) -> - Query = "XC | (A - [" ?IGNORED_APPS "]:App - [" ?IGNORED_MODULES "]:Mod - [" ?EXEMPTIONS "]) - || (([" ?RPC_MODULES "] : Mod + [" ?RPC_FUNCTIONS "]) - [" ?IGNORED_RPC_CALLS "])", + Query = + "XC | (A - [" + ?IGNORED_APPS + "]:App - [" + ?IGNORED_MODULES + "]:Mod - [" + ?EXEMPTIONS + "])\n" + " || (([" + ?RPC_MODULES + "] : Mod + [" + ?RPC_FUNCTIONS + "]) - [" + ?IGNORED_RPC_CALLS + "])", {ok, Calls} = xref:q(?XREF, Query), logger:info("Calls to RPC modules ~p", [Calls]), {Callers, _Callees} = lists:unzip(Calls), @@ -233,18 +286,23 @@ warn_nonbpapi_rpcs([]) -> ok; warn_nonbpapi_rpcs(L) -> setnok(), - lists:foreach(fun({M, F, A}) -> - logger:error("~p:~p/~p does a remote call outside of a dedicated " - "backplane API module. " - "It may break during rolling cluster upgrade", [M, F, A]) - end, - L). + lists:foreach( + fun({M, F, A}) -> + logger:error( + "~p:~p/~p does a remote call outside of a dedicated " + "backplane API module. " + "It may break during rolling cluster upgrade", + [M, F, A] + ) + end, + L + ). -spec is_bpapi_call(mfa()) -> boolean(). is_bpapi_call({Module, _Function, _Arity}) -> case catch Module:bpapi_meta() of #{api := _} -> true; - _ -> false + _ -> false end. -spec dump_api(fulldump()) -> ok. @@ -259,58 +317,70 @@ dump_versions(APIs) -> logger:notice("Dumping API versions to ~p", [Filename]), ok = filelib:ensure_dir(Filename), {ok, FD} = file:open(Filename, [write]), - lists:foreach(fun(API) -> - ok = io:format(FD, "~p.~n", [API]) - end, - lists:sort(maps:keys(APIs))), + lists:foreach( + fun(API) -> + ok = io:format(FD, "~p.~n", [API]) + end, + lists:sort(maps:keys(APIs)) + ), file:close(FD). -spec collect_bpapis([mfa()]) -> api_dump(). collect_bpapis(L) -> Modules = lists:usort([M || {M, _F, _A} <- L]), - lists:foldl(fun(Mod, Acc) -> - #{ api := API - , version := Vsn - , calls := Calls - , casts := Casts - } = Mod:bpapi_meta(), - Acc#{{API, Vsn} => #{ calls => Calls - , casts => Casts - }} - end, - #{}, - Modules). + lists:foldl( + fun(Mod, Acc) -> + #{ + api := API, + version := Vsn, + calls := Calls, + casts := Casts + } = Mod:bpapi_meta(), + Acc#{ + {API, Vsn} => #{ + calls => Calls, + casts => Casts + } + } + end, + #{}, + Modules + ). -spec collect_signatures(_PLT, api_dump()) -> dialyzer_dump(). collect_signatures(PLT, APIs) -> - maps:fold(fun(_APIAndVersion, #{calls := Calls, casts := Casts}, Acc0) -> - Acc1 = lists:foldl(fun enrich/2, {Acc0, PLT}, Calls), - {Acc, PLT} = lists:foldl(fun enrich/2, Acc1, Casts), - Acc - end, - #{}, - APIs). + maps:fold( + fun(_APIAndVersion, #{calls := Calls, casts := Casts}, Acc0) -> + Acc1 = lists:foldl(fun enrich/2, {Acc0, PLT}, Calls), + {Acc, PLT} = lists:foldl(fun enrich/2, Acc1, Casts), + Acc + end, + #{}, + APIs + ). %% Add information about the call types from the PLT -spec enrich(emqx_bpapi:rpc(), {dialyzer_dump(), _PLT}) -> {dialyzer_dump(), _PLT}. 
enrich({From0, To0}, {Acc0, PLT}) -> From = call_to_mfa(From0), - To = call_to_mfa(To0), + To = call_to_mfa(To0), case {dialyzer_plt:lookup_contract(PLT, From), dialyzer_plt:lookup(PLT, To)} of {{value, #contract{args = FromArgs}}, {value, TTo}} -> %% TODO: Check return type FromRet = erl_types:t_any(), - Acc = Acc0#{ From => {FromRet, FromArgs} - , To => TTo - }, + Acc = Acc0#{ + From => {FromRet, FromArgs}, + To => TTo + }, {Acc, PLT}; {{value, _}, none} -> setnok(), logger:critical( - "Backplane API function ~s calls a missing remote function ~s", - [format_call(From0), format_call(To0)]), + "Backplane API function ~s calls a missing remote function ~s", + [format_call(From0), format_call(To0)] + ), error(missing_target) - end. + end. -spec call_to_mfa(emqx_bpapi:call()) -> mfa(). call_to_mfa({M, F, A}) -> diff --git a/apps/emqx/test/emqx_broker_SUITE.erl b/apps/emqx/test/emqx_broker_SUITE.erl index 67525d2b7..f6e122059 100644 --- a/apps/emqx/test/emqx_broker_SUITE.erl +++ b/apps/emqx/test/emqx_broker_SUITE.erl @@ -29,26 +29,30 @@ -include_lib("emqx/include/emqx_mqtt.hrl"). all() -> - [ {group, all_cases} - , {group, connected_client_count_group} + [ + {group, all_cases}, + {group, connected_client_count_group} ]. groups() -> TCs = emqx_common_test_helpers:all(?MODULE), - ConnClientTCs = [ t_connected_client_count_persistent - , t_connected_client_count_anonymous - , t_connected_client_count_transient_takeover - , t_connected_client_stats - ], + ConnClientTCs = [ + t_connected_client_count_persistent, + t_connected_client_count_anonymous, + t_connected_client_count_transient_takeover, + t_connected_client_stats + ], OtherTCs = TCs -- ConnClientTCs, - [ {all_cases, [], OtherTCs} - , {connected_client_count_group, [ {group, tcp} - , {group, ws} - , {group, quic} - ]} - , {tcp, [], ConnClientTCs} - , {ws, [], ConnClientTCs} - , {quic, [], ConnClientTCs} + [ + {all_cases, [], OtherTCs}, + {connected_client_count_group, [ + {group, tcp}, + {group, ws}, + {group, quic} + ]}, + {tcp, [], ConnClientTCs}, + {ws, [], ConnClientTCs}, + {quic, [], ConnClientTCs} ]. init_per_group(connected_client_count_group, Config) -> @@ -60,19 +64,22 @@ init_per_group(tcp, Config) -> init_per_group(ws, Config) -> emqx_common_test_helpers:boot_modules(all), emqx_common_test_helpers:start_apps([]), - [ {ssl, false} - , {enable_websocket, true} - , {conn_fun, ws_connect} - , {port, 8083} - , {host, "localhost"} - | Config + [ + {ssl, false}, + {enable_websocket, true}, + {conn_fun, ws_connect}, + {port, 8083}, + {host, "localhost"} + | Config ]; init_per_group(quic, Config) -> emqx_common_test_helpers:boot_modules(all), emqx_common_test_helpers:start_apps([]), - [ {conn_fun, quic_connect} - , {port, 14567} - | Config]; + [ + {conn_fun, quic_connect}, + {port, 14567} + | Config + ]; init_per_group(_Group, Config) -> emqx_common_test_helpers:boot_modules(all), emqx_common_test_helpers:start_apps([]), @@ -102,31 +109,29 @@ end_per_testcase(Case, Config) -> t_stats_fun({init, Config}) -> Parent = self(), F = fun Loop() -> - N1 = emqx_stats:getstat('subscribers.count'), - N2 = emqx_stats:getstat('subscriptions.count'), - N3 = emqx_stats:getstat('suboptions.count'), - case N1 + N2 + N3 =:= 0 of - true -> - Parent ! {ready, self()}, - exit(normal); - false -> - receive - stop -> - exit(normal) - after - 100 -> - Loop() - end + N1 = emqx_stats:getstat('subscribers.count'), + N2 = emqx_stats:getstat('subscriptions.count'), + N3 = emqx_stats:getstat('suboptions.count'), + case N1 + N2 + N3 =:= 0 of + true -> + Parent ! 
{ready, self()}, + exit(normal); + false -> + receive + stop -> + exit(normal) + after 100 -> + Loop() end - end, + end + end, Pid = spawn_link(F), receive - {ready, P} when P =:= Pid-> + {ready, P} when P =:= Pid -> Config - after - 5000 -> - Pid ! stop, - ct:fail("timedout_waiting_for_sub_stats_to_reach_zero") + after 5000 -> + Pid ! stop, + ct:fail("timedout_waiting_for_sub_stats_to_reach_zero") end; t_stats_fun(Config) when is_list(Config) -> ok = emqx_broker:subscribe(<<"topic">>, <<"clientid">>), @@ -163,25 +168,34 @@ t_subscribed_2(Config) when is_list(Config) -> t_subscribed_2({'end', _Config}) -> emqx_broker:unsubscribe(<<"topic">>). -t_subopts({init, Config}) -> Config; +t_subopts({init, Config}) -> + Config; t_subopts(Config) when is_list(Config) -> ?assertEqual(false, emqx_broker:set_subopts(<<"topic">>, #{qos => 1})), ?assertEqual(undefined, emqx_broker:get_subopts(self(), <<"topic">>)), ?assertEqual(undefined, emqx_broker:get_subopts(<<"clientid">>, <<"topic">>)), emqx_broker:subscribe(<<"topic">>, <<"clientid">>, #{qos => 1}), timer:sleep(200), - ?assertEqual(#{nl => 0, qos => 1, rap => 0, rh => 0, subid => <<"clientid">>}, - emqx_broker:get_subopts(self(), <<"topic">>)), - ?assertEqual(#{nl => 0, qos => 1, rap => 0, rh => 0, subid => <<"clientid">>}, - emqx_broker:get_subopts(<<"clientid">>,<<"topic">>)), + ?assertEqual( + #{nl => 0, qos => 1, rap => 0, rh => 0, subid => <<"clientid">>}, + emqx_broker:get_subopts(self(), <<"topic">>) + ), + ?assertEqual( + #{nl => 0, qos => 1, rap => 0, rh => 0, subid => <<"clientid">>}, + emqx_broker:get_subopts(<<"clientid">>, <<"topic">>) + ), emqx_broker:subscribe(<<"topic">>, <<"clientid">>, #{qos => 2}), - ?assertEqual(#{nl => 0, qos => 2, rap => 0, rh => 0, subid => <<"clientid">>}, - emqx_broker:get_subopts(self(), <<"topic">>)), + ?assertEqual( + #{nl => 0, qos => 2, rap => 0, rh => 0, subid => <<"clientid">>}, + emqx_broker:get_subopts(self(), <<"topic">>) + ), ?assertEqual(true, emqx_broker:set_subopts(<<"topic">>, #{qos => 0})), - ?assertEqual(#{nl => 0, qos => 0, rap => 0, rh => 0, subid => <<"clientid">>}, - emqx_broker:get_subopts(self(), <<"topic">>)); + ?assertEqual( + #{nl => 0, qos => 0, rap => 0, rh => 0, subid => <<"clientid">>}, + emqx_broker:get_subopts(self(), <<"topic">>) + ); t_subopts({'end', _Config}) -> emqx_broker:unsubscribe(<<"topic">>). @@ -194,12 +208,19 @@ t_topics(Config) when is_list(Config) -> ok = emqx_broker:subscribe(T2, <<"clientId">>), ok = emqx_broker:subscribe(T3, <<"clientId">>), Topics1 = emqx_broker:topics(), - ?assertEqual(true, lists:foldl(fun(Topic, Acc) -> - case lists:member(Topic, Topics1) of - true -> Acc; - false -> false - end - end, true, Topics)); + ?assertEqual( + true, + lists:foldl( + fun(Topic, Acc) -> + case lists:member(Topic, Topics1) of + true -> Acc; + false -> false + end + end, + true, + Topics + ) + ); t_topics({'end', Config}) -> Topics = proplists:get_value(topics, Config), lists:foreach(fun(T) -> emqx_broker:unsubscribe(T) end, Topics). 
@@ -217,10 +238,14 @@ t_subscriptions({init, Config}) -> Config; t_subscriptions(Config) when is_list(Config) -> ct:sleep(100), - ?assertEqual(#{nl => 0, qos => 1, rap => 0, rh => 0, subid => <<"clientid">>}, - proplists:get_value(<<"topic">>, emqx_broker:subscriptions(self()))), - ?assertEqual(#{nl => 0, qos => 1, rap => 0, rh => 0, subid => <<"clientid">>}, - proplists:get_value(<<"topic">>, emqx_broker:subscriptions(<<"clientid">>))); + ?assertEqual( + #{nl => 0, qos => 1, rap => 0, rh => 0, subid => <<"clientid">>}, + proplists:get_value(<<"topic">>, emqx_broker:subscriptions(self())) + ), + ?assertEqual( + #{nl => 0, qos => 1, rap => 0, rh => 0, subid => <<"clientid">>}, + proplists:get_value(<<"topic">>, emqx_broker:subscriptions(<<"clientid">>)) + ); t_subscriptions({'end', _Config}) -> emqx_broker:unsubscribe(<<"topic">>). @@ -238,12 +263,15 @@ t_sub_pub(Config) when is_list(Config) -> false after 100 -> false - end); + end + ); t_sub_pub({'end', _Config}) -> ok = emqx_broker:unsubscribe(<<"topic">>). -t_nosub_pub({init, Config}) -> Config; -t_nosub_pub({'end', _Config}) -> ok; +t_nosub_pub({init, Config}) -> + Config; +t_nosub_pub({'end', _Config}) -> + ok; t_nosub_pub(Config) when is_list(Config) -> ?assertEqual(0, emqx_metrics:val('messages.dropped')), emqx_broker:publish(emqx_message:make(ct, <<"topic">>, <<"hello">>)), @@ -255,20 +283,24 @@ t_shared_subscribe({init, Config}) -> Config; t_shared_subscribe(Config) when is_list(Config) -> emqx_broker:safe_publish(emqx_message:make(ct, <<"topic">>, <<"hello">>)), - ?assert(receive - {deliver, <<"topic">>, #message{payload = <<"hello">>}} -> - true; - Msg -> - ct:pal("Msg: ~p", [Msg]), - false - after 100 -> + ?assert( + receive + {deliver, <<"topic">>, #message{payload = <<"hello">>}} -> + true; + Msg -> + ct:pal("Msg: ~p", [Msg]), false - end); + after 100 -> + false + end + ); t_shared_subscribe({'end', _Config}) -> emqx_broker:unsubscribe(<<"$share/group/topic">>). -t_shared_subscribe_2({init, Config}) -> Config; -t_shared_subscribe_2({'end', _Config}) -> ok; +t_shared_subscribe_2({init, Config}) -> + Config; +t_shared_subscribe_2({'end', _Config}) -> + ok; t_shared_subscribe_2(_) -> {ok, ConnPid} = emqtt:start_link([{clean_start, true}, {clientid, <<"clientid">>}]), {ok, _} = emqtt:connect(ConnPid), @@ -282,16 +314,26 @@ t_shared_subscribe_2(_) -> ok = emqtt:publish(ConnPid, <<"topic">>, <<"hello">>, 0), Msgs = recv_msgs(2), ?assertEqual(2, length(Msgs)), - ?assertEqual(true, lists:foldl(fun(#{payload := <<"hello">>, topic := <<"topic">>}, Acc) -> - Acc; - (_, _) -> - false - end, true, Msgs)), + ?assertEqual( + true, + lists:foldl( + fun + (#{payload := <<"hello">>, topic := <<"topic">>}, Acc) -> + Acc; + (_, _) -> + false + end, + true, + Msgs + ) + ), emqtt:disconnect(ConnPid), emqtt:disconnect(ConnPid2). -t_shared_subscribe_3({init, Config}) -> Config; -t_shared_subscribe_3({'end', _Config}) -> ok; +t_shared_subscribe_3({init, Config}) -> + Config; +t_shared_subscribe_3({'end', _Config}) -> + ok; t_shared_subscribe_3(_) -> {ok, ConnPid} = emqtt:start_link([{clean_start, true}, {clientid, <<"clientid">>}]), {ok, _} = emqtt:connect(ConnPid), @@ -324,7 +366,8 @@ t_shard(Config) when is_list(Config) -> false after 100 -> false - end); + end + ); t_shard({'end', _Config}) -> emqx_broker:unsubscribe(<<"topic">>), ok = meck:unload(emqx_broker_helper). 
@@ -339,56 +382,63 @@ t_connected_client_count_persistent(Config) when is_list(Config) -> ConnFun = ?config(conn_fun, Config), ClientID = <<"clientid">>, ?assertEqual(0, emqx_cm:get_connected_client_count()), - {ok, ConnPid0} = emqtt:start_link([ {clean_start, false} - , {clientid, ClientID} - | Config]), + {ok, ConnPid0} = emqtt:start_link([ + {clean_start, false}, + {clientid, ClientID} + | Config + ]), {{ok, _}, {ok, [_]}} = wait_for_events( - fun() -> emqtt:ConnFun(ConnPid0) end, - [emqx_cm_connected_client_count_inc] - ), + fun() -> emqtt:ConnFun(ConnPid0) end, + [emqx_cm_connected_client_count_inc] + ), timer:sleep(10), ?assertEqual(1, emqx_cm:get_connected_client_count()), {ok, {ok, [_]}} = wait_for_events( - fun() -> emqtt:disconnect(ConnPid0) end, - [emqx_cm_connected_client_count_dec] - ), + fun() -> emqtt:disconnect(ConnPid0) end, + [emqx_cm_connected_client_count_dec] + ), timer:sleep(10), ?assertEqual(0, emqx_cm:get_connected_client_count()), %% reconnecting - {ok, ConnPid1} = emqtt:start_link([ {clean_start, false} - , {clientid, ClientID} - | Config - ]), + {ok, ConnPid1} = emqtt:start_link([ + {clean_start, false}, + {clientid, ClientID} + | Config + ]), {{ok, _}, {ok, [_]}} = wait_for_events( - fun() -> emqtt:ConnFun(ConnPid1) end, - [emqx_cm_connected_client_count_inc] - ), + fun() -> emqtt:ConnFun(ConnPid1) end, + [emqx_cm_connected_client_count_inc] + ), ?assertEqual(1, emqx_cm:get_connected_client_count()), %% taking over - {ok, ConnPid2} = emqtt:start_link([ {clean_start, false} - , {clientid, ClientID} - | Config - ]), + {ok, ConnPid2} = emqtt:start_link([ + {clean_start, false}, + {clientid, ClientID} + | Config + ]), {{ok, _}, {ok, [_, _]}} = wait_for_events( - fun() -> emqtt:ConnFun(ConnPid2) end, - [ emqx_cm_connected_client_count_inc - , emqx_cm_connected_client_count_dec - ], - 500 - ), + fun() -> emqtt:ConnFun(ConnPid2) end, + [ + emqx_cm_connected_client_count_inc, + emqx_cm_connected_client_count_dec + ], + 500 + ), ?assertEqual(1, emqx_cm:get_connected_client_count()), %% abnormal exit of channel process ChanPids = emqx_cm:all_channels(), {ok, {ok, [_, _]}} = wait_for_events( - fun() -> - lists:foreach( - fun(ChanPid) -> exit(ChanPid, kill) end, - ChanPids) - end, - [ emqx_cm_connected_client_count_dec - , emqx_cm_process_down - ] - ), + fun() -> + lists:foreach( + fun(ChanPid) -> exit(ChanPid, kill) end, + ChanPids + ) + end, + [ + emqx_cm_connected_client_count_dec, + emqx_cm_process_down + ] + ), ?assertEqual(0, emqx_cm:get_connected_client_count()), ok; t_connected_client_count_persistent({'end', _Config}) -> @@ -405,57 +455,66 @@ t_connected_client_count_anonymous(Config) when is_list(Config) -> ConnFun = ?config(conn_fun, Config), ?assertEqual(0, emqx_cm:get_connected_client_count()), %% first client - {ok, ConnPid0} = emqtt:start_link([ {clean_start, true} - | Config]), + {ok, ConnPid0} = emqtt:start_link([ + {clean_start, true} + | Config + ]), {{ok, _}, {ok, [_]}} = wait_for_events( - fun() -> emqtt:ConnFun(ConnPid0) end, - [emqx_cm_connected_client_count_inc] - ), + fun() -> emqtt:ConnFun(ConnPid0) end, + [emqx_cm_connected_client_count_inc] + ), ?assertEqual(1, emqx_cm:get_connected_client_count()), %% second client - {ok, ConnPid1} = emqtt:start_link([ {clean_start, true} - | Config]), + {ok, ConnPid1} = emqtt:start_link([ + {clean_start, true} + | Config + ]), {{ok, _}, {ok, [_]}} = wait_for_events( - fun() -> emqtt:ConnFun(ConnPid1) end, - [emqx_cm_connected_client_count_inc] - ), + fun() -> emqtt:ConnFun(ConnPid1) end, + 
[emqx_cm_connected_client_count_inc] + ), ?assertEqual(2, emqx_cm:get_connected_client_count()), %% when first client disconnects, shouldn't affect the second {ok, {ok, [_, _]}} = wait_for_events( - fun() -> emqtt:disconnect(ConnPid0) end, - [ emqx_cm_connected_client_count_dec - , emqx_cm_process_down - ] - ), + fun() -> emqtt:disconnect(ConnPid0) end, + [ + emqx_cm_connected_client_count_dec, + emqx_cm_process_down + ] + ), ?assertEqual(1, emqx_cm:get_connected_client_count()), %% reconnecting - {ok, ConnPid2} = emqtt:start_link([ {clean_start, true} - | Config - ]), + {ok, ConnPid2} = emqtt:start_link([ + {clean_start, true} + | Config + ]), {{ok, _}, {ok, [_]}} = wait_for_events( - fun() -> emqtt:ConnFun(ConnPid2) end, - [emqx_cm_connected_client_count_inc] - ), + fun() -> emqtt:ConnFun(ConnPid2) end, + [emqx_cm_connected_client_count_inc] + ), ?assertEqual(2, emqx_cm:get_connected_client_count()), {ok, {ok, [_, _]}} = wait_for_events( - fun() -> emqtt:disconnect(ConnPid1) end, - [ emqx_cm_connected_client_count_dec - , emqx_cm_process_down - ] - ), + fun() -> emqtt:disconnect(ConnPid1) end, + [ + emqx_cm_connected_client_count_dec, + emqx_cm_process_down + ] + ), ?assertEqual(1, emqx_cm:get_connected_client_count()), %% abnormal exit of channel process Chans = emqx_cm:all_channels(), {ok, {ok, [_, _]}} = wait_for_events( - fun() -> - lists:foreach( - fun(ChanPid) -> exit(ChanPid, kill) end, - Chans) - end, - [ emqx_cm_connected_client_count_dec - , emqx_cm_process_down - ] - ), + fun() -> + lists:foreach( + fun(ChanPid) -> exit(ChanPid, kill) end, + Chans + ) + end, + [ + emqx_cm_connected_client_count_dec, + emqx_cm_process_down + ] + ), ?assertEqual(0, emqx_cm:get_connected_client_count()), ok; t_connected_client_count_anonymous({'end', _Config}) -> @@ -475,32 +534,38 @@ t_connected_client_count_transient_takeover(Config) when is_list(Config) -> NumClients = 20, {ok, {ok, [_, _]}} = wait_for_events( - fun() -> - lists:foreach( + fun() -> + lists:foreach( fun(_) -> - spawn( - fun() -> - {ok, ConnPid} = - emqtt:start_link([ {clean_start, true} - , {clientid, ClientID} - | Config]), - %% don't assert the result: most of them fail - %% during the race - emqtt:ConnFun(ConnPid), - ok - end), - ok + spawn( + fun() -> + {ok, ConnPid} = + emqtt:start_link([ + {clean_start, true}, + {clientid, ClientID} + | Config + ]), + %% don't assert the result: most of them fail + %% during the race + emqtt:ConnFun(ConnPid), + ok + end + ), + ok end, - lists:seq(1, NumClients)) - end, - %% there can be only one channel that wins the race for the - %% lock for this client id. we also expect a decrement - %% event because the client dies along with the ephemeral - %% process. - [ emqx_cm_connected_client_count_inc - , emqx_cm_connected_client_count_dec - ], - 1000), + lists:seq(1, NumClients) + ) + end, + %% there can be only one channel that wins the race for the + %% lock for this client id. we also expect a decrement + %% event because the client dies along with the ephemeral + %% process. + [ + emqx_cm_connected_client_count_inc, + emqx_cm_connected_client_count_dec + ], + 1000 + ), %% Since more than one pair of inc/dec may be emitted, we need to %% wait for full stabilization timer:sleep(100), @@ -508,28 +573,30 @@ t_connected_client_count_transient_takeover(Config) when is_list(Config) -> %% ephemeral processes above, and all should be dead now. 
?assertEqual(0, emqx_cm:get_connected_client_count()), %% connecting again - {ok, ConnPid1} = emqtt:start_link([ {clean_start, true} - , {clientid, ClientID} - | Config - ]), + {ok, ConnPid1} = emqtt:start_link([ + {clean_start, true}, + {clientid, ClientID} + | Config + ]), {{ok, _}, {ok, [_]}} = wait_for_events( - fun() -> emqtt:ConnFun(ConnPid1) end, - [emqx_cm_connected_client_count_inc] - ), + fun() -> emqtt:ConnFun(ConnPid1) end, + [emqx_cm_connected_client_count_inc] + ), ?assertEqual(1, emqx_cm:get_connected_client_count()), %% abnormal exit of channel process [ChanPid] = emqx_cm:all_channels(), {ok, {ok, [_, _]}} = wait_for_events( - fun() -> - exit(ChanPid, kill), - ok - end, - [ emqx_cm_connected_client_count_dec - , emqx_cm_process_down - ] - ), + fun() -> + exit(ChanPid, kill), + ok + end, + [ + emqx_cm_connected_client_count_dec, + emqx_cm_process_down + ] + ), ?assertEqual(0, emqx_cm:get_connected_client_count()), ok; t_connected_client_count_transient_takeover({'end', _Config}) -> @@ -546,34 +613,43 @@ t_connected_client_stats(Config) when is_list(Config) -> ?assertEqual(0, emqx_cm:get_connected_client_count()), ?assertEqual(0, emqx_stats:getstat('live_connections.count')), ?assertEqual(0, emqx_stats:getstat('live_connections.max')), - {ok, ConnPid} = emqtt:start_link([ {clean_start, true} - , {clientid, <<"clientid">>} - | Config - ]), + {ok, ConnPid} = emqtt:start_link([ + {clean_start, true}, + {clientid, <<"clientid">>} + | Config + ]), {{ok, _}, {ok, [_]}} = wait_for_events( - fun() -> emqtt:ConnFun(ConnPid) end, - [emqx_cm_connected_client_count_inc] - ), + fun() -> emqtt:ConnFun(ConnPid) end, + [emqx_cm_connected_client_count_inc] + ), timer:sleep(20), %% ensure stats are synchronized {_, {ok, [_]}} = wait_for_stats( - fun emqx_cm:stats_fun/0, - [#{count_stat => 'live_connections.count', - max_stat => 'live_connections.max'}] - ), + fun emqx_cm:stats_fun/0, + [ + #{ + count_stat => 'live_connections.count', + max_stat => 'live_connections.max' + } + ] + ), ?assertEqual(1, emqx_stats:getstat('live_connections.count')), ?assertEqual(1, emqx_stats:getstat('live_connections.max')), {ok, {ok, [_]}} = wait_for_events( - fun() -> emqtt:disconnect(ConnPid) end, - [emqx_cm_connected_client_count_dec] - ), + fun() -> emqtt:disconnect(ConnPid) end, + [emqx_cm_connected_client_count_dec] + ), timer:sleep(20), %% ensure stats are synchronized {_, {ok, [_]}} = wait_for_stats( - fun emqx_cm:stats_fun/0, - [#{count_stat => 'live_connections.count', - max_stat => 'live_connections.max'}] - ), + fun emqx_cm:stats_fun/0, + [ + #{ + count_stat => 'live_connections.count', + max_stat => 'live_connections.max' + } + ] + ), ?assertEqual(0, emqx_stats:getstat('live_connections.count')), ?assertEqual(1, emqx_stats:getstat('live_connections.max')), ok; @@ -604,8 +680,8 @@ wait_for_events(Action, Kinds) -> wait_for_events(Action, Kinds, Timeout) -> Predicate = fun(#{?snk_kind := K}) -> - lists:member(K, Kinds) - end, + lists:member(K, Kinds) + end, N = length(Kinds), {ok, Sub} = snabbkaffe_collector:subscribe(Predicate, N, Timeout, 0), Res = Action(), @@ -617,15 +693,19 @@ wait_for_events(Action, Kinds, Timeout) -> end. 
wait_for_stats(Action, Stats) -> - Predicate = fun(Event = #{?snk_kind := emqx_stats_setstat}) -> - Stat = maps:with( - [ count_stat - , max_stat - ], Event), - lists:member(Stat, Stats); - (_) -> - false - end, + Predicate = fun + (Event = #{?snk_kind := emqx_stats_setstat}) -> + Stat = maps:with( + [ + count_stat, + max_stat + ], + Event + ), + lists:member(Stat, Stats); + (_) -> + false + end, N = length(Stats), Timeout = 500, {ok, Sub} = snabbkaffe_collector:subscribe(Predicate, N, Timeout, 0), @@ -645,8 +725,9 @@ recv_msgs(0, Msgs) -> recv_msgs(Count, Msgs) -> receive {publish, Msg} -> - recv_msgs(Count-1, [Msg|Msgs]); - _Other -> recv_msgs(Count, Msgs) + recv_msgs(Count - 1, [Msg | Msgs]); + _Other -> + recv_msgs(Count, Msgs) after 100 -> Msgs end. diff --git a/apps/emqx/test/emqx_broker_helper_SUITE.erl b/apps/emqx/test/emqx_broker_helper_SUITE.erl index 16085205b..19053bec1 100644 --- a/apps/emqx/test/emqx_broker_helper_SUITE.erl +++ b/apps/emqx/test/emqx_broker_helper_SUITE.erl @@ -48,8 +48,9 @@ t_register_sub(_) -> ok = emqx_broker_helper:register_sub(self(), <<"clientid">>), try emqx_broker_helper:register_sub(self(), <<"clientid2">>) of _ -> ct:fail(should_throw_error) - catch error:Reason -> - ?assertEqual(Reason, subid_conflict) + catch + error:Reason -> + ?assertEqual(Reason, subid_conflict) end, ?assertEqual(self(), emqx_broker_helper:lookup_subpid(<<"clientid">>)). diff --git a/apps/emqx/test/emqx_channel_SUITE.erl b/apps/emqx/test/emqx_channel_SUITE.erl index 256a271b2..40d2618c0 100644 --- a/apps/emqx/test/emqx_channel_SUITE.erl +++ b/apps/emqx/test/emqx_channel_SUITE.erl @@ -24,121 +24,185 @@ -include_lib("eunit/include/eunit.hrl"). -include_lib("common_test/include/ct.hrl"). - all() -> emqx_common_test_helpers:all(?MODULE). force_gc_conf() -> - #{bytes => 16777216,count => 16000,enable => true}. + #{bytes => 16777216, count => 16000, enable => true}. force_shutdown_conf() -> - #{enable => true,max_heap_size => 4194304, max_message_queue_len => 1000}. + #{enable => true, max_heap_size => 4194304, max_message_queue_len => 1000}. rate_limit_conf() -> - #{conn_bytes_in => ["100KB","10s"], - conn_messages_in => ["100","10s"], - max_conn_rate => 1000, - quota => - #{conn_messages_routing => infinity, - overall_messages_routing => infinity}}. + #{ + conn_bytes_in => ["100KB", "10s"], + conn_messages_in => ["100", "10s"], + max_conn_rate => 1000, + quota => + #{ + conn_messages_routing => infinity, + overall_messages_routing => infinity + } + }. rpc_conf() -> - #{async_batch_size => 256,authentication_timeout => 5000, - call_receive_timeout => 15000,connect_timeout => 5000, - mode => async,port_discovery => stateless, - send_timeout => 5000,socket_buffer => 1048576, - socket_keepalive_count => 9,socket_keepalive_idle => 900, - socket_keepalive_interval => 75,socket_recbuf => 1048576, - socket_sndbuf => 1048576,tcp_client_num => 1, - tcp_server_port => 5369}. + #{ + async_batch_size => 256, + authentication_timeout => 5000, + call_receive_timeout => 15000, + connect_timeout => 5000, + mode => async, + port_discovery => stateless, + send_timeout => 5000, + socket_buffer => 1048576, + socket_keepalive_count => 9, + socket_keepalive_idle => 900, + socket_keepalive_interval => 75, + socket_recbuf => 1048576, + socket_sndbuf => 1048576, + tcp_client_num => 1, + tcp_server_port => 5369 + }. 
mqtt_conf() -> - #{await_rel_timeout => 300000,idle_timeout => 15000, - ignore_loop_deliver => false,keepalive_backoff => 0.75, - max_awaiting_rel => 100,max_clientid_len => 65535, - max_inflight => 32,max_mqueue_len => 1000, - max_packet_size => 1048576,max_qos_allowed => 2, - max_subscriptions => infinity,max_topic_alias => 65535, - max_topic_levels => 128,mqueue_default_priority => lowest, - mqueue_priorities => disabled,mqueue_store_qos0 => true, - peer_cert_as_clientid => disabled, - peer_cert_as_username => disabled, - response_information => [],retain_available => true, - retry_interval => 30000,server_keepalive => disabled, - session_expiry_interval => 7200000, - shared_subscription => true,strict_mode => false, - upgrade_qos => false,use_username_as_clientid => false, - wildcard_subscription => true}. - + #{ + await_rel_timeout => 300000, + idle_timeout => 15000, + ignore_loop_deliver => false, + keepalive_backoff => 0.75, + max_awaiting_rel => 100, + max_clientid_len => 65535, + max_inflight => 32, + max_mqueue_len => 1000, + max_packet_size => 1048576, + max_qos_allowed => 2, + max_subscriptions => infinity, + max_topic_alias => 65535, + max_topic_levels => 128, + mqueue_default_priority => lowest, + mqueue_priorities => disabled, + mqueue_store_qos0 => true, + peer_cert_as_clientid => disabled, + peer_cert_as_username => disabled, + response_information => [], + retain_available => true, + retry_interval => 30000, + server_keepalive => disabled, + session_expiry_interval => 7200000, + shared_subscription => true, + strict_mode => false, + upgrade_qos => false, + use_username_as_clientid => false, + wildcard_subscription => true + }. listener_mqtt_tcp_conf() -> - #{acceptors => 16, - zone => default, - access_rules => ["allow all"], - bind => {{0,0,0,0},1883}, - max_connections => 1024000,mountpoint => <<>>, - proxy_protocol => false,proxy_protocol_timeout => 3000, - tcp => #{ - active_n => 100,backlog => 1024,buffer => 4096, - high_watermark => 1048576,nodelay => false, - reuseaddr => true,send_timeout => 15000, - send_timeout_close => true}}. + #{ + acceptors => 16, + zone => default, + access_rules => ["allow all"], + bind => {{0, 0, 0, 0}, 1883}, + max_connections => 1024000, + mountpoint => <<>>, + proxy_protocol => false, + proxy_protocol_timeout => 3000, + tcp => #{ + active_n => 100, + backlog => 1024, + buffer => 4096, + high_watermark => 1048576, + nodelay => false, + reuseaddr => true, + send_timeout => 15000, + send_timeout_close => true + } + }. listener_mqtt_ws_conf() -> - #{acceptors => 16, - zone => default, - access_rules => ["allow all"], - bind => {{0,0,0,0},8083}, - max_connections => 1024000,mountpoint => <<>>, - proxy_protocol => false,proxy_protocol_timeout => 3000, - tcp => - #{active_n => 100,backlog => 1024,buffer => 4096, - high_watermark => 1048576,nodelay => false, - reuseaddr => true,send_timeout => 15000, - send_timeout_close => true}, - websocket => - #{allow_origin_absence => true,check_origin_enable => false, - check_origins => [],compress => false, - deflate_opts => - #{client_max_window_bits => 15,mem_level => 8, - server_max_window_bits => 15}, - fail_if_no_subprotocol => true,idle_timeout => 86400000, - max_frame_size => infinity,mqtt_path => "/mqtt", - mqtt_piggyback => multiple, - proxy_address_header => "x-forwarded-for", - proxy_port_header => "x-forwarded-port", - supported_subprotocols => - ["mqtt","mqtt-v3","mqtt-v3.1.1","mqtt-v5"]}}. 
+ #{ + acceptors => 16, + zone => default, + access_rules => ["allow all"], + bind => {{0, 0, 0, 0}, 8083}, + max_connections => 1024000, + mountpoint => <<>>, + proxy_protocol => false, + proxy_protocol_timeout => 3000, + tcp => + #{ + active_n => 100, + backlog => 1024, + buffer => 4096, + high_watermark => 1048576, + nodelay => false, + reuseaddr => true, + send_timeout => 15000, + send_timeout_close => true + }, + websocket => + #{ + allow_origin_absence => true, + check_origin_enable => false, + check_origins => [], + compress => false, + deflate_opts => + #{ + client_max_window_bits => 15, + mem_level => 8, + server_max_window_bits => 15 + }, + fail_if_no_subprotocol => true, + idle_timeout => 86400000, + max_frame_size => infinity, + mqtt_path => "/mqtt", + mqtt_piggyback => multiple, + proxy_address_header => "x-forwarded-for", + proxy_port_header => "x-forwarded-port", + supported_subprotocols => + ["mqtt", "mqtt-v3", "mqtt-v3.1.1", "mqtt-v5"] + } + }. listeners_conf() -> - #{tcp => #{default => listener_mqtt_tcp_conf()}, - ws => #{default => listener_mqtt_ws_conf()} - }. + #{ + tcp => #{default => listener_mqtt_tcp_conf()}, + ws => #{default => listener_mqtt_ws_conf()} + }. limiter_conf() -> Make = fun() -> - #{bucket => - #{default => - #{capacity => infinity, - initial => 0, - rate => infinity, - per_client => - #{capacity => infinity,divisible => false, - failure_strategy => force,initial => 0,low_water_mark => 0, - max_retry_time => 5000,rate => infinity - } + #{ + bucket => + #{ + default => + #{ + capacity => infinity, + initial => 0, + rate => infinity, + per_client => + #{ + capacity => infinity, + divisible => false, + failure_strategy => force, + initial => 0, + low_water_mark => 0, + max_retry_time => 5000, + rate => infinity } - }, - burst => 0, - rate => infinity - } - end, + } + }, + burst => 0, + rate => infinity + } + end, - lists:foldl(fun(Name, Acc) -> - Acc#{Name => Make()} - end, - #{}, - [bytes_in, message_in, message_routing, connection, batch]). + lists:foldl( + fun(Name, Acc) -> + Acc#{Name => Make()} + end, + #{}, + [bytes_in, message_in, message_routing, connection, batch] + ). stats_conf() -> #{enable => true}. @@ -147,15 +211,16 @@ zone_conf() -> #{}. basic_conf() -> - #{rate_limit => rate_limit_conf(), - force_gc => force_gc_conf(), - force_shutdown => force_shutdown_conf(), - mqtt => mqtt_conf(), - rpc => rpc_conf(), - stats => stats_conf(), - listeners => listeners_conf(), - zones => zone_conf(), - limiter => limiter_conf() + #{ + rate_limit => rate_limit_conf(), + force_gc => force_gc_conf(), + force_shutdown => force_shutdown_conf(), + mqtt => mqtt_conf(), + rpc => rpc_conf(), + stats => stats_conf(), + listeners => listeners_conf(), + zones => zone_conf(), + limiter => limiter_conf() }. set_test_listener_confs() -> @@ -174,8 +239,11 @@ init_per_suite(Config) -> ok = meck:expect(emqx_cm, mark_channel_disconnected, fun(_) -> ok end), %% Access Control Meck ok = meck:new(emqx_access_control, [passthrough, no_history, no_link]), - ok = meck:expect(emqx_access_control, authenticate, - fun(_) -> {ok, #{is_superuser => false}} end), + ok = meck:expect( + emqx_access_control, + authenticate, + fun(_) -> {ok, #{is_superuser => false}} end + ), ok = meck:expect(emqx_access_control, authorize, fun(_, _, _) -> allow end), %% Broker Meck ok = meck:new(emqx_broker, [passthrough, no_history, no_link]), @@ -195,20 +263,21 @@ init_per_suite(Config) -> Config. 
end_per_suite(_Config) -> - meck:unload([emqx_access_control, - emqx_metrics, - emqx_session, - emqx_broker, - emqx_hooks, - emqx_cm, - emqx_banned - ]). + meck:unload([ + emqx_access_control, + emqx_metrics, + emqx_session, + emqx_broker, + emqx_hooks, + emqx_cm, + emqx_banned + ]). init_per_testcase(TestCase, Config) -> OldConf = set_test_listener_confs(), emqx_common_test_helpers:start_apps([]), check_modify_limiter(TestCase), - [{config, OldConf}|Config]. + [{config, OldConf} | Config]. end_per_testcase(_TestCase, Config) -> emqx_config:put(?config(config, Config)), @@ -230,14 +299,18 @@ modify_limiter() -> Limiter = emqx_config:get([limiter]), #{message_routing := #{bucket := Bucket} = Routing} = Limiter, #{default := #{per_client := Client} = Default} = Bucket, - Client2 = Client#{rate := 5, - initial := 0, - capacity := 5, - low_water_mark := 1}, - Default2 = Default#{per_client := Client2, - rate => 10, - initial => 0, - capacity => 10}, + Client2 = Client#{ + rate := 5, + initial := 0, + capacity := 5, + low_water_mark := 1 + }, + Default2 = Default#{ + per_client := Client2, + rate => 10, + initial => 0, + capacity => 10 + }, Bucket2 = Bucket#{default := Default2}, Routing2 = Routing#{bucket := Bucket2}, @@ -251,39 +324,50 @@ modify_limiter() -> %%-------------------------------------------------------------------- t_chan_info(_) -> - #{conn_state := connected, - clientinfo := ClientInfo - } = emqx_channel:info(channel()), + #{ + conn_state := connected, + clientinfo := ClientInfo + } = emqx_channel:info(channel()), ?assertEqual(clientinfo(), ClientInfo). t_chan_caps(_) -> - ?assertMatch(#{ - max_clientid_len := 65535, - max_qos_allowed := 2, - max_topic_alias := 65535, - max_topic_levels := Level, - retain_available := true, - shared_subscription := true, - subscription_identifiers := true, - wildcard_subscription := true - } when is_integer(Level), emqx_channel:caps(channel())). + ?assertMatch( + #{ + max_clientid_len := 65535, + max_qos_allowed := 2, + max_topic_alias := 65535, + max_topic_levels := Level, + retain_available := true, + shared_subscription := true, + subscription_identifiers := true, + wildcard_subscription := true + } when is_integer(Level), + emqx_channel:caps(channel()) + ). %%-------------------------------------------------------------------- %% Test cases for channel handle_in %%-------------------------------------------------------------------- t_handle_in_connect_packet_sucess(_) -> - ok = meck:expect(emqx_cm, open_session, - fun(true, _ClientInfo, _ConnInfo) -> - {ok, #{session => session(), present => false}} - end), + ok = meck:expect( + emqx_cm, + open_session, + fun(true, _ClientInfo, _ConnInfo) -> + {ok, #{session => session(), present => false}} + end + ), IdleChannel = channel(#{conn_state => idle}), {ok, [{event, connected}, {connack, ?CONNACK_PACKET(?RC_SUCCESS, 0, _)}], Channel} = emqx_channel:handle_in(?CONNECT_PACKET(connpkt()), IdleChannel), ClientInfo = emqx_channel:info(clientinfo, Channel), - ?assertMatch(#{clientid := <<"clientid">>, - username := <<"username">> - }, ClientInfo), + ?assertMatch( + #{ + clientid := <<"clientid">>, + username := <<"username">> + }, + ClientInfo + ), ?assertEqual(connected, emqx_channel:info(conn_state, Channel)). 
t_handle_in_unexpected_connect_packet(_) -> @@ -317,9 +401,9 @@ t_handle_in_unexpected_packet(_) -> t_handle_in_continue_auth(_) -> Properties = #{ - 'Authentication-Method' => <<"failed_auth_method">>, - 'Authentication-Data' => <<"failed_auth_data">> - }, + 'Authentication-Method' => <<"failed_auth_method">>, + 'Authentication-Data' => <<"failed_auth_data">> + }, Channel1 = channel(#{conn_state => connected}), {ok, [{outgoing, ?DISCONNECT_PACKET(?RC_PROTOCOL_ERROR)}, {close, protocol_error}], Channel1} = @@ -331,24 +415,33 @@ t_handle_in_continue_auth(_) -> {ok, [{event, connected}, {connack, ?CONNACK_PACKET(?RC_SUCCESS)}], _} = emqx_channel:handle_in( - ?AUTH_PACKET(?RC_CONTINUE_AUTHENTICATION, Properties), Channel3). + ?AUTH_PACKET(?RC_CONTINUE_AUTHENTICATION, Properties), Channel3 + ). t_handle_in_re_auth(_) -> Properties = #{ - 'Authentication-Method' => <<"failed_auth_method">>, - 'Authentication-Data' => <<"failed_auth_data">> - }, - {ok, [{outgoing, ?DISCONNECT_PACKET(?RC_BAD_AUTHENTICATION_METHOD)}, - {close, bad_authentication_method}], _} = + 'Authentication-Method' => <<"failed_auth_method">>, + 'Authentication-Data' => <<"failed_auth_data">> + }, + {ok, + [ + {outgoing, ?DISCONNECT_PACKET(?RC_BAD_AUTHENTICATION_METHOD)}, + {close, bad_authentication_method} + ], + _} = emqx_channel:handle_in( - ?AUTH_PACKET(?RC_RE_AUTHENTICATE,Properties), - channel() + ?AUTH_PACKET(?RC_RE_AUTHENTICATE, Properties), + channel() ), - {ok, [{outgoing, ?DISCONNECT_PACKET(?RC_BAD_AUTHENTICATION_METHOD)}, - {close, bad_authentication_method}], _} = + {ok, + [ + {outgoing, ?DISCONNECT_PACKET(?RC_BAD_AUTHENTICATION_METHOD)}, + {close, bad_authentication_method} + ], + _} = emqx_channel:handle_in( - ?AUTH_PACKET(?RC_RE_AUTHENTICATE,Properties), - channel(#{conninfo => #{proto_ver => ?MQTT_PROTO_V5, conn_props => undefined}}) + ?AUTH_PACKET(?RC_RE_AUTHENTICATE, Properties), + channel(#{conninfo => #{proto_ver => ?MQTT_PROTO_V5, conn_props => undefined}}) ), Channel1 = channel(), @@ -357,7 +450,7 @@ t_handle_in_re_auth(_) -> {ok, ?AUTH_PACKET(?RC_SUCCESS), _} = emqx_channel:handle_in( - ?AUTH_PACKET(?RC_RE_AUTHENTICATE,Properties), Channel2 + ?AUTH_PACKET(?RC_RE_AUTHENTICATE, Properties), Channel2 ). t_handle_in_qos0_publish(_) -> @@ -396,84 +489,109 @@ t_handle_in_qos2_publish_with_error_return(_) -> {ok, ?PUBREC_PACKET(2, ?RC_NO_MATCHING_SUBSCRIBERS), Channel1} = emqx_channel:handle_in(Publish2, Channel), Publish3 = ?PUBLISH_PACKET(?QOS_2, <<"topic">>, 3, <<"payload">>), - {ok, [{outgoing, ?DISCONNECT_PACKET(?RC_RECEIVE_MAXIMUM_EXCEEDED)}, - {close, receive_maximum_exceeded}], Channel1} = + {ok, + [ + {outgoing, ?DISCONNECT_PACKET(?RC_RECEIVE_MAXIMUM_EXCEEDED)}, + {close, receive_maximum_exceeded} + ], + Channel1} = emqx_channel:handle_in(Publish3, Channel1). t_handle_in_puback_ok(_) -> Msg = emqx_message:make(<<"t">>, <<"payload">>), - ok = meck:expect(emqx_session, puback, - fun(_, _PacketId, Session) -> {ok, Msg, Session} end), + ok = meck:expect( + emqx_session, + puback, + fun(_, _PacketId, Session) -> {ok, Msg, Session} end + ), Channel = channel(#{conn_state => connected}), {ok, _NChannel} = emqx_channel:handle_in(?PUBACK_PACKET(1, ?RC_SUCCESS), Channel). - % ?assertEqual(#{puback_in => 1}, emqx_channel:info(pub_stats, NChannel)). +% ?assertEqual(#{puback_in => 1}, emqx_channel:info(pub_stats, NChannel)). 
t_handle_in_puback_id_in_use(_) -> - ok = meck:expect(emqx_session, puback, - fun(_, _, _Session) -> - {error, ?RC_PACKET_IDENTIFIER_IN_USE} - end), + ok = meck:expect( + emqx_session, + puback, + fun(_, _, _Session) -> + {error, ?RC_PACKET_IDENTIFIER_IN_USE} + end + ), {ok, _Channel} = emqx_channel:handle_in(?PUBACK_PACKET(1, ?RC_SUCCESS), channel()). - % ?assertEqual(#{puback_in => 1}, emqx_channel:info(pub_stats, Channel)). +% ?assertEqual(#{puback_in => 1}, emqx_channel:info(pub_stats, Channel)). t_handle_in_puback_id_not_found(_) -> - ok = meck:expect(emqx_session, puback, - fun(_, _, _Session) -> - {error, ?RC_PACKET_IDENTIFIER_NOT_FOUND} - end), + ok = meck:expect( + emqx_session, + puback, + fun(_, _, _Session) -> + {error, ?RC_PACKET_IDENTIFIER_NOT_FOUND} + end + ), {ok, _Channel} = emqx_channel:handle_in(?PUBACK_PACKET(1, ?RC_SUCCESS), channel()). - % ?assertEqual(#{puback_in => 1}, emqx_channel:info(pub_stats, Channel)). +% ?assertEqual(#{puback_in => 1}, emqx_channel:info(pub_stats, Channel)). t_bad_receive_maximum(_) -> - ok = meck:expect(emqx_cm, open_session, - fun(true, _ClientInfo, _ConnInfo) -> - {ok, #{session => session(), present => false}} - end), + ok = meck:expect( + emqx_cm, + open_session, + fun(true, _ClientInfo, _ConnInfo) -> + {ok, #{session => session(), present => false}} + end + ), emqx_config:put_zone_conf(default, [mqtt, response_information], test), C1 = channel(#{conn_state => idle}), {shutdown, protocol_error, _, _} = emqx_channel:handle_in( - ?CONNECT_PACKET(connpkt(#{'Receive-Maximum' => 0})), - C1 + ?CONNECT_PACKET(connpkt(#{'Receive-Maximum' => 0})), + C1 ). t_override_client_receive_maximum(_) -> - ok = meck:expect(emqx_cm, open_session, - fun(true, _ClientInfo, _ConnInfo) -> - {ok, #{session => session(), present => false}} - end), + ok = meck:expect( + emqx_cm, + open_session, + fun(true, _ClientInfo, _ConnInfo) -> + {ok, #{session => session(), present => false}} + end + ), emqx_config:put_zone_conf(default, [mqtt, response_information], test), emqx_config:put_zone_conf(default, [mqtt, max_inflight], 0), C1 = channel(#{conn_state => idle}), ClientCapacity = 2, {ok, [{event, connected}, _ConnAck], C2} = emqx_channel:handle_in( - ?CONNECT_PACKET(connpkt(#{'Receive-Maximum' => ClientCapacity})), - C1 + ?CONNECT_PACKET(connpkt(#{'Receive-Maximum' => ClientCapacity})), + C1 ), ConnInfo = emqx_channel:info(conninfo, C2), ?assertEqual(ClientCapacity, maps:get(receive_maximum, ConnInfo)). t_handle_in_pubrec_ok(_) -> - Msg = emqx_message:make(test,?QOS_2, <<"t">>, <<"payload">>), + Msg = emqx_message:make(test, ?QOS_2, <<"t">>, <<"payload">>), ok = meck:expect(emqx_session, pubrec, fun(_, _, Session) -> {ok, Msg, Session} end), Channel = channel(#{conn_state => connected}), {ok, ?PUBREL_PACKET(1, ?RC_SUCCESS), _Channel1} = emqx_channel:handle_in(?PUBREC_PACKET(1, ?RC_SUCCESS), Channel). t_handle_in_pubrec_id_in_use(_) -> - ok = meck:expect(emqx_session, pubrec, - fun(_, _, _Session) -> - {error, ?RC_PACKET_IDENTIFIER_IN_USE} - end), + ok = meck:expect( + emqx_session, + pubrec, + fun(_, _, _Session) -> + {error, ?RC_PACKET_IDENTIFIER_IN_USE} + end + ), {ok, ?PUBREL_PACKET(1, ?RC_PACKET_IDENTIFIER_IN_USE), _Channel} = emqx_channel:handle_in(?PUBREC_PACKET(1, ?RC_SUCCESS), channel()). 
t_handle_in_pubrec_id_not_found(_) -> - ok = meck:expect(emqx_session, pubrec, - fun(_, _, _Session) -> - {error, ?RC_PACKET_IDENTIFIER_NOT_FOUND} - end), + ok = meck:expect( + emqx_session, + pubrec, + fun(_, _, _Session) -> + {error, ?RC_PACKET_IDENTIFIER_NOT_FOUND} + end + ), {ok, ?PUBREL_PACKET(1, ?RC_PACKET_IDENTIFIER_NOT_FOUND), _Channel} = emqx_channel:handle_in(?PUBREC_PACKET(1, ?RC_SUCCESS), channel()). @@ -484,29 +602,38 @@ t_handle_in_pubrel_ok(_) -> emqx_channel:handle_in(?PUBREL_PACKET(1, ?RC_SUCCESS), Channel). t_handle_in_pubrel_not_found_error(_) -> - ok = meck:expect(emqx_session, pubrel, - fun(_, _PacketId, _Session) -> - {error, ?RC_PACKET_IDENTIFIER_NOT_FOUND} - end), + ok = meck:expect( + emqx_session, + pubrel, + fun(_, _PacketId, _Session) -> + {error, ?RC_PACKET_IDENTIFIER_NOT_FOUND} + end + ), {ok, ?PUBCOMP_PACKET(1, ?RC_PACKET_IDENTIFIER_NOT_FOUND), _Channel} = emqx_channel:handle_in(?PUBREL_PACKET(1, ?RC_SUCCESS), channel()). t_handle_in_pubcomp_ok(_) -> ok = meck:expect(emqx_session, pubcomp, fun(_, _, Session) -> {ok, Session} end), {ok, _Channel} = emqx_channel:handle_in(?PUBCOMP_PACKET(1, ?RC_SUCCESS), channel()). - % ?assertEqual(#{pubcomp_in => 1}, emqx_channel:info(pub_stats, Channel)). +% ?assertEqual(#{pubcomp_in => 1}, emqx_channel:info(pub_stats, Channel)). t_handle_in_pubcomp_not_found_error(_) -> - ok = meck:expect(emqx_session, pubcomp, - fun(_, _PacketId, _Session) -> - {error, ?RC_PACKET_IDENTIFIER_NOT_FOUND} - end), + ok = meck:expect( + emqx_session, + pubcomp, + fun(_, _PacketId, _Session) -> + {error, ?RC_PACKET_IDENTIFIER_NOT_FOUND} + end + ), Channel = channel(#{conn_state => connected}), {ok, _Channel1} = emqx_channel:handle_in(?PUBCOMP_PACKET(1, ?RC_SUCCESS), Channel). t_handle_in_subscribe(_) -> - ok = meck:expect(emqx_session, subscribe, - fun(_, _, _, Session) -> {ok, Session} end), + ok = meck:expect( + emqx_session, + subscribe, + fun(_, _, _, Session) -> {ok, Session} end + ), Channel = channel(#{conn_state => connected}), TopicFilters = [{<<"+">>, ?DEFAULT_SUBOPTS}], Subscribe = ?SUBSCRIBE_PACKET(1, #{}, TopicFilters), @@ -514,10 +641,13 @@ t_handle_in_subscribe(_) -> {ok, Replies, _Chan} = emqx_channel:handle_in(Subscribe, Channel). t_handle_in_unsubscribe(_) -> - ok = meck:expect(emqx_session, unsubscribe, - fun(_, _, _, Session) -> - {ok, Session} - end), + ok = meck:expect( + emqx_session, + unsubscribe, + fun(_, _, _, Session) -> + {ok, Session} + end + ), Channel = channel(#{conn_state => connected}), {ok, [{outgoing, ?UNSUBACK_PACKET(1)}, {event, updated}], _Chan} = emqx_channel:handle_in(?UNSUBSCRIBE_PACKET(1, #{}, [<<"+">>]), Channel). @@ -542,7 +672,7 @@ t_handle_in_frame_error(_) -> IdleChannel = channel(#{conn_state => idle}), {shutdown, frame_too_large, _Chan} = emqx_channel:handle_in({frame_error, frame_too_large}, IdleChannel), - ConnectingChan = channel(#{conn_state => connecting}), + ConnectingChan = channel(#{conn_state => connecting}), ConnackPacket = ?CONNACK_PACKET(?RC_PACKET_TOO_LARGE), {shutdown, frame_too_large, ConnackPacket, _} = emqx_channel:handle_in({frame_error, frame_too_large}, ConnectingChan), @@ -560,10 +690,13 @@ t_handle_in_expected_packet(_) -> emqx_channel:handle_in(packet, channel()). 
t_process_connect(_) -> - ok = meck:expect(emqx_cm, open_session, - fun(true, _ClientInfo, _ConnInfo) -> - {ok, #{session => session(), present => false}} - end), + ok = meck:expect( + emqx_cm, + open_session, + fun(true, _ClientInfo, _ConnInfo) -> + {ok, #{session => session(), present => false}} + end + ), {ok, [{event, connected}, {connack, ?CONNACK_PACKET(?RC_SUCCESS)}], _Chan} = emqx_channel:process_connect(#{}, channel(#{conn_state => idle})). @@ -580,7 +713,7 @@ t_process_publish_qos1(_) -> t_process_subscribe(_) -> ok = meck:expect(emqx_session, subscribe, fun(_, _, _, Session) -> {ok, Session} end), - TopicFilters = [ TopicFilter = {<<"+">>, ?DEFAULT_SUBOPTS}], + TopicFilters = [TopicFilter = {<<"+">>, ?DEFAULT_SUBOPTS}], {[{TopicFilter, ?RC_SUCCESS}], _Channel} = emqx_channel:process_subscribe(TopicFilters, #{}, channel()). @@ -590,18 +723,19 @@ t_process_unsubscribe(_) -> {[?RC_SUCCESS], _Channel} = emqx_channel:process_unsubscribe(TopicFilters, #{}, channel()). t_quota_qos0(_) -> - esockd_limiter:start_link(), Cnter = counters:new(1, []), + esockd_limiter:start_link(), + Cnter = counters:new(1, []), ok = meck:expect(emqx_broker, publish, fun(_) -> [{node(), <<"topic">>, {ok, 4}}] end), ok = meck:expect( - emqx_metrics, - inc, - fun('packets.publish.dropped') -> counters:add(Cnter, 1, 1) end - ), + emqx_metrics, + inc, + fun('packets.publish.dropped') -> counters:add(Cnter, 1, 1) end + ), ok = meck:expect( - emqx_metrics, - val, - fun('packets.publish.dropped') -> counters:get(Cnter, 1) end - ), + emqx_metrics, + val, + fun('packets.publish.dropped') -> counters:get(Cnter, 1) end + ), Chann = channel(#{conn_state => connected, quota => quota()}), Pub = ?PUBLISH_PACKET(?QOS_0, <<"topic">>, undefined, <<"payload">>), @@ -658,7 +792,7 @@ t_handle_deliver(_) -> Msg1 = emqx_message:make(test, ?QOS_2, <<"t2">>, <<"qos2">>), Delivers = [{deliver, <<"+">>, Msg0}, {deliver, <<"+">>, Msg1}], {ok, {outgoing, Packets}, _Ch} = emqx_channel:handle_deliver(Delivers, channel()), - ?assertEqual([?QOS_1, ?QOS_2], [emqx_packet:qos(Pkt)|| Pkt <- Packets]). + ?assertEqual([?QOS_1, ?QOS_2], [emqx_packet:qos(Pkt) || Pkt <- Packets]). t_handle_deliver_nl(_) -> ClientInfo = clientinfo(#{clientid => <<"clientid">>}), @@ -682,8 +816,8 @@ t_handle_out_publish(_) -> t_handle_out_publish_1(_) -> Msg = emqx_message:make(<<"clientid">>, ?QOS_1, <<"t">>, <<"payload">>), - {ok, {outgoing, [?PUBLISH_PACKET(?QOS_1, <<"t">>, 1, <<"payload">>)]}, _Chan} - = emqx_channel:handle_out(publish, [{1, Msg}], channel()). + {ok, {outgoing, [?PUBLISH_PACKET(?QOS_1, <<"t">>, 1, <<"payload">>)]}, _Chan} = + emqx_channel:handle_out(publish, [{1, Msg}], channel()). t_handle_out_connack_sucess(_) -> {ok, [{event, connected}, {connack, ?CONNACK_PACKET(?RC_SUCCESS, 0, _)}], Channel} = @@ -691,30 +825,39 @@ t_handle_out_connack_sucess(_) -> ?assertEqual(connected, emqx_channel:info(conn_state, Channel)). 
t_handle_out_connack_response_information(_) -> - ok = meck:expect(emqx_cm, open_session, - fun(true, _ClientInfo, _ConnInfo) -> - {ok, #{session => session(), present => false}} - end), + ok = meck:expect( + emqx_cm, + open_session, + fun(true, _ClientInfo, _ConnInfo) -> + {ok, #{session => session(), present => false}} + end + ), emqx_config:put_zone_conf(default, [mqtt, response_information], test), IdleChannel = channel(#{conn_state => idle}), - {ok, [{event, connected}, - {connack, ?CONNACK_PACKET(?RC_SUCCESS, 0, #{'Response-Information' := test})}], - _} = emqx_channel:handle_in( - ?CONNECT_PACKET(connpkt(#{'Request-Response-Information' => 1})), - IdleChannel - ). + {ok, + [ + {event, connected}, + {connack, ?CONNACK_PACKET(?RC_SUCCESS, 0, #{'Response-Information' := test})} + ], + _} = emqx_channel:handle_in( + ?CONNECT_PACKET(connpkt(#{'Request-Response-Information' => 1})), + IdleChannel + ). t_handle_out_connack_not_response_information(_) -> - ok = meck:expect(emqx_cm, open_session, - fun(true, _ClientInfo, _ConnInfo) -> - {ok, #{session => session(), present => false}} - end), + ok = meck:expect( + emqx_cm, + open_session, + fun(true, _ClientInfo, _ConnInfo) -> + {ok, #{session => session(), present => false}} + end + ), emqx_config:put_zone_conf(default, [mqtt, response_information], test), IdleChannel = channel(#{conn_state => idle}), {ok, [{event, connected}, {connack, ?CONNACK_PACKET(?RC_SUCCESS, 0, AckProps)}], _} = emqx_channel:handle_in( - ?CONNECT_PACKET(connpkt(#{'Request-Response-Information' => 0})), - IdleChannel + ?CONNECT_PACKET(connpkt(#{'Request-Response-Information' => 0})), + IdleChannel ), ?assertEqual(false, maps:is_key('Response-Information', AckProps)). @@ -781,9 +924,9 @@ t_handle_call_takeover_end(_) -> t_handle_call_quota(_) -> {reply, ok, _Chan} = emqx_channel:handle_call( - {quota, default}, - channel() - ). + {quota, default}, + channel() + ). t_handle_call_unexpected(_) -> {reply, ignored, _Chan} = emqx_channel:handle_call(unexpected_req, channel()). @@ -862,58 +1005,78 @@ t_process_alias_inexistent_alias(_) -> Publish = #mqtt_packet_publish{topic_name = <<>>, properties = #{'Topic-Alias' => 1}}, Channel = channel(), ?assertEqual( - {error, ?RC_PROTOCOL_ERROR}, - emqx_channel:process_alias(#mqtt_packet{variable = Publish}, Channel)). + {error, ?RC_PROTOCOL_ERROR}, + emqx_channel:process_alias(#mqtt_packet{variable = Publish}, Channel) + ). 
t_packing_alias(_) -> - Packet1 = #mqtt_packet{variable = #mqtt_packet_publish{ - topic_name = <<"x">>, - properties = #{'User-Property' => [{<<"k">>, <<"v">>}]} - }}, + Packet1 = #mqtt_packet{ + variable = #mqtt_packet_publish{ + topic_name = <<"x">>, + properties = #{'User-Property' => [{<<"k">>, <<"v">>}]} + } + }, Packet2 = #mqtt_packet{variable = #mqtt_packet_publish{topic_name = <<"y">>}}, Channel = emqx_channel:set_field(alias_maximum, #{outbound => 1}, channel()), {RePacket1, NChannel1} = emqx_channel:packing_alias(Packet1, Channel), - ?assertEqual(#mqtt_packet{variable = #mqtt_packet_publish{ - topic_name = <<"x">>, - properties = #{ - 'Topic-Alias' => 1, - 'User-Property' => [{<<"k">>, <<"v">>}] - } - }}, RePacket1), + ?assertEqual( + #mqtt_packet{ + variable = #mqtt_packet_publish{ + topic_name = <<"x">>, + properties = #{ + 'Topic-Alias' => 1, + 'User-Property' => [{<<"k">>, <<"v">>}] + } + } + }, + RePacket1 + ), {RePacket2, NChannel2} = emqx_channel:packing_alias(Packet1, NChannel1), - ?assertEqual(#mqtt_packet{variable = #mqtt_packet_publish{ - topic_name = <<>>, - properties = #{ - 'Topic-Alias' => 1, - 'User-Property' => [{<<"k">>, <<"v">>}] - }}}, RePacket2), + ?assertEqual( + #mqtt_packet{ + variable = #mqtt_packet_publish{ + topic_name = <<>>, + properties = #{ + 'Topic-Alias' => 1, + 'User-Property' => [{<<"k">>, <<"v">>}] + } + } + }, + RePacket2 + ), {RePacket3, _} = emqx_channel:packing_alias(Packet2, NChannel2), ?assertEqual( - #mqtt_packet{variable = #mqtt_packet_publish{topic_name = <<"y">>, properties = #{}}}, - RePacket3 + #mqtt_packet{variable = #mqtt_packet_publish{topic_name = <<"y">>, properties = #{}}}, + RePacket3 ), - ?assertMatch({#mqtt_packet{variable = #mqtt_packet_publish{topic_name = <<"z">>}}, _}, - emqx_channel:packing_alias( - #mqtt_packet{variable = #mqtt_packet_publish{topic_name = <<"z">>}}, - channel())). + ?assertMatch( + {#mqtt_packet{variable = #mqtt_packet_publish{topic_name = <<"z">>}}, _}, + emqx_channel:packing_alias( + #mqtt_packet{variable = #mqtt_packet_publish{topic_name = <<"z">>}}, + channel() + ) + ). t_packing_alias_inexistent_alias(_) -> Publish = #mqtt_packet_publish{topic_name = <<>>, properties = #{'Topic-Alias' => 1}}, Channel = channel(), Packet = #mqtt_packet{variable = Publish}, ExpectedChannel = emqx_channel:set_field( - topic_aliases, - #{ inbound => #{} - , outbound => #{<<>> => 1} - }, - Channel), + topic_aliases, + #{ + inbound => #{}, + outbound => #{<<>> => 1} + }, + Channel + ), ?assertEqual( - {Packet, ExpectedChannel}, - emqx_channel:packing_alias(Packet, Channel)). + {Packet, ExpectedChannel}, + emqx_channel:packing_alias(Packet, Channel) + ). 
t_check_pub_authz(_) -> emqx_config:put_zone_conf(default, [authorization, enable], true), @@ -932,24 +1095,32 @@ t_check_sub_authzs(_) -> t_enrich_connack_caps(_) -> ok = meck:new(emqx_mqtt_caps, [passthrough, no_history]), - ok = meck:expect(emqx_mqtt_caps, get_caps, - fun(_Zone) -> - #{max_packet_size => 1024, - max_qos_allowed => ?QOS_2, - retain_available => true, - max_topic_alias => 10, - shared_subscription => true, - wildcard_subscription => true - } - end), + ok = meck:expect( + emqx_mqtt_caps, + get_caps, + fun(_Zone) -> + #{ + max_packet_size => 1024, + max_qos_allowed => ?QOS_2, + retain_available => true, + max_topic_alias => 10, + shared_subscription => true, + wildcard_subscription => true + } + end + ), AckProps = emqx_channel:enrich_connack_caps(#{}, channel()), - ?assertMatch(#{'Retain-Available' := 1, - 'Maximum-Packet-Size' := 1024, - 'Topic-Alias-Maximum' := 10, - 'Wildcard-Subscription-Available' := 1, - 'Subscription-Identifier-Available' := 1, - 'Shared-Subscription-Available' := 1 - }, AckProps), + ?assertMatch( + #{ + 'Retain-Available' := 1, + 'Maximum-Packet-Size' := 1024, + 'Topic-Alias-Maximum' := 10, + 'Wildcard-Subscription-Available' := 1, + 'Subscription-Identifier-Available' := 1, + 'Shared-Subscription-Available' := 1 + }, + AckProps + ), ok = meck:unload(emqx_mqtt_caps). %%-------------------------------------------------------------------- @@ -963,19 +1134,22 @@ t_terminate(_) -> t_ws_cookie_init(_) -> WsCookie = [{<<"session_id">>, <<"xyz">>}], - ConnInfo = #{socktype => ws, - peername => {{127,0,0,1}, 3456}, - sockname => {{127,0,0,1}, 1883}, - peercert => nossl, - conn_mod => emqx_ws_connection, - ws_cookie => WsCookie - }, + ConnInfo = #{ + socktype => ws, + peername => {{127, 0, 0, 1}, 3456}, + sockname => {{127, 0, 0, 1}, 1883}, + peercert => nossl, + conn_mod => emqx_ws_connection, + ws_cookie => WsCookie + }, Channel = emqx_channel:init( - ConnInfo, - #{zone => default, - limiter => limiter_cfg(), - listener => {tcp, default} - }), + ConnInfo, + #{ + zone => default, + limiter => limiter_cfg(), + listener => {tcp, default} + } + ), ?assertMatch(#{ws_cookie := WsCookie}, emqx_channel:info(clientinfo, Channel)). %%-------------------------------------------------------------------- @@ -984,45 +1158,58 @@ t_ws_cookie_init(_) -> channel() -> channel(#{}). channel(InitFields) -> - ConnInfo = #{peername => {{127,0,0,1}, 3456}, - sockname => {{127,0,0,1}, 1883}, - conn_mod => emqx_connection, - proto_name => <<"MQTT">>, - proto_ver => ?MQTT_PROTO_V5, - clean_start => true, - keepalive => 30, - clientid => <<"clientid">>, - username => <<"username">>, - conn_props => #{}, - receive_maximum => 100, - expiry_interval => 0 - }, - maps:fold(fun(Field, Value, Channel) -> - emqx_channel:set_field(Field, Value, Channel) - end, - emqx_channel:init( - ConnInfo, - #{zone => default, - limiter => limiter_cfg(), - listener => {tcp, default} - }), - maps:merge(#{clientinfo => clientinfo(), - session => session(), - conn_state => connected - }, InitFields)). 
+ ConnInfo = #{ + peername => {{127, 0, 0, 1}, 3456}, + sockname => {{127, 0, 0, 1}, 1883}, + conn_mod => emqx_connection, + proto_name => <<"MQTT">>, + proto_ver => ?MQTT_PROTO_V5, + clean_start => true, + keepalive => 30, + clientid => <<"clientid">>, + username => <<"username">>, + conn_props => #{}, + receive_maximum => 100, + expiry_interval => 0 + }, + maps:fold( + fun(Field, Value, Channel) -> + emqx_channel:set_field(Field, Value, Channel) + end, + emqx_channel:init( + ConnInfo, + #{ + zone => default, + limiter => limiter_cfg(), + listener => {tcp, default} + } + ), + maps:merge( + #{ + clientinfo => clientinfo(), + session => session(), + conn_state => connected + }, + InitFields + ) + ). clientinfo() -> clientinfo(#{}). clientinfo(InitProps) -> - maps:merge(#{zone => default, - listener => {tcp, default}, - protocol => mqtt, - peerhost => {127,0,0,1}, - clientid => <<"clientid">>, - username => <<"username">>, - is_superuser => false, - peercert => undefined, - mountpoint => undefined - }, InitProps). + maps:merge( + #{ + zone => default, + listener => {tcp, default}, + protocol => mqtt, + peerhost => {127, 0, 0, 1}, + clientid => <<"clientid">>, + username => <<"username">>, + is_superuser => false, + peercert => undefined, + mountpoint => undefined + }, + InitProps + ). topic_filters() -> [{<<"+">>, ?DEFAULT_SUBOPTS}, {<<"#">>, ?DEFAULT_SUBOPTS}]. @@ -1030,24 +1217,26 @@ topic_filters() -> connpkt() -> connpkt(#{}). connpkt(Props) -> #mqtt_packet_connect{ - proto_name = <<"MQTT">>, - proto_ver = ?MQTT_PROTO_V4, - is_bridge = false, - clean_start = true, - keepalive = 30, - properties = Props, - clientid = <<"clientid">>, - username = <<"username">>, - password = <<"passwd">> - }. + proto_name = <<"MQTT">>, + proto_ver = ?MQTT_PROTO_V4, + is_bridge = false, + clean_start = true, + keepalive = 30, + properties = Props, + clientid = <<"clientid">>, + username = <<"username">>, + password = <<"passwd">> + }. session() -> session(#{}). session(InitFields) when is_map(InitFields) -> - maps:fold(fun(Field, Value, Session) -> - emqx_session:set_field(Field, Value, Session) - end, - emqx_session:init(#{max_inflight => 0}), - InitFields). + maps:fold( + fun(Field, Value, Session) -> + emqx_session:set_field(Field, Value, Session) + end, + emqx_session:init(#{max_inflight => 0}), + InitFields + ). %% conn: 5/s; overall: 10/s quota() -> diff --git a/apps/emqx/test/emqx_client_SUITE.erl b/apps/emqx/test/emqx_client_SUITE.erl index 4bf6795d3..d121882ee 100644 --- a/apps/emqx/test/emqx_client_SUITE.erl +++ b/apps/emqx/test/emqx_client_SUITE.erl @@ -25,55 +25,54 @@ -include_lib("eunit/include/eunit.hrl"). -include_lib("common_test/include/ct.hrl"). --define(TOPICS, [<<"TopicA">>, - <<"TopicA/B">>, - <<"Topic/C">>, - <<"TopicA/C">>, - <<"/TopicA">> - ]). - --define(WILD_TOPICS, [<<"TopicA/+">>, - <<"+/C">>, - <<"#">>, - <<"/#">>, - <<"/+">>, - <<"+/+">>, - <<"TopicA/#">> - ]). +-define(TOPICS, [ + <<"TopicA">>, + <<"TopicA/B">>, + <<"Topic/C">>, + <<"TopicA/C">>, + <<"/TopicA">> +]). +-define(WILD_TOPICS, [ + <<"TopicA/+">>, + <<"+/C">>, + <<"#">>, + <<"/#">>, + <<"/+">>, + <<"+/+">>, + <<"TopicA/#">> +]). all() -> - [{group, mqttv3}, - {group, mqttv4}, - {group, mqttv5}, - {group, others} + [ + {group, mqttv3}, + {group, mqttv4}, + {group, mqttv5}, + {group, others} ]. 
groups() -> - [{mqttv3, [non_parallel_tests], - [t_basic_v3 - ]}, - {mqttv4, [non_parallel_tests], - [t_basic_v4, - t_cm, - t_cm_registry, - %% t_will_message, - %% t_offline_message_queueing, - t_overlapping_subscriptions, - %% t_keepalive, - %% t_redelivery_on_reconnect, - %% subscribe_failure_test, - t_dollar_topics - ]}, - {mqttv5, [non_parallel_tests], - [t_basic_with_props_v5 - ]}, - {others, [non_parallel_tests], - [t_username_as_clientid, - t_certcn_as_clientid_default_config_tls, - t_certcn_as_clientid_tlsv1_3, - t_certcn_as_clientid_tlsv1_2 - ]} + [ + {mqttv3, [non_parallel_tests], [t_basic_v3]}, + {mqttv4, [non_parallel_tests], [ + t_basic_v4, + t_cm, + t_cm_registry, + %% t_will_message, + %% t_offline_message_queueing, + t_overlapping_subscriptions, + %% t_keepalive, + %% t_redelivery_on_reconnect, + %% subscribe_failure_test, + t_dollar_topics + ]}, + {mqttv5, [non_parallel_tests], [t_basic_with_props_v5]}, + {others, [non_parallel_tests], [ + t_username_as_clientid, + t_certcn_as_clientid_default_config_tls, + t_certcn_as_clientid_tlsv1_3, + t_certcn_as_clientid_tlsv1_2 + ]} ]. init_per_suite(Config) -> @@ -121,10 +120,12 @@ t_cm_registry(_) -> Pid ! <<"Unexpected info">>. t_will_message(_Config) -> - {ok, C1} = emqtt:start_link([{clean_start, true}, - {will_topic, nth(3, ?TOPICS)}, - {will_payload, <<"client disconnected">>}, - {keepalive, 1}]), + {ok, C1} = emqtt:start_link([ + {clean_start, true}, + {will_topic, nth(3, ?TOPICS)}, + {will_payload, <<"client disconnected">>}, + {keepalive, 1} + ]), {ok, _} = emqtt:connect(C1), {ok, C2} = emqtt:start_link(), @@ -139,14 +140,18 @@ t_will_message(_Config) -> ct:pal("Will message test succeeded"). t_offline_message_queueing(_) -> - {ok, C1} = emqtt:start_link([{clean_start, false}, - {clientid, <<"c1">>}]), + {ok, C1} = emqtt:start_link([ + {clean_start, false}, + {clientid, <<"c1">>} + ]), {ok, _} = emqtt:connect(C1), {ok, _, [2]} = emqtt:subscribe(C1, nth(6, ?WILD_TOPICS), 2), ok = emqtt:disconnect(C1), - {ok, C2} = emqtt:start_link([{clean_start, true}, - {clientid, <<"c2">>}]), + {ok, C2} = emqtt:start_link([ + {clean_start, true}, + {clientid, <<"c2">>} + ]), {ok, _} = emqtt:connect(C2), ok = emqtt:publish(C2, nth(2, ?TOPICS), <<"qos 0">>, 0), @@ -165,8 +170,10 @@ t_overlapping_subscriptions(_) -> {ok, C} = emqtt:start_link([]), {ok, _} = emqtt:connect(C), - {ok, _, [2, 1]} = emqtt:subscribe(C, [{nth(7, ?WILD_TOPICS), 2}, - {nth(1, ?WILD_TOPICS), 1}]), + {ok, _, [2, 1]} = emqtt:subscribe(C, [ + {nth(7, ?WILD_TOPICS), 2}, + {nth(1, ?WILD_TOPICS), 1} + ]), timer:sleep(10), {ok, _} = emqtt:publish(C, nth(4, ?TOPICS), <<"overlapping topic filters">>, 2), timer:sleep(10), @@ -175,12 +182,17 @@ t_overlapping_subscriptions(_) -> ?assert(lists:member(Num, [1, 2])), if Num == 1 -> - ct:pal("This server is publishing one message for all - matching overlapping subscriptions, not one for each."); + ct:pal( + "This server is publishing one message for all\n" + " matching overlapping subscriptions, not one for each." + ); Num == 2 -> - ct:pal("This server is publishing one message per each - matching overlapping subscription."); - true -> ok + ct:pal( + "This server is publishing one message per each\n" + " matching overlapping subscription." + ); + true -> + ok end, emqtt:disconnect(C). 
@@ -208,10 +220,18 @@ t_redelivery_on_reconnect(_) -> {ok, _, [2]} = emqtt:subscribe(C1, nth(7, ?WILD_TOPICS), 2), timer:sleep(10), ok = emqtt:pause(C1), - {ok, _} = emqtt:publish(C1, nth(2, ?TOPICS), <<>>, - [{qos, 1}, {retain, false}]), - {ok, _} = emqtt:publish(C1, nth(4, ?TOPICS), <<>>, - [{qos, 2}, {retain, false}]), + {ok, _} = emqtt:publish( + C1, + nth(2, ?TOPICS), + <<>>, + [{qos, 1}, {retain, false}] + ), + {ok, _} = emqtt:publish( + C1, + nth(4, ?TOPICS), + <<>>, + [{qos, 2}, {retain, false}] + ), timer:sleep(10), ok = emqtt:disconnect(C1), ?assertEqual(0, length(recv_msgs(2))), @@ -231,13 +251,19 @@ t_redelivery_on_reconnect(_) -> t_dollar_topics(_) -> ct:pal("$ topics test starting"), - {ok, C} = emqtt:start_link([{clean_start, true}, - {keepalive, 0}]), + {ok, C} = emqtt:start_link([ + {clean_start, true}, + {keepalive, 0} + ]), {ok, _} = emqtt:connect(C), {ok, _, [1]} = emqtt:subscribe(C, nth(6, ?WILD_TOPICS), 1), - {ok, _} = emqtt:publish(C, << <<"$">>/binary, (nth(2, ?TOPICS))/binary>>, - <<"test">>, [{qos, 1}, {retain, false}]), + {ok, _} = emqtt:publish( + C, + <<<<"$">>/binary, (nth(2, ?TOPICS))/binary>>, + <<"test">>, + [{qos, 1}, {retain, false}] + ), timer:sleep(10), ?assertEqual(0, length(recv_msgs(1))), ok = emqtt:disconnect(C), @@ -248,9 +274,10 @@ t_dollar_topics(_) -> %%-------------------------------------------------------------------- t_basic_with_props_v5(_) -> - t_basic([{proto_ver, v5}, - {properties, #{'Receive-Maximum' => 4}} - ]). + t_basic([ + {proto_ver, v5}, + {properties, #{'Receive-Maximum' => 4}} + ]). %%-------------------------------------------------------------------- %% General test cases. @@ -285,8 +312,6 @@ t_certcn_as_clientid_tlsv1_3(_) -> t_certcn_as_clientid_tlsv1_2(_) -> tls_certcn_as_clientid('tlsv1.2'). - - %%-------------------------------------------------------------------- %% Helper functions %%-------------------------------------------------------------------- @@ -299,23 +324,23 @@ recv_msgs(0, Msgs) -> recv_msgs(Count, Msgs) -> receive {publish, Msg} -> - recv_msgs(Count-1, [Msg|Msgs]); - _Other -> recv_msgs(Count, Msgs) %%TODO:: remove the branch? + recv_msgs(Count - 1, [Msg | Msgs]); + %%TODO:: remove the branch? + _Other -> + recv_msgs(Count, Msgs) after 100 -> Msgs end. - -confirm_tls_version( Client, RequiredProtocol ) -> +confirm_tls_version(Client, RequiredProtocol) -> Info = emqtt:info(Client), - SocketInfo = proplists:get_value( socket, Info ), + SocketInfo = proplists:get_value(socket, Info), %% emqtt_sock has #ssl_socket.ssl - SSLSocket = element( 3, SocketInfo ), - { ok, SSLInfo } = ssl:connection_information(SSLSocket), - Protocol = proplists:get_value( protocol, SSLInfo ), + SSLSocket = element(3, SocketInfo), + {ok, SSLInfo} = ssl:connection_information(SSLSocket), + Protocol = proplists:get_value(protocol, SSLInfo), RequiredProtocol = Protocol. - tls_certcn_as_clientid(default = TLSVsn) -> tls_certcn_as_clientid(TLSVsn, 'tlsv1.3'); tls_certcn_as_clientid(TLSVsn) -> @@ -328,5 +353,5 @@ tls_certcn_as_clientid(TLSVsn, RequiredTLSVsn) -> {ok, Client} = emqtt:start_link([{port, 8883}, {ssl, true}, {ssl_opts, SslConf}]), {ok, _} = emqtt:connect(Client), #{clientinfo := #{clientid := CN}} = emqx_cm:get_chan_info(CN), - confirm_tls_version( Client, RequiredTLSVsn ), + confirm_tls_version(Client, RequiredTLSVsn), emqtt:disconnect(Client). 
diff --git a/apps/emqx/test/emqx_cm_SUITE.erl b/apps/emqx/test/emqx_cm_SUITE.erl index d279975f0..8863088dd 100644 --- a/apps/emqx/test/emqx_cm_SUITE.erl +++ b/apps/emqx/test/emqx_cm_SUITE.erl @@ -24,19 +24,26 @@ -include_lib("snabbkaffe/include/snabbkaffe.hrl"). -define(CM, emqx_cm). --define(ChanInfo,#{conninfo => - #{socktype => tcp, - peername => {{127,0,0,1}, 5000}, - sockname => {{127,0,0,1}, 1883}, - peercert => nossl, - conn_mod => emqx_connection, - receive_maximum => 100}}). +-define(ChanInfo, #{ + conninfo => + #{ + socktype => tcp, + peername => {{127, 0, 0, 1}, 5000}, + sockname => {{127, 0, 0, 1}, 1883}, + peercert => nossl, + conn_mod => emqx_connection, + receive_maximum => 100 + } +}). -define(WAIT(PATTERN, TIMEOUT, RET), - fun() -> - receive PATTERN -> RET - after TIMEOUT -> error({timeout, ?LINE}) end - end()). + fun() -> + receive + PATTERN -> RET + after TIMEOUT -> error({timeout, ?LINE}) + end + end() +). %%-------------------------------------------------------------------- %% CT callbacks @@ -84,7 +91,7 @@ t_get_set_chan_stats(_) -> ok = emqx_cm:insert_channel_info(<<"clientid">>, Info, Stats), ?assertEqual(Stats, emqx_cm:get_chan_stats(<<"clientid">>)), - Stats1 = [{recv_oct, 10}|Stats], + Stats1 = [{recv_oct, 10} | Stats], true = emqx_cm:set_chan_stats(<<"clientid">>, Stats1), ?assertEqual(Stats1, emqx_cm:get_chan_stats(<<"clientid">>)), ok = emqx_cm:unregister_channel(<<"clientid">>), @@ -95,22 +102,27 @@ t_open_session(_) -> ok = meck:expect(emqx_connection, call, fun(_, _) -> ok end), ok = meck:expect(emqx_connection, call, fun(_, _, _) -> ok end), - ClientInfo = #{zone => default, listener => {tcp, default}, - clientid => <<"clientid">>, - username => <<"username">>, - peerhost => {127,0,0,1}}, - ConnInfo = #{socktype => tcp, - peername => {{127,0,0,1}, 5000}, - sockname => {{127,0,0,1}, 1883}, - peercert => nossl, - conn_mod => emqx_connection, - expiry_interval => 0, - receive_maximum => 100}, - {ok, #{session := Session1, present := false}} - = emqx_cm:open_session(true, ClientInfo, ConnInfo), + ClientInfo = #{ + zone => default, + listener => {tcp, default}, + clientid => <<"clientid">>, + username => <<"username">>, + peerhost => {127, 0, 0, 1} + }, + ConnInfo = #{ + socktype => tcp, + peername => {{127, 0, 0, 1}, 5000}, + sockname => {{127, 0, 0, 1}, 1883}, + peercert => nossl, + conn_mod => emqx_connection, + expiry_interval => 0, + receive_maximum => 100 + }, + {ok, #{session := Session1, present := false}} = + emqx_cm:open_session(true, ClientInfo, ConnInfo), ?assertEqual(100, emqx_session:info(inflight_max, Session1)), - {ok, #{session := Session2, present := false}} - = emqx_cm:open_session(true, ClientInfo, ConnInfo), + {ok, #{session := Session2, present := false}} = + emqx_cm:open_session(true, ClientInfo, ConnInfo), ?assertEqual(100, emqx_session:info(inflight_max, Session2)), emqx_cm:unregister_channel(<<"clientid">>), @@ -121,59 +133,69 @@ rand_client_id() -> t_open_session_race_condition(_) -> ClientId = rand_client_id(), - ClientInfo = #{zone => default, listener => {tcp, default}, - clientid => ClientId, - username => <<"username">>, - peerhost => {127,0,0,1}}, - ConnInfo = #{socktype => tcp, - peername => {{127,0,0,1}, 5000}, - sockname => {{127,0,0,1}, 1883}, - peercert => nossl, - conn_mod => emqx_connection, - expiry_interval => 0, - receive_maximum => 100}, + ClientInfo = #{ + zone => default, + listener => {tcp, default}, + clientid => ClientId, + username => <<"username">>, + peerhost => {127, 0, 0, 1} + }, + ConnInfo = #{ + socktype 
=> tcp, + peername => {{127, 0, 0, 1}, 5000}, + sockname => {{127, 0, 0, 1}, 1883}, + peercert => nossl, + conn_mod => emqx_connection, + expiry_interval => 0, + receive_maximum => 100 + }, Parent = self(), OpenASession = fun() -> - timer:sleep(rand:uniform(100)), - OpenR = (emqx_cm:open_session(true, ClientInfo, ConnInfo)), - Parent ! OpenR, - case OpenR of - {ok, _} -> - receive - {'$gen_call', From, discard} -> - gen_server:reply(From, ok), ok - end; - {error, Reason} -> - exit(Reason) - end - end, + timer:sleep(rand:uniform(100)), + OpenR = (emqx_cm:open_session(true, ClientInfo, ConnInfo)), + Parent ! OpenR, + case OpenR of + {ok, _} -> + receive + {'$gen_call', From, discard} -> + gen_server:reply(From, ok), + ok + end; + {error, Reason} -> + exit(Reason) + end + end, N = 1000, - Pids = lists:flatten([[spawn_monitor(OpenASession), spawn_monitor(OpenASession)] || - _ <- lists:seq(1, N)]), + Pids = lists:flatten([ + [spawn_monitor(OpenASession), spawn_monitor(OpenASession)] + || _ <- lists:seq(1, N) + ]), - WaitingRecv = fun _Wr(N1, N2, 0) -> - {N1, N2}; - _Wr(N1, N2, Rest) -> - receive - {ok, _} -> _Wr(N1+1, N2, Rest-1); - {error, _} -> _Wr(N1, N2+1, Rest-1) - end - end, + WaitingRecv = fun + _Wr(N1, N2, 0) -> + {N1, N2}; + _Wr(N1, N2, Rest) -> + receive + {ok, _} -> _Wr(N1 + 1, N2, Rest - 1); + {error, _} -> _Wr(N1, N2 + 1, Rest - 1) + end + end, {Succeeded, Failed} = WaitingRecv(0, 0, 2 * N), ct:pal("Race condition status: succeeded=~p failed=~p~n", [Succeeded, Failed]), ?assertEqual(2 * N, length(Pids)), WaitForDowns = - fun _Wd([{Pid, _Ref}]) -> Pid; + fun + _Wd([{Pid, _Ref}]) -> + Pid; _Wd(Pids0) -> receive {'DOWN', DownRef, process, DownPid, _} -> ?assert(lists:member({DownPid, DownRef}, Pids0)), _Wd(lists:delete({DownPid, DownRef}, Pids0)) - after - 10000 -> - exit(timeout) + after 10000 -> + exit(timeout) end end, Winner = WaitForDowns(Pids), @@ -184,8 +206,11 @@ t_open_session_race_condition(_) -> ?assertMatch([_], ets:lookup(emqx_channel_registry, ClientId)), exit(Winner, kill), - receive {'DOWN', _, process, Winner, _} -> ok end, - ignored = gen_server:call(?CM, ignore, infinity), %% sync + receive + {'DOWN', _, process, Winner, _} -> ok + end, + %% sync + ignored = gen_server:call(?CM, ignore, infinity), ok = emqx_pool:flush_async_tasks(), ?assertEqual([], emqx_cm:lookup_channels(ClientId)). 
@@ -254,24 +279,26 @@ test_stepdown_session(Action, Reason) -> #{conninfo := ConnInfo} = ?ChanInfo, FakeSessionFun = fun Loop() -> - receive - {'$gen_call', From, A} when A =:= kick orelse - A =:= discard orelse - A =:= {takeover, 'begin'} orelse - A =:= {takeover, 'end'} -> - case Reason of - normal when A =:= kick orelse A =:= discard -> - gen_server:reply(From, ok); - timeout -> - %% no response to the call - Loop(); - _ -> - exit(Reason) - end; - Msg -> - ct:pal("(~p) fake_session_discarded ~p", [Action, Msg]), - Loop() - end + receive + {'$gen_call', From, A} when + A =:= kick orelse + A =:= discard orelse + A =:= {takeover, 'begin'} orelse + A =:= {takeover, 'end'} + -> + case Reason of + normal when A =:= kick orelse A =:= discard -> + gen_server:reply(From, ok); + timeout -> + %% no response to the call + Loop(); + _ -> + exit(Reason) + end; + Msg -> + ct:pal("(~p) fake_session_discarded ~p", [Action, Msg]), + Loop() + end end, {Pid1, _} = spawn_monitor(FakeSessionFun), {Pid2, _} = spawn_monitor(FakeSessionFun), @@ -280,14 +307,22 @@ test_stepdown_session(Action, Reason) -> ok = emqx_cm:register_channel(ClientId, Pid2, ConnInfo), ?assertEqual([Pid1, Pid2], lists:sort(emqx_cm:lookup_channels(ClientId))), case Reason of - noproc -> exit(Pid1, kill), exit(Pid2, kill); - _ -> ok + noproc -> + exit(Pid1, kill), + exit(Pid2, kill); + _ -> + ok end, - ok = case Action of - kick -> emqx_cm:kick_session(ClientId); - discard -> emqx_cm:discard_session(ClientId); - {takeover, _} -> none = emqx_cm:takeover_session(ClientId), ok - end, + ok = + case Action of + kick -> + emqx_cm:kick_session(ClientId); + discard -> + emqx_cm:discard_session(ClientId); + {takeover, _} -> + none = emqx_cm:takeover_session(ClientId), + ok + end, case Reason =:= timeout orelse Reason =:= noproc of true -> ?assertEqual(killed, ?WAIT({'DOWN', _, process, Pid1, R}, 2_000, R)), @@ -296,7 +331,8 @@ test_stepdown_session(Action, Reason) -> ?assertEqual(Reason, ?WAIT({'DOWN', _, process, Pid1, R}, 2_000, R)), ?assertEqual(Reason, ?WAIT({'DOWN', _, process, Pid2, R}, 2_000, R)) end, - ignored = gen_server:call(?CM, ignore, infinity), % sync + % sync + ignored = gen_server:call(?CM, ignore, infinity), ok = flush_emqx_pool(), ?assertEqual([], emqx_cm:lookup_channels(ClientId)). @@ -311,22 +347,36 @@ flush_emqx_pool() -> Self = self(), L = lists:seq(1, 1000), lists:foreach(fun(I) -> emqx_pool:async_submit(fun() -> Self ! {done, I} end, []) end, L), - lists:foreach(fun(I) -> receive {done, I} -> ok end end, L). + lists:foreach( + fun(I) -> + receive + {done, I} -> ok + end + end, + L + ). t_discard_session_race(_) -> ClientId = rand_client_id(), ?check_trace( - #{timetrap => 60000}, - begin - #{conninfo := ConnInfo0} = ?ChanInfo, - ConnInfo = ConnInfo0#{conn_mod := emqx_ws_connection}, - {Pid, Ref} = spawn_monitor(fun() -> receive stop -> exit(normal) end end), - ok = emqx_cm:register_channel(ClientId, Pid, ConnInfo), - Pid ! stop, - receive {'DOWN', Ref, process, Pid, normal} -> ok end, - ?assertMatch(ok, emqx_cm:discard_session(ClientId)) - end, - []). + #{timetrap => 60000}, + begin + #{conninfo := ConnInfo0} = ?ChanInfo, + ConnInfo = ConnInfo0#{conn_mod := emqx_ws_connection}, + {Pid, Ref} = spawn_monitor(fun() -> + receive + stop -> exit(normal) + end + end), + ok = emqx_cm:register_channel(ClientId, Pid, ConnInfo), + Pid ! stop, + receive + {'DOWN', Ref, process, Pid, normal} -> ok + end, + ?assertMatch(ok, emqx_cm:discard_session(ClientId)) + end, + [] + ). 
t_takeover_session(_) -> #{conninfo := ConnInfo} = ?ChanInfo, @@ -337,10 +387,13 @@ t_takeover_session(_) -> Parent ! registered, receive {'$gen_call', From, {takeover, 'begin'}} -> - gen_server:reply(From, test), ok + gen_server:reply(From, test), + ok end end), - receive registered -> ok end, + receive + registered -> ok + end, {living, emqx_connection, _, test} = emqx_cm:takeover_session(<<"clientid">>), emqx_cm:unregister_channel(<<"clientid">>). @@ -352,28 +405,40 @@ t_takeover_session_process_gone(_) -> none = emqx_cm:takeover_session(ClientIDTcp), none = emqx_cm:takeover_session(ClientIDWs), meck:new(emqx_connection, [passthrough, no_history]), - meck:expect(emqx_connection, call, - fun(Pid, {takeover, 'begin'}, _) -> - exit({noproc, {gen_server,call,[Pid, takeover_session]}}); - (Pid, What, Args) -> - meck:passthrough([Pid, What, Args]) - end), + meck:expect( + emqx_connection, + call, + fun + (Pid, {takeover, 'begin'}, _) -> + exit({noproc, {gen_server, call, [Pid, takeover_session]}}); + (Pid, What, Args) -> + meck:passthrough([Pid, What, Args]) + end + ), ok = emqx_cm:register_channel(ClientIDTcp, self(), ConnInfo), none = emqx_cm:takeover_session(ClientIDTcp), - meck:expect(emqx_connection, call, - fun(_Pid, {takeover, 'begin'}, _) -> - exit(noproc); - (Pid, What, Args) -> - meck:passthrough([Pid, What, Args]) - end), + meck:expect( + emqx_connection, + call, + fun + (_Pid, {takeover, 'begin'}, _) -> + exit(noproc); + (Pid, What, Args) -> + meck:passthrough([Pid, What, Args]) + end + ), ok = emqx_cm:register_channel(ClientIDWs, self(), ConnInfo), none = emqx_cm:takeover_session(ClientIDWs), - meck:expect(emqx_connection, call, - fun(Pid, {takeover, 'begin'}, _) -> - exit({noproc, {gen_server,call,[Pid, takeover_session]}}); - (Pid, What, Args) -> - meck:passthrough([Pid, What, Args]) - end), + meck:expect( + emqx_connection, + call, + fun + (Pid, {takeover, 'begin'}, _) -> + exit({noproc, {gen_server, call, [Pid, takeover_session]}}); + (Pid, What, Args) -> + meck:passthrough([Pid, What, Args]) + end + ), ok = emqx_cm:register_channel(ClientIDRpc, self(), ConnInfo), none = emqx_cm:takeover_session(ClientIDRpc), emqx_cm:unregister_channel(ClientIDTcp), diff --git a/apps/emqx/test/emqx_common_test_helpers.erl b/apps/emqx/test/emqx_common_test_helpers.erl index ac4d1ba03..33943ddbd 100644 --- a/apps/emqx/test/emqx_common_test_helpers.erl +++ b/apps/emqx/test/emqx_common_test_helpers.erl @@ -19,99 +19,109 @@ -define(THIS_APP, ?MODULE). -include_lib("common_test/include/ct.hrl"). --type(special_config_handler() :: fun()). +-type special_config_handler() :: fun(). --type(apps() :: list(atom())). +-type apps() :: list(atom()). --export([ all/1 - , boot_modules/1 - , start_apps/1 - , start_apps/2 - , start_app/4 - , stop_apps/1 - , reload/2 - , app_path/2 - , deps_path/2 - , flush/0 - , flush/1 - ]). +-export([ + all/1, + boot_modules/1, + start_apps/1, + start_apps/2, + start_app/4, + stop_apps/1, + reload/2, + app_path/2, + deps_path/2, + flush/0, + flush/1 +]). --export([ ensure_mnesia_stopped/0 - , wait_for/4 - , change_emqx_opts/1 - , change_emqx_opts/2 - , client_ssl_twoway/0 - , client_ssl_twoway/1 - , client_ssl/0 - , client_ssl/1 - , wait_mqtt_payload/1 - , not_wait_mqtt_payload/1 - , render_config_file/2 - , read_schema_configs/2 - , load_config/2 - , is_tcp_server_available/2 - , is_tcp_server_available/3 - ]). 
+-export([ + ensure_mnesia_stopped/0, + wait_for/4, + change_emqx_opts/1, + change_emqx_opts/2, + client_ssl_twoway/0, + client_ssl_twoway/1, + client_ssl/0, + client_ssl/1, + wait_mqtt_payload/1, + not_wait_mqtt_payload/1, + render_config_file/2, + read_schema_configs/2, + load_config/2, + is_tcp_server_available/2, + is_tcp_server_available/3 +]). --define( CERTS_PATH(CertName), filename:join( [ "etc", "certs", CertName ]) ). +-define(CERTS_PATH(CertName), filename:join(["etc", "certs", CertName])). --define( MQTT_SSL_TWOWAY, [ { cacertfile, ?CERTS_PATH( "cacert.pem" ) }, - { verify, verify_peer }, - { fail_if_no_peer_cert, true } ] ). +-define(MQTT_SSL_TWOWAY, [ + {cacertfile, ?CERTS_PATH("cacert.pem")}, + {verify, verify_peer}, + {fail_if_no_peer_cert, true} +]). --define( MQTT_SSL_CLIENT_CERTS, [ { keyfile, ?CERTS_PATH( "client-key.pem" ) }, - { cacertfile, ?CERTS_PATH( "cacert.pem" ) }, - { certfile, ?CERTS_PATH( "client-cert.pem" ) } ] ). +-define(MQTT_SSL_CLIENT_CERTS, [ + {keyfile, ?CERTS_PATH("client-key.pem")}, + {cacertfile, ?CERTS_PATH("cacert.pem")}, + {certfile, ?CERTS_PATH("client-cert.pem")} +]). --define( TLS_1_3_CIPHERS, [ { versions, [ 'tlsv1.3' ] }, - { ciphers, [ "TLS_AES_256_GCM_SHA384", - "TLS_AES_128_GCM_SHA256", - "TLS_CHACHA20_POLY1305_SHA256", - "TLS_AES_128_CCM_SHA256", - "TLS_AES_128_CCM_8_SHA256" - ] } - ]). +-define(TLS_1_3_CIPHERS, [ + {versions, ['tlsv1.3']}, + {ciphers, [ + "TLS_AES_256_GCM_SHA384", + "TLS_AES_128_GCM_SHA256", + "TLS_CHACHA20_POLY1305_SHA256", + "TLS_AES_128_CCM_SHA256", + "TLS_AES_128_CCM_8_SHA256" + ]} +]). --define( TLS_OLD_CIPHERS, [ { versions, [ 'tlsv1.1', 'tlsv1.2' ] }, - { ciphers, [ "ECDHE-ECDSA-AES256-GCM-SHA384", - "ECDHE-RSA-AES256-GCM-SHA384", - "ECDHE-ECDSA-AES256-SHA384", - "ECDHE-RSA-AES256-SHA384", - "ECDHE-ECDSA-DES-CBC3-SHA", - "ECDH-ECDSA-AES256-GCM-SHA384", - "ECDH-RSA-AES256-GCM-SHA384", - "ECDH-ECDSA-AES256-SHA384", - "ECDH-RSA-AES256-SHA384", - "DHE-DSS-AES256-GCM-SHA384", - "DHE-DSS-AES256-SHA256", - "AES256-GCM-SHA384", - "AES256-SHA256", - "ECDHE-ECDSA-AES128-GCM-SHA256", - "ECDHE-RSA-AES128-GCM-SHA256", - "ECDHE-ECDSA-AES128-SHA256", - "ECDHE-RSA-AES128-SHA256", - "ECDH-ECDSA-AES128-GCM-SHA256", - "ECDH-RSA-AES128-GCM-SHA256", - "ECDH-ECDSA-AES128-SHA256", - "ECDH-RSA-AES128-SHA256", - "DHE-DSS-AES128-GCM-SHA256", - "DHE-DSS-AES128-SHA256", - "AES128-GCM-SHA256", - "AES128-SHA256", - "ECDHE-ECDSA-AES256-SHA", - "ECDHE-RSA-AES256-SHA", - "DHE-DSS-AES256-SHA", - "ECDH-ECDSA-AES256-SHA", - "ECDH-RSA-AES256-SHA", - "AES256-SHA", - "ECDHE-ECDSA-AES128-SHA", - "ECDHE-RSA-AES128-SHA", - "DHE-DSS-AES128-SHA", - "ECDH-ECDSA-AES128-SHA", - "ECDH-RSA-AES128-SHA", - "AES128-SHA" - ] } - ]). 
+-define(TLS_OLD_CIPHERS, [ + {versions, ['tlsv1.1', 'tlsv1.2']}, + {ciphers, [ + "ECDHE-ECDSA-AES256-GCM-SHA384", + "ECDHE-RSA-AES256-GCM-SHA384", + "ECDHE-ECDSA-AES256-SHA384", + "ECDHE-RSA-AES256-SHA384", + "ECDHE-ECDSA-DES-CBC3-SHA", + "ECDH-ECDSA-AES256-GCM-SHA384", + "ECDH-RSA-AES256-GCM-SHA384", + "ECDH-ECDSA-AES256-SHA384", + "ECDH-RSA-AES256-SHA384", + "DHE-DSS-AES256-GCM-SHA384", + "DHE-DSS-AES256-SHA256", + "AES256-GCM-SHA384", + "AES256-SHA256", + "ECDHE-ECDSA-AES128-GCM-SHA256", + "ECDHE-RSA-AES128-GCM-SHA256", + "ECDHE-ECDSA-AES128-SHA256", + "ECDHE-RSA-AES128-SHA256", + "ECDH-ECDSA-AES128-GCM-SHA256", + "ECDH-RSA-AES128-GCM-SHA256", + "ECDH-ECDSA-AES128-SHA256", + "ECDH-RSA-AES128-SHA256", + "DHE-DSS-AES128-GCM-SHA256", + "DHE-DSS-AES128-SHA256", + "AES128-GCM-SHA256", + "AES128-SHA256", + "ECDHE-ECDSA-AES256-SHA", + "ECDHE-RSA-AES256-SHA", + "DHE-DSS-AES256-SHA", + "ECDH-ECDSA-AES256-SHA", + "ECDH-RSA-AES256-SHA", + "AES256-SHA", + "ECDHE-ECDSA-AES128-SHA", + "ECDHE-RSA-AES128-SHA", + "DHE-DSS-AES128-SHA", + "ECDH-ECDSA-AES128-SHA", + "ECDH-RSA-AES128-SHA", + "AES128-SHA" + ]} +]). -define(DEFAULT_TCP_SERVER_CHECK_AVAIL_TIMEOUT, 1000). @@ -120,20 +130,22 @@ %%------------------------------------------------------------------------------ all(Suite) -> - lists:usort([F || {F, 1} <- Suite:module_info(exports), - string:substr(atom_to_list(F), 1, 2) == "t_" - ]). + lists:usort([ + F + || {F, 1} <- Suite:module_info(exports), + string:substr(atom_to_list(F), 1, 2) == "t_" + ]). %% set emqx app boot modules --spec(boot_modules(all|list(atom())) -> ok). +-spec boot_modules(all | list(atom())) -> ok. boot_modules(Mods) -> application:set_env(emqx, boot_modules, Mods). --spec(start_apps(Apps :: apps()) -> ok). +-spec start_apps(Apps :: apps()) -> ok. start_apps(Apps) -> start_apps(Apps, fun(_) -> ok end). --spec(start_apps(Apps :: apps(), Handler :: special_config_handler()) -> ok). +-spec start_apps(Apps :: apps(), Handler :: special_config_handler()) -> ok. start_apps(Apps, Handler) when is_function(Handler) -> %% Load all application code to beam vm first %% Because, minirest, ekka etc.. application will scan these modules @@ -150,12 +162,14 @@ load(App) -> end. start_app(App, Handler) -> - start_app(App, - app_schema(App), - app_path(App, filename:join(["etc", app_conf_file(App)])), - Handler). + start_app( + App, + app_schema(App), + app_path(App, filename:join(["etc", app_conf_file(App)])), + Handler + ). -app_conf_file(emqx_conf) -> "emqx.conf.all"; +app_conf_file(emqx_conf) -> "emqx.conf.all"; app_conf_file(App) -> atom_to_list(App) ++ ".conf". %% TODO: get rid of cuttlefish @@ -165,9 +179,10 @@ app_schema(App) -> Mod. mustache_vars(App) -> - [{platform_data_dir, app_path(App, "data")}, - {platform_etc_dir, app_path(App, "etc")}, - {platform_log_dir, app_path(App, "log")} + [ + {platform_data_dir, app_path(App, "data")}, + {platform_etc_dir, app_path(App, "etc")}, + {platform_log_dir, app_path(App, "log")} ]. start_app(App, Schema, ConfigFile, SpecAppConfig) -> @@ -183,10 +198,11 @@ start_app(App, Schema, ConfigFile, SpecAppConfig) -> end. 
render_config_file(ConfigFile, Vars0) -> - Temp = case file:read_file(ConfigFile) of - {ok, T} -> T; - {error, Reason} -> error({failed_to_read_config_template, ConfigFile, Reason}) - end, + Temp = + case file:read_file(ConfigFile) of + {ok, T} -> T; + {error, Reason} -> error({failed_to_read_config_template, ConfigFile, Reason}) + end, Vars = [{atom_to_list(N), iolist_to_binary(V)} || {N, V} <- Vars0], Targ = bbmustache:render(Temp, Vars), NewName = ConfigFile ++ ".rendered", @@ -198,13 +214,15 @@ read_schema_configs(Schema, ConfigFile) -> lists:foreach( fun({App, Configs}) -> [application:set_env(App, Par, Value) || {Par, Value} <- Configs] - end, NewConfig). + end, + NewConfig + ). generate_config(SchemaModule, ConfigFile) when is_atom(SchemaModule) -> {ok, Conf0} = hocon:load(ConfigFile, #{format => richmap}), hocon_tconf:generate(SchemaModule, Conf0). --spec(stop_apps(list()) -> ok). +-spec stop_apps(list()) -> ok. stop_apps(Apps) -> [application:stop(App) || App <- Apps ++ [emqx, ekka, mria, mnesia]], ok. @@ -274,7 +292,7 @@ safe_relative_path_2(Path) -> filelib:safe_relative_path(Path, Cwd). -endif. --spec(reload(App :: atom(), SpecAppConfig :: special_config_handler()) -> ok). +-spec reload(App :: atom(), SpecAppConfig :: special_config_handler()) -> ok. reload(App, SpecAppConfigHandler) -> application:stop(App), start_app(App, SpecAppConfigHandler), @@ -295,9 +313,12 @@ change_emqx_opts(SslType) -> change_emqx_opts(SslType, MoreOpts) -> {ok, Listeners} = application:get_env(emqx, listeners), NewListeners = - lists:map(fun(Listener) -> - maybe_inject_listener_ssl_options(SslType, MoreOpts, Listener) - end, Listeners), + lists:map( + fun(Listener) -> + maybe_inject_listener_ssl_options(SslType, MoreOpts, Listener) + end, + Listeners + ), emqx_conf:update([listeners], NewListeners, #{}). maybe_inject_listener_ssl_options(SslType, MoreOpts, {sll, Port, Opts}) -> @@ -319,14 +340,20 @@ inject_listener_ssl_options(SslType, Opts, MoreOpts) -> case SslType of ssl_twoway -> CAfile = app_path(emqx, proplists:get_value(cacertfile, ?MQTT_SSL_TWOWAY)), - MutSslList = lists:keyreplace(cacertfile, 1, ?MQTT_SSL_TWOWAY, {cacertfile, CAfile}), + MutSslList = lists:keyreplace( + cacertfile, 1, ?MQTT_SSL_TWOWAY, {cacertfile, CAfile} + ), lists:merge(TupleList2, MutSslList); _ -> - lists:filter(fun ({cacertfile, _}) -> false; - ({verify, _}) -> false; - ({fail_if_no_peer_cert, _}) -> false; - (_) -> true - end, TupleList2) + lists:filter( + fun + ({cacertfile, _}) -> false; + ({verify, _}) -> false; + ({fail_if_no_peer_cert, _}) -> false; + (_) -> true + end, + TupleList2 + ) end, TupleList4 = emqx_misc:merge_opts(TupleList3, proplists:get_value(ssl_options, MoreOpts, [])), NMoreOpts = emqx_misc:merge_opts(MoreOpts, [{ssl_options, TupleList4}]), @@ -337,9 +364,8 @@ flush() -> flush(Msgs) -> receive - M -> flush([M|Msgs]) - after - 0 -> lists:reverse(Msgs) + M -> flush([M | Msgs]) + after 0 -> lists:reverse(Msgs) end. client_ssl_twoway() -> @@ -350,7 +376,7 @@ client_ssl_twoway(TLSVsn) -> %% Paths prepended to cert filenames client_certs() -> - [ { Key, app_path(emqx, FilePath) } || { Key, FilePath } <- ?MQTT_SSL_CLIENT_CERTS ]. + [{Key, app_path(emqx, FilePath)} || {Key, FilePath} <- ?MQTT_SSL_CLIENT_CERTS]. client_ssl() -> client_ssl(default). @@ -358,7 +384,8 @@ client_ssl() -> client_ssl(TLSVsn) -> ciphers(TLSVsn) ++ [{reuse_sessions, true}]. 
-ciphers(default) -> []; %% determined via config file defaults +%% determined via config file defaults +ciphers(default) -> []; ciphers('tlsv1.3') -> ?TLS_1_3_CIPHERS; ciphers(_OlderTLSVsn) -> ?TLS_OLD_CIPHERS. @@ -386,27 +413,26 @@ wait_for_down(Fn, Ln, Timeout, Pid, Mref, Kill) -> erlang:error({unexpected, Fn, Ln, Result}); {'DOWN', Mref, process, Pid, {crashed, {C, E, S}}} -> erlang:raise(C, {Fn, Ln, E}, S) - after - Timeout -> - case Kill of - true -> - erlang:demonitor(Mref, [flush]), - erlang:exit(Pid, kill), - erlang:error({Fn, Ln, timeout}); - false -> - Pid ! stop, - wait_for_down(Fn, Ln, Timeout, Pid, Mref, true) - end + after Timeout -> + case Kill of + true -> + erlang:demonitor(Mref, [flush]), + erlang:exit(Pid, kill), + erlang:error({Fn, Ln, timeout}); + false -> + Pid ! stop, + wait_for_down(Fn, Ln, Timeout, Pid, Mref, true) + end end. -wait_loop(_F, ok) -> exit(normal); +wait_loop(_F, ok) -> + exit(normal); wait_loop(F, LastRes) -> receive stop -> erlang:exit(LastRes) - after - 100 -> - Res = catch_call(F), - wait_loop(F, Res) + after 100 -> + Res = catch_call(F), + wait_loop(F, Res) end. catch_call(F) -> @@ -416,7 +442,7 @@ catch_call(F) -> Other -> {unexpected, Other} end catch - C : E : S -> + C:E:S -> {crashed, {C, E, S}} end. force_set_config_file_paths(emqx_conf, Paths) -> @@ -429,21 +455,27 @@ force_set_config_file_paths(_, _) -> copy_certs(emqx_conf, Dest0) -> Dest = filename:dirname(Dest0), From = string:replace(Dest, "emqx_conf", "emqx"), - os:cmd( ["cp -rf ", From, "/certs ", Dest, "/"]), + os:cmd(["cp -rf ", From, "/certs ", Dest, "/"]), ok; -copy_certs(_, _) -> ok. +copy_certs(_, _) -> + ok. load_config(SchemaModule, Config) -> ok = emqx_config:delete_override_conf_files(), ok = emqx_config:init_load(SchemaModule, Config). --spec is_tcp_server_available(Host :: inet:socket_address() | inet:hostname(), - Port :: inet:port_number()) -> boolean. +-spec is_tcp_server_available( + Host :: inet:socket_address() | inet:hostname(), + Port :: inet:port_number() +) -> boolean. is_tcp_server_available(Host, Port) -> is_tcp_server_available(Host, Port, ?DEFAULT_TCP_SERVER_CHECK_AVAIL_TIMEOUT). --spec is_tcp_server_available(Host :: inet:socket_address() | inet:hostname(), - Port :: inet:port_number(), Timeout :: integer()) -> boolean. +-spec is_tcp_server_available( + Host :: inet:socket_address() | inet:hostname(), + Port :: inet:port_number(), + Timeout :: integer() +) -> boolean. is_tcp_server_available(Host, Port, Timeout) -> case gen_tcp:connect(Host, Port, [], Timeout) of {ok, Socket} -> diff --git a/apps/emqx/test/emqx_common_test_http.erl b/apps/emqx/test/emqx_common_test_http.erl index c9d840fa4..ae7706d65 100644 --- a/apps/emqx/test/emqx_common_test_http.erl +++ b/apps/emqx/test/emqx_common_test_http.erl @@ -18,15 +18,16 @@ -include_lib("common_test/include/ct.hrl"). --export([ request_api/3 - , request_api/4 - , request_api/5 - , get_http_data/1 - , create_default_app/0 - , delete_default_app/0 - , default_auth_header/0 - , auth_header/2 - ]). +-export([ + request_api/3, + request_api/4, + request_api/5, + get_http_data/1, + create_default_app/0, + delete_default_app/0, + default_auth_header/0, + auth_header/2 +]). request_api(Method, Url, Auth) -> request_api(Method, Url, [], Auth, []). @@ -38,18 +39,20 @@ request_api(Method, Url, QueryParams, Auth, Body) -> request_api(Method, Url, QueryParams, Auth, Body, []). request_api(Method, Url, QueryParams, Auth, Body, HttpOpts) -> - NewUrl = case QueryParams of - [] -> - Url; - _ -> - Url ++ "?" 
++ QueryParams - end, - Request = case Body of - [] -> - {NewUrl, [Auth]}; - _ -> - {NewUrl, [Auth], "application/json", emqx_json:encode(Body)} - end, + NewUrl = + case QueryParams of + [] -> + Url; + _ -> + Url ++ "?" ++ QueryParams + end, + Request = + case Body of + [] -> + {NewUrl, [Auth]}; + _ -> + {NewUrl, [Auth], "application/json", emqx_json:encode(Body)} + end, do_request_api(Method, Request, HttpOpts). do_request_api(Method, Request, HttpOpts) -> @@ -57,7 +60,7 @@ do_request_api(Method, Request, HttpOpts) -> case httpc:request(Method, Request, HttpOpts, [{body_format, binary}]) of {error, socket_closed_remotely} -> {error, socket_closed_remotely}; - {ok, {{"HTTP/1.1", Code, _}, _Headers, Return} } -> + {ok, {{"HTTP/1.1", Code, _}, _Headers, Return}} -> {ok, Code, Return}; {ok, {Reason, _, _}} -> {error, Reason} @@ -67,8 +70,8 @@ get_http_data(ResponseBody) -> emqx_json:decode(ResponseBody, [return_maps]). auth_header(User, Pass) -> - Encoded = base64:encode_to_string(lists:append([User,":",Pass])), - {"Authorization","Basic " ++ Encoded}. + Encoded = base64:encode_to_string(lists:append([User, ":", Pass])), + {"Authorization", "Basic " ++ Encoded}. default_auth_header() -> AppId = <<"myappid">>, diff --git a/apps/emqx/test/emqx_config_SUITE.erl b/apps/emqx/test/emqx_config_SUITE.erl index 46ef7cba8..6277994f1 100644 --- a/apps/emqx/test/emqx_config_SUITE.erl +++ b/apps/emqx/test/emqx_config_SUITE.erl @@ -32,20 +32,30 @@ end_per_suite(_Config) -> t_fill_default_values(_) -> Conf = #{ - <<"broker">> => #{ - <<"perf">> => #{}, - <<"route_batch_clean">> => false} + <<"broker">> => #{ + <<"perf">> => #{}, + <<"route_batch_clean">> => false + } }, WithDefaults = emqx_config:fill_defaults(Conf), - ?assertMatch(#{<<"broker">> := - #{<<"enable_session_registry">> := true, - <<"perf">> := - #{<<"route_lock_type">> := key, - <<"trie_compaction">> := true}, - <<"route_batch_clean">> := false, - <<"session_locking_strategy">> := quorum, - <<"shared_dispatch_ack_enabled">> := false, - <<"shared_subscription_strategy">> := round_robin}}, WithDefaults), + ?assertMatch( + #{ + <<"broker">> := + #{ + <<"enable_session_registry">> := true, + <<"perf">> := + #{ + <<"route_lock_type">> := key, + <<"trie_compaction">> := true + }, + <<"route_batch_clean">> := false, + <<"session_locking_strategy">> := quorum, + <<"shared_dispatch_ack_enabled">> := false, + <<"shared_subscription_strategy">> := round_robin + } + }, + WithDefaults + ), %% ensure JSON compatible _ = emqx_json:encode(WithDefaults), ok. 
diff --git a/apps/emqx/test/emqx_config_handler_SUITE.erl b/apps/emqx/test/emqx_config_handler_SUITE.erl index 821ab0653..6861a0be1 100644 --- a/apps/emqx/test/emqx_config_handler_SUITE.erl +++ b/apps/emqx/test/emqx_config_handler_SUITE.erl @@ -45,8 +45,10 @@ t_handler(_Config) -> BadCallBackMod = emqx, RootKey = sysmon, %% bad - ?assertError(#{msg := "bad_emqx_config_handler_callback", module := BadCallBackMod}, - emqx_config_handler:add_handler([RootKey], BadCallBackMod)), + ?assertError( + #{msg := "bad_emqx_config_handler_callback", module := BadCallBackMod}, + emqx_config_handler:add_handler([RootKey], BadCallBackMod) + ), %% simple ok = emqx_config_handler:add_handler([RootKey], ?MODULE), #{handlers := Handlers0} = emqx_config_handler:info(), @@ -79,19 +81,25 @@ t_handler(_Config) -> t_conflict_handler(_Config) -> ok = emqx_config_handler:add_handler([sysmon, '?', '?'], ?MODULE), - ?assertMatch({error, {conflict, _}}, - emqx_config_handler:add_handler([sysmon, '?', cpu_check_interval], ?MODULE)), + ?assertMatch( + {error, {conflict, _}}, + emqx_config_handler:add_handler([sysmon, '?', cpu_check_interval], ?MODULE) + ), ok = emqx_config_handler:remove_handler([sysmon, '?', '?']), ok = emqx_config_handler:add_handler([sysmon, '?', cpu_check_interval], ?MODULE), - ?assertMatch({error, {conflict, _}}, - emqx_config_handler:add_handler([sysmon, '?', '?'], ?MODULE)), + ?assertMatch( + {error, {conflict, _}}, + emqx_config_handler:add_handler([sysmon, '?', '?'], ?MODULE) + ), ok = emqx_config_handler:remove_handler([sysmon, '?', cpu_check_interval]), %% override ok = emqx_config_handler:add_handler([sysmon], emqx_config_logger), - ?assertMatch(#{handlers := #{sysmon := #{{mod} := emqx_config_logger}}}, - emqx_config_handler:info()), + ?assertMatch( + #{handlers := #{sysmon := #{{mod} := emqx_config_logger}}}, + emqx_config_handler:info() + ), ok. 
t_root_key_update(_Config) -> @@ -100,20 +108,31 @@ t_root_key_update(_Config) -> ok = emqx_config_handler:add_handler(PathKey, ?MODULE), %% update Old = #{<<"os">> := OS} = emqx:get_raw_config(PathKey), - {ok, Res} = emqx:update_config(PathKey, - Old#{<<"os">> => OS#{<<"cpu_check_interval">> => <<"12s">>}}, Opts), - ?assertMatch(#{config := #{os := #{cpu_check_interval := 12000}}, - post_config_update := #{?MODULE := ok}, - raw_config := #{<<"os">> := #{<<"cpu_check_interval">> := <<"12s">>}}}, - Res), + {ok, Res} = emqx:update_config( + PathKey, + Old#{<<"os">> => OS#{<<"cpu_check_interval">> => <<"12s">>}}, + Opts + ), + ?assertMatch( + #{ + config := #{os := #{cpu_check_interval := 12000}}, + post_config_update := #{?MODULE := ok}, + raw_config := #{<<"os">> := #{<<"cpu_check_interval">> := <<"12s">>}} + }, + Res + ), ?assertMatch(#{os := #{cpu_check_interval := 12000}}, emqx:get_config(PathKey)), %% update sub key SubKey = PathKey ++ [os, cpu_high_watermark], - ?assertEqual({ok,#{config => 0.81, - post_config_update => #{?MODULE => ok}, - raw_config => <<"81%">>}}, - emqx:update_config(SubKey, "81%", Opts)), + ?assertEqual( + {ok, #{ + config => 0.81, + post_config_update => #{?MODULE => ok}, + raw_config => <<"81%">> + }}, + emqx:update_config(SubKey, "81%", Opts) + ), ?assertEqual(0.81, emqx:get_config(SubKey)), ?assertEqual("81%", emqx:get_raw_config(SubKey)), %% remove @@ -128,32 +147,47 @@ t_sub_key_update_remove(_Config) -> Opts = #{}, ok = emqx_config_handler:add_handler(KeyPath, ?MODULE), {ok, Res} = emqx:update_config(KeyPath, <<"60s">>, Opts), - ?assertMatch(#{config := 60000, - post_config_update := #{?MODULE := ok}, - raw_config := <<"60s">>}, - Res), + ?assertMatch( + #{ + config := 60000, + post_config_update := #{?MODULE := ok}, + raw_config := <<"60s">> + }, + Res + ), ?assertMatch(60000, emqx:get_config(KeyPath)), KeyPath2 = [sysmon, os, cpu_low_watermark], ok = emqx_config_handler:add_handler(KeyPath2, ?MODULE), {ok, Res1} = emqx:update_config(KeyPath2, <<"40%">>, Opts), - ?assertMatch(#{config := 0.4, - post_config_update := #{}, - raw_config := <<"40%">>}, - Res1), + ?assertMatch( + #{ + config := 0.4, + post_config_update := #{}, + raw_config := <<"40%">> + }, + Res1 + ), ?assertMatch(0.4, emqx:get_config(KeyPath2)), %% remove - ?assertEqual({ok,#{post_config_update => #{emqx_config_handler_SUITE => ok}}}, - emqx:remove_config(KeyPath)), + ?assertEqual( + {ok, #{post_config_update => #{emqx_config_handler_SUITE => ok}}}, + emqx:remove_config(KeyPath) + ), ?assertError({config_not_found, KeyPath}, emqx:get_raw_config(KeyPath)), OSKey = maps:keys(emqx:get_raw_config([sysmon, os])), ?assertEqual(false, lists:member(<<"cpu_check_interval">>, OSKey)), ?assert(length(OSKey) > 0), - ?assertEqual({ok,#{config => 60000, - post_config_update => #{?MODULE => ok}, - raw_config => <<"60s">>}}, emqx:reset_config(KeyPath, Opts)), + ?assertEqual( + {ok, #{ + config => 60000, + post_config_update => #{?MODULE => ok}, + raw_config => <<"60s">> + }}, + emqx:reset_config(KeyPath, Opts) + ), OSKey1 = maps:keys(emqx:get_raw_config([sysmon, os])), ?assertEqual(true, lists:member(<<"cpu_check_interval">>, OSKey1)), ?assert(length(OSKey1) > 1), @@ -196,13 +230,19 @@ t_callback_crash(_Config) -> ok. 
t_pre_callback_error(_Config) -> - callback_error([sysmon, os, mem_check_interval], <<"100s">>, - {error, {pre_config_update, ?MODULE, pre_config_update_error}}), + callback_error( + [sysmon, os, mem_check_interval], + <<"100s">>, + {error, {pre_config_update, ?MODULE, pre_config_update_error}} + ), ok. -t_post_update_error(_Config) -> - callback_error([sysmon, os, sysmem_high_watermark], <<"60%">>, - {error, {post_config_update, ?MODULE, post_config_update_error}}), +t_post_update_error(_Config) -> + callback_error( + [sysmon, os, sysmem_high_watermark], + <<"60%">>, + {error, {post_config_update, ?MODULE, post_config_update_error}} + ), ok. t_handler_root() -> @@ -212,13 +252,19 @@ t_handler_root() -> ok = emqx_config_handler:add_handler(RootKey, ?MODULE), %% update Old = #{<<"sysmon">> := #{<<"os">> := OS}} = emqx:get_raw_config(RootKey), - {ok, Res} = emqx:update_config(RootKey, + {ok, Res} = emqx:update_config( + RootKey, Old#{<<"sysmon">> => #{<<"os">> => OS#{<<"cpu_check_interval">> => <<"12s">>}}}, - Opts), - ?assertMatch(#{config := #{os := #{cpu_check_interval := 12000}}, - post_config_update := #{?MODULE := ok}, - raw_config := #{<<"os">> := #{<<"cpu_check_interval">> := <<"12s">>}}}, - Res), + Opts + ), + ?assertMatch( + #{ + config := #{os := #{cpu_check_interval := 12000}}, + post_config_update := #{?MODULE := ok}, + raw_config := #{<<"os">> := #{<<"cpu_check_interval">> := <<"12s">>}} + }, + Res + ), ?assertMatch(#{sysmon := #{os := #{cpu_check_interval := 12000}}}, emqx:get_config(RootKey)), ok = emqx_config_handler:remove_handler(RootKey), ok. @@ -250,27 +296,34 @@ t_update_sub(_Config) -> %% update sub key #{<<"os">> := OS1} = emqx:get_raw_config(PathKey), {ok, Res} = emqx:update_config(PathKey ++ [os, cpu_check_interval], <<"120s">>, Opts), - ?assertMatch(#{config := 120000, - post_config_update := #{?MODULE := ok}, - raw_config := <<"120s">>}, - Res), + ?assertMatch( + #{ + config := 120000, + post_config_update := #{?MODULE := ok}, + raw_config := <<"120s">> + }, + Res + ), ?assertMatch(#{os := #{cpu_check_interval := 120000}}, emqx:get_config(PathKey)), #{<<"os">> := OS2} = emqx:get_raw_config(PathKey), ?assertEqual(lists:sort(maps:keys(OS1)), lists:sort(maps:keys(OS2))), %% update sub key SubKey = PathKey ++ [os, cpu_high_watermark], - ?assertEqual({ok,#{config => 0.81, - post_config_update => #{?MODULE => ok}, - raw_config => <<"81%">>}}, - emqx:update_config(SubKey, "81%", Opts)), + ?assertEqual( + {ok, #{ + config => 0.81, + post_config_update => #{?MODULE => ok}, + raw_config => <<"81%">> + }}, + emqx:update_config(SubKey, "81%", Opts) + ), ?assertEqual(0.81, emqx:get_config(SubKey)), ?assertEqual("81%", emqx:get_raw_config(SubKey)), ok = emqx_config_handler:remove_handler(PathKey), ok. - pre_config_update([sysmon], UpdateReq, _RawConf) -> {ok, UpdateReq}; pre_config_update([sysmon, os], UpdateReq, _RawConf) -> @@ -300,10 +353,11 @@ wait_for_new_pid() -> undefined -> ct:sleep(10), wait_for_new_pid(); - Pid -> Pid + Pid -> + Pid end. -callback_error(FailedPath, Update, Error) -> +callback_error(FailedPath, Update, Error) -> Opts = #{rawconf_with_defaults => true}, ok = emqx_config_handler:add_handler(FailedPath, ?MODULE), Old = emqx:get_raw_config(FailedPath), diff --git a/apps/emqx/test/emqx_connection_SUITE.erl b/apps/emqx/test/emqx_connection_SUITE.erl index f96ac6ca7..5f1ac66fc 100644 --- a/apps/emqx/test/emqx_connection_SUITE.erl +++ b/apps/emqx/test/emqx_connection_SUITE.erl @@ -76,21 +76,26 @@ end_per_suite(_Config) -> ok. 
init_per_testcase(TestCase, Config) when - TestCase =/= t_ws_pingreq_before_connected -> + TestCase =/= t_ws_pingreq_before_connected +-> ok = meck:expect(emqx_transport, wait, fun(Sock) -> {ok, Sock} end), ok = meck:expect(emqx_transport, type, fun(_Sock) -> tcp end), - ok = meck:expect(emqx_transport, ensure_ok_or_exit, - fun(peername, [sock]) -> {ok, {{127,0,0,1}, 3456}}; - (sockname, [sock]) -> {ok, {{127,0,0,1}, 1883}}; - (peercert, [sock]) -> undefined - end), + ok = meck:expect( + emqx_transport, + ensure_ok_or_exit, + fun + (peername, [sock]) -> {ok, {{127, 0, 0, 1}, 3456}}; + (sockname, [sock]) -> {ok, {{127, 0, 0, 1}, 1883}}; + (peercert, [sock]) -> undefined + end + ), ok = meck:expect(emqx_transport, setopts, fun(_Sock, _Opts) -> ok end), ok = meck:expect(emqx_transport, getopts, fun(_Sock, Options) -> - {ok, [{K, 0} || K <- Options]} - end), + {ok, [{K, 0} || K <- Options]} + end), ok = meck:expect(emqx_transport, getstat, fun(_Sock, Options) -> - {ok, [{K, 0} || K <- Options]} - end), + {ok, [{K, 0} || K <- Options]} + end), ok = meck:expect(emqx_transport, async_send, fun(_Sock, _Data) -> ok end), ok = meck:expect(emqx_transport, async_send, fun(_Sock, _Data, _Opts) -> ok end), ok = meck:expect(emqx_transport, fast_close, fun(_Sock) -> ok end), @@ -112,23 +117,29 @@ end_per_testcase(TestCase, Config) -> %% Test cases %%-------------------------------------------------------------------- t_ws_pingreq_before_connected(_) -> - ?assertMatch({ok, [_, {close,protocol_error}], _}, - handle_msg({incoming, ?PACKET(?PINGREQ)}, st(#{}, #{conn_state => disconnected}))). + ?assertMatch( + {ok, [_, {close, protocol_error}], _}, + handle_msg({incoming, ?PACKET(?PINGREQ)}, st(#{}, #{conn_state => disconnected})) + ). t_info(_) -> CPid = spawn(fun() -> - receive - {'$gen_call', From, info} -> - gen_server:reply(From, emqx_connection:info(st())) - after - 100 -> error("error") - end - end), + receive + {'$gen_call', From, info} -> + gen_server:reply(From, emqx_connection:info(st())) + after 100 -> error("error") + end + end), #{sockinfo := SockInfo} = emqx_connection:info(CPid), - ?assertMatch(#{ peername := {{127,0,0,1},3456}, - sockname := {{127,0,0,1},1883}, - sockstate := idle, - socktype := tcp}, SockInfo). + ?assertMatch( + #{ + peername := {{127, 0, 0, 1}, 3456}, + sockname := {{127, 0, 0, 1}, 1883}, + sockstate := idle, + socktype := tcp + }, + SockInfo + ). t_info_limiter(_) -> Limiter = init_limiter(), @@ -137,33 +148,44 @@ t_info_limiter(_) -> t_stats(_) -> CPid = spawn(fun() -> - receive - {'$gen_call', From, stats} -> - gen_server:reply(From, emqx_connection:stats(st())) - after - 100 -> error("error") - end - end), + receive + {'$gen_call', From, stats} -> + gen_server:reply(From, emqx_connection:stats(st())) + after 100 -> error("error") + end + end), Stats = emqx_connection:stats(CPid), - ?assertMatch([{recv_oct,0}, - {recv_cnt,0}, - {send_oct,0}, - {send_cnt,0}, - {send_pend,0}| _] , Stats). + ?assertMatch( + [ + {recv_oct, 0}, + {recv_cnt, 0}, + {send_oct, 0}, + {send_cnt, 0}, + {send_pend, 0} + | _ + ], + Stats + ). t_process_msg(_) -> - with_conn(fun(CPid) -> - ok = meck:expect(emqx_channel, handle_in, - fun(_Packet, Channel) -> - {ok, Channel} - end), - CPid ! {incoming, ?PACKET(?PINGREQ)}, - CPid ! {incoming, undefined}, - CPid ! {tcp_passive, sock}, - CPid ! {tcp_closed, sock}, - timer:sleep(100), - ok = trap_exit(CPid, {shutdown, tcp_closed}) - end, #{trap_exit => true}). 
+ with_conn( + fun(CPid) -> + ok = meck:expect( + emqx_channel, + handle_in, + fun(_Packet, Channel) -> + {ok, Channel} + end + ), + CPid ! {incoming, ?PACKET(?PINGREQ)}, + CPid ! {incoming, undefined}, + CPid ! {tcp_passive, sock}, + CPid ! {tcp_closed, sock}, + timer:sleep(100), + ok = trap_exit(CPid, {shutdown, tcp_closed}) + end, + #{trap_exit => true} + ). t_ensure_stats_timer(_) -> NStats = emqx_connection:ensure_stats_timer(100, st()), @@ -180,26 +202,34 @@ t_cancel_stats_timer(_) -> t_append_msg(_) -> ?assertEqual([msg], emqx_connection:append_msg([], [msg])), ?assertEqual([msg], emqx_connection:append_msg([], msg)), - ?assertEqual([msg1,msg], emqx_connection:append_msg([msg1], [msg])), - ?assertEqual([msg1,msg], emqx_connection:append_msg([msg1], msg)). + ?assertEqual([msg1, msg], emqx_connection:append_msg([msg1], [msg])), + ?assertEqual([msg1, msg], emqx_connection:append_msg([msg1], msg)). t_handle_msg(_) -> From = {make_ref(), self()}, ?assertMatch({ok, _St}, handle_msg({'$gen_call', From, for_testing}, st())), - ?assertMatch({stop, {shutdown,discarded}, _St}, handle_msg({'$gen_call', From, discard}, st())), - ?assertMatch({stop, {shutdown,discarded}, _St}, handle_msg({'$gen_call', From, discard}, st())), + ?assertMatch( + {stop, {shutdown, discarded}, _St}, handle_msg({'$gen_call', From, discard}, st()) + ), + ?assertMatch( + {stop, {shutdown, discarded}, _St}, handle_msg({'$gen_call', From, discard}, st()) + ), ?assertMatch({ok, [], _St}, handle_msg({tcp, From, <<"for_testing">>}, st())), ?assertMatch({ok, _St}, handle_msg(for_testing, st())). t_handle_msg_incoming(_) -> - ?assertMatch({ok, _Out, _St}, - handle_msg({incoming, ?CONNECT_PACKET(#mqtt_packet_connect{})}, st())), + ?assertMatch( + {ok, _Out, _St}, + handle_msg({incoming, ?CONNECT_PACKET(#mqtt_packet_connect{})}, st()) + ), ok = meck:expect(emqx_channel, handle_in, fun(_Packet, Channel) -> {ok, Channel} end), - ?assertMatch({ok, _St}, - handle_msg({incoming, ?PUBLISH_PACKET(?QOS_1, <<"t">>, 1, <<"payload">>)}, st())), - Sub1 = <>, + ?assertMatch( + {ok, _St}, + handle_msg({incoming, ?PUBLISH_PACKET(?QOS_1, <<"t">>, 1, <<"payload">>)}, st()) + ), + Sub1 = <>, ?assertMatch({ok, _St}, handle_msg({incoming, Sub1}, st())), - Sub2 = <>, + Sub2 = <>, ?assertMatch({ok, _St}, handle_msg({incoming, Sub2}, st())), ?assertMatch({ok, _St}, handle_msg({incoming, undefined}, st())). @@ -209,8 +239,10 @@ t_handle_msg_outgoing(_) -> ?assertEqual(ok, handle_msg({outgoing, ?PUBCOMP_PACKET(1)}, st())). t_handle_msg_tcp_error(_) -> - ?assertMatch({stop, {shutdown, econnreset}, _St}, - handle_msg({tcp_error, sock, econnreset}, st())). + ?assertMatch( + {stop, {shutdown, econnreset}, _St}, + handle_msg({tcp_error, sock, econnreset}, st()) + ). t_handle_msg_tcp_closed(_) -> ?assertMatch({stop, {shutdown, tcp_closed}, _St}, handle_msg({tcp_closed, sock}, st())). @@ -228,8 +260,10 @@ t_handle_msg_inet_reply(_) -> ?assertMatch({ok, _St}, handle_msg({inet_reply, for_testing, ok}, st())), emqx_config:put_listener_conf(tcp, default, [tcp, active_n], 100), ?assertEqual(ok, handle_msg({inet_reply, for_testing, ok}, st())), - ?assertMatch({stop, {shutdown, for_testing}, _St}, - handle_msg({inet_reply, for_testing, {error, for_testing}}, st())). + ?assertMatch( + {stop, {shutdown, for_testing}, _St}, + handle_msg({inet_reply, for_testing, {error, for_testing}}, st()) + ). t_handle_msg_connack(_) -> ?assertEqual(ok, handle_msg({connack, ?CONNACK_PACKET(?CONNACK_ACCEPT)}, st())). 
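The suite above builds its transport stubs with meck's multi-clause fun expectations, which is what the re-indentation targets. A minimal standalone sketch of that pattern (not part of the patch), with hypothetical names (my_transport, the sock atom) and assuming only that the meck application is on the code path:

    -module(meck_style_example).
    -export([demo/0]).

    demo() ->
        %% Create a mock for a module that does not need to exist (non_strict).
        ok = meck:new(my_transport, [non_strict, no_link]),
        %% One expectation, several clauses: the same layout used in the suite above.
        ok = meck:expect(
            my_transport,
            ensure_ok_or_exit,
            fun
                (peername, [sock]) -> {ok, {{127, 0, 0, 1}, 3456}};
                (sockname, [sock]) -> {ok, {{127, 0, 0, 1}, 1883}};
                (peercert, [sock]) -> undefined
            end
        ),
        {ok, {{127, 0, 0, 1}, 3456}} = my_transport:ensure_ok_or_exit(peername, [sock]),
        ok = meck:unload(my_transport).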
@@ -258,25 +292,37 @@ t_handle_call(_) -> ?assertMatch({reply, _Info, _NSt}, handle_call(self(), info, St)), ?assertMatch({reply, _Stats, _NSt}, handle_call(self(), stats, St)), ?assertMatch({reply, ok, _NSt}, handle_call(self(), {ratelimit, []}, St)), - ?assertMatch({reply, ok, _NSt}, - handle_call(self(), {ratelimit, [{bytes_in, default}]}, St)), + ?assertMatch( + {reply, ok, _NSt}, + handle_call(self(), {ratelimit, [{bytes_in, default}]}, St) + ), ?assertEqual({reply, ignored, St}, handle_call(self(), for_testing, St)), - ?assertMatch({stop, {shutdown,kicked}, ok, _NSt}, - handle_call(self(), kick, St)). + ?assertMatch( + {stop, {shutdown, kicked}, ok, _NSt}, + handle_call(self(), kick, St) + ). t_handle_timeout(_) -> TRef = make_ref(), State = st(#{idle_timer => TRef, stats_timer => TRef, limiter => init_limiter()}), - ?assertMatch({stop, {shutdown,idle_timeout}, _NState}, - emqx_connection:handle_timeout(TRef, idle_timeout, State)), - ?assertMatch({ok, _NState}, - emqx_connection:handle_timeout(TRef, emit_stats, State)), - ?assertMatch({ok, _NState}, - emqx_connection:handle_timeout(TRef, keepalive, State)), + ?assertMatch( + {stop, {shutdown, idle_timeout}, _NState}, + emqx_connection:handle_timeout(TRef, idle_timeout, State) + ), + ?assertMatch( + {ok, _NState}, + emqx_connection:handle_timeout(TRef, emit_stats, State) + ), + ?assertMatch( + {ok, _NState}, + emqx_connection:handle_timeout(TRef, keepalive, State) + ), ok = meck:expect(emqx_transport, getstat, fun(_Sock, _Options) -> {error, for_testing} end), - ?assertMatch({stop, {shutdown,for_testing}, _NState}, - emqx_connection:handle_timeout(TRef, keepalive, State)), + ?assertMatch( + {stop, {shutdown, for_testing}, _NState}, + emqx_connection:handle_timeout(TRef, keepalive, State) + ), ?assertMatch({ok, _NState}, emqx_connection:handle_timeout(TRef, undefined, State)). t_parse_incoming(_) -> @@ -285,14 +331,20 @@ t_parse_incoming(_) -> t_next_incoming_msgs(_) -> State = st(#{}), - ?assertEqual({ok, [{incoming, packet}], State}, - emqx_connection:next_incoming_msgs([packet], [], State)), - ?assertEqual({ok, [{incoming, packet2}, {incoming, packet1}], State}, - emqx_connection:next_incoming_msgs([packet1, packet2], [], State)). + ?assertEqual( + {ok, [{incoming, packet}], State}, + emqx_connection:next_incoming_msgs([packet], [], State) + ), + ?assertEqual( + {ok, [{incoming, packet2}, {incoming, packet1}], State}, + emqx_connection:next_incoming_msgs([packet1, packet2], [], State) + ). t_handle_incoming(_) -> - ?assertMatch({ok, _Out, _NState}, - emqx_connection:handle_incoming(?CONNECT_PACKET(#mqtt_packet_connect{}), st())), + ?assertMatch( + {ok, _Out, _NState}, + emqx_connection:handle_incoming(?CONNECT_PACKET(#mqtt_packet_connect{}), st()) + ), ?assertMatch({ok, _Out, _NState}, emqx_connection:handle_incoming(frame_error, st())). 
t_with_channel(_) -> @@ -300,26 +352,50 @@ t_with_channel(_) -> ok = meck:expect(emqx_channel, handle_in, fun(_, _) -> ok end), ?assertEqual({ok, State}, emqx_connection:with_channel(handle_in, [for_testing], State)), - ok = meck:expect(emqx_channel, handle_in, fun(_, _) -> Channel = channel(), {ok, Channel} end), + ok = meck:expect(emqx_channel, handle_in, fun(_, _) -> + Channel = channel(), + {ok, Channel} + end), ?assertMatch({ok, _NState}, emqx_connection:with_channel(handle_in, [for_testing], State)), - ok = meck:expect(emqx_channel, handle_in, - fun(_, _) -> Channel = channel(), {ok, ?DISCONNECT_PACKET(),Channel} end), - ?assertMatch({ok, _Out, _NChannel}, - emqx_connection:with_channel(handle_in, [for_testing], State)), + ok = meck:expect( + emqx_channel, + handle_in, + fun(_, _) -> + Channel = channel(), + {ok, ?DISCONNECT_PACKET(), Channel} + end + ), + ?assertMatch( + {ok, _Out, _NChannel}, + emqx_connection:with_channel(handle_in, [for_testing], State) + ), - ok = meck:expect(emqx_channel, handle_in, - fun(_, _) -> Channel = channel(), {shutdown, [for_testing], Channel} end), - ?assertMatch({stop, {shutdown,[for_testing]}, _NState}, - emqx_connection:with_channel(handle_in, [for_testing], State)), + ok = meck:expect( + emqx_channel, + handle_in, + fun(_, _) -> + Channel = channel(), + {shutdown, [for_testing], Channel} + end + ), + ?assertMatch( + {stop, {shutdown, [for_testing]}, _NState}, + emqx_connection:with_channel(handle_in, [for_testing], State) + ), - ok = meck:expect(emqx_channel, handle_in, - fun(_, _) -> - Channel = channel(), - {shutdown, [for_testing], ?DISCONNECT_PACKET(), Channel} - end), - ?assertMatch({stop, {shutdown,[for_testing]}, _NState}, - emqx_connection:with_channel(handle_in, [for_testing], State)), + ok = meck:expect( + emqx_channel, + handle_in, + fun(_, _) -> + Channel = channel(), + {shutdown, [for_testing], ?DISCONNECT_PACKET(), Channel} + end + ), + ?assertMatch( + {stop, {shutdown, [for_testing]}, _NState}, + emqx_connection:with_channel(handle_in, [for_testing], State) + ), meck:unload(emqx_channel). t_handle_outgoing(_) -> @@ -327,26 +403,43 @@ t_handle_outgoing(_) -> ?assertEqual(ok, emqx_connection:handle_outgoing([?PACKET(?PINGRESP)], st())). t_handle_info(_) -> - ?assertMatch({ok, {event,running}, _NState}, - emqx_connection:handle_info(activate_socket, st())), - ?assertMatch({stop, {shutdown, for_testing}, _NStats}, - emqx_connection:handle_info({sock_error, for_testing}, st())), + ?assertMatch( + {ok, {event, running}, _NState}, + emqx_connection:handle_info(activate_socket, st()) + ), + ?assertMatch( + {stop, {shutdown, for_testing}, _NStats}, + emqx_connection:handle_info({sock_error, for_testing}, st()) + ), ?assertMatch({ok, _NState}, emqx_connection:handle_info(for_testing, st())). 
t_ensure_rate_limit(_) -> WhenOk = fun emqx_connection:next_incoming_msgs/3, - {ok, [], State} = emqx_connection:check_limiter([], [], WhenOk, [], - st(#{limiter => undefined})), + {ok, [], State} = emqx_connection:check_limiter( + [], + [], + WhenOk, + [], + st(#{limiter => undefined}) + ), ?assertEqual(undefined, emqx_connection:info(limiter, State)), Limiter = init_limiter(), {ok, [], State1} = emqx_connection:check_limiter([], [], WhenOk, [], st(#{limiter => Limiter})), ?assertEqual(Limiter, emqx_connection:info(limiter, State1)), - ok = meck:expect(emqx_htb_limiter, check, - fun(_, Client) -> {pause, 3000, undefined, Client} end), - {ok, State2} = emqx_connection:check_limiter([{1000, bytes_in}], [], - WhenOk, [], st(#{limiter => Limiter})), + ok = meck:expect( + emqx_htb_limiter, + check, + fun(_, Client) -> {pause, 3000, undefined, Client} end + ), + {ok, State2} = emqx_connection:check_limiter( + [{1000, bytes_in}], + [], + WhenOk, + [], + st(#{limiter => Limiter}) + ), meck:unload(emqx_htb_limiter), ok = meck:new(emqx_htb_limiter, [passthrough, no_history, no_link]), ?assertNotEqual(undefined, emqx_connection:info(limiter_timer, State2)). @@ -396,19 +489,26 @@ t_start_link_exit_on_activate(_) -> t_get_conn_info(_) -> with_conn(fun(CPid) -> - #{sockinfo := SockInfo} = emqx_connection:info(CPid), - ?assertEqual(#{peername => {{127,0,0,1},3456}, - sockname => {{127,0,0,1},1883}, - sockstate => running, - socktype => tcp - }, SockInfo) - end). + #{sockinfo := SockInfo} = emqx_connection:info(CPid), + ?assertEqual( + #{ + peername => {{127, 0, 0, 1}, 3456}, + sockname => {{127, 0, 0, 1}, 1883}, + sockstate => running, + socktype => tcp + }, + SockInfo + ) + end). t_oom_shutdown(init, Config) -> ok = snabbkaffe:start_trace(), ok = meck:new(emqx_misc, [non_strict, passthrough, no_history, no_link]), - meck:expect(emqx_misc, check_oom, - fun(_) -> {shutdown, "fake_oom"} end), + meck:expect( + emqx_misc, + check_oom, + fun(_) -> {shutdown, "fake_oom"} end + ), Config; t_oom_shutdown('end', _Config) -> snabbkaffe:stop(), @@ -418,46 +518,55 @@ t_oom_shutdown('end', _Config) -> t_oom_shutdown(_) -> Opts = #{trap_exit => true}, with_conn( - fun(Pid) -> - Pid ! {tcp_passive, foo}, - {ok, _} = ?block_until(#{?snk_kind := check_oom}, 1000), - {ok, _} = ?block_until(#{?snk_kind := terminate}, 100), - Trace = snabbkaffe:collect_trace(), - ?assertEqual(1, length(?of_kind(terminate, Trace))), - receive - {'EXIT', Pid, Reason} -> - ?assertEqual({shutdown, "fake_oom"}, Reason) - after 1000 -> - error(timeout) - end, - ?assertNot(erlang:is_process_alive(Pid)) - end, Opts), + fun(Pid) -> + Pid ! {tcp_passive, foo}, + {ok, _} = ?block_until(#{?snk_kind := check_oom}, 1000), + {ok, _} = ?block_until(#{?snk_kind := terminate}, 100), + Trace = snabbkaffe:collect_trace(), + ?assertEqual(1, length(?of_kind(terminate, Trace))), + receive + {'EXIT', Pid, Reason} -> + ?assertEqual({shutdown, "fake_oom"}, Reason) + after 1000 -> + error(timeout) + end, + ?assertNot(erlang:is_process_alive(Pid)) + end, + Opts + ), ok. 
t_cancel_congestion_alarm(_) -> Opts = #{trap_exit => false}, - ok = meck:expect(emqx_transport, getstat, - fun(_Sock, [send_pend]) -> - %% simulate congestion - {ok, [{send_pend, 999}]}; - (_Sock, Options) -> - {ok, [{K, 0} || K <- Options]} - end), + ok = meck:expect( + emqx_transport, + getstat, + fun + (_Sock, [send_pend]) -> + %% simulate congestion + {ok, [{send_pend, 999}]}; + (_Sock, Options) -> + {ok, [{K, 0} || K <- Options]} + end + ), with_conn( - fun(Pid) -> - #{ channel := Channel - , transport := Transport - , socket := Socket - } = emqx_connection:get_state(Pid), - %% precondition - Zone = emqx_channel:info(zone, Channel), - true = emqx_config:get_zone_conf(Zone, [conn_congestion, enable_alarm]), - %% should not raise errors - ok = emqx_congestion:maybe_alarm_conn_congestion(Socket, Transport, Channel), - %% should not raise errors either - ok = emqx_congestion:cancel_alarms(Socket, Transport, Channel), - ok - end, Opts), + fun(Pid) -> + #{ + channel := Channel, + transport := Transport, + socket := Socket + } = emqx_connection:get_state(Pid), + %% precondition + Zone = emqx_channel:info(zone, Channel), + true = emqx_config:get_zone_conf(Zone, [conn_congestion, enable_alarm]), + %% should not raise errors + ok = emqx_congestion:maybe_alarm_conn_congestion(Socket, Transport, Channel), + %% should not raise errors either + ok = emqx_congestion:cancel_alarms(Socket, Transport, Channel), + ok + end, + Opts + ), ok. %%-------------------------------------------------------------------- @@ -465,24 +574,36 @@ t_cancel_congestion_alarm(_) -> %%-------------------------------------------------------------------- exit_on_wait_error(SockErr, Reason) -> - ok = meck:expect(emqx_transport, wait, - fun(_Sock) -> - {error, SockErr} - end), - with_conn(fun(CPid) -> - timer:sleep(100), - trap_exit(CPid, Reason) - end, #{trap_exit => true}). + ok = meck:expect( + emqx_transport, + wait, + fun(_Sock) -> + {error, SockErr} + end + ), + with_conn( + fun(CPid) -> + timer:sleep(100), + trap_exit(CPid, Reason) + end, + #{trap_exit => true} + ). exit_on_activate_error(SockErr, Reason) -> - ok = meck:expect(emqx_transport, setopts, - fun(_Sock, _Opts) -> - {error, SockErr} - end), - with_conn(fun(CPid) -> - timer:sleep(100), - trap_exit(CPid, Reason) - end, #{trap_exit => true}). + ok = meck:expect( + emqx_transport, + setopts, + fun(_Sock, _Opts) -> + {error, SockErr} + end + ), + with_conn( + fun(CPid) -> + timer:sleep(100), + trap_exit(CPid, Reason) + end, + #{trap_exit => true} + ). with_conn(TestFun) -> with_conn(TestFun, #{trap_exit => false}). @@ -490,11 +611,18 @@ with_conn(TestFun) -> with_conn(TestFun, Opts) when is_map(Opts) -> TrapExit = maps:get(trap_exit, Opts, false), process_flag(trap_exit, TrapExit), - {ok, CPid} = emqx_connection:start_link(emqx_transport, sock, - maps:merge(Opts, - #{zone => default, - limiter => limiter_cfg(), - listener => {tcp, default}})), + {ok, CPid} = emqx_connection:start_link( + emqx_transport, + sock, + maps:merge( + Opts, + #{ + zone => default, + limiter => limiter_cfg(), + listener => {tcp, default} + } + ) + ), TestFun(CPid), TrapExit orelse emqx_connection:stop(CPid), ok. @@ -502,9 +630,8 @@ with_conn(TestFun, Opts) when is_map(Opts) -> trap_exit(Pid, Reason) -> receive {'EXIT', Pid, Reason} -> ok; - {'EXIT', Pid, Other} -> error({unexpect_exit, Other}) - after - 100 -> error({expect_exit, Reason}) + {'EXIT', Pid, Other} -> error({unexpect_exit, Other}) + after 100 -> error({expect_exit, Reason}) end. 
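The trap_exit/2 helper above captures a pattern used throughout these suites: link to the process under test, trap exits, and assert on the exit reason. A self-contained sketch using only standard OTP calls; the module name and exit reason are made up for illustration:

    -module(trap_exit_example).
    -export([demo/0]).

    demo() ->
        %% Trap exits so a linked process death arrives as a message
        %% instead of killing the test process.
        process_flag(trap_exit, true),
        Pid = spawn_link(fun() -> exit({shutdown, done}) end),
        receive
            {'EXIT', Pid, {shutdown, done}} -> ok;
            {'EXIT', Pid, Other} -> error({unexpected_exit, Other})
        after 100 -> error(timeout)
        end.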
make_frame(Packet) -> @@ -516,50 +643,62 @@ st() -> st(#{}, #{}). st(InitFields) when is_map(InitFields) -> st(InitFields, #{}). st(InitFields, ChannelFields) when is_map(InitFields) -> - St = emqx_connection:init_state(emqx_transport, sock, #{zone => default, - limiter => limiter_cfg(), - listener => {tcp, default}}), - maps:fold(fun(N, V, S) -> emqx_connection:set_field(N, V, S) end, - emqx_connection:set_field(channel, channel(ChannelFields), St), - InitFields - ). + St = emqx_connection:init_state(emqx_transport, sock, #{ + zone => default, + limiter => limiter_cfg(), + listener => {tcp, default} + }), + maps:fold( + fun(N, V, S) -> emqx_connection:set_field(N, V, S) end, + emqx_connection:set_field(channel, channel(ChannelFields), St), + InitFields + ). channel() -> channel(#{}). channel(InitFields) -> - ConnInfo = #{peername => {{127,0,0,1}, 3456}, - sockname => {{127,0,0,1}, 18083}, - conn_mod => emqx_connection, - proto_name => <<"MQTT">>, - proto_ver => ?MQTT_PROTO_V5, - clean_start => true, - keepalive => 30, - clientid => <<"clientid">>, - username => <<"username">>, - receive_maximum => 100, - expiry_interval => 0 - }, - ClientInfo = #{zone => default, - listener => {tcp, default}, - protocol => mqtt, - peerhost => {127,0,0,1}, - clientid => <<"clientid">>, - username => <<"username">>, - is_superuser => false, - peercert => undefined, - mountpoint => undefined - }, + ConnInfo = #{ + peername => {{127, 0, 0, 1}, 3456}, + sockname => {{127, 0, 0, 1}, 18083}, + conn_mod => emqx_connection, + proto_name => <<"MQTT">>, + proto_ver => ?MQTT_PROTO_V5, + clean_start => true, + keepalive => 30, + clientid => <<"clientid">>, + username => <<"username">>, + receive_maximum => 100, + expiry_interval => 0 + }, + ClientInfo = #{ + zone => default, + listener => {tcp, default}, + protocol => mqtt, + peerhost => {127, 0, 0, 1}, + clientid => <<"clientid">>, + username => <<"username">>, + is_superuser => false, + peercert => undefined, + mountpoint => undefined + }, Session = emqx_session:init(#{max_inflight => 0}), - maps:fold(fun(Field, Value, Channel) -> - emqx_channel:set_field(Field, Value, Channel) - end, - emqx_channel:init(ConnInfo, #{ zone => default - , limiter => limiter_cfg() - , listener => {tcp, default} - }), - maps:merge(#{clientinfo => ClientInfo, - session => Session, - conn_state => connected - }, InitFields)). + maps:fold( + fun(Field, Value, Channel) -> + emqx_channel:set_field(Field, Value, Channel) + end, + emqx_channel:init(ConnInfo, #{ + zone => default, + limiter => limiter_cfg(), + listener => {tcp, default} + }), + maps:merge( + #{ + clientinfo => ClientInfo, + session => Session, + conn_state => connected + }, + InitFields + ) + ). handle_msg(Msg, St) -> emqx_connection:handle_msg(Msg, St). 
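The st/2 and channel/1 fixtures above rely on the same idiom: start from a default state and maps:fold/3 the per-test overrides into it one field at a time. A reduced sketch of that idiom on plain maps, so it runs without the emqx application; the field names here are illustrative only:

    -module(fold_fields_example).
    -export([demo/0]).

    demo() ->
        Defaults = #{sockstate => idle, conn_state => disconnected},
        Overrides = #{conn_state => connected},
        %% Fold every override into the defaults, mirroring how st/2 folds
        %% InitFields into the freshly initialised connection state.
        State = maps:fold(
            fun(Field, Value, Acc) -> maps:put(Field, Value, Acc) end,
            Defaults,
            Overrides
        ),
        #{sockstate := idle, conn_state := connected} = State,
        ok.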
diff --git a/apps/emqx/test/emqx_ctl_SUITE.erl b/apps/emqx/test/emqx_ctl_SUITE.erl index e56237d09..605466e79 100644 --- a/apps/emqx/test/emqx_ctl_SUITE.erl +++ b/apps/emqx/test/emqx_ctl_SUITE.erl @@ -37,24 +37,27 @@ end_per_suite(_Config) -> t_reg_unreg_command(_) -> with_ctl_server( - fun(_CtlSrv) -> + fun(_CtlSrv) -> emqx_ctl:register_command(cmd1, {?MODULE, cmd1_fun}), emqx_ctl:register_command(cmd2, {?MODULE, cmd2_fun}), ?assertEqual([{?MODULE, cmd1_fun}], emqx_ctl:lookup_command(cmd1)), ?assertEqual([{?MODULE, cmd2_fun}], emqx_ctl:lookup_command(cmd2)), - ?assertEqual([{cmd1, ?MODULE, cmd1_fun}, {cmd2, ?MODULE, cmd2_fun}], - emqx_ctl:get_commands()), + ?assertEqual( + [{cmd1, ?MODULE, cmd1_fun}, {cmd2, ?MODULE, cmd2_fun}], + emqx_ctl:get_commands() + ), emqx_ctl:unregister_command(cmd1), emqx_ctl:unregister_command(cmd2), ct:sleep(100), ?assertEqual([], emqx_ctl:lookup_command(cmd1)), ?assertEqual([], emqx_ctl:lookup_command(cmd2)), ?assertEqual([], emqx_ctl:get_commands()) - end). + end + ). t_run_commands(_) -> with_ctl_server( - fun(_CtlSrv) -> + fun(_CtlSrv) -> ?assertEqual({error, cmd_not_found}, emqx_ctl:run_command(["cmd", "arg"])), emqx_ctl:register_command(cmd1, {?MODULE, cmd1_fun}), emqx_ctl:register_command(cmd2, {?MODULE, cmd2_fun}), @@ -62,7 +65,8 @@ t_run_commands(_) -> {error, badarg} = emqx_ctl:run_command(["cmd1", "badarg"]), ok = emqx_ctl:run_command(["cmd2", "arg1", "arg2"]), {error, badarg} = emqx_ctl:run_command(["cmd2", "arg1", "badarg"]) - end). + end + ). t_print(_) -> ok = emqx_ctl:print("help"), @@ -84,12 +88,13 @@ t_usage(_) -> t_unexpected(_) -> with_ctl_server( - fun(CtlSrv) -> - ignored = gen_server:call(CtlSrv, unexpected_call), - ok = gen_server:cast(CtlSrv, unexpected_cast), - CtlSrv ! unexpected_info, - ?assert(is_process_alive(CtlSrv)) - end). + fun(CtlSrv) -> + ignored = gen_server:call(CtlSrv, unexpected_call), + ok = gen_server:cast(CtlSrv, unexpected_cast), + CtlSrv ! unexpected_info, + ?assert(is_process_alive(CtlSrv)) + end + ). %%-------------------------------------------------------------------- %% Cmds for test @@ -114,7 +119,8 @@ mock_print() -> meck:expect(emqx_ctl, print, fun(Msg, Arg) -> emqx_ctl:format(Msg, Arg) end), meck:expect(emqx_ctl, usage, fun(Usages) -> emqx_ctl:format_usage(Usages) end), meck:expect(emqx_ctl, usage, fun(CmdParams, CmdDescr) -> - emqx_ctl:format_usage(CmdParams, CmdDescr) end). + emqx_ctl:format_usage(CmdParams, CmdDescr) + end). unmock_print() -> meck:unload(emqx_ctl). diff --git a/apps/emqx/test/emqx_flapping_SUITE.erl b/apps/emqx/test/emqx_flapping_SUITE.erl index 4f2832c0f..f37e20fdc 100644 --- a/apps/emqx/test/emqx_flapping_SUITE.erl +++ b/apps/emqx/test/emqx_flapping_SUITE.erl @@ -26,32 +26,42 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> emqx_common_test_helpers:boot_modules(all), emqx_common_test_helpers:start_apps([]), - emqx_config:put_zone_conf(default, [flapping_detect], - #{max_count => 3, - window_time => 100, % 0.1s - ban_time => 2000 %% 2s - }), + emqx_config:put_zone_conf( + default, + [flapping_detect], + #{ + max_count => 3, + % 0.1s + window_time => 100, + %% 2s + ban_time => 2000 + } + ), Config. end_per_suite(_Config) -> emqx_common_test_helpers:stop_apps([]), - mria_mnesia:delete_schema(), %% Clean emqx_banned table + %% Clean emqx_banned table + mria_mnesia:delete_schema(), ok. 
t_detect_check(_) -> - ClientInfo = #{zone => default, - listener => {tcp, default}, - clientid => <<"client007">>, - peerhost => {127,0,0,1} - }, + ClientInfo = #{ + zone => default, + listener => {tcp, default}, + clientid => <<"client007">>, + peerhost => {127, 0, 0, 1} + }, false = emqx_flapping:detect(ClientInfo), false = emqx_banned:check(ClientInfo), false = emqx_flapping:detect(ClientInfo), false = emqx_banned:check(ClientInfo), true = emqx_flapping:detect(ClientInfo), timer:sleep(50), - ct:pal("the table emqx_banned: ~p, nowsec: ~p", [ets:tab2list(emqx_banned), - erlang:system_time(second)]), + ct:pal("the table emqx_banned: ~p, nowsec: ~p", [ + ets:tab2list(emqx_banned), + erlang:system_time(second) + ]), true = emqx_banned:check(ClientInfo), timer:sleep(3000), false = emqx_banned:check(ClientInfo), @@ -63,13 +73,31 @@ t_detect_check(_) -> ok = emqx_flapping:stop(). t_expired_detecting(_) -> - ClientInfo = #{zone => default, - listener => {tcp, default}, - clientid => <<"client008">>, - peerhost => {127,0,0,1}}, + ClientInfo = #{ + zone => default, + listener => {tcp, default}, + clientid => <<"client008">>, + peerhost => {127, 0, 0, 1} + }, false = emqx_flapping:detect(ClientInfo), - ?assertEqual(true, lists:any(fun({flapping, <<"client008">>, _, _, _}) -> true; - (_) -> false end, ets:tab2list(emqx_flapping))), + ?assertEqual( + true, + lists:any( + fun + ({flapping, <<"client008">>, _, _, _}) -> true; + (_) -> false + end, + ets:tab2list(emqx_flapping) + ) + ), timer:sleep(200), - ?assertEqual(true, lists:all(fun({flapping, <<"client008">>, _, _, _}) -> false; - (_) -> true end, ets:tab2list(emqx_flapping))). + ?assertEqual( + true, + lists:all( + fun + ({flapping, <<"client008">>, _, _, _}) -> false; + (_) -> true + end, + ets:tab2list(emqx_flapping) + ) + ). diff --git a/apps/emqx/test/emqx_frame_SUITE.erl b/apps/emqx/test/emqx_frame_SUITE.erl index 7984d59b1..d8d823630 100644 --- a/apps/emqx/test/emqx_frame_SUITE.erl +++ b/apps/emqx/test/emqx_frame_SUITE.erl @@ -24,88 +24,89 @@ -include_lib("common_test/include/ct.hrl"). -define(ASSERT_FRAME_THROW(Reason, Expr), - ?assertThrow(?FRAME_PARSE_ERROR(Reason), Expr)). + ?assertThrow(?FRAME_PARSE_ERROR(Reason), Expr) +). all() -> - [{group, parse}, - {group, connect}, - {group, connack}, - {group, publish}, - {group, puback}, - {group, subscribe}, - {group, suback}, - {group, unsubscribe}, - {group, unsuback}, - {group, ping}, - {group, disconnect}, - {group, auth} + [ + {group, parse}, + {group, connect}, + {group, connack}, + {group, publish}, + {group, puback}, + {group, subscribe}, + {group, suback}, + {group, unsubscribe}, + {group, unsuback}, + {group, ping}, + {group, disconnect}, + {group, auth} ]. 
groups() -> - [{parse, [parallel], - [ t_parse_cont - , t_parse_frame_too_large - , t_parse_frame_malformed_variable_byte_integer - , t_parse_malformed_utf8_string - ]}, - {connect, [parallel], - [ t_serialize_parse_v3_connect - , t_serialize_parse_v4_connect - , t_serialize_parse_v5_connect - , t_serialize_parse_connect_without_clientid - , t_serialize_parse_connect_with_will - , t_serialize_parse_bridge_connect - ]}, - {connack, [parallel], - [ t_serialize_parse_connack - , t_serialize_parse_connack_v5 - ]}, - {publish, [parallel], - [ t_parse_sticky_frames - , t_serialize_parse_qos0_publish - , t_serialize_parse_qos1_publish - , t_serialize_parse_qos2_publish - , t_serialize_parse_publish_v5 - ]}, - {puback, [parallel], - [ t_serialize_parse_puback - , t_serialize_parse_puback_v3_4 - , t_serialize_parse_puback_v5 - , t_serialize_parse_pubrec - , t_serialize_parse_pubrec_v5 - , t_serialize_parse_pubrel - , t_serialize_parse_pubrel_v5 - , t_serialize_parse_pubcomp - , t_serialize_parse_pubcomp_v5 - ]}, - {subscribe, [parallel], - [ t_serialize_parse_subscribe - , t_serialize_parse_subscribe_v5 - ]}, - {suback, [parallel], - [ t_serialize_parse_suback - , t_serialize_parse_suback_v5 - ]}, - {unsubscribe, [parallel], - [ t_serialize_parse_unsubscribe - , t_serialize_parse_unsubscribe_v5 - ]}, - {unsuback, [parallel], - [ t_serialize_parse_unsuback - , t_serialize_parse_unsuback_v5 - ]}, - {ping, [parallel], - [ t_serialize_parse_pingreq - , t_serialize_parse_pingresp - ]}, - {disconnect, [parallel], - [ t_serialize_parse_disconnect - , t_serialize_parse_disconnect_v5 - ]}, - {auth, [parallel], - [ t_serialize_parse_auth_v5 - ] - }]. + [ + {parse, [parallel], [ + t_parse_cont, + t_parse_frame_too_large, + t_parse_frame_malformed_variable_byte_integer, + t_parse_malformed_utf8_string + ]}, + {connect, [parallel], [ + t_serialize_parse_v3_connect, + t_serialize_parse_v4_connect, + t_serialize_parse_v5_connect, + t_serialize_parse_connect_without_clientid, + t_serialize_parse_connect_with_will, + t_serialize_parse_bridge_connect + ]}, + {connack, [parallel], [ + t_serialize_parse_connack, + t_serialize_parse_connack_v5 + ]}, + {publish, [parallel], [ + t_parse_sticky_frames, + t_serialize_parse_qos0_publish, + t_serialize_parse_qos1_publish, + t_serialize_parse_qos2_publish, + t_serialize_parse_publish_v5 + ]}, + {puback, [parallel], [ + t_serialize_parse_puback, + t_serialize_parse_puback_v3_4, + t_serialize_parse_puback_v5, + t_serialize_parse_pubrec, + t_serialize_parse_pubrec_v5, + t_serialize_parse_pubrel, + t_serialize_parse_pubrel_v5, + t_serialize_parse_pubcomp, + t_serialize_parse_pubcomp_v5 + ]}, + {subscribe, [parallel], [ + t_serialize_parse_subscribe, + t_serialize_parse_subscribe_v5 + ]}, + {suback, [parallel], [ + t_serialize_parse_suback, + t_serialize_parse_suback_v5 + ]}, + {unsubscribe, [parallel], [ + t_serialize_parse_unsubscribe, + t_serialize_parse_unsubscribe_v5 + ]}, + {unsuback, [parallel], [ + t_serialize_parse_unsuback, + t_serialize_parse_unsuback_v5 + ]}, + {ping, [parallel], [ + t_serialize_parse_pingreq, + t_serialize_parse_pingresp + ]}, + {disconnect, [parallel], [ + t_serialize_parse_disconnect, + t_serialize_parse_disconnect_v5 + ]}, + {auth, [parallel], [t_serialize_parse_auth_v5]} + ]. init_per_suite(Config) -> Config. @@ -136,147 +137,164 @@ t_parse_frame_too_large(_) -> ?assertEqual(Packet, parse_serialize(Packet, #{max_size => 2048, version => ?MQTT_PROTO_V4})). 
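The frame cases below round-trip packets through emqx_frame:serialize/1 and emqx_frame:parse/1 (via the suite's parse_serialize and serialize_to_binary helpers). A minimal sketch of that round trip, assuming the emqx application and its emqx_mqtt.hrl header are on the include path:

    -module(frame_roundtrip_example).
    -include_lib("emqx/include/emqx_mqtt.hrl").
    -export([demo/0]).

    demo() ->
        Packet = ?PACKET(?PINGREQ),
        %% serialize/1 returns iodata; parse/1 starts from the default parse state.
        Bin = iolist_to_binary(emqx_frame:serialize(Packet)),
        {ok, Packet, <<>>, _ParseState} = emqx_frame:parse(Bin),
        ok.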
t_parse_frame_malformed_variable_byte_integer(_) -> - MalformedPayload = << <<16#80>> || _ <- lists:seq(1, 6) >>, + MalformedPayload = <<<<16#80>> || _ <- lists:seq(1, 6)>>, ParseState = emqx_frame:initial_parse_state(#{}), - ?ASSERT_FRAME_THROW(malformed_variable_byte_integer, - emqx_frame:parse(MalformedPayload, ParseState)). + ?ASSERT_FRAME_THROW( + malformed_variable_byte_integer, + emqx_frame:parse(MalformedPayload, ParseState) + ). t_parse_malformed_utf8_string(_) -> - MalformedPacket = <<16,31,0,4, - %% Specification name, should be "MQTT" - %% 77,81,84,84, - %% malformed 1-Byte UTF-8 in (U+0000 .. U+001F] && [U+007F]) - 16#00,16#01,16#1F,16#7F, + MalformedPacket = + <<16, 31, 0, 4, + %% Specification name, should be "MQTT" + %% 77,81,84,84, + %% malformed 1-Byte UTF-8 in (U+0000 .. U+001F] && [U+007F]) + 16#00, 16#01, 16#1F, 16#7F, - 4,194,0,60, - 0,4,101,109, - 113,120,0,5, - 97,100,109,105, - 110,0,6,112, - 117,98,108,105, - 99>>, + 4, 194, 0, 60, 0, 4, 101, 109, 113, 120, 0, 5, 97, 100, 109, 105, 110, 0, 6, 112, 117, + 98, 108, 105, 99>>, ParseState = emqx_frame:initial_parse_state(#{strict_mode => true}), ?ASSERT_FRAME_THROW(utf8_string_invalid, emqx_frame:parse(MalformedPacket, ParseState)). t_serialize_parse_v3_connect(_) -> - Bin = <<16,37,0,6,77,81,73,115,100,112,3,2,0,60,0,23,109,111,115, - 113,112,117, 98,47,49,48,52,53,49,45,105,77,97,99,46,108, - 111,99,97>>, + Bin = + <<16, 37, 0, 6, 77, 81, 73, 115, 100, 112, 3, 2, 0, 60, 0, 23, 109, 111, 115, 113, 112, 117, + 98, 47, 49, 48, 52, 53, 49, 45, 105, 77, 97, 99, 46, 108, 111, 99, 97>>, Packet = ?CONNECT_PACKET( - #mqtt_packet_connect{proto_ver = ?MQTT_PROTO_V3, - proto_name = <<"MQIsdp">>, - clientid = <<"mosqpub/10451-iMac.loca">>, - clean_start = true, - keepalive = 60 - }), + #mqtt_packet_connect{ + proto_ver = ?MQTT_PROTO_V3, + proto_name = <<"MQIsdp">>, + clientid = <<"mosqpub/10451-iMac.loca">>, + clean_start = true, + keepalive = 60 + } + ), {ok, Packet, <<>>, PState} = emqx_frame:parse(Bin), ?assertMatch({none, #{version := ?MQTT_PROTO_V3}}, PState). t_serialize_parse_v4_connect(_) -> - Bin = <<16,35,0,4,77,81,84,84,4,2,0,60,0,23,109,111,115,113,112,117, - 98,47,49,48,52,53,49,45,105,77,97,99,46,108,111,99,97>>, + Bin = + <<16, 35, 0, 4, 77, 81, 84, 84, 4, 2, 0, 60, 0, 23, 109, 111, 115, 113, 112, 117, 98, 47, + 49, 48, 52, 53, 49, 45, 105, 77, 97, 99, 46, 108, 111, 99, 97>>, Packet = ?CONNECT_PACKET( - #mqtt_packet_connect{proto_ver = ?MQTT_PROTO_V4, - proto_name = <<"MQTT">>, - clientid = <<"mosqpub/10451-iMac.loca">>, - clean_start = true, - keepalive = 60 - }), + #mqtt_packet_connect{ + proto_ver = ?MQTT_PROTO_V4, + proto_name = <<"MQTT">>, + clientid = <<"mosqpub/10451-iMac.loca">>, + clean_start = true, + keepalive = 60 + } + ), ?assertEqual(Bin, serialize_to_binary(Packet)), ?assertMatch({ok, Packet, <<>>, _}, emqx_frame:parse(Bin)). 
t_serialize_parse_v5_connect(_) -> - Props = #{'Session-Expiry-Interval' => 60, - 'Receive-Maximum' => 100, - 'Maximum-QoS' => ?QOS_2, - 'Retain-Available' => 1, - 'Maximum-Packet-Size' => 1024, - 'Topic-Alias-Maximum' => 10, - 'Request-Response-Information' => 1, - 'Request-Problem-Information' => 1, - 'Authentication-Method' => <<"oauth2">>, - 'Authentication-Data' => <<"33kx93k">> - }, + Props = #{ + 'Session-Expiry-Interval' => 60, + 'Receive-Maximum' => 100, + 'Maximum-QoS' => ?QOS_2, + 'Retain-Available' => 1, + 'Maximum-Packet-Size' => 1024, + 'Topic-Alias-Maximum' => 10, + 'Request-Response-Information' => 1, + 'Request-Problem-Information' => 1, + 'Authentication-Method' => <<"oauth2">>, + 'Authentication-Data' => <<"33kx93k">> + }, - WillProps = #{'Will-Delay-Interval' => 60, - 'Payload-Format-Indicator' => 1, - 'Message-Expiry-Interval' => 60, - 'Content-Type' => <<"text/json">>, - 'Response-Topic' => <<"topic">>, - 'Correlation-Data' => <<"correlateid">>, - 'User-Property' => [{<<"k">>, <<"v">>}] - }, + WillProps = #{ + 'Will-Delay-Interval' => 60, + 'Payload-Format-Indicator' => 1, + 'Message-Expiry-Interval' => 60, + 'Content-Type' => <<"text/json">>, + 'Response-Topic' => <<"topic">>, + 'Correlation-Data' => <<"correlateid">>, + 'User-Property' => [{<<"k">>, <<"v">>}] + }, Packet = ?CONNECT_PACKET( - #mqtt_packet_connect{proto_name = <<"MQTT">>, - proto_ver = ?MQTT_PROTO_V5, - is_bridge = false, - clean_start = true, - clientid = <<>>, - will_flag = true, - will_qos = ?QOS_1, - will_retain = false, - keepalive = 60, - properties = Props, - will_props = WillProps, - will_topic = <<"topic">>, - will_payload = <<>>, - username = <<"device:1">>, - password = <<"passwd">> - }), + #mqtt_packet_connect{ + proto_name = <<"MQTT">>, + proto_ver = ?MQTT_PROTO_V5, + is_bridge = false, + clean_start = true, + clientid = <<>>, + will_flag = true, + will_qos = ?QOS_1, + will_retain = false, + keepalive = 60, + properties = Props, + will_props = WillProps, + will_topic = <<"topic">>, + will_payload = <<>>, + username = <<"device:1">>, + password = <<"passwd">> + } + ), ?assertEqual(Packet, parse_serialize(Packet)). t_serialize_parse_connect_without_clientid(_) -> - Bin = <<16,12,0,4,77,81,84,84,4,2,0,60,0,0>>, - Packet = ?CONNECT_PACKET(#mqtt_packet_connect{proto_ver = ?MQTT_PROTO_V4, - proto_name = <<"MQTT">>, - clientid = <<>>, - clean_start = true, - keepalive = 60 - }), + Bin = <<16, 12, 0, 4, 77, 81, 84, 84, 4, 2, 0, 60, 0, 0>>, + Packet = ?CONNECT_PACKET(#mqtt_packet_connect{ + proto_ver = ?MQTT_PROTO_V4, + proto_name = <<"MQTT">>, + clientid = <<>>, + clean_start = true, + keepalive = 60 + }), ?assertEqual(Bin, serialize_to_binary(Packet)), ?assertMatch({ok, Packet, <<>>, _}, emqx_frame:parse(Bin)). 
t_serialize_parse_connect_with_will(_) -> - Bin = <<16,67,0,6,77,81,73,115,100,112,3,206,0,60,0,23,109,111,115,113,112, - 117,98,47,49,48,52,53,50,45,105,77,97,99,46,108,111,99,97,0,5,47,119, - 105,108,108,0,7,119,105,108,108,109,115,103,0,4,116,101,115,116,0,6, - 112,117,98,108,105,99>>, - Packet = #mqtt_packet{header = #mqtt_packet_header{type = ?CONNECT}, - variable = #mqtt_packet_connect{proto_ver = ?MQTT_PROTO_V3, - proto_name = <<"MQIsdp">>, - clientid = <<"mosqpub/10452-iMac.loca">>, - clean_start = true, - keepalive = 60, - will_retain = false, - will_qos = ?QOS_1, - will_flag = true, - will_topic = <<"/will">>, - will_payload = <<"willmsg">>, - username = <<"test">>, - password = <<"public">> - }}, + Bin = + <<16, 67, 0, 6, 77, 81, 73, 115, 100, 112, 3, 206, 0, 60, 0, 23, 109, 111, 115, 113, 112, + 117, 98, 47, 49, 48, 52, 53, 50, 45, 105, 77, 97, 99, 46, 108, 111, 99, 97, 0, 5, 47, + 119, 105, 108, 108, 0, 7, 119, 105, 108, 108, 109, 115, 103, 0, 4, 116, 101, 115, 116, + 0, 6, 112, 117, 98, 108, 105, 99>>, + Packet = #mqtt_packet{ + header = #mqtt_packet_header{type = ?CONNECT}, + variable = #mqtt_packet_connect{ + proto_ver = ?MQTT_PROTO_V3, + proto_name = <<"MQIsdp">>, + clientid = <<"mosqpub/10452-iMac.loca">>, + clean_start = true, + keepalive = 60, + will_retain = false, + will_qos = ?QOS_1, + will_flag = true, + will_topic = <<"/will">>, + will_payload = <<"willmsg">>, + username = <<"test">>, + password = <<"public">> + } + }, ?assertEqual(Bin, serialize_to_binary(Packet)), ?assertMatch({ok, Packet, <<>>, _}, emqx_frame:parse(Bin)). t_serialize_parse_bridge_connect(_) -> - Bin = <<16,86,0,6,77,81,73,115,100,112,131,44,0,60,0,19,67,95,48,48,58,48,67, - 58,50,57,58,50,66,58,55,55,58,53,50,0,48,36,83,89,83,47,98,114,111,107, - 101,114,47,99,111,110,110,101,99,116,105,111,110,47,67,95,48,48,58,48, - 67,58,50,57,58,50,66,58,55,55,58,53,50,47,115,116,97,116,101,0,1,48>>, + Bin = + <<16, 86, 0, 6, 77, 81, 73, 115, 100, 112, 131, 44, 0, 60, 0, 19, 67, 95, 48, 48, 58, 48, + 67, 58, 50, 57, 58, 50, 66, 58, 55, 55, 58, 53, 50, 0, 48, 36, 83, 89, 83, 47, 98, 114, + 111, 107, 101, 114, 47, 99, 111, 110, 110, 101, 99, 116, 105, 111, 110, 47, 67, 95, 48, + 48, 58, 48, 67, 58, 50, 57, 58, 50, 66, 58, 55, 55, 58, 53, 50, 47, 115, 116, 97, 116, + 101, 0, 1, 48>>, Topic = <<"$SYS/broker/connection/C_00:0C:29:2B:77:52/state">>, - Packet = #mqtt_packet{header = #mqtt_packet_header{type = ?CONNECT}, - variable = #mqtt_packet_connect{clientid = <<"C_00:0C:29:2B:77:52">>, - proto_ver = 16#03, - proto_name = <<"MQIsdp">>, - is_bridge = true, - will_retain = true, - will_qos = ?QOS_1, - will_flag = true, - clean_start = false, - keepalive = 60, - will_topic = Topic, - will_payload = <<"0">> - }}, + Packet = #mqtt_packet{ + header = #mqtt_packet_header{type = ?CONNECT}, + variable = #mqtt_packet_connect{ + clientid = <<"C_00:0C:29:2B:77:52">>, + proto_ver = 16#03, + proto_name = <<"MQIsdp">>, + is_bridge = true, + will_retain = true, + will_qos = ?QOS_1, + will_flag = true, + clean_start = false, + keepalive = 60, + will_topic = Topic, + will_payload = <<"0">> + } + }, ?assertEqual(Bin, serialize_to_binary(Packet)), ?assertMatch({ok, Packet, <<>>, _}, emqx_frame:parse(Bin)), Packet1 = ?CONNECT_PACKET(#mqtt_packet_connect{is_bridge = true}), @@ -284,101 +302,126 @@ t_serialize_parse_bridge_connect(_) -> t_serialize_parse_connack(_) -> Packet = ?CONNACK_PACKET(?RC_SUCCESS), - ?assertEqual(<<32,2,0,0>>, serialize_to_binary(Packet)), + ?assertEqual(<<32, 2, 0, 0>>, serialize_to_binary(Packet)), 
?assertEqual(Packet, parse_serialize(Packet)). t_serialize_parse_connack_v5(_) -> - Props = #{'Session-Expiry-Interval' => 60, - 'Receive-Maximum' => 100, - 'Maximum-QoS' => ?QOS_2, - 'Retain-Available' => 1, - 'Maximum-Packet-Size' => 1024, - 'Assigned-Client-Identifier' => <<"id">>, - 'Topic-Alias-Maximum' => 10, - 'Reason-String' => <<>>, - 'Wildcard-Subscription-Available' => 1, - 'Subscription-Identifier-Available' => 1, - 'Shared-Subscription-Available' => 1, - 'Server-Keep-Alive' => 60, - 'Response-Information' => <<"response">>, - 'Server-Reference' => <<"192.168.1.10">>, - 'Authentication-Method' => <<"oauth2">>, - 'Authentication-Data' => <<"33kx93k">> - }, + Props = #{ + 'Session-Expiry-Interval' => 60, + 'Receive-Maximum' => 100, + 'Maximum-QoS' => ?QOS_2, + 'Retain-Available' => 1, + 'Maximum-Packet-Size' => 1024, + 'Assigned-Client-Identifier' => <<"id">>, + 'Topic-Alias-Maximum' => 10, + 'Reason-String' => <<>>, + 'Wildcard-Subscription-Available' => 1, + 'Subscription-Identifier-Available' => 1, + 'Shared-Subscription-Available' => 1, + 'Server-Keep-Alive' => 60, + 'Response-Information' => <<"response">>, + 'Server-Reference' => <<"192.168.1.10">>, + 'Authentication-Method' => <<"oauth2">>, + 'Authentication-Data' => <<"33kx93k">> + }, Packet = ?CONNACK_PACKET(?RC_SUCCESS, 0, Props), ?assertEqual(Packet, parse_serialize(Packet, #{version => ?MQTT_PROTO_V5})). t_parse_sticky_frames(_) -> Payload = lists:duplicate(10, 0), - P = #mqtt_packet{header = #mqtt_packet_header{type = ?PUBLISH, - dup = false, - qos = ?QOS_0, - retain = false}, - variable = #mqtt_packet_publish{topic_name = <<"a/b">>, - packet_id = undefined}, - payload = iolist_to_binary(Payload) - }, + P = #mqtt_packet{ + header = #mqtt_packet_header{ + type = ?PUBLISH, + dup = false, + qos = ?QOS_0, + retain = false + }, + variable = #mqtt_packet_publish{ + topic_name = <<"a/b">>, + packet_id = undefined + }, + payload = iolist_to_binary(Payload) + }, Bin = serialize_to_binary(P), Size = size(Bin), - <> = Bin, - {more, PState1} = emqx_frame:parse(H), %% needs 2 more bytes + <> = Bin, + %% needs 2 more bytes + {more, PState1} = emqx_frame:parse(H), %% feed 3 bytes as if the next 1 byte belongs to the next packet. {ok, _, <<42>>, PState2} = emqx_frame:parse(iolist_to_binary([TailTwoBytes, 42]), PState1), ?assertMatch({none, _}, PState2). t_serialize_parse_qos0_publish(_) -> - Bin = <<48,14,0,7,120,120,120,47,121,121,121,104,101,108,108,111>>, - Packet = #mqtt_packet{header = #mqtt_packet_header{type = ?PUBLISH, - dup = false, - qos = ?QOS_0, - retain = false}, - variable = #mqtt_packet_publish{topic_name = <<"xxx/yyy">>, - packet_id = undefined}, - payload = <<"hello">>}, + Bin = <<48, 14, 0, 7, 120, 120, 120, 47, 121, 121, 121, 104, 101, 108, 108, 111>>, + Packet = #mqtt_packet{ + header = #mqtt_packet_header{ + type = ?PUBLISH, + dup = false, + qos = ?QOS_0, + retain = false + }, + variable = #mqtt_packet_publish{ + topic_name = <<"xxx/yyy">>, + packet_id = undefined + }, + payload = <<"hello">> + }, ?assertEqual(Bin, serialize_to_binary(Packet)), ?assertMatch(Packet, parse_to_packet(Bin, #{strict_mode => true})). 
t_serialize_parse_qos1_publish(_) -> - Bin = <<50,13,0,5,97,47,98,47,99,0,1,104,97,104,97>>, - Packet = #mqtt_packet{header = #mqtt_packet_header{type = ?PUBLISH, - dup = false, - qos = ?QOS_1, - retain = false}, - variable = #mqtt_packet_publish{topic_name = <<"a/b/c">>, - packet_id = 1}, - payload = <<"haha">>}, + Bin = <<50, 13, 0, 5, 97, 47, 98, 47, 99, 0, 1, 104, 97, 104, 97>>, + Packet = #mqtt_packet{ + header = #mqtt_packet_header{ + type = ?PUBLISH, + dup = false, + qos = ?QOS_1, + retain = false + }, + variable = #mqtt_packet_publish{ + topic_name = <<"a/b/c">>, + packet_id = 1 + }, + payload = <<"haha">> + }, ?assertEqual(Bin, serialize_to_binary(Packet)), ?assertMatch(Packet, parse_to_packet(Bin, #{strict_mode => true})), %% strict_mode = true - ?ASSERT_FRAME_THROW(bad_packet_id, parse_serialize(?PUBLISH_PACKET(?QOS_1, <<"Topic">>, 0, <<>>))), + ?ASSERT_FRAME_THROW( + bad_packet_id, parse_serialize(?PUBLISH_PACKET(?QOS_1, <<"Topic">>, 0, <<>>)) + ), %% strict_mode = false _ = parse_serialize(?PUBLISH_PACKET(?QOS_1, <<"Topic">>, 0, <<>>), #{strict_mode => false}). t_serialize_parse_qos2_publish(_) -> Packet = ?PUBLISH_PACKET(?QOS_2, <<"Topic">>, 1, <<>>), - Bin = <<52,9,0,5,84,111,112,105,99,0,1>>, + Bin = <<52, 9, 0, 5, 84, 111, 112, 105, 99, 0, 1>>, ?assertEqual(Packet, parse_serialize(Packet)), ?assertEqual(Bin, serialize_to_binary(Packet)), ?assertMatch(Packet, parse_to_packet(Bin, #{strict_mode => true})), %% strict_mode = true - ?ASSERT_FRAME_THROW(bad_packet_id, parse_serialize(?PUBLISH_PACKET(?QOS_2, <<"Topic">>, 0, <<>>))), + ?ASSERT_FRAME_THROW( + bad_packet_id, parse_serialize(?PUBLISH_PACKET(?QOS_2, <<"Topic">>, 0, <<>>)) + ), %% strict_mode = false _ = parse_serialize(?PUBLISH_PACKET(?QOS_2, <<"Topic">>, 0, <<>>), #{strict_mode => false}). t_serialize_parse_publish_v5(_) -> - Props = #{'Payload-Format-Indicator' => 1, - 'Message-Expiry-Interval' => 60, - 'Topic-Alias' => 16#AB, - 'Response-Topic' => <<"reply">>, - 'Correlation-Data' => <<"correlation-id">>, - 'Subscription-Identifier' => 1, - 'Content-Type' => <<"text/json">>}, + Props = #{ + 'Payload-Format-Indicator' => 1, + 'Message-Expiry-Interval' => 60, + 'Topic-Alias' => 16#AB, + 'Response-Topic' => <<"reply">>, + 'Correlation-Data' => <<"correlation-id">>, + 'Subscription-Identifier' => 1, + 'Content-Type' => <<"text/json">> + }, Packet = ?PUBLISH_PACKET(?QOS_1, <<"$share/group/topic">>, 1, Props, <<"payload">>), ?assertEqual(Packet, parse_serialize(Packet, #{version => ?MQTT_PROTO_V5})). t_serialize_parse_puback(_) -> Packet = ?PUBACK_PACKET(1), - ?assertEqual(<<64,2,0,1>>, serialize_to_binary(Packet)), + ?assertEqual(<<64, 2, 0, 1>>, serialize_to_binary(Packet)), ?assertEqual(Packet, parse_serialize(Packet)), %% strict_mode = true ?ASSERT_FRAME_THROW(bad_packet_id, parse_serialize(?PUBACK_PACKET(0))), @@ -386,7 +429,7 @@ t_serialize_parse_puback(_) -> ?PUBACK_PACKET(0) = parse_serialize(?PUBACK_PACKET(0), #{strict_mode => false}). 
t_serialize_parse_puback_v3_4(_) -> - Bin = <<64,2,0,1>>, + Bin = <<64, 2, 0, 1>>, Packet = #mqtt_packet{header = #mqtt_packet_header{type = ?PUBACK}, variable = 1}, ?assertEqual(Bin, serialize_to_binary(Packet, ?MQTT_PROTO_V3)), ?assertEqual(Bin, serialize_to_binary(Packet, ?MQTT_PROTO_V4)), @@ -399,7 +442,7 @@ t_serialize_parse_puback_v5(_) -> t_serialize_parse_pubrec(_) -> Packet = ?PUBREC_PACKET(1), - ?assertEqual(<<5:4,0:4,2,0,1>>, serialize_to_binary(Packet)), + ?assertEqual(<<5:4, 0:4, 2, 0, 1>>, serialize_to_binary(Packet)), ?assertEqual(Packet, parse_serialize(Packet)), %% strict_mode = true ?ASSERT_FRAME_THROW(bad_packet_id, parse_serialize(?PUBREC_PACKET(0))), @@ -413,10 +456,10 @@ t_serialize_parse_pubrec_v5(_) -> t_serialize_parse_pubrel(_) -> Packet = ?PUBREL_PACKET(1), Bin = serialize_to_binary(Packet), - ?assertEqual(<<6:4,2:4,2,0,1>>, Bin), + ?assertEqual(<<6:4, 2:4, 2, 0, 1>>, Bin), ?assertEqual(Packet, parse_serialize(Packet)), %% PUBREL with bad qos 0 - Bin0 = <<6:4,0:4,2,0,1>>, + Bin0 = <<6:4, 0:4, 2, 0, 1>>, ?assertMatch(Packet, parse_to_packet(Bin0, #{strict_mode => false})), ?ASSERT_FRAME_THROW(bad_frame_header, parse_to_packet(Bin0, #{strict_mode => true})), %% strict_mode = false @@ -431,7 +474,7 @@ t_serialize_parse_pubrel_v5(_) -> t_serialize_parse_pubcomp(_) -> Packet = ?PUBCOMP_PACKET(1), Bin = serialize_to_binary(Packet), - ?assertEqual(<<7:4,0:4,2,0,1>>, Bin), + ?assertEqual(<<7:4, 0:4, 2, 0, 1>>, Bin), ?assertEqual(Packet, parse_serialize(Packet)), %% strict_mode = false ?PUBCOMP_PACKET(0) = parse_serialize(?PUBCOMP_PACKET(0), #{strict_mode => false}), @@ -444,14 +487,14 @@ t_serialize_parse_pubcomp_v5(_) -> t_serialize_parse_subscribe(_) -> %% SUBSCRIBE(Q1, R0, D0, PacketId=2, TopicTable=[{<<"TopicA">>,2}]) - Bin = <>, - TopicOpts = #{nl => 0 , rap => 0, rh => 0, qos => 2}, + Bin = <>, + TopicOpts = #{nl => 0, rap => 0, rh => 0, qos => 2}, TopicFilters = [{<<"TopicA">>, TopicOpts}], Packet = ?SUBSCRIBE_PACKET(2, TopicFilters), ?assertEqual(Bin, serialize_to_binary(Packet)), ?assertMatch(Packet, parse_to_packet(Bin, #{strict_mode => true})), %% SUBSCRIBE with bad qos 0 - Bin0 = <>, + Bin0 = <>, ?assertMatch(Packet, parse_to_packet(Bin0, #{strict_mode => false})), %% strict_mode = false _ = parse_to_packet(Bin0, #{strict_mode => false}), @@ -460,11 +503,15 @@ t_serialize_parse_subscribe(_) -> _ = parse_serialize(?SUBSCRIBE_PACKET(0, TopicFilters), #{strict_mode => false}), %% strict_mode = true ?ASSERT_FRAME_THROW(bad_packet_id, parse_serialize(?SUBSCRIBE_PACKET(0, TopicFilters))), - ?ASSERT_FRAME_THROW(bad_subqos, parse_serialize(?SUBSCRIBE_PACKET(1, [{<<"t">>, #{qos => 3}}]))). + ?ASSERT_FRAME_THROW( + bad_subqos, parse_serialize(?SUBSCRIBE_PACKET(1, [{<<"t">>, #{qos => 3}}])) + ). t_serialize_parse_subscribe_v5(_) -> - TopicFilters = [{<<"TopicQos0">>, #{rh => 1, qos => ?QOS_2, rap => 0, nl => 0}}, - {<<"TopicQos1">>, #{rh => 1, qos => ?QOS_2, rap => 0, nl => 0}}], + TopicFilters = [ + {<<"TopicQos0">>, #{rh => 1, qos => ?QOS_2, rap => 0, nl => 0}}, + {<<"TopicQos1">>, #{rh => 1, qos => ?QOS_2, rap => 0, nl => 0}} + ], Packet = ?SUBSCRIBE_PACKET(3, #{'Subscription-Identifier' => 16#FFFFFFF}, TopicFilters), ?assertEqual(Packet, parse_serialize(Packet, #{version => ?MQTT_PROTO_V5})). @@ -477,20 +524,25 @@ t_serialize_parse_suback(_) -> ?ASSERT_FRAME_THROW(bad_packet_id, parse_serialize(?SUBACK_PACKET(0, [?QOS_0]))). 
t_serialize_parse_suback_v5(_) -> - Packet = ?SUBACK_PACKET(1, #{'Reason-String' => <<"success">>, - 'User-Property' => [{<<"key">>, <<"value">>}]}, - [?QOS_0, ?QOS_1, 128]), + Packet = ?SUBACK_PACKET( + 1, + #{ + 'Reason-String' => <<"success">>, + 'User-Property' => [{<<"key">>, <<"value">>}] + }, + [?QOS_0, ?QOS_1, 128] + ), ?assertEqual(Packet, parse_serialize(Packet, #{version => ?MQTT_PROTO_V5})). t_serialize_parse_unsubscribe(_) -> %% UNSUBSCRIBE(Q1, R1, D0, PacketId=2, TopicTable=[<<"TopicA">>]) - Bin = <>, + Bin = <>, Packet = ?UNSUBSCRIBE_PACKET(2, [<<"TopicA">>]), ?assertEqual(Bin, serialize_to_binary(Packet)), ?assertMatch(Packet, parse_to_packet(Bin, #{strict_mode => true})), %% UNSUBSCRIBE with bad qos %% UNSUBSCRIBE(Q1, R0, D0, PacketId=2, TopicTable=[<<"TopicA">>]) - Bin0 = <>, + Bin0 = <>, ?assertMatch(Packet, parse_to_packet(Bin0, #{strict_mode => false})), ?ASSERT_FRAME_THROW(bad_frame_header, parse_to_packet(Bin0, #{strict_mode => true})), %% strict_mode = false @@ -508,9 +560,14 @@ t_serialize_parse_unsuback(_) -> ?assertEqual(Packet, parse_serialize(Packet)). t_serialize_parse_unsuback_v5(_) -> - Packet = ?UNSUBACK_PACKET(10, #{'Reason-String' => <<"Not authorized">>, - 'User-Property' => [{<<"key">>, <<"val">>}]}, - [16#87, 16#87, 16#87]), + Packet = ?UNSUBACK_PACKET( + 10, + #{ + 'Reason-String' => <<"Not authorized">>, + 'User-Property' => [{<<"key">>, <<"val">>}] + }, + [16#87, 16#87, 16#87] + ), ?assertEqual(Packet, parse_serialize(Packet, #{version => ?MQTT_PROTO_V5})). t_serialize_parse_pingreq(_) -> @@ -530,24 +587,37 @@ t_serialize_parse_disconnect(_) -> ?assertEqual(Packet, parse_serialize(Packet)). t_serialize_parse_disconnect_v5(_) -> - Packet = ?DISCONNECT_PACKET(?RC_SUCCESS, - #{'Session-Expiry-Interval' => 60, - 'Reason-String' => <<"server_moved">>, - 'Server-Reference' => <<"192.168.1.10">> - }), + Packet = ?DISCONNECT_PACKET( + ?RC_SUCCESS, + #{ + 'Session-Expiry-Interval' => 60, + 'Reason-String' => <<"server_moved">>, + 'Server-Reference' => <<"192.168.1.10">> + } + ), ?assertEqual(Packet, parse_serialize(Packet, #{version => ?MQTT_PROTO_V5})). t_serialize_parse_auth_v5(_) -> - Packet = ?AUTH_PACKET(?RC_SUCCESS, - #{'Authentication-Method' => <<"oauth2">>, - 'Authentication-Data' => <<"3zekkd">>, - 'Reason-String' => <<"success">>, - 'User-Property' => [{<<"key1">>, <<"val1">>}, - {<<"key2">>, <<"val2">>}] - }), + Packet = ?AUTH_PACKET( + ?RC_SUCCESS, + #{ + 'Authentication-Method' => <<"oauth2">>, + 'Authentication-Data' => <<"3zekkd">>, + 'Reason-String' => <<"success">>, + 'User-Property' => [ + {<<"key1">>, <<"val1">>}, + {<<"key2">>, <<"val2">>} + ] + } + ), ?assertEqual(Packet, parse_serialize(Packet, #{version => ?MQTT_PROTO_V5})), - ?assertEqual(Packet, parse_serialize(Packet, #{version => ?MQTT_PROTO_V5, - strict_mode => true})). + ?assertEqual( + Packet, + parse_serialize(Packet, #{ + version => ?MQTT_PROTO_V5, + strict_mode => true + }) + ). parse_serialize(Packet) -> parse_serialize(Packet, #{strict_mode => true}). diff --git a/apps/emqx/test/emqx_gc_SUITE.erl b/apps/emqx/test/emqx_gc_SUITE.erl index 123bcc44c..f853476fe 100644 --- a/apps/emqx/test/emqx_gc_SUITE.erl +++ b/apps/emqx/test/emqx_gc_SUITE.erl @@ -57,4 +57,3 @@ t_reset(_) -> ?assertEqual(GC, emqx_gc:reset(GC1)), DisabledGC = emqx_gc:init(#{count => 0, bytes => 0}), ?assertEqual(DisabledGC, emqx_gc:reset(DisabledGC)). 
- diff --git a/apps/emqx/test/emqx_hooks_SUITE.erl b/apps/emqx/test/emqx_hooks_SUITE.erl index a7b96712f..c65045f12 100644 --- a/apps/emqx/test/emqx_hooks_SUITE.erl +++ b/apps/emqx/test/emqx_hooks_SUITE.erl @@ -46,53 +46,72 @@ end_per_testcase(_) -> % error('TODO'). t_add_hook_order(_) -> - ?assert(proper:quickcheck(add_hook_order_prop(), - [{on_output, fun ct:print/2}, - {numtests, 1000}])). + ?assert( + proper:quickcheck( + add_hook_order_prop(), + [ + {on_output, fun ct:print/2}, + {numtests, 1000} + ] + ) + ). add_hook_order_prop() -> %% Note: order is inversed, since higher prio hooks run first: Comparator = fun({Prio1, M1, F1}, {Prio2, M2, F2}) -> - Prio1 > Prio2 orelse - (Prio1 =:= Prio2 andalso {M1, F1} =< {M2, F2}) - end, + Prio1 > Prio2 orelse + (Prio1 =:= Prio2 andalso {M1, F1} =< {M2, F2}) + end, ?FORALL( - Hooks, hooks(), - try - {ok, _} = emqx_hooks:start_link(), - [ok = emqx:hook(prop_hook, {M, F, []}, Prio) || {Prio, M, F} <- Hooks], - Callbacks = emqx_hooks:lookup(prop_hook), - Order = [{Prio, M, F} || {callback, {M, F, _}, _Filter, Prio} <- Callbacks], - ?assertEqual(lists:sort(Comparator, Hooks), - Order), - true - after - emqx_hooks:stop() - end). + Hooks, + hooks(), + try + {ok, _} = emqx_hooks:start_link(), + [ok = emqx:hook(prop_hook, {M, F, []}, Prio) || {Prio, M, F} <- Hooks], + Callbacks = emqx_hooks:lookup(prop_hook), + Order = [{Prio, M, F} || {callback, {M, F, _}, _Filter, Prio} <- Callbacks], + ?assertEqual( + lists:sort(Comparator, Hooks), + Order + ), + true + after + emqx_hooks:stop() + end + ). hooks() -> - ?SUCHTHAT(L0, list({range(-1, 5), atom(), atom()}), - begin - %% Duplicate callbacks are ignored, so check that - %% all callbacks are unique: - L = [{M, F} || {_Prio, M, F} <- L0], - length(lists:usort(L)) =:= length(L0) - end). + ?SUCHTHAT( + L0, + list({range(-1, 5), atom(), atom()}), + begin + %% Duplicate callbacks are ignored, so check that + %% all callbacks are unique: + L = [{M, F} || {_Prio, M, F} <- L0], + length(lists:usort(L)) =:= length(L0) + end + ). 
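add_hook_order_prop/0 above is a standard PropEr property: a ?SUCHTHAT generator feeding a ?FORALL body, executed with proper:quickcheck/2. A minimal sketch of that shape with a deliberately trivial property, so it runs with only the proper application available:

    -module(prop_example).
    -include_lib("proper/include/proper.hrl").
    -export([run/0]).

    prop_reverse_twice() ->
        %% Reversing a list twice yields the original list.
        ?FORALL(L, list(integer()), lists:reverse(lists:reverse(L)) =:= L).

    run() ->
        true = proper:quickcheck(
            prop_reverse_twice(),
            [{numtests, 100}, {on_output, fun io:format/2}]
        ),
        ok.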
t_add_put_del_hook(_) -> {ok, _} = emqx_hooks:start_link(), ok = emqx:hook(test_hook, {?MODULE, hook_fun1, []}), ok = emqx:hook(test_hook, {?MODULE, hook_fun2, []}), - ?assertEqual({error, already_exists}, - emqx:hook(test_hook, {?MODULE, hook_fun2, []})), - Callbacks0 = [{callback, {?MODULE, hook_fun1, []}, undefined, 0}, - {callback, {?MODULE, hook_fun2, []}, undefined, 0}], + ?assertEqual( + {error, already_exists}, + emqx:hook(test_hook, {?MODULE, hook_fun2, []}) + ), + Callbacks0 = [ + {callback, {?MODULE, hook_fun1, []}, undefined, 0}, + {callback, {?MODULE, hook_fun2, []}, undefined, 0} + ], ?assertEqual(Callbacks0, emqx_hooks:lookup(test_hook)), ok = emqx_hooks:put(test_hook, {?MODULE, hook_fun1, [test]}), ok = emqx_hooks:put(test_hook, {?MODULE, hook_fun2, [test]}), - Callbacks1 = [{callback, {?MODULE, hook_fun1, [test]}, undefined, 0}, - {callback, {?MODULE, hook_fun2, [test]}, undefined, 0}], + Callbacks1 = [ + {callback, {?MODULE, hook_fun1, [test]}, undefined, 0}, + {callback, {?MODULE, hook_fun2, [test]}, undefined, 0} + ], ?assertEqual(Callbacks1, emqx_hooks:lookup(test_hook)), ok = emqx:unhook(test_hook, {?MODULE, hook_fun1}), @@ -104,20 +123,24 @@ t_add_put_del_hook(_) -> ok = emqx:hook(emqx_hook, {?MODULE, hook_fun2, []}, 2), ok = emqx:hook(emqx_hook, {?MODULE, hook_fun10, []}, 10), ok = emqx:hook(emqx_hook, {?MODULE, hook_fun9, []}, 9), - Callbacks2 = [{callback, {?MODULE, hook_fun10, []}, undefined, 10}, - {callback, {?MODULE, hook_fun9, []}, undefined, 9}, - {callback, {?MODULE, hook_fun8, []}, undefined, 8}, - {callback, {?MODULE, hook_fun2, []}, undefined, 2}], + Callbacks2 = [ + {callback, {?MODULE, hook_fun10, []}, undefined, 10}, + {callback, {?MODULE, hook_fun9, []}, undefined, 9}, + {callback, {?MODULE, hook_fun8, []}, undefined, 8}, + {callback, {?MODULE, hook_fun2, []}, undefined, 2} + ], ?assertEqual(Callbacks2, emqx_hooks:lookup(emqx_hook)), ok = emqx_hooks:put(emqx_hook, {?MODULE, hook_fun8, [test]}, 3), ok = emqx_hooks:put(emqx_hook, {?MODULE, hook_fun2, [test]}, 4), ok = emqx_hooks:put(emqx_hook, {?MODULE, hook_fun10, [test]}, 1), ok = emqx_hooks:put(emqx_hook, {?MODULE, hook_fun9, [test]}, 2), - Callbacks3 = [{callback, {?MODULE, hook_fun2, [test]}, undefined, 4}, - {callback, {?MODULE, hook_fun8, [test]}, undefined, 3}, - {callback, {?MODULE, hook_fun9, [test]}, undefined, 2}, - {callback, {?MODULE, hook_fun10, [test]}, undefined, 1}], + Callbacks3 = [ + {callback, {?MODULE, hook_fun2, [test]}, undefined, 4}, + {callback, {?MODULE, hook_fun8, [test]}, undefined, 3}, + {callback, {?MODULE, hook_fun9, [test]}, undefined, 2}, + {callback, {?MODULE, hook_fun10, [test]}, undefined, 1} + ], ?assertEqual(Callbacks3, emqx_hooks:lookup(emqx_hook)), ok = emqx:unhook(emqx_hook, {?MODULE, hook_fun2, [test]}), @@ -132,12 +155,13 @@ t_run_hooks(_) -> ok = emqx:hook(foldl_hook, {?MODULE, hook_fun3, [init]}), ok = emqx:hook(foldl_hook, {?MODULE, hook_fun4, [init]}), ok = emqx:hook(foldl_hook, {?MODULE, hook_fun5, [init]}), - [r5,r4] = emqx:run_fold_hook(foldl_hook, [arg1, arg2], []), + [r5, r4] = emqx:run_fold_hook(foldl_hook, [arg1, arg2], []), [] = emqx:run_fold_hook(unknown_hook, [], []), ok = emqx:hook(foldl_hook2, {?MODULE, hook_fun9, []}), ok = emqx:hook(foldl_hook2, {?MODULE, hook_fun10, []}), - [r10] = emqx:run_fold_hook(foldl_hook2, [arg], []), %% Note: 10 is _less_ than 9 per lexicographic order + %% Note: 10 is _less_ than 9 per lexicographic order + [r10] = emqx:run_fold_hook(foldl_hook2, [arg], []), ok = emqx:hook(foreach_hook, {?MODULE, hook_fun6, 
[initArg]}), {error, already_exists} = emqx:hook(foreach_hook, {?MODULE, hook_fun6, [initArg]}), @@ -146,11 +170,17 @@ t_run_hooks(_) -> ok = emqx:run_hook(foreach_hook, [arg]), ok = emqx:hook(foreach_filter1_hook, {?MODULE, hook_fun1, []}, {?MODULE, hook_filter1, []}, 0), - ?assertEqual(ok, emqx:run_hook(foreach_filter1_hook, [arg])), %% filter passed - ?assertEqual(ok, emqx:run_hook(foreach_filter1_hook, [arg1])), %% filter failed + %% filter passed + ?assertEqual(ok, emqx:run_hook(foreach_filter1_hook, [arg])), + %% filter failed + ?assertEqual(ok, emqx:run_hook(foreach_filter1_hook, [arg1])), - ok = emqx:hook(foldl_filter2_hook, {?MODULE, hook_fun2, []}, {?MODULE, hook_filter2, [init_arg]}), - ok = emqx:hook(foldl_filter2_hook, {?MODULE, hook_fun2_1, []}, {?MODULE, hook_filter2_1, [init_arg]}), + ok = emqx:hook( + foldl_filter2_hook, {?MODULE, hook_fun2, []}, {?MODULE, hook_filter2, [init_arg]} + ), + ok = emqx:hook( + foldl_filter2_hook, {?MODULE, hook_fun2_1, []}, {?MODULE, hook_filter2_1, [init_arg]} + ), ?assertEqual(3, emqx:run_fold_hook(foldl_filter2_hook, [arg], 1)), ?assertEqual(2, emqx:run_fold_hook(foldl_filter2_hook, [arg1], 1)). @@ -176,14 +206,14 @@ hook_fun2(_, Acc) -> {ok, Acc + 1}. hook_fun2_1(_, Acc) -> {ok, Acc + 1}. hook_fun3(arg1, arg2, _Acc, init) -> ok. -hook_fun4(arg1, arg2, Acc, init) -> {ok, [r4 | Acc]}. -hook_fun5(arg1, arg2, Acc, init) -> {ok, [r5 | Acc]}. +hook_fun4(arg1, arg2, Acc, init) -> {ok, [r4 | Acc]}. +hook_fun5(arg1, arg2, Acc, init) -> {ok, [r5 | Acc]}. hook_fun6(arg, initArg) -> ok. hook_fun7(arg, initArg) -> ok. hook_fun8(arg, initArg) -> ok. -hook_fun9(arg, Acc) -> {stop, [r9 | Acc]}. +hook_fun9(arg, Acc) -> {stop, [r9 | Acc]}. hook_fun10(arg, Acc) -> {stop, [r10 | Acc]}. hook_filter1(arg) -> true; @@ -192,6 +222,6 @@ hook_filter1(_) -> false. hook_filter2(arg, _Acc, init_arg) -> true; hook_filter2(_, _Acc, _IntArg) -> false. -hook_filter2_1(arg, _Acc, init_arg) -> true; +hook_filter2_1(arg, _Acc, init_arg) -> true; hook_filter2_1(arg1, _Acc, init_arg) -> true; -hook_filter2_1(_, _Acc, _IntArg) -> false. +hook_filter2_1(_, _Acc, _IntArg) -> false. diff --git a/apps/emqx/test/emqx_inflight_SUITE.erl b/apps/emqx/test/emqx_inflight_SUITE.erl index 69f3dd055..90e2fb398 100644 --- a/apps/emqx/test/emqx_inflight_SUITE.erl +++ b/apps/emqx/test/emqx_inflight_SUITE.erl @@ -35,8 +35,12 @@ t_lookup(_) -> t_insert(_) -> Inflight = emqx_inflight:insert( - b, 2, emqx_inflight:insert( - a, 1, emqx_inflight:new())), + b, + 2, + emqx_inflight:insert( + a, 1, emqx_inflight:new() + ) + ), ?assertEqual(2, emqx_inflight:size(Inflight)), ?assertEqual({value, 1}, emqx_inflight:lookup(a, Inflight)), ?assertEqual({value, 2}, emqx_inflight:lookup(b, Inflight)), @@ -63,17 +67,25 @@ t_delete(_) -> t_values(_) -> Inflight = emqx_inflight:insert( - b, 2, emqx_inflight:insert( - a, 1, emqx_inflight:new())), - ?assertEqual([1,2], emqx_inflight:values(Inflight)), - ?assertEqual([{a,1},{b,2}], emqx_inflight:to_list(Inflight)). + b, + 2, + emqx_inflight:insert( + a, 1, emqx_inflight:new() + ) + ), + ?assertEqual([1, 2], emqx_inflight:values(Inflight)), + ?assertEqual([{a, 1}, {b, 2}], emqx_inflight:to_list(Inflight)). t_is_full(_) -> Inflight = emqx_inflight:insert(k, v, emqx_inflight:new()), ?assertNot(emqx_inflight:is_full(Inflight)), Inflight1 = emqx_inflight:insert( - b, 2, emqx_inflight:insert( - a, 1, emqx_inflight:new(2))), + b, + 2, + emqx_inflight:insert( + a, 1, emqx_inflight:new(2) + ) + ), ?assert(emqx_inflight:is_full(Inflight1)). 
t_is_empty(_) -> @@ -85,8 +97,12 @@ t_is_empty(_) -> t_window(_) -> ?assertEqual([], emqx_inflight:window(emqx_inflight:new(0))), Inflight = emqx_inflight:insert( - b, 2, emqx_inflight:insert( - a, 1, emqx_inflight:new(2))), + b, + 2, + emqx_inflight:insert( + a, 1, emqx_inflight:new(2) + ) + ), ?assertEqual([a, b], emqx_inflight:window(Inflight)). % t_to_list(_) -> diff --git a/apps/emqx/test/emqx_json_SUITE.erl b/apps/emqx/test/emqx_json_SUITE.erl index f377d4565..8ab803acf 100644 --- a/apps/emqx/test/emqx_json_SUITE.erl +++ b/apps/emqx/test/emqx_json_SUITE.erl @@ -21,11 +21,14 @@ -include_lib("eunit/include/eunit.hrl"). --import(emqx_json, - [ encode/1 - , decode/1 - , decode/2 - ]). +-import( + emqx_json, + [ + encode/1, + decode/1, + decode/2 + ] +). %% copied jiffy/readme %%-------------------------------------------------------------------- @@ -85,17 +88,28 @@ t_decode_encode(_) -> [{<<"foo">>, <<"bar">>}] = decode(encode([{foo, bar}])), [{<<"foo">>, <<"bar">>}] = decode(encode([{<<"foo">>, <<"bar">>}])), [[{<<"foo">>, <<"bar">>}]] = decode(encode([[{<<"foo">>, <<"bar">>}]])), - [[{<<"foo">>, <<"bar">>}, - {<<"a">>, <<"b">>}], - [{<<"x">>, <<"y">>}]] = decode(encode([[{<<"foo">>, <<"bar">>}, - {<<"a">>, <<"b">>}], - [{<<"x">>, <<"y">>}]])), + [ + [ + {<<"foo">>, <<"bar">>}, + {<<"a">>, <<"b">>} + ], + [{<<"x">>, <<"y">>}] + ] = decode( + encode([ + [ + {<<"foo">>, <<"bar">>}, + {<<"a">>, <<"b">>} + ], + [{<<"x">>, <<"y">>}] + ]) + ), #{<<"foo">> := <<"bar">>} = decode(encode(#{<<"foo">> => <<"bar">>}), [return_maps]), JsonText = <<"{\"bool\":true,\"int\":10,\"foo\":\"bar\"}">>, - JsonMaps = #{<<"bool">> => true, - <<"int">> => 10, - <<"foo">> => <<"bar">> - }, + JsonMaps = #{ + <<"bool">> => true, + <<"int">> => 10, + <<"foo">> => <<"bar">> + }, ?assertEqual(JsonText, encode({decode(JsonText)})), ?assertEqual(JsonMaps, decode(JsonText, [return_maps])). @@ -120,5 +134,5 @@ safe_encode_decode(Term) -> {ok, Json} = emqx_json:safe_encode(Term), case emqx_json:safe_decode(Json) of {ok, {NTerm}} -> NTerm; - {ok, NTerm} -> NTerm + {ok, NTerm} -> NTerm end. 
diff --git a/apps/emqx/test/emqx_keepalive_SUITE.erl b/apps/emqx/test/emqx_keepalive_SUITE.erl index 7f725e61b..93f68bfca 100644 --- a/apps/emqx/test/emqx_keepalive_SUITE.erl +++ b/apps/emqx/test/emqx_keepalive_SUITE.erl @@ -29,9 +29,14 @@ t_check(_) -> ?assertEqual(0, emqx_keepalive:info(statval, Keepalive)), ?assertEqual(0, emqx_keepalive:info(repeat, Keepalive)), Info = emqx_keepalive:info(Keepalive), - ?assertEqual(#{interval => 60, - statval => 0, - repeat => 0}, Info), + ?assertEqual( + #{ + interval => 60, + statval => 0, + repeat => 0 + }, + Info + ), {ok, Keepalive1} = emqx_keepalive:check(1, Keepalive), ?assertEqual(1, emqx_keepalive:info(statval, Keepalive1)), ?assertEqual(0, emqx_keepalive:info(repeat, Keepalive1)), diff --git a/apps/emqx/test/emqx_limiter_SUITE.erl b/apps/emqx/test/emqx_limiter_SUITE.erl index 9d8c0b8fd..423a3d4ed 100644 --- a/apps/emqx/test/emqx_limiter_SUITE.erl +++ b/apps/emqx/test/emqx_limiter_SUITE.erl @@ -39,16 +39,22 @@ end_per_testcase(_, _) -> %%-------------------------------------------------------------------- t_init(_) -> - Cap1 = 1000, Intv1 = 10, - Cap2 = 2000, Intv2 = 15, + Cap1 = 1000, + Intv1 = 10, + Cap2 = 2000, + Intv2 = 15, undefined = emqx_limiter:init(external, undefined, undefined, []), - #{conn_bytes_in := #{capacity := Cap2, interval := Intv2, tokens := Cap2}, - conn_messages_in := #{capacity := Cap1, interval := Intv1, tokens := Cap1 }} = + #{ + conn_bytes_in := #{capacity := Cap2, interval := Intv2, tokens := Cap2}, + conn_messages_in := #{capacity := Cap1, interval := Intv1, tokens := Cap1} + } = emqx_limiter:info( - emqx_limiter:init(external, {Cap1, Intv1}, {Cap2, Intv2}, [])), - #{conn_bytes_in := #{capacity := Cap2, interval := Intv2, tokens := Cap2 }} = + emqx_limiter:init(external, {Cap1, Intv1}, {Cap2, Intv2}, []) + ), + #{conn_bytes_in := #{capacity := Cap2, interval := Intv2, tokens := Cap2}} = emqx_limiter:info( - emqx_limiter:init(external, undefined, {Cap1, Intv1}, [{conn_bytes_in, {Cap2, Intv2}}])). + emqx_limiter:init(external, undefined, {Cap1, Intv1}, [{conn_bytes_in, {Cap2, Intv2}}]) + ). t_check_conn(_) -> Limiter = emqx_limiter:init(external, [{conn_bytes_in, {100, 1}}]), @@ -75,4 +81,3 @@ t_check_overall(_) -> %% XXX: P = 10000/r = 10000/100 * 1000 = 100s ? {pause, _, Limiter4} = emqx_limiter:check(#{cnt => 10000, oct => 0}, Limiter3), #{overall_messages_routing := #{tokens := 0}} = emqx_limiter:info(Limiter4). - diff --git a/apps/emqx/test/emqx_listeners_SUITE.erl b/apps/emqx/test/emqx_listeners_SUITE.erl index d033a9b6f..0713bf712 100644 --- a/apps/emqx/test/emqx_listeners_SUITE.erl +++ b/apps/emqx/test/emqx_listeners_SUITE.erl @@ -40,45 +40,58 @@ end_per_suite(_Config) -> application:stop(esockd), application:stop(cowboy). 
-init_per_testcase(Case, Config) - when Case =:= t_max_conns_tcp; Case =:= t_current_conns_tcp -> +init_per_testcase(Case, Config) when + Case =:= t_max_conns_tcp; Case =:= t_current_conns_tcp +-> {ok, _} = emqx_config_handler:start_link(), PrevListeners = emqx_config:get([listeners, tcp], #{}), PrevRateLimit = emqx_config:get([rate_limit], #{}), emqx_config:put( - [listeners, tcp], - #{listener_test => #{bind => {"127.0.0.1", 9999}, - max_connections => 4321, - limiter => #{} - } - }), + [listeners, tcp], + #{ + listener_test => #{ + bind => {"127.0.0.1", 9999}, + max_connections => 4321, + limiter => #{} + } + } + ), emqx_config:put([rate_limit], #{max_conn_rate => 1000}), ok = emqx_listeners:start(), - [ {prev_listener_conf, PrevListeners} - , {prev_rate_limit_conf, PrevRateLimit} - | Config]; + [ + {prev_listener_conf, PrevListeners}, + {prev_rate_limit_conf, PrevRateLimit} + | Config + ]; init_per_testcase(t_wss_conn, Config) -> {ok, _} = emqx_config_handler:start_link(), PrevListeners = emqx_config:get([listeners, wss], #{}), emqx_config:put( - [listeners, wss], - #{listener_test => #{bind => {{127,0,0,1}, 9998}, - limiter => #{}, - ssl => #{cacertfile => ?CERTS_PATH("cacert.pem"), - certfile => ?CERTS_PATH("cert.pem"), - keyfile => ?CERTS_PATH("key.pem") - } - } - }), + [listeners, wss], + #{ + listener_test => #{ + bind => {{127, 0, 0, 1}, 9998}, + limiter => #{}, + ssl => #{ + cacertfile => ?CERTS_PATH("cacert.pem"), + certfile => ?CERTS_PATH("cert.pem"), + keyfile => ?CERTS_PATH("key.pem") + } + } + } + ), ok = emqx_listeners:start(), - [ {prev_listener_conf, PrevListeners} - | Config]; + [ + {prev_listener_conf, PrevListeners} + | Config + ]; init_per_testcase(_, Config) -> {ok, _} = emqx_config_handler:start_link(), Config. -end_per_testcase(Case, Config) - when Case =:= t_max_conns_tcp; Case =:= t_current_conns_tcp -> +end_per_testcase(Case, Config) when + Case =:= t_max_conns_tcp; Case =:= t_current_conns_tcp +-> PrevListener = ?config(prev_listener_conf, Config), PrevRateLimit = ?config(prev_rate_limit_conf, Config), emqx_config:put([listeners, tcp], PrevListener), @@ -98,7 +111,7 @@ end_per_testcase(_, _Config) -> t_start_stop_listeners(_) -> ok = emqx_listeners:start(), - ?assertException(error, _, emqx_listeners:start_listener({ws,{"127.0.0.1", 8083}, []})), + ?assertException(error, _, emqx_listeners:start_listener({ws, {"127.0.0.1", 8083}, []})), ok = emqx_listeners:stop(). t_restart_listeners(_) -> @@ -110,10 +123,10 @@ t_restart_listeners(_) -> t_max_conns_tcp(_) -> %% Note: Using a string representation for the bind address like %% "127.0.0.1" does not work - ?assertEqual(4321, emqx_listeners:max_conns('tcp:listener_test', {{127,0,0,1}, 9999})). + ?assertEqual(4321, emqx_listeners:max_conns('tcp:listener_test', {{127, 0, 0, 1}, 9999})). t_current_conns_tcp(_) -> - ?assertEqual(0, emqx_listeners:current_conns('tcp:listener_test', {{127,0,0,1}, 9999})). + ?assertEqual(0, emqx_listeners:current_conns('tcp:listener_test', {{127, 0, 0, 1}, 9999})). t_wss_conn(_) -> {ok, Socket} = ssl:connect({127, 0, 0, 1}, 9998, [{verify, verify_none}], 1000), @@ -130,9 +143,10 @@ render_config_file() -> NewName. mustache_vars() -> - [{platform_data_dir, local_path(["data"])}, - {platform_etc_dir, local_path(["etc"])}, - {platform_log_dir, local_path(["log"])} + [ + {platform_data_dir, local_path(["data"])}, + {platform_etc_dir, local_path(["etc"])}, + {platform_log_dir, local_path(["log"])} ]. 
generate_config() -> @@ -141,11 +155,15 @@ generate_config() -> hocon_tconf:generate(emqx_schema, Conf). set_app_env({App, Lists}) -> - lists:foreach(fun({authz_file, _Var}) -> - application:set_env(App, authz_file, local_path(["etc", "authz.conf"])); - ({Par, Var}) -> - application:set_env(App, Par, Var) - end, Lists). + lists:foreach( + fun + ({authz_file, _Var}) -> + application:set_env(App, authz_file, local_path(["etc", "authz.conf"])); + ({Par, Var}) -> + application:set_env(App, Par, Var) + end, + Lists + ). local_path(Components, Module) -> filename:join([get_base_dir(Module) | Components]). diff --git a/apps/emqx/test/emqx_logger_SUITE.erl b/apps/emqx/test/emqx_logger_SUITE.erl index 85afcb655..e7f6c08bc 100644 --- a/apps/emqx/test/emqx_logger_SUITE.erl +++ b/apps/emqx/test/emqx_logger_SUITE.erl @@ -66,20 +66,29 @@ t_primary_log_level(_) -> ?assertEqual(debug, ?LOGGER:get_primary_log_level()). t_get_log_handlers(_) -> - ok = logger:add_handler(logger_std_h_for_test, logger_std_h, #{config => #{type => file, file => "logger_std_h_for_test"}}), - ok = logger:add_handler(logger_disk_log_h_for_test, logger_disk_log_h, #{config => #{file => "logger_disk_log_h_for_test"}}), - ?assertMatch([_|_], ?LOGGER:get_log_handlers()). + ok = logger:add_handler(logger_std_h_for_test, logger_std_h, #{ + config => #{type => file, file => "logger_std_h_for_test"} + }), + ok = logger:add_handler(logger_disk_log_h_for_test, logger_disk_log_h, #{ + config => #{file => "logger_disk_log_h_for_test"} + }), + ?assertMatch([_ | _], ?LOGGER:get_log_handlers()). t_get_log_handler(_) -> - [?assertMatch(#{id := Id}, ?LOGGER:get_log_handler(Id)) - || #{id := Id} <- ?LOGGER:get_log_handlers()]. + [ + ?assertMatch(#{id := Id}, ?LOGGER:get_log_handler(Id)) + || #{id := Id} <- ?LOGGER:get_log_handlers() + ]. t_set_log_handler_level(_) -> - [begin - ?LOGGER:set_log_handler_level(Id, Level), - ?assertMatch(#{id := Id, level := Level}, ?LOGGER:get_log_handler(Id)) - end || #{id := Id} <- ?LOGGER:get_log_handlers(), - Level <- ?SUPPORTED_LEVELS], + [ + begin + ?LOGGER:set_log_handler_level(Id, Level), + ?assertMatch(#{id := Id, level := Level}, ?LOGGER:get_log_handler(Id)) + end + || #{id := Id} <- ?LOGGER:get_log_handlers(), + Level <- ?SUPPORTED_LEVELS + ], ?LOGGER:set_log_level(warning). t_set_log_level(_) -> @@ -95,29 +104,39 @@ t_start_stop_log_handler(_) -> io:format("====== stopped: ~p~n", [?LOGGER:get_log_handlers(stopped)]), StartedN = length(?LOGGER:get_log_handlers(started)), StoppedN = length(?LOGGER:get_log_handlers(stopped)), - [begin - io:format("------ stopping : ~p~n", [Id]), - ok = ?LOGGER:stop_log_handler(Id), - ?assertEqual(StartedN - 1, length(?LOGGER:get_log_handlers(started))), - ?assertEqual(StoppedN + 1, length(?LOGGER:get_log_handlers(stopped))), - io:format("------ starting : ~p~n", [Id]), - ok = ?LOGGER:start_log_handler(Id), - ?assertEqual(StartedN, length(?LOGGER:get_log_handlers(started))), - ?assertEqual(StoppedN, length(?LOGGER:get_log_handlers(stopped))) - end || #{id := Id} <- ?LOGGER:get_log_handlers(started)]. 
+ [ + begin + io:format("------ stopping : ~p~n", [Id]), + ok = ?LOGGER:stop_log_handler(Id), + ?assertEqual(StartedN - 1, length(?LOGGER:get_log_handlers(started))), + ?assertEqual(StoppedN + 1, length(?LOGGER:get_log_handlers(stopped))), + io:format("------ starting : ~p~n", [Id]), + ok = ?LOGGER:start_log_handler(Id), + ?assertEqual(StartedN, length(?LOGGER:get_log_handlers(started))), + ?assertEqual(StoppedN, length(?LOGGER:get_log_handlers(stopped))) + end + || #{id := Id} <- ?LOGGER:get_log_handlers(started) + ]. t_start_stop_log_handler2(_) -> %% start a handler that is already started returns ok - [begin - ok = ?LOGGER:start_log_handler(Id) - end || #{id := Id} <- ?LOGGER:get_log_handlers(started)], + [ + begin + ok = ?LOGGER:start_log_handler(Id) + end + || #{id := Id} <- ?LOGGER:get_log_handlers(started) + ], %% stop a no exists handler returns {not_started, Id} - ?assertMatch({error, {not_started, invalid_handler_id}}, - ?LOGGER:stop_log_handler(invalid_handler_id)), + ?assertMatch( + {error, {not_started, invalid_handler_id}}, + ?LOGGER:stop_log_handler(invalid_handler_id) + ), %% stop a handler that is already stopped returns {not_started, Id} ok = ?LOGGER:stop_log_handler(default), - ?assertMatch({error, {not_started, default}}, - ?LOGGER:stop_log_handler(default)). + ?assertMatch( + {error, {not_started, default}}, + ?LOGGER:stop_log_handler(default) + ). t_set_metadata_peername(_) -> ?assertEqual(ok, ?LOGGER:set_metadata_peername("for_test")). @@ -126,12 +145,11 @@ t_set_metadata_clientid(_) -> ?assertEqual(ok, ?LOGGER:set_metadata_clientid(<<>>)), ?assertEqual(ok, ?LOGGER:set_metadata_clientid("for_test")). - split_toks_at_dot(AllToks) -> case lists:splitwith(fun is_no_dot/1, AllToks) of - {Toks, [{dot,_}=Dot]} -> [Toks ++ [Dot]]; - {Toks, [{dot,_}=Dot | Tl]} -> [Toks ++ [Dot] | split_toks_at_dot(Tl)] + {Toks, [{dot, _} = Dot]} -> [Toks ++ [Dot]]; + {Toks, [{dot, _} = Dot | Tl]} -> [Toks ++ [Dot] | split_toks_at_dot(Tl)] end. -is_no_dot({dot,_}) -> false; -is_no_dot(_) -> true. +is_no_dot({dot, _}) -> false; +is_no_dot(_) -> true. diff --git a/apps/emqx/test/emqx_message_SUITE.erl b/apps/emqx/test/emqx_message_SUITE.erl index d79815785..56313c1ee 100644 --- a/apps/emqx/test/emqx_message_SUITE.erl +++ b/apps/emqx/test/emqx_message_SUITE.erl @@ -159,63 +159,71 @@ t_is_expired(_) -> % error('TODO'). t_to_packet(_) -> - Pkt = #mqtt_packet{header = #mqtt_packet_header{type = ?PUBLISH, - qos = ?QOS_0, - retain = false, - dup = false - }, - variable = #mqtt_packet_publish{topic_name = <<"topic">>, - packet_id = 10, - properties = #{} - }, - payload = <<"payload">> - }, + Pkt = #mqtt_packet{ + header = #mqtt_packet_header{ + type = ?PUBLISH, + qos = ?QOS_0, + retain = false, + dup = false + }, + variable = #mqtt_packet_publish{ + topic_name = <<"topic">>, + packet_id = 10, + properties = #{} + }, + payload = <<"payload">> + }, Msg = emqx_message:make(<<"clientid">>, ?QOS_0, <<"topic">>, <<"payload">>), ?assertEqual(Pkt, emqx_message:to_packet(10, Msg)). 
t_to_packet_with_props(_) -> Props = #{'Subscription-Identifier' => 1}, - Pkt = #mqtt_packet{header = #mqtt_packet_header{type = ?PUBLISH, - qos = ?QOS_0, - retain = false, - dup = false - }, - variable = #mqtt_packet_publish{topic_name = <<"topic">>, - packet_id = 10, - properties = Props - }, - payload = <<"payload">> - }, + Pkt = #mqtt_packet{ + header = #mqtt_packet_header{ + type = ?PUBLISH, + qos = ?QOS_0, + retain = false, + dup = false + }, + variable = #mqtt_packet_publish{ + topic_name = <<"topic">>, + packet_id = 10, + properties = Props + }, + payload = <<"payload">> + }, Msg = emqx_message:make(<<"clientid">>, ?QOS_0, <<"topic">>, <<"payload">>), Msg1 = emqx_message:set_header(properties, #{'Subscription-Identifier' => 1}, Msg), ?assertEqual(Pkt, emqx_message:to_packet(10, Msg1)). t_to_map(_) -> Msg = emqx_message:make(<<"clientid">>, ?QOS_1, <<"topic">>, <<"payload">>), - List = [{id, emqx_message:id(Msg)}, - {qos, ?QOS_1}, - {from, <<"clientid">>}, - {flags, #{}}, - {headers, #{}}, - {topic, <<"topic">>}, - {payload, <<"payload">>}, - {timestamp, emqx_message:timestamp(Msg)}, - {extra, []} - ], + List = [ + {id, emqx_message:id(Msg)}, + {qos, ?QOS_1}, + {from, <<"clientid">>}, + {flags, #{}}, + {headers, #{}}, + {topic, <<"topic">>}, + {payload, <<"payload">>}, + {timestamp, emqx_message:timestamp(Msg)}, + {extra, []} + ], ?assertEqual(List, emqx_message:to_list(Msg)), ?assertEqual(maps:from_list(List), emqx_message:to_map(Msg)). t_from_map(_) -> Msg = emqx_message:make(<<"clientid">>, ?QOS_1, <<"topic">>, <<"payload">>), - Map = #{id => emqx_message:id(Msg), - qos => ?QOS_1, - from => <<"clientid">>, - flags => #{}, - headers => #{}, - topic => <<"topic">>, - payload => <<"payload">>, - timestamp => emqx_message:timestamp(Msg), - extra => [] - }, + Map = #{ + id => emqx_message:id(Msg), + qos => ?QOS_1, + from => <<"clientid">>, + flags => #{}, + headers => #{}, + topic => <<"topic">>, + payload => <<"payload">>, + timestamp => emqx_message:timestamp(Msg), + extra => [] + }, ?assertEqual(Map, emqx_message:to_map(Msg)), ?assertEqual(Msg, emqx_message:from_map(emqx_message:to_map(Msg))). diff --git a/apps/emqx/test/emqx_metrics_SUITE.erl b/apps/emqx/test/emqx_metrics_SUITE.erl index 95f9a0002..80151d8b0 100644 --- a/apps/emqx/test/emqx_metrics_SUITE.erl +++ b/apps/emqx/test/emqx_metrics_SUITE.erl @@ -26,137 +26,144 @@ all() -> emqx_common_test_helpers:all(?MODULE). t_new(_) -> with_metrics_server( - fun() -> - ok = emqx_metrics:new('metrics.test'), - ok = emqx_metrics:new('metrics.test'), - 0 = emqx_metrics:val('metrics.test'), - ok = emqx_metrics:inc('metrics.test'), - 1 = emqx_metrics:val('metrics.test'), - ok = emqx_metrics:new(counter, 'metrics.test.cnt'), - 0 = emqx_metrics:val('metrics.test.cnt'), - ok = emqx_metrics:inc('metrics.test.cnt'), - 1 = emqx_metrics:val('metrics.test.cnt'), - ok = emqx_metrics:new(gauge, 'metrics.test.total'), - 0 = emqx_metrics:val('metrics.test.total'), - ok = emqx_metrics:inc('metrics.test.total'), - 1 = emqx_metrics:val('metrics.test.total') - end). 
+ fun() -> + ok = emqx_metrics:new('metrics.test'), + ok = emqx_metrics:new('metrics.test'), + 0 = emqx_metrics:val('metrics.test'), + ok = emqx_metrics:inc('metrics.test'), + 1 = emqx_metrics:val('metrics.test'), + ok = emqx_metrics:new(counter, 'metrics.test.cnt'), + 0 = emqx_metrics:val('metrics.test.cnt'), + ok = emqx_metrics:inc('metrics.test.cnt'), + 1 = emqx_metrics:val('metrics.test.cnt'), + ok = emqx_metrics:new(gauge, 'metrics.test.total'), + 0 = emqx_metrics:val('metrics.test.total'), + ok = emqx_metrics:inc('metrics.test.total'), + 1 = emqx_metrics:val('metrics.test.total') + end + ). t_ensure(_) -> with_metrics_server( - fun() -> - ok = emqx_metrics:ensure('metrics.test'), - ok = emqx_metrics:ensure('metrics.test'), - 0 = emqx_metrics:val('metrics.test'), - ok = emqx_metrics:inc('metrics.test'), - 1 = emqx_metrics:val('metrics.test'), - ok = emqx_metrics:ensure(counter, 'metrics.test.cnt'), - 0 = emqx_metrics:val('metrics.test.cnt'), - ok = emqx_metrics:inc('metrics.test.cnt'), - 1 = emqx_metrics:val('metrics.test.cnt'), - ok = emqx_metrics:ensure(gauge, 'metrics.test.total'), - 0 = emqx_metrics:val('metrics.test.total'), - ok = emqx_metrics:inc('metrics.test.total'), - 1 = emqx_metrics:val('metrics.test.total') - end). + fun() -> + ok = emqx_metrics:ensure('metrics.test'), + ok = emqx_metrics:ensure('metrics.test'), + 0 = emqx_metrics:val('metrics.test'), + ok = emqx_metrics:inc('metrics.test'), + 1 = emqx_metrics:val('metrics.test'), + ok = emqx_metrics:ensure(counter, 'metrics.test.cnt'), + 0 = emqx_metrics:val('metrics.test.cnt'), + ok = emqx_metrics:inc('metrics.test.cnt'), + 1 = emqx_metrics:val('metrics.test.cnt'), + ok = emqx_metrics:ensure(gauge, 'metrics.test.total'), + 0 = emqx_metrics:val('metrics.test.total'), + ok = emqx_metrics:inc('metrics.test.total'), + 1 = emqx_metrics:val('metrics.test.total') + end + ). t_all(_) -> with_metrics_server( - fun() -> - Metrics = emqx_metrics:all(), - ?assert(length(Metrics) > 50) - end). + fun() -> + Metrics = emqx_metrics:all(), + ?assert(length(Metrics) > 50) + end + ). t_inc_dec(_) -> with_metrics_server( - fun() -> - ?assertEqual(0, emqx_metrics:val('bytes.received')), - ok = emqx_metrics:inc('bytes.received'), - ok = emqx_metrics:inc('bytes.received', 2), - ok = emqx_metrics:inc('bytes.received', 2), - ?assertEqual(5, emqx_metrics:val('bytes.received')) - end). + fun() -> + ?assertEqual(0, emqx_metrics:val('bytes.received')), + ok = emqx_metrics:inc('bytes.received'), + ok = emqx_metrics:inc('bytes.received', 2), + ok = emqx_metrics:inc('bytes.received', 2), + ?assertEqual(5, emqx_metrics:val('bytes.received')) + end + ). 
t_inc_recv(_) -> with_metrics_server( - fun() -> - ok = emqx_metrics:inc_recv(?PACKET(?CONNECT)), - ok = emqx_metrics:inc_recv(?PUBLISH_PACKET(0, 0)), - ok = emqx_metrics:inc_recv(?PUBLISH_PACKET(1, 0)), - ok = emqx_metrics:inc_recv(?PUBLISH_PACKET(2, 0)), - ok = emqx_metrics:inc_recv(?PUBLISH_PACKET(3, 0)), - ok = emqx_metrics:inc_recv(?PACKET(?PUBACK)), - ok = emqx_metrics:inc_recv(?PACKET(?PUBREC)), - ok = emqx_metrics:inc_recv(?PACKET(?PUBREL)), - ok = emqx_metrics:inc_recv(?PACKET(?PUBCOMP)), - ok = emqx_metrics:inc_recv(?PACKET(?SUBSCRIBE)), - ok = emqx_metrics:inc_recv(?PACKET(?UNSUBSCRIBE)), - ok = emqx_metrics:inc_recv(?PACKET(?PINGREQ)), - ok = emqx_metrics:inc_recv(?PACKET(?DISCONNECT)), - ok = emqx_metrics:inc_recv(?PACKET(?AUTH)), - ok = emqx_metrics:inc_recv(?PACKET(?RESERVED)), - ?assertEqual(15, emqx_metrics:val('packets.received')), - ?assertEqual(1, emqx_metrics:val('packets.connect.received')), - ?assertEqual(4, emqx_metrics:val('messages.received')), - ?assertEqual(1, emqx_metrics:val('messages.qos0.received')), - ?assertEqual(1, emqx_metrics:val('messages.qos1.received')), - ?assertEqual(1, emqx_metrics:val('messages.qos2.received')), - ?assertEqual(4, emqx_metrics:val('packets.publish.received')), - ?assertEqual(1, emqx_metrics:val('packets.puback.received')), - ?assertEqual(1, emqx_metrics:val('packets.pubrec.received')), - ?assertEqual(1, emqx_metrics:val('packets.pubrel.received')), - ?assertEqual(1, emqx_metrics:val('packets.pubcomp.received')), - ?assertEqual(1, emqx_metrics:val('packets.subscribe.received')), - ?assertEqual(1, emqx_metrics:val('packets.unsubscribe.received')), - ?assertEqual(1, emqx_metrics:val('packets.pingreq.received')), - ?assertEqual(1, emqx_metrics:val('packets.disconnect.received')), - ?assertEqual(1, emqx_metrics:val('packets.auth.received')) - end). 
+ fun() -> + ok = emqx_metrics:inc_recv(?PACKET(?CONNECT)), + ok = emqx_metrics:inc_recv(?PUBLISH_PACKET(0, 0)), + ok = emqx_metrics:inc_recv(?PUBLISH_PACKET(1, 0)), + ok = emqx_metrics:inc_recv(?PUBLISH_PACKET(2, 0)), + ok = emqx_metrics:inc_recv(?PUBLISH_PACKET(3, 0)), + ok = emqx_metrics:inc_recv(?PACKET(?PUBACK)), + ok = emqx_metrics:inc_recv(?PACKET(?PUBREC)), + ok = emqx_metrics:inc_recv(?PACKET(?PUBREL)), + ok = emqx_metrics:inc_recv(?PACKET(?PUBCOMP)), + ok = emqx_metrics:inc_recv(?PACKET(?SUBSCRIBE)), + ok = emqx_metrics:inc_recv(?PACKET(?UNSUBSCRIBE)), + ok = emqx_metrics:inc_recv(?PACKET(?PINGREQ)), + ok = emqx_metrics:inc_recv(?PACKET(?DISCONNECT)), + ok = emqx_metrics:inc_recv(?PACKET(?AUTH)), + ok = emqx_metrics:inc_recv(?PACKET(?RESERVED)), + ?assertEqual(15, emqx_metrics:val('packets.received')), + ?assertEqual(1, emqx_metrics:val('packets.connect.received')), + ?assertEqual(4, emqx_metrics:val('messages.received')), + ?assertEqual(1, emqx_metrics:val('messages.qos0.received')), + ?assertEqual(1, emqx_metrics:val('messages.qos1.received')), + ?assertEqual(1, emqx_metrics:val('messages.qos2.received')), + ?assertEqual(4, emqx_metrics:val('packets.publish.received')), + ?assertEqual(1, emqx_metrics:val('packets.puback.received')), + ?assertEqual(1, emqx_metrics:val('packets.pubrec.received')), + ?assertEqual(1, emqx_metrics:val('packets.pubrel.received')), + ?assertEqual(1, emqx_metrics:val('packets.pubcomp.received')), + ?assertEqual(1, emqx_metrics:val('packets.subscribe.received')), + ?assertEqual(1, emqx_metrics:val('packets.unsubscribe.received')), + ?assertEqual(1, emqx_metrics:val('packets.pingreq.received')), + ?assertEqual(1, emqx_metrics:val('packets.disconnect.received')), + ?assertEqual(1, emqx_metrics:val('packets.auth.received')) + end + ). t_inc_sent(_) -> with_metrics_server( - fun() -> - ok = emqx_metrics:inc_sent(?CONNACK_PACKET(0)), - ok = emqx_metrics:inc_sent(?PUBLISH_PACKET(0, 0)), - ok = emqx_metrics:inc_sent(?PUBLISH_PACKET(1, 0)), - ok = emqx_metrics:inc_sent(?PUBLISH_PACKET(2, 0)), - ok = emqx_metrics:inc_sent(?PUBACK_PACKET(0, 0)), - ok = emqx_metrics:inc_sent(?PUBREC_PACKET(3, 0)), - ok = emqx_metrics:inc_sent(?PACKET(?PUBREL)), - ok = emqx_metrics:inc_sent(?PACKET(?PUBCOMP)), - ok = emqx_metrics:inc_sent(?PACKET(?SUBACK)), - ok = emqx_metrics:inc_sent(?PACKET(?UNSUBACK)), - ok = emqx_metrics:inc_sent(?PACKET(?PINGRESP)), - ok = emqx_metrics:inc_sent(?PACKET(?DISCONNECT)), - ok = emqx_metrics:inc_sent(?PACKET(?AUTH)), - ?assertEqual(13, emqx_metrics:val('packets.sent')), - ?assertEqual(1, emqx_metrics:val('packets.connack.sent')), - ?assertEqual(3, emqx_metrics:val('messages.sent')), - ?assertEqual(1, emqx_metrics:val('messages.qos0.sent')), - ?assertEqual(1, emqx_metrics:val('messages.qos1.sent')), - ?assertEqual(1, emqx_metrics:val('messages.qos2.sent')), - ?assertEqual(3, emqx_metrics:val('packets.publish.sent')), - ?assertEqual(1, emqx_metrics:val('packets.puback.sent')), - ?assertEqual(1, emqx_metrics:val('packets.pubrec.sent')), - ?assertEqual(1, emqx_metrics:val('packets.pubrel.sent')), - ?assertEqual(1, emqx_metrics:val('packets.pubcomp.sent')), - ?assertEqual(1, emqx_metrics:val('packets.suback.sent')), - ?assertEqual(1, emqx_metrics:val('packets.unsuback.sent')), - ?assertEqual(1, emqx_metrics:val('packets.pingresp.sent')), - ?assertEqual(1, emqx_metrics:val('packets.disconnect.sent')), - ?assertEqual(1, emqx_metrics:val('packets.auth.sent')) - end). 
+ fun() -> + ok = emqx_metrics:inc_sent(?CONNACK_PACKET(0)), + ok = emqx_metrics:inc_sent(?PUBLISH_PACKET(0, 0)), + ok = emqx_metrics:inc_sent(?PUBLISH_PACKET(1, 0)), + ok = emqx_metrics:inc_sent(?PUBLISH_PACKET(2, 0)), + ok = emqx_metrics:inc_sent(?PUBACK_PACKET(0, 0)), + ok = emqx_metrics:inc_sent(?PUBREC_PACKET(3, 0)), + ok = emqx_metrics:inc_sent(?PACKET(?PUBREL)), + ok = emqx_metrics:inc_sent(?PACKET(?PUBCOMP)), + ok = emqx_metrics:inc_sent(?PACKET(?SUBACK)), + ok = emqx_metrics:inc_sent(?PACKET(?UNSUBACK)), + ok = emqx_metrics:inc_sent(?PACKET(?PINGRESP)), + ok = emqx_metrics:inc_sent(?PACKET(?DISCONNECT)), + ok = emqx_metrics:inc_sent(?PACKET(?AUTH)), + ?assertEqual(13, emqx_metrics:val('packets.sent')), + ?assertEqual(1, emqx_metrics:val('packets.connack.sent')), + ?assertEqual(3, emqx_metrics:val('messages.sent')), + ?assertEqual(1, emqx_metrics:val('messages.qos0.sent')), + ?assertEqual(1, emqx_metrics:val('messages.qos1.sent')), + ?assertEqual(1, emqx_metrics:val('messages.qos2.sent')), + ?assertEqual(3, emqx_metrics:val('packets.publish.sent')), + ?assertEqual(1, emqx_metrics:val('packets.puback.sent')), + ?assertEqual(1, emqx_metrics:val('packets.pubrec.sent')), + ?assertEqual(1, emqx_metrics:val('packets.pubrel.sent')), + ?assertEqual(1, emqx_metrics:val('packets.pubcomp.sent')), + ?assertEqual(1, emqx_metrics:val('packets.suback.sent')), + ?assertEqual(1, emqx_metrics:val('packets.unsuback.sent')), + ?assertEqual(1, emqx_metrics:val('packets.pingresp.sent')), + ?assertEqual(1, emqx_metrics:val('packets.disconnect.sent')), + ?assertEqual(1, emqx_metrics:val('packets.auth.sent')) + end + ). t_trans(_) -> with_metrics_server( - fun() -> - ok = emqx_metrics:trans(inc, 'bytes.received'), - ok = emqx_metrics:trans(inc, 'bytes.received', 2), - ?assertEqual(0, emqx_metrics:val('bytes.received')), - ok = emqx_metrics:commit(), - ?assertEqual(3, emqx_metrics:val('bytes.received')), - ok = emqx_metrics:commit() - end). + fun() -> + ok = emqx_metrics:trans(inc, 'bytes.received'), + ok = emqx_metrics:trans(inc, 'bytes.received', 2), + ?assertEqual(0, emqx_metrics:val('bytes.received')), + ok = emqx_metrics:commit(), + ?assertEqual(3, emqx_metrics:val('bytes.received')), + ok = emqx_metrics:commit() + end + ). with_metrics_server(Fun) -> {ok, _} = emqx_metrics:start_link(), diff --git a/apps/emqx/test/emqx_misc_SUITE.erl b/apps/emqx/test/emqx_misc_SUITE.erl index 9aad9a5da..b85bbcb0e 100644 --- a/apps/emqx/test/emqx_misc_SUITE.erl +++ b/apps/emqx/test/emqx_misc_SUITE.erl @@ -21,32 +21,40 @@ -include_lib("eunit/include/eunit.hrl"). --define(SOCKOPTS, [binary, - {packet, raw}, - {reuseaddr, true}, - {backlog, 512}, - {nodelay, true} - ]). +-define(SOCKOPTS, [ + binary, + {packet, raw}, + {reuseaddr, true}, + {backlog, 512}, + {nodelay, true} +]). all() -> emqx_common_test_helpers:all(?MODULE). t_merge_opts(_) -> - Opts = emqx_misc:merge_opts(?SOCKOPTS, [raw, - binary, - {backlog, 1024}, - {nodelay, false}, - {max_clients, 1024}, - {acceptors, 16} - ]), + Opts = emqx_misc:merge_opts(?SOCKOPTS, [ + raw, + binary, + {backlog, 1024}, + {nodelay, false}, + {max_clients, 1024}, + {acceptors, 16} + ]), ?assertEqual(1024, proplists:get_value(backlog, Opts)), ?assertEqual(1024, proplists:get_value(max_clients, Opts)), - ?assertEqual([binary, raw, - {acceptors, 16}, - {backlog, 1024}, - {max_clients, 1024}, - {nodelay, false}, - {packet, raw}, - {reuseaddr, true}], lists:sort(Opts)). 
+ ?assertEqual( + [ + binary, + raw, + {acceptors, 16}, + {backlog, 1024}, + {max_clients, 1024}, + {nodelay, false}, + {packet, raw}, + {reuseaddr, true} + ], + lists:sort(Opts) + ). t_maybe_apply(_) -> ?assertEqual(undefined, emqx_misc:maybe_apply(fun(A) -> A end, undefined)), @@ -54,19 +62,25 @@ t_maybe_apply(_) -> t_run_fold(_) -> ?assertEqual(1, emqx_misc:run_fold([], 1, state)), - Add = fun(I, St) -> I+St end, - Mul = fun(I, St) -> I*St end, + Add = fun(I, St) -> I + St end, + Mul = fun(I, St) -> I * St end, ?assertEqual(6, emqx_misc:run_fold([Add, Mul], 1, 2)). t_pipeline(_) -> ?assertEqual({ok, input, state}, emqx_misc:pipeline([], input, state)), - Funs = [fun(_I, _St) -> ok end, - fun(_I, St) -> {ok, St+1} end, - fun(I, St) -> {ok, I+1, St+1} end, - fun(I, St) -> {ok, I*2, St*2} end], + Funs = [ + fun(_I, _St) -> ok end, + fun(_I, St) -> {ok, St + 1} end, + fun(I, St) -> {ok, I + 1, St + 1} end, + fun(I, St) -> {ok, I * 2, St * 2} end + ], ?assertEqual({ok, 4, 6}, emqx_misc:pipeline(Funs, 1, 1)), - ?assertEqual({error, undefined, 1}, emqx_misc:pipeline([fun(_I) -> {error, undefined} end], 1, 1)), - ?assertEqual({error, undefined, 2}, emqx_misc:pipeline([fun(_I, _St) -> {error, undefined, 2} end], 1, 1)). + ?assertEqual( + {error, undefined, 1}, emqx_misc:pipeline([fun(_I) -> {error, undefined} end], 1, 1) + ), + ?assertEqual( + {error, undefined, 2}, emqx_misc:pipeline([fun(_I, _St) -> {error, undefined, 2} end], 1, 1) + ). t_start_timer(_) -> TRef = emqx_misc:start_timer(1, tmsg), @@ -88,20 +102,24 @@ t_proc_stats(_) -> timer:sleep(10), ?assertEqual([], emqx_misc:proc_stats(Pid1)), Pid2 = spawn(fun() -> - ?assertMatch([{mailbox_len, 0}|_], emqx_misc:proc_stats()), - timer:sleep(200) - end), + ?assertMatch([{mailbox_len, 0} | _], emqx_misc:proc_stats()), + timer:sleep(200) + end), timer:sleep(10), Pid2 ! msg, timer:sleep(10), - ?assertMatch([{mailbox_len, 1}|_], emqx_misc:proc_stats(Pid2)). + ?assertMatch([{mailbox_len, 1} | _], emqx_misc:proc_stats(Pid2)). t_drain_deliver(_) -> self() ! {deliver, t1, m1}, self() ! {deliver, t2, m2}, - ?assertEqual([{deliver, t1, m1}, - {deliver, t2, m2} - ], emqx_misc:drain_deliver(2)). + ?assertEqual( + [ + {deliver, t1, m1}, + {deliver, t2, m2} + ], + emqx_misc:drain_deliver(2) + ). t_drain_down(_) -> {Pid1, _Ref1} = erlang:spawn_monitor(fun() -> ok end), @@ -113,15 +131,18 @@ t_drain_down(_) -> t_index_of(_) -> try emqx_misc:index_of(a, []) of _ -> ct:fail(should_throw_error) - catch error:Reason -> - ?assertEqual(badarg, Reason) + catch + error:Reason -> + ?assertEqual(badarg, Reason) end, ?assertEqual(3, emqx_misc:index_of(a, [b, c, a, e, f])). t_check(_) -> - Policy = #{max_message_queue_len => 10, - max_heap_size => 1024 * 1024 * 8, - enable => true}, + Policy = #{ + max_message_queue_len => 10, + max_heap_size => 1024 * 1024 * 8, + enable => true + }, [self() ! {msg, I} || I <- lists:seq(1, 5)], ?assertEqual(ok, emqx_misc:check_oom(Policy)), [self() ! {msg, I} || I <- lists:seq(1, 6)], @@ -132,10 +153,9 @@ drain() -> drain(Acc) -> receive - Msg -> drain([Msg|Acc]) - after - 0 -> - lists:reverse(Acc) + Msg -> drain([Msg | Acc]) + after 0 -> + lists:reverse(Acc) end. t_rand_seed(_) -> diff --git a/apps/emqx/test/emqx_mountpoint_SUITE.erl b/apps/emqx/test/emqx_mountpoint_SUITE.erl index b4ad9efa5..63c5fb888 100644 --- a/apps/emqx/test/emqx_mountpoint_SUITE.erl +++ b/apps/emqx/test/emqx_mountpoint_SUITE.erl @@ -19,11 +19,14 @@ -compile(export_all). -compile(nowarn_export_all). 
--import(emqx_mountpoint, - [ mount/2 - , unmount/2 - , replvar/2 - ]). +-import( + emqx_mountpoint, + [ + mount/2, + unmount/2, + replvar/2 + ] +). -include_lib("emqx/include/emqx.hrl"). -include_lib("eunit/include/eunit.hrl"). @@ -36,12 +39,18 @@ t_mount(_) -> ?assertEqual(<<"topic">>, mount(undefined, <<"topic">>)), ?assertEqual(Msg, mount(undefined, Msg)), ?assertEqual(TopicFilters, mount(undefined, TopicFilters)), - ?assertEqual(<<"device/1/topic">>, - mount(<<"device/1/">>, <<"topic">>)), - ?assertEqual(Msg#message{topic = <<"device/1/topic">>}, - mount(<<"device/1/">>, Msg)), - ?assertEqual([{<<"device/1/topic">>, #{qos => 2}}], - mount(<<"device/1/">>, TopicFilters)). + ?assertEqual( + <<"device/1/topic">>, + mount(<<"device/1/">>, <<"topic">>) + ), + ?assertEqual( + Msg#message{topic = <<"device/1/topic">>}, + mount(<<"device/1/">>, Msg) + ), + ?assertEqual( + [{<<"device/1/topic">>, #{qos => 2}}], + mount(<<"device/1/">>, TopicFilters) + ). t_unmount(_) -> Msg = emqx_message:make(<<"clientid">>, <<"device/1/topic">>, <<"payload">>), @@ -54,14 +63,23 @@ t_unmount(_) -> t_replvar(_) -> ?assertEqual(undefined, replvar(undefined, #{})), - ?assertEqual(<<"mount/user/clientid/">>, - replvar(<<"mount/${username}/${clientid}/">>, - #{clientid => <<"clientid">>, - username => <<"user">> - })), - ?assertEqual(<<"mount/${username}/clientid/">>, - replvar(<<"mount/${username}/${clientid}/">>, - #{clientid => <<"clientid">>, - username => undefined - })). - + ?assertEqual( + <<"mount/user/clientid/">>, + replvar( + <<"mount/${username}/${clientid}/">>, + #{ + clientid => <<"clientid">>, + username => <<"user">> + } + ) + ), + ?assertEqual( + <<"mount/${username}/clientid/">>, + replvar( + <<"mount/${username}/${clientid}/">>, + #{ + clientid => <<"clientid">>, + username => undefined + } + ) + ). diff --git a/apps/emqx/test/emqx_mqtt_SUITE.erl b/apps/emqx/test/emqx_mqtt_SUITE.erl index 48821aba6..f43804991 100644 --- a/apps/emqx/test/emqx_mqtt_SUITE.erl +++ b/apps/emqx/test/emqx_mqtt_SUITE.erl @@ -24,10 +24,17 @@ -include_lib("common_test/include/ct.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). --define(STATS_KYES, [recv_pkt, recv_msg, send_pkt, send_msg, - recv_oct, recv_cnt, send_oct, send_cnt, - send_pend - ]). +-define(STATS_KYES, [ + recv_pkt, + recv_msg, + send_pkt, + send_msg, + recv_oct, + recv_cnt, + send_oct, + send_cnt, + send_pend +]). all() -> emqx_common_test_helpers:all(?MODULE). @@ -53,44 +60,53 @@ end_per_testcase(TestCase, Config) -> Config. t_conn_stats(_) -> - with_client(fun(CPid) -> - Stats = emqx_connection:stats(CPid), - ct:pal("==== stats: ~p", [Stats]), - [?assert(proplists:get_value(Key, Stats) >= 0) || Key <- ?STATS_KYES] - end, []). + with_client( + fun(CPid) -> + Stats = emqx_connection:stats(CPid), + ct:pal("==== stats: ~p", [Stats]), + [?assert(proplists:get_value(Key, Stats) >= 0) || Key <- ?STATS_KYES] + end, + [] + ). t_tcp_sock_passive(_) -> with_client(fun(CPid) -> CPid ! {tcp_passive, sock} end, []). t_message_expiry_interval(_) -> {CPublish, CControl} = message_expiry_interval_init(), - [message_expiry_interval_exipred(CPublish, CControl, QoS) || QoS <- [0,1,2]], + [message_expiry_interval_exipred(CPublish, CControl, QoS) || QoS <- [0, 1, 2]], emqtt:stop(CPublish), emqtt:stop(CControl). 
t_message_not_expiry_interval(_) -> {CPublish, CControl} = message_expiry_interval_init(), - [message_expiry_interval_not_exipred(CPublish, CControl, QoS) || QoS <- [0,1,2]], + [message_expiry_interval_not_exipred(CPublish, CControl, QoS) || QoS <- [0, 1, 2]], emqtt:stop(CPublish), emqtt:stop(CControl). message_expiry_interval_init() -> - {ok, CPublish} = emqtt:start_link([{proto_ver,v5}, - {clientid, <<"Client-Publish">>}, - {clean_start, false}, - {properties, #{'Session-Expiry-Interval' => 360}}]), - {ok, CVerify} = emqtt:start_link([{proto_ver,v5}, - {clientid, <<"Client-Verify">>}, - {clean_start, false}, - {properties, #{'Session-Expiry-Interval' => 360}}]), - {ok, CControl} = emqtt:start_link([{proto_ver,v5}, - {clientid, <<"Client-Control">>}, - {clean_start, false}, - {properties, #{'Session-Expiry-Interval' => 360}}]), + {ok, CPublish} = emqtt:start_link([ + {proto_ver, v5}, + {clientid, <<"Client-Publish">>}, + {clean_start, false}, + {properties, #{'Session-Expiry-Interval' => 360}} + ]), + {ok, CVerify} = emqtt:start_link([ + {proto_ver, v5}, + {clientid, <<"Client-Verify">>}, + {clean_start, false}, + {properties, #{'Session-Expiry-Interval' => 360}} + ]), + {ok, CControl} = emqtt:start_link([ + {proto_ver, v5}, + {clientid, <<"Client-Control">>}, + {clean_start, false}, + {properties, #{'Session-Expiry-Interval' => 360}} + ]), {ok, _} = emqtt:connect(CPublish), {ok, _} = emqtt:connect(CVerify), {ok, _} = emqtt:connect(CControl), - %% subscribe and disconnect Client-verify + %% subscribe and disconnect Client-verify emqtt:subscribe(CControl, <<"t/a">>, 1), emqtt:subscribe(CVerify, <<"t/a">>, 1), emqtt:stop(CVerify), @@ -99,11 +115,16 @@ message_expiry_interval_init() -> message_expiry_interval_exipred(CPublish, CControl, QoS) -> ct:pal("~p ~p", [?FUNCTION_NAME, QoS]), %% publish to t/a and waiting for the message expired - emqtt:publish(CPublish, <<"t/a">>, #{'Message-Expiry-Interval' => 1}, - <<"this will be purged in 1s">>, [{qos, QoS}]), + emqtt:publish( + CPublish, + <<"t/a">>, + #{'Message-Expiry-Interval' => 1}, + <<"this will be purged in 1s">>, + [{qos, QoS}] + ), %% CControl make sure publish already store in broker. 
receive - {publish,#{client_pid := CControl, topic := <<"t/a">>}} -> + {publish, #{client_pid := CControl, topic := <<"t/a">>}} -> ok after 1000 -> ct:fail(should_receive_publish) @@ -111,15 +132,17 @@ message_expiry_interval_exipred(CPublish, CControl, QoS) -> ct:sleep(1100), %% resume the session for Client-Verify - {ok, CVerify} = emqtt:start_link([{proto_ver,v5}, - {clientid, <<"Client-Verify">>}, - {clean_start, false}, - {properties, #{'Session-Expiry-Interval' => 360}}]), + {ok, CVerify} = emqtt:start_link([ + {proto_ver, v5}, + {clientid, <<"Client-Verify">>}, + {clean_start, false}, + {properties, #{'Session-Expiry-Interval' => 360}} + ]), {ok, _} = emqtt:connect(CVerify), %% verify Client-Verify could not receive the publish message receive - {publish,#{client_pid := CVerify, topic := <<"t/a">>}} -> + {publish, #{client_pid := CVerify, topic := <<"t/a">>}} -> ct:fail(should_have_expired) after 300 -> ok @@ -129,12 +152,17 @@ message_expiry_interval_exipred(CPublish, CControl, QoS) -> message_expiry_interval_not_exipred(CPublish, CControl, QoS) -> ct:pal("~p ~p", [?FUNCTION_NAME, QoS]), %% publish to t/a - emqtt:publish(CPublish, <<"t/a">>, #{'Message-Expiry-Interval' => 20}, - <<"this will be purged in 20s">>, [{qos, QoS}]), + emqtt:publish( + CPublish, + <<"t/a">>, + #{'Message-Expiry-Interval' => 20}, + <<"this will be purged in 20s">>, + [{qos, QoS}] + ), %% CControl make sure publish already store in broker. receive - {publish,#{client_pid := CControl, topic := <<"t/a">>}} -> + {publish, #{client_pid := CControl, topic := <<"t/a">>}} -> ok after 1000 -> ct:fail(should_receive_publish) @@ -143,23 +171,30 @@ message_expiry_interval_not_exipred(CPublish, CControl, QoS) -> %% wait for 1.2s and then resume the session for Client-Verify, the message should not expires %% as Message-Expiry-Interval = 20s ct:sleep(1200), - {ok, CVerify} = emqtt:start_link([{proto_ver,v5}, - {clientid, <<"Client-Verify">>}, - {clean_start, false}, - {properties, #{'Session-Expiry-Interval' => 360}}]), + {ok, CVerify} = emqtt:start_link([ + {proto_ver, v5}, + {clientid, <<"Client-Verify">>}, + {clean_start, false}, + {properties, #{'Session-Expiry-Interval' => 360}} + ]), {ok, _} = emqtt:connect(CVerify), - %% verify Client-Verify could receive the publish message and the Message-Expiry-Interval is set - receive - {publish,#{client_pid := CVerify, topic := <<"t/a">>, - properties := #{'Message-Expiry-Interval' := MsgExpItvl}}} - when MsgExpItvl =< 20 -> ok; - {publish, _} = Msg -> - ct:fail({incorrect_publish, Msg}) - after 300 -> - ct:fail(no_publish_received) - end, - emqtt:stop(CVerify). + %% verify Client-Verify could receive the publish message and the Message-Expiry-Interval is set + receive + {publish, #{ + client_pid := CVerify, + topic := <<"t/a">>, + properties := #{'Message-Expiry-Interval' := MsgExpItvl} + }} when + MsgExpItvl =< 20 + -> + ok; + {publish, _} = Msg -> + ct:fail({incorrect_publish, Msg}) + after 300 -> + ct:fail(no_publish_received) + end, + emqtt:stop(CVerify). 
with_client(TestFun, _Options) -> ClientId = <<"t_conn">>, @@ -167,7 +202,8 @@ with_client(TestFun, _Options) -> {ok, _} = emqtt:connect(C), timer:sleep(50), case emqx_cm:lookup_channels(ClientId) of - [] -> ct:fail({client_not_started, ClientId}); + [] -> + ct:fail({client_not_started, ClientId}); [ChanPid] -> TestFun(ChanPid), emqtt:stop(C) @@ -191,30 +227,40 @@ t_async_set_keepalive(_) -> do_async_set_keepalive() -> ClientID = <<"client-tcp-keepalive">>, - {ok, Client} = emqtt:start_link([{host, "localhost"}, - {proto_ver,v5}, - {clientid, ClientID}, - {clean_start, false}]), + {ok, Client} = emqtt:start_link([ + {host, "localhost"}, + {proto_ver, v5}, + {clientid, ClientID}, + {clean_start, false} + ]), {ok, _} = emqtt:connect(Client), - {ok, _} = ?block_until(#{?snk_kind := insert_channel_info, - client_id := ClientID}, 2000, 100), + {ok, _} = ?block_until( + #{ + ?snk_kind := insert_channel_info, + client_id := ClientID + }, + 2000, + 100 + ), [Pid] = emqx_cm:lookup_channels(ClientID), State = emqx_connection:get_state(Pid), Transport = maps:get(transport, State), Socket = maps:get(socket, State), ?assert(is_port(Socket)), Opts = [{raw, 6, 4, 4}, {raw, 6, 5, 4}, {raw, 6, 6, 4}], - {ok, [ {raw, 6, 4, <<Idle:32/native>>} , {raw, 6, 5, <<Interval:32/native>>} , {raw, 6, 6, <<Probes:32/native>>} ]} = Transport:getopts(Socket, Opts), + {ok, [ + {raw, 6, 4, <<Idle:32/native>>}, + {raw, 6, 5, <<Interval:32/native>>}, + {raw, 6, 6, <<Probes:32/native>>} + ]} = Transport:getopts(Socket, Opts), ct:pal("Idle=~p, Interval=~p, Probes=~p", [Idle, Interval, Probes]), emqx_connection:async_set_keepalive(Pid, Idle + 1, Interval + 1, Probes + 1), {ok, _} = ?block_until(#{?snk_kind := "custom_socket_options_successfully"}, 1000), - {ok, [ {raw, 6, 4, <<NewIdle:32/native>>} , {raw, 6, 5, <<NewInterval:32/native>>} , {raw, 6, 6, <<NewProbes:32/native>>} ]} = Transport:getopts(Socket, Opts), + {ok, [ + {raw, 6, 4, <<NewIdle:32/native>>}, + {raw, 6, 5, <<NewInterval:32/native>>}, + {raw, 6, 6, <<NewProbes:32/native>>} + ]} = Transport:getopts(Socket, Opts), ?assertEqual(NewIdle, Idle + 1), ?assertEqual(NewInterval, Interval + 1), ?assertEqual(NewProbes, Probes + 1), diff --git a/apps/emqx/test/emqx_mqtt_caps_SUITE.erl b/apps/emqx/test/emqx_mqtt_caps_SUITE.erl index 34b1c123a..07a412bf4 100644 --- a/apps/emqx/test/emqx_mqtt_caps_SUITE.erl +++ b/apps/emqx/test/emqx_mqtt_caps_SUITE.erl @@ -31,30 +31,41 @@ t_check_pub(_) -> timer:sleep(50), ok = emqx_mqtt_caps:check_pub(default, #{qos => ?QOS_1, retain => false}), PubFlags1 = #{qos => ?QOS_2, retain => false}, - ?assertEqual({error, ?RC_QOS_NOT_SUPPORTED}, - emqx_mqtt_caps:check_pub(default, PubFlags1)), + ?assertEqual( + {error, ?RC_QOS_NOT_SUPPORTED}, + emqx_mqtt_caps:check_pub(default, PubFlags1) + ), PubFlags2 = #{qos => ?QOS_1, retain => true}, - ?assertEqual({error, ?RC_RETAIN_NOT_SUPPORTED}, - emqx_mqtt_caps:check_pub(default, PubFlags2)), + ?assertEqual( + {error, ?RC_RETAIN_NOT_SUPPORTED}, + emqx_mqtt_caps:check_pub(default, PubFlags2) + ), emqx_config:put([zones], OldConf). 
t_check_sub(_) -> OldConf = emqx:get_config([zones]), - SubOpts = #{rh => 0, - rap => 0, - nl => 0, - qos => ?QOS_2 - }, + SubOpts = #{ + rh => 0, + rap => 0, + nl => 0, + qos => ?QOS_2 + }, emqx_config:put_zone_conf(default, [mqtt, max_topic_levels], 2), emqx_config:put_zone_conf(default, [mqtt, max_qos_allowed], ?QOS_1), emqx_config:put_zone_conf(default, [mqtt, shared_subscription], false), emqx_config:put_zone_conf(default, [mqtt, wildcard_subscription], false), timer:sleep(50), ok = emqx_mqtt_caps:check_sub(default, <<"topic">>, SubOpts), - ?assertEqual({error, ?RC_TOPIC_FILTER_INVALID}, - emqx_mqtt_caps:check_sub(default, <<"a/b/c/d">>, SubOpts)), - ?assertEqual({error, ?RC_WILDCARD_SUBSCRIPTIONS_NOT_SUPPORTED}, - emqx_mqtt_caps:check_sub(default, <<"+/#">>, SubOpts)), - ?assertEqual({error, ?RC_SHARED_SUBSCRIPTIONS_NOT_SUPPORTED}, - emqx_mqtt_caps:check_sub(default, <<"topic">>, SubOpts#{share => true})), + ?assertEqual( + {error, ?RC_TOPIC_FILTER_INVALID}, + emqx_mqtt_caps:check_sub(default, <<"a/b/c/d">>, SubOpts) + ), + ?assertEqual( + {error, ?RC_WILDCARD_SUBSCRIPTIONS_NOT_SUPPORTED}, + emqx_mqtt_caps:check_sub(default, <<"+/#">>, SubOpts) + ), + ?assertEqual( + {error, ?RC_SHARED_SUBSCRIPTIONS_NOT_SUPPORTED}, + emqx_mqtt_caps:check_sub(default, <<"topic">>, SubOpts#{share => true}) + ), emqx_config:put([zones], OldConf). diff --git a/apps/emqx/test/emqx_mqtt_props_SUITE.erl b/apps/emqx/test/emqx_mqtt_props_SUITE.erl index ff19bb07f..0b9b90a67 100644 --- a/apps/emqx/test/emqx_mqtt_props_SUITE.erl +++ b/apps/emqx/test/emqx_mqtt_props_SUITE.erl @@ -26,58 +26,75 @@ all() -> emqx_common_test_helpers:all(?MODULE). t_id(_) -> foreach_prop( - fun({Id, Prop}) -> - ?assertEqual(Id, emqx_mqtt_props:id(element(1, Prop))) - end), + fun({Id, Prop}) -> + ?assertEqual(Id, emqx_mqtt_props:id(element(1, Prop))) + end + ), ?assertError({bad_property, 'Bad-Property'}, emqx_mqtt_props:id('Bad-Property')). t_name(_) -> foreach_prop( - fun({Id, Prop}) -> - ?assertEqual(emqx_mqtt_props:name(Id), element(1, Prop)) - end), + fun({Id, Prop}) -> + ?assertEqual(emqx_mqtt_props:name(Id), element(1, Prop)) + end + ), ?assertError({unsupported_property, 16#FF}, emqx_mqtt_props:name(16#FF)). t_filter(_) -> - ConnProps = #{'Session-Expiry-Interval' => 1, - 'Maximum-Packet-Size' => 255 - }, - ?assertEqual(ConnProps, - emqx_mqtt_props:filter(?CONNECT, ConnProps)), - PubProps = #{'Payload-Format-Indicator' => 6, - 'Message-Expiry-Interval' => 300, - 'Session-Expiry-Interval' => 300 - }, - ?assertEqual(#{'Payload-Format-Indicator' => 6, - 'Message-Expiry-Interval' => 300 - }, - emqx_mqtt_props:filter(?PUBLISH, PubProps)). + ConnProps = #{ + 'Session-Expiry-Interval' => 1, + 'Maximum-Packet-Size' => 255 + }, + ?assertEqual( + ConnProps, + emqx_mqtt_props:filter(?CONNECT, ConnProps) + ), + PubProps = #{ + 'Payload-Format-Indicator' => 6, + 'Message-Expiry-Interval' => 300, + 'Session-Expiry-Interval' => 300 + }, + ?assertEqual( + #{ + 'Payload-Format-Indicator' => 6, + 'Message-Expiry-Interval' => 300 + }, + emqx_mqtt_props:filter(?PUBLISH, PubProps) + ). t_validate(_) -> - ConnProps = #{'Session-Expiry-Interval' => 1, - 'Maximum-Packet-Size' => 255 - }, + ConnProps = #{ + 'Session-Expiry-Interval' => 1, + 'Maximum-Packet-Size' => 255 + }, ok = emqx_mqtt_props:validate(ConnProps), BadProps = #{'Unknown-Property' => 10}, - ?assertError({bad_property,'Unknown-Property'}, - emqx_mqtt_props:validate(BadProps)). + ?assertError( + {bad_property, 'Unknown-Property'}, + emqx_mqtt_props:validate(BadProps) + ). 
t_validate_value(_) -> ok = emqx_mqtt_props:validate(#{'Correlation-Data' => <<"correlation-id">>}), ok = emqx_mqtt_props:validate(#{'Reason-String' => <<"Unknown Reason">>}), ok = emqx_mqtt_props:validate(#{'User-Property' => {<<"Prop">>, <<"Val">>}}), ok = emqx_mqtt_props:validate(#{'User-Property' => [{<<"Prop">>, <<"Val">>}]}), - ?assertError({bad_property_value, {'Payload-Format-Indicator', 16#FFFF}}, - emqx_mqtt_props:validate(#{'Payload-Format-Indicator' => 16#FFFF})), - ?assertError({bad_property_value, {'Server-Keep-Alive', 16#FFFFFF}}, - emqx_mqtt_props:validate(#{'Server-Keep-Alive' => 16#FFFFFF})), - ?assertError({bad_property_value, {'Will-Delay-Interval', -16#FF}}, - emqx_mqtt_props:validate(#{'Will-Delay-Interval' => -16#FF})). + ?assertError( + {bad_property_value, {'Payload-Format-Indicator', 16#FFFF}}, + emqx_mqtt_props:validate(#{'Payload-Format-Indicator' => 16#FFFF}) + ), + ?assertError( + {bad_property_value, {'Server-Keep-Alive', 16#FFFFFF}}, + emqx_mqtt_props:validate(#{'Server-Keep-Alive' => 16#FFFFFF}) + ), + ?assertError( + {bad_property_value, {'Will-Delay-Interval', -16#FF}}, + emqx_mqtt_props:validate(#{'Will-Delay-Interval' => -16#FF}) + ). foreach_prop(Fun) -> lists:foreach(Fun, maps:to_list(emqx_mqtt_props:all())). - % t_all(_) -> % error('TODO'). diff --git a/apps/emqx/test/emqx_mqtt_protocol_v5_SUITE.erl b/apps/emqx/test/emqx_mqtt_protocol_v5_SUITE.erl index 13b2ac855..e2fe474f8 100644 --- a/apps/emqx/test/emqx_mqtt_protocol_v5_SUITE.erl +++ b/apps/emqx/test/emqx_mqtt_protocol_v5_SUITE.erl @@ -27,29 +27,43 @@ -import(lists, [nth/2]). --define(TOPICS, [<<"TopicA">>, <<"TopicA/B">>, <<"Topic/C">>, <<"TopicA/C">>, - <<"/TopicA">>]). +-define(TOPICS, [ + <<"TopicA">>, + <<"TopicA/B">>, + <<"Topic/C">>, + <<"TopicA/C">>, + <<"/TopicA">> +]). --define(WILD_TOPICS, [<<"TopicA/+">>, <<"+/C">>, <<"#">>, <<"/#">>, <<"/+">>, - <<"+/+">>, <<"TopicA/#">>]). +-define(WILD_TOPICS, [ + <<"TopicA/+">>, + <<"+/C">>, + <<"#">>, + <<"/#">>, + <<"/+">>, + <<"+/+">>, + <<"TopicA/#">> +]). all() -> - [ {group, tcp} - , {group, quic} + [ + {group, tcp}, + {group, quic} ]. groups() -> TCs = emqx_common_test_helpers:all(?MODULE), - [ {tcp, [], TCs} - , {quic, [], TCs} + [ + {tcp, [], TCs}, + {quic, [], TCs} ]. init_per_group(tcp, Config) -> emqx_common_test_helpers:start_apps([]), - [ {port, 1883}, {conn_fun, connect} | Config]; + [{port, 1883}, {conn_fun, connect} | Config]; init_per_group(quic, Config) -> emqx_common_test_helpers:start_apps([]), - [ {port, 14567}, {conn_fun, quic_connect} | Config]; + [{port, 14567}, {conn_fun, quic_connect} | Config]; init_per_group(_, Config) -> emqx_common_test_helpers:stop_apps([]), Config. @@ -94,7 +108,7 @@ receive_messages(0, Msgs) -> receive_messages(Count, Msgs) -> receive {publish, Msg} -> - receive_messages(Count-1, [Msg|Msgs]); + receive_messages(Count - 1, [Msg | Msgs]); _Other -> receive_messages(Count, Msgs) after 1000 -> @@ -113,8 +127,7 @@ waiting_client_process_exit(C) -> receive {'EXIT', C, Reason} -> Reason; _Oth -> error({got_another_message, _Oth}) - after - 1000 -> error({waiting_timeout, C}) + after 1000 -> error({waiting_timeout, C}) end. 
clean_retained(Topic, Config) -> @@ -148,25 +161,40 @@ t_basic_test(Config) -> t_connect_clean_start(Config) -> ConnFun = ?config(conn_fun, Config), process_flag(trap_exit, true), - {ok, Client1} = emqtt:start_link([{clientid, <<"t_connect_clean_start">>}, - {proto_ver, v5},{clean_start, true} | Config]), + {ok, Client1} = emqtt:start_link([ + {clientid, <<"t_connect_clean_start">>}, + {proto_ver, v5}, + {clean_start, true} + | Config + ]), {ok, _} = emqtt:ConnFun(Client1), - ?assertEqual(0, client_info(session_present, Client1)), %% [MQTT-3.1.2-4] + %% [MQTT-3.1.2-4] + ?assertEqual(0, client_info(session_present, Client1)), ok = emqtt:pause(Client1), - {ok, Client2} = emqtt:start_link([{clientid, <<"t_connect_clean_start">>}, - {proto_ver, v5},{clean_start, false} | Config]), + {ok, Client2} = emqtt:start_link([ + {clientid, <<"t_connect_clean_start">>}, + {proto_ver, v5}, + {clean_start, false} + | Config + ]), {ok, _} = emqtt:ConnFun(Client2), - ?assertEqual(1, client_info(session_present, Client2)), %% [MQTT-3.1.2-5] + %% [MQTT-3.1.2-5] + ?assertEqual(1, client_info(session_present, Client2)), ?assertEqual(142, receive_disconnect_reasoncode()), waiting_client_process_exit(Client1), ok = emqtt:disconnect(Client2), waiting_client_process_exit(Client2), - {ok, Client3} = emqtt:start_link([{clientid, <<"new_client">>}, - {proto_ver, v5},{clean_start, false} | Config]), + {ok, Client3} = emqtt:start_link([ + {clientid, <<"new_client">>}, + {proto_ver, v5}, + {clean_start, false} + | Config + ]), {ok, _} = emqtt:ConnFun(Client3), - ?assertEqual(0, client_info(session_present, Client3)), %% [MQTT-3.1.2-6] + %% [MQTT-3.1.2-6] + ?assertEqual(0, client_info(session_present, Client3)), ok = emqtt:disconnect(Client3), waiting_client_process_exit(Client3), @@ -177,42 +205,49 @@ t_connect_will_message(Config) -> Topic = nth(1, ?TOPICS), Payload = "will message", - {ok, Client1} = emqtt:start_link([ {proto_ver, v5}, - {clean_start, true}, - {will_flag, true}, - {will_topic, Topic}, - {will_payload, Payload} | Config - ]), + {ok, Client1} = emqtt:start_link([ + {proto_ver, v5}, + {clean_start, true}, + {will_flag, true}, + {will_topic, Topic}, + {will_payload, Payload} + | Config + ]), {ok, _} = emqtt:ConnFun(Client1), [ClientPid] = emqx_cm:lookup_channels(client_info(clientid, Client1)), Info = emqx_connection:info(sys:get_state(ClientPid)), - ?assertNotEqual(undefined, maps:find(will_msg, Info)), %% [MQTT-3.1.2-7] + %% [MQTT-3.1.2-7] + ?assertNotEqual(undefined, maps:find(will_msg, Info)), {ok, Client2} = emqtt:start_link([{proto_ver, v5} | Config]), {ok, _} = emqtt:ConnFun(Client2), {ok, _, [2]} = emqtt:subscribe(Client2, Topic, qos2), - ok = emqtt:disconnect(Client1, 4), %% [MQTT-3.14.2-1] - [Msg | _ ] = receive_messages(1), + %% [MQTT-3.14.2-1] + ok = emqtt:disconnect(Client1, 4), + [Msg | _] = receive_messages(1), %% [MQTT-3.1.2-8] ?assertEqual({ok, iolist_to_binary(Topic)}, maps:find(topic, Msg)), ?assertEqual({ok, iolist_to_binary(Payload)}, maps:find(payload, Msg)), ?assertEqual({ok, 0}, maps:find(qos, Msg)), ok = emqtt:disconnect(Client2), - {ok, Client3} = emqtt:start_link([ {proto_ver, v5}, - {clean_start, true}, - {will_flag, true}, - {will_topic, Topic}, - {will_payload, Payload} | Config - ]), + {ok, Client3} = emqtt:start_link([ + {proto_ver, v5}, + {clean_start, true}, + {will_flag, true}, + {will_topic, Topic}, + {will_payload, Payload} + | Config + ]), {ok, _} = emqtt:ConnFun(Client3), {ok, Client4} = emqtt:start_link([{proto_ver, v5} | Config]), {ok, _} = emqtt:ConnFun(Client4), 
{ok, _, [2]} = emqtt:subscribe(Client4, Topic, qos2), ok = emqtt:disconnect(Client3), - ?assertEqual(0, length(receive_messages(1))), %% [MQTT-3.1.2-10] + %% [MQTT-3.1.2-10] + ?assertEqual(0, length(receive_messages(1))), ok = emqtt:disconnect(Client4). t_batch_subscribe(init, Config) -> @@ -228,16 +263,24 @@ t_batch_subscribe(Config) -> ConnFun = ?config(conn_fun, Config), {ok, Client} = emqtt:start_link([{proto_ver, v5}, {clientid, <<"batch_test">>} | Config]), {ok, _} = emqtt:ConnFun(Client), - {ok, _, [?RC_NOT_AUTHORIZED, - ?RC_NOT_AUTHORIZED, - ?RC_NOT_AUTHORIZED]} = emqtt:subscribe(Client, [{<<"t1">>, qos1}, - {<<"t2">>, qos2}, - {<<"t3">>, qos0}]), - {ok, _, [?RC_NO_SUBSCRIPTION_EXISTED, - ?RC_NO_SUBSCRIPTION_EXISTED, - ?RC_NO_SUBSCRIPTION_EXISTED]} = emqtt:unsubscribe(Client, [<<"t1">>, - <<"t2">>, - <<"t3">>]), + {ok, _, [ + ?RC_NOT_AUTHORIZED, + ?RC_NOT_AUTHORIZED, + ?RC_NOT_AUTHORIZED + ]} = emqtt:subscribe(Client, [ + {<<"t1">>, qos1}, + {<<"t2">>, qos2}, + {<<"t3">>, qos0} + ]), + {ok, _, [ + ?RC_NO_SUBSCRIPTION_EXISTED, + ?RC_NO_SUBSCRIPTION_EXISTED, + ?RC_NO_SUBSCRIPTION_EXISTED + ]} = emqtt:unsubscribe(Client, [ + <<"t1">>, + <<"t2">>, + <<"t3">> + ]), emqtt:disconnect(Client). t_connect_will_retain(Config) -> @@ -246,13 +289,15 @@ t_connect_will_retain(Config) -> Topic = nth(1, ?TOPICS), Payload = "will message", - {ok, Client1} = emqtt:start_link([ {proto_ver, v5}, - {clean_start, true}, - {will_flag, true}, - {will_topic, Topic}, - {will_payload, Payload}, - {will_retain, false} | Config - ]), + {ok, Client1} = emqtt:start_link([ + {proto_ver, v5}, + {clean_start, true}, + {will_flag, true}, + {will_topic, Topic}, + {will_payload, Payload}, + {will_retain, false} + | Config + ]), {ok, _} = emqtt:ConnFun(Client1), {ok, Client2} = emqtt:start_link([{proto_ver, v5} | Config]), @@ -260,17 +305,20 @@ t_connect_will_retain(Config) -> {ok, _, [2]} = emqtt:subscribe(Client2, #{}, [{Topic, [{rap, true}, {qos, 2}]}]), ok = emqtt:disconnect(Client1, 4), - [Msg1 | _ ] = receive_messages(1), - ?assertEqual({ok, false}, maps:find(retain, Msg1)), %% [MQTT-3.1.2-14] + [Msg1 | _] = receive_messages(1), + %% [MQTT-3.1.2-14] + ?assertEqual({ok, false}, maps:find(retain, Msg1)), ok = emqtt:disconnect(Client2), - {ok, Client3} = emqtt:start_link([ {proto_ver, v5}, - {clean_start, true}, - {will_flag, true}, - {will_topic, Topic}, - {will_payload, Payload}, - {will_retain, true} | Config - ]), + {ok, Client3} = emqtt:start_link([ + {proto_ver, v5}, + {clean_start, true}, + {will_flag, true}, + {will_topic, Topic}, + {will_payload, Payload}, + {will_retain, true} + | Config + ]), {ok, _} = emqtt:ConnFun(Client3), {ok, Client4} = emqtt:start_link([{proto_ver, v5} | Config]), @@ -278,8 +326,9 @@ t_connect_will_retain(Config) -> {ok, _, [2]} = emqtt:subscribe(Client4, #{}, [{Topic, [{rap, true}, {qos, 2}]}]), ok = emqtt:disconnect(Client3, 4), - [Msg2 | _ ] = receive_messages(1), - ?assertEqual({ok, true}, maps:find(retain, Msg2)), %% [MQTT-3.1.2-15] + [Msg2 | _] = receive_messages(1), + %% [MQTT-3.1.2-15] + ?assertEqual({ok, true}, maps:find(retain, Msg2)), ok = emqtt:disconnect(Client4), clean_retained(Topic, Config). 
@@ -287,9 +336,9 @@ t_connect_idle_timeout(_Config) -> IdleTimeout = 2000, emqx_config:put_zone_conf(default, [mqtt, idle_timeout], IdleTimeout), emqx_config:put_zone_conf(default, [mqtt, idle_timeout], IdleTimeout), - {ok, Sock} = emqtt_sock:connect({127,0,0,1}, 1883, [], 60000), + {ok, Sock} = emqtt_sock:connect({127, 0, 0, 1}, 1883, [], 60000), timer:sleep(IdleTimeout), - ?assertMatch({error, closed}, emqtt_sock:recv(Sock,1024)). + ?assertMatch({error, closed}, emqtt_sock:recv(Sock, 1024)). t_connect_emit_stats_timeout(init, Config) -> NewIdleTimeout = 1000, @@ -306,7 +355,7 @@ t_connect_emit_stats_timeout('end', _Config) -> t_connect_emit_stats_timeout(Config) -> ConnFun = ?config(conn_fun, Config), {_, IdleTimeout} = lists:keyfind(idle_timeout, 1, Config), - {ok, Client} = emqtt:start_link([{proto_ver, v5},{keepalive, 60} | Config]), + {ok, Client} = emqtt:start_link([{proto_ver, v5}, {keepalive, 60} | Config]), {ok, _} = emqtt:ConnFun(Client), [ClientPid] = emqx_cm:lookup_channels(client_info(clientid, Client)), ?assert(is_reference(emqx_connection:info(stats_timer, sys:get_state(ClientPid)))), @@ -322,13 +371,16 @@ t_connect_keepalive_timeout(Config) -> Keepalive = 2, - {ok, Client} = emqtt:start_link([{proto_ver, v5}, - {keepalive, Keepalive} | Config]), + {ok, Client} = emqtt:start_link([ + {proto_ver, v5}, + {keepalive, Keepalive} + | Config + ]), {ok, _} = emqtt:ConnFun(Client), emqtt:pause(Client), receive {disconnected, ReasonCode, _Channel} -> ?assertEqual(141, ReasonCode) - after round(timer:seconds(Keepalive) * 2 * 1.5 ) -> + after round(timer:seconds(Keepalive) * 2 * 1.5) -> error("keepalive timeout") end. @@ -395,13 +447,17 @@ t_connect_keepalive_timeout(Config) -> t_connect_duplicate_clientid(Config) -> ConnFun = ?config(conn_fun, Config), process_flag(trap_exit, true), - {ok, Client1} = emqtt:start_link([ {clientid, <<"t_connect_duplicate_clientid">>}, - {proto_ver, v5} | Config - ]), + {ok, Client1} = emqtt:start_link([ + {clientid, <<"t_connect_duplicate_clientid">>}, + {proto_ver, v5} + | Config + ]), {ok, _} = emqtt:ConnFun(Client1), - {ok, Client2} = emqtt:start_link([ {clientid, <<"t_connect_duplicate_clientid">>}, - {proto_ver, v5} | Config - ]), + {ok, Client2} = emqtt:start_link([ + {clientid, <<"t_connect_duplicate_clientid">>}, + {proto_ver, v5} + | Config + ]), {ok, _} = emqtt:ConnFun(Client2), ?assertEqual(142, receive_disconnect_reasoncode()), waiting_client_process_exit(Client1), @@ -416,22 +472,28 @@ t_connect_duplicate_clientid(Config) -> t_connack_session_present(Config) -> ConnFun = ?config(conn_fun, Config), - {ok, Client1} = emqtt:start_link([ {clientid, <<"t_connect_duplicate_clientid">>}, - {proto_ver, v5}, - {properties, #{'Session-Expiry-Interval' => 7200}}, - {clean_start, true} | Config - ]), + {ok, Client1} = emqtt:start_link([ + {clientid, <<"t_connect_duplicate_clientid">>}, + {proto_ver, v5}, + {properties, #{'Session-Expiry-Interval' => 7200}}, + {clean_start, true} + | Config + ]), {ok, _} = emqtt:ConnFun(Client1), - ?assertEqual(0, client_info(session_present, Client1)), %% [MQTT-3.2.2-2] + %% [MQTT-3.2.2-2] + ?assertEqual(0, client_info(session_present, Client1)), ok = emqtt:disconnect(Client1), - {ok, Client2} = emqtt:start_link([ {clientid, <<"t_connect_duplicate_clientid">>}, - {proto_ver, v5}, - {properties, #{'Session-Expiry-Interval' => 7200}}, - {clean_start, false} | Config - ]), + {ok, Client2} = emqtt:start_link([ + {clientid, <<"t_connect_duplicate_clientid">>}, + {proto_ver, v5}, + {properties, #{'Session-Expiry-Interval' 
=> 7200}}, + {clean_start, false} + | Config + ]), {ok, _} = emqtt:ConnFun(Client2), - ?assertEqual(1, client_info(session_present, Client2)), %% [[MQTT-3.2.2-3]] + %% [[MQTT-3.2.2-3]] + ?assertEqual(1, client_info(session_present, Client2)), ok = emqtt:disconnect(Client2). t_connack_max_qos_allowed(init, Config) -> @@ -451,24 +513,32 @@ t_connack_max_qos_allowed(Config) -> {ok, Client1} = emqtt:start_link([{proto_ver, v5} | Config]), {ok, Connack1} = emqtt:ConnFun(Client1), - ?assertEqual(0, maps:get('Maximum-QoS', Connack1)), %% [MQTT-3.2.2-9] + %% [MQTT-3.2.2-9] + ?assertEqual(0, maps:get('Maximum-QoS', Connack1)), - {ok, _, [0]} = emqtt:subscribe(Client1, Topic, 0), %% [MQTT-3.2.2-10] - {ok, _, [1]} = emqtt:subscribe(Client1, Topic, 1), %% [MQTT-3.2.2-10] - {ok, _, [2]} = emqtt:subscribe(Client1, Topic, 2), %% [MQTT-3.2.2-10] + %% [MQTT-3.2.2-10] + {ok, _, [0]} = emqtt:subscribe(Client1, Topic, 0), + %% [MQTT-3.2.2-10] + {ok, _, [1]} = emqtt:subscribe(Client1, Topic, 1), + %% [MQTT-3.2.2-10] + {ok, _, [2]} = emqtt:subscribe(Client1, Topic, 2), {ok, _} = emqtt:publish(Client1, Topic, <<"Unsupported Qos 1">>, qos1), - ?assertEqual(155, receive_disconnect_reasoncode()), %% [MQTT-3.2.2-11] + %% [MQTT-3.2.2-11] + ?assertEqual(155, receive_disconnect_reasoncode()), waiting_client_process_exit(Client1), - {ok, Client2} = emqtt:start_link([ {proto_ver, v5}, - {will_flag, true}, - {will_topic, Topic}, - {will_payload, <<"Unsupported Qos">>}, - {will_qos, 2} | Config - ]), + {ok, Client2} = emqtt:start_link([ + {proto_ver, v5}, + {will_flag, true}, + {will_topic, Topic}, + {will_payload, <<"Unsupported Qos">>}, + {will_qos, 2} + | Config + ]), {error, Connack2} = emqtt:ConnFun(Client2), - ?assertMatch({qos_not_supported, _}, Connack2), %% [MQTT-3.2.2-12] + %% [MQTT-3.2.2-12] + ?assertMatch({qos_not_supported, _}, Connack2), waiting_client_process_exit(Client2), %% max_qos_allowed = 1 @@ -477,24 +547,32 @@ t_connack_max_qos_allowed(Config) -> {ok, Client3} = emqtt:start_link([{proto_ver, v5} | Config]), {ok, Connack3} = emqtt:ConnFun(Client3), - ?assertEqual(1, maps:get('Maximum-QoS', Connack3)), %% [MQTT-3.2.2-9] + %% [MQTT-3.2.2-9] + ?assertEqual(1, maps:get('Maximum-QoS', Connack3)), - {ok, _, [0]} = emqtt:subscribe(Client3, Topic, 0), %% [MQTT-3.2.2-10] - {ok, _, [1]} = emqtt:subscribe(Client3, Topic, 1), %% [MQTT-3.2.2-10] - {ok, _, [2]} = emqtt:subscribe(Client3, Topic, 2), %% [MQTT-3.2.2-10] + %% [MQTT-3.2.2-10] + {ok, _, [0]} = emqtt:subscribe(Client3, Topic, 0), + %% [MQTT-3.2.2-10] + {ok, _, [1]} = emqtt:subscribe(Client3, Topic, 1), + %% [MQTT-3.2.2-10] + {ok, _, [2]} = emqtt:subscribe(Client3, Topic, 2), {ok, _} = emqtt:publish(Client3, Topic, <<"Unsupported Qos 2">>, qos2), - ?assertEqual(155, receive_disconnect_reasoncode()), %% [MQTT-3.2.2-11] + %% [MQTT-3.2.2-11] + ?assertEqual(155, receive_disconnect_reasoncode()), waiting_client_process_exit(Client3), - {ok, Client4} = emqtt:start_link([ {proto_ver, v5}, - {will_flag, true}, - {will_topic, Topic}, - {will_payload, <<"Unsupported Qos">>}, - {will_qos, 2} | Config - ]), + {ok, Client4} = emqtt:start_link([ + {proto_ver, v5}, + {will_flag, true}, + {will_topic, Topic}, + {will_payload, <<"Unsupported Qos">>}, + {will_qos, 2} + | Config + ]), {error, Connack4} = emqtt:ConnFun(Client4), - ?assertMatch({qos_not_supported, _}, Connack4), %% [MQTT-3.2.2-12] + %% [MQTT-3.2.2-12] + ?assertMatch({qos_not_supported, _}, Connack4), waiting_client_process_exit(Client4), %% max_qos_allowed = 2 @@ -503,7 +581,8 @@ 
t_connack_max_qos_allowed(Config) -> {ok, Client5} = emqtt:start_link([{proto_ver, v5} | Config]), {ok, Connack5} = emqtt:ConnFun(Client5), - ?assertEqual(undefined, maps:get('Maximum-QoS', Connack5, undefined)), %% [MQTT-3.2.2-9] + %% [MQTT-3.2.2-9] + ?assertEqual(undefined, maps:get('Maximum-QoS', Connack5, undefined)), ok = emqtt:disconnect(Client5), waiting_client_process_exit(Client5), @@ -513,7 +592,8 @@ t_connack_assigned_clienid(Config) -> ConnFun = ?config(conn_fun, Config), {ok, Client1} = emqtt:start_link([{proto_ver, v5} | Config]), {ok, _} = emqtt:ConnFun(Client1), - ?assert(is_binary(client_info(clientid, Client1))), %% [MQTT-3.2.2-16] + %% [MQTT-3.2.2-16] + ?assert(is_binary(client_info(clientid, Client1))), ok = emqtt:disconnect(Client1). %%-------------------------------------------------------------------- @@ -527,19 +607,31 @@ t_publish_rap(Config) -> {ok, Client1} = emqtt:start_link([{proto_ver, v5} | Config]), {ok, _} = emqtt:ConnFun(Client1), {ok, _, [2]} = emqtt:subscribe(Client1, #{}, [{Topic, [{rap, true}, {qos, 2}]}]), - {ok, _} = emqtt:publish(Client1, Topic, #{}, <<"retained message">>, - [{qos, ?QOS_1}, {retain, true}]), + {ok, _} = emqtt:publish( + Client1, + Topic, + #{}, + <<"retained message">>, + [{qos, ?QOS_1}, {retain, true}] + ), [Msg1 | _] = receive_messages(1), - ?assertEqual(true, maps:get(retain, Msg1)), %% [MQTT-3.3.1-12] + %% [MQTT-3.3.1-12] + ?assertEqual(true, maps:get(retain, Msg1)), ok = emqtt:disconnect(Client1), {ok, Client2} = emqtt:start_link([{proto_ver, v5} | Config]), {ok, _} = emqtt:ConnFun(Client2), {ok, _, [2]} = emqtt:subscribe(Client2, #{}, [{Topic, [{rap, false}, {qos, 2}]}]), - {ok, _} = emqtt:publish(Client2, Topic, #{}, <<"retained message">>, - [{qos, ?QOS_1}, {retain, true}]), + {ok, _} = emqtt:publish( + Client2, + Topic, + #{}, + <<"retained message">>, + [{qos, ?QOS_1}, {retain, true}] + ), [Msg2 | _] = receive_messages(1), - ?assertEqual(false, maps:get(retain, Msg2)), %% [MQTT-3.3.1-13] + %% [MQTT-3.3.1-13] + ?assertEqual(false, maps:get(retain, Msg2)), ok = emqtt:disconnect(Client2), clean_retained(Topic, Config). @@ -567,7 +659,8 @@ t_publish_payload_format_indicator(Config) -> {ok, _, [2]} = emqtt:subscribe(Client1, Topic, qos2), ok = emqtt:publish(Client1, Topic, Properties, <<"Payload Format Indicator">>, [{qos, ?QOS_0}]), [Msg1 | _] = receive_messages(1), - ?assertEqual(Properties, maps:get(properties, Msg1)), %% [MQTT-3.3.2-6] + %% [MQTT-3.3.2-6] + ?assertEqual(Properties, maps:get(properties, Msg1)), ok = emqtt:disconnect(Client1). 
t_publish_topic_alias(Config) -> @@ -578,17 +671,29 @@ t_publish_topic_alias(Config) -> {ok, Client1} = emqtt:start_link([{proto_ver, v5} | Config]), {ok, _} = emqtt:ConnFun(Client1), ok = emqtt:publish(Client1, Topic, #{'Topic-Alias' => 0}, <<"Topic-Alias">>, [{qos, ?QOS_0}]), - ?assertEqual(148, receive_disconnect_reasoncode()), %% [MQTT-3.3.2-8] + %% [MQTT-3.3.2-8] + ?assertEqual(148, receive_disconnect_reasoncode()), waiting_client_process_exit(Client1), {ok, Client2} = emqtt:start_link([{proto_ver, v5} | Config]), {ok, _} = emqtt:ConnFun(Client2), {ok, _, [2]} = emqtt:subscribe(Client2, Topic, qos2), - ok = emqtt:publish(Client2, Topic, #{'Topic-Alias' => 233}, - <<"Topic-Alias">>, [{qos, ?QOS_0}]), - ok = emqtt:publish(Client2, <<"">>, #{'Topic-Alias' => 233}, - <<"Topic-Alias">>, [{qos, ?QOS_0}]), - ?assertEqual(2, length(receive_messages(2))), %% [MQTT-3.3.2-12] + ok = emqtt:publish( + Client2, + Topic, + #{'Topic-Alias' => 233}, + <<"Topic-Alias">>, + [{qos, ?QOS_0}] + ), + ok = emqtt:publish( + Client2, + <<"">>, + #{'Topic-Alias' => 233}, + <<"Topic-Alias">>, + [{qos, ?QOS_0}] + ), + %% [MQTT-3.3.2-12] + ?assertEqual(2, length(receive_messages(2))), ok = emqtt:disconnect(Client2), waiting_client_process_exit(Client2), @@ -601,9 +706,15 @@ t_publish_response_topic(Config) -> {ok, Client1} = emqtt:start_link([{proto_ver, v5} | Config]), {ok, _} = emqtt:ConnFun(Client1), - ok = emqtt:publish(Client1, Topic, #{'Response-Topic' => nth(1, ?WILD_TOPICS)}, - <<"Response-Topic">>, [{qos, ?QOS_0}]), - ?assertEqual(130, receive_disconnect_reasoncode()), %% [MQTT-3.3.2-14] + ok = emqtt:publish( + Client1, + Topic, + #{'Response-Topic' => nth(1, ?WILD_TOPICS)}, + <<"Response-Topic">>, + [{qos, ?QOS_0}] + ), + %% [MQTT-3.3.2-14] + ?assertEqual(130, receive_disconnect_reasoncode()), waiting_client_process_exit(Client1), process_flag(trap_exit, false). @@ -611,19 +722,24 @@ t_publish_response_topic(Config) -> t_publish_properties(Config) -> ConnFun = ?config(conn_fun, Config), Topic = nth(1, ?TOPICS), - Properties = #{ - 'Response-Topic' => Topic, %% [MQTT-3.3.2-15] - 'Correlation-Data' => <<"233">>, %% [MQTT-3.3.2-16] - 'User-Property' => [{<<"a">>, <<"2333">>}], %% [MQTT-3.3.2-18] - 'Content-Type' => <<"2333">> %% [MQTT-3.3.2-20] - }, + Properties = #{ + %% [MQTT-3.3.2-15] + 'Response-Topic' => Topic, + %% [MQTT-3.3.2-16] + 'Correlation-Data' => <<"233">>, + %% [MQTT-3.3.2-18] + 'User-Property' => [{<<"a">>, <<"2333">>}], + %% [MQTT-3.3.2-20] + 'Content-Type' => <<"2333">> + }, {ok, Client1} = emqtt:start_link([{proto_ver, v5} | Config]), {ok, _} = emqtt:ConnFun(Client1), {ok, _, [2]} = emqtt:subscribe(Client1, Topic, qos2), ok = emqtt:publish(Client1, Topic, Properties, <<"Publish Properties">>, [{qos, ?QOS_0}]), [Msg1 | _] = receive_messages(1), - ?assertEqual(Properties, maps:get(properties, Msg1)), %% [MQTT-3.3.2-16] + %% [MQTT-3.3.2-16] + ?assertEqual(Properties, maps:get(properties, Msg1)), ok = emqtt:disconnect(Client1). 
t_publish_overlapping_subscriptions(Config) -> @@ -635,12 +751,19 @@ t_publish_overlapping_subscriptions(Config) -> {ok, _} = emqtt:ConnFun(Client1), {ok, _, [1]} = emqtt:subscribe(Client1, Properties, nth(1, ?WILD_TOPICS), qos1), {ok, _, [0]} = emqtt:subscribe(Client1, Properties, nth(3, ?WILD_TOPICS), qos0), - {ok, _} = emqtt:publish(Client1, Topic, #{}, - <<"t_publish_overlapping_subscriptions">>, [{qos, ?QOS_2}]), + {ok, _} = emqtt:publish( + Client1, + Topic, + #{}, + <<"t_publish_overlapping_subscriptions">>, + [{qos, ?QOS_2}] + ), - [Msg1 | _ ] = receive_messages(2), - ?assert( maps:get(qos, Msg1) < 2 ), %% [MQTT-3.3.4-2] - ?assertEqual(Properties, maps:get(properties, Msg1)), %% [MQTT-3.3.4-3] + [Msg1 | _] = receive_messages(2), + %% [MQTT-3.3.4-2] + ?assert(maps:get(qos, Msg1) < 2), + %% [MQTT-3.3.4-3] + ?assertEqual(Properties, maps:get(properties, Msg1)), ok = emqtt:disconnect(Client1). %%-------------------------------------------------------------------- @@ -651,10 +774,11 @@ t_subscribe_topic_alias(Config) -> ConnFun = ?config(conn_fun, Config), Topic1 = nth(1, ?TOPICS), Topic2 = nth(2, ?TOPICS), - {ok, Client1} = emqtt:start_link([ {proto_ver, v5}, - {properties, #{'Topic-Alias-Maximum' => 1}} - | Config - ]), + {ok, Client1} = emqtt:start_link([ + {proto_ver, v5}, + {properties, #{'Topic-Alias-Maximum' => 1}} + | Config + ]), {ok, _} = emqtt:ConnFun(Client1), {ok, _, [2]} = emqtt:subscribe(Client1, Topic1, qos2), {ok, _, [2]} = emqtt:subscribe(Client1, Topic2, qos2), @@ -689,7 +813,8 @@ t_subscribe_no_local(Config) -> {ok, _, [2]} = emqtt:subscribe(Client2, #{}, [{Topic, [{nl, true}, {qos, 2}]}]), ok = emqtt:publish(Client1, Topic, <<"t_subscribe_no_local">>, 0), - ?assertEqual(1, length(receive_messages(2))), %% [MQTT-3.8.3-3] + %% [MQTT-3.8.3-3] + ?assertEqual(1, length(receive_messages(2))), ok = emqtt:disconnect(Client1). t_subscribe_actions(Config) -> @@ -702,11 +827,14 @@ t_subscribe_actions(Config) -> {ok, _, [2]} = emqtt:subscribe(Client1, Properties, Topic, qos2), {ok, _, [1]} = emqtt:subscribe(Client1, Properties, Topic, qos1), {ok, _} = emqtt:publish(Client1, Topic, <<"t_subscribe_actions">>, 2), - [Msg1 | _ ] = receive_messages(1), - ?assertEqual(1, maps:get(qos, Msg1)), %% [MQTT-3.8.4-3] [MQTT-3.8.4-8] + [Msg1 | _] = receive_messages(1), + %% [MQTT-3.8.4-3] [MQTT-3.8.4-8] + ?assertEqual(1, maps:get(qos, Msg1)), %% [MQTT-3.8.4-5] [MQTT-3.8.4-6] [MQTT-3.8.4-7] - {ok, _, [2,2]} = emqtt:subscribe(Client1, [{nth(1, ?TOPICS), qos2}, - {nth(2, ?TOPICS), qos2}] ), + {ok, _, [2, 2]} = emqtt:subscribe(Client1, [ + {nth(1, ?TOPICS), qos2}, + {nth(2, ?TOPICS), qos2} + ]), ok = emqtt:disconnect(Client1). 
%%-------------------------------------------------------------------- %% Unsubsctibe Unsuback @@ -720,8 +848,10 @@ t_unscbsctibe(Config) -> {ok, Client1} = emqtt:start_link([{proto_ver, v5} | Config]), {ok, _} = emqtt:ConnFun(Client1), {ok, _, [2]} = emqtt:subscribe(Client1, Topic1, qos2), - {ok, _, [0]} = emqtt:unsubscribe(Client1, Topic1), %% [MQTT-3.10.4-4] - {ok, _, [17]} = emqtt:unsubscribe(Client1, <<"noExistTopic">>), %% [MQTT-3.10.4-5] + %% [MQTT-3.10.4-4] + {ok, _, [0]} = emqtt:unsubscribe(Client1, Topic1), + %% [MQTT-3.10.4-5] + {ok, _, [17]} = emqtt:unsubscribe(Client1, <<"noExistTopic">>), {ok, _, [2, 2]} = emqtt:subscribe(Client1, [{Topic1, qos2}, {Topic2, qos2}]), %% [[MQTT-3.10.4-6]] [MQTT-3.11.3-1] [MQTT-3.11.3-2] @@ -736,7 +866,8 @@ t_pingreq(Config) -> ConnFun = ?config(conn_fun, Config), {ok, Client1} = emqtt:start_link([{proto_ver, v5} | Config]), {ok, _} = emqtt:ConnFun(Client1), - pong = emqtt:ping(Client1), %% [MQTT-3.12.4-1] + %% [MQTT-3.12.4-1] + pong = emqtt:ping(Client1), ok = emqtt:disconnect(Client1). %%-------------------------------------------------------------------- @@ -758,36 +889,50 @@ t_shared_subscriptions_client_terminates_when_qos_eq_2(Config) -> SharedTopic = list_to_binary("$share/sharename/" ++ binary_to_list(<<"TopicA">>)), CRef = counters:new(1, [atomics]), - meck:expect(emqtt, connected, - fun(cast, ?PUBLISH_PACKET(?QOS_2, _PacketId), _State) -> - ok = counters:add(CRef, 1, 1), - {stop, {shutdown, for_testing}}; - (Arg1, ARg2, Arg3) -> meck:passthrough([Arg1, ARg2, Arg3]) - end), + meck:expect( + emqtt, + connected, + fun + (cast, ?PUBLISH_PACKET(?QOS_2, _PacketId), _State) -> + ok = counters:add(CRef, 1, 1), + {stop, {shutdown, for_testing}}; + (Arg1, ARg2, Arg3) -> + meck:passthrough([Arg1, ARg2, Arg3]) + end + ), - {ok, Sub1} = emqtt:start_link([ {proto_ver, v5}, - {clientid, <<"sub_client_1">>}, - {keepalive, 5} | Config - ]), + {ok, Sub1} = emqtt:start_link([ + {proto_ver, v5}, + {clientid, <<"sub_client_1">>}, + {keepalive, 5} + | Config + ]), {ok, _} = emqtt:ConnFun(Sub1), {ok, _, [2]} = emqtt:subscribe(Sub1, SharedTopic, qos2), - {ok, Sub2} = emqtt:start_link([{proto_ver, v5}, - {clientid, <<"sub_client_2">>}, - {keepalive, 5} | Config]), + {ok, Sub2} = emqtt:start_link([ + {proto_ver, v5}, + {clientid, <<"sub_client_2">>}, + {keepalive, 5} + | Config + ]), {ok, _} = emqtt:ConnFun(Sub2), {ok, _, [2]} = emqtt:subscribe(Sub2, SharedTopic, qos2), {ok, Pub} = emqtt:start_link([{proto_ver, v5}, {clientid, <<"pub_client">>} | Config]), {ok, _} = emqtt:ConnFun(Pub), - {ok, _} = emqtt:publish(Pub, Topic, - <<"t_shared_subscriptions_client_terminates_when_qos_eq_2">>, 2), + {ok, _} = emqtt:publish( + Pub, + Topic, + <<"t_shared_subscriptions_client_terminates_when_qos_eq_2">>, + 2 + ), receive - {'EXIT', _,{shutdown, for_testing}} -> + {'EXIT', _, {shutdown, for_testing}} -> ok after 1000 -> - ct:fail("disconnected timeout") + ct:fail("disconnected timeout") end, ?assertEqual(1, counters:get(CRef, 1)), diff --git a/apps/emqx/test/emqx_mqueue_SUITE.erl b/apps/emqx/test/emqx_mqueue_SUITE.erl index f33661474..c87c42266 100644 --- a/apps/emqx/test/emqx_mqueue_SUITE.erl +++ b/apps/emqx/test/emqx_mqueue_SUITE.erl @@ -35,11 +35,12 @@ t_info(_) -> 5 = ?Q:info(max_len, Q), 0 = ?Q:info(len, Q), 0 = ?Q:info(dropped, Q), - #{store_qos0 := true, - max_len := 5, - len := 0, - dropped := 0 - } = ?Q:info(Q). + #{ + store_qos0 := true, + max_len := 5, + len := 0, + dropped := 0 + } = ?Q:info(Q). 
t_in(_) -> Opts = #{max_len => 5, store_qos0 => true}, @@ -91,23 +92,29 @@ t_infinity_simple_mqueue(_) -> ?assert(?Q:is_empty(Q)), ?assertEqual(0, ?Q:max_len(Q)), Qx = lists:foldl( - fun(I, AccQ) -> - {_, NewQ} = ?Q:in(#message{qos = 1, payload = iolist_to_binary([I])}, AccQ), - NewQ - end, Q, lists:seq(1, 255)), + fun(I, AccQ) -> + {_, NewQ} = ?Q:in(#message{qos = 1, payload = iolist_to_binary([I])}, AccQ), + NewQ + end, + Q, + lists:seq(1, 255) + ), ?assertEqual(255, ?Q:len(Qx)), ?assertEqual([{len, 255}, {max_len, 0}, {dropped, 0}], ?Q:stats(Qx)), {{value, V}, _Qy} = ?Q:out(Qx), ?assertEqual(<<1>>, V#message.payload). t_priority_mqueue(_) -> - Opts = #{max_len => 3, - priorities => - #{<<"t1">> => 1, - <<"t2">> => 2, - <<"t3">> => 3 - }, - store_qos0 => false}, + Opts = #{ + max_len => 3, + priorities => + #{ + <<"t1">> => 1, + <<"t2">> => 2, + <<"t3">> => 3 + }, + store_qos0 => false + }, Q = ?Q:init(Opts), ?assertEqual(3, ?Q:max_len(Q)), ?assert(?Q:is_empty(Q)), @@ -128,104 +135,136 @@ t_priority_mqueue_conservation(_) -> true = proper:quickcheck(conservation_prop()). t_priority_order(_) -> - Opts = #{max_len => 5, - shift_multiplier => 1, - priorities => - #{<<"t1">> => 0, - <<"t2">> => 1, - <<"t3">> => 2 - }, - store_qos0 => false + Opts = #{ + max_len => 5, + shift_multiplier => 1, + priorities => + #{ + <<"t1">> => 0, + <<"t2">> => 1, + <<"t3">> => 2 }, - Messages = [{Topic, Message} || - Topic <- [<<"t1">>, <<"t2">>, <<"t3">>], - Message <- lists:seq(1, 10)], - Q = lists:foldl(fun({Topic, Message}, Q) -> - element(2, ?Q:in(#message{topic = Topic, qos = 1, payload = Message}, Q)) - end, - ?Q:init(Opts), - Messages), - ?assertMatch([{<<"t3">>, 6}, - {<<"t3">>, 7}, - {<<"t3">>, 8}, + store_qos0 => false + }, + Messages = [ + {Topic, Message} + || Topic <- [<<"t1">>, <<"t2">>, <<"t3">>], + Message <- lists:seq(1, 10) + ], + Q = lists:foldl( + fun({Topic, Message}, Q) -> + element(2, ?Q:in(#message{topic = Topic, qos = 1, payload = Message}, Q)) + end, + ?Q:init(Opts), + Messages + ), + ?assertMatch( + [ + {<<"t3">>, 6}, + {<<"t3">>, 7}, + {<<"t3">>, 8}, - {<<"t2">>, 6}, - {<<"t2">>, 7}, + {<<"t2">>, 6}, + {<<"t2">>, 7}, - {<<"t1">>, 6}, + {<<"t1">>, 6}, - {<<"t3">>, 9}, - {<<"t3">>, 10}, + {<<"t3">>, 9}, + {<<"t3">>, 10}, - {<<"t2">>, 8}, + {<<"t2">>, 8}, - %% Note: for performance reasons we don't reset the - %% counter when we run out of messages with the - %% current prio, so next is t1: - {<<"t1">>, 7}, + %% Note: for performance reasons we don't reset the + %% counter when we run out of messages with the + %% current prio, so next is t1: + {<<"t1">>, 7}, - {<<"t2">>, 9}, - {<<"t2">>, 10}, + {<<"t2">>, 9}, + {<<"t2">>, 10}, - {<<"t1">>, 8}, - {<<"t1">>, 9}, - {<<"t1">>, 10} - ], drain(Q)). + {<<"t1">>, 8}, + {<<"t1">>, 9}, + {<<"t1">>, 10} + ], + drain(Q) + ). 
t_priority_order2(_) -> - Opts = #{max_len => 5, - shift_multiplier => 2, - priorities => - #{<<"t1">> => 0, - <<"t2">> => 1 - }, - store_qos0 => false + Opts = #{ + max_len => 5, + shift_multiplier => 2, + priorities => + #{ + <<"t1">> => 0, + <<"t2">> => 1 }, - Messages = [{Topic, Message} || - Topic <- [<<"t1">>, <<"t2">>], - Message <- lists:seq(1, 10)], - Q = lists:foldl(fun({Topic, Message}, Q) -> - element(2, ?Q:in(#message{topic = Topic, qos = 1, payload = Message}, Q)) - end, - ?Q:init(Opts), - Messages), - ?assertMatch([{<<"t2">>, 6}, - {<<"t2">>, 7}, - {<<"t2">>, 8}, - {<<"t2">>, 9}, + store_qos0 => false + }, + Messages = [ + {Topic, Message} + || Topic <- [<<"t1">>, <<"t2">>], + Message <- lists:seq(1, 10) + ], + Q = lists:foldl( + fun({Topic, Message}, Q) -> + element(2, ?Q:in(#message{topic = Topic, qos = 1, payload = Message}, Q)) + end, + ?Q:init(Opts), + Messages + ), + ?assertMatch( + [ + {<<"t2">>, 6}, + {<<"t2">>, 7}, + {<<"t2">>, 8}, + {<<"t2">>, 9}, - {<<"t1">>, 6}, - {<<"t1">>, 7}, + {<<"t1">>, 6}, + {<<"t1">>, 7}, - {<<"t2">>, 10}, + {<<"t2">>, 10}, - {<<"t1">>, 8}, - {<<"t1">>, 9}, - {<<"t1">>, 10} - ], drain(Q)). + {<<"t1">>, 8}, + {<<"t1">>, 9}, + {<<"t1">>, 10} + ], + drain(Q) + ). t_infinity_priority_mqueue(_) -> - Opts = #{max_len => 0, - priorities => - #{<<"t">> => 1, - <<"t1">> => 2 - }, - store_qos0 => false}, + Opts = #{ + max_len => 0, + priorities => + #{ + <<"t">> => 1, + <<"t1">> => 2 + }, + store_qos0 => false + }, Q = ?Q:init(Opts), ?assertEqual(0, ?Q:max_len(Q)), - Qx = lists:foldl(fun(I, AccQ) -> - {undefined, AccQ1} = ?Q:in(#message{topic = <<"t1">>, qos = 1, payload = iolist_to_binary([I])}, AccQ), - {undefined, AccQ2} = ?Q:in(#message{topic = <<"t">>, qos = 1, payload = iolist_to_binary([I])}, AccQ1), - AccQ2 - end, Q, lists:seq(1, 255)), + Qx = lists:foldl( + fun(I, AccQ) -> + {undefined, AccQ1} = ?Q:in( + #message{topic = <<"t1">>, qos = 1, payload = iolist_to_binary([I])}, AccQ + ), + {undefined, AccQ2} = ?Q:in( + #message{topic = <<"t">>, qos = 1, payload = iolist_to_binary([I])}, AccQ1 + ), + AccQ2 + end, + Q, + lists:seq(1, 255) + ), ?assertEqual(510, ?Q:len(Qx)), ?assertEqual([{len, 510}, {max_len, 0}, {dropped, 0}], ?Q:stats(Qx)). %%TODO: fixme later t_length_priority_mqueue(_) -> - Opts = #{max_len => 2, - store_qos0 => false - }, + Opts = #{ + max_len => 2, + store_qos0 => false + }, Q = ?Q:init(Opts), 2 = ?Q:max_len(Q), {_, Q1} = ?Q:in(#message{topic = <<"x">>, qos = 1, payload = <<1>>}, Q), @@ -244,34 +283,43 @@ t_dropped(_) -> ?assertEqual(1, ?Q:dropped(Q2)). 
conservation_prop() -> - ?FORALL({Priorities, Messages}, - ?LET(Priorities, topic_priorities(), - {Priorities, messages(Priorities)}), - try - Opts = #{max_len => 0, - priorities => maps:from_list(Priorities), - store_qos0 => false}, - %% Put messages in - Q1 = lists:foldl(fun({Topic, Message}, Q) -> - element(2, ?Q:in(#message{topic = Topic, qos = 1, payload = Message}, Q)) - end, - ?Q:init(Opts), - Messages), - %% Collect messages - Got = lists:sort(drain(Q1)), - Expected = lists:sort(Messages), - case Expected =:= Got of - true -> - true; - false -> - ct:pal("Mismatch: expected ~p~nGot ~p~n", [Expected, Got]), - false - end - catch - EC:Err:Stack -> - ct:pal("Error: ~p", [{EC, Err, Stack}]), + ?FORALL( + {Priorities, Messages}, + ?LET( + Priorities, + topic_priorities(), + {Priorities, messages(Priorities)} + ), + try + Opts = #{ + max_len => 0, + priorities => maps:from_list(Priorities), + store_qos0 => false + }, + %% Put messages in + Q1 = lists:foldl( + fun({Topic, Message}, Q) -> + element(2, ?Q:in(#message{topic = Topic, qos = 1, payload = Message}, Q)) + end, + ?Q:init(Opts), + Messages + ), + %% Collect messages + Got = lists:sort(drain(Q1)), + Expected = lists:sort(Messages), + case Expected =:= Got of + true -> + true; + false -> + ct:pal("Mismatch: expected ~p~nGot ~p~n", [Expected, Got]), false - end). + end + catch + EC:Err:Stack -> + ct:pal("Error: ~p", [{EC, Err, Stack}]), + false + end + ). %% Proper generators: @@ -295,5 +343,5 @@ drain(Q) -> {empty, _} -> []; {{value, #message{topic = T, payload = P}}, Q1} -> - [{T, P}|drain(Q1)] + [{T, P} | drain(Q1)] end. diff --git a/apps/emqx/test/emqx_olp_SUITE.erl b/apps/emqx/test/emqx_olp_SUITE.erl index e7b7fca98..7a0a2d21d 100644 --- a/apps/emqx/test/emqx_olp_SUITE.erl +++ b/apps/emqx/test/emqx_olp_SUITE.erl @@ -36,19 +36,19 @@ end_per_suite(_Config) -> init_per_testcase(_, Config) -> emqx_olp:enable(), case wait_for(fun() -> lc_sup:whereis_runq_flagman() end, 10) of - true -> ok; - false -> - ct:fail("runq_flagman is not up") + true -> ok; + false -> ct:fail("runq_flagman is not up") end, - ok = load_ctl:put_config(#{ ?RUNQ_MON_F0 => true - , ?RUNQ_MON_F1 => 5 - , ?RUNQ_MON_F2 => 1 - , ?RUNQ_MON_T1 => 200 - , ?RUNQ_MON_T2 => 50 - , ?RUNQ_MON_C1 => 2 - , ?RUNQ_MON_F5 => -1 - }), - Config. + ok = load_ctl:put_config(#{ + ?RUNQ_MON_F0 => true, + ?RUNQ_MON_F1 => 5, + ?RUNQ_MON_F2 => 1, + ?RUNQ_MON_T1 => 200, + ?RUNQ_MON_T2 => 50, + ?RUNQ_MON_C1 => 2, + ?RUNQ_MON_F5 => -1 + }), + Config. %% Test that olp could be enabled/disabled globally t_disable_enable(_Config) -> @@ -97,22 +97,27 @@ burst_runq() -> %% internal helpers worker_parent(N, {M, F, A}) -> - lists:foreach(fun(_) -> - proc_lib:spawn_link(fun() -> apply(M, F, A) end) - end, lists:seq(1, N)), - receive stop -> ok end. + lists:foreach( + fun(_) -> + proc_lib:spawn_link(fun() -> apply(M, F, A) end) + end, + lists:seq(1, N) + ), + receive + stop -> ok + end. busy_loop() -> - erlang:yield(), - busy_loop(). + erlang:yield(), + busy_loop(). wait_for(_Fun, 0) -> - false; + false; wait_for(Fun, Retry) -> - case is_pid(Fun()) of - true -> - true; - false -> - timer:sleep(10), - wait_for(Fun, Retry - 1) - end. + case is_pid(Fun()) of + true -> + true; + false -> + timer:sleep(10), + wait_for(Fun, Retry - 1) + end. 
diff --git a/apps/emqx/test/emqx_os_mon_SUITE.erl b/apps/emqx/test/emqx_os_mon_SUITE.erl index 4e443b4ca..38bc2acf2 100644 --- a/apps/emqx/test/emqx_os_mon_SUITE.erl +++ b/apps/emqx/test/emqx_os_mon_SUITE.erl @@ -25,17 +25,22 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> emqx_common_test_helpers:boot_modules(all), - emqx_common_test_helpers:start_apps([], - fun(emqx) -> - application:set_env(emqx, os_mon, [ - {cpu_check_interval, 1}, - {cpu_high_watermark, 5}, - {cpu_low_watermark, 80}, - {mem_check_interval, 60}, - {sysmem_high_watermark, 70}, - {procmem_high_watermark, 5}]); - (_) -> ok - end), + emqx_common_test_helpers:start_apps( + [], + fun + (emqx) -> + application:set_env(emqx, os_mon, [ + {cpu_check_interval, 1}, + {cpu_high_watermark, 5}, + {cpu_low_watermark, 80}, + {mem_check_interval, 60}, + {sysmem_high_watermark, 70}, + {procmem_high_watermark, 5} + ]); + (_) -> + ok + end + ), Config. end_per_suite(_Config) -> @@ -56,8 +61,10 @@ t_api(_) -> ?assertEqual(ok, emqx_os_mon:set_procmem_high_watermark(0.11)), ?assertEqual(11, emqx_os_mon:get_procmem_high_watermark()), - ?assertEqual({error, {unexpected_call, ignored}}, - gen_server:call(emqx_os_mon, ignored)), + ?assertEqual( + {error, {unexpected_call, ignored}}, + gen_server:call(emqx_os_mon, ignored) + ), ?assertEqual(ok, gen_server:cast(emqx_os_mon, ignored)), emqx_os_mon ! ignored, gen_server:stop(emqx_os_mon), diff --git a/apps/emqx/test/emqx_packet_SUITE.erl b/apps/emqx/test/emqx_packet_SUITE.erl index 63bbb254a..ff50c3ebe 100644 --- a/apps/emqx/test/emqx_packet_SUITE.erl +++ b/apps/emqx/test/emqx_packet_SUITE.erl @@ -24,69 +24,85 @@ -include_lib("eunit/include/eunit.hrl"). --define(PACKETS, - [{?CONNECT, 'CONNECT', ?CONNECT_PACKET(#mqtt_packet_connect{})}, - {?CONNACK, 'CONNACK', ?CONNACK_PACKET(?RC_SUCCESS)}, - {?PUBLISH, 'PUBLISH', ?PUBLISH_PACKET(?QOS_1)}, - {?PUBACK, 'PUBACK', ?PUBACK_PACKET(1)}, - {?PUBREC, 'PUBREC', ?PUBREC_PACKET(1)}, - {?PUBREL, 'PUBREL', ?PUBREL_PACKET(1)}, - {?PUBCOMP, 'PUBCOMP', ?PUBCOMP_PACKET(1)}, - {?SUBSCRIBE, 'SUBSCRIBE', ?SUBSCRIBE_PACKET(1, [])}, - {?SUBACK, 'SUBACK', ?SUBACK_PACKET(1, [0])}, - {?UNSUBSCRIBE, 'UNSUBSCRIBE', ?UNSUBSCRIBE_PACKET(1, [])}, - {?UNSUBACK, 'UNSUBACK', ?UNSUBACK_PACKET(1)}, - {?DISCONNECT, 'DISCONNECT', ?DISCONNECT_PACKET(?RC_SUCCESS)}, - {?AUTH, 'AUTH', ?AUTH_PACKET()} - ]). +-define(PACKETS, [ + {?CONNECT, 'CONNECT', ?CONNECT_PACKET(#mqtt_packet_connect{})}, + {?CONNACK, 'CONNACK', ?CONNACK_PACKET(?RC_SUCCESS)}, + {?PUBLISH, 'PUBLISH', ?PUBLISH_PACKET(?QOS_1)}, + {?PUBACK, 'PUBACK', ?PUBACK_PACKET(1)}, + {?PUBREC, 'PUBREC', ?PUBREC_PACKET(1)}, + {?PUBREL, 'PUBREL', ?PUBREL_PACKET(1)}, + {?PUBCOMP, 'PUBCOMP', ?PUBCOMP_PACKET(1)}, + {?SUBSCRIBE, 'SUBSCRIBE', ?SUBSCRIBE_PACKET(1, [])}, + {?SUBACK, 'SUBACK', ?SUBACK_PACKET(1, [0])}, + {?UNSUBSCRIBE, 'UNSUBSCRIBE', ?UNSUBSCRIBE_PACKET(1, [])}, + {?UNSUBACK, 'UNSUBACK', ?UNSUBACK_PACKET(1)}, + {?DISCONNECT, 'DISCONNECT', ?DISCONNECT_PACKET(?RC_SUCCESS)}, + {?AUTH, 'AUTH', ?AUTH_PACKET()} +]). all() -> emqx_common_test_helpers:all(?MODULE). t_type(_) -> - lists:foreach(fun({Type, _Name, Packet}) -> - ?assertEqual(Type, emqx_packet:type(Packet)) - end, ?PACKETS). + lists:foreach( + fun({Type, _Name, Packet}) -> + ?assertEqual(Type, emqx_packet:type(Packet)) + end, + ?PACKETS + ). t_type_name(_) -> - lists:foreach(fun({_Type, Name, Packet}) -> - ?assertEqual(Name, emqx_packet:type_name(Packet)) - end, ?PACKETS). 
+ lists:foreach( + fun({_Type, Name, Packet}) -> + ?assertEqual(Name, emqx_packet:type_name(Packet)) + end, + ?PACKETS + ). t_dup(_) -> ?assertEqual(false, emqx_packet:dup(?PUBLISH_PACKET(?QOS_1))). t_qos(_) -> - lists:foreach(fun(QoS) -> - ?assertEqual(QoS, emqx_packet:qos(?PUBLISH_PACKET(QoS))) - end, [?QOS_0, ?QOS_1, ?QOS_2]). + lists:foreach( + fun(QoS) -> + ?assertEqual(QoS, emqx_packet:qos(?PUBLISH_PACKET(QoS))) + end, + [?QOS_0, ?QOS_1, ?QOS_2] + ). t_retain(_) -> ?assertEqual(false, emqx_packet:retain(?PUBLISH_PACKET(?QOS_1))). t_proto_name(_) -> lists:foreach( - fun({Ver, Name}) -> - ConnPkt = ?CONNECT_PACKET(#mqtt_packet_connect{proto_ver = Ver, - proto_name = Name}), - ?assertEqual(Name, emqx_packet:proto_name(ConnPkt)) - end, ?PROTOCOL_NAMES). + fun({Ver, Name}) -> + ConnPkt = ?CONNECT_PACKET(#mqtt_packet_connect{ + proto_ver = Ver, + proto_name = Name + }), + ?assertEqual(Name, emqx_packet:proto_name(ConnPkt)) + end, + ?PROTOCOL_NAMES + ). t_proto_ver(_) -> lists:foreach( - fun(Ver) -> - ConnPkt = ?CONNECT_PACKET(#mqtt_packet_connect{proto_ver = Ver}), - ?assertEqual(Ver, emqx_packet:proto_ver(ConnPkt)) - end, [?MQTT_PROTO_V3, ?MQTT_PROTO_V4, ?MQTT_PROTO_V5]). + fun(Ver) -> + ConnPkt = ?CONNECT_PACKET(#mqtt_packet_connect{proto_ver = Ver}), + ?assertEqual(Ver, emqx_packet:proto_ver(ConnPkt)) + end, + [?MQTT_PROTO_V3, ?MQTT_PROTO_V4, ?MQTT_PROTO_V5] + ). t_connect_info(_) -> - ConnPkt = #mqtt_packet_connect{will_flag = true, - clientid = <<"clientid">>, - username = <<"username">>, - will_retain = true, - will_qos = ?QOS_2, - will_topic = <<"topic">>, - will_payload = <<"payload">> - }, + ConnPkt = #mqtt_packet_connect{ + will_flag = true, + clientid = <<"clientid">>, + username = <<"username">>, + will_retain = true, + will_qos = ?QOS_2, + will_topic = <<"topic">>, + will_payload = <<"payload">> + }, ?assertEqual(<<"MQTT">>, emqx_packet:info(proto_name, ConnPkt)), ?assertEqual(4, emqx_packet:info(proto_ver, ConnPkt)), ?assertEqual(false, emqx_packet:info(is_bridge, ConnPkt)), @@ -157,153 +173,240 @@ t_auth_info(_) -> ?assertEqual(#{}, emqx_packet:info(properties, AuthPkt)). t_set_props(_) -> - Pkts = [#mqtt_packet_connect{}, #mqtt_packet_connack{}, #mqtt_packet_publish{}, - #mqtt_packet_puback{}, #mqtt_packet_subscribe{}, #mqtt_packet_suback{}, - #mqtt_packet_unsubscribe{}, #mqtt_packet_unsuback{}, - #mqtt_packet_disconnect{}, #mqtt_packet_auth{}], + Pkts = [ + #mqtt_packet_connect{}, + #mqtt_packet_connack{}, + #mqtt_packet_publish{}, + #mqtt_packet_puback{}, + #mqtt_packet_subscribe{}, + #mqtt_packet_suback{}, + #mqtt_packet_unsubscribe{}, + #mqtt_packet_unsuback{}, + #mqtt_packet_disconnect{}, + #mqtt_packet_auth{} + ], Props = #{'A-Fake-Props' => true}, - lists:foreach(fun(Pkt) -> - ?assertEqual(Props, emqx_packet:info(properties, emqx_packet:set_props(Props, Pkt))) - end, Pkts). + lists:foreach( + fun(Pkt) -> + ?assertEqual(Props, emqx_packet:info(properties, emqx_packet:set_props(Props, Pkt))) + end, + Pkts + ). 
t_check_publish(_) -> Props = #{'Response-Topic' => <<"responsetopic">>, 'Topic-Alias' => 1}, ok = emqx_packet:check(?PUBLISH_PACKET(?QOS_1, <<"topic">>, 1, Props, <<"payload">>)), ok = emqx_packet:check(#mqtt_packet_publish{packet_id = 1, topic_name = <<"t">>}), - {error, ?RC_PROTOCOL_ERROR} = emqx_packet:check(#mqtt_packet_publish{topic_name = <<>>, - properties = #{'Topic-Alias'=> 0} - }), - {error, ?RC_PROTOCOL_ERROR} = emqx_packet:check(?PUBLISH_PACKET(?QOS_1, <<>>, 1, #{}, <<"payload">>)), - {error, ?RC_TOPIC_NAME_INVALID} = emqx_packet:check(?PUBLISH_PACKET(?QOS_1, <<"+/+">>, 1, #{}, <<"payload">>)), - {error, ?RC_TOPIC_ALIAS_INVALID} = emqx_packet:check(?PUBLISH_PACKET(1, <<"topic">>, 1, #{'Topic-Alias' => 0}, <<"payload">>)), + {error, ?RC_PROTOCOL_ERROR} = emqx_packet:check(#mqtt_packet_publish{ + topic_name = <<>>, + properties = #{'Topic-Alias' => 0} + }), + {error, ?RC_PROTOCOL_ERROR} = emqx_packet:check( + ?PUBLISH_PACKET(?QOS_1, <<>>, 1, #{}, <<"payload">>) + ), + {error, ?RC_TOPIC_NAME_INVALID} = emqx_packet:check( + ?PUBLISH_PACKET(?QOS_1, <<"+/+">>, 1, #{}, <<"payload">>) + ), + {error, ?RC_TOPIC_ALIAS_INVALID} = emqx_packet:check( + ?PUBLISH_PACKET(1, <<"topic">>, 1, #{'Topic-Alias' => 0}, <<"payload">>) + ), %% TODO:: %% {error, ?RC_PROTOCOL_ERROR} = emqx_packet:check(?PUBLISH_PACKET(1, <<"topic">>, 1, #{'Subscription-Identifier' => 10}, <<"payload">>)), - ok = emqx_packet:check(?PUBLISH_PACKET(1, <<"topic">>, 1, #{'Subscription-Identifier' => 10}, <<"payload">>)), - {error, ?RC_PROTOCOL_ERROR} = emqx_packet:check(?PUBLISH_PACKET(1, <<"topic">>, 1, #{'Subscription-Identifier' => 0}, <<"payload">>)), - {error, ?RC_PROTOCOL_ERROR} = emqx_packet:check(?PUBLISH_PACKET(1, <<"topic">>, 1, #{'Response-Topic' => <<"+/+">>}, <<"payload">>)). + ok = emqx_packet:check( + ?PUBLISH_PACKET(1, <<"topic">>, 1, #{'Subscription-Identifier' => 10}, <<"payload">>) + ), + {error, ?RC_PROTOCOL_ERROR} = emqx_packet:check( + ?PUBLISH_PACKET(1, <<"topic">>, 1, #{'Subscription-Identifier' => 0}, <<"payload">>) + ), + {error, ?RC_PROTOCOL_ERROR} = emqx_packet:check( + ?PUBLISH_PACKET(1, <<"topic">>, 1, #{'Response-Topic' => <<"+/+">>}, <<"payload">>) + ). t_check_subscribe(_) -> - ok = emqx_packet:check(?SUBSCRIBE_PACKET(1, #{'Subscription-Identifier' => 1}, - [{<<"topic">>, #{qos => ?QOS_0}}])), + ok = emqx_packet:check( + ?SUBSCRIBE_PACKET( + 1, + #{'Subscription-Identifier' => 1}, + [{<<"topic">>, #{qos => ?QOS_0}}] + ) + ), {error, ?RC_TOPIC_FILTER_INVALID} = emqx_packet:check(#mqtt_packet_subscribe{topic_filters = []}), {error, ?RC_SUBSCRIPTION_IDENTIFIERS_NOT_SUPPORTED} = - emqx_packet:check(?SUBSCRIBE_PACKET(1, #{'Subscription-Identifier' => -1}, - [{<<"topic">>, #{qos => ?QOS_0, rp => 0}}])). + emqx_packet:check( + ?SUBSCRIBE_PACKET( + 1, + #{'Subscription-Identifier' => -1}, + [{<<"topic">>, #{qos => ?QOS_0, rp => 0}}] + ) + ). t_check_unsubscribe(_) -> ok = emqx_packet:check(?UNSUBSCRIBE_PACKET(1, [<<"topic">>])), - {error, ?RC_TOPIC_FILTER_INVALID} = emqx_packet:check(?UNSUBSCRIBE_PACKET(1,[])). + {error, ?RC_TOPIC_FILTER_INVALID} = emqx_packet:check(?UNSUBSCRIBE_PACKET(1, [])). 
t_check_connect(_) -> Opts = #{max_clientid_len => 5, mqtt_retain_available => false}, ok = emqx_packet:check(#mqtt_packet_connect{}, Opts), - ok = emqx_packet:check(?CONNECT_PACKET(#mqtt_packet_connect{clientid = <<1>>, - properties = #{'Receive-Maximum' => 1}, - will_flag = true, - will_topic = <<"will_topic">>} - ), Opts), - ConnPkt1 = #mqtt_packet_connect{proto_name = <<"MQIsdp">>, - proto_ver = ?MQTT_PROTO_V5 - }, + ok = emqx_packet:check( + ?CONNECT_PACKET(#mqtt_packet_connect{ + clientid = <<1>>, + properties = #{'Receive-Maximum' => 1}, + will_flag = true, + will_topic = <<"will_topic">> + }), + Opts + ), + ConnPkt1 = #mqtt_packet_connect{ + proto_name = <<"MQIsdp">>, + proto_ver = ?MQTT_PROTO_V5 + }, {error, ?RC_UNSUPPORTED_PROTOCOL_VERSION} = emqx_packet:check(ConnPkt1, Opts), - ConnPkt2 = #mqtt_packet_connect{proto_ver = ?MQTT_PROTO_V3, - proto_name = <<"MQIsdp">>, - clientid = <<>> - }, + ConnPkt2 = #mqtt_packet_connect{ + proto_ver = ?MQTT_PROTO_V3, + proto_name = <<"MQIsdp">>, + clientid = <<>> + }, {error, ?RC_CLIENT_IDENTIFIER_NOT_VALID} = emqx_packet:check(ConnPkt2, Opts), ConnPkt3 = #mqtt_packet_connect{clientid = <<"123456">>}, {error, ?RC_CLIENT_IDENTIFIER_NOT_VALID} = emqx_packet:check(ConnPkt3, Opts), - ConnPkt4 = #mqtt_packet_connect{will_flag = true, - will_retain = true - }, + ConnPkt4 = #mqtt_packet_connect{ + will_flag = true, + will_retain = true + }, {error, ?RC_RETAIN_NOT_SUPPORTED} = emqx_packet:check(ConnPkt4, Opts), - ConnPkt5 = #mqtt_packet_connect{will_flag = true, - will_topic = <<"#">> - }, + ConnPkt5 = #mqtt_packet_connect{ + will_flag = true, + will_topic = <<"#">> + }, {error, ?RC_TOPIC_NAME_INVALID} = emqx_packet:check(ConnPkt5, Opts), - ConnPkt6 = ?CONNECT_PACKET(#mqtt_packet_connect{properties = #{'Request-Response-Information' => -1}}), + ConnPkt6 = ?CONNECT_PACKET(#mqtt_packet_connect{ + properties = #{'Request-Response-Information' => -1} + }), {error, ?RC_PROTOCOL_ERROR} = emqx_packet:check(ConnPkt6, Opts), {error, ?RC_PROTOCOL_ERROR} = emqx_packet:check( - ?CONNECT_PACKET(#mqtt_packet_connect{ - properties = #{'Request-Problem-Information' => 2}}), Opts), + ?CONNECT_PACKET(#mqtt_packet_connect{ + properties = #{'Request-Problem-Information' => 2} + }), + Opts + ), {error, ?RC_PROTOCOL_ERROR} = emqx_packet:check( - ?CONNECT_PACKET(#mqtt_packet_connect{ - properties = #{'Receive-Maximum' => 0}}), Opts), - ConnPkt7 = #mqtt_packet_connect{clientid = <<>>, clean_start = false}, + ?CONNECT_PACKET(#mqtt_packet_connect{ + properties = #{'Receive-Maximum' => 0} + }), + Opts + ), + ConnPkt7 = #mqtt_packet_connect{clientid = <<>>, clean_start = false}, {error, ?RC_CLIENT_IDENTIFIER_NOT_VALID} = emqx_packet:check(ConnPkt7, Opts). 
t_from_to_message(_) -> ExpectedMsg = emqx_message:make(<<"clientid">>, ?QOS_0, <<"topic">>, <<"payload">>), ExpectedMsg1 = emqx_message:set_flags(#{dup => false, retain => false}, ExpectedMsg), - ExpectedMsg2 = emqx_message:set_headers(#{peerhost => {127,0,0,1}, - protocol => mqtt, - properties => #{}, - username => <<"test">> - }, ExpectedMsg1), - Pkt = #mqtt_packet{header = #mqtt_packet_header{type = ?PUBLISH, - qos = ?QOS_0, - retain = false, - dup = false}, - variable = #mqtt_packet_publish{topic_name = <<"topic">>, - packet_id = 10, - properties = #{}}, - payload = <<"payload">>}, - MsgFromPkt = emqx_packet:to_message(Pkt, <<"clientid">>, - #{protocol => mqtt, - username => <<"test">>, - peerhost => {127,0,0,1}}), - ?assertEqual(ExpectedMsg2, MsgFromPkt#message{id = emqx_message:id(ExpectedMsg), - timestamp = emqx_message:timestamp(ExpectedMsg) - }). + ExpectedMsg2 = emqx_message:set_headers( + #{ + peerhost => {127, 0, 0, 1}, + protocol => mqtt, + properties => #{}, + username => <<"test">> + }, + ExpectedMsg1 + ), + Pkt = #mqtt_packet{ + header = #mqtt_packet_header{ + type = ?PUBLISH, + qos = ?QOS_0, + retain = false, + dup = false + }, + variable = #mqtt_packet_publish{ + topic_name = <<"topic">>, + packet_id = 10, + properties = #{} + }, + payload = <<"payload">> + }, + MsgFromPkt = emqx_packet:to_message( + Pkt, + <<"clientid">>, + #{ + protocol => mqtt, + username => <<"test">>, + peerhost => {127, 0, 0, 1} + } + ), + ?assertEqual(ExpectedMsg2, MsgFromPkt#message{ + id = emqx_message:id(ExpectedMsg), + timestamp = emqx_message:timestamp(ExpectedMsg) + }). t_will_msg(_) -> ?assertEqual(undefined, emqx_packet:will_msg(#mqtt_packet_connect{will_flag = false})), - Pkt = #mqtt_packet_connect{will_flag = true, - clientid = <<"clientid">>, - username = "test", - will_retain = true, - will_qos = ?QOS_2, - will_topic = <<"topic">>, - will_props = #{}, - will_payload = <<"payload">> - }, + Pkt = #mqtt_packet_connect{ + will_flag = true, + clientid = <<"clientid">>, + username = "test", + will_retain = true, + will_qos = ?QOS_2, + will_topic = <<"topic">>, + will_props = #{}, + will_payload = <<"payload">> + }, Msg = emqx_packet:will_msg(Pkt), ?assertEqual(<<"clientid">>, Msg#message.from), ?assertEqual(<<"topic">>, Msg#message.topic), - Pkt2 = #mqtt_packet_connect{will_flag = true, - clientid = <<"clientid">>, - username = "test", - will_retain = true, - will_qos = ?QOS_2, - will_topic = <<"topic">>, - will_payload = <<"payload">> - }, + Pkt2 = #mqtt_packet_connect{ + will_flag = true, + clientid = <<"clientid">>, + username = "test", + will_retain = true, + will_qos = ?QOS_2, + will_topic = <<"topic">>, + will_payload = <<"payload">> + }, Msg2 = emqx_packet:will_msg(Pkt2), ?assertEqual(<<"clientid">>, Msg2#message.from), ?assertEqual(<<"topic">>, Msg2#message.topic). 
t_format(_) -> - io:format("~ts", [emqx_packet:format(#mqtt_packet{header = #mqtt_packet_header{type = ?CONNACK, retain = true, dup = 0}, variable = undefined})]), - io:format("~ts", [emqx_packet:format(#mqtt_packet{header = #mqtt_packet_header{type = ?CONNACK}, variable = 1, payload = <<"payload">>})]), - io:format("~ts", [emqx_packet:format(?CONNECT_PACKET(#mqtt_packet_connect{will_flag = true, - will_retain = true, - will_qos = ?QOS_2, - will_topic = <<"topic">>, - will_payload = <<"payload">>}))]), - io:format("~ts", [emqx_packet:format(?CONNECT_PACKET(#mqtt_packet_connect{password = password}))]), + io:format("~ts", [ + emqx_packet:format(#mqtt_packet{ + header = #mqtt_packet_header{type = ?CONNACK, retain = true, dup = 0}, + variable = undefined + }) + ]), + io:format("~ts", [ + emqx_packet:format(#mqtt_packet{ + header = #mqtt_packet_header{type = ?CONNACK}, variable = 1, payload = <<"payload">> + }) + ]), + io:format("~ts", [ + emqx_packet:format( + ?CONNECT_PACKET(#mqtt_packet_connect{ + will_flag = true, + will_retain = true, + will_qos = ?QOS_2, + will_topic = <<"topic">>, + will_payload = <<"payload">> + }) + ) + ]), + io:format("~ts", [ + emqx_packet:format(?CONNECT_PACKET(#mqtt_packet_connect{password = password})) + ]), io:format("~ts", [emqx_packet:format(?CONNACK_PACKET(?CONNACK_SERVER))]), io:format("~ts", [emqx_packet:format(?PUBLISH_PACKET(?QOS_1, 1))]), io:format("~ts", [emqx_packet:format(?PUBLISH_PACKET(?QOS_2, <<"topic">>, 10, <<"payload">>))]), io:format("~ts", [emqx_packet:format(?PUBACK_PACKET(?PUBACK, 98))]), io:format("~ts", [emqx_packet:format(?PUBREL_PACKET(99))]), - io:format("~ts", [emqx_packet:format(?SUBSCRIBE_PACKET(15, [{<<"topic">>, ?QOS_0}, {<<"topic1">>, ?QOS_1}]))]), + io:format("~ts", [ + emqx_packet:format(?SUBSCRIBE_PACKET(15, [{<<"topic">>, ?QOS_0}, {<<"topic1">>, ?QOS_1}])) + ]), io:format("~ts", [emqx_packet:format(?SUBACK_PACKET(40, [?QOS_0, ?QOS_1]))]), io:format("~ts", [emqx_packet:format(?UNSUBSCRIBE_PACKET(89, [<<"t">>, <<"t2">>]))]), io:format("~ts", [emqx_packet:format(?UNSUBACK_PACKET(90))]), diff --git a/apps/emqx/test/emqx_passwd_SUITE.erl b/apps/emqx/test/emqx_passwd_SUITE.erl index 8bd6e8834..a647fad26 100644 --- a/apps/emqx/test/emqx_passwd_SUITE.erl +++ b/apps/emqx/test/emqx_passwd_SUITE.erl @@ -38,18 +38,21 @@ t_hash_data(_) -> Password = <<"password">>, Password = emqx_passwd:hash_data(plain, Password), - <<"5f4dcc3b5aa765d61d8327deb882cf99">> - = emqx_passwd:hash_data(md5, Password), + <<"5f4dcc3b5aa765d61d8327deb882cf99">> = + emqx_passwd:hash_data(md5, Password), - <<"5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8">> - = emqx_passwd:hash_data(sha, Password), + <<"5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8">> = + emqx_passwd:hash_data(sha, Password), - <<"5e884898da28047151d0e56f8dc6292773603d0d6aabbdd62a11ef721d1542d8">> - = emqx_passwd:hash_data(sha256, Password), + <<"5e884898da28047151d0e56f8dc6292773603d0d6aabbdd62a11ef721d1542d8">> = + emqx_passwd:hash_data(sha256, Password), Sha512 = iolist_to_binary( - [<<"b109f3bbbc244eb82441917ed06d618b9008dd09b3befd1b5e07394c706a8bb9">>, - <<"80b1d7785e5976ec049b46df5f1326af5a2ea6d103fd07c95385ffab0cacbc86">>]), + [ + <<"b109f3bbbc244eb82441917ed06d618b9008dd09b3befd1b5e07394c706a8bb9">>, + <<"80b1d7785e5976ec049b46df5f1326af5a2ea6d103fd07c95385ffab0cacbc86">> + ] + ), Sha512 = emqx_passwd:hash_data(sha512, Password). 
@@ -74,8 +77,11 @@ t_hash(_) ->
     false = emqx_passwd:check_pass({sha256, Salt, suffix}, Sha256, WrongPassword),
 
     Sha512 = iolist_to_binary(
-        [<<"fa6a2185b3e0a9a85ef41ffb67ef3c1fb6f74980f8ebf970e4e72e353ed9537d">>,
-         <<"593083c201dfd6e43e1c8a7aac2bc8dbb119c7dfb7d4b8f131111395bd70e97f">>]),
+        [
+            <<"fa6a2185b3e0a9a85ef41ffb67ef3c1fb6f74980f8ebf970e4e72e353ed9537d">>,
+            <<"593083c201dfd6e43e1c8a7aac2bc8dbb119c7dfb7d4b8f131111395bd70e97f">>
+        ]
+    ),
     Sha512 = emqx_passwd:hash({sha512, Salt, suffix}, Password),
     true = emqx_passwd:check_pass({sha512, Salt, suffix}, Sha512, Password),
     false = emqx_passwd:check_pass({sha512, Salt, suffix}, Sha512, WrongPassword),
@@ -92,8 +98,10 @@ t_hash(_) ->
     BadDKlen = 1 bsl 32,
 
     Pbkdf2Salt = <<"ATHENA.MIT.EDUraeburn">>,
-    Pbkdf2 = <<"01dbee7f4a9e243e988b62c73cda935d"
-               "a05378b93244ec8f48a99e61ad799d86">>,
+    Pbkdf2 = <<
+        "01dbee7f4a9e243e988b62c73cda935d"
+        "a05378b93244ec8f48a99e61ad799d86"
+    >>,
     Pbkdf2 = emqx_passwd:hash({pbkdf2, sha, Pbkdf2Salt, 2, 32}, Password),
     true = emqx_passwd:check_pass({pbkdf2, sha, Pbkdf2Salt, 2, 32}, Pbkdf2, Password),
     false = emqx_passwd:check_pass({pbkdf2, sha, Pbkdf2Salt, 2, 32}, Pbkdf2, WrongPassword),
diff --git a/apps/emqx/test/emqx_pd_SUITE.erl b/apps/emqx/test/emqx_pd_SUITE.erl
index dc4185bb6..3c53e9012 100644
--- a/apps/emqx/test/emqx_pd_SUITE.erl
+++ b/apps/emqx/test/emqx_pd_SUITE.erl
@@ -30,4 +30,3 @@ t_update_counter(_) ->
     ?assertEqual(3, emqx_pd:get_counter(bytes)),
     ?assertEqual(3, emqx_pd:reset_counter(bytes)),
     ?assertEqual(0, emqx_pd:get_counter(bytes)).
-
diff --git a/apps/emqx/test/emqx_persistent_session_SUITE.erl b/apps/emqx/test/emqx_persistent_session_SUITE.erl
index 65393baf0..ba91409e8 100644
--- a/apps/emqx/test/emqx_persistent_session_SUITE.erl
+++ b/apps/emqx/test/emqx_persistent_session_SUITE.erl
@@ -30,8 +30,9 @@
 %%--------------------------------------------------------------------
 
 all() ->
-    [ {group, persistent_store_enabled}
-    , {group, persistent_store_disabled}
+    [
+        {group, persistent_store_enabled},
+        {group, persistent_store_disabled}
     ].
%% A persistent session can be resumed in two ways: @@ -48,31 +49,38 @@ all() -> groups() -> TCs = emqx_common_test_helpers:all(?MODULE), SnabbkaffeTCs = [TC || TC <- TCs, is_snabbkaffe_tc(TC)], - GCTests = [TC || TC <- TCs, is_gc_tc(TC)], + GCTests = [TC || TC <- TCs, is_gc_tc(TC)], OtherTCs = (TCs -- SnabbkaffeTCs) -- GCTests, - [ {persistent_store_enabled, [ {group, ram_tables} - , {group, disc_tables} - ]} - , {persistent_store_disabled, [ {group, no_kill_connection_process} - ]} - , { ram_tables, [], [ {group, no_kill_connection_process} - , {group, kill_connection_process} - , {group, snabbkaffe} - , {group, gc_tests}]} - , { disc_tables, [], [ {group, no_kill_connection_process} - , {group, kill_connection_process} - , {group, snabbkaffe} - , {group, gc_tests}]} - , {no_kill_connection_process, [], [{group, tcp}, {group, quic}, {group, ws}]} - , { kill_connection_process, [], [{group, tcp}, {group, quic}, {group, ws}]} - , {snabbkaffe, [], [{group, tcp_snabbkaffe}, {group, quic_snabbkaffe}, {group, ws_snabbkaffe}]} - , {tcp, [], OtherTCs} - , {quic, [], OtherTCs} - , {ws, [], OtherTCs} - , {tcp_snabbkaffe, [], SnabbkaffeTCs} - , {quic_snabbkaffe, [], SnabbkaffeTCs} - , {ws_snabbkaffe, [], SnabbkaffeTCs} - , {gc_tests, [], GCTests} + [ + {persistent_store_enabled, [ + {group, ram_tables}, + {group, disc_tables} + ]}, + {persistent_store_disabled, [{group, no_kill_connection_process}]}, + {ram_tables, [], [ + {group, no_kill_connection_process}, + {group, kill_connection_process}, + {group, snabbkaffe}, + {group, gc_tests} + ]}, + {disc_tables, [], [ + {group, no_kill_connection_process}, + {group, kill_connection_process}, + {group, snabbkaffe}, + {group, gc_tests} + ]}, + {no_kill_connection_process, [], [{group, tcp}, {group, quic}, {group, ws}]}, + {kill_connection_process, [], [{group, tcp}, {group, quic}, {group, ws}]}, + {snabbkaffe, [], [ + {group, tcp_snabbkaffe}, {group, quic_snabbkaffe}, {group, ws_snabbkaffe} + ]}, + {tcp, [], OtherTCs}, + {quic, [], OtherTCs}, + {ws, [], OtherTCs}, + {tcp_snabbkaffe, [], SnabbkaffeTCs}, + {quic_snabbkaffe, [], SnabbkaffeTCs}, + {ws_snabbkaffe, [], SnabbkaffeTCs}, + {gc_tests, [], GCTests} ]. is_snabbkaffe_tc(TC) -> @@ -82,19 +90,21 @@ is_gc_tc(TC) -> re:run(atom_to_list(TC), "^t_gc_") /= nomatch. 
init_per_group(persistent_store_enabled, Config) -> - [{persistent_store_enabled, true}|Config]; + [{persistent_store_enabled, true} | Config]; init_per_group(Group, Config) when Group =:= ram_tables; Group =:= disc_tables -> %% Start Apps - Reply = case Group =:= ram_tables of - true -> ram; - false -> disc - end, + Reply = + case Group =:= ram_tables of + true -> ram; + false -> disc + end, emqx_common_test_helpers:boot_modules(all), meck:new(emqx_config, [non_strict, passthrough, no_history, no_link]), - meck:expect(emqx_config, get, fun(?storage_type_key) -> Reply; - (?is_enabled_key) -> true; - (Other) -> meck:passthrough([Other]) - end), + meck:expect(emqx_config, get, fun + (?storage_type_key) -> Reply; + (?is_enabled_key) -> true; + (Other) -> meck:passthrough([Other]) + end), emqx_common_test_helpers:start_apps([], fun set_special_confs/1), ?assertEqual(true, emqx_persistent_session:is_store_enabled()), ?assertEqual(Reply, emqx_persistent_session:storage_type()), @@ -103,58 +113,70 @@ init_per_group(persistent_store_disabled, Config) -> %% Start Apps emqx_common_test_helpers:boot_modules(all), meck:new(emqx_config, [non_strict, passthrough, no_history, no_link]), - meck:expect(emqx_config, get, fun(?is_enabled_key) -> false; - (Other) -> meck:passthrough([Other]) - end), + meck:expect(emqx_config, get, fun + (?is_enabled_key) -> false; + (Other) -> meck:passthrough([Other]) + end), emqx_common_test_helpers:start_apps([], fun set_special_confs/1), ?assertEqual(false, emqx_persistent_session:is_store_enabled()), - [{persistent_store_enabled, false}|Config]; + [{persistent_store_enabled, false} | Config]; init_per_group(Group, Config) when Group == ws; Group == ws_snabbkaffe -> - [{ssl,false}, - {host,"localhost"}, - {enable_websocket,true}, - {port, 8083}, - {conn_fun, ws_connect}| Config]; + [ + {ssl, false}, + {host, "localhost"}, + {enable_websocket, true}, + {port, 8083}, + {conn_fun, ws_connect} + | Config + ]; init_per_group(Group, Config) when Group == tcp; Group == tcp_snabbkaffe -> - [ {port, 1883}, {conn_fun, connect}| Config]; + [{port, 1883}, {conn_fun, connect} | Config]; init_per_group(Group, Config) when Group == quic; Group == quic_snabbkaffe -> - [ {port, 14567}, {conn_fun, quic_connect} | Config]; + [{port, 14567}, {conn_fun, quic_connect} | Config]; init_per_group(no_kill_connection_process, Config) -> - [ {kill_connection_process, false} | Config]; + [{kill_connection_process, false} | Config]; init_per_group(kill_connection_process, Config) -> - [ {kill_connection_process, true} | Config]; + [{kill_connection_process, true} | Config]; init_per_group(snabbkaffe, Config) -> - [ {kill_connection_process, true} | Config]; + [{kill_connection_process, true} | Config]; init_per_group(gc_tests, Config) -> %% We need to make sure the system does not interfere with this test group. 
- lists:foreach(fun(ClientId) -> - maybe_kill_connection_process(ClientId, [{kill_connection_process, true}]) - end, emqx_cm:all_client_ids()), + lists:foreach( + fun(ClientId) -> + maybe_kill_connection_process(ClientId, [{kill_connection_process, true}]) + end, + emqx_cm:all_client_ids() + ), emqx_common_test_helpers:stop_apps([]), SessionMsgEts = gc_tests_session_store, MsgEts = gc_tests_msg_store, Pid = spawn(fun() -> - ets:new(SessionMsgEts, [named_table, public, ordered_set]), - ets:new(MsgEts, [named_table, public, ordered_set, {keypos, 2}]), - receive stop -> ok end - end), + ets:new(SessionMsgEts, [named_table, public, ordered_set]), + ets:new(MsgEts, [named_table, public, ordered_set, {keypos, 2}]), + receive + stop -> ok + end + end), meck:new(mnesia, [non_strict, passthrough, no_history, no_link]), - meck:expect(mnesia, dirty_first, fun(?SESS_MSG_TAB_RAM) -> ets:first(SessionMsgEts); - (?SESS_MSG_TAB_DISC) -> ets:first(SessionMsgEts); - (?MSG_TAB_RAM) -> ets:first(MsgEts); - (?MSG_TAB_DISC) -> ets:first(MsgEts); - (X) -> meck:passthrough([X]) - end), - meck:expect(mnesia, dirty_next, fun(?SESS_MSG_TAB_RAM, X) -> ets:next(SessionMsgEts, X); - (?SESS_MSG_TAB_DISC, X) -> ets:next(SessionMsgEts, X); - (?MSG_TAB_RAM, X) -> ets:next(MsgEts, X); - (?MSG_TAB_DISC, X) -> ets:next(MsgEts, X); - (Tab, X) -> meck:passthrough([Tab, X]) - end), - meck:expect(mnesia, dirty_delete, fun(?MSG_TAB_RAM, X) -> ets:delete(MsgEts, X); - (?MSG_TAB_DISC, X) -> ets:delete(MsgEts, X); - (Tab, X) -> meck:passthrough([Tab, X]) - end), + meck:expect(mnesia, dirty_first, fun + (?SESS_MSG_TAB_RAM) -> ets:first(SessionMsgEts); + (?SESS_MSG_TAB_DISC) -> ets:first(SessionMsgEts); + (?MSG_TAB_RAM) -> ets:first(MsgEts); + (?MSG_TAB_DISC) -> ets:first(MsgEts); + (X) -> meck:passthrough([X]) + end), + meck:expect(mnesia, dirty_next, fun + (?SESS_MSG_TAB_RAM, X) -> ets:next(SessionMsgEts, X); + (?SESS_MSG_TAB_DISC, X) -> ets:next(SessionMsgEts, X); + (?MSG_TAB_RAM, X) -> ets:next(MsgEts, X); + (?MSG_TAB_DISC, X) -> ets:next(MsgEts, X); + (Tab, X) -> meck:passthrough([Tab, X]) + end), + meck:expect(mnesia, dirty_delete, fun + (?MSG_TAB_RAM, X) -> ets:delete(MsgEts, X); + (?MSG_TAB_DISC, X) -> ets:delete(MsgEts, X); + (Tab, X) -> meck:passthrough([Tab, X]) + end), [{store_owner, Pid}, {session_msg_store, SessionMsgEts}, {msg_store, MsgEts} | Config]. init_per_suite(Config) -> @@ -196,7 +218,7 @@ init_per_testcase(TestCase, Config) -> end_per_testcase(TestCase, Config) -> case is_snabbkaffe_tc(TestCase) of - true -> snabbkaffe:stop(); + true -> snabbkaffe:stop(); false -> skip end, case erlang:function_exported(?MODULE, TestCase, 2) of @@ -210,20 +232,23 @@ preconfig_per_testcase(TestCase, Config) -> case ?config(tc_group_properties, Config) of [] -> %% We are running a single testcase - {atom_to_binary(TestCase), - init_per_group(tcp, init_per_group(kill_connection_process, Config))}; - [_|_] = Props-> + { + atom_to_binary(TestCase), + init_per_group(tcp, init_per_group(kill_connection_process, Config)) + }; + [_ | _] = Props -> Path = lists:reverse(?config(tc_group_path, Config) ++ Props), Pre0 = [atom_to_list(N) || {name, N} <- lists:flatten(Path)], Pre1 = lists:join("_", Pre0 ++ [atom_to_binary(TestCase)]), - {iolist_to_binary(Pre1), - Config} + {iolist_to_binary(Pre1), Config} end, - [ {topic, iolist_to_binary([BaseName, "/foo"])} - , {stopic, iolist_to_binary([BaseName, "/+"])} - , {stopic_alt, iolist_to_binary([BaseName, "/foo"])} - , {client_id, BaseName} - | Config1]. 
+ [ + {topic, iolist_to_binary([BaseName, "/foo"])}, + {stopic, iolist_to_binary([BaseName, "/+"])}, + {stopic_alt, iolist_to_binary([BaseName, "/foo"])}, + {client_id, BaseName} + | Config1 + ]. %%-------------------------------------------------------------------- %% Helpers @@ -240,7 +265,7 @@ receive_messages(0, Msgs) -> receive_messages(Count, Msgs) -> receive {publish, Msg} -> - receive_messages(Count-1, [Msg|Msgs]); + receive_messages(Count - 1, [Msg | Msgs]); _Other -> receive_messages(Count, Msgs) after 5000 -> @@ -257,7 +282,8 @@ maybe_kill_connection_process(ClientId, Config) -> ?assert(is_pid(ConnectionPid)), Ref = monitor(process, ConnectionPid), ConnectionPid ! die_if_test, - receive {'DOWN', Ref, process, ConnectionPid, normal} -> ok + receive + {'DOWN', Ref, process, ConnectionPid, normal} -> ok after 3000 -> error(process_did_not_die) end, wait_for_cm_unregister(ClientId) @@ -273,20 +299,25 @@ wait_for_cm_unregister(_ClientId, 0) -> error(cm_did_not_unregister); wait_for_cm_unregister(ClientId, N) -> case emqx_cm:lookup_channels(ClientId) of - [] -> ok; - [_] -> timer:sleep(100), wait_for_cm_unregister(ClientId, N - 1) + [] -> + ok; + [_] -> + timer:sleep(100), + wait_for_cm_unregister(ClientId, N - 1) end. snabbkaffe_sync_publish(Topic, Payloads) -> Fun = fun(Client, Payload) -> - ?check_trace( - begin - ?wait_async_action( {ok, _} = emqtt:publish(Client, Topic, Payload, 2) - , #{?snk_kind := ps_persist_msg, payload := Payload} - ) - end, - fun(_, _Trace) -> ok end) - end, + ?check_trace( + begin + ?wait_async_action( + {ok, _} = emqtt:publish(Client, Topic, Payload, 2), + #{?snk_kind := ps_persist_msg, payload := Payload} + ) + end, + fun(_, _Trace) -> ok end + ) + end, do_publish(Payloads, Fun, true). publish(Topic, Payloads) -> @@ -294,42 +325,46 @@ publish(Topic, Payloads) -> publish(Topic, Payloads, WaitForUnregister) -> Fun = fun(Client, Payload) -> - {ok, _} = emqtt:publish(Client, Topic, Payload, 2) - end, + {ok, _} = emqtt:publish(Client, Topic, Payload, 2) + end, do_publish(Payloads, Fun, WaitForUnregister). -do_publish(Payloads = [_|_], PublishFun, WaitForUnregister) -> +do_publish(Payloads = [_ | _], PublishFun, WaitForUnregister) -> %% Publish from another process to avoid connection confusion. {Pid, Ref} = spawn_monitor( - fun() -> - %% For convenience, always publish using tcp. - %% The publish path is not what we are testing. - ClientID = <<"ps_SUITE_publisher">>, - {ok, Client} = emqtt:start_link([ {proto_ver, v5} - , {clientid, ClientID} - , {port, 1883} ]), - {ok, _} = emqtt:connect(Client), - lists:foreach(fun(Payload) -> PublishFun(Client, Payload) end, Payloads), - ok = emqtt:disconnect(Client), - %% Snabbkaffe sometimes fails unless all processes are gone. - case WaitForUnregister of - false -> - ok; - true -> - case emqx_cm:lookup_channels(ClientID) of - [] -> - ok; - [ConnectionPid] -> - ?assert(is_pid(ConnectionPid)), - Ref1 = monitor(process, ConnectionPid), - receive {'DOWN', Ref1, process, ConnectionPid, _} -> ok - after 3000 -> error(process_did_not_die) - end, - wait_for_cm_unregister(ClientID) - end - end - end), + fun() -> + %% For convenience, always publish using tcp. + %% The publish path is not what we are testing. 
+ ClientID = <<"ps_SUITE_publisher">>, + {ok, Client} = emqtt:start_link([ + {proto_ver, v5}, + {clientid, ClientID}, + {port, 1883} + ]), + {ok, _} = emqtt:connect(Client), + lists:foreach(fun(Payload) -> PublishFun(Client, Payload) end, Payloads), + ok = emqtt:disconnect(Client), + %% Snabbkaffe sometimes fails unless all processes are gone. + case WaitForUnregister of + false -> + ok; + true -> + case emqx_cm:lookup_channels(ClientID) of + [] -> + ok; + [ConnectionPid] -> + ?assert(is_pid(ConnectionPid)), + Ref1 = monitor(process, ConnectionPid), + receive + {'DOWN', Ref1, process, ConnectionPid, _} -> ok + after 3000 -> error(process_did_not_die) + end, + wait_for_cm_unregister(ClientID) + end + end + end + ), receive {'DOWN', Ref, process, Pid, normal} -> ok; {'DOWN', Ref, process, Pid, What} -> error({failed_publish, What}) @@ -349,10 +384,12 @@ t_connect_session_expiry_interval(Config) -> Payload = <<"test message">>, ClientId = ?config(client_id, Config), - {ok, Client1} = emqtt:start_link([ {clientid, ClientId}, - {proto_ver, v5}, - {properties, #{'Session-Expiry-Interval' => 30}} - | Config]), + {ok, Client1} = emqtt:start_link([ + {clientid, ClientId}, + {proto_ver, v5}, + {properties, #{'Session-Expiry-Interval' => 30}} + | Config + ]), {ok, _} = emqtt:ConnFun(Client1), {ok, _, [2]} = emqtt:subscribe(Client1, STopic, qos2), ok = emqtt:disconnect(Client1), @@ -361,34 +398,41 @@ t_connect_session_expiry_interval(Config) -> publish(Topic, Payload), - {ok, Client2} = emqtt:start_link([ {clientid, ClientId}, - {proto_ver, v5}, - {properties, #{'Session-Expiry-Interval' => 30}}, - {clean_start, false} - | Config]), + {ok, Client2} = emqtt:start_link([ + {clientid, ClientId}, + {proto_ver, v5}, + {properties, #{'Session-Expiry-Interval' => 30}}, + {clean_start, false} + | Config + ]), {ok, _} = emqtt:ConnFun(Client2), - [Msg | _ ] = receive_messages(1), + [Msg | _] = receive_messages(1), ?assertEqual({ok, iolist_to_binary(Topic)}, maps:find(topic, Msg)), ?assertEqual({ok, iolist_to_binary(Payload)}, maps:find(payload, Msg)), ?assertEqual({ok, 2}, maps:find(qos, Msg)), ok = emqtt:disconnect(Client2). t_without_client_id(Config) -> - process_flag(trap_exit, true), %% Emqtt client dies + %% Emqtt client dies + process_flag(trap_exit, true), ConnFun = ?config(conn_fun, Config), - {ok, Client0} = emqtt:start_link([ {proto_ver, v5}, - {properties, #{'Session-Expiry-Interval' => 30}}, - {clean_start, false} - | Config]), + {ok, Client0} = emqtt:start_link([ + {proto_ver, v5}, + {properties, #{'Session-Expiry-Interval' => 30}}, + {clean_start, false} + | Config + ]), {error, {client_identifier_not_valid, _}} = emqtt:ConnFun(Client0), ok. 
t_assigned_clientid_persistent_session(Config) -> ConnFun = ?config(conn_fun, Config), - {ok, Client1} = emqtt:start_link([ {proto_ver, v5}, - {properties, #{'Session-Expiry-Interval' => 30}}, - {clean_start, true} - | Config]), + {ok, Client1} = emqtt:start_link([ + {proto_ver, v5}, + {properties, #{'Session-Expiry-Interval' => 30}}, + {clean_start, true} + | Config + ]), {ok, _} = emqtt:ConnFun(Client1), AssignedClientId = client_info(clientid, Client1), @@ -396,10 +440,12 @@ t_assigned_clientid_persistent_session(Config) -> maybe_kill_connection_process(AssignedClientId, Config), - {ok, Client2} = emqtt:start_link([ {clientid, AssignedClientId}, - {proto_ver, v5}, - {clean_start, false} - | Config]), + {ok, Client2} = emqtt:start_link([ + {clientid, AssignedClientId}, + {proto_ver, v5}, + {clean_start, false} + | Config + ]), {ok, _} = emqtt:ConnFun(Client2), ?assertEqual(1, client_info(session_present, Client2)), ok = emqtt:disconnect(Client2). @@ -410,21 +456,25 @@ t_cancel_on_disconnect(Config) -> ConnFun = ?config(conn_fun, Config), ClientId = ?config(client_id, Config), - {ok, Client1} = emqtt:start_link([ {proto_ver, v5}, - {clientid, ClientId}, - {properties, #{'Session-Expiry-Interval' => 30}}, - {clean_start, true} - | Config]), + {ok, Client1} = emqtt:start_link([ + {proto_ver, v5}, + {clientid, ClientId}, + {properties, #{'Session-Expiry-Interval' => 30}}, + {clean_start, true} + | Config + ]), {ok, _} = emqtt:ConnFun(Client1), ok = emqtt:disconnect(Client1, 0, #{'Session-Expiry-Interval' => 0}), wait_for_cm_unregister(ClientId), - {ok, Client2} = emqtt:start_link([ {clientid, ClientId}, - {proto_ver, v5}, - {clean_start, false}, - {properties, #{'Session-Expiry-Interval' => 30}} - | Config]), + {ok, Client2} = emqtt:start_link([ + {clientid, ClientId}, + {proto_ver, v5}, + {clean_start, false}, + {properties, #{'Session-Expiry-Interval' => 30}} + | Config + ]), {ok, _} = emqtt:ConnFun(Client2), ?assertEqual(0, client_info(session_present, Client2)), ok = emqtt:disconnect(Client2). @@ -436,11 +486,13 @@ t_persist_on_disconnect(Config) -> ConnFun = ?config(conn_fun, Config), ClientId = ?config(client_id, Config), - {ok, Client1} = emqtt:start_link([ {proto_ver, v5}, - {clientid, ClientId}, - {properties, #{'Session-Expiry-Interval' => 0}}, - {clean_start, true} - | Config]), + {ok, Client1} = emqtt:start_link([ + {proto_ver, v5}, + {clientid, ClientId}, + {properties, #{'Session-Expiry-Interval' => 0}}, + {clean_start, true} + | Config + ]), {ok, _} = emqtt:ConnFun(Client1), %% Strangely enough, the disconnect is reported as successful by emqtt. @@ -448,11 +500,13 @@ t_persist_on_disconnect(Config) -> wait_for_cm_unregister(ClientId), - {ok, Client2} = emqtt:start_link([ {clientid, ClientId}, - {proto_ver, v5}, - {clean_start, false}, - {properties, #{'Session-Expiry-Interval' => 30}} - | Config]), + {ok, Client2} = emqtt:start_link([ + {clientid, ClientId}, + {proto_ver, v5}, + {clean_start, false}, + {properties, #{'Session-Expiry-Interval' => 30}} + | Config + ]), {ok, _} = emqtt:ConnFun(Client2), %% The session should not be known, since it wasn't persisted because of the %% changed expiry interval in the disconnect call. @@ -466,8 +520,11 @@ wait_for_pending(_SId, 0) -> error(exhausted_wait_for_pending); wait_for_pending(SId, N) -> case emqx_persistent_session:pending(SId) of - [] -> timer:sleep(1), wait_for_pending(SId, N - 1); - [_|_] = Pending -> Pending + [] -> + timer:sleep(1), + wait_for_pending(SId, N - 1); + [_ | _] = Pending -> + Pending end. 
t_process_dies_session_expires(Config) -> @@ -479,11 +536,13 @@ t_process_dies_session_expires(Config) -> Topic = ?config(topic, Config), STopic = ?config(stopic, Config), Payload = <<"test">>, - {ok, Client1} = emqtt:start_link([ {proto_ver, v5}, - {clientid, ClientId}, - {properties, #{'Session-Expiry-Interval' => 1}}, - {clean_start, true} - | Config]), + {ok, Client1} = emqtt:start_link([ + {proto_ver, v5}, + {clientid, ClientId}, + {properties, #{'Session-Expiry-Interval' => 1}}, + {clean_start, true} + | Config + ]), {ok, _} = emqtt:ConnFun(Client1), {ok, _, [2]} = emqtt:subscribe(Client1, STopic, qos2), ok = emqtt:disconnect(Client1), @@ -494,7 +553,8 @@ t_process_dies_session_expires(Config) -> SessionId = case ?config(persistent_store_enabled, Config) of - false -> undefined; + false -> + undefined; true -> %% The session should not be marked as expired. {Tag, Session} = emqx_persistent_session:lookup(ClientId), @@ -513,22 +573,28 @@ t_process_dies_session_expires(Config) -> timer:sleep(1100), %% The session should now be marked as expired. - case (?config(kill_connection_process, Config) andalso - ?config(persistent_store_enabled, Config)) of - true -> ?assertMatch({expired, _}, emqx_persistent_session:lookup(ClientId)); + case + (?config(kill_connection_process, Config) andalso + ?config(persistent_store_enabled, Config)) + of + true -> ?assertMatch({expired, _}, emqx_persistent_session:lookup(ClientId)); false -> skip end, - {ok, Client2} = emqtt:start_link([ {proto_ver, v5}, - {clientid, ClientId}, - {properties, #{'Session-Expiry-Interval' => 30}}, - {clean_start, false} - | Config]), + {ok, Client2} = emqtt:start_link([ + {proto_ver, v5}, + {clientid, ClientId}, + {properties, #{'Session-Expiry-Interval' => 30}}, + {clean_start, false} + | Config + ]), {ok, _} = emqtt:ConnFun(Client2), ?assertEqual(0, client_info(session_present, Client2)), - case (?config(kill_connection_process, Config) andalso - ?config(persistent_store_enabled, Config)) of + case + (?config(kill_connection_process, Config) andalso + ?config(persistent_store_enabled, Config)) + of true -> %% The session should be a fresh one {persistent, NewSession} = emqx_persistent_session:lookup(ClientId), @@ -554,11 +620,13 @@ t_publish_while_client_is_gone(Config) -> Payload1 = <<"hello1">>, Payload2 = <<"hello2">>, ClientId = ?config(client_id, Config), - {ok, Client1} = emqtt:start_link([ {proto_ver, v5}, - {clientid, ClientId}, - {properties, #{'Session-Expiry-Interval' => 30}}, - {clean_start, true} - | Config]), + {ok, Client1} = emqtt:start_link([ + {proto_ver, v5}, + {clientid, ClientId}, + {properties, #{'Session-Expiry-Interval' => 30}}, + {clean_start, true} + | Config + ]), {ok, _} = emqtt:ConnFun(Client1), {ok, _, [2]} = emqtt:subscribe(Client1, STopic, qos2), @@ -567,11 +635,13 @@ t_publish_while_client_is_gone(Config) -> ok = publish(Topic, [Payload1, Payload2]), - {ok, Client2} = emqtt:start_link([ {proto_ver, v5}, - {clientid, ClientId}, - {properties, #{'Session-Expiry-Interval' => 30}}, - {clean_start, false} - | Config]), + {ok, Client2} = emqtt:start_link([ + {proto_ver, v5}, + {clientid, ClientId}, + {properties, #{'Session-Expiry-Interval' => 30}}, + {clean_start, false} + | Config + ]), {ok, _} = emqtt:ConnFun(Client2), Msgs = receive_messages(2), ?assertMatch([_, _], Msgs), @@ -600,11 +670,13 @@ t_clean_start_drops_subscriptions(Config) -> ClientId = ?config(client_id, Config), %% 1. 
- {ok, Client1} = emqtt:start_link([ {proto_ver, v5}, - {clientid, ClientId}, - {properties, #{'Session-Expiry-Interval' => 30}}, - {clean_start, true} - | Config]), + {ok, Client1} = emqtt:start_link([ + {proto_ver, v5}, + {clientid, ClientId}, + {properties, #{'Session-Expiry-Interval' => 30}}, + {clean_start, true} + | Config + ]), {ok, _} = emqtt:ConnFun(Client1), {ok, _, [2]} = emqtt:subscribe(Client1, STopic, qos2), @@ -615,11 +687,13 @@ t_clean_start_drops_subscriptions(Config) -> ok = publish(Topic, Payload1), %% 3. - {ok, Client2} = emqtt:start_link([ {proto_ver, v5}, - {clientid, ClientId}, - {properties, #{'Session-Expiry-Interval' => 30}}, - {clean_start, true} - | Config]), + {ok, Client2} = emqtt:start_link([ + {proto_ver, v5}, + {clientid, ClientId}, + {properties, #{'Session-Expiry-Interval' => 30}}, + {clean_start, true} + | Config + ]), {ok, _} = emqtt:ConnFun(Client2), ?assertEqual(0, client_info(session_present, Client2)), {ok, _, [2]} = emqtt:subscribe(Client2, STopic, qos2), @@ -632,11 +706,13 @@ t_clean_start_drops_subscriptions(Config) -> maybe_kill_connection_process(ClientId, Config), %% 4. - {ok, Client3} = emqtt:start_link([ {proto_ver, v5}, - {clientid, ClientId}, - {properties, #{'Session-Expiry-Interval' => 30}}, - {clean_start, false} - | Config]), + {ok, Client3} = emqtt:start_link([ + {proto_ver, v5}, + {clientid, ClientId}, + {properties, #{'Session-Expiry-Interval' => 30}}, + {clean_start, false} + | Config + ]), {ok, _} = emqtt:ConnFun(Client3), ok = publish(Topic, Payload3), @@ -650,10 +726,12 @@ t_unsubscribe(Config) -> Topic = ?config(topic, Config), STopic = ?config(stopic, Config), ClientId = ?config(client_id, Config), - {ok, Client} = emqtt:start_link([ {clientid, ClientId}, - {proto_ver, v5}, - {properties, #{'Session-Expiry-Interval' => 30}} - | Config]), + {ok, Client} = emqtt:start_link([ + {clientid, ClientId}, + {proto_ver, v5}, + {properties, #{'Session-Expiry-Interval' => 30}} + | Config + ]), {ok, _} = emqtt:ConnFun(Client), {ok, _, [2]} = emqtt:subscribe(Client, STopic, qos2), case emqx_persistent_session:is_store_enabled() of @@ -682,10 +760,12 @@ t_multiple_subscription_matches(Config) -> Payload = <<"test message">>, ClientId = ?config(client_id, Config), - {ok, Client1} = emqtt:start_link([ {clientid, ClientId}, - {proto_ver, v5}, - {properties, #{'Session-Expiry-Interval' => 30}} - | Config]), + {ok, Client1} = emqtt:start_link([ + {clientid, ClientId}, + {proto_ver, v5}, + {properties, #{'Session-Expiry-Interval' => 30}} + | Config + ]), {ok, _} = emqtt:ConnFun(Client1), {ok, _, [2]} = emqtt:subscribe(Client1, STopic1, qos2), {ok, _, [2]} = emqtt:subscribe(Client1, STopic2, qos2), @@ -695,11 +775,13 @@ t_multiple_subscription_matches(Config) -> publish(Topic, Payload), - {ok, Client2} = emqtt:start_link([ {clientid, ClientId}, - {proto_ver, v5}, - {properties, #{'Session-Expiry-Interval' => 30}}, - {clean_start, false} - | Config]), + {ok, Client2} = emqtt:start_link([ + {clientid, ClientId}, + {proto_ver, v5}, + {properties, #{'Session-Expiry-Interval' => 30}}, + {clean_start, false} + | Config + ]), {ok, _} = emqtt:ConnFun(Client2), %% We will receive the same message twice because it matches two subscriptions. @@ -713,14 +795,17 @@ t_multiple_subscription_matches(Config) -> ok = emqtt:disconnect(Client2). 
t_lost_messages_because_of_gc(init, Config) -> - case (emqx_persistent_session:is_store_enabled() - andalso ?config(kill_connection_process, Config)) of - true -> + case + (emqx_persistent_session:is_store_enabled() andalso + ?config(kill_connection_process, Config)) + of + true -> Retain = 1000, OldRetain = emqx_config:get(?msg_retain, Retain), emqx_config:put(?msg_retain, Retain), - [{retain, Retain}, {old_retain, OldRetain}|Config]; - false -> {skip, only_relevant_with_store_and_kill_process} + [{retain, Retain}, {old_retain, OldRetain} | Config]; + false -> + {skip, only_relevant_with_store_and_kill_process} end; t_lost_messages_because_of_gc('end', Config) -> OldRetain = ?config(old_retain, Config), @@ -735,10 +820,12 @@ t_lost_messages_because_of_gc(Config) -> Retain = ?config(retain, Config), Payload1 = <<"hello1">>, Payload2 = <<"hello2">>, - {ok, Client1} = emqtt:start_link([ {clientid, ClientId}, - {proto_ver, v5}, - {properties, #{'Session-Expiry-Interval' => 30}} - | Config]), + {ok, Client1} = emqtt:start_link([ + {clientid, ClientId}, + {proto_ver, v5}, + {properties, #{'Session-Expiry-Interval' => 30}} + | Config + ]), {ok, _} = emqtt:ConnFun(Client1), {ok, _, [2]} = emqtt:subscribe(Client1, STopic, qos2), emqtt:disconnect(Client1), @@ -747,11 +834,13 @@ t_lost_messages_because_of_gc(Config) -> timer:sleep(2 * Retain), publish(Topic, Payload2), emqx_persistent_session_gc:message_gc_worker(), - {ok, Client2} = emqtt:start_link([ {clientid, ClientId}, - {clean_start, false}, - {proto_ver, v5}, - {properties, #{'Session-Expiry-Interval' => 30}} - | Config]), + {ok, Client2} = emqtt:start_link([ + {clientid, ClientId}, + {clean_start, false}, + {proto_ver, v5}, + {properties, #{'Session-Expiry-Interval' => 30}} + | Config + ]), {ok, _} = emqtt:ConnFun(Client2), Msgs = receive_messages(2), ?assertMatch([_], Msgs), @@ -759,58 +848,104 @@ t_lost_messages_because_of_gc(Config) -> emqtt:disconnect(Client2), ok. 
- %%-------------------------------------------------------------------- %% Snabbkaffe helpers %%-------------------------------------------------------------------- check_snabbkaffe_vanilla(Trace) -> - ResumeTrace = [T || #{?snk_kind := K} = T <- Trace, - re:run(to_list(K), "^ps_") /= nomatch], - ?assertMatch([_|_], ResumeTrace), + ResumeTrace = [ + T + || #{?snk_kind := K} = T <- Trace, + re:run(to_list(K), "^ps_") /= nomatch + ], + ?assertMatch([_ | _], ResumeTrace), [_Sid] = lists:usort(?projection(sid, ResumeTrace)), %% Check internal flow of the emqx_cm resuming - ?assert(?strict_causality(#{ ?snk_kind := ps_resuming }, - #{ ?snk_kind := ps_initial_pendings }, - ResumeTrace)), - ?assert(?strict_causality(#{ ?snk_kind := ps_initial_pendings }, - #{ ?snk_kind := ps_persist_pendings }, - ResumeTrace)), - ?assert(?strict_causality(#{ ?snk_kind := ps_persist_pendings }, - #{ ?snk_kind := ps_notify_writers }, - ResumeTrace)), - ?assert(?strict_causality(#{ ?snk_kind := ps_notify_writers }, - #{ ?snk_kind := ps_node_markers }, - ResumeTrace)), - ?assert(?strict_causality(#{ ?snk_kind := ps_node_markers }, - #{ ?snk_kind := ps_resume_session }, - ResumeTrace)), - ?assert(?strict_causality(#{ ?snk_kind := ps_resume_session }, - #{ ?snk_kind := ps_marker_pendings }, - ResumeTrace)), - ?assert(?strict_causality(#{ ?snk_kind := ps_marker_pendings }, - #{ ?snk_kind := ps_marker_pendings_msgs }, - ResumeTrace)), - ?assert(?strict_causality(#{ ?snk_kind := ps_marker_pendings_msgs }, - #{ ?snk_kind := ps_resume_end }, - ResumeTrace)), + ?assert( + ?strict_causality( + #{?snk_kind := ps_resuming}, + #{?snk_kind := ps_initial_pendings}, + ResumeTrace + ) + ), + ?assert( + ?strict_causality( + #{?snk_kind := ps_initial_pendings}, + #{?snk_kind := ps_persist_pendings}, + ResumeTrace + ) + ), + ?assert( + ?strict_causality( + #{?snk_kind := ps_persist_pendings}, + #{?snk_kind := ps_notify_writers}, + ResumeTrace + ) + ), + ?assert( + ?strict_causality( + #{?snk_kind := ps_notify_writers}, + #{?snk_kind := ps_node_markers}, + ResumeTrace + ) + ), + ?assert( + ?strict_causality( + #{?snk_kind := ps_node_markers}, + #{?snk_kind := ps_resume_session}, + ResumeTrace + ) + ), + ?assert( + ?strict_causality( + #{?snk_kind := ps_resume_session}, + #{?snk_kind := ps_marker_pendings}, + ResumeTrace + ) + ), + ?assert( + ?strict_causality( + #{?snk_kind := ps_marker_pendings}, + #{?snk_kind := ps_marker_pendings_msgs}, + ResumeTrace + ) + ), + ?assert( + ?strict_causality( + #{?snk_kind := ps_marker_pendings_msgs}, + #{?snk_kind := ps_resume_end}, + ResumeTrace + ) + ), %% Check flow between worker and emqx_cm - ?assert(?strict_causality(#{ ?snk_kind := ps_notify_writers }, - #{ ?snk_kind := ps_worker_started }, - ResumeTrace)), - ?assert(?strict_causality(#{ ?snk_kind := ps_marker_pendings }, - #{ ?snk_kind := ps_worker_resume_end }, - ResumeTrace)), - ?assert(?strict_causality(#{ ?snk_kind := ps_worker_resume_end }, - #{ ?snk_kind := ps_worker_shutdown }, - ResumeTrace)), + ?assert( + ?strict_causality( + #{?snk_kind := ps_notify_writers}, + #{?snk_kind := ps_worker_started}, + ResumeTrace + ) + ), + ?assert( + ?strict_causality( + #{?snk_kind := ps_marker_pendings}, + #{?snk_kind := ps_worker_resume_end}, + ResumeTrace + ) + ), + ?assert( + ?strict_causality( + #{?snk_kind := ps_worker_resume_end}, + #{?snk_kind := ps_worker_shutdown}, + ResumeTrace + ) + ), [Markers] = ?projection(markers, ?of_kind(ps_node_markers, Trace)), ?assertMatch([_], Markers). 
-to_list(L) when is_list(L) -> L; -to_list(A) when is_atom(A) -> atom_to_list(A); +to_list(L) when is_list(L) -> L; +to_list(A) when is_atom(A) -> atom_to_list(A); to_list(B) when is_binary(B) -> binary_to_list(B). %%-------------------------------------------------------------------- @@ -821,24 +956,27 @@ t_snabbkaffe_vanilla_stages(Config) -> %% Test that all stages of session resume works ok in the simplest case ConnFun = ?config(conn_fun, Config), ClientId = ?config(client_id, Config), - EmqttOpts = [ {proto_ver, v5}, - {clientid, ClientId}, - {properties, #{'Session-Expiry-Interval' => 30}} - | Config], - {ok, Client1} = emqtt:start_link([{clean_start, true}|EmqttOpts]), + EmqttOpts = [ + {proto_ver, v5}, + {clientid, ClientId}, + {properties, #{'Session-Expiry-Interval' => 30}} + | Config + ], + {ok, Client1} = emqtt:start_link([{clean_start, true} | EmqttOpts]), {ok, _} = emqtt:ConnFun(Client1), ok = emqtt:disconnect(Client1), maybe_kill_connection_process(ClientId, Config), ?check_trace( - begin - {ok, Client2} = emqtt:start_link([{clean_start, false}|EmqttOpts]), - {ok, _} = emqtt:ConnFun(Client2), - ok = emqtt:disconnect(Client2) - end, - fun(ok, Trace) -> - check_snabbkaffe_vanilla(Trace) - end), + begin + {ok, Client2} = emqtt:start_link([{clean_start, false} | EmqttOpts]), + {ok, _} = emqtt:ConnFun(Client2), + ok = emqtt:disconnect(Client2) + end, + fun(ok, Trace) -> + check_snabbkaffe_vanilla(Trace) + end + ), ok. t_snabbkaffe_pending_messages(Config) -> @@ -847,11 +985,13 @@ t_snabbkaffe_pending_messages(Config) -> ClientId = ?config(client_id, Config), Topic = ?config(topic, Config), STopic = ?config(stopic, Config), - Payloads = [<<"test", (integer_to_binary(X))/binary>> || X <- [1,2,3,4,5]], - EmqttOpts = [ {proto_ver, v5}, - {clientid, ClientId}, - {properties, #{'Session-Expiry-Interval' => 30}} - | Config], + Payloads = [<<"test", (integer_to_binary(X))/binary>> || X <- [1, 2, 3, 4, 5]], + EmqttOpts = [ + {proto_ver, v5}, + {clientid, ClientId}, + {properties, #{'Session-Expiry-Interval' => 30}} + | Config + ], {ok, Client1} = emqtt:start_link([{clean_start, true} | EmqttOpts]), {ok, _} = emqtt:ConnFun(Client1), {ok, _, [2]} = emqtt:subscribe(Client1, STopic, qos2), @@ -859,25 +999,26 @@ t_snabbkaffe_pending_messages(Config) -> maybe_kill_connection_process(ClientId, Config), ?check_trace( - begin - snabbkaffe_sync_publish(Topic, Payloads), - {ok, Client2} = emqtt:start_link([{clean_start, false} | EmqttOpts]), - {ok, _} = emqtt:ConnFun(Client2), - Msgs = receive_messages(length(Payloads)), - ReceivedPayloads = [P || #{ payload := P } <- Msgs], - ?assertEqual(lists:sort(ReceivedPayloads), lists:sort(Payloads)), - ok = emqtt:disconnect(Client2) - end, - fun(ok, Trace) -> - check_snabbkaffe_vanilla(Trace), - %% Check that all messages was delivered from the DB - [Delivers1] = ?projection(msgs, ?of_kind(ps_persist_pendings_msgs, Trace)), - [Delivers2] = ?projection(msgs, ?of_kind(ps_marker_pendings_msgs, Trace)), - Delivers = Delivers1 ++ Delivers2, - ?assertEqual(length(Payloads), length(Delivers)), - %% Check for no duplicates - ?assertEqual(lists:usort(Delivers), lists:sort(Delivers)) - end), + begin + snabbkaffe_sync_publish(Topic, Payloads), + {ok, Client2} = emqtt:start_link([{clean_start, false} | EmqttOpts]), + {ok, _} = emqtt:ConnFun(Client2), + Msgs = receive_messages(length(Payloads)), + ReceivedPayloads = [P || #{payload := P} <- Msgs], + ?assertEqual(lists:sort(ReceivedPayloads), lists:sort(Payloads)), + ok = emqtt:disconnect(Client2) + end, + fun(ok, Trace) 
-> + check_snabbkaffe_vanilla(Trace), + %% Check that all messages was delivered from the DB + [Delivers1] = ?projection(msgs, ?of_kind(ps_persist_pendings_msgs, Trace)), + [Delivers2] = ?projection(msgs, ?of_kind(ps_marker_pendings_msgs, Trace)), + Delivers = Delivers1 ++ Delivers2, + ?assertEqual(length(Payloads), length(Delivers)), + %% Check for no duplicates + ?assertEqual(lists:usort(Delivers), lists:sort(Delivers)) + end + ), ok. t_snabbkaffe_buffered_messages(Config) -> @@ -888,10 +1029,12 @@ t_snabbkaffe_buffered_messages(Config) -> STopic = ?config(stopic, Config), Payloads1 = [<<"test", (integer_to_binary(X))/binary>> || X <- [1, 2, 3]], Payloads2 = [<<"test", (integer_to_binary(X))/binary>> || X <- [4, 5, 6]], - EmqttOpts = [ {proto_ver, v5}, - {clientid, ClientId}, - {properties, #{'Session-Expiry-Interval' => 30}} - | Config], + EmqttOpts = [ + {proto_ver, v5}, + {clientid, ClientId}, + {properties, #{'Session-Expiry-Interval' => 30}} + | Config + ], {ok, Client1} = emqtt:start_link([{clean_start, true} | EmqttOpts]), {ok, _} = emqtt:ConnFun(Client1), {ok, _, [2]} = emqtt:subscribe(Client1, STopic, qos2), @@ -901,33 +1044,43 @@ t_snabbkaffe_buffered_messages(Config) -> publish(Topic, Payloads1), ?check_trace( - begin - %% Make the resume init phase wait until the first message is delivered. - ?force_ordering( #{ ?snk_kind := ps_worker_deliver }, - #{ ?snk_kind := ps_resume_end }), - Parent = self(), - spawn_link(fun() -> - ?block_until(#{?snk_kind := ps_marker_pendings_msgs}, infinity, 5000), - publish(Topic, Payloads2, true), - Parent ! publish_done, - ok - end), - {ok, Client2} = emqtt:start_link([{clean_start, false} | EmqttOpts]), - {ok, _} = emqtt:ConnFun(Client2), - receive publish_done -> ok after 10000 -> error(too_long_to_publish) end, - Msgs = receive_messages(length(Payloads1) + length(Payloads2) + 1), - ReceivedPayloads = [P || #{ payload := P } <- Msgs], - ?assertEqual(lists:sort(Payloads1 ++ Payloads2), - lists:sort(ReceivedPayloads)), - ok = emqtt:disconnect(Client2) - end, - fun(ok, Trace) -> - check_snabbkaffe_vanilla(Trace), - %% Check that some messages was buffered in the writer process - [Msgs] = ?projection(msgs, ?of_kind(ps_writer_pendings, Trace)), - ?assertMatch(X when 0 < X andalso X =< length(Payloads2), - length(Msgs)) - end), + begin + %% Make the resume init phase wait until the first message is delivered. + ?force_ordering( + #{?snk_kind := ps_worker_deliver}, + #{?snk_kind := ps_resume_end} + ), + Parent = self(), + spawn_link(fun() -> + ?block_until(#{?snk_kind := ps_marker_pendings_msgs}, infinity, 5000), + publish(Topic, Payloads2, true), + Parent ! publish_done, + ok + end), + {ok, Client2} = emqtt:start_link([{clean_start, false} | EmqttOpts]), + {ok, _} = emqtt:ConnFun(Client2), + receive + publish_done -> ok + after 10000 -> error(too_long_to_publish) + end, + Msgs = receive_messages(length(Payloads1) + length(Payloads2) + 1), + ReceivedPayloads = [P || #{payload := P} <- Msgs], + ?assertEqual( + lists:sort(Payloads1 ++ Payloads2), + lists:sort(ReceivedPayloads) + ), + ok = emqtt:disconnect(Client2) + end, + fun(ok, Trace) -> + check_snabbkaffe_vanilla(Trace), + %% Check that some messages was buffered in the writer process + [Msgs] = ?projection(msgs, ?of_kind(ps_writer_pendings, Trace)), + ?assertMatch( + X when 0 < X andalso X =< length(Payloads2), + length(Msgs) + ) + end + ), ok. 
%%--------------------------------------------------------------------
@@ -953,27 +1106,40 @@ marker_msg(MarkerID, SessionID) ->
 guid(MicrosecondsAgo) ->
 %% Make a fake GUID and set a timestamp.
- << TS:64, Tail:64 >> = emqx_guid:gen(),
- << (TS - MicrosecondsAgo) : 64, Tail:64 >>.
+ <<TS:64, Tail:64>> = emqx_guid:gen(),
+ <<(TS - MicrosecondsAgo):64, Tail:64>>.
 abandoned_session_msg(SessionID) ->
 abandoned_session_msg(SessionID, 0).
 abandoned_session_msg(SessionID, MicrosecondsAgo) ->
 TS = erlang:system_time(microsecond),
- {SessionID, <<>>, <<(TS - MicrosecondsAgo) : 64>>, ?ABANDONED}.
+ {SessionID, <<>>, <<(TS - MicrosecondsAgo):64>>, ?ABANDONED}.
 fresh_gc_delete_fun() ->
 Ets = ets:new(gc_collect, [ordered_set]),
- fun(delete, Key) -> ets:insert(Ets, {Key}), ok;
- (collect, <<>>) -> List = ets:match(Ets, {'$1'}), ets:delete(Ets), lists:append(List);
- (_, _Key) -> ok
+ fun
+ (delete, Key) ->
+ ets:insert(Ets, {Key}),
+ ok;
+ (collect, <<>>) ->
+ List = ets:match(Ets, {'$1'}),
+ ets:delete(Ets),
+ lists:append(List);
+ (_, _Key) ->
+ ok
 end.
 fresh_gc_callbacks_fun() ->
 Ets = ets:new(gc_collect, [ordered_set]),
- fun(collect, <<>>) -> List = ets:match(Ets, {'$1'}), ets:delete(Ets), lists:append(List);
- (Tag, Key) -> ets:insert(Ets, {{Key, Tag}}), ok
+ fun
+ (collect, <<>>) ->
+ List = ets:match(Ets, {'$1'}),
+ ets:delete(Ets),
+ lists:append(List);
+ (Tag, Key) ->
+ ets:insert(Ets, {{Key, Tag}}),
+ ok
 end.
 get_gc_delete_messages() ->
@@ -986,7 +1152,6 @@ get_gc_callbacks() ->
 emqx_persistent_session:gc_session_messages(Fun),
 Fun(collect, <<>>).
-
 t_gc_all_delivered(Config) ->
 Store = ?config(session_msg_store, Config),
 STopic = ?config(stopic, Config),
@@ -1006,7 +1171,7 @@ t_gc_some_undelivered(Config) ->
 SessionId = emqx_guid:gen(),
 MsgIds = [msg_id() || _ <- lists:seq(1, 10)],
 Delivered = [delivered_msg(X, SessionId, STopic) || X <- MsgIds],
- {Delivered1,_Delivered2} = split(Delivered),
+ {Delivered1, _Delivered2} = split(Delivered),
 Undelivered = [undelivered_msg(X, SessionId, STopic) || X <- MsgIds],
 {Undelivered1, Undelivered2} = split(Undelivered),
 Content = Delivered1 ++ Undelivered1 ++ Undelivered2,
@@ -1024,7 +1189,7 @@ t_gc_with_markers(Config) ->
 MarkerId = msg_id(),
 MsgIds = [msg_id() || _ <- lists:seq(1, 4)] ++ MsgIds1,
 Delivered = [delivered_msg(X, SessionId, STopic) || X <- MsgIds],
- {Delivered1,_Delivered2} = split(Delivered),
+ {Delivered1, _Delivered2} = split(Delivered),
 Undelivered = [undelivered_msg(X, SessionId, STopic) || X <- MsgIds],
 {Undelivered1, Undelivered2} = split(Undelivered),
 Markers = [marker_msg(MarkerId, SessionId)],
@@ -1041,7 +1206,7 @@ t_gc_abandoned_some_undelivered(Config) ->
 SessionId = emqx_guid:gen(),
 MsgIds = [msg_id() || _ <- lists:seq(1, 10)],
 Delivered = [delivered_msg(X, SessionId, STopic) || X <- MsgIds],
- {Delivered1,_Delivered2} = split(Delivered),
+ {Delivered1, _Delivered2} = split(Delivered),
 Undelivered = [undelivered_msg(X, SessionId, STopic) || X <- MsgIds],
 {Undelivered1, Undelivered2} = split(Undelivered),
 Abandoned = abandoned_session_msg(SessionId),
@@ -1066,7 +1231,7 @@ t_gc_abandoned_only_called_on_empty_session(Config) ->
 %% Since we had messages to delete, we don't expect to get the
 %% callback on the abandoned session
- ?assertEqual([], [ X || {X, abandoned} <- GCMessages]),
+ ?assertEqual([], [X || {X, abandoned} <- GCMessages]),
 %% But if we have only the abandoned session marker for this
 %% session, it should be called.
@@ -1074,13 +1239,13 @@ t_gc_abandoned_only_called_on_empty_session(Config) -> UndeliveredOtherSession = undelivered_msg(msg_id(), emqx_guid:gen(), <<"topic">>), ets:insert(Store, [{X, <<>>} || X <- [Abandoned, UndeliveredOtherSession]]), GCMessages2 = get_gc_callbacks(), - ?assertEqual([Abandoned], [ X || {X, abandoned} <- GCMessages2]), + ?assertEqual([Abandoned], [X || {X, abandoned} <- GCMessages2]), ok. t_gc_session_gc_worker(init, Config) -> meck:new(emqx_persistent_session, [passthrough, no_link]), Config; -t_gc_session_gc_worker('end',_Config) -> +t_gc_session_gc_worker('end', _Config) -> meck:unload(emqx_persistent_session), ok. @@ -1099,23 +1264,32 @@ t_gc_session_gc_worker(Config) -> emqx_persistent_session_gc:session_gc_worker(abandoned, AbandonedDeleted), emqx_persistent_session_gc:session_gc_worker(abandoned, AbandonedNotDeleted), History = meck:history(emqx_persistent_session, self()), - DeleteCalls = [ Key || {_Pid, {_, delete_session_message, [Key]}, _Result} - <- History], - ?assertEqual(lists:sort([MsgDeleted, AbandonedDeleted, MarkerDeleted]), - lists:sort(DeleteCalls)), + DeleteCalls = [ + Key + || {_Pid, {_, delete_session_message, [Key]}, _Result} <- + History + ], + ?assertEqual( + lists:sort([MsgDeleted, AbandonedDeleted, MarkerDeleted]), + lists:sort(DeleteCalls) + ), ok. t_gc_message_gc(Config) -> Topic = ?config(topic, Config), ClientID = ?config(client_id, Config), Store = ?config(msg_store, Config), - NewMsgs = [emqx_message:make(ClientID, Topic, integer_to_binary(P)) - || P <- lists:seq(6, 10)], + NewMsgs = [ + emqx_message:make(ClientID, Topic, integer_to_binary(P)) + || P <- lists:seq(6, 10) + ], Retain = 60 * 1000, emqx_config:put(?msg_retain, Retain), - Msgs1 = [emqx_message:make(ClientID, Topic, integer_to_binary(P)) - || P <- lists:seq(1, 5)], - OldMsgs = [M#message{id = guid(Retain*1000)} || M <- Msgs1], + Msgs1 = [ + emqx_message:make(ClientID, Topic, integer_to_binary(P)) + || P <- lists:seq(1, 5) + ], + OldMsgs = [M#message{id = guid(Retain * 1000)} || M <- Msgs1], ets:insert(Store, NewMsgs ++ OldMsgs), ?assertEqual(lists:sort(OldMsgs ++ NewMsgs), ets:tab2list(Store)), ok = emqx_persistent_session_gc:message_gc_worker(), @@ -1128,6 +1302,6 @@ split(List) -> split([], L1, L2) -> {L1, L2}; split([H], L1, L2) -> - {[H|L1], L2}; -split([H1, H2|Left], L1, L2) -> - split(Left, [H1|L1], [H2|L2]). + {[H | L1], L2}; +split([H1, H2 | Left], L1, L2) -> + split(Left, [H1 | L1], [H2 | L2]). diff --git a/apps/emqx/test/emqx_pool_SUITE.erl b/apps/emqx/test/emqx_pool_SUITE.erl index e9c26f1f3..d25a6a75c 100644 --- a/apps/emqx/test/emqx_pool_SUITE.erl +++ b/apps/emqx/test/emqx_pool_SUITE.erl @@ -22,20 +22,22 @@ -include_lib("eunit/include/eunit.hrl"). all() -> - [{group, submit}, - {group, async_submit}, - t_unexpected + [ + {group, submit}, + {group, async_submit}, + t_unexpected ]. groups() -> - [{submit, [sequence], - [t_submit_mfa, - t_submit_fa - ]}, - {async_submit, [sequence], - [t_async_submit_mfa, - t_async_submit_crash - ]} + [ + {submit, [sequence], [ + t_submit_mfa, + t_submit_fa + ]}, + {async_submit, [sequence], [ + t_async_submit_mfa, + t_async_submit_crash + ]} ]. init_per_suite(Config) -> @@ -48,7 +50,7 @@ end_per_suite(_Config) -> init_per_testcase(_, Config) -> {ok, Sup} = emqx_pool_sup:start_link(), - [{pool_sup, Sup}|Config]. + [{pool_sup, Sup} | Config]. 
end_per_testcase(_, Config) -> Sup = proplists:get_value(pool_sup, Config), @@ -61,11 +63,11 @@ t_submit_mfa(_Config) -> t_submit_fa(_Config) -> Fun = fun(X) -> - case X rem 2 of - 0 -> {true, X div 2}; - _ -> false - end - end, + case X rem 2 of + 0 -> {true, X div 2}; + _ -> false + end + end, Result = emqx_pool:submit(Fun, [2]), ?assertEqual({true, 1}, Result). @@ -84,7 +86,7 @@ t_unexpected(_) -> ok = gen_server:stop(Pid). test_mfa() -> - lists:foldl(fun(X, Sum) -> X + Sum end, 0, [1,2,3,4,5]). + lists:foldl(fun(X, Sum) -> X + Sum end, 0, [1, 2, 3, 4, 5]). % t_async_submit(_) -> % error('TODO'). diff --git a/apps/emqx/test/emqx_pqueue_SUITE.erl b/apps/emqx/test/emqx_pqueue_SUITE.erl index 780a871cd..4e901ed54 100644 --- a/apps/emqx/test/emqx_pqueue_SUITE.erl +++ b/apps/emqx/test/emqx_pqueue_SUITE.erl @@ -78,15 +78,24 @@ t_in(_) -> Q = ?PQ:new(), Els = [a, b, {c, 1}, {d, 1}, {e, infinity}, {f, 2}], Q1 = lists:foldl( - fun({El, P}, Acc) -> - ?PQ:in(El, P, Acc); - (El, Acc) -> - ?PQ:in(El, Acc) - end, Q, Els), - ?assertEqual({pqueue, [{infinity, {queue, [e], [], 1}}, - {-2, {queue, [f], [], 1}}, - {-1, {queue, [d], [c], 2}}, - {0, {queue, [b], [a], 2}}]}, Q1). + fun + ({El, P}, Acc) -> + ?PQ:in(El, P, Acc); + (El, Acc) -> + ?PQ:in(El, Acc) + end, + Q, + Els + ), + ?assertEqual( + {pqueue, [ + {infinity, {queue, [e], [], 1}}, + {-2, {queue, [f], [], 1}}, + {-1, {queue, [d], [c], 2}}, + {0, {queue, [b], [a], 2}} + ]}, + Q1 + ). t_out(_) -> Q = ?PQ:new(), @@ -94,8 +103,9 @@ t_out(_) -> {empty, Q} = ?PQ:out(0, Q), try ?PQ:out(1, Q) of _ -> ct:fail(should_throw_error) - catch error:Reason -> - ?assertEqual(Reason, badarg) + catch + error:Reason -> + ?assertEqual(Reason, badarg) end, {{value, a}, Q} = ?PQ:out(?PQ:from_list([{0, a}])), {{value, a}, {queue, [], [b], 1}} = ?PQ:out(?PQ:from_list([{0, a}, {0, b}])), @@ -128,49 +138,69 @@ t_join(_) -> Q1 = ?PQ:in(a, ?PQ:new()), Q2 = ?PQ:in(b, Q1), Q3 = ?PQ:in(c, Q2), - {queue,[c,b],[a],3} = Q3, + {queue, [c, b], [a], 3} = Q3, Q4 = ?PQ:in(x, ?PQ:new()), Q5 = ?PQ:in(y, Q4), Q6 = ?PQ:in(z, Q5), - {queue,[z,y],[x],3} = Q6, + {queue, [z, y], [x], 3} = Q6, - {queue,[z,y],[a,b,c,x],6} = ?PQ:join(Q3, Q6), + {queue, [z, y], [a, b, c, x], 6} = ?PQ:join(Q3, Q6), PQueue1 = ?PQ:from_list([{1, c}, {1, d}]), PQueue2 = ?PQ:from_list([{1, c}, {1, d}, {0, a}, {0, b}]), PQueue3 = ?PQ:from_list([{1, c}, {1, d}, {-1, a}, {-1, b}]), - {pqueue,[{-1,{queue,[d],[c],2}}, - {0,{queue,[z,y],[x],3}}]} = ?PQ:join(PQueue1, Q6), - {pqueue,[{-1,{queue,[d],[c],2}}, - {0,{queue,[z,y],[x],3}}]} = ?PQ:join(Q6, PQueue1), + {pqueue, [ + {-1, {queue, [d], [c], 2}}, + {0, {queue, [z, y], [x], 3}} + ]} = ?PQ:join(PQueue1, Q6), + {pqueue, [ + {-1, {queue, [d], [c], 2}}, + {0, {queue, [z, y], [x], 3}} + ]} = ?PQ:join(Q6, PQueue1), - {pqueue,[{-1,{queue,[d],[c],2}}, - {0,{queue,[z,y],[a,b,x],5}}]} = ?PQ:join(PQueue2, Q6), - {pqueue,[{-1,{queue,[d],[c],2}}, - {0,{queue,[b],[x,y,z,a],5}}]} = ?PQ:join(Q6, PQueue2), + {pqueue, [ + {-1, {queue, [d], [c], 2}}, + {0, {queue, [z, y], [a, b, x], 5}} + ]} = ?PQ:join(PQueue2, Q6), + {pqueue, [ + {-1, {queue, [d], [c], 2}}, + {0, {queue, [b], [x, y, z, a], 5}} + ]} = ?PQ:join(Q6, PQueue2), - {pqueue,[{-1,{queue,[d],[c],2}}, - {0,{queue,[z,y],[x],3}}, - {1,{queue,[b],[a],2}}]} = ?PQ:join(PQueue3, Q6), - {pqueue,[{-1,{queue,[d],[c],2}}, - {0,{queue,[z,y],[x],3}}, - {1,{queue,[b],[a],2}}]} = ?PQ:join(Q6, PQueue3), + {pqueue, [ + {-1, {queue, [d], [c], 2}}, + {0, {queue, [z, y], [x], 3}}, + {1, {queue, [b], [a], 2}} + ]} = ?PQ:join(PQueue3, Q6), + {pqueue, [ + 
{-1, {queue, [d], [c], 2}}, + {0, {queue, [z, y], [x], 3}}, + {1, {queue, [b], [a], 2}} + ]} = ?PQ:join(Q6, PQueue3), PQueue4 = ?PQ:from_list([{1, c}, {1, d}]), PQueue5 = ?PQ:from_list([{2, a}, {2, b}]), - {pqueue,[{-2,{queue,[b],[a],2}}, - {-1,{queue,[d],[c],2}}]} = ?PQ:join(PQueue4, PQueue5). + {pqueue, [ + {-2, {queue, [b], [a], 2}}, + {-1, {queue, [d], [c], 2}} + ]} = ?PQ:join(PQueue4, PQueue5). t_filter(_) -> - {pqueue, [{-2, {queue, [10], [4], 2}}, - {-1, {queue, [2], [], 1}}]} = - ?PQ:filter(fun(V) when V rem 2 =:= 0 -> - true; - (_) -> - false - end, ?PQ:from_list([{0, 1}, {0, 3}, {1, 2}, {2, 4}, {2, 10}])). + {pqueue, [ + {-2, {queue, [10], [4], 2}}, + {-1, {queue, [2], [], 1}} + ]} = + ?PQ:filter( + fun + (V) when V rem 2 =:= 0 -> + true; + (_) -> + false + end, + ?PQ:from_list([{0, 1}, {0, 3}, {1, 2}, {2, 4}, {2, 10}]) + ). t_highest(_) -> empty = ?PQ:highest(?PQ:new()), diff --git a/apps/emqx/test/emqx_proper_types.erl b/apps/emqx/test/emqx_proper_types.erl index 245dfc990..1b0972dd6 100644 --- a/apps/emqx/test/emqx_proper_types.erl +++ b/apps/emqx/test/emqx_proper_types.erl @@ -22,31 +22,32 @@ -include("emqx.hrl"). %% High level Types --export([ conninfo/0 - , clientinfo/0 - , sessioninfo/0 - , connack_return_code/0 - , message/0 - , topictab/0 - , topic/0 - , systopic/0 - , subopts/0 - , nodename/0 - , normal_topic/0 - , normal_topic_filter/0 - ]). +-export([ + conninfo/0, + clientinfo/0, + sessioninfo/0, + connack_return_code/0, + message/0, + topictab/0, + topic/0, + systopic/0, + subopts/0, + nodename/0, + normal_topic/0, + normal_topic_filter/0 +]). %% Basic Types --export([ url/0 - , ip/0 - , port/0 - , limited_atom/0 - , limited_latin_atom/0 - ]). +-export([ + url/0, + ip/0, + port/0, + limited_atom/0, + limited_latin_atom/0 +]). %% Iterators --export([ nof/1 - ]). +-export([nof/1]). %%-------------------------------------------------------------------- %% Types High level @@ -54,68 +55,91 @@ %% Type defined emqx_types.erl - conninfo() conninfo() -> - Keys = [{socktype, socktype()}, - {sockname, peername()}, - {peername, peername()}, - {peercert, peercert()}, - {conn_mod, conn_mod()}, - {proto_name, proto_name()}, - {proto_ver, non_neg_integer()}, - {clean_start, boolean()}, - {clientid, clientid()}, - {username, username()}, - {conn_props, properties()}, - {connected, boolean()}, - {connected_at, timestamp()}, - {keepalive, range(0, 16#ffff)}, - {receive_maximum, non_neg_integer()}, - {expiry_interval, non_neg_integer()}], - ?LET({Ks, M}, {Keys, map(limited_atom(), limited_any_term())}, - begin - maps:merge(maps:from_list(Ks), M) - end). + Keys = [ + {socktype, socktype()}, + {sockname, peername()}, + {peername, peername()}, + {peercert, peercert()}, + {conn_mod, conn_mod()}, + {proto_name, proto_name()}, + {proto_ver, non_neg_integer()}, + {clean_start, boolean()}, + {clientid, clientid()}, + {username, username()}, + {conn_props, properties()}, + {connected, boolean()}, + {connected_at, timestamp()}, + {keepalive, range(0, 16#ffff)}, + {receive_maximum, non_neg_integer()}, + {expiry_interval, non_neg_integer()} + ], + ?LET( + {Ks, M}, + {Keys, map(limited_atom(), limited_any_term())}, + begin + maps:merge(maps:from_list(Ks), M) + end + ). 
clientinfo() -> - Keys = [{zone, zone()}, - {protocol, protocol()}, - {peerhost, ip()}, - {sockport, port()}, - {clientid, clientid()}, - {username, username()}, - {is_bridge, boolean()}, - {is_supuser, boolean()}, - {mountpoint, maybe(utf8())}, - {ws_cookie, maybe(list())} - % password, - % auth_result, - % anonymous, - % cn, - % dn, - ], - ?LET({Ks, M}, {Keys, map(limited_atom(), limited_any_term())}, - begin - maps:merge(maps:from_list(Ks), M) - end). + Keys = [ + {zone, zone()}, + {protocol, protocol()}, + {peerhost, ip()}, + {sockport, port()}, + {clientid, clientid()}, + {username, username()}, + {is_bridge, boolean()}, + {is_supuser, boolean()}, + {mountpoint, maybe(utf8())}, + {ws_cookie, maybe(list())} + % password, + % auth_result, + % anonymous, + % cn, + % dn, + ], + ?LET( + {Ks, M}, + {Keys, map(limited_atom(), limited_any_term())}, + begin + maps:merge(maps:from_list(Ks), M) + end + ). %% See emqx_session:session() type define sessioninfo() -> - ?LET(Session, {session, - clientid(), - sessionid(), % id - boolean(), % is_persistent - subscriptions(), % subscriptions - non_neg_integer(), % max_subscriptions - boolean(), % upgrade_qos - inflight(), % emqx_inflight:inflight() - mqueue(), % emqx_mqueue:mqueue() - packet_id(), % next_pkt_id - safty_timeout(), % retry_interval - awaiting_rel(), % awaiting_rel - non_neg_integer(), % max_awaiting_rel - safty_timeout(), % await_rel_timeout - timestamp() % created_at - }, - emqx_session:info(Session)). + ?LET( + Session, + {session, clientid(), + % id + sessionid(), + % is_persistent + boolean(), + % subscriptions + subscriptions(), + % max_subscriptions + non_neg_integer(), + % upgrade_qos + boolean(), + % emqx_inflight:inflight() + inflight(), + % emqx_mqueue:mqueue() + mqueue(), + % next_pkt_id + packet_id(), + % retry_interval + safty_timeout(), + % awaiting_rel + awaiting_rel(), + % max_awaiting_rel + non_neg_integer(), + % await_rel_timeout + safty_timeout(), + % created_at + timestamp()}, + emqx_session:info(Session) + ). sessionid() -> emqx_guid:gen(). @@ -124,32 +148,53 @@ subscriptions() -> ?LET(L, list({topic(), subopts()}), maps:from_list(L)). inflight() -> - ?LET(MaxLen, non_neg_integer(), + ?LET( + MaxLen, + non_neg_integer(), begin - ?LET(Msgs, limited_list(MaxLen, {packet_id(), message(), timestamp()}), + ?LET( + Msgs, + limited_list(MaxLen, {packet_id(), message(), timestamp()}), begin - lists:foldl(fun({PktId, Msg, Ts}, Ift) -> - try - emqx_inflight:insert(PktId, {Msg, Ts}, Ift) - catch _:_ -> - Ift - end - end, emqx_inflight:new(MaxLen), Msgs) - end) - end). + lists:foldl( + fun({PktId, Msg, Ts}, Ift) -> + try + emqx_inflight:insert(PktId, {Msg, Ts}, Ift) + catch + _:_ -> + Ift + end + end, + emqx_inflight:new(MaxLen), + Msgs + ) + end + ) + end + ). mqueue() -> - ?LET({MaxLen, IsStoreQos0}, {non_neg_integer(), boolean()}, - begin - ?LET(Msgs, limited_list(MaxLen, message()), - begin + ?LET( + {MaxLen, IsStoreQos0}, + {non_neg_integer(), boolean()}, + begin + ?LET( + Msgs, + limited_list(MaxLen, message()), + begin Q = emqx_mqueue:init(#{max_len => MaxLen, store_qos0 => IsStoreQos0}), - lists:foldl(fun(Msg, Acc) -> - {_Dropped, NQ} = emqx_mqueue:in(Msg, Acc), - NQ - end, Q, Msgs) - end) - end). + lists:foldl( + fun(Msg, Acc) -> + {_Dropped, NQ} = emqx_mqueue:in(Msg, Acc), + NQ + end, + Q, + Msgs + ) + end + ) + end + ). 
message() -> #message{ @@ -157,7 +202,8 @@ message() -> qos = qos(), from = from(), flags = flags(), - headers = map(limited_latin_atom(), limited_any_term()), %% headers + %% headers + headers = map(limited_latin_atom(), limited_any_term()), topic = topic(), payload = payload(), timestamp = timestamp(), @@ -175,35 +221,43 @@ awaiting_rel() -> ?LET(L, list({packet_id(), timestamp()}), maps:from_list(L)). connack_return_code() -> - oneof([ success - , protocol_error - , client_identifier_not_valid - , bad_username_or_password - , bad_clientid_or_password - , username_or_password_undefined - , password_error - , not_authorized - , server_unavailable - , server_busy - , banned - , bad_authentication_method - ]). + oneof([ + success, + protocol_error, + client_identifier_not_valid, + bad_username_or_password, + bad_clientid_or_password, + username_or_password_undefined, + password_error, + not_authorized, + server_unavailable, + server_busy, + banned, + bad_authentication_method + ]). topictab() -> non_empty(list({topic(), subopts()})). topic() -> - oneof([normal_topic(), - normal_topic_filter(), - systopic_broker(), systopic_present(), systopic_stats(), - systopic_metrics(), systopic_alarms(), systopic_mon(), - sharetopic()]). + oneof([ + normal_topic(), + normal_topic_filter(), + systopic_broker(), + systopic_present(), + systopic_stats(), + systopic_metrics(), + systopic_alarms(), + systopic_mon(), + sharetopic() + ]). subopts() -> - ?LET({Nl, Qos, Rap, Rh}, - {range(0, 1), qos(), - range(0, 1), range(0, 1)}, - #{nl => Nl, qos => Qos, rap => Rap, rh => Rh}). + ?LET( + {Nl, Qos, Rap, Rh}, + {range(0, 1), qos(), range(0, 1), range(0, 1)}, + #{nl => Nl, qos => Qos, rap => Rap, rh => Rh} + ). qos() -> range(0, 2). @@ -218,124 +272,198 @@ safty_timeout() -> non_neg_integer(). nodename() -> - ?LET({Name, Ip}, {non_empty(list(latin_char())), ip()}, + ?LET( + {Name, Ip}, + {non_empty(list(latin_char())), ip()}, begin binary_to_atom(iolist_to_binary([Name, "@", inet:ntoa(Ip)]), utf8) - end). + end + ). systopic() -> oneof( - [systopic_broker(), systopic_present(), systopic_stats(), - systopic_metrics(), systopic_alarms(), systopic_mon()]). + [ + systopic_broker(), + systopic_present(), + systopic_stats(), + systopic_metrics(), + systopic_alarms(), + systopic_mon() + ] + ). systopic_broker() -> Topics = [<<"">>, <<"version">>, <<"uptime">>, <<"datetime">>, <<"sysdescr">>], - ?LET({Nodename, T}, - {nodename(), oneof(Topics)}, + ?LET( + {Nodename, T}, + {nodename(), oneof(Topics)}, begin case byte_size(T) of 0 -> <<"$SYS/brokers">>; - _ -> - <<"$SYS/brokers/", (ensure_bin(Nodename))/binary, "/", T/binary>> + _ -> <<"$SYS/brokers/", (ensure_bin(Nodename))/binary, "/", T/binary>> end - end). + end + ). systopic_present() -> - ?LET({Nodename, ClientId, T}, - {nodename(), clientid(), oneof([<<"connected">>, <<"disconnected">>])}, + ?LET( + {Nodename, ClientId, T}, + {nodename(), clientid(), oneof([<<"connected">>, <<"disconnected">>])}, begin - <<"$SYS/brokers/", (ensure_bin(Nodename))/binary, "/clients/", (ensure_bin(ClientId))/binary, "/", T/binary>> - end). + <<"$SYS/brokers/", (ensure_bin(Nodename))/binary, "/clients/", + (ensure_bin(ClientId))/binary, "/", T/binary>> + end + ). 
systopic_stats() -> - Topics = [<<"connections/max">>, <<"connections/count">>, - <<"suboptions/max">>, <<"suboptions/count">>, - <<"subscribers/max">>, <<"subscribers/count">>, - <<"subscriptions/max">>, <<"subscriptions/count">>, - <<"subscriptions/shared/max">>, <<"subscriptions/shared/count">>, - <<"topics/max">>, <<"topics/count">>, - <<"routes/max">>, <<"routes/count">> - ], - ?LET({Nodename, T}, - {nodename(), oneof(Topics)}, - <<"$SYS/brokers/", (ensure_bin(Nodename))/binary, "/stats/", T/binary>>). + Topics = [ + <<"connections/max">>, + <<"connections/count">>, + <<"suboptions/max">>, + <<"suboptions/count">>, + <<"subscribers/max">>, + <<"subscribers/count">>, + <<"subscriptions/max">>, + <<"subscriptions/count">>, + <<"subscriptions/shared/max">>, + <<"subscriptions/shared/count">>, + <<"topics/max">>, + <<"topics/count">>, + <<"routes/max">>, + <<"routes/count">> + ], + ?LET( + {Nodename, T}, + {nodename(), oneof(Topics)}, + <<"$SYS/brokers/", (ensure_bin(Nodename))/binary, "/stats/", T/binary>> + ). systopic_metrics() -> - Topics = [<<"bytes/received">>, <<"bytes/sent">>, - <<"packets/received">>, <<"packets/sent">>, - <<"packets/connect/received">>, <<"packets/connack/sent">>, - <<"packets/publish/received">>, <<"packets/publish/sent">>, - <<"packets/publish/error">>, <<"packets/publish/auth_error">>, - <<"packets/publish/dropped">>, - <<"packets/puback/received">>, <<"packets/puback/sent">>, - <<"packets/puback/inuse">>, <<"packets/puback/missed">>, - <<"packets/pubrec/received">>, <<"packets/pubrec/sent">>, - <<"packets/pubrec/inuse">>, <<"packets/pubrec/missed">>, - <<"packets/pubrel/received">>, <<"packets/pubrel/sent">>, - <<"packets/pubrel/missed">>, - <<"packets/pubcomp/received">>, <<"packets/pubcomp/sent">>, - <<"packets/pubcomp/inuse">>, <<"packets/pubcomp/missed">>, - <<"packets/subscribe/received">>, <<"packets/subscribe/error">>, - <<"packets/subscribe/auth_error">>, <<"packets/suback/sent">>, - <<"packets/unsubscribe/received">>, <<"packets/unsuback/sent">>, - <<"packets/pingreq/received">>, <<"packets/pingresp/sent">>, - <<"packets/disconnect/received">>, <<"packets/disconnect/sent">>, - <<"packets/auth/received">>, <<"packets/auth/sent">>, - <<"messages/received">>, <<"messages/sent">>, - <<"messages/qos0/received">>, <<"messages/qos0/sent">>, - <<"messages/qos1/received">>, <<"messages/qos1/sent">>, - <<"messages/qos2/received">>, <<"messages/qos2/sent">>, - <<"messages/publish">>, <<"messages/dropped">>, - <<"messages/dropped/expired">>, <<"messages/dropped/no_subscribers">>, - <<"messages/forward">>, - <<"messages/delayed">>, <<"messages/delivered">>, - <<"messages/acked">>], - ?LET({Nodename, T}, - {nodename(), oneof(Topics)}, - <<"$SYS/brokers/", (ensure_bin(Nodename))/binary, "/metrics/", T/binary>>). 
+ Topics = [ + <<"bytes/received">>, + <<"bytes/sent">>, + <<"packets/received">>, + <<"packets/sent">>, + <<"packets/connect/received">>, + <<"packets/connack/sent">>, + <<"packets/publish/received">>, + <<"packets/publish/sent">>, + <<"packets/publish/error">>, + <<"packets/publish/auth_error">>, + <<"packets/publish/dropped">>, + <<"packets/puback/received">>, + <<"packets/puback/sent">>, + <<"packets/puback/inuse">>, + <<"packets/puback/missed">>, + <<"packets/pubrec/received">>, + <<"packets/pubrec/sent">>, + <<"packets/pubrec/inuse">>, + <<"packets/pubrec/missed">>, + <<"packets/pubrel/received">>, + <<"packets/pubrel/sent">>, + <<"packets/pubrel/missed">>, + <<"packets/pubcomp/received">>, + <<"packets/pubcomp/sent">>, + <<"packets/pubcomp/inuse">>, + <<"packets/pubcomp/missed">>, + <<"packets/subscribe/received">>, + <<"packets/subscribe/error">>, + <<"packets/subscribe/auth_error">>, + <<"packets/suback/sent">>, + <<"packets/unsubscribe/received">>, + <<"packets/unsuback/sent">>, + <<"packets/pingreq/received">>, + <<"packets/pingresp/sent">>, + <<"packets/disconnect/received">>, + <<"packets/disconnect/sent">>, + <<"packets/auth/received">>, + <<"packets/auth/sent">>, + <<"messages/received">>, + <<"messages/sent">>, + <<"messages/qos0/received">>, + <<"messages/qos0/sent">>, + <<"messages/qos1/received">>, + <<"messages/qos1/sent">>, + <<"messages/qos2/received">>, + <<"messages/qos2/sent">>, + <<"messages/publish">>, + <<"messages/dropped">>, + <<"messages/dropped/expired">>, + <<"messages/dropped/no_subscribers">>, + <<"messages/forward">>, + <<"messages/delayed">>, + <<"messages/delivered">>, + <<"messages/acked">> + ], + ?LET( + {Nodename, T}, + {nodename(), oneof(Topics)}, + <<"$SYS/brokers/", (ensure_bin(Nodename))/binary, "/metrics/", T/binary>> + ). systopic_alarms() -> - ?LET({Nodename, T}, - {nodename(), oneof([<<"alert">>, <<"clear">>])}, - <<"$SYS/brokers/", (ensure_bin(Nodename))/binary, "/alarms/", T/binary>>). + ?LET( + {Nodename, T}, + {nodename(), oneof([<<"alert">>, <<"clear">>])}, + <<"$SYS/brokers/", (ensure_bin(Nodename))/binary, "/alarms/", T/binary>> + ). systopic_mon() -> - Topics = [<<"long_gc">>, <<"long_schedule">>, - <<"large_heap">>, <<"busy_port">>, <<"busy_dist_port">>], - ?LET({Nodename, T}, - {nodename(), oneof(Topics)}, - <<"$SYS/brokers/", (ensure_bin(Nodename))/binary, "/sysmon/", T/binary>>). + Topics = [ + <<"long_gc">>, + <<"long_schedule">>, + <<"large_heap">>, + <<"busy_port">>, + <<"busy_dist_port">> + ], + ?LET( + {Nodename, T}, + {nodename(), oneof(Topics)}, + <<"$SYS/brokers/", (ensure_bin(Nodename))/binary, "/sysmon/", T/binary>> + ). sharetopic() -> - ?LET({Type, Grp, T}, - {oneof([<<"$queue">>, <<"$share">>]), list(latin_char()), normal_topic()}, - <>). + ?LET( + {Type, Grp, T}, + {oneof([<<"$queue">>, <<"$share">>]), list(latin_char()), normal_topic()}, + <> + ). normal_topic() -> - ?LET(L, list(frequency([{3, latin_char()}, {1, $/}])), - list_to_binary(L)). + ?LET( + L, + list(frequency([{3, latin_char()}, {1, $/}])), + list_to_binary(L) + ). 
normal_topic_filter() -> - ?LET({L, Wild}, {list(list(latin_char())), oneof(['#', '+'])}, - begin - case Wild of - '#' -> - case L of - [] -> <<"#">>; - _ -> iolist_to_binary([lists:join("/", L), "/#"]) - end; - '+' -> + ?LET( + {L, Wild}, + {list(list(latin_char())), oneof(['#', '+'])}, + begin + case Wild of + '#' -> case L of - [] -> <<"+">>; + [] -> <<"#">>; + _ -> iolist_to_binary([lists:join("/", L), "/#"]) + end; + '+' -> + case L of + [] -> + <<"+">>; _ -> - L1 = [case rand:uniform(3) == 1 of - true -> "+"; - _ -> E - end || E <- L], + L1 = [ + case rand:uniform(3) == 1 of + true -> "+"; + _ -> E + end + || E <- L + ], iolist_to_binary(lists:join("/", L1)) end - end - end). + end + end + ). %%-------------------------------------------------------------------- %% Basic Types @@ -355,9 +483,16 @@ peercert() -> oneof([nossl, undefined]). conn_mod() -> - oneof([emqx_connection, emqx_ws_connection, emqx_coap_mqtt_adapter, - emqx_sn_gateway, emqx_lwm2m_protocol, emqx_gbt32960_conn, - emqx_jt808_connection, emqx_tcp_connection]). + oneof([ + emqx_connection, + emqx_ws_connection, + emqx_coap_mqtt_adapter, + emqx_sn_gateway, + emqx_lwm2m_protocol, + emqx_gbt32960_conn, + emqx_jt808_connection, + emqx_tcp_connection + ]). proto_name() -> oneof([<<"MQTT">>, <<"MQTT-SN">>, <<"CoAP">>, <<"LwM2M">>, utf8()]). @@ -383,66 +518,91 @@ protocol() -> oneof([mqtt, 'mqtt-sn', coap, lwm2m, limited_latin_atom()]). url() -> - ?LET({Schema, IP, Port, Path}, {oneof(["http://", "https://"]), ip(), port(), http_path()}, + ?LET( + {Schema, IP, Port, Path}, + {oneof(["http://", "https://"]), ip(), port(), http_path()}, begin - IP1 = case tuple_size(IP) == 8 of - true -> "[" ++ inet:ntoa(IP) ++ "]"; - false -> inet:ntoa(IP) - end, + IP1 = + case tuple_size(IP) == 8 of + true -> "[" ++ inet:ntoa(IP) ++ "]"; + false -> inet:ntoa(IP) + end, lists:concat([Schema, IP1, ":", integer_to_list(Port), "/", Path]) - end). + end + ). ip() -> oneof([ipv4(), ipv6(), ipv6_from_v4()]). ipv4() -> - ?LET(IP, {range(1, 16#ff), range(0, 16#ff), - range(0, 16#ff), range(0, 16#ff)}, IP). + ?LET(IP, {range(1, 16#ff), range(0, 16#ff), range(0, 16#ff), range(0, 16#ff)}, IP). ipv6() -> - ?LET(IP, {range(0, 16#ff), range(0, 16#ff), - range(0, 16#ff), range(0, 16#ff), - range(0, 16#ff), range(0, 16#ff), - range(0, 16#ff), range(0, 16#ff)}, IP). + ?LET( + IP, + { + range(0, 16#ff), + range(0, 16#ff), + range(0, 16#ff), + range(0, 16#ff), + range(0, 16#ff), + range(0, 16#ff), + range(0, 16#ff), + range(0, 16#ff) + }, + IP + ). ipv6_from_v4() -> - ?LET(IP, {range(1, 16#ff), range(0, 16#ff), - range(0, 16#ff), range(0, 16#ff)}, - inet:ipv4_mapped_ipv6_address(IP)). + ?LET( + IP, + {range(1, 16#ff), range(0, 16#ff), range(0, 16#ff), range(0, 16#ff)}, + inet:ipv4_mapped_ipv6_address(IP) + ). port() -> ?LET(Port, range(1, 16#ffff), Port). http_path() -> - list(frequency([{3, latin_char()}, - {1, $/}])). + list( + frequency([ + {3, latin_char()}, + {1, $/} + ]) + ). latin_char() -> oneof([integer($0, $9), integer($A, $Z), integer($a, $z)]). limited_latin_atom() -> - oneof([ 'abc_atom' - , '0123456789' - , 'ABC-ATOM' - , 'abc123ABC' - ]). + oneof([ + 'abc_atom', + '0123456789', + 'ABC-ATOM', + 'abc123ABC' + ]). 
%% Avoid generating a lot of atom and causing atom table overflows limited_atom() -> - oneof([ 'a_normal_atom' - , '10123_num_prefixed_atom' - , '___dash_prefixed_atom' - , '123' - , binary_to_atom(<<"你好_utf8_atom"/utf8>>) - , '_', ' ', '""', '#$%^&*' - %% The longest atom with 255 chars - , list_to_atom( - lists:append([ "so" - , [ $o || _ <- lists:seq(1, 243)] - , "-long-atom"] - ) - ) - ]). + oneof([ + 'a_normal_atom', + '10123_num_prefixed_atom', + '___dash_prefixed_atom', + '123', + binary_to_atom(<<"你好_utf8_atom"/utf8>>), + '_', + ' ', + '""', + '#$%^&*', + %% The longest atom with 255 chars + list_to_atom( + lists:append([ + "so", + [$o || _ <- lists:seq(1, 243)], + "-long-atom" + ]) + ) + ]). limited_any_term() -> oneof([binary(), number(), string()]). @@ -453,20 +613,25 @@ limited_any_term() -> nof(Ls) when is_list(Ls) -> Len = length(Ls), - ?LET(N, range(0, Len), - begin + ?LET( + N, + range(0, Len), + begin Ns = rand_nl(N, Len, []), [lists:nth(I, Ls) || I <- Ns] - end). + end + ). limited_list(0, T) -> list(T); - limited_list(N, T) -> - ?LET(N2, range(0, N), - begin - [T || _ <- lists:seq(1, N2)] - end). + ?LET( + N2, + range(0, N), + begin + [T || _ <- lists:seq(1, N2)] + end + ). %%-------------------------------------------------------------------- %% Internal funcs @@ -480,7 +645,7 @@ rand_nl(N, L, Acc) -> R = rand:uniform(L), case lists:member(R, Acc) of true -> rand_nl(N, L, Acc); - _ -> rand_nl(N-1, L, [R|Acc]) + _ -> rand_nl(N - 1, L, [R | Acc]) end. ensure_bin(A) when is_atom(A) -> diff --git a/apps/emqx/test/emqx_ratelimiter_SUITE.erl b/apps/emqx/test/emqx_ratelimiter_SUITE.erl index 589e78e8e..8ad7cb3f1 100644 --- a/apps/emqx/test/emqx_ratelimiter_SUITE.erl +++ b/apps/emqx/test/emqx_ratelimiter_SUITE.erl @@ -24,53 +24,57 @@ -include_lib("eunit/include/eunit.hrl"). -include_lib("common_test/include/ct.hrl"). --define(BASE_CONF, <<""" -limiter { - bytes_in { - bucket.default { - rate = infinity - capacity = infinity - } - } +-define(BASE_CONF, << + "" + "\n" + "limiter {\n" + " bytes_in {\n" + " bucket.default {\n" + " rate = infinity\n" + " capacity = infinity\n" + " }\n" + " }\n" + "\n" + " message_in {\n" + " bucket.default {\n" + " rate = infinity\n" + " capacity = infinity\n" + " }\n" + " }\n" + "\n" + " connection {\n" + " bucket.default {\n" + " rate = infinity\n" + " capacity = infinity\n" + " }\n" + " }\n" + "\n" + " message_routing {\n" + " bucket.default {\n" + " rate = infinity\n" + " capacity = infinity\n" + " }\n" + " }\n" + "\n" + " batch {\n" + " bucket.retainer {\n" + " rate = infinity\n" + " capacity = infinity\n" + " }\n" + " }\n" + "}\n" + "\n" + "" +>>). - message_in { - bucket.default { - rate = infinity - capacity = infinity - } - } - - connection { - bucket.default { - rate = infinity - capacity = infinity - } - } - - message_routing { - bucket.default { - rate = infinity - capacity = infinity - } - } - - batch { - bucket.retainer { - rate = infinity - capacity = infinity - } - } -} - -""">>). - --record(client, { counter :: counters:counter_ref() - , start :: pos_integer() - , endtime :: pos_integer() - , obtained :: pos_integer() - , rate :: float() - , client :: emqx_htb_limiter:client() - }). +-record(client, { + counter :: counters:counter_ref(), + start :: pos_integer(), + endtime :: pos_integer(), + obtained :: pos_integer(), + rate :: float(), + client :: emqx_htb_limiter:client() +}). -define(LOGT(Format, Args), ct:pal("TEST_SUITE: " ++ Format, Args)). -define(RATE(Rate), to_rate(Rate)). 
@@ -102,205 +106,246 @@ base_conf() -> %%-------------------------------------------------------------------- t_consume(_) -> Cfg = fun(Cfg) -> - Cfg#{rate := 100, - capacity := 100, - initial := 100, - max_retry_time := 1000, - failure_strategy := force} - end, + Cfg#{ + rate := 100, + capacity := 100, + initial := 100, + max_retry_time := 1000, + failure_strategy := force + } + end, Case = fun() -> - Client = connect(default), - {ok, L2} = emqx_htb_limiter:consume(50, Client), - {ok, _L3} = emqx_htb_limiter:consume(150, L2) - end, + Client = connect(default), + {ok, L2} = emqx_htb_limiter:consume(50, Client), + {ok, _L3} = emqx_htb_limiter:consume(150, L2) + end, with_per_client(default, Cfg, Case). t_retry(_) -> Cfg = fun(Cfg) -> - Cfg#{rate := 50, - capacity := 200, - initial := 0, - max_retry_time := 1000, - failure_strategy := force} - end, + Cfg#{ + rate := 50, + capacity := 200, + initial := 0, + max_retry_time := 1000, + failure_strategy := force + } + end, Case = fun() -> - Client = connect(default), - {ok, Client} = emqx_htb_limiter:retry(Client), - {_, _, Retry, L2} = emqx_htb_limiter:check(150, Client), - L3 = emqx_htb_limiter:set_retry(Retry, L2), - timer:sleep(500), - {ok, _L4} = emqx_htb_limiter:retry(L3) - end, + Client = connect(default), + {ok, Client} = emqx_htb_limiter:retry(Client), + {_, _, Retry, L2} = emqx_htb_limiter:check(150, Client), + L3 = emqx_htb_limiter:set_retry(Retry, L2), + timer:sleep(500), + {ok, _L4} = emqx_htb_limiter:retry(L3) + end, with_per_client(default, Cfg, Case). t_restore(_) -> Cfg = fun(Cfg) -> - Cfg#{rate := 1, - capacity := 200, - initial := 50, - max_retry_time := 100, - failure_strategy := force} - end, + Cfg#{ + rate := 1, + capacity := 200, + initial := 50, + max_retry_time := 100, + failure_strategy := force + } + end, Case = fun() -> - Client = connect(default), - {_, _, Retry, L2} = emqx_htb_limiter:check(150, Client), - timer:sleep(200), - {ok, L3} = emqx_htb_limiter:check(Retry, L2), - Avaiable = emqx_htb_limiter:available(L3), - ?assert(Avaiable >= 50) - end, + Client = connect(default), + {_, _, Retry, L2} = emqx_htb_limiter:check(150, Client), + timer:sleep(200), + {ok, L3} = emqx_htb_limiter:check(Retry, L2), + Avaiable = emqx_htb_limiter:available(L3), + ?assert(Avaiable >= 50) + end, with_per_client(default, Cfg, Case). t_max_retry_time(_) -> Cfg = fun(Cfg) -> - Cfg#{rate := 1, - capacity := 1, - max_retry_time := 500, - failure_strategy := drop} - end, + Cfg#{ + rate := 1, + capacity := 1, + max_retry_time := 500, + failure_strategy := drop + } + end, Case = fun() -> - Client = connect(default), - Begin = ?NOW, - Result = emqx_htb_limiter:consume(101, Client), - ?assertMatch({drop, _}, Result), - Time = ?NOW - Begin, - ?assert(Time >= 500 andalso Time < 550) - end, + Client = connect(default), + Begin = ?NOW, + Result = emqx_htb_limiter:consume(101, Client), + ?assertMatch({drop, _}, Result), + Time = ?NOW - Begin, + ?assert(Time >= 500 andalso Time < 550) + end, with_per_client(default, Cfg, Case). 
t_divisible(_) -> Cfg = fun(Cfg) -> - Cfg#{divisible := true, - rate := ?RATE("1000/1s"), - initial := 600, - capacity := 600} - end, + Cfg#{ + divisible := true, + rate := ?RATE("1000/1s"), + initial := 600, + capacity := 600 + } + end, Case = fun() -> - Client = connect(default), - Result = emqx_htb_limiter:check(1000, Client), - ?assertMatch({partial, - 400, - #{continuation := _, - diff := 400, - start := _, - need := 1000}, - _}, Result) - end, + Client = connect(default), + Result = emqx_htb_limiter:check(1000, Client), + ?assertMatch( + {partial, 400, + #{ + continuation := _, + diff := 400, + start := _, + need := 1000 + }, + _}, + Result + ) + end, with_per_client(default, Cfg, Case). t_low_water_mark(_) -> Cfg = fun(Cfg) -> - Cfg#{low_water_mark := 400, - rate := ?RATE("1000/1s"), - initial := 1000, - capacity := 1000} - end, + Cfg#{ + low_water_mark := 400, + rate := ?RATE("1000/1s"), + initial := 1000, + capacity := 1000 + } + end, Case = fun() -> - Client = connect(default), - Result = emqx_htb_limiter:check(500, Client), - ?assertMatch({ok, _}, Result), - {_, Client2} = Result, - Result2 = emqx_htb_limiter:check(101, Client2), - ?assertMatch({pause, - _, - #{continuation := undefined, - diff := 0}, - _}, Result2) - end, + Client = connect(default), + Result = emqx_htb_limiter:check(500, Client), + ?assertMatch({ok, _}, Result), + {_, Client2} = Result, + Result2 = emqx_htb_limiter:check(101, Client2), + ?assertMatch( + {pause, _, + #{ + continuation := undefined, + diff := 0 + }, + _}, + Result2 + ) + end, with_per_client(default, Cfg, Case). t_infinity_client(_) -> Fun = fun(#{per_client := Cli} = Bucket) -> - Bucket2 = Bucket#{rate := infinity, - capacity := infinity}, - Cli2 = Cli#{rate := infinity, capacity := infinity}, - Bucket2#{per_client := Cli2} - end, + Bucket2 = Bucket#{ + rate := infinity, + capacity := infinity + }, + Cli2 = Cli#{rate := infinity, capacity := infinity}, + Bucket2#{per_client := Cli2} + end, Case = fun() -> - Client = connect(default), - ?assertEqual(infinity, Client), - Result = emqx_htb_limiter:check(100000, Client), - ?assertEqual({ok, Client}, Result) - end, + Client = connect(default), + ?assertEqual(infinity, Client), + Result = emqx_htb_limiter:check(100000, Client), + ?assertEqual({ok, Client}, Result) + end, with_bucket(default, Fun, Case). t_try_restore_agg(_) -> Fun = fun(#{per_client := Cli} = Bucket) -> - Bucket2 = Bucket#{rate := 1, - capacity := 200, - initial := 50}, - Cli2 = Cli#{rate := infinity, capacity := infinity, divisible := true, - max_retry_time := 100, failure_strategy := force}, - Bucket2#{per_client := Cli2} - end, + Bucket2 = Bucket#{ + rate := 1, + capacity := 200, + initial := 50 + }, + Cli2 = Cli#{ + rate := infinity, + capacity := infinity, + divisible := true, + max_retry_time := 100, + failure_strategy := force + }, + Bucket2#{per_client := Cli2} + end, Case = fun() -> - Client = connect(default), - {_, _, Retry, L2} = emqx_htb_limiter:check(150, Client), - timer:sleep(200), - {ok, L3} = emqx_htb_limiter:check(Retry, L2), - Avaiable = emqx_htb_limiter:available(L3), - ?assert(Avaiable >= 50) - end, + Client = connect(default), + {_, _, Retry, L2} = emqx_htb_limiter:check(150, Client), + timer:sleep(200), + {ok, L3} = emqx_htb_limiter:check(Retry, L2), + Avaiable = emqx_htb_limiter:available(L3), + ?assert(Avaiable >= 50) + end, with_bucket(default, Fun, Case). 
t_short_board(_) -> Fun = fun(#{per_client := Cli} = Bucket) -> - Bucket2 = Bucket#{rate := ?RATE("100/1s"), - initial := 0, - capacity := 100}, - Cli2 = Cli#{rate := ?RATE("600/1s"), - capacity := 600, - initial := 600}, - Bucket2#{per_client := Cli2} - end, + Bucket2 = Bucket#{ + rate := ?RATE("100/1s"), + initial := 0, + capacity := 100 + }, + Cli2 = Cli#{ + rate := ?RATE("600/1s"), + capacity := 600, + initial := 600 + }, + Bucket2#{per_client := Cli2} + end, Case = fun() -> - Counter = counters:new(1, []), - start_client(default, ?NOW + 2000, Counter, 20), - timer:sleep(2100), - check_average_rate(Counter, 2, 100) - end, + Counter = counters:new(1, []), + start_client(default, ?NOW + 2000, Counter, 20), + timer:sleep(2100), + check_average_rate(Counter, 2, 100) + end, with_bucket(default, Fun, Case). t_rate(_) -> Fun = fun(#{per_client := Cli} = Bucket) -> - Bucket2 = Bucket#{rate := ?RATE("100/100ms"), - initial := 0, - capacity := infinity}, - Cli2 = Cli#{rate := infinity, - capacity := infinity, - initial := 0}, - Bucket2#{per_client := Cli2} - end, + Bucket2 = Bucket#{ + rate := ?RATE("100/100ms"), + initial := 0, + capacity := infinity + }, + Cli2 = Cli#{ + rate := infinity, + capacity := infinity, + initial := 0 + }, + Bucket2#{per_client := Cli2} + end, Case = fun() -> - Client = connect(default), - Ts1 = erlang:system_time(millisecond), - C1 = emqx_htb_limiter:available(Client), - timer:sleep(1000), - Ts2 = erlang:system_time(millisecond), - C2 = emqx_htb_limiter:available(Client), - ShouldInc = floor((Ts2 - Ts1) / 100) * 100, - Inc = C2 - C1, - ?assert(in_range(Inc, ShouldInc - 100, ShouldInc + 100), "test bucket rate") - end, + Client = connect(default), + Ts1 = erlang:system_time(millisecond), + C1 = emqx_htb_limiter:available(Client), + timer:sleep(1000), + Ts2 = erlang:system_time(millisecond), + C2 = emqx_htb_limiter:available(Client), + ShouldInc = floor((Ts2 - Ts1) / 100) * 100, + Inc = C2 - C1, + ?assert(in_range(Inc, ShouldInc - 100, ShouldInc + 100), "test bucket rate") + end, with_bucket(default, Fun, Case). t_capacity(_) -> Capacity = 600, Fun = fun(#{per_client := Cli} = Bucket) -> - Bucket2 = Bucket#{rate := ?RATE("100/100ms"), - initial := 0, - capacity := 600}, - Cli2 = Cli#{rate := infinity, - capacity := infinity, - initial := 0}, - Bucket2#{per_client := Cli2} - end, + Bucket2 = Bucket#{ + rate := ?RATE("100/100ms"), + initial := 0, + capacity := 600 + }, + Cli2 = Cli#{ + rate := infinity, + capacity := infinity, + initial := 0 + }, + Bucket2#{per_client := Cli2} + end, Case = fun() -> - Client = connect(default), - timer:sleep(1000), - C1 = emqx_htb_limiter:available(Client), - ?assertEqual(Capacity, C1, "test bucket capacity") - end, + Client = connect(default), + timer:sleep(1000), + C1 = emqx_htb_limiter:available(Client), + ?assertEqual(Capacity, C1, "test bucket capacity") + end, with_bucket(default, Fun, Case). 
%%-------------------------------------------------------------------- @@ -308,96 +353,116 @@ t_capacity(_) -> %%-------------------------------------------------------------------- t_collaborative_alloc(_) -> GlobalMod = fun(Cfg) -> - Cfg#{rate := ?RATE("600/1s")} - end, + Cfg#{rate := ?RATE("600/1s")} + end, Bucket1 = fun(#{per_client := Cli} = Bucket) -> - Bucket2 = Bucket#{rate := ?RATE("400/1s"), - initial := 0, - capacity := 600}, - Cli2 = Cli#{rate := ?RATE("50"), - capacity := 100, - initial := 100}, - Bucket2#{per_client := Cli2} - end, + Bucket2 = Bucket#{ + rate := ?RATE("400/1s"), + initial := 0, + capacity := 600 + }, + Cli2 = Cli#{ + rate := ?RATE("50"), + capacity := 100, + initial := 100 + }, + Bucket2#{per_client := Cli2} + end, Bucket2 = fun(Bucket) -> - Bucket2 = Bucket1(Bucket), - Bucket2#{rate := ?RATE("200/1s")} - end, + Bucket2 = Bucket1(Bucket), + Bucket2#{rate := ?RATE("200/1s")} + end, Case = fun() -> - C1 = counters:new(1, []), - C2 = counters:new(1, []), - start_client(b1, ?NOW + 2000, C1, 20), - start_client(b2, ?NOW + 2000, C2, 30), - timer:sleep(2100), - check_average_rate(C1, 2, 300), - check_average_rate(C2, 2, 300) - end, + C1 = counters:new(1, []), + C2 = counters:new(1, []), + start_client(b1, ?NOW + 2000, C1, 20), + start_client(b2, ?NOW + 2000, C2, 30), + timer:sleep(2100), + check_average_rate(C1, 2, 300), + check_average_rate(C2, 2, 300) + end, - with_global(GlobalMod, - [{b1, Bucket1}, {b2, Bucket2}], - Case). + with_global( + GlobalMod, + [{b1, Bucket1}, {b2, Bucket2}], + Case + ). t_burst(_) -> GlobalMod = fun(Cfg) -> - Cfg#{rate := ?RATE("200/1s"), - burst := ?RATE("400/1s")} - end, + Cfg#{ + rate := ?RATE("200/1s"), + burst := ?RATE("400/1s") + } + end, Bucket = fun(#{per_client := Cli} = Bucket) -> - Bucket2 = Bucket#{rate := ?RATE("200/1s"), - initial := 0, - capacity := 200}, - Cli2 = Cli#{rate := ?RATE("50/1s"), - capacity := 200, - divisible := true}, - Bucket2#{per_client := Cli2} - end, + Bucket2 = Bucket#{ + rate := ?RATE("200/1s"), + initial := 0, + capacity := 200 + }, + Cli2 = Cli#{ + rate := ?RATE("50/1s"), + capacity := 200, + divisible := true + }, + Bucket2#{per_client := Cli2} + end, Case = fun() -> - C1 = counters:new(1, []), - C2 = counters:new(1, []), - C3 = counters:new(1, []), - start_client(b1, ?NOW + 2000, C1, 20), - start_client(b2, ?NOW + 2000, C2, 30), - start_client(b3, ?NOW + 2000, C3, 30), - timer:sleep(2100), + C1 = counters:new(1, []), + C2 = counters:new(1, []), + C3 = counters:new(1, []), + start_client(b1, ?NOW + 2000, C1, 20), + start_client(b2, ?NOW + 2000, C2, 30), + start_client(b3, ?NOW + 2000, C3, 30), + timer:sleep(2100), - Total = lists:sum([counters:get(X, 1) || X <- [C1, C2, C3]]), - in_range(Total / 2, 300) - end, + Total = lists:sum([counters:get(X, 1) || X <- [C1, C2, C3]]), + in_range(Total / 2, 300) + end, - with_global(GlobalMod, - [{b1, Bucket}, {b2, Bucket}, {b3, Bucket}], - Case). + with_global( + GlobalMod, + [{b1, Bucket}, {b2, Bucket}, {b3, Bucket}], + Case + ). 
t_limit_global_with_unlimit_other(_) -> GlobalMod = fun(Cfg) -> - Cfg#{rate := ?RATE("600/1s")} - end, + Cfg#{rate := ?RATE("600/1s")} + end, Bucket = fun(#{per_client := Cli} = Bucket) -> - Bucket2 = Bucket#{rate := infinity, - initial := 0, - capacity := infinity}, - Cli2 = Cli#{rate := infinity, - capacity := infinity, - initial := 0}, - Bucket2#{per_client := Cli2} - end, + Bucket2 = Bucket#{ + rate := infinity, + initial := 0, + capacity := infinity + }, + Cli2 = Cli#{ + rate := infinity, + capacity := infinity, + initial := 0 + }, + Bucket2#{per_client := Cli2} + end, Case = fun() -> - C1 = counters:new(1, []), - start_client(b1, ?NOW + 2000, C1, 20), - timer:sleep(2100), - check_average_rate(C1, 2, 600) - end, + C1 = counters:new(1, []), + start_client(b1, ?NOW + 2000, C1, 20), + timer:sleep(2100), + check_average_rate(C1, 2, 600) + end, - with_global(GlobalMod, - [{b1, Bucket}], - Case). + with_global( + GlobalMod, + [{b1, Bucket}], + Case + ). %%-------------------------------------------------------------------- %% Test Cases container @@ -406,32 +471,46 @@ t_new_container(_) -> C1 = emqx_limiter_container:new(), C2 = emqx_limiter_container:new([message_routing]), C3 = emqx_limiter_container:update_by_name(message_routing, default, C1), - ?assertMatch(#{message_routing := _, - retry_ctx := undefined, - {retry, message_routing} := _}, C2), - ?assertMatch(#{message_routing := _, - retry_ctx := undefined, - {retry, message_routing} := _}, C3), + ?assertMatch( + #{ + message_routing := _, + retry_ctx := undefined, + {retry, message_routing} := _ + }, + C2 + ), + ?assertMatch( + #{ + message_routing := _, + retry_ctx := undefined, + {retry, message_routing} := _ + }, + C3 + ), ok. t_check_container(_) -> Cfg = fun(Cfg) -> - Cfg#{rate := ?RATE("1000/1s"), - initial := 1000, - capacity := 1000} - end, + Cfg#{ + rate := ?RATE("1000/1s"), + initial := 1000, + capacity := 1000 + } + end, Case = fun() -> - C1 = emqx_limiter_container:new([message_routing], - #{message_routing => default}), - {ok, C2} = emqx_limiter_container:check(1000, message_routing, C1), - {pause, Pause, C3} = emqx_limiter_container:check(1000, message_routing, C2), - timer:sleep(Pause), - {ok, C4} = emqx_limiter_container:retry(message_routing, C3), - Context = test, - C5 = emqx_limiter_container:set_retry_context(Context, C4), - RetryData = emqx_limiter_container:get_retry_context(C5), - ?assertEqual(Context, RetryData) - end, + C1 = emqx_limiter_container:new( + [message_routing], + #{message_routing => default} + ), + {ok, C2} = emqx_limiter_container:check(1000, message_routing, C1), + {pause, Pause, C3} = emqx_limiter_container:check(1000, message_routing, C2), + timer:sleep(Pause), + {ok, C4} = emqx_limiter_container:retry(message_routing, C3), + Context = test, + C5 = emqx_limiter_container:set_retry_context(Context, C4), + RetryData = emqx_limiter_container:get_retry_context(C5), + ?assertEqual(Context, RetryData) + end, with_per_client(default, Cfg, Case). 
%%-------------------------------------------------------------------- @@ -448,7 +527,8 @@ t_limiter_manager(_) -> t_limiter_app(_) -> try _ = emqx_limiter_app:start(undefined, undefined) - catch _:_ -> + catch + _:_ -> ok end, ok = emqx_limiter_app:stop(undefined), @@ -456,11 +536,16 @@ t_limiter_app(_) -> t_limiter_server(_) -> State = emqx_limiter_server:info(message_routing), - ?assertMatch(#{root := _, - counter := _, - index := _, - buckets := _, - type := message_routing}, State), + ?assertMatch( + #{ + root := _, + counter := _, + index := _, + buckets := _, + type := message_routing + }, + State + ), Name = emqx_limiter_server:name(message_routing), ignored = gen_server:call(Name, unexpected_call), @@ -499,50 +584,59 @@ t_schema_unit(_) -> %%% Internal functions %%-------------------------------------------------------------------- start_client(Name, EndTime, Counter, Number) -> - lists:foreach(fun(_) -> - spawn(fun() -> - start_client(Name, EndTime, Counter) - end) - end, - lists:seq(1, Number)). + lists:foreach( + fun(_) -> + spawn(fun() -> + start_client(Name, EndTime, Counter) + end) + end, + lists:seq(1, Number) + ). start_client(Name, EndTime, Counter) -> #{per_client := PerClient} = emqx_config:get([limiter, message_routing, bucket, Name]), #{rate := Rate} = PerClient, - Client = #client{start = ?NOW, - endtime = EndTime, - counter = Counter, - obtained = 0, - rate = Rate, - client = connect(Name) - }, + Client = #client{ + start = ?NOW, + endtime = EndTime, + counter = Counter, + obtained = 0, + rate = Rate, + client = connect(Name) + }, client_loop(Client). %% the simulated client will try to reach the configured rate as much as possible %% note this client will not considered the capacity, so must make sure rate < capacity -client_loop(#client{start = Start, - endtime = EndTime, - obtained = Obtained, - rate = Rate} = State) -> +client_loop( + #client{ + start = Start, + endtime = EndTime, + obtained = Obtained, + rate = Rate + } = State +) -> Now = ?NOW, Period = emqx_limiter_schema:minimum_period(), MinPeriod = erlang:ceil(0.25 * Period), - if Now >= EndTime -> + if + Now >= EndTime -> stop; - Now - Start < MinPeriod -> + Now - Start < MinPeriod -> timer:sleep(client_random_val(MinPeriod)), client_loop(State); - Obtained =< 0 -> + Obtained =< 0 -> Rand = client_random_val(Rate), client_try_check(Rand, State); - true -> + true -> Span = Now - Start, CurrRate = Obtained * Period / Span, - if CurrRate < Rate -> + if + CurrRate < Rate -> Rand = client_random_val(Rate), client_try_check(Rand, State); - true -> + true -> LeftTime = EndTime - Now, CanSleep = erlang:min(LeftTime, client_random_val(MinPeriod div 2)), timer:sleep(CanSleep), @@ -550,10 +644,15 @@ client_loop(#client{start = Start, end end. 
-client_try_check(Need, #client{counter = Counter, - endtime = EndTime, - obtained = Obtained, - client = Client} = State) -> +client_try_check( + Need, + #client{ + counter = Counter, + endtime = EndTime, + obtained = Obtained, + client = Client + } = State +) -> case emqx_htb_limiter:check(Need, Client) of {ok, Client2} -> case Need of @@ -564,9 +663,10 @@ client_try_check(Need, #client{counter = Counter, client_loop(State#client{obtained = Obtained + Val, client = Client2}); {_, Pause, Retry, Client2} -> LeftTime = EndTime - ?NOW, - if LeftTime =< 0 -> + if + LeftTime =< 0 -> stop; - true -> + true -> timer:sleep(erlang:min(Pause, LeftTime)), client_try_check(Retry, State#client{client = Client2}) end @@ -577,7 +677,6 @@ client_try_check(Need, #client{counter = Counter, %% client's divisible should be true or capacity must be bigger than number of each consume client_random_val(infinity) -> 1000; - %% random in 0.5Range ~ 1Range client_random_val(Range) -> Half = erlang:floor(Range) div 2, @@ -590,13 +689,13 @@ to_rate(Str) -> with_global(Modifier, BuckeTemps, Case) -> Fun = fun(Cfg) -> - #{bucket := #{default := BucketCfg}} = Cfg2 = Modifier(Cfg), - Fun = fun({Name, BMod}, Acc) -> - Acc#{Name => BMod(BucketCfg)} - end, - Buckets = lists:foldl(Fun, #{}, BuckeTemps), - Cfg2#{bucket := Buckets} - end, + #{bucket := #{default := BucketCfg}} = Cfg2 = Modifier(Cfg), + Fun = fun({Name, BMod}, Acc) -> + Acc#{Name => BMod(BucketCfg)} + end, + Buckets = lists:foldl(Fun, #{}, BuckeTemps), + Cfg2#{bucket := Buckets} + end, with_config([limiter, message_routing], Fun, Case). @@ -623,7 +722,8 @@ delay_return(Case) -> try Return = Case(), fun() -> Return end - catch Type:Reason:Trace -> + catch + Type:Reason:Trace -> fun() -> erlang:raise(Type, Reason, Trace) end end. @@ -653,7 +753,7 @@ in_range(_, _) -> in_range(Val, Min, _Max) when Val < Min -> ct:pal("Val:~p smaller than min bound:~p~n", [Val, Min]), false; -in_range(Val, _Min, Max) when Val > Max-> +in_range(Val, _Min, Max) when Val > Max -> ct:pal("Val:~p bigger than max bound:~p~n", [Val, Max]), false; in_range(_, _, _) -> @@ -662,12 +762,11 @@ in_range(_, _, _) -> apply_modifier(Name, Modifier, Cfg) when is_list(Name) -> Pairs = lists:zip(Name, Modifier), apply_modifier(Pairs, Cfg); - apply_modifier(Name, Modifier, #{default := Template} = Cfg) -> Cfg#{Name => Modifier(Template)}. apply_modifier(Pairs, #{default := Template}) -> Fun = fun({N, M}, Acc) -> - Acc#{N => M(Template)} - end, + Acc#{N => M(Template)} + end, lists:foldl(Fun, #{}, Pairs). diff --git a/apps/emqx/test/emqx_request_handler.erl b/apps/emqx/test/emqx_request_handler.erl index a1a120154..7f9407258 100644 --- a/apps/emqx/test/emqx_request_handler.erl +++ b/apps/emqx/test/emqx_request_handler.erl @@ -25,7 +25,7 @@ -type handler() :: fun((CorrData :: binary(), ReqPayload :: binary()) -> RspPayload :: binary()). -spec start_link(topic(), qos(), handler(), emqtt:options()) -> - {ok, pid()} | {error, any()}. + {ok, pid()} | {error, any()}. start_link(RequestTopic, QoS, RequestHandler, Options0) -> Parent = self(), MsgHandler = make_msg_handler(RequestHandler, Parent), @@ -37,21 +37,23 @@ start_link(RequestTopic, QoS, RequestHandler, Options0) -> ok -> {ok, Pid}; {error, _} = Error -> Error catch - C : E : S -> + C:E:S -> emqtt:stop(Pid), {error, {C, E, S}} end; - {error, _} = Error -> Error + {error, _} = Error -> + Error end. stop(Pid) -> emqtt:disconnect(Pid). 
make_msg_handler(RequestHandler, Parent) -> - #{publish => fun(Msg) -> handle_msg(Msg, RequestHandler, Parent) end, - puback => fun(_Ack) -> ok end, - disconnected => fun(_Reason) -> ok end - }. + #{ + publish => fun(Msg) -> handle_msg(Msg, RequestHandler, Parent) end, + puback => fun(_Ack) -> ok end, + disconnected => fun(_Reason) -> ok end + }. handle_msg(ReqMsg, RequestHandler, Parent) -> #{qos := QoS, properties := Props, payload := ReqPayload} = ReqMsg, @@ -60,14 +62,17 @@ handle_msg(ReqMsg, RequestHandler, Parent) -> CorrData = maps:get('Correlation-Data', Props), RspProps = maps:without(['Response-Topic'], Props), RspPayload = RequestHandler(CorrData, ReqPayload), - RspMsg = #mqtt_msg{qos = QoS, - topic = RspTopic, - props = RspProps, - payload = RspPayload - }, - logger:debug("~p sending response msg to topic ~ts with~n" - "corr-data=~p~npayload=~p", - [?MODULE, RspTopic, CorrData, RspPayload]), + RspMsg = #mqtt_msg{ + qos = QoS, + topic = RspTopic, + props = RspProps, + payload = RspPayload + }, + logger:debug( + "~p sending response msg to topic ~ts with~n" + "corr-data=~p~npayload=~p", + [?MODULE, RspTopic, CorrData, RspPayload] + ), ok = send_response(RspMsg); _ -> Parent ! {discarded, ReqPayload}, @@ -79,16 +84,22 @@ send_response(Msg) -> %% hence delegate to another temp process for the loopback gen_statem call. Client = self(), _ = spawn_link(fun() -> - case emqtt:publish(Client, Msg) of - ok -> ok; - {ok, _} -> ok; - {error, Reason} -> exit({failed_to_publish_response, Reason}) - end - end), + case emqtt:publish(Client, Msg) of + ok -> ok; + {ok, _} -> ok; + {error, Reason} -> exit({failed_to_publish_response, Reason}) + end + end), ok. subscribe(Client, Topic, QoS) -> {ok, _Props, _QoS} = - emqtt:subscribe(Client, [{Topic, [{rh, 2}, {rap, false}, - {nl, true}, {qos, QoS}]}]), + emqtt:subscribe(Client, [ + {Topic, [ + {rh, 2}, + {rap, false}, + {nl, true}, + {qos, QoS} + ]} + ]), ok. diff --git a/apps/emqx/test/emqx_request_responser_SUITE.erl b/apps/emqx/test/emqx_request_responser_SUITE.erl index c98a6cc7f..9daab6f96 100644 --- a/apps/emqx/test/emqx_request_responser_SUITE.erl +++ b/apps/emqx/test/emqx_request_responser_SUITE.erl @@ -40,28 +40,37 @@ request_response(_Config) -> request_response_per_qos(QoS) -> ReqTopic = <<"request_topic">>, RspTopic = <<"response_topic">>, - {ok, Requester} = emqx_request_sender:start_link(RspTopic, QoS, - [{proto_ver, v5}, - {clientid, <<"requester">>}, - {properties, #{ 'Request-Response-Information' => 1}}]), + {ok, Requester} = emqx_request_sender:start_link( + RspTopic, + QoS, + [ + {proto_ver, v5}, + {clientid, <<"requester">>}, + {properties, #{'Request-Response-Information' => 1}} + ] + ), %% This is a square service Square = fun(_CorrData, ReqBin) -> - I = b2i(ReqBin), - i2b(I * I) - end, - {ok, Responder} = emqx_request_handler:start_link(ReqTopic, QoS, Square, - [{proto_ver, v5}, - {clientid, <<"responder">>} - ]), + I = b2i(ReqBin), + i2b(I * I) + end, + {ok, Responder} = emqx_request_handler:start_link( + ReqTopic, + QoS, + Square, + [ + {proto_ver, v5}, + {clientid, <<"responder">>} + ] + ), ok = emqx_request_sender:send(Requester, ReqTopic, RspTopic, <<"corr-1">>, <<"2">>, QoS), receive {response, <<"corr-1">>, <<"4">>} -> ok; Other -> erlang:error({unexpected, Other}) - after - 100 -> - erlang:error(timeout) + after 100 -> + erlang:error(timeout) end, ok = emqx_request_sender:stop(Requester), ok = emqx_request_handler:stop(Responder). 
diff --git a/apps/emqx/test/emqx_request_sender.erl b/apps/emqx/test/emqx_request_sender.erl index 55292db43..170b15d10 100644 --- a/apps/emqx/test/emqx_request_sender.erl +++ b/apps/emqx/test/emqx_request_sender.erl @@ -31,26 +31,30 @@ start_link(ResponseTopic, QoS, Options0) -> ok -> {ok, Pid}; {error, _} = Error -> Error catch - C : E : S -> + C:E:S -> emqtt:stop(Pid), {error, {C, E, S}} end; - {error, _} = Error -> Error + {error, _} = Error -> + Error end. %% @doc Send a message to request topic with correlation-data `CorrData'. %% Response should be delivered as a `{response, CorrData, Payload}' send(Client, ReqTopic, RspTopic, CorrData, Payload, QoS) -> - Props = #{'Response-Topic' => RspTopic, - 'Correlation-Data' => CorrData - }, - Msg = #mqtt_msg{qos = QoS, - topic = ReqTopic, - props = Props, - payload = Payload - }, + Props = #{ + 'Response-Topic' => RspTopic, + 'Correlation-Data' => CorrData + }, + Msg = #mqtt_msg{ + qos = QoS, + topic = ReqTopic, + props = Props, + payload = Payload + }, case emqtt:publish(Client, Msg) of - ok -> ok; %% QoS = 0 + %% QoS = 0 + ok -> ok; {ok, _} -> ok; {error, _} = E -> E end. @@ -65,10 +69,11 @@ subscribe(Client, Topic, QoS) -> end. make_msg_handler(Parent) -> - #{publish => fun(Msg) -> handle_msg(Msg, Parent) end, - puback => fun(_Ack) -> ok end, - disconnected => fun(_Reason) -> ok end - }. + #{ + publish => fun(Msg) -> handle_msg(Msg, Parent) end, + puback => fun(_Ack) -> ok end, + disconnected => fun(_Reason) -> ok end + }. handle_msg(Msg, Parent) -> #{properties := Props, payload := Payload} = Msg, diff --git a/apps/emqx/test/emqx_router_SUITE.erl b/apps/emqx/test/emqx_router_SUITE.erl index 8677deaa8..8c3d8c0d3 100644 --- a/apps/emqx/test/emqx_router_SUITE.erl +++ b/apps/emqx/test/emqx_router_SUITE.erl @@ -83,11 +83,15 @@ t_match_routes(_) -> ?R:add_route(<<"a/+/c">>, node()), ?R:add_route(<<"a/b/#">>, node()), ?R:add_route(<<"#">>, node()), - ?assertEqual([#route{topic = <<"#">>, dest = node()}, - #route{topic = <<"a/+/c">>, dest = node()}, - #route{topic = <<"a/b/#">>, dest = node()}, - #route{topic = <<"a/b/c">>, dest = node()}], - lists:sort(?R:match_routes(<<"a/b/c">>))), + ?assertEqual( + [ + #route{topic = <<"#">>, dest = node()}, + #route{topic = <<"a/+/c">>, dest = node()}, + #route{topic = <<"a/b/#">>, dest = node()}, + #route{topic = <<"a/b/c">>, dest = node()} + ], + lists:sort(?R:match_routes(<<"a/b/c">>)) + ), ?R:delete_route(<<"a/b/c">>, node()), ?R:delete_route(<<"a/+/c">>, node()), ?R:delete_route(<<"a/b/#">>, node()), @@ -111,5 +115,7 @@ t_unexpected(_) -> Router ! bad_info. clear_tables() -> - lists:foreach(fun mnesia:clear_table/1, - [emqx_route, emqx_trie, emqx_trie_node]). + lists:foreach( + fun mnesia:clear_table/1, + [emqx_route, emqx_trie, emqx_trie_node] + ). diff --git a/apps/emqx/test/emqx_router_helper_SUITE.erl b/apps/emqx/test/emqx_router_helper_SUITE.erl index f00a9b977..4c96091c5 100644 --- a/apps/emqx/test/emqx_router_helper_SUITE.erl +++ b/apps/emqx/test/emqx_router_helper_SUITE.erl @@ -30,16 +30,17 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> - DistPid = case net_kernel:nodename() of - ignored -> - %% calling `net_kernel:start' without `epmd' - %% running will result in a failure. - start_epmd(), - {ok, Pid} = net_kernel:start(['test@127.0.0.1', longnames]), - Pid; - _ -> - undefined - end, + DistPid = + case net_kernel:nodename() of + ignored -> + %% calling `net_kernel:start' without `epmd' + %% running will result in a failure. 
+ start_epmd(), + {ok, Pid} = net_kernel:start(['test@127.0.0.1', longnames]), + Pid; + _ -> + undefined + end, emqx_common_test_helpers:start_apps([]), [{dist_pid, DistPid} | Config]. @@ -53,20 +54,22 @@ end_per_suite(Config) -> end, emqx_common_test_helpers:stop_apps([]). -init_per_testcase(TestCase, Config) - when TestCase =:= t_cleanup_membership_mnesia_down; - TestCase =:= t_cleanup_membership_node_down; - TestCase =:= t_cleanup_monitor_node_down -> +init_per_testcase(TestCase, Config) when + TestCase =:= t_cleanup_membership_mnesia_down; + TestCase =:= t_cleanup_membership_node_down; + TestCase =:= t_cleanup_monitor_node_down +-> ok = snabbkaffe:start_trace(), Slave = start_slave(some_node), [{slave, Slave} | Config]; init_per_testcase(_TestCase, Config) -> Config. -end_per_testcase(TestCase, Config) - when TestCase =:= t_cleanup_membership_mnesia_down; - TestCase =:= t_cleanup_membership_node_down; - TestCase =:= t_cleanup_monitor_node_down -> +end_per_testcase(TestCase, Config) when + TestCase =:= t_cleanup_membership_mnesia_down; + TestCase =:= t_cleanup_membership_node_down; + TestCase =:= t_cleanup_monitor_node_down +-> Slave = ?config(slave, Config), stop_slave(Slave), mria:transaction(?ROUTE_SHARD, fun() -> mnesia:clear_table(?ROUTE_TAB) end), @@ -93,9 +96,10 @@ t_cleanup_membership_mnesia_down(Config) -> emqx_router:add_route(<<"d/e/f">>, node()), ?assertMatch([_, _], emqx_router:topics()), ?wait_async_action( - ?ROUTER_HELPER ! {membership, {mnesia, down, Slave}}, - #{?snk_kind := emqx_router_helper_cleanup_done, node := Slave}, - 1_000), + ?ROUTER_HELPER ! {membership, {mnesia, down, Slave}}, + #{?snk_kind := emqx_router_helper_cleanup_done, node := Slave}, + 1_000 + ), ?assertEqual([<<"d/e/f">>], emqx_router:topics()). t_cleanup_membership_node_down(Config) -> @@ -104,9 +108,10 @@ t_cleanup_membership_node_down(Config) -> emqx_router:add_route(<<"d/e/f">>, node()), ?assertMatch([_, _], emqx_router:topics()), ?wait_async_action( - ?ROUTER_HELPER ! {membership, {node, down, Slave}}, - #{?snk_kind := emqx_router_helper_cleanup_done, node := Slave}, - 1_000), + ?ROUTER_HELPER ! {membership, {node, down, Slave}}, + #{?snk_kind := emqx_router_helper_cleanup_done, node := Slave}, + 1_000 + ), ?assertEqual([<<"d/e/f">>], emqx_router:topics()). t_cleanup_monitor_node_down(Config) -> @@ -115,9 +120,10 @@ t_cleanup_monitor_node_down(Config) -> emqx_router:add_route(<<"d/e/f">>, node()), ?assertMatch([_, _], emqx_router:topics()), ?wait_async_action( - stop_slave(Slave), - #{?snk_kind := emqx_router_helper_cleanup_done, node := Slave}, - 1_000), + stop_slave(Slave), + #{?snk_kind := emqx_router_helper_cleanup_done, node := Slave}, + 1_000 + ), ?assertEqual([<<"d/e/f">>], emqx_router:topics()). t_message(_) -> @@ -143,7 +149,8 @@ epmd_path() -> end. start_slave(Name) -> - CommonBeamOpts = "+S 1:1 ", % We want VMs to only occupy a single core + % We want VMs to only occupy a single core + CommonBeamOpts = "+S 1:1 ", {ok, Node} = slave:start_link(host(), Name, CommonBeamOpts ++ ebin_path()), Node. @@ -151,7 +158,8 @@ stop_slave(Node) -> slave:stop(Node). host() -> - [_, Host] = string:tokens(atom_to_list(node()), "@"), Host. + [_, Host] = string:tokens(atom_to_list(node()), "@"), + Host. ebin_path() -> string:join(["-pa" | lists:filter(fun is_lib/1, code:get_path())], " "). 
diff --git a/apps/emqx/test/emqx_run_sh.erl b/apps/emqx/test/emqx_run_sh.erl index 89be9da94..dfb6b9800 100644 --- a/apps/emqx/test/emqx_run_sh.erl +++ b/apps/emqx/test/emqx_run_sh.erl @@ -17,8 +17,16 @@ -export([do/2]). do(Command, Options0) -> - Options = Options0 ++ [use_stdio, stderr_to_stdout, - exit_status, {line, 906}, hide, eof], + Options = + Options0 ++ + [ + use_stdio, + stderr_to_stdout, + exit_status, + {line, 906}, + hide, + eof + ], Port = erlang:open_port({spawn, Command}, Options), try collect_output(Port, []) diff --git a/apps/emqx/test/emqx_schema_tests.erl b/apps/emqx/test/emqx_schema_tests.erl index 9a90c5de6..faa5bf037 100644 --- a/apps/emqx/test/emqx_schema_tests.erl +++ b/apps/emqx/test/emqx_schema_tests.erl @@ -19,45 +19,76 @@ -include_lib("eunit/include/eunit.hrl"). ssl_opts_dtls_test() -> - Sc = emqx_schema:server_ssl_opts_schema(#{versions => dtls_all_available, - ciphers => dtls_all_available}, false), + Sc = emqx_schema:server_ssl_opts_schema( + #{ + versions => dtls_all_available, + ciphers => dtls_all_available + }, + false + ), Checked = validate(Sc, #{<<"versions">> => [<<"dtlsv1.2">>, <<"dtlsv1">>]}), - ?assertMatch(#{versions := ['dtlsv1.2', 'dtlsv1'], - ciphers := ["ECDHE-ECDSA-AES256-GCM-SHA384" | _] - }, Checked). + ?assertMatch( + #{ + versions := ['dtlsv1.2', 'dtlsv1'], + ciphers := ["ECDHE-ECDSA-AES256-GCM-SHA384" | _] + }, + Checked + ). ssl_opts_tls_1_3_test() -> Sc = emqx_schema:server_ssl_opts_schema(#{}, false), Checked = validate(Sc, #{<<"versions">> => [<<"tlsv1.3">>]}), ?assertNot(maps:is_key(handshake_timeout, Checked)), - ?assertMatch(#{versions := ['tlsv1.3'], - ciphers := [_ | _] - }, Checked). + ?assertMatch( + #{ + versions := ['tlsv1.3'], + ciphers := [_ | _] + }, + Checked + ). ssl_opts_tls_for_ranch_test() -> Sc = emqx_schema:server_ssl_opts_schema(#{}, true), Checked = validate(Sc, #{<<"versions">> => [<<"tlsv1.3">>]}), - ?assertMatch(#{versions := ['tlsv1.3'], - ciphers := [_ | _], - handshake_timeout := _ - }, Checked). + ?assertMatch( + #{ + versions := ['tlsv1.3'], + ciphers := [_ | _], + handshake_timeout := _ + }, + Checked + ). ssl_opts_cipher_array_test() -> Sc = emqx_schema:server_ssl_opts_schema(#{}, false), - Checked = validate(Sc, #{<<"versions">> => [<<"tlsv1.3">>], - <<"ciphers">> => [<<"TLS_AES_256_GCM_SHA384">>, - <<"ECDHE-ECDSA-AES256-GCM-SHA384">>]}), - ?assertMatch(#{versions := ['tlsv1.3'], - ciphers := ["TLS_AES_256_GCM_SHA384", "ECDHE-ECDSA-AES256-GCM-SHA384"] - }, Checked). + Checked = validate(Sc, #{ + <<"versions">> => [<<"tlsv1.3">>], + <<"ciphers">> => [ + <<"TLS_AES_256_GCM_SHA384">>, + <<"ECDHE-ECDSA-AES256-GCM-SHA384">> + ] + }), + ?assertMatch( + #{ + versions := ['tlsv1.3'], + ciphers := ["TLS_AES_256_GCM_SHA384", "ECDHE-ECDSA-AES256-GCM-SHA384"] + }, + Checked + ). ssl_opts_cipher_comma_separated_string_test() -> Sc = emqx_schema:server_ssl_opts_schema(#{}, false), - Checked = validate(Sc, #{<<"versions">> => [<<"tlsv1.3">>], - <<"ciphers">> => <<"TLS_AES_256_GCM_SHA384,ECDHE-ECDSA-AES256-GCM-SHA384">>}), - ?assertMatch(#{versions := ['tlsv1.3'], - ciphers := ["TLS_AES_256_GCM_SHA384", "ECDHE-ECDSA-AES256-GCM-SHA384"] - }, Checked). + Checked = validate(Sc, #{ + <<"versions">> => [<<"tlsv1.3">>], + <<"ciphers">> => <<"TLS_AES_256_GCM_SHA384,ECDHE-ECDSA-AES256-GCM-SHA384">> + }), + ?assertMatch( + #{ + versions := ['tlsv1.3'], + ciphers := ["TLS_AES_256_GCM_SHA384", "ECDHE-ECDSA-AES256-GCM-SHA384"] + }, + Checked + ). 
ssl_opts_tls_psk_test() -> Sc = emqx_schema:server_ssl_opts_schema(#{}, false), @@ -67,33 +98,46 @@ ssl_opts_tls_psk_test() -> bad_cipher_test() -> Sc = emqx_schema:server_ssl_opts_schema(#{}, false), Reason = {bad_ciphers, ["foo"]}, - ?assertThrow({_Sc, [{validation_error, #{reason := Reason}}]}, - validate(Sc, #{<<"versions">> => [<<"tlsv1.2">>], - <<"ciphers">> => [<<"foo">>]})), + ?assertThrow( + {_Sc, [{validation_error, #{reason := Reason}}]}, + validate(Sc, #{ + <<"versions">> => [<<"tlsv1.2">>], + <<"ciphers">> => [<<"foo">>] + }) + ), ok. validate(Schema, Data0) -> - Sc = #{ roots => [ssl_opts] - , fields => #{ssl_opts => Schema} - }, - Data = Data0#{ cacertfile => <<"cacertfile">> - , certfile => <<"certfile">> - , keyfile => <<"keyfile">> - }, + Sc = #{ + roots => [ssl_opts], + fields => #{ssl_opts => Schema} + }, + Data = Data0#{ + cacertfile => <<"cacertfile">>, + certfile => <<"certfile">>, + keyfile => <<"keyfile">> + }, #{ssl_opts := Checked} = - hocon_tconf:check_plain(Sc, #{<<"ssl_opts">> => Data}, - #{atom_key => true}), + hocon_tconf:check_plain( + Sc, + #{<<"ssl_opts">> => Data}, + #{atom_key => true} + ), Checked. ciperhs_schema_test() -> Sc = emqx_schema:ciphers_schema(undefined), WSc = #{roots => [{ciphers, Sc}]}, - ?assertThrow({_, [{validation_error, _}]}, - hocon_tconf:check_plain(WSc, #{<<"ciphers">> => <<"foo,bar">>})). + ?assertThrow( + {_, [{validation_error, _}]}, + hocon_tconf:check_plain(WSc, #{<<"ciphers">> => <<"foo,bar">>}) + ). bad_tls_version_test() -> Sc = emqx_schema:server_ssl_opts_schema(#{}, false), Reason = {unsupported_ssl_versions, [foo]}, - ?assertThrow({_Sc, [{validation_error, #{reason := Reason}}]}, - validate(Sc, #{<<"versions">> => [<<"foo">>]})), + ?assertThrow( + {_Sc, [{validation_error, #{reason := Reason}}]}, + validate(Sc, #{<<"versions">> => [<<"foo">>]}) + ), ok. diff --git a/apps/emqx/test/emqx_sequence_SUITE.erl b/apps/emqx/test/emqx_sequence_SUITE.erl index 0cb4242ee..27740c012 100644 --- a/apps/emqx/test/emqx_sequence_SUITE.erl +++ b/apps/emqx/test/emqx_sequence_SUITE.erl @@ -21,11 +21,14 @@ -include_lib("eunit/include/eunit.hrl"). --import(emqx_sequence, - [ nextval/2 - , currval/2 - , reclaim/2 - ]). +-import( + emqx_sequence, + [ + nextval/2, + currval/2, + reclaim/2 + ] +). all() -> emqx_common_test_helpers:all(?MODULE). @@ -61,4 +64,3 @@ t_generate(_) -> ?assertEqual(false, ets:member(seqtab, key)), ?assert(emqx_sequence:delete(seqtab)), ?assertNot(emqx_sequence:delete(seqtab)). - diff --git a/apps/emqx/test/emqx_session_SUITE.erl b/apps/emqx/test/emqx_session_SUITE.erl index dd9c16428..6547c520b 100644 --- a/apps/emqx/test/emqx_session_SUITE.erl +++ b/apps/emqx/test/emqx_session_SUITE.erl @@ -28,9 +28,11 @@ all() -> emqx_common_test_helpers:all(?MODULE). -type inflight_data_phase() :: wait_ack | wait_comp. --record(inflight_data, { phase :: inflight_data_phase() - , message :: emqx_types:message() - , timestamp :: non_neg_integer()}). +-record(inflight_data, { + phase :: inflight_data_phase(), + message :: emqx_types:message(), + timestamp :: non_neg_integer() +}). %%-------------------------------------------------------------------- %% CT callbacks @@ -38,8 +40,10 @@ all() -> emqx_common_test_helpers:all(?MODULE). 
init_per_suite(Config) -> emqx_channel_SUITE:set_test_listener_confs(), - ok = meck:new([emqx_hooks, emqx_metrics, emqx_broker], - [passthrough, no_history, no_link]), + ok = meck:new( + [emqx_hooks, emqx_metrics, emqx_broker], + [passthrough, no_history, no_link] + ), ok = meck:expect(emqx_metrics, inc, fun(_) -> ok end), ok = meck:expect(emqx_metrics, inc, fun(_K, _V) -> ok end), ok = meck:expect(emqx_hooks, run, fun(_Hook, _Args) -> ok end), @@ -79,23 +83,31 @@ t_session_init(_) -> %%-------------------------------------------------------------------- t_session_info(_) -> - ?assertMatch(#{subscriptions := #{}, - upgrade_qos := false, - retry_interval := 30000, - await_rel_timeout := 300000 - }, emqx_session:info(session())). + ?assertMatch( + #{ + subscriptions := #{}, + upgrade_qos := false, + retry_interval := 30000, + await_rel_timeout := 300000 + }, + emqx_session:info(session()) + ). t_session_stats(_) -> Stats = emqx_session:stats(session()), - ?assertMatch(#{subscriptions_max := infinity, - inflight_max := 0, - mqueue_len := 0, - mqueue_max := 1000, - mqueue_dropped := 0, - next_pkt_id := 1, - awaiting_rel_cnt := 0, - awaiting_rel_max := 100 - }, maps:from_list(Stats)). + ?assertMatch( + #{ + subscriptions_max := infinity, + inflight_max := 0, + mqueue_len := 0, + mqueue_max := 1000, + mqueue_dropped := 0, + next_pkt_id := 1, + awaiting_rel_cnt := 0, + awaiting_rel_max := 100 + }, + maps:from_list(Stats) + ). %%-------------------------------------------------------------------- %% Test cases for sub/unsub @@ -104,7 +116,8 @@ t_session_stats(_) -> t_subscribe(_) -> ok = meck:expect(emqx_broker, subscribe, fun(_, _, _) -> ok end), {ok, Session} = emqx_session:subscribe( - clientinfo(), <<"#">>, subopts(), session()), + clientinfo(), <<"#">>, subopts(), session() + ), ?assertEqual(1, emqx_session:info(subscriptions_cnt, Session)). t_is_subscriptions_full_false(_) -> @@ -116,7 +129,8 @@ t_is_subscriptions_full_true(_) -> Session = session(#{max_subscriptions => 1}), ?assertNot(emqx_session:is_subscriptions_full(Session)), {ok, Session1} = emqx_session:subscribe( - clientinfo(), <<"t1">>, subopts(), Session), + clientinfo(), <<"t1">>, subopts(), Session + ), ?assert(emqx_session:is_subscriptions_full(Session1)), {error, ?RC_QUOTA_EXCEEDED} = emqx_session:subscribe(clientinfo(), <<"t2">>, subopts(), Session1). @@ -151,9 +165,10 @@ t_publish_qos2(_) -> t_publish_qos2_with_error_return(_) -> ok = meck:expect(emqx_broker, publish, fun(_) -> [] end), - Session = session(#{max_awaiting_rel => 2, - awaiting_rel => #{1 => ts(millisecond)} - }), + Session = session(#{ + max_awaiting_rel => 2, + awaiting_rel => #{1 => ts(millisecond)} + }), Msg = emqx_message:make(clientid, ?QOS_2, <<"t">>, <<"payload">>), {error, ?RC_PACKET_IDENTIFIER_IN_USE} = emqx_session:publish(clientinfo(), 1, Msg, Session), {ok, [], Session1} = emqx_session:publish(clientinfo(), 2, Msg, Session), @@ -165,9 +180,10 @@ t_is_awaiting_full_false(_) -> ?assertNot(emqx_session:is_awaiting_full(Session)). t_is_awaiting_full_true(_) -> - Session = session(#{max_awaiting_rel => 1, - awaiting_rel => #{1 => ts(millisecond)} - }), + Session = session(#{ + max_awaiting_rel => 1, + awaiting_rel => #{1 => ts(millisecond)} + }), ?assert(emqx_session:is_awaiting_full(Session)). 
t_puback(_) -> @@ -201,7 +217,10 @@ t_pubrec(_) -> Inflight = emqx_inflight:insert(2, with_ts(wait_ack, Msg), emqx_inflight:new()), Session = session(#{inflight => Inflight}), {ok, Msg, Session1} = emqx_session:pubrec(clientinfo(), 2, Session), - ?assertMatch([#inflight_data{phase = wait_comp}], emqx_inflight:values(emqx_session:info(inflight, Session1))). + ?assertMatch( + [#inflight_data{phase = wait_comp}], + emqx_inflight:values(emqx_session:info(inflight, Session1)) + ). t_pubrec_packet_id_in_use_error(_) -> Inflight = emqx_inflight:insert(1, with_ts(wait_comp, undefined), emqx_inflight:new()), @@ -241,13 +260,18 @@ t_pubcomp_error_packetid_not_found(_) -> t_dequeue(_) -> Q = mqueue(#{store_qos0 => true}), {ok, Session} = emqx_session:dequeue(clientinfo(), session(#{mqueue => Q})), - Msgs = [emqx_message:make(clientid, ?QOS_0, <<"t0">>, <<"payload">>), - emqx_message:make(clientid, ?QOS_1, <<"t1">>, <<"payload">>), - emqx_message:make(clientid, ?QOS_2, <<"t2">>, <<"payload">>) - ], - Session1 = lists:foldl(fun(Msg, S) -> - emqx_session:enqueue(clientinfo(), Msg, S) - end, Session, Msgs), + Msgs = [ + emqx_message:make(clientid, ?QOS_0, <<"t0">>, <<"payload">>), + emqx_message:make(clientid, ?QOS_1, <<"t1">>, <<"payload">>), + emqx_message:make(clientid, ?QOS_2, <<"t2">>, <<"payload">>) + ], + Session1 = lists:foldl( + fun(Msg, S) -> + emqx_session:enqueue(clientinfo(), Msg, S) + end, + Session, + Msgs + ), {ok, [{undefined, Msg0}, {1, Msg1}, {2, Msg2}], Session2} = emqx_session:dequeue(clientinfo(), Session1), ?assertEqual(0, emqx_session:info(mqueue_len, Session2)), @@ -259,9 +283,11 @@ t_dequeue(_) -> t_deliver_qos0(_) -> ok = meck:expect(emqx_broker, subscribe, fun(_, _, _) -> ok end), {ok, Session} = emqx_session:subscribe( - clientinfo(), <<"t0">>, subopts(), session()), + clientinfo(), <<"t0">>, subopts(), session() + ), {ok, Session1} = emqx_session:subscribe( - clientinfo(), <<"t1">>, subopts(), Session), + clientinfo(), <<"t1">>, subopts(), Session + ), Deliveries = [delivery(?QOS_0, T) || T <- [<<"t0">>, <<"t1">>]], {ok, [{undefined, Msg1}, {undefined, Msg2}], Session1} = emqx_session:deliver(clientinfo(), Deliveries, Session1), @@ -271,7 +297,8 @@ t_deliver_qos0(_) -> t_deliver_qos1(_) -> ok = meck:expect(emqx_broker, subscribe, fun(_, _, _) -> ok end), {ok, Session} = emqx_session:subscribe( - clientinfo(), <<"t1">>, subopts(#{qos => ?QOS_1}), session()), + clientinfo(), <<"t1">>, subopts(#{qos => ?QOS_1}), session() + ), Delivers = [delivery(?QOS_1, T) || T <- [<<"t1">>, <<"t2">>]], {ok, [{1, Msg1}, {2, Msg2}], Session1} = emqx_session:deliver(clientinfo(), Delivers, Session), ?assertEqual(2, emqx_session:info(inflight_cnt, Session1)), @@ -315,16 +342,24 @@ t_deliver_when_inflight_is_full(_) -> t_enqueue(_) -> %% store_qos0 = true Session = emqx_session:enqueue(clientinfo(), [delivery(?QOS_0, <<"t0">>)], session()), - Session1 = emqx_session:enqueue(clientinfo(), [delivery(?QOS_1, <<"t1">>), - delivery(?QOS_2, <<"t2">>)], Session), + Session1 = emqx_session:enqueue( + clientinfo(), + [ + delivery(?QOS_1, <<"t1">>), + delivery(?QOS_2, <<"t2">>) + ], + Session + ), ?assertEqual(3, emqx_session:info(mqueue_len, Session1)). 
t_retry(_) -> Delivers = [delivery(?QOS_1, <<"t1">>), delivery(?QOS_2, <<"t2">>)], - RetryIntervalMs = 100, %% 0.1s + %% 0.1s + RetryIntervalMs = 100, Session = session(#{retry_interval => RetryIntervalMs}), {ok, Pubs, Session1} = emqx_session:deliver(clientinfo(), Delivers, Session), - ElapseMs = 200, %% 0.2s + %% 0.2s + ElapseMs = 200, ok = timer:sleep(ElapseMs), Msgs1 = [{I, with_ts(wait_ack, emqx_message:set_flag(dup, Msg))} || {I, Msg} <- Pubs], {ok, Msgs1T, 100, Session2} = emqx_session:retry(clientinfo(), Session1), @@ -394,18 +429,23 @@ mqueue(Opts) -> session() -> session(#{}). session(InitFields) when is_map(InitFields) -> - maps:fold(fun(Field, Value, Session) -> - emqx_session:set_field(Field, Value, Session) - end, - emqx_session:init(#{max_inflight => 0}), - InitFields). - + maps:fold( + fun(Field, Value, Session) -> + emqx_session:set_field(Field, Value, Session) + end, + emqx_session:init(#{max_inflight => 0}), + InitFields + ). clientinfo() -> clientinfo(#{}). clientinfo(Init) -> - maps:merge(#{clientid => <<"clientid">>, - username => <<"username">> - }, Init). + maps:merge( + #{ + clientid => <<"clientid">>, + username => <<"username">> + }, + Init + ). subopts() -> subopts(#{}). subopts(Init) -> @@ -423,27 +463,24 @@ with_ts(Phase, Msg) -> with_ts(Phase, Msg, erlang:system_time(millisecond)). with_ts(Phase, Msg, Ts) -> - #inflight_data{phase = Phase, - message = Msg, - timestamp = Ts}. + #inflight_data{ + phase = Phase, + message = Msg, + timestamp = Ts + }. remove_deliver_flag({Id, Data}) -> {Id, remove_deliver_flag(Data)}; - remove_deliver_flag(#inflight_data{message = Msg} = Data) -> Data#inflight_data{message = remove_deliver_flag(Msg)}; - remove_deliver_flag(List) when is_list(List) -> lists:map(fun remove_deliver_flag/1, List); - remove_deliver_flag(Msg) -> emqx_message:remove_header(deliver_begin_at, Msg). inflight_data_to_msg({Id, Data}) -> {Id, inflight_data_to_msg(Data)}; - inflight_data_to_msg(#inflight_data{message = Msg}) -> Msg; - inflight_data_to_msg(List) when is_list(List) -> lists:map(fun inflight_data_to_msg/1, List). diff --git a/apps/emqx/test/emqx_shared_sub_SUITE.erl b/apps/emqx/test/emqx_shared_sub_SUITE.erl index 47560f459..6b171206a 100644 --- a/apps/emqx/test/emqx_shared_sub_SUITE.erl +++ b/apps/emqx/test/emqx_shared_sub_SUITE.erl @@ -26,8 +26,10 @@ -define(SUITE, ?MODULE). -define(wait(For, Timeout), - emqx_common_test_helpers:wait_for( - ?FUNCTION_NAME, ?LINE, fun() -> For end, Timeout)). + emqx_common_test_helpers:wait_for( + ?FUNCTION_NAME, ?LINE, fun() -> For end, Timeout + ) +). -define(ack, shared_sub_ack). -define(no_ack, no_ack). @@ -49,20 +51,39 @@ t_maybe_nack_dropped(_) -> ?assertEqual(ok, emqx_shared_sub:maybe_nack_dropped(#message{headers = #{}})), Msg = #message{headers = #{shared_dispatch_ack => {self(), for_test}}}, ?assertEqual(ok, emqx_shared_sub:maybe_nack_dropped(Msg)), - ?assertEqual(ok,receive {for_test, {shared_sub_nack, dropped}} -> ok after 100 -> timeout end). + ?assertEqual( + ok, + receive + {for_test, {shared_sub_nack, dropped}} -> ok + after 100 -> timeout + end + ). t_nack_no_connection(_) -> Msg = #message{headers = #{shared_dispatch_ack => {self(), for_test}}}, ?assertEqual(ok, emqx_shared_sub:nack_no_connection(Msg)), - ?assertEqual(ok,receive {for_test, {shared_sub_nack, no_connection}} -> ok - after 100 -> timeout end). + ?assertEqual( + ok, + receive + {for_test, {shared_sub_nack, no_connection}} -> ok + after 100 -> timeout + end + ). 
t_maybe_ack(_) -> ?assertEqual(#message{headers = #{}}, emqx_shared_sub:maybe_ack(#message{headers = #{}})), Msg = #message{headers = #{shared_dispatch_ack => {self(), for_test}}}, - ?assertEqual(#message{headers = #{shared_dispatch_ack => ?no_ack}}, - emqx_shared_sub:maybe_ack(Msg)), - ?assertEqual(ok,receive {for_test, ?ack} -> ok after 100 -> timeout end). + ?assertEqual( + #message{headers = #{shared_dispatch_ack => ?no_ack}}, + emqx_shared_sub:maybe_ack(Msg) + ), + ?assertEqual( + ok, + receive + {for_test, ?ack} -> ok + after 100 -> timeout + end + ). % t_subscribers(_) -> % error('TODO'). @@ -79,12 +100,14 @@ t_random_basic(_) -> ?assertEqual(true, subscribed(<<"group1">>, Topic, self())), emqx:publish(MsgQoS2), receive - {deliver, Topic0, #message{from = ClientId0, - payload = Payload0}} = M-> - ct:pal("==== received: ~p", [M]), - ?assertEqual(Topic, Topic0), - ?assertEqual(ClientId, ClientId0), - ?assertEqual(Payload, Payload0) + {deliver, Topic0, #message{ + from = ClientId0, + payload = Payload0 + }} = M -> + ct:pal("==== received: ~p", [M]), + ?assertEqual(Topic, Topic0), + ?assertEqual(ClientId, ClientId0), + ?assertEqual(Payload, Payload0) after 1000 -> ct:fail(waiting_basic_failed) end, ok. @@ -118,12 +141,12 @@ t_no_connection_nack(_) -> %% wait for the subscriptions to show up ct:sleep(200), MkPayload = fun(PacketId) -> - iolist_to_binary(["hello-", integer_to_list(PacketId)]) - end, + iolist_to_binary(["hello-", integer_to_list(PacketId)]) + end, SendF = fun(PacketId) -> - M = emqx_message:make(Publisher, QoS, Topic, MkPayload(PacketId)), - emqx:publish(M#message{id = PacketId}) - end, + M = emqx_message:make(Publisher, QoS, Topic, MkPayload(PacketId)), + emqx:publish(M#message{id = PacketId}) + end, SendF(1), timer:sleep(200), %% This is the connection which was picked by broker to dispatch (sticky) for 1st message @@ -198,19 +221,25 @@ t_hash_topic(_) -> emqx:publish(Message1), Me = self(), WaitF = fun(ExpectedPayload) -> - case last_message(ExpectedPayload, [ConnPid1, ConnPid2]) of - {true, Pid} -> - Me ! {subscriber, Pid}, - true; - Other -> - Other - end - end, + case last_message(ExpectedPayload, [ConnPid1, ConnPid2]) of + {true, Pid} -> + Me ! {subscriber, Pid}, + true; + Other -> + Other + end + end, WaitF(<<"hello1">>), - UsedSubPid1 = receive {subscriber, P1} -> P1 end, + UsedSubPid1 = + receive + {subscriber, P1} -> P1 + end, emqx_broker:publish(Message2), WaitF(<<"hello2">>), - UsedSubPid2 = receive {subscriber, P2} -> P2 end, + UsedSubPid2 = + receive + {subscriber, P2} -> P2 + end, ?assert(UsedSubPid1 =/= UsedSubPid2), emqtt:stop(ConnPid1), emqtt:stop(ConnPid2), @@ -262,19 +291,25 @@ test_two_messages(Strategy, WithAck) -> emqx:publish(Message1), Me = self(), WaitF = fun(ExpectedPayload) -> - case last_message(ExpectedPayload, [ConnPid1, ConnPid2]) of - {true, Pid} -> - Me ! {subscriber, Pid}, - true; - Other -> - Other - end - end, + case last_message(ExpectedPayload, [ConnPid1, ConnPid2]) of + {true, Pid} -> + Me ! 
{subscriber, Pid}, + true; + Other -> + Other + end + end, WaitF(<<"hello1">>), - UsedSubPid1 = receive {subscriber, P1} -> P1 end, + UsedSubPid1 = + receive + {subscriber, P1} -> P1 + end, emqx_broker:publish(Message2), WaitF(<<"hello2">>), - UsedSubPid2 = receive {subscriber, P2} -> P2 end, + UsedSubPid2 = + receive + {subscriber, P2} -> P2 + end, case Strategy of sticky -> ?assert(UsedSubPid1 =:= UsedSubPid2); round_robin -> ?assert(UsedSubPid1 =/= UsedSubPid2); @@ -297,11 +332,15 @@ last_message(ExpectedPayload, Pids) -> t_dispatch(_) -> ok = ensure_config(random), Topic = <<"foo">>, - ?assertEqual({error, no_subscribers}, - emqx_shared_sub:dispatch(<<"group1">>, Topic, #delivery{message = #message{}})), + ?assertEqual( + {error, no_subscribers}, + emqx_shared_sub:dispatch(<<"group1">>, Topic, #delivery{message = #message{}}) + ), emqx:subscribe(Topic, #{qos => 2, share => <<"group1">>}), - ?assertEqual({ok, 1}, - emqx_shared_sub:dispatch(<<"group1">>, Topic, #delivery{message = #message{}})). + ?assertEqual( + {ok, 1}, + emqx_shared_sub:dispatch(<<"group1">>, Topic, #delivery{message = #message{}}) + ). % t_unsubscribe(_) -> % error('TODO'). @@ -337,9 +376,10 @@ recv_msgs(0, Msgs) -> recv_msgs(Count, Msgs) -> receive {publish, Msg} -> - recv_msgs(Count-1, [Msg|Msgs]); - _Other -> recv_msgs(Count, Msgs) %%TODO:: remove the branch? + recv_msgs(Count - 1, [Msg | Msgs]); + %%TODO:: remove the branch? + _Other -> + recv_msgs(Count, Msgs) after 100 -> Msgs end. - diff --git a/apps/emqx/test/emqx_static_checks.erl b/apps/emqx/test/emqx_static_checks.erl index aaa7963ef..6dc362e28 100644 --- a/apps/emqx/test/emqx_static_checks.erl +++ b/apps/emqx/test/emqx_static_checks.erl @@ -29,8 +29,10 @@ init_per_suite(Config) -> end_per_suite(_Config) -> logger:notice( - "If this test suite failed, and you are unsure why, read this:~n" - "https://github.com/emqx/emqx/blob/master/apps/emqx/src/bpapi/README.md", []). + "If this test suite failed, and you are unsure why, read this:~n" + "https://github.com/emqx/emqx/blob/master/apps/emqx/src/bpapi/README.md", + [] + ). check_if_versions_consistent(OldData, NewData) -> %% OldData can contain a wider list of BPAPI versions @@ -45,9 +47,10 @@ t_run_check(_) -> check_if_versions_consistent(OldData, NewData) orelse begin logger:critical( - "BPAPI versions were changed, but not committed to the repo.\n" - "Run 'make && make static_checks' and then add the changed " - "'bpapi.versions' files to the commit."), + "BPAPI versions were changed, but not committed to the repo.\n" + "Run 'make && make static_checks' and then add the changed " + "'bpapi.versions' files to the commit." + ), error(version_mismatch) end catch diff --git a/apps/emqx/test/emqx_stats_SUITE.erl b/apps/emqx/test/emqx_stats_SUITE.erl index 5184671bc..913c50622 100644 --- a/apps/emqx/test/emqx_stats_SUITE.erl +++ b/apps/emqx/test/emqx_stats_SUITE.erl @@ -23,12 +23,12 @@ all() -> emqx_common_test_helpers:all(?MODULE). -t_cast_useless_msg(_)-> +t_cast_useless_msg(_) -> emqx_stats:setstat('notExis', 1), with_proc(fun() -> emqx_stats ! useless, ?assertEqual(ok, gen_server:cast(emqx_stats, useless)) - end). + end). 
t_get_error_state(_) -> Conns = emqx_stats:getstats(), @@ -62,44 +62,53 @@ t_get_state(_) -> t_update_interval(_) -> TickMs = 200, - with_proc(fun() -> - SleepMs = TickMs * 2 + TickMs div 2, %% sleep for 2.5 ticks - emqx_stats:cancel_update(cm_stats), - UpdFun = fun() -> emqx_stats:setstat('connections.count', 1) end, - ok = emqx_stats:update_interval(stats_test, UpdFun), - timer:sleep(SleepMs), - ok = emqx_stats:update_interval(stats_test, UpdFun), - timer:sleep(SleepMs), - ?assertEqual(1, emqx_stats:getstat('connections.count')) - end, TickMs). + with_proc( + fun() -> + %% sleep for 2.5 ticks + SleepMs = TickMs * 2 + TickMs div 2, + emqx_stats:cancel_update(cm_stats), + UpdFun = fun() -> emqx_stats:setstat('connections.count', 1) end, + ok = emqx_stats:update_interval(stats_test, UpdFun), + timer:sleep(SleepMs), + ok = emqx_stats:update_interval(stats_test, UpdFun), + timer:sleep(SleepMs), + ?assertEqual(1, emqx_stats:getstat('connections.count')) + end, + TickMs + ). t_helper(_) -> TickMs = 200, TestF = fun(CbModule, CbFun) -> - SleepMs = TickMs + TickMs div 2, %% sleep for 1.5 ticks - Ref = make_ref(), - Tester = self(), - UpdFun = - fun() -> - CbModule:CbFun(), - Tester ! Ref, - ok - end, - ok = emqx_stats:update_interval(stats_test, UpdFun), - timer:sleep(SleepMs), - receive Ref -> ok after 2000 -> error(timeout) end + %% sleep for 1.5 ticks + SleepMs = TickMs + TickMs div 2, + Ref = make_ref(), + Tester = self(), + UpdFun = + fun() -> + CbModule:CbFun(), + Tester ! Ref, + ok + end, + ok = emqx_stats:update_interval(stats_test, UpdFun), + timer:sleep(SleepMs), + receive + Ref -> ok + after 2000 -> error(timeout) + end end, MkTestFun = fun(CbModule, CbFun) -> - fun() -> - with_proc(fun() -> TestF(CbModule, CbFun) end, TickMs) - end + fun() -> + with_proc(fun() -> TestF(CbModule, CbFun) end, TickMs) + end end, - [{"emqx_broker", MkTestFun(emqx_broker, stats_fun)}, - {"emqx_sm", MkTestFun(emqx_sm, stats_fun)}, - {"emqx_router_helper", MkTestFun(emqx_router_helper, stats_fun)}, - {"emqx_cm", MkTestFun(emqx_cm, stats_fun)} + [ + {"emqx_broker", MkTestFun(emqx_broker, stats_fun)}, + {"emqx_sm", MkTestFun(emqx_sm, stats_fun)}, + {"emqx_router_helper", MkTestFun(emqx_router_helper, stats_fun)}, + {"emqx_cm", MkTestFun(emqx_cm, stats_fun)} ]. with_proc(F) -> diff --git a/apps/emqx/test/emqx_sys_mon_SUITE.erl b/apps/emqx/test/emqx_sys_mon_SUITE.erl index 338188c21..98d3e7019 100644 --- a/apps/emqx/test/emqx_sys_mon_SUITE.erl +++ b/apps/emqx/test/emqx_sys_mon_SUITE.erl @@ -26,55 +26,82 @@ -define(FAKE_PORT, hd(erlang:ports())). -define(FAKE_INFO, [{timeout, 100}, {in, foo}, {out, {?MODULE, bar, 1}}]). --define(INPUTINFO, [{self(), long_gc, - fmt("long_gc warning: pid = ~p", [self()]), ?FAKE_INFO}, - {self(), long_schedule, - fmt("long_schedule warning: pid = ~p", [self()]), ?FAKE_INFO}, - {self(), large_heap, - fmt("large_heap warning: pid = ~p", [self()]), ?FAKE_INFO}, - {self(), busy_port, - fmt("busy_port warning: suspid = ~p, port = ~p", - [self(), ?FAKE_PORT]), ?FAKE_PORT}, - %% for the case when the port is missing, for some - %% reason. - {self(), busy_port, - fmt("busy_port warning: suspid = ~p, port = ~p", - [self(), []]), []}, - {self(), busy_dist_port, - fmt("busy_dist_port warning: suspid = ~p, port = ~p", - [self(), ?FAKE_PORT]), ?FAKE_PORT}, - {?FAKE_PORT, long_schedule, - fmt("long_schedule warning: port = ~p", [?FAKE_PORT]), ?FAKE_INFO} - ]). 
+-define(INPUTINFO, [ + {self(), long_gc, fmt("long_gc warning: pid = ~p", [self()]), ?FAKE_INFO}, + {self(), long_schedule, fmt("long_schedule warning: pid = ~p", [self()]), ?FAKE_INFO}, + {self(), large_heap, fmt("large_heap warning: pid = ~p", [self()]), ?FAKE_INFO}, + { + self(), + busy_port, + fmt( + "busy_port warning: suspid = ~p, port = ~p", + [self(), ?FAKE_PORT] + ), + ?FAKE_PORT + }, + %% for the case when the port is missing, for some + %% reason. + { + self(), + busy_port, + fmt( + "busy_port warning: suspid = ~p, port = ~p", + [self(), []] + ), + [] + }, + { + self(), + busy_dist_port, + fmt( + "busy_dist_port warning: suspid = ~p, port = ~p", + [self(), ?FAKE_PORT] + ), + ?FAKE_PORT + }, + {?FAKE_PORT, long_schedule, fmt("long_schedule warning: port = ~p", [?FAKE_PORT]), ?FAKE_INFO} +]). all() -> emqx_common_test_helpers:all(?MODULE). init_per_testcase(t_sys_mon, Config) -> emqx_common_test_helpers:boot_modules(all), - emqx_common_test_helpers:start_apps([], - fun(emqx) -> - application:set_env(emqx, sysmon, [{busy_dist_port,true}, - {busy_port,false}, - {large_heap,8388608}, - {long_schedule,240}, - {long_gc,0}]), - ok; - (_) -> ok - end), + emqx_common_test_helpers:start_apps( + [], + fun + (emqx) -> + application:set_env(emqx, sysmon, [ + {busy_dist_port, true}, + {busy_port, false}, + {large_heap, 8388608}, + {long_schedule, 240}, + {long_gc, 0} + ]), + ok; + (_) -> + ok + end + ), Config; init_per_testcase(t_sys_mon2, Config) -> emqx_common_test_helpers:boot_modules(all), - emqx_common_test_helpers:start_apps([], - fun(emqx) -> - application:set_env(emqx, sysmon, [{busy_dist_port,false}, - {busy_port,true}, - {large_heap,8388608}, - {long_schedule,0}, - {long_gc,200}, - {nothing, 0}]), - ok; - (_) -> ok - end), + emqx_common_test_helpers:start_apps( + [], + fun + (emqx) -> + application:set_env(emqx, sysmon, [ + {busy_dist_port, false}, + {busy_port, true}, + {large_heap, 8388608}, + {long_schedule, 0}, + {long_gc, 200}, + {nothing, 0} + ]), + ok; + (_) -> + ok + end + ), Config; init_per_testcase(t_procinfo, Config) -> emqx_common_test_helpers:boot_modules(all), @@ -103,36 +130,50 @@ t_procinfo_initial_call_and_stacktrace(_) -> {spawned, SomePid} -> ok after 100 -> - error(process_not_spawned) + error(process_not_spawned) end, ProcInfo = emqx_sys_mon:procinfo(SomePid), ?assertEqual( - {?MODULE, some_function, ['Argument__1','Argument__2']}, - proplists:get_value(proc_lib_initial_call, ProcInfo)), + {?MODULE, some_function, ['Argument__1', 'Argument__2']}, + proplists:get_value(proc_lib_initial_call, ProcInfo) + ), ?assertMatch( - [{?MODULE, some_function, 2, - [{file, _}, - {line, _}]}, - {proc_lib, init_p_do_apply, 3, - [{file, _}, - {line, _}]}], - proplists:get_value(current_stacktrace, ProcInfo)), + [ + {?MODULE, some_function, 2, [ + {file, _}, + {line, _} + ]}, + {proc_lib, init_p_do_apply, 3, [ + {file, _}, + {line, _} + ]} + ], + proplists:get_value(current_stacktrace, ProcInfo) + ), SomePid ! stop. t_sys_mon(_Config) -> lists:foreach( - fun({PidOrPort, SysMonName, ValidateInfo, InfoOrPort}) -> - validate_sys_mon_info(PidOrPort, SysMonName, ValidateInfo, InfoOrPort) - end, ?INPUTINFO). + fun({PidOrPort, SysMonName, ValidateInfo, InfoOrPort}) -> + validate_sys_mon_info(PidOrPort, SysMonName, ValidateInfo, InfoOrPort) + end, + ?INPUTINFO + ). %% Existing port, but closed. 
t_sys_mon_dead_port(_Config) -> process_flag(trap_exit, true), Port = dead_port(), {PidOrPort, SysMonName, ValidateInfo, InfoOrPort} = - {self(), busy_port, - fmt("busy_port warning: suspid = ~p, port = ~p", - [self(), Port]), Port}, + { + self(), + busy_port, + fmt( + "busy_port warning: suspid = ~p, port = ~p", + [self(), Port] + ), + Port + }, validate_sys_mon_info(PidOrPort, SysMonName, ValidateInfo, InfoOrPort). t_sys_mon2(_Config) -> @@ -149,12 +190,11 @@ validate_sys_mon_info(PidOrPort, SysMonName, ValidateInfo, InfoOrPort) -> timer:sleep(100), ?SYSMON ! {monitor, PidOrPort, SysMonName, InfoOrPort}, receive - {publish, #{payload := Info}} -> + {publish, #{payload := Info}} -> ?assertEqual(ValidateInfo, binary_to_list(Info)), ct:pal("OK - received msg: ~p~n", [Info]) - after - 1000 -> - ct:fail(timeout) + after 1000 -> + ct:fail(timeout) end, emqtt:stop(C). diff --git a/apps/emqx/test/emqx_tables_SUITE.erl b/apps/emqx/test/emqx_tables_SUITE.erl index 44cdf4b15..dcab2ac73 100644 --- a/apps/emqx/test/emqx_tables_SUITE.erl +++ b/apps/emqx/test/emqx_tables_SUITE.erl @@ -42,4 +42,3 @@ t_delete(_) -> ok = emqx_tables:delete(?TAB), ok = emqx_tables:delete(?TAB), ?assertEqual(undefined, ets:info(?TAB, name)). - diff --git a/apps/emqx/test/emqx_takeover_SUITE.erl b/apps/emqx/test/emqx_takeover_SUITE.erl index 4ef008edd..381657ddf 100644 --- a/apps/emqx/test/emqx_takeover_SUITE.erl +++ b/apps/emqx/test/emqx_takeover_SUITE.erl @@ -52,21 +52,27 @@ t_takeover(_) -> emqtt:subscribe(C1, <<"t">>, 1), spawn(fun() -> - [begin + [ + begin emqx:publish(lists:nth(I, AllMsgs)), timer:sleep(rand:uniform(10)) - end || I <- lists:seq(1, Pos)] - end), + end + || I <- lists:seq(1, Pos) + ] + end), emqtt:pause(C1), - timer:sleep(?CNT*10), + timer:sleep(?CNT * 10), load_meck(ClientId), spawn(fun() -> - [begin + [ + begin emqx:publish(lists:nth(I, AllMsgs)), timer:sleep(rand:uniform(10)) - end || I <- lists:seq(Pos+1, ?CNT)] - end), + end + || I <- lists:seq(Pos + 1, ?CNT) + ] + end), {ok, C2} = emqtt:start_link([{clientid, ClientId}, {clean_start, false}]), {ok, _} = emqtt:connect(C2), @@ -86,14 +92,15 @@ t_takover_in_cluster(_) -> load_meck(ClientId) -> meck:new(fake_conn_mod, [non_strict]), - HookTakeover = fun(Pid, Msg = {takeover, 'begin'}) -> - emqx_connection:call(Pid, Msg); - (Pid, Msg = {takeover, 'end'}) -> - timer:sleep(?CNT*10), - emqx_connection:call(Pid, Msg); - (Pid, Msg) -> - emqx_connection:call(Pid, Msg) - end, + HookTakeover = fun + (Pid, Msg = {takeover, 'begin'}) -> + emqx_connection:call(Pid, Msg); + (Pid, Msg = {takeover, 'end'}) -> + timer:sleep(?CNT * 10), + emqx_connection:call(Pid, Msg); + (Pid, Msg) -> + emqx_connection:call(Pid, Msg) + end, meck:expect(fake_conn_mod, call, HookTakeover), [ChanPid] = emqx_cm:lookup_channels(ClientId), ChanInfo = #{conninfo := ConnInfo} = emqx_cm:get_chan_info(ClientId), @@ -108,36 +115,43 @@ all_received_publishs() -> all_received_publishs(Ls) -> receive - M = {publish, _Pub} -> all_received_publishs([M|Ls]); + M = {publish, _Pub} -> all_received_publishs([M | Ls]); _ -> all_received_publishs(Ls) after 100 -> lists:reverse(Ls) end. 
assert_messages_missed(Ls1, Ls2) -> - Missed = lists:filtermap(fun(Msg) -> - No = emqx_message:payload(Msg), - case lists:any(fun({publish, #{payload := No1}}) -> No1 == No end, Ls2) of - true -> false; - false -> {true, No} - end - end, Ls1), + Missed = lists:filtermap( + fun(Msg) -> + No = emqx_message:payload(Msg), + case lists:any(fun({publish, #{payload := No1}}) -> No1 == No end, Ls2) of + true -> false; + false -> {true, No} + end + end, + Ls1 + ), case Missed of - [] -> ok; + [] -> + ok; _ -> - ct:fail("Miss messages: ~p", [Missed]), error + ct:fail("Miss messages: ~p", [Missed]), + error end. assert_messages_order([], []) -> ok; -assert_messages_order([Msg|Ls1], [{publish, #{payload := No}}|Ls2]) -> +assert_messages_order([Msg | Ls1], [{publish, #{payload := No}} | Ls2]) -> case emqx_message:payload(Msg) == No of false -> - ct:fail("Message order is not correct, expected: ~p, received: ~p", [emqx_message:payload(Msg), No]), + ct:fail("Message order is not correct, expected: ~p, received: ~p", [ + emqx_message:payload(Msg), No + ]), error; - true -> assert_messages_order(Ls1, Ls2) + true -> + assert_messages_order(Ls1, Ls2) end. messages(Cnt) -> [emqx_message:make(ct, 1, ?TOPIC, integer_to_binary(I)) || I <- lists:seq(1, Cnt)]. - diff --git a/apps/emqx/test/emqx_tls_lib_tests.erl b/apps/emqx/test/emqx_tls_lib_tests.erl index 0f58d8347..ca0dfa553 100644 --- a/apps/emqx/test/emqx_tls_lib_tests.erl +++ b/apps/emqx/test/emqx_tls_lib_tests.erl @@ -40,9 +40,10 @@ ciphers_format_test_() -> String = ?TLS_13_CIPHER ++ "," ++ ?TLS_12_CIPHER, Binary = bin(String), List = [?TLS_13_CIPHER, ?TLS_12_CIPHER], - [ {"string", fun() -> test_cipher_format(String) end} - , {"binary", fun() -> test_cipher_format(Binary) end} - , {"string-list", fun() -> test_cipher_format(List) end} + [ + {"string", fun() -> test_cipher_format(String) end}, + {"binary", fun() -> test_cipher_format(Binary) end}, + {"string-list", fun() -> test_cipher_format(List) end} ]. test_cipher_format(Input) -> @@ -53,52 +54,73 @@ tls_versions_test() -> ?assert(lists:member('tlsv1.3', emqx_tls_lib:default_versions())). tls_version_unknown_test() -> - ?assertEqual(emqx_tls_lib:default_versions(), - emqx_tls_lib:integral_versions([])), - ?assertEqual(emqx_tls_lib:default_versions(), - emqx_tls_lib:integral_versions(<<>>)), - ?assertEqual(emqx_tls_lib:default_versions(), - emqx_tls_lib:integral_versions("foo")), - ?assertError(#{reason := no_available_tls_version}, - emqx_tls_lib:integral_versions([foo])). + ?assertEqual( + emqx_tls_lib:default_versions(), + emqx_tls_lib:integral_versions([]) + ), + ?assertEqual( + emqx_tls_lib:default_versions(), + emqx_tls_lib:integral_versions(<<>>) + ), + ?assertEqual( + emqx_tls_lib:default_versions(), + emqx_tls_lib:integral_versions("foo") + ), + ?assertError( + #{reason := no_available_tls_version}, + emqx_tls_lib:integral_versions([foo]) + ). cipher_suites_no_duplication_test() -> AllCiphers = emqx_tls_lib:default_ciphers(), ?assertEqual(length(AllCiphers), length(lists:usort(AllCiphers))). 
ssl_files_failure_test_() -> - [{"undefined_is_undefined", - fun() -> - ?assertEqual({ok, undefined}, - emqx_tls_lib:ensure_ssl_files("dir", undefined)) end}, - {"no_op_if_disabled", - fun() -> - Disabled = #{<<"enable">> => false, foo => bar}, - ?assertEqual({ok, Disabled}, - emqx_tls_lib:ensure_ssl_files("dir", Disabled)) end}, - {"enoent_key_file", - fun() -> - NonExistingFile = filename:join("/tmp", integer_to_list(erlang:system_time(microsecond))), - ?assertMatch({error, #{file_read := enoent, pem_check := invalid_pem}}, - emqx_tls_lib:ensure_ssl_files("/tmp", #{<<"keyfile">> => NonExistingFile})) - end}, - {"bad_pem_string", - fun() -> - %% not valid unicode - ?assertMatch({error, #{reason := invalid_file_path_or_pem_string, which_option := <<"keyfile">>}}, - emqx_tls_lib:ensure_ssl_files("/tmp", #{<<"keyfile">> => <<255, 255>>})), - %% not printable - ?assertMatch({error, #{reason := invalid_file_path_or_pem_string}}, - emqx_tls_lib:ensure_ssl_files("/tmp", #{<<"keyfile">> => <<33, 22>>})), - TmpFile = filename:join("/tmp", integer_to_list(erlang:system_time(microsecond))), - try - ok = file:write_file(TmpFile, <<"not a valid pem">>), - ?assertMatch({error, #{file_read := not_pem}}, - emqx_tls_lib:ensure_ssl_files("/tmp", #{<<"cacertfile">> => bin(TmpFile)})) - after - file:delete(TmpFile) - end - end} + [ + {"undefined_is_undefined", fun() -> + ?assertEqual( + {ok, undefined}, + emqx_tls_lib:ensure_ssl_files("dir", undefined) + ) + end}, + {"no_op_if_disabled", fun() -> + Disabled = #{<<"enable">> => false, foo => bar}, + ?assertEqual( + {ok, Disabled}, + emqx_tls_lib:ensure_ssl_files("dir", Disabled) + ) + end}, + {"enoent_key_file", fun() -> + NonExistingFile = filename:join( + "/tmp", integer_to_list(erlang:system_time(microsecond)) + ), + ?assertMatch( + {error, #{file_read := enoent, pem_check := invalid_pem}}, + emqx_tls_lib:ensure_ssl_files("/tmp", #{<<"keyfile">> => NonExistingFile}) + ) + end}, + {"bad_pem_string", fun() -> + %% not valid unicode + ?assertMatch( + {error, #{reason := invalid_file_path_or_pem_string, which_option := <<"keyfile">>}}, + emqx_tls_lib:ensure_ssl_files("/tmp", #{<<"keyfile">> => <<255, 255>>}) + ), + %% not printable + ?assertMatch( + {error, #{reason := invalid_file_path_or_pem_string}}, + emqx_tls_lib:ensure_ssl_files("/tmp", #{<<"keyfile">> => <<33, 22>>}) + ), + TmpFile = filename:join("/tmp", integer_to_list(erlang:system_time(microsecond))), + try + ok = file:write_file(TmpFile, <<"not a valid pem">>), + ?assertMatch( + {error, #{file_read := not_pem}}, + emqx_tls_lib:ensure_ssl_files("/tmp", #{<<"cacertfile">> => bin(TmpFile)}) + ) + after + file:delete(TmpFile) + end + end} ]. ssl_files_save_delete_test() -> @@ -140,19 +162,21 @@ ssl_file_replace_test() -> bin(X) -> iolist_to_binary(X). test_key() -> -""" ------BEGIN EC PRIVATE KEY----- -MHQCAQEEICKTbbathzvD8zvgjL7qRHhW4alS0+j0Loo7WeYX9AxaoAcGBSuBBAAK -oUQDQgAEJBdF7MIdam5T4YF3JkEyaPKdG64TVWCHwr/plC0QzNVJ67efXwxlVGTo -ju0VBj6tOX1y6C0U+85VOM0UU5xqvw== ------END EC PRIVATE KEY----- -""". + "" + "\n" + "-----BEGIN EC PRIVATE KEY-----\n" + "MHQCAQEEICKTbbathzvD8zvgjL7qRHhW4alS0+j0Loo7WeYX9AxaoAcGBSuBBAAK\n" + "oUQDQgAEJBdF7MIdam5T4YF3JkEyaPKdG64TVWCHwr/plC0QzNVJ67efXwxlVGTo\n" + "ju0VBj6tOX1y6C0U+85VOM0UU5xqvw==\n" + "-----END EC PRIVATE KEY-----\n" + "". 
test_key2() -> -""" ------BEGIN EC PRIVATE KEY----- -MHQCAQEEID9UlIyAlLFw0irkRHX29N+ZGivGtDjlVJvATY3B0TTmoAcGBSuBBAAK -oUQDQgAEUwiarudRNAT25X11js8gE9G+q0GdsT53QJQjRtBO+rTwuCW1vhLzN0Ve -AbToUD4JmV9m/XwcSVH06ZaWqNuC5w== ------END EC PRIVATE KEY----- -""". + "" + "\n" + "-----BEGIN EC PRIVATE KEY-----\n" + "MHQCAQEEID9UlIyAlLFw0irkRHX29N+ZGivGtDjlVJvATY3B0TTmoAcGBSuBBAAK\n" + "oUQDQgAEUwiarudRNAT25X11js8gE9G+q0GdsT53QJQjRtBO+rTwuCW1vhLzN0Ve\n" + "AbToUD4JmV9m/XwcSVH06ZaWqNuC5w==\n" + "-----END EC PRIVATE KEY-----\n" + "". diff --git a/apps/emqx/test/emqx_topic_SUITE.erl b/apps/emqx/test/emqx_topic_SUITE.erl index fd54a03cc..13383b69e 100644 --- a/apps/emqx/test/emqx_topic_SUITE.erl +++ b/apps/emqx/test/emqx_topic_SUITE.erl @@ -22,36 +22,39 @@ -include_lib("eunit/include/eunit.hrl"). -include_lib("emqx/include/emqx_placeholder.hrl"). --import(emqx_topic, - [ wildcard/1 - , match/2 - , validate/1 - , prepend/2 - , join/1 - , words/1 - , systop/1 - , feed_var/3 - , parse/1 - , parse/2 - ]). +-import( + emqx_topic, + [ + wildcard/1, + match/2, + validate/1, + prepend/2, + join/1, + words/1, + systop/1, + feed_var/3, + parse/1, + parse/2 + ] +). -define(N, 100000). all() -> emqx_common_test_helpers:all(?MODULE). t_wildcard(_) -> - true = wildcard(<<"a/b/#">>), - true = wildcard(<<"a/+/#">>), + true = wildcard(<<"a/b/#">>), + true = wildcard(<<"a/+/#">>), false = wildcard(<<"">>), false = wildcard(<<"a/b/c">>). t_match1(_) -> - true = match(<<"a/b/c">>, <<"a/b/+">>), - true = match(<<"a/b/c">>, <<"a/#">>), - true = match(<<"abcd/ef/g">>, <<"#">>), - true = match(<<"abc/de/f">>, <<"abc/de/f">>), - true = match(<<"abc">>, <<"+">>), - true = match(<<"a/b/c">>, <<"a/b/c">>), + true = match(<<"a/b/c">>, <<"a/b/+">>), + true = match(<<"a/b/c">>, <<"a/#">>), + true = match(<<"abcd/ef/g">>, <<"#">>), + true = match(<<"abc/de/f">>, <<"abc/de/f">>), + true = match(<<"abc">>, <<"+">>), + true = match(<<"a/b/c">>, <<"a/b/c">>), false = match(<<"a/b/c">>, <<"a/c/d">>), false = match(<<"$share/x/y">>, <<"+">>), false = match(<<"$share/x/y">>, <<"+/x/y">>), @@ -61,17 +64,17 @@ t_match1(_) -> false = match(<<"house">>, <<"house/+">>). 
t_match2(_) -> - true = match(<<"sport/tennis/player1">>, <<"sport/tennis/player1/#">>), - true = match(<<"sport/tennis/player1/ranking">>, <<"sport/tennis/player1/#">>), - true = match(<<"sport/tennis/player1/score/wimbledon">>, <<"sport/tennis/player1/#">>), - true = match(<<"sport">>, <<"sport/#">>), - true = match(<<"sport">>, <<"#">>), - true = match(<<"/sport/football/score/1">>, <<"#">>), - true = match(<<"Topic/C">>, <<"+/+">>), - true = match(<<"TopicA/B">>, <<"+/+">>), - true = match(<<"TopicA/C">>, <<"+/+">>), - true = match(<<"abc">>, <<"+">>), - true = match(<<"a/b/c">>, <<"a/b/c">>), + true = match(<<"sport/tennis/player1">>, <<"sport/tennis/player1/#">>), + true = match(<<"sport/tennis/player1/ranking">>, <<"sport/tennis/player1/#">>), + true = match(<<"sport/tennis/player1/score/wimbledon">>, <<"sport/tennis/player1/#">>), + true = match(<<"sport">>, <<"sport/#">>), + true = match(<<"sport">>, <<"#">>), + true = match(<<"/sport/football/score/1">>, <<"#">>), + true = match(<<"Topic/C">>, <<"+/+">>), + true = match(<<"TopicA/B">>, <<"+/+">>), + true = match(<<"TopicA/C">>, <<"+/+">>), + true = match(<<"abc">>, <<"+">>), + true = match(<<"a/b/c">>, <<"a/b/c">>), false = match(<<"a/b/c">>, <<"a/c/d">>), false = match(<<"$share/x/y">>, <<"+">>), false = match(<<"$share/x/y">>, <<"+/x/y">>), @@ -88,28 +91,28 @@ t_match3(_) -> true = match(<<"device/60019423a83c/dust/type">>, <<"device/60019423a83c/#">>). t_sigle_level_match(_) -> - true = match(<<"sport/tennis/player1">>, <<"sport/tennis/+">>), + true = match(<<"sport/tennis/player1">>, <<"sport/tennis/+">>), false = match(<<"sport/tennis/player1/ranking">>, <<"sport/tennis/+">>), false = match(<<"sport">>, <<"sport/+">>), - true = match(<<"sport/">>, <<"sport/+">>), - true = match(<<"/finance">>, <<"+/+">>), - true = match(<<"/finance">>, <<"/+">>), + true = match(<<"sport/">>, <<"sport/+">>), + true = match(<<"/finance">>, <<"+/+">>), + true = match(<<"/finance">>, <<"/+">>), false = match(<<"/finance">>, <<"+">>), - true = match(<<"/devices/$dev1">>, <<"/devices/+">>), - true = match(<<"/devices/$dev1/online">>, <<"/devices/+/online">>). + true = match(<<"/devices/$dev1">>, <<"/devices/+">>), + true = match(<<"/devices/$dev1/online">>, <<"/devices/+/online">>). t_sys_match(_) -> - true = match(<<"$SYS/broker/clients/testclient">>, <<"$SYS/#">>), - true = match(<<"$SYS/broker">>, <<"$SYS/+">>), + true = match(<<"$SYS/broker/clients/testclient">>, <<"$SYS/#">>), + true = match(<<"$SYS/broker">>, <<"$SYS/+">>), false = match(<<"$SYS/broker">>, <<"+/+">>), false = match(<<"$SYS/broker">>, <<"#">>). 't_#_match'(_) -> - true = match(<<"a/b/c">>, <<"#">>), - true = match(<<"a/b/c">>, <<"+/#">>), + true = match(<<"a/b/c">>, <<"#">>), + true = match(<<"a/b/c">>, <<"+/#">>), false = match(<<"$SYS/brokers">>, <<"#">>), - true = match(<<"a/b/$c">>, <<"a/b/#">>), - true = match(<<"a/b/$c">>, <<"a/#">>). + true = match(<<"a/b/$c">>, <<"a/b/#">>), + true = match(<<"a/b/$c">>, <<"a/#">>). t_match_perf(_) -> true = match(<<"a/b/ccc">>, <<"a/#">>), @@ -154,8 +157,10 @@ t_levels(_) -> ?assertEqual(4, emqx_topic:levels(<<"a/b/c/d">>)). t_tokens(_) -> - ?assertEqual([<<"a">>, <<"b">>, <<"+">>, <<"#">>], - emqx_topic:tokens(<<"a/b/+/#">>)). + ?assertEqual( + [<<"a">>, <<"b">>, <<"+">>, <<"#">>], + emqx_topic:tokens(<<"a/b/+/#">>) + ). 
t_words(_) -> Topic = <<"/abkc/19383/+/akakdkkdkak/#">>, @@ -178,30 +183,50 @@ t_systop(_) -> SysTop1 = iolist_to_binary(["$SYS/brokers/", atom_to_list(node()), "/xyz"]), ?assertEqual(SysTop1, systop('xyz')), SysTop2 = iolist_to_binary(["$SYS/brokers/", atom_to_list(node()), "/abc"]), - ?assertEqual(SysTop2,systop(<<"abc">>)). + ?assertEqual(SysTop2, systop(<<"abc">>)). t_feed_var(_) -> - ?assertEqual(<<"$queue/client/clientId">>, - feed_var(<<"$c">>, <<"clientId">>, <<"$queue/client/$c">>)), - ?assertEqual(<<"username/test/client/x">>, - feed_var( ?PH_USERNAME, <<"test">> - , <<"username/", ?PH_USERNAME/binary, "/client/x">>)), - ?assertEqual(<<"username/test/client/clientId">>, - feed_var( ?PH_CLIENTID, <<"clientId">> - , <<"username/test/client/", ?PH_CLIENTID/binary>>)). + ?assertEqual( + <<"$queue/client/clientId">>, + feed_var(<<"$c">>, <<"clientId">>, <<"$queue/client/$c">>) + ), + ?assertEqual( + <<"username/test/client/x">>, + feed_var( + ?PH_USERNAME, + <<"test">>, + <<"username/", ?PH_USERNAME/binary, "/client/x">> + ) + ), + ?assertEqual( + <<"username/test/client/clientId">>, + feed_var( + ?PH_CLIENTID, + <<"clientId">>, + <<"username/test/client/", ?PH_CLIENTID/binary>> + ) + ). long_topic() -> iolist_to_binary([[integer_to_list(I), "/"] || I <- lists:seq(0, 66666)]). t_parse(_) -> - ?assertError({invalid_topic_filter, <<"$queue/t">>}, - parse(<<"$queue/t">>, #{share => <<"g">>})), - ?assertError({invalid_topic_filter, <<"$share/g/t">>}, - parse(<<"$share/g/t">>, #{share => <<"g">>})), - ?assertError({invalid_topic_filter, <<"$share/t">>}, - parse(<<"$share/t">>)), - ?assertError({invalid_topic_filter, <<"$share/+/t">>}, - parse(<<"$share/+/t">>)), + ?assertError( + {invalid_topic_filter, <<"$queue/t">>}, + parse(<<"$queue/t">>, #{share => <<"g">>}) + ), + ?assertError( + {invalid_topic_filter, <<"$share/g/t">>}, + parse(<<"$share/g/t">>, #{share => <<"g">>}) + ), + ?assertError( + {invalid_topic_filter, <<"$share/t">>}, + parse(<<"$share/t">>) + ), + ?assertError( + {invalid_topic_filter, <<"$share/+/t">>}, + parse(<<"$share/+/t">>) + ), ?assertEqual({<<"a/b/+/#">>, #{}}, parse(<<"a/b/+/#">>)), ?assertEqual({<<"a/b/+/#">>, #{qos => 1}}, parse({<<"a/b/+/#">>, #{qos => 1}})), ?assertEqual({<<"topic">>, #{share => <<"$queue">>}}, parse(<<"$queue/topic">>)), @@ -213,9 +238,14 @@ t_parse(_) -> ?assertEqual({<<"$fastlane/topic">>, #{}}, parse(<<"$fastlane/topic">>)). bench(Case, Fun, Args) -> - {Time, ok} = timer:tc(fun lists:foreach/2, - [fun(_) -> apply(Fun, Args) end, - lists:seq(1, ?N) - ]), - ct:pal("Time consumed by ~ts: ~.3f(us)~nCall ~ts per second: ~w", - [Case, Time/?N, Case, (?N * 1000000) div Time]). + {Time, ok} = timer:tc( + fun lists:foreach/2, + [ + fun(_) -> apply(Fun, Args) end, + lists:seq(1, ?N) + ] + ), + ct:pal( + "Time consumed by ~ts: ~.3f(us)~nCall ~ts per second: ~w", + [Case, Time / ?N, Case, (?N * 1000000) div Time] + ). 
diff --git a/apps/emqx/test/emqx_trace_SUITE.erl b/apps/emqx/test/emqx_trace_SUITE.erl index a4731390d..762abe201 100644 --- a/apps/emqx/test/emqx_trace_SUITE.erl +++ b/apps/emqx/test/emqx_trace_SUITE.erl @@ -67,11 +67,11 @@ t_base_create_delete(_Config) -> ?assertEqual({error, {duplicate_condition, Name}}, emqx_trace:create(AnotherTrace)), [TraceRec] = emqx_trace:list(), Expect = #emqx_trace{ - name = Name, - type = clientid, - filter = ClientId, + name = Name, + type = clientid, + filter = ClientId, start_at = Now, - end_at = Now + 30 * 60 + end_at = Now + 30 * 60 }, ?assertEqual(Expect, TraceRec), ExpectFormat = [ @@ -91,14 +91,23 @@ t_base_create_delete(_Config) -> ok. t_create_size_max(_Config) -> - lists:map(fun(Seq) -> - Name = list_to_binary("name" ++ integer_to_list(Seq)), - Trace = [{name, Name}, {type, topic}, - {topic, list_to_binary("/x/y/" ++ integer_to_list(Seq))}], - {ok, _} = emqx_trace:create(Trace) - end, lists:seq(1, 30)), - Trace31 = [{<<"name">>, <<"name31">>}, - {<<"type">>, topic}, {<<"topic">>, <<"/x/y/31">>}], + lists:map( + fun(Seq) -> + Name = list_to_binary("name" ++ integer_to_list(Seq)), + Trace = [ + {name, Name}, + {type, topic}, + {topic, list_to_binary("/x/y/" ++ integer_to_list(Seq))} + ], + {ok, _} = emqx_trace:create(Trace) + end, + lists:seq(1, 30) + ), + Trace31 = [ + {<<"name">>, <<"name31">>}, + {<<"type">>, topic}, + {<<"topic">>, <<"/x/y/31">>} + ], {error, _} = emqx_trace:create(Trace31), ok = emqx_trace:delete(<<"name30">>), {ok, _} = emqx_trace:create(Trace31), @@ -118,23 +127,36 @@ t_create_failed(_Config) -> {error, Reason4} = emqx_trace:create([Name, {<<"type">>, clientid}]), ?assertEqual(<<"required clientid field">>, iolist_to_binary(Reason4)), - InvalidPackets4 = [{<<"name">>, <<"/test">>}, {<<"clientid">>, <<"t">>}, - {<<"type">>, clientid}], + InvalidPackets4 = [ + {<<"name">>, <<"/test">>}, + {<<"clientid">>, <<"t">>}, + {<<"type">>, clientid} + ], {error, Reason5} = emqx_trace:create(InvalidPackets4), ?assertEqual(<<"Name should be ^[A-Za-z]+[A-Za-z0-9-_]*$">>, iolist_to_binary(Reason5)), - ?assertEqual({error, "type=[topic,clientid,ip_address] required"}, - emqx_trace:create([{<<"name">>, <<"test-name">>}, {<<"clientid">>, <<"good">>}])), + ?assertEqual( + {error, "type=[topic,clientid,ip_address] required"}, + emqx_trace:create([{<<"name">>, <<"test-name">>}, {<<"clientid">>, <<"good">>}]) + ), - ?assertEqual({error, "ip address: einval"}, - emqx_trace:create([Name, {<<"type">>, ip_address}, - {<<"ip_address">>, <<"test-name">>}])), + ?assertEqual( + {error, "ip address: einval"}, + emqx_trace:create([ + Name, + {<<"type">>, ip_address}, + {<<"ip_address">>, <<"test-name">>} + ]) + ), ok. 
t_create_default(_Config) -> {error, "name required"} = emqx_trace:create([]), - {ok, _} = emqx_trace:create([{<<"name">>, <<"test-name">>}, - {<<"type">>, clientid}, {<<"clientid">>, <<"good">>}]), + {ok, _} = emqx_trace:create([ + {<<"name">>, <<"test-name">>}, + {<<"type">>, clientid}, + {<<"clientid">>, <<"good">>} + ]), [#emqx_trace{name = <<"test-name">>}] = emqx_trace:list(), ok = emqx_trace:clear(), Now = erlang:system_time(second), @@ -154,8 +176,11 @@ t_create_default(_Config) -> {<<"end_at">>, Now + 3} ], {error, "failed by start_at >= end_at"} = emqx_trace:create(Trace2), - {ok, _} = emqx_trace:create([{<<"name">>, <<"test-name">>}, - {<<"type">>, topic}, {<<"topic">>, <<"/x/y/z">>}]), + {ok, _} = emqx_trace:create([ + {<<"name">>, <<"test-name">>}, + {<<"type">>, topic}, + {<<"topic">>, <<"/x/y/z">>} + ]), [#emqx_trace{start_at = Start, end_at = End}] = emqx_trace:list(), ?assertEqual(10 * 60, End - Start), ?assertEqual(true, Start - erlang:system_time(second) < 5), @@ -171,15 +196,21 @@ t_create_with_extra_fields(_Config) -> {<<"ip_address">>, <<"127.0.0.1">>} ], {ok, _} = emqx_trace:create(Trace), - ?assertMatch([#emqx_trace{name = <<"test-name">>, filter = <<"/x/y/z">>, type = topic}], - emqx_trace:list()), + ?assertMatch( + [#emqx_trace{name = <<"test-name">>, filter = <<"/x/y/z">>, type = topic}], + emqx_trace:list() + ), ok. t_update_enable(_Config) -> Name = <<"test-name">>, Now = erlang:system_time(second), - {ok, _} = emqx_trace:create([{<<"name">>, Name}, {<<"type">>, topic}, - {<<"topic">>, <<"/x/y/z">>}, {<<"end_at">>, Now + 2}]), + {ok, _} = emqx_trace:create([ + {<<"name">>, Name}, + {<<"type">>, topic}, + {<<"topic">>, <<"/x/y/z">>}, + {<<"end_at">>, Now + 2} + ]), [#emqx_trace{enable = Enable}] = emqx_trace:list(), ?assertEqual(Enable, true), ok = emqx_trace:update(Name, false), @@ -197,15 +228,27 @@ t_update_enable(_Config) -> t_load_state(_Config) -> Now = erlang:system_time(second), - Running = #{name => <<"Running">>, type => topic, - topic => <<"/x/y/1">>, start_at => Now - 1, - end_at => Now + 2}, - Waiting = [{<<"name">>, <<"Waiting">>}, {<<"type">>, topic}, - {<<"topic">>, <<"/x/y/2">>}, {<<"start_at">>, Now + 3}, - {<<"end_at">>, Now + 8}], - Finished = [{<<"name">>, <<"Finished">>}, {<<"type">>, topic}, - {<<"topic">>, <<"/x/y/3">>}, {<<"start_at">>, Now - 5}, - {<<"end_at">>, Now}], + Running = #{ + name => <<"Running">>, + type => topic, + topic => <<"/x/y/1">>, + start_at => Now - 1, + end_at => Now + 2 + }, + Waiting = [ + {<<"name">>, <<"Waiting">>}, + {<<"type">>, topic}, + {<<"topic">>, <<"/x/y/2">>}, + {<<"start_at">>, Now + 3}, + {<<"end_at">>, Now + 8} + ], + Finished = [ + {<<"name">>, <<"Finished">>}, + {<<"type">>, topic}, + {<<"topic">>, <<"/x/y/3">>}, + {<<"start_at">>, Now - 5}, + {<<"end_at">>, Now} + ], {ok, _} = emqx_trace:create(Running), {ok, _} = emqx_trace:create(Waiting), {error, "end_at time has already passed"} = emqx_trace:create(Finished), @@ -227,8 +270,12 @@ t_client_event(_Config) -> ClientId = <<"client-test">>, Now = erlang:system_time(second), Name = <<"test_client_id_event">>, - {ok, _} = emqx_trace:create([{<<"name">>, Name}, - {<<"type">>, clientid}, {<<"clientid">>, ClientId}, {<<"start_at">>, Now}]), + {ok, _} = emqx_trace:create([ + {<<"name">>, Name}, + {<<"type">>, clientid}, + {<<"clientid">>, ClientId}, + {<<"start_at">>, Now} + ]), ok = emqx_trace_handler_SUITE:filesync(Name, clientid), {ok, Client} = emqtt:start_link([{clean_start, true}, {clientid, ClientId}]), {ok, _} = emqtt:connect(Client), @@ -236,8 
+283,12 @@ t_client_event(_Config) -> ok = emqtt:publish(Client, <<"/test">>, #{}, <<"1">>, [{qos, 0}]), ok = emqtt:publish(Client, <<"/test">>, #{}, <<"2">>, [{qos, 0}]), ok = emqx_trace_handler_SUITE:filesync(Name, clientid), - {ok, _} = emqx_trace:create([{<<"name">>, <<"test_topic">>}, - {<<"type">>, topic}, {<<"topic">>, <<"/test">>}, {<<"start_at">>, Now}]), + {ok, _} = emqx_trace:create([ + {<<"name">>, <<"test_topic">>}, + {<<"type">>, topic}, + {<<"topic">>, <<"/test">>}, + {<<"start_at">>, Now} + ]), ok = emqx_trace_handler_SUITE:filesync(<<"test_topic">>, topic), {ok, Bin} = file:read_file(emqx_trace:log_file(Name, Now)), ok = emqtt:publish(Client, <<"/test">>, #{}, <<"3">>, [{qos, 0}]), @@ -261,7 +312,7 @@ t_get_log_filename(_Config) -> {<<"type">>, ip_address}, {<<"ip_address">>, <<"127.0.0.1">>}, {<<"start_at">>, Now}, - {<<"end_at">>, Now +2} + {<<"end_at">>, Now + 2} ], {ok, _} = emqx_trace:create(Trace), ?assertEqual({error, not_found}, emqx_trace:get_trace_filename(<<"test">>)), @@ -287,13 +338,25 @@ t_find_closed_time(_Config) -> Now = erlang:system_time(second), Traces2 = [], ?assertEqual(DefaultMs, emqx_trace:find_closest_time(Traces2, Now)), - Traces3 = [#emqx_trace{name = <<"disable">>, start_at = Now + 1, - end_at = Now + 2, enable = false}], + Traces3 = [ + #emqx_trace{ + name = <<"disable">>, + start_at = Now + 1, + end_at = Now + 2, + enable = false + } + ], ?assertEqual(DefaultMs, emqx_trace:find_closest_time(Traces3, Now)), Traces4 = [#emqx_trace{name = <<"running">>, start_at = Now, end_at = Now + 10, enable = true}], ?assertEqual(10000, emqx_trace:find_closest_time(Traces4, Now)), - Traces5 = [#emqx_trace{name = <<"waiting">>, start_at = Now + 2, - end_at = Now + 10, enable = true}], + Traces5 = [ + #emqx_trace{ + name = <<"waiting">>, + start_at = Now + 2, + end_at = Now + 10, + enable = true + } + ], ?assertEqual(2000, emqx_trace:find_closest_time(Traces5, Now)), Traces = [ #emqx_trace{name = <<"waiting">>, start_at = Now + 1, end_at = Now + 2, enable = true}, diff --git a/apps/emqx/test/emqx_trace_handler_SUITE.erl b/apps/emqx/test/emqx_trace_handler_SUITE.erl index 6b7dde75a..ff4815c30 100644 --- a/apps/emqx/test/emqx_trace_handler_SUITE.erl +++ b/apps/emqx/test/emqx_trace_handler_SUITE.erl @@ -22,11 +22,12 @@ -include_lib("eunit/include/eunit.hrl"). -include_lib("common_test/include/ct.hrl"). --define(CLIENT, [{host, "localhost"}, - {clientid, <<"client">>}, - {username, <<"testuser">>}, - {password, <<"pass">>} - ]). +-define(CLIENT, [ + {host, "localhost"}, + {clientid, <<"client">>}, + {username, <<"testuser">>}, + {password, <<"pass">>} +]). all() -> [t_trace_clientid, t_trace_topic, t_trace_ip_address, t_trace_clientid_utf8]. @@ -42,7 +43,7 @@ init_per_testcase(t_trace_clientid, Config) -> init(), Config; init_per_testcase(_Case, Config) -> - _ = [logger:remove_handler(Id) ||#{id := Id} <- emqx_trace_handler:running()], + _ = [logger:remove_handler(Id) || #{id := Id} <- emqx_trace_handler:running()], init(), Config. 
@@ -69,13 +70,32 @@ t_trace_clientid(_Config) -> ?assert(filelib:is_regular("tmp/client3.log")), %% Get current traces - ?assertMatch([#{type := clientid, filter := <<"client">>, name := <<"CLI-client1">>, - level := debug, dst := "tmp/client.log"}, - #{type := clientid, filter := <<"client2">>, name := <<"CLI-client2">> - , level := debug, dst := "tmp/client2.log"}, - #{type := clientid, filter := <<"client3">>, name := <<"CLI-client3">>, - level := debug, dst := "tmp/client3.log"} - ], emqx_trace_handler:running()), + ?assertMatch( + [ + #{ + type := clientid, + filter := <<"client">>, + name := <<"CLI-client1">>, + level := debug, + dst := "tmp/client.log" + }, + #{ + type := clientid, + filter := <<"client2">>, + name := <<"CLI-client2">>, + level := debug, + dst := "tmp/client2.log" + }, + #{ + type := clientid, + filter := <<"client3">>, + name := <<"CLI-client3">>, + level := debug, + dst := "tmp/client3.log" + } + ], + emqx_trace_handler:running() + ), %% Client with clientid = "client" publishes a "hi" message to "a/b/c". {ok, T} = emqtt:start_link(?CLIENT), @@ -108,7 +128,12 @@ t_trace_clientid_utf8(_) -> emqx_trace:check(), {ok, T} = emqtt:start_link([{clientid, Utf8Id}]), emqtt:connect(T), - [begin emqtt:publish(T, <<"a/b/c">>, <<"hi">>) end|| _ <- lists:seq(1, 10)], + [ + begin + emqtt:publish(T, <<"a/b/c">>, <<"hi">>) + end + || _ <- lists:seq(1, 10) + ], emqtt:ping(T), ok = filesync("CLI-UTF8", clientid), @@ -133,12 +158,25 @@ t_trace_topic(_Config) -> ?assert(filelib:is_regular("tmp/topic_trace_y.log")), %% Get current traces - ?assertMatch([#{type := topic, filter := <<"x/#">>, - level := debug, dst := "tmp/topic_trace_x.log", name := <<"CLI-TOPIC-1">>}, - #{type := topic, filter := <<"y/#">>, - name := <<"CLI-TOPIC-2">>, level := debug, dst := "tmp/topic_trace_y.log"} - ], - emqx_trace_handler:running()), + ?assertMatch( + [ + #{ + type := topic, + filter := <<"x/#">>, + level := debug, + dst := "tmp/topic_trace_x.log", + name := <<"CLI-TOPIC-1">> + }, + #{ + type := topic, + filter := <<"y/#">>, + name := <<"CLI-TOPIC-2">>, + level := debug, + dst := "tmp/topic_trace_y.log" + } + ], + emqx_trace_handler:running() + ), %% Client with clientid = "client" publishes a "hi" message to "x/y/z". 
emqtt:publish(T, <<"x/y/z">>, <<"hi1">>), @@ -169,8 +207,13 @@ t_trace_ip_address(_Config) -> %% Start tracing ok = emqx_trace_handler:install("CLI-IP-1", ip_address, "127.0.0.1", all, "tmp/ip_trace_x.log"), - ok = emqx_trace_handler:install("CLI-IP-2", ip_address, - "192.168.1.1", all, "tmp/ip_trace_y.log"), + ok = emqx_trace_handler:install( + "CLI-IP-2", + ip_address, + "192.168.1.1", + all, + "tmp/ip_trace_y.log" + ), emqx_trace:check(), ok = filesync(<<"CLI-IP-1">>, ip_address), ok = filesync(<<"CLI-IP-2">>, ip_address), @@ -180,14 +223,25 @@ t_trace_ip_address(_Config) -> ?assert(filelib:is_regular("tmp/ip_trace_y.log")), %% Get current traces - ?assertMatch([#{type := ip_address, filter := "127.0.0.1", - name := <<"CLI-IP-1">>, - level := debug, dst := "tmp/ip_trace_x.log"}, - #{type := ip_address, filter := "192.168.1.1", - name := <<"CLI-IP-2">>, - level := debug, dst := "tmp/ip_trace_y.log"} - ], - emqx_trace_handler:running()), + ?assertMatch( + [ + #{ + type := ip_address, + filter := "127.0.0.1", + name := <<"CLI-IP-1">>, + level := debug, + dst := "tmp/ip_trace_x.log" + }, + #{ + type := ip_address, + filter := "192.168.1.1", + name := <<"CLI-IP-2">>, + level := debug, + dst := "tmp/ip_trace_y.log" + } + ], + emqx_trace_handler:running() + ), %% Client with clientid = "client" publishes a "hi" message to "x/y/z". emqtt:publish(T, <<"x/y/z">>, <<"hi1">>), @@ -212,13 +266,13 @@ t_trace_ip_address(_Config) -> emqtt:disconnect(T), ?assertEqual([], emqx_trace_handler:running()). - filesync(Name, Type) -> ct:sleep(50), filesync(Name, Type, 3). %% sometime the handler process is not started yet. -filesync(_Name, _Type, 0) -> ok; +filesync(_Name, _Type, 0) -> + ok; filesync(Name0, Type, Retry) -> Name = case is_binary(Name0) of @@ -226,13 +280,13 @@ filesync(Name0, Type, Retry) -> false -> list_to_binary(Name0) end, try - Handler = binary_to_atom(<<"trace_", - (atom_to_binary(Type))/binary, "_", Name/binary>>), + Handler = binary_to_atom(<<"trace_", (atom_to_binary(Type))/binary, "_", Name/binary>>), ok = logger_disk_log_h:filesync(Handler) - catch E:R -> - ct:pal("Filesync error:~p ~p~n", [{Name, Type, Retry}, {E, R}]), - ct:sleep(100), - filesync(Name, Type, Retry - 1) + catch + E:R -> + ct:pal("Filesync error:~p ~p~n", [{Name, Type, Retry}, {E, R}]), + ct:sleep(100), + filesync(Name, Type, Retry - 1) end. init() -> diff --git a/apps/emqx/test/emqx_trie_SUITE.erl b/apps/emqx/test/emqx_trie_SUITE.erl index 0bb4fd7bc..d05e463f4 100644 --- a/apps/emqx/test/emqx_trie_SUITE.erl +++ b/apps/emqx/test/emqx_trie_SUITE.erl @@ -23,8 +23,9 @@ -include_lib("eunit/include/eunit.hrl"). all() -> - [{group, compact}, - {group, not_compact} + [ + {group, compact}, + {group, not_compact} ]. groups() -> @@ -60,39 +61,38 @@ end_per_testcase(_TestCase, _Config) -> t_insert(_) -> Fun = fun() -> - ?TRIE:insert(<<"sensor/1/metric/2">>), - ?TRIE:insert(<<"sensor/+/#">>), - ?TRIE:insert(<<"sensor/#">>) - end, + ?TRIE:insert(<<"sensor/1/metric/2">>), + ?TRIE:insert(<<"sensor/+/#">>), + ?TRIE:insert(<<"sensor/#">>) + end, ?assertEqual({atomic, ok}, trans(Fun)), ?assertEqual([<<"sensor/#">>], ?TRIE:match(<<"sensor">>)). t_match(_) -> Machted = [<<"sensor/#">>, <<"sensor/+/#">>], trans(fun() -> - ?TRIE:insert(<<"sensor/1/metric/2">>), - ?TRIE:insert(<<"sensor/+/#">>), - ?TRIE:insert(<<"sensor/#">>) - end), + ?TRIE:insert(<<"sensor/1/metric/2">>), + ?TRIE:insert(<<"sensor/+/#">>), + ?TRIE:insert(<<"sensor/#">>) + end), ?assertEqual(Machted, lists:sort(?TRIE:match(<<"sensor/1">>))). 
t_match_invalid(_) -> trans(fun() -> - ?TRIE:insert(<<"sensor/1/metric/2">>), - ?TRIE:insert(<<"sensor/+/#">>), - ?TRIE:insert(<<"sensor/#">>) - end), + ?TRIE:insert(<<"sensor/1/metric/2">>), + ?TRIE:insert(<<"sensor/+/#">>), + ?TRIE:insert(<<"sensor/#">>) + end), ?assertEqual([], lists:sort(?TRIE:match(<<"sensor/+">>))), ?assertEqual([], lists:sort(?TRIE:match(<<"#">>))). - t_match2(_) -> Matched = [<<"#">>, <<"+/#">>, <<"+/+/#">>], trans(fun() -> - ?TRIE:insert(<<"#">>), - ?TRIE:insert(<<"+/#">>), - ?TRIE:insert(<<"+/+/#">>) - end), + ?TRIE:insert(<<"#">>), + ?TRIE:insert(<<"+/#">>), + ?TRIE:insert(<<"+/+/#">>) + end), ?assertEqual(Matched, lists:sort(?TRIE:match(<<"a/b/c">>))), ?assertEqual([], ?TRIE:match(<<"$SYS/broker/zenmq">>)). @@ -117,8 +117,10 @@ t_match5(_) -> Topics = [<<"#">>, <>, <>], trans(fun() -> lists:foreach(fun emqx_trie:insert/1, Topics) end), ?assertEqual([<<"#">>, <>], lists:sort(emqx_trie:match(T))), - ?assertEqual([<<"#">>, <>, <>], - lists:sort(emqx_trie:match(<>))). + ?assertEqual( + [<<"#">>, <>, <>], + lists:sort(emqx_trie:match(<>)) + ). t_match6(_) -> T = <<"a/b/c/d/e/f/g/h/i/j/k/l/m/n/o/p/q/r/s/t/u/v/w/x/y/z">>, @@ -141,45 +143,45 @@ t_empty(_) -> t_delete(_) -> trans(fun() -> - ?TRIE:insert(<<"sensor/1/#">>), - ?TRIE:insert(<<"sensor/1/metric/2">>), - ?TRIE:insert(<<"sensor/1/metric/3">>) - end), + ?TRIE:insert(<<"sensor/1/#">>), + ?TRIE:insert(<<"sensor/1/metric/2">>), + ?TRIE:insert(<<"sensor/1/metric/3">>) + end), trans(fun() -> - ?TRIE:delete(<<"sensor/1/metric/2">>), - ?TRIE:delete(<<"sensor/1/metric">>), - ?TRIE:delete(<<"sensor/1/metric">>) - end), + ?TRIE:delete(<<"sensor/1/metric/2">>), + ?TRIE:delete(<<"sensor/1/metric">>), + ?TRIE:delete(<<"sensor/1/metric">>) + end), ?assertEqual([<<"sensor/1/#">>], ?TRIE:match(<<"sensor/1/x">>)). t_delete2(_) -> trans(fun() -> - ?TRIE:insert(<<"sensor">>), - ?TRIE:insert(<<"sensor/1/metric/2">>), - ?TRIE:insert(<<"sensor/+/metric/3">>) - end), + ?TRIE:insert(<<"sensor">>), + ?TRIE:insert(<<"sensor/1/metric/2">>), + ?TRIE:insert(<<"sensor/+/metric/3">>) + end), trans(fun() -> - ?TRIE:delete(<<"sensor">>), - ?TRIE:delete(<<"sensor/1/metric/2">>), - ?TRIE:delete(<<"sensor/+/metric/3">>), - ?TRIE:delete(<<"sensor/+/metric/3">>) - end), + ?TRIE:delete(<<"sensor">>), + ?TRIE:delete(<<"sensor/1/metric/2">>), + ?TRIE:delete(<<"sensor/+/metric/3">>), + ?TRIE:delete(<<"sensor/+/metric/3">>) + end), ?assertEqual([], ?TRIE:match(<<"sensor">>)), ?assertEqual([], ?TRIE:match(<<"sensor/1">>)). t_delete3(_) -> trans(fun() -> - ?TRIE:insert(<<"sensor/+">>), - ?TRIE:insert(<<"sensor/+/metric/2">>), - ?TRIE:insert(<<"sensor/+/metric/3">>) - end), + ?TRIE:insert(<<"sensor/+">>), + ?TRIE:insert(<<"sensor/+/metric/2">>), + ?TRIE:insert(<<"sensor/+/metric/3">>) + end), trans(fun() -> - ?TRIE:delete(<<"sensor/+/metric/2">>), - ?TRIE:delete(<<"sensor/+/metric/3">>), - ?TRIE:delete(<<"sensor">>), - ?TRIE:delete(<<"sensor/+">>), - ?TRIE:delete(<<"sensor/+/unknown">>) - end), + ?TRIE:delete(<<"sensor/+/metric/2">>), + ?TRIE:delete(<<"sensor/+/metric/3">>), + ?TRIE:delete(<<"sensor">>), + ?TRIE:delete(<<"sensor/+">>), + ?TRIE:delete(<<"sensor/+/unknown">>) + end), ?assertEqual([], ?TRIE:match(<<"sensor">>)), ?assertEqual([], ?TRIE:lookup_topic(<<"sensor/+">>, ?TRIE)). 
diff --git a/apps/emqx/test/emqx_vm_SUITE.erl b/apps/emqx/test/emqx_vm_SUITE.erl index 7babdaec0..c5e50e2bb 100644 --- a/apps/emqx/test/emqx_vm_SUITE.erl +++ b/apps/emqx/test/emqx_vm_SUITE.erl @@ -27,27 +27,29 @@ t_load(_Config) -> ?assertMatch([{load1, _}, {load5, _}, {load15, _}], emqx_vm:loads()). t_systeminfo(_Config) -> - ?assertEqual(emqx_vm:system_info_keys(), - [Key || {Key, _} <- emqx_vm:get_system_info()]), + ?assertEqual( + emqx_vm:system_info_keys(), + [Key || {Key, _} <- emqx_vm:get_system_info()] + ), ?assertEqual(undefined, emqx_vm:get_system_info(undefined)). t_mem_info(_Config) -> application:ensure_all_started(os_mon), MemInfo = emqx_vm:mem_info(), - [{total_memory, _}, {used_memory, _}]= MemInfo, + [{total_memory, _}, {used_memory, _}] = MemInfo, application:stop(os_mon). t_process_info(_Config) -> ProcessInfo = emqx_vm:get_process_info(), - ?assertEqual(emqx_vm:process_info_keys(), [K || {K, _V}<- ProcessInfo]). + ?assertEqual(emqx_vm:process_info_keys(), [K || {K, _V} <- ProcessInfo]). t_process_gc(_Config) -> GcInfo = emqx_vm:get_process_gc_info(), - ?assertEqual(emqx_vm:process_gc_info_keys(), [K || {K, _V}<- GcInfo]). + ?assertEqual(emqx_vm:process_gc_info_keys(), [K || {K, _V} <- GcInfo]). t_get_ets_list(_Config) -> ets:new(test, [named_table]), - Ets = emqx_vm:get_ets_list(), + Ets = emqx_vm:get_ets_list(), true = lists:member(test, Ets). t_get_ets_info(_Config) -> @@ -57,12 +59,19 @@ t_get_ets_info(_Config) -> test = proplists:get_value(name, EtsInfo), Tid = proplists:get_value(id, EtsInfo), EtsInfos = emqx_vm:get_ets_info(), - ?assertEqual(true, lists:foldl(fun(Info, Acc) -> - case proplists:get_value(id, Info) of - Tid -> true; - _ -> Acc - end - end, false, EtsInfos)). + ?assertEqual( + true, + lists:foldl( + fun(Info, Acc) -> + case proplists:get_value(id, Info) of + Tid -> true; + _ -> Acc + end + end, + false, + EtsInfos + ) + ). t_scheduler_usage(_Config) -> emqx_vm:scheduler_usage(5000). @@ -93,4 +102,3 @@ do_recv(Sock) -> {error, closed} -> ok end. - diff --git a/apps/emqx/test/emqx_vm_mon_SUITE.erl b/apps/emqx/test/emqx_vm_mon_SUITE.erl index a09d0877e..aa0a78532 100644 --- a/apps/emqx/test/emqx_vm_mon_SUITE.erl +++ b/apps/emqx/test/emqx_vm_mon_SUITE.erl @@ -27,10 +27,11 @@ init_per_testcase(t_alarms, Config) -> emqx_common_test_helpers:boot_modules(all), emqx_common_test_helpers:start_apps([]), emqx_config:put([sysmon, vm], #{ - process_high_watermark => 0, - process_low_watermark => 0, - process_check_interval => 100 %% 1s - }), + process_high_watermark => 0, + process_low_watermark => 0, + %% 1s + process_check_interval => 100 + }), ok = supervisor:terminate_child(emqx_sys_sup, emqx_vm_mon), {ok, _} = supervisor:restart_child(emqx_sys_sup, emqx_vm_mon), Config; diff --git a/apps/emqx/test/emqx_ws_connection_SUITE.erl b/apps/emqx/test/emqx_ws_connection_SUITE.erl index 455cf3e43..3ddac7d42 100644 --- a/apps/emqx/test/emqx_ws_connection_SUITE.erl +++ b/apps/emqx/test/emqx_ws_connection_SUITE.erl @@ -23,15 +23,25 @@ -compile(export_all). -compile(nowarn_export_all). --import(emqx_ws_connection, - [ websocket_handle/2 - , websocket_info/2 - , websocket_close/2 - ]). +-import( + emqx_ws_connection, + [ + websocket_handle/2, + websocket_info/2, + websocket_close/2 + ] +). --define(STATS_KEYS, [recv_oct, recv_cnt, send_oct, send_cnt, - recv_pkt, recv_msg, send_pkt, send_msg - ]). +-define(STATS_KEYS, [ + recv_oct, + recv_cnt, + send_oct, + send_cnt, + recv_pkt, + recv_msg, + send_pkt, + send_msg +]). -define(ws_conn, emqx_ws_connection). 
@@ -47,7 +57,7 @@ init_per_testcase(TestCase, Config) when TestCase =/= t_ws_check_origin, TestCase =/= t_ws_pingreq_before_connected, TestCase =/= t_ws_non_check_origin - -> +-> %% Meck Cm ok = meck:new(emqx_cm, [passthrough, no_history, no_link]), ok = meck:expect(emqx_cm, mark_channel_connected, fun(_) -> ok end), @@ -55,8 +65,8 @@ init_per_testcase(TestCase, Config) when %% Mock cowboy_req ok = meck:new(cowboy_req, [passthrough, no_history, no_link]), ok = meck:expect(cowboy_req, header, fun(_, _, _) -> <<>> end), - ok = meck:expect(cowboy_req, peer, fun(_) -> {{127,0,0,1}, 3456} end), - ok = meck:expect(cowboy_req, sock, fun(_) -> {{127,0,0,1}, 18083} end), + ok = meck:expect(cowboy_req, peer, fun(_) -> {{127, 0, 0, 1}, 3456} end), + ok = meck:expect(cowboy_req, sock, fun(_) -> {{127, 0, 0, 1}, 18083} end), ok = meck:expect(cowboy_req, cert, fun(_) -> undefined end), ok = meck:expect(cowboy_req, parse_cookies, fun(_) -> error(badarg) end), %% Mock emqx_access_control @@ -70,8 +80,8 @@ init_per_testcase(TestCase, Config) when ok = meck:new(emqx_broker, [passthrough, no_history, no_link]), ok = meck:expect(emqx_broker, subscribe, fun(_, _, _) -> ok end), ok = meck:expect(emqx_broker, publish, fun(#message{topic = Topic}) -> - [{node(), Topic, 1}] - end), + [{node(), Topic, 1}] + end), ok = meck:expect(emqx_broker, unsubscribe, fun(_) -> ok end), %% Mock emqx_metrics ok = meck:new(emqx_metrics, [passthrough, no_history, no_link]), @@ -80,27 +90,28 @@ init_per_testcase(TestCase, Config) when ok = meck:expect(emqx_metrics, inc_recv, fun(_) -> ok end), ok = meck:expect(emqx_metrics, inc_sent, fun(_) -> ok end), Config; - init_per_testcase(_, Config) -> ok = emqx_common_test_helpers:start_apps([]), Config. end_per_testcase(TestCase, _Config) when - TestCase =/= t_ws_sub_protocols_mqtt_equivalents, - TestCase =/= t_ws_sub_protocols_mqtt, - TestCase =/= t_ws_check_origin, - TestCase =/= t_ws_non_check_origin, - TestCase =/= t_ws_pingreq_before_connected - -> - lists:foreach(fun meck:unload/1, - [emqx_cm, - cowboy_req, - emqx_access_control, - emqx_broker, - emqx_hooks, - emqx_metrics - ]); - + TestCase =/= t_ws_sub_protocols_mqtt_equivalents, + TestCase =/= t_ws_sub_protocols_mqtt, + TestCase =/= t_ws_check_origin, + TestCase =/= t_ws_non_check_origin, + TestCase =/= t_ws_pingreq_before_connected +-> + lists:foreach( + fun meck:unload/1, + [ + emqx_cm, + cowboy_req, + emqx_access_control, + emqx_broker, + emqx_hooks, + emqx_metrics + ] + ); end_per_testcase(_, Config) -> emqx_common_test_helpers:stop_apps([]), Config. @@ -120,36 +131,51 @@ end_per_suite(_) -> t_info(_) -> WsPid = spawn(fun() -> - receive {call, From, info} -> - gen_server:reply(From, ?ws_conn:info(st())) - end - end), + receive + {call, From, info} -> + gen_server:reply(From, ?ws_conn:info(st())) + end + end), #{sockinfo := SockInfo} = ?ws_conn:call(WsPid, info), - #{socktype := ws, - peername := {{127,0,0,1}, 3456}, - sockname := {{127,0,0,1}, 18083}, - sockstate := running - } = SockInfo. + #{ + socktype := ws, + peername := {{127, 0, 0, 1}, 3456}, + sockname := {{127, 0, 0, 1}, 18083}, + sockstate := running + } = SockInfo. set_ws_opts(Key, Val) -> emqx_config:put_listener_conf(ws, default, [websocket, Key], Val). 
t_header(_) -> - ok = meck:expect(cowboy_req, header, - fun(<<"x-forwarded-for">>, _, _) -> <<"100.100.100.100, 99.99.99.99">>; - (<<"x-forwarded-port">>, _, _) -> <<"1000">> end), + ok = meck:expect( + cowboy_req, + header, + fun + (<<"x-forwarded-for">>, _, _) -> <<"100.100.100.100, 99.99.99.99">>; + (<<"x-forwarded-port">>, _, _) -> <<"1000">> + end + ), set_ws_opts(proxy_address_header, <<"x-forwarded-for">>), set_ws_opts(proxy_port_header, <<"x-forwarded-port">>), - {ok, St, _} = ?ws_conn:websocket_init([req, #{zone => default, - limiter => limiter_cfg(), - listener => {ws, default}}]), + {ok, St, _} = ?ws_conn:websocket_init([ + req, + #{ + zone => default, + limiter => limiter_cfg(), + listener => {ws, default} + } + ]), WsPid = spawn(fun() -> - receive {call, From, info} -> - gen_server:reply(From, ?ws_conn:info(St)) - end end), + receive + {call, From, info} -> + gen_server:reply(From, ?ws_conn:info(St)) + end + end), #{sockinfo := SockInfo} = ?ws_conn:call(WsPid, info), - #{socktype := ws, - peername := {{100,100,100,100}, 1000}, + #{ + socktype := ws, + peername := {{100, 100, 100, 100}, 1000}, sockstate := running } = SockInfo. @@ -164,7 +190,7 @@ t_info_channel(_) -> t_info_gc_state(_) -> GcSt = emqx_gc:init(#{count => 10, bytes => 1000}), GcInfo = ?ws_conn:info(gc_state, st(#{gc_state => GcSt})), - ?assertEqual(#{cnt => {10,10}, oct => {1000,1000}}, GcInfo). + ?assertEqual(#{cnt => {10, 10}, oct => {1000, 1000}}, GcInfo). t_info_postponed(_) -> ?assertEqual([], ?ws_conn:info(postponed, st())), @@ -173,20 +199,34 @@ t_info_postponed(_) -> t_stats(_) -> WsPid = spawn(fun() -> - receive {call, From, stats} -> - gen_server:reply(From, ?ws_conn:stats(st())) - end - end), + receive + {call, From, stats} -> + gen_server:reply(From, ?ws_conn:stats(st())) + end + end), Stats = ?ws_conn:call(WsPid, stats), - [?assert(lists:member(V, Stats)) || V <- - [{recv_oct, 0}, {recv_cnt, 0}, {send_oct, 0}, {send_cnt, 0}, - {recv_pkt, 0}, {recv_msg, 0}, {send_pkt, 0}, {send_msg, 0}]]. + [ + ?assert(lists:member(V, Stats)) + || V <- + [ + {recv_oct, 0}, + {recv_cnt, 0}, + {send_oct, 0}, + {send_cnt, 0}, + {recv_pkt, 0}, + {recv_msg, 0}, + {send_pkt, 0}, + {send_msg, 0} + ] + ]. t_call(_) -> Info = ?ws_conn:info(st()), WsPid = spawn(fun() -> - receive {call, From, info} -> gen_server:reply(From, Info) end - end), + receive + {call, From, info} -> gen_server:reply(From, Info) + end + end), ?assertEqual(Info, ?ws_conn:call(WsPid, info)). 
t_ws_pingreq_before_connected(_) -> @@ -199,23 +239,24 @@ ws_pingreq(State) -> receive {gun_up, WPID, _Proto} -> StreamRef = gun:ws_upgrade(WPID, "/mqtt", [], #{ - protocols => [{<<"mqtt">>, gun_ws_h}]}), + protocols => [{<<"mqtt">>, gun_ws_h}] + }), ws_pingreq(State#{wref => StreamRef}); {gun_down, _WPID, _, Reason, _, _} -> State#{result => {gun_down, Reason}}; {gun_upgrade, WPID, _Ref, _Proto, _Data} -> ct:pal("-- gun_upgrade, send ping-req"), - PingReq = {binary, <<192,0>>}, + PingReq = {binary, <<192, 0>>}, ok = gun:ws_send(WPID, PingReq), gun:flush(WPID), ws_pingreq(State); - {gun_ws, _WPID, _Ref, {binary, <<208,0>>}} -> + {gun_ws, _WPID, _Ref, {binary, <<208, 0>>}} -> ct:fail(unexpected_pingresp); {gun_ws, _WPID, _Ref, Frame} -> ct:pal("gun received frame: ~p", [Frame]), ws_pingreq(State); Message -> - ct:pal("Received Unknown Message on Gun: ~p~n",[Message]), + ct:pal("Received Unknown Message on Gun: ~p~n", [Message]), ws_pingreq(State) after 1000 -> ct:fail(ws_timeout) @@ -223,43 +264,73 @@ ws_pingreq(State) -> t_ws_sub_protocols_mqtt(_) -> {ok, _} = application:ensure_all_started(gun), - ?assertMatch({gun_upgrade, _}, - start_ws_client(#{protocols => [<<"mqtt">>]})). + ?assertMatch( + {gun_upgrade, _}, + start_ws_client(#{protocols => [<<"mqtt">>]}) + ). t_ws_sub_protocols_mqtt_equivalents(_) -> {ok, _} = application:ensure_all_started(gun), %% also support mqtt-v3, mqtt-v3.1.1, mqtt-v5 - ?assertMatch({gun_upgrade, _}, - start_ws_client(#{protocols => [<<"mqtt-v3">>]})), - ?assertMatch({gun_upgrade, _}, - start_ws_client(#{protocols => [<<"mqtt-v3.1.1">>]})), - ?assertMatch({gun_upgrade, _}, - start_ws_client(#{protocols => [<<"mqtt-v5">>]})), - ?assertMatch({gun_response, {_, 400, _}}, - start_ws_client(#{protocols => [<<"not-mqtt">>]})). + ?assertMatch( + {gun_upgrade, _}, + start_ws_client(#{protocols => [<<"mqtt-v3">>]}) + ), + ?assertMatch( + {gun_upgrade, _}, + start_ws_client(#{protocols => [<<"mqtt-v3.1.1">>]}) + ), + ?assertMatch( + {gun_upgrade, _}, + start_ws_client(#{protocols => [<<"mqtt-v5">>]}) + ), + ?assertMatch( + {gun_response, {_, 400, _}}, + start_ws_client(#{protocols => [<<"not-mqtt">>]}) + ). t_ws_check_origin(_) -> emqx_config:put_listener_conf(ws, default, [websocket, check_origin_enable], true), - emqx_config:put_listener_conf(ws, default, [websocket, check_origins], - [<<"http://localhost:18083">>]), + emqx_config:put_listener_conf( + ws, + default, + [websocket, check_origins], + [<<"http://localhost:18083">>] + ), {ok, _} = application:ensure_all_started(gun), - ?assertMatch({gun_upgrade, _}, - start_ws_client(#{protocols => [<<"mqtt">>], - headers => [{<<"origin">>, <<"http://localhost:18083">>}]})), - ?assertMatch({gun_response, {_, 403, _}}, - start_ws_client(#{protocols => [<<"mqtt">>], - headers => [{<<"origin">>, <<"http://localhost:18080">>}]})). + ?assertMatch( + {gun_upgrade, _}, + start_ws_client(#{ + protocols => [<<"mqtt">>], + headers => [{<<"origin">>, <<"http://localhost:18083">>}] + }) + ), + ?assertMatch( + {gun_response, {_, 403, _}}, + start_ws_client(#{ + protocols => [<<"mqtt">>], + headers => [{<<"origin">>, <<"http://localhost:18080">>}] + }) + ). 
t_ws_non_check_origin(_) -> emqx_config:put_listener_conf(ws, default, [websocket, check_origin_enable], false), emqx_config:put_listener_conf(ws, default, [websocket, check_origins], []), {ok, _} = application:ensure_all_started(gun), - ?assertMatch({gun_upgrade, _}, - start_ws_client(#{protocols => [<<"mqtt">>], - headers => [{<<"origin">>, <<"http://localhost:18083">>}]})), - ?assertMatch({gun_upgrade, _}, - start_ws_client(#{protocols => [<<"mqtt">>], - headers => [{<<"origin">>, <<"http://localhost:18080">>}]})). + ?assertMatch( + {gun_upgrade, _}, + start_ws_client(#{ + protocols => [<<"mqtt">>], + headers => [{<<"origin">>, <<"http://localhost:18083">>}] + }) + ), + ?assertMatch( + {gun_upgrade, _}, + start_ws_client(#{ + protocols => [<<"mqtt">>], + headers => [{<<"origin">>, <<"http://localhost:18080">>}] + }) + ). t_init(_) -> Opts = #{listener => {ws, default}, zone => default, limiter => limiter_cfg()}, @@ -273,8 +344,9 @@ t_init(_) -> t_websocket_handle_binary(_) -> {ok, _} = websocket_handle({binary, <<>>}, st()), {ok, _} = websocket_handle({binary, [<<>>]}, st()), - {ok, _} = websocket_handle({binary, <<192,0>>}, st()), - receive {incoming, ?PACKET(?PINGREQ)} -> ok + {ok, _} = websocket_handle({binary, <<192, 0>>}, st()), + receive + {incoming, ?PACKET(?PINGREQ)} -> ok after 0 -> error(expect_incoming_pingreq) end. @@ -308,26 +380,26 @@ t_websocket_info_cast(_) -> t_websocket_info_incoming(_) -> ConnPkt = #mqtt_packet_connect{ - proto_name = <<"MQTT">>, - proto_ver = ?MQTT_PROTO_V5, - is_bridge = false, - clean_start = true, - keepalive = 60, - properties = undefined, - clientid = <<"clientid">>, - username = <<"username">>, - password = <<"passwd">> - }, - {[{close,protocol_error}], St1} = websocket_info({incoming, ?CONNECT_PACKET(ConnPkt)}, st()), + proto_name = <<"MQTT">>, + proto_ver = ?MQTT_PROTO_V5, + is_bridge = false, + clean_start = true, + keepalive = 60, + properties = undefined, + clientid = <<"clientid">>, + username = <<"username">>, + password = <<"passwd">> + }, + {[{close, protocol_error}], St1} = websocket_info({incoming, ?CONNECT_PACKET(ConnPkt)}, st()), % ?assertEqual(<<224,2,130,0>>, iolist_to_binary(IoData1)), %% PINGREQ {[{binary, IoData2}], St2} = websocket_info({incoming, ?PACKET(?PINGREQ)}, St1), - ?assertEqual(<<208,0>>, iolist_to_binary(IoData2)), + ?assertEqual(<<208, 0>>, iolist_to_binary(IoData2)), %% PUBLISH Publish = ?PUBLISH_PACKET(?QOS_1, <<"t">>, 1, <<"payload">>), {[{binary, IoData3}], _St3} = websocket_info({incoming, Publish}, St2), - ?assertEqual(<<64,4,0,1,0,0>>, iolist_to_binary(IoData3)). + ?assertEqual(<<64, 4, 0, 1, 0, 0>>, iolist_to_binary(IoData3)). t_websocket_info_check_gc(_) -> Stats = #{cnt => 10, oct => 1000}, @@ -338,7 +410,7 @@ t_websocket_info_deliver(_) -> Msg1 = emqx_message:make(clientid, ?QOS_1, <<"t">>, <<"">>), self() ! {deliver, <<"#">>, Msg1}, {ok, _St} = websocket_info({deliver, <<"#">>, Msg0}, st()). - % ?assertEqual(<<48,3,0,1,116,50,5,0,1,116,0,1>>, iolist_to_binary(IoData)). +% ?assertEqual(<<48,3,0,1,116,50,5,0,1,116,0,1>>, iolist_to_binary(IoData)). t_websocket_info_timeout_limiter(_) -> Ref = make_ref(), @@ -377,14 +449,14 @@ t_handle_info_connack(_) -> ConnAck = ?CONNACK_PACKET(?RC_SUCCESS), {[{binary, IoData}], _St} = ?ws_conn:handle_info({connack, ConnAck}, st()), - ?assertEqual(<<32,2,0,0>>, iolist_to_binary(IoData)). + ?assertEqual(<<32, 2, 0, 0>>, iolist_to_binary(IoData)). t_handle_info_close(_) -> {[{close, _}], _St} = ?ws_conn:handle_info({close, protocol_error}, st()). 
 
 t_handle_info_event(_) ->
-    ok = meck:expect(emqx_cm, register_channel, fun(_,_,_) -> ok end),
-    ok = meck:expect(emqx_cm, insert_channel_info, fun(_,_,_) -> ok end),
+    ok = meck:expect(emqx_cm, register_channel, fun(_, _, _) -> ok end),
+    ok = meck:expect(emqx_cm, insert_channel_info, fun(_, _, _) -> ok end),
     ok = meck:expect(emqx_cm, connection_closed, fun(_) -> true end),
     {ok, _} = ?ws_conn:handle_info({event, connected}, st()),
     {ok, _} = ?ws_conn:handle_info({event, disconnected}, st()),
@@ -401,14 +473,15 @@ t_handle_timeout_keepalive(_) ->
 t_handle_timeout_emit_stats(_) ->
     TRef = make_ref(),
     {ok, St} = ?ws_conn:handle_timeout(
-        TRef, emit_stats, st(#{stats_timer => TRef})),
+        TRef, emit_stats, st(#{stats_timer => TRef})
+    ),
     ?assertEqual(undefined, ?ws_conn:info(stats_timer, St)).
 
 t_ensure_rate_limit(_) ->
     %% XXX In the future, limiter should provide API for config update
     Path = [limiter, bytes_in, bucket, default, per_client],
     PerClient = emqx_config:get(Path),
-    {ok, Rate}= emqx_limiter_schema:to_rate("50MB"),
+    {ok, Rate} = emqx_limiter_schema:to_rate("50MB"),
    emqx_config:put(Path, PerClient#{rate := Rate}),
     emqx_limiter_server:update_config(bytes_in),
     timer:sleep(100),
@@ -418,11 +491,13 @@ t_ensure_rate_limit(_) ->
 
     %% must bigger than value in emqx_ratelimit_SUITE
     {ok, Need} = emqx_limiter_schema:to_capacity("1GB"),
-    St1 = ?ws_conn:check_limiter([{Need, bytes_in}],
-                                 [],
-                                 fun(_, _, S) -> S end,
-                                 [],
-                                 St),
+    St1 = ?ws_conn:check_limiter(
+        [{Need, bytes_in}],
+        [],
+        fun(_, _, S) -> S end,
+        [],
+        St
+    ),
     ?assertEqual(blocked, ?ws_conn:info(sockstate, St1)),
     ?assertEqual([{active, false}], ?ws_conn:info(postponed, St1)),
 
@@ -431,13 +506,13 @@ t_ensure_rate_limit(_) ->
     timer:sleep(100).
 
 t_parse_incoming(_) ->
-    {Packets, St} = ?ws_conn:parse_incoming(<<48,3>>, [], st()),
-    {Packets1, _} = ?ws_conn:parse_incoming(<<0,1,116>>, Packets, St),
+    {Packets, St} = ?ws_conn:parse_incoming(<<48, 3>>, [], st()),
+    {Packets1, _} = ?ws_conn:parse_incoming(<<0, 1, 116>>, Packets, St),
     Packet = ?PUBLISH_PACKET(?QOS_0, <<"t">>, undefined, <<>>),
     ?assertMatch([{incoming, Packet}], Packets1).
 
 t_parse_incoming_frame_error(_) ->
-    {Packets, _St} = ?ws_conn:parse_incoming(<<3,2,1,0>>, [], st()),
+    {Packets, _St} = ?ws_conn:parse_incoming(<<3, 2, 1, 0>>, [], st()),
     FrameError = {frame_error, function_clause},
     [{incoming, FrameError}] = Packets.
 
@@ -445,12 +520,13 @@ t_handle_incomming_frame_error(_) ->
     FrameError = {frame_error, bad_qos},
     Serialize = emqx_frame:serialize_fun(#{version => 5, max_size => 16#FFFF}),
     {[{close, bad_qos}], _St} = ?ws_conn:handle_incoming(FrameError, st(#{serialize => Serialize})).
-    % ?assertEqual(<<224,2,129,0>>, iolist_to_binary(IoData)).
+% ?assertEqual(<<224,2,129,0>>, iolist_to_binary(IoData)).
 
 t_handle_outgoing(_) ->
-    Packets = [?PUBLISH_PACKET(?QOS_1, <<"t1">>, 1, <<"payload">>),
-               ?PUBLISH_PACKET(?QOS_2, <<"t2">>, 2, <<"payload">>)
-              ],
+    Packets = [
+        ?PUBLISH_PACKET(?QOS_1, <<"t1">>, 1, <<"payload">>),
+        ?PUBLISH_PACKET(?QOS_2, <<"t2">>, 2, <<"payload">>)
+    ],
     {[{binary, IoData1}, {binary, IoData2}], _St} = ?ws_conn:handle_outgoing(Packets, st()),
     ?assert(is_binary(iolist_to_binary(IoData1))),
     ?assert(is_binary(iolist_to_binary(IoData2))).
@@ -474,49 +550,65 @@ t_shutdown(_) ->
 st() -> st(#{}).
 
 st(InitFields) when is_map(InitFields) ->
-    {ok, St, _} = ?ws_conn:websocket_init([req, #{zone => default,
-                                                  listener => {ws, default},
-                                                  limiter => limiter_cfg()}]),
-    maps:fold(fun(N, V, S) -> ?ws_conn:set_field(N, V, S) end,
-              ?ws_conn:set_field(channel, channel(), St),
-              InitFields
-             ).
+    {ok, St, _} = ?ws_conn:websocket_init([
+        req,
+        #{
+            zone => default,
+            listener => {ws, default},
+            limiter => limiter_cfg()
+        }
+    ]),
+    maps:fold(
+        fun(N, V, S) -> ?ws_conn:set_field(N, V, S) end,
+        ?ws_conn:set_field(channel, channel(), St),
+        InitFields
+    ).
 
 channel() -> channel(#{}).
 channel(InitFields) ->
-    ConnInfo = #{peername => {{127,0,0,1}, 3456},
-                 sockname => {{127,0,0,1}, 18083},
-                 conn_mod => emqx_ws_connection,
-                 proto_name => <<"MQTT">>,
-                 proto_ver => ?MQTT_PROTO_V5,
-                 clean_start => true,
-                 keepalive => 30,
-                 clientid => <<"clientid">>,
-                 username => <<"username">>,
-                 receive_maximum => 100,
-                 expiry_interval => 0
-                },
-    ClientInfo = #{zone => default,
-                   listener => {ws, default},
-                   protocol => mqtt,
-                   peerhost => {127,0,0,1},
-                   clientid => <<"clientid">>,
-                   username => <<"username">>,
-                   is_superuser => false,
-                   peercert => undefined,
-                   mountpoint => undefined
-                  },
+    ConnInfo = #{
+        peername => {{127, 0, 0, 1}, 3456},
+        sockname => {{127, 0, 0, 1}, 18083},
+        conn_mod => emqx_ws_connection,
+        proto_name => <<"MQTT">>,
+        proto_ver => ?MQTT_PROTO_V5,
+        clean_start => true,
+        keepalive => 30,
+        clientid => <<"clientid">>,
+        username => <<"username">>,
+        receive_maximum => 100,
+        expiry_interval => 0
+    },
+    ClientInfo = #{
+        zone => default,
+        listener => {ws, default},
+        protocol => mqtt,
+        peerhost => {127, 0, 0, 1},
+        clientid => <<"clientid">>,
+        username => <<"username">>,
+        is_superuser => false,
+        peercert => undefined,
+        mountpoint => undefined
+    },
     Session = emqx_session:init(#{max_inflight => 0}),
-    maps:fold(fun(Field, Value, Channel) ->
-                      emqx_channel:set_field(Field, Value, Channel)
-              end,
-              emqx_channel:init(ConnInfo, #{zone => default,
-                                            listener => {ws, default},
-                                            limiter => limiter_cfg()}),
-              maps:merge(#{clientinfo => ClientInfo,
-                           session => Session,
-                           conn_state => connected
-                          }, InitFields)).
+    maps:fold(
+        fun(Field, Value, Channel) ->
+            emqx_channel:set_field(Field, Value, Channel)
+        end,
+        emqx_channel:init(ConnInfo, #{
+            zone => default,
+            listener => {ws, default},
+            limiter => limiter_cfg()
+        }),
+        maps:merge(
+            #{
+                clientinfo => ClientInfo,
+                session => Session,
+                conn_state => connected
+            },
+            InitFields
+        )
+    ).
 
 start_ws_client(State) ->
     Host = maps:get(host, State, "127.0.0.1"),
@@ -530,8 +622,12 @@ ws_client(State) ->
     receive
         {gun_up, WPID, _Proto} ->
             #{protocols := Protos} = State,
-            StreamRef = gun:ws_upgrade(WPID, "/mqtt", maps:get(headers, State, []),
-                #{protocols => [{P, gun_ws_h} || P <- Protos]}),
+            StreamRef = gun:ws_upgrade(
+                WPID,
+                "/mqtt",
+                maps:get(headers, State, []),
+                #{protocols => [{P, gun_ws_h} || P <- Protos]}
+            ),
             ws_client(State#{wref => StreamRef});
         {gun_down, _WPID, _, Reason, _, _} ->
             State#{result => {gun_down, Reason}};
@@ -544,20 +640,20 @@ ws_client(State) ->
             State#{result => {gun_response, Rsp}};
         {gun_error, _WPID, _Ref, _Reason} ->
             State#{result => {gun_error, _Reason}};
-        {'DOWN',_PID,process,_WPID,_Reason} ->
+        {'DOWN', _PID, process, _WPID, _Reason} ->
             State#{result => {down, _Reason}};
         {gun_ws, _WPID, Frame} ->
             case Frame of
-              close ->
-                  self() ! stop;
-              {close,_Code,_Message} ->
-                  self() ! stop;
-              {text,TextData} ->
-                  io:format("Received Text Frame: ~p~n",[TextData]);
-              {binary,BinData} ->
-                  io:format("Received Binary Frame: ~p~n",[BinData]);
-              _ ->
-                  io:format("Received Unhandled Frame: ~p~n",[Frame])
+                close ->
+                    self() ! stop;
+                {close, _Code, _Message} ->
+                    self() ! stop;
+                {text, TextData} ->
+                    io:format("Received Text Frame: ~p~n", [TextData]);
+                {binary, BinData} ->
+                    io:format("Received Binary Frame: ~p~n", [BinData]);
+                _ ->
+                    io:format("Received Unhandled Frame: ~p~n", [Frame])
             end,
             ws_client(State);
         stop ->
@@ -566,7 +662,7 @@ ws_client(State) ->
            gun:shutdown(WPID),
            State#{result => {stop, normal}};
        Message ->
-            ct:pal("Received Unknown Message on Gun: ~p~n",[Message]),
+            ct:pal("Received Unknown Message on Gun: ~p~n", [Message]),
            ws_client(State)
     after 1000 ->
         ct:fail(ws_timeout)