chore: format code apps/emqx
parent 80b4313f00
commit 91bcf02970
@@ -25,11 +25,12 @@
|
|||
-define(ROUTE_SHARD, route_shard).
|
||||
-define(PERSISTENT_SESSION_SHARD, emqx_persistent_session_shard).
|
||||
|
||||
-define(BOOT_SHARDS, [ ?ROUTE_SHARD
|
||||
, ?COMMON_SHARD
|
||||
, ?SHARED_SUB_SHARD
|
||||
, ?PERSISTENT_SESSION_SHARD
|
||||
]).
|
||||
-define(BOOT_SHARDS, [
|
||||
?ROUTE_SHARD,
|
||||
?COMMON_SHARD,
|
||||
?SHARED_SUB_SHARD,
|
||||
?PERSISTENT_SESSION_SHARD
|
||||
]).
|
||||
|
||||
%% Banner
|
||||
%%--------------------------------------------------------------------
|
||||
|
|
@@ -63,96 +64,99 @@
|
|||
|
||||
%% See 'Application Message' in MQTT Version 5.0
|
||||
-record(message, {
|
||||
%% Global unique message ID
|
||||
id :: binary(),
|
||||
%% Message QoS
|
||||
qos = 0,
|
||||
%% Message from
|
||||
from :: atom() | binary(),
|
||||
%% Message flags
|
||||
flags = #{} :: emqx_types:flags(),
|
||||
%% Message headers. May contain any metadata. e.g. the
|
||||
%% protocol version number, username, peerhost or
|
||||
%% the PUBLISH properties (MQTT 5.0).
|
||||
headers = #{} :: emqx_types:headers(),
|
||||
%% Topic that the message is published to
|
||||
topic :: emqx_types:topic(),
|
||||
%% Message Payload
|
||||
payload :: emqx_types:payload(),
|
||||
%% Timestamp (Unit: millisecond)
|
||||
timestamp :: integer(),
|
||||
%% not used so far, for future extension
|
||||
extra = [] :: term()
|
||||
}).
|
||||
%% Global unique message ID
|
||||
id :: binary(),
|
||||
%% Message QoS
|
||||
qos = 0,
|
||||
%% Message from
|
||||
from :: atom() | binary(),
|
||||
%% Message flags
|
||||
flags = #{} :: emqx_types:flags(),
|
||||
%% Message headers. May contain any metadata. e.g. the
|
||||
%% protocol version number, username, peerhost or
|
||||
%% the PUBLISH properties (MQTT 5.0).
|
||||
headers = #{} :: emqx_types:headers(),
|
||||
%% Topic that the message is published to
|
||||
topic :: emqx_types:topic(),
|
||||
%% Message Payload
|
||||
payload :: emqx_types:payload(),
|
||||
%% Timestamp (Unit: millisecond)
|
||||
timestamp :: integer(),
|
||||
%% not used so far, for future extension
|
||||
extra = [] :: term()
|
||||
}).
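
For illustration, a #message{} can be built by hand once this header is included. A minimal sketch; the include path and the use of emqx_guid:gen/0 as the id generator are assumptions, not part of this commit:

-module(example_message).
-include("emqx.hrl").
-export([make/2]).

%% Build a QoS 0 application message with the mandatory fields filled in.
make(Topic, Payload) ->
    #message{
        id = emqx_guid:gen(),
        qos = 0,
        from = <<"example-client">>,
        flags = #{},
        headers = #{},
        topic = Topic,
        payload = Payload,
        timestamp = erlang:system_time(millisecond),
        extra = []
    }.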
|
||||
|
||||
-record(delivery, {
|
||||
sender :: pid(), %% Sender of the delivery
|
||||
message :: #message{} %% The message delivered
|
||||
}).
|
||||
%% Sender of the delivery
|
||||
sender :: pid(),
|
||||
%% The message delivered
|
||||
message :: #message{}
|
||||
}).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Route
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-record(route, {
|
||||
topic :: binary(),
|
||||
dest :: node() | {binary(), node()} | emqx_session:sessionID()
|
||||
}).
|
||||
topic :: binary(),
|
||||
dest :: node() | {binary(), node()} | emqx_session:sessionID()
|
||||
}).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Plugin
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-record(plugin, {
|
||||
name :: atom(),
|
||||
dir :: string() | undefined,
|
||||
descr :: string(),
|
||||
vendor :: string() | undefined,
|
||||
active = false :: boolean(),
|
||||
info = #{} :: map()
|
||||
}).
|
||||
name :: atom(),
|
||||
dir :: string() | undefined,
|
||||
descr :: string(),
|
||||
vendor :: string() | undefined,
|
||||
active = false :: boolean(),
|
||||
info = #{} :: map()
|
||||
}).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Command
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-record(command, {
|
||||
name :: atom(),
|
||||
action :: atom(),
|
||||
args = [] :: list(),
|
||||
opts = [] :: list(),
|
||||
usage :: string(),
|
||||
descr :: string()
|
||||
}).
|
||||
name :: atom(),
|
||||
action :: atom(),
|
||||
args = [] :: list(),
|
||||
opts = [] :: list(),
|
||||
usage :: string(),
|
||||
descr :: string()
|
||||
}).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Banned
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-record(banned, {
|
||||
who :: {clientid, binary()}
|
||||
| {peerhost, inet:ip_address()}
|
||||
| {username, binary()},
|
||||
by :: binary(),
|
||||
reason :: binary(),
|
||||
at :: integer(),
|
||||
until :: integer()
|
||||
}).
|
||||
who ::
|
||||
{clientid, binary()}
|
||||
| {peerhost, inet:ip_address()}
|
||||
| {username, binary()},
|
||||
by :: binary(),
|
||||
reason :: binary(),
|
||||
at :: integer(),
|
||||
until :: integer()
|
||||
}).
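
As an illustration only, a ban entry might be constructed like this, assuming at and until hold Unix timestamps in seconds (the unit is not stated in the header):

Now = erlang:system_time(second),
Ban = #banned{
    who = {clientid, <<"misbehaving-client">>},
    by = <<"admin">>,
    reason = <<"flooding">>,
    at = Now,
    %% assumption: a one-hour ban expressed in seconds
    until = Now + 3600
}.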
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Authentication
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-record(authenticator,
|
||||
{ id :: binary()
|
||||
, provider :: module()
|
||||
, enable :: boolean()
|
||||
, state :: map()
|
||||
}).
|
||||
-record(authenticator, {
|
||||
id :: binary(),
|
||||
provider :: module(),
|
||||
enable :: boolean(),
|
||||
state :: map()
|
||||
}).
|
||||
|
||||
-record(chain,
|
||||
{ name :: atom()
|
||||
, authenticators :: [#authenticator{}]
|
||||
}).
|
||||
-record(chain, {
|
||||
name :: atom(),
|
||||
authenticators :: [#authenticator{}]
|
||||
}).
|
||||
|
||||
-endif.
|
||||
|
|
|
|||
|
|
@@ -23,8 +23,13 @@
|
|||
%% MQTT SockOpts
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-define(MQTT_SOCKOPTS, [binary, {packet, raw}, {reuseaddr, true},
|
||||
{backlog, 512}, {nodelay, true}]).
|
||||
-define(MQTT_SOCKOPTS, [
|
||||
binary,
|
||||
{packet, raw},
|
||||
{reuseaddr, true},
|
||||
{backlog, 512},
|
||||
{nodelay, true}
|
||||
]).
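
These are ordinary gen_tcp options, so a rough sketch of a listener consuming them directly looks like the following; EMQX itself sets up listeners through esockd, so this is illustrative only:

{ok, LSock} = gen_tcp:listen(1883, ?MQTT_SOCKOPTS),
{ok, _Sock} = gen_tcp:accept(LSock).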
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% MQTT Protocol Version and Names
|
||||
|
|
@@ -36,40 +41,45 @@
|
|||
-define(MQTT_PROTO_V5, 5).
|
||||
|
||||
-define(PROTOCOL_NAMES, [
|
||||
{?MQTT_SN_PROTO_V1, <<"MQTT-SN">>}, %% XXX:Compatible with emqx-sn plug-in
|
||||
%% XXX:Compatible with emqx-sn plug-in
|
||||
{?MQTT_SN_PROTO_V1, <<"MQTT-SN">>},
|
||||
{?MQTT_PROTO_V3, <<"MQIsdp">>},
|
||||
{?MQTT_PROTO_V4, <<"MQTT">>},
|
||||
{?MQTT_PROTO_V5, <<"MQTT">>}]).
|
||||
{?MQTT_PROTO_V5, <<"MQTT">>}
|
||||
]).
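
The list is a plain proplist keyed by protocol version, so a name lookup is a one-liner, for example:

<<"MQTT">> = proplists:get_value(?MQTT_PROTO_V5, ?PROTOCOL_NAMES),
<<"MQIsdp">> = proplists:get_value(?MQTT_PROTO_V3, ?PROTOCOL_NAMES).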
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% MQTT QoS Levels
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-define(QOS_0, 0). %% At most once
|
||||
-define(QOS_1, 1). %% At least once
|
||||
-define(QOS_2, 2). %% Exactly once
|
||||
%% At most once
|
||||
-define(QOS_0, 0).
|
||||
%% At least once
|
||||
-define(QOS_1, 1).
|
||||
%% Exactly once
|
||||
-define(QOS_2, 2).
|
||||
|
||||
-define(IS_QOS(I), (I >= ?QOS_0 andalso I =< ?QOS_2)).
|
||||
|
||||
-define(QOS_I(Name),
|
||||
begin
|
||||
(case Name of
|
||||
?QOS_0 -> ?QOS_0;
|
||||
qos0 -> ?QOS_0;
|
||||
at_most_once -> ?QOS_0;
|
||||
?QOS_1 -> ?QOS_1;
|
||||
qos1 -> ?QOS_1;
|
||||
at_least_once -> ?QOS_1;
|
||||
?QOS_2 -> ?QOS_2;
|
||||
qos2 -> ?QOS_2;
|
||||
exactly_once -> ?QOS_2
|
||||
end)
|
||||
end).
|
||||
-define(QOS_I(Name), begin
|
||||
(case Name of
|
||||
?QOS_0 -> ?QOS_0;
|
||||
qos0 -> ?QOS_0;
|
||||
at_most_once -> ?QOS_0;
|
||||
?QOS_1 -> ?QOS_1;
|
||||
qos1 -> ?QOS_1;
|
||||
at_least_once -> ?QOS_1;
|
||||
?QOS_2 -> ?QOS_2;
|
||||
qos2 -> ?QOS_2;
|
||||
exactly_once -> ?QOS_2
|
||||
end)
|
||||
end).
|
||||
|
||||
-define(IS_QOS_NAME(I),
|
||||
(I =:= qos0 orelse I =:= at_most_once orelse
|
||||
I =:= qos1 orelse I =:= at_least_once orelse
|
||||
I =:= qos2 orelse I =:= exactly_once)).
|
||||
(I =:= qos0 orelse I =:= at_most_once orelse
|
||||
I =:= qos1 orelse I =:= at_least_once orelse
|
||||
I =:= qos2 orelse I =:= exactly_once)
|
||||
).
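
Taken together these macros normalize the different spellings of a QoS level; a quick sketch of how they behave:

?QOS_1 = ?QOS_I(at_least_once),
?QOS_2 = ?QOS_I(qos2),
true = ?IS_QOS(?QOS_I(exactly_once)),
true = ?IS_QOS_NAME(at_most_once),
false = ?IS_QOS_NAME(not_a_qos_name).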
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Maximum ClientId Length.
|
||||
|
|
@@ -81,83 +91,105 @@
|
|||
%% MQTT Control Packet Types
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-define(RESERVED, 0). %% Reserved
|
||||
-define(CONNECT, 1). %% Client request to connect to Server
|
||||
-define(CONNACK, 2). %% Server to Client: Connect acknowledgment
|
||||
-define(PUBLISH, 3). %% Publish message
|
||||
-define(PUBACK, 4). %% Publish acknowledgment
|
||||
-define(PUBREC, 5). %% Publish received (assured delivery part 1)
|
||||
-define(PUBREL, 6). %% Publish release (assured delivery part 2)
|
||||
-define(PUBCOMP, 7). %% Publish complete (assured delivery part 3)
|
||||
-define(SUBSCRIBE, 8). %% Client subscribe request
|
||||
-define(SUBACK, 9). %% Server Subscribe acknowledgment
|
||||
-define(UNSUBSCRIBE, 10). %% Unsubscribe request
|
||||
-define(UNSUBACK, 11). %% Unsubscribe acknowledgment
|
||||
-define(PINGREQ, 12). %% PING request
|
||||
-define(PINGRESP, 13). %% PING response
|
||||
-define(DISCONNECT, 14). %% Client or Server is disconnecting
|
||||
-define(AUTH, 15). %% Authentication exchange
|
||||
%% Reserved
|
||||
-define(RESERVED, 0).
|
||||
%% Client request to connect to Server
|
||||
-define(CONNECT, 1).
|
||||
%% Server to Client: Connect acknowledgment
|
||||
-define(CONNACK, 2).
|
||||
%% Publish message
|
||||
-define(PUBLISH, 3).
|
||||
%% Publish acknowledgment
|
||||
-define(PUBACK, 4).
|
||||
%% Publish received (assured delivery part 1)
|
||||
-define(PUBREC, 5).
|
||||
%% Publish release (assured delivery part 2)
|
||||
-define(PUBREL, 6).
|
||||
%% Publish complete (assured delivery part 3)
|
||||
-define(PUBCOMP, 7).
|
||||
%% Client subscribe request
|
||||
-define(SUBSCRIBE, 8).
|
||||
%% Server Subscribe acknowledgment
|
||||
-define(SUBACK, 9).
|
||||
%% Unsubscribe request
|
||||
-define(UNSUBSCRIBE, 10).
|
||||
%% Unsubscribe acknowledgment
|
||||
-define(UNSUBACK, 11).
|
||||
%% PING request
|
||||
-define(PINGREQ, 12).
|
||||
%% PING response
|
||||
-define(PINGRESP, 13).
|
||||
%% Client or Server is disconnecting
|
||||
-define(DISCONNECT, 14).
|
||||
%% Authentication exchange
|
||||
-define(AUTH, 15).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% MQTT V3.1.1 Connect Return Codes
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-define(CONNACK_ACCEPT, 0). %% Connection accepted
|
||||
-define(CONNACK_PROTO_VER, 1). %% Unacceptable protocol version
|
||||
-define(CONNACK_INVALID_ID, 2). %% Client Identifier is correct UTF-8 but not allowed by the Server
|
||||
-define(CONNACK_SERVER, 3). %% Server unavailable
|
||||
-define(CONNACK_CREDENTIALS, 4). %% Username or password is malformed
|
||||
-define(CONNACK_AUTH, 5). %% Client is not authorized to connect
|
||||
%% Connection accepted
|
||||
-define(CONNACK_ACCEPT, 0).
|
||||
%% Unacceptable protocol version
|
||||
-define(CONNACK_PROTO_VER, 1).
|
||||
%% Client Identifier is correct UTF-8 but not allowed by the Server
|
||||
-define(CONNACK_INVALID_ID, 2).
|
||||
%% Server unavailable
|
||||
-define(CONNACK_SERVER, 3).
|
||||
%% Username or password is malformed
|
||||
-define(CONNACK_CREDENTIALS, 4).
|
||||
%% Client is not authorized to connect
|
||||
-define(CONNACK_AUTH, 5).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% MQTT V5.0 Reason Codes
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-define(RC_SUCCESS, 16#00).
|
||||
-define(RC_NORMAL_DISCONNECTION, 16#00).
|
||||
-define(RC_GRANTED_QOS_0, 16#00).
|
||||
-define(RC_GRANTED_QOS_1, 16#01).
|
||||
-define(RC_GRANTED_QOS_2, 16#02).
|
||||
-define(RC_DISCONNECT_WITH_WILL_MESSAGE, 16#04).
|
||||
-define(RC_NO_MATCHING_SUBSCRIBERS, 16#10).
|
||||
-define(RC_NO_SUBSCRIPTION_EXISTED, 16#11).
|
||||
-define(RC_CONTINUE_AUTHENTICATION, 16#18).
|
||||
-define(RC_RE_AUTHENTICATE, 16#19).
|
||||
-define(RC_UNSPECIFIED_ERROR, 16#80).
|
||||
-define(RC_MALFORMED_PACKET, 16#81).
|
||||
-define(RC_PROTOCOL_ERROR, 16#82).
|
||||
-define(RC_IMPLEMENTATION_SPECIFIC_ERROR, 16#83).
|
||||
-define(RC_UNSUPPORTED_PROTOCOL_VERSION, 16#84).
|
||||
-define(RC_CLIENT_IDENTIFIER_NOT_VALID, 16#85).
|
||||
-define(RC_BAD_USER_NAME_OR_PASSWORD, 16#86).
|
||||
-define(RC_NOT_AUTHORIZED, 16#87).
|
||||
-define(RC_SERVER_UNAVAILABLE, 16#88).
|
||||
-define(RC_SERVER_BUSY, 16#89).
|
||||
-define(RC_BANNED, 16#8A).
|
||||
-define(RC_SERVER_SHUTTING_DOWN, 16#8B).
|
||||
-define(RC_BAD_AUTHENTICATION_METHOD, 16#8C).
|
||||
-define(RC_KEEP_ALIVE_TIMEOUT, 16#8D).
|
||||
-define(RC_SESSION_TAKEN_OVER, 16#8E).
|
||||
-define(RC_TOPIC_FILTER_INVALID, 16#8F).
|
||||
-define(RC_TOPIC_NAME_INVALID, 16#90).
|
||||
-define(RC_PACKET_IDENTIFIER_IN_USE, 16#91).
|
||||
-define(RC_PACKET_IDENTIFIER_NOT_FOUND, 16#92).
|
||||
-define(RC_RECEIVE_MAXIMUM_EXCEEDED, 16#93).
|
||||
-define(RC_TOPIC_ALIAS_INVALID, 16#94).
|
||||
-define(RC_PACKET_TOO_LARGE, 16#95).
|
||||
-define(RC_MESSAGE_RATE_TOO_HIGH, 16#96).
|
||||
-define(RC_QUOTA_EXCEEDED, 16#97).
|
||||
-define(RC_ADMINISTRATIVE_ACTION, 16#98).
|
||||
-define(RC_PAYLOAD_FORMAT_INVALID, 16#99).
|
||||
-define(RC_RETAIN_NOT_SUPPORTED, 16#9A).
|
||||
-define(RC_QOS_NOT_SUPPORTED, 16#9B).
|
||||
-define(RC_USE_ANOTHER_SERVER, 16#9C).
|
||||
-define(RC_SERVER_MOVED, 16#9D).
|
||||
-define(RC_SHARED_SUBSCRIPTIONS_NOT_SUPPORTED, 16#9E).
|
||||
-define(RC_CONNECTION_RATE_EXCEEDED, 16#9F).
|
||||
-define(RC_MAXIMUM_CONNECT_TIME, 16#A0).
|
||||
-define(RC_SUCCESS, 16#00).
|
||||
-define(RC_NORMAL_DISCONNECTION, 16#00).
|
||||
-define(RC_GRANTED_QOS_0, 16#00).
|
||||
-define(RC_GRANTED_QOS_1, 16#01).
|
||||
-define(RC_GRANTED_QOS_2, 16#02).
|
||||
-define(RC_DISCONNECT_WITH_WILL_MESSAGE, 16#04).
|
||||
-define(RC_NO_MATCHING_SUBSCRIBERS, 16#10).
|
||||
-define(RC_NO_SUBSCRIPTION_EXISTED, 16#11).
|
||||
-define(RC_CONTINUE_AUTHENTICATION, 16#18).
|
||||
-define(RC_RE_AUTHENTICATE, 16#19).
|
||||
-define(RC_UNSPECIFIED_ERROR, 16#80).
|
||||
-define(RC_MALFORMED_PACKET, 16#81).
|
||||
-define(RC_PROTOCOL_ERROR, 16#82).
|
||||
-define(RC_IMPLEMENTATION_SPECIFIC_ERROR, 16#83).
|
||||
-define(RC_UNSUPPORTED_PROTOCOL_VERSION, 16#84).
|
||||
-define(RC_CLIENT_IDENTIFIER_NOT_VALID, 16#85).
|
||||
-define(RC_BAD_USER_NAME_OR_PASSWORD, 16#86).
|
||||
-define(RC_NOT_AUTHORIZED, 16#87).
|
||||
-define(RC_SERVER_UNAVAILABLE, 16#88).
|
||||
-define(RC_SERVER_BUSY, 16#89).
|
||||
-define(RC_BANNED, 16#8A).
|
||||
-define(RC_SERVER_SHUTTING_DOWN, 16#8B).
|
||||
-define(RC_BAD_AUTHENTICATION_METHOD, 16#8C).
|
||||
-define(RC_KEEP_ALIVE_TIMEOUT, 16#8D).
|
||||
-define(RC_SESSION_TAKEN_OVER, 16#8E).
|
||||
-define(RC_TOPIC_FILTER_INVALID, 16#8F).
|
||||
-define(RC_TOPIC_NAME_INVALID, 16#90).
|
||||
-define(RC_PACKET_IDENTIFIER_IN_USE, 16#91).
|
||||
-define(RC_PACKET_IDENTIFIER_NOT_FOUND, 16#92).
|
||||
-define(RC_RECEIVE_MAXIMUM_EXCEEDED, 16#93).
|
||||
-define(RC_TOPIC_ALIAS_INVALID, 16#94).
|
||||
-define(RC_PACKET_TOO_LARGE, 16#95).
|
||||
-define(RC_MESSAGE_RATE_TOO_HIGH, 16#96).
|
||||
-define(RC_QUOTA_EXCEEDED, 16#97).
|
||||
-define(RC_ADMINISTRATIVE_ACTION, 16#98).
|
||||
-define(RC_PAYLOAD_FORMAT_INVALID, 16#99).
|
||||
-define(RC_RETAIN_NOT_SUPPORTED, 16#9A).
|
||||
-define(RC_QOS_NOT_SUPPORTED, 16#9B).
|
||||
-define(RC_USE_ANOTHER_SERVER, 16#9C).
|
||||
-define(RC_SERVER_MOVED, 16#9D).
|
||||
-define(RC_SHARED_SUBSCRIPTIONS_NOT_SUPPORTED, 16#9E).
|
||||
-define(RC_CONNECTION_RATE_EXCEEDED, 16#9F).
|
||||
-define(RC_MAXIMUM_CONNECT_TIME, 16#A0).
|
||||
-define(RC_SUBSCRIPTION_IDENTIFIERS_NOT_SUPPORTED, 16#A1).
|
||||
-define(RC_WILDCARD_SUBSCRIPTIONS_NOT_SUPPORTED, 16#A2).
|
||||
-define(RC_WILDCARD_SUBSCRIPTIONS_NOT_SUPPORTED, 16#A2).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Maximum MQTT Packet ID and Length
|
||||
|
|
@@ -180,367 +212,455 @@
|
|||
%%--------------------------------------------------------------------
|
||||
|
||||
-record(mqtt_packet_header, {
|
||||
type = ?RESERVED,
|
||||
dup = false,
|
||||
qos = ?QOS_0,
|
||||
retain = false
|
||||
}).
|
||||
type = ?RESERVED,
|
||||
dup = false,
|
||||
qos = ?QOS_0,
|
||||
retain = false
|
||||
}).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% MQTT Packets
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-define(DEFAULT_SUBOPTS, #{rh => 0, %% Retain Handling
|
||||
rap => 0, %% Retain as Publish
|
||||
nl => 0, %% No Local
|
||||
qos => 0 %% QoS
|
||||
}).
|
||||
%% Retain Handling
|
||||
-define(DEFAULT_SUBOPTS, #{
|
||||
rh => 0,
|
||||
%% Retain as Publish
|
||||
rap => 0,
|
||||
%% No Local
|
||||
nl => 0,
|
||||
%% QoS
|
||||
qos => 0
|
||||
}).
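
Since the default is a plain map, per-subscription options can be layered on top of it with maps:merge/2, e.g.:

%% => #{rh => 0, rap => 0, nl => 1, qos => 1}
SubOpts = maps:merge(?DEFAULT_SUBOPTS, #{nl => 1, qos => 1}).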
|
||||
|
||||
-record(mqtt_packet_connect, {
|
||||
proto_name = <<"MQTT">>,
|
||||
proto_ver = ?MQTT_PROTO_V4,
|
||||
is_bridge = false,
|
||||
clean_start = true,
|
||||
will_flag = false,
|
||||
will_qos = ?QOS_0,
|
||||
will_retain = false,
|
||||
keepalive = 0,
|
||||
properties = #{},
|
||||
clientid = <<>>,
|
||||
will_props = #{},
|
||||
will_topic = undefined,
|
||||
will_payload = undefined,
|
||||
username = undefined,
|
||||
password = undefined
|
||||
}).
|
||||
proto_name = <<"MQTT">>,
|
||||
proto_ver = ?MQTT_PROTO_V4,
|
||||
is_bridge = false,
|
||||
clean_start = true,
|
||||
will_flag = false,
|
||||
will_qos = ?QOS_0,
|
||||
will_retain = false,
|
||||
keepalive = 0,
|
||||
properties = #{},
|
||||
clientid = <<>>,
|
||||
will_props = #{},
|
||||
will_topic = undefined,
|
||||
will_payload = undefined,
|
||||
username = undefined,
|
||||
password = undefined
|
||||
}).
|
||||
|
||||
-record(mqtt_packet_connack, {
|
||||
ack_flags,
|
||||
reason_code,
|
||||
properties = #{}
|
||||
}).
|
||||
ack_flags,
|
||||
reason_code,
|
||||
properties = #{}
|
||||
}).
|
||||
|
||||
-record(mqtt_packet_publish, {
|
||||
topic_name,
|
||||
packet_id,
|
||||
properties = #{}
|
||||
}).
|
||||
topic_name,
|
||||
packet_id,
|
||||
properties = #{}
|
||||
}).
|
||||
|
||||
-record(mqtt_packet_puback, {
|
||||
packet_id,
|
||||
reason_code,
|
||||
properties = #{}
|
||||
}).
|
||||
packet_id,
|
||||
reason_code,
|
||||
properties = #{}
|
||||
}).
|
||||
|
||||
-record(mqtt_packet_subscribe, {
|
||||
packet_id,
|
||||
properties = #{},
|
||||
topic_filters
|
||||
}).
|
||||
packet_id,
|
||||
properties = #{},
|
||||
topic_filters
|
||||
}).
|
||||
|
||||
-record(mqtt_packet_suback, {
|
||||
packet_id,
|
||||
properties = #{},
|
||||
reason_codes
|
||||
}).
|
||||
packet_id,
|
||||
properties = #{},
|
||||
reason_codes
|
||||
}).
|
||||
|
||||
-record(mqtt_packet_unsubscribe, {
|
||||
packet_id,
|
||||
properties = #{},
|
||||
topic_filters
|
||||
}).
|
||||
packet_id,
|
||||
properties = #{},
|
||||
topic_filters
|
||||
}).
|
||||
|
||||
-record(mqtt_packet_unsuback, {
|
||||
packet_id,
|
||||
properties = #{},
|
||||
reason_codes
|
||||
}).
|
||||
packet_id,
|
||||
properties = #{},
|
||||
reason_codes
|
||||
}).
|
||||
|
||||
-record(mqtt_packet_disconnect, {
|
||||
reason_code,
|
||||
properties = #{}
|
||||
}).
|
||||
reason_code,
|
||||
properties = #{}
|
||||
}).
|
||||
|
||||
-record(mqtt_packet_auth, {
|
||||
reason_code,
|
||||
properties = #{}
|
||||
}).
|
||||
reason_code,
|
||||
properties = #{}
|
||||
}).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% MQTT Control Packet
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-record(mqtt_packet, {
|
||||
header :: #mqtt_packet_header{},
|
||||
variable :: #mqtt_packet_connect{}
|
||||
| #mqtt_packet_connack{}
|
||||
| #mqtt_packet_publish{}
|
||||
| #mqtt_packet_puback{}
|
||||
| #mqtt_packet_subscribe{}
|
||||
| #mqtt_packet_suback{}
|
||||
| #mqtt_packet_unsubscribe{}
|
||||
| #mqtt_packet_unsuback{}
|
||||
| #mqtt_packet_disconnect{}
|
||||
| #mqtt_packet_auth{}
|
||||
| pos_integer()
|
||||
| undefined,
|
||||
payload :: binary() | undefined
|
||||
}).
|
||||
header :: #mqtt_packet_header{},
|
||||
variable ::
|
||||
#mqtt_packet_connect{}
|
||||
| #mqtt_packet_connack{}
|
||||
| #mqtt_packet_publish{}
|
||||
| #mqtt_packet_puback{}
|
||||
| #mqtt_packet_subscribe{}
|
||||
| #mqtt_packet_suback{}
|
||||
| #mqtt_packet_unsubscribe{}
|
||||
| #mqtt_packet_unsuback{}
|
||||
| #mqtt_packet_disconnect{}
|
||||
| #mqtt_packet_auth{}
|
||||
| pos_integer()
|
||||
| undefined,
|
||||
payload :: binary() | undefined
|
||||
}).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% MQTT Message Internal
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-record(mqtt_msg, {
|
||||
qos = ?QOS_0,
|
||||
retain = false,
|
||||
dup = false,
|
||||
packet_id,
|
||||
topic,
|
||||
props,
|
||||
payload
|
||||
}).
|
||||
qos = ?QOS_0,
|
||||
retain = false,
|
||||
dup = false,
|
||||
packet_id,
|
||||
topic,
|
||||
props,
|
||||
payload
|
||||
}).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% MQTT Packet Match
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-define(CONNECT_PACKET(),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?CONNECT}}).
|
||||
-define(CONNECT_PACKET(), #mqtt_packet{header = #mqtt_packet_header{type = ?CONNECT}}).
|
||||
|
||||
-define(CONNECT_PACKET(Var),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?CONNECT},
|
||||
variable = Var}).
|
||||
-define(CONNECT_PACKET(Var), #mqtt_packet{
|
||||
header = #mqtt_packet_header{type = ?CONNECT},
|
||||
variable = Var
|
||||
}).
|
||||
|
||||
-define(CONNACK_PACKET(ReasonCode),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?CONNACK},
|
||||
variable = #mqtt_packet_connack{ack_flags = 0,
|
||||
reason_code = ReasonCode}
|
||||
}).
|
||||
-define(CONNACK_PACKET(ReasonCode), #mqtt_packet{
|
||||
header = #mqtt_packet_header{type = ?CONNACK},
|
||||
variable = #mqtt_packet_connack{
|
||||
ack_flags = 0,
|
||||
reason_code = ReasonCode
|
||||
}
|
||||
}).
|
||||
|
||||
-define(CONNACK_PACKET(ReasonCode, SessPresent),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?CONNACK},
|
||||
variable = #mqtt_packet_connack{ack_flags = SessPresent,
|
||||
reason_code = ReasonCode}
|
||||
}).
|
||||
-define(CONNACK_PACKET(ReasonCode, SessPresent), #mqtt_packet{
|
||||
header = #mqtt_packet_header{type = ?CONNACK},
|
||||
variable = #mqtt_packet_connack{
|
||||
ack_flags = SessPresent,
|
||||
reason_code = ReasonCode
|
||||
}
|
||||
}).
|
||||
|
||||
-define(CONNACK_PACKET(ReasonCode, SessPresent, Properties),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?CONNACK},
|
||||
variable = #mqtt_packet_connack{ack_flags = SessPresent,
|
||||
reason_code = ReasonCode,
|
||||
properties = Properties}
|
||||
}).
|
||||
-define(CONNACK_PACKET(ReasonCode, SessPresent, Properties), #mqtt_packet{
|
||||
header = #mqtt_packet_header{type = ?CONNACK},
|
||||
variable = #mqtt_packet_connack{
|
||||
ack_flags = SessPresent,
|
||||
reason_code = ReasonCode,
|
||||
properties = Properties
|
||||
}
|
||||
}).
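
Because these macros expand to record expressions they work both as constructors and as match patterns; a sketch of a hypothetical handler (the function name is illustrative, not from this code base):

handle_connack(?CONNACK_PACKET(?RC_SUCCESS, SessPresent, Props)) ->
    {ok, SessPresent, Props};
handle_connack(?CONNACK_PACKET(ReasonCode, _SessPresent, _Props)) ->
    {error, ReasonCode}.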
|
||||
|
||||
-define(AUTH_PACKET(),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?AUTH},
|
||||
variable = #mqtt_packet_auth{reason_code = 0}
|
||||
}).
|
||||
-define(AUTH_PACKET(), #mqtt_packet{
|
||||
header = #mqtt_packet_header{type = ?AUTH},
|
||||
variable = #mqtt_packet_auth{reason_code = 0}
|
||||
}).
|
||||
|
||||
-define(AUTH_PACKET(ReasonCode),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?AUTH},
|
||||
variable = #mqtt_packet_auth{reason_code = ReasonCode}
|
||||
}).
|
||||
-define(AUTH_PACKET(ReasonCode), #mqtt_packet{
|
||||
header = #mqtt_packet_header{type = ?AUTH},
|
||||
variable = #mqtt_packet_auth{reason_code = ReasonCode}
|
||||
}).
|
||||
|
||||
-define(AUTH_PACKET(ReasonCode, Properties),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?AUTH},
|
||||
variable = #mqtt_packet_auth{reason_code = ReasonCode,
|
||||
properties = Properties}
|
||||
}).
|
||||
-define(AUTH_PACKET(ReasonCode, Properties), #mqtt_packet{
|
||||
header = #mqtt_packet_header{type = ?AUTH},
|
||||
variable = #mqtt_packet_auth{
|
||||
reason_code = ReasonCode,
|
||||
properties = Properties
|
||||
}
|
||||
}).
|
||||
|
||||
-define(PUBLISH_PACKET(QoS),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?PUBLISH, qos = QoS}}).
|
||||
-define(PUBLISH_PACKET(QoS), #mqtt_packet{header = #mqtt_packet_header{type = ?PUBLISH, qos = QoS}}).
|
||||
|
||||
-define(PUBLISH_PACKET(QoS, PacketId),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?PUBLISH,
|
||||
qos = QoS},
|
||||
variable = #mqtt_packet_publish{packet_id = PacketId}
|
||||
}).
|
||||
-define(PUBLISH_PACKET(QoS, PacketId), #mqtt_packet{
|
||||
header = #mqtt_packet_header{
|
||||
type = ?PUBLISH,
|
||||
qos = QoS
|
||||
},
|
||||
variable = #mqtt_packet_publish{packet_id = PacketId}
|
||||
}).
|
||||
|
||||
-define(PUBLISH_PACKET(QoS, Topic, PacketId),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?PUBLISH,
|
||||
qos = QoS},
|
||||
variable = #mqtt_packet_publish{topic_name = Topic,
|
||||
packet_id = PacketId}
|
||||
}).
|
||||
-define(PUBLISH_PACKET(QoS, Topic, PacketId), #mqtt_packet{
|
||||
header = #mqtt_packet_header{
|
||||
type = ?PUBLISH,
|
||||
qos = QoS
|
||||
},
|
||||
variable = #mqtt_packet_publish{
|
||||
topic_name = Topic,
|
||||
packet_id = PacketId
|
||||
}
|
||||
}).
|
||||
|
||||
-define(PUBLISH_PACKET(QoS, Topic, PacketId, Payload),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?PUBLISH,
|
||||
qos = QoS},
|
||||
variable = #mqtt_packet_publish{topic_name = Topic,
|
||||
packet_id = PacketId},
|
||||
payload = Payload
|
||||
}).
|
||||
-define(PUBLISH_PACKET(QoS, Topic, PacketId, Payload), #mqtt_packet{
|
||||
header = #mqtt_packet_header{
|
||||
type = ?PUBLISH,
|
||||
qos = QoS
|
||||
},
|
||||
variable = #mqtt_packet_publish{
|
||||
topic_name = Topic,
|
||||
packet_id = PacketId
|
||||
},
|
||||
payload = Payload
|
||||
}).
|
||||
|
||||
-define(PUBLISH_PACKET(QoS, Topic, PacketId, Properties, Payload),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?PUBLISH,
|
||||
qos = QoS},
|
||||
variable = #mqtt_packet_publish{topic_name = Topic,
|
||||
packet_id = PacketId,
|
||||
properties = Properties},
|
||||
payload = Payload
|
||||
}).
|
||||
-define(PUBLISH_PACKET(QoS, Topic, PacketId, Properties, Payload), #mqtt_packet{
|
||||
header = #mqtt_packet_header{
|
||||
type = ?PUBLISH,
|
||||
qos = QoS
|
||||
},
|
||||
variable = #mqtt_packet_publish{
|
||||
topic_name = Topic,
|
||||
packet_id = PacketId,
|
||||
properties = Properties
|
||||
},
|
||||
payload = Payload
|
||||
}).
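
For example, a QoS 1 PUBLISH with empty properties can be built in one expression; serializing it through emqx_frame:serialize/1 is an assumption about the surrounding code base:

Packet = ?PUBLISH_PACKET(?QOS_1, <<"t/example">>, 1, #{}, <<"hello">>),
Bin = emqx_frame:serialize(Packet).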
|
||||
|
||||
-define(PUBACK_PACKET(PacketId),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?PUBACK},
|
||||
variable = #mqtt_packet_puback{packet_id = PacketId,
|
||||
reason_code = 0}
|
||||
}).
|
||||
-define(PUBACK_PACKET(PacketId), #mqtt_packet{
|
||||
header = #mqtt_packet_header{type = ?PUBACK},
|
||||
variable = #mqtt_packet_puback{
|
||||
packet_id = PacketId,
|
||||
reason_code = 0
|
||||
}
|
||||
}).
|
||||
|
||||
-define(PUBACK_PACKET(PacketId, ReasonCode),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?PUBACK},
|
||||
variable = #mqtt_packet_puback{packet_id = PacketId,
|
||||
reason_code = ReasonCode}
|
||||
}).
|
||||
-define(PUBACK_PACKET(PacketId, ReasonCode), #mqtt_packet{
|
||||
header = #mqtt_packet_header{type = ?PUBACK},
|
||||
variable = #mqtt_packet_puback{
|
||||
packet_id = PacketId,
|
||||
reason_code = ReasonCode
|
||||
}
|
||||
}).
|
||||
|
||||
-define(PUBACK_PACKET(PacketId, ReasonCode, Properties),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?PUBACK},
|
||||
variable = #mqtt_packet_puback{packet_id = PacketId,
|
||||
reason_code = ReasonCode,
|
||||
properties = Properties}
|
||||
}).
|
||||
-define(PUBACK_PACKET(PacketId, ReasonCode, Properties), #mqtt_packet{
|
||||
header = #mqtt_packet_header{type = ?PUBACK},
|
||||
variable = #mqtt_packet_puback{
|
||||
packet_id = PacketId,
|
||||
reason_code = ReasonCode,
|
||||
properties = Properties
|
||||
}
|
||||
}).
|
||||
|
||||
-define(PUBREC_PACKET(PacketId),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?PUBREC},
|
||||
variable = #mqtt_packet_puback{packet_id = PacketId,
|
||||
reason_code = 0}
|
||||
}).
|
||||
-define(PUBREC_PACKET(PacketId), #mqtt_packet{
|
||||
header = #mqtt_packet_header{type = ?PUBREC},
|
||||
variable = #mqtt_packet_puback{
|
||||
packet_id = PacketId,
|
||||
reason_code = 0
|
||||
}
|
||||
}).
|
||||
|
||||
-define(PUBREC_PACKET(PacketId, ReasonCode),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?PUBREC},
|
||||
variable = #mqtt_packet_puback{packet_id = PacketId,
|
||||
reason_code = ReasonCode}
|
||||
}).
|
||||
-define(PUBREC_PACKET(PacketId, ReasonCode), #mqtt_packet{
|
||||
header = #mqtt_packet_header{type = ?PUBREC},
|
||||
variable = #mqtt_packet_puback{
|
||||
packet_id = PacketId,
|
||||
reason_code = ReasonCode
|
||||
}
|
||||
}).
|
||||
|
||||
-define(PUBREC_PACKET(PacketId, ReasonCode, Properties),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?PUBREC},
|
||||
variable = #mqtt_packet_puback{packet_id = PacketId,
|
||||
reason_code = ReasonCode,
|
||||
properties = Properties}
|
||||
}).
|
||||
-define(PUBREC_PACKET(PacketId, ReasonCode, Properties), #mqtt_packet{
|
||||
header = #mqtt_packet_header{type = ?PUBREC},
|
||||
variable = #mqtt_packet_puback{
|
||||
packet_id = PacketId,
|
||||
reason_code = ReasonCode,
|
||||
properties = Properties
|
||||
}
|
||||
}).
|
||||
|
||||
-define(PUBREL_PACKET(PacketId),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?PUBREL,
|
||||
qos = ?QOS_1},
|
||||
variable = #mqtt_packet_puback{packet_id = PacketId,
|
||||
reason_code = 0}
|
||||
}).
|
||||
-define(PUBREL_PACKET(PacketId), #mqtt_packet{
|
||||
header = #mqtt_packet_header{
|
||||
type = ?PUBREL,
|
||||
qos = ?QOS_1
|
||||
},
|
||||
variable = #mqtt_packet_puback{
|
||||
packet_id = PacketId,
|
||||
reason_code = 0
|
||||
}
|
||||
}).
|
||||
|
||||
-define(PUBREL_PACKET(PacketId, ReasonCode),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?PUBREL,
|
||||
qos = ?QOS_1},
|
||||
variable = #mqtt_packet_puback{packet_id = PacketId,
|
||||
reason_code = ReasonCode}
|
||||
}).
|
||||
-define(PUBREL_PACKET(PacketId, ReasonCode), #mqtt_packet{
|
||||
header = #mqtt_packet_header{
|
||||
type = ?PUBREL,
|
||||
qos = ?QOS_1
|
||||
},
|
||||
variable = #mqtt_packet_puback{
|
||||
packet_id = PacketId,
|
||||
reason_code = ReasonCode
|
||||
}
|
||||
}).
|
||||
|
||||
-define(PUBREL_PACKET(PacketId, ReasonCode, Properties),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?PUBREL,
|
||||
qos = ?QOS_1},
|
||||
variable = #mqtt_packet_puback{packet_id = PacketId,
|
||||
reason_code = ReasonCode,
|
||||
properties = Properties}
|
||||
}).
|
||||
-define(PUBREL_PACKET(PacketId, ReasonCode, Properties), #mqtt_packet{
|
||||
header = #mqtt_packet_header{
|
||||
type = ?PUBREL,
|
||||
qos = ?QOS_1
|
||||
},
|
||||
variable = #mqtt_packet_puback{
|
||||
packet_id = PacketId,
|
||||
reason_code = ReasonCode,
|
||||
properties = Properties
|
||||
}
|
||||
}).
|
||||
|
||||
-define(PUBCOMP_PACKET(PacketId),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?PUBCOMP},
|
||||
variable = #mqtt_packet_puback{packet_id = PacketId,
|
||||
reason_code = 0}
|
||||
}).
|
||||
-define(PUBCOMP_PACKET(PacketId), #mqtt_packet{
|
||||
header = #mqtt_packet_header{type = ?PUBCOMP},
|
||||
variable = #mqtt_packet_puback{
|
||||
packet_id = PacketId,
|
||||
reason_code = 0
|
||||
}
|
||||
}).
|
||||
|
||||
-define(PUBCOMP_PACKET(PacketId, ReasonCode),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?PUBCOMP},
|
||||
variable = #mqtt_packet_puback{packet_id = PacketId,
|
||||
reason_code = ReasonCode}
|
||||
}).
|
||||
-define(PUBCOMP_PACKET(PacketId, ReasonCode), #mqtt_packet{
|
||||
header = #mqtt_packet_header{type = ?PUBCOMP},
|
||||
variable = #mqtt_packet_puback{
|
||||
packet_id = PacketId,
|
||||
reason_code = ReasonCode
|
||||
}
|
||||
}).
|
||||
|
||||
-define(PUBCOMP_PACKET(PacketId, ReasonCode, Properties),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?PUBCOMP},
|
||||
variable = #mqtt_packet_puback{packet_id = PacketId,
|
||||
reason_code = ReasonCode,
|
||||
properties = Properties}
|
||||
}).
|
||||
-define(PUBCOMP_PACKET(PacketId, ReasonCode, Properties), #mqtt_packet{
|
||||
header = #mqtt_packet_header{type = ?PUBCOMP},
|
||||
variable = #mqtt_packet_puback{
|
||||
packet_id = PacketId,
|
||||
reason_code = ReasonCode,
|
||||
properties = Properties
|
||||
}
|
||||
}).
|
||||
|
||||
-define(SUBSCRIBE_PACKET(PacketId, TopicFilters),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?SUBSCRIBE,
|
||||
qos = ?QOS_1},
|
||||
variable = #mqtt_packet_subscribe{packet_id = PacketId,
|
||||
topic_filters = TopicFilters}
|
||||
}).
|
||||
-define(SUBSCRIBE_PACKET(PacketId, TopicFilters), #mqtt_packet{
|
||||
header = #mqtt_packet_header{
|
||||
type = ?SUBSCRIBE,
|
||||
qos = ?QOS_1
|
||||
},
|
||||
variable = #mqtt_packet_subscribe{
|
||||
packet_id = PacketId,
|
||||
topic_filters = TopicFilters
|
||||
}
|
||||
}).
|
||||
|
||||
-define(SUBSCRIBE_PACKET(PacketId, Properties, TopicFilters),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?SUBSCRIBE,
|
||||
qos = ?QOS_1},
|
||||
variable = #mqtt_packet_subscribe{packet_id = PacketId,
|
||||
properties = Properties,
|
||||
topic_filters = TopicFilters}
|
||||
}).
|
||||
-define(SUBSCRIBE_PACKET(PacketId, Properties, TopicFilters), #mqtt_packet{
|
||||
header = #mqtt_packet_header{
|
||||
type = ?SUBSCRIBE,
|
||||
qos = ?QOS_1
|
||||
},
|
||||
variable = #mqtt_packet_subscribe{
|
||||
packet_id = PacketId,
|
||||
properties = Properties,
|
||||
topic_filters = TopicFilters
|
||||
}
|
||||
}).
|
||||
|
||||
-define(SUBACK_PACKET(PacketId, ReasonCodes),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?SUBACK},
|
||||
variable = #mqtt_packet_suback{packet_id = PacketId,
|
||||
reason_codes = ReasonCodes}
|
||||
}).
|
||||
-define(SUBACK_PACKET(PacketId, ReasonCodes), #mqtt_packet{
|
||||
header = #mqtt_packet_header{type = ?SUBACK},
|
||||
variable = #mqtt_packet_suback{
|
||||
packet_id = PacketId,
|
||||
reason_codes = ReasonCodes
|
||||
}
|
||||
}).
|
||||
|
||||
-define(SUBACK_PACKET(PacketId, Properties, ReasonCodes),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?SUBACK},
|
||||
variable = #mqtt_packet_suback{packet_id = PacketId,
|
||||
properties = Properties,
|
||||
reason_codes = ReasonCodes}
|
||||
}).
|
||||
-define(SUBACK_PACKET(PacketId, Properties, ReasonCodes), #mqtt_packet{
|
||||
header = #mqtt_packet_header{type = ?SUBACK},
|
||||
variable = #mqtt_packet_suback{
|
||||
packet_id = PacketId,
|
||||
properties = Properties,
|
||||
reason_codes = ReasonCodes
|
||||
}
|
||||
}).
|
||||
|
||||
-define(UNSUBSCRIBE_PACKET(PacketId, TopicFilters),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?UNSUBSCRIBE,
|
||||
qos = ?QOS_1},
|
||||
variable = #mqtt_packet_unsubscribe{packet_id = PacketId,
|
||||
topic_filters = TopicFilters}
|
||||
}).
|
||||
-define(UNSUBSCRIBE_PACKET(PacketId, TopicFilters), #mqtt_packet{
|
||||
header = #mqtt_packet_header{
|
||||
type = ?UNSUBSCRIBE,
|
||||
qos = ?QOS_1
|
||||
},
|
||||
variable = #mqtt_packet_unsubscribe{
|
||||
packet_id = PacketId,
|
||||
topic_filters = TopicFilters
|
||||
}
|
||||
}).
|
||||
|
||||
-define(UNSUBSCRIBE_PACKET(PacketId, Properties, TopicFilters),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?UNSUBSCRIBE,
|
||||
qos = ?QOS_1},
|
||||
variable = #mqtt_packet_unsubscribe{packet_id = PacketId,
|
||||
properties = Properties,
|
||||
topic_filters = TopicFilters}
|
||||
}).
|
||||
-define(UNSUBSCRIBE_PACKET(PacketId, Properties, TopicFilters), #mqtt_packet{
|
||||
header = #mqtt_packet_header{
|
||||
type = ?UNSUBSCRIBE,
|
||||
qos = ?QOS_1
|
||||
},
|
||||
variable = #mqtt_packet_unsubscribe{
|
||||
packet_id = PacketId,
|
||||
properties = Properties,
|
||||
topic_filters = TopicFilters
|
||||
}
|
||||
}).
|
||||
|
||||
-define(UNSUBACK_PACKET(PacketId),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?UNSUBACK},
|
||||
variable = #mqtt_packet_unsuback{packet_id = PacketId}
|
||||
}).
|
||||
-define(UNSUBACK_PACKET(PacketId), #mqtt_packet{
|
||||
header = #mqtt_packet_header{type = ?UNSUBACK},
|
||||
variable = #mqtt_packet_unsuback{packet_id = PacketId}
|
||||
}).
|
||||
|
||||
-define(UNSUBACK_PACKET(PacketId, ReasonCodes),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?UNSUBACK},
|
||||
variable = #mqtt_packet_unsuback{packet_id = PacketId,
|
||||
reason_codes = ReasonCodes}
|
||||
}).
|
||||
-define(UNSUBACK_PACKET(PacketId, ReasonCodes), #mqtt_packet{
|
||||
header = #mqtt_packet_header{type = ?UNSUBACK},
|
||||
variable = #mqtt_packet_unsuback{
|
||||
packet_id = PacketId,
|
||||
reason_codes = ReasonCodes
|
||||
}
|
||||
}).
|
||||
|
||||
-define(UNSUBACK_PACKET(PacketId, Properties, ReasonCodes),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?UNSUBACK},
|
||||
variable = #mqtt_packet_unsuback{packet_id = PacketId,
|
||||
properties = Properties,
|
||||
reason_codes = ReasonCodes}
|
||||
}).
|
||||
-define(UNSUBACK_PACKET(PacketId, Properties, ReasonCodes), #mqtt_packet{
|
||||
header = #mqtt_packet_header{type = ?UNSUBACK},
|
||||
variable = #mqtt_packet_unsuback{
|
||||
packet_id = PacketId,
|
||||
properties = Properties,
|
||||
reason_codes = ReasonCodes
|
||||
}
|
||||
}).
|
||||
|
||||
-define(DISCONNECT_PACKET(),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?DISCONNECT},
|
||||
variable = #mqtt_packet_disconnect{reason_code = 0}
|
||||
}).
|
||||
-define(DISCONNECT_PACKET(), #mqtt_packet{
|
||||
header = #mqtt_packet_header{type = ?DISCONNECT},
|
||||
variable = #mqtt_packet_disconnect{reason_code = 0}
|
||||
}).
|
||||
|
||||
-define(DISCONNECT_PACKET(ReasonCode),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?DISCONNECT},
|
||||
variable = #mqtt_packet_disconnect{reason_code = ReasonCode}
|
||||
}).
|
||||
-define(DISCONNECT_PACKET(ReasonCode), #mqtt_packet{
|
||||
header = #mqtt_packet_header{type = ?DISCONNECT},
|
||||
variable = #mqtt_packet_disconnect{reason_code = ReasonCode}
|
||||
}).
|
||||
|
||||
-define(DISCONNECT_PACKET(ReasonCode, Properties),
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?DISCONNECT},
|
||||
variable = #mqtt_packet_disconnect{reason_code = ReasonCode,
|
||||
properties = Properties}
|
||||
}).
|
||||
-define(DISCONNECT_PACKET(ReasonCode, Properties), #mqtt_packet{
|
||||
header = #mqtt_packet_header{type = ?DISCONNECT},
|
||||
variable = #mqtt_packet_disconnect{
|
||||
reason_code = ReasonCode,
|
||||
properties = Properties
|
||||
}
|
||||
}).
|
||||
|
||||
-define(PACKET(Type), #mqtt_packet{header = #mqtt_packet_header{type = Type}}).
|
||||
|
||||
-define(SHARE, "$share").
|
||||
-define(SHARE(Group, Topic), emqx_topic:join([<<?SHARE>>, Group, Topic])).
|
||||
-define(IS_SHARE(Topic), case Topic of <<?SHARE, _/binary>> -> true; _ -> false end).
|
||||
-define(IS_SHARE(Topic),
|
||||
case Topic of
|
||||
<<?SHARE, _/binary>> -> true;
|
||||
_ -> false
|
||||
end
|
||||
).
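
A quick illustration of the shared-subscription helpers; the exact result of ?SHARE/2 depends on emqx_topic:join/1, which is assumed here to join segments with "/":

Shared = ?SHARE(<<"group1">>, <<"t/#">>),
%% expected: <<"$share/group1/t/#">>
true = ?IS_SHARE(Shared),
false = ?IS_SHARE(<<"t/#">>).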
|
||||
|
||||
-define(FRAME_PARSE_ERROR(Reason), {frame_parse_error, Reason}).
|
||||
-define(FRAME_SERIALIZE_ERROR(Reason), {frame_serialize_error, Reason}).
|
||||
|
|
|
|||
|
|
@@ -17,99 +17,99 @@
|
|||
-ifndef(EMQX_PLACEHOLDER_HRL).
|
||||
-define(EMQX_PLACEHOLDER_HRL, true).
|
||||
|
||||
-define(PH(Type), <<"${", Type/binary, "}">> ).
|
||||
-define(PH(Type), <<"${", Type/binary, "}">>).
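
?PH/1 builds the ${...} placeholder form from a binary name at runtime; the fixed PH_* macros below are pre-expanded instances of the same pattern:

Name = <<"username">>,
%% same value as ?PH_USERNAME
<<"${username}">> = ?PH(Name).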
|
||||
|
||||
%% action: publish/subscribe/all
|
||||
-define(PH_ACTION, <<"${action}">> ).
|
||||
-define(PH_ACTION, <<"${action}">>).
|
||||
|
||||
%% cert
|
||||
-define(PH_CERT_SUBJECT, <<"${cert_subject}">> ).
|
||||
-define(PH_CERT_CN_NAME, <<"${cert_common_name}">> ).
|
||||
-define(PH_CERT_SUBJECT, <<"${cert_subject}">>).
|
||||
-define(PH_CERT_CN_NAME, <<"${cert_common_name}">>).
|
||||
|
||||
%% MQTT
|
||||
-define(PH_PASSWORD, <<"${password}">> ).
|
||||
-define(PH_CLIENTID, <<"${clientid}">> ).
|
||||
-define(PH_FROM_CLIENTID, <<"${from_clientid}">> ).
|
||||
-define(PH_USERNAME, <<"${username}">> ).
|
||||
-define(PH_FROM_USERNAME, <<"${from_username}">> ).
|
||||
-define(PH_TOPIC, <<"${topic}">> ).
|
||||
-define(PH_PASSWORD, <<"${password}">>).
|
||||
-define(PH_CLIENTID, <<"${clientid}">>).
|
||||
-define(PH_FROM_CLIENTID, <<"${from_clientid}">>).
|
||||
-define(PH_USERNAME, <<"${username}">>).
|
||||
-define(PH_FROM_USERNAME, <<"${from_username}">>).
|
||||
-define(PH_TOPIC, <<"${topic}">>).
|
||||
%% MQTT payload
|
||||
-define(PH_PAYLOAD, <<"${payload}">> ).
|
||||
-define(PH_PAYLOAD, <<"${payload}">>).
|
||||
%% client IPAddress
|
||||
-define(PH_PEERHOST, <<"${peerhost}">> ).
|
||||
-define(PH_PEERHOST, <<"${peerhost}">>).
|
||||
%% ip & port
|
||||
-define(PH_HOST, <<"${host}">> ).
|
||||
-define(PH_PORT, <<"${port}">> ).
|
||||
-define(PH_HOST, <<"${host}">>).
|
||||
-define(PH_PORT, <<"${port}">>).
|
||||
%% Enumeration of message QoS 0,1,2
|
||||
-define(PH_QOS, <<"${qos}">> ).
|
||||
-define(PH_FLAGS, <<"${flags}">> ).
|
||||
-define(PH_QOS, <<"${qos}">>).
|
||||
-define(PH_FLAGS, <<"${flags}">>).
|
||||
%% Additional data related to process within the MQTT message
|
||||
-define(PH_HEADERS, <<"${headers}">> ).
|
||||
-define(PH_HEADERS, <<"${headers}">>).
|
||||
%% protocol name
|
||||
-define(PH_PROTONAME, <<"${proto_name}">> ).
|
||||
-define(PH_PROTONAME, <<"${proto_name}">>).
|
||||
%% protocol version
|
||||
-define(PH_PROTOVER, <<"${proto_ver}">> ).
|
||||
-define(PH_PROTOVER, <<"${proto_ver}">>).
|
||||
%% MQTT keepalive interval
|
||||
-define(PH_KEEPALIVE, <<"${keepalive}">> ).
|
||||
-define(PH_KEEPALIVE, <<"${keepalive}">>).
|
||||
%% MQTT clean_start
|
||||
-define(PH_CLEAR_START, <<"${clean_start}">> ).
|
||||
-define(PH_CLEAR_START, <<"${clean_start}">>).
|
||||
%% MQTT Session Expiration time
|
||||
-define(PH_EXPIRY_INTERVAL, <<"${expiry_interval}">> ).
|
||||
-define(PH_EXPIRY_INTERVAL, <<"${expiry_interval}">>).
|
||||
|
||||
%% Time when PUBLISH message reaches Broker (ms)
|
||||
-define(PH_PUBLISH_RECEIVED_AT, <<"${publish_received_at}">>).
|
||||
-define(PH_PUBLISH_RECEIVED_AT, <<"${publish_received_at}">>).
|
||||
%% Mountpoint for bridging messages
|
||||
-define(PH_MOUNTPOINT, <<"${mountpoint}">> ).
|
||||
-define(PH_MOUNTPOINT, <<"${mountpoint}">>).
|
||||
%% IPAddress and Port of terminal
|
||||
-define(PH_PEERNAME, <<"${peername}">> ).
|
||||
-define(PH_PEERNAME, <<"${peername}">>).
|
||||
%% IPAddress and Port listened by emqx
|
||||
-define(PH_SOCKNAME, <<"${sockname}">> ).
|
||||
-define(PH_SOCKNAME, <<"${sockname}">>).
|
||||
%% whether it is MQTT bridge connection
|
||||
-define(PH_IS_BRIDGE, <<"${is_bridge}">> ).
|
||||
-define(PH_IS_BRIDGE, <<"${is_bridge}">>).
|
||||
%% Terminal connection completion time (s)
|
||||
-define(PH_CONNECTED_AT, <<"${connected_at}">> ).
|
||||
-define(PH_CONNECTED_AT, <<"${connected_at}">>).
|
||||
%% Event trigger time(millisecond)
|
||||
-define(PH_TIMESTAMP, <<"${timestamp}">> ).
|
||||
-define(PH_TIMESTAMP, <<"${timestamp}">>).
|
||||
%% Terminal disconnection completion time (s)
|
||||
-define(PH_DISCONNECTED_AT, <<"${disconnected_at}">> ).
|
||||
-define(PH_DISCONNECTED_AT, <<"${disconnected_at}">>).
|
||||
|
||||
-define(PH_NODE, <<"${node}">> ).
|
||||
-define(PH_REASON, <<"${reason}">> ).
|
||||
-define(PH_NODE, <<"${node}">>).
|
||||
-define(PH_REASON, <<"${reason}">>).
|
||||
|
||||
-define(PH_ENDPOINT_NAME, <<"${endpoint_name}">> ).
|
||||
-define(PH_ENDPOINT_NAME, <<"${endpoint_name}">>).
|
||||
|
||||
%% sync change these place holder with binary def.
|
||||
-define(PH_S_ACTION, "${action}" ).
|
||||
-define(PH_S_CERT_SUBJECT, "${cert_subject}" ).
|
||||
-define(PH_S_CERT_CN_NAME, "${cert_common_name}" ).
|
||||
-define(PH_S_PASSWORD, "${password}" ).
|
||||
-define(PH_S_CLIENTID, "${clientid}" ).
|
||||
-define(PH_S_FROM_CLIENTID, "${from_clientid}" ).
|
||||
-define(PH_S_USERNAME, "${username}" ).
|
||||
-define(PH_S_FROM_USERNAME, "${from_username}" ).
|
||||
-define(PH_S_TOPIC, "${topic}" ).
|
||||
-define(PH_S_PAYLOAD, "${payload}" ).
|
||||
-define(PH_S_PEERHOST, "${peerhost}" ).
|
||||
-define(PH_S_HOST, "${host}" ).
|
||||
-define(PH_S_PORT, "${port}" ).
|
||||
-define(PH_S_QOS, "${qos}" ).
|
||||
-define(PH_S_FLAGS, "${flags}" ).
|
||||
-define(PH_S_HEADERS, "${headers}" ).
|
||||
-define(PH_S_PROTONAME, "${proto_name}" ).
|
||||
-define(PH_S_PROTOVER, "${proto_ver}" ).
|
||||
-define(PH_S_KEEPALIVE, "${keepalive}" ).
|
||||
-define(PH_S_CLEAR_START, "${clean_start}" ).
|
||||
-define(PH_S_EXPIRY_INTERVAL, "${expiry_interval}" ).
|
||||
-define(PH_S_PUBLISH_RECEIVED_AT, "${publish_received_at}" ).
|
||||
-define(PH_S_MOUNTPOINT, "${mountpoint}" ).
|
||||
-define(PH_S_PEERNAME, "${peername}" ).
|
||||
-define(PH_S_SOCKNAME, "${sockname}" ).
|
||||
-define(PH_S_IS_BRIDGE, "${is_bridge}" ).
|
||||
-define(PH_S_CONNECTED_AT, "${connected_at}" ).
|
||||
-define(PH_S_TIMESTAMP, "${timestamp}" ).
|
||||
-define(PH_S_DISCONNECTED_AT, "${disconnected_at}" ).
|
||||
-define(PH_S_NODE, "${node}" ).
|
||||
-define(PH_S_REASON, "${reason}" ).
|
||||
-define(PH_S_ENDPOINT_NAME, "${endpoint_name}" ).
|
||||
-define(PH_S_ACTION, "${action}").
|
||||
-define(PH_S_CERT_SUBJECT, "${cert_subject}").
|
||||
-define(PH_S_CERT_CN_NAME, "${cert_common_name}").
|
||||
-define(PH_S_PASSWORD, "${password}").
|
||||
-define(PH_S_CLIENTID, "${clientid}").
|
||||
-define(PH_S_FROM_CLIENTID, "${from_clientid}").
|
||||
-define(PH_S_USERNAME, "${username}").
|
||||
-define(PH_S_FROM_USERNAME, "${from_username}").
|
||||
-define(PH_S_TOPIC, "${topic}").
|
||||
-define(PH_S_PAYLOAD, "${payload}").
|
||||
-define(PH_S_PEERHOST, "${peerhost}").
|
||||
-define(PH_S_HOST, "${host}").
|
||||
-define(PH_S_PORT, "${port}").
|
||||
-define(PH_S_QOS, "${qos}").
|
||||
-define(PH_S_FLAGS, "${flags}").
|
||||
-define(PH_S_HEADERS, "${headers}").
|
||||
-define(PH_S_PROTONAME, "${proto_name}").
|
||||
-define(PH_S_PROTOVER, "${proto_ver}").
|
||||
-define(PH_S_KEEPALIVE, "${keepalive}").
|
||||
-define(PH_S_CLEAR_START, "${clean_start}").
|
||||
-define(PH_S_EXPIRY_INTERVAL, "${expiry_interval}").
|
||||
-define(PH_S_PUBLISH_RECEIVED_AT, "${publish_received_at}").
|
||||
-define(PH_S_MOUNTPOINT, "${mountpoint}").
|
||||
-define(PH_S_PEERNAME, "${peername}").
|
||||
-define(PH_S_SOCKNAME, "${sockname}").
|
||||
-define(PH_S_IS_BRIDGE, "${is_bridge}").
|
||||
-define(PH_S_CONNECTED_AT, "${connected_at}").
|
||||
-define(PH_S_TIMESTAMP, "${timestamp}").
|
||||
-define(PH_S_DISCONNECTED_AT, "${disconnected_at}").
|
||||
-define(PH_S_NODE, "${node}").
|
||||
-define(PH_S_REASON, "${reason}").
|
||||
-define(PH_S_ENDPOINT_NAME, "${endpoint_name}").
|
||||
|
||||
-endif.
|
||||
|
|
|
|||
|
|
@@ -15,66 +15,66 @@
|
|||
%%--------------------------------------------------------------------
|
||||
|
||||
%% Bad Request
|
||||
-define(BAD_REQUEST, 'BAD_REQUEST').
|
||||
-define(NOT_MATCH, 'NOT_MATCH').
|
||||
-define(BAD_REQUEST, 'BAD_REQUEST').
|
||||
-define(NOT_MATCH, 'NOT_MATCH').
|
||||
|
||||
-define(ALREADY_EXISTS, 'ALREADY_EXISTS').
|
||||
-define(BAD_CONFIG_SCHEMA, 'BAD_CONFIG_SCHEMA').
|
||||
-define(BAD_LISTENER_ID, 'BAD_LISTENER_ID').
|
||||
-define(BAD_NODE_NAME, 'BAD_NODE_NAME').
|
||||
-define(BAD_RPC, 'BAD_RPC').
|
||||
-define(BAD_TOPIC, 'BAD_TOPIC').
|
||||
-define(EXCEED_LIMIT, 'EXCEED_LIMIT').
|
||||
-define(INVALID_PARAMETER, 'INVALID_PARAMETER').
|
||||
-define(CONFLICT, 'CONFLICT').
|
||||
-define(NO_DEFAULT_VALUE, 'NO_DEFAULT_VALUE').
|
||||
-define(DEPENDENCY_EXISTS, 'DEPENDENCY_EXISTS').
|
||||
-define(MESSAGE_ID_SCHEMA_ERROR, 'MESSAGE_ID_SCHEMA_ERROR').
|
||||
-define(INVALID_ID, 'INVALID_ID').
|
||||
-define(ALREADY_EXISTS, 'ALREADY_EXISTS').
|
||||
-define(BAD_CONFIG_SCHEMA, 'BAD_CONFIG_SCHEMA').
|
||||
-define(BAD_LISTENER_ID, 'BAD_LISTENER_ID').
|
||||
-define(BAD_NODE_NAME, 'BAD_NODE_NAME').
|
||||
-define(BAD_RPC, 'BAD_RPC').
|
||||
-define(BAD_TOPIC, 'BAD_TOPIC').
|
||||
-define(EXCEED_LIMIT, 'EXCEED_LIMIT').
|
||||
-define(INVALID_PARAMETER, 'INVALID_PARAMETER').
|
||||
-define(CONFLICT, 'CONFLICT').
|
||||
-define(NO_DEFAULT_VALUE, 'NO_DEFAULT_VALUE').
|
||||
-define(DEPENDENCY_EXISTS, 'DEPENDENCY_EXISTS').
|
||||
-define(MESSAGE_ID_SCHEMA_ERROR, 'MESSAGE_ID_SCHEMA_ERROR').
|
||||
-define(INVALID_ID, 'INVALID_ID').
|
||||
|
||||
%% Resource Not Found
|
||||
-define(NOT_FOUND, 'NOT_FOUND').
|
||||
-define(CLIENTID_NOT_FOUND, 'CLIENTID_NOT_FOUND').
|
||||
-define(CLIENT_NOT_FOUND, 'CLIENT_NOT_FOUND').
|
||||
-define(MESSAGE_ID_NOT_FOUND, 'MESSAGE_ID_NOT_FOUND').
|
||||
-define(RESOURCE_NOT_FOUND, 'RESOURCE_NOT_FOUND').
|
||||
-define(TOPIC_NOT_FOUND, 'TOPIC_NOT_FOUND').
|
||||
-define(USER_NOT_FOUND, 'USER_NOT_FOUND').
|
||||
-define(NOT_FOUND, 'NOT_FOUND').
|
||||
-define(CLIENTID_NOT_FOUND, 'CLIENTID_NOT_FOUND').
|
||||
-define(CLIENT_NOT_FOUND, 'CLIENT_NOT_FOUND').
|
||||
-define(MESSAGE_ID_NOT_FOUND, 'MESSAGE_ID_NOT_FOUND').
|
||||
-define(RESOURCE_NOT_FOUND, 'RESOURCE_NOT_FOUND').
|
||||
-define(TOPIC_NOT_FOUND, 'TOPIC_NOT_FOUND').
|
||||
-define(USER_NOT_FOUND, 'USER_NOT_FOUND').
|
||||
|
||||
%% Internal error
|
||||
-define(INTERNAL_ERROR, 'INTERNAL_ERROR').
|
||||
-define(SOURCE_ERROR, 'SOURCE_ERROR').
|
||||
-define(UPDATE_FAILED, 'UPDATE_FAILED').
|
||||
-define(REST_FAILED, 'REST_FAILED').
|
||||
-define(CLIENT_NOT_RESPONSE, 'CLIENT_NOT_RESPONSE').
|
||||
-define(INTERNAL_ERROR, 'INTERNAL_ERROR').
|
||||
-define(SOURCE_ERROR, 'SOURCE_ERROR').
|
||||
-define(UPDATE_FAILED, 'UPDATE_FAILED').
|
||||
-define(REST_FAILED, 'REST_FAILED').
|
||||
-define(CLIENT_NOT_RESPONSE, 'CLIENT_NOT_RESPONSE').
|
||||
|
||||
%% All codes
|
||||
-define(ERROR_CODES,
|
||||
[ {'BAD_REQUEST', <<"Request parameters are not legal">>}
|
||||
, {'NOT_MATCH', <<"Conditions are not matched">>}
|
||||
, {'ALREADY_EXISTS', <<"Resource already existed">>}
|
||||
, {'BAD_CONFIG_SCHEMA', <<"Configuration data is not legal">>}
|
||||
, {'BAD_LISTENER_ID', <<"Bad listener ID">>}
|
||||
, {'BAD_NODE_NAME', <<"Bad Node Name">>}
|
||||
, {'BAD_RPC', <<"RPC Failed. Check the cluster status and the requested node status">>}
|
||||
, {'BAD_TOPIC', <<"Topic syntax error, Topic needs to comply with the MQTT protocol standard">>}
|
||||
, {'EXCEED_LIMIT', <<"Create resources that exceed the maximum limit or minimum limit">>}
|
||||
, {'INVALID_PARAMETER', <<"Request parameters are not legal or exceed the boundary value">>}
|
||||
, {'CONFLICT', <<"Conflicting request resources">>}
|
||||
, {'NO_DEFAULT_VALUE', <<"Request parameters do not use default values">>}
|
||||
, {'DEPENDENCY_EXISTS', <<"Resource is depended on by another resource">>}
|
||||
, {'MESSAGE_ID_SCHEMA_ERROR', <<"Message ID parsing error">>}
|
||||
, {'INVALID_ID', <<"Bad ID schema">>}
|
||||
, {'MESSAGE_ID_NOT_FOUND', <<"Message ID does not exist">>}
|
||||
, {'NOT_FOUND', <<"Resource was not found or does not exist">>}
|
||||
, {'CLIENTID_NOT_FOUND', <<"Client ID was not found or does not exist">>}
|
||||
, {'CLIENT_NOT_FOUND', <<"Client was not found or does not exist (usually not an MQTT client)">>}
|
||||
, {'RESOURCE_NOT_FOUND', <<"Resource not found">>}
|
||||
, {'TOPIC_NOT_FOUND', <<"Topic not found">>}
|
||||
, {'USER_NOT_FOUND', <<"User not found">>}
|
||||
, {'INTERNAL_ERROR', <<"Server internal error">>}
|
||||
, {'SOURCE_ERROR', <<"Source error">>}
|
||||
, {'UPDATE_FAILED', <<"Update failed">>}
|
||||
, {'REST_FAILED', <<"Reset source or config failed">>}
|
||||
, {'CLIENT_NOT_RESPONSE', <<"Client not responding">>}
|
||||
]).
|
||||
-define(ERROR_CODES, [
|
||||
{'BAD_REQUEST', <<"Request parameters are not legal">>},
|
||||
{'NOT_MATCH', <<"Conditions are not matched">>},
|
||||
{'ALREADY_EXISTS', <<"Resource already existed">>},
|
||||
{'BAD_CONFIG_SCHEMA', <<"Configuration data is not legal">>},
|
||||
{'BAD_LISTENER_ID', <<"Bad listener ID">>},
|
||||
{'BAD_NODE_NAME', <<"Bad Node Name">>},
|
||||
{'BAD_RPC', <<"RPC Failed. Check the cluster status and the requested node status">>},
|
||||
{'BAD_TOPIC', <<"Topic syntax error, Topic needs to comply with the MQTT protocol standard">>},
|
||||
{'EXCEED_LIMIT', <<"Create resources that exceed the maximum limit or minimum limit">>},
|
||||
{'INVALID_PARAMETER', <<"Request parameters are not legal or exceed the boundary value">>},
|
||||
{'CONFLICT', <<"Conflicting request resources">>},
|
||||
{'NO_DEFAULT_VALUE', <<"Request parameters do not use default values">>},
|
||||
{'DEPENDENCY_EXISTS', <<"Resource is depended on by another resource">>},
|
||||
{'MESSAGE_ID_SCHEMA_ERROR', <<"Message ID parsing error">>},
|
||||
{'INVALID_ID', <<"Bad ID schema">>},
|
||||
{'MESSAGE_ID_NOT_FOUND', <<"Message ID does not exist">>},
|
||||
{'NOT_FOUND', <<"Resource was not found or does not exist">>},
|
||||
{'CLIENTID_NOT_FOUND', <<"Client ID was not found or does not exist">>},
|
||||
{'CLIENT_NOT_FOUND', <<"Client was not found or does not exist (usually not an MQTT client)">>},
|
||||
{'RESOURCE_NOT_FOUND', <<"Resource not found">>},
|
||||
{'TOPIC_NOT_FOUND', <<"Topic not found">>},
|
||||
{'USER_NOT_FOUND', <<"User not found">>},
|
||||
{'INTERNAL_ERROR', <<"Server internal error">>},
|
||||
{'SOURCE_ERROR', <<"Source error">>},
|
||||
{'UPDATE_FAILED', <<"Update failed">>},
|
||||
{'REST_FAILED', <<"Reset source or config failed">>},
|
||||
{'CLIENT_NOT_RESPONSE', <<"Client not responding">>}
|
||||
]).
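
The codes form a proplist keyed by the atoms defined above, so a description lookup is direct, e.g.:

%% => <<"Resource was not found or does not exist">>
Descr = proplists:get_value(?NOT_FOUND, ?ERROR_CODES).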
|
||||
|
|
|
|||
|
|
@@ -19,34 +19,43 @@
|
|||
|
||||
%% structured logging
|
||||
-define(SLOG(Level, Data),
|
||||
?SLOG(Level, Data, #{})).
|
||||
?SLOG(Level, Data, #{})
|
||||
).
|
||||
|
||||
%% structured logging, meta is for handler's filter.
|
||||
-define(SLOG(Level, Data, Meta),
|
||||
%% check 'allow' here, only evaluate Data and Meta when necessary
|
||||
%% check 'allow' here, only evaluate Data and Meta when necessary
|
||||
case logger:allow(Level, ?MODULE) of
|
||||
true ->
|
||||
logger:log(Level, (Data), (Meta#{ mfa => {?MODULE, ?FUNCTION_NAME, ?FUNCTION_ARITY}
|
||||
, line => ?LINE
|
||||
}));
|
||||
logger:log(
|
||||
Level,
|
||||
(Data),
|
||||
(Meta#{
|
||||
mfa => {?MODULE, ?FUNCTION_NAME, ?FUNCTION_ARITY},
|
||||
line => ?LINE
|
||||
})
|
||||
);
|
||||
false ->
|
||||
ok
|
||||
end).
|
||||
end
|
||||
).
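
A typical call site passes a map of structured fields; the mfa and line metadata are attached by the macro itself. The field names in this sketch are illustrative:

?SLOG(warning, #{msg => "connection_rejected", reason => quota_exceeded}),
?SLOG(debug, #{msg => "will_msg_dropped"}, #{is_trace => false}).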
|
||||
|
||||
-define(TRACE_FILTER, emqx_trace_filter).
|
||||
|
||||
%% Only evaluate when necessary
|
||||
%% Always debug the trace events.
|
||||
-define(TRACE(Tag, Msg, Meta),
|
||||
begin
|
||||
-define(TRACE(Tag, Msg, Meta), begin
|
||||
case persistent_term:get(?TRACE_FILTER, undefined) of
|
||||
undefined -> ok;
|
||||
[] -> ok;
|
||||
List -> emqx_trace:log(List, Msg, Meta#{trace_tag => Tag})
|
||||
end,
|
||||
?SLOG(debug, (emqx_trace_formatter:format_meta(Meta))#{msg => Msg, tag => Tag},
|
||||
#{is_trace => false})
|
||||
end).
|
||||
?SLOG(
|
||||
debug,
|
||||
(emqx_trace_formatter:format_meta(Meta))#{msg => Msg, tag => Tag},
|
||||
#{is_trace => false}
|
||||
)
|
||||
end).
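
A sketch of a trace call inside a handler where Packet is already bound; the tag groups related trace events, and the map keys here are illustrative:

?TRACE("MQTT", "mqtt_packet_received", #{packet => Packet}).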
|
||||
|
||||
%% print to 'user' group leader
|
||||
-define(ULOG(Fmt, Args), io:format(user, Fmt, Args)).
|
||||
|
|
|
|||
|
|
@@ -14,13 +14,12 @@
|
|||
%% limitations under the License.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-type(maybe(T) :: undefined | T).
|
||||
-type maybe(T) :: undefined | T.
|
||||
|
||||
-type(startlink_ret() :: {ok, pid()} | ignore | {error, term()}).
|
||||
-type startlink_ret() :: {ok, pid()} | ignore | {error, term()}.
|
||||
|
||||
-type(ok_or_error(Reason) :: ok | {error, Reason}).
|
||||
-type ok_or_error(Reason) :: ok | {error, Reason}.
|
||||
|
||||
-type(ok_or_error(Value, Reason) :: {ok, Value} | {error, Reason}).
|
||||
|
||||
-type(mfargs() :: {module(), atom(), [term()]}).
|
||||
-type ok_or_error(Value, Reason) :: {ok, Value} | {error, Reason}.
|
||||
|
||||
-type mfargs() :: {module(), atom(), [term()]}.
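
These shared types are meant to be referenced from specs elsewhere; a hedged sketch, where the module, the function, and the "types.hrl" include path are all hypothetical:

-module(example_lookup).
-include("types.hrl").
-export([find_value/2]).

%% Returns the stored value, or undefined when the key is absent.
-spec find_value(term(), map()) -> maybe(term()).
find_value(Key, Map) ->
    maps:get(Key, Map, undefined).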
|
||||
|
|
|
|||
|
|
@@ -1,42 +1,53 @@
|
|||
%% -*- mode: erlang -*-
|
||||
|
||||
{erl_opts, [warn_unused_vars,warn_shadow_vars,warn_unused_import,
|
||||
warn_obsolete_guard,compressed]}.
|
||||
{erl_opts, [
|
||||
warn_unused_vars,
|
||||
warn_shadow_vars,
|
||||
warn_unused_import,
|
||||
warn_obsolete_guard,
|
||||
compressed
|
||||
]}.
|
||||
|
||||
{xref_checks,[undefined_function_calls,undefined_functions,locals_not_used,
|
||||
deprecated_function_calls,warnings_as_errors,deprecated_functions]}.
|
||||
{xref_checks, [
|
||||
undefined_function_calls,
|
||||
undefined_functions,
|
||||
locals_not_used,
|
||||
deprecated_function_calls,
|
||||
warnings_as_errors,
|
||||
deprecated_functions
|
||||
]}.
|
||||
|
||||
%% Deps here may duplicate with emqx.git root level rebar.config
|
||||
%% but there not be any descrpancy.
|
||||
%% This rebar.config is necessary because the app may be used as a
|
||||
%% `git_subdir` dependency in other projects.
|
||||
{deps,
|
||||
[ {lc, {git, "https://github.com/qzhuyan/lc.git", {tag, "0.1.2"}}}
|
||||
, {gproc, {git, "https://github.com/uwiger/gproc", {tag, "0.8.0"}}}
|
||||
, {typerefl, {git, "https://github.com/ieQu1/typerefl", {tag, "0.8.6"}}}
|
||||
, {jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}}
|
||||
, {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}}
|
||||
, {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.1"}}}
|
||||
, {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.12.2"}}}
|
||||
, {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}}
|
||||
, {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.26.3"}}}
|
||||
, {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}}
|
||||
, {recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}}
|
||||
, {snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "0.18.0"}}}
|
||||
]}.
|
||||
{deps, [
|
||||
{lc, {git, "https://github.com/qzhuyan/lc.git", {tag, "0.1.2"}}},
|
||||
{gproc, {git, "https://github.com/uwiger/gproc", {tag, "0.8.0"}}},
|
||||
{typerefl, {git, "https://github.com/ieQu1/typerefl", {tag, "0.8.6"}}},
|
||||
{jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}},
|
||||
{cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}},
|
||||
{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.1"}}},
|
||||
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.12.2"}}},
|
||||
{gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}},
|
||||
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.26.3"}}},
|
||||
{pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},
|
||||
{recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}},
|
||||
{snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "0.18.0"}}}
|
||||
]}.
|
||||
|
||||
{plugins, [{rebar3_proper, "0.12.1"}]}.
|
||||
{extra_src_dirs, [{"etc", [recursive]}]}.
|
||||
{profiles, [
|
||||
{test,
|
||||
[{deps,
|
||||
[ {meck, "0.9.2"}
|
||||
, {proper, "1.4.0"}
|
||||
, {bbmustache,"1.10.0"}
|
||||
, {emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.4.8"}}}
|
||||
]},
|
||||
{extra_src_dirs, [{"test",[recursive]}]}
|
||||
]}
|
||||
{test, [
|
||||
{deps, [
|
||||
{meck, "0.9.2"},
|
||||
{proper, "1.4.0"},
|
||||
{bbmustache, "1.10.0"},
|
||||
{emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.4.8"}}}
|
||||
]},
|
||||
{extra_src_dirs, [{"test", [recursive]}]}
|
||||
]}
|
||||
]}.
|
||||
|
||||
{dialyzer, [
|
||||
|
|
@ -46,5 +57,6 @@
|
|||
{plt_apps, all_apps},
|
||||
{plt_extra_apps, [hocon]},
|
||||
{statistics, true}
|
||||
]
|
||||
}.
|
||||
]}.
|
||||
|
||||
{project_plugins, [erlfmt]}.
|
||||
|
|
|
|||
|
|
@ -1,28 +1,31 @@
|
|||
%% -*- mode: erlang -*-
|
||||
{application, emqx,
|
||||
[{id, "emqx"},
|
||||
{description, "EMQX Core"},
|
||||
{vsn, "5.0.0"}, % strict semver, bump manually!
|
||||
{modules, []},
|
||||
{registered, []},
|
||||
{applications, [ kernel
|
||||
, stdlib
|
||||
, gproc
|
||||
, gen_rpc
|
||||
, mria
|
||||
, esockd
|
||||
, cowboy
|
||||
, sasl
|
||||
, os_mon
|
||||
, jiffy
|
||||
, lc
|
||||
, hocon
|
||||
]},
|
||||
{mod, {emqx_app,[]}},
|
||||
{env, []},
|
||||
{licenses, ["Apache-2.0"]},
|
||||
{maintainers, ["EMQX Team <contact@emqx.io>"]},
|
||||
{links, [{"Homepage", "https://emqx.io/"},
|
||||
{"Github", "https://github.com/emqx/emqx"}
|
||||
]}
|
||||
{application, emqx, [
|
||||
{id, "emqx"},
|
||||
{description, "EMQX Core"},
|
||||
% strict semver, bump manually!
|
||||
{vsn, "5.0.0"},
|
||||
{modules, []},
|
||||
{registered, []},
|
||||
{applications, [
|
||||
kernel,
|
||||
stdlib,
|
||||
gproc,
|
||||
gen_rpc,
|
||||
mria,
|
||||
esockd,
|
||||
cowboy,
|
||||
sasl,
|
||||
os_mon,
|
||||
jiffy,
|
||||
lc,
|
||||
hocon
|
||||
]},
|
||||
{mod, {emqx_app, []}},
|
||||
{env, []},
|
||||
{licenses, ["Apache-2.0"]},
|
||||
{maintainers, ["EMQX Team <contact@emqx.io>"]},
|
||||
{links, [
|
||||
{"Homepage", "https://emqx.io/"},
|
||||
{"Github", "https://github.com/emqx/emqx"}
|
||||
]}
|
||||
]}.
|
||||
|
|
|
|||
|
|
@ -23,49 +23,54 @@
|
|||
-elvis([{elvis_style, god_modules, disable}]).
|
||||
|
||||
%% Start/Stop the application
|
||||
-export([ start/0
|
||||
, is_running/0
|
||||
, is_running/1
|
||||
, stop/0
|
||||
]).
|
||||
-export([
|
||||
start/0,
|
||||
is_running/0,
|
||||
is_running/1,
|
||||
stop/0
|
||||
]).
|
||||
|
||||
%% PubSub API
|
||||
-export([ subscribe/1
|
||||
, subscribe/2
|
||||
, subscribe/3
|
||||
, publish/1
|
||||
, unsubscribe/1
|
||||
]).
|
||||
-export([
|
||||
subscribe/1,
|
||||
subscribe/2,
|
||||
subscribe/3,
|
||||
publish/1,
|
||||
unsubscribe/1
|
||||
]).
|
||||
|
||||
%% PubSub management API
|
||||
-export([ topics/0
|
||||
, subscriptions/1
|
||||
, subscribers/1
|
||||
, subscribed/2
|
||||
]).
|
||||
-export([
|
||||
topics/0,
|
||||
subscriptions/1,
|
||||
subscribers/1,
|
||||
subscribed/2
|
||||
]).
|
||||
|
||||
%% Hooks API
|
||||
-export([ hook/2
|
||||
, hook/3
|
||||
, hook/4
|
||||
, unhook/2
|
||||
, run_hook/2
|
||||
, run_fold_hook/3
|
||||
]).
|
||||
-export([
|
||||
hook/2,
|
||||
hook/3,
|
||||
hook/4,
|
||||
unhook/2,
|
||||
run_hook/2,
|
||||
run_fold_hook/3
|
||||
]).
|
||||
|
||||
%% Configs APIs
|
||||
-export([ get_config/1
|
||||
, get_config/2
|
||||
, get_raw_config/1
|
||||
, get_raw_config/2
|
||||
, update_config/2
|
||||
, update_config/3
|
||||
, remove_config/1
|
||||
, remove_config/2
|
||||
, reset_config/2
|
||||
, data_dir/0
|
||||
, certs_dir/0
|
||||
]).
|
||||
-export([
|
||||
get_config/1,
|
||||
get_config/2,
|
||||
get_raw_config/1,
|
||||
get_raw_config/2,
|
||||
update_config/2,
|
||||
update_config/3,
|
||||
remove_config/1,
|
||||
remove_config/2,
|
||||
reset_config/2,
|
||||
data_dir/0,
|
||||
certs_dir/0
|
||||
]).
|
||||
|
||||
-define(APP, ?MODULE).
|
||||
|
||||
|
|
@ -74,55 +79,58 @@
|
|||
%%--------------------------------------------------------------------
|
||||
|
||||
%% @doc Start emqx application
|
||||
-spec(start() -> {ok, list(atom())} | {error, term()}).
|
||||
-spec start() -> {ok, list(atom())} | {error, term()}.
|
||||
start() ->
|
||||
application:ensure_all_started(?APP).
|
||||
|
||||
%% @doc Stop emqx application.
|
||||
-spec(stop() -> ok | {error, term()}).
|
||||
-spec stop() -> ok | {error, term()}.
|
||||
stop() ->
|
||||
application:stop(?APP).
|
||||
|
||||
%% @doc Is emqx running?
|
||||
-spec(is_running(node()) -> boolean()).
|
||||
-spec is_running(node()) -> boolean().
|
||||
is_running(Node) ->
|
||||
case emqx_proto_v1:is_running(Node) of
|
||||
{badrpc, _} -> false;
|
||||
Result -> Result
|
||||
{badrpc, _} -> false;
|
||||
Result -> Result
|
||||
end.
|
||||
|
||||
%% @doc Is emqx running on this node?
|
||||
-spec(is_running() -> boolean()).
|
||||
-spec is_running() -> boolean().
|
||||
is_running() ->
|
||||
case whereis(?APP) of
|
||||
undefined -> false;
|
||||
_ -> true
|
||||
_ -> true
|
||||
end.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% PubSub API
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(subscribe(emqx_types:topic() | string()) -> ok).
|
||||
-spec subscribe(emqx_types:topic() | string()) -> ok.
|
||||
subscribe(Topic) ->
|
||||
emqx_broker:subscribe(iolist_to_binary(Topic)).
|
||||
|
||||
-spec(subscribe(emqx_types:topic() | string(), emqx_types:subid() | emqx_types:subopts()) -> ok).
|
||||
subscribe(Topic, SubId) when is_atom(SubId); is_binary(SubId)->
|
||||
-spec subscribe(emqx_types:topic() | string(), emqx_types:subid() | emqx_types:subopts()) -> ok.
|
||||
subscribe(Topic, SubId) when is_atom(SubId); is_binary(SubId) ->
|
||||
emqx_broker:subscribe(iolist_to_binary(Topic), SubId);
|
||||
subscribe(Topic, SubOpts) when is_map(SubOpts) ->
|
||||
emqx_broker:subscribe(iolist_to_binary(Topic), SubOpts).
|
||||
|
||||
-spec(subscribe(emqx_types:topic() | string(),
|
||||
emqx_types:subid() | pid(), emqx_types:subopts()) -> ok).
|
||||
-spec subscribe(
|
||||
emqx_types:topic() | string(),
|
||||
emqx_types:subid() | pid(),
|
||||
emqx_types:subopts()
|
||||
) -> ok.
|
||||
subscribe(Topic, SubId, SubOpts) when (is_atom(SubId) orelse is_binary(SubId)), is_map(SubOpts) ->
|
||||
emqx_broker:subscribe(iolist_to_binary(Topic), SubId, SubOpts).
|
||||
|
||||
-spec(publish(emqx_types:message()) -> emqx_types:publish_result()).
|
||||
-spec publish(emqx_types:message()) -> emqx_types:publish_result().
|
||||
publish(Msg) ->
|
||||
emqx_broker:publish(Msg).
|
||||
|
||||
-spec(unsubscribe(emqx_types:topic() | string()) -> ok).
|
||||
-spec unsubscribe(emqx_types:topic() | string()) -> ok.
|
||||
unsubscribe(Topic) ->
|
||||
emqx_broker:unsubscribe(iolist_to_binary(Topic)).
|
||||
|
||||
|
|
@ -130,18 +138,18 @@ unsubscribe(Topic) ->
|
|||
%% PubSub management API
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(topics() -> list(emqx_types:topic())).
|
||||
-spec topics() -> list(emqx_types:topic()).
|
||||
topics() -> emqx_router:topics().
|
||||
|
||||
-spec(subscribers(emqx_types:topic() | string()) -> [pid()]).
|
||||
-spec subscribers(emqx_types:topic() | string()) -> [pid()].
|
||||
subscribers(Topic) ->
|
||||
emqx_broker:subscribers(iolist_to_binary(Topic)).
|
||||
|
||||
-spec(subscriptions(pid()) -> [{emqx_types:topic(), emqx_types:subopts()}]).
|
||||
-spec subscriptions(pid()) -> [{emqx_types:topic(), emqx_types:subopts()}].
|
||||
subscriptions(SubPid) when is_pid(SubPid) ->
|
||||
emqx_broker:subscriptions(SubPid).
|
||||
|
||||
-spec(subscribed(pid() | emqx_types:subid(), emqx_types:topic() | string()) -> boolean()).
|
||||
-spec subscribed(pid() | emqx_types:subid(), emqx_types:topic() | string()) -> boolean().
|
||||
subscribed(SubPid, Topic) when is_pid(SubPid) ->
|
||||
emqx_broker:subscribed(SubPid, iolist_to_binary(Topic));
|
||||
subscribed(SubId, Topic) when is_atom(SubId); is_binary(SubId) ->
|
||||
|
|
@ -151,33 +159,35 @@ subscribed(SubId, Topic) when is_atom(SubId); is_binary(SubId) ->
|
|||
%% Hooks API
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(hook(emqx_hooks:hookpoint(), emqx_hooks:action()) -> ok | {error, already_exists}).
|
||||
-spec hook(emqx_hooks:hookpoint(), emqx_hooks:action()) -> ok | {error, already_exists}.
|
||||
hook(HookPoint, Action) ->
|
||||
emqx_hooks:add(HookPoint, Action).
|
||||
|
||||
-spec(hook(emqx_hooks:hookpoint(),
|
||||
emqx_hooks:action(),
|
||||
emqx_hooks:filter() | integer() | list())
|
||||
-> ok | {error, already_exists}).
|
||||
-spec hook(
|
||||
emqx_hooks:hookpoint(),
|
||||
emqx_hooks:action(),
|
||||
emqx_hooks:filter() | integer() | list()
|
||||
) ->
|
||||
ok | {error, already_exists}.
|
||||
hook(HookPoint, Action, Priority) when is_integer(Priority) ->
|
||||
emqx_hooks:add(HookPoint, Action, Priority);
|
||||
hook(HookPoint, Action, {_M, _F, _A} = Filter ) ->
|
||||
hook(HookPoint, Action, {_M, _F, _A} = Filter) ->
|
||||
emqx_hooks:add(HookPoint, Action, Filter).
|
||||
|
||||
-spec(hook(emqx_hooks:hookpoint(), emqx_hooks:action(), emqx_hooks:filter(), integer())
|
||||
-> ok | {error, already_exists}).
|
||||
-spec hook(emqx_hooks:hookpoint(), emqx_hooks:action(), emqx_hooks:filter(), integer()) ->
|
||||
ok | {error, already_exists}.
|
||||
hook(HookPoint, Action, Filter, Priority) ->
|
||||
emqx_hooks:add(HookPoint, Action, Filter, Priority).
|
||||
|
||||
-spec(unhook(emqx_hooks:hookpoint(), emqx_hooks:action() | {module(), atom()}) -> ok).
|
||||
-spec unhook(emqx_hooks:hookpoint(), emqx_hooks:action() | {module(), atom()}) -> ok.
|
||||
unhook(HookPoint, Action) ->
|
||||
emqx_hooks:del(HookPoint, Action).
|
||||
|
||||
-spec(run_hook(emqx_hooks:hookpoint(), list(any())) -> ok | stop).
|
||||
-spec run_hook(emqx_hooks:hookpoint(), list(any())) -> ok | stop.
|
||||
run_hook(HookPoint, Args) ->
|
||||
emqx_hooks:run(HookPoint, Args).
|
||||
|
||||
-spec(run_fold_hook(emqx_hooks:hookpoint(), list(any()), any()) -> any()).
|
||||
-spec run_fold_hook(emqx_hooks:hookpoint(), list(any()), any()) -> any().
|
||||
run_fold_hook(HookPoint, Args, Acc) ->
|
||||
emqx_hooks:run_fold(HookPoint, Args, Acc).
|
||||
|
||||
|
|
@ -202,12 +212,18 @@ get_raw_config(KeyPath, Default) ->
|
|||
update_config(KeyPath, UpdateReq) ->
|
||||
update_config(KeyPath, UpdateReq, #{}).
|
||||
|
||||
-spec update_config(emqx_map_lib:config_key_path(), emqx_config:update_request(),
|
||||
emqx_config:update_opts()) ->
|
||||
-spec update_config(
|
||||
emqx_map_lib:config_key_path(),
|
||||
emqx_config:update_request(),
|
||||
emqx_config:update_opts()
|
||||
) ->
|
||||
{ok, emqx_config:update_result()} | {error, emqx_config:update_error()}.
|
||||
update_config([RootName | _] = KeyPath, UpdateReq, Opts) ->
|
||||
emqx_config_handler:update_config(emqx_config:get_schema_mod(RootName), KeyPath,
|
||||
{{update, UpdateReq}, Opts}).
|
||||
emqx_config_handler:update_config(
|
||||
emqx_config:get_schema_mod(RootName),
|
||||
KeyPath,
|
||||
{{update, UpdateReq}, Opts}
|
||||
).
|
||||
|
||||
-spec remove_config(emqx_map_lib:config_key_path()) ->
|
||||
{ok, emqx_config:update_result()} | {error, emqx_config:update_error()}.
|
||||
|
|
@ -217,16 +233,22 @@ remove_config(KeyPath) ->
|
|||
-spec remove_config(emqx_map_lib:config_key_path(), emqx_config:update_opts()) ->
|
||||
{ok, emqx_config:update_result()} | {error, emqx_config:update_error()}.
|
||||
remove_config([RootName | _] = KeyPath, Opts) ->
|
||||
emqx_config_handler:update_config(emqx_config:get_schema_mod(RootName),
|
||||
KeyPath, {remove, Opts}).
|
||||
emqx_config_handler:update_config(
|
||||
emqx_config:get_schema_mod(RootName),
|
||||
KeyPath,
|
||||
{remove, Opts}
|
||||
).
|
||||
|
||||
-spec reset_config(emqx_map_lib:config_key_path(), emqx_config:update_opts()) ->
|
||||
{ok, emqx_config:update_result()} | {error, emqx_config:update_error()}.
|
||||
reset_config([RootName | _] = KeyPath, Opts) ->
|
||||
case emqx_config:get_default_value(KeyPath) of
|
||||
{ok, Default} ->
|
||||
emqx_config_handler:update_config(emqx_config:get_schema_mod(RootName), KeyPath,
|
||||
{{update, Default}, Opts});
|
||||
emqx_config_handler:update_config(
|
||||
emqx_config:get_schema_mod(RootName),
|
||||
KeyPath,
|
||||
{{update, Default}, Opts}
|
||||
);
|
||||
{error, _} = Error ->
|
||||
Error
|
||||
end.
|
||||
|
|
|
|||
|
|
@ -18,16 +18,21 @@
|
|||
|
||||
-include("emqx.hrl").
|
||||
|
||||
-export([ authenticate/1
|
||||
, authorize/3
|
||||
]).
|
||||
-export([
|
||||
authenticate/1,
|
||||
authorize/3
|
||||
]).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% APIs
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(authenticate(emqx_types:clientinfo()) ->
|
||||
{ok, map()} | {ok, map(), binary()} | {continue, map()} | {continue, binary(), map()} | {error, term()}).
|
||||
-spec authenticate(emqx_types:clientinfo()) ->
|
||||
{ok, map()}
|
||||
| {ok, map(), binary()}
|
||||
| {continue, map()}
|
||||
| {continue, binary(), map()}
|
||||
| {error, term()}.
|
||||
authenticate(Credential) ->
|
||||
case run_hooks('client.authenticate', [Credential], {ok, #{is_superuser => false}}) of
|
||||
ok ->
|
||||
|
|
@ -37,14 +42,16 @@ authenticate(Credential) ->
|
|||
end.
|
||||
|
||||
%% @doc Check Authorization
|
||||
-spec authorize(emqx_types:clientinfo(), emqx_types:pubsub(), emqx_types:topic())
|
||||
-> allow | deny.
|
||||
-spec authorize(emqx_types:clientinfo(), emqx_types:pubsub(), emqx_types:topic()) ->
|
||||
allow | deny.
|
||||
authorize(ClientInfo, PubSub, Topic) ->
|
||||
Result = case emqx_authz_cache:is_enabled() of
|
||||
true -> check_authorization_cache(ClientInfo, PubSub, Topic);
|
||||
false -> do_authorize(ClientInfo, PubSub, Topic)
|
||||
end,
|
||||
inc_acl_metrics(Result), Result.
|
||||
Result =
|
||||
case emqx_authz_cache:is_enabled() of
|
||||
true -> check_authorization_cache(ClientInfo, PubSub, Topic);
|
||||
false -> do_authorize(ClientInfo, PubSub, Topic)
|
||||
end,
|
||||
inc_acl_metrics(Result),
|
||||
Result.
|
||||
|
||||
check_authorization_cache(ClientInfo, PubSub, Topic) ->
|
||||
case emqx_authz_cache:get_authz_cache(PubSub, Topic) of
|
||||
|
|
@ -60,7 +67,7 @@ check_authorization_cache(ClientInfo, PubSub, Topic) ->
|
|||
do_authorize(ClientInfo, PubSub, Topic) ->
|
||||
NoMatch = emqx:get_config([authorization, no_match], allow),
|
||||
case run_hooks('client.authorize', [ClientInfo, PubSub, Topic], NoMatch) of
|
||||
allow -> allow;
|
||||
allow -> allow;
|
||||
_Other -> deny
|
||||
end.
|
||||
|
||||
|
|
|
|||
|
|
@ -26,44 +26,45 @@
|
|||
|
||||
-boot_mnesia({mnesia, [boot]}).
|
||||
|
||||
-export([start_link/0
|
||||
]).
|
||||
-export([start_link/0]).
|
||||
%% API
|
||||
-export([ activate/1
|
||||
, activate/2
|
||||
, activate/3
|
||||
, deactivate/1
|
||||
, deactivate/2
|
||||
, deactivate/3
|
||||
, delete_all_deactivated_alarms/0
|
||||
, get_alarms/0
|
||||
, get_alarms/1
|
||||
, format/1
|
||||
]).
|
||||
-export([
|
||||
activate/1,
|
||||
activate/2,
|
||||
activate/3,
|
||||
deactivate/1,
|
||||
deactivate/2,
|
||||
deactivate/3,
|
||||
delete_all_deactivated_alarms/0,
|
||||
get_alarms/0,
|
||||
get_alarms/1,
|
||||
format/1
|
||||
]).
|
||||
|
||||
%% gen_server callbacks
|
||||
-export([ init/1
|
||||
, handle_call/3
|
||||
, handle_cast/2
|
||||
, handle_info/2
|
||||
, terminate/2
|
||||
, code_change/3
|
||||
]).
|
||||
-export([
|
||||
init/1,
|
||||
handle_call/3,
|
||||
handle_cast/2,
|
||||
handle_info/2,
|
||||
terminate/2,
|
||||
code_change/3
|
||||
]).
|
||||
|
||||
-record(activated_alarm, {
|
||||
name :: binary() | atom(),
|
||||
details :: map() | list(),
|
||||
message :: binary(),
|
||||
activate_at :: integer()
|
||||
}).
|
||||
name :: binary() | atom(),
|
||||
details :: map() | list(),
|
||||
message :: binary(),
|
||||
activate_at :: integer()
|
||||
}).
|
||||
|
||||
-record(deactivated_alarm, {
|
||||
activate_at :: integer(),
|
||||
name :: binary() | atom(),
|
||||
details :: map() | list(),
|
||||
message :: binary(),
|
||||
deactivate_at :: integer() | infinity
|
||||
}).
|
||||
activate_at :: integer(),
|
||||
name :: binary() | atom(),
|
||||
details :: map() | list(),
|
||||
message :: binary(),
|
||||
deactivate_at :: integer() | infinity
|
||||
}).
|
||||
|
||||
-ifdef(TEST).
|
||||
-compile(export_all).
|
||||
|
|
@ -75,18 +76,26 @@
|
|||
%%--------------------------------------------------------------------
|
||||
|
||||
mnesia(boot) ->
|
||||
ok = mria:create_table(?ACTIVATED_ALARM,
|
||||
[{type, set},
|
||||
{storage, disc_copies},
|
||||
{local_content, true},
|
||||
{record_name, activated_alarm},
|
||||
{attributes, record_info(fields, activated_alarm)}]),
|
||||
ok = mria:create_table(?DEACTIVATED_ALARM,
|
||||
[{type, ordered_set},
|
||||
{storage, disc_copies},
|
||||
{local_content, true},
|
||||
{record_name, deactivated_alarm},
|
||||
{attributes, record_info(fields, deactivated_alarm)}]).
|
||||
ok = mria:create_table(
|
||||
?ACTIVATED_ALARM,
|
||||
[
|
||||
{type, set},
|
||||
{storage, disc_copies},
|
||||
{local_content, true},
|
||||
{record_name, activated_alarm},
|
||||
{attributes, record_info(fields, activated_alarm)}
|
||||
]
|
||||
),
|
||||
ok = mria:create_table(
|
||||
?DEACTIVATED_ALARM,
|
||||
[
|
||||
{type, ordered_set},
|
||||
{storage, disc_copies},
|
||||
{local_content, true},
|
||||
{record_name, deactivated_alarm},
|
||||
{attributes, record_info(fields, deactivated_alarm)}
|
||||
]
|
||||
).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% API
|
||||
|
|
@ -124,10 +133,8 @@ get_alarms() ->
|
|||
-spec get_alarms(all | activated | deactivated) -> [map()].
|
||||
get_alarms(all) ->
|
||||
gen_server:call(?MODULE, {get_alarms, all});
|
||||
|
||||
get_alarms(activated) ->
|
||||
gen_server:call(?MODULE, {get_alarms, activated});
|
||||
|
||||
get_alarms(deactivated) ->
|
||||
gen_server:call(?MODULE, {get_alarms, deactivated}).
|
||||
|
||||
|
|
@ -139,17 +146,24 @@ format(#activated_alarm{name = Name, message = Message, activate_at = At, detail
|
|||
node => node(),
|
||||
name => Name,
|
||||
message => Message,
|
||||
duration => (Now - At) div 1000, %% to millisecond
|
||||
%% to millisecond
|
||||
duration => (Now - At) div 1000,
|
||||
activate_at => to_rfc3339(At),
|
||||
details => Details
|
||||
};
|
||||
format(#deactivated_alarm{name = Name, message = Message, activate_at = At, details = Details,
|
||||
deactivate_at = DAt}) ->
|
||||
format(#deactivated_alarm{
|
||||
name = Name,
|
||||
message = Message,
|
||||
activate_at = At,
|
||||
details = Details,
|
||||
deactivate_at = DAt
|
||||
}) ->
|
||||
#{
|
||||
node => node(),
|
||||
name => Name,
|
||||
message => Message,
|
||||
duration => (DAt - At) div 1000, %% to millisecond
|
||||
%% to millisecond
|
||||
duration => (DAt - At) div 1000,
|
||||
activate_at => to_rfc3339(At),
|
||||
deactivate_at => to_rfc3339(DAt),
|
||||
details => Details
|
||||
|
|
@ -169,9 +183,11 @@ init([]) ->
|
|||
{ok, #{}, get_validity_period()}.
|
||||
|
||||
handle_call({activate_alarm, Name, Details, Message}, _From, State) ->
|
||||
Res = mria:transaction(mria:local_content_shard(),
|
||||
Res = mria:transaction(
|
||||
mria:local_content_shard(),
|
||||
fun create_activate_alarm/3,
|
||||
[Name, Details, Message]),
|
||||
[Name, Details, Message]
|
||||
),
|
||||
case Res of
|
||||
{atomic, Alarm} ->
|
||||
do_actions(activate, Alarm, emqx:get_config([alarm, actions])),
|
||||
|
|
@ -179,7 +195,6 @@ handle_call({activate_alarm, Name, Details, Message}, _From, State) ->
|
|||
{aborted, Reason} ->
|
||||
{reply, Reason, State, get_validity_period()}
|
||||
end;
|
||||
|
||||
handle_call({deactivate_alarm, Name, Details, Message}, _From, State) ->
|
||||
case mnesia:dirty_read(?ACTIVATED_ALARM, Name) of
|
||||
[] ->
|
||||
|
|
@ -188,30 +203,29 @@ handle_call({deactivate_alarm, Name, Details, Message}, _From, State) ->
|
|||
deactivate_alarm(Alarm, Details, Message),
|
||||
{reply, ok, State, get_validity_period()}
|
||||
end;
|
||||
|
||||
handle_call(delete_all_deactivated_alarms, _From, State) ->
|
||||
clear_table(?DEACTIVATED_ALARM),
|
||||
{reply, ok, State, get_validity_period()};
|
||||
|
||||
handle_call({get_alarms, all}, _From, State) ->
|
||||
{atomic, Alarms} =
|
||||
mria:ro_transaction(
|
||||
mria:local_content_shard(),
|
||||
fun() ->
|
||||
[normalize(Alarm) ||
|
||||
Alarm <- ets:tab2list(?ACTIVATED_ALARM)
|
||||
++ ets:tab2list(?DEACTIVATED_ALARM)]
|
||||
end),
|
||||
mria:local_content_shard(),
|
||||
fun() ->
|
||||
[
|
||||
normalize(Alarm)
|
||||
|| Alarm <-
|
||||
ets:tab2list(?ACTIVATED_ALARM) ++
|
||||
ets:tab2list(?DEACTIVATED_ALARM)
|
||||
]
|
||||
end
|
||||
),
|
||||
{reply, Alarms, State, get_validity_period()};
|
||||
|
||||
handle_call({get_alarms, activated}, _From, State) ->
|
||||
Alarms = [normalize(Alarm) || Alarm <- ets:tab2list(?ACTIVATED_ALARM)],
|
||||
{reply, Alarms, State, get_validity_period()};
|
||||
|
||||
handle_call({get_alarms, deactivated}, _From, State) ->
|
||||
Alarms = [normalize(Alarm) || Alarm <- ets:tab2list(?DEACTIVATED_ALARM)],
|
||||
{reply, Alarms, State, get_validity_period()};
|
||||
|
||||
handle_call(Req, From, State) ->
|
||||
?SLOG(error, #{msg => "unexpected_call", call_req => Req, from => From}),
|
||||
{reply, ignored, State, get_validity_period()}.
|
||||
|
|
@ -224,7 +238,6 @@ handle_info(timeout, State) ->
|
|||
Period = get_validity_period(),
|
||||
delete_expired_deactivated_alarms(erlang:system_time(microsecond) - Period * 1000),
|
||||
{noreply, State, Period};
|
||||
|
||||
handle_info(Info, State) ->
|
||||
?SLOG(error, #{msg => "unexpected_info", info_req => Info}),
|
||||
{noreply, State, get_validity_period()}.
|
||||
|
|
@ -247,31 +260,50 @@ create_activate_alarm(Name, Details, Message) ->
|
|||
[#activated_alarm{name = Name}] ->
|
||||
mnesia:abort({error, already_existed});
|
||||
[] ->
|
||||
Alarm = #activated_alarm{name = Name,
|
||||
Alarm = #activated_alarm{
|
||||
name = Name,
|
||||
details = Details,
|
||||
message = normalize_message(Name, iolist_to_binary(Message)),
|
||||
activate_at = erlang:system_time(microsecond)},
|
||||
activate_at = erlang:system_time(microsecond)
|
||||
},
|
||||
ok = mnesia:write(?ACTIVATED_ALARM, Alarm, write),
|
||||
Alarm
|
||||
end.
|
||||
|
||||
deactivate_alarm(#activated_alarm{activate_at = ActivateAt, name = Name,
|
||||
details = Details0, message = Msg0}, Details, Message) ->
|
||||
deactivate_alarm(
|
||||
#activated_alarm{
|
||||
activate_at = ActivateAt,
|
||||
name = Name,
|
||||
details = Details0,
|
||||
message = Msg0
|
||||
},
|
||||
Details,
|
||||
Message
|
||||
) ->
|
||||
SizeLimit = emqx:get_config([alarm, size_limit]),
|
||||
case SizeLimit > 0 andalso (mnesia:table_info(?DEACTIVATED_ALARM, size) >= SizeLimit) of
|
||||
true ->
|
||||
case mnesia:dirty_first(?DEACTIVATED_ALARM) of
|
||||
'$end_of_table' -> ok;
|
||||
ActivateAt2 ->
|
||||
mria:dirty_delete(?DEACTIVATED_ALARM, ActivateAt2)
|
||||
ActivateAt2 -> mria:dirty_delete(?DEACTIVATED_ALARM, ActivateAt2)
|
||||
end;
|
||||
false -> ok
|
||||
false ->
|
||||
ok
|
||||
end,
|
||||
HistoryAlarm = make_deactivated_alarm(ActivateAt, Name, Details0, Msg0,
|
||||
erlang:system_time(microsecond)),
|
||||
DeActAlarm = make_deactivated_alarm(ActivateAt, Name, Details,
|
||||
normalize_message(Name, iolist_to_binary(Message)),
|
||||
erlang:system_time(microsecond)),
|
||||
HistoryAlarm = make_deactivated_alarm(
|
||||
ActivateAt,
|
||||
Name,
|
||||
Details0,
|
||||
Msg0,
|
||||
erlang:system_time(microsecond)
|
||||
),
|
||||
DeActAlarm = make_deactivated_alarm(
|
||||
ActivateAt,
|
||||
Name,
|
||||
Details,
|
||||
normalize_message(Name, iolist_to_binary(Message)),
|
||||
erlang:system_time(microsecond)
|
||||
),
|
||||
mria:dirty_write(?DEACTIVATED_ALARM, HistoryAlarm),
|
||||
mria:dirty_delete(?ACTIVATED_ALARM, Name),
|
||||
do_actions(deactivate, DeActAlarm, emqx:get_config([alarm, actions])).
|
||||
|
|
@ -282,22 +314,32 @@ make_deactivated_alarm(ActivateAt, Name, Details, Message, DeActivateAt) ->
|
|||
name = Name,
|
||||
details = Details,
|
||||
message = Message,
|
||||
deactivate_at = DeActivateAt}.
|
||||
deactivate_at = DeActivateAt
|
||||
}.
|
||||
|
||||
deactivate_all_alarms() ->
|
||||
lists:foreach(
|
||||
fun(#activated_alarm{name = Name,
|
||||
details = Details,
|
||||
message = Message,
|
||||
activate_at = ActivateAt}) ->
|
||||
mria:dirty_write(?DEACTIVATED_ALARM,
|
||||
fun(
|
||||
#activated_alarm{
|
||||
name = Name,
|
||||
details = Details,
|
||||
message = Message,
|
||||
activate_at = ActivateAt
|
||||
}
|
||||
) ->
|
||||
mria:dirty_write(
|
||||
?DEACTIVATED_ALARM,
|
||||
#deactivated_alarm{
|
||||
activate_at = ActivateAt,
|
||||
name = Name,
|
||||
details = Details,
|
||||
message = Message,
|
||||
deactivate_at = erlang:system_time(microsecond)})
|
||||
end, ets:tab2list(?ACTIVATED_ALARM)),
|
||||
deactivate_at = erlang:system_time(microsecond)
|
||||
}
|
||||
)
|
||||
end,
|
||||
ets:tab2list(?ACTIVATED_ALARM)
|
||||
),
|
||||
clear_table(?ACTIVATED_ALARM).
|
||||
|
||||
%% Delete all records from the given table, ignore result.
|
||||
|
|
@ -346,8 +388,14 @@ do_actions(deactivate, Alarm = #deactivated_alarm{name = Name}, [log | More]) ->
|
|||
do_actions(Operation, Alarm, [publish | More]) ->
|
||||
Topic = topic(Operation),
|
||||
{ok, Payload} = emqx_json:safe_encode(normalize(Alarm)),
|
||||
Message = emqx_message:make(?MODULE, 0, Topic, Payload, #{sys => true},
|
||||
#{properties => #{'Content-Type' => <<"application/json">>}}),
|
||||
Message = emqx_message:make(
|
||||
?MODULE,
|
||||
0,
|
||||
Topic,
|
||||
Payload,
|
||||
#{sys => true},
|
||||
#{properties => #{'Content-Type' => <<"application/json">>}}
|
||||
),
|
||||
_ = emqx_broker:safe_publish(Message),
|
||||
do_actions(Operation, Alarm, More).
|
||||
|
||||
|
|
@ -356,28 +404,37 @@ topic(activate) ->
|
|||
topic(deactivate) ->
|
||||
emqx_topic:systop(<<"alarms/deactivate">>).
|
||||
|
||||
normalize(#activated_alarm{name = Name,
|
||||
details = Details,
|
||||
message = Message,
|
||||
activate_at = ActivateAt}) ->
|
||||
#{name => Name,
|
||||
details => Details,
|
||||
message => Message,
|
||||
activate_at => ActivateAt,
|
||||
deactivate_at => infinity,
|
||||
activated => true};
|
||||
normalize(#deactivated_alarm{activate_at = ActivateAt,
|
||||
name = Name,
|
||||
details = Details,
|
||||
message = Message,
|
||||
deactivate_at = DeactivateAt}) ->
|
||||
#{name => Name,
|
||||
details => Details,
|
||||
message => Message,
|
||||
activate_at => ActivateAt,
|
||||
deactivate_at => DeactivateAt,
|
||||
activated => false}.
|
||||
normalize(#activated_alarm{
|
||||
name = Name,
|
||||
details = Details,
|
||||
message = Message,
|
||||
activate_at = ActivateAt
|
||||
}) ->
|
||||
#{
|
||||
name => Name,
|
||||
details => Details,
|
||||
message => Message,
|
||||
activate_at => ActivateAt,
|
||||
deactivate_at => infinity,
|
||||
activated => true
|
||||
};
|
||||
normalize(#deactivated_alarm{
|
||||
activate_at = ActivateAt,
|
||||
name = Name,
|
||||
details = Details,
|
||||
message = Message,
|
||||
deactivate_at = DeactivateAt
|
||||
}) ->
|
||||
#{
|
||||
name => Name,
|
||||
details => Details,
|
||||
message => Message,
|
||||
activate_at => ActivateAt,
|
||||
deactivate_at => DeactivateAt,
|
||||
activated => false
|
||||
}.
|
||||
|
||||
normalize_message(Name, <<"">>) ->
|
||||
list_to_binary(io_lib:format("~p", [Name]));
|
||||
normalize_message(_Name, Message) -> Message.
|
||||
normalize_message(_Name, Message) ->
|
||||
Message.
|
||||
|
|
|
|||
|
|
@ -22,18 +22,19 @@
|
|||
-include("logger.hrl").
|
||||
-include_lib("lc/include/lc.hrl").
|
||||
|
||||
|
||||
%% gen_event callbacks
|
||||
-export([ init/1
|
||||
, handle_event/2
|
||||
, handle_call/2
|
||||
, handle_info/2
|
||||
, terminate/2
|
||||
]).
|
||||
-export([
|
||||
init/1,
|
||||
handle_event/2,
|
||||
handle_call/2,
|
||||
handle_info/2,
|
||||
terminate/2
|
||||
]).
|
||||
|
||||
-export([ load/0
|
||||
, unload/0
|
||||
]).
|
||||
-export([
|
||||
load/0,
|
||||
unload/0
|
||||
]).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% API
|
||||
|
|
@ -52,43 +53,44 @@ unload() ->
|
|||
|
||||
init({_Args, {alarm_handler, _ExistingAlarms}}) ->
|
||||
{ok, []};
|
||||
|
||||
init(_) ->
|
||||
{ok, []}.
|
||||
|
||||
handle_event({set_alarm, {system_memory_high_watermark, []}}, State) ->
|
||||
HighWatermark = emqx_os_mon:get_sysmem_high_watermark(),
|
||||
Message = to_bin("System memory usage is higher than ~p%", [HighWatermark]),
|
||||
emqx_alarm:activate(high_system_memory_usage,
|
||||
#{high_watermark => HighWatermark}, Message),
|
||||
emqx_alarm:activate(
|
||||
high_system_memory_usage,
|
||||
#{high_watermark => HighWatermark},
|
||||
Message
|
||||
),
|
||||
{ok, State};
|
||||
|
||||
handle_event({set_alarm, {process_memory_high_watermark, Pid}}, State) ->
|
||||
HighWatermark = emqx_os_mon:get_procmem_high_watermark(),
|
||||
Message = to_bin("Process memory usage is higher than ~p%", [HighWatermark]),
|
||||
emqx_alarm:activate(high_process_memory_usage,
|
||||
#{pid => list_to_binary(pid_to_list(Pid)),
|
||||
high_watermark => HighWatermark}, Message),
|
||||
emqx_alarm:activate(
|
||||
high_process_memory_usage,
|
||||
#{
|
||||
pid => list_to_binary(pid_to_list(Pid)),
|
||||
high_watermark => HighWatermark
|
||||
},
|
||||
Message
|
||||
),
|
||||
{ok, State};
|
||||
|
||||
handle_event({clear_alarm, system_memory_high_watermark}, State) ->
|
||||
_ = emqx_alarm:deactivate(high_system_memory_usage),
|
||||
{ok, State};
|
||||
|
||||
handle_event({clear_alarm, process_memory_high_watermark}, State) ->
|
||||
_ = emqx_alarm:deactivate(high_process_memory_usage),
|
||||
{ok, State};
|
||||
|
||||
handle_event({set_alarm, {?LC_ALARM_ID_RUNQ, Info}}, State) ->
|
||||
#{node := Node, runq_length := Len} = Info,
|
||||
Message = to_bin("VM is overloaded on node: ~p: ~p", [Node, Len]),
|
||||
emqx_alarm:activate(runq_overload, Info, Message),
|
||||
{ok, State};
|
||||
|
||||
handle_event({clear_alarm, ?LC_ALARM_ID_RUNQ}, State) ->
|
||||
_ = emqx_alarm:deactivate(runq_overload),
|
||||
{ok, State};
|
||||
|
||||
handle_event(_, State) ->
|
||||
{ok, State}.
|
||||
|
||||
|
|
|
|||
|
|
@ -18,16 +18,17 @@
|
|||
|
||||
-behaviour(application).
|
||||
|
||||
-export([ start/2
|
||||
, prep_stop/1
|
||||
, stop/1
|
||||
, get_description/0
|
||||
, get_release/0
|
||||
, set_init_config_load_done/0
|
||||
, get_init_config_load_done/0
|
||||
, set_init_tnx_id/1
|
||||
, get_init_tnx_id/0
|
||||
]).
|
||||
-export([
|
||||
start/2,
|
||||
prep_stop/1,
|
||||
stop/1,
|
||||
get_description/0,
|
||||
get_release/0,
|
||||
set_init_config_load_done/0,
|
||||
get_init_config_load_done/0,
|
||||
set_init_tnx_id/1,
|
||||
get_init_tnx_id/0
|
||||
]).
|
||||
|
||||
-include("emqx.hrl").
|
||||
-include("logger.hrl").
|
||||
|
|
@ -54,8 +55,8 @@ start(_Type, _Args) ->
|
|||
prep_stop(_State) ->
|
||||
ok = emqx_alarm_handler:unload(),
|
||||
emqx_config:remove_handlers(),
|
||||
emqx_boot:is_enabled(listeners)
|
||||
andalso emqx_listeners:stop().
|
||||
emqx_boot:is_enabled(listeners) andalso
|
||||
emqx_listeners:stop().
|
||||
|
||||
stop(_State) -> ok.
|
||||
|
||||
|
|
@ -93,14 +94,19 @@ maybe_start_listeners() ->
|
|||
|
||||
maybe_start_quicer() ->
|
||||
case is_quicer_app_present() andalso is_quic_listener_configured() of
|
||||
true -> {ok, _} = application:ensure_all_started(quicer), ok;
|
||||
false -> ok
|
||||
true ->
|
||||
{ok, _} = application:ensure_all_started(quicer),
|
||||
ok;
|
||||
false ->
|
||||
ok
|
||||
end.
|
||||
|
||||
is_quicer_app_present() ->
|
||||
case application:load(quicer) of
|
||||
ok -> true;
|
||||
{error, {already_loaded, _}} -> true;
|
||||
ok ->
|
||||
true;
|
||||
{error, {already_loaded, _}} ->
|
||||
true;
|
||||
_ ->
|
||||
?SLOG(info, #{msg => "quicer_app_not_found"}),
|
||||
false
|
||||
|
|
|
|||
|
|
@ -31,65 +31,69 @@
|
|||
-define(CONF_ROOT, ?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME_ATOM).
|
||||
|
||||
%% The authentication entrypoint.
|
||||
-export([ authenticate/2
|
||||
]).
|
||||
-export([authenticate/2]).
|
||||
|
||||
%% Authenticator manager process start/stop
|
||||
-export([ start_link/0
|
||||
, stop/0
|
||||
, get_providers/0
|
||||
]).
|
||||
-export([
|
||||
start_link/0,
|
||||
stop/0,
|
||||
get_providers/0
|
||||
]).
|
||||
|
||||
%% Authenticator management APIs
|
||||
-export([ initialize_authentication/2
|
||||
, register_provider/2
|
||||
, register_providers/1
|
||||
, deregister_provider/1
|
||||
, deregister_providers/1
|
||||
, create_chain/1
|
||||
, delete_chain/1
|
||||
, lookup_chain/1
|
||||
, list_chains/0
|
||||
, list_chain_names/0
|
||||
, create_authenticator/2
|
||||
, delete_authenticator/2
|
||||
, update_authenticator/3
|
||||
, lookup_authenticator/2
|
||||
, list_authenticators/1
|
||||
, move_authenticator/3
|
||||
]).
|
||||
-export([
|
||||
initialize_authentication/2,
|
||||
register_provider/2,
|
||||
register_providers/1,
|
||||
deregister_provider/1,
|
||||
deregister_providers/1,
|
||||
create_chain/1,
|
||||
delete_chain/1,
|
||||
lookup_chain/1,
|
||||
list_chains/0,
|
||||
list_chain_names/0,
|
||||
create_authenticator/2,
|
||||
delete_authenticator/2,
|
||||
update_authenticator/3,
|
||||
lookup_authenticator/2,
|
||||
list_authenticators/1,
|
||||
move_authenticator/3
|
||||
]).
|
||||
|
||||
%% APIs for observer built_in_database
|
||||
-export([ import_users/3
|
||||
, add_user/3
|
||||
, delete_user/3
|
||||
, update_user/4
|
||||
, lookup_user/3
|
||||
, list_users/3
|
||||
]).
|
||||
-export([
|
||||
import_users/3,
|
||||
add_user/3,
|
||||
delete_user/3,
|
||||
update_user/4,
|
||||
lookup_user/3,
|
||||
list_users/3
|
||||
]).
|
||||
|
||||
%% gen_server callbacks
|
||||
-export([ init/1
|
||||
, handle_call/3
|
||||
, handle_cast/2
|
||||
, handle_info/2
|
||||
, terminate/2
|
||||
, code_change/3
|
||||
]).
|
||||
-export([
|
||||
init/1,
|
||||
handle_call/3,
|
||||
handle_cast/2,
|
||||
handle_info/2,
|
||||
terminate/2,
|
||||
code_change/3
|
||||
]).
|
||||
|
||||
%% utility functions
|
||||
-export([ authenticator_id/1
|
||||
]).
|
||||
-export([authenticator_id/1]).
|
||||
|
||||
%% proxy callback
|
||||
-export([ pre_config_update/3
|
||||
, post_config_update/5
|
||||
]).
|
||||
-export([
|
||||
pre_config_update/3,
|
||||
post_config_update/5
|
||||
]).
|
||||
|
||||
-export_type([ authenticator_id/0
|
||||
, position/0
|
||||
, chain_name/0
|
||||
]).
|
||||
-export_type([
|
||||
authenticator_id/0,
|
||||
position/0,
|
||||
chain_name/0
|
||||
]).
|
||||
|
||||
-ifdef(TEST).
|
||||
-compile(export_all).
|
||||
|
|
@ -104,85 +108,108 @@
|
|||
-type authn_type() :: atom() | {atom(), atom()}.
|
||||
-type provider() :: module().
|
||||
|
||||
-type chain() :: #{name := chain_name(),
|
||||
authenticators := [authenticator()]}.
|
||||
-type chain() :: #{
|
||||
name := chain_name(),
|
||||
authenticators := [authenticator()]
|
||||
}.
|
||||
|
||||
-type authenticator() :: #{id := authenticator_id(),
|
||||
provider := provider(),
|
||||
enable := boolean(),
|
||||
state := map()}.
|
||||
-type authenticator() :: #{
|
||||
id := authenticator_id(),
|
||||
provider := provider(),
|
||||
enable := boolean(),
|
||||
state := map()
|
||||
}.
|
||||
|
||||
-type config() :: emqx_authentication_config:config().
|
||||
-type state() :: #{atom() => term()}.
|
||||
-type extra() :: #{is_superuser := boolean(),
|
||||
atom() => term()}.
|
||||
-type user_info() :: #{user_id := binary(),
|
||||
atom() => term()}.
|
||||
-type extra() :: #{
|
||||
is_superuser := boolean(),
|
||||
atom() => term()
|
||||
}.
|
||||
-type user_info() :: #{
|
||||
user_id := binary(),
|
||||
atom() => term()
|
||||
}.
|
||||
|
||||
%% @doc check_config takes raw config from config file,
|
||||
%% parse and validate it, and return parsed result.
|
||||
-callback check_config(config()) -> config().
|
||||
|
||||
-callback create(AuthenticatorID, Config)
|
||||
-> {ok, State}
|
||||
| {error, term()}
|
||||
when AuthenticatorID::authenticator_id(), Config::config(), State::state().
|
||||
-callback create(AuthenticatorID, Config) ->
|
||||
{ok, State}
|
||||
| {error, term()}
|
||||
when
|
||||
AuthenticatorID :: authenticator_id(), Config :: config(), State :: state().
|
||||
|
||||
-callback update(Config, State)
|
||||
-> {ok, NewState}
|
||||
| {error, term()}
|
||||
when Config::config(), State::state(), NewState::state().
|
||||
-callback update(Config, State) ->
|
||||
{ok, NewState}
|
||||
| {error, term()}
|
||||
when
|
||||
Config :: config(), State :: state(), NewState :: state().
|
||||
|
||||
-callback authenticate(Credential, State)
|
||||
-> ignore
|
||||
| {ok, Extra}
|
||||
| {ok, Extra, AuthData}
|
||||
| {continue, AuthCache}
|
||||
| {continue, AuthData, AuthCache}
|
||||
| {error, term()}
|
||||
when Credential::map(), State::state(), Extra::extra(), AuthData::binary(), AuthCache::map().
|
||||
-callback authenticate(Credential, State) ->
|
||||
ignore
|
||||
| {ok, Extra}
|
||||
| {ok, Extra, AuthData}
|
||||
| {continue, AuthCache}
|
||||
| {continue, AuthData, AuthCache}
|
||||
| {error, term()}
|
||||
when
|
||||
Credential :: map(),
|
||||
State :: state(),
|
||||
Extra :: extra(),
|
||||
AuthData :: binary(),
|
||||
AuthCache :: map().
|
||||
|
||||
-callback destroy(State)
|
||||
-> ok
|
||||
when State::state().
|
||||
-callback destroy(State) ->
|
||||
ok
|
||||
when
|
||||
State :: state().
|
||||
|
||||
-callback import_users(Filename, State)
|
||||
-> ok
|
||||
| {error, term()}
|
||||
when Filename::binary(), State::state().
|
||||
-callback import_users(Filename, State) ->
|
||||
ok
|
||||
| {error, term()}
|
||||
when
|
||||
Filename :: binary(), State :: state().
|
||||
|
||||
-callback add_user(UserInfo, State)
|
||||
-> {ok, User}
|
||||
| {error, term()}
|
||||
when UserInfo::user_info(), State::state(), User::user_info().
|
||||
-callback add_user(UserInfo, State) ->
|
||||
{ok, User}
|
||||
| {error, term()}
|
||||
when
|
||||
UserInfo :: user_info(), State :: state(), User :: user_info().
|
||||
|
||||
-callback delete_user(UserID, State)
|
||||
-> ok
|
||||
| {error, term()}
|
||||
when UserID::binary(), State::state().
|
||||
-callback delete_user(UserID, State) ->
|
||||
ok
|
||||
| {error, term()}
|
||||
when
|
||||
UserID :: binary(), State :: state().
|
||||
|
||||
-callback update_user(UserID, UserInfo, State)
|
||||
-> {ok, User}
|
||||
| {error, term()}
|
||||
when UserID::binary(), UserInfo::map(), State::state(), User::user_info().
|
||||
-callback update_user(UserID, UserInfo, State) ->
|
||||
{ok, User}
|
||||
| {error, term()}
|
||||
when
|
||||
UserID :: binary(), UserInfo :: map(), State :: state(), User :: user_info().
|
||||
|
||||
-callback lookup_user(UserID, UserInfo, State)
|
||||
-> {ok, User}
|
||||
| {error, term()}
|
||||
when UserID::binary(), UserInfo::map(), State::state(), User::user_info().
|
||||
-callback lookup_user(UserID, UserInfo, State) ->
|
||||
{ok, User}
|
||||
| {error, term()}
|
||||
when
|
||||
UserID :: binary(), UserInfo :: map(), State :: state(), User :: user_info().
|
||||
|
||||
-callback list_users(State)
|
||||
-> {ok, Users}
|
||||
when State::state(), Users::[user_info()].
|
||||
-callback list_users(State) ->
|
||||
{ok, Users}
|
||||
when
|
||||
State :: state(), Users :: [user_info()].
|
||||
|
||||
-optional_callbacks([ import_users/2
|
||||
, add_user/2
|
||||
, delete_user/2
|
||||
, update_user/3
|
||||
, lookup_user/3
|
||||
, list_users/1
|
||||
, check_config/1
|
||||
]).
|
||||
-optional_callbacks([
|
||||
import_users/2,
|
||||
add_user/2,
|
||||
delete_user/2,
|
||||
update_user/3,
|
||||
lookup_user/3,
|
||||
list_users/1,
|
||||
check_config/1
|
||||
]).
|
||||
|
||||
%%------------------------------------------------------------------------------
|
||||
%% Authenticate
|
||||
|
|
@ -235,22 +262,26 @@ authenticator_id(Config) ->
|
|||
|
||||
%% @doc Call this API to initialize authenticators implemented in another APP.
|
||||
-spec initialize_authentication(chain_name(), [config()]) -> ok.
|
||||
initialize_authentication(_, []) -> ok;
|
||||
initialize_authentication(_, []) ->
|
||||
ok;
|
||||
initialize_authentication(ChainName, AuthenticatorsConfig) ->
|
||||
_ = create_chain(ChainName),
|
||||
CheckedConfig = to_list(AuthenticatorsConfig),
|
||||
lists:foreach(fun(AuthenticatorConfig) ->
|
||||
case create_authenticator(ChainName, AuthenticatorConfig) of
|
||||
{ok, _} ->
|
||||
ok;
|
||||
{error, Reason} ->
|
||||
?SLOG(error, #{
|
||||
msg => "failed_to_create_authenticator",
|
||||
authenticator => authenticator_id(AuthenticatorConfig),
|
||||
reason => Reason
|
||||
})
|
||||
end
|
||||
end, CheckedConfig).
|
||||
lists:foreach(
|
||||
fun(AuthenticatorConfig) ->
|
||||
case create_authenticator(ChainName, AuthenticatorConfig) of
|
||||
{ok, _} ->
|
||||
ok;
|
||||
{error, Reason} ->
|
||||
?SLOG(error, #{
|
||||
msg => "failed_to_create_authenticator",
|
||||
authenticator => authenticator_id(AuthenticatorConfig),
|
||||
reason => Reason
|
||||
})
|
||||
end
|
||||
end,
|
||||
CheckedConfig
|
||||
).
|
||||
|
||||
-spec start_link() -> {ok, pid()} | ignore | {error, term()}.
|
||||
start_link() ->
|
||||
|
|
@ -392,32 +423,38 @@ init(_Opts) ->
|
|||
|
||||
handle_call(get_providers, _From, #{providers := Providers} = State) ->
|
||||
reply(Providers, State);
|
||||
handle_call({register_providers, Providers}, _From,
|
||||
#{providers := Reg0} = State) ->
|
||||
handle_call(
|
||||
{register_providers, Providers},
|
||||
_From,
|
||||
#{providers := Reg0} = State
|
||||
) ->
|
||||
case lists:filter(fun({T, _}) -> maps:is_key(T, Reg0) end, Providers) of
|
||||
[] ->
|
||||
Reg = lists:foldl(fun({AuthNType, Module}, Pin) ->
|
||||
Pin#{AuthNType => Module}
|
||||
end, Reg0, Providers),
|
||||
Reg = lists:foldl(
|
||||
fun({AuthNType, Module}, Pin) ->
|
||||
Pin#{AuthNType => Module}
|
||||
end,
|
||||
Reg0,
|
||||
Providers
|
||||
),
|
||||
reply(ok, State#{providers := Reg});
|
||||
Clashes ->
|
||||
reply({error, {authentication_type_clash, Clashes}}, State)
|
||||
end;
|
||||
|
||||
handle_call({deregister_providers, AuthNTypes}, _From, #{providers := Providers} = State) ->
|
||||
reply(ok, State#{providers := maps:without(AuthNTypes, Providers)});
|
||||
|
||||
handle_call({create_chain, Name}, _From, State) ->
|
||||
case ets:member(?CHAINS_TAB, Name) of
|
||||
true ->
|
||||
reply({error, {already_exists, {chain, Name}}}, State);
|
||||
false ->
|
||||
Chain = #chain{name = Name,
|
||||
authenticators = []},
|
||||
Chain = #chain{
|
||||
name = Name,
|
||||
authenticators = []
|
||||
},
|
||||
true = ets:insert(?CHAINS_TAB, Chain),
|
||||
reply({ok, serialize_chain(Chain)}, State)
|
||||
end;
|
||||
|
||||
handle_call({delete_chain, Name}, _From, State) ->
|
||||
case ets:lookup(?CHAINS_TAB, Name) of
|
||||
[] ->
|
||||
|
|
@ -427,59 +464,48 @@ handle_call({delete_chain, Name}, _From, State) ->
|
|||
true = ets:delete(?CHAINS_TAB, Name),
|
||||
reply(ok, maybe_unhook(State))
|
||||
end;
|
||||
|
||||
handle_call({create_authenticator, ChainName, Config}, _From, #{providers := Providers} = State) ->
|
||||
UpdateFun = fun(Chain) ->
|
||||
handle_create_authenticator(Chain, Config, Providers)
|
||||
end,
|
||||
handle_create_authenticator(Chain, Config, Providers)
|
||||
end,
|
||||
Reply = update_chain(ChainName, UpdateFun),
|
||||
reply(Reply, maybe_hook(State));
|
||||
|
||||
handle_call({delete_authenticator, ChainName, AuthenticatorID}, _From, State) ->
|
||||
UpdateFun = fun(Chain) ->
|
||||
handle_delete_authenticator(Chain, AuthenticatorID)
|
||||
end,
|
||||
handle_delete_authenticator(Chain, AuthenticatorID)
|
||||
end,
|
||||
Reply = update_chain(ChainName, UpdateFun),
|
||||
reply(Reply, maybe_unhook(State));
|
||||
|
||||
handle_call({update_authenticator, ChainName, AuthenticatorID, Config}, _From, State) ->
|
||||
UpdateFun = fun(Chain) ->
|
||||
handle_update_authenticator(Chain, AuthenticatorID, Config)
|
||||
end,
|
||||
handle_update_authenticator(Chain, AuthenticatorID, Config)
|
||||
end,
|
||||
Reply = update_chain(ChainName, UpdateFun),
|
||||
reply(Reply, State);
|
||||
|
||||
handle_call({move_authenticator, ChainName, AuthenticatorID, Position}, _From, State) ->
|
||||
UpdateFun = fun(Chain) ->
|
||||
handle_move_authenticator(Chain, AuthenticatorID, Position)
|
||||
end,
|
||||
handle_move_authenticator(Chain, AuthenticatorID, Position)
|
||||
end,
|
||||
Reply = update_chain(ChainName, UpdateFun),
|
||||
reply(Reply, State);
|
||||
|
||||
handle_call({import_users, ChainName, AuthenticatorID, Filename}, _From, State) ->
|
||||
Reply = call_authenticator(ChainName, AuthenticatorID, import_users, [Filename]),
|
||||
reply(Reply, State);
|
||||
|
||||
handle_call({add_user, ChainName, AuthenticatorID, UserInfo}, _From, State) ->
|
||||
Reply = call_authenticator(ChainName, AuthenticatorID, add_user, [UserInfo]),
|
||||
reply(Reply, State);
|
||||
|
||||
handle_call({delete_user, ChainName, AuthenticatorID, UserID}, _From, State) ->
|
||||
Reply = call_authenticator(ChainName, AuthenticatorID, delete_user, [UserID]),
|
||||
reply(Reply, State);
|
||||
|
||||
handle_call({update_user, ChainName, AuthenticatorID, UserID, NewUserInfo}, _From, State) ->
|
||||
Reply = call_authenticator(ChainName, AuthenticatorID, update_user, [UserID, NewUserInfo]),
|
||||
reply(Reply, State);
|
||||
|
||||
handle_call({lookup_user, ChainName, AuthenticatorID, UserID}, _From, State) ->
|
||||
Reply = call_authenticator(ChainName, AuthenticatorID, lookup_user, [UserID]),
|
||||
reply(Reply, State);
|
||||
|
||||
handle_call({list_users, ChainName, AuthenticatorID, FuzzyParams}, _From, State) ->
|
||||
Reply = call_authenticator(ChainName, AuthenticatorID, list_users, [FuzzyParams]),
|
||||
reply(Reply, State);
|
||||
|
||||
handle_call(Req, _From, State) ->
|
||||
?SLOG(error, #{msg => "unexpected_call", call => Req}),
|
||||
{reply, ignored, State}.
|
||||
|
|
@ -494,10 +520,15 @@ handle_info(Info, State) ->
|
|||
|
||||
terminate(Reason, _State) ->
|
||||
case Reason of
|
||||
normal -> ok;
|
||||
{shutdown, _} -> ok;
|
||||
Other -> ?SLOG(error, #{msg => "emqx_authentication_terminating",
|
||||
reason => Other})
|
||||
normal ->
|
||||
ok;
|
||||
{shutdown, _} ->
|
||||
ok;
|
||||
Other ->
|
||||
?SLOG(error, #{
|
||||
msg => "emqx_authentication_terminating",
|
||||
reason => Other
|
||||
})
|
||||
end,
|
||||
emqx_config_handler:remove_handler([?CONF_ROOT]),
|
||||
emqx_config_handler:remove_handler([listeners, '?', '?', ?CONF_ROOT]),
|
||||
|
|
@ -521,15 +552,18 @@ handle_update_authenticator(Chain, AuthenticatorID, Config) ->
|
|||
case Provider:update(Config, ST) of
|
||||
{ok, NewST} ->
|
||||
NewAuthenticator = Authenticator#authenticator{
|
||||
state = NewST,
|
||||
enable = maps:get(enable, Config)},
|
||||
state = NewST,
|
||||
enable = maps:get(enable, Config)
|
||||
},
|
||||
NewAuthenticators = replace_authenticator(
|
||||
AuthenticatorID,
|
||||
NewAuthenticator,
|
||||
Authenticators),
|
||||
AuthenticatorID,
|
||||
NewAuthenticator,
|
||||
Authenticators
|
||||
),
|
||||
true = ets:insert(
|
||||
?CHAINS_TAB,
|
||||
Chain#chain{authenticators = NewAuthenticators}),
|
||||
?CHAINS_TAB,
|
||||
Chain#chain{authenticators = NewAuthenticators}
|
||||
),
|
||||
{ok, serialize_authenticator(NewAuthenticator)};
|
||||
{error, Reason} ->
|
||||
{error, Reason}
|
||||
|
|
@ -541,8 +575,8 @@ handle_update_authenticator(Chain, AuthenticatorID, Config) ->
|
|||
|
||||
handle_delete_authenticator(Chain, AuthenticatorID) ->
|
||||
MatchFun = fun(#authenticator{id = ID}) ->
|
||||
ID =:= AuthenticatorID
|
||||
end,
|
||||
ID =:= AuthenticatorID
|
||||
end,
|
||||
case do_delete_authenticators(MatchFun, Chain) of
|
||||
[] -> {error, {not_found, {authenticator, AuthenticatorID}}};
|
||||
[AuthenticatorID] -> ok
|
||||
|
|
@ -569,9 +603,11 @@ handle_create_authenticator(Chain, Config, Providers) ->
|
|||
{ok, Authenticator} ->
|
||||
NAuthenticators =
|
||||
Authenticators ++
|
||||
[Authenticator#authenticator{enable = maps:get(enable, Config)}],
|
||||
true = ets:insert(?CHAINS_TAB,
|
||||
Chain#chain{authenticators = NAuthenticators}),
|
||||
[Authenticator#authenticator{enable = maps:get(enable, Config)}],
|
||||
true = ets:insert(
|
||||
?CHAINS_TAB,
|
||||
Chain#chain{authenticators = NAuthenticators}
|
||||
),
|
||||
{ok, serialize_authenticator(Authenticator)};
|
||||
{error, Reason} ->
|
||||
{error, Reason}
|
||||
|
|
@ -593,23 +629,28 @@ do_authenticate([#authenticator{id = ID, provider = Provider, state = State} | M
|
|||
{stop, Result}
|
||||
catch
|
||||
Class:Reason:Stacktrace ->
|
||||
?SLOG(warning, #{msg => "unexpected_error_in_authentication",
|
||||
exception => Class,
|
||||
reason => Reason,
|
||||
stacktrace => Stacktrace,
|
||||
authenticator => ID}),
|
||||
?SLOG(warning, #{
|
||||
msg => "unexpected_error_in_authentication",
|
||||
exception => Class,
|
||||
reason => Reason,
|
||||
stacktrace => Stacktrace,
|
||||
authenticator => ID
|
||||
}),
|
||||
do_authenticate(More, Credential)
|
||||
end.
|
||||
|
||||
|
||||
reply(Reply, State) ->
|
||||
{reply, Reply, State}.
|
||||
|
||||
create_chain_table() ->
|
||||
try
|
||||
_ = ets:new(?CHAINS_TAB, [named_table, set, public,
|
||||
{keypos, #chain.name},
|
||||
{read_concurrency, true}]),
|
||||
_ = ets:new(?CHAINS_TAB, [
|
||||
named_table,
|
||||
set,
|
||||
public,
|
||||
{keypos, #chain.name},
|
||||
{read_concurrency, true}
|
||||
]),
|
||||
ok
|
||||
catch
|
||||
error:badarg -> ok
|
||||
|
|
@ -629,9 +670,15 @@ global_chain(_) ->
|
|||
'unknown:global'.
|
||||
|
||||
maybe_hook(#{hooked := false} = State) ->
|
||||
case lists:any(fun(#chain{authenticators = []}) -> false;
|
||||
(_) -> true
|
||||
end, ets:tab2list(?CHAINS_TAB)) of
|
||||
case
|
||||
lists:any(
|
||||
fun
|
||||
(#chain{authenticators = []}) -> false;
|
||||
(_) -> true
|
||||
end,
|
||||
ets:tab2list(?CHAINS_TAB)
|
||||
)
|
||||
of
|
||||
true ->
|
||||
_ = emqx:hook('client.authenticate', {?MODULE, authenticate, []}),
|
||||
State#{hooked => true};
|
||||
|
|
@ -642,9 +689,15 @@ maybe_hook(State) ->
|
|||
State.
|
||||
|
||||
maybe_unhook(#{hooked := true} = State) ->
|
||||
case lists:all(fun(#chain{authenticators = []}) -> true;
|
||||
(_) -> false
|
||||
end, ets:tab2list(?CHAINS_TAB)) of
|
||||
case
|
||||
lists:all(
|
||||
fun
|
||||
(#chain{authenticators = []}) -> true;
|
||||
(_) -> false
|
||||
end,
|
||||
ets:tab2list(?CHAINS_TAB)
|
||||
)
|
||||
of
|
||||
true ->
|
||||
_ = emqx:unhook('client.authenticate', {?MODULE, authenticate, []}),
|
||||
State#{hooked => false};
|
||||
|
|
@ -661,10 +714,12 @@ do_create_authenticator(AuthenticatorID, #{enable := Enable} = Config, Providers
|
|||
Provider ->
|
||||
case Provider:create(AuthenticatorID, Config) of
|
||||
{ok, State} ->
|
||||
Authenticator = #authenticator{id = AuthenticatorID,
|
||||
provider = Provider,
|
||||
enable = Enable,
|
||||
state = State},
|
||||
Authenticator = #authenticator{
|
||||
id = AuthenticatorID,
|
||||
provider = Provider,
|
||||
enable = Enable,
|
||||
state = State
|
||||
},
|
||||
{ok, Authenticator};
|
||||
{error, Reason} ->
|
||||
{error, Reason}
|
||||
|
|
@ -675,8 +730,9 @@ do_delete_authenticators(MatchFun, #chain{authenticators = Authenticators} = Cha
|
|||
{Matching, Others} = lists:partition(MatchFun, Authenticators),
|
||||
|
||||
MatchingIDs = lists:map(
|
||||
fun(#authenticator{id = ID}) -> ID end,
|
||||
Matching),
|
||||
fun(#authenticator{id = ID}) -> ID end,
|
||||
Matching
|
||||
),
|
||||
|
||||
ok = lists:foreach(fun do_destroy_authenticator/1, Matching),
|
||||
true = ets:insert(?CHAINS_TAB, Chain#chain{authenticators = Others}),
|
||||
|
|
@ -708,8 +764,12 @@ do_move_authenticator(ID, Authenticators, Position) ->
|
|||
|
||||
insert(_, [], {_, RelatedID}, _) ->
|
||||
{error, {not_found, {authenticator, RelatedID}}};
|
||||
insert(Authenticator, [#authenticator{id = RelatedID} = Related | Rest],
|
||||
{Relative, RelatedID}, Acc) ->
|
||||
insert(
|
||||
Authenticator,
|
||||
[#authenticator{id = RelatedID} = Related | Rest],
|
||||
{Relative, RelatedID},
|
||||
Acc
|
||||
) ->
|
||||
case Relative of
|
||||
before ->
|
||||
{ok, lists:reverse(Acc) ++ [Authenticator, Related | Rest]};
|
||||
|
|
@ -744,24 +804,30 @@ call_authenticator(ChainName, AuthenticatorID, Func, Args) ->
|
|||
end,
|
||||
update_chain(ChainName, UpdateFun).
|
||||
|
||||
serialize_chain(#chain{name = Name,
|
||||
authenticators = Authenticators}) ->
|
||||
#{ name => Name
|
||||
, authenticators => serialize_authenticators(Authenticators)
|
||||
}.
|
||||
serialize_chain(#chain{
|
||||
name = Name,
|
||||
authenticators = Authenticators
|
||||
}) ->
|
||||
#{
|
||||
name => Name,
|
||||
authenticators => serialize_authenticators(Authenticators)
|
||||
}.
|
||||
|
||||
serialize_authenticators(Authenticators) ->
|
||||
[serialize_authenticator(Authenticator) || Authenticator <- Authenticators].
|
||||
|
||||
serialize_authenticator(#authenticator{id = ID,
|
||||
provider = Provider,
|
||||
enable = Enable,
|
||||
state = State}) ->
|
||||
#{ id => ID
|
||||
, provider => Provider
|
||||
, enable => Enable
|
||||
, state => State
|
||||
}.
|
||||
serialize_authenticator(#authenticator{
|
||||
id = ID,
|
||||
provider = Provider,
|
||||
enable = Enable,
|
||||
state = State
|
||||
}) ->
|
||||
#{
|
||||
id => ID,
|
||||
provider => Provider,
|
||||
enable => Enable,
|
||||
state => State
|
||||
}.
|
||||
|
||||
authn_type(#{mechanism := Mechanism, backend := Backend}) ->
|
||||
{Mechanism, Backend};
|
||||
|
|
|
|||
|
|
@ -19,13 +19,15 @@
-behaviour(emqx_config_handler).

-export([ pre_config_update/3
, post_config_update/5
]).
-export([
pre_config_update/3,
post_config_update/5
]).

-export([ authenticator_id/1
, authn_type/1
]).
-export([
authenticator_id/1,
authn_type/1
]).

-ifdef(TEST).
-export([convert_certs/2, convert_certs/3, clear_certs/2]).

@ -36,32 +38,35 @@
-include("logger.hrl").
-include("emqx_authentication.hrl").

-type parsed_config() :: #{mechanism := atom(),
backend => atom(),
atom() => term()}.
-type parsed_config() :: #{
mechanism := atom(),
backend => atom(),
atom() => term()
}.
-type raw_config() :: #{binary() => term()}.
-type config() :: parsed_config() | raw_config().

-type authenticator_id() :: emqx_authentication:authenticator_id().
-type position() :: emqx_authentication:position().
-type chain_name() :: emqx_authentication:chain_name().
-type update_request() :: {create_authenticator, chain_name(), map()}
| {delete_authenticator, chain_name(), authenticator_id()}
| {update_authenticator, chain_name(), authenticator_id(), map()}
| {move_authenticator, chain_name(), authenticator_id(), position()}.
-type update_request() ::
{create_authenticator, chain_name(), map()}
| {delete_authenticator, chain_name(), authenticator_id()}
| {update_authenticator, chain_name(), authenticator_id(), map()}
| {move_authenticator, chain_name(), authenticator_id(), position()}.

%%------------------------------------------------------------------------------
%% Callbacks of config handler
%%------------------------------------------------------------------------------

-spec pre_config_update(list(atom()), update_request(), emqx_config:raw_config())
-> {ok, map() | list()} | {error, term()}.
-spec pre_config_update(list(atom()), update_request(), emqx_config:raw_config()) ->
{ok, map() | list()} | {error, term()}.
pre_config_update(_, UpdateReq, OldConfig) ->
try do_pre_config_update(UpdateReq, to_list(OldConfig)) of
{error, Reason} -> {error, Reason};
{ok, NewConfig} -> {ok, return_map(NewConfig)}
catch
throw : Reason ->
throw:Reason ->
{error, Reason}
end.

@ -70,23 +75,29 @@ do_pre_config_update({create_authenticator, ChainName, Config}, OldConfig) ->
NConfig = convert_certs(CertsDir, Config),
{ok, OldConfig ++ [NConfig]};
do_pre_config_update({delete_authenticator, _ChainName, AuthenticatorID}, OldConfig) ->
NewConfig = lists:filter(fun(OldConfig0) ->
AuthenticatorID =/= authenticator_id(OldConfig0)
end, OldConfig),
NewConfig = lists:filter(
fun(OldConfig0) ->
AuthenticatorID =/= authenticator_id(OldConfig0)
end,
OldConfig
),
{ok, NewConfig};
do_pre_config_update({update_authenticator, ChainName, AuthenticatorID, Config}, OldConfig) ->
CertsDir = certs_dir(ChainName, AuthenticatorID),
NewConfig = lists:map(
fun(OldConfig0) ->
case AuthenticatorID =:= authenticator_id(OldConfig0) of
true -> convert_certs(CertsDir, Config, OldConfig0);
false -> OldConfig0
end
end, OldConfig),
fun(OldConfig0) ->
case AuthenticatorID =:= authenticator_id(OldConfig0) of
true -> convert_certs(CertsDir, Config, OldConfig0);
false -> OldConfig0
end
end,
OldConfig
),
{ok, NewConfig};
do_pre_config_update({move_authenticator, _ChainName, AuthenticatorID, Position}, OldConfig) ->
case split_by_id(AuthenticatorID, OldConfig) of
{error, Reason} -> {error, Reason};
{error, Reason} ->
{error, Reason};
{ok, BeforeFound, [Found | AfterFound]} ->
case Position of
?CMD_MOVE_FRONT ->
|
|
@ -110,13 +121,14 @@ do_pre_config_update({move_authenticator, _ChainName, AuthenticatorID, Position}
end
end.

-spec post_config_update(list(atom()),
update_request(),
map() | list(),
emqx_config:raw_config(),
emqx_config:app_envs()
)
-> ok | {ok, map()} | {error, term()}.
-spec post_config_update(
list(atom()),
update_request(),
map() | list(),
emqx_config:raw_config(),
emqx_config:app_envs()
) ->
ok | {ok, map()} | {error, term()}.
post_config_update(_, UpdateReq, NewConfig, OldConfig, AppEnvs) ->
do_post_config_update(UpdateReq, check_configs(to_list(NewConfig)), OldConfig, AppEnvs).

@ -124,8 +136,12 @@ do_post_config_update({create_authenticator, ChainName, Config}, NewConfig, _Old
NConfig = get_authenticator_config(authenticator_id(Config), NewConfig),
_ = emqx_authentication:create_chain(ChainName),
emqx_authentication:create_authenticator(ChainName, NConfig);
do_post_config_update({delete_authenticator, ChainName, AuthenticatorID},
_NewConfig, OldConfig, _AppEnvs) ->
do_post_config_update(
{delete_authenticator, ChainName, AuthenticatorID},
_NewConfig,
OldConfig,
_AppEnvs
) ->
case emqx_authentication:delete_authenticator(ChainName, AuthenticatorID) of
ok ->
Config = get_authenticator_config(AuthenticatorID, to_list(OldConfig)),

@ -134,16 +150,24 @@ do_post_config_update({delete_authenticator, ChainName, AuthenticatorID},
{error, Reason} ->
{error, Reason}
end;
do_post_config_update({update_authenticator, ChainName, AuthenticatorID, Config},
NewConfig, _OldConfig, _AppEnvs) ->
do_post_config_update(
{update_authenticator, ChainName, AuthenticatorID, Config},
NewConfig,
_OldConfig,
_AppEnvs
) ->
case get_authenticator_config(authenticator_id(Config), NewConfig) of
{error, not_found} ->
{error, {not_found, {authenticator, AuthenticatorID}}};
NConfig ->
emqx_authentication:update_authenticator(ChainName, AuthenticatorID, NConfig)
end;
do_post_config_update({move_authenticator, ChainName, AuthenticatorID, Position},
_NewConfig, _OldConfig, _AppEnvs) ->
do_post_config_update(
{move_authenticator, ChainName, AuthenticatorID, Position},
_NewConfig,
_OldConfig,
_AppEnvs
) ->
emqx_authentication:move_authenticator(ChainName, AuthenticatorID, Position).

check_configs(Configs) ->

@ -154,38 +178,45 @@ do_check_config(Config, Providers) ->
Type = authn_type(Config),
case maps:get(Type, Providers, false) of
false ->
?SLOG(warning, #{msg => "unknown_authn_type",
type => Type,
providers => Providers}),
?SLOG(warning, #{
msg => "unknown_authn_type",
type => Type,
providers => Providers
}),
throw({unknown_authn_type, Type});
Module ->
do_check_config(Type, Config, Module)
end.

do_check_config(Type, Config, Module) ->
F = case erlang:function_exported(Module, check_config, 1) of
F =
case erlang:function_exported(Module, check_config, 1) of
true ->
fun Module:check_config/1;
false ->
fun(C) ->
Key = list_to_binary(?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME),
AtomKey = list_to_atom(?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME),
R = hocon_tconf:check_plain(Module, #{Key => C},
#{atom_key => true}),
maps:get(AtomKey, R)
Key = list_to_binary(?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME),
AtomKey = list_to_atom(?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME),
R = hocon_tconf:check_plain(
Module,
#{Key => C},
#{atom_key => true}
),
maps:get(AtomKey, R)
end
end,
try
F(Config)
catch
C : E : S ->
?SLOG(warning, #{msg => "failed_to_check_config",
config => Config,
type => Type,
exception => C,
reason => E,
stacktrace => S
}),
C:E:S ->
?SLOG(warning, #{
msg => "failed_to_check_config",
config => Config,
type => Type,
exception => C,
reason => E,
stacktrace => S
}),
throw({bad_authenticator_config, #{type => Type, reason => E}})
end.
||||
|
|
@ -232,17 +263,23 @@ get_authenticator_config(AuthenticatorID, AuthenticatorsConfig) ->
end.

split_by_id(ID, AuthenticatorsConfig) ->
case lists:foldl(
fun(C, {P1, P2, F0}) ->
F = case ID =:= authenticator_id(C) of
true -> true;
false -> F0
end,
case F of
false -> {[C | P1], P2, F};
true -> {P1, [C | P2], F}
end
end, {[], [], false}, AuthenticatorsConfig) of
case
lists:foldl(
fun(C, {P1, P2, F0}) ->
F =
case ID =:= authenticator_id(C) of
true -> true;
false -> F0
end,
case F of
false -> {[C | P1], P2, F};
true -> {P1, [C | P2], F}
end
end,
{[], [], false},
AuthenticatorsConfig
)
of
{_, _, false} ->
{error, {not_found, {authenticator, ID}}};
{Part1, Part2, true} ->

@ -273,7 +310,7 @@ authenticator_id(_C) ->
throw({missing_parameter, #{name => mechanism}}).

%% @doc Make the authentication type.
authn_type(#{mechanism := M, backend := B}) -> {atom(M), atom(B)};
authn_type(#{mechanism := M, backend := B}) -> {atom(M), atom(B)};
authn_type(#{mechanism := M}) -> atom(M);
authn_type(#{<<"mechanism">> := M, <<"backend">> := B}) -> {atom(M), atom(B)};
authn_type(#{<<"mechanism">> := M}) -> atom(M).
|
|||
|
|
@ -34,15 +34,19 @@ start_link() ->
%%--------------------------------------------------------------------

init([]) ->
SupFlags = #{strategy => one_for_one,
intensity => 100,
period => 10},
SupFlags = #{
strategy => one_for_one,
intensity => 100,
period => 10
},

AuthN = #{id => emqx_authentication,
start => {emqx_authentication, start_link, []},
restart => permanent,
shutdown => 1000,
type => worker,
modules => [emqx_authentication]},
AuthN = #{
id => emqx_authentication,
start => {emqx_authentication, start_link, []},
restart => permanent,
shutdown => 1000,
type => worker,
modules => [emqx_authentication]
},

{ok, {SupFlags, [AuthN]}}.
|
|||
|
|
@ -18,52 +18,54 @@
-include("emqx.hrl").

-export([ list_authz_cache/0
, get_authz_cache/2
, put_authz_cache/3
, cleanup_authz_cache/0
, empty_authz_cache/0
, dump_authz_cache/0
, get_cache_max_size/0
, get_cache_ttl/0
, is_enabled/0
, drain_cache/0
, drain_cache/1
]).
-export([
list_authz_cache/0,
get_authz_cache/2,
put_authz_cache/3,
cleanup_authz_cache/0,
empty_authz_cache/0,
dump_authz_cache/0,
get_cache_max_size/0,
get_cache_ttl/0,
is_enabled/0,
drain_cache/0,
drain_cache/1
]).

%% export for test
-export([ cache_k/2
, cache_v/1
, get_cache_size/0
, get_newest_key/0
, get_oldest_key/0
]).
-export([
cache_k/2,
cache_v/1,
get_cache_size/0,
get_newest_key/0,
get_oldest_key/0
]).

-type(authz_result() :: allow | deny).
-type(system_time() :: integer()).
-type(cache_key() :: {emqx_types:pubsub(), emqx_types:topic()}).
-type(cache_val() :: {authz_result(), system_time()}).
-type authz_result() :: allow | deny.
-type system_time() :: integer().
-type cache_key() :: {emqx_types:pubsub(), emqx_types:topic()}.
-type cache_val() :: {authz_result(), system_time()}.

-type(authz_cache_entry() :: {cache_key(), cache_val()}).
-type authz_cache_entry() :: {cache_key(), cache_val()}.

%% Wrappers for key and value
cache_k(PubSub, Topic)-> {PubSub, Topic}.
cache_v(AuthzResult)-> {AuthzResult, time_now()}.
cache_k(PubSub, Topic) -> {PubSub, Topic}.
cache_v(AuthzResult) -> {AuthzResult, time_now()}.
drain_k() -> {?MODULE, drain_timestamp}.

-spec(is_enabled() -> boolean()).
-spec is_enabled() -> boolean().
is_enabled() ->
emqx:get_config([authorization, cache, enable], false).

-spec(get_cache_max_size() -> integer()).
-spec get_cache_max_size() -> integer().
get_cache_max_size() ->
emqx:get_config([authorization, cache, max_size]).

-spec(get_cache_ttl() -> integer()).
-spec get_cache_ttl() -> integer().
get_cache_ttl() ->
emqx:get_config([authorization, cache, ttl]).

-spec(list_authz_cache() -> [authz_cache_entry()]).
-spec list_authz_cache() -> [authz_cache_entry()].
list_authz_cache() ->
cleanup_authz_cache(),
map_authz_cache(fun(Cache) -> Cache end).
|
|
@ -73,23 +75,29 @@ list_authz_cache() ->
authz_result() | not_found.
get_authz_cache(PubSub, Topic) ->
case erlang:get(cache_k(PubSub, Topic)) of
undefined -> not_found;
undefined ->
not_found;
{AuthzResult, CachedAt} ->
if_expired(get_cache_ttl(), CachedAt,
fun(false) ->
AuthzResult;
(true) ->
cleanup_authz_cache(),
not_found
end)
if_expired(
get_cache_ttl(),
CachedAt,
fun
(false) ->
AuthzResult;
(true) ->
cleanup_authz_cache(),
not_found
end
)
end.

%% If the cache get full, and also the latest one
%% is expired, then delete all the cache entries
-spec put_authz_cache(emqx_types:pubsub(), emqx_types:topic(), authz_result())
-> ok.
-spec put_authz_cache(emqx_types:pubsub(), emqx_types:topic(), authz_result()) ->
ok.
put_authz_cache(PubSub, Topic, AuthzResult) ->
MaxSize = get_cache_max_size(), true = (MaxSize =/= 0),
MaxSize = get_cache_max_size(),
true = (MaxSize =/= 0),
Size = get_cache_size(),
case Size < MaxSize of
true ->

@ -97,37 +105,42 @@ put_authz_cache(PubSub, Topic, AuthzResult) ->
false ->
NewestK = get_newest_key(),
{_AuthzResult, CachedAt} = erlang:get(NewestK),
if_expired(get_cache_ttl(), CachedAt,
fun(true) ->
% all cache expired, cleanup first
empty_authz_cache(),
add_authz(PubSub, Topic, AuthzResult);
(false) ->
% cache full, perform cache replacement
evict_authz_cache(),
add_authz(PubSub, Topic, AuthzResult)
end)
if_expired(
get_cache_ttl(),
CachedAt,
fun
(true) ->
% all cache expired, cleanup first
empty_authz_cache(),
add_authz(PubSub, Topic, AuthzResult);
(false) ->
% cache full, perform cache replacement
evict_authz_cache(),
add_authz(PubSub, Topic, AuthzResult)
end
)
end.

%% delete all the authz entries
-spec(empty_authz_cache() -> ok).
-spec empty_authz_cache() -> ok.
empty_authz_cache() ->
foreach_authz_cache(fun({CacheK, _CacheV}) -> erlang:erase(CacheK) end),
set_cache_size(0),
keys_queue_set(queue:new()).

%% delete the oldest authz entry
-spec(evict_authz_cache() -> ok).
-spec evict_authz_cache() -> ok.
evict_authz_cache() ->
OldestK = keys_queue_out(),
erlang:erase(OldestK),
decr_cache_size().

%% cleanup all the expired cache entries
-spec(cleanup_authz_cache() -> ok).
-spec cleanup_authz_cache() -> ok.
cleanup_authz_cache() ->
keys_queue_set(
cleanup_authz(get_cache_ttl(), keys_queue_get())).
cleanup_authz(get_cache_ttl(), keys_queue_get())
).

get_oldest_key() ->
keys_queue_pick(queue_front()).
|
|
@ -144,8 +157,11 @@ dump_authz_cache() ->
map_authz_cache(fun(Cache) -> Cache end).

map_authz_cache(Fun) ->
[Fun(R) || R = {{SubPub, _T}, _Authz} <- erlang:get(),
SubPub =:= publish orelse SubPub =:= subscribe].
[
Fun(R)
|| R = {{SubPub, _T}, _Authz} <- erlang:get(),
SubPub =:= publish orelse SubPub =:= subscribe
].
foreach_authz_cache(Fun) ->
_ = map_authz_cache(Fun),
ok.
||||
|
|
@ -174,8 +190,7 @@ add_authz(PubSub, Topic, AuthzResult) ->
V = cache_v(AuthzResult),
case erlang:get(K) of
undefined -> add_new_authz(K, V);
{_AuthzResult, _CachedAt} ->
update_authz(K, V)
{_AuthzResult, _CachedAt} -> update_authz(K, V)
end.

add_new_authz(K, V) ->
||||
|
|
@ -191,30 +206,38 @@ cleanup_authz(TTL, KeysQ) ->
case queue:out(KeysQ) of
{{value, OldestK}, KeysQ2} ->
{_AuthzResult, CachedAt} = erlang:get(OldestK),
if_expired(TTL, CachedAt,
fun(false) -> KeysQ;
(true) ->
erlang:erase(OldestK),
decr_cache_size(),
cleanup_authz(TTL, KeysQ2)
end);
{empty, KeysQ} -> KeysQ
if_expired(
TTL,
CachedAt,
fun
(false) ->
KeysQ;
(true) ->
erlang:erase(OldestK),
decr_cache_size(),
cleanup_authz(TTL, KeysQ2)
end
);
{empty, KeysQ} ->
KeysQ
end.

incr_cache_size() ->
erlang:put(authz_cache_size, get_cache_size() + 1), ok.
erlang:put(authz_cache_size, get_cache_size() + 1),
ok.
decr_cache_size() ->
Size = get_cache_size(),
case Size > 1 of
true ->
erlang:put(authz_cache_size, Size-1);
erlang:put(authz_cache_size, Size - 1);
false ->
erlang:put(authz_cache_size, 0)
erlang:put(authz_cache_size, 0)
end,
ok.

set_cache_size(N) ->
erlang:put(authz_cache_size, N), ok.
erlang:put(authz_cache_size, N),
ok.

%%% Ordered Keys Q %%%
keys_queue_in(Key) ->
||||
|
|
@ -225,7 +248,8 @@ keys_queue_in(Key) ->
keys_queue_out() ->
case queue:out(keys_queue_get()) of
{{value, OldestK}, Q2} ->
keys_queue_set(Q2), OldestK;
keys_queue_set(Q2),
OldestK;
{empty, _Q} ->
undefined
end.
||||
|
|
@ -242,12 +266,17 @@ keys_queue_pick(Pick) ->
end.

keys_queue_remove(Key, KeysQ) ->
queue:filter(fun
(K) when K =:= Key -> false; (_) -> true
end, KeysQ).
queue:filter(
fun
(K) when K =:= Key -> false;
(_) -> true
end,
KeysQ
).

keys_queue_set(KeysQ) ->
erlang:put(authz_keys_q, KeysQ), ok.
erlang:put(authz_keys_q, KeysQ),
ok.
keys_queue_get() ->
case erlang:get(authz_keys_q) of
undefined -> queue:new();
|
|||
|
|
@ -22,7 +22,6 @@
-include("logger.hrl").
-include("types.hrl").

%% Mnesia bootstrap
-export([mnesia/1]).
||||
|
|
@ -30,23 +29,25 @@
-export([start_link/0, stop/0]).

-export([ check/1
, create/1
, look_up/1
, delete/1
, info/1
, format/1
, parse/1
]).
-export([
check/1,
create/1,
look_up/1,
delete/1,
info/1,
format/1,
parse/1
]).

%% gen_server callbacks
-export([ init/1
, handle_call/3
, handle_cast/2
, handle_info/2
, terminate/2
, code_change/3
]).
-export([
init/1,
handle_call/3,
handle_cast/2,
handle_info/2,
terminate/2,
code_change/3
]).

-elvis([{elvis_style, state_record_and_type, disable}]).
||||
|
|
@ -63,68 +64,71 @@
mnesia(boot) ->
ok = mria:create_table(?BANNED_TAB, [
{type, set},
{rlog_shard, ?COMMON_SHARD},
{storage, disc_copies},
{record_name, banned},
{attributes, record_info(fields, banned)},
{storage_properties, [{ets, [{read_concurrency, true}]}]}]).
{type, set},
{rlog_shard, ?COMMON_SHARD},
{storage, disc_copies},
{record_name, banned},
{attributes, record_info(fields, banned)},
{storage_properties, [{ets, [{read_concurrency, true}]}]}
]).

%% @doc Start the banned server.
-spec(start_link() -> startlink_ret()).
-spec start_link() -> startlink_ret().
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%% for tests
-spec(stop() -> ok).
-spec stop() -> ok.
stop() -> gen_server:stop(?MODULE).

-spec(check(emqx_types:clientinfo()) -> boolean()).
-spec check(emqx_types:clientinfo()) -> boolean().
check(ClientInfo) ->
do_check({clientid, maps:get(clientid, ClientInfo, undefined)})
orelse do_check({username, maps:get(username, ClientInfo, undefined)})
orelse do_check({peerhost, maps:get(peerhost, ClientInfo, undefined)}).
do_check({clientid, maps:get(clientid, ClientInfo, undefined)}) orelse
do_check({username, maps:get(username, ClientInfo, undefined)}) orelse
do_check({peerhost, maps:get(peerhost, ClientInfo, undefined)}).

do_check({_, undefined}) ->
false;
do_check(Who) when is_tuple(Who) ->
case mnesia:dirty_read(?BANNED_TAB, Who) of
[] -> false;
[#banned{until = Until}] ->
Until > erlang:system_time(second)
[#banned{until = Until}] -> Until > erlang:system_time(second)
end.

format(#banned{who = Who0,
by = By,
reason = Reason,
at = At,
until = Until}) ->
format(#banned{
who = Who0,
by = By,
reason = Reason,
at = At,
until = Until
}) ->
{As, Who} = maybe_format_host(Who0),
#{
as => As,
who => Who,
by => By,
as => As,
who => Who,
by => By,
reason => Reason,
at => to_rfc3339(At),
until => to_rfc3339(Until)
at => to_rfc3339(At),
until => to_rfc3339(Until)
}.

parse(Params) ->
case pares_who(Params) of
{error, Reason} -> {error, Reason};
Who ->
By = maps:get(<<"by">>, Params, <<"mgmt_api">>),
{error, Reason} ->
{error, Reason};
Who ->
By = maps:get(<<"by">>, Params, <<"mgmt_api">>),
Reason = maps:get(<<"reason">>, Params, <<"">>),
At = maps:get(<<"at">>, Params, erlang:system_time(second)),
Until = maps:get(<<"until">>, Params, At + 5 * 60),
At = maps:get(<<"at">>, Params, erlang:system_time(second)),
Until = maps:get(<<"until">>, Params, At + 5 * 60),
case Until > erlang:system_time(second) of
true ->
#banned{
who = Who,
by = By,
who = Who,
by = By,
reason = Reason,
at = At,
until = Until
at = At,
until = Until
};
false ->
ErrorReason =
||||
|
|
@ -151,13 +155,15 @@ maybe_format_host({As, Who}) ->
to_rfc3339(Timestamp) ->
list_to_binary(calendar:system_time_to_rfc3339(Timestamp, [{unit, second}])).

-spec(create(emqx_types:banned() | map()) ->
{ok, emqx_types:banned()} | {error, {already_exist, emqx_types:banned()}}).
create(#{who := Who,
by := By,
reason := Reason,
at := At,
until := Until}) ->
-spec create(emqx_types:banned() | map()) ->
{ok, emqx_types:banned()} | {error, {already_exist, emqx_types:banned()}}.
create(#{
who := Who,
by := By,
reason := Reason,
at := At,
until := Until
}) ->
Banned = #banned{
who = Who,
by = By,
||||
|
|
@ -166,8 +172,7 @@ create(#{who := Who,
until = Until
},
create(Banned);

create(Banned = #banned{who = Who}) ->
create(Banned = #banned{who = Who}) ->
case look_up(Who) of
[] ->
mria:dirty_write(?BANNED_TAB, Banned),
||||
|
|
@ -176,8 +181,10 @@ create(Banned = #banned{who = Who}) ->
%% Don't support shorten or extend the until time by overwrite.
%% We don't support update api yet, user must delete then create new one.
case Until > erlang:system_time(second) of
true -> {error, {already_exist, OldBanned}};
false -> %% overwrite expired one is ok.
true ->
{error, {already_exist, OldBanned}};
%% overwrite expired one is ok.
false ->
mria:dirty_write(?BANNED_TAB, Banned),
{ok, Banned}
end
||||
|
|
@ -188,10 +195,12 @@ look_up(Who) when is_map(Who) ->
look_up(Who) ->
mnesia:dirty_read(?BANNED_TAB, Who).

-spec(delete({clientid, emqx_types:clientid()}
| {username, emqx_types:username()}
| {peerhost, emqx_types:peerhost()}) -> ok).
delete(Who) when is_map(Who)->
-spec delete(
{clientid, emqx_types:clientid()}
| {username, emqx_types:username()}
| {peerhost, emqx_types:peerhost()}
) -> ok.
delete(Who) when is_map(Who) ->
delete(pares_who(Who));
delete(Who) ->
mria:dirty_delete(?BANNED_TAB, Who).
||||
|
|
@ -217,7 +226,6 @@ handle_cast(Msg, State) ->
handle_info({timeout, TRef, expire}, State = #{expiry_timer := TRef}) ->
_ = mria:transaction(?COMMON_SHARD, fun expire_banned_items/1, [erlang:system_time(second)]),
{noreply, ensure_expiry_timer(State), hibernate};
handle_info(Info, State) ->
?SLOG(error, #{msg => "unexpected_info", info => Info}),
{noreply, State}.
||||
|
|
@ -242,7 +250,12 @@ ensure_expiry_timer(State) ->

expire_banned_items(Now) ->
mnesia:foldl(
fun(B = #banned{until = Until}, _Acc) when Until < Now ->
mnesia:delete_object(?BANNED_TAB, B, sticky_write);
(_, _Acc) -> ok
end, ok, ?BANNED_TAB).
fun
(B = #banned{until = Until}, _Acc) when Until < Now ->
mnesia:delete_object(?BANNED_TAB, B, sticky_write);
(_, _Acc) ->
ok
end,
ok,
?BANNED_TAB
).
|
|||
|
|
@ -17,9 +17,10 @@
-module(emqx_base62).

%% APIs
-export([ encode/1
, decode/1
]).
-export([
encode/1,
decode/1
]).

%%--------------------------------------------------------------------
%% APIs
||||
|
|
@ -29,7 +30,7 @@
-spec encode(string() | integer() | binary()) -> binary().
encode(I) when is_integer(I) ->
encode(integer_to_binary(I));
encode(S) when is_list(S)->
encode(S) when is_list(S) ->
encode(unicode:characters_to_binary(S));
encode(B) when is_binary(B) ->
encode(B, <<>>).
||||
|
|
@ -47,21 +48,22 @@ decode(B) when is_binary(B) ->
encode(<<Index1:6, Index2:6, Index3:6, Index4:6, Rest/binary>>, Acc) ->
CharList = [encode_char(Index1), encode_char(Index2), encode_char(Index3), encode_char(Index4)],
NewAcc = <<Acc/binary,(iolist_to_binary(CharList))/binary>>,
NewAcc = <<Acc/binary, (iolist_to_binary(CharList))/binary>>,
encode(Rest, NewAcc);
encode(<<Index1:6, Index2:6, Index3:4>>, Acc) ->
CharList = [encode_char(Index1), encode_char(Index2), encode_char(Index3)],
NewAcc = <<Acc/binary,(iolist_to_binary(CharList))/binary>>,
NewAcc = <<Acc/binary, (iolist_to_binary(CharList))/binary>>,
encode(<<>>, NewAcc);
encode(<<Index1:6, Index2:2>>, Acc) ->
CharList = [encode_char(Index1), encode_char(Index2)],
NewAcc = <<Acc/binary,(iolist_to_binary(CharList))/binary>>,
NewAcc = <<Acc/binary, (iolist_to_binary(CharList))/binary>>,
encode(<<>>, NewAcc);
encode(<<>>, Acc) ->
Acc.

decode(<<Head:8, Rest/binary>>, Acc)
when bit_size(Rest) >= 8->
decode(<<Head:8, Rest/binary>>, Acc) when
bit_size(Rest) >= 8
->
case Head == $9 of
true ->
<<Head1:8, Rest1/binary>> = Rest,
||||
|
|
@ -85,7 +87,6 @@ decode(<<Head:8, Rest/binary>>, Acc) ->
decode(<<>>, Acc) ->
Acc.

encode_char(I) when I < 26 ->
$A + I;
encode_char(I) when I < 52 ->
||||
|
|
@ -97,9 +98,9 @@ encode_char(I) ->
decode_char(I) when I >= $a andalso I =< $z ->
I + 26 - $a;
decode_char(I) when I >= $0 andalso I =< $8->
decode_char(I) when I >= $0 andalso I =< $8 ->
I + 52 - $0;
decode_char(I) when I >= $A andalso I =< $Z->
decode_char(I) when I >= $A andalso I =< $Z ->
I - $A.

decode_char(9, I) ->
|
|||
|
|
@ -17,62 +17,69 @@
-module(emqx_batch).

%% APIs
-export([ init/1
, push/2
, commit/1
, size/1
, items/1
]).
-export([
init/1,
push/2,
commit/1,
size/1,
items/1
]).

-export_type([options/0, batch/0]).

-record(batch, {
batch_size :: non_neg_integer(),
batch_q :: list(any()),
linger_ms :: pos_integer(),
linger_timer :: reference() | undefined,
commit_fun :: function()
}).
batch_size :: non_neg_integer(),
batch_q :: list(any()),
linger_ms :: pos_integer(),
linger_timer :: reference() | undefined,
commit_fun :: function()
}).

-type(options() :: #{
batch_size => non_neg_integer(),
linger_ms => pos_integer(),
commit_fun := function()
}).
-type options() :: #{
batch_size => non_neg_integer(),
linger_ms => pos_integer(),
commit_fun := function()
}.

-opaque(batch() :: #batch{}).
-opaque batch() :: #batch{}.

%%--------------------------------------------------------------------
%% APIs
%%--------------------------------------------------------------------

-spec(init(options()) -> batch()).
-spec init(options()) -> batch().
init(Opts) when is_map(Opts) ->
#batch{batch_size = maps:get(batch_size, Opts, 1000),
batch_q = [],
linger_ms = maps:get(linger_ms, Opts, 1000),
commit_fun = maps:get(commit_fun, Opts)}.
#batch{
batch_size = maps:get(batch_size, Opts, 1000),
batch_q = [],
linger_ms = maps:get(linger_ms, Opts, 1000),
commit_fun = maps:get(commit_fun, Opts)
}.

-spec(push(any(), batch()) -> batch()).
push(El, Batch = #batch{batch_q = Q,
linger_ms = Ms,
linger_timer = undefined})
when length(Q) == 0 ->
-spec push(any(), batch()) -> batch().
push(
El,
Batch = #batch{
batch_q = Q,
linger_ms = Ms,
linger_timer = undefined
}
) when
length(Q) == 0
->
TRef = erlang:send_after(Ms, self(), batch_linger_expired),
Batch#batch{batch_q = [El], linger_timer = TRef};

%% no limit.
push(El, Batch = #batch{batch_size = 0, batch_q = Q}) ->
Batch#batch{batch_q = [El|Q]};

push(El, Batch = #batch{batch_size = MaxSize, batch_q = Q})
when length(Q) >= MaxSize ->
commit(Batch#batch{batch_q = [El|Q]});

Batch#batch{batch_q = [El | Q]};
push(El, Batch = #batch{batch_size = MaxSize, batch_q = Q}) when
length(Q) >= MaxSize
->
commit(Batch#batch{batch_q = [El | Q]});
push(El, Batch = #batch{batch_q = Q}) ->
Batch#batch{batch_q = [El|Q]}.
Batch#batch{batch_q = [El | Q]}.

-spec(commit(batch()) -> batch()).
-spec commit(batch()) -> batch().
commit(Batch = #batch{batch_q = Q, commit_fun = Commit}) ->
_ = Commit(lists:reverse(Q)),
reset(Batch).

@ -81,11 +88,10 @@ reset(Batch = #batch{linger_timer = TRef}) ->
_ = emqx_misc:cancel_timer(TRef),
Batch#batch{batch_q = [], linger_timer = undefined}.

-spec(size(batch()) -> non_neg_integer()).
-spec size(batch()) -> non_neg_integer().
size(#batch{batch_q = Q}) ->
length(Q).

-spec(items(batch()) -> list(any())).
-spec items(batch()) -> list(any()).
items(#batch{batch_q = Q}) ->
lists:reverse(Q).
|
|
|||
|
|
@ -20,10 +20,9 @@

-define(BOOT_MODULES, [router, broker, listeners]).

-spec(is_enabled(all|router|broker|listeners) -> boolean()).
-spec is_enabled(all | router | broker | listeners) -> boolean().
is_enabled(Mod) ->
(BootMods = boot_modules()) =:= all orelse lists:member(Mod, BootMods).

boot_modules() ->
application:get_env(emqx, boot_modules, ?BOOT_MODULES).
||||
|
|
|
|||
|
|
@ -23,35 +23,38 @@
|
|||
-include("types.hrl").
|
||||
-include("emqx_mqtt.hrl").
|
||||
|
||||
|
||||
-export([start_link/2]).
|
||||
|
||||
%% PubSub
|
||||
-export([ subscribe/1
|
||||
, subscribe/2
|
||||
, subscribe/3
|
||||
]).
|
||||
-export([
|
||||
subscribe/1,
|
||||
subscribe/2,
|
||||
subscribe/3
|
||||
]).
|
||||
|
||||
-export([unsubscribe/1]).
|
||||
|
||||
-export([subscriber_down/1]).
|
||||
|
||||
-export([ publish/1
|
||||
, safe_publish/1
|
||||
]).
|
||||
-export([
|
||||
publish/1,
|
||||
safe_publish/1
|
||||
]).
|
||||
|
||||
-export([dispatch/2]).
|
||||
|
||||
%% PubSub Infos
|
||||
-export([ subscriptions/1
|
||||
, subscriptions_via_topic/1
|
||||
, subscribers/1
|
||||
, subscribed/2
|
||||
]).
|
||||
-export([
|
||||
subscriptions/1,
|
||||
subscriptions_via_topic/1,
|
||||
subscribers/1,
|
||||
subscribed/2
|
||||
]).
|
||||
|
||||
-export([ get_subopts/2
|
||||
, set_subopts/2
|
||||
]).
|
||||
-export([
|
||||
get_subopts/2,
|
||||
set_subopts/2
|
||||
]).
|
||||
|
||||
-export([topics/0]).
|
||||
|
||||
|
|
@ -59,13 +62,14 @@
|
|||
-export([stats_fun/0]).
|
||||
|
||||
%% gen_server callbacks
|
||||
-export([ init/1
|
||||
, handle_call/3
|
||||
, handle_cast/2
|
||||
, handle_info/2
|
||||
, terminate/2
|
||||
, code_change/3
|
||||
]).
|
||||
-export([
|
||||
init/1,
|
||||
handle_call/3,
|
||||
handle_cast/2,
|
||||
handle_info/2,
|
||||
terminate/2,
|
||||
code_change/3
|
||||
]).
|
||||
|
||||
-import(emqx_tables, [lookup_value/2, lookup_value/3]).
|
||||
|
||||
|
|
@ -84,17 +88,21 @@
|
|||
%% Guards
|
||||
-define(IS_SUBID(Id), (is_binary(Id) orelse is_atom(Id))).
|
||||
|
||||
-spec(start_link(atom(), pos_integer()) -> startlink_ret()).
|
||||
-spec start_link(atom(), pos_integer()) -> startlink_ret().
|
||||
start_link(Pool, Id) ->
|
||||
ok = create_tabs(),
|
||||
gen_server:start_link({local, emqx_misc:proc_name(?BROKER, Id)},
|
||||
?MODULE, [Pool, Id], []).
|
||||
gen_server:start_link(
|
||||
{local, emqx_misc:proc_name(?BROKER, Id)},
|
||||
?MODULE,
|
||||
[Pool, Id],
|
||||
[]
|
||||
).
|
||||
|
||||
%%------------------------------------------------------------------------------
|
||||
%% Create tabs
|
||||
%%------------------------------------------------------------------------------
|
||||
|
||||
-spec(create_tabs() -> ok).
|
||||
-spec create_tabs() -> ok.
|
||||
create_tabs() ->
|
||||
TabOpts = [public, {read_concurrency, true}, {write_concurrency, true}],
|
||||
|
||||
|
|
@ -113,28 +121,31 @@ create_tabs() ->
|
|||
%% Subscribe API
|
||||
%%------------------------------------------------------------------------------
|
||||
|
||||
-spec(subscribe(emqx_types:topic()) -> ok).
|
||||
-spec subscribe(emqx_types:topic()) -> ok.
|
||||
subscribe(Topic) when is_binary(Topic) ->
|
||||
subscribe(Topic, undefined).
|
||||
|
||||
-spec(subscribe(emqx_types:topic(), emqx_types:subid() | emqx_types:subopts()) -> ok).
|
||||
-spec subscribe(emqx_types:topic(), emqx_types:subid() | emqx_types:subopts()) -> ok.
|
||||
subscribe(Topic, SubId) when is_binary(Topic), ?IS_SUBID(SubId) ->
|
||||
subscribe(Topic, SubId, ?DEFAULT_SUBOPTS);
|
||||
subscribe(Topic, SubOpts) when is_binary(Topic), is_map(SubOpts) ->
|
||||
subscribe(Topic, undefined, SubOpts).
|
||||
|
||||
-spec(subscribe(emqx_types:topic(), emqx_types:subid(), emqx_types:subopts()) -> ok).
|
||||
-spec subscribe(emqx_types:topic(), emqx_types:subid(), emqx_types:subopts()) -> ok.
|
||||
subscribe(Topic, SubId, SubOpts0) when is_binary(Topic), ?IS_SUBID(SubId), is_map(SubOpts0) ->
|
||||
SubOpts = maps:merge(?DEFAULT_SUBOPTS, SubOpts0),
|
||||
_ = emqx_trace:subscribe(Topic, SubId, SubOpts),
|
||||
SubPid = self(),
|
||||
case ets:member(?SUBOPTION, {SubPid, Topic}) of
|
||||
false -> %% New
|
||||
%% New
|
||||
false ->
|
||||
ok = emqx_broker_helper:register_sub(SubPid, SubId),
|
||||
do_subscribe(Topic, SubPid, with_subid(SubId, SubOpts));
|
||||
true -> %% Existed
|
||||
%% Existed
|
||||
true ->
|
||||
set_subopts(SubPid, Topic, with_subid(SubId, SubOpts)),
|
||||
ok %% ensure to return 'ok'
|
||||
%% ensure to return 'ok'
|
||||
ok
|
||||
end.
|
||||
|
||||
-compile({inline, [with_subid/2]}).
|
||||
|
|
@ -151,14 +162,15 @@ do_subscribe(Topic, SubPid, SubOpts) ->
|
|||
|
||||
do_subscribe(undefined, Topic, SubPid, SubOpts) ->
|
||||
case emqx_broker_helper:get_sub_shard(SubPid, Topic) of
|
||||
0 -> true = ets:insert(?SUBSCRIBER, {Topic, SubPid}),
|
||||
true = ets:insert(?SUBOPTION, {{SubPid, Topic}, SubOpts}),
|
||||
call(pick(Topic), {subscribe, Topic});
|
||||
I -> true = ets:insert(?SUBSCRIBER, {{shard, Topic, I}, SubPid}),
|
||||
true = ets:insert(?SUBOPTION, {{SubPid, Topic}, maps:put(shard, I, SubOpts)}),
|
||||
call(pick({Topic, I}), {subscribe, Topic, I})
|
||||
0 ->
|
||||
true = ets:insert(?SUBSCRIBER, {Topic, SubPid}),
|
||||
true = ets:insert(?SUBOPTION, {{SubPid, Topic}, SubOpts}),
|
||||
call(pick(Topic), {subscribe, Topic});
|
||||
I ->
|
||||
true = ets:insert(?SUBSCRIBER, {{shard, Topic, I}, SubPid}),
|
||||
true = ets:insert(?SUBOPTION, {{SubPid, Topic}, maps:put(shard, I, SubOpts)}),
|
||||
call(pick({Topic, I}), {subscribe, Topic, I})
|
||||
end;
|
||||
|
||||
%% Shared subscription
|
||||
do_subscribe(Group, Topic, SubPid, SubOpts) ->
|
||||
true = ets:insert(?SUBOPTION, {{SubPid, Topic}, SubOpts}),
|
||||
|
|
@ -168,7 +180,7 @@ do_subscribe(Group, Topic, SubPid, SubOpts) ->
|
|||
%% Unsubscribe API
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(unsubscribe(emqx_types:topic()) -> ok).
|
||||
-spec unsubscribe(emqx_types:topic()) -> ok.
|
||||
unsubscribe(Topic) when is_binary(Topic) ->
|
||||
SubPid = self(),
|
||||
case ets:lookup(?SUBOPTION, {SubPid, Topic}) of
|
||||
|
|
@ -176,7 +188,8 @@ unsubscribe(Topic) when is_binary(Topic) ->
|
|||
_ = emqx_broker_helper:reclaim_seq(Topic),
|
||||
_ = emqx_trace:unsubscribe(Topic, SubOpts),
|
||||
do_unsubscribe(Topic, SubPid, SubOpts);
|
||||
[] -> ok
|
||||
[] ->
|
||||
ok
|
||||
end.
|
||||
|
||||
do_unsubscribe(Topic, SubPid, SubOpts) ->
|
||||
|
|
@ -187,12 +200,13 @@ do_unsubscribe(Topic, SubPid, SubOpts) ->
|
|||
|
||||
do_unsubscribe(undefined, Topic, SubPid, SubOpts) ->
|
||||
case maps:get(shard, SubOpts, 0) of
|
||||
0 -> true = ets:delete_object(?SUBSCRIBER, {Topic, SubPid}),
|
||||
cast(pick(Topic), {unsubscribed, Topic});
|
||||
I -> true = ets:delete_object(?SUBSCRIBER, {{shard, Topic, I}, SubPid}),
|
||||
cast(pick({Topic, I}), {unsubscribed, Topic, I})
|
||||
0 ->
|
||||
true = ets:delete_object(?SUBSCRIBER, {Topic, SubPid}),
|
||||
cast(pick(Topic), {unsubscribed, Topic});
|
||||
I ->
|
||||
true = ets:delete_object(?SUBSCRIBER, {{shard, Topic, I}, SubPid}),
|
||||
cast(pick({Topic, I}), {unsubscribed, Topic, I})
|
||||
end;
|
||||
|
||||
do_unsubscribe(Group, Topic, SubPid, _SubOpts) ->
|
||||
emqx_shared_sub:unsubscribe(Group, Topic, SubPid).
|
||||
|
||||
|
|
@ -200,14 +214,16 @@ do_unsubscribe(Group, Topic, SubPid, _SubOpts) ->
|
|||
%% Publish
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(publish(emqx_types:message()) -> emqx_types:publish_result()).
|
||||
-spec publish(emqx_types:message()) -> emqx_types:publish_result().
|
||||
publish(Msg) when is_record(Msg, message) ->
|
||||
_ = emqx_trace:publish(Msg),
|
||||
emqx_message:is_sys(Msg) orelse emqx_metrics:inc('messages.publish'),
|
||||
case emqx_hooks:run_fold('message.publish', [], emqx_message:clean_dup(Msg)) of
|
||||
#message{headers = #{allow_publish := false}, topic = Topic} ->
|
||||
?TRACE("MQTT", "msg_publish_not_allowed", #{message => emqx_message:to_log_map(Msg),
|
||||
topic => Topic}),
|
||||
?TRACE("MQTT", "msg_publish_not_allowed", #{
|
||||
message => emqx_message:to_log_map(Msg),
|
||||
topic => Topic
|
||||
}),
|
||||
[];
|
||||
Msg1 = #message{topic = Topic} ->
|
||||
emqx_persistent_session:persist_message(Msg1),
|
||||
|
|
@ -215,19 +231,21 @@ publish(Msg) when is_record(Msg, message) ->
|
|||
end.
|
||||
|
||||
%% Called internally
|
||||
-spec(safe_publish(emqx_types:message()) -> emqx_types:publish_result()).
|
||||
-spec safe_publish(emqx_types:message()) -> emqx_types:publish_result().
|
||||
safe_publish(Msg) when is_record(Msg, message) ->
|
||||
try
|
||||
publish(Msg)
|
||||
catch
|
||||
Error : Reason : Stk->
|
||||
?SLOG(error,#{
|
||||
msg => "publishing_error",
|
||||
exception => Error,
|
||||
reason => Reason,
|
||||
payload => emqx_message:to_log_map(Msg),
|
||||
stacktrace => Stk
|
||||
},
|
||||
Error:Reason:Stk ->
|
||||
?SLOG(
|
||||
error,
|
||||
#{
|
||||
msg => "publishing_error",
|
||||
exception => Error,
|
||||
reason => Reason,
|
||||
payload => emqx_message:to_log_map(Msg),
|
||||
stacktrace => Stk
|
||||
},
|
||||
#{topic => Msg#message.topic}
|
||||
),
|
||||
[]
|
||||
|
|
@ -240,17 +258,20 @@ delivery(Msg) -> #delivery{sender = self(), message = Msg}.
|
|||
%% Route
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(route([emqx_types:route_entry()], emqx_types:delivery())
|
||||
-> emqx_types:publish_result()).
|
||||
-spec route([emqx_types:route_entry()], emqx_types:delivery()) ->
|
||||
emqx_types:publish_result().
|
||||
route([], #delivery{message = Msg}) ->
|
||||
ok = emqx_hooks:run('message.dropped', [Msg, #{node => node()}, no_subscribers]),
|
||||
ok = inc_dropped_cnt(Msg),
|
||||
[];
|
||||
|
||||
route(Routes, Delivery) ->
|
||||
lists:foldl(fun(Route, Acc) ->
|
||||
[do_route(Route, Delivery) | Acc]
|
||||
end, [], Routes).
|
||||
lists:foldl(
|
||||
fun(Route, Acc) ->
|
||||
[do_route(Route, Delivery) | Acc]
|
||||
end,
|
||||
[],
|
||||
Routes
|
||||
).
|
||||
|
||||
do_route({To, Node}, Delivery) when Node =:= node() ->
|
||||
{Node, To, dispatch(To, Delivery)};
|
||||
|
|
@ -259,43 +280,52 @@ do_route({To, Node}, Delivery) when is_atom(Node) ->
|
|||
do_route({To, Group}, Delivery) when is_tuple(Group); is_binary(Group) ->
|
||||
{share, To, emqx_shared_sub:dispatch(Group, To, Delivery)}.
|
||||
|
||||
aggre([]) -> [];
|
||||
aggre([]) ->
|
||||
[];
|
||||
aggre([#route{topic = To, dest = Node}]) when is_atom(Node) ->
|
||||
[{To, Node}];
|
||||
aggre([#route{topic = To, dest = {Group, _Node}}]) ->
|
||||
[{To, Group}];
|
||||
aggre(Routes) ->
|
||||
lists:foldl(
|
||||
fun(#route{topic = To, dest = Node}, Acc) when is_atom(Node) ->
|
||||
[{To, Node} | Acc];
|
||||
(#route{topic = To, dest = {Group, _Node}}, Acc) ->
|
||||
lists:usort([{To, Group} | Acc])
|
||||
end, [], Routes).
|
||||
fun
|
||||
(#route{topic = To, dest = Node}, Acc) when is_atom(Node) ->
|
||||
[{To, Node} | Acc];
|
||||
(#route{topic = To, dest = {Group, _Node}}, Acc) ->
|
||||
lists:usort([{To, Group} | Acc])
|
||||
end,
|
||||
[],
|
||||
Routes
|
||||
).
|
||||
|
||||
%% @doc Forward message to another node.
|
||||
-spec(forward(node(), emqx_types:topic(), emqx_types:delivery(), RpcMode::sync | async)
|
||||
-> emqx_types:deliver_result()).
|
||||
-spec forward(node(), emqx_types:topic(), emqx_types:delivery(), RpcMode :: sync | async) ->
|
||||
emqx_types:deliver_result().
|
||||
forward(Node, To, Delivery, async) ->
|
||||
true = emqx_broker_proto_v1:forward_async(Node, To, Delivery),
|
||||
emqx_metrics:inc('messages.forward');
|
||||
forward(Node, To, Delivery, sync) ->
|
||||
case emqx_broker_proto_v1:forward(Node, To, Delivery) of
|
||||
{Err, Reason} when Err =:= badrpc; Err =:= badtcp ->
|
||||
?SLOG(error, #{
|
||||
msg => "sync_forward_msg_to_node_failed",
|
||||
node => Node,
|
||||
Err => Reason
|
||||
}, #{topic => To}),
|
||||
?SLOG(
|
||||
error,
|
||||
#{
|
||||
msg => "sync_forward_msg_to_node_failed",
|
||||
node => Node,
|
||||
Err => Reason
|
||||
},
|
||||
#{topic => To}
|
||||
),
|
||||
{error, badrpc};
|
||||
Result ->
|
||||
emqx_metrics:inc('messages.forward'),
|
||||
Result
|
||||
end.
|
||||
|
||||
-spec(dispatch(emqx_types:topic(), emqx_types:delivery()) -> emqx_types:deliver_result()).
|
||||
-spec dispatch(emqx_types:topic(), emqx_types:delivery()) -> emqx_types:deliver_result().
|
||||
dispatch(Topic, Delivery = #delivery{}) when is_binary(Topic) ->
|
||||
case emqx:is_running() of
|
||||
true ->
|
||||
true ->
|
||||
do_dispatch(Topic, Delivery);
|
||||
false ->
|
||||
%% In a rare case emqx_router_helper process may delay
|
||||
|
|
@ -308,81 +338,92 @@ dispatch(Topic, Delivery = #delivery{}) when is_binary(Topic) ->
|
|||
-compile({inline, [inc_dropped_cnt/1]}).
|
||||
inc_dropped_cnt(Msg) ->
|
||||
case emqx_message:is_sys(Msg) of
|
||||
true -> ok;
|
||||
false -> ok = emqx_metrics:inc('messages.dropped'),
|
||||
emqx_metrics:inc('messages.dropped.no_subscribers')
|
||||
true ->
|
||||
ok;
|
||||
false ->
|
||||
ok = emqx_metrics:inc('messages.dropped'),
|
||||
emqx_metrics:inc('messages.dropped.no_subscribers')
|
||||
end.
|
||||
|
||||
-compile({inline, [subscribers/1]}).
|
||||
-spec(subscribers(emqx_types:topic() | {shard, emqx_types:topic(), non_neg_integer()})
|
||||
-> [pid()]).
|
||||
-spec subscribers(emqx_types:topic() | {shard, emqx_types:topic(), non_neg_integer()}) ->
|
||||
[pid()].
|
||||
subscribers(Topic) when is_binary(Topic) ->
|
||||
lookup_value(?SUBSCRIBER, Topic, []);
|
||||
subscribers(Shard = {shard, _Topic, _I}) ->
|
||||
subscribers(Shard = {shard, _Topic, _I}) ->
|
||||
lookup_value(?SUBSCRIBER, Shard, []).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Subscriber is down
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(subscriber_down(pid()) -> true).
|
||||
-spec subscriber_down(pid()) -> true.
|
||||
subscriber_down(SubPid) ->
|
||||
lists:foreach(
|
||||
fun(Topic) ->
|
||||
case lookup_value(?SUBOPTION, {SubPid, Topic}) of
|
||||
SubOpts when is_map(SubOpts) ->
|
||||
_ = emqx_broker_helper:reclaim_seq(Topic),
|
||||
true = ets:delete(?SUBOPTION, {SubPid, Topic}),
|
||||
case maps:get(shard, SubOpts, 0) of
|
||||
0 -> true = ets:delete_object(?SUBSCRIBER, {Topic, SubPid}),
|
||||
ok = cast(pick(Topic), {unsubscribed, Topic});
|
||||
I -> true = ets:delete_object(?SUBSCRIBER, {{shard, Topic, I}, SubPid}),
|
||||
ok = cast(pick({Topic, I}), {unsubscribed, Topic, I})
|
||||
end;
|
||||
undefined -> ok
|
||||
end
|
||||
end, lookup_value(?SUBSCRIPTION, SubPid, [])),
|
||||
fun(Topic) ->
|
||||
case lookup_value(?SUBOPTION, {SubPid, Topic}) of
|
||||
SubOpts when is_map(SubOpts) ->
|
||||
_ = emqx_broker_helper:reclaim_seq(Topic),
|
||||
true = ets:delete(?SUBOPTION, {SubPid, Topic}),
|
||||
case maps:get(shard, SubOpts, 0) of
|
||||
0 ->
|
||||
true = ets:delete_object(?SUBSCRIBER, {Topic, SubPid}),
|
||||
ok = cast(pick(Topic), {unsubscribed, Topic});
|
||||
I ->
|
||||
true = ets:delete_object(?SUBSCRIBER, {{shard, Topic, I}, SubPid}),
|
||||
ok = cast(pick({Topic, I}), {unsubscribed, Topic, I})
|
||||
end;
|
||||
undefined ->
|
||||
ok
|
||||
end
|
||||
end,
|
||||
lookup_value(?SUBSCRIPTION, SubPid, [])
|
||||
),
|
||||
ets:delete(?SUBSCRIPTION, SubPid).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Management APIs
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(subscriptions(pid() | emqx_types:subid())
|
||||
-> [{emqx_types:topic(), emqx_types:subopts()}]).
|
||||
-spec subscriptions(pid() | emqx_types:subid()) ->
|
||||
[{emqx_types:topic(), emqx_types:subopts()}].
|
||||
subscriptions(SubPid) when is_pid(SubPid) ->
|
||||
[{Topic, lookup_value(?SUBOPTION, {SubPid, Topic}, #{})}
|
||||
|| Topic <- lookup_value(?SUBSCRIPTION, SubPid, [])];
|
||||
[
|
||||
{Topic, lookup_value(?SUBOPTION, {SubPid, Topic}, #{})}
|
||||
|| Topic <- lookup_value(?SUBSCRIPTION, SubPid, [])
|
||||
];
|
||||
subscriptions(SubId) ->
|
||||
case emqx_broker_helper:lookup_subpid(SubId) of
|
||||
SubPid when is_pid(SubPid) ->
|
||||
subscriptions(SubPid);
|
||||
undefined -> []
|
||||
undefined ->
|
||||
[]
|
||||
end.
|
||||
|
||||
-spec(subscriptions_via_topic(emqx_types:topic()) -> [emqx_types:subopts()]).
|
||||
-spec subscriptions_via_topic(emqx_types:topic()) -> [emqx_types:subopts()].
|
||||
subscriptions_via_topic(Topic) ->
|
||||
MatchSpec = [{{{'_', '$1'}, '_'}, [{'=:=', '$1', Topic}], ['$_']}],
|
||||
ets:select(?SUBOPTION, MatchSpec).
|
||||
|
||||
-spec(subscribed(pid() | emqx_types:subid(), emqx_types:topic()) -> boolean()).
|
||||
-spec subscribed(pid() | emqx_types:subid(), emqx_types:topic()) -> boolean().
|
||||
subscribed(SubPid, Topic) when is_pid(SubPid) ->
|
||||
ets:member(?SUBOPTION, {SubPid, Topic});
|
||||
subscribed(SubId, Topic) when ?IS_SUBID(SubId) ->
|
||||
SubPid = emqx_broker_helper:lookup_subpid(SubId),
|
||||
ets:member(?SUBOPTION, {SubPid, Topic}).
|
||||
|
||||
-spec(get_subopts(pid(), emqx_types:topic()) -> maybe(emqx_types:subopts())).
|
||||
-spec get_subopts(pid(), emqx_types:topic()) -> maybe(emqx_types:subopts()).
|
||||
get_subopts(SubPid, Topic) when is_pid(SubPid), is_binary(Topic) ->
|
||||
lookup_value(?SUBOPTION, {SubPid, Topic});
|
||||
get_subopts(SubId, Topic) when ?IS_SUBID(SubId) ->
|
||||
case emqx_broker_helper:lookup_subpid(SubId) of
|
||||
SubPid when is_pid(SubPid) ->
|
||||
get_subopts(SubPid, Topic);
|
||||
undefined -> undefined
|
||||
undefined ->
|
||||
undefined
|
||||
end.
|
||||
|
||||
-spec(set_subopts(emqx_types:topic(), emqx_types:subopts()) -> boolean()).
|
||||
-spec set_subopts(emqx_types:topic(), emqx_types:subopts()) -> boolean().
|
||||
set_subopts(Topic, NewOpts) when is_binary(Topic), is_map(NewOpts) ->
|
||||
set_subopts(self(), Topic, NewOpts).
|
||||
|
||||
|
|
@ -392,10 +433,11 @@ set_subopts(SubPid, Topic, NewOpts) ->
|
|||
case ets:lookup(?SUBOPTION, Sub) of
|
||||
[{_, OldOpts}] ->
|
||||
ets:insert(?SUBOPTION, {Sub, maps:merge(OldOpts, NewOpts)});
|
||||
[] -> false
|
||||
[] ->
|
||||
false
|
||||
end.
|
||||
|
||||
-spec(topics() -> [emqx_types:topic()]).
|
||||
-spec topics() -> [emqx_types:topic()].
|
||||
topics() ->
|
||||
emqx_router:topics().
|
||||
|
||||
|
|
@ -441,18 +483,18 @@ init([Pool, Id]) ->
|
|||
handle_call({subscribe, Topic}, _From, State) ->
|
||||
Ok = emqx_router:do_add_route(Topic),
|
||||
{reply, Ok, State};
|
||||
|
||||
handle_call({subscribe, Topic, I}, _From, State) ->
|
||||
Shard = {Topic, I},
|
||||
Ok = case get(Shard) of
|
||||
undefined ->
|
||||
_ = put(Shard, true),
|
||||
true = ets:insert(?SUBSCRIBER, {Topic, {shard, I}}),
|
||||
cast(pick(Topic), {subscribe, Topic});
|
||||
true -> ok
|
||||
end,
|
||||
Ok =
|
||||
case get(Shard) of
|
||||
undefined ->
|
||||
_ = put(Shard, true),
|
||||
true = ets:insert(?SUBSCRIBER, {Topic, {shard, I}}),
|
||||
cast(pick(Topic), {subscribe, Topic});
|
||||
true ->
|
||||
ok
|
||||
end,
|
||||
{reply, Ok, State};
|
||||
|
||||
handle_call(Req, _From, State) ->
|
||||
?SLOG(error, #{msg => "unexpected_call", call => Req}),
|
||||
{reply, ignored, State}.
|
||||
|
|
@ -460,30 +502,28 @@ handle_call(Req, _From, State) ->
|
|||
handle_cast({subscribe, Topic}, State) ->
|
||||
case emqx_router:do_add_route(Topic) of
|
||||
ok -> ok;
|
||||
{error, Reason} ->
|
||||
?SLOG(error, #{msg => "failed_to_add_route", reason => Reason})
|
||||
{error, Reason} -> ?SLOG(error, #{msg => "failed_to_add_route", reason => Reason})
|
||||
end,
|
||||
{noreply, State};
|
||||
|
||||
handle_cast({unsubscribed, Topic}, State) ->
|
||||
case ets:member(?SUBSCRIBER, Topic) of
|
||||
false ->
|
||||
_ = emqx_router:do_delete_route(Topic),
|
||||
ok;
|
||||
true -> ok
|
||||
true ->
|
||||
ok
|
||||
end,
|
||||
{noreply, State};
|
||||
|
||||
handle_cast({unsubscribed, Topic, I}, State) ->
|
||||
case ets:member(?SUBSCRIBER, {shard, Topic, I}) of
|
||||
false ->
|
||||
_ = erase({Topic, I}),
|
||||
true = ets:delete_object(?SUBSCRIBER, {Topic, {shard, I}}),
|
||||
cast(pick(Topic), {unsubscribed, Topic});
|
||||
true -> ok
|
||||
true ->
|
||||
ok
|
||||
end,
|
||||
{noreply, State};
|
||||
|
||||
handle_cast(Msg, State) ->
|
||||
?SLOG(error, #{msg => "unexpected_cast", cast => Msg}),
|
||||
{noreply, State}.
|
||||
|
|
@ -502,12 +542,15 @@ code_change(_OldVsn, State, _Extra) ->
|
|||
%% Internal functions
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(do_dispatch(emqx_types:topic(), emqx_types:delivery()) -> emqx_types:deliver_result()).
|
||||
-spec do_dispatch(emqx_types:topic(), emqx_types:delivery()) -> emqx_types:deliver_result().
|
||||
do_dispatch(Topic, #delivery{message = Msg}) ->
|
||||
DispN = lists:foldl(
|
||||
fun(Sub, N) ->
|
||||
N + do_dispatch(Sub, Topic, Msg)
|
||||
end, 0, subscribers(Topic)),
|
||||
fun(Sub, N) ->
|
||||
N + do_dispatch(Sub, Topic, Msg)
|
||||
end,
|
||||
0,
|
||||
subscribers(Topic)
|
||||
),
|
||||
case DispN of
|
||||
0 ->
|
||||
ok = emqx_hooks:run('message.dropped', [Msg, #{node => node()}, no_subscribers]),
|
||||
|
|
@ -520,11 +563,16 @@ do_dispatch(Topic, #delivery{message = Msg}) ->
|
|||
do_dispatch(SubPid, Topic, Msg) when is_pid(SubPid) ->
|
||||
case erlang:is_process_alive(SubPid) of
|
||||
true ->
|
||||
SubPid ! {deliver, Topic, Msg}, 1;
|
||||
false -> 0
|
||||
SubPid ! {deliver, Topic, Msg},
|
||||
1;
|
||||
false ->
|
||||
0
|
||||
end;
|
||||
do_dispatch({shard, I}, Topic, Msg) ->
|
||||
lists:foldl(
|
||||
fun(SubPid, N) ->
|
||||
N + do_dispatch(SubPid, Topic, Msg)
|
||||
end, 0, subscribers({shard, Topic, I})).
|
||||
end,
|
||||
0,
|
||||
subscribers({shard, Topic, I})
|
||||
).
|
||||
|
|
|
|||
|
|
@ -25,13 +25,14 @@
|
|||
run1() -> run1(80, 1000, 80, 10000).
|
||||
|
||||
run1(Subs, SubOps, Pubs, PubOps) ->
|
||||
run(#{subscribers => Subs,
|
||||
publishers => Pubs,
|
||||
sub_ops => SubOps,
|
||||
pub_ops => PubOps,
|
||||
sub_ptn => <<"device/{{id}}/+/{{num}}/#">>,
|
||||
pub_ptn => <<"device/{{id}}/foo/{{num}}/bar/1/2/3/4/5">>
|
||||
}).
|
||||
run(#{
|
||||
subscribers => Subs,
|
||||
publishers => Pubs,
|
||||
sub_ops => SubOps,
|
||||
pub_ops => PubOps,
|
||||
sub_ptn => <<"device/{{id}}/+/{{num}}/#">>,
|
||||
pub_ptn => <<"device/{{id}}/foo/{{num}}/bar/1/2/3/4/5">>
|
||||
}).
|
||||
|
||||
%% setting fields:
|
||||
%% - subscribers: spawn this number of subscriber workers
|
||||
|
|
@ -43,20 +44,23 @@ run1(Subs, SubOps, Pubs, PubOps) ->
|
|||
%% replaced by worker id and {{num}} replaced by topic number.
|
||||
%% - pub_ptn: topic pattern used to benchmark publish (match) performance
|
||||
%% e.g. a/x/{{id}}/{{num}}/foo/bar
|
||||
run(#{subscribers := Subs,
|
||||
publishers := Pubs,
|
||||
sub_ops := SubOps,
|
||||
pub_ops := PubOps
|
||||
} = Settings) ->
|
||||
SubsPids = start_callers(Subs, fun start_subscriber/1, Settings),
|
||||
run(
|
||||
#{
|
||||
subscribers := Subs,
|
||||
publishers := Pubs,
|
||||
sub_ops := SubOps,
|
||||
pub_ops := PubOps
|
||||
} = Settings
|
||||
) ->
|
||||
SubsPids = start_callers(Subs, fun start_subscriber/1, Settings),
|
||||
PubsPids = start_callers(Pubs, fun start_publisher/1, Settings),
|
||||
_ = collect_results(SubsPids, subscriber_ready),
|
||||
io:format(user, "subscribe ...~n", []),
|
||||
{T1, SubsTime} =
|
||||
?T(begin
|
||||
lists:foreach(fun(Pid) -> Pid ! start_subscribe end, SubsPids),
|
||||
collect_results(SubsPids, subscribe_time)
|
||||
end),
|
||||
lists:foreach(fun(Pid) -> Pid ! start_subscribe end, SubsPids),
|
||||
collect_results(SubsPids, subscribe_time)
|
||||
end),
|
||||
io:format(user, "InsertTotalTime: ~ts~n", [ns(T1)]),
|
||||
io:format(user, "InsertTimeAverage: ~ts~n", [ns(SubsTime / Subs)]),
|
||||
io:format(user, "InsertRps: ~p~n", [rps(Subs * SubOps, T1)]),
|
||||
|
|
@ -64,9 +68,9 @@ run(#{subscribers := Subs,
|
|||
io:format(user, "lookup ...~n", []),
|
||||
{T2, PubsTime} =
|
||||
?T(begin
|
||||
lists:foreach(fun(Pid) -> Pid ! start_lookup end, PubsPids),
|
||||
collect_results(PubsPids, lookup_time)
|
||||
end),
|
||||
lists:foreach(fun(Pid) -> Pid ! start_lookup end, PubsPids),
|
||||
collect_results(PubsPids, lookup_time)
|
||||
end),
|
||||
io:format(user, "LookupTotalTime: ~ts~n", [ns(T2)]),
|
||||
io:format(user, "LookupTimeAverage: ~ts~n", [ns(PubsTime / Pubs)]),
|
||||
io:format(user, "LookupRps: ~p~n", [rps(Pubs * PubOps, T2)]),
|
||||
|
|
@ -76,14 +80,15 @@ run(#{subscribers := Subs,
|
|||
io:format(user, "unsubscribe ...~n", []),
|
||||
{T3, ok} =
|
||||
?T(begin
|
||||
lists:foreach(fun(Pid) -> Pid ! stop end, SubsPids),
|
||||
wait_until_empty()
|
||||
end),
|
||||
lists:foreach(fun(Pid) -> Pid ! stop end, SubsPids),
|
||||
wait_until_empty()
|
||||
end),
|
||||
io:format(user, "TimeToUnsubscribeAll: ~ts~n", [ns(T3)]).
|
||||
|
||||
wait_until_empty() ->
|
||||
case emqx_trie:empty() of
|
||||
true -> ok;
|
||||
true ->
|
||||
ok;
|
||||
false ->
|
||||
timer:sleep(5),
|
||||
wait_until_empty()
|
||||
|
|
@ -98,13 +103,13 @@ ns(T) -> io_lib:format("~p(ns)", [T]).
|
|||
ram_bytes() ->
|
||||
Wordsize = erlang:system_info(wordsize),
|
||||
mnesia:table_info(emqx_trie, memory) * Wordsize +
|
||||
case lists:member(emqx_trie_node, ets:all()) of
|
||||
true ->
|
||||
%% before 4.3
|
||||
mnesia:table_info(emqx_trie_node, memory) * Wordsize;
|
||||
false ->
|
||||
0
|
||||
end.
|
||||
case lists:member(emqx_trie_node, ets:all()) of
|
||||
true ->
|
||||
%% before 4.3
|
||||
mnesia:table_info(emqx_trie_node, memory) * Wordsize;
|
||||
false ->
|
||||
0
|
||||
end.
|
||||
|
||||
start_callers(N, F, Settings) ->
|
||||
start_callers(N, F, Settings, []).
|
||||
|
|
@ -117,7 +122,8 @@ start_callers(N, F, Settings, Acc) ->
|
|||
collect_results(Pids, Tag) ->
|
||||
collect_results(Pids, Tag, 0).
|
||||
|
||||
collect_results([], _Tag, R) -> R;
|
||||
collect_results([], _Tag, R) ->
|
||||
R;
|
||||
collect_results([Pid | Pids], Tag, R) ->
|
||||
receive
|
||||
{Pid, Tag, N} ->
|
||||
|
|
@ -128,40 +134,43 @@ start_subscriber(#{id := Id, sub_ops := N, sub_ptn := SubPtn}) ->
|
|||
Parent = self(),
|
||||
proc_lib:spawn_link(
|
||||
fun() ->
|
||||
SubTopics = make_topics(SubPtn, Id, N),
|
||||
Parent ! {self(), subscriber_ready, 0},
|
||||
receive
|
||||
start_subscribe ->
|
||||
ok
|
||||
end,
|
||||
{Ts, _} = ?T(subscribe(SubTopics)),
|
||||
_ = erlang:send(Parent, {self(), subscribe_time, Ts/ N}),
|
||||
%% subscribers should not exit before publish test is done
|
||||
receive
|
||||
stop ->
|
||||
ok
|
||||
end
|
||||
end).
|
||||
SubTopics = make_topics(SubPtn, Id, N),
|
||||
Parent ! {self(), subscriber_ready, 0},
|
||||
receive
|
||||
start_subscribe ->
|
||||
ok
|
||||
end,
|
||||
{Ts, _} = ?T(subscribe(SubTopics)),
|
||||
_ = erlang:send(Parent, {self(), subscribe_time, Ts / N}),
|
||||
%% subscribers should not exit before publish test is done
|
||||
receive
|
||||
stop ->
|
||||
ok
|
||||
end
|
||||
end
|
||||
).
|
||||
|
||||
start_publisher(#{id := Id, pub_ops := N, pub_ptn := PubPtn, subscribers := Subs}) ->
|
||||
Parent = self(),
|
||||
proc_lib:spawn_link(
|
||||
fun() ->
|
||||
L = lists:seq(1, N),
|
||||
[Topic] = make_topics(PubPtn, (Id rem Subs) + 1, 1),
|
||||
receive
|
||||
start_lookup ->
|
||||
ok
|
||||
end,
|
||||
{Tm, ok} = ?T(lists:foreach(fun(_) -> match(Topic) end, L)),
|
||||
_ = erlang:send(Parent, {self(), lookup_time, Tm / N}),
|
||||
ok
|
||||
end).
|
||||
fun() ->
|
||||
L = lists:seq(1, N),
|
||||
[Topic] = make_topics(PubPtn, (Id rem Subs) + 1, 1),
|
||||
receive
|
||||
start_lookup ->
|
||||
ok
|
||||
end,
|
||||
{Tm, ok} = ?T(lists:foreach(fun(_) -> match(Topic) end, L)),
|
||||
_ = erlang:send(Parent, {self(), lookup_time, Tm / N}),
|
||||
ok
|
||||
end
|
||||
).
|
||||
|
||||
match(Topic) ->
|
||||
[_] = emqx_router:match_routes(Topic).
|
||||
|
||||
subscribe([]) -> ok;
|
||||
subscribe([]) ->
|
||||
ok;
|
||||
subscribe([Topic | Rest]) ->
|
||||
ok = emqx_broker:subscribe(Topic),
|
||||
subscribe(Rest).
|
||||
|
|
|
|||
|
|
@ -21,26 +21,27 @@
|
|||
-include("logger.hrl").
|
||||
-include("types.hrl").
|
||||
|
||||
|
||||
-export([start_link/0]).
|
||||
|
||||
%% APIs
|
||||
-export([ register_sub/2
|
||||
, lookup_subid/1
|
||||
, lookup_subpid/1
|
||||
, get_sub_shard/2
|
||||
, create_seq/1
|
||||
, reclaim_seq/1
|
||||
]).
|
||||
-export([
|
||||
register_sub/2,
|
||||
lookup_subid/1,
|
||||
lookup_subpid/1,
|
||||
get_sub_shard/2,
|
||||
create_seq/1,
|
||||
reclaim_seq/1
|
||||
]).
|
||||
|
||||
%% gen_server callbacks
|
||||
-export([ init/1
|
||||
, handle_call/3
|
||||
, handle_cast/2
|
||||
, handle_info/2
|
||||
, terminate/2
|
||||
, code_change/3
|
||||
]).
|
||||
-export([
|
||||
init/1,
|
||||
handle_call/3,
|
||||
handle_cast/2,
|
||||
handle_info/2,
|
||||
terminate/2,
|
||||
code_change/3
|
||||
]).
|
||||
|
||||
-ifdef(TEST).
|
||||
-compile(export_all).
|
||||
|
|
@ -55,11 +56,11 @@
|
|||
|
||||
-define(BATCH_SIZE, 100000).
|
||||
|
||||
-spec(start_link() -> startlink_ret()).
|
||||
-spec start_link() -> startlink_ret().
|
||||
start_link() ->
|
||||
gen_server:start_link({local, ?HELPER}, ?MODULE, [], []).
|
||||
|
||||
-spec(register_sub(pid(), emqx_types:subid()) -> ok).
|
||||
-spec register_sub(pid(), emqx_types:subid()) -> ok.
|
||||
register_sub(SubPid, SubId) when is_pid(SubPid) ->
|
||||
case ets:lookup(?SUBMON, SubPid) of
|
||||
[] ->
|
||||
|
|
@ -70,31 +71,31 @@ register_sub(SubPid, SubId) when is_pid(SubPid) ->
|
|||
error(subid_conflict)
|
||||
end.
|
||||
|
||||
-spec(lookup_subid(pid()) -> maybe(emqx_types:subid())).
|
||||
-spec lookup_subid(pid()) -> maybe(emqx_types:subid()).
|
||||
lookup_subid(SubPid) when is_pid(SubPid) ->
|
||||
emqx_tables:lookup_value(?SUBMON, SubPid).
|
||||
|
||||
-spec(lookup_subpid(emqx_types:subid()) -> maybe(pid())).
|
||||
-spec lookup_subpid(emqx_types:subid()) -> maybe(pid()).
|
||||
lookup_subpid(SubId) ->
|
||||
emqx_tables:lookup_value(?SUBID, SubId).
|
||||
|
||||
-spec(get_sub_shard(pid(), emqx_types:topic()) -> non_neg_integer()).
|
||||
-spec get_sub_shard(pid(), emqx_types:topic()) -> non_neg_integer().
|
||||
get_sub_shard(SubPid, Topic) ->
|
||||
case create_seq(Topic) of
|
||||
Seq when Seq =< ?SHARD -> 0;
|
||||
_ -> erlang:phash2(SubPid, shards_num()) + 1
|
||||
end.
|
||||
|
||||
-spec(shards_num() -> pos_integer()).
|
||||
-spec shards_num() -> pos_integer().
|
||||
shards_num() ->
|
||||
%% Dynamic sharding later...
|
||||
ets:lookup_element(?HELPER, shards, 2).
|
||||
|
||||
-spec(create_seq(emqx_types:topic()) -> emqx_sequence:seqid()).
|
||||
-spec create_seq(emqx_types:topic()) -> emqx_sequence:seqid().
|
||||
create_seq(Topic) ->
|
||||
emqx_sequence:nextval(?SUBSEQ, Topic).
|
||||
|
||||
-spec(reclaim_seq(emqx_types:topic()) -> emqx_sequence:seqid()).
|
||||
-spec reclaim_seq(emqx_types:topic()) -> emqx_sequence:seqid().
|
||||
reclaim_seq(Topic) ->
|
||||
emqx_sequence:reclaim(?SUBSEQ, Topic).
|
||||
|
||||
|
|
@ -125,7 +126,6 @@ handle_cast({register_sub, SubPid, SubId}, State = #{pmon := PMon}) ->
|
|||
true = (SubId =:= undefined) orelse ets:insert(?SUBID, {SubId, SubPid}),
|
||||
true = ets:insert(?SUBMON, {SubPid, SubId}),
|
||||
{noreply, State#{pmon := emqx_pmon:monitor(SubPid, PMon)}};
|
||||
|
||||
handle_cast(Msg, State) ->
|
||||
?SLOG(error, #{msg => "unexpected_cast", cast => Msg}),
|
||||
{noreply, State}.
|
||||
|
|
@ -133,10 +133,10 @@ handle_cast(Msg, State) ->
|
|||
handle_info({'DOWN', _MRef, process, SubPid, _Reason}, State = #{pmon := PMon}) ->
|
||||
SubPids = [SubPid | emqx_misc:drain_down(?BATCH_SIZE)],
|
||||
ok = emqx_pool:async_submit(
|
||||
fun lists:foreach/2, [fun clean_down/1, SubPids]),
|
||||
fun lists:foreach/2, [fun clean_down/1, SubPids]
|
||||
),
|
||||
{_, PMon1} = emqx_pmon:erase_all(SubPids, PMon),
|
||||
{noreply, State#{pmon := PMon1}};
|
||||
|
||||
handle_info(Info, State) ->
|
||||
?SLOG(error, #{msg => "unexpected_info", info => Info}),
|
||||
{noreply, State}.
|
||||
|
|
@ -156,9 +156,10 @@ clean_down(SubPid) ->
|
|||
case ets:lookup(?SUBMON, SubPid) of
|
||||
[{_, SubId}] ->
|
||||
true = ets:delete(?SUBMON, SubPid),
|
||||
true = (SubId =:= undefined)
|
||||
orelse ets:delete_object(?SUBID, {SubId, SubPid}),
|
||||
true =
|
||||
(SubId =:= undefined) orelse
|
||||
ets:delete_object(?SUBID, {SubId, SubPid}),
|
||||
emqx_broker:subscriber_down(SubPid);
|
||||
[] -> ok
|
||||
[] ->
|
||||
ok
|
||||
end.
|
||||
|
||||
|
|
|
|||
|
|
@ -32,32 +32,41 @@ start_link() ->
|
|||
init([]) ->
|
||||
%% Broker pool
|
||||
PoolSize = emqx_vm:schedulers() * 2,
|
||||
BrokerPool = emqx_pool_sup:spec([broker_pool, hash, PoolSize,
|
||||
{emqx_broker, start_link, []}]),
|
||||
BrokerPool = emqx_pool_sup:spec([
|
||||
broker_pool,
|
||||
hash,
|
||||
PoolSize,
|
||||
{emqx_broker, start_link, []}
|
||||
]),
|
||||
|
||||
%% Shared subscription
|
||||
SharedSub = #{id => shared_sub,
|
||||
start => {emqx_shared_sub, start_link, []},
|
||||
restart => permanent,
|
||||
shutdown => 2000,
|
||||
type => worker,
|
||||
modules => [emqx_shared_sub]},
|
||||
SharedSub = #{
|
||||
id => shared_sub,
|
||||
start => {emqx_shared_sub, start_link, []},
|
||||
restart => permanent,
|
||||
shutdown => 2000,
|
||||
type => worker,
|
||||
modules => [emqx_shared_sub]
|
||||
},
|
||||
|
||||
%% Authentication
|
||||
AuthNSup = #{id => emqx_authentication_sup,
|
||||
start => {emqx_authentication_sup, start_link, []},
|
||||
restart => permanent,
|
||||
shutdown => infinity,
|
||||
type => supervisor,
|
||||
modules => [emqx_authentication_sup]},
|
||||
AuthNSup = #{
|
||||
id => emqx_authentication_sup,
|
||||
start => {emqx_authentication_sup, start_link, []},
|
||||
restart => permanent,
|
||||
shutdown => infinity,
|
||||
type => supervisor,
|
||||
modules => [emqx_authentication_sup]
|
||||
},
|
||||
|
||||
%% Broker helper
|
||||
Helper = #{id => helper,
|
||||
start => {emqx_broker_helper, start_link, []},
|
||||
restart => permanent,
|
||||
shutdown => 2000,
|
||||
type => worker,
|
||||
modules => [emqx_broker_helper]},
|
||||
Helper = #{
|
||||
id => helper,
|
||||
start => {emqx_broker_helper, start_link, []},
|
||||
restart => permanent,
|
||||
shutdown => 2000,
|
||||
type => worker,
|
||||
modules => [emqx_broker_helper]
|
||||
},
|
||||
|
||||
{ok, {{one_for_all, 0, 1}, [BrokerPool, SharedSub, AuthNSup, Helper]}}.
|
||||
|
||||
|
|
|
|||
File diff suppressed because it is too large
|
|
@ -23,80 +23,89 @@
|
|||
-include("types.hrl").
|
||||
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
|
||||
|
||||
|
||||
-export([start_link/0]).
|
||||
|
||||
-export([ register_channel/3
|
||||
, unregister_channel/1
|
||||
, insert_channel_info/3
|
||||
]).
|
||||
-export([
|
||||
register_channel/3,
|
||||
unregister_channel/1,
|
||||
insert_channel_info/3
|
||||
]).
|
||||
|
||||
-export([connection_closed/1]).
|
||||
|
||||
-export([ get_chan_info/1
|
||||
, get_chan_info/2
|
||||
, set_chan_info/2
|
||||
]).
|
||||
-export([
|
||||
get_chan_info/1,
|
||||
get_chan_info/2,
|
||||
set_chan_info/2
|
||||
]).
|
||||
|
||||
-export([ get_chan_stats/1
|
||||
, get_chan_stats/2
|
||||
, set_chan_stats/2
|
||||
]).
|
||||
-export([
|
||||
get_chan_stats/1,
|
||||
get_chan_stats/2,
|
||||
set_chan_stats/2
|
||||
]).
|
||||
|
||||
-export([get_chann_conn_mod/2]).
|
||||
|
||||
-export([ open_session/3
|
||||
, discard_session/1
|
||||
, discard_session/2
|
||||
, takeover_session/1
|
||||
, takeover_session/2
|
||||
, kick_session/1
|
||||
, kick_session/2
|
||||
]).
|
||||
-export([
|
||||
open_session/3,
|
||||
discard_session/1,
|
||||
discard_session/2,
|
||||
takeover_session/1,
|
||||
takeover_session/2,
|
||||
kick_session/1,
|
||||
kick_session/2
|
||||
]).
|
||||
|
||||
-export([ lookup_channels/1
|
||||
, lookup_channels/2
|
||||
-export([
|
||||
lookup_channels/1,
|
||||
lookup_channels/2,
|
||||
|
||||
, lookup_client/1
|
||||
]).
|
||||
lookup_client/1
|
||||
]).
|
||||
|
||||
%% Test/debug interface
|
||||
-export([ all_channels/0
|
||||
, all_client_ids/0
|
||||
]).
|
||||
-export([
|
||||
all_channels/0,
|
||||
all_client_ids/0
|
||||
]).
|
||||
|
||||
%% gen_server callbacks
|
||||
-export([ init/1
|
||||
, handle_call/3
|
||||
, handle_cast/2
|
||||
, handle_info/2
|
||||
, terminate/2
|
||||
, code_change/3
|
||||
]).
|
||||
-export([
|
||||
init/1,
|
||||
handle_call/3,
|
||||
handle_cast/2,
|
||||
handle_info/2,
|
||||
terminate/2,
|
||||
code_change/3
|
||||
]).
|
||||
|
||||
%% Internal export
|
||||
-export([ stats_fun/0
|
||||
, clean_down/1
|
||||
, mark_channel_connected/1
|
||||
, mark_channel_disconnected/1
|
||||
, get_connected_client_count/0
|
||||
-export([
|
||||
stats_fun/0,
|
||||
clean_down/1,
|
||||
mark_channel_connected/1,
|
||||
mark_channel_disconnected/1,
|
||||
get_connected_client_count/0,
|
||||
|
||||
, do_kick_session/3
|
||||
, do_get_chan_stats/2
|
||||
, do_get_chan_info/2
|
||||
, do_get_chann_conn_mod/2
|
||||
]).
|
||||
do_kick_session/3,
|
||||
do_get_chan_stats/2,
|
||||
do_get_chan_info/2,
|
||||
do_get_chann_conn_mod/2
|
||||
]).
|
||||
|
||||
-export_type([ channel_info/0
|
||||
, chan_pid/0
|
||||
]).
|
||||
-export_type([
|
||||
channel_info/0,
|
||||
chan_pid/0
|
||||
]).
|
||||
|
||||
-type(chan_pid() :: pid()).
|
||||
-type chan_pid() :: pid().
|
||||
|
||||
-type(channel_info() :: { _Chan :: {emqx_types:clientid(), pid()}
|
||||
, _Info :: emqx_types:infos()
|
||||
, _Stats :: emqx_types:stats()
|
||||
}).
|
||||
-type channel_info() :: {
|
||||
_Chan :: {emqx_types:clientid(), pid()},
|
||||
_Info :: emqx_types:infos(),
|
||||
_Stats :: emqx_types:stats()
|
||||
}.
|
||||
|
||||
-include("emqx_cm.hrl").
|
||||
|
||||
|
|
@ -106,12 +115,12 @@
|
|||
-define(CHAN_INFO_TAB, emqx_channel_info).
|
||||
-define(CHAN_LIVE_TAB, emqx_channel_live).
|
||||
|
||||
-define(CHAN_STATS,
|
||||
[{?CHAN_TAB, 'channels.count', 'channels.max'},
|
||||
{?CHAN_TAB, 'sessions.count', 'sessions.max'},
|
||||
{?CHAN_CONN_TAB, 'connections.count', 'connections.max'},
|
||||
{?CHAN_LIVE_TAB, 'live_connections.count', 'live_connections.max'}
|
||||
]).
|
||||
-define(CHAN_STATS, [
|
||||
{?CHAN_TAB, 'channels.count', 'channels.max'},
|
||||
{?CHAN_TAB, 'sessions.count', 'sessions.max'},
|
||||
{?CHAN_CONN_TAB, 'connections.count', 'connections.max'},
|
||||
{?CHAN_LIVE_TAB, 'live_connections.count', 'live_connections.max'}
|
||||
]).
|
||||
|
||||
%% Batch drain
|
||||
-define(BATCH_SIZE, 100000).
|
||||
|
|
@ -120,12 +129,13 @@
|
|||
-define(CM, ?MODULE).
|
||||
|
||||
%% linting overrides
|
||||
-elvis([ {elvis_style, invalid_dynamic_call, #{ignore => [emqx_cm]}}
|
||||
, {elvis_style, god_modules, #{ignore => [emqx_cm]}}
|
||||
]).
|
||||
-elvis([
|
||||
{elvis_style, invalid_dynamic_call, #{ignore => [emqx_cm]}},
|
||||
{elvis_style, god_modules, #{ignore => [emqx_cm]}}
|
||||
]).
|
||||
|
||||
%% @doc Start the channel manager.
|
||||
-spec(start_link() -> startlink_ret()).
|
||||
-spec start_link() -> startlink_ret().
|
||||
start_link() ->
|
||||
gen_server:start_link({local, ?CM}, ?MODULE, [], []).
|
||||
|
||||
|
|
@ -134,9 +144,11 @@ start_link() ->
|
|||
%%--------------------------------------------------------------------
|
||||
|
||||
%% @doc Insert/Update the channel info and stats to emqx_channel table
|
||||
-spec(insert_channel_info(emqx_types:clientid(),
|
||||
emqx_types:infos(),
|
||||
emqx_types:stats()) -> ok).
|
||||
-spec insert_channel_info(
|
||||
emqx_types:clientid(),
|
||||
emqx_types:infos(),
|
||||
emqx_types:stats()
|
||||
) -> ok.
|
||||
insert_channel_info(ClientId, Info, Stats) ->
|
||||
Chan = {ClientId, self()},
|
||||
true = ets:insert(?CHAN_INFO_TAB, {Chan, Info, Stats}),
|
||||
|
|
@ -160,7 +172,7 @@ register_channel(ClientId, ChanPid, #{conn_mod := ConnMod}) when is_pid(ChanPid)
|
|||
cast({registered, Chan}).
|
||||
|
||||
%% @doc Unregister a channel.
|
||||
-spec(unregister_channel(emqx_types:clientid()) -> ok).
|
||||
-spec unregister_channel(emqx_types:clientid()) -> ok.
|
||||
unregister_channel(ClientId) when is_binary(ClientId) ->
|
||||
true = do_unregister_channel({ClientId, self()}),
|
||||
ok.
|
||||
|
|
@ -172,143 +184,158 @@ do_unregister_channel(Chan) ->
|
|||
true = ets:delete(?CHAN_INFO_TAB, Chan),
|
||||
ets:delete_object(?CHAN_TAB, Chan).
|
||||
|
||||
-spec(connection_closed(emqx_types:clientid()) -> true).
|
||||
-spec connection_closed(emqx_types:clientid()) -> true.
|
||||
connection_closed(ClientId) ->
|
||||
connection_closed(ClientId, self()).
|
||||
|
||||
-spec(connection_closed(emqx_types:clientid(), chan_pid()) -> true).
|
||||
-spec connection_closed(emqx_types:clientid(), chan_pid()) -> true.
|
||||
connection_closed(ClientId, ChanPid) ->
|
||||
ets:delete_object(?CHAN_CONN_TAB, {ClientId, ChanPid}).
|
||||
|
||||
%% @doc Get info of a channel.
|
||||
-spec(get_chan_info(emqx_types:clientid()) -> maybe(emqx_types:infos())).
|
||||
-spec get_chan_info(emqx_types:clientid()) -> maybe(emqx_types:infos()).
|
||||
get_chan_info(ClientId) ->
|
||||
with_channel(ClientId, fun(ChanPid) -> get_chan_info(ClientId, ChanPid) end).
|
||||
|
||||
-spec(do_get_chan_info(emqx_types:clientid(), chan_pid())
|
||||
-> maybe(emqx_types:infos())).
|
||||
-spec do_get_chan_info(emqx_types:clientid(), chan_pid()) ->
|
||||
maybe(emqx_types:infos()).
|
||||
do_get_chan_info(ClientId, ChanPid) ->
|
||||
Chan = {ClientId, ChanPid},
|
||||
try ets:lookup_element(?CHAN_INFO_TAB, Chan, 2)
|
||||
try
|
||||
ets:lookup_element(?CHAN_INFO_TAB, Chan, 2)
|
||||
catch
|
||||
error:badarg -> undefined
|
||||
end.
|
||||
|
||||
-spec(get_chan_info(emqx_types:clientid(), chan_pid())
|
||||
-> maybe(emqx_types:infos())).
|
||||
-spec get_chan_info(emqx_types:clientid(), chan_pid()) ->
|
||||
maybe(emqx_types:infos()).
|
||||
get_chan_info(ClientId, ChanPid) ->
|
||||
wrap_rpc(emqx_cm_proto_v1:get_chan_info(ClientId, ChanPid)).
|
||||
|
||||
%% @doc Update infos of the channel.
|
||||
-spec(set_chan_info(emqx_types:clientid(), emqx_types:attrs()) -> boolean()).
|
||||
-spec set_chan_info(emqx_types:clientid(), emqx_types:attrs()) -> boolean().
|
||||
set_chan_info(ClientId, Info) when is_binary(ClientId) ->
|
||||
Chan = {ClientId, self()},
|
||||
try ets:update_element(?CHAN_INFO_TAB, Chan, {2, Info})
|
||||
try
|
||||
ets:update_element(?CHAN_INFO_TAB, Chan, {2, Info})
|
||||
catch
|
||||
error:badarg -> false
|
||||
end.
|
||||
|
||||
%% @doc Get channel's stats.
|
||||
-spec(get_chan_stats(emqx_types:clientid()) -> maybe(emqx_types:stats())).
|
||||
-spec get_chan_stats(emqx_types:clientid()) -> maybe(emqx_types:stats()).
|
||||
get_chan_stats(ClientId) ->
|
||||
with_channel(ClientId, fun(ChanPid) -> get_chan_stats(ClientId, ChanPid) end).
|
||||
|
||||
-spec(do_get_chan_stats(emqx_types:clientid(), chan_pid())
|
||||
-> maybe(emqx_types:stats())).
|
||||
-spec do_get_chan_stats(emqx_types:clientid(), chan_pid()) ->
|
||||
maybe(emqx_types:stats()).
|
||||
do_get_chan_stats(ClientId, ChanPid) ->
|
||||
Chan = {ClientId, ChanPid},
|
||||
try ets:lookup_element(?CHAN_INFO_TAB, Chan, 3)
|
||||
try
|
||||
ets:lookup_element(?CHAN_INFO_TAB, Chan, 3)
|
||||
catch
|
||||
error:badarg -> undefined
|
||||
end.
|
||||
|
||||
-spec(get_chan_stats(emqx_types:clientid(), chan_pid())
|
||||
-> maybe(emqx_types:stats())).
|
||||
-spec get_chan_stats(emqx_types:clientid(), chan_pid()) ->
|
||||
maybe(emqx_types:stats()).
|
||||
get_chan_stats(ClientId, ChanPid) ->
|
||||
wrap_rpc(emqx_cm_proto_v1:get_chan_stats(ClientId, ChanPid)).
|
||||
|
||||
%% @doc Set channel's stats.
|
||||
-spec(set_chan_stats(emqx_types:clientid(), emqx_types:stats()) -> boolean()).
|
||||
-spec set_chan_stats(emqx_types:clientid(), emqx_types:stats()) -> boolean().
|
||||
set_chan_stats(ClientId, Stats) when is_binary(ClientId) ->
|
||||
set_chan_stats(ClientId, self(), Stats).
|
||||
|
||||
-spec(set_chan_stats(emqx_types:clientid(), chan_pid(), emqx_types:stats())
|
||||
-> boolean()).
|
||||
-spec set_chan_stats(emqx_types:clientid(), chan_pid(), emqx_types:stats()) ->
|
||||
boolean().
|
||||
set_chan_stats(ClientId, ChanPid, Stats) ->
|
||||
Chan = {ClientId, ChanPid},
|
||||
try ets:update_element(?CHAN_INFO_TAB, Chan, {3, Stats})
|
||||
try
|
||||
ets:update_element(?CHAN_INFO_TAB, Chan, {3, Stats})
|
||||
catch
|
||||
error:badarg -> false
|
||||
end.
|
||||
|
||||
%% @doc Open a session.
|
||||
-spec(open_session(boolean(), emqx_types:clientinfo(), emqx_types:conninfo())
|
||||
-> {ok, #{session := emqx_session:session(),
|
||||
present := boolean(),
|
||||
pendings => list()}}
|
||||
| {error, Reason :: term()}).
|
||||
-spec open_session(boolean(), emqx_types:clientinfo(), emqx_types:conninfo()) ->
|
||||
{ok, #{
|
||||
session := emqx_session:session(),
|
||||
present := boolean(),
|
||||
pendings => list()
|
||||
}}
|
||||
| {error, Reason :: term()}.
|
||||
open_session(true, ClientInfo = #{clientid := ClientId}, ConnInfo) ->
|
||||
Self = self(),
|
||||
CleanStart = fun(_) ->
|
||||
ok = discard_session(ClientId),
|
||||
ok = emqx_persistent_session:discard_if_present(ClientId),
|
||||
Session = create_session(ClientInfo, ConnInfo),
|
||||
Session1 = emqx_persistent_session:persist(ClientInfo, ConnInfo, Session),
|
||||
register_channel(ClientId, Self, ConnInfo),
|
||||
{ok, #{session => Session1, present => false}}
|
||||
end,
|
||||
ok = discard_session(ClientId),
|
||||
ok = emqx_persistent_session:discard_if_present(ClientId),
|
||||
Session = create_session(ClientInfo, ConnInfo),
|
||||
Session1 = emqx_persistent_session:persist(ClientInfo, ConnInfo, Session),
|
||||
register_channel(ClientId, Self, ConnInfo),
|
||||
{ok, #{session => Session1, present => false}}
|
||||
end,
|
||||
emqx_cm_locker:trans(ClientId, CleanStart);
|
||||
|
||||
open_session(false, ClientInfo = #{clientid := ClientId}, ConnInfo) ->
|
||||
Self = self(),
|
||||
ResumeStart = fun(_) ->
|
||||
CreateSess =
|
||||
fun() ->
|
||||
Session = create_session(ClientInfo, ConnInfo),
|
||||
Session1 = emqx_persistent_session:persist(
|
||||
ClientInfo,ConnInfo, Session),
|
||||
register_channel(ClientId, Self, ConnInfo),
|
||||
{ok, #{session => Session1, present => false}}
|
||||
end,
|
||||
case takeover_session(ClientId) of
|
||||
{persistent, Session} ->
|
||||
%% This is a persistent session without a managing process.
|
||||
{Session1, Pendings} =
|
||||
emqx_persistent_session:resume(ClientInfo, ConnInfo, Session),
|
||||
register_channel(ClientId, Self, ConnInfo),
|
||||
CreateSess =
|
||||
fun() ->
|
||||
Session = create_session(ClientInfo, ConnInfo),
|
||||
Session1 = emqx_persistent_session:persist(
|
||||
ClientInfo, ConnInfo, Session
|
||||
),
|
||||
register_channel(ClientId, Self, ConnInfo),
|
||||
{ok, #{session => Session1, present => false}}
|
||||
end,
|
||||
case takeover_session(ClientId) of
|
||||
{persistent, Session} ->
|
||||
%% This is a persistent session without a managing process.
|
||||
{Session1, Pendings} =
|
||||
emqx_persistent_session:resume(ClientInfo, ConnInfo, Session),
|
||||
register_channel(ClientId, Self, ConnInfo),
|
||||
|
||||
{ok, #{session => Session1,
|
||||
present => true,
|
||||
pendings => Pendings}};
|
||||
{living, ConnMod, ChanPid, Session} ->
|
||||
ok = emqx_session:resume(ClientInfo, Session),
|
||||
case request_stepdown(
|
||||
{takeover, 'end'},
|
||||
ConnMod,
|
||||
ChanPid) of
|
||||
{ok, Pendings} ->
|
||||
Session1 = emqx_persistent_session:persist(
|
||||
ClientInfo, ConnInfo, Session),
|
||||
register_channel(ClientId, Self, ConnInfo),
|
||||
{ok, #{session => Session1,
|
||||
present => true,
|
||||
pendings => Pendings}};
|
||||
{error, _} ->
|
||||
CreateSess()
|
||||
end;
|
||||
{expired, OldSession} ->
|
||||
_ = emqx_persistent_session:discard(ClientId, OldSession),
|
||||
Session = create_session(ClientInfo, ConnInfo),
|
||||
Session1 = emqx_persistent_session:persist( ClientInfo
|
||||
, ConnInfo
|
||||
, Session
|
||||
),
|
||||
register_channel(ClientId, Self, ConnInfo),
|
||||
{ok, #{session => Session1, present => false}};
|
||||
none ->
|
||||
CreateSess()
|
||||
end
|
||||
end,
|
||||
{ok, #{
|
||||
session => Session1,
|
||||
present => true,
|
||||
pendings => Pendings
|
||||
}};
|
||||
{living, ConnMod, ChanPid, Session} ->
|
||||
ok = emqx_session:resume(ClientInfo, Session),
|
||||
case
|
||||
request_stepdown(
|
||||
{takeover, 'end'},
|
||||
ConnMod,
|
||||
ChanPid
|
||||
)
|
||||
of
|
||||
{ok, Pendings} ->
|
||||
Session1 = emqx_persistent_session:persist(
|
||||
ClientInfo, ConnInfo, Session
|
||||
),
|
||||
register_channel(ClientId, Self, ConnInfo),
|
||||
{ok, #{
|
||||
session => Session1,
|
||||
present => true,
|
||||
pendings => Pendings
|
||||
}};
|
||||
{error, _} ->
|
||||
CreateSess()
|
||||
end;
|
||||
{expired, OldSession} ->
|
||||
_ = emqx_persistent_session:discard(ClientId, OldSession),
|
||||
Session = create_session(ClientInfo, ConnInfo),
|
||||
Session1 = emqx_persistent_session:persist(
|
||||
ClientInfo,
|
||||
ConnInfo,
|
||||
Session
|
||||
),
|
||||
register_channel(ClientId, Self, ConnInfo),
|
||||
{ok, #{session => Session1, present => false}};
|
||||
none ->
|
||||
CreateSess()
|
||||
end
|
||||
end,
|
||||
emqx_cm_locker:trans(ClientId, ResumeStart).
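%% Caller-side sketch of open_session/3 (an assumed caller, not part of this
%% module): the clean_start flag is assumed to be carried in the connection info
%% map, and the result shapes follow the spec above.
open(ClientInfo = #{clientid := ClientId}, ConnInfo = #{clean_start := CleanStart}) ->
    case emqx_cm:open_session(CleanStart, ClientInfo, ConnInfo) of
        {ok, #{session := Session, present := false}} ->
            %% Fresh session, nothing to replay.
            {ok, ClientId, Session, []};
        {ok, #{session := Session, present := true, pendings := Pendings}} ->
            %% Resumed an existing session; Pendings are deliveries to replay.
            {ok, ClientId, Session, Pendings};
        {error, _} = Error ->
            Error
    end.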
|
||||
|
||||
create_session(ClientInfo, ConnInfo) ->
|
||||
|
|
@ -318,36 +345,40 @@ create_session(ClientInfo, ConnInfo) ->
|
|||
ok = emqx_hooks:run('session.created', [ClientInfo, emqx_session:info(Session)]),
|
||||
Session.
|
||||
|
||||
get_session_confs(#{zone := Zone, clientid := ClientId}, #{receive_maximum := MaxInflight, expiry_interval := EI}) ->
|
||||
#{clientid => ClientId,
|
||||
max_subscriptions => get_mqtt_conf(Zone, max_subscriptions),
|
||||
upgrade_qos => get_mqtt_conf(Zone, upgrade_qos),
|
||||
max_inflight => MaxInflight,
|
||||
retry_interval => get_mqtt_conf(Zone, retry_interval),
|
||||
await_rel_timeout => get_mqtt_conf(Zone, await_rel_timeout),
|
||||
mqueue => mqueue_confs(Zone),
|
||||
%% TODO: Add conf for allowing/disallowing persistent sessions.
|
||||
%% Note that the connection info is already enriched to have
|
||||
%% default config values for session expiry.
|
||||
is_persistent => EI > 0
|
||||
}.
|
||||
get_session_confs(#{zone := Zone, clientid := ClientId}, #{
|
||||
receive_maximum := MaxInflight, expiry_interval := EI
|
||||
}) ->
|
||||
#{
|
||||
clientid => ClientId,
|
||||
max_subscriptions => get_mqtt_conf(Zone, max_subscriptions),
|
||||
upgrade_qos => get_mqtt_conf(Zone, upgrade_qos),
|
||||
max_inflight => MaxInflight,
|
||||
retry_interval => get_mqtt_conf(Zone, retry_interval),
|
||||
await_rel_timeout => get_mqtt_conf(Zone, await_rel_timeout),
|
||||
mqueue => mqueue_confs(Zone),
|
||||
%% TODO: Add conf for allowing/disallowing persistent sessions.
|
||||
%% Note that the connection info is already enriched to have
|
||||
%% default config values for session expiry.
|
||||
is_persistent => EI > 0
|
||||
}.
|
||||
|
||||
mqueue_confs(Zone) ->
|
||||
#{max_len => get_mqtt_conf(Zone, max_mqueue_len),
|
||||
store_qos0 => get_mqtt_conf(Zone, mqueue_store_qos0),
|
||||
priorities => get_mqtt_conf(Zone, mqueue_priorities),
|
||||
default_priority => get_mqtt_conf(Zone, mqueue_default_priority)
|
||||
}.
|
||||
#{
|
||||
max_len => get_mqtt_conf(Zone, max_mqueue_len),
|
||||
store_qos0 => get_mqtt_conf(Zone, mqueue_store_qos0),
|
||||
priorities => get_mqtt_conf(Zone, mqueue_priorities),
|
||||
default_priority => get_mqtt_conf(Zone, mqueue_default_priority)
|
||||
}.
|
||||
|
||||
get_mqtt_conf(Zone, Key) ->
|
||||
emqx_config:get_zone_conf(Zone, [mqtt, Key]).
|
||||
|
||||
%% @doc Try to takeover a session.
|
||||
-spec takeover_session(emqx_types:clientid()) ->
|
||||
none
|
||||
| {living, atom(), pid(), emqx_session:session()}
|
||||
| {persistent, emqx_session:session()}
|
||||
| {expired, emqx_session:session()}.
|
||||
none
|
||||
| {living, atom(), pid(), emqx_session:session()}
|
||||
| {persistent, emqx_session:session()}
|
||||
| {expired, emqx_session:session()}.
|
||||
takeover_session(ClientId) ->
|
||||
case lookup_channels(ClientId) of
|
||||
[] ->
|
||||
|
|
@ -357,20 +388,28 @@ takeover_session(ClientId) ->
|
|||
ChanPids ->
|
||||
[ChanPid | StalePids] = lists:reverse(ChanPids),
|
||||
?SLOG(warning, #{msg => "more_than_one_channel_found", chan_pids => ChanPids}),
|
||||
lists:foreach(fun(StalePid) ->
|
||||
catch discard_session(ClientId, StalePid)
|
||||
end, StalePids),
|
||||
lists:foreach(
|
||||
fun(StalePid) ->
|
||||
catch discard_session(ClientId, StalePid)
|
||||
end,
|
||||
StalePids
|
||||
),
|
||||
takeover_session(ClientId, ChanPid)
|
||||
end.
|
||||
|
||||
takeover_session(ClientId, Pid) ->
|
||||
try do_takeover_session(ClientId, Pid)
|
||||
try
|
||||
do_takeover_session(ClientId, Pid)
|
||||
catch
|
||||
_ : R when R == noproc;
|
||||
R == timeout;
|
||||
R == unexpected_exception -> %% request_stepdown/3
|
||||
_:R when
|
||||
R == noproc;
|
||||
R == timeout;
|
||||
%% request_stepdown/3
|
||||
R == unexpected_exception
|
||||
->
|
||||
emqx_persistent_session:lookup(ClientId);
|
||||
_ : {'EXIT', {noproc, _}} -> % rpc_call/3
|
||||
% rpc_call/3
|
||||
_:{'EXIT', {noproc, _}} ->
|
||||
emqx_persistent_session:lookup(ClientId)
|
||||
end.
|
||||
|
||||
|
|
@ -390,7 +429,7 @@ do_takeover_session(ClientId, ChanPid) ->
|
|||
wrap_rpc(emqx_cm_proto_v1:takeover_session(ClientId, ChanPid)).
|
||||
|
||||
%% @doc Discard all the sessions identified by the ClientId.
|
||||
-spec(discard_session(emqx_types:clientid()) -> ok).
|
||||
-spec discard_session(emqx_types:clientid()) -> ok.
|
||||
discard_session(ClientId) when is_binary(ClientId) ->
|
||||
case lookup_channels(ClientId) of
|
||||
[] -> ok;
|
||||
|
|
@ -401,11 +440,12 @@ discard_session(ClientId) when is_binary(ClientId) ->
|
|||
%% If failed to kick (e.g. timeout) force a kill.
|
||||
%% Keeping the stale pid around, or returning error or raise an exception
|
||||
%% benefits nobody.
|
||||
-spec request_stepdown(Action, module(), pid())
|
||||
-> ok
|
||||
| {ok, emqx_session:session() | list(emqx_type:deliver())}
|
||||
| {error, term()}
|
||||
when Action :: kick | discard | {takeover, 'begin'} | {takeover, 'end'}.
|
||||
-spec request_stepdown(Action, module(), pid()) ->
|
||||
ok
|
||||
| {ok, emqx_session:session() | list(emqx_type:deliver())}
|
||||
| {error, term()}
|
||||
when
|
||||
Action :: kick | discard | {takeover, 'begin'} | {takeover, 'end'}.
|
||||
request_stepdown(Action, ConnMod, Pid) ->
|
||||
Timeout =
|
||||
case Action == kick orelse Action == discard of
|
||||
|
|
@ -420,27 +460,40 @@ request_stepdown(Action, ConnMod, Pid) ->
|
|||
ok -> ok;
|
||||
Reply -> {ok, Reply}
|
||||
catch
|
||||
_ : noproc -> % emqx_ws_connection: call
|
||||
% emqx_ws_connection: call
|
||||
_:noproc ->
|
||||
ok = ?tp(debug, "session_already_gone", #{pid => Pid, action => Action}),
|
||||
{error, noproc};
|
||||
_ : {noproc, _} -> % emqx_connection: gen_server:call
|
||||
% emqx_connection: gen_server:call
|
||||
_:{noproc, _} ->
|
||||
ok = ?tp(debug, "session_already_gone", #{pid => Pid, action => Action}),
|
||||
{error, noproc};
|
||||
_ : {shutdown, _} ->
|
||||
_:{shutdown, _} ->
|
||||
ok = ?tp(debug, "session_already_shutdown", #{pid => Pid, action => Action}),
|
||||
{error, noproc};
|
||||
_ : {{shutdown, _}, _} ->
|
||||
_:{{shutdown, _}, _} ->
|
||||
ok = ?tp(debug, "session_already_shutdown", #{pid => Pid, action => Action}),
|
||||
{error, noproc};
|
||||
_ : {timeout, {gen_server, call, _}} ->
|
||||
?tp(warning, "session_stepdown_request_timeout",
|
||||
#{pid => Pid, action => Action, stale_channel => stale_channel_info(Pid)}),
|
||||
_:{timeout, {gen_server, call, _}} ->
|
||||
?tp(
|
||||
warning,
|
||||
"session_stepdown_request_timeout",
|
||||
#{pid => Pid, action => Action, stale_channel => stale_channel_info(Pid)}
|
||||
),
|
||||
ok = force_kill(Pid),
|
||||
{error, timeout};
|
||||
_ : Error : St ->
|
||||
?tp(error, "session_stepdown_request_exception",
|
||||
#{pid => Pid, action => Action, reason => Error, stacktrace => St,
|
||||
stale_channel => stale_channel_info(Pid)}),
|
||||
_:Error:St ->
|
||||
?tp(
|
||||
error,
|
||||
"session_stepdown_request_exception",
|
||||
#{
|
||||
pid => Pid,
|
||||
action => Action,
|
||||
reason => Error,
|
||||
stacktrace => St,
|
||||
stale_channel => stale_channel_info(Pid)
|
||||
}
|
||||
),
|
||||
ok = force_kill(Pid),
|
||||
{error, unexpected_exception}
|
||||
end,
|
||||
|
|
@ -477,33 +530,46 @@ kick_session(Action, ClientId, ChanPid) ->
|
|||
try
|
||||
wrap_rpc(emqx_cm_proto_v1:kick_session(Action, ClientId, ChanPid))
|
||||
catch
|
||||
Error : Reason ->
|
||||
Error:Reason ->
|
||||
%% This should mostly be RPC failures.
|
||||
%% However, if the node is still running the old version
|
||||
%% code (prior to emqx app 4.3.10) some of the RPC handler
|
||||
%% exceptions may get propagated to a new version node
|
||||
?SLOG(error, #{ msg => "failed_to_kick_session_on_remote_node"
|
||||
, node => node(ChanPid)
|
||||
, action => Action
|
||||
, error => Error
|
||||
, reason => Reason
|
||||
},
|
||||
#{clientid => ClientId})
|
||||
?SLOG(
|
||||
error,
|
||||
#{
|
||||
msg => "failed_to_kick_session_on_remote_node",
|
||||
node => node(ChanPid),
|
||||
action => Action,
|
||||
error => Error,
|
||||
reason => Reason
|
||||
},
|
||||
#{clientid => ClientId}
|
||||
)
|
||||
end.
|
||||
|
||||
kick_session(ClientId) ->
|
||||
case lookup_channels(ClientId) of
|
||||
[] ->
|
||||
?SLOG(warning, #{msg => "kicked_an_unknown_session"},
|
||||
#{clientid => ClientId}),
|
||||
?SLOG(
|
||||
warning,
|
||||
#{msg => "kicked_an_unknown_session"},
|
||||
#{clientid => ClientId}
|
||||
),
|
||||
ok;
|
||||
ChanPids ->
|
||||
case length(ChanPids) > 1 of
|
||||
true ->
|
||||
?SLOG(warning, #{msg => "more_than_one_channel_found",
|
||||
chan_pids => ChanPids},
|
||||
#{clientid => ClientId});
|
||||
false -> ok
|
||||
?SLOG(
|
||||
warning,
|
||||
#{
|
||||
msg => "more_than_one_channel_found",
|
||||
chan_pids => ChanPids
|
||||
},
|
||||
#{clientid => ClientId}
|
||||
);
|
||||
false ->
|
||||
ok
|
||||
end,
|
||||
lists:foreach(fun(Pid) -> kick_session(ClientId, Pid) end, ChanPids)
|
||||
end.
|
||||
|
|
@ -514,9 +580,9 @@ kick_session(ClientId) ->
|
|||
|
||||
with_channel(ClientId, Fun) ->
|
||||
case lookup_channels(ClientId) of
|
||||
[] -> undefined;
|
||||
[] -> undefined;
|
||||
[Pid] -> Fun(Pid);
|
||||
Pids -> Fun(lists:last(Pids))
|
||||
Pids -> Fun(lists:last(Pids))
|
||||
end.
|
||||
|
||||
%% @doc Get all registered channel pids. Debug/test interface
|
||||
|
|
@ -529,14 +595,13 @@ all_client_ids() ->
|
|||
Pat = [{{'$1', '_'}, [], ['$1']}],
|
||||
ets:select(?CHAN_TAB, Pat).
|
||||
|
||||
|
||||
%% @doc Lookup channels.
|
||||
-spec(lookup_channels(emqx_types:clientid()) -> list(chan_pid())).
|
||||
-spec lookup_channels(emqx_types:clientid()) -> list(chan_pid()).
|
||||
lookup_channels(ClientId) ->
|
||||
lookup_channels(global, ClientId).
|
||||
|
||||
%% @doc Lookup local or global channels.
|
||||
-spec(lookup_channels(local | global, emqx_types:clientid()) -> list(chan_pid())).
|
||||
-spec lookup_channels(local | global, emqx_types:clientid()) -> list(chan_pid()).
|
||||
lookup_channels(global, ClientId) ->
|
||||
case emqx_cm_registry:is_enabled() of
|
||||
true ->
|
||||
|
|
@ -544,21 +609,22 @@ lookup_channels(global, ClientId) ->
|
|||
false ->
|
||||
lookup_channels(local, ClientId)
|
||||
end;
|
||||
|
||||
lookup_channels(local, ClientId) ->
|
||||
[ChanPid || {_, ChanPid} <- ets:lookup(?CHAN_TAB, ClientId)].
|
||||
|
||||
-spec lookup_client({clientid, emqx_types:clientid()} | {username, emqx_types:username()}) ->
|
||||
[channel_info()].
|
||||
[channel_info()].
|
||||
lookup_client({username, Username}) ->
|
||||
MatchSpec = [{ {'_', #{clientinfo => #{username => '$1'}}, '_'}
|
||||
, [{'=:=','$1', Username}]
|
||||
, ['$_']
|
||||
}],
|
||||
MatchSpec = [
|
||||
{{'_', #{clientinfo => #{username => '$1'}}, '_'}, [{'=:=', '$1', Username}], ['$_']}
|
||||
],
|
||||
ets:select(emqx_channel_info, MatchSpec);
|
||||
lookup_client({clientid, ClientId}) ->
|
||||
[Rec || Key <- ets:lookup(emqx_channel, ClientId)
|
||||
, Rec <- ets:lookup(emqx_channel_info, Key)].
|
||||
[
|
||||
Rec
|
||||
|| Key <- ets:lookup(emqx_channel, ClientId),
|
||||
Rec <- ets:lookup(emqx_channel_info, Key)
|
||||
].
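%% Hypothetical lookups against the two supported keys; the username and client
%% id are placeholders, and each returned element is a channel_info() tuple of
%% the form {{ClientId, ChanPid}, Info, Stats}.
find_clients() ->
    ByUsername = emqx_cm:lookup_client({username, <<"alice">>}),
    ByClientId = emqx_cm:lookup_client({clientid, <<"client-1">>}),
    {ByUsername, ByClientId}.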
|
||||
|
||||
%% @private
|
||||
wrap_rpc(Result) ->
|
||||
|
|
@ -636,10 +702,12 @@ update_stats({Tab, Stat, MaxStat}) ->
|
|||
end.
|
||||
|
||||
-spec do_get_chann_conn_mod(emqx_types:clientid(), chan_pid()) ->
|
||||
module() | undefined.
|
||||
module() | undefined.
|
||||
do_get_chann_conn_mod(ClientId, ChanPid) ->
|
||||
Chan = {ClientId, ChanPid},
|
||||
try [ConnMod] = ets:lookup_element(?CHAN_CONN_TAB, Chan, 2), ConnMod
|
||||
try
|
||||
[ConnMod] = ets:lookup_element(?CHAN_CONN_TAB, Chan, 2),
|
||||
ConnMod
|
||||
catch
|
||||
error:badarg -> undefined
|
||||
end.
|
||||
|
|
|
|||
|
|
@ -21,46 +21,53 @@
|
|||
|
||||
-export([start_link/0]).
|
||||
|
||||
-export([ trans/2
|
||||
, trans/3
|
||||
, lock/1
|
||||
, lock/2
|
||||
, unlock/1
|
||||
]).
|
||||
-export([
|
||||
trans/2,
|
||||
trans/3,
|
||||
lock/1,
|
||||
lock/2,
|
||||
unlock/1
|
||||
]).
|
||||
|
||||
-spec(start_link() -> startlink_ret()).
|
||||
-spec start_link() -> startlink_ret().
|
||||
start_link() ->
|
||||
ekka_locker:start_link(?MODULE).
|
||||
|
||||
-spec(trans(emqx_types:clientid(), fun(([node()]) -> any())) -> any()).
|
||||
-spec trans(emqx_types:clientid(), fun(([node()]) -> any())) -> any().
|
||||
trans(ClientId, Fun) ->
|
||||
trans(ClientId, Fun, undefined).
|
||||
|
||||
-spec(trans(maybe(emqx_types:clientid()),
|
||||
fun(([node()])-> any()), ekka_locker:piggyback()) -> any()).
|
||||
-spec trans(
|
||||
maybe(emqx_types:clientid()),
|
||||
fun(([node()]) -> any()),
|
||||
ekka_locker:piggyback()
|
||||
) -> any().
|
||||
trans(undefined, Fun, _Piggyback) ->
|
||||
Fun([]);
|
||||
trans(ClientId, Fun, Piggyback) ->
|
||||
case lock(ClientId, Piggyback) of
|
||||
{true, Nodes} ->
|
||||
try Fun(Nodes) after unlock(ClientId) end;
|
||||
try
|
||||
Fun(Nodes)
|
||||
after
|
||||
unlock(ClientId)
|
||||
end;
|
||||
{false, _Nodes} ->
|
||||
{error, client_id_unavailable}
|
||||
end.
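%% Minimal usage sketch (assumed caller, not part of this module): serialize work
%% on one client id across the cluster. The fun receives the nodes on which the
%% lock was acquired; if the lock cannot be taken, trans/2 returns
%% {error, client_id_unavailable} without running the fun.
with_client_lock(ClientId, Work) when is_function(Work, 0) ->
    emqx_cm_locker:trans(ClientId, fun(_Nodes) -> Work() end).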
|
||||
|
||||
-spec(lock(emqx_types:clientid()) -> ekka_locker:lock_result()).
|
||||
-spec lock(emqx_types:clientid()) -> ekka_locker:lock_result().
|
||||
lock(ClientId) ->
|
||||
ekka_locker:acquire(?MODULE, ClientId, strategy()).
|
||||
|
||||
-spec(lock(emqx_types:clientid(), ekka_locker:piggyback()) -> ekka_locker:lock_result()).
|
||||
-spec lock(emqx_types:clientid(), ekka_locker:piggyback()) -> ekka_locker:lock_result().
|
||||
lock(ClientId, Piggyback) ->
|
||||
ekka_locker:acquire(?MODULE, ClientId, strategy(), Piggyback).
|
||||
|
||||
-spec(unlock(emqx_types:clientid()) -> {boolean(), [node()]}).
|
||||
-spec unlock(emqx_types:clientid()) -> {boolean(), [node()]}.
|
||||
unlock(ClientId) ->
|
||||
ekka_locker:release(?MODULE, ClientId, strategy()).
|
||||
|
||||
-spec(strategy() -> local | leader | quorum | all).
|
||||
-spec strategy() -> local | leader | quorum | all.
|
||||
strategy() ->
|
||||
emqx:get_config([broker, session_locking_strategy]).
|
||||
|
||||
|
|
|
|||
|
|
@ -23,25 +23,26 @@
|
|||
-include("logger.hrl").
|
||||
-include("types.hrl").
|
||||
|
||||
|
||||
-export([start_link/0]).
|
||||
|
||||
-export([is_enabled/0]).
|
||||
|
||||
-export([ register_channel/1
|
||||
, unregister_channel/1
|
||||
]).
|
||||
-export([
|
||||
register_channel/1,
|
||||
unregister_channel/1
|
||||
]).
|
||||
|
||||
-export([lookup_channels/1]).
|
||||
|
||||
%% gen_server callbacks
|
||||
-export([ init/1
|
||||
, handle_call/3
|
||||
, handle_cast/2
|
||||
, handle_info/2
|
||||
, terminate/2
|
||||
, code_change/3
|
||||
]).
|
||||
-export([
|
||||
init/1,
|
||||
handle_call/3,
|
||||
handle_cast/2,
|
||||
handle_info/2,
|
||||
terminate/2,
|
||||
code_change/3
|
||||
]).
|
||||
|
||||
-define(REGISTRY, ?MODULE).
|
||||
-define(TAB, emqx_channel_registry).
|
||||
|
|
@ -50,7 +51,7 @@
|
|||
-record(channel, {chid, pid}).
|
||||
|
||||
%% @doc Start the global channel registry.
|
||||
-spec(start_link() -> startlink_ret()).
|
||||
-spec start_link() -> startlink_ret().
|
||||
start_link() ->
|
||||
gen_server:start_link({local, ?REGISTRY}, ?MODULE, [], []).
|
||||
|
||||
|
|
@ -59,16 +60,17 @@ start_link() ->
|
|||
%%--------------------------------------------------------------------
|
||||
|
||||
%% @doc Is the global registry enabled?
|
||||
-spec(is_enabled() -> boolean()).
|
||||
-spec is_enabled() -> boolean().
|
||||
is_enabled() ->
|
||||
emqx:get_config([broker, enable_session_registry]).
|
||||
|
||||
%% @doc Register a global channel.
|
||||
-spec(register_channel(emqx_types:clientid()
|
||||
| {emqx_types:clientid(), pid()}) -> ok).
|
||||
-spec register_channel(
|
||||
emqx_types:clientid()
|
||||
| {emqx_types:clientid(), pid()}
|
||||
) -> ok.
|
||||
register_channel(ClientId) when is_binary(ClientId) ->
|
||||
register_channel({ClientId, self()});
|
||||
|
||||
register_channel({ClientId, ChanPid}) when is_binary(ClientId), is_pid(ChanPid) ->
|
||||
case is_enabled() of
|
||||
true -> mria:dirty_write(?TAB, record(ClientId, ChanPid));
|
||||
|
|
@ -76,11 +78,12 @@ register_channel({ClientId, ChanPid}) when is_binary(ClientId), is_pid(ChanPid)
|
|||
end.
|
||||
|
||||
%% @doc Unregister a global channel.
|
||||
-spec(unregister_channel(emqx_types:clientid()
|
||||
| {emqx_types:clientid(), pid()}) -> ok).
|
||||
-spec unregister_channel(
|
||||
emqx_types:clientid()
|
||||
| {emqx_types:clientid(), pid()}
|
||||
) -> ok.
|
||||
unregister_channel(ClientId) when is_binary(ClientId) ->
|
||||
unregister_channel({ClientId, self()});
|
||||
|
||||
unregister_channel({ClientId, ChanPid}) when is_binary(ClientId), is_pid(ChanPid) ->
|
||||
case is_enabled() of
|
||||
true -> mria:dirty_delete_object(?TAB, record(ClientId, ChanPid));
|
||||
|
|
@ -88,7 +91,7 @@ unregister_channel({ClientId, ChanPid}) when is_binary(ClientId), is_pid(ChanPid
|
|||
end.
|
||||
|
||||
%% @doc Lookup the global channels.
|
||||
-spec(lookup_channels(emqx_types:clientid()) -> list(pid())).
|
||||
-spec lookup_channels(emqx_types:clientid()) -> list(pid()).
|
||||
lookup_channels(ClientId) ->
|
||||
[ChanPid || #channel{pid = ChanPid} <- mnesia:dirty_read(?TAB, ClientId)].
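%% Sketch only, assuming broker.enable_session_registry is true and the mria
%% tables are up; when the registry is disabled, register/unregister are no-ops
%% per is_enabled/0 and the lookup returns [].
register_self(ClientId) when is_binary(ClientId) ->
    %% Registers {ClientId, self()} in the global table.
    ok = emqx_cm_registry:register_channel(ClientId),
    Pids = emqx_cm_registry:lookup_channels(ClientId),
    true = lists:member(self(), Pids),
    ok = emqx_cm_registry:unregister_channel(ClientId).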
|
||||
|
||||
|
|
@ -102,13 +105,18 @@ record(ClientId, ChanPid) ->
|
|||
init([]) ->
|
||||
mria_config:set_dirty_shard(?CM_SHARD, true),
|
||||
ok = mria:create_table(?TAB, [
|
||||
{type, bag},
|
||||
{rlog_shard, ?CM_SHARD},
|
||||
{storage, ram_copies},
|
||||
{record_name, channel},
|
||||
{attributes, record_info(fields, channel)},
|
||||
{storage_properties, [{ets, [{read_concurrency, true},
|
||||
{write_concurrency, true}]}]}]),
|
||||
{type, bag},
|
||||
{rlog_shard, ?CM_SHARD},
|
||||
{storage, ram_copies},
|
||||
{record_name, channel},
|
||||
{attributes, record_info(fields, channel)},
|
||||
{storage_properties, [
|
||||
{ets, [
|
||||
{read_concurrency, true},
|
||||
{write_concurrency, true}
|
||||
]}
|
||||
]}
|
||||
]),
|
||||
ok = mria_rlog:wait_for_shards([?CM_SHARD], infinity),
|
||||
ok = ekka:monitor(membership),
|
||||
{ok, #{}}.
|
||||
|
|
@ -124,14 +132,11 @@ handle_cast(Msg, State) ->
|
|||
handle_info({membership, {mnesia, down, Node}}, State) ->
|
||||
cleanup_channels(Node),
|
||||
{noreply, State};
|
||||
|
||||
handle_info({membership, {node, down, Node}}, State) ->
|
||||
cleanup_channels(Node),
|
||||
{noreply, State};
|
||||
|
||||
handle_info({membership, _Event}, State) ->
|
||||
{noreply, State};
|
||||
|
||||
handle_info(Info, State) ->
|
||||
?SLOG(error, #{msg => "unexpected_info", info => Info}),
|
||||
{noreply, State}.
|
||||
|
|
@ -147,10 +152,12 @@ code_change(_OldVsn, State, _Extra) ->
|
|||
%%--------------------------------------------------------------------
|
||||
|
||||
cleanup_channels(Node) ->
|
||||
global:trans({?LOCK, self()},
|
||||
fun() ->
|
||||
mria:transaction(?CM_SHARD, fun do_cleanup_channels/1, [Node])
|
||||
end).
|
||||
global:trans(
|
||||
{?LOCK, self()},
|
||||
fun() ->
|
||||
mria:transaction(?CM_SHARD, fun do_cleanup_channels/1, [Node])
|
||||
end
|
||||
).
|
||||
|
||||
do_cleanup_channels(Node) ->
|
||||
Pat = [{#channel{pid = '$1', _ = '_'}, [{'==', {node, '$1'}, Node}], ['$_']}],
|
||||
|
|
|
|||
|
|
@ -34,15 +34,16 @@ start_link() ->
|
|||
%%--------------------------------------------------------------------
|
||||
|
||||
init([]) ->
|
||||
SupFlags = #{strategy => one_for_one,
|
||||
intensity => 100,
|
||||
period => 10
|
||||
},
|
||||
Banned = child_spec(emqx_banned, 1000, worker),
|
||||
SupFlags = #{
|
||||
strategy => one_for_one,
|
||||
intensity => 100,
|
||||
period => 10
|
||||
},
|
||||
Banned = child_spec(emqx_banned, 1000, worker),
|
||||
Flapping = child_spec(emqx_flapping, 1000, worker),
|
||||
Locker = child_spec(emqx_cm_locker, 5000, worker),
|
||||
Locker = child_spec(emqx_cm_locker, 5000, worker),
|
||||
Registry = child_spec(emqx_cm_registry, 5000, worker),
|
||||
Manager = child_spec(emqx_cm, 5000, worker),
|
||||
Manager = child_spec(emqx_cm, 5000, worker),
|
||||
{ok, {SupFlags, [Banned, Flapping, Locker, Registry, Manager]}}.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
|
|
@ -50,10 +51,11 @@ init([]) ->
|
|||
%%--------------------------------------------------------------------
|
||||
|
||||
child_spec(Mod, Shutdown, Type) ->
|
||||
#{id => Mod,
|
||||
start => {Mod, start_link, []},
|
||||
restart => permanent,
|
||||
shutdown => Shutdown,
|
||||
type => Type,
|
||||
modules => [Mod]
|
||||
}.
|
||||
#{
|
||||
id => Mod,
|
||||
start => {Mod, start_link, []},
|
||||
restart => permanent,
|
||||
shutdown => Shutdown,
|
||||
type => Type,
|
||||
modules => [Mod]
|
||||
}.
|
||||
|
|
|
|||
|
|
@ -18,61 +18,68 @@
|
|||
-compile({no_auto_import, [get/0, get/1, put/2, erase/1]}).
|
||||
-elvis([{elvis_style, god_modules, disable}]).
|
||||
|
||||
-export([ init_load/1
|
||||
, init_load/2
|
||||
, read_override_conf/1
|
||||
, delete_override_conf_files/0
|
||||
, check_config/2
|
||||
, fill_defaults/1
|
||||
, fill_defaults/2
|
||||
, save_configs/5
|
||||
, save_to_app_env/1
|
||||
, save_to_config_map/2
|
||||
, save_to_override_conf/2
|
||||
]).
|
||||
-export([
|
||||
init_load/1,
|
||||
init_load/2,
|
||||
read_override_conf/1,
|
||||
delete_override_conf_files/0,
|
||||
check_config/2,
|
||||
fill_defaults/1,
|
||||
fill_defaults/2,
|
||||
save_configs/5,
|
||||
save_to_app_env/1,
|
||||
save_to_config_map/2,
|
||||
save_to_override_conf/2
|
||||
]).
|
||||
|
||||
-export([ get_root/1
|
||||
, get_root_raw/1
|
||||
]).
|
||||
-export([
|
||||
get_root/1,
|
||||
get_root_raw/1
|
||||
]).
|
||||
|
||||
-export([ get_default_value/1
|
||||
]).
|
||||
-export([get_default_value/1]).
|
||||
|
||||
-export([ get/1
|
||||
, get/2
|
||||
, find/1
|
||||
, find_raw/1
|
||||
, put/1
|
||||
, put/2
|
||||
, erase/1
|
||||
]).
|
||||
-export([
|
||||
get/1,
|
||||
get/2,
|
||||
find/1,
|
||||
find_raw/1,
|
||||
put/1,
|
||||
put/2,
|
||||
erase/1
|
||||
]).
|
||||
|
||||
-export([ get_raw/1
|
||||
, get_raw/2
|
||||
, put_raw/1
|
||||
, put_raw/2
|
||||
]).
|
||||
-export([
|
||||
get_raw/1,
|
||||
get_raw/2,
|
||||
put_raw/1,
|
||||
put_raw/2
|
||||
]).
|
||||
|
||||
-export([ save_schema_mod_and_names/1
|
||||
, get_schema_mod/0
|
||||
, get_schema_mod/1
|
||||
, get_root_names/0
|
||||
]).
|
||||
-export([
|
||||
save_schema_mod_and_names/1,
|
||||
get_schema_mod/0,
|
||||
get_schema_mod/1,
|
||||
get_root_names/0
|
||||
]).
|
||||
|
||||
-export([ get_zone_conf/2
|
||||
, get_zone_conf/3
|
||||
, put_zone_conf/3
|
||||
]).
|
||||
-export([
|
||||
get_zone_conf/2,
|
||||
get_zone_conf/3,
|
||||
put_zone_conf/3
|
||||
]).
|
||||
|
||||
-export([ get_listener_conf/3
|
||||
, get_listener_conf/4
|
||||
, put_listener_conf/4
|
||||
, find_listener_conf/3
|
||||
]).
|
||||
-export([
|
||||
get_listener_conf/3,
|
||||
get_listener_conf/4,
|
||||
put_listener_conf/4,
|
||||
find_listener_conf/3
|
||||
]).
|
||||
|
||||
-export([ add_handlers/0
|
||||
, remove_handlers/0
|
||||
]).
|
||||
-export([
|
||||
add_handlers/0,
|
||||
remove_handlers/0
|
||||
]).
|
||||
|
||||
-include("logger.hrl").
|
||||
|
||||
|
|
@ -88,25 +95,34 @@
|
|||
AtomKeyPath -> EXP
|
||||
catch
|
||||
error:badarg -> EXP_ON_FAIL
|
||||
end).
|
||||
end
|
||||
).
|
||||
|
||||
-export_type([update_request/0, raw_config/0, config/0, app_envs/0,
|
||||
update_opts/0, update_cmd/0, update_args/0,
|
||||
update_error/0, update_result/0]).
|
||||
-export_type([
|
||||
update_request/0,
|
||||
raw_config/0,
|
||||
config/0,
|
||||
app_envs/0,
|
||||
update_opts/0,
|
||||
update_cmd/0,
|
||||
update_args/0,
|
||||
update_error/0,
|
||||
update_result/0
|
||||
]).
|
||||
|
||||
-type update_request() :: term().
|
||||
-type update_cmd() :: {update, update_request()} | remove.
|
||||
-type update_opts() :: #{
|
||||
%% rawconf_with_defaults:
|
||||
%% fill the default values into the `raw_config` field of the return value
|
||||
%% defaults to `false`
|
||||
rawconf_with_defaults => boolean(),
|
||||
%% persistent:
|
||||
%% save the updated config to the emqx_override.conf file
|
||||
%% defaults to `true`
|
||||
persistent => boolean(),
|
||||
override_to => local | cluster
|
||||
}.
|
||||
%% rawconf_with_defaults:
|
||||
%% fill the default values into the `raw_config` field of the return value
|
||||
%% defaults to `false`
|
||||
rawconf_with_defaults => boolean(),
|
||||
%% persistent:
|
||||
%% save the updated config to the emqx_override.conf file
|
||||
%% defaults to `true`
|
||||
persistent => boolean(),
|
||||
override_to => local | cluster
|
||||
}.
|
||||
-type update_args() :: {update_cmd(), Opts :: update_opts()}.
|
||||
-type update_stage() :: pre_config_update | post_config_update.
|
||||
-type update_error() :: {update_stage(), module(), term()} | {save_configs, term()} | term().
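%% For orientation, a value of update_args() using the options documented above
%% might look like the following; the raw config payload is made up.
example_update_args() ->
    {{update, #{<<"enable">> => true}},
     #{rawconf_with_defaults => true,
       persistent => true,
       override_to => cluster}}.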
|
||||
|
|
@ -149,8 +165,11 @@ find([]) ->
|
|||
Res -> {ok, Res}
|
||||
end;
|
||||
find(KeyPath) ->
|
||||
?ATOM_CONF_PATH(KeyPath, emqx_map_lib:deep_find(AtomKeyPath, get_root(KeyPath)),
|
||||
{not_found, KeyPath}).
|
||||
?ATOM_CONF_PATH(
|
||||
KeyPath,
|
||||
emqx_map_lib:deep_find(AtomKeyPath, get_root(KeyPath)),
|
||||
{not_found, KeyPath}
|
||||
).
|
||||
|
||||
-spec find_raw(emqx_map_lib:config_key_path()) ->
|
||||
{ok, term()} | {not_found, emqx_map_lib:config_key_path(), term()}.
|
||||
|
|
@ -166,17 +185,21 @@ find_raw(KeyPath) ->
|
|||
-spec get_zone_conf(atom(), emqx_map_lib:config_key_path()) -> term().
|
||||
get_zone_conf(Zone, KeyPath) ->
|
||||
case find(?ZONE_CONF_PATH(Zone, KeyPath)) of
|
||||
{not_found, _, _} -> %% not found in zones, try to find the global config
|
||||
%% not found in zones, try to find the global config
|
||||
{not_found, _, _} ->
|
||||
?MODULE:get(KeyPath);
|
||||
{ok, Value} -> Value
|
||||
{ok, Value} ->
|
||||
Value
|
||||
end.
|
||||
|
||||
-spec get_zone_conf(atom(), emqx_map_lib:config_key_path(), term()) -> term().
|
||||
get_zone_conf(Zone, KeyPath, Default) ->
|
||||
case find(?ZONE_CONF_PATH(Zone, KeyPath)) of
|
||||
{not_found, _, _} -> %% not found in zones, try to find the global config
|
||||
%% not found in zones, try to find the global config
|
||||
{not_found, _, _} ->
|
||||
?MODULE:get(KeyPath, Default);
|
||||
{ok, Value} -> Value
|
||||
{ok, Value} ->
|
||||
Value
|
||||
end.
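%% Usage sketch of the zone fallback described in the comments above; the zone
%% name, key path and default are assumptions, not values from this change.
zone_max_packet_size(Zone) ->
    %% Zone override wins; otherwise the global [mqtt, max_packet_size] value,
    %% and finally the literal default if neither is set.
    emqx_config:get_zone_conf(Zone, [mqtt, max_packet_size], 1048576).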
|
||||
|
||||
-spec put_zone_conf(atom(), emqx_map_lib:config_key_path(), term()) -> ok.
|
||||
|
|
@ -202,9 +225,13 @@ find_listener_conf(Type, Listener, KeyPath) ->
|
|||
|
||||
-spec put(map()) -> ok.
|
||||
put(Config) ->
|
||||
maps:fold(fun(RootName, RootValue, _) ->
|
||||
maps:fold(
|
||||
fun(RootName, RootValue, _) ->
|
||||
?MODULE:put([RootName], RootValue)
|
||||
end, ok, Config).
|
||||
end,
|
||||
ok,
|
||||
Config
|
||||
).
|
||||
|
||||
erase(RootName) ->
|
||||
persistent_term:erase(?PERSIS_KEY(?CONF, bin(RootName))),
|
||||
|
|
@ -219,12 +246,14 @@ get_default_value([RootName | _] = KeyPath) ->
|
|||
case find_raw([RootName]) of
|
||||
{ok, RawConf} ->
|
||||
RawConf1 = emqx_map_lib:deep_remove(BinKeyPath, #{bin(RootName) => RawConf}),
|
||||
try fill_defaults(get_schema_mod(RootName), RawConf1) of FullConf ->
|
||||
case emqx_map_lib:deep_find(BinKeyPath, FullConf) of
|
||||
{not_found, _, _} -> {error, no_default_value};
|
||||
{ok, Val} -> {ok, Val}
|
||||
end
|
||||
catch error : Reason -> {error, Reason}
|
||||
try fill_defaults(get_schema_mod(RootName), RawConf1) of
|
||||
FullConf ->
|
||||
case emqx_map_lib:deep_find(BinKeyPath, FullConf) of
|
||||
{not_found, _, _} -> {error, no_default_value};
|
||||
{ok, Val} -> {ok, Val}
|
||||
end
|
||||
catch
|
||||
error:Reason -> {error, Reason}
|
||||
end;
|
||||
{not_found, _, _} ->
|
||||
{error, {rootname_not_found, RootName}}
|
||||
|
|
@ -238,9 +267,13 @@ get_raw(KeyPath, Default) -> do_get(?RAW_CONF, KeyPath, Default).
|
|||
|
||||
-spec put_raw(map()) -> ok.
|
||||
put_raw(Config) ->
|
||||
maps:fold(fun(RootName, RootV, _) ->
|
||||
maps:fold(
|
||||
fun(RootName, RootV, _) ->
|
||||
?MODULE:put_raw([RootName], RootV)
|
||||
end, ok, hocon_maps:ensure_plain(Config)).
|
||||
end,
|
||||
ok,
|
||||
hocon_maps:ensure_plain(Config)
|
||||
).
|
||||
|
||||
-spec put_raw(emqx_map_lib:config_key_path(), term()) -> ok.
|
||||
put_raw(KeyPath, Config) -> do_put(?RAW_CONF, KeyPath, Config).
|
||||
|
|
@ -268,10 +301,12 @@ init_load(SchemaMod, RawConf) when is_map(RawConf) ->
|
|||
RawConfWithOverrides = hocon:deep_merge(RawConfWithEnvs, Overrides),
|
||||
%% check configs against the schema
|
||||
{_AppEnvs, CheckedConf} =
|
||||
check_config(SchemaMod, RawConfWithOverrides , #{}),
|
||||
check_config(SchemaMod, RawConfWithOverrides, #{}),
|
||||
RootNames = get_root_names(),
|
||||
ok = save_to_config_map(maps:with(get_atom_root_names(), CheckedConf),
|
||||
maps:with(RootNames, RawConfWithOverrides)).
|
||||
ok = save_to_config_map(
|
||||
maps:with(get_atom_root_names(), CheckedConf),
|
||||
maps:with(RootNames, RawConfWithOverrides)
|
||||
).
|
||||
|
||||
parse_hocon(Conf) ->
|
||||
IncDirs = include_dirs(),
|
||||
|
|
@ -279,12 +314,13 @@ parse_hocon(Conf) ->
|
|||
{ok, HoconMap} ->
|
||||
HoconMap;
|
||||
{error, Reason} ->
|
||||
?SLOG(error, #{msg => "failed_to_load_hocon_conf",
|
||||
reason => Reason,
|
||||
pwd => file:get_cwd(),
|
||||
include_dirs => IncDirs,
|
||||
config_file => Conf
|
||||
}),
|
||||
?SLOG(error, #{
|
||||
msg => "failed_to_load_hocon_conf",
|
||||
reason => Reason,
|
||||
pwd => file:get_cwd(),
|
||||
include_dirs => IncDirs,
|
||||
config_file => Conf
|
||||
}),
|
||||
error(failed_to_load_hocon_conf)
|
||||
end.
|
||||
|
||||
|
|
@ -299,22 +335,26 @@ include_dirs() ->
|
|||
[filename:join(emqx:data_dir(), "configs")].
|
||||
|
||||
merge_envs(SchemaMod, RawConf) ->
|
||||
Opts = #{required => false, %% TODO: evil, remove, required should be declared in schema
|
||||
format => map,
|
||||
apply_override_envs => true
|
||||
},
|
||||
%% TODO: evil, remove, required should be declared in schema
|
||||
Opts = #{
|
||||
required => false,
|
||||
format => map,
|
||||
apply_override_envs => true
|
||||
},
|
||||
hocon_tconf:merge_env_overrides(SchemaMod, RawConf, all, Opts).
|
||||
|
||||
-spec check_config(hocon_schema:schema(), raw_config()) -> {AppEnvs, CheckedConf}
|
||||
when AppEnvs :: app_envs(), CheckedConf :: config().
|
||||
-spec check_config(hocon_schema:schema(), raw_config()) -> {AppEnvs, CheckedConf} when
|
||||
AppEnvs :: app_envs(), CheckedConf :: config().
|
||||
check_config(SchemaMod, RawConf) ->
|
||||
check_config(SchemaMod, RawConf, #{}).
|
||||
|
||||
check_config(SchemaMod, RawConf, Opts0) ->
|
||||
Opts1 = #{return_plain => true,
|
||||
required => false, %% TODO: evil, remove, required should be declared in schema
|
||||
format => map
|
||||
},
|
||||
Opts1 = #{
|
||||
return_plain => true,
|
||||
%% TODO: evil, remove, required should be declared in schema
|
||||
required => false,
|
||||
format => map
|
||||
},
|
||||
Opts = maps:merge(Opts0, Opts1),
|
||||
{AppEnvs, CheckedConf} =
|
||||
hocon_tconf:map_translate(SchemaMod, RawConf, Opts),
|
||||
|
|

@@ -323,21 +363,28 @@ check_config(SchemaMod, RawConf, Opts0) ->
-spec fill_defaults(raw_config()) -> map().
fill_defaults(RawConf) ->
RootNames = get_root_names(),
maps:fold(fun(Key, Conf, Acc) ->
maps:fold(
fun(Key, Conf, Acc) ->
SubMap = #{Key => Conf},
WithDefaults = case lists:member(Key, RootNames) of
true -> fill_defaults(get_schema_mod(Key), SubMap);
false -> SubMap
end,
WithDefaults =
case lists:member(Key, RootNames) of
true -> fill_defaults(get_schema_mod(Key), SubMap);
false -> SubMap
end,
maps:merge(Acc, WithDefaults)
end, #{}, RawConf).
end,
#{},
RawConf
).

-spec fill_defaults(module(), raw_config()) -> map().
fill_defaults(SchemaMod, RawConf) ->
hocon_tconf:check_plain(SchemaMod, RawConf,
hocon_tconf:check_plain(
SchemaMod,
RawConf,
#{required => false, only_fill_defaults => true},
root_names_from_conf(RawConf)).
root_names_from_conf(RawConf)
).

%% @doc Only for test cleanups.
%% Delete override config files.

@@ -408,9 +455,12 @@ save_configs(_AppEnvs, Conf, RawConf, OverrideConf, Opts) ->

-spec save_to_app_env([tuple()]) -> ok.
save_to_app_env(AppEnvs) ->
lists:foreach(fun({AppName, Envs}) ->
lists:foreach(
fun({AppName, Envs}) ->
[application:set_env(AppName, Par, Val) || {Par, Val} <- Envs]
end, AppEnvs).
end,
AppEnvs
).

-spec save_to_config_map(config(), raw_config()) -> ok.
save_to_config_map(Conf, RawConf) ->

@@ -422,15 +472,19 @@ save_to_override_conf(undefined, _) ->
ok;
save_to_override_conf(RawConf, Opts) ->
case override_conf_file(Opts) of
undefined -> ok;
undefined ->
ok;
FileName ->
ok = filelib:ensure_dir(FileName),
case file:write_file(FileName, hocon_pp:do(RawConf, #{})) of
ok -> ok;
ok ->
ok;
{error, Reason} ->
?SLOG(error, #{msg => "failed_to_write_override_file",
filename => FileName,
reason => Reason}),
?SLOG(error, #{
msg => "failed_to_write_override_file",
filename => FileName,
reason => Reason
}),
{error, Reason}
end
end.

@@ -449,7 +503,8 @@ load_hocon_file(FileName, LoadType) ->
Opts = #{include_dirs => include_dirs(), format => LoadType},
{ok, Raw0} = hocon:load(FileName, Opts),
Raw0;
false -> #{}
false ->
#{}
end.

do_get(Type, KeyPath) ->

@@ -461,11 +516,16 @@ do_get(Type, KeyPath) ->
end.

do_get(Type, [], Default) ->
AllConf = lists:foldl(fun
AllConf = lists:foldl(
fun
({?PERSIS_KEY(Type0, RootName), Conf}, AccIn) when Type0 == Type ->
AccIn#{conf_key(Type0, RootName) => Conf};
(_, AccIn) -> AccIn
end, #{}, persistent_term:get()),
(_, AccIn) ->
AccIn
end,
#{},
persistent_term:get()
),
case AllConf =:= #{} of
true -> Default;
false -> AllConf

@@ -477,23 +537,33 @@ do_get(Type, [RootName | KeyPath], Default) ->
do_deep_get(Type, KeyPath, RootV, Default).

do_put(Type, [], DeepValue) ->
maps:fold(fun(RootName, Value, _Res) ->
maps:fold(
fun(RootName, Value, _Res) ->
do_put(Type, [RootName], Value)
end, ok, DeepValue);
end,
ok,
DeepValue
);
do_put(Type, [RootName | KeyPath], DeepValue) ->
OldValue = do_get(Type, [RootName], #{}),
NewValue = do_deep_put(Type, KeyPath, OldValue, DeepValue),
persistent_term:put(?PERSIS_KEY(Type, bin(RootName)), NewValue).

do_deep_get(?CONF, KeyPath, Map, Default) ->
?ATOM_CONF_PATH(KeyPath, emqx_map_lib:deep_get(AtomKeyPath, Map, Default),
Default);
?ATOM_CONF_PATH(
KeyPath,
emqx_map_lib:deep_get(AtomKeyPath, Map, Default),
Default
);
do_deep_get(?RAW_CONF, KeyPath, Map, Default) ->
emqx_map_lib:deep_get([bin(Key) || Key <- KeyPath], Map, Default).

do_deep_put(?CONF, KeyPath, Map, Value) ->
?ATOM_CONF_PATH(KeyPath, emqx_map_lib:deep_put(AtomKeyPath, Map, Value),
error({not_found, KeyPath}));
?ATOM_CONF_PATH(
KeyPath,
emqx_map_lib:deep_put(AtomKeyPath, Map, Value),
error({not_found, KeyPath})
);
do_deep_put(?RAW_CONF, KeyPath, Map, Value) ->
emqx_map_lib:deep_put([bin(Key) || Key <- KeyPath], Map, Value).
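
For orientation, the do_deep_get/do_deep_put clauses above walk a nested config map along a key path, converting keys to binaries for the raw config. A minimal sketch of that kind of deep-path access follows; the module and function names are illustrative assumptions, not the emqx_map_lib implementation.

%% Sketch only: deep get/put over nested maps keyed by a path list.
-module(deep_path_sketch).
-export([deep_get/3, deep_put/3]).

%% deep_get(Path, Map, Default) follows Path into Map, returning Default
%% as soon as a key is missing or a non-map value is reached too early.
deep_get([], Value, _Default) ->
    Value;
deep_get([Key | Rest], Map, Default) when is_map(Map) ->
    case maps:find(Key, Map) of
        {ok, Sub} -> deep_get(Rest, Sub, Default);
        error -> Default
    end;
deep_get(_Path, _NotAMap, Default) ->
    Default.

%% deep_put(Path, Map, Value) rebuilds the nested maps along Path.
deep_put([], _Map, Value) ->
    Value;
deep_put([Key | Rest], Map, Value) when is_map(Map) ->
    Sub = maps:get(Key, Map, #{}),
    Map#{Key => deep_put(Rest, Sub, Value)};
deep_put([Key | Rest], _NotAMap, Value) ->
    #{Key => deep_put(Rest, #{}, Value)}.

For example, deep_put([<<"zones">>, <<"default">>], #{}, #{<<"mqtt">> => #{}}) yields #{<<"zones">> => #{<<"default">> => #{<<"mqtt">> => #{}}}}.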
@ -23,23 +23,26 @@
|
|||
-behaviour(gen_server).
|
||||
|
||||
%% API functions
|
||||
-export([ start_link/0
|
||||
, stop/0
|
||||
, add_handler/2
|
||||
, remove_handler/1
|
||||
, update_config/3
|
||||
, get_raw_cluster_override_conf/0
|
||||
, info/0
|
||||
, merge_to_old_config/2
|
||||
]).
|
||||
-export([
|
||||
start_link/0,
|
||||
stop/0,
|
||||
add_handler/2,
|
||||
remove_handler/1,
|
||||
update_config/3,
|
||||
get_raw_cluster_override_conf/0,
|
||||
info/0,
|
||||
merge_to_old_config/2
|
||||
]).
|
||||
|
||||
%% gen_server callbacks
|
||||
-export([init/1,
|
||||
handle_call/3,
|
||||
handle_cast/2,
|
||||
handle_info/2,
|
||||
terminate/2,
|
||||
code_change/3]).
|
||||
-export([
|
||||
init/1,
|
||||
handle_call/3,
|
||||
handle_cast/2,
|
||||
handle_info/2,
|
||||
terminate/2,
|
||||
code_change/3
|
||||
]).
|
||||
|
||||
-define(MOD, {mod}).
|
||||
-define(WKEY, '?').
|
||||
|
|
@ -47,16 +50,22 @@
|
|||
-type handler_name() :: module().
|
||||
-type handlers() :: #{emqx_config:config_key() => handlers(), ?MOD => handler_name()}.
|
||||
|
||||
-optional_callbacks([ pre_config_update/3
|
||||
, post_config_update/5
|
||||
]).
|
||||
-optional_callbacks([
|
||||
pre_config_update/3,
|
||||
post_config_update/5
|
||||
]).
|
||||
|
||||
-callback pre_config_update([atom()], emqx_config:update_request(), emqx_config:raw_config()) ->
|
||||
{ok, emqx_config:update_request()} | {error, term()}.
|
||||
|
||||
-callback post_config_update([atom()], emqx_config:update_request(), emqx_config:config(),
|
||||
emqx_config:config(), emqx_config:app_envs()) ->
|
||||
ok | {ok, Result::any()} | {error, Reason::term()}.
|
||||
-callback post_config_update(
|
||||
[atom()],
|
||||
emqx_config:update_request(),
|
||||
emqx_config:config(),
|
||||
emqx_config:config(),
|
||||
emqx_config:app_envs()
|
||||
) ->
|
||||
ok | {ok, Result :: any()} | {error, Reason :: term()}.
|
||||
|
||||
-type state() :: #{
|
||||
handlers := handlers(),
|
||||
|
|
@ -106,9 +115,11 @@ handle_call({add_handler, ConfKeyPath, HandlerName}, _From, State = #{handlers :
|
|||
{ok, NewHandlers} -> {reply, ok, State#{handlers => NewHandlers}};
|
||||
{error, _Reason} = Error -> {reply, Error, State}
|
||||
end;
|
||||
|
||||
handle_call({change_config, SchemaModule, ConfKeyPath, UpdateArgs}, _From,
|
||||
#{handlers := Handlers} = State) ->
|
||||
handle_call(
|
||||
{change_config, SchemaModule, ConfKeyPath, UpdateArgs},
|
||||
_From,
|
||||
#{handlers := Handlers} = State
|
||||
) ->
|
||||
Result = handle_update_request(SchemaModule, ConfKeyPath, Handlers, UpdateArgs),
|
||||
{reply, Result, State};
|
||||
handle_call(get_raw_cluster_override_conf, _From, State) ->
|
||||
|
|
@ -140,13 +151,14 @@ deep_put_handler([], Handlers, Mod) ->
|
|||
deep_put_handler([Key | KeyPath], Handlers, Mod) ->
|
||||
SubHandlers = maps:get(Key, Handlers, #{}),
|
||||
case deep_put_handler(KeyPath, SubHandlers, Mod) of
|
||||
{ok, NewSubHandlers} ->
|
||||
{ok, NewSubHandlers} ->
|
||||
NewHandlers = Handlers#{Key => NewSubHandlers},
|
||||
case check_handler_conflict(NewHandlers) of
|
||||
ok -> {ok, NewHandlers};
|
||||
{error, Reason} -> {error, Reason}
|
||||
end;
|
||||
{error, _Reason} = Error -> Error
|
||||
{error, _Reason} = Error ->
|
||||
Error
|
||||
end.
|
||||
|
||||
%% Make sure that Specify Key and ?WKEY cannot be on the same level.
|
||||
|
|
@ -165,34 +177,45 @@ check_handler_conflict(Handlers) ->
|
|||
filter_top_level_handlers(Handlers) ->
|
||||
maps:fold(
|
||||
fun
|
||||
(K, #{?MOD := _}, Acc) -> [K | Acc];
|
||||
(K, #{?MOD := _}, Acc) -> [K | Acc];
|
||||
(_K, #{}, Acc) -> Acc;
|
||||
(?MOD, _, Acc) -> Acc
|
||||
end, [], Handlers).
|
||||
end,
|
||||
[],
|
||||
Handlers
|
||||
).
|
||||
|
||||
handle_update_request(SchemaModule, ConfKeyPath, Handlers, UpdateArgs) ->
|
||||
try
|
||||
do_handle_update_request(SchemaModule, ConfKeyPath, Handlers, UpdateArgs)
|
||||
catch
|
||||
throw : Reason ->
|
||||
throw:Reason ->
|
||||
{error, Reason};
|
||||
Error : Reason : ST ->
|
||||
?SLOG(error, #{msg => "change_config_crashed",
|
||||
exception => Error,
|
||||
reason => Reason,
|
||||
update_req => UpdateArgs,
|
||||
module => SchemaModule,
|
||||
key_path => ConfKeyPath,
|
||||
stacktrace => ST
|
||||
}),
|
||||
Error:Reason:ST ->
|
||||
?SLOG(error, #{
|
||||
msg => "change_config_crashed",
|
||||
exception => Error,
|
||||
reason => Reason,
|
||||
update_req => UpdateArgs,
|
||||
module => SchemaModule,
|
||||
key_path => ConfKeyPath,
|
||||
stacktrace => ST
|
||||
}),
|
||||
{error, config_update_crashed}
|
||||
end.
|
||||
|
||||
do_handle_update_request(SchemaModule, ConfKeyPath, Handlers, UpdateArgs) ->
|
||||
case process_update_request(ConfKeyPath, Handlers, UpdateArgs) of
|
||||
{ok, NewRawConf, OverrideConf, Opts} ->
|
||||
check_and_save_configs(SchemaModule, ConfKeyPath, Handlers, NewRawConf,
|
||||
OverrideConf, UpdateArgs, Opts);
|
||||
check_and_save_configs(
|
||||
SchemaModule,
|
||||
ConfKeyPath,
|
||||
Handlers,
|
||||
NewRawConf,
|
||||
OverrideConf,
|
||||
UpdateArgs,
|
||||
Opts
|
||||
);
|
||||
{error, Result} ->
|
||||
{error, Result}
|
||||
end.
|
||||
|
|
@ -211,7 +234,8 @@ process_update_request(ConfKeyPath, Handlers, {{update, UpdateReq}, Opts}) ->
|
|||
{ok, NewRawConf} ->
|
||||
OverrideConf = update_override_config(NewRawConf, Opts),
|
||||
{ok, NewRawConf, OverrideConf, Opts};
|
||||
Error -> Error
|
||||
Error ->
|
||||
Error
|
||||
end.
|
||||
|
||||
do_update_config(ConfKeyPath, Handlers, OldRawConf, UpdateReq) ->
|
||||
|
|
@ -219,8 +243,13 @@ do_update_config(ConfKeyPath, Handlers, OldRawConf, UpdateReq) ->
|
|||
|
||||
do_update_config([], Handlers, OldRawConf, UpdateReq, ConfKeyPath) ->
|
||||
call_pre_config_update(Handlers, OldRawConf, UpdateReq, ConfKeyPath);
|
||||
do_update_config([ConfKey | SubConfKeyPath], Handlers, OldRawConf,
|
||||
UpdateReq, ConfKeyPath0) ->
|
||||
do_update_config(
|
||||
[ConfKey | SubConfKeyPath],
|
||||
Handlers,
|
||||
OldRawConf,
|
||||
UpdateReq,
|
||||
ConfKeyPath0
|
||||
) ->
|
||||
ConfKeyPath = ConfKeyPath0 ++ [ConfKey],
|
||||
ConfKeyBin = bin(ConfKey),
|
||||
SubOldRawConf = get_sub_config(ConfKeyBin, OldRawConf),
|
||||
|
|
@ -230,43 +259,110 @@ do_update_config([ConfKey | SubConfKeyPath], Handlers, OldRawConf,
|
|||
Error -> Error
|
||||
end.
|
||||
|
||||
check_and_save_configs(SchemaModule, ConfKeyPath, Handlers, NewRawConf, OverrideConf,
|
||||
UpdateArgs, Opts) ->
|
||||
check_and_save_configs(
|
||||
SchemaModule,
|
||||
ConfKeyPath,
|
||||
Handlers,
|
||||
NewRawConf,
|
||||
OverrideConf,
|
||||
UpdateArgs,
|
||||
Opts
|
||||
) ->
|
||||
OldConf = emqx_config:get_root(ConfKeyPath),
|
||||
Schema = schema(SchemaModule, ConfKeyPath),
|
||||
{AppEnvs, NewConf} = emqx_config:check_config(Schema, NewRawConf),
|
||||
case do_post_config_update(ConfKeyPath, Handlers, OldConf, NewConf, AppEnvs, UpdateArgs, #{}) of
|
||||
{ok, Result0} ->
|
||||
remove_from_local_if_cluster_change(ConfKeyPath, Opts),
|
||||
case save_configs(ConfKeyPath, AppEnvs, NewConf, NewRawConf, OverrideConf,
|
||||
UpdateArgs, Opts) of
|
||||
case
|
||||
save_configs(
|
||||
ConfKeyPath,
|
||||
AppEnvs,
|
||||
NewConf,
|
||||
NewRawConf,
|
||||
OverrideConf,
|
||||
UpdateArgs,
|
||||
Opts
|
||||
)
|
||||
of
|
||||
{ok, Result1} ->
|
||||
{ok, Result1#{post_config_update => Result0}};
|
||||
Error -> Error
|
||||
Error ->
|
||||
Error
|
||||
end;
|
||||
Error -> Error
|
||||
Error ->
|
||||
Error
|
||||
end.
|
||||
|
||||
do_post_config_update(ConfKeyPath, Handlers, OldConf, NewConf, AppEnvs, UpdateArgs, Result) ->
|
||||
do_post_config_update(ConfKeyPath, Handlers, OldConf, NewConf, AppEnvs, UpdateArgs,
|
||||
Result, []).
|
||||
do_post_config_update(
|
||||
ConfKeyPath,
|
||||
Handlers,
|
||||
OldConf,
|
||||
NewConf,
|
||||
AppEnvs,
|
||||
UpdateArgs,
|
||||
Result,
|
||||
[]
|
||||
).
|
||||
|
||||
do_post_config_update([], Handlers, OldConf, NewConf, AppEnvs, UpdateArgs, Result,
|
||||
ConfKeyPath) ->
|
||||
call_post_config_update(Handlers, OldConf, NewConf, AppEnvs, up_req(UpdateArgs),
|
||||
Result, ConfKeyPath);
|
||||
do_post_config_update([ConfKey | SubConfKeyPath], Handlers, OldConf, NewConf, AppEnvs,
|
||||
UpdateArgs, Result, ConfKeyPath0) ->
|
||||
do_post_config_update(
|
||||
[],
|
||||
Handlers,
|
||||
OldConf,
|
||||
NewConf,
|
||||
AppEnvs,
|
||||
UpdateArgs,
|
||||
Result,
|
||||
ConfKeyPath
|
||||
) ->
|
||||
call_post_config_update(
|
||||
Handlers,
|
||||
OldConf,
|
||||
NewConf,
|
||||
AppEnvs,
|
||||
up_req(UpdateArgs),
|
||||
Result,
|
||||
ConfKeyPath
|
||||
);
|
||||
do_post_config_update(
|
||||
[ConfKey | SubConfKeyPath],
|
||||
Handlers,
|
||||
OldConf,
|
||||
NewConf,
|
||||
AppEnvs,
|
||||
UpdateArgs,
|
||||
Result,
|
||||
ConfKeyPath0
|
||||
) ->
|
||||
ConfKeyPath = ConfKeyPath0 ++ [ConfKey],
|
||||
SubOldConf = get_sub_config(ConfKey, OldConf),
|
||||
SubNewConf = get_sub_config(ConfKey, NewConf),
|
||||
SubHandlers = get_sub_handlers(ConfKey, Handlers),
|
||||
case do_post_config_update(SubConfKeyPath, SubHandlers, SubOldConf, SubNewConf, AppEnvs,
|
||||
UpdateArgs, Result, ConfKeyPath) of
|
||||
case
|
||||
do_post_config_update(
|
||||
SubConfKeyPath,
|
||||
SubHandlers,
|
||||
SubOldConf,
|
||||
SubNewConf,
|
||||
AppEnvs,
|
||||
UpdateArgs,
|
||||
Result,
|
||||
ConfKeyPath
|
||||
)
|
||||
of
|
||||
{ok, Result1} ->
|
||||
call_post_config_update(Handlers, OldConf, NewConf, AppEnvs, up_req(UpdateArgs),
|
||||
Result1, ConfKeyPath);
|
||||
Error -> Error
|
||||
call_post_config_update(
|
||||
Handlers,
|
||||
OldConf,
|
||||
NewConf,
|
||||
AppEnvs,
|
||||
up_req(UpdateArgs),
|
||||
Result1,
|
||||
ConfKeyPath
|
||||
);
|
||||
Error ->
|
||||
Error
|
||||
end.
|
||||
|
||||
get_sub_handlers(ConfKey, Handlers) ->
|
||||
|
|
@ -277,7 +373,8 @@ get_sub_handlers(ConfKey, Handlers) ->
|
|||
|
||||
get_sub_config(ConfKey, Conf) when is_map(Conf) ->
|
||||
maps:get(ConfKey, Conf, undefined);
|
||||
get_sub_config(_, _Conf) -> %% the Conf is a primitive
|
||||
%% the Conf is a primitive
|
||||
get_sub_config(_, _Conf) ->
|
||||
undefined.
|
||||
|
||||
call_pre_config_update(#{?MOD := HandlerName}, OldRawConf, UpdateReq, ConfKeyPath) ->
|
||||
|
|
@ -287,25 +384,48 @@ call_pre_config_update(#{?MOD := HandlerName}, OldRawConf, UpdateReq, ConfKeyPat
|
|||
{ok, NewUpdateReq} -> {ok, NewUpdateReq};
|
||||
{error, Reason} -> {error, {pre_config_update, HandlerName, Reason}}
|
||||
end;
|
||||
false -> merge_to_old_config(UpdateReq, OldRawConf)
|
||||
false ->
|
||||
merge_to_old_config(UpdateReq, OldRawConf)
|
||||
end;
|
||||
call_pre_config_update(_Handlers, OldRawConf, UpdateReq, _ConfKeyPath) ->
|
||||
merge_to_old_config(UpdateReq, OldRawConf).
|
||||
|
||||
call_post_config_update(#{?MOD := HandlerName}, OldConf, NewConf,
|
||||
AppEnvs, UpdateReq, Result, ConfKeyPath) ->
|
||||
call_post_config_update(
|
||||
#{?MOD := HandlerName},
|
||||
OldConf,
|
||||
NewConf,
|
||||
AppEnvs,
|
||||
UpdateReq,
|
||||
Result,
|
||||
ConfKeyPath
|
||||
) ->
|
||||
case erlang:function_exported(HandlerName, post_config_update, 5) of
|
||||
true ->
|
||||
case HandlerName:post_config_update(ConfKeyPath, UpdateReq,
|
||||
NewConf, OldConf, AppEnvs) of
|
||||
case
|
||||
HandlerName:post_config_update(
|
||||
ConfKeyPath,
|
||||
UpdateReq,
|
||||
NewConf,
|
||||
OldConf,
|
||||
AppEnvs
|
||||
)
|
||||
of
|
||||
ok -> {ok, Result};
|
||||
{ok, Result1} -> {ok, Result#{HandlerName => Result1}};
|
||||
{error, Reason} -> {error, {post_config_update, HandlerName, Reason}}
|
||||
end;
|
||||
false -> {ok, Result}
|
||||
false ->
|
||||
{ok, Result}
|
||||
end;
|
||||
call_post_config_update(_Handlers, _OldConf, _NewConf, _AppEnvs,
|
||||
_UpdateReq, Result, _ConfKeyPath) ->
|
||||
call_post_config_update(
|
||||
_Handlers,
|
||||
_OldConf,
|
||||
_NewConf,
|
||||
_AppEnvs,
|
||||
_UpdateReq,
|
||||
Result,
|
||||
_ConfKeyPath
|
||||
) ->
|
||||
{ok, Result}.
|
||||
|
||||
save_configs(ConfKeyPath, AppEnvs, CheckedConf, NewRawConf, OverrideConf, UpdateArgs, Opts) ->
|
||||
|
|
@ -350,8 +470,10 @@ up_req({remove, _Opts}) -> '$remove';
|
|||
up_req({{update, Req}, _Opts}) -> Req.
|
||||
|
||||
return_change_result(ConfKeyPath, {{update, _Req}, Opts}) ->
|
||||
#{config => emqx_config:get(ConfKeyPath),
|
||||
raw_config => return_rawconf(ConfKeyPath, Opts)};
|
||||
#{
|
||||
config => emqx_config:get(ConfKeyPath),
|
||||
raw_config => return_rawconf(ConfKeyPath, Opts)
|
||||
};
|
||||
return_change_result(_ConfKeyPath, {remove, _Opts}) ->
|
||||
#{}.
|
||||
|
||||
|
|
@ -378,19 +500,24 @@ do_remove_handler(ConfKeyPath, Handlers) ->
|
|||
NewHandlers = emqx_map_lib:deep_remove(ConfKeyPath ++ [?MOD], Handlers),
|
||||
remove_empty_leaf(ConfKeyPath, NewHandlers).
|
||||
|
||||
remove_empty_leaf([], Handlers) -> Handlers;
|
||||
remove_empty_leaf([], Handlers) ->
|
||||
Handlers;
|
||||
remove_empty_leaf(KeyPath, Handlers) ->
|
||||
case emqx_map_lib:deep_find(KeyPath, Handlers) =:= {ok, #{}} of
|
||||
true -> %% empty leaf
|
||||
%% empty leaf
|
||||
true ->
|
||||
Handlers1 = emqx_map_lib:deep_remove(KeyPath, Handlers),
|
||||
SubKeyPath = lists:sublist(KeyPath, length(KeyPath) - 1),
|
||||
remove_empty_leaf(SubKeyPath, Handlers1);
|
||||
false -> Handlers
|
||||
false ->
|
||||
Handlers
|
||||
end.
|
||||
|
||||
assert_callback_function(Mod) ->
|
||||
case erlang:function_exported(Mod, pre_config_update, 3) orelse
|
||||
erlang:function_exported(Mod, post_config_update, 5) of
|
||||
case
|
||||
erlang:function_exported(Mod, pre_config_update, 3) orelse
|
||||
erlang:function_exported(Mod, post_config_update, 5)
|
||||
of
|
||||
true -> ok;
|
||||
false -> error(#{msg => "bad_emqx_config_handler_callback", module => Mod})
|
||||
end,
|
||||
|
|
@ -410,12 +537,16 @@ schema(SchemaModule, [RootKey | _]) ->
|
|||
validations => hocon_schema:validations(SchemaModule)
|
||||
}.
|
||||
|
||||
parse_translations(#{translate_to := TRs } = Field, Name, SchemaModule) ->
|
||||
parse_translations(#{translate_to := TRs} = Field, Name, SchemaModule) ->
|
||||
{
|
||||
{Name, maps:remove(translate_to, Field)},
|
||||
lists:foldl(fun(T, Acc) ->
|
||||
Acc#{T => hocon_schema:translation(SchemaModule, T)}
|
||||
end, #{}, TRs)
|
||||
lists:foldl(
|
||||
fun(T, Acc) ->
|
||||
Acc#{T => hocon_schema:translation(SchemaModule, T)}
|
||||
end,
|
||||
#{},
|
||||
TRs
|
||||
)
|
||||
};
|
||||
parse_translations(Field, Name, _SchemaModule) ->
|
||||
{{Name, Field}, #{}}.
|
||||
|
|
|
|||
|
|
@ -16,21 +16,41 @@
|
|||
|
||||
-module(emqx_congestion).
|
||||
|
||||
-export([ maybe_alarm_conn_congestion/3
|
||||
, cancel_alarms/3
|
||||
]).
|
||||
-export([
|
||||
maybe_alarm_conn_congestion/3,
|
||||
cancel_alarms/3
|
||||
]).
|
||||
|
||||
-elvis([{elvis_style, invalid_dynamic_call, #{ignore => [emqx_congestion]}}]).
|
||||
|
||||
-define(ALARM_CONN_CONGEST(Channel, Reason),
|
||||
list_to_binary(
|
||||
io_lib:format("~ts/~ts/~ts",
|
||||
[Reason, emqx_channel:info(clientid, Channel),
|
||||
maps:get(username, emqx_channel:info(clientinfo, Channel),
|
||||
<<"unknown_user">>)]))).
|
||||
list_to_binary(
|
||||
io_lib:format(
|
||||
"~ts/~ts/~ts",
|
||||
[
|
||||
Reason,
|
||||
emqx_channel:info(clientid, Channel),
|
||||
maps:get(
|
||||
username,
|
||||
emqx_channel:info(clientinfo, Channel),
|
||||
<<"unknown_user">>
|
||||
)
|
||||
]
|
||||
)
|
||||
)
|
||||
).
|
||||
|
||||
-define(ALARM_CONN_INFO_KEYS, [socktype, sockname, peername, clientid, username,
|
||||
proto_name, proto_ver, connected_at, conn_state]).
|
||||
-define(ALARM_CONN_INFO_KEYS, [
|
||||
socktype,
|
||||
sockname,
|
||||
peername,
|
||||
clientid,
|
||||
username,
|
||||
proto_name,
|
||||
proto_ver,
|
||||
connected_at,
|
||||
conn_state
|
||||
]).
|
||||
-define(ALARM_SOCK_STATS_KEYS, [send_pend, recv_cnt, recv_oct, send_cnt, send_oct]).
|
||||
-define(ALARM_SOCK_OPTS_KEYS, [high_watermark, high_msgq_watermark, sndbuf, recbuf, buffer]).
|
||||
-define(PROC_INFO_KEYS, [message_queue_len, memory, reductions]).
|
||||
|
|
@ -40,7 +60,8 @@
|
|||
|
||||
maybe_alarm_conn_congestion(Socket, Transport, Channel) ->
|
||||
case is_alarm_enabled(Channel) of
|
||||
false -> ok;
|
||||
false ->
|
||||
ok;
|
||||
true ->
|
||||
case is_tcp_congested(Socket, Transport) of
|
||||
true -> alarm_congestion(Socket, Transport, Channel, conn_congestion);
|
||||
|
|
@ -49,12 +70,15 @@ maybe_alarm_conn_congestion(Socket, Transport, Channel) ->
|
|||
end.
|
||||
|
||||
cancel_alarms(Socket, Transport, Channel) ->
|
||||
lists:foreach(fun(Reason) ->
|
||||
case has_alarm_sent(Reason) of
|
||||
true -> do_cancel_alarm_congestion(Socket, Transport, Channel, Reason);
|
||||
false -> ok
|
||||
end
|
||||
end, ?ALL_ALARM_REASONS).
|
||||
lists:foreach(
|
||||
fun(Reason) ->
|
||||
case has_alarm_sent(Reason) of
|
||||
true -> do_cancel_alarm_congestion(Socket, Transport, Channel, Reason);
|
||||
false -> ok
|
||||
end
|
||||
end,
|
||||
?ALL_ALARM_REASONS
|
||||
).
|
||||
|
||||
is_alarm_enabled(Channel) ->
|
||||
Zone = emqx_channel:info(zone, Channel),
|
||||
|
|
@ -62,7 +86,8 @@ is_alarm_enabled(Channel) ->
|
|||
|
||||
alarm_congestion(Socket, Transport, Channel, Reason) ->
|
||||
case has_alarm_sent(Reason) of
|
||||
false -> do_alarm_congestion(Socket, Transport, Channel, Reason);
|
||||
false ->
|
||||
do_alarm_congestion(Socket, Transport, Channel, Reason);
|
||||
true ->
|
||||
%% pretend we have sent an alarm again
|
||||
update_alarm_sent_at(Reason)
|
||||
|
|
@ -70,8 +95,10 @@ alarm_congestion(Socket, Transport, Channel, Reason) ->
|
|||
|
||||
cancel_alarm_congestion(Socket, Transport, Channel, Reason) ->
|
||||
Zone = emqx_channel:info(zone, Channel),
|
||||
WontClearIn = emqx_config:get_zone_conf(Zone, [conn_congestion,
|
||||
min_alarm_sustain_duration]),
|
||||
WontClearIn = emqx_config:get_zone_conf(Zone, [
|
||||
conn_congestion,
|
||||
min_alarm_sustain_duration
|
||||
]),
|
||||
case has_alarm_sent(Reason) andalso long_time_since_last_alarm(Reason, WontClearIn) of
|
||||
true -> do_cancel_alarm_congestion(Socket, Transport, Channel, Reason);
|
||||
false -> ok
|
||||
|
|
@ -130,14 +157,16 @@ timenow() ->
|
|||
tcp_congestion_alarm_details(Socket, Transport, Channel) ->
|
||||
ProcInfo = process_info(self(), ?PROC_INFO_KEYS),
|
||||
BasicInfo = [{pid, list_to_binary(pid_to_list(self()))} | ProcInfo],
|
||||
Stat = case Transport:getstat(Socket, ?ALARM_SOCK_STATS_KEYS) of
|
||||
{ok, Stat0} -> Stat0;
|
||||
{error, _} -> []
|
||||
end,
|
||||
Opts = case Transport:getopts(Socket, ?ALARM_SOCK_OPTS_KEYS) of
|
||||
{ok, Opts0} -> Opts0;
|
||||
{error, _} -> []
|
||||
end,
|
||||
Stat =
|
||||
case Transport:getstat(Socket, ?ALARM_SOCK_STATS_KEYS) of
|
||||
{ok, Stat0} -> Stat0;
|
||||
{error, _} -> []
|
||||
end,
|
||||
Opts =
|
||||
case Transport:getopts(Socket, ?ALARM_SOCK_OPTS_KEYS) of
|
||||
{ok, Opts0} -> Opts0;
|
||||
{error, _} -> []
|
||||
end,
|
||||
SockInfo = Stat ++ Opts,
|
||||
ConnInfo = [conn_info(Key, Channel) || Key <- ?ALARM_CONN_INFO_KEYS],
|
||||
maps:from_list(BasicInfo ++ ConnInfo ++ SockInfo).
|
||||
|
|
|
|||
File diff suppressed because it is too large
|
|
@ -21,67 +21,71 @@
|
|||
-include("types.hrl").
|
||||
-include("logger.hrl").
|
||||
|
||||
|
||||
-export([start_link/0, stop/0]).
|
||||
|
||||
-export([ register_command/2
|
||||
, register_command/3
|
||||
, unregister_command/1
|
||||
]).
|
||||
-export([
|
||||
register_command/2,
|
||||
register_command/3,
|
||||
unregister_command/1
|
||||
]).
|
||||
|
||||
-export([ run_command/1
|
||||
, run_command/2
|
||||
, lookup_command/1
|
||||
, get_commands/0
|
||||
]).
|
||||
-export([
|
||||
run_command/1,
|
||||
run_command/2,
|
||||
lookup_command/1,
|
||||
get_commands/0
|
||||
]).
|
||||
|
||||
-export([ print/1
|
||||
, print/2
|
||||
, usage/1
|
||||
, usage/2
|
||||
]).
|
||||
-export([
|
||||
print/1,
|
||||
print/2,
|
||||
usage/1,
|
||||
usage/2
|
||||
]).
|
||||
|
||||
%% Exports mainly for test cases
|
||||
-export([ format/2
|
||||
, format_usage/1
|
||||
, format_usage/2
|
||||
]).
|
||||
-export([
|
||||
format/2,
|
||||
format_usage/1,
|
||||
format_usage/2
|
||||
]).
|
||||
|
||||
%% gen_server callbacks
|
||||
-export([ init/1
|
||||
, handle_call/3
|
||||
, handle_cast/2
|
||||
, handle_info/2
|
||||
, terminate/2
|
||||
, code_change/3
|
||||
]).
|
||||
-export([
|
||||
init/1,
|
||||
handle_call/3,
|
||||
handle_cast/2,
|
||||
handle_info/2,
|
||||
terminate/2,
|
||||
code_change/3
|
||||
]).
|
||||
|
||||
-record(state, {seq = 0}).
|
||||
|
||||
-type(cmd() :: atom()).
|
||||
-type(cmd_params() :: string()).
|
||||
-type(cmd_descr() :: string()).
|
||||
-type(cmd_usage() :: {cmd_params(), cmd_descr()}).
|
||||
-type cmd() :: atom().
|
||||
-type cmd_params() :: string().
|
||||
-type cmd_descr() :: string().
|
||||
-type cmd_usage() :: {cmd_params(), cmd_descr()}.
|
||||
|
||||
-define(SERVER, ?MODULE).
|
||||
-define(CMD_TAB, emqx_command).
|
||||
|
||||
-spec(start_link() -> startlink_ret()).
|
||||
-spec start_link() -> startlink_ret().
|
||||
start_link() ->
|
||||
gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
|
||||
|
||||
-spec(stop() -> ok).
|
||||
-spec stop() -> ok.
|
||||
stop() -> gen_server:stop(?SERVER).
|
||||
|
||||
-spec(register_command(cmd(), {module(), atom()}) -> ok).
|
||||
-spec register_command(cmd(), {module(), atom()}) -> ok.
|
||||
register_command(Cmd, MF) when is_atom(Cmd) ->
|
||||
register_command(Cmd, MF, []).
|
||||
|
||||
-spec(register_command(cmd(), {module(), atom()}, list()) -> ok).
|
||||
-spec register_command(cmd(), {module(), atom()}, list()) -> ok.
|
||||
register_command(Cmd, MF, Opts) when is_atom(Cmd) ->
|
||||
call({register_command, Cmd, MF, Opts}).
|
||||
|
||||
-spec(unregister_command(cmd()) -> ok).
|
||||
-spec unregister_command(cmd()) -> ok.
|
||||
unregister_command(Cmd) when is_atom(Cmd) ->
|
||||
cast({unregister_command, Cmd}).
|
||||
|
||||
|
|
@ -89,14 +93,15 @@ call(Req) -> gen_server:call(?SERVER, Req).
|
|||
|
||||
cast(Msg) -> gen_server:cast(?SERVER, Msg).
|
||||
|
||||
-spec(run_command(list(string())) -> ok | {error, term()}).
|
||||
-spec run_command(list(string())) -> ok | {error, term()}.
|
||||
run_command([]) ->
|
||||
run_command(help, []);
|
||||
run_command([Cmd | Args]) ->
|
||||
run_command(list_to_atom(Cmd), Args).
|
||||
|
||||
-spec(run_command(cmd(), list(string())) -> ok | {error, term()}).
|
||||
run_command(help, []) -> help();
|
||||
-spec run_command(cmd(), list(string())) -> ok | {error, term()}.
|
||||
run_command(help, []) ->
|
||||
help();
|
||||
run_command(Cmd, Args) when is_atom(Cmd) ->
|
||||
case lookup_command(Cmd) of
|
||||
[{Mod, Fun}] ->
|
||||
|
|
@ -104,24 +109,26 @@ run_command(Cmd, Args) when is_atom(Cmd) ->
|
|||
_ -> ok
|
||||
catch
|
||||
_:Reason:Stacktrace ->
|
||||
?SLOG(error, #{msg => "ctl_command_crashed",
|
||||
stacktrace => Stacktrace,
|
||||
reason => Reason
|
||||
}),
|
||||
?SLOG(error, #{
|
||||
msg => "ctl_command_crashed",
|
||||
stacktrace => Stacktrace,
|
||||
reason => Reason
|
||||
}),
|
||||
{error, Reason}
|
||||
end;
|
||||
[] ->
|
||||
help(), {error, cmd_not_found}
|
||||
help(),
|
||||
{error, cmd_not_found}
|
||||
end.
|
||||
|
||||
-spec(lookup_command(cmd()) -> [{module(), atom()}]).
|
||||
-spec lookup_command(cmd()) -> [{module(), atom()}].
|
||||
lookup_command(Cmd) when is_atom(Cmd) ->
|
||||
case ets:match(?CMD_TAB, {{'_', Cmd}, '$1', '_'}) of
|
||||
[El] -> El;
|
||||
[] -> []
|
||||
[] -> []
|
||||
end.
|
||||
|
||||
-spec(get_commands() -> list({cmd(), module(), atom()})).
|
||||
-spec get_commands() -> list({cmd(), module(), atom()}).
|
||||
get_commands() ->
|
||||
[{Cmd, M, F} || {{_Seq, Cmd}, {M, F}, _Opts} <- ets:tab2list(?CMD_TAB)].
|
||||
|
||||
|
|
@ -131,42 +138,52 @@ help() ->
|
|||
print("No commands available.~n");
|
||||
Cmds ->
|
||||
print("Usage: ~ts~n", [?MODULE]),
|
||||
lists:foreach(fun({_, {Mod, Cmd}, _}) ->
|
||||
print("~110..-s~n", [""]), Mod:Cmd(usage)
|
||||
end, Cmds)
|
||||
lists:foreach(
|
||||
fun({_, {Mod, Cmd}, _}) ->
|
||||
print("~110..-s~n", [""]),
|
||||
Mod:Cmd(usage)
|
||||
end,
|
||||
Cmds
|
||||
)
|
||||
end.
|
||||
|
||||
-spec(print(io:format()) -> ok).
|
||||
-spec print(io:format()) -> ok.
|
||||
print(Msg) ->
|
||||
io:format("~ts", [format(Msg, [])]).
|
||||
|
||||
-spec(print(io:format(), [term()]) -> ok).
|
||||
-spec print(io:format(), [term()]) -> ok.
|
||||
print(Format, Args) ->
|
||||
io:format("~ts", [format(Format, Args)]).
|
||||
|
||||
-spec(usage([cmd_usage()]) -> ok).
|
||||
-spec usage([cmd_usage()]) -> ok.
|
||||
usage(UsageList) ->
|
||||
io:format(format_usage(UsageList)).
|
||||
|
||||
-spec(usage(cmd_params(), cmd_descr()) -> ok).
|
||||
-spec usage(cmd_params(), cmd_descr()) -> ok.
|
||||
usage(CmdParams, Desc) ->
|
||||
io:format(format_usage(CmdParams, Desc)).
|
||||
|
||||
-spec(format(io:format(), [term()]) -> string()).
|
||||
-spec format(io:format(), [term()]) -> string().
|
||||
format(Format, Args) ->
|
||||
lists:flatten(io_lib:format(Format, Args)).
|
||||
|
||||
-spec(format_usage([cmd_usage()]) -> [string()]).
|
||||
-spec format_usage([cmd_usage()]) -> [string()].
|
||||
format_usage(UsageList) ->
|
||||
Width = lists:foldl(fun({CmdStr, _}, W) ->
|
||||
max(iolist_size(CmdStr), W)
|
||||
end, 0, UsageList),
|
||||
Width = lists:foldl(
|
||||
fun({CmdStr, _}, W) ->
|
||||
max(iolist_size(CmdStr), W)
|
||||
end,
|
||||
0,
|
||||
UsageList
|
||||
),
|
||||
lists:map(
|
||||
fun({CmdParams, Desc}) ->
|
||||
format_usage(CmdParams, Desc, Width)
|
||||
end, UsageList).
|
||||
end,
|
||||
UsageList
|
||||
).
|
||||
|
||||
-spec(format_usage(cmd_params(), cmd_descr()) -> string()).
|
||||
-spec format_usage(cmd_params(), cmd_descr()) -> string().
|
||||
format_usage(CmdParams, Desc) ->
|
||||
format_usage(CmdParams, Desc, 0).
|
||||
|
||||
|
|
@ -177,9 +194,13 @@ format_usage(CmdParams, Desc, Width) ->
|
|||
DescLines = split_cmd(Desc),
|
||||
Zipped = zip_cmd(CmdLines, DescLines),
|
||||
Fmt = "~-" ++ integer_to_list(Width + 1) ++ "s# ~ts~n",
|
||||
lists:foldl(fun({CmdStr, DescStr}, Usage) ->
|
||||
Usage ++ format(Fmt, [CmdStr, DescStr])
|
||||
end, "", Zipped).
|
||||
lists:foldl(
|
||||
fun({CmdStr, DescStr}, Usage) ->
|
||||
Usage ++ format(Fmt, [CmdStr, DescStr])
|
||||
end,
|
||||
"",
|
||||
Zipped
|
||||
).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% gen_server callbacks
|
||||
|
|
@ -191,13 +212,13 @@ init([]) ->
|
|||
|
||||
handle_call({register_command, Cmd, MF, Opts}, _From, State = #state{seq = Seq}) ->
|
||||
case ets:match(?CMD_TAB, {{'$1', Cmd}, '_', '_'}) of
|
||||
[] -> ets:insert(?CMD_TAB, {{Seq, Cmd}, MF, Opts});
|
||||
[] ->
|
||||
ets:insert(?CMD_TAB, {{Seq, Cmd}, MF, Opts});
|
||||
[[OriginSeq] | _] ->
|
||||
?SLOG(warning, #{msg => "CMD_overidden", cmd => Cmd, mf => MF}),
|
||||
true = ets:insert(?CMD_TAB, {{OriginSeq, Cmd}, MF, Opts})
|
||||
end,
|
||||
{reply, ok, next_seq(State)};
|
||||
|
||||
handle_call(Req, _From, State) ->
|
||||
?SLOG(error, #{msg => "unexpected_call", call => Req}),
|
||||
{reply, ignored, State}.
|
||||
|
|
@ -205,7 +226,6 @@ handle_call(Req, _From, State) ->
|
|||
handle_cast({unregister_command, Cmd}, State) ->
|
||||
ets:match_delete(?CMD_TAB, {{'_', Cmd}, '_', '_'}),
|
||||
noreply(State);
|
||||
|
||||
handle_cast(Msg, State) ->
|
||||
?SLOG(error, #{msg => "unexpected_cast", cast => Msg}),
|
||||
noreply(State).
|
||||
|
|
|
|||
|
|
@ -18,16 +18,19 @@
|
|||
-include_lib("typerefl/include/types.hrl").
|
||||
|
||||
%% API
|
||||
-export([ to_epoch_millisecond/1
|
||||
, to_epoch_second/1
|
||||
]).
|
||||
-export([ epoch_to_rfc3339/1
|
||||
, epoch_to_rfc3339/2
|
||||
]).
|
||||
-export([
|
||||
to_epoch_millisecond/1,
|
||||
to_epoch_second/1
|
||||
]).
|
||||
-export([
|
||||
epoch_to_rfc3339/1,
|
||||
epoch_to_rfc3339/2
|
||||
]).
|
||||
|
||||
-reflect_type([ epoch_millisecond/0
|
||||
, epoch_second/0
|
||||
]).
|
||||
-reflect_type([
|
||||
epoch_millisecond/0,
|
||||
epoch_second/0
|
||||
]).
|
||||
|
||||
-type epoch_second() :: non_neg_integer().
|
||||
-type epoch_millisecond() :: non_neg_integer().
|
||||
|
|

@@ -47,8 +50,9 @@ to_epoch(DateTime, Unit) ->
{_Epoch, []} -> {error, bad_epoch};
_ -> {ok, calendar:rfc3339_to_system_time(DateTime, [{unit, Unit}])}
end
catch error: _ ->
{error, bad_rfc3339_timestamp}
catch
error:_ ->
{error, bad_rfc3339_timestamp}
end.

epoch_to_rfc3339(TimeStamp) ->
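
As context for the to_epoch/2 change above, the conversion it wraps is the OTP calendar API. A self-contained sketch of the round trip (assuming OTP 21+ and the millisecond unit used elsewhere in this module; not code from this repository) might look like this:

%% Sketch: RFC 3339 string <-> epoch milliseconds via the calendar module.
-module(rfc3339_sketch).
-export([to_epoch_ms/1, from_epoch_ms/1]).

%% "2022-01-01T08:00:00+08:00" -> {ok, 1640995200000}
to_epoch_ms(DateTime) ->
    try
        {ok, calendar:rfc3339_to_system_time(DateTime, [{unit, millisecond}])}
    catch
        error:_ -> {error, bad_rfc3339_timestamp}
    end.

%% 1640995200000 -> "2022-01-01T00:00:00.000Z"
from_epoch_ms(EpochMs) ->
    calendar:system_time_to_rfc3339(EpochMs, [{unit, millisecond}, {offset, "Z"}]).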
@ -69,8 +73,11 @@ fields(bar) ->
|
|||
{millisecond, ?MODULE:epoch_millisecond()}
|
||||
].
|
||||
|
||||
-define(FORMAT(_Sec_, _Ms_), lists:flatten(
|
||||
io_lib:format("bar={second=~w,millisecond=~w}", [_Sec_, _Ms_]))).
|
||||
-define(FORMAT(_Sec_, _Ms_),
|
||||
lists:flatten(
|
||||
io_lib:format("bar={second=~w,millisecond=~w}", [_Sec_, _Ms_])
|
||||
)
|
||||
).
|
||||
|
||||
epoch_ok_test() ->
|
||||
Args = [
|
||||
|
|
@ -78,31 +85,43 @@ epoch_ok_test() ->
|
|||
{1, 1, 1, 1},
|
||||
{"2022-01-01T08:00:00+08:00", "2022-01-01T08:00:00+08:00", 1640995200, 1640995200000}
|
||||
],
|
||||
lists:foreach(fun({Sec, Ms, EpochSec, EpochMs}) ->
|
||||
check_ok(?FORMAT(Sec, Ms), EpochSec, EpochMs)
|
||||
end, Args),
|
||||
lists:foreach(
|
||||
fun({Sec, Ms, EpochSec, EpochMs}) ->
|
||||
check_ok(?FORMAT(Sec, Ms), EpochSec, EpochMs)
|
||||
end,
|
||||
Args
|
||||
),
|
||||
ok.
|
||||
|
||||
check_ok(Input, Sec, Ms) ->
|
||||
{ok, Data} = hocon:binary(Input, #{}),
|
||||
?assertMatch(#{bar := #{second := Sec, millisecond := Ms}},
|
||||
hocon_tconf:check_plain(?MODULE, Data, #{atom_key => true}, [bar])),
|
||||
?assertMatch(
|
||||
#{bar := #{second := Sec, millisecond := Ms}},
|
||||
hocon_tconf:check_plain(?MODULE, Data, #{atom_key => true}, [bar])
|
||||
),
|
||||
ok.
|
||||
|
||||
epoch_failed_test() ->
|
||||
Args = [
|
||||
{-1, -1},
|
||||
{"1s", "1s"},
|
||||
{"2022-13-13T08:00:00+08:00", "2022-13-13T08:00:00+08:00"}],
|
||||
lists:foreach(fun({Sec, Ms}) ->
|
||||
check_failed(?FORMAT(Sec, Ms))
|
||||
end, Args),
|
||||
{"2022-13-13T08:00:00+08:00", "2022-13-13T08:00:00+08:00"}
|
||||
],
|
||||
lists:foreach(
|
||||
fun({Sec, Ms}) ->
|
||||
check_failed(?FORMAT(Sec, Ms))
|
||||
end,
|
||||
Args
|
||||
),
|
||||
ok.
|
||||
|
||||
check_failed(Input) ->
|
||||
{ok, Data} = hocon:binary(Input, #{}),
|
||||
?assertException(throw, _,
|
||||
hocon_tconf:check_plain(?MODULE, Data, #{atom_key => true}, [bar])),
|
||||
?assertException(
|
||||
throw,
|
||||
_,
|
||||
hocon_tconf:check_plain(?MODULE, Data, #{atom_key => true}, [bar])
|
||||
),
|
||||
ok.
|
||||
|
||||
-endif.
|
||||
|
|
|
|||
|
|
@ -28,13 +28,14 @@
|
|||
-export([detect/1]).
|
||||
|
||||
%% gen_server callbacks
|
||||
-export([ init/1
|
||||
, handle_call/3
|
||||
, handle_cast/2
|
||||
, handle_info/2
|
||||
, terminate/2
|
||||
, code_change/3
|
||||
]).
|
||||
-export([
|
||||
init/1,
|
||||
handle_call/3,
|
||||
handle_cast/2,
|
||||
handle_info/2,
|
||||
terminate/2,
|
||||
code_change/3
|
||||
]).
|
||||
|
||||
%% Tab
|
||||
-define(FLAPPING_TAB, ?MODULE).
|
||||
|
|
@ -42,31 +43,31 @@
|
|||
-define(FLAPPING_THRESHOLD, 30).
|
||||
-define(FLAPPING_DURATION, 60000).
|
||||
-define(FLAPPING_BANNED_INTERVAL, 300000).
|
||||
-define(DEFAULT_DETECT_POLICY,
|
||||
#{max_count => ?FLAPPING_THRESHOLD,
|
||||
window_time => ?FLAPPING_DURATION,
|
||||
ban_time => ?FLAPPING_BANNED_INTERVAL
|
||||
}).
|
||||
-define(DEFAULT_DETECT_POLICY, #{
|
||||
max_count => ?FLAPPING_THRESHOLD,
|
||||
window_time => ?FLAPPING_DURATION,
|
||||
ban_time => ?FLAPPING_BANNED_INTERVAL
|
||||
}).
|
||||
|
||||
-record(flapping, {
|
||||
clientid :: emqx_types:clientid(),
|
||||
peerhost :: emqx_types:peerhost(),
|
||||
started_at :: pos_integer(),
|
||||
detect_cnt :: integer()
|
||||
}).
|
||||
clientid :: emqx_types:clientid(),
|
||||
peerhost :: emqx_types:peerhost(),
|
||||
started_at :: pos_integer(),
|
||||
detect_cnt :: integer()
|
||||
}).
|
||||
|
||||
-opaque(flapping() :: #flapping{}).
|
||||
-opaque flapping() :: #flapping{}.
|
||||
|
||||
-export_type([flapping/0]).
|
||||
|
||||
-spec(start_link() -> emqx_types:startlink_ret()).
|
||||
-spec start_link() -> emqx_types:startlink_ret().
|
||||
start_link() ->
|
||||
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
|
||||
|
||||
stop() -> gen_server:stop(?MODULE).
|
||||
|
||||
%% @doc Detect flapping when a MQTT client disconnected.
|
||||
-spec(detect(emqx_types:clientinfo()) -> boolean()).
|
||||
-spec detect(emqx_types:clientinfo()) -> boolean().
|
||||
detect(#{clientid := ClientId, peerhost := PeerHost, zone := Zone}) ->
|
||||
Policy = #{max_count := Threshold} = get_policy(Zone),
|
||||
%% The initial flapping record sets the detect_cnt to 0.
|
||||
|
|
@ -83,7 +84,8 @@ detect(#{clientid := ClientId, peerhost := PeerHost, zone := Zone}) ->
|
|||
[Flapping] ->
|
||||
ok = gen_server:cast(?MODULE, {detected, Flapping, Policy}),
|
||||
true;
|
||||
[] -> false
|
||||
[] ->
|
||||
false
|
||||
end
|
||||
end.
|
||||
|
||||
|
|
@ -97,11 +99,13 @@ now_diff(TS) -> erlang:system_time(millisecond) - TS.
|
|||
%%--------------------------------------------------------------------
|
||||
|
||||
init([]) ->
|
||||
ok = emqx_tables:new(?FLAPPING_TAB, [public, set,
|
||||
{keypos, #flapping.clientid},
|
||||
{read_concurrency, true},
|
||||
{write_concurrency, true}
|
||||
]),
|
||||
ok = emqx_tables:new(?FLAPPING_TAB, [
|
||||
public,
|
||||
set,
|
||||
{keypos, #flapping.clientid},
|
||||
{read_concurrency, true},
|
||||
{write_concurrency, true}
|
||||
]),
|
||||
start_timers(),
|
||||
{ok, #{}, hibernate}.
|
||||
|
||||
|
|
@ -109,49 +113,65 @@ handle_call(Req, _From, State) ->
|
|||
?SLOG(error, #{msg => "unexpected_call", call => Req}),
|
||||
{reply, ignored, State}.
|
||||
|
||||
handle_cast({detected, #flapping{clientid = ClientId,
|
||||
peerhost = PeerHost,
|
||||
started_at = StartedAt,
|
||||
detect_cnt = DetectCnt},
|
||||
#{window_time := WindTime, ban_time := Interval}}, State) ->
|
||||
handle_cast(
|
||||
{detected,
|
||||
#flapping{
|
||||
clientid = ClientId,
|
||||
peerhost = PeerHost,
|
||||
started_at = StartedAt,
|
||||
detect_cnt = DetectCnt
|
||||
},
|
||||
#{window_time := WindTime, ban_time := Interval}},
|
||||
State
|
||||
) ->
|
||||
case now_diff(StartedAt) < WindTime of
|
||||
true -> %% Flapping happened:(
|
||||
?SLOG(warning, #{
|
||||
msg => "flapping_detected",
|
||||
peer_host => fmt_host(PeerHost),
|
||||
detect_cnt => DetectCnt,
|
||||
wind_time_in_ms => WindTime
|
||||
}, #{clientid => ClientId}),
|
||||
%% Flapping happened:(
|
||||
true ->
|
||||
?SLOG(
|
||||
warning,
|
||||
#{
|
||||
msg => "flapping_detected",
|
||||
peer_host => fmt_host(PeerHost),
|
||||
detect_cnt => DetectCnt,
|
||||
wind_time_in_ms => WindTime
|
||||
},
|
||||
#{clientid => ClientId}
|
||||
),
|
||||
Now = erlang:system_time(second),
|
||||
Banned = #banned{who = {clientid, ClientId},
|
||||
by = <<"flapping detector">>,
|
||||
reason = <<"flapping is detected">>,
|
||||
at = Now,
|
||||
until = Now + (Interval div 1000)},
|
||||
Banned = #banned{
|
||||
who = {clientid, ClientId},
|
||||
by = <<"flapping detector">>,
|
||||
reason = <<"flapping is detected">>,
|
||||
at = Now,
|
||||
until = Now + (Interval div 1000)
|
||||
},
|
||||
{ok, _} = emqx_banned:create(Banned),
|
||||
ok;
|
||||
false ->
|
||||
?SLOG(warning, #{
|
||||
msg => "client_disconnected",
|
||||
peer_host => fmt_host(PeerHost),
|
||||
detect_cnt => DetectCnt,
|
||||
interval => Interval
|
||||
}, #{clientid => ClientId})
|
||||
?SLOG(
|
||||
warning,
|
||||
#{
|
||||
msg => "client_disconnected",
|
||||
peer_host => fmt_host(PeerHost),
|
||||
detect_cnt => DetectCnt,
|
||||
interval => Interval
|
||||
},
|
||||
#{clientid => ClientId}
|
||||
)
|
||||
end,
|
||||
{noreply, State};
|
||||
|
||||
handle_cast(Msg, State) ->
|
||||
?SLOG(error, #{msg => "unexpected_cast", cast => Msg}),
|
||||
{noreply, State}.
|
||||
|
||||
handle_info({timeout, _TRef, {garbage_collect, Zone}}, State) ->
|
||||
Timestamp = erlang:system_time(millisecond)
|
||||
- maps:get(window_time, get_policy(Zone)),
|
||||
MatchSpec = [{{'_', '_', '_', '$1', '_'},[{'<', '$1', Timestamp}], [true]}],
|
||||
Timestamp =
|
||||
erlang:system_time(millisecond) -
|
||||
maps:get(window_time, get_policy(Zone)),
|
||||
MatchSpec = [{{'_', '_', '_', '$1', '_'}, [{'<', '$1', Timestamp}], [true]}],
|
||||
ets:select_delete(?FLAPPING_TAB, MatchSpec),
|
||||
_ = start_timer(Zone),
|
||||
{noreply, State, hibernate};
|
||||
|
||||
handle_info(Info, State) ->
|
||||
?SLOG(error, #{msg => "unexpected_info", info => Info}),
|
||||
{noreply, State}.
|
||||
|
|
@ -167,11 +187,16 @@ start_timer(Zone) ->
|
|||
emqx_misc:start_timer(WindTime, {garbage_collect, Zone}).
|
||||
|
||||
start_timers() ->
|
||||
lists:foreach(fun({Zone, _ZoneConf}) ->
|
||||
lists:foreach(
|
||||
fun({Zone, _ZoneConf}) ->
|
||||
start_timer(Zone)
|
||||
end, maps:to_list(emqx:get_config([zones], #{}))).
|
||||
end,
|
||||
maps:to_list(emqx:get_config([zones], #{}))
|
||||
).
|
||||
|
||||
fmt_host(PeerHost) ->
|
||||
try inet:ntoa(PeerHost)
|
||||
catch _:_ -> PeerHost
|
||||
try
|
||||
inet:ntoa(PeerHost)
|
||||
catch
|
||||
_:_ -> PeerHost
|
||||
end.
|
||||
|
|
|
|||
File diff suppressed because it is too large
|
|
@ -28,22 +28,27 @@
|
|||
|
||||
-include("types.hrl").
|
||||
|
||||
-export([ init/1
|
||||
, run/2
|
||||
, run/3
|
||||
, info/1
|
||||
, reset/1
|
||||
]).
|
||||
-export([
|
||||
init/1,
|
||||
run/2,
|
||||
run/3,
|
||||
info/1,
|
||||
reset/1
|
||||
]).
|
||||
|
||||
-export_type([opts/0, gc_state/0]).
|
||||
|
||||
-type(opts() :: #{count => integer(),
|
||||
bytes => integer()}).
|
||||
-type opts() :: #{
|
||||
count => integer(),
|
||||
bytes => integer()
|
||||
}.
|
||||
|
||||
-type(st() :: #{cnt => {integer(), integer()},
|
||||
oct => {integer(), integer()}}).
|
||||
-type st() :: #{
|
||||
cnt => {integer(), integer()},
|
||||
oct => {integer(), integer()}
|
||||
}.
|
||||
|
||||
-opaque(gc_state() :: {gc_state, st()}).
|
||||
-opaque gc_state() :: {gc_state, st()}.
|
||||
|
||||
-define(GCS(St), {gc_state, St}).
|
||||
|
||||
|
|
@ -51,27 +56,27 @@
|
|||
-define(ENABLED(X), (is_integer(X) andalso X > 0)).
|
||||
|
||||
%% @doc Initialize force GC state.
|
||||
-spec(init(opts()) -> gc_state()).
|
||||
-spec init(opts()) -> gc_state().
|
||||
init(#{count := Count, bytes := Bytes}) ->
|
||||
Cnt = [{cnt, {Count, Count}} || ?ENABLED(Count)],
|
||||
Oct = [{oct, {Bytes, Bytes}} || ?ENABLED(Bytes)],
|
||||
?GCS(maps:from_list(Cnt ++ Oct)).
|
||||
|
||||
%% @doc Try to run GC based on reduntions of count or bytes.
|
||||
-spec(run(#{cnt := pos_integer(), oct := pos_integer()}, gc_state())
|
||||
-> {boolean(), gc_state()}).
|
||||
-spec run(#{cnt := pos_integer(), oct := pos_integer()}, gc_state()) ->
|
||||
{boolean(), gc_state()}.
|
||||
run(#{cnt := Cnt, oct := Oct}, GcSt) ->
|
||||
run(Cnt, Oct, GcSt).
|
||||
|
||||
-spec(run(pos_integer(), pos_integer(), gc_state())
|
||||
-> {boolean(), gc_state()}).
|
||||
-spec run(pos_integer(), pos_integer(), gc_state()) ->
|
||||
{boolean(), gc_state()}.
|
||||
run(Cnt, Oct, ?GCS(St)) ->
|
||||
{Res, St1} = do_run([{cnt, Cnt}, {oct, Oct}], St),
|
||||
{Res, ?GCS(St1)}.
|
||||
|
||||
do_run([], St) ->
|
||||
{false, St};
|
||||
do_run([{K, N}|T], St) ->
|
||||
do_run([{K, N} | T], St) ->
|
||||
case dec(K, N, St) of
|
||||
{true, St1} ->
|
||||
erlang:garbage_collect(),
|
||||
|
|
@ -81,11 +86,11 @@ do_run([{K, N}|T], St) ->
|
|||
end.
|
||||
|
||||
%% @doc Info of GC state.
|
||||
-spec(info(maybe(gc_state())) -> maybe(map())).
|
||||
-spec info(maybe(gc_state())) -> maybe(map()).
|
||||
info(?GCS(St)) -> St.
|
||||
|
||||
%% @doc Reset counters to zero.
|
||||
-spec(reset(maybe(gc_state())) -> gc_state()).
|
||||
-spec reset(maybe(gc_state())) -> gc_state().
|
||||
reset(?GCS(St)) ->
|
||||
?GCS(do_reset(St)).
|
||||
|
||||
|
|
@ -93,7 +98,7 @@ reset(?GCS(St)) ->
|
|||
%% Internal functions
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(dec(cnt | oct, pos_integer(), st()) -> {boolean(), st()}).
|
||||
-spec dec(cnt | oct, pos_integer(), st()) -> {boolean(), st()}.
|
||||
dec(Key, Num, St) ->
|
||||
case maps:get(Key, St, ?disabled) of
|
||||
?disabled ->
|
||||
|
|
@ -113,4 +118,3 @@ do_reset(Key, St) ->
|
|||
?disabled -> St;
|
||||
{Init, _} -> maps:put(Key, {Init, Init}, St)
|
||||
end.
|
||||
|
||||
|
|
|
|||
|
|
@ -30,17 +30,17 @@
|
|||
|
||||
-module(emqx_guid).
|
||||
|
||||
-export([ gen/0
|
||||
, new/0
|
||||
, timestamp/1
|
||||
, to_hexstr/1
|
||||
, from_hexstr/1
|
||||
, to_base62/1
|
||||
, from_base62/1
|
||||
]).
|
||||
-export([
|
||||
gen/0,
|
||||
new/0,
|
||||
timestamp/1,
|
||||
to_hexstr/1,
|
||||
from_hexstr/1,
|
||||
to_base62/1,
|
||||
from_base62/1
|
||||
]).
|
||||
|
||||
-export_type([ guid/0
|
||||
]).
|
||||
-export_type([guid/0]).
|
||||
|
||||
-define(TAG_VERSION, 131).
|
||||
-define(PID_EXT, 103).
|
||||
|
|
@ -48,21 +48,23 @@
|
|||
|
||||
-define(MAX_SEQ, 16#FFFF).
|
||||
|
||||
-type(guid() :: <<_:128>>).
|
||||
-type guid() :: <<_:128>>.
|
||||
|
||||
%% @doc Generate a global unique id.
|
||||
-spec(gen() -> guid()).
|
||||
-spec gen() -> guid().
|
||||
gen() ->
|
||||
Guid = case get(guid) of
|
||||
undefined -> new();
|
||||
{_Ts, NPid, Seq} -> next(NPid, Seq)
|
||||
end,
|
||||
put(guid, Guid), bin(Guid).
|
||||
Guid =
|
||||
case get(guid) of
|
||||
undefined -> new();
|
||||
{_Ts, NPid, Seq} -> next(NPid, Seq)
|
||||
end,
|
||||
put(guid, Guid),
|
||||
bin(Guid).
|
||||
|
||||
new() ->
|
||||
{ts(), npid(), 0}.
|
||||
|
||||
-spec(timestamp(guid()) -> integer()).
|
||||
-spec timestamp(guid()) -> integer().
|
||||
timestamp(<<Ts:64, _/binary>>) ->
|
||||
Ts.
|
||||
|
||||
|
|
@ -78,11 +80,10 @@ ts() -> erlang:system_time(micro_seconds).
|
|||
|
||||
%% Copied from https://github.com/okeuday/uuid.git.
|
||||
npid() ->
|
||||
<<NodeD01, NodeD02, NodeD03, NodeD04, NodeD05,
|
||||
NodeD06, NodeD07, NodeD08, NodeD09, NodeD10,
|
||||
NodeD11, NodeD12, NodeD13, NodeD14, NodeD15,
|
||||
NodeD16, NodeD17, NodeD18, NodeD19, NodeD20>> =
|
||||
crypto:hash(sha, erlang:list_to_binary(erlang:atom_to_list(node()))),
|
||||
<<NodeD01, NodeD02, NodeD03, NodeD04, NodeD05, NodeD06, NodeD07, NodeD08, NodeD09, NodeD10,
|
||||
NodeD11, NodeD12, NodeD13, NodeD14, NodeD15, NodeD16, NodeD17, NodeD18, NodeD19,
|
||||
NodeD20>> =
|
||||
crypto:hash(sha, erlang:list_to_binary(erlang:atom_to_list(node()))),
|
||||
|
||||
PidBin =
|
||||
case erlang:term_to_binary(self()) of
|
||||
|
|
@ -95,38 +96,44 @@ npid() ->
|
|||
end,
|
||||
|
||||
% 72/86 bits for the Erlang pid
|
||||
<<PidID1:8, PidID2:8, PidID3:8, PidID4:8, % ID (Node specific, 15 bits)
|
||||
PidSR1:8, PidSR2:8, PidSR3:8, PidSR4:8, % Serial (extra uniqueness)
|
||||
PidCreation/binary % Node Creation Count
|
||||
>> = PidBin,
|
||||
|
||||
PidCR1 = case PidCreation of
|
||||
<<D1>> ->
|
||||
D1;
|
||||
<<D1, D2, D3, D4>> ->
|
||||
D1 bxor D2 bxor D3 bxor D4
|
||||
end,
|
||||
% ID (Node specific, 15 bits)
|
||||
<<PidID1:8, PidID2:8, PidID3:8, PidID4:8,
|
||||
% Serial (extra uniqueness)
|
||||
PidSR1:8, PidSR2:8, PidSR3:8, PidSR4:8,
|
||||
% Node Creation Count
|
||||
PidCreation/binary>> = PidBin,
|
||||
|
||||
PidCR1 =
|
||||
case PidCreation of
|
||||
<<D1>> ->
|
||||
D1;
|
||||
<<D1, D2, D3, D4>> ->
|
||||
D1 bxor D2 bxor D3 bxor D4
|
||||
end,
|
||||
|
||||
% reduce the 160 bit NodeData checksum to 16 bits
|
||||
NodeByte1 = ((((((((NodeD01 bxor NodeD02)
|
||||
bxor NodeD03)
|
||||
bxor NodeD04)
|
||||
bxor NodeD05)
|
||||
bxor NodeD06)
|
||||
bxor NodeD07)
|
||||
bxor NodeD08)
|
||||
bxor NodeD09)
|
||||
bxor NodeD10,
|
||||
NodeByte2 = (((((((((NodeD11 bxor NodeD12)
|
||||
bxor NodeD13)
|
||||
bxor NodeD14)
|
||||
bxor NodeD15)
|
||||
bxor NodeD16)
|
||||
bxor NodeD17)
|
||||
bxor NodeD18)
|
||||
bxor NodeD19)
|
||||
bxor NodeD20)
|
||||
bxor PidCR1,
|
||||
NodeByte1 =
|
||||
((((((((NodeD01 bxor NodeD02) bxor
|
||||
NodeD03) bxor
|
||||
NodeD04) bxor
|
||||
NodeD05) bxor
|
||||
NodeD06) bxor
|
||||
NodeD07) bxor
|
||||
NodeD08) bxor
|
||||
NodeD09) bxor
|
||||
NodeD10,
|
||||
NodeByte2 =
|
||||
(((((((((NodeD11 bxor NodeD12) bxor
|
||||
NodeD13) bxor
|
||||
NodeD14) bxor
|
||||
NodeD15) bxor
|
||||
NodeD16) bxor
|
||||
NodeD17) bxor
|
||||
NodeD18) bxor
|
||||
NodeD19) bxor
|
||||
NodeD20) bxor
|
||||
PidCR1,
|
||||
|
||||
% reduce the Erlang pid to 32 bits
|
||||
PidByte1 = PidID1 bxor PidSR4,
|
||||
|
|
@ -134,9 +141,7 @@ npid() ->
|
|||
PidByte3 = PidID3 bxor PidSR2,
|
||||
PidByte4 = PidID4 bxor PidSR1,
|
||||
|
||||
<<NPid:48>> = <<NodeByte1:8, NodeByte2:8,
|
||||
PidByte1:8, PidByte2:8,
|
||||
PidByte3:8, PidByte4:8>>,
|
||||
<<NPid:48>> = <<NodeByte1:8, NodeByte2:8, PidByte1:8, PidByte2:8, PidByte3:8, PidByte4:8>>,
|
||||
NPid.
|
||||
|
||||
to_hexstr(I) when byte_size(I) =:= 16 ->
|
||||
|
|
@ -149,5 +154,5 @@ to_base62(<<I:128>>) ->
|
|||
emqx_base62:encode(I).
|
||||
|
||||
from_base62(S) ->
|
||||
I = binary_to_integer( emqx_base62:decode(S)),
|
||||
I = binary_to_integer(emqx_base62:decode(S)),
|
||||
<<I:128>>.
|
||||
|
|
|
|||
|
|
@ -17,22 +17,22 @@
|
|||
%% @doc HOCON schema help module
|
||||
-module(emqx_hocon).
|
||||
|
||||
-export([ format_path/1
|
||||
, check/2
|
||||
]).
|
||||
-export([
|
||||
format_path/1,
|
||||
check/2
|
||||
]).
|
||||
|
||||
%% @doc Format hocon config field path to dot-separated string in iolist format.
|
||||
-spec format_path([atom() | string() | binary()]) -> iolist().
|
||||
format_path([]) -> "";
|
||||
format_path([Name]) -> iol(Name);
|
||||
format_path([Name | Rest]) ->
|
||||
[iol(Name) , "." | format_path(Rest)].
|
||||
format_path([Name | Rest]) -> [iol(Name), "." | format_path(Rest)].
|
||||
|
||||
%% @doc Plain check the input config.
|
||||
%% The input can either be `richmap' or plain `map'.
|
||||
%% Always return plain map with atom keys.
|
||||
-spec check(module(), hocon:config() | iodata()) ->
|
||||
{ok, hocon:config()} | {error, any()}.
|
||||
{ok, hocon:config()} | {error, any()}.
|
||||
check(SchemaModule, Conf) when is_map(Conf) ->
|
||||
%% TODO: remove required
|
||||
%% fields should state required or not in their schema
|
||||
|
|
@ -40,7 +40,7 @@ check(SchemaModule, Conf) when is_map(Conf) ->
|
|||
try
|
||||
{ok, hocon_tconf:check_plain(SchemaModule, Conf, Opts)}
|
||||
catch
|
||||
throw : Reason ->
|
||||
throw:Reason ->
|
||||
{error, Reason}
|
||||
end;
|
||||
check(SchemaModule, HoconText) ->
|
||||
|
|
|
|||
|
|
@ -22,42 +22,46 @@
|
|||
-include("types.hrl").
|
||||
-include_lib("stdlib/include/ms_transform.hrl").
|
||||
|
||||
|
||||
-export([ start_link/0
|
||||
, stop/0
|
||||
]).
|
||||
-export([
|
||||
start_link/0,
|
||||
stop/0
|
||||
]).
|
||||
|
||||
%% Hooks API
|
||||
-export([ add/2
|
||||
, add/3
|
||||
, add/4
|
||||
, put/2
|
||||
, put/3
|
||||
, put/4
|
||||
, del/2
|
||||
, run/2
|
||||
, run_fold/3
|
||||
, lookup/1
|
||||
]).
|
||||
-export([
|
||||
add/2,
|
||||
add/3,
|
||||
add/4,
|
||||
put/2,
|
||||
put/3,
|
||||
put/4,
|
||||
del/2,
|
||||
run/2,
|
||||
run_fold/3,
|
||||
lookup/1
|
||||
]).
|
||||
|
||||
-export([ callback_action/1
|
||||
, callback_filter/1
|
||||
, callback_priority/1
|
||||
]).
|
||||
-export([
|
||||
callback_action/1,
|
||||
callback_filter/1,
|
||||
callback_priority/1
|
||||
]).
|
||||
|
||||
%% gen_server Function Exports
|
||||
-export([ init/1
|
||||
, handle_call/3
|
||||
, handle_cast/2
|
||||
, handle_info/2
|
||||
, terminate/2
|
||||
, code_change/3
|
||||
]).
|
||||
-export([
|
||||
init/1,
|
||||
handle_call/3,
|
||||
handle_cast/2,
|
||||
handle_info/2,
|
||||
terminate/2,
|
||||
code_change/3
|
||||
]).
|
||||
|
||||
-export_type([ hookpoint/0
|
||||
, action/0
|
||||
, filter/0
|
||||
]).
|
||||
-export_type([
|
||||
hookpoint/0,
|
||||
action/0,
|
||||
filter/0
|
||||
]).
|
||||
|
||||
%% Multiple callbacks can be registered on a hookpoint.
|
||||
%% The execution order depends on the priority value:
|
||||
|
|
@ -67,32 +71,36 @@
|
|||
%% - The execution order is the adding order of callbacks if they have
|
||||
%% equal priority values.
|
||||
|
||||
-type(hookpoint() :: atom() | binary()).
|
||||
-type(action() :: {module(), atom(), [term()] | undefined}).
|
||||
-type(filter() :: {module(), atom(), [term()] | undefined}).
|
||||
-type hookpoint() :: atom() | binary().
|
||||
-type action() :: {module(), atom(), [term()] | undefined}.
|
||||
-type filter() :: {module(), atom(), [term()] | undefined}.
|
||||
|
||||
-record(callback, {
|
||||
action :: action(),
|
||||
filter :: maybe(filter()),
|
||||
priority :: integer()
|
||||
}).
|
||||
action :: action(),
|
||||
filter :: maybe(filter()),
|
||||
priority :: integer()
|
||||
}).
|
||||
|
||||
-type(callback() :: #callback{}).
|
||||
-type callback() :: #callback{}.
|
||||
|
||||
-record(hook, {
|
||||
name :: hookpoint(),
|
||||
callbacks :: list(#callback{})
|
||||
}).
|
||||
name :: hookpoint(),
|
||||
callbacks :: list(#callback{})
|
||||
}).
|
||||
|
||||
-define(TAB, ?MODULE).
|
||||
-define(SERVER, ?MODULE).
|
||||
|
||||
-spec(start_link() -> startlink_ret()).
|
||||
-spec start_link() -> startlink_ret().
|
||||
start_link() ->
|
||||
gen_server:start_link({local, ?SERVER},
|
||||
?MODULE, [], [{hibernate_after, 1000}]).
|
||||
gen_server:start_link(
|
||||
{local, ?SERVER},
|
||||
?MODULE,
|
||||
[],
|
||||
[{hibernate_after, 1000}]
|
||||
).
|
||||
|
||||
-spec(stop() -> ok).
|
||||
-spec stop() -> ok.
|
||||
stop() ->
|
||||
gen_server:stop(?SERVER, normal, infinity).
|
||||
|
||||
|
|
@ -107,65 +115,63 @@ callback_action(#callback{action = A}) -> A.
|
|||
callback_filter(#callback{filter = F}) -> F.
|
||||
|
||||
%% @doc Get callback priority.
|
||||
callback_priority(#callback{priority= P}) -> P.
|
||||
callback_priority(#callback{priority = P}) -> P.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Hooks API
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
%% @doc Register a callback
|
||||
-spec(add(hookpoint(), action() | callback()) -> ok_or_error(already_exists)).
|
||||
-spec add(hookpoint(), action() | callback()) -> ok_or_error(already_exists).
|
||||
add(HookPoint, Callback) when is_record(Callback, callback) ->
|
||||
gen_server:call(?SERVER, {add, HookPoint, Callback}, infinity);
|
||||
add(HookPoint, Action) when is_function(Action); is_tuple(Action) ->
|
||||
add(HookPoint, #callback{action = Action, priority = 0}).
|
||||
|
||||
-spec(add(hookpoint(), action(), filter() | integer() | list())
|
||||
-> ok_or_error(already_exists)).
|
||||
-spec add(hookpoint(), action(), filter() | integer() | list()) ->
|
||||
ok_or_error(already_exists).
|
||||
add(HookPoint, Action, {_M, _F, _A} = Filter) ->
|
||||
add(HookPoint, #callback{action = Action, filter = Filter, priority = 0});
|
||||
add(HookPoint, Action, Priority) when is_integer(Priority) ->
|
||||
add(HookPoint, #callback{action = Action, priority = Priority}).
|
||||
|
||||
-spec(add(hookpoint(), action(), filter(), integer())
|
||||
-> ok_or_error(already_exists)).
|
||||
-spec add(hookpoint(), action(), filter(), integer()) ->
|
||||
ok_or_error(already_exists).
|
||||
add(HookPoint, Action, Filter, Priority) when is_integer(Priority) ->
|
||||
add(HookPoint, #callback{action = Action, filter = Filter, priority = Priority}).
|
||||
|
||||
%% @doc Like add/2, it register a callback, discard 'already_exists' error.
|
||||
-spec(put(hookpoint(), action() | callback()) -> ok).
|
||||
-spec put(hookpoint(), action() | callback()) -> ok.
|
||||
put(HookPoint, Callback) when is_record(Callback, callback) ->
|
||||
case add(HookPoint, Callback) of
|
||||
ok -> ok;
|
||||
{error, already_exists} ->
|
||||
gen_server:call(?SERVER, {put, HookPoint, Callback}, infinity)
|
||||
{error, already_exists} -> gen_server:call(?SERVER, {put, HookPoint, Callback}, infinity)
|
||||
end;
|
||||
put(HookPoint, Action) when is_function(Action); is_tuple(Action) ->
|
||||
?MODULE:put(HookPoint, #callback{action = Action, priority = 0}).
|
||||
|
||||
-spec(put(hookpoint(), action(), filter() | integer() | list()) -> ok).
|
||||
-spec put(hookpoint(), action(), filter() | integer() | list()) -> ok.
|
||||
put(HookPoint, Action, {_M, _F, _A} = Filter) ->
|
||||
?MODULE:put(HookPoint, #callback{action = Action, filter = Filter, priority = 0});
|
||||
put(HookPoint, Action, Priority) when is_integer(Priority) ->
|
||||
?MODULE:put(HookPoint, #callback{action = Action, priority = Priority}).
|
||||
|
||||
-spec(put(hookpoint(), action(), filter(), integer()) -> ok).
|
||||
-spec put(hookpoint(), action(), filter(), integer()) -> ok.
|
||||
put(HookPoint, Action, Filter, Priority) when is_integer(Priority) ->
|
||||
?MODULE:put(HookPoint, #callback{action = Action, filter = Filter, priority = Priority}).
|
||||
|
||||
|
||||
%% @doc Unregister a callback.
|
||||
-spec(del(hookpoint(), action() | {module(), atom()}) -> ok).
|
||||
-spec del(hookpoint(), action() | {module(), atom()}) -> ok.
|
||||
del(HookPoint, Action) ->
|
||||
gen_server:cast(?SERVER, {del, HookPoint, Action}).
|
||||
|
||||
%% @doc Run hooks.
|
||||
-spec(run(hookpoint(), list(Arg::term())) -> ok).
|
||||
-spec run(hookpoint(), list(Arg :: term())) -> ok.
|
||||
run(HookPoint, Args) ->
|
||||
do_run(lookup(HookPoint), Args).
|
||||
|
||||
%% @doc Run hooks with Accumulator.
|
||||
-spec(run_fold(hookpoint(), list(Arg::term()), Acc::term()) -> Acc::term()).
|
||||
-spec run_fold(hookpoint(), list(Arg :: term()), Acc :: term()) -> Acc :: term().
|
||||
run_fold(HookPoint, Args, Acc) ->
|
||||
do_run_fold(lookup(HookPoint), Args, Acc).
|
||||
|
||||
|
|
@ -187,9 +193,9 @@ do_run_fold([#callback{action = Action, filter = Filter} | Callbacks], Args, Acc
|
|||
%% stop the hook chain
|
||||
stop -> Acc;
|
||||
%% stop the hook chain with NewAcc
|
||||
{stop, NewAcc} -> NewAcc;
|
||||
{stop, NewAcc} -> NewAcc;
|
||||
%% continue the hook chain with NewAcc
|
||||
{ok, NewAcc} -> do_run_fold(Callbacks, Args, NewAcc);
|
||||
{ok, NewAcc} -> do_run_fold(Callbacks, Args, NewAcc);
|
||||
%% continue the hook chain, in the following cases:
|
||||
%% - the filter validation failed with 'false'
|
||||
%% - the callback returns any term other than 'stop' or {'stop', NewAcc}
|
||||
|
|
@ -198,10 +204,9 @@ do_run_fold([#callback{action = Action, filter = Filter} | Callbacks], Args, Acc
|
|||
do_run_fold([], _Args, Acc) ->
|
||||
Acc.
|
||||
|
||||
-spec(filter_passed(filter(), Args::term()) -> true | false).
|
||||
-spec filter_passed(filter(), Args :: term()) -> true | false.
|
||||
filter_passed(undefined, _Args) -> true;
|
||||
filter_passed(Filter, Args) ->
|
||||
execute(Filter, Args).
|
||||
filter_passed(Filter, Args) -> execute(Filter, Args).
|
||||
|
||||
safe_execute({M, F, A}, Args) ->
|
||||
try execute({M, F, A}, Args) of
|
||||
|
|
@ -222,12 +227,13 @@ execute({M, F, A}, Args) ->
|
|||
erlang:apply(M, F, Args ++ A).
|
||||
|
||||
%% @doc Lookup callbacks.
|
||||
-spec(lookup(hookpoint()) -> [callback()]).
|
||||
-spec lookup(hookpoint()) -> [callback()].
|
||||
lookup(HookPoint) ->
|
||||
case ets:lookup(?TAB, HookPoint) of
|
||||
[#hook{callbacks = Callbacks}] ->
|
||||
Callbacks;
|
||||
[] -> []
|
||||
[] ->
|
||||
[]
|
||||
end.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
|
|
@ -239,19 +245,23 @@ init([]) ->
|
|||
{ok, #{}}.
|
||||
|
||||
handle_call({add, HookPoint, Callback = #callback{action = {M, F, _}}}, _From, State) ->
|
||||
Reply = case lists:any(fun (#callback{action = {M0, F0, _}}) ->
|
||||
M0 =:= M andalso F0 =:= F
|
||||
end, Callbacks = lookup(HookPoint)) of
|
||||
true -> {error, already_exists};
|
||||
false -> insert_hook(HookPoint, add_callback(Callback, Callbacks))
|
||||
end,
|
||||
Reply =
|
||||
case
|
||||
lists:any(
|
||||
fun(#callback{action = {M0, F0, _}}) ->
|
||||
M0 =:= M andalso F0 =:= F
|
||||
end,
|
||||
Callbacks = lookup(HookPoint)
|
||||
)
|
||||
of
|
||||
true -> {error, already_exists};
|
||||
false -> insert_hook(HookPoint, add_callback(Callback, Callbacks))
|
||||
end,
|
||||
{reply, Reply, State};
|
||||
|
||||
handle_call({put, HookPoint, Callback = #callback{action = {M, F, _}}}, _From, State) ->
|
||||
Callbacks = del_callback({M, F}, lookup(HookPoint)),
|
||||
Reply = update_hook(HookPoint, add_callback(Callback, Callbacks)),
|
||||
{reply, Reply, State};
|
||||
|
||||
handle_call(Req, _From, State) ->
|
||||
?SLOG(error, #{msg => "unexpected_call", req => Req}),
|
||||
{reply, ignored, State}.
|
||||
|
|
@ -264,7 +274,6 @@ handle_cast({del, HookPoint, Action}, State) ->
|
|||
insert_hook(HookPoint, Callbacks)
|
||||
end,
|
||||
{noreply, State};
|
||||
|
||||
handle_cast(Msg, State) ->
|
||||
?SLOG(error, #{msg => "unexpected_cast", req => Msg}),
|
||||
{noreply, State}.
|
||||
|
|
@ -284,9 +293,10 @@ code_change(_OldVsn, State, _Extra) ->
|
|||
%%------------------------------------------------------------------------------
|
||||
|
||||
insert_hook(HookPoint, Callbacks) ->
|
||||
ets:insert(?TAB, #hook{name = HookPoint, callbacks = Callbacks}), ok.
|
||||
ets:insert(?TAB, #hook{name = HookPoint, callbacks = Callbacks}),
|
||||
ok.
|
||||
update_hook(HookPoint, Callbacks) ->
|
||||
Ms = ets:fun2ms(fun ({hook, K, V}) when K =:= HookPoint -> {hook, K, Callbacks} end),
|
||||
Ms = ets:fun2ms(fun({hook, K, V}) when K =:= HookPoint -> {hook, K, Callbacks} end),
|
||||
ets:select_replace(emqx_hooks, Ms),
|
||||
ok.
|
||||
|
||||
|
|
@ -295,11 +305,17 @@ add_callback(C, Callbacks) ->
|
|||
|
||||
add_callback(C, [], Acc) ->
|
||||
lists:reverse([C | Acc]);
|
||||
add_callback(C1 = #callback{priority = P1}, [C2 = #callback{priority = P2} | More], Acc)
|
||||
when P1 < P2 ->
|
||||
add_callback(C1 = #callback{priority = P1}, [C2 = #callback{priority = P2} | More], Acc) when
|
||||
P1 < P2
|
||||
->
|
||||
add_callback(C1, More, [C2 | Acc]);
|
||||
add_callback(C1 = #callback{priority = P1, action = MFA1}, [C2 = #callback{priority = P2, action = MFA2} | More], Acc)
|
||||
when P1 =:= P2 andalso MFA1 >= MFA2 ->
|
||||
add_callback(
|
||||
C1 = #callback{priority = P1, action = MFA1},
|
||||
[C2 = #callback{priority = P2, action = MFA2} | More],
|
||||
Acc
|
||||
) when
|
||||
P1 =:= P2 andalso MFA1 >= MFA2
|
||||
->
|
||||
add_callback(C1, More, [C2 | Acc]);
|
||||
add_callback(C1, More, Acc) ->
|
||||
lists:append(lists:reverse(Acc), [C1 | More]).
|
||||
|
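For context, a minimal usage sketch of the hooks API shown above. It is illustrative only and not part of the diff: the hookpoint name 'message.publish' and the module emqx_my_plugin are assumptions, and the callback arity relies on run_fold/3 appending the accumulator to the call arguments before execute/2 applies them.

-module(emqx_my_plugin).
-export([load/0, unload/0, on_publish/2]).

%% Register on_publish/2 with priority 0; callbacks on the same hookpoint
%% run in ascending priority order (see add_callback/3 above).
load() ->
    ok = emqx_hooks:add('message.publish', {?MODULE, on_publish, []}, 0).

unload() ->
    ok = emqx_hooks:del('message.publish', {?MODULE, on_publish}).

%% Returning {ok, NewAcc} continues the chain; {stop, NewAcc} halts it and
%% yields NewAcc as the fold result.
on_publish(Message, Acc) ->
    {ok, [Message | Acc]}.

%% Caller side (e.g. inside the broker):
%%   Acc = emqx_hooks:run_fold('message.publish', [Message], []).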
|
|
|||
|
|
@ -19,109 +19,109 @@
|
|||
-compile(inline).
|
||||
|
||||
%% APIs
|
||||
-export([ new/0
|
||||
, new/1
|
||||
, contain/2
|
||||
, lookup/2
|
||||
, insert/3
|
||||
, update/3
|
||||
, resize/2
|
||||
, delete/2
|
||||
, values/1
|
||||
, to_list/1
|
||||
, to_list/2
|
||||
, size/1
|
||||
, max_size/1
|
||||
, is_full/1
|
||||
, is_empty/1
|
||||
, window/1
|
||||
]).
|
||||
-export([
|
||||
new/0,
|
||||
new/1,
|
||||
contain/2,
|
||||
lookup/2,
|
||||
insert/3,
|
||||
update/3,
|
||||
resize/2,
|
||||
delete/2,
|
||||
values/1,
|
||||
to_list/1,
|
||||
to_list/2,
|
||||
size/1,
|
||||
max_size/1,
|
||||
is_full/1,
|
||||
is_empty/1,
|
||||
window/1
|
||||
]).
|
||||
|
||||
-export_type([inflight/0]).
|
||||
|
||||
-type(key() :: term()).
|
||||
-type key() :: term().
|
||||
|
||||
-type(max_size() :: pos_integer()).
|
||||
-type max_size() :: pos_integer().
|
||||
|
||||
-opaque(inflight() :: {inflight, max_size(), gb_trees:tree()}).
|
||||
-opaque inflight() :: {inflight, max_size(), gb_trees:tree()}.
|
||||
|
||||
-define(INFLIGHT(Tree), {inflight, _MaxSize, Tree}).
|
||||
|
||||
-define(INFLIGHT(MaxSize, Tree), {inflight, MaxSize, (Tree)}).
|
||||
|
||||
-spec(new() -> inflight()).
|
||||
-spec new() -> inflight().
|
||||
new() -> new(0).
|
||||
|
||||
-spec(new(non_neg_integer()) -> inflight()).
|
||||
-spec new(non_neg_integer()) -> inflight().
|
||||
new(MaxSize) when MaxSize >= 0 ->
|
||||
?INFLIGHT(MaxSize, gb_trees:empty()).
|
||||
|
||||
-spec(contain(key(), inflight()) -> boolean()).
|
||||
-spec contain(key(), inflight()) -> boolean().
|
||||
contain(Key, ?INFLIGHT(Tree)) ->
|
||||
gb_trees:is_defined(Key, Tree).
|
||||
|
||||
-spec(lookup(key(), inflight()) -> {value, term()} | none).
|
||||
-spec lookup(key(), inflight()) -> {value, term()} | none.
|
||||
lookup(Key, ?INFLIGHT(Tree)) ->
|
||||
gb_trees:lookup(Key, Tree).
|
||||
|
||||
-spec(insert(key(), Val :: term(), inflight()) -> inflight()).
|
||||
-spec insert(key(), Val :: term(), inflight()) -> inflight().
|
||||
insert(Key, Val, ?INFLIGHT(MaxSize, Tree)) ->
|
||||
?INFLIGHT(MaxSize, gb_trees:insert(Key, Val, Tree)).
|
||||
|
||||
-spec(delete(key(), inflight()) -> inflight()).
|
||||
-spec delete(key(), inflight()) -> inflight().
|
||||
delete(Key, ?INFLIGHT(MaxSize, Tree)) ->
|
||||
?INFLIGHT(MaxSize, gb_trees:delete(Key, Tree)).
|
||||
|
||||
-spec(update(key(), Val :: term(), inflight()) -> inflight()).
|
||||
-spec update(key(), Val :: term(), inflight()) -> inflight().
|
||||
update(Key, Val, ?INFLIGHT(MaxSize, Tree)) ->
|
||||
?INFLIGHT(MaxSize, gb_trees:update(Key, Val, Tree)).
|
||||
|
||||
-spec(resize(integer(), inflight()) -> inflight()).
|
||||
-spec resize(integer(), inflight()) -> inflight().
|
||||
resize(MaxSize, ?INFLIGHT(Tree)) ->
|
||||
?INFLIGHT(MaxSize, Tree).
|
||||
|
||||
-spec(is_full(inflight()) -> boolean()).
|
||||
-spec is_full(inflight()) -> boolean().
|
||||
is_full(?INFLIGHT(0, _Tree)) ->
|
||||
false;
|
||||
is_full(?INFLIGHT(MaxSize, Tree)) ->
|
||||
MaxSize =< gb_trees:size(Tree).
|
||||
|
||||
-spec(is_empty(inflight()) -> boolean()).
|
||||
-spec is_empty(inflight()) -> boolean().
|
||||
is_empty(?INFLIGHT(Tree)) ->
|
||||
gb_trees:is_empty(Tree).
|
||||
|
||||
-spec(smallest(inflight()) -> {key(), term()}).
|
||||
-spec smallest(inflight()) -> {key(), term()}.
|
||||
smallest(?INFLIGHT(Tree)) ->
|
||||
gb_trees:smallest(Tree).
|
||||
|
||||
-spec(largest(inflight()) -> {key(), term()}).
|
||||
-spec largest(inflight()) -> {key(), term()}.
|
||||
largest(?INFLIGHT(Tree)) ->
|
||||
gb_trees:largest(Tree).
|
||||
|
||||
-spec(values(inflight()) -> list()).
|
||||
-spec values(inflight()) -> list().
|
||||
values(?INFLIGHT(Tree)) ->
|
||||
gb_trees:values(Tree).
|
||||
|
||||
-spec(to_list(inflight()) -> list({key(), term()})).
|
||||
-spec to_list(inflight()) -> list({key(), term()}).
|
||||
to_list(?INFLIGHT(Tree)) ->
|
||||
gb_trees:to_list(Tree).
|
||||
|
||||
-spec(to_list(fun(), inflight()) -> list({key(), term()})).
|
||||
-spec to_list(fun(), inflight()) -> list({key(), term()}).
|
||||
to_list(SortFun, ?INFLIGHT(Tree)) ->
|
||||
lists:sort(SortFun, gb_trees:to_list(Tree)).
|
||||
|
||||
-spec(window(inflight()) -> list()).
|
||||
-spec window(inflight()) -> list().
|
||||
window(Inflight = ?INFLIGHT(Tree)) ->
|
||||
case gb_trees:is_empty(Tree) of
|
||||
true -> [];
|
||||
false -> [Key || {Key, _Val} <- [smallest(Inflight), largest(Inflight)]]
|
||||
end.
|
||||
|
||||
-spec(size(inflight()) -> non_neg_integer()).
|
||||
-spec size(inflight()) -> non_neg_integer().
|
||||
size(?INFLIGHT(Tree)) ->
|
||||
gb_trees:size(Tree).
|
||||
|
||||
-spec(max_size(inflight()) -> non_neg_integer()).
|
||||
-spec max_size(inflight()) -> non_neg_integer().
|
||||
max_size(?INFLIGHT(MaxSize, _Tree)) ->
|
||||
MaxSize.
|
||||
|
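A usage sketch of the inflight window above, derived directly from the formatted API (illustrative only; keys and values are arbitrary):

%% A bounded window of two entries backed by a gb_tree.
Inflight0 = emqx_inflight:new(2),
false = emqx_inflight:is_full(Inflight0),
Inflight1 = emqx_inflight:insert(1, req_a, Inflight0),
Inflight2 = emqx_inflight:insert(2, req_b, Inflight1),
true = emqx_inflight:is_full(Inflight2),
{value, req_a} = emqx_inflight:lookup(1, Inflight2),
%% window/1 returns the smallest and largest keys currently in flight.
[1, 2] = emqx_inflight:window(Inflight2),
Inflight3 = emqx_inflight:delete(1, Inflight2),
1 = emqx_inflight:size(Inflight3).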
||||
|
|
|
|||
|
|
@ -18,52 +18,58 @@
|
|||
|
||||
-compile(inline).
|
||||
|
||||
-export([ encode/1
|
||||
, encode/2
|
||||
, safe_encode/1
|
||||
, safe_encode/2
|
||||
]).
|
||||
-export([
|
||||
encode/1,
|
||||
encode/2,
|
||||
safe_encode/1,
|
||||
safe_encode/2
|
||||
]).
|
||||
|
||||
-compile({inline,
|
||||
[ encode/1
|
||||
, encode/2
|
||||
]}).
|
||||
-compile(
|
||||
{inline, [
|
||||
encode/1,
|
||||
encode/2
|
||||
]}
|
||||
).
|
||||
|
||||
-export([ decode/1
|
||||
, decode/2
|
||||
, safe_decode/1
|
||||
, safe_decode/2
|
||||
]).
|
||||
-export([
|
||||
decode/1,
|
||||
decode/2,
|
||||
safe_decode/1,
|
||||
safe_decode/2
|
||||
]).
|
||||
|
||||
-compile({inline,
|
||||
[ decode/1
|
||||
, decode/2
|
||||
]}).
|
||||
-compile(
|
||||
{inline, [
|
||||
decode/1,
|
||||
decode/2
|
||||
]}
|
||||
).
|
||||
|
||||
-type(encode_options() :: jiffy:encode_options()).
|
||||
-type(decode_options() :: jiffy:decode_options()).
|
||||
-type encode_options() :: jiffy:encode_options().
|
||||
-type decode_options() :: jiffy:decode_options().
|
||||
|
||||
-type(json_text() :: iolist() | binary()).
|
||||
-type(json_term() :: jiffy:jiffy_decode_result()).
|
||||
-type json_text() :: iolist() | binary().
|
||||
-type json_term() :: jiffy:jiffy_decode_result().
|
||||
|
||||
-export_type([json_text/0, json_term/0]).
|
||||
-export_type([decode_options/0, encode_options/0]).
|
||||
|
||||
-spec(encode(json_term()) -> json_text()).
|
||||
-spec encode(json_term()) -> json_text().
|
||||
encode(Term) ->
|
||||
encode(Term, [force_utf8]).
|
||||
|
||||
-spec(encode(json_term(), encode_options()) -> json_text()).
|
||||
-spec encode(json_term(), encode_options()) -> json_text().
|
||||
encode(Term, Opts) ->
|
||||
to_binary(jiffy:encode(to_ejson(Term), Opts)).
|
||||
|
||||
-spec(safe_encode(json_term())
|
||||
-> {ok, json_text()} | {error, Reason :: term()}).
|
||||
-spec safe_encode(json_term()) ->
|
||||
{ok, json_text()} | {error, Reason :: term()}.
|
||||
safe_encode(Term) ->
|
||||
safe_encode(Term, [force_utf8]).
|
||||
|
||||
-spec(safe_encode(json_term(), encode_options())
|
||||
-> {ok, json_text()} | {error, Reason :: term()}).
|
||||
-spec safe_encode(json_term(), encode_options()) ->
|
||||
{ok, json_text()} | {error, Reason :: term()}.
|
||||
safe_encode(Term, Opts) ->
|
||||
try encode(Term, Opts) of
|
||||
Json -> {ok, Json}
|
||||
|
|
@ -72,20 +78,20 @@ safe_encode(Term, Opts) ->
|
|||
{error, Reason}
|
||||
end.
|
||||
|
||||
-spec(decode(json_text()) -> json_term()).
|
||||
-spec decode(json_text()) -> json_term().
|
||||
decode(Json) -> decode(Json, []).
|
||||
|
||||
-spec(decode(json_text(), decode_options()) -> json_term()).
|
||||
-spec decode(json_text(), decode_options()) -> json_term().
|
||||
decode(Json, Opts) ->
|
||||
from_ejson(jiffy:decode(Json, Opts)).
|
||||
|
||||
-spec(safe_decode(json_text())
|
||||
-> {ok, json_term()} | {error, Reason :: term()}).
|
||||
-spec safe_decode(json_text()) ->
|
||||
{ok, json_term()} | {error, Reason :: term()}.
|
||||
safe_decode(Json) ->
|
||||
safe_decode(Json, []).
|
||||
|
||||
-spec(safe_decode(json_text(), decode_options())
|
||||
-> {ok, json_term()} | {error, Reason :: term()}).
|
||||
-spec safe_decode(json_text(), decode_options()) ->
|
||||
{ok, json_term()} | {error, Reason :: term()}.
|
||||
safe_decode(Json, Opts) ->
|
||||
try decode(Json, Opts) of
|
||||
Term -> {ok, Term}
|
||||
|
|
@ -98,18 +104,21 @@ safe_decode(Json, Opts) ->
|
|||
%% Helpers
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-compile({inline,
|
||||
[ to_ejson/1
|
||||
, from_ejson/1
|
||||
]}).
|
||||
-compile(
|
||||
{inline, [
|
||||
to_ejson/1,
|
||||
from_ejson/1
|
||||
]}
|
||||
).
|
||||
|
||||
to_ejson([{}]) ->
|
||||
{[]};
|
||||
to_ejson([{_, _}|_] = L) ->
|
||||
{[{K, to_ejson(V)} || {K, V} <- L ]};
|
||||
to_ejson([{_, _} | _] = L) ->
|
||||
{[{K, to_ejson(V)} || {K, V} <- L]};
|
||||
to_ejson(L) when is_list(L) ->
|
||||
[to_ejson(E) || E <- L];
|
||||
to_ejson(T) -> T.
|
||||
to_ejson(T) ->
|
||||
T.
|
||||
|
||||
from_ejson(L) when is_list(L) ->
|
||||
[from_ejson(E) || E <- L];
|
||||
|
|
@ -117,7 +126,8 @@ from_ejson({[]}) ->
|
|||
[{}];
|
||||
from_ejson({L}) ->
|
||||
[{Name, from_ejson(Value)} || {Name, Value} <- L];
|
||||
from_ejson(T) -> T.
|
||||
from_ejson(T) ->
|
||||
T.
|
||||
|
||||
to_binary(B) when is_binary(B) -> B;
|
||||
to_binary(L) when is_list(L) ->
|
||||
|
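A usage sketch of the jiffy wrappers above (illustrative only; the keys are arbitrary and return_maps is assumed to be an available jiffy decode option):

%% Proplists map onto JSON objects via to_ejson/1; [{}] stands for the
%% empty object.
{ok, Json} = emqx_json:safe_encode([{<<"clientid">>, <<"c1">>}, {<<"qos">>, 1}]),
{ok, Map} = emqx_json:safe_decode(Json, [return_maps]),
#{<<"clientid">> := <<"c1">>, <<"qos">> := 1} = Map,
%% encode/1 and decode/1 are the throwing variants of the same calls.
<<"[1,2,3]">> = emqx_json:encode([1, 2, 3]).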
|
|
|||
|
|
@ -16,49 +16,55 @@
|
|||
|
||||
-module(emqx_keepalive).
|
||||
|
||||
-export([ init/1
|
||||
, init/2
|
||||
, info/1
|
||||
, info/2
|
||||
, check/2
|
||||
, set/3
|
||||
]).
|
||||
-export([
|
||||
init/1,
|
||||
init/2,
|
||||
info/1,
|
||||
info/2,
|
||||
check/2,
|
||||
set/3
|
||||
]).
|
||||
|
||||
-elvis([{elvis_style, no_if_expression, disable}]).
|
||||
|
||||
-export_type([keepalive/0]).
|
||||
|
||||
-record(keepalive, {
|
||||
interval :: pos_integer(),
|
||||
statval :: non_neg_integer(),
|
||||
repeat :: non_neg_integer()
|
||||
}).
|
||||
interval :: pos_integer(),
|
||||
statval :: non_neg_integer(),
|
||||
repeat :: non_neg_integer()
|
||||
}).
|
||||
|
||||
-opaque(keepalive() :: #keepalive{}).
|
||||
-opaque keepalive() :: #keepalive{}.
|
||||
|
||||
%% @doc Init keepalive.
|
||||
-spec(init(Interval :: non_neg_integer()) -> keepalive()).
|
||||
-spec init(Interval :: non_neg_integer()) -> keepalive().
|
||||
init(Interval) -> init(0, Interval).
|
||||
|
||||
%% @doc Init keepalive.
|
||||
-spec(init(StatVal :: non_neg_integer(), Interval :: non_neg_integer()) -> keepalive()).
|
||||
-spec init(StatVal :: non_neg_integer(), Interval :: non_neg_integer()) -> keepalive().
|
||||
init(StatVal, Interval) when Interval > 0 ->
|
||||
#keepalive{interval = Interval,
|
||||
statval = StatVal,
|
||||
repeat = 0}.
|
||||
#keepalive{
|
||||
interval = Interval,
|
||||
statval = StatVal,
|
||||
repeat = 0
|
||||
}.
|
||||
|
||||
%% @doc Get Info of the keepalive.
|
||||
-spec(info(keepalive()) -> emqx_types:infos()).
|
||||
info(#keepalive{interval = Interval,
|
||||
statval = StatVal,
|
||||
repeat = Repeat}) ->
|
||||
#{interval => Interval,
|
||||
statval => StatVal,
|
||||
repeat => Repeat
|
||||
}.
|
||||
-spec info(keepalive()) -> emqx_types:infos().
|
||||
info(#keepalive{
|
||||
interval = Interval,
|
||||
statval = StatVal,
|
||||
repeat = Repeat
|
||||
}) ->
|
||||
#{
|
||||
interval => Interval,
|
||||
statval => StatVal,
|
||||
repeat => Repeat
|
||||
}.
|
||||
|
||||
-spec(info(interval | statval | repeat, keepalive())
|
||||
-> non_neg_integer()).
|
||||
-spec info(interval | statval | repeat, keepalive()) ->
|
||||
non_neg_integer().
|
||||
info(interval, #keepalive{interval = Interval}) ->
|
||||
Interval;
|
||||
info(statval, #keepalive{statval = StatVal}) ->
|
||||
|
|
@ -67,16 +73,22 @@ info(repeat, #keepalive{repeat = Repeat}) ->
|
|||
Repeat.
|
||||
|
||||
%% @doc Check keepalive.
|
||||
-spec(check(non_neg_integer(), keepalive())
|
||||
-> {ok, keepalive()} | {error, timeout}).
|
||||
check(NewVal, KeepAlive = #keepalive{statval = OldVal,
|
||||
repeat = Repeat}) ->
|
||||
-spec check(non_neg_integer(), keepalive()) ->
|
||||
{ok, keepalive()} | {error, timeout}.
|
||||
check(
|
||||
NewVal,
|
||||
KeepAlive = #keepalive{
|
||||
statval = OldVal,
|
||||
repeat = Repeat
|
||||
}
|
||||
) ->
|
||||
if
|
||||
NewVal =/= OldVal ->
|
||||
{ok, KeepAlive#keepalive{statval = NewVal, repeat = 0}};
|
||||
Repeat < 1 ->
|
||||
{ok, KeepAlive#keepalive{repeat = Repeat + 1}};
|
||||
true -> {error, timeout}
|
||||
true ->
|
||||
{error, timeout}
|
||||
end.
|
||||
|
||||
%% from mqtt-v3.1.1 specific
|
||||
|
|
@ -91,6 +103,6 @@ check(NewVal, KeepAlive = #keepalive{statval = OldVal,
|
|||
%% typically this is a few minutes.
|
||||
%% The maximum value is (65535s) 18 hours 12 minutes and 15 seconds.
|
||||
%% @doc Update keepalive's interval
|
||||
-spec(set(interval, non_neg_integer(), keepalive()) -> keepalive()).
|
||||
-spec set(interval, non_neg_integer(), keepalive()) -> keepalive().
|
||||
set(interval, Interval, KeepAlive) when Interval >= 0 andalso Interval =< 65535000 ->
|
||||
KeepAlive#keepalive{interval = Interval}.
|
||||
|
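A usage sketch of the keepalive check loop above (illustrative only; the sampled value would normally be the connection's received-packet counter, checked on each keepalive timer tick):

%% Interval is in milliseconds; statval and repeat both start at 0.
KA0 = emqx_keepalive:init(0, 30000),
%% Counter moved since the last tick: statval is refreshed, repeat reset.
{ok, KA1} = emqx_keepalive:check(5, KA0),
%% Counter unchanged once: tolerated, repeat becomes 1.
{ok, KA2} = emqx_keepalive:check(5, KA1),
%% Counter unchanged a second time: the connection is considered dead.
{error, timeout} = emqx_keepalive:check(5, KA2).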
|
|
|||
|
|
@ -26,33 +26,37 @@ start_link() ->
|
|||
supervisor:start_link({local, ?MODULE}, ?MODULE, []).
|
||||
|
||||
init([]) ->
|
||||
{ok, {{one_for_one, 10, 100},
|
||||
%% always start emqx_config_handler first to load emqx.conf into emqx_config
|
||||
[ child_spec(emqx_config_handler, worker)
|
||||
, child_spec(emqx_pool_sup, supervisor)
|
||||
, child_spec(emqx_hooks, worker)
|
||||
, child_spec(emqx_stats, worker)
|
||||
, child_spec(emqx_metrics, worker)
|
||||
, child_spec(emqx_ctl, worker)
|
||||
]}}.
|
||||
{ok, {
|
||||
{one_for_one, 10, 100},
|
||||
%% always start emqx_config_handler first to load emqx.conf into emqx_config
|
||||
[
|
||||
child_spec(emqx_config_handler, worker),
|
||||
child_spec(emqx_pool_sup, supervisor),
|
||||
child_spec(emqx_hooks, worker),
|
||||
child_spec(emqx_stats, worker),
|
||||
child_spec(emqx_metrics, worker),
|
||||
child_spec(emqx_ctl, worker)
|
||||
]
|
||||
}}.
|
||||
|
||||
child_spec(M, Type) ->
|
||||
child_spec(M, Type, []).
|
||||
|
||||
child_spec(M, worker, Args) ->
|
||||
#{id => M,
|
||||
start => {M, start_link, Args},
|
||||
restart => permanent,
|
||||
shutdown => 5000,
|
||||
type => worker,
|
||||
modules => [M]
|
||||
};
|
||||
|
||||
#{
|
||||
id => M,
|
||||
start => {M, start_link, Args},
|
||||
restart => permanent,
|
||||
shutdown => 5000,
|
||||
type => worker,
|
||||
modules => [M]
|
||||
};
|
||||
child_spec(M, supervisor, Args) ->
|
||||
#{id => M,
|
||||
start => {M, start_link, Args},
|
||||
restart => permanent,
|
||||
shutdown => infinity,
|
||||
type => supervisor,
|
||||
modules => [M]
|
||||
}.
|
||||
#{
|
||||
id => M,
|
||||
start => {M, start_link, Args},
|
||||
restart => permanent,
|
||||
shutdown => infinity,
|
||||
type => supervisor,
|
||||
modules => [M]
|
||||
}.
|
||||
|
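For reference, child_spec/2 above expands to an ordinary supervisor child spec; for example, child_spec(emqx_hooks, worker) evaluates to:

#{
    id => emqx_hooks,
    start => {emqx_hooks, start_link, []},
    restart => permanent,
    shutdown => 5000,
    type => worker,
    modules => [emqx_hooks]
}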
|
|
|||
|
|
@ -19,39 +19,46 @@
|
|||
|
||||
-include("types.hrl").
|
||||
|
||||
-export([ init/2
|
||||
, init/4 %% XXX: Compatible with versions before 4.2
|
||||
, info/1
|
||||
, check/2
|
||||
]).
|
||||
-export([
|
||||
init/2,
|
||||
%% XXX: Compatible with versions before 4.2
|
||||
init/4,
|
||||
info/1,
|
||||
check/2
|
||||
]).
|
||||
|
||||
-record(limiter, {
|
||||
%% Zone
|
||||
zone :: atom(),
|
||||
%% Checkers
|
||||
checkers :: [checker()]
|
||||
}).
|
||||
%% Zone
|
||||
zone :: atom(),
|
||||
%% Checkers
|
||||
checkers :: [checker()]
|
||||
}).
|
||||
|
||||
-type(checker() :: #{ name := name()
|
||||
, capacity := non_neg_integer()
|
||||
, interval := non_neg_integer()
|
||||
, consumer := esockd_rate_limit:bucket() | atom()
|
||||
}).
|
||||
-type checker() :: #{
|
||||
name := name(),
|
||||
capacity := non_neg_integer(),
|
||||
interval := non_neg_integer(),
|
||||
consumer := esockd_rate_limit:bucket() | atom()
|
||||
}.
|
||||
|
||||
-type(name() :: conn_bytes_in
|
||||
| conn_messages_in
|
||||
| conn_messages_routing
|
||||
| overall_messages_routing
|
||||
).
|
||||
-type name() ::
|
||||
conn_bytes_in
|
||||
| conn_messages_in
|
||||
| conn_messages_routing
|
||||
| overall_messages_routing.
|
||||
|
||||
-type(policy() :: [{name(), esockd_rate_limit:config()}]).
|
||||
-type policy() :: [{name(), esockd_rate_limit:config()}].
|
||||
|
||||
-type(info() :: #{name() :=
|
||||
#{tokens := non_neg_integer(),
|
||||
capacity := non_neg_integer(),
|
||||
interval := non_neg_integer()}}).
|
||||
-type info() :: #{
|
||||
name() :=
|
||||
#{
|
||||
tokens := non_neg_integer(),
|
||||
capacity := non_neg_integer(),
|
||||
interval := non_neg_integer()
|
||||
}
|
||||
}.
|
||||
|
||||
-type(limiter() :: #limiter{}).
|
||||
-type limiter() :: #limiter{}.
|
||||
|
||||
-dialyzer({nowarn_function, [consume/3]}).
|
||||
|
||||
|
|
@ -59,17 +66,25 @@
|
|||
%% APIs
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(init(atom(),
|
||||
maybe(esockd_rate_limit:config()),
|
||||
maybe(esockd_rate_limit:config()), policy())
|
||||
-> maybe(limiter())).
|
||||
-spec init(
|
||||
atom(),
|
||||
maybe(esockd_rate_limit:config()),
|
||||
maybe(esockd_rate_limit:config()),
|
||||
policy()
|
||||
) ->
|
||||
maybe(limiter()).
|
||||
init(Zone, PubLimit, BytesIn, Specs) ->
|
||||
Merged = maps:merge(#{conn_messages_in => PubLimit,
|
||||
conn_bytes_in => BytesIn}, maps:from_list(Specs)),
|
||||
Merged = maps:merge(
|
||||
#{
|
||||
conn_messages_in => PubLimit,
|
||||
conn_bytes_in => BytesIn
|
||||
},
|
||||
maps:from_list(Specs)
|
||||
),
|
||||
Filtered = maps:filter(fun(_, V) -> V /= undefined end, Merged),
|
||||
init(Zone, maps:to_list(Filtered)).
|
||||
|
||||
-spec(init(atom(), policy()) -> maybe(limiter())).
|
||||
-spec init(atom(), policy()) -> maybe(limiter()).
|
||||
init(_Zone, []) ->
|
||||
undefined;
|
||||
init(Zone, Specs) ->
|
||||
|
|
@ -91,15 +106,19 @@ do_init_checker(Zone, {Name, {Capacity, Interval}}) ->
|
|||
Ck#{consumer => esockd_rate_limit:new(Capacity / Interval, Capacity)}
|
||||
end.
|
||||
|
||||
-spec(info(limiter()) -> info()).
|
||||
-spec info(limiter()) -> info().
|
||||
info(#limiter{zone = Zone, checkers = Cks}) ->
|
||||
maps:from_list([get_info(Zone, Ck) || Ck <- Cks]).
|
||||
|
||||
-spec(check(#{cnt := Cnt :: non_neg_integer(),
|
||||
oct := Oct :: non_neg_integer()},
|
||||
Limiter :: limiter())
|
||||
-> {ok, NLimiter :: limiter()}
|
||||
| {pause, MilliSecs :: non_neg_integer(), NLimiter :: limiter()}).
|
||||
-spec check(
|
||||
#{
|
||||
cnt := Cnt :: non_neg_integer(),
|
||||
oct := Oct :: non_neg_integer()
|
||||
},
|
||||
Limiter :: limiter()
|
||||
) ->
|
||||
{ok, NLimiter :: limiter()}
|
||||
| {pause, MilliSecs :: non_neg_integer(), NLimiter :: limiter()}.
|
||||
check(#{cnt := Cnt, oct := Oct}, Limiter = #limiter{checkers = Cks}) ->
|
||||
{Pauses, NCks} = do_check(Cnt, Oct, Cks, [], []),
|
||||
case lists:max(Pauses) of
|
||||
|
|
@ -112,16 +131,20 @@ check(#{cnt := Cnt, oct := Oct}, Limiter = #limiter{checkers = Cks}) ->
|
|||
%% @private
|
||||
do_check(_, _, [], Pauses, NCks) ->
|
||||
{Pauses, lists:reverse(NCks)};
|
||||
do_check(Pubs, Bytes, [Ck|More], Pauses, Acc) ->
|
||||
do_check(Pubs, Bytes, [Ck | More], Pauses, Acc) ->
|
||||
{I, NConsumer} = consume(Pubs, Bytes, Ck),
|
||||
do_check(Pubs, Bytes, More, [I|Pauses], [Ck#{consumer := NConsumer}|Acc]).
|
||||
do_check(Pubs, Bytes, More, [I | Pauses], [Ck#{consumer := NConsumer} | Acc]).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Internal funcs
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
consume(Pubs, Bytes, #{name := Name, consumer := Cons}) ->
|
||||
Tokens = case is_message_limiter(Name) of true -> Pubs; _ -> Bytes end,
|
||||
Tokens =
|
||||
case is_message_limiter(Name) of
|
||||
true -> Pubs;
|
||||
_ -> Bytes
|
||||
end,
|
||||
case Tokens =:= 0 of
|
||||
true ->
|
||||
{0, Cons};
|
||||
|
|
@ -135,15 +158,22 @@ consume(Pubs, Bytes, #{name := Name, consumer := Cons}) ->
|
|||
end
|
||||
end.
|
||||
|
||||
get_info(Zone, #{name := Name, capacity := Cap,
|
||||
interval := Intv, consumer := Cons}) ->
|
||||
Info = case is_overall_limiter(Name) of
|
||||
true -> esockd_limiter:lookup({Zone, Name});
|
||||
_ -> esockd_rate_limit:info(Cons)
|
||||
end,
|
||||
{Name, #{capacity => Cap,
|
||||
interval => Intv,
|
||||
tokens => maps:get(tokens, Info)}}.
|
||||
get_info(Zone, #{
|
||||
name := Name,
|
||||
capacity := Cap,
|
||||
interval := Intv,
|
||||
consumer := Cons
|
||||
}) ->
|
||||
Info =
|
||||
case is_overall_limiter(Name) of
|
||||
true -> esockd_limiter:lookup({Zone, Name});
|
||||
_ -> esockd_rate_limit:info(Cons)
|
||||
end,
|
||||
{Name, #{
|
||||
capacity => Cap,
|
||||
interval => Intv,
|
||||
tokens => maps:get(tokens, Info)
|
||||
}}.
|
||||
|
||||
is_overall_limiter(overall_messages_routing) -> true;
|
||||
is_overall_limiter(_) -> false.
|
||||
|
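A usage sketch of the limiter above (illustrative only; the zone name, the {Capacity, Interval} budget and the activate_socket message are made up, and the bucket tuple follows do_init_checker/2 above):

%% One checker: at most 100 publishes per 10-second interval per connection.
Limiter0 = emqx_limiter:init(default, [{conn_messages_in, {100, 10}}]),
%% The connection feeds the per-read packet and byte counters to check/2.
case emqx_limiter:check(#{cnt => 1, oct => 256}, Limiter0) of
    {ok, Limiter1} ->
        %% within budget: keep reading from the socket
        Limiter1;
    {pause, Ms, Limiter1} ->
        %% over budget: stop reading and retry after Ms milliseconds
        _ = erlang:send_after(Ms, self(), activate_socket),
        Limiter1
end.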
|
|
|||
|
|
@ -23,37 +23,40 @@
|
|||
-include("logger.hrl").
|
||||
|
||||
%% APIs
|
||||
-export([ list/0
|
||||
, start/0
|
||||
, restart/0
|
||||
, stop/0
|
||||
, is_running/1
|
||||
, current_conns/2
|
||||
, max_conns/2
|
||||
, id_example/0
|
||||
]).
|
||||
-export([
|
||||
list/0,
|
||||
start/0,
|
||||
restart/0,
|
||||
stop/0,
|
||||
is_running/1,
|
||||
current_conns/2,
|
||||
max_conns/2,
|
||||
id_example/0
|
||||
]).
|
||||
|
||||
-export([ start_listener/1
|
||||
, start_listener/3
|
||||
, stop_listener/1
|
||||
, stop_listener/3
|
||||
, restart_listener/1
|
||||
, restart_listener/3
|
||||
, has_enabled_listener_conf_by_type/1
|
||||
]).
|
||||
-export([
|
||||
start_listener/1,
|
||||
start_listener/3,
|
||||
stop_listener/1,
|
||||
stop_listener/3,
|
||||
restart_listener/1,
|
||||
restart_listener/3,
|
||||
has_enabled_listener_conf_by_type/1
|
||||
]).
|
||||
|
||||
-export([ listener_id/2
|
||||
, parse_listener_id/1
|
||||
]).
|
||||
-export([
|
||||
listener_id/2,
|
||||
parse_listener_id/1
|
||||
]).
|
||||
|
||||
-export([post_config_update/5]).
|
||||
|
||||
-export([format_addr/1]).
|
||||
|
||||
-define(CONF_KEY_PATH, [listeners]).
|
||||
-define(TYPES_STRING, ["tcp","ssl","ws","wss","quic"]).
|
||||
-define(TYPES_STRING, ["tcp", "ssl", "ws", "wss", "quic"]).
|
||||
|
||||
-spec(id_example() -> atom()).
|
||||
-spec id_example() -> atom().
|
||||
id_example() ->
|
||||
id_example(list()).
|
||||
|
||||
|
|
@ -66,7 +69,7 @@ id_example([_ | Listeners]) ->
|
|||
id_example(Listeners).
|
||||
|
||||
%% @doc List configured listeners.
|
||||
-spec(list() -> [{ListenerId :: atom(), ListenerConf :: map()}]).
|
||||
-spec list() -> [{ListenerId :: atom(), ListenerConf :: map()}].
|
||||
list() ->
|
||||
[{listener_id(Type, LName), LConf} || {Type, LName, LConf} <- do_list()].
|
||||
|
||||
|
|
@ -75,37 +78,45 @@ do_list() ->
|
|||
lists:append([list(Type, maps:to_list(Conf)) || {Type, Conf} <- Listeners]).
|
||||
|
||||
list(Type, Conf) ->
|
||||
[begin
|
||||
Running = is_running(Type, listener_id(Type, LName), LConf),
|
||||
{Type, LName, maps:put(running, Running, LConf)}
|
||||
end || {LName, LConf} <- Conf, is_map(LConf)].
|
||||
[
|
||||
begin
|
||||
Running = is_running(Type, listener_id(Type, LName), LConf),
|
||||
{Type, LName, maps:put(running, Running, LConf)}
|
||||
end
|
||||
|| {LName, LConf} <- Conf, is_map(LConf)
|
||||
].
|
||||
|
||||
-spec is_running(ListenerId :: atom()) -> boolean() | {error, no_found}.
|
||||
is_running(ListenerId) ->
|
||||
case lists:filtermap(fun({_Type, Id, #{running := IsRunning}}) ->
|
||||
Id =:= ListenerId andalso {true, IsRunning}
|
||||
end, do_list()) of
|
||||
case
|
||||
lists:filtermap(
|
||||
fun({_Type, Id, #{running := IsRunning}}) ->
|
||||
Id =:= ListenerId andalso {true, IsRunning}
|
||||
end,
|
||||
do_list()
|
||||
)
|
||||
of
|
||||
[IsRunning] -> IsRunning;
|
||||
[] -> {error, not_found}
|
||||
end.
|
||||
|
||||
is_running(Type, ListenerId, #{bind := ListenOn}) when Type =:= tcp; Type =:= ssl ->
|
||||
try esockd:listener({ListenerId, ListenOn}) of
|
||||
Pid when is_pid(Pid)->
|
||||
Pid when is_pid(Pid) ->
|
||||
true
|
||||
catch _:_ ->
|
||||
false
|
||||
catch
|
||||
_:_ ->
|
||||
false
|
||||
end;
|
||||
|
||||
is_running(Type, ListenerId, _Conf) when Type =:= ws; Type =:= wss ->
|
||||
try
|
||||
Info = ranch:info(ListenerId),
|
||||
proplists:get_value(status, Info) =:= running
|
||||
catch _:_ ->
|
||||
false
|
||||
catch
|
||||
_:_ ->
|
||||
false
|
||||
end;
|
||||
|
||||
is_running(quic, _ListenerId, _Conf)->
|
||||
is_running(quic, _ListenerId, _Conf) ->
|
||||
%% TODO: quic support
|
||||
{error, no_found}.
|
||||
|
||||
|
|
@ -132,7 +143,7 @@ max_conns(_, _, _) ->
|
|||
{error, not_support}.
|
||||
|
||||
%% @doc Start all listeners.
|
||||
-spec(start() -> ok).
|
||||
-spec start() -> ok.
|
||||
start() ->
|
||||
%% The ?MODULE:start/0 will be called by emqx_app when emqx gets started,
|
||||
%% so we install the config handler here.
|
||||
|
|
@ -146,31 +157,39 @@ start_listener(ListenerId) ->
|
|||
-spec start_listener(atom(), atom(), map()) -> ok | {error, term()}.
|
||||
start_listener(Type, ListenerName, #{bind := Bind} = Conf) ->
|
||||
case do_start_listener(Type, ListenerName, Conf) of
|
||||
{ok, {skipped, Reason}} when Reason =:= listener_disabled;
|
||||
Reason =:= quic_app_missing ->
|
||||
console_print("Listener ~ts is NOT started due to: ~p~n.",
|
||||
[listener_id(Type, ListenerName), Reason]);
|
||||
{ok, {skipped, Reason}} when
|
||||
Reason =:= listener_disabled;
|
||||
Reason =:= quic_app_missing
|
||||
->
|
||||
console_print(
|
||||
"Listener ~ts is NOT started due to: ~p~n.",
|
||||
[listener_id(Type, ListenerName), Reason]
|
||||
);
|
||||
{ok, _} ->
|
||||
console_print("Listener ~ts on ~ts started.~n",
|
||||
[listener_id(Type, ListenerName), format_addr(Bind)]);
|
||||
console_print(
|
||||
"Listener ~ts on ~ts started.~n",
|
||||
[listener_id(Type, ListenerName), format_addr(Bind)]
|
||||
);
|
||||
{error, {already_started, Pid}} ->
|
||||
{error, {already_started, Pid}};
|
||||
{error, Reason} ->
|
||||
?ELOG("Failed to start listener ~ts on ~ts: ~0p~n",
|
||||
[listener_id(Type, ListenerName), format_addr(Bind), Reason]),
|
||||
?ELOG(
|
||||
"Failed to start listener ~ts on ~ts: ~0p~n",
|
||||
[listener_id(Type, ListenerName), format_addr(Bind), Reason]
|
||||
),
|
||||
error(Reason)
|
||||
end.
|
||||
|
||||
%% @doc Restart all listeners
|
||||
-spec(restart() -> ok).
|
||||
-spec restart() -> ok.
|
||||
restart() ->
|
||||
foreach_listeners(fun restart_listener/3).
|
||||
|
||||
-spec(restart_listener(atom()) -> ok | {error, term()}).
|
||||
-spec restart_listener(atom()) -> ok | {error, term()}.
|
||||
restart_listener(ListenerId) ->
|
||||
apply_on_listener(ListenerId, fun restart_listener/3).
|
||||
|
||||
-spec(restart_listener(atom(), atom(), map()) -> ok | {error, term()}).
|
||||
-spec restart_listener(atom(), atom(), map()) -> ok | {error, term()}.
|
||||
restart_listener(Type, ListenerName, {OldConf, NewConf}) ->
|
||||
restart_listener(Type, ListenerName, OldConf, NewConf);
|
||||
restart_listener(Type, ListenerName, Conf) ->
|
||||
|
|
@ -184,30 +203,34 @@ restart_listener(Type, ListenerName, OldConf, NewConf) ->
|
|||
end.
|
||||
|
||||
%% @doc Stop all listeners.
|
||||
-spec(stop() -> ok).
|
||||
-spec stop() -> ok.
|
||||
stop() ->
|
||||
%% The ?MODULE:stop/0 will be called by emqx_app when emqx is going to shut down,
|
||||
%% so we uninstall the config handler here.
|
||||
_ = emqx_config_handler:remove_handler(?CONF_KEY_PATH),
|
||||
foreach_listeners(fun stop_listener/3).
|
||||
|
||||
-spec(stop_listener(atom()) -> ok | {error, term()}).
|
||||
-spec stop_listener(atom()) -> ok | {error, term()}.
|
||||
stop_listener(ListenerId) ->
|
||||
apply_on_listener(ListenerId, fun stop_listener/3).
|
||||
|
||||
stop_listener(Type, ListenerName, #{bind := Bind} = Conf) ->
|
||||
case do_stop_listener(Type, ListenerName, Conf) of
|
||||
ok ->
|
||||
console_print("Listener ~ts on ~ts stopped.~n",
|
||||
[listener_id(Type, ListenerName), format_addr(Bind)]),
|
||||
console_print(
|
||||
"Listener ~ts on ~ts stopped.~n",
|
||||
[listener_id(Type, ListenerName), format_addr(Bind)]
|
||||
),
|
||||
ok;
|
||||
{error, Reason} ->
|
||||
?ELOG("Failed to stop listener ~ts on ~ts: ~0p~n",
|
||||
[listener_id(Type, ListenerName), format_addr(Bind), Reason]),
|
||||
?ELOG(
|
||||
"Failed to stop listener ~ts on ~ts: ~0p~n",
|
||||
[listener_id(Type, ListenerName), format_addr(Bind), Reason]
|
||||
),
|
||||
{error, Reason}
|
||||
end.
|
||||
|
||||
-spec(do_stop_listener(atom(), atom(), map()) -> ok | {error, term()}).
|
||||
-spec do_stop_listener(atom(), atom(), map()) -> ok | {error, term()}.
|
||||
do_stop_listener(Type, ListenerName, #{bind := ListenOn}) when Type == tcp; Type == ssl ->
|
||||
esockd:close(listener_id(Type, ListenerName), ListenOn);
|
||||
do_stop_listener(Type, ListenerName, _Conf) when Type == ws; Type == wss ->
|
||||
|
|
@ -222,21 +245,29 @@ console_print(_Fmt, _Args) -> ok.
|
|||
-endif.
|
||||
|
||||
%% Start MQTT/TCP listener
|
||||
-spec(do_start_listener(atom(), atom(), map())
|
||||
-> {ok, pid() | {skipped, atom()}} | {error, term()}).
|
||||
-spec do_start_listener(atom(), atom(), map()) ->
|
||||
{ok, pid() | {skipped, atom()}} | {error, term()}.
|
||||
do_start_listener(_Type, _ListenerName, #{enabled := false}) ->
|
||||
{ok, {skipped, listener_disabled}};
|
||||
do_start_listener(Type, ListenerName, #{bind := ListenOn} = Opts)
|
||||
when Type == tcp; Type == ssl ->
|
||||
esockd:open(listener_id(Type, ListenerName), ListenOn, merge_default(esockd_opts(Type, Opts)),
|
||||
{emqx_connection, start_link,
|
||||
[#{listener => {Type, ListenerName},
|
||||
zone => zone(Opts),
|
||||
limiter => limiter(Opts)}]});
|
||||
|
||||
do_start_listener(Type, ListenerName, #{bind := ListenOn} = Opts) when
|
||||
Type == tcp; Type == ssl
|
||||
->
|
||||
esockd:open(
|
||||
listener_id(Type, ListenerName),
|
||||
ListenOn,
|
||||
merge_default(esockd_opts(Type, Opts)),
|
||||
{emqx_connection, start_link, [
|
||||
#{
|
||||
listener => {Type, ListenerName},
|
||||
zone => zone(Opts),
|
||||
limiter => limiter(Opts)
|
||||
}
|
||||
]}
|
||||
);
|
||||
%% Start MQTT/WS listener
|
||||
do_start_listener(Type, ListenerName, #{bind := ListenOn} = Opts)
|
||||
when Type == ws; Type == wss ->
|
||||
do_start_listener(Type, ListenerName, #{bind := ListenOn} = Opts) when
|
||||
Type == ws; Type == wss
|
||||
->
|
||||
Id = listener_id(Type, ListenerName),
|
||||
RanchOpts = ranch_opts(Type, ListenOn, Opts),
|
||||
WsOpts = ws_opts(Type, ListenerName, Opts),
|
||||
|
|
@ -244,31 +275,36 @@ do_start_listener(Type, ListenerName, #{bind := ListenOn} = Opts)
|
|||
ws -> cowboy:start_clear(Id, RanchOpts, WsOpts);
|
||||
wss -> cowboy:start_tls(Id, RanchOpts, WsOpts)
|
||||
end;
|
||||
|
||||
%% Start MQTT/QUIC listener
|
||||
do_start_listener(quic, ListenerName, #{bind := ListenOn} = Opts) ->
|
||||
case [ A || {quicer, _, _} = A<-application:which_applications() ] of
|
||||
case [A || {quicer, _, _} = A <- application:which_applications()] of
|
||||
[_] ->
|
||||
DefAcceptors = erlang:system_info(schedulers_online) * 8,
|
||||
ListenOpts = [ {cert, maps:get(certfile, Opts)}
|
||||
, {key, maps:get(keyfile, Opts)}
|
||||
, {alpn, ["mqtt"]}
|
||||
, {conn_acceptors, lists:max([DefAcceptors, maps:get(acceptors, Opts, 0)])}
|
||||
, {idle_timeout_ms,
|
||||
lists:max([
|
||||
emqx_config:get_zone_conf(zone(Opts), [mqtt, idle_timeout]) * 3,
|
||||
timer:seconds(maps:get(idle_timeout, Opts))])}
|
||||
],
|
||||
ConnectionOpts = #{ conn_callback => emqx_quic_connection
|
||||
, peer_unidi_stream_count => 1
|
||||
, peer_bidi_stream_count => 10
|
||||
, zone => zone(Opts)
|
||||
, listener => {quic, ListenerName}
|
||||
, limiter => limiter(Opts)
|
||||
},
|
||||
ListenOpts = [
|
||||
{cert, maps:get(certfile, Opts)},
|
||||
{key, maps:get(keyfile, Opts)},
|
||||
{alpn, ["mqtt"]},
|
||||
{conn_acceptors, lists:max([DefAcceptors, maps:get(acceptors, Opts, 0)])},
|
||||
{idle_timeout_ms,
|
||||
lists:max([
|
||||
emqx_config:get_zone_conf(zone(Opts), [mqtt, idle_timeout]) * 3,
|
||||
timer:seconds(maps:get(idle_timeout, Opts))
|
||||
])}
|
||||
],
|
||||
ConnectionOpts = #{
|
||||
conn_callback => emqx_quic_connection,
|
||||
peer_unidi_stream_count => 1,
|
||||
peer_bidi_stream_count => 10,
|
||||
zone => zone(Opts),
|
||||
listener => {quic, ListenerName},
|
||||
limiter => limiter(Opts)
|
||||
},
|
||||
StreamOpts = [{stream_callback, emqx_quic_stream}],
|
||||
quicer:start_listener(listener_id(quic, ListenerName),
|
||||
port(ListenOn), {ListenOpts, ConnectionOpts, StreamOpts});
|
||||
quicer:start_listener(
|
||||
listener_id(quic, ListenerName),
|
||||
port(ListenOn),
|
||||
{ListenOpts, ConnectionOpts, StreamOpts}
|
||||
);
|
||||
[] ->
|
||||
{ok, {skipped, quic_app_missing}}
|
||||
end.
|
||||
|
|
@ -278,54 +314,70 @@ delete_authentication(Type, ListenerName, _Conf) ->
|
|||
|
||||
%% Update the listeners at runtime
|
||||
post_config_update(_, _Req, NewListeners, OldListeners, _AppEnvs) ->
|
||||
#{added := Added, removed := Removed, changed := Updated}
|
||||
= diff_listeners(NewListeners, OldListeners),
|
||||
#{added := Added, removed := Removed, changed := Updated} =
|
||||
diff_listeners(NewListeners, OldListeners),
|
||||
perform_listener_changes(fun stop_listener/3, Removed),
|
||||
perform_listener_changes(fun delete_authentication/3, Removed),
|
||||
perform_listener_changes(fun start_listener/3, Added),
|
||||
perform_listener_changes(fun restart_listener/3, Updated).
|
||||
|
||||
perform_listener_changes(Action, MapConfs) ->
|
||||
lists:foreach(fun
|
||||
({Id, Conf}) ->
|
||||
lists:foreach(
|
||||
fun({Id, Conf}) ->
|
||||
{Type, Name} = parse_listener_id(Id),
|
||||
Action(Type, Name, Conf)
|
||||
end, maps:to_list(MapConfs)).
|
||||
end,
|
||||
maps:to_list(MapConfs)
|
||||
).
|
||||
|
||||
diff_listeners(NewListeners, OldListeners) ->
|
||||
emqx_map_lib:diff_maps(flatten_listeners(NewListeners), flatten_listeners(OldListeners)).
|
||||
|
||||
flatten_listeners(Conf0) ->
|
||||
maps:from_list(
|
||||
lists:append([do_flatten_listeners(Type, Conf)
|
||||
|| {Type, Conf} <- maps:to_list(Conf0)])).
|
||||
lists:append([
|
||||
do_flatten_listeners(Type, Conf)
|
||||
|| {Type, Conf} <- maps:to_list(Conf0)
|
||||
])
|
||||
).
|
||||
|
||||
do_flatten_listeners(Type, Conf0) ->
|
||||
[{listener_id(Type, Name), maps:remove(authentication, Conf)} ||
|
||||
{Name, Conf} <- maps:to_list(Conf0)].
|
||||
[
|
||||
{listener_id(Type, Name), maps:remove(authentication, Conf)}
|
||||
|| {Name, Conf} <- maps:to_list(Conf0)
|
||||
].
|
||||
|
||||
esockd_opts(Type, Opts0) ->
|
||||
Opts1 = maps:with([acceptors, max_connections, proxy_protocol, proxy_protocol_timeout], Opts0),
|
||||
Limiter = limiter(Opts0),
|
||||
Opts2 = case maps:get(connection, Limiter, undefined) of
|
||||
undefined ->
|
||||
Opts1;
|
||||
BucketName ->
|
||||
Opts1#{limiter => emqx_esockd_htb_limiter:new_create_options(connection, BucketName)}
|
||||
end,
|
||||
Opts3 = Opts2#{ access_rules => esockd_access_rules(maps:get(access_rules, Opts0, []))
|
||||
, tune_fun => {emqx_olp, backoff_new_conn, [zone(Opts0)]}
|
||||
},
|
||||
maps:to_list(case Type of
|
||||
tcp -> Opts3#{tcp_options => tcp_opts(Opts0)};
|
||||
ssl -> Opts3#{ssl_options => ssl_opts(Opts0), tcp_options => tcp_opts(Opts0)}
|
||||
end).
|
||||
Opts2 =
|
||||
case maps:get(connection, Limiter, undefined) of
|
||||
undefined ->
|
||||
Opts1;
|
||||
BucketName ->
|
||||
Opts1#{
|
||||
limiter => emqx_esockd_htb_limiter:new_create_options(connection, BucketName)
|
||||
}
|
||||
end,
|
||||
Opts3 = Opts2#{
|
||||
access_rules => esockd_access_rules(maps:get(access_rules, Opts0, [])),
|
||||
tune_fun => {emqx_olp, backoff_new_conn, [zone(Opts0)]}
|
||||
},
|
||||
maps:to_list(
|
||||
case Type of
|
||||
tcp -> Opts3#{tcp_options => tcp_opts(Opts0)};
|
||||
ssl -> Opts3#{ssl_options => ssl_opts(Opts0), tcp_options => tcp_opts(Opts0)}
|
||||
end
|
||||
).
|
||||
|
||||
ws_opts(Type, ListenerName, Opts) ->
|
||||
WsPaths = [{maps:get(mqtt_path, Opts, "/mqtt"), emqx_ws_connection,
|
||||
#{zone => zone(Opts),
|
||||
listener => {Type, ListenerName},
|
||||
limiter => limiter(Opts)}}],
|
||||
WsPaths = [
|
||||
{maps:get(mqtt_path, Opts, "/mqtt"), emqx_ws_connection, #{
|
||||
zone => zone(Opts),
|
||||
listener => {Type, ListenerName},
|
||||
limiter => limiter(Opts)
|
||||
}}
|
||||
],
|
||||
Dispatch = cowboy_router:compile([{'_', WsPaths}]),
|
||||
ProxyProto = maps:get(proxy_protocol, Opts, false),
|
||||
#{env => #{dispatch => Dispatch}, proxy_header => ProxyProto}.
|
||||
|
|
@ -333,16 +385,19 @@ ws_opts(Type, ListenerName, Opts) ->
|
|||
ranch_opts(Type, ListenOn, Opts) ->
|
||||
NumAcceptors = maps:get(acceptors, Opts, 4),
|
||||
MaxConnections = maps:get(max_connections, Opts, 1024),
|
||||
SocketOpts = case Type of
|
||||
wss -> tcp_opts(Opts) ++ proplists:delete(handshake_timeout, ssl_opts(Opts));
|
||||
ws -> tcp_opts(Opts)
|
||||
end,
|
||||
#{num_acceptors => NumAcceptors,
|
||||
max_connections => MaxConnections,
|
||||
handshake_timeout => maps:get(handshake_timeout, Opts, 15000),
|
||||
socket_opts => ip_port(ListenOn) ++
|
||||
SocketOpts =
|
||||
case Type of
|
||||
wss -> tcp_opts(Opts) ++ proplists:delete(handshake_timeout, ssl_opts(Opts));
|
||||
ws -> tcp_opts(Opts)
|
||||
end,
|
||||
#{
|
||||
num_acceptors => NumAcceptors,
|
||||
max_connections => MaxConnections,
|
||||
handshake_timeout => maps:get(handshake_timeout, Opts, 15000),
|
||||
socket_opts => ip_port(ListenOn) ++
|
||||
%% cowboy doesn't allow us to set 'reuseaddr'
|
||||
proplists:delete(reuseaddr, SocketOpts)}.
|
||||
proplists:delete(reuseaddr, SocketOpts)
|
||||
}.
|
||||
|
||||
ip_port(Port) when is_integer(Port) ->
|
||||
[{port, Port}];
|
||||
|
|
@ -355,7 +410,13 @@ port({_Addr, Port}) when is_integer(Port) -> Port.
|
|||
esockd_access_rules(StrRules) ->
|
||||
Access = fun(S) ->
|
||||
[A, CIDR] = string:tokens(S, " "),
|
||||
{list_to_atom(A), case CIDR of "all" -> all; _ -> CIDR end}
|
||||
{
|
||||
list_to_atom(A),
|
||||
case CIDR of
|
||||
"all" -> all;
|
||||
_ -> CIDR
|
||||
end
|
||||
}
|
||||
end,
|
||||
[Access(R) || R <- StrRules].
|
||||
|
||||
|
|
@ -389,7 +450,8 @@ parse_listener_id(Id) ->
|
|||
true -> {list_to_existing_atom(Type), list_to_atom(Name)};
|
||||
false -> {error, {invalid_listener_id, Id}}
|
||||
end;
|
||||
_ -> {error, {invalid_listener_id, Id}}
|
||||
_ ->
|
||||
{error, {invalid_listener_id, Id}}
|
||||
end.
|
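A usage sketch of the listener id helpers above (illustrative only; it assumes the usual <type>:<name> atom form, consistent with parse_listener_id/1 splitting the id at the colon):

Id = emqx_listeners:listener_id(tcp, default),            %% e.g. 'tcp:default'
{tcp, default} = emqx_listeners:parse_listener_id(Id),
%% is_running/1 reports per-listener state, or {error, not_found}.
case emqx_listeners:is_running(Id) of
    true -> running;
    false -> stopped;
    {error, not_found} -> unknown_listener
end.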
||||
|
||||
zone(Opts) ->
|
||||
|
|
@ -401,25 +463,36 @@ limiter(Opts) ->
|
|||
ssl_opts(Opts) ->
|
||||
maps:to_list(
|
||||
emqx_tls_lib:drop_tls13_for_old_otp(
|
||||
maps:without([enable],
|
||||
maps:get(ssl, Opts, #{})))).
|
||||
maps:without(
|
||||
[enable],
|
||||
maps:get(ssl, Opts, #{})
|
||||
)
|
||||
)
|
||||
).
|
||||
|
||||
tcp_opts(Opts) ->
|
||||
maps:to_list(
|
||||
maps:without([active_n],
|
||||
maps:get(tcp, Opts, #{}))).
|
||||
maps:without(
|
||||
[active_n],
|
||||
maps:get(tcp, Opts, #{})
|
||||
)
|
||||
).
|
||||
|
||||
foreach_listeners(Do) ->
|
||||
lists:foreach(
|
||||
fun({Type, LName, LConf}) ->
|
||||
Do(Type, LName, LConf)
|
||||
end, do_list()).
|
||||
Do(Type, LName, LConf)
|
||||
end,
|
||||
do_list()
|
||||
).
|
||||
|
||||
has_enabled_listener_conf_by_type(Type) ->
|
||||
lists:any(
|
||||
fun({Type0, _LName, LConf}) when is_map(LConf) ->
|
||||
Type =:= Type0 andalso maps:get(enabled, LConf, true)
|
||||
end, do_list()).
|
||||
Type =:= Type0 andalso maps:get(enabled, LConf, true)
|
||||
end,
|
||||
do_list()
|
||||
).
|
||||
|
||||
apply_on_listener(ListenerId, Do) ->
|
||||
{Type, ListenerName} = parse_listener_id(ListenerId),
|
||||
|
|
|
|||
|
|
@ -21,165 +21,169 @@
|
|||
-elvis([{elvis_style, god_modules, disable}]).
|
||||
|
||||
%% Logs
|
||||
-export([ debug/1
|
||||
, debug/2
|
||||
, debug/3
|
||||
, info/1
|
||||
, info/2
|
||||
, info/3
|
||||
, warning/1
|
||||
, warning/2
|
||||
, warning/3
|
||||
, error/1
|
||||
, error/2
|
||||
, error/3
|
||||
, critical/1
|
||||
, critical/2
|
||||
, critical/3
|
||||
]).
|
||||
-export([
|
||||
debug/1,
|
||||
debug/2,
|
||||
debug/3,
|
||||
info/1,
|
||||
info/2,
|
||||
info/3,
|
||||
warning/1,
|
||||
warning/2,
|
||||
warning/3,
|
||||
error/1,
|
||||
error/2,
|
||||
error/3,
|
||||
critical/1,
|
||||
critical/2,
|
||||
critical/3
|
||||
]).
|
||||
|
||||
%% Configs
|
||||
-export([ set_metadata_peername/1
|
||||
, set_metadata_clientid/1
|
||||
, set_proc_metadata/1
|
||||
, set_primary_log_level/1
|
||||
, set_log_handler_level/2
|
||||
, set_log_level/1
|
||||
, set_all_log_handlers_level/1
|
||||
]).
|
||||
-export([
|
||||
set_metadata_peername/1,
|
||||
set_metadata_clientid/1,
|
||||
set_proc_metadata/1,
|
||||
set_primary_log_level/1,
|
||||
set_log_handler_level/2,
|
||||
set_log_level/1,
|
||||
set_all_log_handlers_level/1
|
||||
]).
|
||||
|
||||
-export([ get_primary_log_level/0
|
||||
, tune_primary_log_level/0
|
||||
, get_log_handlers/0
|
||||
, get_log_handlers/1
|
||||
, get_log_handler/1
|
||||
]).
|
||||
-export([
|
||||
get_primary_log_level/0,
|
||||
tune_primary_log_level/0,
|
||||
get_log_handlers/0,
|
||||
get_log_handlers/1,
|
||||
get_log_handler/1
|
||||
]).
|
||||
|
||||
-export([ start_log_handler/1
|
||||
, stop_log_handler/1
|
||||
]).
|
||||
-export([
|
||||
start_log_handler/1,
|
||||
stop_log_handler/1
|
||||
]).
|
||||
|
||||
-type(peername_str() :: list()).
|
||||
-type(logger_dst() :: file:filename() | console | unknown).
|
||||
-type(logger_handler_info() :: #{
|
||||
id := logger:handler_id(),
|
||||
level := logger:level(),
|
||||
dst := logger_dst(),
|
||||
filters := [{logger:filter_id(), logger:filter()}],
|
||||
status := started | stopped
|
||||
}).
|
||||
-type peername_str() :: list().
|
||||
-type logger_dst() :: file:filename() | console | unknown.
|
||||
-type logger_handler_info() :: #{
|
||||
id := logger:handler_id(),
|
||||
level := logger:level(),
|
||||
dst := logger_dst(),
|
||||
filters := [{logger:filter_id(), logger:filter()}],
|
||||
status := started | stopped
|
||||
}.
|
||||
|
||||
-define(STOPPED_HANDLERS, {?MODULE, stopped_handlers}).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% APIs
|
||||
%%--------------------------------------------------------------------
|
||||
-spec(debug(unicode:chardata()) -> ok).
|
||||
-spec debug(unicode:chardata()) -> ok.
|
||||
debug(Msg) ->
|
||||
logger:debug(Msg).
|
||||
|
||||
-spec(debug(io:format(), [term()]) -> ok).
|
||||
-spec debug(io:format(), [term()]) -> ok.
|
||||
debug(Format, Args) ->
|
||||
logger:debug(Format, Args).
|
||||
|
||||
-spec(debug(logger:metadata(), io:format(), [term()]) -> ok).
|
||||
-spec debug(logger:metadata(), io:format(), [term()]) -> ok.
|
||||
debug(Metadata, Format, Args) when is_map(Metadata) ->
|
||||
logger:debug(Format, Args, Metadata).
|
||||
|
||||
|
||||
-spec(info(unicode:chardata()) -> ok).
|
||||
-spec info(unicode:chardata()) -> ok.
|
||||
info(Msg) ->
|
||||
logger:info(Msg).
|
||||
|
||||
-spec(info(io:format(), [term()]) -> ok).
|
||||
-spec info(io:format(), [term()]) -> ok.
|
||||
info(Format, Args) ->
|
||||
logger:info(Format, Args).
|
||||
|
||||
-spec(info(logger:metadata(), io:format(), [term()]) -> ok).
|
||||
-spec info(logger:metadata(), io:format(), [term()]) -> ok.
|
||||
info(Metadata, Format, Args) when is_map(Metadata) ->
|
||||
logger:info(Format, Args, Metadata).
|
||||
|
||||
|
||||
-spec(warning(unicode:chardata()) -> ok).
|
||||
-spec warning(unicode:chardata()) -> ok.
|
||||
warning(Msg) ->
|
||||
logger:warning(Msg).
|
||||
|
||||
-spec(warning(io:format(), [term()]) -> ok).
|
||||
-spec warning(io:format(), [term()]) -> ok.
|
||||
warning(Format, Args) ->
|
||||
logger:warning(Format, Args).
|
||||
|
||||
-spec(warning(logger:metadata(), io:format(), [term()]) -> ok).
|
||||
-spec warning(logger:metadata(), io:format(), [term()]) -> ok.
|
||||
warning(Metadata, Format, Args) when is_map(Metadata) ->
|
||||
logger:warning(Format, Args, Metadata).
|
||||
|
||||
|
||||
-spec(error(unicode:chardata()) -> ok).
|
||||
-spec error(unicode:chardata()) -> ok.
|
||||
error(Msg) ->
|
||||
logger:error(Msg).
|
||||
-spec(error(io:format(), [term()]) -> ok).
|
||||
-spec error(io:format(), [term()]) -> ok.
|
||||
error(Format, Args) ->
|
||||
logger:error(Format, Args).
|
||||
-spec(error(logger:metadata(), io:format(), [term()]) -> ok).
|
||||
-spec error(logger:metadata(), io:format(), [term()]) -> ok.
|
||||
error(Metadata, Format, Args) when is_map(Metadata) ->
|
||||
logger:error(Format, Args, Metadata).
|
||||
|
||||
|
||||
-spec(critical(unicode:chardata()) -> ok).
|
||||
-spec critical(unicode:chardata()) -> ok.
|
||||
critical(Msg) ->
|
||||
logger:critical(Msg).
|
||||
|
||||
-spec(critical(io:format(), [term()]) -> ok).
|
||||
-spec critical(io:format(), [term()]) -> ok.
|
||||
critical(Format, Args) ->
|
||||
logger:critical(Format, Args).
|
||||
|
||||
-spec(critical(logger:metadata(), io:format(), [term()]) -> ok).
|
||||
-spec critical(logger:metadata(), io:format(), [term()]) -> ok.
|
||||
critical(Metadata, Format, Args) when is_map(Metadata) ->
|
||||
logger:critical(Format, Args, Metadata).
|
||||
|
||||
-spec(set_metadata_clientid(emqx_types:clientid()) -> ok).
|
||||
-spec set_metadata_clientid(emqx_types:clientid()) -> ok.
|
||||
set_metadata_clientid(<<>>) ->
|
||||
ok;
|
||||
set_metadata_clientid(ClientId) ->
|
||||
set_proc_metadata(#{clientid => ClientId}).
|
||||
|
||||
-spec(set_metadata_peername(peername_str()) -> ok).
|
||||
-spec set_metadata_peername(peername_str()) -> ok.
|
||||
set_metadata_peername(Peername) ->
|
||||
set_proc_metadata(#{peername => Peername}).
|
||||
|
||||
-spec(set_proc_metadata(logger:metadata()) -> ok).
|
||||
-spec set_proc_metadata(logger:metadata()) -> ok.
|
||||
set_proc_metadata(Meta) ->
|
||||
logger:update_process_metadata(Meta).
|
||||
|
||||
-spec(get_primary_log_level() -> logger:level()).
|
||||
-spec get_primary_log_level() -> logger:level().
|
||||
get_primary_log_level() ->
|
||||
#{level := Level} = logger:get_primary_config(),
|
||||
Level.
|
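A usage sketch of the logging helpers above (illustrative only; the client id, peername and message are arbitrary):

%% Attach client metadata to the calling process, then log through the
%% level-specific wrappers.
ok = emqx_logger:set_metadata_clientid(<<"client-1">>),
ok = emqx_logger:set_metadata_peername("127.0.0.1:52168"),
ok = emqx_logger:warning("slow subscriber: ~p pending messages", [1024]),
%% Raise both the primary level and every handler level in one call.
ok = emqx_logger:set_log_level(debug).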
||||
|
||||
-spec tune_primary_log_level() -> ok.
|
||||
tune_primary_log_level() ->
|
||||
LowestLevel = lists:foldl(fun(#{level := Level}, OldLevel) ->
|
||||
LowestLevel = lists:foldl(
|
||||
fun(#{level := Level}, OldLevel) ->
|
||||
case logger:compare_levels(Level, OldLevel) of
|
||||
lt -> Level;
|
||||
_ -> OldLevel
|
||||
end
|
||||
end, get_primary_log_level(), get_log_handlers()),
|
||||
end,
|
||||
get_primary_log_level(),
|
||||
get_log_handlers()
|
||||
),
|
||||
set_primary_log_level(LowestLevel).
|
||||
|
||||
-spec(set_primary_log_level(logger:level()) -> ok | {error, term()}).
|
||||
-spec set_primary_log_level(logger:level()) -> ok | {error, term()}.
|
||||
set_primary_log_level(Level) ->
|
||||
logger:set_primary_config(level, Level).
|
||||
|
||||
-spec(get_log_handlers() -> [logger_handler_info()]).
|
||||
-spec get_log_handlers() -> [logger_handler_info()].
|
||||
get_log_handlers() ->
|
||||
get_log_handlers(started) ++ get_log_handlers(stopped).
|
||||
|
||||
-spec(get_log_handlers(started | stopped) -> [logger_handler_info()]).
|
||||
-spec get_log_handlers(started | stopped) -> [logger_handler_info()].
|
||||
get_log_handlers(started) ->
|
||||
[log_handler_info(Conf, started) || Conf <- logger:get_handler_config()];
|
||||
get_log_handlers(stopped) ->
|
||||
[log_handler_info(Conf, stopped) || Conf <- list_stopped_handler_config()].
|
||||
|
||||
-spec(get_log_handler(logger:handler_id()) -> logger_handler_info()).
|
||||
-spec get_log_handler(logger:handler_id()) -> logger_handler_info().
|
||||
get_log_handler(HandlerId) ->
|
||||
case logger:get_handler_config(HandlerId) of
|
||||
{ok, Conf} ->
|
||||
|
|
@ -191,13 +195,15 @@ get_log_handler(HandlerId) ->
|
|||
end
|
||||
end.
|
||||
|
||||
-spec(start_log_handler(logger:handler_id()) -> ok | {error, term()}).
|
||||
-spec start_log_handler(logger:handler_id()) -> ok | {error, term()}.
|
||||
start_log_handler(HandlerId) ->
|
||||
case lists:member(HandlerId, logger:get_handler_ids()) of
|
||||
true -> ok;
|
||||
true ->
|
||||
ok;
|
||||
false ->
|
||||
case read_stopped_handler_config(HandlerId) of
|
||||
error -> {error, {not_found, HandlerId}};
|
||||
error ->
|
||||
{error, {not_found, HandlerId}};
|
||||
{ok, Conf = #{module := Mod}} ->
|
||||
case logger:add_handler(HandlerId, Mod, Conf) of
|
||||
ok -> remove_stopped_handler_config(HandlerId);
|
||||
|
|
@ -206,7 +212,7 @@ start_log_handler(HandlerId) ->
|
|||
end
|
||||
end.
|
||||
|
||||
-spec(stop_log_handler(logger:handler_id()) -> ok | {error, term()}).
|
||||
-spec stop_log_handler(logger:handler_id()) -> ok | {error, term()}.
|
||||
stop_log_handler(HandlerId) ->
|
||||
case logger:get_handler_config(HandlerId) of
|
||||
{ok, Conf} ->
|
||||
|
|
@ -218,20 +224,20 @@ stop_log_handler(HandlerId) ->
|
|||
{error, {not_started, HandlerId}}
|
||||
end.
|
||||
|
||||
-spec(set_log_handler_level(logger:handler_id(), logger:level()) -> ok | {error, term()}).
|
||||
-spec set_log_handler_level(logger:handler_id(), logger:level()) -> ok | {error, term()}.
|
||||
set_log_handler_level(HandlerId, Level) ->
|
||||
case logger:set_handler_config(HandlerId, level, Level) of
|
||||
ok -> ok;
|
||||
ok ->
|
||||
ok;
|
||||
{error, _} ->
|
||||
case read_stopped_handler_config(HandlerId) of
|
||||
error -> {error, {not_found, HandlerId}};
|
||||
{ok, Conf} ->
|
||||
save_stopped_handler_config(HandlerId, Conf#{level => Level})
|
||||
{ok, Conf} -> save_stopped_handler_config(HandlerId, Conf#{level => Level})
|
||||
end
|
||||
end.
|
||||
|
||||
%% @doc Set both the primary and all handlers level in one command
|
||||
-spec(set_log_level(logger:handler_id()) -> ok | {error, term()}).
|
||||
-spec set_log_level(logger:handler_id()) -> ok | {error, term()}.
|
||||
set_log_level(Level) ->
|
||||
case set_primary_log_level(Level) of
|
||||
ok -> set_all_log_handlers_level(Level);
|
||||
|
|
@ -242,18 +248,47 @@ set_log_level(Level) ->
|
|||
%% Internal Functions
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
log_handler_info(#{id := Id, level := Level, module := logger_std_h,
|
||||
filters := Filters, config := #{type := Type}}, Status) when
|
||||
Type =:= standard_io;
|
||||
Type =:= standard_error ->
|
||||
log_handler_info(
|
||||
#{
|
||||
id := Id,
|
||||
level := Level,
|
||||
module := logger_std_h,
|
||||
filters := Filters,
|
||||
config := #{type := Type}
|
||||
},
|
||||
Status
|
||||
) when
|
||||
Type =:= standard_io;
|
||||
Type =:= standard_error
|
||||
->
|
||||
#{id => Id, level => Level, dst => console, status => Status, filters => Filters};
|
||||
log_handler_info(#{id := Id, level := Level, module := logger_std_h,
|
||||
filters := Filters, config := Config = #{type := file}}, Status) ->
|
||||
#{id => Id, level => Level, status => Status, filters => Filters,
|
||||
dst => maps:get(file, Config, atom_to_list(Id))};
|
||||
|
||||
log_handler_info(#{id := Id, level := Level, module := logger_disk_log_h,
|
||||
filters := Filters, config := #{file := Filename}}, Status) ->
|
||||
log_handler_info(
|
||||
#{
|
||||
id := Id,
|
||||
level := Level,
|
||||
module := logger_std_h,
|
||||
filters := Filters,
|
||||
config := Config = #{type := file}
|
||||
},
|
||||
Status
|
||||
) ->
|
||||
#{
|
||||
id => Id,
|
||||
level => Level,
|
||||
status => Status,
|
||||
filters => Filters,
|
||||
dst => maps:get(file, Config, atom_to_list(Id))
|
||||
};
|
||||
log_handler_info(
|
||||
#{
|
||||
id := Id,
|
||||
level := Level,
|
||||
module := logger_disk_log_h,
|
||||
filters := Filters,
|
||||
config := #{file := Filename}
|
||||
},
|
||||
Status
|
||||
) ->
|
||||
#{id => Id, level => Level, dst => Filename, status => Status, filters => Filters};
|
||||
log_handler_info(#{id := Id, level := Level, filters := Filters}, Status) ->
|
||||
#{id => Id, level => Level, dst => unknown, status => Status, filters => Filters}.
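For orientation, these clauses all produce a flat info map per handler; a representative (illustrative) value looks like:

%% #{id => default, level => warning, dst => console,
%%   status => started, filters => [...]}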
|
||||
|
|
@ -264,7 +299,8 @@ set_all_log_handlers_level(Level) ->
|
|||
|
||||
set_all_log_handlers_level([#{id := ID, level := Level} | List], NewLevel, ChangeHistory) ->
|
||||
case set_log_handler_level(ID, NewLevel) of
|
||||
ok -> set_all_log_handlers_level(List, NewLevel, [{ID, Level} | ChangeHistory]);
|
||||
ok ->
|
||||
set_all_log_handlers_level(List, NewLevel, [{ID, Level} | ChangeHistory]);
|
||||
{error, Error} ->
|
||||
rollback(ChangeHistory),
|
||||
{error, {handlers_logger_level, {ID, Error}}}
|
||||
|
|
@ -275,7 +311,8 @@ set_all_log_handlers_level([], _NewLevel, _NewHanlder) ->
|
|||
rollback([{ID, Level} | List]) ->
|
||||
_ = set_log_handler_level(ID, Level),
|
||||
rollback(List);
|
||||
rollback([]) -> ok.
|
||||
rollback([]) ->
|
||||
ok.
|
||||
|
||||
save_stopped_handler_config(HandlerId, Config) ->
|
||||
case persistent_term:get(?STOPPED_HANDLERS, undefined) of
|
||||
|
|
@ -291,12 +328,12 @@ read_stopped_handler_config(HandlerId) ->
|
|||
end.
|
||||
remove_stopped_handler_config(HandlerId) ->
|
||||
case persistent_term:get(?STOPPED_HANDLERS, undefined) of
|
||||
undefined -> ok;
|
||||
undefined ->
|
||||
ok;
|
||||
ConfList ->
|
||||
case maps:find(HandlerId, ConfList) of
|
||||
error -> ok;
|
||||
{ok, _} ->
|
||||
persistent_term:put(?STOPPED_HANDLERS, maps:remove(HandlerId, ConfList))
|
||||
{ok, _} -> persistent_term:put(?STOPPED_HANDLERS, maps:remove(HandlerId, ConfList))
|
||||
end
|
||||
end.
|
||||
list_stopped_handler_config() ->
|
||||
|
|
|
|||
|
|
@ -43,14 +43,16 @@
|
|||
|
||||
-export_type([config/0]).
|
||||
|
||||
-elvis([{elvis_style, no_nested_try_catch, #{ ignore => [emqx_logger_jsonfmt]}}]).
-elvis([{elvis_style, no_nested_try_catch, #{ignore => [emqx_logger_jsonfmt]}}]).

%% this is what is used when calling logger:log(Level, Report, Meta).
-define(DEFAULT_FORMATTER, fun logger:format_otp_report/1).

-type config() :: #{depth => pos_integer() | unlimited,
report_cb => logger:report_cb(),
single_line => boolean()}.
-type config() :: #{
depth => pos_integer() | unlimited,
report_cb => logger:report_cb(),
single_line => boolean()
}.
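As a hedged sketch, the config() map above is what gets passed as the formatter options when this module is installed as an OTP logger formatter (handler id and option values are illustrative):

%% Install emqx_logger_jsonfmt on the default handler with illustrative options.
install_json_formatter() ->
    FmtConfig = #{depth => 20, single_line => true},
    logger:update_handler_config(default, formatter, {emqx_logger_jsonfmt, FmtConfig}).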
|
||||
|
||||
-define(IS_STRING(String), (is_list(String) orelse is_binary(String))).
|
||||
|
||||
|
|
@ -67,7 +69,7 @@ best_effort_json(Input) ->
|
|||
-spec format(logger:log_event(), config()) -> iodata().
|
||||
format(#{level := Level, msg := Msg, meta := Meta}, Config0) when is_map(Config0) ->
|
||||
Config = add_default_config(Config0),
|
||||
[format(Msg, Meta#{level => Level}, Config) , "\n"].
|
||||
[format(Msg, Meta#{level => Level}, Config), "\n"].
|
||||
|
||||
format(Msg, Meta, Config) ->
|
||||
Data0 =
|
||||
|
|
@ -78,12 +80,13 @@ format(Msg, Meta, Config) ->
|
|||
Meta#{msg => Bin}
|
||||
catch
|
||||
C:R:S ->
|
||||
Meta#{ msg => "emqx_logger_jsonfmt_format_error"
|
||||
, fmt_raw_input => Msg
|
||||
, fmt_error => C
|
||||
, fmt_reason => R
|
||||
, fmt_stacktrace => S
|
||||
}
|
||||
Meta#{
|
||||
msg => "emqx_logger_jsonfmt_format_error",
|
||||
fmt_raw_input => Msg,
|
||||
fmt_error => C,
|
||||
fmt_reason => R,
|
||||
fmt_stacktrace => S
|
||||
}
|
||||
end,
|
||||
Data = maps:without([report_cb], Data0),
|
||||
jiffy:encode(json_obj(Data, Config)).
|
||||
|
|
@ -102,8 +105,9 @@ maybe_format_msg(Msg, Meta, Config) ->
|
|||
format_msg({string, Chardata}, Meta, Config) ->
|
||||
%% already formatted
|
||||
format_msg({"~ts", [Chardata]}, Meta, Config);
|
||||
format_msg({report, _} = Msg, Meta, #{report_cb := Fun} = Config)
|
||||
when is_function(Fun,1); is_function(Fun,2) ->
|
||||
format_msg({report, _} = Msg, Meta, #{report_cb := Fun} = Config) when
|
||||
is_function(Fun, 1); is_function(Fun, 2)
|
||||
->
|
||||
%% a format callback function in config, no idea when this happens, but leaving it
|
||||
format_msg(Msg, Meta#{report_cb => Fun}, maps:remove(report_cb, Config));
|
||||
format_msg({report, Report}, #{report_cb := Fun} = Meta, Config) when is_function(Fun, 1) ->
|
||||
|
|
@ -112,10 +116,11 @@ format_msg({report, Report}, #{report_cb := Fun} = Meta, Config) when is_functio
|
|||
{Format, Args} when is_list(Format), is_list(Args) ->
|
||||
format_msg({Format, Args}, maps:remove(report_cb, Meta), Config);
|
||||
Other ->
|
||||
#{ msg => "report_cb_bad_return"
|
||||
, report_cb_fun => Fun
|
||||
, report_cb_return => Other
|
||||
}
|
||||
#{
|
||||
msg => "report_cb_bad_return",
|
||||
report_cb_fun => Fun,
|
||||
report_cb_return => Other
|
||||
}
|
||||
end;
|
||||
format_msg({report, Report}, #{report_cb := Fun}, Config) when is_function(Fun, 2) ->
|
||||
%% a format callback function of arity 2
|
||||
|
|
@ -125,30 +130,34 @@ format_msg({report, Report}, #{report_cb := Fun}, Config) when is_function(Fun,
|
|||
unicode:characters_to_binary(Chardata, utf8)
|
||||
catch
|
||||
_:_ ->
|
||||
#{ msg => "report_cb_bad_return"
|
||||
, report_cb_fun => Fun
|
||||
, report_cb_return => Chardata
|
||||
}
|
||||
#{
|
||||
msg => "report_cb_bad_return",
|
||||
report_cb_fun => Fun,
|
||||
report_cb_return => Chardata
|
||||
}
|
||||
end;
|
||||
Other ->
|
||||
#{ msg => "report_cb_bad_return"
|
||||
, report_cb_fun => Fun
|
||||
, report_cb_return => Other
|
||||
}
|
||||
#{
|
||||
msg => "report_cb_bad_return",
|
||||
report_cb_fun => Fun,
|
||||
report_cb_return => Other
|
||||
}
|
||||
end;
|
||||
format_msg({Fmt, Args}, _Meta, Config) ->
|
||||
do_format_msg(Fmt, Args, Config).
|
||||
|
||||
do_format_msg(Format0, Args, #{depth := Depth,
|
||||
single_line := SingleLine
|
||||
}) ->
|
||||
do_format_msg(Format0, Args, #{
|
||||
depth := Depth,
|
||||
single_line := SingleLine
|
||||
}) ->
|
||||
Format1 = io_lib:scan_format(Format0, Args),
|
||||
Format = reformat(Format1, Depth, SingleLine),
|
||||
Text0 = io_lib:build_text(Format, []),
|
||||
Text = case SingleLine of
|
||||
true -> re:replace(Text0, ",?\r?\n\s*",", ", [{return, list}, global, unicode]);
|
||||
false -> Text0
|
||||
end,
|
||||
Text =
|
||||
case SingleLine of
|
||||
true -> re:replace(Text0, ",?\r?\n\s*", ", ", [{return, list}, global, unicode]);
|
||||
false -> Text0
|
||||
end,
|
||||
trim(unicode:characters_to_binary(Text, utf8)).
|
||||
|
||||
%% Get rid of the leading spaces.
|
||||
|
|
@ -162,16 +171,18 @@ reformat([#{control_char := C} = M | T], Depth, true) when C =:= $p ->
|
|||
[limit_depth(M#{width => 0}, Depth) | reformat(T, Depth, true)];
|
||||
reformat([#{control_char := C} = M | T], Depth, true) when C =:= $P ->
|
||||
[M#{width => 0} | reformat(T, Depth, true)];
|
||||
reformat([#{control_char := C}=M | T], Depth, Single) when C =:= $p; C =:= $w ->
|
||||
reformat([#{control_char := C} = M | T], Depth, Single) when C =:= $p; C =:= $w ->
|
||||
[limit_depth(M, Depth) | reformat(T, Depth, Single)];
|
||||
reformat([H | T], Depth, Single) ->
|
||||
[H | reformat(T, Depth, Single)];
|
||||
reformat([], _, _) ->
|
||||
[].
|
||||
|
||||
limit_depth(M0, unlimited) -> M0;
|
||||
limit_depth(#{control_char:=C0, args:=Args}=M0, Depth) ->
|
||||
C = C0 - ($a - $A), %To uppercase.
|
||||
limit_depth(M0, unlimited) ->
|
||||
M0;
|
||||
limit_depth(#{control_char := C0, args := Args} = M0, Depth) ->
|
||||
%To uppercase.
|
||||
C = C0 - ($a - $A),
|
||||
M0#{control_char := C, args := Args ++ [Depth]}.
|
||||
|
||||
add_default_config(Config0) ->
|
||||
|
|
@ -187,7 +198,7 @@ best_effort_unicode(Input, Config) ->
|
|||
B when is_binary(B) -> B;
|
||||
_ -> do_format_msg("~p", [Input], Config)
|
||||
catch
|
||||
_ : _ ->
|
||||
_:_ ->
|
||||
do_format_msg("~p", [Input], Config)
|
||||
end.
|
||||
|
||||
|
|
@ -195,19 +206,21 @@ best_effort_json_obj(List, Config) when is_list(List) ->
|
|||
try
|
||||
json_obj(maps:from_list(List), Config)
|
||||
catch
|
||||
_ : _ ->
|
||||
_:_ ->
|
||||
[json(I, Config) || I <- List]
|
||||
end;
|
||||
best_effort_json_obj(Map, Config) ->
|
||||
try
|
||||
json_obj(Map, Config)
|
||||
catch
|
||||
_ : _ ->
|
||||
_:_ ->
|
||||
do_format_msg("~p", [Map], Config)
|
||||
end.
|
||||
|
||||
json([], _) -> "[]";
|
||||
json(<<"">>, _) -> "\"\"";
|
||||
json([], _) ->
|
||||
"[]";
|
||||
json(<<"">>, _) ->
|
||||
"\"\"";
|
||||
json(A, _) when is_atom(A) -> atom_to_binary(A, utf8);
|
||||
json(I, _) when is_integer(I) -> I;
|
||||
json(F, _) when is_float(F) -> F;
|
||||
|
|
@ -216,7 +229,7 @@ json(P, C) when is_port(P) -> json(port_to_list(P), C);
|
|||
json(F, C) when is_function(F) -> json(erlang:fun_to_list(F), C);
|
||||
json(B, Config) when is_binary(B) ->
|
||||
best_effort_unicode(B, Config);
|
||||
json(L, Config) when is_list(L), is_integer(hd(L))->
|
||||
json(L, Config) when is_list(L), is_integer(hd(L)) ->
|
||||
best_effort_unicode(L, Config);
|
||||
json(M, Config) when is_list(M), is_tuple(hd(M)), tuple_size(hd(M)) =:= 2 ->
|
||||
best_effort_json_obj(M, Config);
|
||||
|
|
@ -228,15 +241,28 @@ json(Term, Config) ->
|
|||
do_format_msg("~p", [Term], Config).
|
||||
|
||||
json_obj(Data, Config) ->
|
||||
maps:fold(fun (K, V, D) ->
|
||||
json_kv(K, V, D, Config)
|
||||
end, maps:new(), Data).
|
||||
maps:fold(
|
||||
fun(K, V, D) ->
|
||||
json_kv(K, V, D, Config)
|
||||
end,
|
||||
maps:new(),
|
||||
Data
|
||||
).
|
||||
|
||||
json_kv(mfa, {M, F, A}, Data, _Config) ->
|
||||
maps:put(mfa, <<(atom_to_binary(M, utf8))/binary, $:,
|
||||
(atom_to_binary(F, utf8))/binary, $/,
|
||||
(integer_to_binary(A))/binary>>, Data);
|
||||
json_kv('$kind', Kind, Data, Config) -> %% snabbkaffe
|
||||
maps:put(
|
||||
mfa,
|
||||
<<
|
||||
(atom_to_binary(M, utf8))/binary,
|
||||
$:,
|
||||
(atom_to_binary(F, utf8))/binary,
|
||||
$/,
|
||||
(integer_to_binary(A))/binary
|
||||
>>,
|
||||
Data
|
||||
);
|
||||
%% snabbkaffe
|
||||
json_kv('$kind', Kind, Data, Config) ->
|
||||
maps:put(msg, json(Kind, Config), Data);
|
||||
json_kv(gl, _, Data, _Config) ->
|
||||
%% drop gl because it's not interesting
|
||||
|
|
@ -267,44 +293,52 @@ json_key(Term) ->
|
|||
|
||||
no_crash_test_() ->
|
||||
Opts = [{numtests, 1000}, {to_file, user}],
|
||||
{timeout, 30,
|
||||
fun() -> ?assert(proper:quickcheck(t_no_crash(), Opts)) end}.
|
||||
{timeout, 30, fun() -> ?assert(proper:quickcheck(t_no_crash(), Opts)) end}.
|
||||
|
||||
t_no_crash() ->
|
||||
?FORALL({Level, Report, Meta, Config},
|
||||
{p_level(), p_report(), p_meta(), p_config()},
|
||||
t_no_crash_run(Level, Report, Meta, Config)).
|
||||
?FORALL(
|
||||
{Level, Report, Meta, Config},
|
||||
{p_level(), p_report(), p_meta(), p_config()},
|
||||
t_no_crash_run(Level, Report, Meta, Config)
|
||||
).
|
||||
|
||||
t_no_crash_run(Level, Report, {undefined, Meta}, Config) ->
|
||||
t_no_crash_run(Level, Report, maps:from_list(Meta), Config);
|
||||
t_no_crash_run(Level, Report, {ReportCb, Meta}, Config) ->
|
||||
t_no_crash_run(Level, Report, maps:from_list([{report_cb, ReportCb} | Meta]), Config);
|
||||
t_no_crash_run(Level, Report, Meta, Config) ->
|
||||
Input = #{ level => Level
|
||||
, msg => {report, Report}
|
||||
, meta => filter(Meta)
|
||||
},
|
||||
Input = #{
|
||||
level => Level,
|
||||
msg => {report, Report},
|
||||
meta => filter(Meta)
|
||||
},
|
||||
_ = format(Input, maps:from_list(Config)),
|
||||
true.
|
||||
|
||||
%% assume top level Report and Meta are sane
|
||||
filter(Map) ->
|
||||
Keys = lists:filter(
|
||||
fun(K) ->
|
||||
try json_key(K), true
|
||||
catch throw : {badkey, _} -> false
|
||||
end
|
||||
end, maps:keys(Map)),
|
||||
fun(K) ->
|
||||
try
|
||||
json_key(K),
|
||||
true
|
||||
catch
|
||||
throw:{badkey, _} -> false
|
||||
end
|
||||
end,
|
||||
maps:keys(Map)
|
||||
),
|
||||
maps:with(Keys, Map).
|
||||
|
||||
p_report_cb() ->
|
||||
proper_types:oneof([ fun ?MODULE:report_cb_1/1
|
||||
, fun ?MODULE:report_cb_2/2
|
||||
, fun ?MODULE:report_cb_crash/2
|
||||
, fun logger:format_otp_report/1
|
||||
, fun logger:format_report/1
|
||||
, format_report_undefined
|
||||
]).
|
||||
proper_types:oneof([
|
||||
fun ?MODULE:report_cb_1/1,
|
||||
fun ?MODULE:report_cb_2/2,
|
||||
fun ?MODULE:report_cb_crash/2,
|
||||
fun logger:format_otp_report/1,
|
||||
fun logger:format_report/1,
|
||||
format_report_undefined
|
||||
]).
|
||||
|
||||
report_cb_1(Input) -> {"~p", [Input]}.
|
||||
|
||||
|
|
@ -314,9 +348,12 @@ report_cb_crash(_Input, _Config) -> error(report_cb_crash).
|
|||
|
||||
p_kvlist() ->
|
||||
proper_types:list({
|
||||
proper_types:oneof([proper_types:atom(),
|
||||
proper_types:binary()
|
||||
]), proper_types:term()}).
|
||||
proper_types:oneof([
|
||||
proper_types:atom(),
|
||||
proper_types:binary()
|
||||
]),
|
||||
proper_types:term()
|
||||
}).
|
||||
|
||||
%% meta type is 2-tuple, report_cb type, and some random key value pairs
|
||||
p_meta() ->
|
||||
|
|
@ -330,8 +367,10 @@ p_level() -> proper_types:oneof([info, debug, error, warning, foobar]).
|
|||
|
||||
p_config() ->
|
||||
proper_types:shrink_list(
|
||||
[ {depth, p_limit()}
|
||||
, {single_line, proper_types:boolean()}
|
||||
]).
|
||||
[
|
||||
{depth, p_limit()},
|
||||
{single_line, proper_types:boolean()}
|
||||
]
|
||||
).
|
||||
|
||||
-endif.
|
||||
|
|
|
|||
|
|
@ -44,8 +44,9 @@ try_format_unicode(Char) ->
|
|||
{incomplete, _, _} -> error;
|
||||
Binary -> Binary
|
||||
end
|
||||
catch _:_ ->
|
||||
error
|
||||
catch
|
||||
_:_ ->
|
||||
error
|
||||
end,
|
||||
case List of
|
||||
error -> io_lib:format("~0p", [Char]);
|
||||
|
|
@ -54,15 +55,18 @@ try_format_unicode(Char) ->
|
|||
|
||||
enrich_report_mfa(Report, #{mfa := Mfa, line := Line}) ->
|
||||
Report#{mfa => mfa(Mfa), line => Line};
|
||||
enrich_report_mfa(Report, _) -> Report.
|
||||
enrich_report_mfa(Report, _) ->
|
||||
Report.
|
||||
|
||||
enrich_report_clientid(Report, #{clientid := ClientId}) ->
|
||||
Report#{clientid => try_format_unicode(ClientId)};
|
||||
enrich_report_clientid(Report, _) -> Report.
|
||||
enrich_report_clientid(Report, _) ->
|
||||
Report.
|
||||
|
||||
enrich_report_peername(Report, #{peername := Peername}) ->
|
||||
Report#{peername => Peername};
|
||||
enrich_report_peername(Report, _) -> Report.
|
||||
enrich_report_peername(Report, _) ->
|
||||
Report.
|
||||
|
||||
%% clientid and peername always in emqx_conn's process metadata.
|
||||
%% topic can be put in meta using ?SLOG/3, or put in msg's report by ?SLOG/2
|
||||
|
|
@ -70,7 +74,8 @@ enrich_report_topic(Report, #{topic := Topic}) ->
|
|||
Report#{topic => try_format_unicode(Topic)};
|
||||
enrich_report_topic(Report = #{topic := Topic}, _) ->
|
||||
Report#{topic => try_format_unicode(Topic)};
|
||||
enrich_report_topic(Report, _) -> Report.
|
||||
enrich_report_topic(Report, _) ->
|
||||
Report.
|
||||
|
||||
enrich_mfa({Fmt, Args}, #{mfa := Mfa, line := Line}) when is_list(Fmt) ->
|
||||
{Fmt ++ " mfa: ~ts line: ~w", Args ++ [mfa(Mfa), Line]};
|
||||
|
|
@ -78,7 +83,7 @@ enrich_mfa(Msg, _) ->
|
|||
Msg.
|
||||
|
||||
enrich_client_info({Fmt, Args}, #{clientid := ClientId, peername := Peer}) when is_list(Fmt) ->
|
||||
{" ~ts@~ts " ++ Fmt, [ClientId, Peer | Args] };
|
||||
{" ~ts@~ts " ++ Fmt, [ClientId, Peer | Args]};
|
||||
enrich_client_info({Fmt, Args}, #{clientid := ClientId}) when is_list(Fmt) ->
|
||||
{" ~ts " ++ Fmt, [ClientId | Args]};
|
||||
enrich_client_info({Fmt, Args}, #{peername := Peer}) when is_list(Fmt) ->
|
||||
|
|
|
|||
|
|
@ -15,26 +15,27 @@
|
|||
%%--------------------------------------------------------------------
|
||||
-module(emqx_map_lib).
|
||||
|
||||
-export([ deep_get/2
|
||||
, deep_get/3
|
||||
, deep_find/2
|
||||
, deep_put/3
|
||||
, deep_remove/2
|
||||
, deep_merge/2
|
||||
, safe_atom_key_map/1
|
||||
, unsafe_atom_key_map/1
|
||||
, jsonable_map/1
|
||||
, jsonable_map/2
|
||||
, binary_string/1
|
||||
, deep_convert/3
|
||||
, diff_maps/2
|
||||
, merge_with/3
|
||||
]).
|
||||
-export([
|
||||
deep_get/2,
|
||||
deep_get/3,
|
||||
deep_find/2,
|
||||
deep_put/3,
|
||||
deep_remove/2,
|
||||
deep_merge/2,
|
||||
safe_atom_key_map/1,
|
||||
unsafe_atom_key_map/1,
|
||||
jsonable_map/1,
|
||||
jsonable_map/2,
|
||||
binary_string/1,
|
||||
deep_convert/3,
|
||||
diff_maps/2,
|
||||
merge_with/3
|
||||
]).
|
||||
|
||||
-export_type([config_key/0, config_key_path/0]).
|
||||
-type config_key() :: atom() | binary() | [byte()].
|
||||
-type config_key_path() :: [config_key()].
|
||||
-type convert_fun() :: fun((...) -> {K1::any(), V1::any()} | drop).
|
||||
-type convert_fun() :: fun((...) -> {K1 :: any(), V1 :: any()} | drop).
|
||||
|
||||
%%-----------------------------------------------------------------
|
||||
-spec deep_get(config_key_path(), map()) -> term().
|
||||
|
|
@ -81,8 +82,10 @@ deep_remove([Key | KeyPath], Map) ->
|
|||
case maps:find(Key, Map) of
|
||||
{ok, SubMap} when is_map(SubMap) ->
|
||||
Map#{Key => deep_remove(KeyPath, SubMap)};
|
||||
{ok, _Val} -> Map;
|
||||
error -> Map
|
||||
{ok, _Val} ->
|
||||
Map;
|
||||
error ->
|
||||
Map
|
||||
end.
|
||||
|
||||
%% #{a => #{b => 3, c => 2}, d => 4}

@@ -90,7 +93,8 @@ deep_remove([Key | KeyPath], Map) ->
-spec deep_merge(map(), map()) -> map().
deep_merge(BaseMap, NewMap) ->
NewKeys = maps:keys(NewMap) -- maps:keys(BaseMap),
MergedBase = maps:fold(fun(K, V, Acc) ->
MergedBase = maps:fold(
fun(K, V, Acc) ->
case maps:find(K, NewMap) of
error ->
Acc#{K => V};

@@ -99,20 +103,28 @@ deep_merge(BaseMap, NewMap) ->
{ok, NewV} ->
Acc#{K => NewV}
end
end, #{}, BaseMap),
end,
#{},
BaseMap
),
maps:merge(MergedBase, maps:with(NewKeys, NewMap)).
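A small usage sketch of deep_merge/2; the recursive clause for nested maps is elided by the hunk boundary, so the expected result below is inferred from the documented example above:

deep_merge_example() ->
    Base = #{a => #{b => 3, c => 2}, d => 4},
    New = #{a => #{b => 1}, e => 5},
    %% Expected: #{a => #{b => 1, c => 2}, d => 4, e => 5}
    emqx_map_lib:deep_merge(Base, New).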
|
||||
|
||||
-spec deep_convert(map(), convert_fun(), Args::list()) -> map().
|
||||
-spec deep_convert(map(), convert_fun(), Args :: list()) -> map().
|
||||
deep_convert(Map, ConvFun, Args) when is_map(Map) ->
|
||||
maps:fold(fun(K, V, Acc) ->
|
||||
maps:fold(
|
||||
fun(K, V, Acc) ->
|
||||
case apply(ConvFun, [K, deep_convert(V, ConvFun, Args) | Args]) of
|
||||
drop -> Acc;
|
||||
{K1, V1} -> Acc#{K1 => V1}
|
||||
end
|
||||
end, #{}, Map);
|
||||
end,
|
||||
#{},
|
||||
Map
|
||||
);
|
||||
deep_convert(ListV, ConvFun, Args) when is_list(ListV) ->
|
||||
[deep_convert(V, ConvFun, Args) || V <- ListV];
|
||||
deep_convert(Val, _, _Args) -> Val.
|
||||
deep_convert(Val, _, _Args) ->
|
||||
Val.
|
||||
|
||||
-spec unsafe_atom_key_map(#{binary() | atom() => any()}) -> #{atom() => any()}.
|
||||
unsafe_atom_key_map(Map) ->
|
||||
|
|
@ -130,33 +142,41 @@ jsonable_map(Map, JsonableFun) ->
|
|||
deep_convert(Map, fun binary_string_kv/3, [JsonableFun]).
|
||||
|
||||
-spec diff_maps(map(), map()) ->
|
||||
#{added := map(), identical := map(), removed := map(),
|
||||
changed := #{any() => {OldValue::any(), NewValue::any()}}}.
|
||||
#{
|
||||
added := map(),
|
||||
identical := map(),
|
||||
removed := map(),
|
||||
changed := #{any() => {OldValue :: any(), NewValue :: any()}}
|
||||
}.
|
||||
diff_maps(NewMap, OldMap) ->
|
||||
InitR = #{identical => #{}, changed => #{}, removed => #{}},
|
||||
{Result, RemInNew} =
|
||||
lists:foldl(fun({OldK, OldV}, {Result0 = #{identical := I, changed := U, removed := D},
|
||||
RemNewMap}) ->
|
||||
Result1 = case maps:find(OldK, NewMap) of
|
||||
error ->
|
||||
Result0#{removed => D#{OldK => OldV}};
|
||||
{ok, NewV} when NewV == OldV ->
|
||||
Result0#{identical => I#{OldK => OldV}};
|
||||
{ok, NewV} ->
|
||||
Result0#{changed => U#{OldK => {OldV, NewV}}}
|
||||
lists:foldl(
|
||||
fun({OldK, OldV}, {Result0 = #{identical := I, changed := U, removed := D}, RemNewMap}) ->
|
||||
Result1 =
|
||||
case maps:find(OldK, NewMap) of
|
||||
error ->
|
||||
Result0#{removed => D#{OldK => OldV}};
|
||||
{ok, NewV} when NewV == OldV ->
|
||||
Result0#{identical => I#{OldK => OldV}};
|
||||
{ok, NewV} ->
|
||||
Result0#{changed => U#{OldK => {OldV, NewV}}}
|
||||
end,
|
||||
{Result1, maps:remove(OldK, RemNewMap)}
|
||||
end,
|
||||
{Result1, maps:remove(OldK, RemNewMap)}
|
||||
end, {InitR, NewMap}, maps:to_list(OldMap)),
|
||||
{InitR, NewMap},
|
||||
maps:to_list(OldMap)
|
||||
),
|
||||
Result#{added => RemInNew}.
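A usage sketch of diff_maps/2; the result shape follows the spec above (added, identical, removed, changed):

diff_maps_example() ->
    New = #{a => 1, b => 2, d => 4},
    Old = #{a => 1, b => 3, c => 9},
    %% Expected:
    %%   #{identical => #{a => 1},
    %%     changed => #{b => {3, 2}},
    %%     removed => #{c => 9},
    %%     added => #{d => 4}}
    emqx_map_lib:diff_maps(New, Old).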
|
||||
|
||||
|
||||
binary_string_kv(K, V, JsonableFun) ->
|
||||
case JsonableFun(K, V) of
|
||||
drop -> drop;
|
||||
{K1, V1} -> {binary_string(K1), V1}
|
||||
end.
|
||||
|
||||
binary_string([]) -> [];
|
||||
binary_string([]) ->
|
||||
[];
|
||||
binary_string(Val) when is_list(Val) ->
|
||||
case io_lib:printable_unicode_list(Val) of
|
||||
true -> unicode:characters_to_binary(Val);
|
||||
|
|
@ -167,40 +187,52 @@ binary_string(Val) ->
|
|||
|
||||
%%---------------------------------------------------------------------------
|
||||
covert_keys_to_atom(BinKeyMap, Conv) ->
|
||||
deep_convert(BinKeyMap, fun
|
||||
deep_convert(
|
||||
BinKeyMap,
|
||||
fun
|
||||
(K, V) when is_atom(K) -> {K, V};
|
||||
(K, V) when is_binary(K) -> {Conv(K), V}
|
||||
end, []).
|
||||
end,
|
||||
[]
|
||||
).
|
||||
|
||||
%% copy from maps.erl OTP24.0
|
||||
-compile({inline, [error_with_info/2]}).
|
||||
merge_with(Combiner, Map1, Map2) when is_map(Map1),
|
||||
is_map(Map2),
|
||||
is_function(Combiner, 3) ->
|
||||
merge_with(Combiner, Map1, Map2) when
|
||||
is_map(Map1),
|
||||
is_map(Map2),
|
||||
is_function(Combiner, 3)
|
||||
->
|
||||
case map_size(Map1) > map_size(Map2) of
|
||||
true ->
|
||||
Iterator = maps:iterator(Map2),
|
||||
merge_with_t(maps:next(Iterator),
|
||||
Map1,
|
||||
Map2,
|
||||
Combiner);
|
||||
merge_with_t(
|
||||
maps:next(Iterator),
|
||||
Map1,
|
||||
Map2,
|
||||
Combiner
|
||||
);
|
||||
false ->
|
||||
Iterator = maps:iterator(Map1),
|
||||
merge_with_t(maps:next(Iterator),
|
||||
Map2,
|
||||
Map1,
|
||||
fun(K, V1, V2) -> Combiner(K, V2, V1) end)
|
||||
merge_with_t(
|
||||
maps:next(Iterator),
|
||||
Map2,
|
||||
Map1,
|
||||
fun(K, V1, V2) -> Combiner(K, V2, V1) end
|
||||
)
|
||||
end;
|
||||
merge_with(Combiner, Map1, Map2) ->
|
||||
error_with_info(error_type_merge_intersect(Map1, Map2, Combiner),
|
||||
[Combiner, Map1, Map2]).
|
||||
error_with_info(
|
||||
error_type_merge_intersect(Map1, Map2, Combiner),
|
||||
[Combiner, Map1, Map2]
|
||||
).
|
||||
|
||||
merge_with_t({K, V2, Iterator}, Map1, Map2, Combiner) ->
|
||||
case Map1 of
|
||||
#{ K := V1 } ->
|
||||
NewMap1 = Map1#{ K := Combiner(K, V1, V2) },
|
||||
#{K := V1} ->
|
||||
NewMap1 = Map1#{K := Combiner(K, V1, V2)},
|
||||
merge_with_t(maps:next(Iterator), NewMap1, Map2, Combiner);
|
||||
#{ } ->
|
||||
#{} ->
|
||||
merge_with_t(maps:next(Iterator), maps:put(K, V2, Map1), Map2, Combiner)
|
||||
end;
|
||||
merge_with_t(none, Result, _, _) ->
|
||||
|
|
|
|||
|
|
@ -23,309 +23,361 @@
|
|||
-include("types.hrl").
|
||||
|
||||
%% Create
|
||||
-export([ make/2
|
||||
, make/3
|
||||
, make/4
|
||||
, make/6
|
||||
, make/7
|
||||
]).
|
||||
-export([
|
||||
make/2,
|
||||
make/3,
|
||||
make/4,
|
||||
make/6,
|
||||
make/7
|
||||
]).
|
||||
|
||||
%% Fields
|
||||
-export([ id/1
|
||||
, qos/1
|
||||
, from/1
|
||||
, topic/1
|
||||
, payload/1
|
||||
, timestamp/1
|
||||
]).
|
||||
-export([
|
||||
id/1,
|
||||
qos/1,
|
||||
from/1,
|
||||
topic/1,
|
||||
payload/1,
|
||||
timestamp/1
|
||||
]).
|
||||
|
||||
%% Flags
|
||||
-export([ is_sys/1
|
||||
, clean_dup/1
|
||||
, get_flag/2
|
||||
, get_flag/3
|
||||
, get_flags/1
|
||||
, set_flag/2
|
||||
, set_flag/3
|
||||
, set_flags/2
|
||||
, unset_flag/2
|
||||
]).
|
||||
-export([
|
||||
is_sys/1,
|
||||
clean_dup/1,
|
||||
get_flag/2,
|
||||
get_flag/3,
|
||||
get_flags/1,
|
||||
set_flag/2,
|
||||
set_flag/3,
|
||||
set_flags/2,
|
||||
unset_flag/2
|
||||
]).
|
||||
|
||||
%% Headers
|
||||
-export([ get_headers/1
|
||||
, get_header/2
|
||||
, get_header/3
|
||||
, set_header/3
|
||||
, set_headers/2
|
||||
, remove_header/2
|
||||
]).
|
||||
-export([
|
||||
get_headers/1,
|
||||
get_header/2,
|
||||
get_header/3,
|
||||
set_header/3,
|
||||
set_headers/2,
|
||||
remove_header/2
|
||||
]).
|
||||
|
||||
-export([ is_expired/1
|
||||
, update_expiry/1
|
||||
]).
|
||||
-export([
|
||||
is_expired/1,
|
||||
update_expiry/1
|
||||
]).
|
||||
|
||||
-export([ to_packet/2
|
||||
, to_map/1
|
||||
, to_log_map/1
|
||||
, to_list/1
|
||||
, from_map/1
|
||||
]).
|
||||
-export([
|
||||
to_packet/2,
|
||||
to_map/1,
|
||||
to_log_map/1,
|
||||
to_list/1,
|
||||
from_map/1
|
||||
]).
|
||||
|
||||
-export_type([message_map/0]).
|
||||
|
||||
-type(message_map() :: #{id := binary(),
|
||||
qos := 0 | 1 | 2,
|
||||
from := atom() | binary(),
|
||||
flags := emqx_types:flags(),
|
||||
headers := emqx_types:headers(),
|
||||
topic := emqx_types:topic(),
|
||||
payload := emqx_types:payload(),
|
||||
timestamp := integer(),
|
||||
extra := _}
|
||||
).
|
||||
-type message_map() :: #{
|
||||
id := binary(),
|
||||
qos := 0 | 1 | 2,
|
||||
from := atom() | binary(),
|
||||
flags := emqx_types:flags(),
|
||||
headers := emqx_types:headers(),
|
||||
topic := emqx_types:topic(),
|
||||
payload := emqx_types:payload(),
|
||||
timestamp := integer(),
|
||||
extra := _
|
||||
}.
|
||||
|
||||
-elvis([{elvis_style, god_modules, disable}]).
|
||||
|
||||
-spec(make(emqx_types:topic(), emqx_types:payload()) -> emqx_types:message()).
|
||||
-spec make(emqx_types:topic(), emqx_types:payload()) -> emqx_types:message().
|
||||
make(Topic, Payload) ->
|
||||
make(undefined, Topic, Payload).
|
||||
|
||||
-spec(make(emqx_types:clientid(),
|
||||
emqx_types:topic(),
|
||||
emqx_types:payload()) -> emqx_types:message()).
|
||||
-spec make(
|
||||
emqx_types:clientid(),
|
||||
emqx_types:topic(),
|
||||
emqx_types:payload()
|
||||
) -> emqx_types:message().
|
||||
make(From, Topic, Payload) ->
|
||||
make(From, ?QOS_0, Topic, Payload).
|
||||
|
||||
-spec(make(emqx_types:clientid(),
|
||||
emqx_types:qos(),
|
||||
emqx_types:topic(),
|
||||
emqx_types:payload()) -> emqx_types:message()).
|
||||
-spec make(
|
||||
emqx_types:clientid(),
|
||||
emqx_types:qos(),
|
||||
emqx_types:topic(),
|
||||
emqx_types:payload()
|
||||
) -> emqx_types:message().
|
||||
make(From, QoS, Topic, Payload) when ?QOS_0 =< QoS, QoS =< ?QOS_2 ->
|
||||
Now = erlang:system_time(millisecond),
|
||||
#message{id = emqx_guid:gen(),
|
||||
qos = QoS,
|
||||
from = From,
|
||||
topic = Topic,
|
||||
payload = Payload,
|
||||
timestamp = Now
|
||||
}.
|
||||
#message{
|
||||
id = emqx_guid:gen(),
|
||||
qos = QoS,
|
||||
from = From,
|
||||
topic = Topic,
|
||||
payload = Payload,
|
||||
timestamp = Now
|
||||
}.
|
||||
|
||||
-spec(make(emqx_types:clientid(),
|
||||
emqx_types:qos(),
|
||||
emqx_types:topic(),
|
||||
emqx_types:payload(),
|
||||
emqx_types:flags(),
|
||||
emqx_types:headers()) -> emqx_types:message()).
|
||||
make(From, QoS, Topic, Payload, Flags, Headers)
|
||||
when ?QOS_0 =< QoS, QoS =< ?QOS_2,
|
||||
is_map(Flags), is_map(Headers) ->
|
||||
-spec make(
|
||||
emqx_types:clientid(),
|
||||
emqx_types:qos(),
|
||||
emqx_types:topic(),
|
||||
emqx_types:payload(),
|
||||
emqx_types:flags(),
|
||||
emqx_types:headers()
|
||||
) -> emqx_types:message().
|
||||
make(From, QoS, Topic, Payload, Flags, Headers) when
|
||||
?QOS_0 =< QoS,
|
||||
QoS =< ?QOS_2,
|
||||
is_map(Flags),
|
||||
is_map(Headers)
|
||||
->
|
||||
Now = erlang:system_time(millisecond),
|
||||
#message{id = emqx_guid:gen(),
|
||||
qos = QoS,
|
||||
from = From,
|
||||
flags = Flags,
|
||||
headers = Headers,
|
||||
topic = Topic,
|
||||
payload = Payload,
|
||||
timestamp = Now
|
||||
}.
|
||||
#message{
|
||||
id = emqx_guid:gen(),
|
||||
qos = QoS,
|
||||
from = From,
|
||||
flags = Flags,
|
||||
headers = Headers,
|
||||
topic = Topic,
|
||||
payload = Payload,
|
||||
timestamp = Now
|
||||
}.
|
||||
|
||||
-spec(make(MsgId :: binary(),
|
||||
emqx_types:clientid(),
|
||||
emqx_types:qos(),
|
||||
emqx_types:topic(),
|
||||
emqx_types:payload(),
|
||||
emqx_types:flags(),
|
||||
emqx_types:headers()) -> emqx_types:message()).
|
||||
make(MsgId, From, QoS, Topic, Payload, Flags, Headers)
|
||||
when ?QOS_0 =< QoS, QoS =< ?QOS_2,
|
||||
is_map(Flags), is_map(Headers) ->
|
||||
-spec make(
|
||||
MsgId :: binary(),
|
||||
emqx_types:clientid(),
|
||||
emqx_types:qos(),
|
||||
emqx_types:topic(),
|
||||
emqx_types:payload(),
|
||||
emqx_types:flags(),
|
||||
emqx_types:headers()
|
||||
) -> emqx_types:message().
|
||||
make(MsgId, From, QoS, Topic, Payload, Flags, Headers) when
|
||||
?QOS_0 =< QoS,
|
||||
QoS =< ?QOS_2,
|
||||
is_map(Flags),
|
||||
is_map(Headers)
|
||||
->
|
||||
Now = erlang:system_time(millisecond),
|
||||
#message{id = MsgId,
|
||||
qos = QoS,
|
||||
from = From,
|
||||
flags = Flags,
|
||||
headers = Headers,
|
||||
topic = Topic,
|
||||
payload = Payload,
|
||||
timestamp = Now
|
||||
}.
|
||||
#message{
|
||||
id = MsgId,
|
||||
qos = QoS,
|
||||
from = From,
|
||||
flags = Flags,
|
||||
headers = Headers,
|
||||
topic = Topic,
|
||||
payload = Payload,
|
||||
timestamp = Now
|
||||
}.
|
||||
|
||||
-spec(id(emqx_types:message()) -> maybe(binary())).
|
||||
-spec id(emqx_types:message()) -> maybe(binary()).
|
||||
id(#message{id = Id}) -> Id.
|
||||
|
||||
-spec(qos(emqx_types:message()) -> emqx_types:qos()).
|
||||
-spec qos(emqx_types:message()) -> emqx_types:qos().
|
||||
qos(#message{qos = QoS}) -> QoS.
|
||||
|
||||
-spec(from(emqx_types:message()) -> atom() | binary()).
|
||||
-spec from(emqx_types:message()) -> atom() | binary().
|
||||
from(#message{from = From}) -> From.
|
||||
|
||||
-spec(topic(emqx_types:message()) -> emqx_types:topic()).
|
||||
-spec topic(emqx_types:message()) -> emqx_types:topic().
|
||||
topic(#message{topic = Topic}) -> Topic.
|
||||
|
||||
-spec(payload(emqx_types:message()) -> emqx_types:payload()).
|
||||
-spec payload(emqx_types:message()) -> emqx_types:payload().
|
||||
payload(#message{payload = Payload}) -> Payload.
|
||||
|
||||
-spec(timestamp(emqx_types:message()) -> integer()).
|
||||
-spec timestamp(emqx_types:message()) -> integer().
|
||||
timestamp(#message{timestamp = TS}) -> TS.
|
||||
|
||||
-spec(is_sys(emqx_types:message()) -> boolean()).
|
||||
-spec is_sys(emqx_types:message()) -> boolean().
|
||||
is_sys(#message{flags = #{sys := true}}) ->
|
||||
true;
|
||||
is_sys(#message{topic = <<"$SYS/", _/binary>>}) ->
|
||||
true;
|
||||
is_sys(_Msg) -> false.
|
||||
is_sys(_Msg) ->
|
||||
false.
|
||||
|
||||
-spec(clean_dup(emqx_types:message()) -> emqx_types:message()).
|
||||
-spec clean_dup(emqx_types:message()) -> emqx_types:message().
|
||||
clean_dup(Msg = #message{flags = Flags = #{dup := true}}) ->
|
||||
Msg#message{flags = Flags#{dup => false}};
|
||||
clean_dup(Msg) -> Msg.
|
||||
clean_dup(Msg) ->
|
||||
Msg.
|
||||
|
||||
-spec(set_flags(map(), emqx_types:message()) -> emqx_types:message()).
|
||||
-spec set_flags(map(), emqx_types:message()) -> emqx_types:message().
|
||||
set_flags(New, Msg = #message{flags = Old}) when is_map(New) ->
|
||||
Msg#message{flags = maps:merge(Old, New)}.
|
||||
|
||||
-spec(get_flag(emqx_types:flag(), emqx_types:message()) -> boolean()).
|
||||
-spec get_flag(emqx_types:flag(), emqx_types:message()) -> boolean().
|
||||
get_flag(Flag, Msg) ->
|
||||
get_flag(Flag, Msg, false).
|
||||
|
||||
get_flag(Flag, #message{flags = Flags}, Default) ->
|
||||
maps:get(Flag, Flags, Default).
|
||||
|
||||
-spec(get_flags(emqx_types:message()) -> maybe(map())).
|
||||
-spec get_flags(emqx_types:message()) -> maybe(map()).
|
||||
get_flags(#message{flags = Flags}) -> Flags.
|
||||
|
||||
-spec(set_flag(emqx_types:flag(), emqx_types:message()) -> emqx_types:message()).
|
||||
-spec set_flag(emqx_types:flag(), emqx_types:message()) -> emqx_types:message().
|
||||
set_flag(Flag, Msg = #message{flags = Flags}) when is_atom(Flag) ->
|
||||
Msg#message{flags = maps:put(Flag, true, Flags)}.
|
||||
|
||||
-spec(set_flag(emqx_types:flag(), boolean() | integer(), emqx_types:message())
|
||||
-> emqx_types:message()).
|
||||
-spec set_flag(emqx_types:flag(), boolean() | integer(), emqx_types:message()) ->
|
||||
emqx_types:message().
|
||||
set_flag(Flag, Val, Msg = #message{flags = Flags}) when is_atom(Flag) ->
|
||||
Msg#message{flags = maps:put(Flag, Val, Flags)}.
|
||||
|
||||
-spec(unset_flag(emqx_types:flag(), emqx_types:message()) -> emqx_types:message()).
|
||||
-spec unset_flag(emqx_types:flag(), emqx_types:message()) -> emqx_types:message().
|
||||
unset_flag(Flag, Msg = #message{flags = Flags}) ->
|
||||
case maps:is_key(Flag, Flags) of
|
||||
true -> Msg#message{flags = maps:remove(Flag, Flags)};
|
||||
true -> Msg#message{flags = maps:remove(Flag, Flags)};
|
||||
false -> Msg
|
||||
end.
|
||||
|
||||
-spec(set_headers(map(), emqx_types:message()) -> emqx_types:message()).
|
||||
-spec set_headers(map(), emqx_types:message()) -> emqx_types:message().
|
||||
set_headers(New, Msg = #message{headers = Old}) when is_map(New) ->
|
||||
Msg#message{headers = maps:merge(Old, New)}.
|
||||
|
||||
-spec(get_headers(emqx_types:message()) -> maybe(map())).
|
||||
-spec get_headers(emqx_types:message()) -> maybe(map()).
|
||||
get_headers(Msg) -> Msg#message.headers.
|
||||
|
||||
-spec(get_header(term(), emqx_types:message()) -> term()).
|
||||
-spec get_header(term(), emqx_types:message()) -> term().
|
||||
get_header(Hdr, Msg) ->
|
||||
get_header(Hdr, Msg, undefined).
|
||||
-spec(get_header(term(), emqx_types:message(), term()) -> term()).
|
||||
-spec get_header(term(), emqx_types:message(), term()) -> term().
|
||||
get_header(Hdr, #message{headers = Headers}, Default) ->
|
||||
maps:get(Hdr, Headers, Default).
|
||||
|
||||
-spec(set_header(term(), term(), emqx_types:message()) -> emqx_types:message()).
|
||||
-spec set_header(term(), term(), emqx_types:message()) -> emqx_types:message().
|
||||
set_header(Hdr, Val, Msg = #message{headers = Headers}) ->
|
||||
Msg#message{headers = maps:put(Hdr, Val, Headers)}.
|
||||
|
||||
-spec(remove_header(term(), emqx_types:message()) -> emqx_types:message()).
|
||||
-spec remove_header(term(), emqx_types:message()) -> emqx_types:message().
|
||||
remove_header(Hdr, Msg = #message{headers = Headers}) ->
|
||||
case maps:is_key(Hdr, Headers) of
|
||||
true -> Msg#message{headers = maps:remove(Hdr, Headers)};
|
||||
true -> Msg#message{headers = maps:remove(Hdr, Headers)};
|
||||
false -> Msg
|
||||
end.
|
||||
|
||||
-spec(is_expired(emqx_types:message()) -> boolean()).
|
||||
is_expired(#message{headers = #{properties := #{'Message-Expiry-Interval' := Interval}},
|
||||
timestamp = CreatedAt}) ->
|
||||
-spec is_expired(emqx_types:message()) -> boolean().
|
||||
is_expired(#message{
|
||||
headers = #{properties := #{'Message-Expiry-Interval' := Interval}},
|
||||
timestamp = CreatedAt
|
||||
}) ->
|
||||
elapsed(CreatedAt) > timer:seconds(Interval);
|
||||
is_expired(_Msg) -> false.
|
||||
is_expired(_Msg) ->
|
||||
false.
|
||||
|
||||
-spec(update_expiry(emqx_types:message()) -> emqx_types:message()).
|
||||
update_expiry(Msg = #message{headers = #{properties := #{'Message-Expiry-Interval' := Interval}},
|
||||
timestamp = CreatedAt}) ->
|
||||
-spec update_expiry(emqx_types:message()) -> emqx_types:message().
|
||||
update_expiry(
|
||||
Msg = #message{
|
||||
headers = #{properties := #{'Message-Expiry-Interval' := Interval}},
|
||||
timestamp = CreatedAt
|
||||
}
|
||||
) ->
|
||||
Props = maps:get(properties, Msg#message.headers),
|
||||
case elapsed(CreatedAt) of
|
||||
Elapsed when Elapsed > 0 ->
|
||||
Interval1 = max(1, Interval - (Elapsed div 1000)),
|
||||
set_header(properties, Props#{'Message-Expiry-Interval' => Interval1}, Msg);
|
||||
_ -> Msg
|
||||
_ ->
|
||||
Msg
|
||||
end;
|
||||
update_expiry(Msg) -> Msg.
|
||||
update_expiry(Msg) ->
|
||||
Msg.
|
||||
|
||||
%% @doc Message to PUBLISH Packet.
|
||||
-spec(to_packet(emqx_types:packet_id(), emqx_types:message())
|
||||
-> emqx_types:packet()).
|
||||
to_packet(PacketId, Msg = #message{qos = QoS, headers = Headers,
|
||||
topic = Topic, payload = Payload}) ->
|
||||
#mqtt_packet{header = #mqtt_packet_header{type = ?PUBLISH,
|
||||
dup = get_flag(dup, Msg),
|
||||
qos = QoS,
|
||||
retain = get_flag(retain, Msg)
|
||||
},
|
||||
variable = #mqtt_packet_publish{topic_name = Topic,
|
||||
packet_id = PacketId,
|
||||
properties = filter_pub_props(
|
||||
maps:get(properties, Headers, #{}))
|
||||
},
|
||||
payload = Payload
|
||||
}.
|
||||
-spec to_packet(emqx_types:packet_id(), emqx_types:message()) ->
|
||||
emqx_types:packet().
|
||||
to_packet(
|
||||
PacketId,
|
||||
Msg = #message{
|
||||
qos = QoS,
|
||||
headers = Headers,
|
||||
topic = Topic,
|
||||
payload = Payload
|
||||
}
|
||||
) ->
|
||||
#mqtt_packet{
|
||||
header = #mqtt_packet_header{
|
||||
type = ?PUBLISH,
|
||||
dup = get_flag(dup, Msg),
|
||||
qos = QoS,
|
||||
retain = get_flag(retain, Msg)
|
||||
},
|
||||
variable = #mqtt_packet_publish{
|
||||
topic_name = Topic,
|
||||
packet_id = PacketId,
|
||||
properties = filter_pub_props(
|
||||
maps:get(properties, Headers, #{})
|
||||
)
|
||||
},
|
||||
payload = Payload
|
||||
}.
|
||||
|
||||
filter_pub_props(Props) ->
|
||||
maps:with(['Payload-Format-Indicator',
|
||||
'Message-Expiry-Interval',
|
||||
'Response-Topic',
|
||||
'Correlation-Data',
|
||||
'User-Property',
|
||||
'Subscription-Identifier',
|
||||
'Content-Type'
|
||||
], Props).
|
||||
maps:with(
|
||||
[
|
||||
'Payload-Format-Indicator',
|
||||
'Message-Expiry-Interval',
|
||||
'Response-Topic',
|
||||
'Correlation-Data',
|
||||
'User-Property',
|
||||
'Subscription-Identifier',
|
||||
'Content-Type'
|
||||
],
|
||||
Props
|
||||
).
|
||||
|
||||
%% @doc Message to map
|
||||
-spec(to_map(emqx_types:message()) -> message_map()).
|
||||
-spec to_map(emqx_types:message()) -> message_map().
|
||||
to_map(#message{
|
||||
id = Id,
|
||||
qos = QoS,
|
||||
from = From,
|
||||
flags = Flags,
|
||||
headers = Headers,
|
||||
topic = Topic,
|
||||
payload = Payload,
|
||||
timestamp = Timestamp,
|
||||
extra = Extra
|
||||
}) ->
|
||||
#{id => Id,
|
||||
qos => QoS,
|
||||
from => From,
|
||||
flags => Flags,
|
||||
headers => Headers,
|
||||
topic => Topic,
|
||||
payload => Payload,
|
||||
timestamp => Timestamp,
|
||||
extra => Extra
|
||||
}.
|
||||
id = Id,
|
||||
qos = QoS,
|
||||
from = From,
|
||||
flags = Flags,
|
||||
headers = Headers,
|
||||
topic = Topic,
|
||||
payload = Payload,
|
||||
timestamp = Timestamp,
|
||||
extra = Extra
|
||||
}) ->
|
||||
#{
|
||||
id => Id,
|
||||
qos => QoS,
|
||||
from => From,
|
||||
flags => Flags,
|
||||
headers => Headers,
|
||||
topic => Topic,
|
||||
payload => Payload,
|
||||
timestamp => Timestamp,
|
||||
extra => Extra
|
||||
}.
|
||||
|
||||
%% @doc To map for logging, with payload dropped.
|
||||
to_log_map(Msg) -> maps:without([payload], to_map(Msg)).
|
||||
|
||||
%% @doc Message to tuple list
|
||||
-spec(to_list(emqx_types:message()) -> list()).
|
||||
-spec to_list(emqx_types:message()) -> list().
|
||||
to_list(Msg) ->
|
||||
lists:zip(record_info(fields, message), tl(tuple_to_list(Msg))).
|
||||
|
||||
%% @doc Map to message
|
||||
-spec(from_map(message_map()) -> emqx_types:message()).
|
||||
from_map(#{id := Id,
|
||||
qos := QoS,
|
||||
from := From,
|
||||
flags := Flags,
|
||||
headers := Headers,
|
||||
topic := Topic,
|
||||
payload := Payload,
|
||||
timestamp := Timestamp,
|
||||
extra := Extra
|
||||
}) ->
|
||||
-spec from_map(message_map()) -> emqx_types:message().
|
||||
from_map(#{
|
||||
id := Id,
|
||||
qos := QoS,
|
||||
from := From,
|
||||
flags := Flags,
|
||||
headers := Headers,
|
||||
topic := Topic,
|
||||
payload := Payload,
|
||||
timestamp := Timestamp,
|
||||
extra := Extra
|
||||
}) ->
|
||||
#message{
|
||||
id = Id,
|
||||
qos = QoS,
|
||||
|
|
|
|||
|
|
@ -23,57 +23,62 @@
|
|||
-include("types.hrl").
|
||||
-include("emqx_mqtt.hrl").
|
||||
|
||||
-export([ start_link/0
|
||||
, stop/0
|
||||
]).
|
||||
-export([
|
||||
start_link/0,
|
||||
stop/0
|
||||
]).
|
||||
|
||||
-export([ new/1
|
||||
, new/2
|
||||
, ensure/1
|
||||
, ensure/2
|
||||
, all/0
|
||||
]).
|
||||
-export([
|
||||
new/1,
|
||||
new/2,
|
||||
ensure/1,
|
||||
ensure/2,
|
||||
all/0
|
||||
]).
|
||||
|
||||
-export([ val/1
|
||||
, inc/1
|
||||
, inc/2
|
||||
, dec/1
|
||||
, dec/2
|
||||
, set/2
|
||||
]).
|
||||
-export([
|
||||
val/1,
|
||||
inc/1,
|
||||
inc/2,
|
||||
dec/1,
|
||||
dec/2,
|
||||
set/2
|
||||
]).
|
||||
|
||||
-export([ trans/2
|
||||
, trans/3
|
||||
, commit/0
|
||||
]).
|
||||
-export([
|
||||
trans/2,
|
||||
trans/3,
|
||||
commit/0
|
||||
]).
|
||||
|
||||
%% Inc received/sent metrics
|
||||
-export([ inc_msg/1
|
||||
, inc_recv/1
|
||||
, inc_sent/1
|
||||
]).
|
||||
-export([
|
||||
inc_msg/1,
|
||||
inc_recv/1,
|
||||
inc_sent/1
|
||||
]).
|
||||
|
||||
%% gen_server callbacks
|
||||
-export([ init/1
|
||||
, handle_call/3
|
||||
, handle_cast/2
|
||||
, handle_info/2
|
||||
, terminate/2
|
||||
, code_change/3
|
||||
]).
|
||||
-export([
|
||||
init/1,
|
||||
handle_call/3,
|
||||
handle_cast/2,
|
||||
handle_info/2,
|
||||
terminate/2,
|
||||
code_change/3
|
||||
]).
|
||||
|
||||
%% BACKW: v4.3.0
|
||||
-export([ upgrade_retained_delayed_counter_type/0
|
||||
]).
|
||||
-export([upgrade_retained_delayed_counter_type/0]).
|
||||
|
||||
-export_type([metric_idx/0]).
|
||||
|
||||
-compile({inline, [inc/1, inc/2, dec/1, dec/2]}).
|
||||
-compile({inline, [inc_recv/1, inc_sent/1]}).
|
||||
|
||||
-opaque(metric_idx() :: 1..1024).
|
||||
-opaque metric_idx() :: 1..1024.
|
||||
|
||||
-type(metric_name() :: atom() | string() | binary()).
|
||||
-type metric_name() :: atom() | string() | binary().
|
||||
|
||||
-define(MAX_SIZE, 1024).
|
||||
-define(RESERVED_IDX, 512).
|
||||
|
|
@ -82,133 +87,198 @@
|
|||
|
||||
%% Bytes sent and received
|
||||
-define(BYTES_METRICS,
|
||||
[{counter, 'bytes.received'}, % Total bytes received
|
||||
{counter, 'bytes.sent'} % Total bytes sent
|
||||
]).
|
||||
% Total bytes received
|
||||
[
|
||||
{counter, 'bytes.received'},
|
||||
% Total bytes sent
|
||||
{counter, 'bytes.sent'}
|
||||
]
|
||||
).
|
||||
|
||||
%% Packets sent and received
|
||||
-define(PACKET_METRICS,
|
||||
[{counter, 'packets.received'}, % All Packets received
|
||||
{counter, 'packets.sent'}, % All Packets sent
|
||||
{counter, 'packets.connect.received'}, % CONNECT Packets received
|
||||
{counter, 'packets.connack.sent'}, % CONNACK Packets sent
|
||||
{counter, 'packets.connack.error'}, % CONNACK error sent
|
||||
{counter, 'packets.connack.auth_error'}, % CONNACK auth_error sent
|
||||
{counter, 'packets.publish.received'}, % PUBLISH packets received
|
||||
{counter, 'packets.publish.sent'}, % PUBLISH packets sent
|
||||
{counter, 'packets.publish.inuse'}, % PUBLISH packet_id inuse
|
||||
{counter, 'packets.publish.error'}, % PUBLISH failed for error
|
||||
{counter, 'packets.publish.auth_error'}, % PUBLISH failed for auth error
|
||||
{counter, 'packets.publish.dropped'}, % PUBLISH(QoS2) packets dropped
|
||||
{counter, 'packets.puback.received'}, % PUBACK packets received
|
||||
{counter, 'packets.puback.sent'}, % PUBACK packets sent
|
||||
{counter, 'packets.puback.inuse'}, % PUBACK packet_id inuse
|
||||
{counter, 'packets.puback.missed'}, % PUBACK packets missed
|
||||
{counter, 'packets.pubrec.received'}, % PUBREC packets received
|
||||
{counter, 'packets.pubrec.sent'}, % PUBREC packets sent
|
||||
{counter, 'packets.pubrec.inuse'}, % PUBREC packet_id inuse
|
||||
{counter, 'packets.pubrec.missed'}, % PUBREC packets missed
|
||||
{counter, 'packets.pubrel.received'}, % PUBREL packets received
|
||||
{counter, 'packets.pubrel.sent'}, % PUBREL packets sent
|
||||
{counter, 'packets.pubrel.missed'}, % PUBREL packets missed
|
||||
{counter, 'packets.pubcomp.received'}, % PUBCOMP packets received
|
||||
{counter, 'packets.pubcomp.sent'}, % PUBCOMP packets sent
|
||||
{counter, 'packets.pubcomp.inuse'}, % PUBCOMP packet_id inuse
|
||||
{counter, 'packets.pubcomp.missed'}, % PUBCOMP packets missed
|
||||
{counter, 'packets.subscribe.received'}, % SUBSCRIBE Packets received
|
||||
{counter, 'packets.subscribe.error'}, % SUBSCRIBE error
|
||||
{counter, 'packets.subscribe.auth_error'}, % SUBSCRIBE failed for not auth
|
||||
{counter, 'packets.suback.sent'}, % SUBACK packets sent
|
||||
{counter, 'packets.unsubscribe.received'}, % UNSUBSCRIBE Packets received
|
||||
{counter, 'packets.unsubscribe.error'}, % UNSUBSCRIBE error
|
||||
{counter, 'packets.unsuback.sent'}, % UNSUBACK Packets sent
|
||||
{counter, 'packets.pingreq.received'}, % PINGREQ packets received
|
||||
{counter, 'packets.pingresp.sent'}, % PINGRESP Packets sent
|
||||
{counter, 'packets.disconnect.received'}, % DISCONNECT Packets received
|
||||
{counter, 'packets.disconnect.sent'}, % DISCONNECT Packets sent
|
||||
{counter, 'packets.auth.received'}, % Auth Packets received
|
||||
{counter, 'packets.auth.sent'} % Auth Packets sent
|
||||
]).
|
||||
% All Packets received
|
||||
[
|
||||
{counter, 'packets.received'},
|
||||
% All Packets sent
|
||||
{counter, 'packets.sent'},
|
||||
% CONNECT Packets received
|
||||
{counter, 'packets.connect.received'},
|
||||
% CONNACK Packets sent
|
||||
{counter, 'packets.connack.sent'},
|
||||
% CONNACK error sent
|
||||
{counter, 'packets.connack.error'},
|
||||
% CONNACK auth_error sent
|
||||
{counter, 'packets.connack.auth_error'},
|
||||
% PUBLISH packets received
|
||||
{counter, 'packets.publish.received'},
|
||||
% PUBLISH packets sent
|
||||
{counter, 'packets.publish.sent'},
|
||||
% PUBLISH packet_id inuse
|
||||
{counter, 'packets.publish.inuse'},
|
||||
% PUBLISH failed for error
|
||||
{counter, 'packets.publish.error'},
|
||||
% PUBLISH failed for auth error
|
||||
{counter, 'packets.publish.auth_error'},
|
||||
% PUBLISH(QoS2) packets dropped
|
||||
{counter, 'packets.publish.dropped'},
|
||||
% PUBACK packets received
|
||||
{counter, 'packets.puback.received'},
|
||||
% PUBACK packets sent
|
||||
{counter, 'packets.puback.sent'},
|
||||
% PUBACK packet_id inuse
|
||||
{counter, 'packets.puback.inuse'},
|
||||
% PUBACK packets missed
|
||||
{counter, 'packets.puback.missed'},
|
||||
% PUBREC packets received
|
||||
{counter, 'packets.pubrec.received'},
|
||||
% PUBREC packets sent
|
||||
{counter, 'packets.pubrec.sent'},
|
||||
% PUBREC packet_id inuse
|
||||
{counter, 'packets.pubrec.inuse'},
|
||||
% PUBREC packets missed
|
||||
{counter, 'packets.pubrec.missed'},
|
||||
% PUBREL packets received
|
||||
{counter, 'packets.pubrel.received'},
|
||||
% PUBREL packets sent
|
||||
{counter, 'packets.pubrel.sent'},
|
||||
% PUBREL packets missed
|
||||
{counter, 'packets.pubrel.missed'},
|
||||
% PUBCOMP packets received
|
||||
{counter, 'packets.pubcomp.received'},
|
||||
% PUBCOMP packets sent
|
||||
{counter, 'packets.pubcomp.sent'},
|
||||
% PUBCOMP packet_id inuse
|
||||
{counter, 'packets.pubcomp.inuse'},
|
||||
% PUBCOMP packets missed
|
||||
{counter, 'packets.pubcomp.missed'},
|
||||
% SUBSCRIBE Packets received
|
||||
{counter, 'packets.subscribe.received'},
|
||||
% SUBSCRIBE error
|
||||
{counter, 'packets.subscribe.error'},
|
||||
% SUBSCRIBE failed for not auth
|
||||
{counter, 'packets.subscribe.auth_error'},
|
||||
% SUBACK packets sent
|
||||
{counter, 'packets.suback.sent'},
|
||||
% UNSUBSCRIBE Packets received
|
||||
{counter, 'packets.unsubscribe.received'},
|
||||
% UNSUBSCRIBE error
|
||||
{counter, 'packets.unsubscribe.error'},
|
||||
% UNSUBACK Packets sent
|
||||
{counter, 'packets.unsuback.sent'},
|
||||
% PINGREQ packets received
|
||||
{counter, 'packets.pingreq.received'},
|
||||
% PINGRESP Packets sent
|
||||
{counter, 'packets.pingresp.sent'},
|
||||
% DISCONNECT Packets received
|
||||
{counter, 'packets.disconnect.received'},
|
||||
% DISCONNECT Packets sent
|
||||
{counter, 'packets.disconnect.sent'},
|
||||
% Auth Packets received
|
||||
{counter, 'packets.auth.received'},
|
||||
% Auth Packets sent
|
||||
{counter, 'packets.auth.sent'}
|
||||
]
|
||||
).
|
||||
|
||||
%% Messages sent/received and pubsub
|
||||
-define(MESSAGE_METRICS,
|
||||
[{counter, 'messages.received'}, % All Messages received
|
||||
{counter, 'messages.sent'}, % All Messages sent
|
||||
{counter, 'messages.qos0.received'}, % QoS0 Messages received
|
||||
{counter, 'messages.qos0.sent'}, % QoS0 Messages sent
|
||||
{counter, 'messages.qos1.received'}, % QoS1 Messages received
|
||||
{counter, 'messages.qos1.sent'}, % QoS1 Messages sent
|
||||
{counter, 'messages.qos2.received'}, % QoS2 Messages received
|
||||
{counter, 'messages.qos2.sent'}, % QoS2 Messages sent
|
||||
%% PubSub Metrics
|
||||
{counter, 'messages.publish'}, % Messages Publish
|
||||
{counter, 'messages.dropped'}, % Messages dropped due to no subscribers
|
||||
{counter, 'messages.dropped.await_pubrel_timeout'}, % QoS2 Messages expired
|
||||
{counter, 'messages.dropped.no_subscribers'}, % Messages dropped
|
||||
{counter, 'messages.forward'}, % Messages forward
|
||||
{counter, 'messages.delayed'}, % Messages delayed
|
||||
{counter, 'messages.delivered'}, % Messages delivered
|
||||
{counter, 'messages.acked'} % Messages acked
|
||||
]).
|
||||
% All Messages received
|
||||
[
|
||||
{counter, 'messages.received'},
|
||||
% All Messages sent
|
||||
{counter, 'messages.sent'},
|
||||
% QoS0 Messages received
|
||||
{counter, 'messages.qos0.received'},
|
||||
% QoS0 Messages sent
|
||||
{counter, 'messages.qos0.sent'},
|
||||
% QoS1 Messages received
|
||||
{counter, 'messages.qos1.received'},
|
||||
% QoS1 Messages sent
|
||||
{counter, 'messages.qos1.sent'},
|
||||
% QoS2 Messages received
|
||||
{counter, 'messages.qos2.received'},
|
||||
% QoS2 Messages sent
|
||||
{counter, 'messages.qos2.sent'},
|
||||
%% PubSub Metrics
|
||||
|
||||
% Messages Publish
|
||||
{counter, 'messages.publish'},
|
||||
% Messages dropped due to no subscribers
|
||||
{counter, 'messages.dropped'},
|
||||
% QoS2 Messages expired
|
||||
{counter, 'messages.dropped.await_pubrel_timeout'},
|
||||
% Messages dropped
|
||||
{counter, 'messages.dropped.no_subscribers'},
|
||||
% Messages forward
|
||||
{counter, 'messages.forward'},
|
||||
% Messages delayed
|
||||
{counter, 'messages.delayed'},
|
||||
% Messages delivered
|
||||
{counter, 'messages.delivered'},
|
||||
% Messages acked
|
||||
{counter, 'messages.acked'}
|
||||
]
|
||||
).
|
||||
|
||||
%% Delivery metrics
|
||||
-define(DELIVERY_METRICS,
|
||||
[{counter, 'delivery.dropped'},
|
||||
{counter, 'delivery.dropped.no_local'},
|
||||
{counter, 'delivery.dropped.too_large'},
|
||||
{counter, 'delivery.dropped.qos0_msg'},
|
||||
{counter, 'delivery.dropped.queue_full'},
|
||||
{counter, 'delivery.dropped.expired'}
|
||||
]).
|
||||
-define(DELIVERY_METRICS, [
|
||||
{counter, 'delivery.dropped'},
|
||||
{counter, 'delivery.dropped.no_local'},
|
||||
{counter, 'delivery.dropped.too_large'},
|
||||
{counter, 'delivery.dropped.qos0_msg'},
|
||||
{counter, 'delivery.dropped.queue_full'},
|
||||
{counter, 'delivery.dropped.expired'}
|
||||
]).
|
||||
|
||||
%% Client Lifecycle metrics
|
||||
-define(CLIENT_METRICS,
|
||||
[{counter, 'client.connect'},
|
||||
{counter, 'client.connack'},
|
||||
{counter, 'client.connected'},
|
||||
{counter, 'client.authenticate'},
|
||||
{counter, 'client.auth.anonymous'},
|
||||
{counter, 'client.authorize'},
|
||||
{counter, 'client.subscribe'},
|
||||
{counter, 'client.unsubscribe'},
|
||||
{counter, 'client.disconnected'}
|
||||
]).
|
||||
-define(CLIENT_METRICS, [
|
||||
{counter, 'client.connect'},
|
||||
{counter, 'client.connack'},
|
||||
{counter, 'client.connected'},
|
||||
{counter, 'client.authenticate'},
|
||||
{counter, 'client.auth.anonymous'},
|
||||
{counter, 'client.authorize'},
|
||||
{counter, 'client.subscribe'},
|
||||
{counter, 'client.unsubscribe'},
|
||||
{counter, 'client.disconnected'}
|
||||
]).
|
||||
|
||||
%% Session Lifecycle metrics
|
||||
-define(SESSION_METRICS,
|
||||
[{counter, 'session.created'},
|
||||
{counter, 'session.resumed'},
|
||||
{counter, 'session.takenover'},
|
||||
{counter, 'session.discarded'},
|
||||
{counter, 'session.terminated'}
|
||||
]).
|
||||
-define(SESSION_METRICS, [
|
||||
{counter, 'session.created'},
|
||||
{counter, 'session.resumed'},
|
||||
{counter, 'session.takenover'},
|
||||
{counter, 'session.discarded'},
|
||||
{counter, 'session.terminated'}
|
||||
]).
|
||||
|
||||
%% Statistic metrics for ACL checking
|
||||
-define(STASTS_ACL_METRICS,
|
||||
[ {counter, 'client.acl.allow'},
|
||||
{counter, 'client.acl.deny'},
|
||||
{counter, 'client.acl.cache_hit'}
|
||||
]).
|
||||
-define(STASTS_ACL_METRICS, [
|
||||
{counter, 'client.acl.allow'},
|
||||
{counter, 'client.acl.deny'},
|
||||
{counter, 'client.acl.cache_hit'}
|
||||
]).
|
||||
|
||||
%% Overload protection counters
|
||||
-define(OLP_METRICS,
|
||||
[{counter, 'olp.delay.ok'},
|
||||
{counter, 'olp.delay.timeout'},
|
||||
{counter, 'olp.hbn'},
|
||||
{counter, 'olp.gc'},
|
||||
{counter, 'olp.new_conn'}
|
||||
]).
|
||||
-define(OLP_METRICS, [
|
||||
{counter, 'olp.delay.ok'},
|
||||
{counter, 'olp.delay.timeout'},
|
||||
{counter, 'olp.hbn'},
|
||||
{counter, 'olp.gc'},
|
||||
{counter, 'olp.new_conn'}
|
||||
]).
|
||||
|
||||
-record(state, {next_idx = 1}).
|
||||
|
||||
-record(metric, {name, type, idx}).
|
||||
|
||||
%% @doc Start the metrics server.
|
||||
-spec(start_link() -> startlink_ret()).
|
||||
-spec start_link() -> startlink_ret().
|
||||
start_link() ->
|
||||
gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
|
||||
|
||||
-spec(stop() -> ok).
|
||||
-spec stop() -> ok.
|
||||
stop() -> gen_server:stop(?SERVER).
|
||||
|
||||
%% BACKW: v4.3.0
|
||||
|
|
@ -220,21 +290,21 @@ upgrade_retained_delayed_counter_type() ->
|
|||
%% Metrics API
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(new(metric_name()) -> ok).
|
||||
-spec new(metric_name()) -> ok.
|
||||
new(Name) ->
|
||||
new(counter, Name).
|
||||
|
||||
-spec(new(gauge|counter, metric_name()) -> ok).
|
||||
-spec new(gauge | counter, metric_name()) -> ok.
|
||||
new(gauge, Name) ->
|
||||
create(gauge, Name);
|
||||
new(counter, Name) ->
|
||||
create(counter, Name).
|
||||
|
||||
-spec(ensure(metric_name()) -> ok).
|
||||
-spec ensure(metric_name()) -> ok.
|
||||
ensure(Name) ->
|
||||
ensure(counter, Name).
|
||||
|
||||
-spec(ensure(gauge|counter, metric_name()) -> ok).
|
||||
-spec ensure(gauge | counter, metric_name()) -> ok.
|
||||
ensure(Type, Name) when Type =:= gauge; Type =:= counter ->
|
||||
case ets:lookup(?TAB, Name) of
|
||||
[] -> create(Type, Name);
|
||||
|
|
@ -249,73 +319,80 @@ create(Type, Name) ->
|
|||
end.
|
||||
|
||||
%% @doc Get all metrics
|
||||
-spec(all() -> [{metric_name(), non_neg_integer()}]).
|
||||
-spec all() -> [{metric_name(), non_neg_integer()}].
|
||||
all() ->
|
||||
CRef = persistent_term:get(?MODULE),
|
||||
[{Name, counters:get(CRef, Idx)}
|
||||
|| #metric{name = Name, idx = Idx} <- ets:tab2list(?TAB)].
|
||||
[
|
||||
{Name, counters:get(CRef, Idx)}
|
||||
|| #metric{name = Name, idx = Idx} <- ets:tab2list(?TAB)
|
||||
].
|
||||
|
||||
%% @doc Get metric value
|
||||
-spec(val(metric_name()) -> maybe(non_neg_integer())).
|
||||
-spec val(metric_name()) -> maybe(non_neg_integer()).
|
||||
val(Name) ->
|
||||
case ets:lookup(?TAB, Name) of
|
||||
[#metric{idx = Idx}] ->
|
||||
CRef = persistent_term:get(?MODULE),
|
||||
counters:get(CRef, Idx);
|
||||
[] -> undefined
|
||||
[] ->
|
||||
undefined
|
||||
end.
|
||||
|
||||
%% @doc Increase counter
|
||||
-spec(inc(metric_name()) -> ok).
|
||||
-spec inc(metric_name()) -> ok.
|
||||
inc(Name) ->
|
||||
inc(Name, 1).
|
||||
|
||||
%% @doc Increase metric value
|
||||
-spec(inc(metric_name(), pos_integer()) -> ok).
|
||||
-spec inc(metric_name(), pos_integer()) -> ok.
|
||||
inc(Name, Value) ->
|
||||
update_counter(Name, Value).
|
||||
|
||||
%% @doc Decrease metric value
|
||||
-spec(dec(metric_name()) -> ok).
|
||||
-spec dec(metric_name()) -> ok.
|
||||
dec(Name) ->
|
||||
dec(Name, 1).
|
||||
|
||||
%% @doc Decrease metric value
|
||||
-spec(dec(metric_name(), pos_integer()) -> ok).
|
||||
-spec dec(metric_name(), pos_integer()) -> ok.
|
||||
dec(Name, Value) ->
|
||||
update_counter(Name, -Value).
|
||||
|
||||
%% @doc Set metric value
|
||||
-spec(set(metric_name(), integer()) -> ok).
|
||||
-spec set(metric_name(), integer()) -> ok.
|
||||
set(Name, Value) ->
|
||||
CRef = persistent_term:get(?MODULE),
|
||||
Idx = ets:lookup_element(?TAB, Name, 4),
|
||||
counters:put(CRef, Idx, Value).
|
||||
|
||||
-spec(trans(inc | dec, metric_name()) -> ok).
|
||||
-spec trans(inc | dec, metric_name()) -> ok.
|
||||
trans(Op, Name) when Op =:= inc; Op =:= dec ->
|
||||
trans(Op, Name, 1).
|
||||
|
||||
-spec(trans(inc | dec, metric_name(), pos_integer()) -> ok).
|
||||
-spec trans(inc | dec, metric_name(), pos_integer()) -> ok.
|
||||
trans(inc, Name, Value) ->
|
||||
cache(Name, Value);
|
||||
trans(dec, Name, Value) ->
|
||||
cache(Name, -Value).
|
||||
|
||||
-spec(cache(metric_name(), integer()) -> ok).
|
||||
-spec cache(metric_name(), integer()) -> ok.
|
||||
cache(Name, Value) ->
|
||||
put('$metrics', case get('$metrics') of
|
||||
undefined ->
|
||||
#{Name => Value};
|
||||
Metrics ->
|
||||
maps:update_with(Name, fun(Cnt) -> Cnt + Value end, Value, Metrics)
|
||||
end),
|
||||
put(
|
||||
'$metrics',
|
||||
case get('$metrics') of
|
||||
undefined ->
|
||||
#{Name => Value};
|
||||
Metrics ->
|
||||
maps:update_with(Name, fun(Cnt) -> Cnt + Value end, Value, Metrics)
|
||||
end
|
||||
),
|
||||
ok.
|
||||
|
||||
-spec(commit() -> ok).
|
||||
-spec commit() -> ok.
|
||||
commit() ->
|
||||
case get('$metrics') of
|
||||
undefined -> ok;
|
||||
undefined ->
|
||||
ok;
|
||||
Metrics ->
|
||||
_ = erase('$metrics'),
|
||||
lists:foreach(fun update_counter/1, maps:to_list(Metrics))
|
||||
|
|
@ -326,18 +403,18 @@ update_counter({Name, Value}) ->
|
|||
|
||||
update_counter(Name, Value) ->
|
||||
CRef = persistent_term:get(?MODULE),
|
||||
CIdx = case reserved_idx(Name) of
|
||||
Idx when is_integer(Idx) -> Idx;
|
||||
undefined ->
|
||||
ets:lookup_element(?TAB, Name, 4)
|
||||
end,
|
||||
CIdx =
|
||||
case reserved_idx(Name) of
|
||||
Idx when is_integer(Idx) -> Idx;
|
||||
undefined -> ets:lookup_element(?TAB, Name, 4)
|
||||
end,
|
||||
counters:add(CRef, CIdx, Value).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Inc received/sent metrics
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(inc_msg(emqx_types:massage()) -> ok).
-spec inc_msg(emqx_types:message()) -> ok.
|
||||
inc_msg(Msg) ->
|
||||
case Msg#message.qos of
|
||||
0 -> inc('messages.qos0.received');
|
||||
|
|
@ -347,7 +424,7 @@ inc_msg(Msg) ->
|
|||
inc('messages.received').
|
||||
|
||||
%% @doc Inc packets received.
|
||||
-spec(inc_recv(emqx_types:packet()) -> ok).
|
||||
-spec inc_recv(emqx_types:packet()) -> ok.
|
||||
inc_recv(Packet) ->
|
||||
inc('packets.received'),
|
||||
do_inc_recv(Packet).
|
||||
|
|
@ -381,10 +458,11 @@ do_inc_recv(?PACKET(?DISCONNECT)) ->
|
|||
inc('packets.disconnect.received');
|
||||
do_inc_recv(?PACKET(?AUTH)) ->
|
||||
inc('packets.auth.received');
|
||||
do_inc_recv(_Packet) -> ok.
|
||||
do_inc_recv(_Packet) ->
|
||||
ok.
|
||||
|
||||
%% @doc Inc packets sent. Will not count $SYS PUBLISH.
|
||||
-spec(inc_sent(emqx_types:packet()) -> ok).
|
||||
-spec inc_sent(emqx_types:packet()) -> ok.
|
||||
inc_sent(?PUBLISH_PACKET(_QoS, <<"$SYS/", _/binary>>, _, _)) ->
|
||||
ok;
|
||||
inc_sent(Packet) ->
|
||||
|
|
@ -396,7 +474,6 @@ do_inc_sent(?CONNACK_PACKET(ReasonCode)) ->
|
|||
(ReasonCode == ?RC_NOT_AUTHORIZED) andalso inc('packets.connack.auth_error'),
|
||||
(ReasonCode == ?RC_BAD_USER_NAME_OR_PASSWORD) andalso inc('packets.connack.auth_error'),
|
||||
inc('packets.connack.sent');
|
||||
|
||||
do_inc_sent(?PUBLISH_PACKET(QoS)) ->
|
||||
inc('messages.sent'),
|
||||
case QoS of
|
||||
|
|
@ -428,7 +505,8 @@ do_inc_sent(?PACKET(?DISCONNECT)) ->
|
|||
inc('packets.disconnect.sent');
|
||||
do_inc_sent(?PACKET(?AUTH)) ->
|
||||
inc('packets.auth.sent');
|
||||
do_inc_sent(_Packet) -> ok.
|
||||
do_inc_sent(_Packet) ->
|
||||
ok.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% gen_server callbacks
|
||||
|
|
@ -440,22 +518,26 @@ init([]) ->
|
|||
ok = persistent_term:put(?MODULE, CRef),
|
||||
% Create index mapping table
|
||||
ok = emqx_tables:new(?TAB, [{keypos, 2}, {read_concurrency, true}]),
|
||||
Metrics = lists:append([?BYTES_METRICS,
|
||||
?PACKET_METRICS,
|
||||
?MESSAGE_METRICS,
|
||||
?DELIVERY_METRICS,
|
||||
?CLIENT_METRICS,
|
||||
?SESSION_METRICS,
|
||||
?STASTS_ACL_METRICS,
|
||||
?OLP_METRICS
|
||||
]),
|
||||
Metrics = lists:append([
|
||||
?BYTES_METRICS,
|
||||
?PACKET_METRICS,
|
||||
?MESSAGE_METRICS,
|
||||
?DELIVERY_METRICS,
|
||||
?CLIENT_METRICS,
|
||||
?SESSION_METRICS,
|
||||
?STASTS_ACL_METRICS,
|
||||
?OLP_METRICS
|
||||
]),
|
||||
% Store reserved indices
|
||||
ok = lists:foreach(fun({Type, Name}) ->
|
||||
Idx = reserved_idx(Name),
|
||||
Metric = #metric{name = Name, type = Type, idx = Idx},
|
||||
true = ets:insert(?TAB, Metric),
|
||||
ok = counters:put(CRef, Idx, 0)
|
||||
end, Metrics),
|
||||
ok = lists:foreach(
|
||||
fun({Type, Name}) ->
|
||||
Idx = reserved_idx(Name),
|
||||
Metric = #metric{name = Name, type = Type, idx = Idx},
|
||||
true = ets:insert(?TAB, Metric),
|
||||
ok = counters:put(CRef, Idx, 0)
|
||||
end,
|
||||
Metrics
|
||||
),
|
||||
{ok, #state{next_idx = ?RESERVED_IDX + 1}, hibernate}.
|
||||
|
||||
handle_call({create, Type, Name}, _From, State = #state{next_idx = ?MAX_SIZE}) ->
|
||||
|
|
@ -465,7 +547,6 @@ handle_call({create, Type, Name}, _From, State = #state{next_idx = ?MAX_SIZE}) -
|
|||
name => Name
|
||||
}),
|
||||
{reply, {error, metric_index_exceeded}, State};
|
||||
|
||||
handle_call({create, Type, Name}, _From, State = #state{next_idx = NextIdx}) ->
|
||||
case ets:lookup(?TAB, Name) of
|
||||
[#metric{idx = Idx}] ->
|
||||
|
|
@ -476,14 +557,14 @@ handle_call({create, Type, Name}, _From, State = #state{next_idx = NextIdx}) ->
|
|||
true = ets:insert(?TAB, Metric),
|
||||
{reply, {ok, NextIdx}, State#state{next_idx = NextIdx + 1}}
|
||||
end;
|
||||
|
||||
handle_call({set_type_to_counter, Keys}, _From, State) ->
|
||||
lists:foreach(
|
||||
fun(K) ->
|
||||
ets:update_element(?TAB, K, {#metric.type, counter})
|
||||
end, Keys),
|
||||
fun(K) ->
|
||||
ets:update_element(?TAB, K, {#metric.type, counter})
|
||||
end,
|
||||
Keys
|
||||
),
|
||||
{reply, ok, State};
|
||||
|
||||
handle_call(Req, _From, State) ->
|
||||
?SLOG(error, #{msg => "unexpected_call", req => Req}),
|
||||
{reply, ignored, State}.
|
||||
|
|
@ -506,100 +587,95 @@ code_change(_OldVsn, State, _Extra) ->
|
|||
%% Internal functions
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
reserved_idx('bytes.received') -> 01;
|
||||
reserved_idx('bytes.sent') -> 02;
|
||||
reserved_idx('bytes.received') -> 01;
|
||||
reserved_idx('bytes.sent') -> 02;
|
||||
%% Reserved indices of packet's metrics
|
||||
reserved_idx('packets.received') -> 10;
|
||||
reserved_idx('packets.sent') -> 11;
|
||||
reserved_idx('packets.connect.received') -> 12;
|
||||
reserved_idx('packets.connack.sent') -> 13;
|
||||
reserved_idx('packets.connack.error') -> 14;
|
||||
reserved_idx('packets.connack.auth_error') -> 15;
|
||||
reserved_idx('packets.publish.received') -> 16;
|
||||
reserved_idx('packets.publish.sent') -> 17;
|
||||
reserved_idx('packets.publish.inuse') -> 18;
|
||||
reserved_idx('packets.publish.error') -> 19;
|
||||
reserved_idx('packets.publish.auth_error') -> 20;
|
||||
reserved_idx('packets.puback.received') -> 21;
|
||||
reserved_idx('packets.puback.sent') -> 22;
|
||||
reserved_idx('packets.puback.inuse') -> 23;
|
||||
reserved_idx('packets.puback.missed') -> 24;
|
||||
reserved_idx('packets.pubrec.received') -> 25;
|
||||
reserved_idx('packets.pubrec.sent') -> 26;
|
||||
reserved_idx('packets.pubrec.inuse') -> 27;
|
||||
reserved_idx('packets.pubrec.missed') -> 28;
|
||||
reserved_idx('packets.pubrel.received') -> 29;
|
||||
reserved_idx('packets.pubrel.sent') -> 30;
|
||||
reserved_idx('packets.pubrel.missed') -> 31;
|
||||
reserved_idx('packets.pubcomp.received') -> 32;
|
||||
reserved_idx('packets.pubcomp.sent') -> 33;
|
||||
reserved_idx('packets.pubcomp.inuse') -> 34;
|
||||
reserved_idx('packets.pubcomp.missed') -> 35;
|
||||
reserved_idx('packets.subscribe.received') -> 36;
|
||||
reserved_idx('packets.subscribe.error') -> 37;
|
||||
reserved_idx('packets.received') -> 10;
|
||||
reserved_idx('packets.sent') -> 11;
|
||||
reserved_idx('packets.connect.received') -> 12;
|
||||
reserved_idx('packets.connack.sent') -> 13;
|
||||
reserved_idx('packets.connack.error') -> 14;
|
||||
reserved_idx('packets.connack.auth_error') -> 15;
|
||||
reserved_idx('packets.publish.received') -> 16;
|
||||
reserved_idx('packets.publish.sent') -> 17;
|
||||
reserved_idx('packets.publish.inuse') -> 18;
|
||||
reserved_idx('packets.publish.error') -> 19;
|
||||
reserved_idx('packets.publish.auth_error') -> 20;
|
||||
reserved_idx('packets.puback.received') -> 21;
|
||||
reserved_idx('packets.puback.sent') -> 22;
|
||||
reserved_idx('packets.puback.inuse') -> 23;
|
||||
reserved_idx('packets.puback.missed') -> 24;
|
||||
reserved_idx('packets.pubrec.received') -> 25;
|
||||
reserved_idx('packets.pubrec.sent') -> 26;
|
||||
reserved_idx('packets.pubrec.inuse') -> 27;
|
||||
reserved_idx('packets.pubrec.missed') -> 28;
|
||||
reserved_idx('packets.pubrel.received') -> 29;
|
||||
reserved_idx('packets.pubrel.sent') -> 30;
|
||||
reserved_idx('packets.pubrel.missed') -> 31;
|
||||
reserved_idx('packets.pubcomp.received') -> 32;
|
||||
reserved_idx('packets.pubcomp.sent') -> 33;
|
||||
reserved_idx('packets.pubcomp.inuse') -> 34;
|
||||
reserved_idx('packets.pubcomp.missed') -> 35;
|
||||
reserved_idx('packets.subscribe.received') -> 36;
|
||||
reserved_idx('packets.subscribe.error') -> 37;
|
||||
reserved_idx('packets.subscribe.auth_error') -> 38;
|
||||
reserved_idx('packets.suback.sent') -> 39;
|
||||
reserved_idx('packets.suback.sent') -> 39;
|
||||
reserved_idx('packets.unsubscribe.received') -> 40;
|
||||
reserved_idx('packets.unsubscribe.error') -> 41;
|
||||
reserved_idx('packets.unsuback.sent') -> 42;
|
||||
reserved_idx('packets.pingreq.received') -> 43;
|
||||
reserved_idx('packets.pingresp.sent') -> 44;
|
||||
reserved_idx('packets.disconnect.received') -> 45;
|
||||
reserved_idx('packets.disconnect.sent') -> 46;
|
||||
reserved_idx('packets.auth.received') -> 47;
|
||||
reserved_idx('packets.auth.sent') -> 48;
|
||||
reserved_idx('packets.publish.dropped') -> 49;
|
||||
reserved_idx('packets.unsubscribe.error') -> 41;
|
||||
reserved_idx('packets.unsuback.sent') -> 42;
|
||||
reserved_idx('packets.pingreq.received') -> 43;
|
||||
reserved_idx('packets.pingresp.sent') -> 44;
|
||||
reserved_idx('packets.disconnect.received') -> 45;
|
||||
reserved_idx('packets.disconnect.sent') -> 46;
|
||||
reserved_idx('packets.auth.received') -> 47;
|
||||
reserved_idx('packets.auth.sent') -> 48;
|
||||
reserved_idx('packets.publish.dropped') -> 49;
|
||||
%% Reserved indices of message's metrics
|
||||
reserved_idx('messages.received') -> 100;
|
||||
reserved_idx('messages.sent') -> 101;
|
||||
reserved_idx('messages.qos0.received') -> 102;
|
||||
reserved_idx('messages.qos0.sent') -> 103;
|
||||
reserved_idx('messages.qos1.received') -> 104;
|
||||
reserved_idx('messages.qos1.sent') -> 105;
|
||||
reserved_idx('messages.qos2.received') -> 106;
|
||||
reserved_idx('messages.qos2.sent') -> 107;
|
||||
reserved_idx('messages.publish') -> 108;
|
||||
reserved_idx('messages.dropped') -> 109;
|
||||
reserved_idx('messages.dropped.await_pubrel_timeout') -> 110;
|
||||
reserved_idx('messages.received') -> 100;
|
||||
reserved_idx('messages.sent') -> 101;
|
||||
reserved_idx('messages.qos0.received') -> 102;
|
||||
reserved_idx('messages.qos0.sent') -> 103;
|
||||
reserved_idx('messages.qos1.received') -> 104;
|
||||
reserved_idx('messages.qos1.sent') -> 105;
|
||||
reserved_idx('messages.qos2.received') -> 106;
|
||||
reserved_idx('messages.qos2.sent') -> 107;
|
||||
reserved_idx('messages.publish') -> 108;
|
||||
reserved_idx('messages.dropped') -> 109;
|
||||
reserved_idx('messages.dropped.await_pubrel_timeout') -> 110;
|
||||
reserved_idx('messages.dropped.no_subscribers') -> 111;
|
||||
reserved_idx('messages.forward') -> 112;
|
||||
reserved_idx('messages.forward') -> 112;
|
||||
%%reserved_idx('messages.retained') -> 113; %% keep the index, new metrics can use this
|
||||
reserved_idx('messages.delayed') -> 114;
|
||||
reserved_idx('messages.delivered') -> 115;
|
||||
reserved_idx('messages.acked') -> 116;
|
||||
reserved_idx('delivery.expired') -> 117;
|
||||
reserved_idx('delivery.dropped') -> 118;
|
||||
reserved_idx('delivery.dropped.no_local') -> 119;
|
||||
reserved_idx('delivery.dropped.too_large') -> 120;
|
||||
reserved_idx('delivery.dropped.qos0_msg') -> 121;
|
||||
reserved_idx('delivery.dropped.queue_full') -> 122;
|
||||
reserved_idx('delivery.dropped.expired') -> 123;
|
||||
|
||||
reserved_idx('client.connect') -> 200;
|
||||
reserved_idx('client.connack') -> 201;
|
||||
reserved_idx('client.connected') -> 202;
|
||||
reserved_idx('client.authenticate') -> 203;
|
||||
reserved_idx('messages.delayed') -> 114;
|
||||
reserved_idx('messages.delivered') -> 115;
|
||||
reserved_idx('messages.acked') -> 116;
|
||||
reserved_idx('delivery.expired') -> 117;
|
||||
reserved_idx('delivery.dropped') -> 118;
|
||||
reserved_idx('delivery.dropped.no_local') -> 119;
|
||||
reserved_idx('delivery.dropped.too_large') -> 120;
|
||||
reserved_idx('delivery.dropped.qos0_msg') -> 121;
|
||||
reserved_idx('delivery.dropped.queue_full') -> 122;
|
||||
reserved_idx('delivery.dropped.expired') -> 123;
|
||||
reserved_idx('client.connect') -> 200;
|
||||
reserved_idx('client.connack') -> 201;
|
||||
reserved_idx('client.connected') -> 202;
|
||||
reserved_idx('client.authenticate') -> 203;
|
||||
reserved_idx('client.enhanced_authenticate') -> 204;
|
||||
reserved_idx('client.auth.anonymous') -> 205;
|
||||
reserved_idx('client.authorize') -> 206;
|
||||
reserved_idx('client.subscribe') -> 207;
|
||||
reserved_idx('client.unsubscribe') -> 208;
|
||||
reserved_idx('client.disconnected') -> 209;
|
||||
|
||||
reserved_idx('session.created') -> 220;
|
||||
reserved_idx('session.resumed') -> 221;
|
||||
reserved_idx('session.takenover') -> 222;
|
||||
reserved_idx('session.discarded') -> 223;
|
||||
reserved_idx('session.terminated') -> 224;
|
||||
|
||||
reserved_idx('client.acl.allow') -> 300;
|
||||
reserved_idx('client.acl.deny') -> 301;
|
||||
reserved_idx('client.acl.cache_hit') -> 302;
|
||||
|
||||
reserved_idx('olp.delay.ok') -> 400;
|
||||
reserved_idx('olp.delay.timeout') -> 401;
|
||||
reserved_idx('olp.hbn') -> 402;
|
||||
reserved_idx('olp.gc') -> 403;
|
||||
reserved_idx('olp.new_conn') -> 404;
|
||||
|
||||
reserved_idx(_) -> undefined.
|
||||
reserved_idx('client.auth.anonymous') -> 205;
|
||||
reserved_idx('client.authorize') -> 206;
|
||||
reserved_idx('client.subscribe') -> 207;
|
||||
reserved_idx('client.unsubscribe') -> 208;
|
||||
reserved_idx('client.disconnected') -> 209;
|
||||
reserved_idx('session.created') -> 220;
|
||||
reserved_idx('session.resumed') -> 221;
|
||||
reserved_idx('session.takenover') -> 222;
|
||||
reserved_idx('session.discarded') -> 223;
|
||||
reserved_idx('session.terminated') -> 224;
|
||||
reserved_idx('client.acl.allow') -> 300;
|
||||
reserved_idx('client.acl.deny') -> 301;
|
||||
reserved_idx('client.acl.cache_hit') -> 302;
|
||||
reserved_idx('olp.delay.ok') -> 400;
|
||||
reserved_idx('olp.delay.timeout') -> 401;
|
||||
reserved_idx('olp.hbn') -> 402;
|
||||
reserved_idx('olp.gc') -> 403;
|
||||
reserved_idx('olp.new_conn') -> 404;
|
||||
reserved_idx(_) -> undefined.
|
||||
|
|
|
|||
|
|
@ -22,39 +22,41 @@
|
|||
-include("types.hrl").
|
||||
-include("logger.hrl").
|
||||
|
||||
-export([ merge_opts/2
|
||||
, maybe_apply/2
|
||||
, compose/1
|
||||
, compose/2
|
||||
, run_fold/3
|
||||
, pipeline/3
|
||||
, start_timer/2
|
||||
, start_timer/3
|
||||
, cancel_timer/1
|
||||
, drain_deliver/0
|
||||
, drain_deliver/1
|
||||
, drain_down/1
|
||||
, check_oom/1
|
||||
, check_oom/2
|
||||
, tune_heap_size/1
|
||||
, proc_name/2
|
||||
, proc_stats/0
|
||||
, proc_stats/1
|
||||
, rand_seed/0
|
||||
, now_to_secs/1
|
||||
, now_to_ms/1
|
||||
, index_of/2
|
||||
, maybe_parse_ip/1
|
||||
, ipv6_probe/1
|
||||
, gen_id/0
|
||||
, gen_id/1
|
||||
, explain_posix/1
|
||||
]).
|
||||
-export([
|
||||
merge_opts/2,
|
||||
maybe_apply/2,
|
||||
compose/1,
|
||||
compose/2,
|
||||
run_fold/3,
|
||||
pipeline/3,
|
||||
start_timer/2,
|
||||
start_timer/3,
|
||||
cancel_timer/1,
|
||||
drain_deliver/0,
|
||||
drain_deliver/1,
|
||||
drain_down/1,
|
||||
check_oom/1,
|
||||
check_oom/2,
|
||||
tune_heap_size/1,
|
||||
proc_name/2,
|
||||
proc_stats/0,
|
||||
proc_stats/1,
|
||||
rand_seed/0,
|
||||
now_to_secs/1,
|
||||
now_to_ms/1,
|
||||
index_of/2,
|
||||
maybe_parse_ip/1,
|
||||
ipv6_probe/1,
|
||||
gen_id/0,
|
||||
gen_id/1,
|
||||
explain_posix/1
|
||||
]).
|
||||
|
||||
-export([ bin2hexstr_A_F/1
|
||||
, bin2hexstr_a_f/1
|
||||
, hexstr2bin/1
|
||||
]).
|
||||
-export([
|
||||
bin2hexstr_A_F/1,
|
||||
bin2hexstr_a_f/1,
|
||||
hexstr2bin/1
|
||||
]).
|
||||
|
||||
-export([clamp/3]).
|
||||
|
||||
|
|
@ -77,33 +79,43 @@ ipv6_probe(Opts) ->
|
|||
end.
|
||||
|
||||
%% @doc Merge options
|
||||
-spec(merge_opts(Opts, Opts) -> Opts when Opts :: proplists:proplist()).
|
||||
-spec merge_opts(Opts, Opts) -> Opts when Opts :: proplists:proplist().
|
||||
merge_opts(Defaults, Options) ->
|
||||
lists:foldl(
|
||||
fun({Opt, Val}, Acc) ->
|
||||
lists:keystore(Opt, 1, Acc, {Opt, Val});
|
||||
(Opt, Acc) ->
|
||||
lists:usort([Opt | Acc])
|
||||
end, Defaults, Options).
|
||||
fun
|
||||
({Opt, Val}, Acc) ->
|
||||
lists:keystore(Opt, 1, Acc, {Opt, Val});
|
||||
(Opt, Acc) ->
|
||||
lists:usort([Opt | Acc])
|
||||
end,
|
||||
Defaults,
|
||||
Options
|
||||
).
|
||||
|
||||
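The fold above keeps {Key, Value} pairs unique by key (later options replace defaults) and unions bare atom flags. A small, hedged sketch (module assumed to be emqx_misc):

    %% Editor's illustration only.
    demo_merge_opts() ->
        Defaults = [{backlog, 128}, {nodelay, true}, binary],
        Overrides = [{backlog, 1024}, packet_raw],
        %% {backlog, 1024} replaces the default; 'binary' and 'packet_raw' are both kept.
        emqx_misc:merge_opts(Defaults, Overrides).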
%% @doc Apply a function to a maybe argument.
|
||||
-spec(maybe_apply(fun((maybe(A)) -> maybe(A)), maybe(A))
|
||||
-> maybe(A) when A :: any()).
|
||||
maybe_apply(_Fun, undefined) -> undefined;
|
||||
-spec maybe_apply(fun((maybe(A)) -> maybe(A)), maybe(A)) ->
|
||||
maybe(A)
|
||||
when
|
||||
A :: any().
|
||||
maybe_apply(_Fun, undefined) ->
|
||||
undefined;
|
||||
maybe_apply(Fun, Arg) when is_function(Fun) ->
|
||||
erlang:apply(Fun, [Arg]).
|
||||
|
||||
-spec(compose(list(F)) -> G
|
||||
when F :: fun((any()) -> any()),
|
||||
G :: fun((any()) -> any())).
|
||||
-spec compose(list(F)) -> G when
|
||||
F :: fun((any()) -> any()),
|
||||
G :: fun((any()) -> any()).
|
||||
compose([F | More]) -> compose(F, More).
|
||||
|
||||
-spec(compose(F, G | [Gs]) -> C
|
||||
when F :: fun((X1) -> X2),
|
||||
G :: fun((X2) -> X3),
|
||||
Gs :: [fun((Xn) -> Xn1)],
|
||||
C :: fun((X1) -> Xm),
|
||||
X3 :: any(), Xn :: any(), Xn1 :: any(), Xm :: any()).
|
||||
-spec compose(F, G | [Gs]) -> C when
|
||||
F :: fun((X1) -> X2),
|
||||
G :: fun((X2) -> X3),
|
||||
Gs :: [fun((Xn) -> Xn1)],
|
||||
C :: fun((X1) -> Xm),
|
||||
X3 :: any(),
|
||||
Xn :: any(),
|
||||
Xn1 :: any(),
|
||||
Xm :: any().
|
||||
compose(F, G) when is_function(G) -> fun(X) -> G(F(X)) end;
|
||||
compose(F, [G]) -> compose(F, G);
|
||||
compose(F, [G | More]) -> compose(compose(F, G), More).
|
||||
|
|
@ -117,18 +129,13 @@ run_fold([Fun | More], Acc, State) ->
|
|||
%% @doc Pipeline
|
||||
pipeline([], Input, State) ->
|
||||
{ok, Input, State};
|
||||
|
||||
pipeline([Fun | More], Input, State) ->
|
||||
case apply_fun(Fun, Input, State) of
|
||||
ok -> pipeline(More, Input, State);
|
||||
{ok, NState} ->
|
||||
pipeline(More, Input, NState);
|
||||
{ok, Output, NState} ->
|
||||
pipeline(More, Output, NState);
|
||||
{error, Reason} ->
|
||||
{error, Reason, State};
|
||||
{error, Reason, NState} ->
|
||||
{error, Reason, NState}
|
||||
{ok, NState} -> pipeline(More, Input, NState);
|
||||
{ok, Output, NState} -> pipeline(More, Output, NState);
|
||||
{error, Reason} -> {error, Reason, State};
|
||||
{error, Reason, NState} -> {error, Reason, NState}
|
||||
end.
|
||||
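pipeline/3 threads an Input and a State through a list of funs of arity 1 or 2; each fun may keep or replace either value, or abort with an error. A hedged sketch of that contract (module assumed to be emqx_misc):

    %% Editor's illustration only.
    demo_pipeline() ->
        Funs = [
            %% arity-1 fun: {ok, NState} keeps Input and replaces State
            fun(Input) -> {ok, Input + 1} end,
            %% arity-2 fun: {ok, Output, NState} replaces both
            fun(Input, State) -> {ok, Input, State * 2} end
        ],
        %% With Input = 1 and State = 1 this returns {ok, 1, 4}.
        emqx_misc:pipeline(Funs, 1, 1).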
|
||||
-compile({inline, [apply_fun/3]}).
|
||||
|
|
@ -138,23 +145,29 @@ apply_fun(Fun, Input, State) ->
|
|||
{arity, 2} -> Fun(Input, State)
|
||||
end.
|
||||
|
||||
-spec(start_timer(integer() | atom(), term()) -> maybe(reference())).
|
||||
-spec start_timer(integer() | atom(), term()) -> maybe(reference()).
|
||||
start_timer(Interval, Msg) ->
|
||||
start_timer(Interval, self(), Msg).
|
||||
|
||||
-spec(start_timer(integer() | atom(), pid() | atom(), term()) -> maybe(reference())).
|
||||
-spec start_timer(integer() | atom(), pid() | atom(), term()) -> maybe(reference()).
|
||||
start_timer(Interval, Dest, Msg) when is_number(Interval) ->
|
||||
erlang:start_timer(erlang:ceil(Interval), Dest, Msg);
|
||||
start_timer(_Atom, _Dest, _Msg) -> undefined.
|
||||
start_timer(_Atom, _Dest, _Msg) ->
|
||||
undefined.
|
||||
|
||||
-spec(cancel_timer(maybe(reference())) -> ok).
|
||||
-spec cancel_timer(maybe(reference())) -> ok.
|
||||
cancel_timer(Timer) when is_reference(Timer) ->
|
||||
case erlang:cancel_timer(Timer) of
|
||||
false ->
|
||||
receive {timeout, Timer, _} -> ok after 0 -> ok end;
|
||||
_ -> ok
|
||||
receive
|
||||
{timeout, Timer, _} -> ok
|
||||
after 0 -> ok
|
||||
end;
|
||||
_ ->
|
||||
ok
|
||||
end;
|
||||
cancel_timer(_) -> ok.
|
||||
cancel_timer(_) ->
|
||||
ok.
|
||||
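start_timer/2,3 wraps erlang:start_timer/3 (rounding the interval up) and returns undefined for non-numeric intervals; cancel_timer/1 also flushes an already-fired {timeout, Ref, Msg} message. A hedged usage sketch (module assumed to be emqx_misc):

    %% Editor's illustration only.
    demo_timer() ->
        TRef = emqx_misc:start_timer(timer:seconds(5), retry_delivery),
        receive
            {timeout, TRef, retry_delivery} -> fired
        after 100 ->
            ok = emqx_misc:cancel_timer(TRef),
            cancelled
        end.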
|
||||
%% @doc Drain delivers
|
||||
drain_deliver() ->
|
||||
|
|
@ -168,13 +181,13 @@ drain_deliver(0, Acc) ->
|
|||
drain_deliver(N, Acc) ->
|
||||
receive
|
||||
Deliver = {deliver, _Topic, _Msg} ->
|
||||
drain_deliver(N-1, [Deliver | Acc])
|
||||
drain_deliver(N - 1, [Deliver | Acc])
|
||||
after 0 ->
|
||||
lists:reverse(Acc)
|
||||
end.
|
||||
|
||||
%% @doc Drain process 'DOWN' events.
|
||||
-spec(drain_down(pos_integer()) -> list(pid())).
|
||||
-spec drain_down(pos_integer()) -> list(pid()).
|
||||
drain_down(Cnt) when Cnt > 0 ->
|
||||
drain_down(Cnt, []).
|
||||
|
||||
|
|
@ -183,7 +196,7 @@ drain_down(0, Acc) ->
|
|||
drain_down(Cnt, Acc) ->
|
||||
receive
|
||||
{'DOWN', _MRef, process, Pid, _Reason} ->
|
||||
drain_down(Cnt-1, [Pid | Acc])
|
||||
drain_down(Cnt - 1, [Pid | Acc])
|
||||
after 0 ->
|
||||
lists:reverse(Acc)
|
||||
end.
|
||||
|
|
@ -193,26 +206,32 @@ drain_down(Cnt, Acc) ->
|
|||
%% `ok': There is nothing out of the ordinary.
|
||||
%% `shutdown': Some numbers (message queue length hit the limit),
|
||||
%% hence shutdown for greater good (system stability).
|
||||
-spec(check_oom(emqx_types:oom_policy()) -> ok | {shutdown, term()}).
|
||||
-spec check_oom(emqx_types:oom_policy()) -> ok | {shutdown, term()}.
|
||||
check_oom(Policy) ->
|
||||
check_oom(self(), Policy).
|
||||
|
||||
-spec(check_oom(pid(), emqx_types:oom_policy()) -> ok | {shutdown, term()}).
|
||||
check_oom(_Pid, #{enable := false}) -> ok;
|
||||
check_oom(Pid, #{max_message_queue_len := MaxQLen,
|
||||
max_heap_size := MaxHeapSize}) ->
|
||||
-spec check_oom(pid(), emqx_types:oom_policy()) -> ok | {shutdown, term()}.
|
||||
check_oom(_Pid, #{enable := false}) ->
|
||||
ok;
|
||||
check_oom(Pid, #{
|
||||
max_message_queue_len := MaxQLen,
|
||||
max_heap_size := MaxHeapSize
|
||||
}) ->
|
||||
case process_info(Pid, [message_queue_len, total_heap_size]) of
|
||||
undefined -> ok;
|
||||
undefined ->
|
||||
ok;
|
||||
[{message_queue_len, QLen}, {total_heap_size, HeapSize}] ->
|
||||
do_check_oom([{QLen, MaxQLen, message_queue_too_long},
|
||||
{HeapSize, MaxHeapSize, proc_heap_too_large}
|
||||
])
|
||||
do_check_oom([
|
||||
{QLen, MaxQLen, message_queue_too_long},
|
||||
{HeapSize, MaxHeapSize, proc_heap_too_large}
|
||||
])
|
||||
end.
|
||||
|
||||
do_check_oom([]) -> ok;
|
||||
do_check_oom([]) ->
|
||||
ok;
|
||||
do_check_oom([{Val, Max, Reason} | Rest]) ->
|
||||
case is_integer(Max) andalso (0 < Max) andalso (Max < Val) of
|
||||
true -> {shutdown, Reason};
|
||||
true -> {shutdown, Reason};
|
||||
false -> do_check_oom(Rest)
|
||||
end.
|
||||
|
||||
|
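check_oom/2 compares a process's message_queue_len and total_heap_size against the limits in an OOM policy map and suggests shutdown when either limit is exceeded. A hedged sketch of a caller (module assumed to be emqx_misc; the limit values are arbitrary):

    %% Editor's illustration only; the heap limit is in words, per total_heap_size.
    demo_check_oom(Pid) ->
        Policy = #{
            enable => true,
            max_message_queue_len => 10000,
            max_heap_size => 4194304
        },
        case emqx_misc:check_oom(Pid, Policy) of
            ok -> ok;
            {shutdown, Reason} -> exit(Pid, {shutdown, Reason})
        end.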
|
@ -220,53 +239,59 @@ tune_heap_size(#{enable := false}) ->
|
|||
ok;
|
||||
%% If the max_heap_size is set to zero, the limit is disabled.
|
||||
tune_heap_size(#{max_heap_size := MaxHeapSize}) when MaxHeapSize > 0 ->
|
||||
MaxSize = case erlang:system_info(wordsize) of
|
||||
8 -> % arch_64
|
||||
(1 bsl 59) - 1;
|
||||
4 -> % arch_32
|
||||
(1 bsl 27) - 1
|
||||
end,
|
||||
OverflowedSize = case erlang:trunc(MaxHeapSize * 1.5) of
|
||||
SZ when SZ > MaxSize -> MaxSize;
|
||||
SZ -> SZ
|
||||
end,
|
||||
MaxSize =
|
||||
case erlang:system_info(wordsize) of
|
||||
% arch_64
|
||||
8 ->
|
||||
(1 bsl 59) - 1;
|
||||
% arch_32
|
||||
4 ->
|
||||
(1 bsl 27) - 1
|
||||
end,
|
||||
OverflowedSize =
|
||||
case erlang:trunc(MaxHeapSize * 1.5) of
|
||||
SZ when SZ > MaxSize -> MaxSize;
|
||||
SZ -> SZ
|
||||
end,
|
||||
erlang:process_flag(max_heap_size, #{
|
||||
size => OverflowedSize,
|
||||
kill => true,
|
||||
error_logger => true
|
||||
}).
|
||||
|
||||
|
||||
-spec(proc_name(atom(), pos_integer()) -> atom()).
|
||||
-spec proc_name(atom(), pos_integer()) -> atom().
|
||||
proc_name(Mod, Id) ->
|
||||
list_to_atom(lists:concat([Mod, "_", Id])).
|
||||
|
||||
%% Get Proc's Stats.
|
||||
-spec(proc_stats() -> emqx_types:stats()).
|
||||
-spec proc_stats() -> emqx_types:stats().
|
||||
proc_stats() -> proc_stats(self()).
|
||||
|
||||
-spec(proc_stats(pid()) -> emqx_types:stats()).
|
||||
-spec proc_stats(pid()) -> emqx_types:stats().
|
||||
proc_stats(Pid) ->
|
||||
case process_info(Pid, [message_queue_len,
|
||||
heap_size,
|
||||
total_heap_size,
|
||||
reductions,
|
||||
memory]) of
|
||||
case
|
||||
process_info(Pid, [
|
||||
message_queue_len,
|
||||
heap_size,
|
||||
total_heap_size,
|
||||
reductions,
|
||||
memory
|
||||
])
|
||||
of
|
||||
undefined -> [];
|
||||
[{message_queue_len, Len} | ProcStats] ->
|
||||
[{mailbox_len, Len} | ProcStats]
|
||||
[{message_queue_len, Len} | ProcStats] -> [{mailbox_len, Len} | ProcStats]
|
||||
end.
|
||||
|
||||
rand_seed() ->
|
||||
rand:seed(exsplus, erlang:timestamp()).
|
||||
|
||||
-spec(now_to_secs(erlang:timestamp()) -> pos_integer()).
|
||||
-spec now_to_secs(erlang:timestamp()) -> pos_integer().
|
||||
now_to_secs({MegaSecs, Secs, _MicroSecs}) ->
|
||||
MegaSecs * 1000000 + Secs.
|
||||
|
||||
-spec(now_to_ms(erlang:timestamp()) -> pos_integer()).
|
||||
-spec now_to_ms(erlang:timestamp()) -> pos_integer().
|
||||
now_to_ms({MegaSecs, Secs, MicroSecs}) ->
|
||||
(MegaSecs * 1000000 + Secs) * 1000 + round(MicroSecs/1000).
|
||||
(MegaSecs * 1000000 + Secs) * 1000 + round(MicroSecs / 1000).
|
||||
|
||||
%% lists:index_of/2
|
||||
index_of(E, L) ->
|
||||
|
|
@ -277,33 +302,33 @@ index_of(_E, _I, []) ->
|
|||
index_of(E, I, [E | _]) ->
|
||||
I;
|
||||
index_of(E, I, [_ | L]) ->
|
||||
index_of(E, I+1, L).
|
||||
index_of(E, I + 1, L).
|
||||
|
||||
-spec(bin2hexstr_A_F(binary()) -> binary()).
|
||||
-spec bin2hexstr_A_F(binary()) -> binary().
|
||||
bin2hexstr_A_F(B) when is_binary(B) ->
|
||||
<< <<(int2hexchar(H, upper)), (int2hexchar(L, upper))>> || <<H:4, L:4>> <= B>>.
|
||||
<<<<(int2hexchar(H, upper)), (int2hexchar(L, upper))>> || <<H:4, L:4>> <= B>>.
|
||||
|
||||
-spec(bin2hexstr_a_f(binary()) -> binary()).
|
||||
-spec bin2hexstr_a_f(binary()) -> binary().
|
||||
bin2hexstr_a_f(B) when is_binary(B) ->
|
||||
<< <<(int2hexchar(H, lower)), (int2hexchar(L, lower))>> || <<H:4, L:4>> <= B>>.
|
||||
<<<<(int2hexchar(H, lower)), (int2hexchar(L, lower))>> || <<H:4, L:4>> <= B>>.
|
||||
|
||||
int2hexchar(I, _) when I >= 0 andalso I < 10 -> I + $0;
|
||||
int2hexchar(I, upper) -> I - 10 + $A;
|
||||
int2hexchar(I, lower) -> I - 10 + $a.
|
||||
|
||||
-spec(hexstr2bin(binary()) -> binary()).
|
||||
-spec hexstr2bin(binary()) -> binary().
|
||||
hexstr2bin(B) when is_binary(B) ->
|
||||
<< <<(hexchar2int(H)*16 + hexchar2int(L))>> || <<H:8, L:8>> <= B>>.
|
||||
<<<<(hexchar2int(H) * 16 + hexchar2int(L))>> || <<H:8, L:8>> <= B>>.
|
||||
|
||||
hexchar2int(I) when I >= $0 andalso I =< $9 -> I - $0;
|
||||
hexchar2int(I) when I >= $A andalso I =< $F -> I - $A + 10;
|
||||
hexchar2int(I) when I >= $a andalso I =< $f -> I - $a + 10.
|
||||
|
||||
-spec(gen_id() -> list()).
|
||||
-spec gen_id() -> list().
|
||||
gen_id() ->
|
||||
gen_id(?SHORT).
|
||||
|
||||
-spec(gen_id(integer()) -> list()).
|
||||
-spec gen_id(integer()) -> list().
|
||||
gen_id(Len) ->
|
||||
BitLen = Len * 4,
|
||||
<<R:BitLen>> = crypto:strong_rand_bytes(Len div 2),
|
||||
|
|
@ -356,8 +381,9 @@ explain_posix(NotPosix) -> NotPosix.
|
|||
int_to_hex(I, N) when is_integer(I), I >= 0 ->
|
||||
int_to_hex([], I, 1, N).
|
||||
|
||||
int_to_hex(L, I, Count, N)
|
||||
when I < 16 ->
|
||||
int_to_hex(L, I, Count, N) when
|
||||
I < 16
|
||||
->
|
||||
pad([int_to_hex(I) | L], N - Count);
|
||||
int_to_hex(L, I, Count, N) ->
|
||||
int_to_hex([int_to_hex(I rem 16) | L], I div 16, Count + 1, N).
|
||||
|
|
@ -380,7 +406,7 @@ ipv6_probe_test() ->
|
|||
true ->
|
||||
?assertEqual([{ipv6_probe, true}], ipv6_probe([]))
|
||||
catch
|
||||
_ : _ ->
|
||||
_:_ ->
|
||||
ok
|
||||
end.
|
||||
|
||||
|
|
|
|||
|
|
@ -20,20 +20,22 @@
|
|||
-include("emqx_placeholder.hrl").
|
||||
-include("types.hrl").
|
||||
|
||||
-export([ mount/2
|
||||
, unmount/2
|
||||
]).
|
||||
-export([
|
||||
mount/2,
|
||||
unmount/2
|
||||
]).
|
||||
|
||||
-export([replvar/2]).
|
||||
|
||||
-export_type([mountpoint/0]).
|
||||
|
||||
-type(mountpoint() :: binary()).
|
||||
-type mountpoint() :: binary().
|
||||
|
||||
-spec(mount(maybe(mountpoint()), Any) -> Any
|
||||
when Any :: emqx_types:topic()
|
||||
| emqx_types:message()
|
||||
| emqx_types:topic_filters()).
|
||||
-spec mount(maybe(mountpoint()), Any) -> Any when
|
||||
Any ::
|
||||
emqx_types:topic()
|
||||
| emqx_types:message()
|
||||
| emqx_types:topic_filters().
|
||||
mount(undefined, Any) ->
|
||||
Any;
|
||||
mount(MountPoint, Topic) when is_binary(Topic) ->
|
||||
|
|
@ -48,33 +50,35 @@ mount(MountPoint, TopicFilters) when is_list(TopicFilters) ->
|
|||
prefix(MountPoint, Topic) ->
|
||||
<<MountPoint/binary, Topic/binary>>.
|
||||
|
||||
-spec(unmount(maybe(mountpoint()), Any) -> Any
|
||||
when Any :: emqx_types:topic()
|
||||
| emqx_types:message()).
|
||||
-spec unmount(maybe(mountpoint()), Any) -> Any when
|
||||
Any ::
|
||||
emqx_types:topic()
|
||||
| emqx_types:message().
|
||||
unmount(undefined, Any) ->
|
||||
Any;
|
||||
unmount(MountPoint, Topic) when is_binary(Topic) ->
|
||||
case string:prefix(Topic, MountPoint) of
|
||||
nomatch -> Topic;
|
||||
Topic1 -> Topic1
|
||||
Topic1 -> Topic1
|
||||
end;
|
||||
unmount(MountPoint, Msg = #message{topic = Topic}) ->
|
||||
case string:prefix(Topic, MountPoint) of
|
||||
nomatch -> Msg;
|
||||
Topic1 -> Msg#message{topic = Topic1}
|
||||
Topic1 -> Msg#message{topic = Topic1}
|
||||
end.
|
||||
|
||||
-spec(replvar(maybe(mountpoint()), map()) -> maybe(mountpoint())).
|
||||
-spec replvar(maybe(mountpoint()), map()) -> maybe(mountpoint()).
|
||||
replvar(undefined, _Vars) ->
|
||||
undefined;
|
||||
replvar(MountPoint, Vars) ->
|
||||
ClientID = maps:get(clientid, Vars, undefined),
|
||||
UserName = maps:get(username, Vars, undefined),
|
||||
ClientID = maps:get(clientid, Vars, undefined),
|
||||
UserName = maps:get(username, Vars, undefined),
|
||||
EndpointName = maps:get(endpoint_name, Vars, undefined),
|
||||
List = [ {?PH_CLIENTID, ClientID}
|
||||
, {?PH_USERNAME, UserName}
|
||||
, {?PH_ENDPOINT_NAME, EndpointName}
|
||||
],
|
||||
List = [
|
||||
{?PH_CLIENTID, ClientID},
|
||||
{?PH_USERNAME, UserName},
|
||||
{?PH_ENDPOINT_NAME, EndpointName}
|
||||
],
|
||||
lists:foldl(fun feed_var/2, MountPoint, List).
|
||||
|
||||
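replvar/2 substitutes the client's identifiers into the configured mountpoint, and mount/2 / unmount/2 then add or strip that prefix on topics and messages. A hedged sketch (module assumed to be emqx_mountpoint; the "${clientid}" placeholder syntax is inferred from ?PH_CLIENTID and is an assumption):

    %% Editor's illustration only.
    demo_mount() ->
        MP = emqx_mountpoint:replvar(<<"${clientid}/">>, #{clientid => <<"c1">>}),
        %% e.g. MP = <<"c1/">>
        Mounted = emqx_mountpoint:mount(MP, <<"sensors/t">>),
        %% e.g. <<"c1/sensors/t">>
        emqx_mountpoint:unmount(MP, Mounted).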
feed_var({_PlaceHolder, undefined}, MountPoint) ->
|
||||
|
|
|
|||
|
|
@ -20,100 +20,128 @@
|
|||
-include("emqx_mqtt.hrl").
|
||||
-include("types.hrl").
|
||||
|
||||
-export([ check_pub/2
|
||||
, check_sub/3
|
||||
]).
|
||||
-export([
|
||||
check_pub/2,
|
||||
check_sub/3
|
||||
]).
|
||||
|
||||
-export([ get_caps/1
|
||||
]).
|
||||
-export([get_caps/1]).
|
||||
|
||||
-export_type([caps/0]).
|
||||
|
||||
-type(caps() :: #{max_packet_size => integer(),
|
||||
max_clientid_len => integer(),
|
||||
max_topic_alias => integer(),
|
||||
max_topic_levels => integer(),
|
||||
max_qos_allowed => emqx_types:qos(),
|
||||
retain_available => boolean(),
|
||||
wildcard_subscription => boolean(),
|
||||
subscription_identifiers => boolean(),
|
||||
shared_subscription => boolean()
|
||||
}).
|
||||
-type caps() :: #{
|
||||
max_packet_size => integer(),
|
||||
max_clientid_len => integer(),
|
||||
max_topic_alias => integer(),
|
||||
max_topic_levels => integer(),
|
||||
max_qos_allowed => emqx_types:qos(),
|
||||
retain_available => boolean(),
|
||||
wildcard_subscription => boolean(),
|
||||
subscription_identifiers => boolean(),
|
||||
shared_subscription => boolean()
|
||||
}.
|
||||
|
||||
-define(MAX_TOPIC_LEVELS, 65535).
|
||||
|
||||
-define(PUBCAP_KEYS, [max_topic_levels,
|
||||
max_qos_allowed,
|
||||
retain_available
|
||||
]).
|
||||
-define(PUBCAP_KEYS, [
|
||||
max_topic_levels,
|
||||
max_qos_allowed,
|
||||
retain_available
|
||||
]).
|
||||
|
||||
-define(SUBCAP_KEYS, [max_topic_levels,
|
||||
max_qos_allowed,
|
||||
wildcard_subscription,
|
||||
shared_subscription
|
||||
]).
|
||||
-define(SUBCAP_KEYS, [
|
||||
max_topic_levels,
|
||||
max_qos_allowed,
|
||||
wildcard_subscription,
|
||||
shared_subscription
|
||||
]).
|
||||
|
||||
-define(DEFAULT_CAPS, #{max_packet_size => ?MAX_PACKET_SIZE,
|
||||
max_clientid_len => ?MAX_CLIENTID_LEN,
|
||||
max_topic_alias => ?MAX_TOPIC_AlIAS,
|
||||
max_topic_levels => ?MAX_TOPIC_LEVELS,
|
||||
max_qos_allowed => ?QOS_2,
|
||||
retain_available => true,
|
||||
wildcard_subscription => true,
|
||||
subscription_identifiers => true,
|
||||
shared_subscription => true
|
||||
}).
|
||||
-define(DEFAULT_CAPS, #{
|
||||
max_packet_size => ?MAX_PACKET_SIZE,
|
||||
max_clientid_len => ?MAX_CLIENTID_LEN,
|
||||
max_topic_alias => ?MAX_TOPIC_AlIAS,
|
||||
max_topic_levels => ?MAX_TOPIC_LEVELS,
|
||||
max_qos_allowed => ?QOS_2,
|
||||
retain_available => true,
|
||||
wildcard_subscription => true,
|
||||
subscription_identifiers => true,
|
||||
shared_subscription => true
|
||||
}).
|
||||
|
||||
-spec(check_pub(emqx_types:zone(),
|
||||
#{qos := emqx_types:qos(),
|
||||
retain := boolean(),
|
||||
topic := emqx_types:topic()})
|
||||
-> ok_or_error(emqx_types:reason_code())).
|
||||
-spec check_pub(
|
||||
emqx_types:zone(),
|
||||
#{
|
||||
qos := emqx_types:qos(),
|
||||
retain := boolean(),
|
||||
topic := emqx_types:topic()
|
||||
}
|
||||
) ->
|
||||
ok_or_error(emqx_types:reason_code()).
|
||||
check_pub(Zone, Flags) when is_map(Flags) ->
|
||||
do_check_pub(case maps:take(topic, Flags) of
|
||||
{Topic, Flags1} ->
|
||||
Flags1#{topic_levels => emqx_topic:levels(Topic)};
|
||||
error ->
|
||||
Flags
|
||||
end, maps:with(?PUBCAP_KEYS, get_caps(Zone))).
|
||||
do_check_pub(
|
||||
case maps:take(topic, Flags) of
|
||||
{Topic, Flags1} ->
|
||||
Flags1#{topic_levels => emqx_topic:levels(Topic)};
|
||||
error ->
|
||||
Flags
|
||||
end,
|
||||
maps:with(?PUBCAP_KEYS, get_caps(Zone))
|
||||
).
|
||||
|
||||
do_check_pub(#{topic_levels := Levels}, #{max_topic_levels := Limit})
|
||||
when Limit > 0, Levels > Limit ->
|
||||
do_check_pub(#{topic_levels := Levels}, #{max_topic_levels := Limit}) when
|
||||
Limit > 0, Levels > Limit
|
||||
->
|
||||
{error, ?RC_TOPIC_NAME_INVALID};
|
||||
do_check_pub(#{qos := QoS}, #{max_qos_allowed := MaxQoS})
|
||||
when QoS > MaxQoS ->
|
||||
do_check_pub(#{qos := QoS}, #{max_qos_allowed := MaxQoS}) when
|
||||
QoS > MaxQoS
|
||||
->
|
||||
{error, ?RC_QOS_NOT_SUPPORTED};
|
||||
do_check_pub(#{retain := true}, #{retain_available := false}) ->
|
||||
{error, ?RC_RETAIN_NOT_SUPPORTED};
|
||||
do_check_pub(_Flags, _Caps) -> ok.
|
||||
do_check_pub(_Flags, _Caps) ->
|
||||
ok.
|
||||
|
||||
-spec(check_sub(emqx_types:zone(),
|
||||
emqx_types:topic(),
|
||||
emqx_types:subopts())
|
||||
-> ok_or_error(emqx_types:reason_code())).
|
||||
-spec check_sub(
|
||||
emqx_types:zone(),
|
||||
emqx_types:topic(),
|
||||
emqx_types:subopts()
|
||||
) ->
|
||||
ok_or_error(emqx_types:reason_code()).
|
||||
check_sub(Zone, Topic, SubOpts) ->
|
||||
Caps = maps:with(?SUBCAP_KEYS, get_caps(Zone)),
|
||||
Flags = lists:foldl(
|
||||
fun(max_topic_levels, Map) ->
|
||||
Map#{topic_levels => emqx_topic:levels(Topic)};
|
||||
(wildcard_subscription, Map) ->
|
||||
Map#{is_wildcard => emqx_topic:wildcard(Topic)};
|
||||
(shared_subscription, Map) ->
|
||||
Map#{is_shared => maps:is_key(share, SubOpts)};
|
||||
(_Key, Map) -> Map %% Ignore
|
||||
end, #{}, maps:keys(Caps)),
|
||||
fun
|
||||
(max_topic_levels, Map) ->
|
||||
Map#{topic_levels => emqx_topic:levels(Topic)};
|
||||
(wildcard_subscription, Map) ->
|
||||
Map#{is_wildcard => emqx_topic:wildcard(Topic)};
|
||||
(shared_subscription, Map) ->
|
||||
Map#{is_shared => maps:is_key(share, SubOpts)};
|
||||
%% Ignore
|
||||
(_Key, Map) ->
|
||||
Map
|
||||
end,
|
||||
#{},
|
||||
maps:keys(Caps)
|
||||
),
|
||||
do_check_sub(Flags, Caps).
|
||||
|
||||
do_check_sub(#{topic_levels := Levels}, #{max_topic_levels := Limit})
|
||||
when Limit > 0, Levels > Limit ->
|
||||
do_check_sub(#{topic_levels := Levels}, #{max_topic_levels := Limit}) when
|
||||
Limit > 0, Levels > Limit
|
||||
->
|
||||
{error, ?RC_TOPIC_FILTER_INVALID};
|
||||
do_check_sub(#{is_wildcard := true}, #{wildcard_subscription := false}) ->
|
||||
{error, ?RC_WILDCARD_SUBSCRIPTIONS_NOT_SUPPORTED};
|
||||
do_check_sub(#{is_shared := true}, #{shared_subscription := false}) ->
|
||||
{error, ?RC_SHARED_SUBSCRIPTIONS_NOT_SUPPORTED};
|
||||
do_check_sub(_Flags, _Caps) -> ok.
|
||||
do_check_sub(_Flags, _Caps) ->
|
||||
ok.
|
||||
|
||||
get_caps(Zone) ->
|
||||
lists:foldl(fun({K, V}, Acc) ->
|
||||
lists:foldl(
|
||||
fun({K, V}, Acc) ->
|
||||
Acc#{K => emqx_config:get_zone_conf(Zone, [mqtt, K], V)}
|
||||
end, #{}, maps:to_list(?DEFAULT_CAPS)).
|
||||
end,
|
||||
#{},
|
||||
maps:to_list(?DEFAULT_CAPS)
|
||||
).
|
||||
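check_pub/2 and check_sub/3 validate a publish or a subscription against the per-zone MQTT capabilities resolved by get_caps/1. A hedged sketch (module assumed to be emqx_mqtt_caps; the zone name 'default' and a populated emqx_config are assumptions):

    %% Editor's illustration only.
    demo_caps() ->
        PubResult = emqx_mqtt_caps:check_pub(default, #{
            qos => 1,
            retain => false,
            topic => <<"sensors/t">>
        }),
        SubResult = emqx_mqtt_caps:check_sub(default, <<"sensors/#">>, #{qos => 2}),
        {PubResult, SubResult}.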
|
|
|
|||
|
|
@ -19,86 +19,97 @@
|
|||
|
||||
-include("emqx_mqtt.hrl").
|
||||
|
||||
-export([ id/1
|
||||
, name/1
|
||||
, filter/2
|
||||
, validate/1
|
||||
, new/0
|
||||
]).
|
||||
-export([
|
||||
id/1,
|
||||
name/1,
|
||||
filter/2,
|
||||
validate/1,
|
||||
new/0
|
||||
]).
|
||||
|
||||
%% For tests
|
||||
-export([all/0]).
|
||||
|
||||
-export([ set/3
|
||||
, get/3
|
||||
]).
|
||||
-export([
|
||||
set/3,
|
||||
get/3
|
||||
]).
|
||||
|
||||
-type(prop_name() :: atom()).
|
||||
-type(prop_id() :: pos_integer()).
|
||||
-type prop_name() :: atom().
|
||||
-type prop_id() :: pos_integer().
|
||||
|
||||
-define(PROPS_TABLE,
|
||||
#{16#01 => {'Payload-Format-Indicator', 'Byte', [?PUBLISH]},
|
||||
16#02 => {'Message-Expiry-Interval', 'Four-Byte-Integer', [?PUBLISH]},
|
||||
16#03 => {'Content-Type', 'UTF8-Encoded-String', [?PUBLISH]},
|
||||
16#08 => {'Response-Topic', 'UTF8-Encoded-String', [?PUBLISH]},
|
||||
16#09 => {'Correlation-Data', 'Binary-Data', [?PUBLISH]},
|
||||
16#0B => {'Subscription-Identifier', 'Variable-Byte-Integer', [?PUBLISH, ?SUBSCRIBE]},
|
||||
16#11 => {'Session-Expiry-Interval', 'Four-Byte-Integer', [?CONNECT, ?CONNACK, ?DISCONNECT]},
|
||||
16#12 => {'Assigned-Client-Identifier', 'UTF8-Encoded-String', [?CONNACK]},
|
||||
16#13 => {'Server-Keep-Alive', 'Two-Byte-Integer', [?CONNACK]},
|
||||
16#15 => {'Authentication-Method', 'UTF8-Encoded-String', [?CONNECT, ?CONNACK, ?AUTH]},
|
||||
16#16 => {'Authentication-Data', 'Binary-Data', [?CONNECT, ?CONNACK, ?AUTH]},
|
||||
16#17 => {'Request-Problem-Information', 'Byte', [?CONNECT]},
|
||||
16#18 => {'Will-Delay-Interval', 'Four-Byte-Integer', ['WILL']},
|
||||
16#19 => {'Request-Response-Information', 'Byte', [?CONNECT]},
|
||||
16#1A => {'Response-Information', 'UTF8-Encoded-String', [?CONNACK]},
|
||||
16#1C => {'Server-Reference', 'UTF8-Encoded-String', [?CONNACK, ?DISCONNECT]},
|
||||
16#1F => {'Reason-String', 'UTF8-Encoded-String', [?CONNACK, ?DISCONNECT, ?PUBACK,
|
||||
?PUBREC, ?PUBREL, ?PUBCOMP,
|
||||
?SUBACK, ?UNSUBACK, ?AUTH]},
|
||||
16#21 => {'Receive-Maximum', 'Two-Byte-Integer', [?CONNECT, ?CONNACK]},
|
||||
16#22 => {'Topic-Alias-Maximum', 'Two-Byte-Integer', [?CONNECT, ?CONNACK]},
|
||||
16#23 => {'Topic-Alias', 'Two-Byte-Integer', [?PUBLISH]},
|
||||
16#24 => {'Maximum-QoS', 'Byte', [?CONNACK]},
|
||||
16#25 => {'Retain-Available', 'Byte', [?CONNACK]},
|
||||
16#26 => {'User-Property', 'UTF8-String-Pair', 'ALL'},
|
||||
16#27 => {'Maximum-Packet-Size', 'Four-Byte-Integer', [?CONNECT, ?CONNACK]},
|
||||
16#28 => {'Wildcard-Subscription-Available', 'Byte', [?CONNACK]},
|
||||
16#29 => {'Subscription-Identifier-Available', 'Byte', [?CONNACK]},
|
||||
16#2A => {'Shared-Subscription-Available', 'Byte', [?CONNACK]}
|
||||
}).
|
||||
-define(PROPS_TABLE, #{
|
||||
16#01 => {'Payload-Format-Indicator', 'Byte', [?PUBLISH]},
|
||||
16#02 => {'Message-Expiry-Interval', 'Four-Byte-Integer', [?PUBLISH]},
|
||||
16#03 => {'Content-Type', 'UTF8-Encoded-String', [?PUBLISH]},
|
||||
16#08 => {'Response-Topic', 'UTF8-Encoded-String', [?PUBLISH]},
|
||||
16#09 => {'Correlation-Data', 'Binary-Data', [?PUBLISH]},
|
||||
16#0B => {'Subscription-Identifier', 'Variable-Byte-Integer', [?PUBLISH, ?SUBSCRIBE]},
|
||||
16#11 => {'Session-Expiry-Interval', 'Four-Byte-Integer', [?CONNECT, ?CONNACK, ?DISCONNECT]},
|
||||
16#12 => {'Assigned-Client-Identifier', 'UTF8-Encoded-String', [?CONNACK]},
|
||||
16#13 => {'Server-Keep-Alive', 'Two-Byte-Integer', [?CONNACK]},
|
||||
16#15 => {'Authentication-Method', 'UTF8-Encoded-String', [?CONNECT, ?CONNACK, ?AUTH]},
|
||||
16#16 => {'Authentication-Data', 'Binary-Data', [?CONNECT, ?CONNACK, ?AUTH]},
|
||||
16#17 => {'Request-Problem-Information', 'Byte', [?CONNECT]},
|
||||
16#18 => {'Will-Delay-Interval', 'Four-Byte-Integer', ['WILL']},
|
||||
16#19 => {'Request-Response-Information', 'Byte', [?CONNECT]},
|
||||
16#1A => {'Response-Information', 'UTF8-Encoded-String', [?CONNACK]},
|
||||
16#1C => {'Server-Reference', 'UTF8-Encoded-String', [?CONNACK, ?DISCONNECT]},
|
||||
16#1F =>
|
||||
{'Reason-String', 'UTF8-Encoded-String', [
|
||||
?CONNACK,
|
||||
?DISCONNECT,
|
||||
?PUBACK,
|
||||
?PUBREC,
|
||||
?PUBREL,
|
||||
?PUBCOMP,
|
||||
?SUBACK,
|
||||
?UNSUBACK,
|
||||
?AUTH
|
||||
]},
|
||||
16#21 => {'Receive-Maximum', 'Two-Byte-Integer', [?CONNECT, ?CONNACK]},
|
||||
16#22 => {'Topic-Alias-Maximum', 'Two-Byte-Integer', [?CONNECT, ?CONNACK]},
|
||||
16#23 => {'Topic-Alias', 'Two-Byte-Integer', [?PUBLISH]},
|
||||
16#24 => {'Maximum-QoS', 'Byte', [?CONNACK]},
|
||||
16#25 => {'Retain-Available', 'Byte', [?CONNACK]},
|
||||
16#26 => {'User-Property', 'UTF8-String-Pair', 'ALL'},
|
||||
16#27 => {'Maximum-Packet-Size', 'Four-Byte-Integer', [?CONNECT, ?CONNACK]},
|
||||
16#28 => {'Wildcard-Subscription-Available', 'Byte', [?CONNACK]},
|
||||
16#29 => {'Subscription-Identifier-Available', 'Byte', [?CONNACK]},
|
||||
16#2A => {'Shared-Subscription-Available', 'Byte', [?CONNACK]}
|
||||
}).
|
||||
|
||||
-spec(id(prop_name()) -> prop_id()).
|
||||
id('Payload-Format-Indicator') -> 16#01;
|
||||
id('Message-Expiry-Interval') -> 16#02;
|
||||
id('Content-Type') -> 16#03;
|
||||
id('Response-Topic') -> 16#08;
|
||||
id('Correlation-Data') -> 16#09;
|
||||
id('Subscription-Identifier') -> 16#0B;
|
||||
id('Session-Expiry-Interval') -> 16#11;
|
||||
id('Assigned-Client-Identifier') -> 16#12;
|
||||
id('Server-Keep-Alive') -> 16#13;
|
||||
id('Authentication-Method') -> 16#15;
|
||||
id('Authentication-Data') -> 16#16;
|
||||
id('Request-Problem-Information') -> 16#17;
|
||||
id('Will-Delay-Interval') -> 16#18;
|
||||
id('Request-Response-Information') -> 16#19;
|
||||
id('Response-Information') -> 16#1A;
|
||||
id('Server-Reference') -> 16#1C;
|
||||
id('Reason-String') -> 16#1F;
|
||||
id('Receive-Maximum') -> 16#21;
|
||||
id('Topic-Alias-Maximum') -> 16#22;
|
||||
id('Topic-Alias') -> 16#23;
|
||||
id('Maximum-QoS') -> 16#24;
|
||||
id('Retain-Available') -> 16#25;
|
||||
id('User-Property') -> 16#26;
|
||||
id('Maximum-Packet-Size') -> 16#27;
|
||||
id('Wildcard-Subscription-Available') -> 16#28;
|
||||
-spec id(prop_name()) -> prop_id().
|
||||
id('Payload-Format-Indicator') -> 16#01;
|
||||
id('Message-Expiry-Interval') -> 16#02;
|
||||
id('Content-Type') -> 16#03;
|
||||
id('Response-Topic') -> 16#08;
|
||||
id('Correlation-Data') -> 16#09;
|
||||
id('Subscription-Identifier') -> 16#0B;
|
||||
id('Session-Expiry-Interval') -> 16#11;
|
||||
id('Assigned-Client-Identifier') -> 16#12;
|
||||
id('Server-Keep-Alive') -> 16#13;
|
||||
id('Authentication-Method') -> 16#15;
|
||||
id('Authentication-Data') -> 16#16;
|
||||
id('Request-Problem-Information') -> 16#17;
|
||||
id('Will-Delay-Interval') -> 16#18;
|
||||
id('Request-Response-Information') -> 16#19;
|
||||
id('Response-Information') -> 16#1A;
|
||||
id('Server-Reference') -> 16#1C;
|
||||
id('Reason-String') -> 16#1F;
|
||||
id('Receive-Maximum') -> 16#21;
|
||||
id('Topic-Alias-Maximum') -> 16#22;
|
||||
id('Topic-Alias') -> 16#23;
|
||||
id('Maximum-QoS') -> 16#24;
|
||||
id('Retain-Available') -> 16#25;
|
||||
id('User-Property') -> 16#26;
|
||||
id('Maximum-Packet-Size') -> 16#27;
|
||||
id('Wildcard-Subscription-Available') -> 16#28;
|
||||
id('Subscription-Identifier-Available') -> 16#29;
|
||||
id('Shared-Subscription-Available') -> 16#2A;
|
||||
id(Name) -> error({bad_property, Name}).
|
||||
id('Shared-Subscription-Available') -> 16#2A;
|
||||
id(Name) -> error({bad_property, Name}).
|
||||
|
||||
-spec(name(prop_id()) -> prop_name()).
|
||||
-spec name(prop_id()) -> prop_name().
|
||||
name(16#01) -> 'Payload-Format-Indicator';
|
||||
name(16#02) -> 'Message-Expiry-Interval';
|
||||
name(16#03) -> 'Content-Type';
|
||||
|
|
@ -126,33 +137,36 @@ name(16#27) -> 'Maximum-Packet-Size';
|
|||
name(16#28) -> 'Wildcard-Subscription-Available';
|
||||
name(16#29) -> 'Subscription-Identifier-Available';
|
||||
name(16#2A) -> 'Shared-Subscription-Available';
|
||||
name(Id) -> error({unsupported_property, Id}).
|
||||
name(Id) -> error({unsupported_property, Id}).
|
||||
|
||||
-spec(filter(emqx_types:packet_type(), emqx_types:properties())
|
||||
-> emqx_types:properties()).
|
||||
filter(PacketType, Props) when is_map(Props),
|
||||
PacketType >= ?CONNECT,
|
||||
PacketType =< ?AUTH ->
|
||||
-spec filter(emqx_types:packet_type(), emqx_types:properties()) ->
|
||||
emqx_types:properties().
|
||||
filter(PacketType, Props) when
|
||||
is_map(Props),
|
||||
PacketType >= ?CONNECT,
|
||||
PacketType =< ?AUTH
|
||||
->
|
||||
F = fun(Name, _) ->
|
||||
case maps:find(id(Name), ?PROPS_TABLE) of
|
||||
{ok, {Name, _Type, 'ALL'}} ->
|
||||
true;
|
||||
{ok, {Name, _Type, AllowedTypes}} ->
|
||||
lists:member(PacketType, AllowedTypes);
|
||||
error -> false
|
||||
end
|
||||
end,
|
||||
case maps:find(id(Name), ?PROPS_TABLE) of
|
||||
{ok, {Name, _Type, 'ALL'}} ->
|
||||
true;
|
||||
{ok, {Name, _Type, AllowedTypes}} ->
|
||||
lists:member(PacketType, AllowedTypes);
|
||||
error ->
|
||||
false
|
||||
end
|
||||
end,
|
||||
maps:filter(F, Props).
|
||||
|
||||
-spec(validate(emqx_types:properties()) -> ok).
|
||||
-spec validate(emqx_types:properties()) -> ok.
|
||||
validate(Props) when is_map(Props) ->
|
||||
lists:foreach(fun validate_prop/1, maps:to_list(Props)).
|
||||
|
||||
validate_prop(Prop = {Name, Val}) ->
|
||||
case maps:find(id(Name), ?PROPS_TABLE) of
|
||||
{ok, {Name, Type, _}} ->
|
||||
validate_value(Type, Val)
|
||||
orelse error({bad_property_value, Prop});
|
||||
validate_value(Type, Val) orelse
|
||||
error({bad_property_value, Prop});
|
||||
error ->
|
||||
error({bad_property, Name})
|
||||
end.
|
||||
|
|
@ -166,23 +180,28 @@ validate_value('Four-Byte-Integer', Val) ->
|
|||
validate_value('Variable-Byte-Integer', Val) ->
|
||||
is_integer(Val) andalso 0 =< Val andalso Val =< 16#7FFFFFFF;
|
||||
validate_value('UTF8-String-Pair', {Name, Val}) ->
|
||||
validate_value('UTF8-Encoded-String', Name)
|
||||
andalso validate_value('UTF8-Encoded-String', Val);
|
||||
validate_value('UTF8-Encoded-String', Name) andalso
|
||||
validate_value('UTF8-Encoded-String', Val);
|
||||
validate_value('UTF8-String-Pair', Pairs) when is_list(Pairs) ->
|
||||
lists:foldl(fun(Pair, OK) ->
|
||||
OK andalso validate_value('UTF8-String-Pair', Pair)
|
||||
end, true, Pairs);
|
||||
validate_value('UTF8-Encoded-String', Val) ->
|
||||
lists:foldl(
|
||||
fun(Pair, OK) ->
|
||||
OK andalso validate_value('UTF8-String-Pair', Pair)
|
||||
end,
|
||||
true,
|
||||
Pairs
|
||||
);
|
||||
validate_value('UTF8-Encoded-String', Val) ->
|
||||
is_binary(Val);
|
||||
validate_value('Binary-Data', Val) ->
|
||||
is_binary(Val);
|
||||
validate_value(_Type, _Val) -> false.
|
||||
validate_value(_Type, _Val) ->
|
||||
false.
|
||||
|
||||
-spec(new() -> map()).
|
||||
-spec new() -> map().
|
||||
new() ->
|
||||
#{}.
|
||||
|
||||
-spec(all() -> map()).
|
||||
-spec all() -> map().
|
||||
all() -> ?PROPS_TABLE.
|
||||
|
||||
set(Name, Value, undefined) ->
|
||||
|
|
@ -194,4 +213,3 @@ get(_Name, undefined, Default) ->
|
|||
Default;
|
||||
get(Name, Props, Default) ->
|
||||
maps:get(Name, Props, Default).
|
||||
|
||||
|
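filter/2 drops any property that MQTT 5.0 does not allow on the given packet type, and validate/1 type-checks the remaining values against ?PROPS_TABLE. A hedged sketch (module assumed to be emqx_mqtt_props; ?PUBLISH comes from emqx_mqtt.hrl):

    %% Editor's illustration only.
    demo_props() ->
        Props = #{'Topic-Alias' => 1, 'Session-Expiry-Interval' => 30},
        %% 'Session-Expiry-Interval' is not allowed on PUBLISH, so it is dropped.
        Filtered = emqx_mqtt_props:filter(?PUBLISH, Props),
        ok = emqx_mqtt_props:validate(Filtered),
        Filtered.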
|
|
|||
|
|
@ -53,39 +53,43 @@
|
|||
-include("types.hrl").
|
||||
-include("emqx_mqtt.hrl").
|
||||
|
||||
-export([ init/1
|
||||
, info/1
|
||||
, info/2
|
||||
]).
|
||||
-export([
|
||||
init/1,
|
||||
info/1,
|
||||
info/2
|
||||
]).
|
||||
|
||||
-export([ is_empty/1
|
||||
, len/1
|
||||
, max_len/1
|
||||
, in/2
|
||||
, out/1
|
||||
, stats/1
|
||||
, dropped/1
|
||||
]).
|
||||
-export([
|
||||
is_empty/1,
|
||||
len/1,
|
||||
max_len/1,
|
||||
in/2,
|
||||
out/1,
|
||||
stats/1,
|
||||
dropped/1
|
||||
]).
|
||||
|
||||
-define(NO_PRIORITY_TABLE, disabled).
|
||||
|
||||
-export_type([mqueue/0, options/0]).
|
||||
|
||||
-type(topic() :: emqx_types:topic()).
|
||||
-type(priority() :: infinity | integer()).
|
||||
-type(pq() :: emqx_pqueue:q()).
|
||||
-type(count() :: non_neg_integer()).
|
||||
-type(p_table() :: ?NO_PRIORITY_TABLE | #{topic() := priority()}).
|
||||
-type(options() :: #{max_len := count(),
|
||||
priorities => p_table(),
|
||||
default_priority => highest | lowest,
|
||||
store_qos0 => boolean()
|
||||
}).
|
||||
-type(message() :: emqx_types:message()).
|
||||
-type topic() :: emqx_types:topic().
|
||||
-type priority() :: infinity | integer().
|
||||
-type pq() :: emqx_pqueue:q().
|
||||
-type count() :: non_neg_integer().
|
||||
-type p_table() :: ?NO_PRIORITY_TABLE | #{topic() := priority()}.
|
||||
-type options() :: #{
|
||||
max_len := count(),
|
||||
priorities => p_table(),
|
||||
default_priority => highest | lowest,
|
||||
store_qos0 => boolean()
|
||||
}.
|
||||
-type message() :: emqx_types:message().
|
||||
|
||||
-type(stat() :: {len, non_neg_integer()}
|
||||
| {max_len, non_neg_integer()}
|
||||
| {dropped, non_neg_integer()}).
|
||||
-type stat() ::
|
||||
{len, non_neg_integer()}
|
||||
| {max_len, non_neg_integer()}
|
||||
| {dropped, non_neg_integer()}.
|
||||
|
||||
-define(PQUEUE, emqx_pqueue).
|
||||
-define(LOWEST_PRIORITY, 0).
|
||||
|
|
@ -94,43 +98,45 @@
|
|||
-define(INFO_KEYS, [store_qos0, max_len, len, dropped]).
|
||||
|
||||
-record(shift_opts, {
|
||||
multiplier :: non_neg_integer(),
|
||||
base :: integer()
|
||||
}).
|
||||
multiplier :: non_neg_integer(),
|
||||
base :: integer()
|
||||
}).
|
||||
|
||||
-record(mqueue, {
|
||||
store_qos0 = false :: boolean(),
|
||||
max_len = ?MAX_LEN_INFINITY :: count(),
|
||||
len = 0 :: count(),
|
||||
dropped = 0 :: count(),
|
||||
p_table = ?NO_PRIORITY_TABLE :: p_table(),
|
||||
default_p = ?LOWEST_PRIORITY :: priority(),
|
||||
q = ?PQUEUE:new() :: pq(),
|
||||
shift_opts :: #shift_opts{},
|
||||
last_prio :: non_neg_integer() | undefined,
|
||||
p_credit :: non_neg_integer() | undefined
|
||||
}).
|
||||
store_qos0 = false :: boolean(),
|
||||
max_len = ?MAX_LEN_INFINITY :: count(),
|
||||
len = 0 :: count(),
|
||||
dropped = 0 :: count(),
|
||||
p_table = ?NO_PRIORITY_TABLE :: p_table(),
|
||||
default_p = ?LOWEST_PRIORITY :: priority(),
|
||||
q = ?PQUEUE:new() :: pq(),
|
||||
shift_opts :: #shift_opts{},
|
||||
last_prio :: non_neg_integer() | undefined,
|
||||
p_credit :: non_neg_integer() | undefined
|
||||
}).
|
||||
|
||||
-type(mqueue() :: #mqueue{}).
|
||||
-type mqueue() :: #mqueue{}.
|
||||
|
||||
-spec(init(options()) -> mqueue()).
|
||||
-spec init(options()) -> mqueue().
|
||||
init(Opts = #{max_len := MaxLen0, store_qos0 := QoS_0}) ->
|
||||
MaxLen = case (is_integer(MaxLen0) andalso MaxLen0 > ?MAX_LEN_INFINITY) of
|
||||
true -> MaxLen0;
|
||||
false -> ?MAX_LEN_INFINITY
|
||||
end,
|
||||
#mqueue{max_len = MaxLen,
|
||||
store_qos0 = QoS_0,
|
||||
p_table = get_opt(priorities, Opts, ?NO_PRIORITY_TABLE),
|
||||
default_p = get_priority_opt(Opts),
|
||||
shift_opts = get_shift_opt(Opts)
|
||||
}.
|
||||
MaxLen =
|
||||
case (is_integer(MaxLen0) andalso MaxLen0 > ?MAX_LEN_INFINITY) of
|
||||
true -> MaxLen0;
|
||||
false -> ?MAX_LEN_INFINITY
|
||||
end,
|
||||
#mqueue{
|
||||
max_len = MaxLen,
|
||||
store_qos0 = QoS_0,
|
||||
p_table = get_opt(priorities, Opts, ?NO_PRIORITY_TABLE),
|
||||
default_p = get_priority_opt(Opts),
|
||||
shift_opts = get_shift_opt(Opts)
|
||||
}.
|
||||
|
||||
-spec(info(mqueue()) -> emqx_types:infos()).
|
||||
-spec info(mqueue()) -> emqx_types:infos().
|
||||
info(MQ) ->
|
||||
maps:from_list([{Key, info(Key, MQ)} || Key <- ?INFO_KEYS]).
|
||||
|
||||
-spec(info(atom(), mqueue()) -> term()).
|
||||
-spec info(atom(), mqueue()) -> term().
|
||||
info(store_qos0, #mqueue{store_qos0 = True}) ->
|
||||
True;
|
||||
info(max_len, #mqueue{max_len = MaxLen}) ->
|
||||
|
|
@ -147,25 +153,30 @@ len(#mqueue{len = Len}) -> Len.
|
|||
max_len(#mqueue{max_len = MaxLen}) -> MaxLen.
|
||||
|
||||
%% @doc Return number of dropped messages.
|
||||
-spec(dropped(mqueue()) -> count()).
|
||||
-spec dropped(mqueue()) -> count().
|
||||
dropped(#mqueue{dropped = Dropped}) -> Dropped.
|
||||
|
||||
%% @doc Stats of the mqueue
|
||||
-spec(stats(mqueue()) -> [stat()]).
|
||||
-spec stats(mqueue()) -> [stat()].
|
||||
stats(#mqueue{max_len = MaxLen, dropped = Dropped} = MQ) ->
|
||||
[{len, len(MQ)}, {max_len, MaxLen}, {dropped, Dropped}].
|
||||
|
||||
%% @doc Enqueue a message.
|
||||
-spec(in(message(), mqueue()) -> {maybe(message()), mqueue()}).
|
||||
-spec in(message(), mqueue()) -> {maybe(message()), mqueue()}.
|
||||
in(Msg = #message{qos = ?QOS_0}, MQ = #mqueue{store_qos0 = false}) ->
|
||||
{_Dropped = Msg, MQ};
|
||||
in(Msg = #message{topic = Topic}, MQ = #mqueue{default_p = Dp,
|
||||
p_table = PTab,
|
||||
q = Q,
|
||||
len = Len,
|
||||
max_len = MaxLen,
|
||||
dropped = Dropped
|
||||
} = MQ) ->
|
||||
in(
|
||||
Msg = #message{topic = Topic},
|
||||
MQ =
|
||||
#mqueue{
|
||||
default_p = Dp,
|
||||
p_table = PTab,
|
||||
q = Q,
|
||||
len = Len,
|
||||
max_len = MaxLen,
|
||||
dropped = Dropped
|
||||
} = MQ
|
||||
) ->
|
||||
Priority = get_priority(Topic, PTab, Dp),
|
||||
PLen = ?PQUEUE:plen(Priority, Q),
|
||||
case MaxLen =/= ?MAX_LEN_INFINITY andalso PLen =:= MaxLen of
|
||||
|
|
@ -178,24 +189,26 @@ in(Msg = #message{topic = Topic}, MQ = #mqueue{default_p = Dp,
|
|||
{_DroppedMsg = undefined, MQ#mqueue{len = Len + 1, q = ?PQUEUE:in(Msg, Priority, Q)}}
|
||||
end.
|
||||
|
||||
-spec(out(mqueue()) -> {empty | {value, message()}, mqueue()}).
|
||||
-spec out(mqueue()) -> {empty | {value, message()}, mqueue()}.
|
||||
out(MQ = #mqueue{len = 0, q = Q}) ->
|
||||
0 = ?PQUEUE:len(Q), %% assert, in this case, ?PQUEUE:len should be very cheap
|
||||
%% assert, in this case, ?PQUEUE:len should be very cheap
|
||||
0 = ?PQUEUE:len(Q),
|
||||
{empty, MQ};
|
||||
out(MQ = #mqueue{q = Q, len = Len, last_prio = undefined, shift_opts = ShiftOpts}) ->
|
||||
{{value, Val, Prio}, Q1} = ?PQUEUE:out_p(Q), %% Shouldn't fail, since we've checked the length
|
||||
%% Shouldn't fail, since we've checked the length
|
||||
{{value, Val, Prio}, Q1} = ?PQUEUE:out_p(Q),
|
||||
MQ1 = MQ#mqueue{
|
||||
q = Q1,
|
||||
len = Len - 1,
|
||||
last_prio = Prio,
|
||||
p_credit = get_credits(Prio, ShiftOpts)
|
||||
},
|
||||
q = Q1,
|
||||
len = Len - 1,
|
||||
last_prio = Prio,
|
||||
p_credit = get_credits(Prio, ShiftOpts)
|
||||
},
|
||||
{{value, Val}, MQ1};
|
||||
out(MQ = #mqueue{q = Q, p_credit = 0}) ->
|
||||
MQ1 = MQ#mqueue{
|
||||
q = ?PQUEUE:shift(Q),
|
||||
last_prio = undefined
|
||||
},
|
||||
q = ?PQUEUE:shift(Q),
|
||||
last_prio = undefined
|
||||
},
|
||||
out(MQ1);
|
||||
out(MQ = #mqueue{q = Q, len = Len, p_credit = Cnt}) ->
|
||||
{R, Q1} = ?PQUEUE:out(Q),
|
||||
|
|
@ -232,23 +245,25 @@ get_shift_opt(Opts) ->
|
|||
    %% overhead of ?PQUEUE:rotate
    Mult = maps:get(shift_multiplier, Opts, 10),
    true = is_integer(Mult) andalso Mult > 0,
    Min = case Opts of
              #{p_table := PTab} ->
                  case maps:size(PTab) of
                      0 -> 0;
                      _ -> lists:min(maps:values(PTab))
                  end;
              _ ->
                  ?LOWEST_PRIORITY
          end,
    Min =
        case Opts of
            #{p_table := PTab} ->
                case maps:size(PTab) of
                    0 -> 0;
                    _ -> lists:min(maps:values(PTab))
                end;
            _ ->
                ?LOWEST_PRIORITY
        end,
    %% `mqueue' module supports negative priorities, but we don't want
    %% the counter to be negative, so all priorities should be shifted
    %% by a constant, if negative priorities are used:
    Base = case Min < 0 of
               true -> -Min;
               false -> 0
           end,
    Base =
        case Min < 0 of
            true -> -Min;
            false -> 0
        end,
    #shift_opts{
        multiplier = Mult,
        base = Base
    }.
        multiplier = Mult,
        base = Base
    }.
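The comment in this hunk explains why the base offset exists: topic priorities may be negative, but the per-priority credit counter must not be. A minimal sketch of that arithmetic, using a made-up priority table that is not part of this commit:

    %% Hypothetical priorities; -2 is the lowest one configured.
    PTab = #{<<"t/low">> => -2, <<"t/high">> => 3},
    Min = lists:min(maps:values(PTab)),                    %% -2
    Base = case Min < 0 of true -> -Min; false -> 0 end,   %% 2
    %% Shifted by Base, every priority is non-negative again: 0 and 5.
    true = lists:all(fun(P) -> P + Base >= 0 end, maps:values(PTab)).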
@ -17,14 +17,16 @@
|
|||
%% Collection of functions for creating node dumps
-module(emqx_node_dump).

-export([ sys_info/0
        , app_env_dump/0
        ]).
-export([
    sys_info/0,
    app_env_dump/0
]).

sys_info() ->
    #{ release => emqx_app:get_release()
     , otp_version => emqx_vm:get_otp_version()
     }.
    #{
        release => emqx_app:get_release(),
        otp_version => emqx_vm:get_otp_version()
    }.

app_env_dump() ->
    censor(ets:tab2list(ac_tab)).
@ -37,13 +39,13 @@ censor([_ | Rest]) ->
|
|||
censor(Rest).
|
||||
|
||||
censor(Path, {Key, Val}) when is_atom(Key) ->
|
||||
{Key, censor([Key|Path], Val)};
|
||||
{Key, censor([Key | Path], Val)};
|
||||
censor(Path, M) when is_map(M) ->
|
||||
Fun = fun(Key, Val) ->
|
||||
censor([Key|Path], Val)
|
||||
end,
|
||||
censor([Key | Path], Val)
|
||||
end,
|
||||
maps:map(Fun, M);
|
||||
censor(Path, L = [Fst|_]) when is_tuple(Fst) ->
|
||||
censor(Path, L = [Fst | _]) when is_tuple(Fst) ->
|
||||
[censor(Path, I) || I <- L];
|
||||
censor([Key | _], Val) ->
|
||||
case is_sensitive(Key) of
|
||||
|
|
@ -58,12 +60,14 @@ is_sensitive(Key) when is_list(Key) ->
|
|||
Bin ->
|
||||
is_sensitive(Bin)
|
||||
catch
|
||||
_ : _ ->
|
||||
_:_ ->
|
||||
false
|
||||
end;
|
||||
is_sensitive(Key) when is_binary(Key) ->
|
||||
lists:any(fun(Pattern) -> re:run(Key, Pattern) =/= nomatch end,
|
||||
["passwd", "password", "secret"]);
|
||||
lists:any(
|
||||
fun(Pattern) -> re:run(Key, Pattern) =/= nomatch end,
|
||||
["passwd", "password", "secret"]
|
||||
);
|
||||
is_sensitive(Key) when is_tuple(Key) ->
|
||||
false.
|
||||
|
||||
|
|
@ -77,11 +81,14 @@ obfuscate_value(_Val) ->
|
|||
-include_lib("eunit/include/eunit.hrl").
|
||||
|
||||
censor_test() ->
|
||||
?assertMatch( [{{env, emqx, listeners}, #{password := <<"********">>}}]
|
||||
, censor([foo, {{env, emqx, listeners}, #{password => <<"secret">>}}, {app, bar}])
|
||||
),
|
||||
?assertMatch( [{{env, emqx, listeners}, [{foo, 1}, {password, "********"}]}]
|
||||
, censor([{{env, emqx, listeners}, [{foo, 1}, {password, "secret"}]}])
|
||||
).
|
||||
?assertMatch(
|
||||
[{{env, emqx, listeners}, #{password := <<"********">>}}],
|
||||
censor([foo, {{env, emqx, listeners}, #{password => <<"secret">>}}, {app, bar}])
|
||||
),
|
||||
?assertMatch(
|
||||
[{{env, emqx, listeners}, [{foo, 1}, {password, "********"}]}],
|
||||
censor([{{env, emqx, listeners}, [{foo, 1}, {password, "secret"}]}])
|
||||
).
|
||||
|
||||
-endif. %% TEST
|
||||
%% TEST
|
||||
-endif.
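As the test above shows, masking is driven by a regex match of the key against a short list of sensitive patterns. A standalone sketch of that check, mirroring the is_sensitive/1 clause earlier in this file (the sample keys are illustrative only):

    Sensitive = fun(Key) ->
        lists:any(fun(Pattern) -> re:run(Key, Pattern) =/= nomatch end,
                  ["passwd", "password", "secret"])
    end,
    true = Sensitive(<<"default_password">>),
    false = Sensitive(<<"max_connections">>).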
@ -17,38 +17,39 @@
|
|||
|
||||
-include_lib("lc/include/lc.hrl").
|
||||
|
||||
-export([ is_overloaded/0
|
||||
, backoff/1
|
||||
, backoff_gc/1
|
||||
, backoff_hibernation/1
|
||||
, backoff_new_conn/1
|
||||
]).
|
||||
|
||||
-export([
|
||||
is_overloaded/0,
|
||||
backoff/1,
|
||||
backoff_gc/1,
|
||||
backoff_hibernation/1,
|
||||
backoff_new_conn/1
|
||||
]).
|
||||
|
||||
%% exports for O&M
|
||||
-export([ status/0
|
||||
, enable/0
|
||||
, disable/0
|
||||
]).
|
||||
-export([
|
||||
status/0,
|
||||
enable/0,
|
||||
disable/0
|
||||
]).
|
||||
|
||||
-type cfg_key() ::
|
||||
backoff_gc |
|
||||
backoff_hibernation |
|
||||
backoff_new_conn.
|
||||
backoff_gc
|
||||
| backoff_hibernation
|
||||
| backoff_new_conn.
|
||||
|
||||
-type cnt_name() ::
|
||||
'olp.delay.ok' |
|
||||
'olp.delay.timeout' |
|
||||
'olp.hbn' |
|
||||
'olp.gc' |
|
||||
'olp.new_conn'.
|
||||
'olp.delay.ok'
|
||||
| 'olp.delay.timeout'
|
||||
| 'olp.hbn'
|
||||
| 'olp.gc'
|
||||
| 'olp.new_conn'.
|
||||
|
||||
-define(overload_protection, overload_protection).
|
||||
|
||||
%% @doc Light realtime check if system is overloaded.
|
||||
-spec is_overloaded() -> boolean().
|
||||
is_overloaded() ->
|
||||
load_ctl:is_overloaded().
|
||||
load_ctl:is_overloaded().
|
||||
|
||||
%% @doc Backoff with a delay if the system is overloaded, for tasks that could be deferred.
|
||||
%% returns `false' if backoff didn't happen, the system is cool.
|
||||
|
|
@ -56,78 +57,79 @@ is_overloaded() ->
|
|||
%% returns `timeout' if backoff is triggered but get unblocked due to timeout as configured.
-spec backoff(Zone :: atom()) -> ok | false | timeout.
backoff(Zone) ->
    case emqx_config:get_zone_conf(Zone, [?overload_protection]) of
        #{enable := true, backoff_delay := Delay} ->
            case load_ctl:maydelay(Delay) of
                false -> false;
                ok ->
                    emqx_metrics:inc('olp.delay.ok'),
                    ok;
                timeout ->
                    emqx_metrics:inc('olp.delay.timeout'),
                    timeout
            end;
        _ ->
            ok
    end.
    case emqx_config:get_zone_conf(Zone, [?overload_protection]) of
        #{enable := true, backoff_delay := Delay} ->
            case load_ctl:maydelay(Delay) of
                false ->
                    false;
                ok ->
                    emqx_metrics:inc('olp.delay.ok'),
                    ok;
                timeout ->
                    emqx_metrics:inc('olp.delay.timeout'),
                    timeout
            end;
        _ ->
            ok
    end.
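A hedged usage sketch for backoff/1 (the zone name and the job function are assumptions, not part of this commit): the caller simply pauses under load and then continues, and the return value only reports whether and how the delay ended.

    do_deferrable_job(Zone) ->
        %% ok | false | timeout, per the spec above; the job runs either way.
        _ = emqx_olp:backoff(Zone),
        run_job().   %% hypothetical helper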
|
||||
|
||||
%% @doc If forceful GC should be skipped when the system is overloaded.
|
||||
-spec backoff_gc(Zone :: atom()) -> boolean().
|
||||
backoff_gc(Zone) ->
|
||||
do_check(Zone, ?FUNCTION_NAME, 'olp.gc').
|
||||
do_check(Zone, ?FUNCTION_NAME, 'olp.gc').
|
||||
|
||||
%% @doc If hibernation should be skipped when the system is overloaded.
|
||||
-spec backoff_hibernation(Zone :: atom()) -> boolean().
|
||||
backoff_hibernation(Zone) ->
|
||||
do_check(Zone, ?FUNCTION_NAME, 'olp.hbn').
|
||||
do_check(Zone, ?FUNCTION_NAME, 'olp.hbn').
|
||||
|
||||
%% @doc Returns {error, overloaded} if new connection should be
|
||||
%% closed when system is overloaded.
|
||||
-spec backoff_new_conn(Zone :: atom()) -> ok | {error, overloaded}.
|
||||
backoff_new_conn(Zone) ->
|
||||
case do_check(Zone, ?FUNCTION_NAME, 'olp.new_conn') of
|
||||
true ->
|
||||
{error, overloaded};
|
||||
false ->
|
||||
ok
|
||||
end.
|
||||
case do_check(Zone, ?FUNCTION_NAME, 'olp.new_conn') of
|
||||
true ->
|
||||
{error, overloaded};
|
||||
false ->
|
||||
ok
|
||||
end.
|
||||
|
||||
-spec status() -> any().
|
||||
status() ->
|
||||
is_overloaded().
|
||||
is_overloaded().
|
||||
|
||||
%% @doc turn off background runq check.
|
||||
-spec disable() -> ok | {error, timeout}.
|
||||
disable() ->
|
||||
load_ctl:stop_runq_flagman(5000).
|
||||
load_ctl:stop_runq_flagman(5000).
|
||||
|
||||
%% @doc turn on background runq check.
|
||||
-spec enable() -> {ok, pid()} | {error, running | restarting | disabled}.
|
||||
enable() ->
|
||||
case load_ctl:restart_runq_flagman() of
|
||||
{error, disabled} ->
|
||||
OldCfg = load_ctl:get_config(),
|
||||
ok = load_ctl:put_config(OldCfg#{ ?RUNQ_MON_F0 => true }),
|
||||
load_ctl:restart_runq_flagman();
|
||||
Other ->
|
||||
Other
|
||||
end.
|
||||
case load_ctl:restart_runq_flagman() of
|
||||
{error, disabled} ->
|
||||
OldCfg = load_ctl:get_config(),
|
||||
ok = load_ctl:put_config(OldCfg#{?RUNQ_MON_F0 => true}),
|
||||
load_ctl:restart_runq_flagman();
|
||||
Other ->
|
||||
Other
|
||||
end.
|
||||
|
||||
%%% Internals
|
||||
-spec do_check(Zone::atom(), cfg_key(), cnt_name()) -> boolean().
|
||||
-spec do_check(Zone :: atom(), cfg_key(), cnt_name()) -> boolean().
|
||||
do_check(Zone, Key, CntName) ->
|
||||
case load_ctl:is_overloaded() of
|
||||
true ->
|
||||
case emqx_config:get_zone_conf(Zone, [?overload_protection]) of
|
||||
#{enable := true, Key := true} ->
|
||||
emqx_metrics:inc(CntName),
|
||||
true;
|
||||
_ ->
|
||||
false
|
||||
end;
|
||||
false -> false
|
||||
end.
|
||||
|
||||
case load_ctl:is_overloaded() of
|
||||
true ->
|
||||
case emqx_config:get_zone_conf(Zone, [?overload_protection]) of
|
||||
#{enable := true, Key := true} ->
|
||||
emqx_metrics:inc(CntName),
|
||||
true;
|
||||
_ ->
|
||||
false
|
||||
end;
|
||||
false ->
|
||||
false
|
||||
end.
|
||||
|
||||
%%%_* Emacs ====================================================================
|
||||
%%% Local Variables:
@ -20,25 +20,26 @@
|
|||
|
||||
-include("logger.hrl").
|
||||
|
||||
|
||||
-export([start_link/0]).
|
||||
|
||||
-export([ get_mem_check_interval/0
|
||||
, set_mem_check_interval/1
|
||||
, get_sysmem_high_watermark/0
|
||||
, set_sysmem_high_watermark/1
|
||||
, get_procmem_high_watermark/0
|
||||
, set_procmem_high_watermark/1
|
||||
]).
|
||||
-export([
|
||||
get_mem_check_interval/0,
|
||||
set_mem_check_interval/1,
|
||||
get_sysmem_high_watermark/0,
|
||||
set_sysmem_high_watermark/1,
|
||||
get_procmem_high_watermark/0,
|
||||
set_procmem_high_watermark/1
|
||||
]).
|
||||
|
||||
%% gen_server callbacks
|
||||
-export([ init/1
|
||||
, handle_call/3
|
||||
, handle_cast/2
|
||||
, handle_info/2
|
||||
, terminate/2
|
||||
, code_change/3
|
||||
]).
|
||||
-export([
|
||||
init/1,
|
||||
handle_call/3,
|
||||
handle_cast/2,
|
||||
handle_info/2,
|
||||
terminate/2,
|
||||
code_change/3
|
||||
]).
|
||||
|
||||
-include("emqx.hrl").
|
||||
|
||||
|
|
@ -93,41 +94,47 @@ handle_call(Req, _From, State) ->
|
|||
{reply, {error, {unexpected_call, Req}}, State}.
|
||||
|
||||
handle_cast(Msg, State) ->
|
||||
?SLOG(error, #{msg => "unexpected_cast", cast=> Msg}),
|
||||
?SLOG(error, #{msg => "unexpected_cast", cast => Msg}),
|
||||
{noreply, State}.
|
||||
|
||||
handle_info({timeout, _Timer, check}, State) ->
|
||||
CPUHighWatermark = emqx:get_config([sysmon, os, cpu_high_watermark]) * 100,
|
||||
CPULowWatermark = emqx:get_config([sysmon, os, cpu_low_watermark]) * 100,
|
||||
_ = case emqx_vm:cpu_util() of %% TODO: should be improved?
|
||||
0 -> ok;
|
||||
Busy when Busy > CPUHighWatermark ->
|
||||
Usage = list_to_binary(io_lib:format("~.2f%", [Busy])),
|
||||
Message = <<Usage/binary, " cpu usage">>,
|
||||
emqx_alarm:activate(high_cpu_usage,
|
||||
#{
|
||||
usage => Usage,
|
||||
high_watermark => CPUHighWatermark,
|
||||
low_watermark => CPULowWatermark
|
||||
},
|
||||
Message),
|
||||
start_check_timer();
|
||||
Busy when Busy < CPULowWatermark ->
|
||||
Usage = list_to_binary(io_lib:format("~.2f%", [Busy])),
|
||||
Message = <<Usage/binary, " cpu usage">>,
|
||||
emqx_alarm:deactivate(high_cpu_usage,
|
||||
#{
|
||||
usage => Usage,
|
||||
high_watermark => CPUHighWatermark,
|
||||
low_watermark => CPULowWatermark
|
||||
},
|
||||
Message),
|
||||
start_check_timer();
|
||||
_Busy ->
|
||||
start_check_timer()
|
||||
end,
|
||||
%% TODO: should be improved?
|
||||
_ =
|
||||
case emqx_vm:cpu_util() of
|
||||
0 ->
|
||||
ok;
|
||||
Busy when Busy > CPUHighWatermark ->
|
||||
Usage = list_to_binary(io_lib:format("~.2f%", [Busy])),
|
||||
Message = <<Usage/binary, " cpu usage">>,
|
||||
emqx_alarm:activate(
|
||||
high_cpu_usage,
|
||||
#{
|
||||
usage => Usage,
|
||||
high_watermark => CPUHighWatermark,
|
||||
low_watermark => CPULowWatermark
|
||||
},
|
||||
Message
|
||||
),
|
||||
start_check_timer();
|
||||
Busy when Busy < CPULowWatermark ->
|
||||
Usage = list_to_binary(io_lib:format("~.2f%", [Busy])),
|
||||
Message = <<Usage/binary, " cpu usage">>,
|
||||
emqx_alarm:deactivate(
|
||||
high_cpu_usage,
|
||||
#{
|
||||
usage => Usage,
|
||||
high_watermark => CPUHighWatermark,
|
||||
low_watermark => CPULowWatermark
|
||||
},
|
||||
Message
|
||||
),
|
||||
start_check_timer();
|
||||
_Busy ->
|
||||
start_check_timer()
|
||||
end,
|
||||
{noreply, State};
|
||||
|
||||
handle_info(Info, State) ->
|
||||
?SLOG(error, #{msg => "unexpected_info", info => Info}),
|
||||
{noreply, State}.
|
||||
|
|
@ -157,10 +164,11 @@ start_check_timer() ->
|
|||
%% so it can only be checked again at startup.
ensure_system_memory_alarm(HW) ->
    case erlang:whereis(memsup) of
        undefined -> ok;
        undefined ->
            ok;
        _Pid ->
            {Total, Allocated, _Worst} = memsup:get_memory_data(),
            case Total =/= 0 andalso Allocated/Total * 100 > HW of
            case Total =/= 0 andalso Allocated / Total * 100 > HW of
                true -> emqx_alarm:activate(high_system_memory_usage, #{high_watermark => HW});
                false -> ok
            end
@ -20,89 +20,83 @@
|
|||
-include("emqx_mqtt.hrl").
|
||||
|
||||
%% Header APIs
|
||||
-export([ type/1
|
||||
, type_name/1
|
||||
, dup/1
|
||||
, qos/1
|
||||
, retain/1
|
||||
]).
|
||||
-export([
|
||||
type/1,
|
||||
type_name/1,
|
||||
dup/1,
|
||||
qos/1,
|
||||
retain/1
|
||||
]).
|
||||
|
||||
%% Field APIs
|
||||
-export([ proto_name/1
|
||||
, proto_ver/1
|
||||
, info/2
|
||||
, set_props/2
|
||||
]).
|
||||
-export([
|
||||
proto_name/1,
|
||||
proto_ver/1,
|
||||
info/2,
|
||||
set_props/2
|
||||
]).
|
||||
|
||||
%% Check API
|
||||
-export([ check/1
|
||||
, check/2
|
||||
]).
|
||||
-export([
|
||||
check/1,
|
||||
check/2
|
||||
]).
|
||||
|
||||
-export([ to_message/2
|
||||
, to_message/3
|
||||
, will_msg/1
|
||||
]).
|
||||
-export([
|
||||
to_message/2,
|
||||
to_message/3,
|
||||
will_msg/1
|
||||
]).
|
||||
|
||||
-export([ format/1
|
||||
, format/2
|
||||
]).
|
||||
-export([
|
||||
format/1,
|
||||
format/2
|
||||
]).
|
||||
|
||||
-export([encode_hex/1]).
|
||||
|
||||
-define(TYPE_NAMES,
|
||||
{ 'CONNECT'
|
||||
, 'CONNACK'
|
||||
, 'PUBLISH'
|
||||
, 'PUBACK'
|
||||
, 'PUBREC'
|
||||
, 'PUBREL'
|
||||
, 'PUBCOMP'
|
||||
, 'SUBSCRIBE'
|
||||
, 'SUBACK'
|
||||
, 'UNSUBSCRIBE'
|
||||
, 'UNSUBACK'
|
||||
, 'PINGREQ'
|
||||
, 'PINGRESP'
|
||||
, 'DISCONNECT'
|
||||
, 'AUTH'
|
||||
}).
|
||||
{'CONNECT', 'CONNACK', 'PUBLISH', 'PUBACK', 'PUBREC', 'PUBREL', 'PUBCOMP', 'SUBSCRIBE',
|
||||
'SUBACK', 'UNSUBSCRIBE', 'UNSUBACK', 'PINGREQ', 'PINGRESP', 'DISCONNECT', 'AUTH'}
|
||||
).
|
||||
|
||||
-type(connect() :: #mqtt_packet_connect{}).
|
||||
-type(publish() :: #mqtt_packet_publish{}).
|
||||
-type(subscribe() :: #mqtt_packet_subscribe{}).
|
||||
-type(unsubscribe() :: #mqtt_packet_unsubscribe{}).
|
||||
-type connect() :: #mqtt_packet_connect{}.
|
||||
-type publish() :: #mqtt_packet_publish{}.
|
||||
-type subscribe() :: #mqtt_packet_subscribe{}.
|
||||
-type unsubscribe() :: #mqtt_packet_unsubscribe{}.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% MQTT Packet Type and Flags.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
%% @doc MQTT packet type.
|
||||
-spec(type(emqx_types:packet()) -> emqx_types:packet_type()).
|
||||
-spec type(emqx_types:packet()) -> emqx_types:packet_type().
|
||||
type(#mqtt_packet{header = #mqtt_packet_header{type = Type}}) ->
|
||||
Type.
|
||||
|
||||
%% @doc Name of MQTT packet type.
|
||||
-spec(type_name(emqx_types:packet() | non_neg_integer()) -> atom() | string()).
-spec type_name(emqx_types:packet() | non_neg_integer()) -> atom() | string().
type_name(#mqtt_packet{} = Packet) ->
    type_name(type(Packet));
type_name(0) -> 'FORBIDDEN';
type_name(0) ->
    'FORBIDDEN';
type_name(Type) when Type > 0 andalso Type =< tuple_size(?TYPE_NAMES) ->
    element(Type, ?TYPE_NAMES);
type_name(Type) -> "UNKNOWN("++ integer_to_list(Type) ++")".
type_name(Type) ->
    "UNKNOWN(" ++ integer_to_list(Type) ++ ")".
|
||||
|
||||
%% @doc Dup flag of MQTT packet.
|
||||
-spec(dup(emqx_types:packet()) -> boolean()).
|
||||
-spec dup(emqx_types:packet()) -> boolean().
|
||||
dup(#mqtt_packet{header = #mqtt_packet_header{dup = Dup}}) ->
|
||||
Dup.
|
||||
|
||||
%% @doc QoS of MQTT packet type.
|
||||
-spec(qos(emqx_types:packet()) -> emqx_types:qos()).
|
||||
-spec qos(emqx_types:packet()) -> emqx_types:qos().
|
||||
qos(#mqtt_packet{header = #mqtt_packet_header{qos = QoS}}) ->
|
||||
QoS.
|
||||
|
||||
%% @doc Retain flag of MQTT packet.
|
||||
-spec(retain(emqx_types:packet()) -> boolean()).
|
||||
-spec retain(emqx_types:packet()) -> boolean().
|
||||
retain(#mqtt_packet{header = #mqtt_packet_header{retain = Retain}}) ->
|
||||
Retain.
|
||||
|
||||
|
|
@ -111,14 +105,14 @@ retain(#mqtt_packet{header = #mqtt_packet_header{retain = Retain}}) ->
|
|||
%%--------------------------------------------------------------------
|
||||
|
||||
%% @doc Protocol name of the CONNECT Packet.
|
||||
-spec(proto_name(emqx_types:packet()|connect()) -> binary()).
|
||||
-spec proto_name(emqx_types:packet() | connect()) -> binary().
|
||||
proto_name(?CONNECT_PACKET(ConnPkt)) ->
|
||||
proto_name(ConnPkt);
|
||||
proto_name(#mqtt_packet_connect{proto_name = Name}) ->
|
||||
Name.
|
||||
|
||||
%% @doc Protocol version of the CONNECT Packet.
|
||||
-spec(proto_ver(emqx_types:packet()|connect()) -> emqx_types:proto_ver()).
|
||||
-spec proto_ver(emqx_types:packet() | connect()) -> emqx_types:proto_ver().
|
||||
proto_ver(?CONNECT_PACKET(ConnPkt)) ->
|
||||
proto_ver(ConnPkt);
|
||||
proto_ver(#mqtt_packet_connect{proto_ver = Ver}) ->
|
||||
|
|
@ -158,61 +152,52 @@ info(username, #mqtt_packet_connect{username = Username}) ->
|
|||
Username;
|
||||
info(password, #mqtt_packet_connect{password = Password}) ->
|
||||
Password;
|
||||
|
||||
info(ack_flags, #mqtt_packet_connack{ack_flags = Flags}) ->
|
||||
Flags;
|
||||
info(reason_code, #mqtt_packet_connack{reason_code = RC}) ->
|
||||
RC;
|
||||
info(properties, #mqtt_packet_connack{properties = Props}) ->
|
||||
Props;
|
||||
|
||||
info(topic_name, #mqtt_packet_publish{topic_name = Topic}) ->
|
||||
Topic;
|
||||
info(packet_id, #mqtt_packet_publish{packet_id = PacketId}) ->
|
||||
PacketId;
|
||||
info(properties, #mqtt_packet_publish{properties = Props}) ->
|
||||
Props;
|
||||
|
||||
info(packet_id, #mqtt_packet_puback{packet_id = PacketId}) ->
|
||||
PacketId;
|
||||
info(reason_code, #mqtt_packet_puback{reason_code = RC}) ->
|
||||
RC;
|
||||
info(properties, #mqtt_packet_puback{properties = Props}) ->
|
||||
Props;
|
||||
|
||||
info(packet_id, #mqtt_packet_subscribe{packet_id = PacketId}) ->
|
||||
PacketId;
|
||||
info(properties, #mqtt_packet_subscribe{properties = Props}) ->
|
||||
Props;
|
||||
info(topic_filters, #mqtt_packet_subscribe{topic_filters = Topics}) ->
|
||||
Topics;
|
||||
|
||||
info(packet_id, #mqtt_packet_suback{packet_id = PacketId}) ->
|
||||
PacketId;
|
||||
info(properties, #mqtt_packet_suback{properties = Props}) ->
|
||||
Props;
|
||||
info(reason_codes, #mqtt_packet_suback{reason_codes = RCs}) ->
|
||||
RCs;
|
||||
|
||||
info(packet_id, #mqtt_packet_unsubscribe{packet_id = PacketId}) ->
|
||||
PacketId;
|
||||
info(properties, #mqtt_packet_unsubscribe{properties = Props}) ->
|
||||
Props;
|
||||
info(topic_filters, #mqtt_packet_unsubscribe{topic_filters = Topics}) ->
|
||||
Topics;
|
||||
|
||||
info(packet_id, #mqtt_packet_unsuback{packet_id = PacketId}) ->
|
||||
PacketId;
|
||||
info(properties, #mqtt_packet_unsuback{properties = Props}) ->
|
||||
Props;
|
||||
info(reason_codes, #mqtt_packet_unsuback{reason_codes = RCs}) ->
|
||||
RCs;
|
||||
|
||||
info(reason_code, #mqtt_packet_disconnect{reason_code = RC}) ->
|
||||
RC;
|
||||
info(properties, #mqtt_packet_disconnect{properties = Props}) ->
|
||||
Props;
|
||||
|
||||
info(reason_code, #mqtt_packet_auth{reason_code = RC}) ->
|
||||
RC;
|
||||
info(properties, #mqtt_packet_auth{properties = Props}) ->
|
||||
|
|
@ -220,31 +205,22 @@ info(properties, #mqtt_packet_auth{properties = Props}) ->
|
|||
|
||||
set_props(Props, #mqtt_packet_connect{} = Pkt) ->
|
||||
Pkt#mqtt_packet_connect{properties = Props};
|
||||
|
||||
set_props(Props, #mqtt_packet_connack{} = Pkt) ->
|
||||
Pkt#mqtt_packet_connack{properties = Props};
|
||||
|
||||
set_props(Props, #mqtt_packet_publish{} = Pkt) ->
|
||||
Pkt#mqtt_packet_publish{properties = Props};
|
||||
|
||||
set_props(Props, #mqtt_packet_puback{} = Pkt) ->
|
||||
Pkt#mqtt_packet_puback{properties = Props};
|
||||
|
||||
set_props(Props, #mqtt_packet_subscribe{} = Pkt) ->
|
||||
Pkt#mqtt_packet_subscribe{properties = Props};
|
||||
|
||||
set_props(Props, #mqtt_packet_suback{} = Pkt) ->
|
||||
Pkt#mqtt_packet_suback{properties = Props};
|
||||
|
||||
set_props(Props, #mqtt_packet_unsubscribe{} = Pkt) ->
|
||||
Pkt#mqtt_packet_unsubscribe{properties = Props};
|
||||
|
||||
set_props(Props, #mqtt_packet_unsuback{} = Pkt) ->
|
||||
Pkt#mqtt_packet_unsuback{properties = Props};
|
||||
|
||||
set_props(Props, #mqtt_packet_disconnect{} = Pkt) ->
|
||||
Pkt#mqtt_packet_disconnect{properties = Props};
|
||||
|
||||
set_props(Props, #mqtt_packet_auth{} = Pkt) ->
|
||||
Pkt#mqtt_packet_auth{properties = Props}.
|
||||
|
||||
|
|
@ -253,10 +229,12 @@ set_props(Props, #mqtt_packet_auth{} = Pkt) ->
|
|||
%%--------------------------------------------------------------------
|
||||
|
||||
%% @doc Check PubSub Packet.
|
||||
-spec(check(emqx_types:packet()|publish()|subscribe()|unsubscribe())
|
||||
-> ok | {error, emqx_types:reason_code()}).
|
||||
check(#mqtt_packet{header = #mqtt_packet_header{type = ?PUBLISH},
|
||||
variable = PubPkt}) when not is_tuple(PubPkt) ->
|
||||
-spec check(emqx_types:packet() | publish() | subscribe() | unsubscribe()) ->
|
||||
ok | {error, emqx_types:reason_code()}.
|
||||
check(#mqtt_packet{
|
||||
header = #mqtt_packet_header{type = ?PUBLISH},
|
||||
variable = PubPkt
|
||||
}) when not is_tuple(PubPkt) ->
|
||||
%% publish without any data
|
||||
%% disconnect instead of crash
|
||||
{error, ?RC_PROTOCOL_ERROR};
|
||||
|
|
@ -266,11 +244,10 @@ check(#mqtt_packet{variable = #mqtt_packet_subscribe{} = SubPkt}) ->
|
|||
check(SubPkt);
|
||||
check(#mqtt_packet{variable = #mqtt_packet_unsubscribe{} = UnsubPkt}) ->
|
||||
check(UnsubPkt);
|
||||
|
||||
%% A Topic Alias of 0 is not permitted.
|
||||
check(#mqtt_packet_publish{topic_name = <<>>, properties = #{'Topic-Alias':= 0}}) ->
|
||||
check(#mqtt_packet_publish{topic_name = <<>>, properties = #{'Topic-Alias' := 0}}) ->
|
||||
{error, ?RC_PROTOCOL_ERROR};
|
||||
check(#mqtt_packet_publish{topic_name = <<>>, properties = #{'Topic-Alias':= _Alias}}) ->
|
||||
check(#mqtt_packet_publish{topic_name = <<>>, properties = #{'Topic-Alias' := _Alias}}) ->
|
||||
ok;
|
||||
check(#mqtt_packet_publish{topic_name = <<>>, properties = #{}}) ->
|
||||
{error, ?RC_PROTOCOL_ERROR};
|
||||
|
|
@ -281,26 +258,24 @@ check(#mqtt_packet_publish{topic_name = TopicName, properties = Props}) ->
|
|||
error:_Error ->
|
||||
{error, ?RC_TOPIC_NAME_INVALID}
|
||||
end;
|
||||
|
||||
check(#mqtt_packet_subscribe{properties = #{'Subscription-Identifier' := I}})
|
||||
when I =< 0; I >= 16#FFFFFFF ->
|
||||
check(#mqtt_packet_subscribe{properties = #{'Subscription-Identifier' := I}}) when
|
||||
I =< 0; I >= 16#FFFFFFF
|
||||
->
|
||||
{error, ?RC_SUBSCRIPTION_IDENTIFIERS_NOT_SUPPORTED};
|
||||
|
||||
check(#mqtt_packet_subscribe{topic_filters = []}) ->
|
||||
{error, ?RC_TOPIC_FILTER_INVALID};
|
||||
|
||||
check(#mqtt_packet_subscribe{topic_filters = TopicFilters}) ->
|
||||
try validate_topic_filters(TopicFilters)
|
||||
try
|
||||
validate_topic_filters(TopicFilters)
|
||||
catch
|
||||
error:_Error ->
|
||||
{error, ?RC_TOPIC_FILTER_INVALID}
|
||||
end;
|
||||
|
||||
check(#mqtt_packet_unsubscribe{topic_filters = []}) ->
|
||||
{error, ?RC_TOPIC_FILTER_INVALID};
|
||||
|
||||
check(#mqtt_packet_unsubscribe{topic_filters = TopicFilters}) ->
|
||||
try validate_topic_filters(TopicFilters)
|
||||
try
|
||||
validate_topic_filters(TopicFilters)
|
||||
catch
|
||||
error:_Error ->
|
||||
{error, ?RC_TOPIC_FILTER_INVALID}
|
||||
|
|
@ -308,10 +283,8 @@ check(#mqtt_packet_unsubscribe{topic_filters = TopicFilters}) ->
|
|||
|
||||
check_pub_props(#{'Topic-Alias' := 0}) ->
|
||||
{error, ?RC_TOPIC_ALIAS_INVALID};
|
||||
|
||||
check_pub_props(#{'Subscription-Identifier' := 0}) ->
|
||||
{error, ?RC_PROTOCOL_ERROR};
|
||||
|
||||
check_pub_props(#{'Response-Topic' := ResponseTopic}) ->
|
||||
try emqx_topic:validate(name, ResponseTopic) of
|
||||
true -> ok
|
||||
|
|
@ -319,41 +292,70 @@ check_pub_props(#{'Response-Topic' := ResponseTopic}) ->
|
|||
error:_Error ->
|
||||
{error, ?RC_PROTOCOL_ERROR}
|
||||
end;
|
||||
check_pub_props(_Props) -> ok.
|
||||
check_pub_props(_Props) ->
|
||||
ok.
|
||||
|
||||
%% @doc Check CONNECT Packet.
|
||||
-spec(check(emqx_types:packet()|connect(), Opts :: map())
|
||||
-> ok | {error, emqx_types:reason_code()}).
|
||||
-spec check(emqx_types:packet() | connect(), Opts :: map()) ->
|
||||
ok | {error, emqx_types:reason_code()}.
|
||||
check(?CONNECT_PACKET(ConnPkt), Opts) ->
|
||||
check(ConnPkt, Opts);
|
||||
check(ConnPkt, Opts) when is_record(ConnPkt, mqtt_packet_connect) ->
|
||||
run_checks([fun check_proto_ver/2,
|
||||
fun check_client_id/2,
|
||||
fun check_conn_props/2,
|
||||
fun check_will_msg/2], ConnPkt, Opts).
|
||||
run_checks(
|
||||
[
|
||||
fun check_proto_ver/2,
|
||||
fun check_client_id/2,
|
||||
fun check_conn_props/2,
|
||||
fun check_will_msg/2
|
||||
],
|
||||
ConnPkt,
|
||||
Opts
|
||||
).
|
||||
|
||||
check_proto_ver(#mqtt_packet_connect{proto_ver = Ver,
|
||||
proto_name = Name}, _Opts) ->
|
||||
check_proto_ver(
|
||||
#mqtt_packet_connect{
|
||||
proto_ver = Ver,
|
||||
proto_name = Name
|
||||
},
|
||||
_Opts
|
||||
) ->
|
||||
case proplists:get_value(Ver, ?PROTOCOL_NAMES) of
|
||||
Name -> ok;
|
||||
Name -> ok;
|
||||
_Other -> {error, ?RC_UNSUPPORTED_PROTOCOL_VERSION}
|
||||
end.
|
||||
|
||||
%% MQTT3.1 does not allow null clientId
|
||||
check_client_id(#mqtt_packet_connect{proto_ver = ?MQTT_PROTO_V3,
|
||||
clientid = <<>>}, _Opts) ->
|
||||
check_client_id(
|
||||
#mqtt_packet_connect{
|
||||
proto_ver = ?MQTT_PROTO_V3,
|
||||
clientid = <<>>
|
||||
},
|
||||
_Opts
|
||||
) ->
|
||||
{error, ?RC_CLIENT_IDENTIFIER_NOT_VALID};
|
||||
%% Issue#599: Null clientId and clean_start = false
|
||||
check_client_id(#mqtt_packet_connect{clientid = <<>>,
|
||||
clean_start = false}, _Opts) ->
|
||||
check_client_id(
|
||||
#mqtt_packet_connect{
|
||||
clientid = <<>>,
|
||||
clean_start = false
|
||||
},
|
||||
_Opts
|
||||
) ->
|
||||
{error, ?RC_CLIENT_IDENTIFIER_NOT_VALID};
|
||||
check_client_id(#mqtt_packet_connect{clientid = <<>>,
|
||||
clean_start = true}, _Opts) ->
|
||||
check_client_id(
|
||||
#mqtt_packet_connect{
|
||||
clientid = <<>>,
|
||||
clean_start = true
|
||||
},
|
||||
_Opts
|
||||
) ->
|
||||
ok;
|
||||
check_client_id(#mqtt_packet_connect{clientid = ClientId},
|
||||
#{max_clientid_len := MaxLen} = _Opts) ->
|
||||
check_client_id(
|
||||
#mqtt_packet_connect{clientid = ClientId},
|
||||
#{max_clientid_len := MaxLen} = _Opts
|
||||
) ->
|
||||
case (1 =< (Len = byte_size(ClientId))) andalso (Len =< MaxLen) of
|
||||
true -> ok;
|
||||
true -> ok;
|
||||
false -> {error, ?RC_CLIENT_IDENTIFIER_NOT_VALID}
|
||||
end.
|
||||
|
||||
|
|
@ -361,32 +363,44 @@ check_conn_props(#mqtt_packet_connect{properties = undefined}, _Opts) ->
|
|||
ok;
|
||||
check_conn_props(#mqtt_packet_connect{properties = #{'Receive-Maximum' := 0}}, _Opts) ->
|
||||
{error, ?RC_PROTOCOL_ERROR};
|
||||
check_conn_props(#mqtt_packet_connect{properties = #{'Request-Response-Information' := ReqRespInfo}}, _Opts)
|
||||
when ReqRespInfo =/= 0, ReqRespInfo =/= 1 ->
|
||||
check_conn_props(
|
||||
#mqtt_packet_connect{properties = #{'Request-Response-Information' := ReqRespInfo}}, _Opts
|
||||
) when
|
||||
ReqRespInfo =/= 0, ReqRespInfo =/= 1
|
||||
->
|
||||
{error, ?RC_PROTOCOL_ERROR};
|
||||
check_conn_props(#mqtt_packet_connect{properties = #{'Request-Problem-Information' := ReqProInfo}}, _Opts)
|
||||
when ReqProInfo =/= 0, ReqProInfo =/= 1 ->
|
||||
check_conn_props(
|
||||
#mqtt_packet_connect{properties = #{'Request-Problem-Information' := ReqProInfo}}, _Opts
|
||||
) when
|
||||
ReqProInfo =/= 0, ReqProInfo =/= 1
|
||||
->
|
||||
{error, ?RC_PROTOCOL_ERROR};
|
||||
check_conn_props(_ConnPkt, _Opts) -> ok.
|
||||
check_conn_props(_ConnPkt, _Opts) ->
|
||||
ok.
|
||||
|
||||
check_will_msg(#mqtt_packet_connect{will_flag = false}, _Caps) ->
|
||||
ok;
|
||||
check_will_msg(#mqtt_packet_connect{will_retain = true},
|
||||
_Opts = #{mqtt_retain_available := false}) ->
|
||||
check_will_msg(
|
||||
#mqtt_packet_connect{will_retain = true},
|
||||
_Opts = #{mqtt_retain_available := false}
|
||||
) ->
|
||||
{error, ?RC_RETAIN_NOT_SUPPORTED};
|
||||
check_will_msg(#mqtt_packet_connect{will_qos = WillQoS},
|
||||
_Opts = #{max_qos_allowed := MaxQoS}) when WillQoS > MaxQoS ->
|
||||
check_will_msg(
|
||||
#mqtt_packet_connect{will_qos = WillQoS},
|
||||
_Opts = #{max_qos_allowed := MaxQoS}
|
||||
) when WillQoS > MaxQoS ->
|
||||
{error, ?RC_QOS_NOT_SUPPORTED};
|
||||
check_will_msg(#mqtt_packet_connect{will_topic = WillTopic}, _Opts) ->
|
||||
try emqx_topic:validate(name, WillTopic) of
|
||||
true -> ok
|
||||
catch error:_Error ->
|
||||
{error, ?RC_TOPIC_NAME_INVALID}
|
||||
catch
|
||||
error:_Error ->
|
||||
{error, ?RC_TOPIC_NAME_INVALID}
|
||||
end.
|
||||
|
||||
run_checks([], _Packet, _Options) ->
|
||||
ok;
|
||||
run_checks([Check|More], Packet, Options) ->
|
||||
run_checks([Check | More], Packet, Options) ->
|
||||
case Check(Packet, Options) of
|
||||
ok -> run_checks(More, Packet, Options);
|
||||
Error = {error, _Reason} -> Error
|
||||
|
|
@ -396,134 +410,192 @@ run_checks([Check|More], Packet, Options) ->
|
|||
%% @private
|
||||
validate_topic_filters(TopicFilters) ->
|
||||
lists:foreach(
|
||||
fun({TopicFilter, _SubOpts}) ->
|
||||
emqx_topic:validate(TopicFilter);
|
||||
(TopicFilter) ->
|
||||
emqx_topic:validate(TopicFilter)
|
||||
end, TopicFilters).
|
||||
fun
|
||||
({TopicFilter, _SubOpts}) ->
|
||||
emqx_topic:validate(TopicFilter);
|
||||
(TopicFilter) ->
|
||||
emqx_topic:validate(TopicFilter)
|
||||
end,
|
||||
TopicFilters
|
||||
).
|
||||
|
||||
-spec(to_message(emqx_types:packet(), emqx_types:clientid()) -> emqx_types:message()).
|
||||
-spec to_message(emqx_types:packet(), emqx_types:clientid()) -> emqx_types:message().
|
||||
to_message(Packet, ClientId) ->
|
||||
to_message(Packet, ClientId, #{}).
|
||||
|
||||
%% @doc Transform Publish Packet to Message.
|
||||
-spec(to_message(emqx_types:packet(), emqx_types:clientid(), map()) -> emqx_types:message()).
|
||||
to_message(#mqtt_packet{
|
||||
header = #mqtt_packet_header{
|
||||
type = ?PUBLISH,
|
||||
retain = Retain,
|
||||
qos = QoS,
|
||||
dup = Dup},
|
||||
variable = #mqtt_packet_publish{
|
||||
topic_name = Topic,
|
||||
properties = Props},
|
||||
payload = Payload
|
||||
}, ClientId, Headers) ->
|
||||
-spec to_message(emqx_types:packet(), emqx_types:clientid(), map()) -> emqx_types:message().
|
||||
to_message(
|
||||
#mqtt_packet{
|
||||
header = #mqtt_packet_header{
|
||||
type = ?PUBLISH,
|
||||
retain = Retain,
|
||||
qos = QoS,
|
||||
dup = Dup
|
||||
},
|
||||
variable = #mqtt_packet_publish{
|
||||
topic_name = Topic,
|
||||
properties = Props
|
||||
},
|
||||
payload = Payload
|
||||
},
|
||||
ClientId,
|
||||
Headers
|
||||
) ->
|
||||
Msg = emqx_message:make(ClientId, QoS, Topic, Payload),
|
||||
Msg#message{flags = #{dup => Dup, retain => Retain},
|
||||
headers = Headers#{properties => Props}}.
|
||||
Msg#message{
|
||||
flags = #{dup => Dup, retain => Retain},
|
||||
headers = Headers#{properties => Props}
|
||||
}.
|
||||
|
||||
-spec(will_msg(#mqtt_packet_connect{}) -> emqx_types:message()).
|
||||
-spec will_msg(#mqtt_packet_connect{}) -> emqx_types:message().
|
||||
will_msg(#mqtt_packet_connect{will_flag = false}) ->
|
||||
undefined;
|
||||
will_msg(#mqtt_packet_connect{clientid = ClientId,
|
||||
username = Username,
|
||||
will_retain = Retain,
|
||||
will_qos = QoS,
|
||||
will_topic = Topic,
|
||||
will_props = Props,
|
||||
will_payload = Payload}) ->
|
||||
will_msg(#mqtt_packet_connect{
|
||||
clientid = ClientId,
|
||||
username = Username,
|
||||
will_retain = Retain,
|
||||
will_qos = QoS,
|
||||
will_topic = Topic,
|
||||
will_props = Props,
|
||||
will_payload = Payload
|
||||
}) ->
|
||||
Msg = emqx_message:make(ClientId, QoS, Topic, Payload),
|
||||
Msg#message{flags = #{dup => false, retain => Retain},
|
||||
headers = #{username => Username, properties => Props}}.
|
||||
Msg#message{
|
||||
flags = #{dup => false, retain => Retain},
|
||||
headers = #{username => Username, properties => Props}
|
||||
}.
|
||||
|
||||
%% @doc Format packet
|
||||
-spec(format(emqx_types:packet()) -> iolist()).
|
||||
-spec format(emqx_types:packet()) -> iolist().
|
||||
format(Packet) -> format(Packet, emqx_trace_handler:payload_encode()).
|
||||
|
||||
%% @doc Format packet
|
||||
-spec(format(emqx_types:packet(), hex | text | hidden) -> iolist()).
|
||||
-spec format(emqx_types:packet(), hex | text | hidden) -> iolist().
|
||||
format(#mqtt_packet{header = Header, variable = Variable, payload = Payload}, PayloadEncode) ->
|
||||
HeaderIO = format_header(Header),
|
||||
case format_variable(Variable, Payload, PayloadEncode) of
|
||||
"" -> HeaderIO;
|
||||
VarIO -> [HeaderIO,",", VarIO]
|
||||
VarIO -> [HeaderIO, ",", VarIO]
|
||||
end.
|
||||
|
||||
format_header(#mqtt_packet_header{type = Type,
|
||||
dup = Dup,
|
||||
qos = QoS,
|
||||
retain = Retain}) ->
|
||||
format_header(#mqtt_packet_header{
|
||||
type = Type,
|
||||
dup = Dup,
|
||||
qos = QoS,
|
||||
retain = Retain
|
||||
}) ->
|
||||
io_lib:format("~ts(Q~p, R~p, D~p)", [type_name(Type), QoS, i(Retain), i(Dup)]).
|
||||
|
||||
format_variable(undefined, _, _) -> "";
|
||||
format_variable(undefined, _, _) ->
|
||||
"";
|
||||
format_variable(Variable, undefined, PayloadEncode) ->
|
||||
format_variable(Variable, PayloadEncode);
|
||||
format_variable(Variable, Payload, PayloadEncode) ->
|
||||
[format_variable(Variable, PayloadEncode), format_payload(Payload, PayloadEncode)].
|
||||
|
||||
format_variable(#mqtt_packet_connect{
|
||||
proto_ver = ProtoVer,
|
||||
proto_name = ProtoName,
|
||||
will_retain = WillRetain,
|
||||
will_qos = WillQoS,
|
||||
will_flag = WillFlag,
|
||||
clean_start = CleanStart,
|
||||
keepalive = KeepAlive,
|
||||
clientid = ClientId,
|
||||
will_topic = WillTopic,
|
||||
will_payload = WillPayload,
|
||||
username = Username,
|
||||
password = Password},
|
||||
PayloadEncode) ->
|
||||
format_variable(
|
||||
#mqtt_packet_connect{
|
||||
proto_ver = ProtoVer,
|
||||
proto_name = ProtoName,
|
||||
will_retain = WillRetain,
|
||||
will_qos = WillQoS,
|
||||
will_flag = WillFlag,
|
||||
clean_start = CleanStart,
|
||||
keepalive = KeepAlive,
|
||||
clientid = ClientId,
|
||||
will_topic = WillTopic,
|
||||
will_payload = WillPayload,
|
||||
username = Username,
|
||||
password = Password
|
||||
},
|
||||
PayloadEncode
|
||||
) ->
|
||||
Base = io_lib:format(
|
||||
"ClientId=~ts, ProtoName=~ts, ProtoVsn=~p, CleanStart=~ts, KeepAlive=~p, Username=~ts, Password=~ts",
|
||||
[ClientId, ProtoName, ProtoVer, CleanStart, KeepAlive, Username, format_password(Password)]),
|
||||
[ClientId, ProtoName, ProtoVer, CleanStart, KeepAlive, Username, format_password(Password)]
|
||||
),
|
||||
case WillFlag of
|
||||
true ->
|
||||
[Base, io_lib:format(", Will(Q~p, R~p, Topic=~ts ",
|
||||
[WillQoS, i(WillRetain), WillTopic]),
|
||||
format_payload(WillPayload, PayloadEncode), ")"];
|
||||
[
|
||||
Base,
|
||||
io_lib:format(
|
||||
", Will(Q~p, R~p, Topic=~ts ",
|
||||
[WillQoS, i(WillRetain), WillTopic]
|
||||
),
|
||||
format_payload(WillPayload, PayloadEncode),
|
||||
")"
|
||||
];
|
||||
false ->
|
||||
Base
|
||||
end;
|
||||
|
||||
format_variable(#mqtt_packet_disconnect
|
||||
{reason_code = ReasonCode}, _) ->
|
||||
format_variable(
|
||||
#mqtt_packet_disconnect{
|
||||
reason_code = ReasonCode
|
||||
},
|
||||
_
|
||||
) ->
|
||||
io_lib:format("ReasonCode=~p", [ReasonCode]);
|
||||
|
||||
format_variable(#mqtt_packet_connack{ack_flags = AckFlags,
|
||||
reason_code = ReasonCode}, _) ->
|
||||
format_variable(
|
||||
#mqtt_packet_connack{
|
||||
ack_flags = AckFlags,
|
||||
reason_code = ReasonCode
|
||||
},
|
||||
_
|
||||
) ->
|
||||
io_lib:format("AckFlags=~p, ReasonCode=~p", [AckFlags, ReasonCode]);
|
||||
|
||||
format_variable(#mqtt_packet_publish{topic_name = TopicName,
|
||||
packet_id = PacketId}, _) ->
|
||||
format_variable(
|
||||
#mqtt_packet_publish{
|
||||
topic_name = TopicName,
|
||||
packet_id = PacketId
|
||||
},
|
||||
_
|
||||
) ->
|
||||
io_lib:format("Topic=~ts, PacketId=~p", [TopicName, PacketId]);
|
||||
|
||||
format_variable(#mqtt_packet_puback{packet_id = PacketId,
|
||||
reason_code = ReasonCode}, _) ->
|
||||
format_variable(
|
||||
#mqtt_packet_puback{
|
||||
packet_id = PacketId,
|
||||
reason_code = ReasonCode
|
||||
},
|
||||
_
|
||||
) ->
|
||||
io_lib:format("PacketId=~p, ReasonCode=~p", [PacketId, ReasonCode]);
|
||||
|
||||
format_variable(#mqtt_packet_subscribe{packet_id = PacketId,
|
||||
topic_filters = TopicFilters}, _) ->
|
||||
[io_lib:format("PacketId=~p ", [PacketId]), "TopicFilters=",
|
||||
format_topic_filters(TopicFilters)];
|
||||
|
||||
format_variable(#mqtt_packet_unsubscribe{packet_id = PacketId,
|
||||
topic_filters = Topics}, _) ->
|
||||
[io_lib:format("PacketId=~p ", [PacketId]), "TopicFilters=",
|
||||
format_topic_filters(Topics)];
|
||||
|
||||
format_variable(#mqtt_packet_suback{packet_id = PacketId,
|
||||
reason_codes = ReasonCodes}, _) ->
|
||||
format_variable(
|
||||
#mqtt_packet_subscribe{
|
||||
packet_id = PacketId,
|
||||
topic_filters = TopicFilters
|
||||
},
|
||||
_
|
||||
) ->
|
||||
[
|
||||
io_lib:format("PacketId=~p ", [PacketId]),
|
||||
"TopicFilters=",
|
||||
format_topic_filters(TopicFilters)
|
||||
];
|
||||
format_variable(
|
||||
#mqtt_packet_unsubscribe{
|
||||
packet_id = PacketId,
|
||||
topic_filters = Topics
|
||||
},
|
||||
_
|
||||
) ->
|
||||
[
|
||||
io_lib:format("PacketId=~p ", [PacketId]),
|
||||
"TopicFilters=",
|
||||
format_topic_filters(Topics)
|
||||
];
|
||||
format_variable(
|
||||
#mqtt_packet_suback{
|
||||
packet_id = PacketId,
|
||||
reason_codes = ReasonCodes
|
||||
},
|
||||
_
|
||||
) ->
|
||||
io_lib:format("PacketId=~p, ReasonCodes=~p", [PacketId, ReasonCodes]);
|
||||
|
||||
format_variable(#mqtt_packet_unsuback{packet_id = PacketId}, _) ->
|
||||
io_lib:format("PacketId=~p", [PacketId]);
|
||||
|
||||
format_variable(#mqtt_packet_auth{reason_code = ReasonCode}, _) ->
|
||||
io_lib:format("ReasonCode=~p", [ReasonCode]);
|
||||
|
||||
format_variable(PacketId, _) when is_integer(PacketId) ->
|
||||
io_lib:format("PacketId=~p", [PacketId]).
|
||||
|
||||
|
|
@ -534,80 +606,92 @@ format_payload(Payload, text) -> ["Payload=", io_lib:format("~ts", [Payload])];
|
|||
format_payload(Payload, hex) -> ["Payload(hex)=", encode_hex(Payload)];
|
||||
format_payload(_, hidden) -> "Payload=******".
|
||||
|
||||
i(true) -> 1;
|
||||
i(true) -> 1;
|
||||
i(false) -> 0;
|
||||
i(I) when is_integer(I) -> I.
|
||||
|
||||
format_topic_filters(Filters) ->
|
||||
["[",
|
||||
lists:join(",",
|
||||
[
|
||||
"[",
|
||||
lists:join(
|
||||
",",
|
||||
lists:map(
|
||||
fun({TopicFilter, SubOpts}) ->
|
||||
io_lib:format("~ts(~p)", [TopicFilter, SubOpts]);
|
||||
fun
|
||||
({TopicFilter, SubOpts}) ->
|
||||
io_lib:format("~ts(~p)", [TopicFilter, SubOpts]);
|
||||
(TopicFilter) ->
|
||||
io_lib:format("~ts", [TopicFilter])
|
||||
end, Filters)),
|
||||
"]"].
|
||||
end,
|
||||
Filters
|
||||
)
|
||||
),
|
||||
"]"
|
||||
].
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% Hex encoding functions
|
||||
%% Copy from binary:encode_hex/1 (was only introduced in OTP24).
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
-define(HEX(X), (hex(X)):16).
|
||||
-compile({inline,[hex/1]}).
|
||||
-compile({inline, [hex/1]}).
|
||||
-spec encode_hex(Bin) -> Bin2 when
|
||||
Bin :: binary(),
|
||||
Bin2 :: <<_:_*16>>.
|
||||
encode_hex(Data) when byte_size(Data) rem 8 =:= 0 ->
|
||||
<< <<?HEX(A),?HEX(B),?HEX(C),?HEX(D),?HEX(E),?HEX(F),?HEX(G),?HEX(H)>> || <<A,B,C,D,E,F,G,H>> <= Data >>;
|
||||
<<
|
||||
<<?HEX(A), ?HEX(B), ?HEX(C), ?HEX(D), ?HEX(E), ?HEX(F), ?HEX(G), ?HEX(H)>>
|
||||
|| <<A, B, C, D, E, F, G, H>> <= Data
|
||||
>>;
|
||||
encode_hex(Data) when byte_size(Data) rem 7 =:= 0 ->
|
||||
<< <<?HEX(A),?HEX(B),?HEX(C),?HEX(D),?HEX(E),?HEX(F),?HEX(G)>> || <<A,B,C,D,E,F,G>> <= Data >>;
|
||||
<<
|
||||
<<?HEX(A), ?HEX(B), ?HEX(C), ?HEX(D), ?HEX(E), ?HEX(F), ?HEX(G)>>
|
||||
|| <<A, B, C, D, E, F, G>> <= Data
|
||||
>>;
|
||||
encode_hex(Data) when byte_size(Data) rem 6 =:= 0 ->
|
||||
<< <<?HEX(A),?HEX(B),?HEX(C),?HEX(D),?HEX(E),?HEX(F)>> || <<A,B,C,D,E,F>> <= Data >>;
|
||||
<<<<?HEX(A), ?HEX(B), ?HEX(C), ?HEX(D), ?HEX(E), ?HEX(F)>> || <<A, B, C, D, E, F>> <= Data>>;
|
||||
encode_hex(Data) when byte_size(Data) rem 5 =:= 0 ->
|
||||
<< <<?HEX(A),?HEX(B),?HEX(C),?HEX(D),?HEX(E)>> || <<A,B,C,D,E>> <= Data >>;
|
||||
<<<<?HEX(A), ?HEX(B), ?HEX(C), ?HEX(D), ?HEX(E)>> || <<A, B, C, D, E>> <= Data>>;
|
||||
encode_hex(Data) when byte_size(Data) rem 4 =:= 0 ->
|
||||
<< <<?HEX(A),?HEX(B),?HEX(C),?HEX(D)>> || <<A,B,C,D>> <= Data >>;
|
||||
<<<<?HEX(A), ?HEX(B), ?HEX(C), ?HEX(D)>> || <<A, B, C, D>> <= Data>>;
|
||||
encode_hex(Data) when byte_size(Data) rem 3 =:= 0 ->
|
||||
<< <<?HEX(A),?HEX(B),?HEX(C)>> || <<A,B,C>> <= Data >>;
|
||||
<<<<?HEX(A), ?HEX(B), ?HEX(C)>> || <<A, B, C>> <= Data>>;
|
||||
encode_hex(Data) when byte_size(Data) rem 2 =:= 0 ->
|
||||
<< <<?HEX(A),?HEX(B)>> || <<A,B>> <= Data >>;
|
||||
<<<<?HEX(A), ?HEX(B)>> || <<A, B>> <= Data>>;
|
||||
encode_hex(Data) when is_binary(Data) ->
|
||||
<< <<?HEX(N)>> || <<N>> <= Data >>;
|
||||
<<<<?HEX(N)>> || <<N>> <= Data>>;
|
||||
encode_hex(Bin) ->
|
||||
erlang:error(badarg, [Bin]).
|
||||
|
||||
hex(X) ->
|
||||
element(
|
||||
X+1, {16#3030, 16#3031, 16#3032, 16#3033, 16#3034, 16#3035, 16#3036, 16#3037, 16#3038, 16#3039, 16#3041,
|
||||
16#3042, 16#3043, 16#3044, 16#3045, 16#3046,
|
||||
16#3130, 16#3131, 16#3132, 16#3133, 16#3134, 16#3135, 16#3136, 16#3137, 16#3138, 16#3139, 16#3141,
|
||||
16#3142, 16#3143, 16#3144, 16#3145, 16#3146,
|
||||
16#3230, 16#3231, 16#3232, 16#3233, 16#3234, 16#3235, 16#3236, 16#3237, 16#3238, 16#3239, 16#3241,
|
||||
16#3242, 16#3243, 16#3244, 16#3245, 16#3246,
|
||||
16#3330, 16#3331, 16#3332, 16#3333, 16#3334, 16#3335, 16#3336, 16#3337, 16#3338, 16#3339, 16#3341,
|
||||
16#3342, 16#3343, 16#3344, 16#3345, 16#3346,
|
||||
16#3430, 16#3431, 16#3432, 16#3433, 16#3434, 16#3435, 16#3436, 16#3437, 16#3438, 16#3439, 16#3441,
|
||||
16#3442, 16#3443, 16#3444, 16#3445, 16#3446,
|
||||
16#3530, 16#3531, 16#3532, 16#3533, 16#3534, 16#3535, 16#3536, 16#3537, 16#3538, 16#3539, 16#3541,
|
||||
16#3542, 16#3543, 16#3544, 16#3545, 16#3546,
|
||||
16#3630, 16#3631, 16#3632, 16#3633, 16#3634, 16#3635, 16#3636, 16#3637, 16#3638, 16#3639, 16#3641,
|
||||
16#3642, 16#3643, 16#3644, 16#3645, 16#3646,
|
||||
16#3730, 16#3731, 16#3732, 16#3733, 16#3734, 16#3735, 16#3736, 16#3737, 16#3738, 16#3739, 16#3741,
|
||||
16#3742, 16#3743, 16#3744, 16#3745, 16#3746,
|
||||
16#3830, 16#3831, 16#3832, 16#3833, 16#3834, 16#3835, 16#3836, 16#3837, 16#3838, 16#3839, 16#3841,
|
||||
16#3842, 16#3843, 16#3844, 16#3845, 16#3846,
|
||||
16#3930, 16#3931, 16#3932, 16#3933, 16#3934, 16#3935, 16#3936, 16#3937, 16#3938, 16#3939, 16#3941,
|
||||
16#3942, 16#3943, 16#3944, 16#3945, 16#3946,
|
||||
16#4130, 16#4131, 16#4132, 16#4133, 16#4134, 16#4135, 16#4136, 16#4137, 16#4138, 16#4139, 16#4141,
|
||||
16#4142, 16#4143, 16#4144, 16#4145, 16#4146,
|
||||
16#4230, 16#4231, 16#4232, 16#4233, 16#4234, 16#4235, 16#4236, 16#4237, 16#4238, 16#4239, 16#4241,
|
||||
16#4242, 16#4243, 16#4244, 16#4245, 16#4246,
|
||||
16#4330, 16#4331, 16#4332, 16#4333, 16#4334, 16#4335, 16#4336, 16#4337, 16#4338, 16#4339, 16#4341,
|
||||
16#4342, 16#4343, 16#4344, 16#4345, 16#4346,
|
||||
16#4430, 16#4431, 16#4432, 16#4433, 16#4434, 16#4435, 16#4436, 16#4437, 16#4438, 16#4439, 16#4441,
|
||||
16#4442, 16#4443, 16#4444, 16#4445, 16#4446,
|
||||
16#4530, 16#4531, 16#4532, 16#4533, 16#4534, 16#4535, 16#4536, 16#4537, 16#4538, 16#4539, 16#4541,
|
||||
16#4542, 16#4543, 16#4544, 16#4545, 16#4546,
|
||||
16#4630, 16#4631, 16#4632, 16#4633, 16#4634, 16#4635, 16#4636, 16#4637, 16#4638, 16#4639, 16#4641,
|
||||
16#4642, 16#4643, 16#4644, 16#4645, 16#4646}).
|
||||
X + 1,
|
||||
{16#3030, 16#3031, 16#3032, 16#3033, 16#3034, 16#3035, 16#3036, 16#3037, 16#3038, 16#3039,
|
||||
16#3041, 16#3042, 16#3043, 16#3044, 16#3045, 16#3046, 16#3130, 16#3131, 16#3132,
|
||||
16#3133, 16#3134, 16#3135, 16#3136, 16#3137, 16#3138, 16#3139, 16#3141, 16#3142,
|
||||
16#3143, 16#3144, 16#3145, 16#3146, 16#3230, 16#3231, 16#3232, 16#3233, 16#3234,
|
||||
16#3235, 16#3236, 16#3237, 16#3238, 16#3239, 16#3241, 16#3242, 16#3243, 16#3244,
|
||||
16#3245, 16#3246, 16#3330, 16#3331, 16#3332, 16#3333, 16#3334, 16#3335, 16#3336,
|
||||
16#3337, 16#3338, 16#3339, 16#3341, 16#3342, 16#3343, 16#3344, 16#3345, 16#3346,
|
||||
16#3430, 16#3431, 16#3432, 16#3433, 16#3434, 16#3435, 16#3436, 16#3437, 16#3438,
|
||||
16#3439, 16#3441, 16#3442, 16#3443, 16#3444, 16#3445, 16#3446, 16#3530, 16#3531,
|
||||
16#3532, 16#3533, 16#3534, 16#3535, 16#3536, 16#3537, 16#3538, 16#3539, 16#3541,
|
||||
16#3542, 16#3543, 16#3544, 16#3545, 16#3546, 16#3630, 16#3631, 16#3632, 16#3633,
|
||||
16#3634, 16#3635, 16#3636, 16#3637, 16#3638, 16#3639, 16#3641, 16#3642, 16#3643,
|
||||
16#3644, 16#3645, 16#3646, 16#3730, 16#3731, 16#3732, 16#3733, 16#3734, 16#3735,
|
||||
16#3736, 16#3737, 16#3738, 16#3739, 16#3741, 16#3742, 16#3743, 16#3744, 16#3745,
|
||||
16#3746, 16#3830, 16#3831, 16#3832, 16#3833, 16#3834, 16#3835, 16#3836, 16#3837,
|
||||
16#3838, 16#3839, 16#3841, 16#3842, 16#3843, 16#3844, 16#3845, 16#3846, 16#3930,
|
||||
16#3931, 16#3932, 16#3933, 16#3934, 16#3935, 16#3936, 16#3937, 16#3938, 16#3939,
|
||||
16#3941, 16#3942, 16#3943, 16#3944, 16#3945, 16#3946, 16#4130, 16#4131, 16#4132,
|
||||
16#4133, 16#4134, 16#4135, 16#4136, 16#4137, 16#4138, 16#4139, 16#4141, 16#4142,
|
||||
16#4143, 16#4144, 16#4145, 16#4146, 16#4230, 16#4231, 16#4232, 16#4233, 16#4234,
|
||||
16#4235, 16#4236, 16#4237, 16#4238, 16#4239, 16#4241, 16#4242, 16#4243, 16#4244,
|
||||
16#4245, 16#4246, 16#4330, 16#4331, 16#4332, 16#4333, 16#4334, 16#4335, 16#4336,
|
||||
16#4337, 16#4338, 16#4339, 16#4341, 16#4342, 16#4343, 16#4344, 16#4345, 16#4346,
|
||||
16#4430, 16#4431, 16#4432, 16#4433, 16#4434, 16#4435, 16#4436, 16#4437, 16#4438,
|
||||
16#4439, 16#4441, 16#4442, 16#4443, 16#4444, 16#4445, 16#4446, 16#4530, 16#4531,
|
||||
16#4532, 16#4533, 16#4534, 16#4535, 16#4536, 16#4537, 16#4538, 16#4539, 16#4541,
|
||||
16#4542, 16#4543, 16#4544, 16#4545, 16#4546, 16#4630, 16#4631, 16#4632, 16#4633,
|
||||
16#4634, 16#4635, 16#4636, 16#4637, 16#4638, 16#4639, 16#4641, 16#4642, 16#4643,
|
||||
16#4644, 16#4645, 16#4646}
|
||||
).
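The table above maps each byte to the two ASCII characters of its uppercase hex form, so encode_hex/1 behaves like binary:encode_hex/1 from OTP 24, per the comment at the top of this block. A quick hedged example:

    %% <<"MQTT">> is 16#4D 16#51 16#54 16#54, so:
    <<"4D515454">> = emqx_packet:encode_hex(<<"MQTT">>).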
|
|
@ -16,37 +16,40 @@
|
|||
|
||||
-module(emqx_passwd).
|
||||
|
||||
-export([ hash/2
|
||||
, hash_data/2
|
||||
, check_pass/3
|
||||
]).
|
||||
-export([
|
||||
hash/2,
|
||||
hash_data/2,
|
||||
check_pass/3
|
||||
]).
|
||||
|
||||
-export_type([ password/0
|
||||
, password_hash/0
|
||||
, hash_type_simple/0
|
||||
, hash_type/0
|
||||
, salt_position/0
|
||||
, salt/0]).
|
||||
-export_type([
|
||||
password/0,
|
||||
password_hash/0,
|
||||
hash_type_simple/0,
|
||||
hash_type/0,
|
||||
salt_position/0,
|
||||
salt/0
|
||||
]).
|
||||
|
||||
-include("logger.hrl").
|
||||
|
||||
-type(password() :: binary()).
|
||||
-type(password_hash() :: binary()).
|
||||
-type password() :: binary().
|
||||
-type password_hash() :: binary().
|
||||
|
||||
-type(hash_type_simple() :: plain | md5 | sha | sha256 | sha512).
|
||||
-type(hash_type() :: hash_type_simple() | bcrypt | pbkdf2).
|
||||
-type hash_type_simple() :: plain | md5 | sha | sha256 | sha512.
|
||||
-type hash_type() :: hash_type_simple() | bcrypt | pbkdf2.
|
||||
|
||||
-type(salt_position() :: prefix | suffix).
|
||||
-type(salt() :: binary()).
|
||||
-type salt_position() :: prefix | suffix.
|
||||
-type salt() :: binary().
|
||||
|
||||
-type(pbkdf2_mac_fun() :: md4 | md5 | ripemd160 | sha | sha224 | sha256 | sha384 | sha512).
|
||||
-type(pbkdf2_iterations() :: pos_integer()).
|
||||
-type(pbkdf2_dk_length() :: pos_integer() | undefined).
|
||||
-type pbkdf2_mac_fun() :: md4 | md5 | ripemd160 | sha | sha224 | sha256 | sha384 | sha512.
|
||||
-type pbkdf2_iterations() :: pos_integer().
|
||||
-type pbkdf2_dk_length() :: pos_integer() | undefined.
|
||||
|
||||
-type(hash_params() ::
|
||||
{bcrypt, salt()} |
|
||||
{pbkdf2, pbkdf2_mac_fun(), salt(), pbkdf2_iterations(), pbkdf2_dk_length()} |
|
||||
{hash_type_simple(), salt(), salt_position()}).
|
||||
-type hash_params() ::
|
||||
{bcrypt, salt()}
|
||||
| {pbkdf2, pbkdf2_mac_fun(), salt(), pbkdf2_iterations(), pbkdf2_dk_length()}
|
||||
| {hash_type_simple(), salt(), salt_position()}.
|
||||
|
||||
-export_type([pbkdf2_mac_fun/0]).
|
||||
|
||||
|
|
@ -54,38 +57,38 @@
|
|||
%% APIs
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(check_pass(hash_params(), password_hash(), password()) -> boolean()).
|
||||
-spec check_pass(hash_params(), password_hash(), password()) -> boolean().
|
||||
check_pass({pbkdf2, MacFun, Salt, Iterations, DKLength}, PasswordHash, Password) ->
|
||||
case pbkdf2(MacFun, Password, Salt, Iterations, DKLength) of
|
||||
{ok, HashPasswd} ->
|
||||
compare_secure(hex(HashPasswd), PasswordHash);
|
||||
{error, _Reason}->
|
||||
{error, _Reason} ->
|
||||
false
|
||||
end;
|
||||
check_pass({bcrypt, Salt}, PasswordHash, Password) ->
|
||||
case bcrypt:hashpw(Password, Salt) of
|
||||
{ok, HashPasswd} ->
|
||||
compare_secure(list_to_binary(HashPasswd), PasswordHash);
|
||||
{error, _Reason}->
|
||||
{error, _Reason} ->
|
||||
false
|
||||
end;
|
||||
check_pass({_SimpleHash, _Salt, _SaltPosition} = HashParams, PasswordHash, Password) ->
|
||||
Hash = hash(HashParams, Password),
|
||||
compare_secure(Hash, PasswordHash).
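For the simple-hash clause above, check_pass/3 just recomputes the salted hash and runs compare_secure/2, so a round trip through hash/2 verifies. A hedged sketch, with a made-up salt and password:

    Salt = <<"some_salt">>,
    Hash = emqx_passwd:hash({sha256, Salt, prefix}, <<"secret">>),
    true = emqx_passwd:check_pass({sha256, Salt, prefix}, Hash, <<"secret">>).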
|
||||
|
||||
-spec(hash(hash_params(), password()) -> password_hash()).
|
||||
-spec hash(hash_params(), password()) -> password_hash().
|
||||
hash({pbkdf2, MacFun, Salt, Iterations, DKLength}, Password) ->
|
||||
case pbkdf2(MacFun, Password, Salt, Iterations, DKLength) of
|
||||
{ok, HashPasswd} ->
|
||||
hex(HashPasswd);
|
||||
{error, Reason}->
|
||||
{error, Reason} ->
|
||||
error(Reason)
|
||||
end;
|
||||
hash({bcrypt, Salt}, Password) ->
|
||||
case bcrypt:hashpw(Password, Salt) of
|
||||
{ok, HashPasswd} ->
|
||||
list_to_binary(HashPasswd);
|
||||
{error, Reason}->
|
||||
{error, Reason} ->
|
||||
error(Reason)
|
||||
end;
|
||||
hash({SimpleHash, Salt, prefix}, Password) when is_binary(Password), is_binary(Salt) ->
|
||||
|
|
@ -93,8 +96,7 @@ hash({SimpleHash, Salt, prefix}, Password) when is_binary(Password), is_binary(S
|
|||
hash({SimpleHash, Salt, suffix}, Password) when is_binary(Password), is_binary(Salt) ->
|
||||
hash_data(SimpleHash, <<Password/binary, Salt/binary>>).
|
||||
|
||||
|
||||
-spec(hash_data(hash_type(), binary()) -> binary()).
|
||||
-spec hash_data(hash_type(), binary()) -> binary().
|
||||
hash_data(plain, Data) when is_binary(Data) ->
|
||||
Data;
|
||||
hash_data(md5, Data) when is_binary(Data) ->
|
||||
|
|
@ -111,26 +113,24 @@ hash_data(sha512, Data) when is_binary(Data) ->
|
|||
%%--------------------------------------------------------------------
|
||||
|
||||
compare_secure(X, Y) when is_binary(X), is_binary(Y) ->
|
||||
compare_secure(binary_to_list(X), binary_to_list(Y));
|
||||
compare_secure(binary_to_list(X), binary_to_list(Y));
|
||||
compare_secure(X, Y) when is_list(X), is_list(Y) ->
|
||||
case length(X) == length(Y) of
|
||||
true ->
|
||||
compare_secure(X, Y, 0);
|
||||
false ->
|
||||
false
|
||||
case length(X) == length(Y) of
|
||||
true ->
|
||||
compare_secure(X, Y, 0);
|
||||
false ->
|
||||
false
|
||||
end.
|
||||
|
||||
compare_secure([X | RestX], [Y | RestY], Result) ->
|
||||
compare_secure(RestX, RestY, (X bxor Y) bor Result);
|
||||
compare_secure(RestX, RestY, (X bxor Y) bor Result);
|
||||
compare_secure([], [], Result) ->
|
||||
Result == 0.
|
||||
|
||||
Result == 0.
|
||||
|
||||
pbkdf2(MacFun, Password, Salt, Iterations, undefined) ->
|
||||
pbkdf2:pbkdf2(MacFun, Password, Salt, Iterations);
|
||||
pbkdf2(MacFun, Password, Salt, Iterations, DKLength) ->
|
||||
pbkdf2:pbkdf2(MacFun, Password, Salt, Iterations, DKLength).
|
||||
|
||||
|
||||
hex(X) when is_binary(X) ->
|
||||
pbkdf2:to_hex(X).
|
|
@ -19,34 +19,42 @@
|
|||
|
||||
-include("types.hrl").
|
||||
|
||||
-export([ get_counters/1
|
||||
, get_counter/1
|
||||
, inc_counter/2
|
||||
, reset_counter/1
|
||||
]).
|
||||
-export([
|
||||
get_counters/1,
|
||||
get_counter/1,
|
||||
inc_counter/2,
|
||||
reset_counter/1
|
||||
]).
|
||||
|
||||
-compile({inline,
|
||||
[ get_counters/1
|
||||
, get_counter/1
|
||||
, inc_counter/2
|
||||
, reset_counter/1
|
||||
]}).
|
||||
-compile(
|
||||
{inline, [
|
||||
get_counters/1,
|
||||
get_counter/1,
|
||||
inc_counter/2,
|
||||
reset_counter/1
|
||||
]}
|
||||
).
|
||||
|
||||
-type(key() :: term()).
-type key() :: term().

-spec(get_counters(list(key())) -> list({key(), number()})).
-spec get_counters(list(key())) -> list({key(), number()}).
get_counters(Keys) when is_list(Keys) ->
    [{Key, emqx_pd:get_counter(Key)} || Key <- Keys].

-spec(get_counter(key()) -> number()).
-spec get_counter(key()) -> number().
get_counter(Key) ->
    case get(Key) of undefined -> 0; Cnt -> Cnt end.
    case get(Key) of
        undefined -> 0;
        Cnt -> Cnt
    end.

-spec(inc_counter(key(), number()) -> maybe(number())).
-spec inc_counter(key(), number()) -> maybe(number()).
inc_counter(Key, Inc) ->
    put(Key, get_counter(Key) + Inc).

-spec(reset_counter(key()) -> number()).
-spec reset_counter(key()) -> number().
reset_counter(Key) ->
    case put(Key, 0) of undefined -> 0; Cnt -> Cnt end.

    case put(Key, 0) of
        undefined -> 0;
        Cnt -> Cnt
    end.
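These counters live in the calling process's dictionary, so a hedged usage sketch is just a sequence of calls inside one process (the key name is illustrative):

    0 = emqx_pd:get_counter(recv_msg),      %% unset keys read as 0
    emqx_pd:inc_counter(recv_msg, 1),
    emqx_pd:inc_counter(recv_msg, 2),
    3 = emqx_pd:get_counter(recv_msg),
    3 = emqx_pd:reset_counter(recv_msg),    %% returns the old value
    0 = emqx_pd:get_counter(recv_msg).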
|
|
@ -16,42 +16,47 @@
|
|||
|
||||
-module(emqx_persistent_session).
|
||||
|
||||
-export([ is_store_enabled/0
|
||||
, init_db_backend/0
|
||||
, storage_type/0
|
||||
]).
|
||||
-export([
|
||||
is_store_enabled/0,
|
||||
init_db_backend/0,
|
||||
storage_type/0
|
||||
]).
|
||||
|
||||
-export([ discard/2
|
||||
, discard_if_present/1
|
||||
, lookup/1
|
||||
, persist/3
|
||||
, persist_message/1
|
||||
, pending/1
|
||||
, pending/2
|
||||
, resume/3
|
||||
]).
|
||||
-export([
|
||||
discard/2,
|
||||
discard_if_present/1,
|
||||
lookup/1,
|
||||
persist/3,
|
||||
persist_message/1,
|
||||
pending/1,
|
||||
pending/2,
|
||||
resume/3
|
||||
]).
|
||||
|
||||
-export([ add_subscription/3
|
||||
, remove_subscription/3
|
||||
]).
|
||||
-export([
|
||||
add_subscription/3,
|
||||
remove_subscription/3
|
||||
]).
|
||||
|
||||
-export([ mark_as_delivered/2
|
||||
, mark_resume_begin/1
|
||||
]).
|
||||
-export([
|
||||
mark_as_delivered/2,
|
||||
mark_resume_begin/1
|
||||
]).
|
||||
|
||||
-export([ pending_messages_in_db/2
|
||||
, delete_session_message/1
|
||||
, gc_session_messages/1
|
||||
, session_message_info/2
|
||||
]).
|
||||
-export([
|
||||
pending_messages_in_db/2,
|
||||
delete_session_message/1,
|
||||
gc_session_messages/1,
|
||||
session_message_info/2
|
||||
]).
|
||||
|
||||
-export([ delete_message/1
|
||||
, first_message_id/0
|
||||
, next_message_id/1
|
||||
]).
|
||||
-export([
|
||||
delete_message/1,
|
||||
first_message_id/0,
|
||||
next_message_id/1
|
||||
]).
|
||||
|
||||
-export_type([ sess_msg_key/0
             ]).
-export_type([sess_msg_key/0]).
|
||||
|
||||
-include("emqx.hrl").
|
||||
-include("emqx_persistent_session.hrl").
|
||||
|
|
@@ -59,7 +64,8 @@
|
|||
|
||||
-compile({inline, [is_store_enabled/0]}).
|
||||
|
||||
-define(MAX_EXPIRY_INTERVAL, 4294967295000). %% 16#FFFFFFFF * 1000
|
||||
%% 16#FFFFFFFF * 1000
|
||||
-define(MAX_EXPIRY_INTERVAL, 4294967295000).
|
||||
|
||||
%% NOTE: Order is significant because of traversal order of the table.
|
||||
-define(MARKER, 3).
|
||||
|
|
@@ -67,12 +73,11 @@
|
|||
-define(UNDELIVERED, 1).
|
||||
-define(ABANDONED, 0).
|
||||
|
||||
|
||||
-type bin_timestamp() :: <<_:64>>.
|
||||
-opaque sess_msg_key() ::
|
||||
{emqx_guid:guid(), emqx_guid:guid(), emqx_types:topic(), ?UNDELIVERED | ?DELIVERED}
|
||||
| {emqx_guid:guid(), emqx_guid:guid(), <<>> , ?MARKER}
|
||||
| {emqx_guid:guid(), <<>> , bin_timestamp() , ?ABANDONED}.
|
||||
{emqx_guid:guid(), emqx_guid:guid(), emqx_types:topic(), ?UNDELIVERED | ?DELIVERED}
|
||||
| {emqx_guid:guid(), emqx_guid:guid(), <<>>, ?MARKER}
|
||||
| {emqx_guid:guid(), <<>>, bin_timestamp(), ?ABANDONED}.
|
||||
|
||||
-type gc_traverse_fun() :: fun(('delete' | 'marker' | 'abandoned', sess_msg_key()) -> 'ok').
|
||||
|
||||
|
|
@@ -82,14 +87,16 @@
|
|||
|
||||
init_db_backend() ->
|
||||
case is_store_enabled() of
|
||||
true ->
|
||||
true ->
|
||||
StorageType = storage_type(),
|
||||
ok = emqx_trie:create_session_trie(StorageType),
|
||||
ok = emqx_session_router:create_router_tab(StorageType),
|
||||
case StorageType of
|
||||
disc ->
|
||||
emqx_persistent_session_mnesia_disc_backend:create_tables(),
|
||||
persistent_term:put(?db_backend_key, emqx_persistent_session_mnesia_disc_backend);
|
||||
persistent_term:put(
|
||||
?db_backend_key, emqx_persistent_session_mnesia_disc_backend
|
||||
);
|
||||
ram ->
|
||||
emqx_persistent_session_mnesia_ram_backend:create_tables(),
|
||||
persistent_term:put(?db_backend_key, emqx_persistent_session_mnesia_ram_backend)
|
||||
|
|
@@ -112,7 +119,7 @@ storage_type() ->
|
|||
|
||||
-spec session_message_info('timestamp' | 'sessionID', sess_msg_key()) -> term().
|
||||
session_message_info(timestamp, {_, <<>>, <<TS:64>>, ?ABANDONED}) -> TS;
|
||||
session_message_info(timestamp, {_, GUID, _ , _ }) -> emqx_guid:timestamp(GUID);
|
||||
session_message_info(timestamp, {_, GUID, _, _}) -> emqx_guid:timestamp(GUID);
|
||||
session_message_info(sessionID, {SessionID, _, _, _}) -> SessionID.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
|
|
@@ -147,7 +154,7 @@ lookup_session_store(ClientID) ->
|
|||
?db_backend:lookup_session_store(ClientID).
|
||||
|
||||
put_session_message({_, _, _, _} = Key) ->
|
||||
?db_backend:put_session_message(#session_msg{ key = Key }).
|
||||
?db_backend:put_session_message(#session_msg{key = Key}).
|
||||
|
||||
put_message(Msg) ->
|
||||
?db_backend:put_message(Msg).
|
||||
|
|
@@ -164,29 +171,37 @@ pending_messages_in_db(SessionID, MarkerIds) ->
|
|||
|
||||
%% The timestamp (TS) is the last time a client interacted with the session,
|
||||
%% or when the client disconnected.
|
||||
-spec persist(emqx_types:clientinfo(),
|
||||
emqx_types:conninfo(),
|
||||
emqx_session:session()) -> emqx_session:session().
|
||||
-spec persist(
|
||||
emqx_types:clientinfo(),
|
||||
emqx_types:conninfo(),
|
||||
emqx_session:session()
|
||||
) -> emqx_session:session().
|
||||
|
||||
persist(#{ clientid := ClientID }, ConnInfo, Session) ->
|
||||
persist(#{clientid := ClientID}, ConnInfo, Session) ->
|
||||
case ClientID == undefined orelse not emqx_session:info(is_persistent, Session) of
|
||||
true -> Session;
|
||||
true ->
|
||||
Session;
|
||||
false ->
|
||||
SS = #session_store{ client_id = ClientID
|
||||
, expiry_interval = maps:get(expiry_interval, ConnInfo)
|
||||
, ts = timestamp_from_conninfo(ConnInfo)
|
||||
, session = Session},
|
||||
SS = #session_store{
|
||||
client_id = ClientID,
|
||||
expiry_interval = maps:get(expiry_interval, ConnInfo),
|
||||
ts = timestamp_from_conninfo(ConnInfo),
|
||||
session = Session
|
||||
},
|
||||
case persistent_session_status(SS) of
|
||||
not_persistent -> Session;
|
||||
expired -> discard(ClientID, Session);
|
||||
persistent -> put_session_store(SS),
|
||||
Session
|
||||
not_persistent ->
|
||||
Session;
|
||||
expired ->
|
||||
discard(ClientID, Session);
|
||||
persistent ->
|
||||
put_session_store(SS),
|
||||
Session
|
||||
end
|
||||
end.
|
||||
|
||||
timestamp_from_conninfo(ConnInfo) ->
|
||||
case maps:get(disconnected_at, ConnInfo, undefined) of
|
||||
undefined -> erlang:system_time(millisecond);
|
||||
undefined -> erlang:system_time(millisecond);
|
||||
Disconnect -> Disconnect
|
||||
end.
|
||||
|
||||
|
|
@@ -196,11 +211,12 @@ lookup(ClientID) when is_binary(ClientID) ->
|
|||
none;
|
||||
true ->
|
||||
case lookup_session_store(ClientID) of
|
||||
none -> none;
|
||||
none ->
|
||||
none;
|
||||
{value, #session_store{session = S} = SS} ->
|
||||
case persistent_session_status(SS) of
|
||||
expired -> {expired, S};
|
||||
persistent -> {persistent, S}
|
||||
expired -> {expired, S};
|
||||
persistent -> {persistent, S}
|
||||
end
|
||||
end
|
||||
end.
|
||||
|
|
@@ -208,7 +224,8 @@ lookup(ClientID) when is_binary(ClientID) ->
|
|||
-spec discard_if_present(binary()) -> 'ok'.
|
||||
discard_if_present(ClientID) ->
|
||||
case lookup(ClientID) of
|
||||
none -> ok;
|
||||
none ->
|
||||
ok;
|
||||
{Tag, Session} when Tag =:= persistent; Tag =:= expired ->
|
||||
_ = discard(ClientID, Session),
|
||||
ok
|
||||
|
|
@@ -218,12 +235,12 @@ discard_if_present(ClientID) ->
|
|||
discard(ClientID, Session) ->
|
||||
discard_opt(is_store_enabled(), ClientID, Session).
|
||||
|
||||
discard_opt(false,_ClientID, Session) ->
|
||||
discard_opt(false, _ClientID, Session) ->
|
||||
emqx_session:set_field(is_persistent, false, Session);
|
||||
discard_opt(true, ClientID, Session) ->
|
||||
delete_session_store(ClientID),
|
||||
SessionID = emqx_session:info(id, Session),
|
||||
put_session_message({SessionID, <<>>, << (erlang:system_time(microsecond)) : 64>>, ?ABANDONED}),
|
||||
put_session_message({SessionID, <<>>, <<(erlang:system_time(microsecond)):64>>, ?ABANDONED}),
|
||||
Subscriptions = emqx_session:info(subscriptions, Session),
|
||||
emqx_session_router:delete_routes(SessionID, Subscriptions),
|
||||
emqx_session:set_field(is_persistent, false, Session).
|
||||
|
|
@@ -236,7 +253,7 @@ mark_resume_begin(SessionID) ->
|
|||
|
||||
add_subscription(TopicFilter, SessionID, true = _IsPersistent) ->
|
||||
case is_store_enabled() of
|
||||
true -> emqx_session_router:do_add_route(TopicFilter, SessionID);
|
||||
true -> emqx_session_router:do_add_route(TopicFilter, SessionID);
|
||||
false -> ok
|
||||
end;
|
||||
add_subscription(_TopicFilter, _SessionID, false = _IsPersistent) ->
|
||||
|
|
@@ -244,7 +261,7 @@ add_subscription(_TopicFilter, _SessionID, false = _IsPersistent) ->
|
|||
|
||||
remove_subscription(TopicFilter, SessionID, true = _IsPersistent) ->
|
||||
case is_store_enabled() of
|
||||
true -> emqx_session_router:do_delete_route(TopicFilter, SessionID);
|
||||
true -> emqx_session_router:do_delete_route(TopicFilter, SessionID);
|
||||
false -> ok
|
||||
end;
|
||||
remove_subscription(_TopicFilter, _SessionID, false = _IsPersistent) ->
|
||||
|
|
@@ -255,8 +272,8 @@ remove_subscription(_TopicFilter, _SessionID, false = _IsPersistent) ->
|
|||
%%--------------------------------------------------------------------
|
||||
|
||||
%% Must be called inside a emqx_cm_locker transaction.
|
||||
-spec resume(emqx_types:clientinfo(), emqx_types:conninfo(), emqx_session:session()
|
||||
) -> {emqx_session:session(), [emqx_types:deliver()]}.
|
||||
-spec resume(emqx_types:clientinfo(), emqx_types:conninfo(), emqx_session:session()) ->
|
||||
{emqx_session:session(), [emqx_types:deliver()]}.
|
||||
resume(ClientInfo = #{clientid := ClientID}, ConnInfo, Session) ->
|
||||
SessionID = emqx_session:info(id, Session),
|
||||
?tp(ps_resuming, #{from => db, sid => SessionID}),
|
||||
|
|
@@ -267,8 +284,10 @@ resume(ClientInfo = #{clientid := ClientID}, ConnInfo, Session) ->
|
|||
?tp(ps_initial_pendings, #{sid => SessionID}),
|
||||
Pendings1 = pending(SessionID),
|
||||
Pendings2 = emqx_session:ignore_local(ClientInfo, Pendings1, ClientID, Session),
|
||||
?tp(ps_got_initial_pendings, #{ sid => SessionID
|
||||
, msgs => Pendings1}),
|
||||
?tp(ps_got_initial_pendings, #{
|
||||
sid => SessionID,
|
||||
msgs => Pendings1
|
||||
}),
|
||||
|
||||
%% 2. Enqueue messages to mimic that the process was alive
|
||||
%% when the messages were delivered.
|
||||
|
|
@@ -276,8 +295,10 @@ resume(ClientInfo = #{clientid := ClientID}, ConnInfo, Session) ->
|
|||
Session1 = emqx_session:enqueue(ClientInfo, Pendings2, Session),
|
||||
Session2 = persist(ClientInfo, ConnInfo, Session1),
|
||||
mark_as_delivered(SessionID, Pendings2),
|
||||
?tp(ps_persist_pendings_msgs, #{ msgs => Pendings2
|
||||
, sid => SessionID}),
|
||||
?tp(ps_persist_pendings_msgs, #{
|
||||
msgs => Pendings2,
|
||||
sid => SessionID
|
||||
}),
|
||||
|
||||
%% 3. Notify writers that we are resuming.
|
||||
%% They will buffer new messages.
|
||||
|
|
@@ -295,14 +316,18 @@ resume(ClientInfo = #{clientid := ClientID}, ConnInfo, Session) ->
|
|||
MarkerIDs = [Marker || {_, Marker} <- NodeMarkers],
|
||||
Pendings3 = pending(SessionID, MarkerIDs),
|
||||
Pendings4 = emqx_session:ignore_local(ClientInfo, Pendings3, ClientID, Session),
|
||||
?tp(ps_marker_pendings_msgs, #{ sid => SessionID
|
||||
, msgs => Pendings4}),
|
||||
?tp(ps_marker_pendings_msgs, #{
|
||||
sid => SessionID,
|
||||
msgs => Pendings4
|
||||
}),
|
||||
|
||||
%% 6. Get pending messages from writers.
|
||||
?tp(ps_resume_end, #{sid => SessionID}),
|
||||
WriterPendings = resume_end(Nodes, SessionID),
|
||||
?tp(ps_writer_pendings, #{ msgs => WriterPendings
|
||||
, sid => SessionID}),
|
||||
?tp(ps_writer_pendings, #{
|
||||
msgs => WriterPendings,
|
||||
sid => SessionID
|
||||
}),
|
||||
|
||||
%% 7. Drain the inbox and usort the messages
|
||||
%% with the pending messages. (Should be done by caller.)
|
||||
|
|
@@ -314,30 +339,32 @@ resume_begin(Nodes, SessionID) ->
|
|||
|
||||
resume_end(Nodes, SessionID) ->
|
||||
Res = emqx_persistent_session_proto_v1:resume_end(Nodes, self(), SessionID),
|
||||
?tp(ps_erpc_multical_result, #{ res => Res, sid => SessionID }),
|
||||
?tp(ps_erpc_multical_result, #{res => Res, sid => SessionID}),
|
||||
%% TODO: Should handle the errors
|
||||
[ {deliver, STopic, M}
|
||||
|| {ok, {ok, Messages}} <- Res,
|
||||
{{M, STopic}} <- Messages
|
||||
[
|
||||
{deliver, STopic, M}
|
||||
|| {ok, {ok, Messages}} <- Res,
|
||||
{{M, STopic}} <- Messages
|
||||
].
|
||||
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Messages API
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
persist_message(Msg) ->
|
||||
case is_store_enabled() of
|
||||
true -> do_persist_message(Msg);
|
||||
true -> do_persist_message(Msg);
|
||||
false -> ok
|
||||
end.
|
||||
|
||||
do_persist_message(Msg) ->
|
||||
case emqx_message:get_flag(dup, Msg) orelse emqx_message:is_sys(Msg) of
|
||||
true -> ok;
|
||||
true ->
|
||||
ok;
|
||||
false ->
|
||||
case emqx_session_router:match_routes(emqx_message:topic(Msg)) of
|
||||
[] -> ok;
|
||||
[] ->
|
||||
ok;
|
||||
Routes ->
|
||||
put_message(Msg),
|
||||
MsgId = emqx_message:id(Msg),
|
||||
|
|
@@ -345,7 +372,7 @@ do_persist_message(Msg) ->
|
|||
end
|
||||
end.
|
||||
|
||||
persist_message_routes([#route{dest = SessionID, topic = STopic}|Left], MsgId, Msg) ->
|
||||
persist_message_routes([#route{dest = SessionID, topic = STopic} | Left], MsgId, Msg) ->
|
||||
?tp(ps_persist_msg, #{sid => SessionID, payload => emqx_message:payload(Msg)}),
|
||||
put_session_message({SessionID, MsgId, STopic, ?UNDELIVERED}),
|
||||
emqx_session_router:buffer(SessionID, STopic, Msg),
|
||||
|
|
@@ -355,11 +382,11 @@ persist_message_routes([], _MsgId, _Msg) ->
|
|||
|
||||
mark_as_delivered(SessionID, List) ->
|
||||
case is_store_enabled() of
|
||||
true -> do_mark_as_delivered(SessionID, List);
|
||||
true -> do_mark_as_delivered(SessionID, List);
|
||||
false -> ok
|
||||
end.
|
||||
|
||||
do_mark_as_delivered(SessionID, [{deliver, STopic, Msg}|Left]) ->
|
||||
do_mark_as_delivered(SessionID, [{deliver, STopic, Msg} | Left]) ->
|
||||
MsgID = emqx_message:id(Msg),
|
||||
case next_session_message({SessionID, MsgID, STopic, ?ABANDONED}) of
|
||||
{SessionID, MsgID, STopic, ?UNDELIVERED} = Key ->
|
||||
|
|
@@ -374,12 +401,12 @@ do_mark_as_delivered(_SessionID, []) ->
|
|||
ok.
|
||||
|
||||
-spec pending(emqx_session:sessionID()) ->
|
||||
[{emqx_types:message(), STopic :: binary()}].
|
||||
[{emqx_types:message(), STopic :: binary()}].
|
||||
pending(SessionID) ->
|
||||
pending_messages_in_db(SessionID, []).
|
||||
|
||||
-spec pending(emqx_session:sessionID(), MarkerIDs :: [emqx_guid:guid()]) ->
|
||||
[{emqx_types:message(), STopic :: binary()}].
|
||||
[{emqx_types:message(), STopic :: binary()}].
|
||||
pending(SessionID, MarkerIds) ->
|
||||
%% TODO: Handle lost MarkerIDs
|
||||
case emqx_session_router:pending(SessionID, MarkerIds) of
|
||||
|
|
@@ -401,7 +428,7 @@ persistent_session_status(#session_store{expiry_interval = ?MAX_EXPIRY_INTERVAL}
|
|||
persistent;
|
||||
persistent_session_status(#session_store{expiry_interval = E, ts = TS}) ->
|
||||
case E + TS > erlang:system_time(millisecond) of
|
||||
true -> persistent;
|
||||
true -> persistent;
|
||||
false -> expired
|
||||
end.
|
||||
|
||||
|
|
@@ -413,20 +440,25 @@ pending_messages_fun(SessionID, MarkerIds) ->
|
|||
fun() ->
|
||||
case pending_messages({SessionID, <<>>, <<>>, ?DELIVERED}, [], MarkerIds) of
|
||||
{Pending, []} -> read_pending_msgs(Pending, []);
|
||||
{_Pending, [_|_]} -> incomplete
|
||||
{_Pending, [_ | _]} -> incomplete
|
||||
end
|
||||
end.
|
||||
|
||||
read_pending_msgs([{MsgId, STopic}|Left], Acc) ->
|
||||
Acc1 = try [{deliver, STopic, get_message(MsgId)}|Acc]
|
||||
catch error:{msg_not_found, _} ->
|
||||
HighwaterMark = erlang:system_time(microsecond)
|
||||
- emqx_config:get(?msg_retain) * 1000,
|
||||
case emqx_guid:timestamp(MsgId) < HighwaterMark of
|
||||
true -> Acc; %% Probably cleaned by GC
|
||||
false -> error({msg_not_found, MsgId})
|
||||
end
|
||||
end,
|
||||
read_pending_msgs([{MsgId, STopic} | Left], Acc) ->
|
||||
Acc1 =
|
||||
try
|
||||
[{deliver, STopic, get_message(MsgId)} | Acc]
|
||||
catch
|
||||
error:{msg_not_found, _} ->
|
||||
HighwaterMark =
|
||||
erlang:system_time(microsecond) -
|
||||
emqx_config:get(?msg_retain) * 1000,
|
||||
case emqx_guid:timestamp(MsgId) < HighwaterMark of
|
||||
%% Probably cleaned by GC
|
||||
true -> Acc;
|
||||
false -> error({msg_not_found, MsgId})
|
||||
end
|
||||
end,
|
||||
read_pending_msgs(Left, Acc1);
|
||||
read_pending_msgs([], Acc) ->
|
||||
lists:reverse(Acc).
|
||||
|
|
@@ -450,21 +482,24 @@ pending_messages({SessionID, PrevMsgId, PrevSTopic, PrevTag} = PrevKey, Acc, Mar
|
|||
MarkerIds1 = MarkerIds -- [MsgId],
|
||||
case PrevTag =:= ?UNDELIVERED of
|
||||
false -> pending_messages(Key, Acc, MarkerIds1);
|
||||
true -> pending_messages(Key, [{PrevMsgId, PrevSTopic}|Acc], MarkerIds1)
|
||||
true -> pending_messages(Key, [{PrevMsgId, PrevSTopic} | Acc], MarkerIds1)
|
||||
end;
|
||||
{S, MsgId, STopic, ?DELIVERED} = Key when S =:= SessionID,
|
||||
MsgId =:= PrevMsgId,
|
||||
STopic =:= PrevSTopic ->
|
||||
{S, MsgId, STopic, ?DELIVERED} = Key when
|
||||
S =:= SessionID,
|
||||
MsgId =:= PrevMsgId,
|
||||
STopic =:= PrevSTopic
|
||||
->
|
||||
pending_messages(Key, Acc, MarkerIds);
|
||||
{S, _MsgId, _STopic, _Tag} = Key when S =:= SessionID ->
|
||||
case PrevTag =:= ?UNDELIVERED of
|
||||
false -> pending_messages(Key, Acc, MarkerIds);
|
||||
true -> pending_messages(Key, [{PrevMsgId, PrevSTopic}|Acc], MarkerIds)
|
||||
true -> pending_messages(Key, [{PrevMsgId, PrevSTopic} | Acc], MarkerIds)
|
||||
end;
|
||||
_What -> %% Next sessionID or '$end_of_table'
|
||||
%% Next sessionID or '$end_of_table'
|
||||
_What ->
|
||||
case PrevTag =:= ?UNDELIVERED of
|
||||
false -> {lists:reverse(Acc), MarkerIds};
|
||||
true -> {lists:reverse([{PrevMsgId, PrevSTopic}|Acc]), MarkerIds}
|
||||
true -> {lists:reverse([{PrevMsgId, PrevSTopic} | Acc]), MarkerIds}
|
||||
end
|
||||
end.
|
||||
|
||||
|
|
@@ -495,16 +530,20 @@ gc_traverse({S, _MsgID, <<>>, ?MARKER} = Key, SessionID, Abandoned, Fun) ->
|
|||
ok = Fun(marker, Key),
|
||||
NewAbandoned = S =:= SessionID andalso Abandoned,
|
||||
gc_traverse(next_session_message(Key), S, NewAbandoned, Fun);
|
||||
gc_traverse({S, _MsgID, _STopic, _Tag} = Key, SessionID, Abandoned, Fun) when Abandoned andalso
|
||||
S =:= SessionID ->
|
||||
gc_traverse({S, _MsgID, _STopic, _Tag} = Key, SessionID, Abandoned, Fun) when
|
||||
Abandoned andalso
|
||||
S =:= SessionID
|
||||
->
|
||||
%% Delete all messages from an abandoned session.
|
||||
ok = Fun(delete, Key),
|
||||
gc_traverse(next_session_message(Key), S, Abandoned, Fun);
|
||||
gc_traverse({S, MsgID, STopic, ?UNDELIVERED} = Key, SessionID, Abandoned, Fun) ->
|
||||
case next_session_message(Key) of
|
||||
{S1, M, ST, ?DELIVERED} = NextKey when S1 =:= S andalso
|
||||
MsgID =:= M andalso
|
||||
STopic =:= ST ->
|
||||
{S1, M, ST, ?DELIVERED} = NextKey when
|
||||
S1 =:= S andalso
|
||||
MsgID =:= M andalso
|
||||
STopic =:= ST
|
||||
->
|
||||
%% We have both markers for the same message/topic so it is safe to delete both.
|
||||
ok = Fun(delete, Key),
|
||||
ok = Fun(delete, NextKey),
@@ -23,13 +23,17 @@
|
|||
-define(MSG_TAB_DISC, emqx_persistent_msg_disc).
|
||||
-define(MSG_TAB_RAM, emqx_persistent_msg_ram).
|
||||
|
||||
-record(session_store, { client_id :: binary()
|
||||
, expiry_interval :: non_neg_integer()
|
||||
, ts :: non_neg_integer()
|
||||
, session :: emqx_session:session()}).
|
||||
-record(session_store, {
|
||||
client_id :: binary(),
|
||||
expiry_interval :: non_neg_integer(),
|
||||
ts :: non_neg_integer(),
|
||||
session :: emqx_session:session()
|
||||
}).
|
||||
|
||||
-record(session_msg, {key :: emqx_persistent_session:sess_msg_key(),
|
||||
val = [] :: []}).
|
||||
-record(session_msg, {
|
||||
key :: emqx_persistent_session:sess_msg_key(),
|
||||
val = [] :: []
|
||||
}).
|
||||
|
||||
-define(db_backend_key, [persistent_session_store, db_backend]).
|
||||
-define(is_enabled_key, [persistent_session_store, enabled]).
@@ -18,20 +18,21 @@
|
|||
|
||||
-include("emqx_persistent_session.hrl").
|
||||
|
||||
-export([ first_message_id/0
|
||||
, next_message_id/1
|
||||
, delete_message/1
|
||||
, first_session_message/0
|
||||
, next_session_message/1
|
||||
, delete_session_message/1
|
||||
, put_session_store/1
|
||||
, delete_session_store/1
|
||||
, lookup_session_store/1
|
||||
, put_session_message/1
|
||||
, put_message/1
|
||||
, get_message/1
|
||||
, ro_transaction/1
|
||||
]).
|
||||
-export([
|
||||
first_message_id/0,
|
||||
next_message_id/1,
|
||||
delete_message/1,
|
||||
first_session_message/0,
|
||||
next_session_message/1,
|
||||
delete_session_message/1,
|
||||
put_session_store/1,
|
||||
delete_session_store/1,
|
||||
lookup_session_store/1,
|
||||
put_session_message/1,
|
||||
put_message/1,
|
||||
get_message/1,
|
||||
ro_transaction/1
|
||||
]).
|
||||
|
||||
first_message_id() ->
|
||||
'$end_of_table'.
|
||||
|
|
@@ -73,4 +74,3 @@ get_message(_MsgId) ->
|
|||
|
||||
ro_transaction(Fun) ->
|
||||
Fun().
@@ -23,17 +23,19 @@
|
|||
-export([start_link/0]).
|
||||
|
||||
%% gen_server callbacks
|
||||
-export([ init/1
|
||||
, handle_call/3
|
||||
, handle_cast/2
|
||||
, handle_info/2
|
||||
, terminate/2
|
||||
]).
|
||||
-export([
|
||||
init/1,
|
||||
handle_call/3,
|
||||
handle_cast/2,
|
||||
handle_info/2,
|
||||
terminate/2
|
||||
]).
|
||||
|
||||
-ifdef(TEST).
|
||||
-export([ session_gc_worker/2
|
||||
, message_gc_worker/0
|
||||
]).
|
||||
-export([
|
||||
session_gc_worker/2,
|
||||
message_gc_worker/0
|
||||
]).
|
||||
-endif.
|
||||
|
||||
-define(SERVER, ?MODULE).
|
||||
|
|
@@ -85,14 +87,18 @@ terminate(_Reason, _State) ->
|
|||
|
||||
start_session_gc_timer(State) ->
|
||||
Interval = emqx_config:get([persistent_session_store, session_message_gc_interval]),
|
||||
State#{ session_gc_timer => erlang:start_timer(Interval, self(), session_gc_timeout)}.
|
||||
State#{session_gc_timer => erlang:start_timer(Interval, self(), session_gc_timeout)}.
|
||||
|
||||
session_gc_timeout(Ref, #{ session_gc_timer := R } = State) when R =:= Ref ->
|
||||
session_gc_timeout(Ref, #{session_gc_timer := R} = State) when R =:= Ref ->
|
||||
%% Prevent overlapping processes.
|
||||
GCPid = maps:get(session_gc_pid, State, undefined),
|
||||
case GCPid =/= undefined andalso erlang:is_process_alive(GCPid) of
|
||||
true -> start_session_gc_timer(State);
|
||||
false -> start_session_gc_timer(State#{ session_gc_pid => proc_lib:spawn_link(fun session_gc_worker/0)})
|
||||
true ->
|
||||
start_session_gc_timer(State);
|
||||
false ->
|
||||
start_session_gc_timer(State#{
|
||||
session_gc_pid => proc_lib:spawn_link(fun session_gc_worker/0)
|
||||
})
|
||||
end;
|
||||
session_gc_timeout(_Ref, State) ->
|
||||
State.
|
||||
|
|
@@ -105,13 +111,13 @@ session_gc_worker(delete, Key) ->
|
|||
session_gc_worker(marker, Key) ->
|
||||
TS = emqx_persistent_session:session_message_info(timestamp, Key),
|
||||
case TS + ?MARKER_GRACE_PERIOD < erlang:system_time(microsecond) of
|
||||
true -> emqx_persistent_session:delete_session_message(Key);
|
||||
true -> emqx_persistent_session:delete_session_message(Key);
|
||||
false -> ok
|
||||
end;
|
||||
session_gc_worker(abandoned, Key) ->
|
||||
TS = emqx_persistent_session:session_message_info(timestamp, Key),
|
||||
case TS + ?ABANDONED_GRACE_PERIOD < erlang:system_time(microsecond) of
|
||||
true -> emqx_persistent_session:delete_session_message(Key);
|
||||
true -> emqx_persistent_session:delete_session_message(Key);
|
||||
false -> ok
|
||||
end.
|
||||
|
||||
|
|
@@ -124,14 +130,18 @@ session_gc_worker(abandoned, Key) ->
|
|||
%% We sacrifice space for simplicity at this point.
|
||||
start_message_gc_timer(State) ->
|
||||
Interval = emqx_config:get([persistent_session_store, session_message_gc_interval]),
|
||||
State#{ message_gc_timer => erlang:start_timer(Interval, self(), message_gc_timeout)}.
|
||||
State#{message_gc_timer => erlang:start_timer(Interval, self(), message_gc_timeout)}.
|
||||
|
||||
message_gc_timeout(Ref, #{ message_gc_timer := R } = State) when R =:= Ref ->
|
||||
message_gc_timeout(Ref, #{message_gc_timer := R} = State) when R =:= Ref ->
|
||||
%% Prevent overlapping processes.
|
||||
GCPid = maps:get(message_gc_pid, State, undefined),
|
||||
case GCPid =/= undefined andalso erlang:is_process_alive(GCPid) of
|
||||
true -> start_message_gc_timer(State);
|
||||
false -> start_message_gc_timer(State#{ message_gc_pid => proc_lib:spawn_link(fun message_gc_worker/0)})
|
||||
true ->
|
||||
start_message_gc_timer(State);
|
||||
false ->
|
||||
start_message_gc_timer(State#{
|
||||
message_gc_pid => proc_lib:spawn_link(fun message_gc_worker/0)
|
||||
})
|
||||
end;
|
||||
message_gc_timeout(_Ref, State) ->
|
||||
State.
@@ -19,48 +19,60 @@
|
|||
-include("emqx.hrl").
|
||||
-include("emqx_persistent_session.hrl").
|
||||
|
||||
-export([ create_tables/0
|
||||
, first_message_id/0
|
||||
, next_message_id/1
|
||||
, delete_message/1
|
||||
, first_session_message/0
|
||||
, next_session_message/1
|
||||
, delete_session_message/1
|
||||
, put_session_store/1
|
||||
, delete_session_store/1
|
||||
, lookup_session_store/1
|
||||
, put_session_message/1
|
||||
, put_message/1
|
||||
, get_message/1
|
||||
, ro_transaction/1
|
||||
]).
|
||||
-export([
|
||||
create_tables/0,
|
||||
first_message_id/0,
|
||||
next_message_id/1,
|
||||
delete_message/1,
|
||||
first_session_message/0,
|
||||
next_session_message/1,
|
||||
delete_session_message/1,
|
||||
put_session_store/1,
|
||||
delete_session_store/1,
|
||||
lookup_session_store/1,
|
||||
put_session_message/1,
|
||||
put_message/1,
|
||||
get_message/1,
|
||||
ro_transaction/1
|
||||
]).
|
||||
|
||||
create_tables() ->
|
||||
ok = mria:create_table(?SESSION_STORE_DISC, [
|
||||
{type, set},
|
||||
{rlog_shard, ?PERSISTENT_SESSION_SHARD},
|
||||
{storage, disc_copies},
|
||||
{record_name, session_store},
|
||||
{attributes, record_info(fields, session_store)},
|
||||
{storage_properties, [{ets, [{read_concurrency, true}]}]}]),
|
||||
{type, set},
|
||||
{rlog_shard, ?PERSISTENT_SESSION_SHARD},
|
||||
{storage, disc_copies},
|
||||
{record_name, session_store},
|
||||
{attributes, record_info(fields, session_store)},
|
||||
{storage_properties, [{ets, [{read_concurrency, true}]}]}
|
||||
]),
|
||||
|
||||
ok = mria:create_table(?SESS_MSG_TAB_DISC, [
|
||||
{type, ordered_set},
|
||||
{rlog_shard, ?PERSISTENT_SESSION_SHARD},
|
||||
{storage, disc_copies},
|
||||
{record_name, session_msg},
|
||||
{attributes, record_info(fields, session_msg)},
|
||||
{storage_properties, [{ets, [{read_concurrency, true},
|
||||
{write_concurrency, true}]}]}]),
|
||||
{type, ordered_set},
|
||||
{rlog_shard, ?PERSISTENT_SESSION_SHARD},
|
||||
{storage, disc_copies},
|
||||
{record_name, session_msg},
|
||||
{attributes, record_info(fields, session_msg)},
|
||||
{storage_properties, [
|
||||
{ets, [
|
||||
{read_concurrency, true},
|
||||
{write_concurrency, true}
|
||||
]}
|
||||
]}
|
||||
]),
|
||||
|
||||
ok = mria:create_table(?MSG_TAB_DISC, [
|
||||
{type, ordered_set},
|
||||
{rlog_shard, ?PERSISTENT_SESSION_SHARD},
|
||||
{storage, disc_copies},
|
||||
{record_name, message},
|
||||
{attributes, record_info(fields, message)},
|
||||
{storage_properties, [{ets, [{read_concurrency, true},
|
||||
{write_concurrency, true}]}]}]).
|
||||
{type, ordered_set},
|
||||
{rlog_shard, ?PERSISTENT_SESSION_SHARD},
|
||||
{storage, disc_copies},
|
||||
{record_name, message},
|
||||
{attributes, record_info(fields, message)},
|
||||
{storage_properties, [
|
||||
{ets, [
|
||||
{read_concurrency, true},
|
||||
{write_concurrency, true}
|
||||
]}
|
||||
]}
|
||||
]).
|
||||
|
||||
first_session_message() ->
|
||||
mnesia:dirty_first(?SESS_MSG_TAB_DISC).
|
||||
|
|
@@ -107,4 +119,3 @@ get_message(MsgId) ->
|
|||
ro_transaction(Fun) ->
|
||||
{atomic, Res} = mria:ro_transaction(?PERSISTENT_SESSION_SHARD, Fun),
|
||||
Res.
@@ -19,48 +19,60 @@
|
|||
-include("emqx.hrl").
|
||||
-include("emqx_persistent_session.hrl").
|
||||
|
||||
-export([ create_tables/0
|
||||
, first_message_id/0
|
||||
, next_message_id/1
|
||||
, delete_message/1
|
||||
, first_session_message/0
|
||||
, next_session_message/1
|
||||
, delete_session_message/1
|
||||
, put_session_store/1
|
||||
, delete_session_store/1
|
||||
, lookup_session_store/1
|
||||
, put_session_message/1
|
||||
, put_message/1
|
||||
, get_message/1
|
||||
, ro_transaction/1
|
||||
]).
|
||||
-export([
|
||||
create_tables/0,
|
||||
first_message_id/0,
|
||||
next_message_id/1,
|
||||
delete_message/1,
|
||||
first_session_message/0,
|
||||
next_session_message/1,
|
||||
delete_session_message/1,
|
||||
put_session_store/1,
|
||||
delete_session_store/1,
|
||||
lookup_session_store/1,
|
||||
put_session_message/1,
|
||||
put_message/1,
|
||||
get_message/1,
|
||||
ro_transaction/1
|
||||
]).
|
||||
|
||||
create_tables() ->
|
||||
ok = mria:create_table(?SESSION_STORE_RAM, [
|
||||
{type, set},
|
||||
{rlog_shard, ?PERSISTENT_SESSION_SHARD},
|
||||
{storage, ram_copies},
|
||||
{record_name, session_store},
|
||||
{attributes, record_info(fields, session_store)},
|
||||
{storage_properties, [{ets, [{read_concurrency, true}]}]}]),
|
||||
{type, set},
|
||||
{rlog_shard, ?PERSISTENT_SESSION_SHARD},
|
||||
{storage, ram_copies},
|
||||
{record_name, session_store},
|
||||
{attributes, record_info(fields, session_store)},
|
||||
{storage_properties, [{ets, [{read_concurrency, true}]}]}
|
||||
]),
|
||||
|
||||
ok = mria:create_table(?SESS_MSG_TAB_RAM, [
|
||||
{type, ordered_set},
|
||||
{rlog_shard, ?PERSISTENT_SESSION_SHARD},
|
||||
{storage, ram_copies},
|
||||
{record_name, session_msg},
|
||||
{attributes, record_info(fields, session_msg)},
|
||||
{storage_properties, [{ets, [{read_concurrency, true},
|
||||
{write_concurrency, true}]}]}]),
|
||||
{type, ordered_set},
|
||||
{rlog_shard, ?PERSISTENT_SESSION_SHARD},
|
||||
{storage, ram_copies},
|
||||
{record_name, session_msg},
|
||||
{attributes, record_info(fields, session_msg)},
|
||||
{storage_properties, [
|
||||
{ets, [
|
||||
{read_concurrency, true},
|
||||
{write_concurrency, true}
|
||||
]}
|
||||
]}
|
||||
]),
|
||||
|
||||
ok = mria:create_table(?MSG_TAB_RAM, [
|
||||
{type, ordered_set},
|
||||
{rlog_shard, ?PERSISTENT_SESSION_SHARD},
|
||||
{storage, ram_copies},
|
||||
{record_name, message},
|
||||
{attributes, record_info(fields, message)},
|
||||
{storage_properties, [{ets, [{read_concurrency, true},
|
||||
{write_concurrency, true}]}]}]).
|
||||
{type, ordered_set},
|
||||
{rlog_shard, ?PERSISTENT_SESSION_SHARD},
|
||||
{storage, ram_copies},
|
||||
{record_name, message},
|
||||
{attributes, record_info(fields, message)},
|
||||
{storage_properties, [
|
||||
{ets, [
|
||||
{read_concurrency, true},
|
||||
{write_concurrency, true}
|
||||
]}
|
||||
]}
|
||||
]).
|
||||
|
||||
first_session_message() ->
|
||||
mnesia:dirty_first(?SESS_MSG_TAB_RAM).
|
||||
|
|
@@ -107,4 +119,3 @@ get_message(MsgId) ->
|
|||
ro_transaction(Fun) ->
|
||||
{atomic, Res} = mria:ro_transaction(?PERSISTENT_SESSION_SHARD, Fun),
|
||||
Res.
@@ -30,31 +30,40 @@ init([]) ->
|
|||
SessionTab = emqx_session_router:create_init_tab(),
|
||||
|
||||
%% Resume worker sup
|
||||
ResumeSup = #{id => router_worker_sup,
|
||||
start => {emqx_session_router_worker_sup, start_link, [SessionTab]},
|
||||
restart => permanent,
|
||||
shutdown => 2000,
|
||||
type => supervisor,
|
||||
modules => [emqx_session_router_worker_sup]},
|
||||
ResumeSup = #{
|
||||
id => router_worker_sup,
|
||||
start => {emqx_session_router_worker_sup, start_link, [SessionTab]},
|
||||
restart => permanent,
|
||||
shutdown => 2000,
|
||||
type => supervisor,
|
||||
modules => [emqx_session_router_worker_sup]
|
||||
},
|
||||
|
||||
SessionRouterPool = emqx_pool_sup:spec(session_router_pool,
|
||||
[session_router_pool, hash,
|
||||
{emqx_session_router, start_link, []}]),
|
||||
SessionRouterPool = emqx_pool_sup:spec(
|
||||
session_router_pool,
|
||||
[
|
||||
session_router_pool,
|
||||
hash,
|
||||
{emqx_session_router, start_link, []}
|
||||
]
|
||||
),
|
||||
|
||||
GCWorker = child_spec(emqx_persistent_session_gc, worker),
|
||||
|
||||
Spec = #{ strategy => one_for_all
|
||||
, intensity => 0
|
||||
, period => 1
|
||||
},
|
||||
Spec = #{
|
||||
strategy => one_for_all,
|
||||
intensity => 0,
|
||||
period => 1
|
||||
},
|
||||
|
||||
{ok, {Spec, [ResumeSup, SessionRouterPool, GCWorker]}}.
|
||||
|
||||
child_spec(Mod, worker) ->
|
||||
#{id => Mod,
|
||||
start => {Mod, start_link, []},
|
||||
restart => permanent,
|
||||
shutdown => 15000,
|
||||
type => worker,
|
||||
modules => [Mod]
|
||||
}.
|
||||
#{
|
||||
id => Mod,
|
||||
start => {Mod, start_link, []},
|
||||
restart => permanent,
|
||||
shutdown => 15000,
|
||||
type => worker,
|
||||
modules => [Mod]
|
||||
}.
@@ -20,21 +20,23 @@
|
|||
|
||||
-export([new/0]).
|
||||
|
||||
-export([ monitor/2
|
||||
, monitor/3
|
||||
, demonitor/2
|
||||
]).
|
||||
-export([
|
||||
monitor/2,
|
||||
monitor/3,
|
||||
demonitor/2
|
||||
]).
|
||||
|
||||
-export([ find/2
|
||||
, erase/2
|
||||
, erase_all/2
|
||||
]).
|
||||
-export([
|
||||
find/2,
|
||||
erase/2,
|
||||
erase_all/2
|
||||
]).
|
||||
|
||||
-export([count/1]).
|
||||
|
||||
-export_type([pmon/0]).
|
||||
|
||||
-opaque(pmon() :: {?MODULE, map()}).
|
||||
-opaque pmon() :: {?MODULE, map()}.
|
||||
|
||||
-define(PMON(Map), {?MODULE, Map}).
|
||||
|
||||
|
|
@@ -42,55 +44,61 @@
|
|||
%% APIs
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(new() -> pmon()).
|
||||
-spec new() -> pmon().
|
||||
new() -> ?PMON(maps:new()).
|
||||
|
||||
-spec(monitor(pid(), pmon()) -> pmon()).
|
||||
-spec monitor(pid(), pmon()) -> pmon().
|
||||
monitor(Pid, PMon) ->
|
||||
?MODULE:monitor(Pid, undefined, PMon).
|
||||
|
||||
-spec(monitor(pid(), term(), pmon()) -> pmon()).
|
||||
-spec monitor(pid(), term(), pmon()) -> pmon().
|
||||
monitor(Pid, Val, PMon = ?PMON(Map)) ->
|
||||
case maps:is_key(Pid, Map) of
|
||||
true -> PMon;
|
||||
true ->
|
||||
PMon;
|
||||
false ->
|
||||
Ref = erlang:monitor(process, Pid),
|
||||
?PMON(maps:put(Pid, {Ref, Val}, Map))
|
||||
end.
|
||||
|
||||
-spec(demonitor(pid(), pmon()) -> pmon()).
|
||||
-spec demonitor(pid(), pmon()) -> pmon().
|
||||
demonitor(Pid, PMon = ?PMON(Map)) ->
|
||||
case maps:find(Pid, Map) of
|
||||
{ok, {Ref, _Val}} ->
|
||||
%% flush
|
||||
_ = erlang:demonitor(Ref, [flush]),
|
||||
?PMON(maps:remove(Pid, Map));
|
||||
error -> PMon
|
||||
error ->
|
||||
PMon
|
||||
end.
|
||||
|
||||
-spec(find(pid(), pmon()) -> error | {ok, term()}).
|
||||
-spec find(pid(), pmon()) -> error | {ok, term()}.
|
||||
find(Pid, ?PMON(Map)) ->
|
||||
case maps:find(Pid, Map) of
|
||||
{ok, {_Ref, Val}} ->
|
||||
{ok, Val};
|
||||
error -> error
|
||||
error ->
|
||||
error
|
||||
end.
|
||||
|
||||
-spec(erase(pid(), pmon()) -> pmon()).
|
||||
-spec erase(pid(), pmon()) -> pmon().
|
||||
erase(Pid, ?PMON(Map)) ->
|
||||
?PMON(maps:remove(Pid, Map)).
|
||||
|
||||
-spec(erase_all([pid()], pmon()) -> {[{pid(), term()}], pmon()}).
|
||||
-spec erase_all([pid()], pmon()) -> {[{pid(), term()}], pmon()}.
|
||||
erase_all(Pids, PMon0) ->
|
||||
lists:foldl(
|
||||
fun(Pid, {Acc, PMon}) ->
|
||||
case find(Pid, PMon) of
|
||||
{ok, Val} ->
|
||||
{[{Pid, Val}|Acc], erase(Pid, PMon)};
|
||||
error -> {Acc, PMon}
|
||||
end
|
||||
end, {[], PMon0}, Pids).
|
||||
fun(Pid, {Acc, PMon}) ->
|
||||
case find(Pid, PMon) of
|
||||
{ok, Val} ->
|
||||
{[{Pid, Val} | Acc], erase(Pid, PMon)};
|
||||
error ->
|
||||
{Acc, PMon}
|
||||
end
|
||||
end,
|
||||
{[], PMon0},
|
||||
Pids
|
||||
).
|
||||
|
||||
-spec(count(pmon()) -> non_neg_integer()).
|
||||
-spec count(pmon()) -> non_neg_integer().
|
||||
count(?PMON(Map)) -> maps:size(Map).
@@ -21,49 +21,54 @@
|
|||
-include("logger.hrl").
|
||||
-include("types.hrl").
|
||||
|
||||
|
||||
%% APIs
|
||||
-export([start_link/2]).
|
||||
|
||||
-export([ submit/1
|
||||
, submit/2
|
||||
, async_submit/1
|
||||
, async_submit/2
|
||||
]).
|
||||
-export([
|
||||
submit/1,
|
||||
submit/2,
|
||||
async_submit/1,
|
||||
async_submit/2
|
||||
]).
|
||||
|
||||
-ifdef(TEST).
|
||||
-export([worker/0, flush_async_tasks/0]).
|
||||
-endif.
|
||||
|
||||
%% gen_server callbacks
|
||||
-export([ init/1
|
||||
, handle_call/3
|
||||
, handle_cast/2
|
||||
, handle_info/2
|
||||
, terminate/2
|
||||
, code_change/3
|
||||
]).
|
||||
-export([
|
||||
init/1,
|
||||
handle_call/3,
|
||||
handle_cast/2,
|
||||
handle_info/2,
|
||||
terminate/2,
|
||||
code_change/3
|
||||
]).
|
||||
|
||||
-define(POOL, ?MODULE).
|
||||
|
||||
-type(task() :: fun() | mfa() | {fun(), Args :: list(any())}).
|
||||
-type task() :: fun() | mfa() | {fun(), Args :: list(any())}.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% APIs
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
%% @doc Start pool.
|
||||
-spec(start_link(atom(), pos_integer()) -> startlink_ret()).
|
||||
-spec start_link(atom(), pos_integer()) -> startlink_ret().
|
||||
start_link(Pool, Id) ->
|
||||
gen_server:start_link({local, emqx_misc:proc_name(?MODULE, Id)},
|
||||
?MODULE, [Pool, Id], [{hibernate_after, 1000}]).
|
||||
gen_server:start_link(
|
||||
{local, emqx_misc:proc_name(?MODULE, Id)},
|
||||
?MODULE,
|
||||
[Pool, Id],
|
||||
[{hibernate_after, 1000}]
|
||||
).
|
||||
|
||||
%% @doc Submit work to the pool.
|
||||
-spec(submit(task()) -> any()).
|
||||
-spec submit(task()) -> any().
|
||||
submit(Task) ->
|
||||
call({submit, Task}).
|
||||
|
||||
-spec(submit(fun(), list(any())) -> any()).
|
||||
-spec submit(fun(), list(any())) -> any().
|
||||
submit(Fun, Args) ->
|
||||
call({submit, {Fun, Args}}).
|
||||
|
||||
|
|
@@ -72,11 +77,11 @@ call(Req) ->
|
|||
gen_server:call(worker(), Req, infinity).
|
||||
|
||||
%% @doc Submit work to the pool asynchronously.
|
||||
-spec(async_submit(task()) -> ok).
|
||||
-spec async_submit(task()) -> ok.
|
||||
async_submit(Task) ->
|
||||
cast({async_submit, Task}).
|
||||
|
||||
-spec(async_submit(fun(), list(any())) -> ok).
|
||||
-spec async_submit(fun(), list(any())) -> ok.
|
||||
async_submit(Fun, Args) ->
|
||||
cast({async_submit, {Fun, Args}}).
|
||||
|
||||
|
|
@@ -98,22 +103,23 @@ init([Pool, Id]) ->
|
|||
|
||||
handle_call({submit, Task}, _From, State) ->
|
||||
{reply, catch run(Task), State};
|
||||
|
||||
handle_call(Req, _From, State) ->
|
||||
?SLOG(error, #{msg => "unexpected_call", call => Req}),
|
||||
{reply, ignored, State}.
|
||||
|
||||
handle_cast({async_submit, Task}, State) ->
|
||||
try run(Task)
|
||||
catch Error:Reason:Stacktrace ->
|
||||
?SLOG(error, #{msg => "async_submit_error",
|
||||
exception => Error,
|
||||
reason => Reason,
|
||||
stacktrace => Stacktrace
|
||||
})
|
||||
try
|
||||
run(Task)
|
||||
catch
|
||||
Error:Reason:Stacktrace ->
|
||||
?SLOG(error, #{
|
||||
msg => "async_submit_error",
|
||||
exception => Error,
|
||||
reason => Reason,
|
||||
stacktrace => Stacktrace
|
||||
})
|
||||
end,
|
||||
{noreply, State};
|
||||
|
||||
handle_cast(Msg, State) ->
|
||||
?SLOG(error, #{msg => "unexpected_cast", cast => Msg}),
|
||||
{noreply, State}.
|
||||
|
|
@@ -149,5 +155,12 @@ flush_async_tasks() ->
|
|||
Self = self(),
|
||||
L = lists:seq(1, 997),
|
||||
lists:foreach(fun(I) -> emqx_pool:async_submit(fun() -> Self ! {done, Ref, I} end, []) end, L),
|
||||
lists:foreach(fun(I) -> receive {done, Ref, I} -> ok end end, L).
|
||||
lists:foreach(
|
||||
fun(I) ->
|
||||
receive
|
||||
{done, Ref, I} -> ok
|
||||
end
|
||||
end,
|
||||
L
|
||||
).
|
||||
-endif.
@@ -22,64 +22,73 @@
|
|||
|
||||
-export([spec/1, spec/2]).
|
||||
|
||||
-export([ start_link/0
|
||||
, start_link/3
|
||||
, start_link/4
|
||||
]).
|
||||
-export([
|
||||
start_link/0,
|
||||
start_link/3,
|
||||
start_link/4
|
||||
]).
|
||||
|
||||
-export([init/1]).
|
||||
|
||||
-define(POOL, emqx_pool).
|
||||
|
||||
-spec(spec(list()) -> supervisor:child_spec()).
|
||||
-spec spec(list()) -> supervisor:child_spec().
|
||||
spec(Args) ->
|
||||
spec(pool_sup, Args).
|
||||
|
||||
-spec(spec(any(), list()) -> supervisor:child_spec()).
|
||||
-spec spec(any(), list()) -> supervisor:child_spec().
|
||||
spec(ChildId, Args) ->
|
||||
#{id => ChildId,
|
||||
start => {?MODULE, start_link, Args},
|
||||
restart => transient,
|
||||
shutdown => infinity,
|
||||
type => supervisor,
|
||||
modules => [?MODULE]}.
|
||||
#{
|
||||
id => ChildId,
|
||||
start => {?MODULE, start_link, Args},
|
||||
restart => transient,
|
||||
shutdown => infinity,
|
||||
type => supervisor,
|
||||
modules => [?MODULE]
|
||||
}.
|
||||
|
||||
%% @doc Start the default pool supervisor.
|
||||
start_link() ->
|
||||
start_link(?POOL, random, {?POOL, start_link, []}).
|
||||
|
||||
-spec(start_link(atom() | tuple(), atom(), mfargs())
|
||||
-> {ok, pid()} | {error, term()}).
|
||||
-spec start_link(atom() | tuple(), atom(), mfargs()) ->
|
||||
{ok, pid()} | {error, term()}.
|
||||
start_link(Pool, Type, MFA) ->
|
||||
start_link(Pool, Type, emqx_vm:schedulers(), MFA).
|
||||
|
||||
-spec(start_link(atom() | tuple(), atom(), pos_integer(), mfargs())
|
||||
-> {ok, pid()} | {error, term()}).
|
||||
-spec start_link(atom() | tuple(), atom(), pos_integer(), mfargs()) ->
|
||||
{ok, pid()} | {error, term()}.
|
||||
start_link(Pool, Type, Size, MFA) ->
|
||||
supervisor:start_link(?MODULE, [Pool, Type, Size, MFA]).
|
||||
|
||||
init([Pool, Type, Size, {M, F, Args}]) ->
|
||||
ok = ensure_pool(Pool, Type, [{size, Size}]),
|
||||
{ok, {{one_for_one, 10, 3600}, [
|
||||
begin
|
||||
ensure_pool_worker(Pool, {Pool, I}, I),
|
||||
#{id => {M, I},
|
||||
start => {M, F, [Pool, I | Args]},
|
||||
restart => transient,
|
||||
shutdown => 5000,
|
||||
type => worker,
|
||||
modules => [M]}
|
||||
end || I <- lists:seq(1, Size)]}}.
|
||||
{ok,
|
||||
{{one_for_one, 10, 3600}, [
|
||||
begin
|
||||
ensure_pool_worker(Pool, {Pool, I}, I),
|
||||
#{
|
||||
id => {M, I},
|
||||
start => {M, F, [Pool, I | Args]},
|
||||
restart => transient,
|
||||
shutdown => 5000,
|
||||
type => worker,
|
||||
modules => [M]
|
||||
}
|
||||
end
|
||||
|| I <- lists:seq(1, Size)
|
||||
]}}.
|
||||
|
||||
ensure_pool(Pool, Type, Opts) ->
|
||||
try gproc_pool:new(Pool, Type, Opts)
|
||||
try
|
||||
gproc_pool:new(Pool, Type, Opts)
|
||||
catch
|
||||
error:exists -> ok
|
||||
end.
|
||||
|
||||
ensure_pool_worker(Pool, Name, Slot) ->
|
||||
try gproc_pool:add_worker(Pool, Name, Slot)
|
||||
try
|
||||
gproc_pool:add_worker(Pool, Name, Slot)
|
||||
catch
|
||||
error:exists -> ok
|
||||
end.
@@ -39,63 +39,68 @@
|
|||
|
||||
-module(emqx_pqueue).
|
||||
|
||||
-export([ new/0
|
||||
, is_queue/1
|
||||
, is_empty/1
|
||||
, len/1
|
||||
, plen/2
|
||||
, to_list/1
|
||||
, from_list/1
|
||||
, in/2
|
||||
, in/3
|
||||
, out/1
|
||||
, out/2
|
||||
, out_p/1
|
||||
, join/2
|
||||
, filter/2
|
||||
, fold/3
|
||||
, highest/1
|
||||
, shift/1
|
||||
]).
|
||||
-export([
|
||||
new/0,
|
||||
is_queue/1,
|
||||
is_empty/1,
|
||||
len/1,
|
||||
plen/2,
|
||||
to_list/1,
|
||||
from_list/1,
|
||||
in/2,
|
||||
in/3,
|
||||
out/1,
|
||||
out/2,
|
||||
out_p/1,
|
||||
join/2,
|
||||
filter/2,
|
||||
fold/3,
|
||||
highest/1,
|
||||
shift/1
|
||||
]).
|
||||
|
||||
-export_type([q/0]).
|
||||
|
||||
%%----------------------------------------------------------------------------
|
||||
|
||||
-type(priority() :: integer() | 'infinity').
|
||||
-type(squeue() :: {queue, [any()], [any()], non_neg_integer()}).
|
||||
-type(pqueue() :: squeue() | {pqueue, [{priority(), squeue()}]}).
|
||||
-type(q() :: pqueue()).
|
||||
-type priority() :: integer() | 'infinity'.
|
||||
-type squeue() :: {queue, [any()], [any()], non_neg_integer()}.
|
||||
-type pqueue() :: squeue() | {pqueue, [{priority(), squeue()}]}.
|
||||
-type q() :: pqueue().
|
||||
|
||||
%%----------------------------------------------------------------------------
|
||||
|
||||
-spec(new() -> pqueue()).
|
||||
-spec new() -> pqueue().
|
||||
new() ->
|
||||
{queue, [], [], 0}.
|
||||
|
||||
-spec(is_queue(any()) -> boolean()).
|
||||
-spec is_queue(any()) -> boolean().
|
||||
is_queue({queue, R, F, L}) when is_list(R), is_list(F), is_integer(L) ->
|
||||
true;
|
||||
is_queue({pqueue, Queues}) when is_list(Queues) ->
|
||||
lists:all(fun ({infinity, Q}) -> is_queue(Q);
|
||||
({P, Q}) -> is_integer(P) andalso is_queue(Q)
|
||||
end, Queues);
|
||||
lists:all(
|
||||
fun
|
||||
({infinity, Q}) -> is_queue(Q);
|
||||
({P, Q}) -> is_integer(P) andalso is_queue(Q)
|
||||
end,
|
||||
Queues
|
||||
);
|
||||
is_queue(_) ->
|
||||
false.
|
||||
|
||||
-spec(is_empty(pqueue()) -> boolean()).
|
||||
-spec is_empty(pqueue()) -> boolean().
|
||||
is_empty({queue, [], [], 0}) ->
|
||||
true;
|
||||
is_empty(_) ->
|
||||
false.
|
||||
|
||||
-spec(len(pqueue()) -> non_neg_integer()).
|
||||
-spec len(pqueue()) -> non_neg_integer().
|
||||
len({queue, _R, _F, L}) ->
|
||||
L;
|
||||
len({pqueue, Queues}) ->
|
||||
lists:sum([len(Q) || {_, Q} <- Queues]).
|
||||
|
||||
-spec(plen(priority(), pqueue()) -> non_neg_integer()).
|
||||
-spec plen(priority(), pqueue()) -> non_neg_integer().
|
||||
plen(0, {queue, _R, _F, L}) ->
|
||||
L;
|
||||
plen(_, {queue, _R, _F, _}) ->
|
||||
|
|
@@ -103,84 +108,95 @@ plen(_, {queue, _R, _F, _}) ->
|
|||
plen(P, {pqueue, Queues}) ->
|
||||
case lists:keysearch(maybe_negate_priority(P), 1, Queues) of
|
||||
{value, {_, Q}} -> len(Q);
|
||||
false -> 0
|
||||
false -> 0
|
||||
end.
|
||||
|
||||
-spec(to_list(pqueue()) -> [{priority(), any()}]).
|
||||
-spec to_list(pqueue()) -> [{priority(), any()}].
|
||||
to_list({queue, In, Out, _Len}) when is_list(In), is_list(Out) ->
|
||||
[{0, V} || V <- Out ++ lists:reverse(In, [])];
|
||||
to_list({pqueue, Queues}) ->
|
||||
[{maybe_negate_priority(P), V} || {P, Q} <- Queues,
|
||||
{0, V} <- to_list(Q)].
|
||||
[
|
||||
{maybe_negate_priority(P), V}
|
||||
|| {P, Q} <- Queues,
|
||||
{0, V} <- to_list(Q)
|
||||
].
|
||||
|
||||
-spec(from_list([{priority(), any()}]) -> pqueue()).
|
||||
-spec from_list([{priority(), any()}]) -> pqueue().
|
||||
from_list(L) ->
|
||||
lists:foldl(fun ({P, E}, Q) -> in(E, P, Q) end, new(), L).
|
||||
lists:foldl(fun({P, E}, Q) -> in(E, P, Q) end, new(), L).
|
||||
|
||||
-spec(in(any(), pqueue()) -> pqueue()).
|
||||
-spec in(any(), pqueue()) -> pqueue().
|
||||
in(Item, Q) ->
|
||||
in(Item, 0, Q).
|
||||
|
||||
-spec(in(any(), priority(), pqueue()) -> pqueue()).
|
||||
-spec in(any(), priority(), pqueue()) -> pqueue().
|
||||
in(X, 0, {queue, [_] = In, [], 1}) ->
|
||||
{queue, [X], In, 2};
|
||||
in(X, 0, {queue, In, Out, Len}) when is_list(In), is_list(Out) ->
|
||||
{queue, [X|In], Out, Len + 1};
|
||||
{queue, [X | In], Out, Len + 1};
|
||||
in(X, Priority, _Q = {queue, [], [], 0}) ->
|
||||
in(X, Priority, {pqueue, []});
|
||||
in(X, Priority, Q = {queue, _, _, _}) ->
|
||||
in(X, Priority, {pqueue, [{0, Q}]});
|
||||
in(X, Priority, {pqueue, Queues}) ->
|
||||
P = maybe_negate_priority(Priority),
|
||||
{pqueue, case lists:keysearch(P, 1, Queues) of
|
||||
{value, {_, Q}} ->
|
||||
lists:keyreplace(P, 1, Queues, {P, in(X, Q)});
|
||||
false when P == infinity ->
|
||||
[{P, {queue, [X], [], 1}} | Queues];
|
||||
false ->
|
||||
case Queues of
|
||||
[{infinity, InfQueue} | Queues1] ->
|
||||
[{infinity, InfQueue} |
|
||||
lists:keysort(1, [{P, {queue, [X], [], 1}} | Queues1])];
|
||||
_ ->
|
||||
lists:keysort(1, [{P, {queue, [X], [], 1}} | Queues])
|
||||
end
|
||||
end}.
|
||||
{pqueue,
|
||||
case lists:keysearch(P, 1, Queues) of
|
||||
{value, {_, Q}} ->
|
||||
lists:keyreplace(P, 1, Queues, {P, in(X, Q)});
|
||||
false when P == infinity ->
|
||||
[{P, {queue, [X], [], 1}} | Queues];
|
||||
false ->
|
||||
case Queues of
|
||||
[{infinity, InfQueue} | Queues1] ->
|
||||
[
|
||||
{infinity, InfQueue}
|
||||
| lists:keysort(1, [{P, {queue, [X], [], 1}} | Queues1])
|
||||
];
|
||||
_ ->
|
||||
lists:keysort(1, [{P, {queue, [X], [], 1}} | Queues])
|
||||
end
|
||||
end}.
|
||||
|
||||
-spec(out(pqueue()) -> {empty | {value, any()}, pqueue()}).
|
||||
-spec out(pqueue()) -> {empty | {value, any()}, pqueue()}.
|
||||
out({queue, [], [], 0} = Q) ->
|
||||
{empty, Q};
|
||||
out({queue, [V], [], 1}) ->
|
||||
{{value, V}, {queue, [], [], 0}};
|
||||
out({queue, [Y|In], [], Len}) ->
|
||||
[V|Out] = lists:reverse(In, []),
|
||||
out({queue, [Y | In], [], Len}) ->
|
||||
[V | Out] = lists:reverse(In, []),
|
||||
{{value, V}, {queue, [Y], Out, Len - 1}};
|
||||
out({queue, In, [V], Len}) when is_list(In) ->
|
||||
{{value,V}, r2f(In, Len - 1)};
|
||||
out({queue, In,[V|Out], Len}) when is_list(In) ->
|
||||
{{value, V}, r2f(In, Len - 1)};
|
||||
out({queue, In, [V | Out], Len}) when is_list(In) ->
|
||||
{{value, V}, {queue, In, Out, Len - 1}};
|
||||
out({pqueue, [{P, Q} | Queues]}) ->
|
||||
{R, Q1} = out(Q),
|
||||
NewQ = case is_empty(Q1) of
|
||||
true -> case Queues of
|
||||
[] -> {queue, [], [], 0};
|
||||
[{0, OnlyQ}] -> OnlyQ;
|
||||
[_|_] -> {pqueue, Queues}
|
||||
end;
|
||||
false -> {pqueue, [{P, Q1} | Queues]}
|
||||
end,
|
||||
NewQ =
|
||||
case is_empty(Q1) of
|
||||
true ->
|
||||
case Queues of
|
||||
[] -> {queue, [], [], 0};
|
||||
[{0, OnlyQ}] -> OnlyQ;
|
||||
[_ | _] -> {pqueue, Queues}
|
||||
end;
|
||||
false ->
|
||||
{pqueue, [{P, Q1} | Queues]}
|
||||
end,
|
||||
{R, NewQ}.
|
||||
|
||||
-spec(shift(pqueue()) -> pqueue()).
|
||||
-spec shift(pqueue()) -> pqueue().
|
||||
shift(Q = {queue, _, _, _}) ->
|
||||
Q;
|
||||
shift({pqueue, []}) ->
|
||||
{pqueue, []}; %% Shouldn't happen?
|
||||
shift({pqueue, [Hd|Rest]}) ->
|
||||
{pqueue, Rest ++ [Hd]}. %% Let's hope there are not many priorities.
|
||||
%% Shouldn't happen?
|
||||
{pqueue, []};
|
||||
shift({pqueue, [Hd | Rest]}) ->
|
||||
%% Let's hope there are not many priorities.
|
||||
{pqueue, Rest ++ [Hd]}.
|
||||
|
||||
-spec(out_p(pqueue()) -> {empty | {value, any(), priority()}, pqueue()}).
|
||||
out_p({queue, _, _, _} = Q) -> add_p(out(Q), 0);
|
||||
-spec out_p(pqueue()) -> {empty | {value, any(), priority()}, pqueue()}.
|
||||
out_p({queue, _, _, _} = Q) -> add_p(out(Q), 0);
|
||||
out_p({pqueue, [{P, _} | _]} = Q) -> add_p(out(Q), maybe_negate_priority(P)).
|
||||
|
||||
out(0, {queue, _, _, _} = Q) ->
|
||||
|
|
@@ -192,25 +208,28 @@ out(Priority, {pqueue, Queues}) ->
|
|||
case lists:keysearch(P, 1, Queues) of
|
||||
{value, {_, Q}} ->
|
||||
{R, Q1} = out(Q),
|
||||
Queues1 = case is_empty(Q1) of
|
||||
true -> lists:keydelete(P, 1, Queues);
|
||||
false -> lists:keyreplace(P, 1, Queues, {P, Q1})
|
||||
end,
|
||||
{R, case Queues1 of
|
||||
[] -> {queue, [], [], 0};
|
||||
Queues1 =
|
||||
case is_empty(Q1) of
|
||||
true -> lists:keydelete(P, 1, Queues);
|
||||
false -> lists:keyreplace(P, 1, Queues, {P, Q1})
|
||||
end,
|
||||
{R,
|
||||
case Queues1 of
|
||||
[] -> {queue, [], [], 0};
|
||||
[{0, OnlyQ}] -> OnlyQ;
|
||||
[_|_] -> {pqueue, Queues1}
|
||||
[_ | _] -> {pqueue, Queues1}
|
||||
end};
|
||||
false ->
|
||||
{empty, {pqueue, Queues}}
|
||||
end.
|
||||
|
||||
add_p(R, P) -> case R of
|
||||
{empty, Q} -> {empty, Q};
|
||||
{{value, V}, Q} -> {{value, V, P}, Q}
|
||||
end.
|
||||
add_p(R, P) ->
|
||||
case R of
|
||||
{empty, Q} -> {empty, Q};
|
||||
{{value, V}, Q} -> {{value, V, P}, Q}
|
||||
end.
|
||||
|
||||
-spec(join(pqueue(), pqueue()) -> pqueue()).
|
||||
-spec join(pqueue(), pqueue()) -> pqueue().
|
||||
join(A, {queue, [], [], 0}) ->
|
||||
A;
|
||||
join({queue, [], [], 0}, B) ->
|
||||
|
|
@@ -219,21 +238,23 @@ join({queue, AIn, AOut, ALen}, {queue, BIn, BOut, BLen}) ->
|
|||
{queue, BIn, AOut ++ lists:reverse(AIn, BOut), ALen + BLen};
|
||||
join(A = {queue, _, _, _}, {pqueue, BPQ}) ->
|
||||
{Pre, Post} =
|
||||
lists:splitwith(fun ({P, _}) -> P < 0 orelse P == infinity end, BPQ),
|
||||
Post1 = case Post of
|
||||
[] -> [ {0, A} ];
|
||||
[ {0, ZeroQueue} | Rest ] -> [ {0, join(A, ZeroQueue)} | Rest ];
|
||||
_ -> [ {0, A} | Post ]
|
||||
end,
|
||||
lists:splitwith(fun({P, _}) -> P < 0 orelse P == infinity end, BPQ),
|
||||
Post1 =
|
||||
case Post of
|
||||
[] -> [{0, A}];
|
||||
[{0, ZeroQueue} | Rest] -> [{0, join(A, ZeroQueue)} | Rest];
|
||||
_ -> [{0, A} | Post]
|
||||
end,
|
||||
{pqueue, Pre ++ Post1};
|
||||
join({pqueue, APQ}, B = {queue, _, _, _}) ->
|
||||
{Pre, Post} =
|
||||
lists:splitwith(fun ({P, _}) -> P < 0 orelse P == infinity end, APQ),
|
||||
Post1 = case Post of
|
||||
[] -> [ {0, B} ];
|
||||
[ {0, ZeroQueue} | Rest ] -> [ {0, join(ZeroQueue, B)} | Rest ];
|
||||
_ -> [ {0, B} | Post ]
|
||||
end,
|
||||
lists:splitwith(fun({P, _}) -> P < 0 orelse P == infinity end, APQ),
|
||||
Post1 =
|
||||
case Post of
|
||||
[] -> [{0, B}];
|
||||
[{0, ZeroQueue} | Rest] -> [{0, join(ZeroQueue, B)} | Rest];
|
||||
_ -> [{0, B} | Post]
|
||||
end,
|
||||
{pqueue, Pre ++ Post1};
|
||||
join({pqueue, APQ}, {pqueue, BPQ}) ->
|
||||
{pqueue, merge(APQ, BPQ, [])}.
|
||||
|
|
@@ -242,36 +263,42 @@ merge([], BPQ, Acc) ->
|
|||
lists:reverse(Acc, BPQ);
|
||||
merge(APQ, [], Acc) ->
|
||||
lists:reverse(Acc, APQ);
|
||||
merge([{P, A}|As], [{P, B}|Bs], Acc) ->
|
||||
merge(As, Bs, [ {P, join(A, B)} | Acc ]);
|
||||
merge([{PA, A}|As], Bs = [{PB, _}|_], Acc) when PA < PB orelse PA == infinity ->
|
||||
merge(As, Bs, [ {PA, A} | Acc ]);
|
||||
merge(As = [{_, _}|_], [{PB, B}|Bs], Acc) ->
|
||||
merge(As, Bs, [ {PB, B} | Acc ]).
|
||||
merge([{P, A} | As], [{P, B} | Bs], Acc) ->
|
||||
merge(As, Bs, [{P, join(A, B)} | Acc]);
|
||||
merge([{PA, A} | As], Bs = [{PB, _} | _], Acc) when PA < PB orelse PA == infinity ->
|
||||
merge(As, Bs, [{PA, A} | Acc]);
|
||||
merge(As = [{_, _} | _], [{PB, B} | Bs], Acc) ->
|
||||
merge(As, Bs, [{PB, B} | Acc]).
|
||||
|
||||
-spec(filter(fun ((any()) -> boolean()), pqueue()) -> pqueue()).
|
||||
filter(Pred, Q) -> fold(fun(V, P, Acc) ->
|
||||
case Pred(V) of
|
||||
true -> in(V, P, Acc);
|
||||
false -> Acc
|
||||
end
|
||||
end, new(), Q).
|
||||
-spec filter(fun((any()) -> boolean()), pqueue()) -> pqueue().
|
||||
filter(Pred, Q) ->
|
||||
fold(
|
||||
fun(V, P, Acc) ->
|
||||
case Pred(V) of
|
||||
true -> in(V, P, Acc);
|
||||
false -> Acc
|
||||
end
|
||||
end,
|
||||
new(),
|
||||
Q
|
||||
).
|
||||
|
||||
-spec(fold(fun ((any(), priority(), A) -> A), A, pqueue()) -> A).
|
||||
fold(Fun, Init, Q) -> case out_p(Q) of
|
||||
{empty, _Q} -> Init;
|
||||
{{value, V, P}, Q1} -> fold(Fun, Fun(V, P, Init), Q1)
|
||||
end.
|
||||
-spec fold(fun((any(), priority(), A) -> A), A, pqueue()) -> A.
|
||||
fold(Fun, Init, Q) ->
|
||||
case out_p(Q) of
|
||||
{empty, _Q} -> Init;
|
||||
{{value, V, P}, Q1} -> fold(Fun, Fun(V, P, Init), Q1)
|
||||
end.
|
||||
|
||||
-spec(highest(pqueue()) -> priority() | 'empty').
|
||||
highest({queue, [], [], 0}) -> empty;
|
||||
highest({queue, _, _, _}) -> 0;
|
||||
-spec highest(pqueue()) -> priority() | 'empty'.
|
||||
highest({queue, [], [], 0}) -> empty;
|
||||
highest({queue, _, _, _}) -> 0;
|
||||
highest({pqueue, [{P, _} | _]}) -> maybe_negate_priority(P).
|
||||
|
||||
r2f([], 0) -> {queue, [], [], 0};
|
||||
r2f([_] = R, 1) -> {queue, [], R, 1};
|
||||
r2f([X,Y], 2) -> {queue, [X], [Y], 2};
|
||||
r2f([X,Y|R], L) -> {queue, [X,Y], lists:reverse(R, []), L}.
|
||||
r2f([X, Y], 2) -> {queue, [X], [Y], 2};
|
||||
r2f([X, Y | R], L) -> {queue, [X, Y], lists:reverse(R, []), L}.
|
||||
|
||||
maybe_negate_priority(infinity) -> infinity;
|
||||
maybe_negate_priority(P) -> -P.
|
||||
maybe_negate_priority(P) -> -P.
@@ -17,15 +17,15 @@
|
|||
-module(emqx_quic_connection).
|
||||
|
||||
%% Callbacks
|
||||
-export([ init/1
|
||||
, new_conn/2
|
||||
, connected/2
|
||||
, shutdown/2
|
||||
]).
|
||||
-export([
|
||||
init/1,
|
||||
new_conn/2,
|
||||
connected/2,
|
||||
shutdown/2
|
||||
]).
|
||||
|
||||
-type cb_state() :: map() | proplists:proplist().
|
||||
|
||||
|
||||
-spec init(cb_state()) -> cb_state().
|
||||
init(ConnOpts) when is_list(ConnOpts) ->
|
||||
init(maps:from_list(ConnOpts));
@@ -18,18 +18,19 @@
|
|||
-module(emqx_quic_stream).
|
||||
|
||||
%% emqx transport Callbacks
|
||||
-export([ type/1
|
||||
, wait/1
|
||||
, getstat/2
|
||||
, fast_close/1
|
||||
, ensure_ok_or_exit/2
|
||||
, async_send/3
|
||||
, setopts/2
|
||||
, getopts/2
|
||||
, peername/1
|
||||
, sockname/1
|
||||
, peercert/1
|
||||
]).
|
||||
-export([
|
||||
type/1,
|
||||
wait/1,
|
||||
getstat/2,
|
||||
fast_close/1,
|
||||
ensure_ok_or_exit/2,
|
||||
async_send/3,
|
||||
setopts/2,
|
||||
getopts/2,
|
||||
peername/1,
|
||||
sockname/1,
|
||||
peercert/1
|
||||
]).
|
||||
|
||||
wait({ConnOwner, Conn}) ->
|
||||
{ok, Conn} = quicer:async_accept_stream(Conn, []),
|
||||
|
|
@@ -62,28 +63,34 @@ getstat(Socket, Stats) ->
|
|||
end.
|
||||
|
||||
setopts(Socket, Opts) ->
|
||||
lists:foreach(fun({Opt, V}) when is_atom(Opt) ->
|
||||
quicer:setopt(Socket, Opt, V);
|
||||
(Opt) when is_atom(Opt) ->
|
||||
quicer:setopt(Socket, Opt, true)
|
||||
end, Opts),
|
||||
lists:foreach(
|
||||
fun
|
||||
({Opt, V}) when is_atom(Opt) ->
|
||||
quicer:setopt(Socket, Opt, V);
|
||||
(Opt) when is_atom(Opt) ->
|
||||
quicer:setopt(Socket, Opt, true)
|
||||
end,
|
||||
Opts
|
||||
),
|
||||
ok.
|
||||
|
||||
getopts(_Socket, _Opts) ->
|
||||
%% @todo
|
||||
{ok, [{high_watermark, 0},
|
||||
{high_msgq_watermark, 0},
|
||||
{sndbuf, 0},
|
||||
{recbuf, 0},
|
||||
{buffer,80000}]}.
|
||||
{ok, [
|
||||
{high_watermark, 0},
|
||||
{high_msgq_watermark, 0},
|
||||
{sndbuf, 0},
|
||||
{recbuf, 0},
|
||||
{buffer, 80000}
|
||||
]}.
|
||||
|
||||
fast_close(Stream) ->
|
||||
%% Stream might be closed already.
|
||||
_ = quicer:async_close_stream(Stream),
|
||||
ok.
|
||||
|
||||
-spec(ensure_ok_or_exit(atom(), list(term())) -> term()).
|
||||
ensure_ok_or_exit(Fun, Args = [Sock|_]) when is_atom(Fun), is_list(Args) ->
|
||||
-spec ensure_ok_or_exit(atom(), list(term())) -> term().
|
||||
ensure_ok_or_exit(Fun, Args = [Sock | _]) when is_atom(Fun), is_list(Args) ->
|
||||
case erlang:apply(?MODULE, Fun, Args) of
|
||||
{error, Reason} when Reason =:= enotconn; Reason =:= closed ->
|
||||
fast_close(Sock),
|
||||
|
|
@ -91,7 +98,8 @@ ensure_ok_or_exit(Fun, Args = [Sock|_]) when is_atom(Fun), is_list(Args) ->
|
|||
{error, Reason} ->
|
||||
fast_close(Sock),
|
||||
exit({shutdown, Reason});
|
||||
Result -> Result
|
||||
Result ->
|
||||
Result
|
||||
end.
|
||||
|
||||
async_send(Stream, Data, Options) when is_list(Data) ->
|
||||
|
|
@ -99,6 +107,5 @@ async_send(Stream, Data, Options) when is_list(Data) ->
|
|||
async_send(Stream, Data, _Options) when is_binary(Data) ->
|
||||
case quicer:send(Stream, Data) of
|
||||
{ok, _Len} -> ok;
|
||||
Other ->
|
||||
Other
|
||||
Other -> Other
|
||||
end.

@ -19,27 +19,36 @@
-include("emqx_mqtt.hrl").
|
||||
|
||||
-export([ name/1
|
||||
, name/2
|
||||
, text/1
|
||||
, text/2
|
||||
]).
|
||||
-export([
|
||||
name/1,
|
||||
name/2,
|
||||
text/1,
|
||||
text/2
|
||||
]).
|
||||
|
||||
-export([ frame_error/1
|
||||
, connack_error/1
|
||||
]).
|
||||
-export([
|
||||
frame_error/1,
|
||||
connack_error/1
|
||||
]).
|
||||
|
||||
-export([compat/2]).
|
||||
|
||||
name(I, Ver) when Ver >= ?MQTT_PROTO_V5 ->
|
||||
name(I);
|
||||
name(0, _Ver) -> connection_accepted;
|
||||
name(1, _Ver) -> unacceptable_protocol_version;
|
||||
name(2, _Ver) -> client_identifier_not_valid;
|
||||
name(3, _Ver) -> server_unavaliable;
|
||||
name(4, _Ver) -> malformed_username_or_password;
|
||||
name(5, _Ver) -> unauthorized_client;
|
||||
name(_, _Ver) -> unknown_error.
|
||||
name(0, _Ver) ->
|
||||
connection_accepted;
|
||||
name(1, _Ver) ->
|
||||
unacceptable_protocol_version;
|
||||
name(2, _Ver) ->
|
||||
client_identifier_not_valid;
|
||||
name(3, _Ver) ->
|
||||
server_unavaliable;
|
||||
name(4, _Ver) ->
|
||||
malformed_username_or_password;
|
||||
name(5, _Ver) ->
|
||||
unauthorized_client;
|
||||
name(_, _Ver) ->
|
||||
unknown_error.
|
||||
|
||||
name(16#00) -> success;
|
||||
name(16#01) -> granted_qos1;
|
||||
|
|
@ -88,13 +97,20 @@ name(_Code) -> unknown_error.
|
|||
|
||||
text(I, Ver) when Ver >= ?MQTT_PROTO_V5 ->
|
||||
text(I);
|
||||
text(0, _Ver) -> <<"Connection accepted">>;
|
||||
text(1, _Ver) -> <<"unacceptable_protocol_version">>;
|
||||
text(2, _Ver) -> <<"client_identifier_not_valid">>;
|
||||
text(3, _Ver) -> <<"server_unavaliable">>;
|
||||
text(4, _Ver) -> <<"malformed_username_or_password">>;
|
||||
text(5, _Ver) -> <<"unauthorized_client">>;
|
||||
text(_, _Ver) -> <<"unknown_error">>.
|
||||
text(0, _Ver) ->
|
||||
<<"Connection accepted">>;
|
||||
text(1, _Ver) ->
|
||||
<<"unacceptable_protocol_version">>;
|
||||
text(2, _Ver) ->
|
||||
<<"client_identifier_not_valid">>;
|
||||
text(3, _Ver) ->
|
||||
<<"server_unavaliable">>;
|
||||
text(4, _Ver) ->
|
||||
<<"malformed_username_or_password">>;
|
||||
text(5, _Ver) ->
|
||||
<<"unauthorized_client">>;
|
||||
text(_, _Ver) ->
|
||||
<<"unknown_error">>.
|
||||
|
||||
text(16#00) -> <<"Success">>;
|
||||
text(16#01) -> <<"Granted QoS 1">>;
|
||||
|
|
@ -159,10 +175,8 @@ compat(connack, 16#97) -> ?CONNACK_SERVER;
|
|||
compat(connack, 16#9C) -> ?CONNACK_SERVER;
|
||||
compat(connack, 16#9D) -> ?CONNACK_SERVER;
|
||||
compat(connack, 16#9F) -> ?CONNACK_SERVER;
|
||||
|
||||
compat(suback, Code) when Code =< ?QOS_2 -> Code;
|
||||
compat(suback, Code) when Code >= 16#80 -> 16#80;
|
||||
|
||||
compat(suback, Code) when Code >= 16#80 -> 16#80;
|
||||
compat(unsuback, _Code) -> undefined;
|
||||
compat(_Other, _Code) -> undefined.
|
||||
|
||||
|
|
@ -177,4 +191,3 @@ connack_error(server_busy) -> ?RC_SERVER_BUSY;
|
|||
connack_error(banned) -> ?RC_BANNED;
|
||||
connack_error(bad_authentication_method) -> ?RC_BAD_AUTHENTICATION_METHOD;
|
||||
connack_error(_) -> ?RC_UNSPECIFIED_ERROR.

@ -16,24 +16,25 @@
-module(emqx_release).
|
||||
|
||||
-export([ edition/0
|
||||
, description/0
|
||||
, version/0
|
||||
]).
|
||||
-export([
|
||||
edition/0,
|
||||
description/0,
|
||||
version/0
|
||||
]).
|
||||
|
||||
-include("emqx_release.hrl").
|
||||
|
||||
-define(EMQX_DESCS,
|
||||
#{ee => "EMQX Enterprise",
|
||||
ce => "EMQX",
|
||||
edge => "EMQX Edge"
|
||||
}).
|
||||
-define(EMQX_DESCS, #{
|
||||
ee => "EMQX Enterprise",
|
||||
ce => "EMQX",
|
||||
edge => "EMQX Edge"
|
||||
}).
|
||||
|
||||
-define(EMQX_REL_VSNS,
|
||||
#{ee => ?EMQX_RELEASE_EE,
|
||||
ce => ?EMQX_RELEASE_CE,
|
||||
edge => ?EMQX_RELEASE_CE
|
||||
}).
|
||||
-define(EMQX_REL_VSNS, #{
|
||||
ee => ?EMQX_RELEASE_EE,
|
||||
ce => ?EMQX_RELEASE_CE,
|
||||
edge => ?EMQX_RELEASE_CE
|
||||
}).
|
||||
|
||||
%% @doc Return EMQX description.
|
||||
description() ->
|
||||
|
|
@ -52,17 +53,21 @@ edition() -> ce.
|
|||
%% @doc Return the release version.
|
||||
version() ->
|
||||
case lists:keyfind(emqx_vsn, 1, ?MODULE:module_info(compile)) of
|
||||
false -> %% For TEST build or dependency build.
|
||||
%% For TEST build or dependency build.
|
||||
false ->
|
||||
build_vsn();
|
||||
{_, Vsn} -> %% For emqx release build
|
||||
%% For emqx release build
|
||||
{_, Vsn} ->
|
||||
VsnStr = build_vsn(),
|
||||
case string:str(Vsn, VsnStr) of
|
||||
1 -> ok;
|
||||
1 ->
|
||||
ok;
|
||||
_ ->
|
||||
erlang:error(#{ reason => version_mismatch
|
||||
, source => VsnStr
|
||||
, built_for => Vsn
|
||||
})
|
||||
erlang:error(#{
|
||||
reason => version_mismatch,
|
||||
source => VsnStr,
|
||||
built_for => Vsn
|
||||
})
|
||||
end,
|
||||
Vsn
|
||||
end.
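The reformatted clause above keeps the original check: the emqx_vsn recorded at compile time must start with the release version built from emqx_release.hrl, otherwise a version_mismatch error is raised. A minimal sketch of that prefix test, with made-up version strings (illustrative only, not values from this commit), runnable in an Erlang shell:

1 = string:str("5.0.0-beta.1", "5.0.0"), %% substring found at position 1, so the check passes
0 = string:str("5.0.0-beta.1", "4.4.0"). %% not found, so version/0 would raise version_mismatch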

@ -23,7 +23,6 @@
-include("types.hrl").
|
||||
-include_lib("ekka/include/ekka.hrl").
|
||||
|
||||
|
||||
%% Mnesia bootstrap
|
||||
-export([mnesia/1]).
|
||||
|
||||
|
|
@ -32,39 +31,43 @@
|
|||
-export([start_link/2]).
|
||||
|
||||
%% Route APIs
|
||||
-export([ add_route/1
|
||||
, add_route/2
|
||||
, do_add_route/1
|
||||
, do_add_route/2
|
||||
]).
|
||||
-export([
|
||||
add_route/1,
|
||||
add_route/2,
|
||||
do_add_route/1,
|
||||
do_add_route/2
|
||||
]).
|
||||
|
||||
-export([ delete_route/1
|
||||
, delete_route/2
|
||||
, do_delete_route/1
|
||||
, do_delete_route/2
|
||||
]).
|
||||
-export([
|
||||
delete_route/1,
|
||||
delete_route/2,
|
||||
do_delete_route/1,
|
||||
do_delete_route/2
|
||||
]).
|
||||
|
||||
-export([ match_routes/1
|
||||
, lookup_routes/1
|
||||
, has_routes/1
|
||||
]).
|
||||
-export([
|
||||
match_routes/1,
|
||||
lookup_routes/1,
|
||||
has_routes/1
|
||||
]).
|
||||
|
||||
-export([print_routes/1]).
|
||||
|
||||
-export([topics/0]).
|
||||
|
||||
%% gen_server callbacks
|
||||
-export([ init/1
|
||||
, handle_call/3
|
||||
, handle_cast/2
|
||||
, handle_info/2
|
||||
, terminate/2
|
||||
, code_change/3
|
||||
]).
|
||||
-export([
|
||||
init/1,
|
||||
handle_call/3,
|
||||
handle_cast/2,
|
||||
handle_info/2,
|
||||
terminate/2,
|
||||
code_change/3
|
||||
]).
|
||||
|
||||
-type(group() :: binary()).
|
||||
-type group() :: binary().
|
||||
|
||||
-type(dest() :: node() | {group(), node()}).
|
||||
-type dest() :: node() | {group(), node()}.
|
||||
|
||||
-define(ROUTE_TAB, emqx_route).
|
||||
|
||||
|
|
@ -75,48 +78,58 @@
|
|||
mnesia(boot) ->
|
||||
mria_config:set_dirty_shard(?ROUTE_SHARD, true),
|
||||
ok = mria:create_table(?ROUTE_TAB, [
|
||||
{type, bag},
|
||||
{rlog_shard, ?ROUTE_SHARD},
|
||||
{storage, ram_copies},
|
||||
{record_name, route},
|
||||
{attributes, record_info(fields, route)},
|
||||
{storage_properties, [{ets, [{read_concurrency, true},
|
||||
{write_concurrency, true}]}]}]).
|
||||
{type, bag},
|
||||
{rlog_shard, ?ROUTE_SHARD},
|
||||
{storage, ram_copies},
|
||||
{record_name, route},
|
||||
{attributes, record_info(fields, route)},
|
||||
{storage_properties, [
|
||||
{ets, [
|
||||
{read_concurrency, true},
|
||||
{write_concurrency, true}
|
||||
]}
|
||||
]}
|
||||
]).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Start a router
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(start_link(atom(), pos_integer()) -> startlink_ret()).
|
||||
-spec start_link(atom(), pos_integer()) -> startlink_ret().
|
||||
start_link(Pool, Id) ->
|
||||
gen_server:start_link({local, emqx_misc:proc_name(?MODULE, Id)},
|
||||
?MODULE, [Pool, Id], [{hibernate_after, 1000}]).
|
||||
gen_server:start_link(
|
||||
{local, emqx_misc:proc_name(?MODULE, Id)},
|
||||
?MODULE,
|
||||
[Pool, Id],
|
||||
[{hibernate_after, 1000}]
|
||||
).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Route APIs
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(add_route(emqx_types:topic()) -> ok | {error, term()}).
|
||||
-spec add_route(emqx_types:topic()) -> ok | {error, term()}.
|
||||
add_route(Topic) when is_binary(Topic) ->
|
||||
add_route(Topic, node()).
|
||||
|
||||
-spec(add_route(emqx_types:topic(), dest()) -> ok | {error, term()}).
|
||||
-spec add_route(emqx_types:topic(), dest()) -> ok | {error, term()}.
|
||||
add_route(Topic, Dest) when is_binary(Topic) ->
|
||||
call(pick(Topic), {add_route, Topic, Dest}).
|
||||
|
||||
-spec(do_add_route(emqx_types:topic()) -> ok | {error, term()}).
|
||||
-spec do_add_route(emqx_types:topic()) -> ok | {error, term()}.
|
||||
do_add_route(Topic) when is_binary(Topic) ->
|
||||
do_add_route(Topic, node()).
|
||||
|
||||
-spec(do_add_route(emqx_types:topic(), dest()) -> ok | {error, term()}).
|
||||
-spec do_add_route(emqx_types:topic(), dest()) -> ok | {error, term()}.
|
||||
do_add_route(Topic, Dest) when is_binary(Topic) ->
|
||||
Route = #route{topic = Topic, dest = Dest},
|
||||
case lists:member(Route, lookup_routes(Topic)) of
|
||||
true -> ok;
|
||||
true ->
|
||||
ok;
|
||||
false ->
|
||||
ok = emqx_router_helper:monitor(Dest),
|
||||
case emqx_topic:wildcard(Topic) of
|
||||
true ->
|
||||
true ->
|
||||
Fun = fun emqx_router_utils:insert_trie_route/2,
|
||||
emqx_router_utils:maybe_trans(Fun, [?ROUTE_TAB, Route], ?ROUTE_SHARD);
|
||||
false ->
|
||||
|
|
@ -125,12 +138,11 @@ do_add_route(Topic, Dest) when is_binary(Topic) ->
|
|||
end.
|
||||
|
||||
%% @doc Match routes
|
||||
-spec(match_routes(emqx_types:topic()) -> [emqx_types:route()]).
|
||||
-spec match_routes(emqx_types:topic()) -> [emqx_types:route()].
|
||||
match_routes(Topic) when is_binary(Topic) ->
|
||||
case match_trie(Topic) of
|
||||
[] -> lookup_routes(Topic);
|
||||
Matched ->
|
||||
lists:append([lookup_routes(To) || To <- [Topic | Matched]])
|
||||
Matched -> lists:append([lookup_routes(To) || To <- [Topic | Matched]])
|
||||
end.
|
||||
|
||||
%% Optimize: routing table will be replicated to all router nodes.
|
||||
|
|
@ -140,47 +152,50 @@ match_trie(Topic) ->
|
|||
false -> emqx_trie:match(Topic)
|
||||
end.
|
||||
|
||||
-spec(lookup_routes(emqx_types:topic()) -> [emqx_types:route()]).
|
||||
-spec lookup_routes(emqx_types:topic()) -> [emqx_types:route()].
|
||||
lookup_routes(Topic) ->
|
||||
ets:lookup(?ROUTE_TAB, Topic).
|
||||
|
||||
-spec(has_routes(emqx_types:topic()) -> boolean()).
|
||||
-spec has_routes(emqx_types:topic()) -> boolean().
|
||||
has_routes(Topic) when is_binary(Topic) ->
|
||||
ets:member(?ROUTE_TAB, Topic).
|
||||
|
||||
-spec(delete_route(emqx_types:topic()) -> ok | {error, term()}).
|
||||
-spec delete_route(emqx_types:topic()) -> ok | {error, term()}.
|
||||
delete_route(Topic) when is_binary(Topic) ->
|
||||
delete_route(Topic, node()).
|
||||
|
||||
-spec(delete_route(emqx_types:topic(), dest()) -> ok | {error, term()}).
|
||||
-spec delete_route(emqx_types:topic(), dest()) -> ok | {error, term()}.
|
||||
delete_route(Topic, Dest) when is_binary(Topic) ->
|
||||
call(pick(Topic), {delete_route, Topic, Dest}).
|
||||
|
||||
-spec(do_delete_route(emqx_types:topic()) -> ok | {error, term()}).
|
||||
-spec do_delete_route(emqx_types:topic()) -> ok | {error, term()}.
|
||||
do_delete_route(Topic) when is_binary(Topic) ->
|
||||
do_delete_route(Topic, node()).
|
||||
|
||||
-spec(do_delete_route(emqx_types:topic(), dest()) -> ok | {error, term()}).
|
||||
-spec do_delete_route(emqx_types:topic(), dest()) -> ok | {error, term()}.
|
||||
do_delete_route(Topic, Dest) ->
|
||||
Route = #route{topic = Topic, dest = Dest},
|
||||
case emqx_topic:wildcard(Topic) of
|
||||
true ->
|
||||
true ->
|
||||
Fun = fun emqx_router_utils:delete_trie_route/2,
|
||||
emqx_router_utils:maybe_trans(Fun, [?ROUTE_TAB, Route], ?ROUTE_SHARD);
|
||||
false ->
|
||||
emqx_router_utils:delete_direct_route(?ROUTE_TAB, Route)
|
||||
end.
|
||||
|
||||
-spec(topics() -> list(emqx_types:topic())).
|
||||
-spec topics() -> list(emqx_types:topic()).
|
||||
topics() ->
|
||||
mnesia:dirty_all_keys(?ROUTE_TAB).
|
||||
|
||||
%% @doc Print routes to a topic
|
||||
-spec(print_routes(emqx_types:topic()) -> ok).
|
||||
-spec print_routes(emqx_types:topic()) -> ok.
|
||||
print_routes(Topic) ->
|
||||
lists:foreach(fun(#route{topic = To, dest = Dest}) ->
|
||||
io:format("~ts -> ~ts~n", [To, Dest])
|
||||
end, match_routes(Topic)).
|
||||
lists:foreach(
|
||||
fun(#route{topic = To, dest = Dest}) ->
|
||||
io:format("~ts -> ~ts~n", [To, Dest])
|
||||
end,
|
||||
match_routes(Topic)
|
||||
).
|
||||
|
||||
call(Router, Msg) ->
|
||||
gen_server:call(Router, Msg, infinity).
|
||||
|
|
@ -199,11 +214,9 @@ init([Pool, Id]) ->
|
|||
handle_call({add_route, Topic, Dest}, _From, State) ->
|
||||
Ok = do_add_route(Topic, Dest),
|
||||
{reply, Ok, State};
|
||||
|
||||
handle_call({delete_route, Topic, Dest}, _From, State) ->
|
||||
Ok = do_delete_route(Topic, Dest),
|
||||
{reply, Ok, State};
|
||||
|
||||
handle_call(Req, _From, State) ->
|
||||
?SLOG(error, #{msg => "unexpected_call", call => Req}),
|
||||
{reply, ignored, State}.

@ -23,28 +23,29 @@
-include("types.hrl").
|
||||
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
|
||||
|
||||
|
||||
%% Mnesia bootstrap
|
||||
-export([mnesia/1]).
|
||||
|
||||
-boot_mnesia({mnesia, [boot]}).
|
||||
|
||||
%% API
|
||||
-export([ start_link/0
|
||||
, monitor/1
|
||||
]).
|
||||
-export([
|
||||
start_link/0,
|
||||
monitor/1
|
||||
]).
|
||||
|
||||
%% Internal export
|
||||
-export([stats_fun/0]).
|
||||
|
||||
%% gen_server callbacks
|
||||
-export([ init/1
|
||||
, handle_call/3
|
||||
, handle_cast/2
|
||||
, handle_info/2
|
||||
, terminate/2
|
||||
, code_change/3
|
||||
]).
|
||||
-export([
|
||||
init/1,
|
||||
handle_call/3,
|
||||
handle_cast/2,
|
||||
handle_info/2,
|
||||
terminate/2,
|
||||
code_change/3
|
||||
]).
|
||||
|
||||
-record(routing_node, {name, const = unused}).
|
||||
|
||||
|
|
@ -60,30 +61,33 @@
|
|||
|
||||
mnesia(boot) ->
|
||||
ok = mria:create_table(?ROUTING_NODE, [
|
||||
{type, set},
|
||||
{rlog_shard, ?ROUTE_SHARD},
|
||||
{storage, ram_copies},
|
||||
{record_name, routing_node},
|
||||
{attributes, record_info(fields, routing_node)},
|
||||
{storage_properties, [{ets, [{read_concurrency, true}]}]}]).
|
||||
{type, set},
|
||||
{rlog_shard, ?ROUTE_SHARD},
|
||||
{storage, ram_copies},
|
||||
{record_name, routing_node},
|
||||
{attributes, record_info(fields, routing_node)},
|
||||
{storage_properties, [{ets, [{read_concurrency, true}]}]}
|
||||
]).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% API
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
%% @doc Starts the router helper
|
||||
-spec(start_link() -> startlink_ret()).
|
||||
-spec start_link() -> startlink_ret().
|
||||
start_link() ->
|
||||
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
|
||||
|
||||
%% @doc Monitor routing node
|
||||
-spec(monitor(node() | {binary(), node()}) -> ok).
|
||||
-spec monitor(node() | {binary(), node()}) -> ok.
|
||||
monitor({_Group, Node}) ->
|
||||
monitor(Node);
|
||||
monitor(Node) when is_atom(Node) ->
|
||||
case ekka:is_member(Node)
|
||||
orelse ets:member(?ROUTING_NODE, Node) of
|
||||
true -> ok;
|
||||
case
|
||||
ekka:is_member(Node) orelse
|
||||
ets:member(?ROUTING_NODE, Node)
|
||||
of
|
||||
true -> ok;
|
||||
false -> mria:dirty_write(?ROUTING_NODE, #routing_node{name = Node})
|
||||
end.
|
||||
|
||||
|
|
@ -97,13 +101,18 @@ init([]) ->
|
|||
_ = mria:wait_for_tables([?ROUTING_NODE]),
|
||||
{ok, _} = mnesia:subscribe({table, ?ROUTING_NODE, simple}),
|
||||
Nodes = lists:foldl(
|
||||
fun(Node, Acc) ->
|
||||
case ekka:is_member(Node) of
|
||||
true -> Acc;
|
||||
false -> true = erlang:monitor_node(Node, true),
|
||||
[Node | Acc]
|
||||
end
|
||||
end, [], mnesia:dirty_all_keys(?ROUTING_NODE)),
|
||||
fun(Node, Acc) ->
|
||||
case ekka:is_member(Node) of
|
||||
true ->
|
||||
Acc;
|
||||
false ->
|
||||
true = erlang:monitor_node(Node, true),
|
||||
[Node | Acc]
|
||||
end
|
||||
end,
|
||||
[],
|
||||
mnesia:dirty_all_keys(?ROUTING_NODE)
|
||||
),
|
||||
ok = emqx_stats:update_interval(route_stats, fun ?MODULE:stats_fun/0),
|
||||
{ok, #{nodes => Nodes}, hibernate}.
|
||||
|
||||
|
|
@ -115,41 +124,39 @@ handle_cast(Msg, State) ->
|
|||
?SLOG(error, #{msg => "unexpected_cast", cast => Msg}),
|
||||
{noreply, State}.
|
||||
|
||||
handle_info({mnesia_table_event, {write, {?ROUTING_NODE, Node, _}, _}},
|
||||
State = #{nodes := Nodes}) ->
|
||||
handle_info(
|
||||
{mnesia_table_event, {write, {?ROUTING_NODE, Node, _}, _}},
|
||||
State = #{nodes := Nodes}
|
||||
) ->
|
||||
case ekka:is_member(Node) orelse lists:member(Node, Nodes) of
|
||||
true -> {noreply, State};
|
||||
true ->
|
||||
{noreply, State};
|
||||
false ->
|
||||
true = erlang:monitor_node(Node, true),
|
||||
{noreply, State#{nodes := [Node | Nodes]}}
|
||||
end;
|
||||
|
||||
handle_info({mnesia_table_event, {delete, {?ROUTING_NODE, _Node}, _}}, State) ->
|
||||
%% ignore
|
||||
{noreply, State};
|
||||
|
||||
handle_info({mnesia_table_event, Event}, State) ->
|
||||
?SLOG(error,#{msg => "unexpected_mnesia_table_event", event => Event}),
|
||||
?SLOG(error, #{msg => "unexpected_mnesia_table_event", event => Event}),
|
||||
{noreply, State};
|
||||
|
||||
handle_info({nodedown, Node}, State = #{nodes := Nodes}) ->
|
||||
global:trans({?LOCK, self()},
|
||||
fun() ->
|
||||
mria:transaction(?ROUTE_SHARD, fun cleanup_routes/1, [Node])
|
||||
end),
|
||||
global:trans(
|
||||
{?LOCK, self()},
|
||||
fun() ->
|
||||
mria:transaction(?ROUTE_SHARD, fun cleanup_routes/1, [Node])
|
||||
end
|
||||
),
|
||||
ok = mria:dirty_delete(?ROUTING_NODE, Node),
|
||||
?tp(emqx_router_helper_cleanup_done, #{node => Node}),
|
||||
{noreply, State#{nodes := lists:delete(Node, Nodes)}, hibernate};
|
||||
|
||||
handle_info({membership, {mnesia, down, Node}}, State) ->
|
||||
handle_info({nodedown, Node}, State);
|
||||
|
||||
handle_info({membership, {node, down, Node}}, State) ->
|
||||
handle_info({nodedown, Node}, State);
|
||||
|
||||
handle_info({membership, _Event}, State) ->
|
||||
{noreply, State};
|
||||
|
||||
handle_info(Info, State) ->
|
||||
?SLOG(error, #{msg => "unexpected_info", info => Info}),
|
||||
{noreply, State}.
|
||||
|
|
@ -168,14 +175,19 @@ code_change(_OldVsn, State, _Extra) ->
|
|||
|
||||
stats_fun() ->
|
||||
case ets:info(?ROUTE, size) of
|
||||
undefined -> ok;
|
||||
undefined ->
|
||||
ok;
|
||||
Size ->
|
||||
emqx_stats:setstat('routes.count', 'routes.max', Size),
|
||||
emqx_stats:setstat('topics.count', 'topics.max', Size)
|
||||
end.
|
||||
|
||||
cleanup_routes(Node) ->
|
||||
Patterns = [#route{_ = '_', dest = Node},
|
||||
#route{_ = '_', dest = {'_', Node}}],
|
||||
[mnesia:delete_object(?ROUTE, Route, write)
|
||||
|| Pat <- Patterns, Route <- mnesia:match_object(?ROUTE, Pat, write)].
|
||||
Patterns = [
|
||||
#route{_ = '_', dest = Node},
|
||||
#route{_ = '_', dest = {'_', Node}}
|
||||
],
|
||||
[
|
||||
mnesia:delete_object(?ROUTE, Route, write)
|
||||
|| Pat <- Patterns, Route <- mnesia:match_object(?ROUTE, Pat, write)
|
||||
].

@ -27,14 +27,18 @@ start_link() ->
init([]) ->
|
||||
%% Router helper
|
||||
Helper = #{id => helper,
|
||||
start => {emqx_router_helper, start_link, []},
|
||||
restart => permanent,
|
||||
shutdown => 5000,
|
||||
type => worker,
|
||||
modules => [emqx_router_helper]},
|
||||
Helper = #{
|
||||
id => helper,
|
||||
start => {emqx_router_helper, start_link, []},
|
||||
restart => permanent,
|
||||
shutdown => 5000,
|
||||
type => worker,
|
||||
modules => [emqx_router_helper]
|
||||
},
|
||||
%% Router pool
|
||||
RouterPool = emqx_pool_sup:spec([router_pool, hash,
|
||||
{emqx_router, start_link, []}]),
|
||||
RouterPool = emqx_pool_sup:spec([
|
||||
router_pool,
|
||||
hash,
|
||||
{emqx_router, start_link, []}
|
||||
]),
|
||||
{ok, {{one_for_all, 0, 1}, [Helper, RouterPool]}}.
|
||||
|
||||
|
|
|
|||
|
|
@ -18,14 +18,15 @@
|
|||
|
||||
-include("emqx.hrl").
|
||||
|
||||
-export([ delete_direct_route/2
|
||||
, delete_trie_route/2
|
||||
, delete_session_trie_route/2
|
||||
, insert_direct_route/2
|
||||
, insert_trie_route/2
|
||||
, insert_session_trie_route/2
|
||||
, maybe_trans/3
|
||||
]).
|
||||
-export([
|
||||
delete_direct_route/2,
|
||||
delete_trie_route/2,
|
||||
delete_session_trie_route/2,
|
||||
insert_direct_route/2,
|
||||
insert_trie_route/2,
|
||||
insert_session_trie_route/2,
|
||||
maybe_trans/3
|
||||
]).
|
||||
|
||||
insert_direct_route(Tab, Route) ->
|
||||
mria:dirty_write(Tab, Route).
|
||||
|
|
@ -33,14 +34,14 @@ insert_direct_route(Tab, Route) ->
|
|||
insert_trie_route(RouteTab, Route = #route{topic = Topic}) ->
|
||||
case mnesia:wread({RouteTab, Topic}) of
|
||||
[] -> emqx_trie:insert(Topic);
|
||||
_ -> ok
|
||||
_ -> ok
|
||||
end,
|
||||
mnesia:write(RouteTab, Route, sticky_write).
|
||||
|
||||
insert_session_trie_route(RouteTab, Route = #route{topic = Topic}) ->
|
||||
case mnesia:wread({RouteTab, Topic}) of
|
||||
[] -> emqx_trie:insert_session(Topic);
|
||||
_ -> ok
|
||||
[] -> emqx_trie:insert_session(Topic);
|
||||
_ -> ok
|
||||
end,
|
||||
mnesia:write(RouteTab, Route, sticky_write).
|
||||
|
||||
|
|
@ -59,10 +60,10 @@ delete_trie_route(RouteTab, Route = #route{topic = Topic}, Type) ->
|
|||
%% Remove route and trie
|
||||
ok = mnesia:delete_object(RouteTab, Route, sticky_write),
|
||||
case Type of
|
||||
normal -> emqx_trie:delete(Topic);
|
||||
normal -> emqx_trie:delete(Topic);
|
||||
session -> emqx_trie:delete_session(Topic)
|
||||
end;
|
||||
[_|_] ->
|
||||
[_ | _] ->
|
||||
%% Remove route only
|
||||
mnesia:delete_object(RouteTab, Route, sticky_write);
|
||||
[] ->
|
||||
|
|
@ -70,30 +71,37 @@ delete_trie_route(RouteTab, Route = #route{topic = Topic}, Type) ->
|
|||
end.
|
||||
|
||||
%% @private
|
||||
-spec(maybe_trans(function(), list(any()), Shard :: atom()) -> ok | {error, term()}).
|
||||
-spec maybe_trans(function(), list(any()), Shard :: atom()) -> ok | {error, term()}.
|
||||
maybe_trans(Fun, Args, Shard) ->
|
||||
case emqx:get_config([broker, perf, route_lock_type]) of
|
||||
key ->
|
||||
trans(Fun, Args, Shard);
|
||||
global ->
|
||||
%% Assert:
|
||||
mnesia = mria_rlog:backend(), %% TODO: do something smarter than just crash
|
||||
|
||||
%% TODO: do something smarter than just crash
|
||||
mnesia = mria_rlog:backend(),
|
||||
lock_router(Shard),
|
||||
try mnesia:sync_dirty(Fun, Args)
|
||||
try
|
||||
mnesia:sync_dirty(Fun, Args)
|
||||
after
|
||||
unlock_router(Shard)
|
||||
end;
|
||||
tab ->
|
||||
trans(fun() ->
|
||||
emqx_trie:lock_tables(),
|
||||
apply(Fun, Args)
|
||||
end, [], Shard)
|
||||
trans(
|
||||
fun() ->
|
||||
emqx_trie:lock_tables(),
|
||||
apply(Fun, Args)
|
||||
end,
|
||||
[],
|
||||
Shard
|
||||
)
|
||||
end.
|
||||
|
||||
%% The created fun only terminates with explicit exception
|
||||
-dialyzer({nowarn_function, [trans/3]}).
|
||||
|
||||
-spec(trans(function(), list(any()), atom()) -> ok | {error, term()}).
|
||||
-spec trans(function(), list(any()), atom()) -> ok | {error, term()}.
|
||||
trans(Fun, Args, Shard) ->
|
||||
{WPid, RefMon} =
|
||||
spawn_monitor(
|
||||
|
|
@ -102,12 +110,14 @@ trans(Fun, Args, Shard) ->
|
|||
%% Future changes should keep in mind that this process
|
||||
%% always exit with database write result.
|
||||
fun() ->
|
||||
Res = case mria:transaction(Shard, Fun, Args) of
|
||||
{atomic, Ok} -> Ok;
|
||||
{aborted, Reason} -> {error, Reason}
|
||||
end,
|
||||
exit({shutdown, Res})
|
||||
end),
|
||||
Res =
|
||||
case mria:transaction(Shard, Fun, Args) of
|
||||
{atomic, Ok} -> Ok;
|
||||
{aborted, Reason} -> {error, Reason}
|
||||
end,
|
||||
exit({shutdown, Res})
|
||||
end
|
||||
),
|
||||
%% Receive a 'shutdown' exit to pass result from the short-lived process.
|
||||
%% so the receive below can be receive-mark optimized by the compiler.
|
||||
%%
|
||||
|
|
|
|||
|
|
@ -19,33 +19,37 @@
|
|||
%% Note: please don't forget to add new API functions to
|
||||
%% `emqx_bpapi_trans:extract_mfa'
|
||||
|
||||
-export([ call/4
|
||||
, call/5
|
||||
, cast/4
|
||||
, cast/5
|
||||
, multicall/4
|
||||
, multicall/5
|
||||
-export([
|
||||
call/4,
|
||||
call/5,
|
||||
cast/4,
|
||||
cast/5,
|
||||
multicall/4,
|
||||
multicall/5,
|
||||
|
||||
, unwrap_erpc/1
|
||||
]).
|
||||
unwrap_erpc/1
|
||||
]).
|
||||
|
||||
-export_type([ badrpc/0
|
||||
, call_result/0
|
||||
, cast_result/0
|
||||
, multicall_result/1
|
||||
, multicall_result/0
|
||||
, erpc/1
|
||||
, erpc_multicall/1
|
||||
]).
|
||||
-export_type([
|
||||
badrpc/0,
|
||||
call_result/0,
|
||||
cast_result/0,
|
||||
multicall_result/1,
|
||||
multicall_result/0,
|
||||
erpc/1,
|
||||
erpc_multicall/1
|
||||
]).
|
||||
|
||||
-compile({inline,
|
||||
[ rpc_node/1
|
||||
, rpc_nodes/1
|
||||
]}).
|
||||
-compile(
|
||||
{inline, [
|
||||
rpc_node/1,
|
||||
rpc_nodes/1
|
||||
]}
|
||||
).
|
||||
|
||||
-define(DefaultClientNum, 1).
|
||||
|
||||
-type badrpc() :: {badrpc, term()} | {badtcp, term()}.
|
||||
-type badrpc() :: {badrpc, term()} | {badtcp, term()}.
|
||||
|
||||
-type call_result() :: term() | badrpc().
|
||||
|
||||
|
|
@ -55,11 +59,12 @@
|
|||
|
||||
-type multicall_result() :: multicall_result(term()).
|
||||
|
||||
-type erpc(Ret) :: {ok, Ret}
|
||||
| {throw, _Err}
|
||||
| {exit, {exception | signal, _Reason}}
|
||||
| {error, {exception, _Reason, _Stack :: list()}}
|
||||
| {error, {erpc, _Reason}}.
|
||||
-type erpc(Ret) ::
|
||||
{ok, Ret}
|
||||
| {throw, _Err}
|
||||
| {exit, {exception | signal, _Reason}}
|
||||
| {error, {exception, _Reason, _Stack :: list()}}
|
||||
| {error, {erpc, _Reason}}.
|
||||
|
||||
-type erpc_multicall(Ret) :: [erpc(Ret)].
|
||||
|
||||
|
|
@ -100,8 +105,9 @@ rpc_nodes([], Acc) ->
|
|||
rpc_nodes([Node | Nodes], Acc) ->
|
||||
rpc_nodes(Nodes, [rpc_node(Node) | Acc]).
|
||||
|
||||
filter_result({Error, Reason})
|
||||
when Error =:= badrpc; Error =:= badtcp ->
|
||||
filter_result({Error, Reason}) when
|
||||
Error =:= badrpc; Error =:= badtcp
|
||||
->
|
||||
{badrpc, Reason};
|
||||
filter_result(Delivery) ->
|
||||
Delivery.
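filter_result/1 above collapses both transport error shapes into {badrpc, Reason}, matching the badrpc() type declared earlier in this module. A hedged caller sketch (the wrapper name and the erlang:node/0 target are only for illustration, and the extra {badtcp, _} clause is defensive in case a result has not been passed through filter_result/1):

remote_node_name(Node) ->
    case emqx_rpc:call(Node, erlang, node, []) of
        {badrpc, Reason} -> {error, Reason};
        {badtcp, Reason} -> {error, Reason};
        Result -> {ok, Result}
    end.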

File diff suppressed because it is too large

@ -16,58 +16,62 @@
-module(emqx_sequence).
|
||||
|
||||
-export([ create/1
|
||||
, nextval/2
|
||||
, currval/2
|
||||
, reclaim/2
|
||||
, delete/1
|
||||
]).
|
||||
-export([
|
||||
create/1,
|
||||
nextval/2,
|
||||
currval/2,
|
||||
reclaim/2,
|
||||
delete/1
|
||||
]).
|
||||
|
||||
-export_type([seqid/0]).
|
||||
|
||||
-type(key() :: term()).
|
||||
-type key() :: term().
|
||||
|
||||
-type(name() :: atom()).
|
||||
-type name() :: atom().
|
||||
|
||||
-type(seqid() :: non_neg_integer()).
|
||||
-type seqid() :: non_neg_integer().
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% APIs
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
%% @doc Create a sequence.
|
||||
-spec(create(name()) -> ok).
|
||||
-spec create(name()) -> ok.
|
||||
create(Name) ->
|
||||
emqx_tables:new(Name, [public, set, {write_concurrency, true}]).
|
||||
|
||||
%% @doc Next value of the sequence.
|
||||
-spec(nextval(name(), key()) -> seqid()).
|
||||
-spec nextval(name(), key()) -> seqid().
|
||||
nextval(Name, Key) ->
|
||||
ets:update_counter(Name, Key, {2, 1}, {Key, 0}).
|
||||
|
||||
%% @doc Current value of the sequence.
|
||||
-spec(currval(name(), key()) -> seqid()).
|
||||
-spec currval(name(), key()) -> seqid().
|
||||
currval(Name, Key) ->
|
||||
try ets:lookup_element(Name, Key, 2)
|
||||
try
|
||||
ets:lookup_element(Name, Key, 2)
|
||||
catch
|
||||
error:badarg -> 0
|
||||
end.
|
||||
|
||||
%% @doc Reclaim a sequence id.
|
||||
-spec(reclaim(name(), key()) -> seqid()).
|
||||
-spec reclaim(name(), key()) -> seqid().
|
||||
reclaim(Name, Key) ->
|
||||
try ets:update_counter(Name, Key, {2, -1, 0, 0}) of
|
||||
0 -> ets:delete_object(Name, {Key, 0}), 0;
|
||||
I -> I
|
||||
0 ->
|
||||
ets:delete_object(Name, {Key, 0}),
|
||||
0;
|
||||
I ->
|
||||
I
|
||||
catch
|
||||
error:badarg -> 0
|
||||
end.
|
||||
|
||||
%% @doc Delete the sequence.
|
||||
-spec(delete(name()) -> boolean()).
|
||||
-spec delete(name()) -> boolean().
|
||||
delete(Name) ->
|
||||
case ets:info(Name, name) of
|
||||
Name -> ets:delete(Name);
|
||||
undefined -> false
|
||||
end.
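Every function in this module is a thin wrapper over a public ETS table (counters bumped with ets:update_counter, starting from a {Key, 0} default, as shown above). A usage sketch based only on the specs and clauses in this diff; the table name my_seq and the key are illustrative:

seq_demo() ->
    ok = emqx_sequence:create(my_seq),
    1 = emqx_sequence:nextval(my_seq, <<"pktid">>), %% first bump starts from the {Key, 0} default
    2 = emqx_sequence:nextval(my_seq, <<"pktid">>),
    2 = emqx_sequence:currval(my_seq, <<"pktid">>),
    1 = emqx_sequence:reclaim(my_seq, <<"pktid">>), %% decrements, floored at 0
    true = emqx_sequence:delete(my_seq).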

@ -48,7 +48,6 @@
-include("logger.hrl").
|
||||
-include("types.hrl").
|
||||
|
||||
|
||||
-ifdef(TEST).
|
||||
-compile(export_all).
|
||||
-compile(nowarn_export_all).
|
||||
|
|
@ -56,36 +55,41 @@
|
|||
|
||||
-export([init/1]).
|
||||
|
||||
-export([ info/1
|
||||
, info/2
|
||||
, is_session/1
|
||||
, stats/1
|
||||
, obtain_next_pkt_id/1
|
||||
]).
|
||||
-export([
|
||||
info/1,
|
||||
info/2,
|
||||
is_session/1,
|
||||
stats/1,
|
||||
obtain_next_pkt_id/1
|
||||
]).
|
||||
|
||||
-export([ subscribe/4
|
||||
, unsubscribe/4
|
||||
]).
|
||||
-export([
|
||||
subscribe/4,
|
||||
unsubscribe/4
|
||||
]).
|
||||
|
||||
-export([ publish/4
|
||||
, puback/3
|
||||
, pubrec/3
|
||||
, pubrel/3
|
||||
, pubcomp/3
|
||||
]).
|
||||
-export([
|
||||
publish/4,
|
||||
puback/3,
|
||||
pubrec/3,
|
||||
pubrel/3,
|
||||
pubcomp/3
|
||||
]).
|
||||
|
||||
-export([ deliver/3
|
||||
, enqueue/3
|
||||
, dequeue/2
|
||||
, ignore_local/4
|
||||
, retry/2
|
||||
, terminate/3
|
||||
]).
|
||||
-export([
|
||||
deliver/3,
|
||||
enqueue/3,
|
||||
dequeue/2,
|
||||
ignore_local/4,
|
||||
retry/2,
|
||||
terminate/3
|
||||
]).
|
||||
|
||||
-export([ takeover/1
|
||||
, resume/2
|
||||
, replay/2
|
||||
]).
|
||||
-export([
|
||||
takeover/1,
|
||||
resume/2,
|
||||
replay/2
|
||||
]).
|
||||
|
||||
-export([expire/3]).
|
||||
|
||||
|
|
@ -94,124 +98,130 @@
|
|||
|
||||
-type sessionID() :: emqx_guid:guid().
|
||||
|
||||
-export_type([ session/0
|
||||
, sessionID/0
|
||||
]).
|
||||
-export_type([
|
||||
session/0,
|
||||
sessionID/0
|
||||
]).
|
||||
|
||||
-record(session, {
|
||||
%% Client's id
|
||||
clientid :: emqx_types:clientid(),
|
||||
id :: sessionID(),
|
||||
%% Is this session a persistent session i.e. was it started with Session-Expiry > 0
|
||||
is_persistent :: boolean(),
|
||||
%% Client’s Subscriptions.
|
||||
subscriptions :: map(),
|
||||
%% Max subscriptions allowed
|
||||
max_subscriptions :: non_neg_integer() | infinity,
|
||||
%% Upgrade QoS?
|
||||
upgrade_qos :: boolean(),
|
||||
%% Client <- Broker: QoS1/2 messages sent to the client but
|
||||
%% have not been unacked.
|
||||
inflight :: emqx_inflight:inflight(),
|
||||
%% All QoS1/2 messages published to when client is disconnected,
|
||||
%% or QoS1/2 messages pending transmission to the Client.
|
||||
%%
|
||||
%% Optionally, QoS0 messages pending transmission to the Client.
|
||||
mqueue :: emqx_mqueue:mqueue(),
|
||||
%% Next packet id of the session
|
||||
next_pkt_id = 1 :: emqx_types:packet_id(),
|
||||
%% Retry interval for redelivering QoS1/2 messages (Unit: millisecond)
|
||||
retry_interval :: timeout(),
|
||||
%% Client -> Broker: QoS2 messages received from the client, but
|
||||
%% have not been completely acknowledged
|
||||
awaiting_rel :: map(),
|
||||
%% Maximum number of awaiting QoS2 messages allowed
|
||||
max_awaiting_rel :: non_neg_integer() | infinity,
|
||||
%% Awaiting PUBREL Timeout (Unit: millisecond)
|
||||
await_rel_timeout :: timeout(),
|
||||
%% Created at
|
||||
created_at :: pos_integer()
|
||||
%% Message deliver latency stats
|
||||
}).
|
||||
|
||||
%% Client's id
|
||||
clientid :: emqx_types:clientid(),
|
||||
id :: sessionID(),
|
||||
%% Is this session a persistent session i.e. was it started with Session-Expiry > 0
|
||||
is_persistent :: boolean(),
|
||||
%% Client’s Subscriptions.
|
||||
subscriptions :: map(),
|
||||
%% Max subscriptions allowed
|
||||
max_subscriptions :: non_neg_integer() | infinity,
|
||||
%% Upgrade QoS?
|
||||
upgrade_qos :: boolean(),
|
||||
%% Client <- Broker: QoS1/2 messages sent to the client but
|
||||
%% have not been unacked.
|
||||
inflight :: emqx_inflight:inflight(),
|
||||
%% All QoS1/2 messages published to when client is disconnected,
|
||||
%% or QoS1/2 messages pending transmission to the Client.
|
||||
%%
|
||||
%% Optionally, QoS0 messages pending transmission to the Client.
|
||||
mqueue :: emqx_mqueue:mqueue(),
|
||||
%% Next packet id of the session
|
||||
next_pkt_id = 1 :: emqx_types:packet_id(),
|
||||
%% Retry interval for redelivering QoS1/2 messages (Unit: millisecond)
|
||||
retry_interval :: timeout(),
|
||||
%% Client -> Broker: QoS2 messages received from the client, but
|
||||
%% have not been completely acknowledged
|
||||
awaiting_rel :: map(),
|
||||
%% Maximum number of awaiting QoS2 messages allowed
|
||||
max_awaiting_rel :: non_neg_integer() | infinity,
|
||||
%% Awaiting PUBREL Timeout (Unit: millisecond)
|
||||
await_rel_timeout :: timeout(),
|
||||
%% Created at
|
||||
created_at :: pos_integer()
|
||||
%% Message deliver latency stats
|
||||
}).
|
||||
|
||||
-type inflight_data_phase() :: wait_ack | wait_comp.
|
||||
|
||||
-record(inflight_data, { phase :: inflight_data_phase()
|
||||
, message :: emqx_types:message()
|
||||
, timestamp :: non_neg_integer()}).
|
||||
-record(inflight_data, {
|
||||
phase :: inflight_data_phase(),
|
||||
message :: emqx_types:message(),
|
||||
timestamp :: non_neg_integer()
|
||||
}).
|
||||
|
||||
-type(session() :: #session{}).
|
||||
-type session() :: #session{}.
|
||||
|
||||
-type(publish() :: {maybe(emqx_types:packet_id()), emqx_types:message()}).
|
||||
-type publish() :: {maybe(emqx_types:packet_id()), emqx_types:message()}.
|
||||
|
||||
-type(pubrel() :: {pubrel, emqx_types:packet_id()}).
|
||||
-type pubrel() :: {pubrel, emqx_types:packet_id()}.
|
||||
|
||||
-type(replies() :: list(publish() | pubrel())).
|
||||
-type replies() :: list(publish() | pubrel()).
|
||||
|
||||
-define(INFO_KEYS,
|
||||
[ id
|
||||
, is_persistent
|
||||
, subscriptions
|
||||
, upgrade_qos
|
||||
, retry_interval
|
||||
, await_rel_timeout
|
||||
, created_at
|
||||
]).
|
||||
-define(INFO_KEYS, [
|
||||
id,
|
||||
is_persistent,
|
||||
subscriptions,
|
||||
upgrade_qos,
|
||||
retry_interval,
|
||||
await_rel_timeout,
|
||||
created_at
|
||||
]).
|
||||
|
||||
-define(STATS_KEYS,
|
||||
[ subscriptions_cnt
|
||||
, subscriptions_max
|
||||
, inflight_cnt
|
||||
, inflight_max
|
||||
, mqueue_len
|
||||
, mqueue_max
|
||||
, mqueue_dropped
|
||||
, next_pkt_id
|
||||
, awaiting_rel_cnt
|
||||
, awaiting_rel_max
|
||||
]).
|
||||
-define(STATS_KEYS, [
|
||||
subscriptions_cnt,
|
||||
subscriptions_max,
|
||||
inflight_cnt,
|
||||
inflight_max,
|
||||
mqueue_len,
|
||||
mqueue_max,
|
||||
mqueue_dropped,
|
||||
next_pkt_id,
|
||||
awaiting_rel_cnt,
|
||||
awaiting_rel_max
|
||||
]).
|
||||
|
||||
-define(DEFAULT_BATCH_N, 1000).
|
||||
|
||||
-type options() :: #{ max_subscriptions => non_neg_integer()
|
||||
, upgrade_qos => boolean()
|
||||
, retry_interval => timeout()
|
||||
, max_awaiting_rel => non_neg_integer() | infinity
|
||||
, await_rel_timeout => timeout()
|
||||
, max_inflight => integer()
|
||||
, mqueue => emqx_mqueue:options()
|
||||
, is_persistent => boolean()
|
||||
, clientid => emqx_types:clientid()
|
||||
}.
|
||||
-type options() :: #{
|
||||
max_subscriptions => non_neg_integer(),
|
||||
upgrade_qos => boolean(),
|
||||
retry_interval => timeout(),
|
||||
max_awaiting_rel => non_neg_integer() | infinity,
|
||||
await_rel_timeout => timeout(),
|
||||
max_inflight => integer(),
|
||||
mqueue => emqx_mqueue:options(),
|
||||
is_persistent => boolean(),
|
||||
clientid => emqx_types:clientid()
|
||||
}.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Init a Session
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(init(options()) -> session()).
|
||||
-spec init(options()) -> session().
|
||||
init(Opts) ->
|
||||
MaxInflight = maps:get(max_inflight, Opts, 1),
|
||||
QueueOpts = maps:merge(
|
||||
#{max_len => 1000,
|
||||
store_qos0 => true
|
||||
}, maps:get(mqueue, Opts, #{})),
|
||||
#{
|
||||
max_len => 1000,
|
||||
store_qos0 => true
|
||||
},
|
||||
maps:get(mqueue, Opts, #{})
|
||||
),
|
||||
#session{
|
||||
id = emqx_guid:gen(),
|
||||
clientid = maps:get(clientid, Opts, <<>>),
|
||||
is_persistent = maps:get(is_persistent, Opts, false),
|
||||
max_subscriptions = maps:get(max_subscriptions, Opts, infinity),
|
||||
subscriptions = #{},
|
||||
upgrade_qos = maps:get(upgrade_qos, Opts, false),
|
||||
inflight = emqx_inflight:new(MaxInflight),
|
||||
mqueue = emqx_mqueue:init(QueueOpts),
|
||||
next_pkt_id = 1,
|
||||
retry_interval = maps:get(retry_interval, Opts, 30000),
|
||||
awaiting_rel = #{},
|
||||
max_awaiting_rel = maps:get(max_awaiting_rel, Opts, 100),
|
||||
await_rel_timeout = maps:get(await_rel_timeout, Opts, 300000),
|
||||
created_at = erlang:system_time(millisecond)
|
||||
}.
|
||||
id = emqx_guid:gen(),
|
||||
clientid = maps:get(clientid, Opts, <<>>),
|
||||
is_persistent = maps:get(is_persistent, Opts, false),
|
||||
max_subscriptions = maps:get(max_subscriptions, Opts, infinity),
|
||||
subscriptions = #{},
|
||||
upgrade_qos = maps:get(upgrade_qos, Opts, false),
|
||||
inflight = emqx_inflight:new(MaxInflight),
|
||||
mqueue = emqx_mqueue:init(QueueOpts),
|
||||
next_pkt_id = 1,
|
||||
retry_interval = maps:get(retry_interval, Opts, 30000),
|
||||
awaiting_rel = #{},
|
||||
max_awaiting_rel = maps:get(max_awaiting_rel, Opts, 100),
|
||||
await_rel_timeout = maps:get(await_rel_timeout, Opts, 300000),
|
||||
created_at = erlang:system_time(millisecond)
|
||||
}.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Info, Stats
|
||||
|
|
@ -221,7 +231,7 @@ is_session(#session{}) -> true;
|
|||
is_session(_) -> false.
|
||||
|
||||
%% @doc Get infos of the session.
|
||||
-spec(info(session()) -> emqx_types:infos()).
|
||||
-spec info(session()) -> emqx_types:infos().
|
||||
info(Session) ->
|
||||
maps:from_list(info(?INFO_KEYS, Session)).
|
||||
|
||||
|
|
@ -269,7 +279,7 @@ info(created_at, #session{created_at = CreatedAt}) ->
|
|||
CreatedAt.
|
||||
|
||||
%% @doc Get stats of the session.
|
||||
-spec(stats(session()) -> emqx_types:stats()).
|
||||
-spec stats(session()) -> emqx_types:stats().
|
||||
stats(Session) -> info(?STATS_KEYS, Session).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
|
|
@ -278,58 +288,80 @@ stats(Session) -> info(?STATS_KEYS, Session).
|
|||
|
||||
ignore_local(ClientInfo, Delivers, Subscriber, Session) ->
|
||||
Subs = info(subscriptions, Session),
|
||||
lists:dropwhile(fun({deliver, Topic, #message{from = Publisher} = Msg}) ->
|
||||
case maps:find(Topic, Subs) of
|
||||
{ok, #{nl := 1}} when Subscriber =:= Publisher ->
|
||||
ok = emqx_hooks:run('delivery.dropped', [ClientInfo, Msg, no_local]),
|
||||
ok = emqx_metrics:inc('delivery.dropped'),
|
||||
ok = emqx_metrics:inc('delivery.dropped.no_local'),
|
||||
true;
|
||||
_ ->
|
||||
false
|
||||
end
|
||||
end, Delivers).
|
||||
lists:dropwhile(
|
||||
fun({deliver, Topic, #message{from = Publisher} = Msg}) ->
|
||||
case maps:find(Topic, Subs) of
|
||||
{ok, #{nl := 1}} when Subscriber =:= Publisher ->
|
||||
ok = emqx_hooks:run('delivery.dropped', [ClientInfo, Msg, no_local]),
|
||||
ok = emqx_metrics:inc('delivery.dropped'),
|
||||
ok = emqx_metrics:inc('delivery.dropped.no_local'),
|
||||
true;
|
||||
_ ->
|
||||
false
|
||||
end
|
||||
end,
|
||||
Delivers
|
||||
).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Client -> Broker: SUBSCRIBE
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(subscribe(emqx_types:clientinfo(), emqx_types:topic(),
|
||||
emqx_types:subopts(), session())
|
||||
-> {ok, session()} | {error, emqx_types:reason_code()}).
|
||||
subscribe(ClientInfo = #{clientid := ClientId}, TopicFilter, SubOpts,
|
||||
Session = #session{id = SessionID, is_persistent = IsPS, subscriptions = Subs}) ->
|
||||
-spec subscribe(
|
||||
emqx_types:clientinfo(),
|
||||
emqx_types:topic(),
|
||||
emqx_types:subopts(),
|
||||
session()
|
||||
) ->
|
||||
{ok, session()} | {error, emqx_types:reason_code()}.
|
||||
subscribe(
|
||||
ClientInfo = #{clientid := ClientId},
|
||||
TopicFilter,
|
||||
SubOpts,
|
||||
Session = #session{id = SessionID, is_persistent = IsPS, subscriptions = Subs}
|
||||
) ->
|
||||
IsNew = not maps:is_key(TopicFilter, Subs),
|
||||
case IsNew andalso is_subscriptions_full(Session) of
|
||||
false ->
|
||||
ok = emqx_broker:subscribe(TopicFilter, ClientId, SubOpts),
|
||||
ok = emqx_persistent_session:add_subscription(TopicFilter, SessionID, IsPS),
|
||||
ok = emqx_hooks:run('session.subscribed',
|
||||
[ClientInfo, TopicFilter, SubOpts#{is_new => IsNew}]),
|
||||
ok = emqx_hooks:run(
|
||||
'session.subscribed',
|
||||
[ClientInfo, TopicFilter, SubOpts#{is_new => IsNew}]
|
||||
),
|
||||
{ok, Session#session{subscriptions = maps:put(TopicFilter, SubOpts, Subs)}};
|
||||
true -> {error, ?RC_QUOTA_EXCEEDED}
|
||||
true ->
|
||||
{error, ?RC_QUOTA_EXCEEDED}
|
||||
end.
|
||||
|
||||
is_subscriptions_full(#session{max_subscriptions = infinity}) ->
|
||||
false;
|
||||
is_subscriptions_full(#session{subscriptions = Subs,
|
||||
max_subscriptions = MaxLimit}) ->
|
||||
is_subscriptions_full(#session{
|
||||
subscriptions = Subs,
|
||||
max_subscriptions = MaxLimit
|
||||
}) ->
|
||||
maps:size(Subs) >= MaxLimit.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Client -> Broker: UNSUBSCRIBE
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(unsubscribe(emqx_types:clientinfo(), emqx_types:topic(), emqx_types:subopts(), session())
|
||||
-> {ok, session()} | {error, emqx_types:reason_code()}).
|
||||
unsubscribe(ClientInfo, TopicFilter, UnSubOpts,
|
||||
Session = #session{id = SessionID, subscriptions = Subs, is_persistent = IsPS}) ->
|
||||
-spec unsubscribe(emqx_types:clientinfo(), emqx_types:topic(), emqx_types:subopts(), session()) ->
|
||||
{ok, session()} | {error, emqx_types:reason_code()}.
|
||||
unsubscribe(
|
||||
ClientInfo,
|
||||
TopicFilter,
|
||||
UnSubOpts,
|
||||
Session = #session{id = SessionID, subscriptions = Subs, is_persistent = IsPS}
|
||||
) ->
|
||||
case maps:find(TopicFilter, Subs) of
|
||||
{ok, SubOpts} ->
|
||||
ok = emqx_broker:unsubscribe(TopicFilter),
|
||||
ok = emqx_persistent_session:remove_subscription(TopicFilter, SessionID, IsPS),
|
||||
ok = emqx_hooks:run('session.unsubscribed',
|
||||
[ClientInfo, TopicFilter, maps:merge(SubOpts, UnSubOpts)]),
|
||||
ok = emqx_hooks:run(
|
||||
'session.unsubscribed',
|
||||
[ClientInfo, TopicFilter, maps:merge(SubOpts, UnSubOpts)]
|
||||
),
|
||||
{ok, Session#session{subscriptions = maps:remove(TopicFilter, Subs)}};
|
||||
error ->
|
||||
{error, ?RC_NO_SUBSCRIPTION_EXISTED}
|
||||
|
|
@ -339,11 +371,15 @@ unsubscribe(ClientInfo, TopicFilter, UnSubOpts,
|
|||
%% Client -> Broker: PUBLISH
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(publish(emqx_types:clientinfo(), emqx_types:packet_id(), emqx_types:message(), session())
|
||||
-> {ok, emqx_types:publish_result(), session()}
|
||||
| {error, emqx_types:reason_code()}).
|
||||
publish(_ClientInfo, PacketId, Msg = #message{qos = ?QOS_2, timestamp = Ts},
|
||||
Session = #session{awaiting_rel = AwaitingRel}) ->
|
||||
-spec publish(emqx_types:clientinfo(), emqx_types:packet_id(), emqx_types:message(), session()) ->
|
||||
{ok, emqx_types:publish_result(), session()}
|
||||
| {error, emqx_types:reason_code()}.
|
||||
publish(
|
||||
_ClientInfo,
|
||||
PacketId,
|
||||
Msg = #message{qos = ?QOS_2, timestamp = Ts},
|
||||
Session = #session{awaiting_rel = AwaitingRel}
|
||||
) ->
|
||||
case is_awaiting_full(Session) of
|
||||
false ->
|
||||
case maps:is_key(PacketId, AwaitingRel) of
|
||||
|
|
@ -354,27 +390,29 @@ publish(_ClientInfo, PacketId, Msg = #message{qos = ?QOS_2, timestamp = Ts},
|
|||
true ->
|
||||
{error, ?RC_PACKET_IDENTIFIER_IN_USE}
|
||||
end;
|
||||
true -> {error, ?RC_RECEIVE_MAXIMUM_EXCEEDED}
|
||||
true ->
|
||||
{error, ?RC_RECEIVE_MAXIMUM_EXCEEDED}
|
||||
end;
|
||||
|
||||
%% Publish QoS0/1 directly
|
||||
publish(_ClientInfo, _PacketId, Msg, Session) ->
|
||||
{ok, emqx_broker:publish(Msg), Session}.
|
||||
|
||||
is_awaiting_full(#session{max_awaiting_rel = infinity}) ->
|
||||
false;
|
||||
is_awaiting_full(#session{awaiting_rel = AwaitingRel,
|
||||
max_awaiting_rel = MaxLimit}) ->
|
||||
is_awaiting_full(#session{
|
||||
awaiting_rel = AwaitingRel,
|
||||
max_awaiting_rel = MaxLimit
|
||||
}) ->
|
||||
maps:size(AwaitingRel) >= MaxLimit.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Client -> Broker: PUBACK
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(puback(emqx_types:clientinfo(), emqx_types:packet_id(), session())
|
||||
-> {ok, emqx_types:message(), session()}
|
||||
| {ok, emqx_types:message(), replies(), session()}
|
||||
| {error, emqx_types:reason_code()}).
|
||||
-spec puback(emqx_types:clientinfo(), emqx_types:packet_id(), session()) ->
|
||||
{ok, emqx_types:message(), session()}
|
||||
| {ok, emqx_types:message(), replies(), session()}
|
||||
| {error, emqx_types:reason_code()}.
|
||||
puback(ClientInfo, PacketId, Session = #session{inflight = Inflight}) ->
|
||||
case emqx_inflight:lookup(PacketId, Inflight) of
|
||||
{value, #inflight_data{phase = wait_ack, message = Msg}} ->
|
||||
|
|
@ -396,9 +434,9 @@ return_with(Msg, {ok, Publishes, Session}) ->
|
|||
%% Client -> Broker: PUBREC
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(pubrec(emqx_types:clientinfo(), emqx_types:packet_id(), session())
|
||||
-> {ok, emqx_types:message(), session()}
|
||||
| {error, emqx_types:reason_code()}).
|
||||
-spec pubrec(emqx_types:clientinfo(), emqx_types:packet_id(), session()) ->
|
||||
{ok, emqx_types:message(), session()}
|
||||
| {error, emqx_types:reason_code()}.
|
||||
pubrec(_ClientInfo, PacketId, Session = #session{inflight = Inflight}) ->
|
||||
case emqx_inflight:lookup(PacketId, Inflight) of
|
||||
{value, #inflight_data{phase = wait_ack, message = Msg} = Data} ->
|
||||
|
|
@ -415,8 +453,8 @@ pubrec(_ClientInfo, PacketId, Session = #session{inflight = Inflight}) ->
|
|||
%% Client -> Broker: PUBREL
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(pubrel(emqx_types:clientinfo(), emqx_types:packet_id(), session())
|
||||
-> {ok, session()} | {error, emqx_types:reason_code()}).
|
||||
-spec pubrel(emqx_types:clientinfo(), emqx_types:packet_id(), session()) ->
|
||||
{ok, session()} | {error, emqx_types:reason_code()}.
|
||||
pubrel(_ClientInfo, PacketId, Session = #session{awaiting_rel = AwaitingRel}) ->
|
||||
case maps:take(PacketId, AwaitingRel) of
|
||||
{_Ts, AwaitingRel1} ->
|
||||
|
|
@ -429,9 +467,10 @@ pubrel(_ClientInfo, PacketId, Session = #session{awaiting_rel = AwaitingRel}) ->
|
|||
%% Client -> Broker: PUBCOMP
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(pubcomp(emqx_types:clientinfo(), emqx_types:packet_id(), session())
|
||||
-> {ok, session()} | {ok, replies(), session()}
|
||||
| {error, emqx_types:reason_code()}).
|
||||
-spec pubcomp(emqx_types:clientinfo(), emqx_types:packet_id(), session()) ->
|
||||
{ok, session()}
|
||||
| {ok, replies(), session()}
|
||||
| {error, emqx_types:reason_code()}.
|
||||
pubcomp(ClientInfo, PacketId, Session = #session{inflight = Inflight}) ->
|
||||
case emqx_inflight:lookup(PacketId, Inflight) of
|
||||
{value, #inflight_data{phase = wait_comp, message = Msg}} ->
|
||||
|
|
@ -450,7 +489,8 @@ pubcomp(ClientInfo, PacketId, Session = #session{inflight = Inflight}) ->
|
|||
|
||||
dequeue(ClientInfo, Session = #session{inflight = Inflight, mqueue = Q}) ->
|
||||
case emqx_mqueue:is_empty(Q) of
|
||||
true -> {ok, Session};
|
||||
true ->
|
||||
{ok, Session};
|
||||
false ->
|
||||
{Msgs, Q1} = dequeue(ClientInfo, batch_n(Inflight), [], Q),
|
||||
do_deliver(ClientInfo, Msgs, [], Session#session{mqueue = Q1})
|
||||
|
|
@ -458,17 +498,18 @@ dequeue(ClientInfo, Session = #session{inflight = Inflight, mqueue = Q}) ->
|
|||
|
||||
dequeue(_ClientInfo, 0, Msgs, Q) ->
|
||||
{lists:reverse(Msgs), Q};
|
||||
|
||||
dequeue(ClientInfo, Cnt, Msgs, Q) ->
|
||||
case emqx_mqueue:out(Q) of
|
||||
{empty, _Q} -> dequeue(ClientInfo, 0, Msgs, Q);
|
||||
{empty, _Q} ->
|
||||
dequeue(ClientInfo, 0, Msgs, Q);
|
||||
{{value, Msg}, Q1} ->
|
||||
case emqx_message:is_expired(Msg) of
|
||||
true ->
|
||||
ok = emqx_hooks:run('delivery.dropped', [ClientInfo, Msg, expired]),
|
||||
ok = inc_delivery_expired_cnt(),
|
||||
dequeue(ClientInfo, Cnt, Msgs, Q1);
|
||||
false -> dequeue(ClientInfo, acc_cnt(Msg, Cnt), [Msg|Msgs], Q1)
|
||||
false ->
|
||||
dequeue(ClientInfo, acc_cnt(Msg, Cnt), [Msg | Msgs], Q1)
|
||||
end
|
||||
end.
|
||||
|
||||
|
|
@ -479,40 +520,45 @@ acc_cnt(_Msg, Cnt) -> Cnt - 1.
|
|||
%% Broker -> Client: Deliver
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(deliver(emqx_types:clientinfo(), list(emqx_types:deliver()), session())
|
||||
-> {ok, session()} | {ok, replies(), session()}).
|
||||
deliver(ClientInfo, [Deliver], Session) -> %% Optimize
|
||||
-spec deliver(emqx_types:clientinfo(), list(emqx_types:deliver()), session()) ->
|
||||
{ok, session()} | {ok, replies(), session()}.
|
||||
%% Optimize
|
||||
deliver(ClientInfo, [Deliver], Session) ->
|
||||
Msg = enrich_deliver(Deliver, Session),
|
||||
deliver_msg(ClientInfo, Msg, Session);
|
||||
|
||||
deliver(ClientInfo, Delivers, Session) ->
|
||||
Msgs = [enrich_deliver(D, Session) || D <- Delivers],
|
||||
do_deliver(ClientInfo, Msgs, [], Session).
|
||||
|
||||
do_deliver(_ClientInfo, [], Publishes, Session) ->
|
||||
{ok, lists:reverse(Publishes), Session};
|
||||
|
||||
do_deliver(ClientInfo, [Msg | More], Acc, Session) ->
|
||||
case deliver_msg(ClientInfo, Msg, Session) of
|
||||
{ok, Session1} ->
|
||||
do_deliver(ClientInfo, More, Acc, Session1);
|
||||
{ok, [Publish], Session1} ->
|
||||
do_deliver(ClientInfo, More, [Publish|Acc], Session1)
|
||||
do_deliver(ClientInfo, More, [Publish | Acc], Session1)
|
||||
end.
|
||||
|
||||
deliver_msg(_ClientInfo, Msg = #message{qos = ?QOS_0}, Session) ->
|
||||
on_delivery_completed(Msg, Session), %
|
||||
%
|
||||
on_delivery_completed(Msg, Session),
|
||||
{ok, [{undefined, maybe_ack(Msg)}], Session};
|
||||
|
||||
deliver_msg(ClientInfo, Msg = #message{qos = QoS}, Session =
|
||||
#session{next_pkt_id = PacketId, inflight = Inflight})
|
||||
when QoS =:= ?QOS_1 orelse QoS =:= ?QOS_2 ->
|
||||
deliver_msg(
|
||||
ClientInfo,
|
||||
Msg = #message{qos = QoS},
|
||||
Session =
|
||||
#session{next_pkt_id = PacketId, inflight = Inflight}
|
||||
) when
|
||||
QoS =:= ?QOS_1 orelse QoS =:= ?QOS_2
|
||||
->
|
||||
case emqx_inflight:is_full(Inflight) of
|
||||
true ->
|
||||
Session1 = case maybe_nack(Msg) of
|
||||
true -> Session;
|
||||
false -> enqueue(ClientInfo, Msg, Session)
|
||||
end,
|
||||
Session1 =
|
||||
case maybe_nack(Msg) of
|
||||
true -> Session;
|
||||
false -> enqueue(ClientInfo, Msg, Session)
|
||||
end,
|
||||
{ok, Session1};
|
||||
false ->
|
||||
Publish = {PacketId, maybe_ack(Msg)},
|
||||
|
|
@ -521,14 +567,20 @@ deliver_msg(ClientInfo, Msg = #message{qos = QoS}, Session =
|
|||
{ok, [Publish], next_pkt_id(Session1)}
|
||||
end.
|
||||
|
||||
-spec(enqueue(emqx_types:clientinfo(), list(emqx_types:deliver())|emqx_types:message(),
|
||||
session()) -> session()).
|
||||
-spec enqueue(
|
||||
emqx_types:clientinfo(),
|
||||
list(emqx_types:deliver()) | emqx_types:message(),
|
||||
session()
|
||||
) -> session().
|
||||
enqueue(ClientInfo, Delivers, Session) when is_list(Delivers) ->
|
||||
lists:foldl(fun(Deliver, Session0) ->
|
||||
lists:foldl(
|
||||
fun(Deliver, Session0) ->
|
||||
Msg = enrich_deliver(Deliver, Session),
|
||||
enqueue(ClientInfo, Msg, Session0)
|
||||
end, Session, Delivers);
|
||||
|
||||
end,
|
||||
Session,
|
||||
Delivers
|
||||
);
|
||||
enqueue(ClientInfo, #message{} = Msg, Session = #session{mqueue = Q}) ->
|
||||
{Dropped, NewQ} = emqx_mqueue:in(Msg, Q),
|
||||
(Dropped =/= undefined) andalso handle_dropped(ClientInfo, Dropped, Session),
|
||||
|
|
@ -536,25 +588,37 @@ enqueue(ClientInfo, #message{} = Msg, Session = #session{mqueue = Q}) ->
|
|||
|
||||
handle_dropped(ClientInfo, Msg = #message{qos = QoS, topic = Topic}, #session{mqueue = Q}) ->
|
||||
Payload = emqx_message:to_log_map(Msg),
|
||||
#{store_qos0 := StoreQos0} = QueueInfo = emqx_mqueue:info(Q),
|
||||
#{store_qos0 := StoreQos0} = QueueInfo = emqx_mqueue:info(Q),
|
||||
case (QoS == ?QOS_0) andalso (not StoreQos0) of
|
||||
true ->
|
||||
true ->
|
||||
ok = emqx_hooks:run('delivery.dropped', [ClientInfo, Msg, qos0_msg]),
|
||||
ok = emqx_metrics:inc('delivery.dropped'),
|
||||
ok = emqx_metrics:inc('delivery.dropped.qos0_msg'),
|
||||
ok = inc_pd('send_msg.dropped'),
|
||||
?SLOG(warning, #{msg => "dropped_qos0_msg",
|
||||
queue => QueueInfo,
|
||||
payload => Payload}, #{topic => Topic});
|
||||
?SLOG(
|
||||
warning,
|
||||
#{
|
||||
msg => "dropped_qos0_msg",
|
||||
queue => QueueInfo,
|
||||
payload => Payload
|
||||
},
|
||||
#{topic => Topic}
|
||||
);
|
||||
false ->
|
||||
ok = emqx_hooks:run('delivery.dropped', [ClientInfo, Msg, queue_full]),
|
||||
ok = emqx_metrics:inc('delivery.dropped'),
|
||||
ok = emqx_metrics:inc('delivery.dropped.queue_full'),
|
||||
ok = inc_pd('send_msg.dropped'),
|
||||
ok = inc_pd('send_msg.dropped.queue_full'),
|
||||
?SLOG(warning, #{msg => "dropped_msg_due_to_mqueue_is_full",
|
||||
queue => QueueInfo,
|
||||
payload => Payload}, #{topic => Topic})
|
||||
?SLOG(
|
||||
warning,
|
||||
#{
|
||||
msg => "dropped_msg_due_to_mqueue_is_full",
|
||||
queue => QueueInfo,
|
||||
payload => Payload
|
||||
},
|
||||
#{topic => Topic}
|
||||
)
|
||||
end.
|
||||
|
||||
enrich_deliver({deliver, Topic, Msg}, Session = #session{subscriptions = Subs}) ->
|
||||
|
|
@ -562,13 +626,13 @@ enrich_deliver({deliver, Topic, Msg}, Session = #session{subscriptions = Subs})
|
|||
|
||||
maybe_ack(Msg) ->
|
||||
case emqx_shared_sub:is_ack_required(Msg) of
|
||||
true -> emqx_shared_sub:maybe_ack(Msg);
|
||||
true -> emqx_shared_sub:maybe_ack(Msg);
|
||||
false -> Msg
|
||||
end.
|
||||
|
||||
maybe_nack(Msg) ->
|
||||
emqx_shared_sub:is_ack_required(Msg)
|
||||
andalso (ok == emqx_shared_sub:maybe_nack_dropped(Msg)).
|
||||
emqx_shared_sub:is_ack_required(Msg) andalso
|
||||
(ok == emqx_shared_sub:maybe_nack_dropped(Msg)).
|
||||
|
||||
get_subopts(Topic, SubMap) ->
|
||||
case maps:find(Topic, SubMap) of
|
||||
|
|
@ -576,27 +640,35 @@ get_subopts(Topic, SubMap) ->
|
|||
[{nl, Nl}, {qos, QoS}, {rap, Rap}, {subid, SubId}];
|
||||
{ok, #{nl := Nl, qos := QoS, rap := Rap}} ->
|
||||
[{nl, Nl}, {qos, QoS}, {rap, Rap}];
|
||||
error -> []
|
||||
error ->
|
||||
[]
|
||||
end.
|
||||
|
||||
enrich_subopts([], Msg, _Session) -> Msg;
|
||||
enrich_subopts([{nl, 1}|Opts], Msg, Session) ->
|
||||
enrich_subopts([], Msg, _Session) ->
|
||||
Msg;
|
||||
enrich_subopts([{nl, 1} | Opts], Msg, Session) ->
|
||||
enrich_subopts(Opts, emqx_message:set_flag(nl, Msg), Session);
|
||||
enrich_subopts([{nl, 0}|Opts], Msg, Session) ->
|
||||
enrich_subopts([{nl, 0} | Opts], Msg, Session) ->
|
||||
enrich_subopts(Opts, Msg, Session);
|
||||
enrich_subopts([{qos, SubQoS}|Opts], Msg = #message{qos = PubQoS},
|
||||
Session = #session{upgrade_qos = true}) ->
|
||||
enrich_subopts(
|
||||
[{qos, SubQoS} | Opts],
|
||||
Msg = #message{qos = PubQoS},
|
||||
Session = #session{upgrade_qos = true}
|
||||
) ->
|
||||
enrich_subopts(Opts, Msg#message{qos = max(SubQoS, PubQoS)}, Session);
|
||||
enrich_subopts([{qos, SubQoS}|Opts], Msg = #message{qos = PubQoS},
|
||||
Session = #session{upgrade_qos = false}) ->
|
||||
enrich_subopts(
|
||||
[{qos, SubQoS} | Opts],
|
||||
Msg = #message{qos = PubQoS},
|
||||
Session = #session{upgrade_qos = false}
|
||||
) ->
|
||||
enrich_subopts(Opts, Msg#message{qos = min(SubQoS, PubQoS)}, Session);
|
||||
enrich_subopts([{rap, 1}|Opts], Msg, Session) ->
|
||||
enrich_subopts([{rap, 1} | Opts], Msg, Session) ->
|
||||
enrich_subopts(Opts, Msg, Session);
|
||||
enrich_subopts([{rap, 0}|Opts], Msg = #message{headers = #{retained := true}}, Session) ->
|
||||
enrich_subopts([{rap, 0} | Opts], Msg = #message{headers = #{retained := true}}, Session) ->
|
||||
enrich_subopts(Opts, Msg, Session);
|
||||
enrich_subopts([{rap, 0}|Opts], Msg, Session) ->
|
||||
enrich_subopts([{rap, 0} | Opts], Msg, Session) ->
|
||||
enrich_subopts(Opts, emqx_message:set_flag(retain, false, Msg), Session);
|
||||
enrich_subopts([{subid, SubId}|Opts], Msg, Session) ->
|
||||
enrich_subopts([{subid, SubId} | Opts], Msg, Session) ->
|
||||
Props = emqx_message:get_header(properties, Msg, #{}),
|
||||
Msg1 = emqx_message:set_header(properties, Props#{'Subscription-Identifier' => SubId}, Msg),
|
||||
enrich_subopts(Opts, Msg1, Session).
@ -613,22 +685,32 @@ await(PacketId, Msg, Session = #session{inflight = Inflight}) ->
%% Retry Delivery
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(retry(emqx_types:clientinfo(), session()) ->
|
||||
{ok, session()} | {ok, replies(), timeout(), session()}).
|
||||
-spec retry(emqx_types:clientinfo(), session()) ->
|
||||
{ok, session()} | {ok, replies(), timeout(), session()}.
|
||||
retry(ClientInfo, Session = #session{inflight = Inflight}) ->
|
||||
case emqx_inflight:is_empty(Inflight) of
|
||||
true -> {ok, Session};
|
||||
true ->
|
||||
{ok, Session};
|
||||
false ->
|
||||
Now = erlang:system_time(millisecond),
|
||||
retry_delivery(emqx_inflight:to_list(fun sort_fun/2, Inflight),
|
||||
[], Now, Session, ClientInfo)
|
||||
retry_delivery(
|
||||
emqx_inflight:to_list(fun sort_fun/2, Inflight),
|
||||
[],
|
||||
Now,
|
||||
Session,
|
||||
ClientInfo
|
||||
)
|
||||
end.
|
||||
|
||||
retry_delivery([], Acc, _Now, Session = #session{retry_interval = Interval}, _ClientInfo) ->
|
||||
{ok, lists:reverse(Acc), Interval, Session};
|
||||
|
||||
retry_delivery([{PacketId, #inflight_data{timestamp = Ts} = Data} | More],
|
||||
Acc, Now, Session = #session{retry_interval = Interval, inflight = Inflight}, ClientInfo) ->
|
||||
retry_delivery(
|
||||
[{PacketId, #inflight_data{timestamp = Ts} = Data} | More],
|
||||
Acc,
|
||||
Now,
|
||||
Session = #session{retry_interval = Interval, inflight = Inflight},
|
||||
ClientInfo
|
||||
) ->
|
||||
case (Age = age(Now, Ts)) >= Interval of
|
||||
true ->
|
||||
{Acc1, Inflight1} = do_retry_delivery(PacketId, Data, Now, Acc, Inflight, ClientInfo),
@ -637,8 +719,14 @@ retry_delivery([{PacketId, #inflight_data{timestamp = Ts} = Data} | More],
|
|||
{ok, lists:reverse(Acc), Interval - max(0, Age), Session}
|
||||
end.
|
||||
|
||||
do_retry_delivery(PacketId, #inflight_data{phase = wait_ack, message = Msg} = Data,
|
||||
Now, Acc, Inflight, ClientInfo) ->
|
||||
do_retry_delivery(
|
||||
PacketId,
|
||||
#inflight_data{phase = wait_ack, message = Msg} = Data,
|
||||
Now,
|
||||
Acc,
|
||||
Inflight,
|
||||
ClientInfo
|
||||
) ->
|
||||
case emqx_message:is_expired(Msg) of
|
||||
true ->
|
||||
ok = emqx_hooks:run('delivery.dropped', [ClientInfo, Msg, expired]),
@ -650,7 +738,6 @@ do_retry_delivery(PacketId, #inflight_data{phase = wait_ack, message = Msg} = Da
|
|||
Inflight1 = emqx_inflight:update(PacketId, Update, Inflight),
|
||||
{[{PacketId, Msg1} | Acc], Inflight1}
|
||||
end;
|
||||
|
||||
do_retry_delivery(PacketId, Data, Now, Acc, Inflight, _) ->
|
||||
Update = Data#inflight_data{timestamp = Now},
|
||||
Inflight1 = emqx_inflight:update(PacketId, Update, Inflight),
@ -660,16 +747,21 @@ do_retry_delivery(PacketId, Data, Now, Acc, Inflight, _) ->
|
|||
%% Expire Awaiting Rel
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(expire(emqx_types:clientinfo(), awaiting_rel, session()) ->
|
||||
{ok, session()} | {ok, timeout(), session()}).
|
||||
-spec expire(emqx_types:clientinfo(), awaiting_rel, session()) ->
|
||||
{ok, session()} | {ok, timeout(), session()}.
|
||||
expire(_ClientInfo, awaiting_rel, Session = #session{awaiting_rel = AwaitingRel}) ->
|
||||
case maps:size(AwaitingRel) of
|
||||
0 -> {ok, Session};
|
||||
_ -> expire_awaiting_rel(erlang:system_time(millisecond), Session)
|
||||
end.
|
||||
|
||||
expire_awaiting_rel(Now, Session = #session{awaiting_rel = AwaitingRel,
|
||||
await_rel_timeout = Timeout}) ->
|
||||
expire_awaiting_rel(
|
||||
Now,
|
||||
Session = #session{
|
||||
awaiting_rel = AwaitingRel,
|
||||
await_rel_timeout = Timeout
|
||||
}
|
||||
) ->
|
||||
NotExpired = fun(_PacketId, Ts) -> age(Now, Ts) < Timeout end,
|
||||
AwaitingRel1 = maps:filter(NotExpired, AwaitingRel),
|
||||
ExpiredCnt = maps:size(AwaitingRel) - maps:size(AwaitingRel1),
@ -684,32 +776,38 @@ expire_awaiting_rel(Now, Session = #session{awaiting_rel = AwaitingRel,
|
|||
%% Takeover, Resume and Replay
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(takeover(session()) -> ok).
|
||||
-spec takeover(session()) -> ok.
|
||||
takeover(#session{subscriptions = Subs}) ->
|
||||
lists:foreach(fun emqx_broker:unsubscribe/1, maps:keys(Subs)).
|
||||
|
||||
-spec(resume(emqx_types:clientinfo(), session()) -> ok).
|
||||
-spec resume(emqx_types:clientinfo(), session()) -> ok.
|
||||
resume(ClientInfo = #{clientid := ClientId}, Session = #session{subscriptions = Subs}) ->
|
||||
lists:foreach(fun({TopicFilter, SubOpts}) ->
|
||||
ok = emqx_broker:subscribe(TopicFilter, ClientId, SubOpts)
|
||||
end, maps:to_list(Subs)),
|
||||
lists:foreach(
|
||||
fun({TopicFilter, SubOpts}) ->
|
||||
ok = emqx_broker:subscribe(TopicFilter, ClientId, SubOpts)
|
||||
end,
|
||||
maps:to_list(Subs)
|
||||
),
|
||||
ok = emqx_metrics:inc('session.resumed'),
|
||||
emqx_hooks:run('session.resumed', [ClientInfo, info(Session)]).
|
||||
|
||||
-spec(replay(emqx_types:clientinfo(), session()) -> {ok, replies(), session()}).
|
||||
-spec replay(emqx_types:clientinfo(), session()) -> {ok, replies(), session()}.
|
||||
replay(ClientInfo, Session = #session{inflight = Inflight}) ->
|
||||
Pubs = lists:map(fun({PacketId, #inflight_data{phase = wait_comp}}) ->
|
||||
{pubrel, PacketId};
|
||||
({PacketId, #inflight_data{message = Msg}}) ->
|
||||
{PacketId, emqx_message:set_flag(dup, true, Msg)}
|
||||
end, emqx_inflight:to_list(Inflight)),
|
||||
Pubs = lists:map(
|
||||
fun
|
||||
({PacketId, #inflight_data{phase = wait_comp}}) ->
|
||||
{pubrel, PacketId};
|
||||
({PacketId, #inflight_data{message = Msg}}) ->
|
||||
{PacketId, emqx_message:set_flag(dup, true, Msg)}
|
||||
end,
|
||||
emqx_inflight:to_list(Inflight)
|
||||
),
|
||||
case dequeue(ClientInfo, Session) of
|
||||
{ok, NSession} -> {ok, Pubs, NSession};
|
||||
{ok, More, NSession} ->
|
||||
{ok, lists:append(Pubs, More), NSession}
|
||||
{ok, More, NSession} -> {ok, lists:append(Pubs, More), NSession}
|
||||
end.
|
||||
|
||||
-spec(terminate(emqx_types:clientinfo(), Reason :: term(), session()) -> ok).
|
||||
-spec terminate(emqx_types:clientinfo(), Reason :: term(), session()) -> ok.
|
||||
terminate(ClientInfo, discarded, Session) ->
|
||||
run_hook('session.discarded', [ClientInfo, info(Session)]);
|
||||
terminate(ClientInfo, takenover, Session) ->
@ -718,7 +816,8 @@ terminate(ClientInfo, Reason, Session) ->
|
|||
run_hook('session.terminated', [ClientInfo, Reason, info(Session)]).
|
||||
|
||||
run_hook(Name, Args) ->
|
||||
ok = emqx_metrics:inc(Name), emqx_hooks:run(Name, Args).
|
||||
ok = emqx_metrics:inc(Name),
|
||||
emqx_hooks:run(Name, Args).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Inc message/delivery expired counter
@ -753,18 +852,23 @@ obtain_next_pkt_id(Session) ->
|
|||
|
||||
next_pkt_id(Session = #session{next_pkt_id = ?MAX_PACKET_ID}) ->
|
||||
Session#session{next_pkt_id = 1};
|
||||
|
||||
next_pkt_id(Session = #session{next_pkt_id = Id}) ->
|
||||
Session#session{next_pkt_id = Id + 1}.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Message Latency Stats
|
||||
%%--------------------------------------------------------------------
|
||||
on_delivery_completed(Msg,
|
||||
#session{created_at = CreateAt, clientid = ClientId}) ->
|
||||
emqx:run_hook('delivery.completed',
|
||||
[Msg,
|
||||
#{session_birth_time => CreateAt, clientid => ClientId}]).
|
||||
on_delivery_completed(
|
||||
Msg,
|
||||
#session{created_at = CreateAt, clientid = ClientId}
|
||||
) ->
|
||||
emqx:run_hook(
|
||||
'delivery.completed',
|
||||
[
|
||||
Msg,
|
||||
#{session_birth_time => CreateAt, clientid => ClientId}
|
||||
]
|
||||
).
|
||||
|
||||
mark_begin_deliver(Msg) ->
|
||||
emqx_message:set_header(deliver_begin_at, erlang:system_time(second), Msg).
@ -785,9 +889,11 @@ batch_n(Inflight) ->
|
|||
end.
|
||||
|
||||
with_ts(Msg) ->
|
||||
#inflight_data{phase = wait_ack,
|
||||
message = Msg,
|
||||
timestamp = erlang:system_time(millisecond)}.
|
||||
#inflight_data{
|
||||
phase = wait_ack,
|
||||
message = Msg,
|
||||
timestamp = erlang:system_time(millisecond)
|
||||
}.
|
||||
|
||||
age(Now, Ts) -> Now - Ts.
@ -797,4 +903,4 @@ age(Now, Ts) -> Now - Ts.
|
|||
|
||||
set_field(Name, Value, Session) ->
|
||||
Pos = emqx_misc:index_of(Name, record_info(fields, session)),
|
||||
setelement(Pos+1, Session, Value).
|
||||
setelement(Pos + 1, Session, Value).
@ -24,37 +24,42 @@
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
|
||||
|
||||
-export([ create_init_tab/0
|
||||
, create_router_tab/1
|
||||
, start_link/2]).
|
||||
-export([
|
||||
create_init_tab/0,
|
||||
create_router_tab/1,
|
||||
start_link/2
|
||||
]).
|
||||
|
||||
%% Route APIs
|
||||
-export([ delete_routes/2
|
||||
, do_add_route/2
|
||||
, do_delete_route/2
|
||||
, match_routes/1
|
||||
]).
|
||||
-export([
|
||||
delete_routes/2,
|
||||
do_add_route/2,
|
||||
do_delete_route/2,
|
||||
match_routes/1
|
||||
]).
|
||||
|
||||
-export([ buffer/3
|
||||
, pending/2
|
||||
, resume_begin/2
|
||||
, resume_end/2
|
||||
]).
|
||||
-export([
|
||||
buffer/3,
|
||||
pending/2,
|
||||
resume_begin/2,
|
||||
resume_end/2
|
||||
]).
|
||||
|
||||
-export([print_routes/1]).
|
||||
|
||||
%% gen_server callbacks
|
||||
-export([ init/1
|
||||
, handle_call/3
|
||||
, handle_cast/2
|
||||
, handle_info/2
|
||||
, terminate/2
|
||||
, code_change/3
|
||||
]).
|
||||
-export([
|
||||
init/1,
|
||||
handle_call/3,
|
||||
handle_cast/2,
|
||||
handle_info/2,
|
||||
terminate/2,
|
||||
code_change/3
|
||||
]).
|
||||
|
||||
-type(group() :: binary()).
|
||||
-type group() :: binary().
|
||||
|
||||
-type(dest() :: node() | {group(), node()}).
|
||||
-type dest() :: node() | {group(), node()}.
|
||||
|
||||
-define(ROUTE_RAM_TAB, emqx_session_route_ram).
|
||||
-define(ROUTE_DISC_TAB, emqx_session_route_disc).
@ -67,63 +72,83 @@
|
|||
|
||||
create_router_tab(disc) ->
|
||||
ok = mria:create_table(?ROUTE_DISC_TAB, [
|
||||
{type, bag},
|
||||
{rlog_shard, ?ROUTE_SHARD},
|
||||
{storage, disc_copies},
|
||||
{record_name, route},
|
||||
{attributes, record_info(fields, route)},
|
||||
{storage_properties, [{ets, [{read_concurrency, true},
|
||||
{write_concurrency, true}]}]}]);
|
||||
{type, bag},
|
||||
{rlog_shard, ?ROUTE_SHARD},
|
||||
{storage, disc_copies},
|
||||
{record_name, route},
|
||||
{attributes, record_info(fields, route)},
|
||||
{storage_properties, [
|
||||
{ets, [
|
||||
{read_concurrency, true},
|
||||
{write_concurrency, true}
|
||||
]}
|
||||
]}
|
||||
]);
|
||||
create_router_tab(ram) ->
|
||||
ok = mria:create_table(?ROUTE_RAM_TAB, [
|
||||
{type, bag},
|
||||
{rlog_shard, ?ROUTE_SHARD},
|
||||
{storage, ram_copies},
|
||||
{record_name, route},
|
||||
{attributes, record_info(fields, route)},
|
||||
{storage_properties, [{ets, [{read_concurrency, true},
|
||||
{write_concurrency, true}]}]}]).
|
||||
{type, bag},
|
||||
{rlog_shard, ?ROUTE_SHARD},
|
||||
{storage, ram_copies},
|
||||
{record_name, route},
|
||||
{attributes, record_info(fields, route)},
|
||||
{storage_properties, [
|
||||
{ets, [
|
||||
{read_concurrency, true},
|
||||
{write_concurrency, true}
|
||||
]}
|
||||
]}
|
||||
]).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Start a router
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
create_init_tab() ->
|
||||
emqx_tables:new(?SESSION_INIT_TAB, [public, {read_concurrency, true},
|
||||
{write_concurrency, true}]).
|
||||
emqx_tables:new(?SESSION_INIT_TAB, [
|
||||
public,
|
||||
{read_concurrency, true},
|
||||
{write_concurrency, true}
|
||||
]).
|
||||
|
||||
-spec(start_link(atom(), pos_integer()) -> startlink_ret()).
|
||||
-spec start_link(atom(), pos_integer()) -> startlink_ret().
|
||||
start_link(Pool, Id) ->
|
||||
gen_server:start_link({local, emqx_misc:proc_name(?MODULE, Id)},
|
||||
?MODULE, [Pool, Id], [{hibernate_after, 1000}]).
|
||||
gen_server:start_link(
|
||||
{local, emqx_misc:proc_name(?MODULE, Id)},
|
||||
?MODULE,
|
||||
[Pool, Id],
|
||||
[{hibernate_after, 1000}]
|
||||
).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Route APIs
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(do_add_route(emqx_topic:topic(), dest()) -> ok | {error, term()}).
|
||||
-spec do_add_route(emqx_topic:topic(), dest()) -> ok | {error, term()}.
|
||||
do_add_route(Topic, SessionID) when is_binary(Topic) ->
|
||||
Route = #route{topic = Topic, dest = SessionID},
|
||||
case lists:member(Route, lookup_routes(Topic)) of
|
||||
true -> ok;
|
||||
true ->
|
||||
ok;
|
||||
false ->
|
||||
case emqx_topic:wildcard(Topic) of
|
||||
true ->
|
||||
true ->
|
||||
Fun = fun emqx_router_utils:insert_session_trie_route/2,
|
||||
emqx_router_utils:maybe_trans(Fun, [route_tab(), Route],
|
||||
?PERSISTENT_SESSION_SHARD);
|
||||
emqx_router_utils:maybe_trans(
|
||||
Fun,
|
||||
[route_tab(), Route],
|
||||
?PERSISTENT_SESSION_SHARD
|
||||
);
|
||||
false ->
|
||||
emqx_router_utils:insert_direct_route(route_tab(), Route)
|
||||
end
|
||||
end.
|
||||
|
||||
%% @doc Match routes
|
||||
-spec(match_routes(emqx_topic:topic()) -> [emqx_types:route()]).
|
||||
-spec match_routes(emqx_topic:topic()) -> [emqx_types:route()].
|
||||
match_routes(Topic) when is_binary(Topic) ->
|
||||
case match_trie(Topic) of
|
||||
[] -> lookup_routes(Topic);
|
||||
Matched ->
|
||||
lists:append([lookup_routes(To) || To <- [Topic | Matched]])
|
||||
Matched -> lists:append([lookup_routes(To) || To <- [Topic | Matched]])
|
||||
end.
|
||||
|
||||
%% Optimize: routing table will be replicated to all router nodes.
@ -137,11 +162,11 @@ match_trie(Topic) ->
|
|||
delete_routes(SessionID, Subscriptions) ->
|
||||
cast(pick(SessionID), {delete_routes, SessionID, Subscriptions}).
|
||||
|
||||
-spec(do_delete_route(emqx_topic:topic(), dest()) -> ok | {error, term()}).
|
||||
-spec do_delete_route(emqx_topic:topic(), dest()) -> ok | {error, term()}.
|
||||
do_delete_route(Topic, SessionID) ->
|
||||
Route = #route{topic = Topic, dest = SessionID},
|
||||
case emqx_topic:wildcard(Topic) of
|
||||
true ->
|
||||
true ->
|
||||
Fun = fun emqx_router_utils:delete_session_trie_route/2,
|
||||
emqx_router_utils:maybe_trans(Fun, [route_tab(), Route], ?PERSISTENT_SESSION_SHARD);
|
||||
false ->
@ -149,11 +174,14 @@ do_delete_route(Topic, SessionID) ->
|
|||
end.
|
||||
|
||||
%% @doc Print routes to a topic
|
||||
-spec(print_routes(emqx_topic:topic()) -> ok).
|
||||
-spec print_routes(emqx_topic:topic()) -> ok.
|
||||
print_routes(Topic) ->
|
||||
lists:foreach(fun(#route{topic = To, dest = SessionID}) ->
|
||||
io:format("~s -> ~p~n", [To, SessionID])
|
||||
end, match_routes(Topic)).
|
||||
lists:foreach(
|
||||
fun(#route{topic = To, dest = SessionID}) ->
|
||||
io:format("~s -> ~p~n", [To, SessionID])
|
||||
end,
|
||||
match_routes(Topic)
|
||||
).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Session APIs
@ -173,11 +201,11 @@ resume_begin(From, SessionID) when is_pid(From), is_binary(SessionID) ->
|
|||
call(pick(SessionID), {resume_begin, From, SessionID}).
|
||||
|
||||
-spec resume_end(pid(), binary()) ->
|
||||
{'ok', [emqx_types:message()]} | {'error', term()}.
|
||||
{'ok', [emqx_types:message()]} | {'error', term()}.
|
||||
resume_end(From, SessionID) when is_pid(From), is_binary(SessionID) ->
|
||||
case emqx_tables:lookup_value(?SESSION_INIT_TAB, SessionID) of
|
||||
undefined ->
|
||||
?tp(ps_session_not_found, #{ sid => SessionID }),
|
||||
?tp(ps_session_not_found, #{sid => SessionID}),
|
||||
{error, not_found};
|
||||
Pid ->
|
||||
Res = emqx_session_router_worker:resume_end(From, Pid, SessionID),
@ -237,7 +265,7 @@ handle_cast({resume_end, SessionID, Pid}, State) ->
|
|||
end,
|
||||
Pmon = emqx_pmon:demonitor(Pid, maps:get(pmon, State)),
|
||||
_ = emqx_session_router_worker_sup:abort_worker(Pid),
|
||||
{noreply, State#{ pmon => Pmon }};
|
||||
{noreply, State#{pmon => Pmon}};
|
||||
handle_cast(Msg, State) ->
|
||||
?SLOG(error, #{msg => "unexpected_cast", cast => Msg}),
|
||||
{noreply, State}.
@ -257,7 +285,7 @@ code_change(_OldVsn, State, _Extra) ->
|
|||
%% initialisation of a resuming session.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
init_resume_worker(RemotePid, SessionID, #{ pmon := Pmon } = State) ->
|
||||
init_resume_worker(RemotePid, SessionID, #{pmon := Pmon} = State) ->
|
||||
case emqx_session_router_worker_sup:start_worker(SessionID, RemotePid) of
|
||||
{error, What} ->
|
||||
?SLOG(error, #{msg => "failed_to_start_resume_worker", reason => What}),
@ -266,11 +294,11 @@ init_resume_worker(RemotePid, SessionID, #{ pmon := Pmon } = State) ->
|
|||
Pmon1 = emqx_pmon:monitor(Pid, Pmon),
|
||||
case emqx_tables:lookup_value(?SESSION_INIT_TAB, SessionID) of
|
||||
undefined ->
|
||||
{ok, Pid, State#{ pmon => Pmon1 }};
|
||||
{ok, Pid, State#{pmon => Pmon1}};
|
||||
{_, OldPid} ->
|
||||
Pmon2 = emqx_pmon:demonitor(OldPid, Pmon1),
|
||||
_ = emqx_session_router_worker_sup:abort_worker(OldPid),
|
||||
{ok, Pid, State#{ pmon => Pmon2 }}
|
||||
{ok, Pid, State#{pmon => Pmon2}}
|
||||
end
|
||||
end.
@ -284,5 +312,5 @@ lookup_routes(Topic) ->
|
|||
route_tab() ->
|
||||
case emqx_persistent_session:storage_type() of
|
||||
disc -> ?ROUTE_DISC_TAB;
|
||||
ram -> ?ROUTE_RAM_TAB
|
||||
ram -> ?ROUTE_RAM_TAB
|
||||
end.
@ -26,41 +26,43 @@
|
|||
%% the initialization, the messages are delivered and the worker is
|
||||
%% terminated.
|
||||
|
||||
|
||||
-module(emqx_session_router_worker).
|
||||
|
||||
-behaviour(gen_server).
|
||||
|
||||
%% API
|
||||
-export([ buffer/3
|
||||
, pendings/1
|
||||
, resume_end/3
|
||||
, start_link/2
|
||||
]).
|
||||
-export([
|
||||
buffer/3,
|
||||
pendings/1,
|
||||
resume_end/3,
|
||||
start_link/2
|
||||
]).
|
||||
|
||||
%% gen_server callbacks
|
||||
-export([ init/1
|
||||
, handle_call/3
|
||||
, handle_cast/2
|
||||
, handle_info/2
|
||||
, terminate/2
|
||||
]).
|
||||
-export([
|
||||
init/1,
|
||||
handle_call/3,
|
||||
handle_cast/2,
|
||||
handle_info/2,
|
||||
terminate/2
|
||||
]).
|
||||
|
||||
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
|
||||
|
||||
-record(state, { remote_pid :: pid()
|
||||
, session_id :: binary()
|
||||
, session_tab :: ets:table()
|
||||
, messages :: ets:table()
|
||||
, buffering :: boolean()
|
||||
}).
|
||||
-record(state, {
|
||||
remote_pid :: pid(),
|
||||
session_id :: binary(),
|
||||
session_tab :: ets:table(),
|
||||
messages :: ets:table(),
|
||||
buffering :: boolean()
|
||||
}).
|
||||
|
||||
%%%===================================================================
|
||||
%%% API
|
||||
%%%===================================================================
|
||||
|
||||
start_link(SessionTab, #{} = Opts) ->
|
||||
gen_server:start_link(?MODULE, Opts#{ session_tab => SessionTab}, []).
|
||||
gen_server:start_link(?MODULE, Opts#{session_tab => SessionTab}, []).
|
||||
|
||||
pendings(Pid) ->
|
||||
gen_server:call(Pid, pendings).
@ -68,15 +70,19 @@ pendings(Pid) ->
|
|||
resume_end(RemotePid, Pid, _SessionID) ->
|
||||
case gen_server:call(Pid, {resume_end, RemotePid}) of
|
||||
{ok, EtsHandle} ->
|
||||
?tp(ps_worker_call_ok, #{ pid => Pid
|
||||
, remote_pid => RemotePid
|
||||
, sid => _SessionID}),
|
||||
?tp(ps_worker_call_ok, #{
|
||||
pid => Pid,
|
||||
remote_pid => RemotePid,
|
||||
sid => _SessionID
|
||||
}),
|
||||
{ok, ets:tab2list(EtsHandle)};
|
||||
{error, _} = Err ->
|
||||
?tp(ps_worker_call_failed, #{ pid => Pid
|
||||
, remote_pid => RemotePid
|
||||
, sid => _SessionID
|
||||
, reason => Err}),
|
||||
?tp(ps_worker_call_failed, #{
|
||||
pid => Pid,
|
||||
remote_pid => RemotePid,
|
||||
sid => _SessionID,
|
||||
reason => Err
|
||||
}),
|
||||
Err
|
||||
end.
@ -88,26 +94,31 @@ buffer(Worker, STopic, Msg) ->
|
|||
%%% gen_server callbacks
|
||||
%%%===================================================================
|
||||
|
||||
init(#{ remote_pid := RemotePid
|
||||
, session_id := SessionID
|
||||
, session_tab := SessionTab}) ->
|
||||
init(#{
|
||||
remote_pid := RemotePid,
|
||||
session_id := SessionID,
|
||||
session_tab := SessionTab
|
||||
}) ->
|
||||
process_flag(trap_exit, true),
|
||||
erlang:monitor(process, RemotePid),
|
||||
?tp(ps_worker_started, #{ remote_pid => RemotePid
|
||||
, sid => SessionID }),
|
||||
{ok, #state{ remote_pid = RemotePid
|
||||
, session_id = SessionID
|
||||
, session_tab = SessionTab
|
||||
, messages = ets:new(?MODULE, [protected, ordered_set])
|
||||
, buffering = true
|
||||
}}.
|
||||
?tp(ps_worker_started, #{
|
||||
remote_pid => RemotePid,
|
||||
sid => SessionID
|
||||
}),
|
||||
{ok, #state{
|
||||
remote_pid = RemotePid,
|
||||
session_id = SessionID,
|
||||
session_tab = SessionTab,
|
||||
messages = ets:new(?MODULE, [protected, ordered_set]),
|
||||
buffering = true
|
||||
}}.
|
||||
|
||||
handle_call(pendings, _From, State) ->
|
||||
%% Debug API
|
||||
{reply, {State#state.messages, State#state.remote_pid}, State};
|
||||
handle_call({resume_end, RemotePid}, _From, #state{remote_pid = RemotePid} = State) ->
|
||||
?tp(ps_worker_resume_end, #{sid => State#state.session_id}),
|
||||
{reply, {ok, State#state.messages}, State#state{ buffering = false }};
|
||||
{reply, {ok, State#state.messages}, State#state{buffering = false}};
|
||||
handle_call({resume_end, _RemotePid}, _From, State) ->
|
||||
?tp(ps_worker_resume_end_error, #{sid => State#state.session_id}),
|
||||
{reply, {error, wrong_remote_pid}, State};
@ -119,26 +130,30 @@ handle_cast(_Request, State) ->
|
|||
{noreply, State}.
|
||||
|
||||
handle_info({buffer, _STopic, _Msg}, State) when not State#state.buffering ->
|
||||
?tp(ps_worker_drop_deliver, #{ sid => State#state.session_id
|
||||
, msg_id => emqx_message:id(_Msg)
|
||||
}),
|
||||
?tp(ps_worker_drop_deliver, #{
|
||||
sid => State#state.session_id,
|
||||
msg_id => emqx_message:id(_Msg)
|
||||
}),
|
||||
{noreply, State};
|
||||
handle_info({buffer, STopic, Msg}, State) when State#state.buffering ->
|
||||
?tp(ps_worker_deliver, #{ sid => State#state.session_id
|
||||
, msg_id => emqx_message:id(Msg)
|
||||
}),
|
||||
?tp(ps_worker_deliver, #{
|
||||
sid => State#state.session_id,
|
||||
msg_id => emqx_message:id(Msg)
|
||||
}),
|
||||
ets:insert(State#state.messages, {{Msg, STopic}}),
|
||||
{noreply, State};
|
||||
handle_info({'DOWN', _, process, RemotePid, _Reason}, #state{remote_pid = RemotePid} = State) ->
|
||||
?tp(warning, ps_worker, #{ event => worker_remote_died
|
||||
, sid => State#state.session_id
|
||||
, msg => "Remote pid died. Exiting." }),
|
||||
?tp(warning, ps_worker, #{
|
||||
event => worker_remote_died,
|
||||
sid => State#state.session_id,
|
||||
msg => "Remote pid died. Exiting."
|
||||
}),
|
||||
{stop, normal, State};
|
||||
handle_info(_Info, State) ->
|
||||
{noreply, State}.
|
||||
|
||||
terminate(shutdown, _State) ->
|
||||
?tp(ps_worker_shutdown, #{ sid => _State#state.session_id }),
|
||||
?tp(ps_worker_shutdown, #{sid => _State#state.session_id}),
|
||||
ok;
|
||||
terminate(_, _State) ->
|
||||
ok.
@ -18,22 +18,25 @@
|
|||
|
||||
-behaviour(supervisor).
|
||||
|
||||
-export([ start_link/1
|
||||
]).
|
||||
-export([start_link/1]).
|
||||
|
||||
-export([ abort_worker/1
|
||||
, start_worker/2
|
||||
]).
|
||||
-export([
|
||||
abort_worker/1,
|
||||
start_worker/2
|
||||
]).
|
||||
|
||||
-export([ init/1
|
||||
]).
|
||||
-export([init/1]).
|
||||
|
||||
start_link(SessionTab) ->
|
||||
supervisor:start_link({local, ?MODULE}, ?MODULE, SessionTab).
|
||||
|
||||
start_worker(SessionID, RemotePid) ->
|
||||
supervisor:start_child(?MODULE, [#{ session_id => SessionID
|
||||
, remote_pid => RemotePid}]).
|
||||
supervisor:start_child(?MODULE, [
|
||||
#{
|
||||
session_id => SessionID,
|
||||
remote_pid => RemotePid
|
||||
}
|
||||
]).
|
||||
|
||||
abort_worker(Pid) ->
|
||||
supervisor:terminate_child(?MODULE, Pid).
@ -44,14 +47,18 @@ abort_worker(Pid) ->
|
|||
|
||||
init(SessionTab) ->
|
||||
%% Resume worker
|
||||
Worker = #{id => session_router_worker,
|
||||
start => {emqx_session_router_worker, start_link, [SessionTab]},
|
||||
restart => transient,
|
||||
shutdown => 2000,
|
||||
type => worker,
|
||||
modules => [emqx_session_router_worker]},
|
||||
Spec = #{ strategy => simple_one_for_one
|
||||
, intensity => 1
|
||||
, period => 5},
|
||||
Worker = #{
|
||||
id => session_router_worker,
|
||||
start => {emqx_session_router_worker, start_link, [SessionTab]},
|
||||
restart => transient,
|
||||
shutdown => 2000,
|
||||
type => worker,
|
||||
modules => [emqx_session_router_worker]
|
||||
},
|
||||
Spec = #{
|
||||
strategy => simple_one_for_one,
|
||||
intensity => 1,
|
||||
period => 5
|
||||
},
|
||||
|
||||
{ok, {Spec, [Worker]}}.
@ -23,7 +23,6 @@
|
|||
-include("logger.hrl").
|
||||
-include("types.hrl").
|
||||
|
||||
|
||||
%% Mnesia bootstrap
|
||||
-export([mnesia/1]).
@ -32,38 +31,43 @@
|
|||
%% APIs
|
||||
-export([start_link/0]).
|
||||
|
||||
-export([ subscribe/3
|
||||
, unsubscribe/3
|
||||
]).
|
||||
-export([
|
||||
subscribe/3,
|
||||
unsubscribe/3
|
||||
]).
|
||||
|
||||
-export([dispatch/3]).
|
||||
|
||||
-export([ maybe_ack/1
|
||||
, maybe_nack_dropped/1
|
||||
, nack_no_connection/1
|
||||
, is_ack_required/1
|
||||
]).
|
||||
-export([
|
||||
maybe_ack/1,
|
||||
maybe_nack_dropped/1,
|
||||
nack_no_connection/1,
|
||||
is_ack_required/1
|
||||
]).
|
||||
|
||||
%% for testing
|
||||
-export([subscribers/2]).
|
||||
|
||||
%% gen_server callbacks
|
||||
-export([ init/1
|
||||
, handle_call/3
|
||||
, handle_cast/2
|
||||
, handle_info/2
|
||||
, terminate/2
|
||||
, code_change/3
|
||||
]).
|
||||
-export([
|
||||
init/1,
|
||||
handle_call/3,
|
||||
handle_cast/2,
|
||||
handle_info/2,
|
||||
terminate/2,
|
||||
code_change/3
|
||||
]).
|
||||
|
||||
-export_type([strategy/0]).
|
||||
|
||||
-type strategy() :: random
|
||||
| round_robin
|
||||
| sticky
|
||||
| hash %% same as hash_clientid, backward compatible
|
||||
| hash_clientid
|
||||
| hash_topic.
|
||||
-type strategy() ::
|
||||
random
|
||||
| round_robin
|
||||
| sticky
|
||||
%% same as hash_clientid, backward compatible
|
||||
| hash
|
||||
| hash_clientid
|
||||
| hash_topic.
|
||||
|
||||
-define(SERVER, ?MODULE).
|
||||
-define(TAB, emqx_shared_subscription).
@ -85,33 +89,34 @@
|
|||
|
||||
mnesia(boot) ->
|
||||
ok = mria:create_table(?TAB, [
|
||||
{type, bag},
|
||||
{rlog_shard, ?SHARED_SUB_SHARD},
|
||||
{storage, ram_copies},
|
||||
{record_name, emqx_shared_subscription},
|
||||
{attributes, record_info(fields, emqx_shared_subscription)}]).
|
||||
{type, bag},
|
||||
{rlog_shard, ?SHARED_SUB_SHARD},
|
||||
{storage, ram_copies},
|
||||
{record_name, emqx_shared_subscription},
|
||||
{attributes, record_info(fields, emqx_shared_subscription)}
|
||||
]).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% API
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(start_link() -> startlink_ret()).
|
||||
-spec start_link() -> startlink_ret().
|
||||
start_link() ->
|
||||
gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
|
||||
|
||||
-spec(subscribe(emqx_types:group(), emqx_types:topic(), pid()) -> ok).
|
||||
-spec subscribe(emqx_types:group(), emqx_types:topic(), pid()) -> ok.
|
||||
subscribe(Group, Topic, SubPid) when is_pid(SubPid) ->
|
||||
gen_server:call(?SERVER, {subscribe, Group, Topic, SubPid}).
|
||||
|
||||
-spec(unsubscribe(emqx_types:group(), emqx_types:topic(), pid()) -> ok).
|
||||
-spec unsubscribe(emqx_types:group(), emqx_types:topic(), pid()) -> ok.
|
||||
unsubscribe(Group, Topic, SubPid) when is_pid(SubPid) ->
|
||||
gen_server:call(?SERVER, {unsubscribe, Group, Topic, SubPid}).
|
||||
|
||||
record(Group, Topic, SubPid) ->
|
||||
#emqx_shared_subscription{group = Group, topic = Topic, subpid = SubPid}.
|
||||
|
||||
-spec(dispatch(emqx_types:group(), emqx_types:topic(), emqx_types:delivery())
|
||||
-> emqx_types:deliver_result()).
|
||||
-spec dispatch(emqx_types:group(), emqx_types:topic(), emqx_types:delivery()) ->
|
||||
emqx_types:deliver_result().
|
||||
dispatch(Group, Topic, Delivery) ->
|
||||
dispatch(Group, Topic, Delivery, _FailedSubs = []).
@ -122,18 +127,19 @@ dispatch(Group, Topic, Delivery = #delivery{message = Msg}, FailedSubs) ->
|
|||
{error, no_subscribers};
|
||||
{Type, SubPid} ->
|
||||
case do_dispatch(SubPid, Topic, Msg, Type) of
|
||||
ok -> {ok, 1};
|
||||
ok ->
|
||||
{ok, 1};
|
||||
{error, _Reason} ->
|
||||
%% Failed to dispatch to this sub, try next.
|
||||
dispatch(Group, Topic, Delivery, [SubPid | FailedSubs])
|
||||
end
|
||||
end.
|
||||
|
||||
-spec(strategy() -> strategy()).
|
||||
-spec strategy() -> strategy().
|
||||
strategy() ->
|
||||
emqx:get_config([broker, shared_subscription_strategy]).
|
||||
|
||||
-spec(ack_enabled() -> boolean()).
|
||||
-spec ack_enabled() -> boolean().
|
||||
ack_enabled() ->
|
||||
emqx:get_config([broker, shared_dispatch_ack_enabled]).
|
||||
|
@ -167,10 +173,11 @@ dispatch_with_ack(SubPid, Topic, Msg) ->
|
|||
Ref = erlang:monitor(process, SubPid),
|
||||
Sender = self(),
|
||||
_ = erlang:send(SubPid, {deliver, Topic, with_ack_ref(Msg, {Sender, Ref})}),
|
||||
Timeout = case Msg#message.qos of
|
||||
?QOS_1 -> timer:seconds(?SHARED_SUB_QOS1_DISPATCH_TIMEOUT_SECONDS);
|
||||
?QOS_2 -> infinity
|
||||
end,
|
||||
Timeout =
|
||||
case Msg#message.qos of
|
||||
?QOS_1 -> timer:seconds(?SHARED_SUB_QOS1_DISPATCH_TIMEOUT_SECONDS);
|
||||
?QOS_2 -> infinity
|
||||
end,
|
||||
try
|
||||
receive
|
||||
{Ref, ?ACK} ->
|
@ -180,9 +187,8 @@ dispatch_with_ack(SubPid, Topic, Msg) ->
|
|||
{error, Reason};
|
||||
{'DOWN', Ref, process, SubPid, Reason} ->
|
||||
{error, Reason}
|
||||
after
|
||||
Timeout ->
|
||||
{error, timeout}
|
||||
after Timeout ->
|
||||
{error, timeout}
|
||||
end
|
||||
after
|
||||
_ = erlang:demonitor(Ref, [flush])
|
@ -197,11 +203,11 @@ without_ack_ref(Msg) ->
|
|||
get_ack_ref(Msg) ->
|
||||
emqx_message:get_header(shared_dispatch_ack, Msg, ?NO_ACK).
|
||||
|
||||
-spec(is_ack_required(emqx_types:message()) -> boolean()).
|
||||
-spec is_ack_required(emqx_types:message()) -> boolean().
|
||||
is_ack_required(Msg) -> ?NO_ACK =/= get_ack_ref(Msg).
|
||||
|
||||
%% @doc Negative ack dropped message due to inflight window or message queue being full.
|
||||
-spec(maybe_nack_dropped(emqx_types:message()) -> ok).
|
||||
-spec maybe_nack_dropped(emqx_types:message()) -> ok.
|
||||
maybe_nack_dropped(Msg) ->
|
||||
case get_ack_ref(Msg) of
|
||||
?NO_ACK -> ok;
|
@ -211,17 +217,17 @@ maybe_nack_dropped(Msg) ->
|
|||
%% @doc Negative ack message due to connection down.
|
||||
%% Assuming this function is always called when ack is required
|
||||
%% i.e. is_ack_required returned true.
|
||||
-spec(nack_no_connection(emqx_types:message()) -> ok).
|
||||
-spec nack_no_connection(emqx_types:message()) -> ok.
|
||||
nack_no_connection(Msg) ->
|
||||
{Sender, Ref} = get_ack_ref(Msg),
|
||||
nack(Sender, Ref, no_connection).
|
||||
|
||||
-spec(nack(pid(), reference(), dropped | no_connection) -> ok).
|
||||
-spec nack(pid(), reference(), dropped | no_connection) -> ok.
|
||||
nack(Sender, Ref, Reason) ->
|
||||
erlang:send(Sender, {Ref, ?NACK(Reason)}),
|
||||
ok.
|
||||
|
||||
-spec(maybe_ack(emqx_types:message()) -> emqx_types:message()).
|
||||
-spec maybe_ack(emqx_types:message()) -> emqx_types:message().
|
||||
maybe_ack(Msg) ->
|
||||
case get_ack_ref(Msg) of
|
||||
?NO_ACK ->
|
@ -262,7 +268,8 @@ do_pick(Strategy, ClientId, SourceTopic, Group, Topic, FailedSubs) ->
|
|||
{fresh, pick_subscriber(Group, Topic, Strategy, ClientId, SourceTopic, Subs)}
|
||||
end.
|
||||
|
||||
pick_subscriber(_Group, _Topic, _Strategy, _ClientId, _SourceTopic, [Sub]) -> Sub;
|
||||
pick_subscriber(_Group, _Topic, _Strategy, _ClientId, _SourceTopic, [Sub]) ->
|
||||
Sub;
|
||||
pick_subscriber(Group, Topic, Strategy, ClientId, SourceTopic, Subs) ->
|
||||
Nth = do_pick_subscriber(Group, Topic, Strategy, ClientId, SourceTopic, length(Subs)),
|
||||
lists:nth(Nth, Subs).
|
@ -277,10 +284,11 @@ do_pick_subscriber(_Group, _Topic, hash_clientid, ClientId, _SourceTopic, Count)
|
|||
do_pick_subscriber(_Group, _Topic, hash_topic, _ClientId, SourceTopic, Count) ->
|
||||
1 + erlang:phash2(SourceTopic) rem Count;
|
||||
do_pick_subscriber(Group, Topic, round_robin, _ClientId, _SourceTopic, Count) ->
|
||||
Rem = case erlang:get({shared_sub_round_robin, Group, Topic}) of
|
||||
undefined -> rand:uniform(Count) - 1;
|
||||
N -> (N + 1) rem Count
|
||||
end,
|
||||
Rem =
|
||||
case erlang:get({shared_sub_round_robin, Group, Topic}) of
|
||||
undefined -> rand:uniform(Count) - 1;
|
||||
N -> (N + 1) rem Count
|
||||
end,
|
||||
_ = erlang:put({shared_sub_round_robin, Group, Topic}, Rem),
|
||||
Rem + 1.
|
||||
|
@ -301,26 +309,27 @@ init([]) ->
|
|||
|
||||
init_monitors() ->
|
||||
mnesia:foldl(
|
||||
fun(#emqx_shared_subscription{subpid = SubPid}, Mon) ->
|
||||
emqx_pmon:monitor(SubPid, Mon)
|
||||
end, emqx_pmon:new(), ?TAB).
|
||||
fun(#emqx_shared_subscription{subpid = SubPid}, Mon) ->
|
||||
emqx_pmon:monitor(SubPid, Mon)
|
||||
end,
|
||||
emqx_pmon:new(),
|
||||
?TAB
|
||||
).
|
||||
|
||||
handle_call({subscribe, Group, Topic, SubPid}, _From, State = #state{pmon = PMon}) ->
|
||||
mria:dirty_write(?TAB, record(Group, Topic, SubPid)),
|
||||
case ets:member(?SHARED_SUBS, {Group, Topic}) of
|
||||
true -> ok;
|
||||
true -> ok;
|
||||
false -> ok = emqx_router:do_add_route(Topic, {Group, node()})
|
||||
end,
|
||||
ok = maybe_insert_alive_tab(SubPid),
|
||||
true = ets:insert(?SHARED_SUBS, {{Group, Topic}, SubPid}),
|
||||
{reply, ok, update_stats(State#state{pmon = emqx_pmon:monitor(SubPid, PMon)})};
|
||||
|
||||
handle_call({unsubscribe, Group, Topic, SubPid}, _From, State) ->
|
||||
mria:dirty_delete_object(?TAB, record(Group, Topic, SubPid)),
|
||||
true = ets:delete_object(?SHARED_SUBS, {{Group, Topic}, SubPid}),
|
||||
delete_route_if_needed({Group, Topic}),
|
||||
{reply, ok, State};
|
||||
|
||||
handle_call(Req, _From, State) ->
|
||||
?SLOG(error, #{msg => "unexpected_call", req => Req}),
|
||||
{reply, ignored, State}.
|
@ -332,7 +341,6 @@ handle_cast(Msg, State) ->
|
|||
handle_info({mnesia_table_event, {write, NewRecord, _}}, State = #state{pmon = PMon}) ->
|
||||
#emqx_shared_subscription{subpid = SubPid} = NewRecord,
|
||||
{noreply, update_stats(State#state{pmon = emqx_pmon:monitor(SubPid, PMon)})};
|
||||
|
||||
%% The subscriber may have subscribed multiple topics, so we need to keep monitoring the PID until
|
||||
%% it `unsubscribed` the last topic.
|
||||
%% The trick is we don't demonitor the subscriber here, and (after a long time) it will eventually
|
@ -343,12 +351,10 @@ handle_info({mnesia_table_event, {write, NewRecord, _}}, State = #state{pmon = P
|
|||
|
||||
handle_info({mnesia_table_event, _Event}, State) ->
|
||||
{noreply, State};
|
||||
|
||||
handle_info({'DOWN', _MRef, process, SubPid, Reason}, State = #state{pmon = PMon}) ->
|
||||
?SLOG(info, #{msg => "shared_subscriber_down", sub_pid => SubPid, reason => Reason}),
|
||||
cleanup_down(SubPid),
|
||||
{noreply, update_stats(State#state{pmon = emqx_pmon:erase(SubPid, PMon)})};
|
||||
|
||||
handle_info(_Info, State) ->
|
||||
{noreply, State}.
|
||||
|
@ -364,7 +370,9 @@ code_change(_OldVsn, State, _Extra) ->
|
|||
|
||||
%% keep track of alive remote pids
|
||||
maybe_insert_alive_tab(Pid) when ?IS_LOCAL_PID(Pid) -> ok;
|
||||
maybe_insert_alive_tab(Pid) when is_pid(Pid) -> ets:insert(?ALIVE_SUBS, {Pid}), ok.
|
||||
maybe_insert_alive_tab(Pid) when is_pid(Pid) ->
|
||||
ets:insert(?ALIVE_SUBS, {Pid}),
|
||||
ok.
|
||||
|
||||
cleanup_down(SubPid) ->
|
||||
?IS_LOCAL_PID(SubPid) orelse ets:delete(?ALIVE_SUBS, SubPid),
|
@ -373,13 +381,16 @@ cleanup_down(SubPid) ->
|
|||
ok = mria:dirty_delete_object(?TAB, Record),
|
||||
true = ets:delete_object(?SHARED_SUBS, {{Group, Topic}, SubPid}),
|
||||
delete_route_if_needed({Group, Topic})
|
||||
end, mnesia:dirty_match_object(#emqx_shared_subscription{_ = '_', subpid = SubPid})).
|
||||
end,
|
||||
mnesia:dirty_match_object(#emqx_shared_subscription{_ = '_', subpid = SubPid})
|
||||
).
|
||||
|
||||
update_stats(State) ->
|
||||
emqx_stats:setstat('subscriptions.shared.count',
|
||||
'subscriptions.shared.max',
|
||||
ets:info(?TAB, size)
|
||||
),
|
||||
emqx_stats:setstat(
|
||||
'subscriptions.shared.count',
|
||||
'subscriptions.shared.max',
|
||||
ets:info(?TAB, size)
|
||||
),
|
||||
State.
|
||||
|
||||
%% Return 'true' if the subscriber process is alive AND not in the failed list
|
||||
|
|
@ -23,123 +23,140 @@
|
|||
-include("types.hrl").
|
||||
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
|
||||
|
||||
|
||||
%% APIs
|
||||
-export([ start_link/0
|
||||
, start_link/1
|
||||
, stop/0
|
||||
]).
|
||||
-export([
|
||||
start_link/0,
|
||||
start_link/1,
|
||||
stop/0
|
||||
]).
|
||||
|
||||
%% Stats API.
|
||||
-export([ getstats/0
|
||||
, getstat/1
|
||||
, setstat/2
|
||||
, setstat/3
|
||||
, statsfun/1
|
||||
, statsfun/2
|
||||
]).
|
||||
-export([
|
||||
getstats/0,
|
||||
getstat/1,
|
||||
setstat/2,
|
||||
setstat/3,
|
||||
statsfun/1,
|
||||
statsfun/2
|
||||
]).
|
||||
|
||||
-export([ update_interval/2
|
||||
, update_interval/3
|
||||
, cancel_update/1
|
||||
]).
|
||||
-export([
|
||||
update_interval/2,
|
||||
update_interval/3,
|
||||
cancel_update/1
|
||||
]).
|
||||
|
||||
%% gen_server callbacks
|
||||
-export([ init/1
|
||||
, handle_call/3
|
||||
, handle_cast/2
|
||||
, handle_info/2
|
||||
, terminate/2
|
||||
, code_change/3
|
||||
]).
|
||||
-export([
|
||||
init/1,
|
||||
handle_call/3,
|
||||
handle_cast/2,
|
||||
handle_info/2,
|
||||
terminate/2,
|
||||
code_change/3
|
||||
]).
|
||||
|
||||
-export_type([stats/0]).
|
||||
|
||||
-record(update, {name, countdown, interval, func}).
|
||||
|
||||
-record(state, {
|
||||
timer :: maybe(reference()),
|
||||
updates :: [#update{}],
|
||||
tick_ms :: timeout()
|
||||
}).
|
||||
timer :: maybe(reference()),
|
||||
updates :: [#update{}],
|
||||
tick_ms :: timeout()
|
||||
}).
|
||||
|
||||
-type(stats() :: list({atom(), non_neg_integer()})).
|
||||
-type stats() :: list({atom(), non_neg_integer()}).
|
||||
|
||||
%% Connection stats
|
||||
-define(CONNECTION_STATS,
|
||||
[ 'connections.count' %% Count of Concurrent Connections
|
||||
, 'connections.max' %% Maximum Number of Concurrent Connections
|
||||
, 'live_connections.count' %% Count of connected clients
|
||||
, 'live_connections.max' %% Maximum number of connected clients
|
||||
]).
|
||||
%% Count of Concurrent Connections
|
||||
[
|
||||
'connections.count',
|
||||
%% Maximum Number of Concurrent Connections
|
||||
'connections.max',
|
||||
%% Count of connected clients
|
||||
'live_connections.count',
|
||||
%% Maximum number of connected clients
|
||||
'live_connections.max'
|
||||
]
|
||||
).
|
||||
|
||||
%% Channel stats
|
||||
-define(CHANNEL_STATS,
|
||||
['channels.count', %% Count of Concurrent Channels
|
||||
'channels.max' %% Maximum Number of Concurrent Channels
|
||||
]).
|
||||
%% Count of Concurrent Channels
|
||||
[
|
||||
'channels.count',
|
||||
%% Maximum Number of Concurrent Channels
|
||||
'channels.max'
|
||||
]
|
||||
).
|
||||
|
||||
%% Session stats
|
||||
-define(SESSION_STATS,
|
||||
['sessions.count', %% Count of Concurrent Sessions
|
||||
'sessions.max' %% Maximum Number of Concurrent Sessions
|
||||
]).
|
||||
%% Count of Concurrent Sessions
|
||||
[
|
||||
'sessions.count',
|
||||
%% Maximum Number of Concurrent Sessions
|
||||
'sessions.max'
|
||||
]
|
||||
).
|
||||
|
||||
%% PubSub stats
|
||||
-define(PUBSUB_STATS,
|
||||
['topics.count',
|
||||
'topics.max',
|
||||
'suboptions.count',
|
||||
'suboptions.max',
|
||||
'subscribers.count',
|
||||
'subscribers.max',
|
||||
'subscriptions.count',
|
||||
'subscriptions.max',
|
||||
'subscriptions.shared.count',
|
||||
'subscriptions.shared.max'
|
||||
]).
|
||||
-define(PUBSUB_STATS, [
|
||||
'topics.count',
|
||||
'topics.max',
|
||||
'suboptions.count',
|
||||
'suboptions.max',
|
||||
'subscribers.count',
|
||||
'subscribers.max',
|
||||
'subscriptions.count',
|
||||
'subscriptions.max',
|
||||
'subscriptions.shared.count',
|
||||
'subscriptions.shared.max'
|
||||
]).
|
||||
|
||||
%% Route stats
|
||||
-define(ROUTE_STATS,
|
||||
['routes.count',
|
||||
'routes.max'
|
||||
]).
|
||||
-define(ROUTE_STATS, [
|
||||
'routes.count',
|
||||
'routes.max'
|
||||
]).
|
||||
|
||||
%% Retained stats
|
||||
-define(RETAINED_STATS,
|
||||
['retained.count',
|
||||
'retained.max'
|
||||
]).
|
||||
-define(RETAINED_STATS, [
|
||||
'retained.count',
|
||||
'retained.max'
|
||||
]).
|
||||
|
||||
-define(TAB, ?MODULE).
|
||||
-define(SERVER, ?MODULE).
|
||||
|
||||
-type(opts() :: #{tick_ms := timeout()}).
|
||||
-type opts() :: #{tick_ms := timeout()}.
|
||||
|
||||
%% @doc Start stats server
|
||||
-spec(start_link() -> startlink_ret()).
|
||||
-spec start_link() -> startlink_ret().
|
||||
start_link() ->
|
||||
start_link(#{tick_ms => timer:seconds(1)}).
|
||||
|
||||
-spec(start_link(opts()) -> startlink_ret()).
|
||||
-spec start_link(opts()) -> startlink_ret().
|
||||
start_link(Opts) ->
|
||||
gen_server:start_link({local, ?SERVER}, ?MODULE, Opts, []).
|
||||
|
||||
-spec(stop() -> ok).
|
||||
-spec stop() -> ok.
|
||||
stop() ->
|
||||
gen_server:call(?SERVER, stop, infinity).
|
||||
|
||||
%% @doc Generate stats fun.
|
||||
-spec(statsfun(Stat :: atom()) -> fun()).
|
||||
-spec statsfun(Stat :: atom()) -> fun().
|
||||
statsfun(Stat) ->
|
||||
fun(Val) -> setstat(Stat, Val) end.
|
||||
|
||||
-spec(statsfun(Stat :: atom(), MaxStat :: atom()) -> fun()).
|
||||
-spec statsfun(Stat :: atom(), MaxStat :: atom()) -> fun().
|
||||
statsfun(Stat, MaxStat) ->
|
||||
fun(Val) -> setstat(Stat, MaxStat, Val) end.
|
||||
|
||||
%% @doc Get all statistics.
|
||||
-spec(getstats() -> stats()).
|
||||
-spec getstats() -> stats().
|
||||
getstats() ->
|
||||
case ets:info(?TAB, name) of
|
||||
undefined -> [];
|
@ -147,7 +164,7 @@ getstats() ->
|
|||
end.
|
||||
|
||||
%% @doc Get stats by name.
|
||||
-spec(getstat(atom()) -> maybe(non_neg_integer())).
|
||||
-spec getstat(atom()) -> maybe(non_neg_integer()).
|
||||
getstat(Name) ->
|
||||
case ets:lookup(?TAB, Name) of
|
||||
[{Name, Val}] -> Val;
|
@ -155,25 +172,28 @@ getstat(Name) ->
|
|||
end.
|
||||
|
||||
%% @doc Set stats
|
||||
-spec(setstat(Stat :: atom(), Val :: pos_integer()) -> boolean()).
|
||||
-spec setstat(Stat :: atom(), Val :: pos_integer()) -> boolean().
|
||||
setstat(Stat, Val) when is_integer(Val) ->
|
||||
safe_update_element(Stat, Val).
|
||||
|
||||
%% @doc Set stats with max value.
|
||||
-spec(setstat(Stat :: atom(), MaxStat :: atom(),
|
||||
Val :: pos_integer()) -> ok).
|
||||
-spec setstat(
|
||||
Stat :: atom(),
|
||||
MaxStat :: atom(),
|
||||
Val :: pos_integer()
|
||||
) -> ok.
|
||||
setstat(Stat, MaxStat, Val) when is_integer(Val) ->
|
||||
cast({setstat, Stat, MaxStat, Val}).
|
||||
|
||||
-spec(update_interval(atom(), fun()) -> ok).
|
||||
-spec update_interval(atom(), fun()) -> ok.
|
||||
update_interval(Name, UpFun) ->
|
||||
update_interval(Name, 1, UpFun).
|
||||
|
||||
-spec(update_interval(atom(), pos_integer(), fun()) -> ok).
|
||||
-spec update_interval(atom(), pos_integer(), fun()) -> ok.
|
||||
update_interval(Name, Secs, UpFun) when is_integer(Secs), Secs >= 1 ->
|
||||
cast({update_interval, rec(Name, Secs, UpFun)}).
|
||||
|
||||
-spec(cancel_update(atom()) -> ok).
|
||||
-spec cancel_update(atom()) -> ok.
|
||||
cancel_update(Name) ->
|
||||
cast({cancel_update, Name}).
|
||||
|
@ -188,13 +208,14 @@ cast(Msg) -> gen_server:cast(?SERVER, Msg).
|
|||
|
||||
init(#{tick_ms := TickMs}) ->
|
||||
ok = emqx_tables:new(?TAB, [public, set, {write_concurrency, true}]),
|
||||
Stats = lists:append([?CONNECTION_STATS,
|
||||
?CHANNEL_STATS,
|
||||
?SESSION_STATS,
|
||||
?PUBSUB_STATS,
|
||||
?ROUTE_STATS,
|
||||
?RETAINED_STATS
|
||||
]),
|
||||
Stats = lists:append([
|
||||
?CONNECTION_STATS,
|
||||
?CHANNEL_STATS,
|
||||
?SESSION_STATS,
|
||||
?PUBSUB_STATS,
|
||||
?ROUTE_STATS,
|
||||
?RETAINED_STATS
|
||||
]),
|
||||
true = ets:insert(?TAB, [{Name, 0} || Name <- Stats]),
|
||||
{ok, start_timer(#state{updates = [], tick_ms = TickMs}), hibernate}.
|
||||
|
@ -203,7 +224,6 @@ start_timer(#state{tick_ms = Ms} = State) ->
|
|||
|
||||
handle_call(stop, _From, State) ->
|
||||
{stop, normal, ok, State};
|
||||
|
||||
handle_call(Req, _From, State) ->
|
||||
?SLOG(error, #{msg => "unexpected_call", call => Req}),
|
||||
{reply, ignored, State}.
|
@ -212,59 +232,77 @@ handle_cast({setstat, Stat, MaxStat, Val}, State) ->
|
|||
try ets:lookup_element(?TAB, MaxStat, 2) of
|
||||
MaxVal when Val > MaxVal ->
|
||||
ets:update_element(?TAB, MaxStat, {2, Val});
|
||||
_ -> ok
|
||||
_ ->
|
||||
ok
|
||||
catch
|
||||
error:badarg ->
|
||||
ets:insert(?TAB, {MaxStat, Val})
|
||||
end,
|
||||
safe_update_element(Stat, Val),
|
||||
?tp(emqx_stats_setstat,
|
||||
#{ count_stat => Stat
|
||||
, max_stat => MaxStat
|
||||
, value => Val
|
||||
}),
|
||||
?tp(
|
||||
emqx_stats_setstat,
|
||||
#{
|
||||
count_stat => Stat,
|
||||
max_stat => MaxStat,
|
||||
value => Val
|
||||
}
|
||||
),
|
||||
{noreply, State};
|
||||
|
||||
handle_cast({update_interval, Update = #update{name = Name}},
|
||||
State = #state{updates = Updates}) ->
|
||||
NState = case lists:keyfind(Name, #update.name, Updates) of
|
||||
#update{} ->
|
||||
?SLOG(warning, #{msg => "duplicated_update",
|
||||
name => Name
|
||||
}),
|
||||
State;
|
||||
false -> State#state{updates = [Update | Updates]}
|
||||
end,
|
||||
handle_cast(
|
||||
{update_interval, Update = #update{name = Name}},
|
||||
State = #state{updates = Updates}
|
||||
) ->
|
||||
NState =
|
||||
case lists:keyfind(Name, #update.name, Updates) of
|
||||
#update{} ->
|
||||
?SLOG(warning, #{
|
||||
msg => "duplicated_update",
|
||||
name => Name
|
||||
}),
|
||||
State;
|
||||
false ->
|
||||
State#state{updates = [Update | Updates]}
|
||||
end,
|
||||
{noreply, NState};
|
||||
|
||||
handle_cast({cancel_update, Name}, State = #state{updates = Updates}) ->
|
||||
Updates1 = lists:keydelete(Name, #update.name, Updates),
|
||||
{noreply, State#state{updates = Updates1}};
|
||||
|
||||
handle_cast(Msg, State) ->
|
||||
?SLOG(error, #{msg => "unexpected_cast", cast => Msg}),
|
||||
{noreply, State}.
|
||||
|
||||
handle_info({timeout, TRef, tick}, State = #state{timer = TRef, updates = Updates}) ->
|
||||
Updates1 = lists:foldl(
|
||||
fun(Update = #update{name = Name, countdown = C, interval = I,
|
||||
func = UpFun}, Acc) when C =< 0 ->
|
||||
try UpFun()
|
||||
catch
|
||||
Error : Reason : Stacktrace ->
|
||||
?SLOG(error, #{msg => "update_name_failed",
|
||||
name => Name,
|
||||
exception => Error,
|
||||
reason => Reason,
|
||||
stacktrace => Stacktrace
|
||||
})
|
||||
end,
|
||||
[Update#update{countdown = I} | Acc];
|
||||
(Update = #update{countdown = C}, Acc) ->
|
||||
[Update#update{countdown = C - 1} | Acc]
|
||||
end, [], Updates),
|
||||
fun
|
||||
(
|
||||
Update = #update{
|
||||
name = Name,
|
||||
countdown = C,
|
||||
interval = I,
|
||||
func = UpFun
|
||||
},
|
||||
Acc
|
||||
) when C =< 0 ->
|
||||
try
|
||||
UpFun()
|
||||
catch
|
||||
Error:Reason:Stacktrace ->
|
||||
?SLOG(error, #{
|
||||
msg => "update_name_failed",
|
||||
name => Name,
|
||||
exception => Error,
|
||||
reason => Reason,
|
||||
stacktrace => Stacktrace
|
||||
})
|
||||
end,
|
||||
[Update#update{countdown = I} | Acc];
|
||||
(Update = #update{countdown = C}, Acc) ->
|
||||
[Update#update{countdown = C - 1} | Acc]
|
||||
end,
|
||||
[],
|
||||
Updates
|
||||
),
|
||||
{noreply, start_timer(State#state{updates = Updates1}), hibernate};
|
||||
|
||||
handle_info(Info, State) ->
|
||||
?SLOG(error, #{msg => "unexpected_info", info => Info}),
|
||||
{noreply, State}.
|
@ -283,7 +321,8 @@ safe_update_element(Key, Val) ->
|
|||
try ets:update_element(?TAB, Key, {2, Val}) of
|
||||
false ->
|
||||
ets:insert_new(?TAB, {Key, Val});
|
||||
true -> true
|
||||
true ->
|
||||
true
|
||||
catch
|
||||
error:badarg ->
|
||||
?SLOG(warning, #{
|
||||
|
|
@ -20,17 +20,19 @@
|
|||
|
||||
-include("types.hrl").
|
||||
|
||||
-export([ start_link/0
|
||||
, start_child/1
|
||||
, start_child/2
|
||||
, stop_child/1
|
||||
]).
|
||||
-export([
|
||||
start_link/0,
|
||||
start_child/1,
|
||||
start_child/2,
|
||||
stop_child/1
|
||||
]).
|
||||
|
||||
-export([init/1]).
|
||||
|
||||
-type(startchild_ret() :: {ok, supervisor:child()}
|
||||
| {ok, supervisor:child(), term()}
|
||||
| {error, term()}).
|
||||
-type startchild_ret() ::
|
||||
{ok, supervisor:child()}
|
||||
| {ok, supervisor:child(), term()}
|
||||
| {error, term()}.
|
||||
|
||||
-define(SUP, ?MODULE).
|
||||
|
||||
@ -38,19 +40,19 @@
|||
%% API
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(start_link() -> startlink_ret()).
|
||||
-spec start_link() -> startlink_ret().
|
||||
start_link() ->
|
||||
supervisor:start_link({local, ?SUP}, ?MODULE, []).
|
||||
|
||||
-spec(start_child(supervisor:child_spec()) -> startchild_ret()).
|
||||
-spec start_child(supervisor:child_spec()) -> startchild_ret().
|
||||
start_child(ChildSpec) when is_map(ChildSpec) ->
|
||||
supervisor:start_child(?SUP, ChildSpec).
|
||||
|
||||
-spec(start_child(module(), worker | supervisor) -> startchild_ret()).
|
||||
-spec start_child(module(), worker | supervisor) -> startchild_ret().
|
||||
start_child(Mod, Type) ->
|
||||
start_child(child_spec(Mod, Type)).
|
||||
|
||||
-spec(stop_child(supervisor:child_id()) -> ok | {error, term()}).
|
||||
-spec stop_child(supervisor:child_id()) -> ok | {error, term()}.
|
||||
stop_child(ChildId) ->
|
||||
case supervisor:terminate_child(?SUP, ChildId) of
|
||||
ok -> supervisor:delete_child(?SUP, ChildId);
|
@ -69,16 +71,18 @@ init([]) ->
|
|||
CMSup = child_spec(emqx_cm_sup, supervisor),
|
||||
SysSup = child_spec(emqx_sys_sup, supervisor),
|
||||
Limiter = child_spec(emqx_limiter_sup, supervisor),
|
||||
Children = [KernelSup] ++
|
||||
[SessionSup || emqx_persistent_session:is_store_enabled()] ++
|
||||
[RouterSup || emqx_boot:is_enabled(router)] ++
|
||||
[BrokerSup || emqx_boot:is_enabled(broker)] ++
|
||||
[CMSup || emqx_boot:is_enabled(broker)] ++
|
||||
[SysSup, Limiter],
|
||||
SupFlags = #{strategy => one_for_all,
|
||||
intensity => 0,
|
||||
period => 1
|
||||
},
|
||||
Children =
|
||||
[KernelSup] ++
|
||||
[SessionSup || emqx_persistent_session:is_store_enabled()] ++
|
||||
[RouterSup || emqx_boot:is_enabled(router)] ++
|
||||
[BrokerSup || emqx_boot:is_enabled(broker)] ++
|
||||
[CMSup || emqx_boot:is_enabled(broker)] ++
|
||||
[SysSup, Limiter],
|
||||
SupFlags = #{
|
||||
strategy => one_for_all,
|
||||
intensity => 0,
|
||||
period => 1
|
||||
},
|
||||
{ok, {SupFlags, Children}}.
|
||||
|
||||
%%--------------------------------------------------------------------
|
@ -86,20 +90,20 @@ init([]) ->
|
|||
%%--------------------------------------------------------------------
|
||||
|
||||
child_spec(Mod, supervisor) ->
|
||||
#{id => Mod,
|
||||
start => {Mod, start_link, []},
|
||||
restart => permanent,
|
||||
shutdown => infinity,
|
||||
type => supervisor,
|
||||
modules => [Mod]
|
||||
};
|
||||
|
||||
#{
|
||||
id => Mod,
|
||||
start => {Mod, start_link, []},
|
||||
restart => permanent,
|
||||
shutdown => infinity,
|
||||
type => supervisor,
|
||||
modules => [Mod]
|
||||
};
|
||||
child_spec(Mod, worker) ->
|
||||
#{id => Mod,
|
||||
start => {Mod, start_link, []},
|
||||
restart => permanent,
|
||||
shutdown => 15000,
|
||||
type => worker,
|
||||
modules => [Mod]
|
||||
}.
|
||||
|
||||
#{
|
||||
id => Mod,
|
||||
start => {Mod, start_link, []},
|
||||
restart => permanent,
|
||||
shutdown => 15000,
|
||||
type => worker,
|
||||
modules => [Mod]
|
||||
}.
|
||||
|
|
|
@ -22,32 +22,35 @@
|||
-include("types.hrl").
|
||||
-include("logger.hrl").
|
||||
|
||||
-export([
|
||||
start_link/0,
|
||||
stop/0
|
||||
]).
|
||||
|
||||
-export([ start_link/0
|
||||
, stop/0
|
||||
]).
|
||||
|
||||
-export([ version/0
|
||||
, uptime/0
|
||||
, datetime/0
|
||||
, sysdescr/0
|
||||
]).
|
||||
-export([
|
||||
version/0,
|
||||
uptime/0,
|
||||
datetime/0,
|
||||
sysdescr/0
|
||||
]).
|
||||
|
||||
-export([info/0]).
|
||||
|
||||
%% gen_server callbacks
|
||||
-export([ init/1
|
||||
, handle_call/3
|
||||
, handle_cast/2
|
||||
, handle_info/2
|
||||
, terminate/2
|
||||
]).
|
||||
-export([
|
||||
init/1,
|
||||
handle_call/3,
|
||||
handle_cast/2,
|
||||
handle_info/2,
|
||||
terminate/2
|
||||
]).
|
||||
|
||||
-export([ on_client_connected/2
|
||||
, on_client_disconnected/3
|
||||
, on_client_subscribed/3
|
||||
, on_client_unsubscribed/3
|
||||
]).
|
||||
-export([
|
||||
on_client_connected/2,
|
||||
on_client_disconnected/3,
|
||||
on_client_subscribed/3,
|
||||
on_client_unsubscribed/3
|
||||
]).
|
||||
|
||||
-ifdef(TEST).
|
||||
-compile(export_all).
|
@ -57,27 +60,33 @@
|
|||
-import(emqx_topic, [systop/1]).
|
||||
-import(emqx_misc, [start_timer/2]).
|
||||
|
||||
-record(state,
|
||||
{heartbeat :: maybe(reference())
|
||||
, ticker :: maybe(reference())
|
||||
, sysdescr :: binary()
|
||||
}).
|
||||
-record(state, {
|
||||
heartbeat :: maybe(reference()),
|
||||
ticker :: maybe(reference()),
|
||||
sysdescr :: binary()
|
||||
}).
|
||||
|
||||
-define(APP, emqx).
|
||||
-define(SYS, ?MODULE).
|
||||
|
||||
-define(INFO_KEYS,
|
||||
[ version % Broker version
|
||||
, uptime % Broker uptime
|
||||
, datetime % Broker local datetime
|
||||
, sysdescr % Broker description
|
||||
]).
|
||||
% Broker version
|
||||
[
|
||||
version,
|
||||
% Broker uptime
|
||||
uptime,
|
||||
% Broker local datetime
|
||||
datetime,
|
||||
% Broker description
|
||||
sysdescr
|
||||
]
|
||||
).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% APIs
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(start_link() -> {ok, pid()} | ignore | {error, any()}).
|
||||
-spec start_link() -> {ok, pid()} | ignore | {error, any()}.
|
||||
start_link() ->
|
||||
gen_server:start_link({local, ?SYS}, ?MODULE, [], []).
|
||||
|
@ -85,26 +94,28 @@ stop() ->
|
|||
gen_server:stop(?SYS).
|
||||
|
||||
%% @doc Get sys version
|
||||
-spec(version() -> string()).
|
||||
-spec version() -> string().
|
||||
version() -> emqx_app:get_release().
|
||||
|
||||
%% @doc Get sys description
|
||||
-spec(sysdescr() -> string()).
|
||||
-spec sysdescr() -> string().
|
||||
sysdescr() -> emqx_app:get_description().
|
||||
|
||||
%% @doc Get sys uptime
|
||||
-spec(uptime() -> Milliseconds :: integer()).
|
||||
-spec uptime() -> Milliseconds :: integer().
|
||||
uptime() ->
|
||||
{TotalWallClock, _} = erlang:statistics(wall_clock),
|
||||
TotalWallClock.
|
||||
|
||||
%% @doc Get sys datetime
|
||||
-spec(datetime() -> string()).
|
||||
-spec datetime() -> string().
|
||||
datetime() ->
|
||||
{{Y, M, D}, {H, MM, S}} = calendar:local_time(),
|
||||
lists:flatten(
|
||||
io_lib:format(
|
||||
"~4..0w-~2..0w-~2..0w ~2..0w:~2..0w:~2..0w", [Y, M, D, H, MM, S])).
|
||||
"~4..0w-~2..0w-~2..0w ~2..0w:~2..0w:~2..0w", [Y, M, D, H, MM, S]
|
||||
)
|
||||
).
|
||||
|
||||
sys_interval() ->
|
||||
emqx:get_config([sys_topics, sys_msg_interval]).
|
||||
|
|
@@ -116,19 +127,21 @@ sys_event_messages() ->
emqx:get_config([sys_topics, sys_event_messages]).

%% @doc Get sys info
-spec(info() -> list(tuple())).
-spec info() -> list(tuple()).
info() ->
[{version, version()},
{sysdescr, sysdescr()},
{uptime, uptime()},
{datetime, datetime()}].
[
{version, version()},
{sysdescr, sysdescr()},
{uptime, uptime()},
{datetime, datetime()}
].

%%--------------------------------------------------------------------
%% gen_server callbacks
%%--------------------------------------------------------------------

init([]) ->
State = #state{sysdescr = iolist_to_binary(sysdescr())},
State = #state{sysdescr = iolist_to_binary(sysdescr())},
load_event_hooks(),
{ok, heartbeat(tick(State))}.

@@ -139,11 +152,15 @@ tick(State) ->

load_event_hooks() ->
lists:foreach(
fun({_, false}) -> ok;
({K, true}) ->
{HookPoint, Fun} = hook_and_fun(K),
emqx_hooks:put(HookPoint, {?MODULE, Fun, []})
end, maps:to_list(sys_event_messages())).
fun
({_, false}) ->
ok;
({K, true}) ->
{HookPoint, Fun} = hook_and_fun(K),
emqx_hooks:put(HookPoint, {?MODULE, Fun, []})
end,
maps:to_list(sys_event_messages())
).

handle_call(Req, _From, State) ->
?SLOG(error, #{msg => "unexpected_call", call => Req}),

@@ -157,7 +174,6 @@ handle_info({timeout, TRef, heartbeat}, State = #state{heartbeat = TRef}) ->
publish_any(uptime, integer_to_binary(uptime())),
publish_any(datetime, iolist_to_binary(datetime())),
{noreply, heartbeat(State)};

handle_info({timeout, TRef, tick}, State = #state{ticker = TRef, sysdescr = Descr}) ->
publish_any(version, version()),
publish_any(sysdescr, Descr),

@@ -165,7 +181,6 @@ handle_info({timeout, TRef, tick}, State = #state{ticker = TRef, sysdescr = Desc
publish_any(stats, emqx_stats:getstats()),
publish_any(metrics, emqx_metrics:all()),
{noreply, tick(State), hibernate};

handle_info(Info, State) ->
?SLOG(error, #{msg => "unexpected_info", info => Info}),
{noreply, State}.

@@ -175,10 +190,13 @@ terminate(_Reason, #state{heartbeat = TRef1, ticker = TRef2}) ->
lists:foreach(fun emqx_misc:cancel_timer/1, [TRef1, TRef2]).

unload_event_hooks() ->
lists:foreach(fun({K, _}) ->
{HookPoint, Fun} = hook_and_fun(K),
emqx_hooks:del(HookPoint, {?MODULE, Fun})
end, maps:to_list(sys_event_messages())).
lists:foreach(
fun({K, _}) ->
{HookPoint, Fun} = hook_and_fun(K),
emqx_hooks:del(HookPoint, {?MODULE, Fun})
end,
maps:to_list(sys_event_messages())
).

%%--------------------------------------------------------------------
%% hook callbacks
@@ -187,20 +205,22 @@ unload_event_hooks() ->
on_client_connected(ClientInfo, ConnInfo) ->
Payload0 = common_infos(ClientInfo, ConnInfo),
Payload = Payload0#{
keepalive => maps:get(keepalive, ConnInfo, 0),
clean_start => maps:get(clean_start, ConnInfo, true),
expiry_interval => maps:get(expiry_interval, ConnInfo, 0)
},
keepalive => maps:get(keepalive, ConnInfo, 0),
clean_start => maps:get(clean_start, ConnInfo, true),
expiry_interval => maps:get(expiry_interval, ConnInfo, 0)
},
publish(connected, Payload).

on_client_disconnected(ClientInfo, Reason,
ConnInfo = #{disconnected_at := DisconnectedAt}) ->

on_client_disconnected(
ClientInfo,
Reason,
ConnInfo = #{disconnected_at := DisconnectedAt}
) ->
Payload0 = common_infos(ClientInfo, ConnInfo),
Payload = Payload0#{
reason => reason(Reason),
disconnected_at => DisconnectedAt
},
reason => reason(Reason),
disconnected_at => DisconnectedAt
},
publish(disconnected, Payload).

-compile({inline, [reason/1]}).

@@ -209,29 +229,41 @@ reason({shutdown, Reason}) when is_atom(Reason) -> Reason;
reason({Error, _}) when is_atom(Error) -> Error;
reason(_) -> internal_error.

on_client_subscribed(_ClientInfo = #{clientid := ClientId,
username := Username,
protocol := Protocol},
Topic, SubOpts) ->
Payload = #{clientid => ClientId,
username => Username,
protocol => Protocol,
topic => Topic,
subopts => SubOpts,
ts => erlang:system_time(millisecond)
},
on_client_subscribed(
_ClientInfo = #{
clientid := ClientId,
username := Username,
protocol := Protocol
},
Topic,
SubOpts
) ->
Payload = #{
clientid => ClientId,
username => Username,
protocol => Protocol,
topic => Topic,
subopts => SubOpts,
ts => erlang:system_time(millisecond)
},
publish(subscribed, Payload).

on_client_unsubscribed(_ClientInfo = #{clientid := ClientId,
username := Username,
protocol := Protocol},
Topic, _SubOpts) ->
Payload = #{clientid => ClientId,
username => Username,
protocol => Protocol,
topic => Topic,
ts => erlang:system_time(millisecond)
},
on_client_unsubscribed(
_ClientInfo = #{
clientid := ClientId,
username := Username,
protocol := Protocol
},
Topic,
_SubOpts
) ->
Payload = #{
clientid => ClientId,
username => Username,
protocol => Protocol,
topic => Topic,
ts => erlang:system_time(millisecond)
},
publish(unsubscribed, Payload).

%%--------------------------------------------------------------------
@@ -263,13 +295,21 @@ publish(brokers, Nodes) ->
Payload = string:join([atom_to_list(N) || N <- Nodes], ","),
safe_publish(<<"$SYS/brokers">>, #{retain => true}, Payload);
publish(stats, Stats) ->
[safe_publish(systop(lists:concat(['stats/', Stat])), integer_to_binary(Val))
|| {Stat, Val} <- Stats, is_atom(Stat), is_integer(Val)];
[
safe_publish(systop(lists:concat(['stats/', Stat])), integer_to_binary(Val))
|| {Stat, Val} <- Stats, is_atom(Stat), is_integer(Val)
];
publish(metrics, Metrics) ->
[safe_publish(systop(metric_topic(Name)), integer_to_binary(Val))
|| {Name, Val} <- Metrics, is_atom(Name), is_integer(Val)];
publish(Event, Payload) when Event == connected; Event == disconnected;
Event == subscribed; Event == unsubscribed ->
[
safe_publish(systop(metric_topic(Name)), integer_to_binary(Val))
|| {Name, Val} <- Metrics, is_atom(Name), is_integer(Val)
];
publish(Event, Payload) when
Event == connected;
Event == disconnected;
Event == subscribed;
Event == unsubscribed
->
Topic = event_topic(Event, Payload),
safe_publish(Topic, emqx_json:encode(Payload)).

@@ -280,42 +320,55 @@ safe_publish(Topic, Payload) ->
safe_publish(Topic, #{}, Payload).
safe_publish(Topic, Flags, Payload) ->
emqx_broker:safe_publish(
emqx_message:set_flags(
maps:merge(#{sys => true}, Flags),
emqx_message:make(?SYS, Topic, iolist_to_binary(Payload)))).
emqx_message:set_flags(
maps:merge(#{sys => true}, Flags),
emqx_message:make(?SYS, Topic, iolist_to_binary(Payload))
)
).

common_infos(
_ClientInfo = #{clientid := ClientId,
username := Username,
peerhost := PeerHost,
sockport := SockPort,
protocol := Protocol
},
_ConnInfo = #{proto_name := ProtoName,
proto_ver := ProtoVer,
connected_at := ConnectedAt
}) ->
#{clientid => ClientId,
username => Username,
ipaddress => ntoa(PeerHost),
sockport => SockPort,
protocol => Protocol,
proto_name => ProtoName,
proto_ver => ProtoVer,
connected_at => ConnectedAt,
ts => erlang:system_time(millisecond)
}.
_ClientInfo = #{
clientid := ClientId,
username := Username,
peerhost := PeerHost,
sockport := SockPort,
protocol := Protocol
},
_ConnInfo = #{
proto_name := ProtoName,
proto_ver := ProtoVer,
connected_at := ConnectedAt
}
) ->
#{
clientid => ClientId,
username => Username,
ipaddress => ntoa(PeerHost),
sockport => SockPort,
protocol => Protocol,
proto_name => ProtoName,
proto_ver => ProtoVer,
connected_at => ConnectedAt,
ts => erlang:system_time(millisecond)
}.

ntoa(undefined) -> undefined;
ntoa({IpAddr, Port}) ->
iolist_to_binary([inet:ntoa(IpAddr), ":", integer_to_list(Port)]);
ntoa(IpAddr) ->
iolist_to_binary(inet:ntoa(IpAddr)).
ntoa({IpAddr, Port}) -> iolist_to_binary([inet:ntoa(IpAddr), ":", integer_to_list(Port)]);
ntoa(IpAddr) -> iolist_to_binary(inet:ntoa(IpAddr)).

event_topic(Event, #{clientid := ClientId, protocol := mqtt}) ->
iolist_to_binary(
[systop("clients"), "/", ClientId, "/", atom_to_binary(Event)]);
[systop("clients"), "/", ClientId, "/", atom_to_binary(Event)]
);
event_topic(Event, #{clientid := ClientId, protocol := GwName}) ->
iolist_to_binary(
[systop("gateway"), "/", atom_to_binary(GwName),
"/clients/", ClientId, "/", atom_to_binary(Event)]).
[
systop("gateway"),
"/",
atom_to_binary(GwName),
"/clients/",
ClientId,
"/",
atom_to_binary(Event)
]
).
@@ -21,25 +21,25 @@
-include("types.hrl").
-include("logger.hrl").

-export([start_link/0]).

%% compress unused warning
-export([procinfo/1]).

%% gen_server callbacks
-export([ init/1
, handle_call/3
, handle_cast/2
, handle_info/2
, terminate/2
, code_change/3
]).
-export([
init/1,
handle_call/3,
handle_cast/2,
handle_info/2,
terminate/2,
code_change/3
]).

-define(SYSMON, ?MODULE).

%% @doc Start the system monitor.
-spec(start_link() -> startlink_ret()).
-spec start_link() -> startlink_ret().
start_link() ->
gen_server:start_link({local, ?SYSMON}, ?MODULE, [], []).

@@ -63,23 +63,23 @@ sysm_opts() ->
sysm_opts(maps:to_list(emqx:get_config([sysmon, vm])), []).
sysm_opts([], Acc) ->
Acc;
sysm_opts([{_, disabled}|Opts], Acc) ->
sysm_opts([{_, disabled} | Opts], Acc) ->
sysm_opts(Opts, Acc);
sysm_opts([{long_gc, Ms}|Opts], Acc) when is_integer(Ms) ->
sysm_opts(Opts, [{long_gc, Ms}|Acc]);
sysm_opts([{long_schedule, Ms}|Opts], Acc) when is_integer(Ms) ->
sysm_opts(Opts, [{long_schedule, Ms}|Acc]);
sysm_opts([{large_heap, Size}|Opts], Acc) when is_integer(Size) ->
sysm_opts(Opts, [{large_heap, Size}|Acc]);
sysm_opts([{busy_port, true}|Opts], Acc) ->
sysm_opts(Opts, [busy_port|Acc]);
sysm_opts([{busy_port, false}|Opts], Acc) ->
sysm_opts([{long_gc, Ms} | Opts], Acc) when is_integer(Ms) ->
sysm_opts(Opts, [{long_gc, Ms} | Acc]);
sysm_opts([{long_schedule, Ms} | Opts], Acc) when is_integer(Ms) ->
sysm_opts(Opts, [{long_schedule, Ms} | Acc]);
sysm_opts([{large_heap, Size} | Opts], Acc) when is_integer(Size) ->
sysm_opts(Opts, [{large_heap, Size} | Acc]);
sysm_opts([{busy_port, true} | Opts], Acc) ->
sysm_opts(Opts, [busy_port | Acc]);
sysm_opts([{busy_port, false} | Opts], Acc) ->
sysm_opts(Opts, Acc);
sysm_opts([{busy_dist_port, true}|Opts], Acc) ->
sysm_opts(Opts, [busy_dist_port|Acc]);
sysm_opts([{busy_dist_port, false}|Opts], Acc) ->
sysm_opts([{busy_dist_port, true} | Opts], Acc) ->
sysm_opts(Opts, [busy_dist_port | Acc]);
sysm_opts([{busy_dist_port, false} | Opts], Acc) ->
sysm_opts(Opts, Acc);
sysm_opts([_Opt|Opts], Acc) ->
sysm_opts([_Opt | Opts], Acc) ->
sysm_opts(Opts, Acc).

handle_call(Req, _From, State) ->
@@ -91,70 +91,91 @@ handle_cast(Msg, State) ->
{noreply, State}.

handle_info({monitor, Pid, long_gc, Info}, State) ->
suppress({long_gc, Pid},
fun() ->
WarnMsg = io_lib:format("long_gc warning: pid = ~p", [Pid]),
?SLOG(warning, #{msg => long_gc,
info => Info,
porcinfo => procinfo(Pid)
}),
safe_publish(long_gc, WarnMsg)
end, State);

suppress(
{long_gc, Pid},
fun() ->
WarnMsg = io_lib:format("long_gc warning: pid = ~p", [Pid]),
?SLOG(warning, #{
msg => long_gc,
info => Info,
porcinfo => procinfo(Pid)
}),
safe_publish(long_gc, WarnMsg)
end,
State
);
handle_info({monitor, Pid, long_schedule, Info}, State) when is_pid(Pid) ->
suppress({long_schedule, Pid},
fun() ->
WarnMsg = io_lib:format("long_schedule warning: pid = ~p", [Pid]),
?SLOG(warning, #{msg => long_schedule,
info => Info,
procinfo => procinfo(Pid)}),
safe_publish(long_schedule, WarnMsg)
end, State);

suppress(
{long_schedule, Pid},
fun() ->
WarnMsg = io_lib:format("long_schedule warning: pid = ~p", [Pid]),
?SLOG(warning, #{
msg => long_schedule,
info => Info,
procinfo => procinfo(Pid)
}),
safe_publish(long_schedule, WarnMsg)
end,
State
);
handle_info({monitor, Port, long_schedule, Info}, State) when is_port(Port) ->
suppress({long_schedule, Port},
fun() ->
WarnMsg = io_lib:format("long_schedule warning: port = ~p", [Port]),
?SLOG(warning, #{msg => long_schedule,
info => Info,
portinfo => portinfo(Port)}),
safe_publish(long_schedule, WarnMsg)
end, State);

suppress(
{long_schedule, Port},
fun() ->
WarnMsg = io_lib:format("long_schedule warning: port = ~p", [Port]),
?SLOG(warning, #{
msg => long_schedule,
info => Info,
portinfo => portinfo(Port)
}),
safe_publish(long_schedule, WarnMsg)
end,
State
);
handle_info({monitor, Pid, large_heap, Info}, State) ->
suppress({large_heap, Pid},
fun() ->
WarnMsg = io_lib:format("large_heap warning: pid = ~p", [Pid]),
?SLOG(warning, #{msg => large_heap,
info => Info,
procinfo => procinfo(Pid)}),
safe_publish(large_heap, WarnMsg)
end, State);

suppress(
{large_heap, Pid},
fun() ->
WarnMsg = io_lib:format("large_heap warning: pid = ~p", [Pid]),
?SLOG(warning, #{
msg => large_heap,
info => Info,
procinfo => procinfo(Pid)
}),
safe_publish(large_heap, WarnMsg)
end,
State
);
handle_info({monitor, SusPid, busy_port, Port}, State) ->
suppress({busy_port, Port},
fun() ->
WarnMsg = io_lib:format("busy_port warning: suspid = ~p, port = ~p", [SusPid, Port]),
?SLOG(warning, #{msg => busy_port,
portinfo => portinfo(Port),
procinfo => procinfo(SusPid)
}),
safe_publish(busy_port, WarnMsg)
end, State);

suppress(
{busy_port, Port},
fun() ->
WarnMsg = io_lib:format("busy_port warning: suspid = ~p, port = ~p", [SusPid, Port]),
?SLOG(warning, #{
msg => busy_port,
portinfo => portinfo(Port),
procinfo => procinfo(SusPid)
}),
safe_publish(busy_port, WarnMsg)
end,
State
);
handle_info({monitor, SusPid, busy_dist_port, Port}, State) ->
suppress({busy_dist_port, Port},
fun() ->
WarnMsg = io_lib:format("busy_dist_port warning: suspid = ~p, port = ~p", [SusPid, Port]),
?SLOG(warning, #{msg => busy_dist_port,
portinfo => portinfo(Port),
procinfo => procinfo(SusPid)}),
safe_publish(busy_dist_port, WarnMsg)
end, State);

suppress(
{busy_dist_port, Port},
fun() ->
WarnMsg = io_lib:format("busy_dist_port warning: suspid = ~p, port = ~p", [SusPid, Port]),
?SLOG(warning, #{
msg => busy_dist_port,
portinfo => portinfo(Port),
procinfo => procinfo(SusPid)
}),
safe_publish(busy_dist_port, WarnMsg)
end,
State
);
handle_info({timeout, _Ref, reset}, State) ->
{noreply, State#{events := []}, hibernate};

handle_info(Info, State) ->
?SLOG(error, #{msg => "unexpected_info", info => Info}),
{noreply, State}.
@@ -182,13 +203,13 @@ suppress(Key, SuccFun, State = #{events := Events}) ->
{noreply, State};
false ->
_ = SuccFun(),
{noreply, State#{events := [Key|Events]}}
{noreply, State#{events := [Key | Events]}}
end.

procinfo(Pid) ->
[{pid, Pid} | procinfo_l(emqx_vm:get_process_gc_info(Pid))] ++
get_proc_lib_initial_call(Pid) ++
procinfo_l(emqx_vm:get_process_info(Pid)).
get_proc_lib_initial_call(Pid) ++
procinfo_l(emqx_vm:get_process_info(Pid)).

procinfo_l(undefined) -> [];
procinfo_l(List) -> List.
@@ -26,11 +26,13 @@ start_link() ->
supervisor:start_link({local, ?MODULE}, ?MODULE, []).

init([]) ->
Childs = [child_spec(emqx_sys),
child_spec(emqx_alarm),
child_spec(emqx_sys_mon),
child_spec(emqx_os_mon),
child_spec(emqx_vm_mon)],
Childs = [
child_spec(emqx_sys),
child_spec(emqx_alarm),
child_spec(emqx_sys_mon),
child_spec(emqx_os_mon),
child_spec(emqx_vm_mon)
],
{ok, {{one_for_one, 10, 100}, Childs}}.

%%--------------------------------------------------------------------

@@ -41,10 +43,11 @@ child_spec(Mod) ->
child_spec(Mod, []).

child_spec(Mod, Args) ->
#{id => Mod,
start => {Mod, start_link, Args},
restart => permanent,
shutdown => 5000,
type => worker,
modules => [Mod]
}.
#{
id => Mod,
start => {Mod, start_link, Args},
restart => permanent,
shutdown => 5000,
type => worker,
modules => [Mod]
}.
@@ -16,50 +16,54 @@

-module(emqx_tables).

-export([ new/1
, new/2
]).
-export([
new/1,
new/2
]).

-export([ lookup_value/2
, lookup_value/3
]).
-export([
lookup_value/2,
lookup_value/3
]).

-export([delete/1]).

%% Create an ets table.
-spec(new(atom()) -> ok).
-spec new(atom()) -> ok.
new(Tab) ->
new(Tab, []).

%% Create a named_table ets.
-spec(new(atom(), list()) -> ok).
-spec new(atom(), list()) -> ok.
new(Tab, Opts) ->
case ets:info(Tab, name) of
undefined ->
_ = ets:new(Tab, lists:usort([named_table | Opts])),
ok;
Tab -> ok
Tab ->
ok
end.

%% KV lookup
-spec(lookup_value(ets:tab(), term()) -> any()).
-spec lookup_value(ets:tab(), term()) -> any().
lookup_value(Tab, Key) ->
lookup_value(Tab, Key, undefined).

-spec(lookup_value(ets:tab(), term(), any()) -> any()).
-spec lookup_value(ets:tab(), term(), any()) -> any().
lookup_value(Tab, Key, Def) ->
try ets:lookup_element(Tab, Key, 2)
try
ets:lookup_element(Tab, Key, 2)
catch
error:badarg -> Def
end.

%% Delete the ets table.
-spec(delete(ets:tab()) -> ok).
-spec delete(ets:tab()) -> ok.
delete(Tab) ->
case ets:info(Tab, name) of
undefined -> ok;
undefined ->
ok;
Tab ->
ets:delete(Tab),
ok
end.
@@ -17,20 +17,22 @@
-module(emqx_tls_lib).

%% version & cipher suites
-export([ default_versions/0
, integral_versions/1
, default_ciphers/0
, selected_ciphers/1
, integral_ciphers/2
, drop_tls13_for_old_otp/1
, all_ciphers/0
]).
-export([
default_versions/0,
integral_versions/1,
default_ciphers/0,
selected_ciphers/1,
integral_ciphers/2,
drop_tls13_for_old_otp/1,
all_ciphers/0
]).

%% SSL files
-export([ ensure_ssl_files/2
, delete_ssl_files/3
, file_content_as_options/1
]).
-export([
ensure_ssl_files/2,
delete_ssl_files/3,
file_content_as_options/1
]).

-include("logger.hrl").

@@ -52,7 +54,7 @@ default_versions() -> available_versions().
%% raise an error exception if non of them are available.
%% The input list can be a string/binary of comma separated versions.
-spec integral_versions(undefined | string() | binary() | [ssl:tls_version()]) ->
[ssl:tls_version()].
[ssl:tls_version()].
integral_versions(undefined) ->
integral_versions(default_versions());
integral_versions([]) ->

@@ -66,10 +68,12 @@ integral_versions(Desired) when is_binary(Desired) ->
integral_versions(Desired) ->
Available = available_versions(),
case lists:filter(fun(V) -> lists:member(V, Available) end, Desired) of
[] -> erlang:error(#{ reason => no_available_tls_version
, desired => Desired
, available => Available
});
[] ->
erlang:error(#{
reason => no_available_tls_version,
desired => Desired,
available => Available
});
Filtered ->
Filtered
end.

@@ -89,7 +93,6 @@ all_ciphers(Versions) ->
%% assert non-empty
[_ | _] = dedup(lists:append([ssl:cipher_suites(all, V, openssl) || V <- Versions])).

%% @doc All Pre-selected TLS ciphers.
default_ciphers() ->
selected_ciphers(available_versions()).

@@ -97,8 +100,12 @@ default_ciphers() ->
%% @doc Pre-selected TLS ciphers for given versions..
selected_ciphers(Vsns) ->
All = all_ciphers(Vsns),
dedup(lists:filter(fun(Cipher) -> lists:member(Cipher, All) end,
lists:flatmap(fun do_selected_ciphers/1, Vsns))).
dedup(
lists:filter(
fun(Cipher) -> lists:member(Cipher, All) end,
lists:flatmap(fun do_selected_ciphers/1, Vsns)
)
).

do_selected_ciphers('tlsv1.3') ->
case lists:member('tlsv1.3', proplists:get_value(available, ssl:versions())) of
@@ -106,24 +113,49 @@ do_selected_ciphers('tlsv1.3') ->
false -> []
end ++ do_selected_ciphers('tlsv1.2');
do_selected_ciphers(_) ->
[ "ECDHE-ECDSA-AES256-GCM-SHA384",
"ECDHE-RSA-AES256-GCM-SHA384", "ECDHE-ECDSA-AES256-SHA384", "ECDHE-RSA-AES256-SHA384",
"ECDH-ECDSA-AES256-GCM-SHA384", "ECDH-RSA-AES256-GCM-SHA384",
"ECDH-ECDSA-AES256-SHA384", "ECDH-RSA-AES256-SHA384", "DHE-DSS-AES256-GCM-SHA384",
"DHE-DSS-AES256-SHA256", "AES256-GCM-SHA384", "AES256-SHA256",
"ECDHE-ECDSA-AES128-GCM-SHA256", "ECDHE-RSA-AES128-GCM-SHA256",
"ECDHE-ECDSA-AES128-SHA256", "ECDHE-RSA-AES128-SHA256", "ECDH-ECDSA-AES128-GCM-SHA256",
"ECDH-RSA-AES128-GCM-SHA256", "ECDH-ECDSA-AES128-SHA256", "ECDH-RSA-AES128-SHA256",
"DHE-DSS-AES128-GCM-SHA256", "DHE-DSS-AES128-SHA256", "AES128-GCM-SHA256", "AES128-SHA256",
"ECDHE-ECDSA-AES256-SHA", "ECDHE-RSA-AES256-SHA", "DHE-DSS-AES256-SHA",
"ECDH-ECDSA-AES256-SHA", "ECDH-RSA-AES256-SHA", "ECDHE-ECDSA-AES128-SHA",
"ECDHE-RSA-AES128-SHA", "DHE-DSS-AES128-SHA", "ECDH-ECDSA-AES128-SHA",
"ECDH-RSA-AES128-SHA",
[
"ECDHE-ECDSA-AES256-GCM-SHA384",
"ECDHE-RSA-AES256-GCM-SHA384",
"ECDHE-ECDSA-AES256-SHA384",
"ECDHE-RSA-AES256-SHA384",
"ECDH-ECDSA-AES256-GCM-SHA384",
"ECDH-RSA-AES256-GCM-SHA384",
"ECDH-ECDSA-AES256-SHA384",
"ECDH-RSA-AES256-SHA384",
"DHE-DSS-AES256-GCM-SHA384",
"DHE-DSS-AES256-SHA256",
"AES256-GCM-SHA384",
"AES256-SHA256",
"ECDHE-ECDSA-AES128-GCM-SHA256",
"ECDHE-RSA-AES128-GCM-SHA256",
"ECDHE-ECDSA-AES128-SHA256",
"ECDHE-RSA-AES128-SHA256",
"ECDH-ECDSA-AES128-GCM-SHA256",
"ECDH-RSA-AES128-GCM-SHA256",
"ECDH-ECDSA-AES128-SHA256",
"ECDH-RSA-AES128-SHA256",
"DHE-DSS-AES128-GCM-SHA256",
"DHE-DSS-AES128-SHA256",
"AES128-GCM-SHA256",
"AES128-SHA256",
"ECDHE-ECDSA-AES256-SHA",
"ECDHE-RSA-AES256-SHA",
"DHE-DSS-AES256-SHA",
"ECDH-ECDSA-AES256-SHA",
"ECDH-RSA-AES256-SHA",
"ECDHE-ECDSA-AES128-SHA",
"ECDHE-RSA-AES128-SHA",
"DHE-DSS-AES128-SHA",
"ECDH-ECDSA-AES128-SHA",
"ECDH-RSA-AES128-SHA",

%% psk
"RSA-PSK-AES256-GCM-SHA384","RSA-PSK-AES256-CBC-SHA384",
"RSA-PSK-AES128-GCM-SHA256","RSA-PSK-AES128-CBC-SHA256",
"RSA-PSK-AES256-CBC-SHA","RSA-PSK-AES128-CBC-SHA"
%% psk
"RSA-PSK-AES256-GCM-SHA384",
"RSA-PSK-AES256-CBC-SHA384",
"RSA-PSK-AES128-GCM-SHA256",
"RSA-PSK-AES128-CBC-SHA256",
"RSA-PSK-AES256-CBC-SHA",
"RSA-PSK-AES128-CBC-SHA"
].

%% @doc Ensure version & cipher-suites integrity.
@@ -146,7 +178,7 @@ integral_ciphers(Versions, Ciphers) ->
ensure_tls13_cipher(true, Ciphers) ->
Tls13Ciphers = selected_ciphers(['tlsv1.3']),
case lists:any(fun(C) -> lists:member(C, Tls13Ciphers) end, Ciphers) of
true -> Ciphers;
true -> Ciphers;
false -> Tls13Ciphers ++ Ciphers
end;
ensure_tls13_cipher(false, Ciphers) ->

@@ -172,7 +204,8 @@ dedup([H | T]) -> [H | dedup([I || I <- T, I =/= H])].
parse_versions(Versions) ->
do_parse_versions(split_by_comma(Versions), []).

do_parse_versions([], Acc) -> lists:reverse(Acc);
do_parse_versions([], Acc) ->
lists:reverse(Acc);
do_parse_versions([V | More], Acc) ->
case parse_version(V) of
unknown ->

@@ -209,21 +242,22 @@ drop_tls13_for_old_otp(SslOpts) ->
%% should return when running on otp 23.
%% But we still have to hard-code them because tlsv1.3 on otp 22 is
%% not trustworthy.
-define(TLSV13_EXCLUSIVE_CIPHERS, [ "TLS_AES_256_GCM_SHA384"
, "TLS_AES_128_GCM_SHA256"
, "TLS_CHACHA20_POLY1305_SHA256"
, "TLS_AES_128_CCM_SHA256"
, "TLS_AES_128_CCM_8_SHA256"
]).
-define(TLSV13_EXCLUSIVE_CIPHERS, [
"TLS_AES_256_GCM_SHA384",
"TLS_AES_128_GCM_SHA256",
"TLS_CHACHA20_POLY1305_SHA256",
"TLS_AES_128_CCM_SHA256",
"TLS_AES_128_CCM_8_SHA256"
]).
drop_tls13(SslOpts0) ->
SslOpts1 = case maps:find(versions, SslOpts0) of
error -> SslOpts0;
{ok, Vsns} -> SslOpts0#{versions => (Vsns -- ['tlsv1.3'])}
end,
SslOpts1 =
case maps:find(versions, SslOpts0) of
error -> SslOpts0;
{ok, Vsns} -> SslOpts0#{versions => (Vsns -- ['tlsv1.3'])}
end,
case maps:find(ciphers, SslOpts1) of
error -> SslOpts1;
{ok, Ciphers} ->
SslOpts1#{ciphers => Ciphers -- ?TLSV13_EXCLUSIVE_CIPHERS}
{ok, Ciphers} -> SslOpts1#{ciphers => Ciphers -- ?TLSV13_EXCLUSIVE_CIPHERS}
end.

%% @doc The input map is a HOCON decoded result of a struct defined as
@@ -233,17 +267,19 @@ drop_tls13(SslOpts0) ->
%% When PEM format key or certificate is given, it tries to to save them in the given
%% sub-dir in emqx's data_dir, and replace saved file paths for SSL options.
-spec ensure_ssl_files(file:name_all(), undefined | map()) ->
{ok, undefined | map()} | {error, map()}.
{ok, undefined | map()} | {error, map()}.
ensure_ssl_files(Dir, Opts) ->
ensure_ssl_files(Dir, Opts, _DryRun = false).

ensure_ssl_files(_Dir, undefined, _DryRun) -> {ok, undefined};
ensure_ssl_files(_Dir, undefined, _DryRun) ->
{ok, undefined};
ensure_ssl_files(_Dir, #{<<"enable">> := False} = Opts, _DryRun) when ?IS_FALSE(False) ->
{ok, Opts};
ensure_ssl_files(Dir, Opts, DryRun) ->
ensure_ssl_files(Dir, Opts, ?SSL_FILE_OPT_NAMES, DryRun).

ensure_ssl_files(_Dir,Opts, [], _DryRun) -> {ok, Opts};
ensure_ssl_files(_Dir, Opts, [], _DryRun) ->
{ok, Opts};
ensure_ssl_files(Dir, Opts, [Key | Keys], DryRun) ->
case ensure_ssl_file(Dir, Key, Opts, maps:get(Key, Opts, undefined), DryRun) of
{ok, NewOpts} ->

@@ -258,18 +294,25 @@ delete_ssl_files(Dir, NewOpts0, OldOpts0) ->
DryRun = true,
{ok, NewOpts} = ensure_ssl_files(Dir, NewOpts0, DryRun),
{ok, OldOpts} = ensure_ssl_files(Dir, OldOpts0, DryRun),
Get = fun(_K, undefined) -> undefined;
(K, Opts) -> maps:get(K, Opts, undefined)
end,
lists:foreach(fun(Key) -> delete_old_file(Get(Key, NewOpts), Get(Key, OldOpts)) end,
?SSL_FILE_OPT_NAMES).
Get = fun
(_K, undefined) -> undefined;
(K, Opts) -> maps:get(K, Opts, undefined)
end,
lists:foreach(
fun(Key) -> delete_old_file(Get(Key, NewOpts), Get(Key, OldOpts)) end,
?SSL_FILE_OPT_NAMES
).

delete_old_file(New, Old) when New =:= Old -> ok;
delete_old_file(_New, _Old = undefined) -> ok;
delete_old_file(_New, _Old = undefined) ->
ok;
delete_old_file(_New, Old) ->
case filelib:is_regular(Old) andalso file:delete(Old) of
ok -> ok;
false -> ok; %% already deleted
ok ->
ok;
%% already deleted
false ->
ok;
{error, Reason} ->
?SLOG(error, #{msg => "failed_to_delete_ssl_file", file_path => Old, reason => Reason})
end.
@@ -293,12 +336,14 @@ do_ensure_ssl_file(Dir, Key, Opts, MaybePem, DryRun) ->
end;
false ->
case is_valid_pem_file(MaybePem) of
true -> {ok, Opts};
true ->
{ok, Opts};
{error, enoent} when DryRun -> {ok, Opts};
{error, Reason} ->
{error, #{pem_check => invalid_pem,
file_read => Reason
}}
{error, #{
pem_check => invalid_pem,
file_read => Reason
}}
end
end.

@@ -312,8 +357,10 @@ is_valid_string(Binary) when is_binary(Binary) ->

%% Check if it is a valid PEM formatted key.
is_pem(MaybePem) ->
try public_key:pem_decode(MaybePem) =/= []
catch _ : _ -> false
try
public_key:pem_decode(MaybePem) =/= []
catch
_:_ -> false
end.

%% Write the pem file to the given dir.

@@ -328,8 +375,7 @@ save_pem_file(Dir, Key, Pem, DryRun) ->
ok ->
case file:write_file(Path, Pem) of
ok -> {ok, Path};
{error, Reason} ->
{error, #{failed_to_write_file => Reason, file_path => Path}}
{error, Reason} -> {error, #{failed_to_write_file => Reason, file_path => Path}}
end;
{error, Reason} ->
{error, #{failed_to_create_dir_for => Path, reason => Reason}}

@@ -346,7 +392,7 @@ pem_file_name(Dir, Key, Pem) ->
filename:join([emqx:certs_dir(), Dir, FileName]).

hex_str(Bin) ->
iolist_to_binary([io_lib:format("~2.16.0b",[X]) || <<X:8>> <= Bin ]).
iolist_to_binary([io_lib:format("~2.16.0b", [X]) || <<X:8>> <= Bin]).

is_valid_pem_file(Path) ->
case file:read_file(Path) of
@@ -355,7 +401,8 @@ is_valid_pem_file(Path) ->
end.

%% @doc This is to return SSL file content in management APIs.
file_content_as_options(undefined) -> undefined;
file_content_as_options(undefined) ->
undefined;
file_content_as_options(#{<<"enable">> := False} = SSL) when ?IS_FALSE(False) ->
{ok, maps:without(?SSL_FILE_OPT_NAMES, SSL)};
file_content_as_options(#{<<"enable">> := True} = SSL) when ?IS_TRUE(True) ->

@@ -365,15 +412,17 @@ file_content_as_options([], SSL) ->
{ok, SSL};
file_content_as_options([Key | Keys], SSL) ->
case maps:get(Key, SSL, undefined) of
undefined -> file_content_as_options(Keys, SSL);
undefined ->
file_content_as_options(Keys, SSL);
Path ->
case file:read_file(Path) of
{ok, Bin} ->
file_content_as_options(Keys, SSL#{Key => Bin});
{error, Reason} ->
{error, #{file_path => Path,
reason => Reason
}}
{error, #{
file_path => Path,
reason => Reason
}}
end
end.
@@ -30,19 +30,25 @@ lookup(psk, PSKIdentity, _UserState) ->
{ok, SharedSecret} when is_binary(SharedSecret) ->
{ok, SharedSecret};
normal ->
?SLOG(info, #{msg => "psk_identity_not_found",
psk_identity => PSKIdentity}),
?SLOG(info, #{
msg => "psk_identity_not_found",
psk_identity => PSKIdentity
}),
error;
{error, Reason} ->
?SLOG(warning, #{msg => "psk_identity_not_found",
psk_identity => PSKIdentity,
reason => Reason}),
?SLOG(warning, #{
msg => "psk_identity_not_found",
psk_identity => PSKIdentity,
reason => Reason
}),
error
catch
Class:Reason:Stacktrace ->
?SLOG(error, #{msg => "lookup_psk_failed",
class => Class,
reason => Reason,
stacktrace => Stacktrace}),
error
?SLOG(error, #{
msg => "lookup_psk_failed",
class => Class,
reason => Reason,
stacktrace => Stacktrace
}),
error
end.
Some files were not shown because too many files have changed in this diff.