Merge pull request #11641 from zhongwencool/slog-msg-unify

zhongwencool committed 2023-09-21 07:32:39 +08:00 (committed by GitHub)
commit 1b29e4b189
47 changed files with 164 additions and 153 deletions
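
The pattern applied across all 47 files: ?SLOG/2 call sites drop free-form sentences, bare atoms, and io_lib:format/2 results in favor of a stable snake_case string under the msg key, with variable data moved into dedicated map fields. A minimal before/after sketch of the convention (illustrative Erlang, not itself a hunk from this diff):

    %% Before: human-readable sentence with interpolated values; hard to
    %% index or filter in a structured log pipeline.
    ?SLOG(error, #{msg => io_lib:format("Failed to query: ~p, ~p", [Code, Error])}),

    %% After: stable snake_case msg key; the variable parts become map fields.
    ?SLOG(error, #{msg => "failed_to_query", code => Code, reason => Error}).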

View File

@@ -91,7 +91,7 @@ check_subscribe(#{clientid := ClientId}, Topic) ->
             Res;
         {aborted, Reason} ->
             ?SLOG(warning, #{
-                msg => "Cannot check subscribe ~p due to ~p.", topic => Topic, reason => Reason
+                msg => "check_subscribe_aborted", topic => Topic, reason => Reason
             }),
             deny
     end.

View File

@@ -622,7 +622,7 @@ find_referenced_bucket(Id, Type, #{rate := Rate} = Cfg) when Rate =/= infinity -
         {ok, Bucket} ->
             {ok, Bucket, Cfg};
         _ ->
-            ?SLOG(error, #{msg => "bucket not found", type => Type, id => Id}),
+            ?SLOG(error, #{msg => "bucket_not_found", type => Type, id => Id}),
             {error, invalid_bucket}
     end;
 %% this is a node-level reference

View File

@@ -640,7 +640,7 @@ esockd_access_rules(StrRules) ->
                     | Acc
                 ];
             _ ->
-                ?SLOG(warning, #{msg => "invalid esockd access rule", rule => S}),
+                ?SLOG(warning, #{msg => "invalid_esockd_access_rule", rule => S}),
                 Acc
             end
         end,

View File

@@ -91,7 +91,7 @@ closed(_Conn, #{is_peer_acked := _} = Prop, S) ->
     ?SLOG(debug, Prop),
     {stop, normal, S}.
-%% @doc handle the new incoming connecion as the connecion acceptor.
+%% @doc handle the new incoming connection as the connection acceptor.
 -spec new_conn(quicer:connection_handle(), quicer:new_conn_props(), cb_state()) ->
     {ok, cb_state()} | {error, any(), cb_state()}.
 new_conn(
@@ -183,13 +183,13 @@ new_stream(
         ok ->
             ok;
         E ->
-            %% Only log, keep connecion alive.
-            ?SLOG(error, #{message => "new stream handoff failed", stream => Stream, error => E})
+            %% Only log, keep connection alive.
+            ?SLOG(error, #{msg => "new_stream_handoff_failed", stream => Stream, error => E})
     end,
     %% @TODO maybe keep them in `inactive_streams'
     {ok, S#{streams := [{NewStreamOwner, Stream} | Streams]}}.
-%% @doc callback for handling remote connecion shutdown.
+%% @doc callback for handling remote connection shutdown.
 -spec shutdown(quicer:connection_handle(), quicer:error_code(), cb_state()) -> cb_ret().
 shutdown(Conn, ErrorCode, S) ->
     ErrorCode =/= 0 andalso ?SLOG(debug, #{error_code => ErrorCode, state => S}),
@@ -234,7 +234,7 @@ streams_available(_C, {BidirCnt, UnidirCnt}, S) ->
 -spec peer_needs_streams(quicer:connection_handle(), undefined, cb_state()) -> cb_ret().
 peer_needs_streams(_C, undefined, S) ->
     ?SLOG(info, #{
-        msg => "ignore: peer need more streames", info => maps:with([conn_pid, ctrl_pid], S)
+        msg => "ignore_peer_needs_more_streams", info => maps:with([conn_pid, ctrl_pid], S)
     }),
     {ok, S}.
@@ -281,7 +281,7 @@ handle_info({'EXIT', Pid, Reason}, #{streams := Streams} = S) ->
         ->
             {ok, S};
         true ->
-            ?SLOG(info, #{message => "Data stream unexpected exit", reason => Reason}),
+            ?SLOG(info, #{msg => "data_stream_unexpected_exit", reason => Reason}),
             {ok, S};
         false ->
             {stop, unknown_pid_down, S}

View File

@@ -368,7 +368,7 @@ do_handle_call(
         ok ->
             {reply, ok, NewS};
         {error, E} ->
-            ?SLOG(error, #{msg => "set stream active failed", error => E}),
+            ?SLOG(error, #{msg => "set_stream_active_failed", error => E}),
             {stop, E, NewS}
     end;
 do_handle_call(_Call, _S) ->

View File

@@ -191,7 +191,7 @@ peer_send_shutdown(Stream, undefined, S) ->
 send_complete(_Stream, false, S) ->
     {ok, S};
 send_complete(_Stream, true = _IsCancelled, S) ->
-    ?SLOG(error, #{message => "send cancelled"}),
+    ?SLOG(error, #{msg => "send_cancelled"}),
     {ok, S}.
 -spec send_shutdown_complete(stream_handle(), boolean(), cb_data()) -> cb_ret().
@@ -202,7 +202,7 @@ send_shutdown_complete(_Stream, _IsGraceful, S) ->
 passive(Stream, undefined, S) ->
     case quicer:setopt(Stream, active, 10) of
         ok -> ok;
-        Error -> ?SLOG(error, #{message => "set active error", error => Error})
+        Error -> ?SLOG(error, #{msg => "set_active_error", error => Error})
     end,
     {ok, S}.

View File

@@ -120,7 +120,7 @@ handle_info({monitor, Pid, long_gc, Info}, State) ->
         fun() ->
             WarnMsg = io_lib:format("long_gc warning: pid = ~p", [Pid]),
             ?SLOG(warning, #{
-                msg => long_gc,
+                msg => "long_gc",
                 info => Info,
                 porcinfo => procinfo(Pid)
             }),
@@ -134,7 +134,7 @@ handle_info({monitor, Pid, long_schedule, Info}, State) when is_pid(Pid) ->
         fun() ->
             WarnMsg = io_lib:format("long_schedule warning: pid = ~p", [Pid]),
             ?SLOG(warning, #{
-                msg => long_schedule,
+                msg => "long_schedule",
                 info => Info,
                 procinfo => procinfo(Pid)
             }),
@@ -148,7 +148,7 @@ handle_info({monitor, Port, long_schedule, Info}, State) when is_port(Port) ->
         fun() ->
             WarnMsg = io_lib:format("long_schedule warning: port = ~p", [Port]),
             ?SLOG(warning, #{
-                msg => long_schedule,
+                msg => "long_schedule",
                 info => Info,
                 portinfo => portinfo(Port)
             }),
@@ -162,7 +162,7 @@ handle_info({monitor, Pid, large_heap, Info}, State) ->
         fun() ->
             WarnMsg = io_lib:format("large_heap warning: pid = ~p", [Pid]),
             ?SLOG(warning, #{
-                msg => large_heap,
+                msg => "large_heap",
                 info => Info,
                 procinfo => procinfo(Pid)
             }),
@@ -176,7 +176,7 @@ handle_info({monitor, SusPid, busy_port, Port}, State) ->
         fun() ->
             WarnMsg = io_lib:format("busy_port warning: suspid = ~p, port = ~p", [SusPid, Port]),
             ?SLOG(warning, #{
-                msg => busy_port,
+                msg => "busy_port",
                 portinfo => portinfo(Port),
                 procinfo => procinfo(SusPid)
             }),
@@ -190,7 +190,7 @@ handle_info({monitor, SusPid, busy_dist_port, Port}, State) ->
         fun() ->
             WarnMsg = io_lib:format("busy_dist_port warning: suspid = ~p, port = ~p", [SusPid, Port]),
             ?SLOG(warning, #{
-                msg => busy_dist_port,
+                msg => "busy_dist_port",
                 portinfo => portinfo(Port),
                 procinfo => procinfo(SusPid)
             }),

View File

@@ -247,14 +247,14 @@ handle_call(check, _From, State) ->
     {_, NewState} = handle_info({mnesia_table_event, check}, State),
     {reply, ok, NewState};
 handle_call(Req, _From, State) ->
-    ?SLOG(error, #{unexpected_call => Req}),
+    ?SLOG(error, #{msg => "unexpected_call", req => Req}),
     {reply, ok, State}.
 handle_cast({delete_tag, Pid, Files}, State = #{monitors := Monitors}) ->
     erlang:monitor(process, Pid),
     {noreply, State#{monitors => Monitors#{Pid => Files}}};
 handle_cast(Msg, State) ->
-    ?SLOG(error, #{unexpected_cast => Msg}),
+    ?SLOG(error, #{msg => "unexpected_cast", req => Msg}),
     {noreply, State}.
 handle_info({'DOWN', _Ref, process, Pid, _Reason}, State = #{monitors := Monitors}) ->
@@ -275,7 +275,7 @@ handle_info({mnesia_table_event, _Events}, State = #{timer := TRef}) ->
     emqx_utils:cancel_timer(TRef),
     handle_info({timeout, TRef, update_trace}, State);
 handle_info(Info, State) ->
-    ?SLOG(error, #{unexpected_info => Info}),
+    ?SLOG(error, #{msg => "unexpected_info", req => Info}),
     {noreply, State}.
 terminate(_Reason, #{timer := TRef}) ->

View File

@@ -1,7 +1,7 @@
 %% -*- mode: erlang -*-
 {application, emqx_authz, [
-    {description, "An OTP application"},
-    {vsn, "0.1.25"},
+    {description, "emqx authorization application"},
+    {vsn, "0.1.26"},
     {registered, []},
     {mod, {emqx_authz_app, []}},
     {applications, [

View File

@@ -53,7 +53,7 @@ validate(Path0) ->
             }),
             throw(failed_to_read_acl_file);
         {error, Reason} ->
-            ?SLOG(alert, #{msg => bad_acl_file_content, path => Path, reason => Reason}),
+            ?SLOG(alert, #{msg => "bad_acl_file_content", path => Path, reason => Reason}),
             throw({bad_acl_file_content, Reason})
     end,
     {ok, Rules}.

View File

@@ -95,7 +95,7 @@ authorize(
     case emqx_authz_utils:parse_http_resp_body(ContentType, Body) of
         error ->
             ?SLOG(error, #{
-                msg => authz_http_response_incorrect,
+                msg => "authz_http_response_incorrect",
                 content_type => ContentType,
                 body => Body
             }),
@@ -123,7 +123,7 @@ log_nomtach_msg(Status, Headers, Body) ->
     ?SLOG(
         debug,
         #{
-            msg => unexpected_authz_http_response,
+            msg => "unexpected_authz_http_response",
             status => Status,
             content_type => emqx_authz_utils:content_type(Headers),
             body => Body

View File

@@ -336,7 +336,7 @@ disable_enable(Action, BridgeType, BridgeName) when
 create(BridgeType, BridgeName, RawConf) ->
     ?SLOG(debug, #{
-        brige_action => create,
+        bridge_action => create,
         bridge_type => BridgeType,
         bridge_name => BridgeName,
         bridge_raw_config => emqx_utils:redact(RawConf)
@@ -349,7 +349,7 @@ create(BridgeType, BridgeName, RawConf) ->
 remove(BridgeType, BridgeName) ->
     ?SLOG(debug, #{
-        brige_action => remove,
+        bridge_action => remove,
         bridge_type => BridgeType,
         bridge_name => BridgeName
     }),

View File

@@ -174,7 +174,7 @@ create(Type, Name, Conf) ->
 create(Type, Name, Conf0, Opts) ->
     ?SLOG(info, #{
-        msg => "create bridge",
+        msg => "create_bridge",
         type => Type,
         name => Name,
         config => emqx_utils:redact(Conf0)
@@ -209,7 +209,7 @@ update(Type, Name, {OldConf, Conf}, Opts) ->
     case emqx_utils_maps:if_only_to_toggle_enable(OldConf, Conf) of
         false ->
             ?SLOG(info, #{
-                msg => "update bridge",
+                msg => "update_bridge",
                 type => Type,
                 name => Name,
                 config => emqx_utils:redact(Conf)

View File

@@ -1,6 +1,6 @@
 {application, emqx_bridge_clickhouse, [
     {description, "EMQX Enterprise ClickHouse Bridge"},
-    {vsn, "0.2.2"},
+    {vsn, "0.2.3"},
     {registered, []},
     {applications, [
         kernel,

View File

@@ -349,7 +349,7 @@ on_query(
     #{pool_name := PoolName} = State
 ) ->
     ?SLOG(debug, #{
-        msg => "clickhouse connector received sql query",
+        msg => "clickhouse_connector_received_sql_query",
         connector => ResourceID,
         type => RequestType,
         sql => DataOrSQL,
@@ -463,7 +463,7 @@ transform_and_log_clickhouse_result({ok, ResponseCode, Data}, _, _) when
     Result;
 transform_and_log_clickhouse_result(ClickhouseErrorResult, ResourceID, SQL) ->
     ?SLOG(error, #{
-        msg => "clickhouse connector do sql query failed",
+        msg => "clickhouse_connector_do_sql_query_failed",
         connector => ResourceID,
         sql => SQL,
         reason => ClickhouseErrorResult
@@ -474,7 +474,7 @@ transform_and_log_clickhouse_result(ClickhouseErrorResult, ResourceID, SQL) ->
         %% handles all error cases that we need to handle as recoverable_error
         true ->
             ?SLOG(warning, #{
-                msg => "clickhouse connector: sql query failed (recoverable)",
+                msg => "clickhouse_connector_sql_query_failed_recoverable",
                 recoverable_error => true,
                 connector => ResourceID,
                 sql => SQL,
@@ -483,7 +483,7 @@ transform_and_log_clickhouse_result(ClickhouseErrorResult, ResourceID, SQL) ->
             to_recoverable_error(ClickhouseErrorResult);
         false ->
             ?SLOG(error, #{
-                msg => "clickhouse connector: sql query failed (unrecoverable)",
+                msg => "clickhouse_connector_sql_query_failed_unrecoverable",
                 recoverable_error => false,
                 connector => ResourceID,
                 sql => SQL,

View File

@@ -176,7 +176,7 @@ desc(greptimedb) ->
 start_client(InstId, Config) ->
     ClientConfig = client_config(InstId, Config),
     ?SLOG(info, #{
-        msg => "starting greptimedb connector",
+        msg => "starting_greptimedb_connector",
         connector => InstId,
         config => emqx_utils:redact(Config),
         client_config => emqx_utils:redact(ClientConfig)
@@ -191,7 +191,7 @@ start_client(InstId, Config) ->
         E:R:S ->
             ?tp(greptimedb_connector_start_exception, #{error => {E, R}}),
             ?SLOG(warning, #{
-                msg => "start greptimedb connector error",
+                msg => "start_greptimedb_connector_error",
                 connector => InstId,
                 error => E,
                 reason => emqx_utils:redact(R),
@@ -216,7 +216,7 @@ do_start_client(
                 write_syntax => to_config(Lines, Precision)
             },
             ?SLOG(info, #{
-                msg => "starting greptimedb connector success",
+                msg => "starting_greptimedb_connector_success",
                 connector => InstId,
                 client => redact_auth(Client),
                 state => redact_auth(State)
@@ -239,7 +239,7 @@ do_start_client(
         {error, {already_started, Client0}} ->
             ?tp(greptimedb_connector_start_already_started, #{}),
             ?SLOG(info, #{
-                msg => "restarting greptimedb connector, found already started client",
+                msg => "restarting_greptimedb_connector_found_already_started_client",
                 connector => InstId,
                 old_client => redact_auth(Client0)
             }),
@@ -321,7 +321,7 @@ do_query(InstId, Client, Points) ->
     case greptimedb:write_batch(Client, Points) of
         {ok, #{response := {affected_rows, #{value := Rows}}}} ->
             ?SLOG(debug, #{
-                msg => "greptimedb write point success",
+                msg => "greptimedb_write_point_success",
                 connector => InstId,
                 points => Points
             }),
@@ -337,7 +337,7 @@ do_query(InstId, Client, Points) ->
         {error, Reason} = Err ->
             ?tp(greptimedb_connector_do_query_failure, #{error => Reason}),
             ?SLOG(error, #{
-                msg => "greptimedb write point failed",
+                msg => "greptimedb_write_point_failed",
                 connector => InstId,
                 reason => Reason
             }),
@@ -422,7 +422,8 @@ parse_batch_data(InstId, BatchData, SyntaxLines) ->
             {ok, lists:flatten(Points)};
         _ ->
             ?SLOG(error, #{
-                msg => io_lib:format("Greptimedb trans point failed, count: ~p", [Errors]),
+                msg => "greptimedb_trans_point_failed",
+                error_count => Errors,
                 connector => InstId,
                 reason => points_trans_failed
             }),
@@ -583,7 +584,7 @@ log_error_points(InstId, Errs) ->
     lists:foreach(
         fun({error, Reason}) ->
             ?SLOG(error, #{
-                msg => "greptimedb trans point failed",
+                msg => "greptimedb_trans_point_failed",
                 connector => InstId,
                 reason => Reason
             })

View File

@@ -346,7 +346,8 @@ query_by_clientid(Topic, ClientId, Config) ->
             BridgeId = emqx_bridge_resource:bridge_id(Type, GreptimedbName),
             ?SLOG(error, #{
-                msg => io_lib:format("Failed to query: ~p, ~p", [Code, Error]),
+                msg => "failed_to_query",
+                code => Code,
                 connector => BridgeId,
                 reason => Error
             }),

View File

@@ -52,7 +52,7 @@ on_stop(InstId, _State) ->
     StopClientRes = hstreamdb:stop_client(Client),
     StopProducerRes = hstreamdb:stop_producer(Producer),
     ?SLOG(info, #{
-        msg => "stop hstreamdb connector",
+        msg => "stop_hstreamdb_connector",
         connector => InstId,
         client => Client,
         producer => Producer,
@@ -146,7 +146,7 @@ start_client(InstId, Config) ->
     catch
         E:R:S ->
             Error = #{
-                msg => "start hstreamdb connector error",
+                msg => "start_hstreamdb_connector_error",
                 connector => InstId,
                 error => E,
                 reason => R,
@@ -158,7 +158,7 @@ start_client(InstId, Config) ->
 do_start_client(InstId, Config = #{url := Server, pool_size := PoolSize, ssl := SSL}) ->
     ?SLOG(info, #{
-        msg => "starting hstreamdb connector: client",
+        msg => "starting_hstreamdb_connector_client",
         connector => InstId,
         config => Config
     }),
@@ -185,7 +185,7 @@ do_start_client(InstId, Config = #{url := Server, pool_size := PoolSize, ssl :=
             case is_alive(Client) of
                 true ->
                     ?SLOG(info, #{
-                        msg => "hstreamdb connector: client started",
+                        msg => "hstreamdb_connector_client_started",
                         connector => InstId,
                         client => Client
                     }),
@@ -196,14 +196,14 @@ do_start_client(InstId, Config = #{url := Server, pool_size := PoolSize, ssl :=
                         #{error => client_not_alive}
                     ),
                     ?SLOG(error, #{
-                        msg => "hstreamdb connector: client not alive",
+                        msg => "hstreamdb_connector_client_not_alive",
                         connector => InstId
                     }),
                     {error, connect_failed}
             end;
         {error, {already_started, Pid}} ->
             ?SLOG(info, #{
-                msg => "starting hstreamdb connector: client, find old client. restart client",
+                msg => "starting_hstreamdb_connector_client_find_old_client_restart_client",
                 old_client_pid => Pid,
                 old_client_name => ClientName
             }),
@@ -211,7 +211,7 @@ do_start_client(InstId, Config = #{url := Server, pool_size := PoolSize, ssl :=
             start_client(InstId, Config);
         {error, Error} ->
             ?SLOG(error, #{
-                msg => "hstreamdb connector: client failed",
+                msg => "hstreamdb_connector_client_failed",
                 connector => InstId,
                 reason => Error
             }),
@@ -239,13 +239,13 @@ start_producer(
     ],
     Name = produce_name(InstId),
     ?SLOG(info, #{
-        msg => "starting hstreamdb connector: producer",
+        msg => "starting_hstreamdb_connector_producer",
         connector => InstId
     }),
     case hstreamdb:start_producer(Client, Name, ProducerOptions) of
         {ok, Producer} ->
             ?SLOG(info, #{
-                msg => "hstreamdb connector: producer started"
+                msg => "hstreamdb_connector_producer_started"
             }),
             State = #{
                 client => Client,
@@ -263,7 +263,7 @@ start_producer(
         {error, {already_started, Pid}} ->
             ?SLOG(info, #{
                 msg =>
-                    "starting hstreamdb connector: producer, find old producer. restart producer",
+                    "starting_hstreamdb_connector_producer_find_old_producer_restart_producer",
                 old_producer_pid => Pid,
                 old_producer_name => Name
             }),
@@ -271,7 +271,7 @@ start_producer(
             start_producer(InstId, Client, Options);
         {error, Reason} ->
             ?SLOG(error, #{
-                msg => "starting hstreamdb connector: producer, failed",
+                msg => "starting_hstreamdb_connector_producer_failed",
                 reason => Reason
             }),
             {error, Reason}
@@ -318,7 +318,7 @@ handle_result({ok, Result}, Record, IsBatch) ->
         #{result => Result, is_batch => IsBatch}
     ),
     ?SLOG(debug, #{
-        msg => "HStreamDB producer sync append success",
+        msg => "hstreamdb_producer_sync_append_success",
        record => Record,
         is_batch => IsBatch
     });
@@ -328,7 +328,7 @@ handle_result({error, Reason} = Err, Record, IsBatch) ->
         #{error => Reason, is_batch => IsBatch}
     ),
     ?SLOG(error, #{
-        msg => "HStreamDB producer sync append failed",
+        msg => "hstreamdb_producer_sync_append_failed",
         reason => Reason,
         record => Record,
         is_batch => IsBatch

View File

@@ -1,6 +1,6 @@
 {application, emqx_bridge_http, [
     {description, "EMQX HTTP Bridge and Connector Application"},
-    {vsn, "0.1.2"},
+    {vsn, "0.1.3"},
     {registered, []},
     {applications, [kernel, stdlib, emqx_connector, emqx_resource, ehttpc]},
     {env, []},

View File

@@ -328,7 +328,7 @@ on_query(
             {error, {recoverable_error, Reason}};
         {error, #{status_code := StatusCode}} ->
             ?SLOG(error, #{
-                msg => "http connector do request, received error response.",
+                msg => "http_connector_do_request_received_error_response",
                 note => "the body will be redacted due to security reasons",
                 request => redact_request(NRequest),
                 connector => InstId,

View File

@@ -1,6 +1,6 @@
 {application, emqx_bridge_influxdb, [
     {description, "EMQX Enterprise InfluxDB Bridge"},
-    {vsn, "0.1.4"},
+    {vsn, "0.1.5"},
     {registered, []},
     {applications, [
         kernel,

View File

@@ -230,7 +230,7 @@ desc(influxdb_api_v2) ->
 start_client(InstId, Config) ->
     ClientConfig = client_config(InstId, Config),
     ?SLOG(info, #{
-        msg => "starting influxdb connector",
+        msg => "starting_influxdb_connector",
         connector => InstId,
         config => emqx_utils:redact(Config),
         client_config => emqx_utils:redact(ClientConfig)
@@ -245,7 +245,7 @@ start_client(InstId, Config) ->
         E:R:S ->
             ?tp(influxdb_connector_start_exception, #{error => {E, R}}),
             ?SLOG(warning, #{
-                msg => "start influxdb connector error",
+                msg => "start_influxdb_connector_error",
                 connector => InstId,
                 error => E,
                 reason => R,
@@ -271,7 +271,7 @@ do_start_client(
                 write_syntax => to_config(Lines, Precision)
             },
             ?SLOG(info, #{
-                msg => "starting influxdb connector success",
+                msg => "starting_influxdb_connector_success",
                 connector => InstId,
                 client => redact_auth(Client),
                 state => redact_auth(State)
@@ -307,7 +307,7 @@ do_start_client(
         {error, {already_started, Client0}} ->
             ?tp(influxdb_connector_start_already_started, #{}),
             ?SLOG(info, #{
-                msg => "restarting influxdb connector, found already started client",
+                msg => "restarting_influxdb_connector_found_already_started_client",
                 connector => InstId,
                 old_client => redact_auth(Client0)
             }),
@@ -401,7 +401,7 @@ do_query(InstId, Client, Points) ->
     case influxdb:write(Client, Points) of
         ok ->
             ?SLOG(debug, #{
-                msg => "influxdb write point success",
+                msg => "influxdb_write_point_success",
                 connector => InstId,
                 points => Points
             });
@@ -416,7 +416,7 @@ do_query(InstId, Client, Points) ->
         {error, Reason} = Err ->
             ?tp(influxdb_connector_do_query_failure, #{error => Reason}),
             ?SLOG(error, #{
-                msg => "influxdb write point failed",
+                msg => "influxdb_write_point_failed",
                 connector => InstId,
                 reason => Reason
             }),
@@ -430,7 +430,7 @@ do_query(InstId, Client, Points) ->
 do_async_query(InstId, Client, Points, ReplyFunAndArgs) ->
     ?SLOG(info, #{
-        msg => "influxdb write point async",
+        msg => "influxdb_write_point_async",
         connector => InstId,
         points => Points
     }),
@@ -522,7 +522,8 @@ parse_batch_data(InstId, BatchData, SyntaxLines) ->
             {ok, lists:flatten(Points)};
         _ ->
             ?SLOG(error, #{
-                msg => io_lib:format("InfluxDB trans point failed, count: ~p", [Errors]),
+                msg => "influxdb_trans_point_failed",
+                error_count => Errors,
                 connector => InstId,
                 reason => points_trans_failed
             }),
@@ -683,7 +684,7 @@ log_error_points(InstId, Errs) ->
     lists:foreach(
         fun({error, Reason}) ->
             ?SLOG(error, #{
-                msg => "influxdb trans point failed",
+                msg => "influxdb_trans_point_failed",
                 connector => InstId,
                 reason => Reason
             })

View File

@@ -78,7 +78,7 @@ tcp_keepalive(KeepAlive) ->
             Opts;
         {error, {unsupported_os, OS}} ->
             ?SLOG(warning, #{
-                msg => "Unsupported operation: set TCP keepalive",
+                msg => "unsupported_operation_set_tcp_keepalive",
                 os => OS
             }),
             []

View File

@@ -1,6 +1,6 @@
 {application, emqx_bridge_opents, [
     {description, "EMQX Enterprise OpenTSDB Bridge"},
-    {vsn, "0.1.2"},
+    {vsn, "0.1.3"},
     {registered, []},
     {applications, [
         kernel,

View File

@@ -112,7 +112,7 @@ on_get_status(_InstanceId, #{server := Server}) ->
             ok ->
                 connected;
             {error, Reason} ->
-                ?SLOG(error, #{msg => "OpenTSDB lost connection", reason => Reason}),
+                ?SLOG(error, #{msg => "opents_lost_connection", reason => Reason}),
                 connecting
         end,
     Result.

View File

@@ -1,6 +1,6 @@
 {application, emqx_bridge_rabbitmq, [
     {description, "EMQX Enterprise RabbitMQ Bridge"},
-    {vsn, "0.1.4"},
+    {vsn, "0.1.5"},
     {registered, []},
     {applications, [
         kernel,

View File

@@ -258,7 +258,7 @@ on_stop(
     _State
 ) ->
     ?SLOG(info, #{
-        msg => "stopping RabbitMQ connector",
+        msg => "stopping_rabbitmq_connector",
         connector => ResourceID
     }),
     stop_clients_and_pool(ResourceID).
@@ -429,7 +429,7 @@ on_query(
     } = State
 ) ->
     ?SLOG(debug, #{
-        msg => "RabbitMQ connector received query",
+        msg => "rabbitmq_connector_received_query",
         connector => ResourceID,
         type => RequestType,
         data => Data,
@@ -453,7 +453,7 @@ on_batch_query(
     State
 ) ->
     ?SLOG(debug, #{
-        msg => "RabbitMQ connector received batch query",
+        msg => "rabbitmq_connector_received_batch_query",
         connector => ResourceID,
         data => BatchReq,
         state => emqx_utils:redact(State)

View File

@@ -456,14 +456,14 @@ parse_sql_template([{Key, H} | T], BatchInsertTks) ->
                         }
                     );
                 {error, Reason} ->
-                    ?SLOG(error, #{msg => "split sql failed", sql => H, reason => Reason}),
+                    ?SLOG(error, #{msg => "split_sql_failed", sql => H, reason => Reason}),
                     parse_sql_template(T, BatchInsertTks)
             end;
         Type when is_atom(Type) ->
-            ?SLOG(error, #{msg => "detect sql type unsupported", sql => H, type => Type}),
+            ?SLOG(error, #{msg => "detect_sql_type_unsupported", sql => H, type => Type}),
             parse_sql_template(T, BatchInsertTks);
         {error, Reason} ->
-            ?SLOG(error, #{msg => "detect sql type failed", sql => H, reason => Reason}),
+            ?SLOG(error, #{msg => "detect_sql_type_failed", sql => H, reason => Reason}),
             parse_sql_template(T, BatchInsertTks)
     end;
 parse_sql_template([], BatchInsertTks) ->
@@ -491,8 +491,8 @@ apply_template(
             {Key, SQL}
     end;
 apply_template(Query, Templates) ->
-    %% TODO: more detail infomatoin
-    ?SLOG(error, #{msg => "apply sql template failed", query => Query, templates => Templates}),
+    %% TODO: more detail information
+    ?SLOG(error, #{msg => "apply_sql_template_failed", query => Query, templates => Templates}),
     {error, failed_to_apply_sql_template}.
 proc_batch_sql(BatchReqs, BatchInserts, Tokens) ->

View File

@@ -1,6 +1,6 @@
 {application, emqx_bridge_tdengine, [
     {description, "EMQX Enterprise TDEngine Bridge"},
-    {vsn, "0.1.4"},
+    {vsn, "0.1.5"},
     {registered, []},
     {applications, [
         kernel,

View File

@@ -148,7 +148,7 @@ on_batch_query(
     end;
 on_batch_query(InstanceId, BatchReq, State) ->
     LogMeta = #{connector => InstanceId, request => BatchReq, state => State},
-    ?SLOG(error, LogMeta#{msg => "invalid request"}),
+    ?SLOG(error, LogMeta#{msg => "invalid_request"}),
     {error, {unrecoverable_error, invalid_request}}.
 on_get_status(_InstanceId, #{pool_name := PoolName}) ->
@@ -253,14 +253,14 @@ parse_batch_prepare_sql([{Key, H} | T], InsertTksMap, BatchTksMap) ->
                         BatchTksMap#{Key => BatchTks}
                     );
                 Result ->
-                    ?SLOG(error, #{msg => "split sql failed", sql => H, result => Result}),
+                    ?SLOG(error, #{msg => "split_sql_failed", sql => H, result => Result}),
                     parse_batch_prepare_sql(T, InsertTksMap, BatchTksMap)
             end;
         Type when is_atom(Type) ->
-            ?SLOG(error, #{msg => "detect sql type unsupported", sql => H, type => Type}),
+            ?SLOG(error, #{msg => "detect_sql_type_unsupported", sql => H, type => Type}),
             parse_batch_prepare_sql(T, InsertTksMap, BatchTksMap);
         {error, Reason} ->
-            ?SLOG(error, #{msg => "detect sql type failed", sql => H, reason => Reason}),
+            ?SLOG(error, #{msg => "detect_sql_type_failed", sql => H, reason => Reason}),
             parse_batch_prepare_sql(T, InsertTksMap, BatchTksMap)
     end;
 parse_batch_prepare_sql([], InsertTksMap, BatchTksMap) ->

View File

@@ -152,7 +152,7 @@ on_start(
 on_stop(InstId, _State) ->
     ?SLOG(info, #{
-        msg => "stopping postgresql connector",
+        msg => "stopping_postgresql_connector",
         connector => InstId
     }),
     emqx_resource_pool:stop(InstId).
@@ -165,7 +165,7 @@ on_query(
     #{pool_name := PoolName} = State
 ) ->
     ?SLOG(debug, #{
-        msg => "postgresql connector received sql query",
+        msg => "postgresql_connector_received_sql_query",
         connector => InstId,
         type => TypeOrKey,
         sql => NameOrSQL,
@@ -200,7 +200,7 @@ on_batch_query(
         connector => InstId,
         first_request => Request,
         state => State,
-        msg => "batch prepare not implemented"
+        msg => "batch_prepare_not_implemented"
     },
     ?SLOG(error, Log),
     {error, {unrecoverable_error, batch_prepare_not_implemented}};
@@ -220,7 +220,7 @@ on_batch_query(
         connector => InstId,
         request => BatchReq,
         state => State,
-        msg => "invalid request"
+        msg => "invalid_request"
     },
     ?SLOG(error, Log),
     {error, {unrecoverable_error, invalid_request}}
@@ -247,7 +247,7 @@ on_sql_query(InstId, PoolName, Type, NameOrSQL, Data) ->
         #{error => Reason}
     ),
     ?SLOG(error, #{
-        msg => "postgresql connector do sql query failed",
+        msg => "postgresql_connector_do_sql_query_failed",
         connector => InstId,
         type => Type,
         sql => NameOrSQL,
@@ -272,7 +272,7 @@ on_sql_query(InstId, PoolName, Type, NameOrSQL, Data) ->
     catch
         error:function_clause:Stacktrace ->
             ?SLOG(error, #{
-                msg => "postgresql connector do sql query failed",
+                msg => "postgresql_connector_do_sql_query_failed",
                 connector => InstId,
                 type => Type,
                 sql => NameOrSQL,
@@ -453,7 +453,7 @@ init_prepare(State = #{prepare_sql := Prepares, pool_name := PoolName}) ->
             State#{prepare_statement := Sts};
         Error ->
             LogMeta = #{
-                msg => <<"PostgreSQL init prepare statement failed">>, error => Error
+                msg => <<"postgresql_init_prepare_statement_failed">>, error => Error
             },
             ?SLOG(error, LogMeta),
             %% mark the prepare_sql as failed
@@ -492,7 +492,7 @@ prepare_sql_to_conn(Conn, Prepares) ->
 prepare_sql_to_conn(Conn, [], Statements) when is_pid(Conn) -> {ok, Statements};
 prepare_sql_to_conn(Conn, [{Key, SQL} | PrepareList], Statements) when is_pid(Conn) ->
-    LogMeta = #{msg => "PostgreSQL Prepare Statement", name => Key, prepare_sql => SQL},
+    LogMeta = #{msg => "postgresql_prepare_statement", name => Key, prepare_sql => SQL},
     ?SLOG(info, LogMeta),
     case epgsql:parse2(Conn, Key, SQL, []) of
         {ok, Statement} ->
@@ -500,10 +500,10 @@ prepare_sql_to_conn(Conn, [{Key, SQL} | PrepareList], Statements) when is_pid(Co
         {error, {error, error, _, undefined_table, _, _} = Error} ->
             %% Target table is not created
             ?tp(pgsql_undefined_table, #{}),
-            ?SLOG(error, LogMeta#{msg => "PostgreSQL parse failed", error => Error}),
+            ?SLOG(error, LogMeta#{msg => "postgresql_parse_failed", error => Error}),
             {error, undefined_table};
         {error, Error} = Other ->
-            ?SLOG(error, LogMeta#{msg => "PostgreSQL parse failed", error => Error}),
+            ?SLOG(error, LogMeta#{msg => "postgresql_parse_failed", error => Error}),
             Other
     end.

View File

@@ -220,7 +220,7 @@ login(post, #{body := Params}) ->
     Password = maps:get(<<"password">>, Params),
     case emqx_dashboard_admin:sign_token(Username, Password) of
         {ok, Token} ->
-            ?SLOG(info, #{msg => "Dashboard login successfully", username => Username}),
+            ?SLOG(info, #{msg => "dashboard_login_successful", username => Username}),
             Version = iolist_to_binary(proplists:get_value(version, emqx_sys:info())),
             {200, #{
                 token => Token,
@@ -228,7 +228,7 @@ login(post, #{body := Params}) ->
                 license => #{edition => emqx_release:edition()}
             }};
         {error, R} ->
-            ?SLOG(info, #{msg => "Dashboard login failed", username => Username, reason => R}),
+            ?SLOG(info, #{msg => "dashboard_login_failed", username => Username, reason => R}),
             {401, ?BAD_USERNAME_OR_PWD, <<"Auth failed">>}
     end.
@@ -239,10 +239,10 @@ logout(_, #{
     Username = username(Req, Username0),
     case emqx_dashboard_admin:destroy_token_by_username(Username, Token) of
         ok ->
-            ?SLOG(info, #{msg => "Dashboard logout successfully", username => Username0}),
+            ?SLOG(info, #{msg => "dashboard_logout_successful", username => Username0}),
             204;
         _R ->
-            ?SLOG(info, #{msg => "Dashboard logout failed.", username => Username0}),
+            ?SLOG(info, #{msg => "dashboard_logout_failed.", username => Username0}),
             {401, ?WRONG_TOKEN_OR_USERNAME, <<"Ensure your token & username">>}
     end.
@@ -259,11 +259,11 @@ users(post, #{body := Params}) ->
         false ->
             case emqx_dashboard_admin:add_user(Username, Password, Role, Desc) of
                 {ok, Result} ->
-                    ?SLOG(info, #{msg => "Create dashboard success", username => Username}),
+                    ?SLOG(info, #{msg => "create_dashboard_user_success", username => Username}),
                     {200, filter_result(Result)};
                 {error, Reason} ->
                     ?SLOG(info, #{
-                        msg => "Create dashboard failed",
+                        msg => "create_dashboard_user_failed",
                         username => Username,
                         reason => Reason
                     }),
@@ -286,7 +286,7 @@ user(put, #{bindings := #{username := Username0}, body := Params} = Req) ->
 user(delete, #{bindings := #{username := Username0}, headers := Headers} = Req) ->
     case Username0 == emqx_dashboard_admin:default_username() of
         true ->
-            ?SLOG(info, #{msg => "Dashboard delete admin user failed", username => Username0}),
+            ?SLOG(info, #{msg => "dashboard_delete_admin_user_failed", username => Username0}),
             Message = list_to_binary(io_lib:format("Cannot delete user ~p", [Username0])),
             {400, ?NOT_ALLOWED, Message};
         false ->
@@ -300,7 +300,7 @@ user(delete, #{bindings := #{username := Username0}, headers := Headers} = Req)
             {404, ?USER_NOT_FOUND, Reason};
         {ok, _} ->
             ?SLOG(info, #{
-                msg => "Dashboard delete admin user", username => Username0
+                msg => "dashboard_delete_admin_user", username => Username0
             }),
             {204}
     end
@@ -340,12 +340,12 @@ is_self_auth_token(Username, Token) ->
     end.
 change_pwd(post, #{bindings := #{username := Username}, body := Params}) ->
-    LogMeta = #{msg => "Dashboard change password", username => binary_to_list(Username)},
+    LogMeta = #{msg => "dashboard_change_password", username => binary_to_list(Username)},
     OldPwd = maps:get(<<"old_pwd">>, Params),
     NewPwd = maps:get(<<"new_pwd">>, Params),
     case ?EMPTY(OldPwd) orelse ?EMPTY(NewPwd) of
         true ->
-            ?SLOG(error, LogMeta#{result => failed, reason => "password undefined or empty"}),
+            ?SLOG(error, LogMeta#{result => failed, reason => "password_undefined_or_empty"}),
             {400, ?BAD_REQUEST, <<"Old password or new password undefined">>};
         false ->
             case emqx_dashboard_admin:change_password(Username, OldPwd, NewPwd) of

View File

@@ -40,7 +40,7 @@ init() ->
 %% Load is incremental, so it can be called multiple times.
 %% NOTE: no garbage collection is done, because stale entries are harmless.
 load_desc(EtsTab, File) ->
-    ?SLOG(info, #{msg => "loading desc", file => File}),
+    ?SLOG(info, #{msg => "loading_desc", file => File}),
     {ok, Descs} = hocon:load(File),
     ["desc", Lang, "hocon"] = string:tokens(filename:basename(File), "."),
     Insert = fun(Namespace, Id, Tag, Text) ->

View File

@@ -141,7 +141,7 @@ current_rate(Node) when Node == node() ->
         {ok, Rate}
     catch
         _E:R ->
-            ?SLOG(warning, #{msg => "Dashboard monitor error", reason => R}),
+            ?SLOG(warning, #{msg => "dashboard_monitor_error", reason => R}),
             %% Rate map 0, ensure api will not crash.
             %% When joining cluster, dashboard monitor restart.
             Rate0 = [

View File

@@ -121,7 +121,7 @@ login(post, #{bindings := #{backend := Backend}, body := Sign}) ->
             Provider = emqx_dashboard_sso:provider(Backend),
             case emqx_dashboard_sso:login(Provider, Sign, State) of
                 {ok, Token} ->
-                    ?SLOG(info, #{msg => "Dashboard SSO login successfully", request => Sign}),
+                    ?SLOG(info, #{msg => "dashboard_sso_login_successful", request => Sign}),
                     Version = iolist_to_binary(proplists:get_value(version, emqx_sys:info())),
                     {200, #{
                         token => Token,
@@ -130,7 +130,7 @@ login(post, #{bindings := #{backend := Backend}, body := Sign}) ->
                     }};
                 {error, Reason} ->
                     ?SLOG(info, #{
-                        msg => "Dashboard SSO login failed",
+                        msg => "dashboard_sso_login_failed",
                         request => Sign,
                         reason => Reason
                     }),

View File

@@ -134,7 +134,8 @@ complete(
         ok ->
             ok;
         {error, Reason} ->
-            ?SLOG(warning, "filemeta_write_failed", #{
+            ?SLOG(warning, #{
+                msg => "filemeta_write_failed",
                 path => ManifestFilepath,
                 meta => Filemeta,
                 reason => Reason
@@ -294,7 +295,8 @@ try_read_filemeta(Filepath, Info) ->
         {ok, Filemeta} ->
             Info#{meta => Filemeta};
         {error, Reason} ->
-            ?SLOG(warning, "filemeta_inaccessible", #{
+            ?SLOG(warning, #{
+                msg => "filemeta_inaccessible",
                 path => Filepath,
                 reason => Reason
             }),
@@ -305,13 +307,15 @@ mk_export_uri(RelFilepath) ->
     emqx_ft_storage_exporter_fs_api:mk_export_uri(node(), RelFilepath).
 log_invalid_entry(Options, {_Type, RelFilepath, Fileinfo = #file_info{}, _Stack}) ->
-    ?SLOG(notice, "filesystem_object_unexpected", #{
+    ?SLOG(notice, #{
+        msg => "filesystem_object_unexpected",
         relpath => RelFilepath,
         fileinfo => Fileinfo,
         options => Options
     });
 log_invalid_entry(Options, {_Type, RelFilepath, {error, Reason}, _Stack}) ->
-    ?SLOG(warning, "filesystem_object_inaccessible", #{
+    ?SLOG(warning, #{
+        msg => "filesystem_object_inaccessible",
         relpath => RelFilepath,
         reason => Reason,
         options => Options
@@ -346,7 +350,8 @@ list(_Options, Query) ->
     Result = list(Query),
     case Result of
         #{errors := NodeErrors} ->
-            ?SLOG(warning, "list_exports_errors", #{
+            ?SLOG(warning, #{
+                msg => "list_exports_errors",
                 query => Query,
                 errors => NodeErrors
             });

View File

@@ -893,6 +893,6 @@ convert_certs(Type, SubDir, Conf) ->
         {ok, NSSL = #{}} ->
             Conf#{Type => NSSL};
         {error, Reason} ->
-            ?SLOG(error, Reason#{msg => bad_ssl_config}),
+            ?SLOG(error, Reason#{msg => "bad_ssl_config", reason => Reason}),
             throw({bad_ssl_config, Reason})
     end.

View File

@@ -1,6 +1,6 @@
 {application, emqx_gcp_device, [
     {description, "Application simplifying migration from GCP IoT Core"},
-    {vsn, "0.1.0"},
+    {vsn, "0.1.1"},
     {registered, []},
     {mod, {emqx_gcp_device_app, []}},
     {applications, [

View File

@@ -81,7 +81,7 @@ put_device(FormattedDevice) ->
     catch
         _Error:Reason ->
             ?SLOG(error, #{
-                msg => "Failed to put device",
+                msg => "failed_to_put_device",
                 device => FormattedDevice,
                 reason => Reason
             }),
@@ -246,7 +246,7 @@ import_device(Device, {NumImported, NumError}) ->
     catch
         Error:Reason:Stacktrace ->
             ?SLOG(error, #{
-                msg => "Failed to import device",
+                msg => "failed_to_import_device",
                 exception => Error,
                 reason => Reason,
                 stacktrace => Stacktrace

View File

@@ -305,7 +305,7 @@ global_zone_configs(put, #{body := Body}, _Req) ->
                 Acc#{Path => RawConf};
             {error, Reason} ->
                 ?SLOG(error, #{
-                    msg => "update global zone failed",
+                    msg => "update_global_zone_failed",
                     reason => Reason,
                     path => Path,
                     value => Value

View File

@@ -931,7 +931,7 @@ safe_call_mria(Fun, Args, OnFail) ->
     catch
         EC:Err:Stack ->
             ?SLOG(warning, #{
-                msg => "Call to mria failed",
+                msg => "call_to_mria_failed",
                 call => {mria, Fun, Args},
                 EC => Err,
                 stacktrace => Stack

View File

@@ -91,7 +91,10 @@ register_hook(Rules) ->
         [] ->
             ok;
         _ ->
-            ?SLOG(error, #{rewrite_rule_re_complie_failed => ErrRules}),
+            ?SLOG(error, #{
+                msg => "rewrite_rule_re_compile_failed",
+                error_rules => ErrRules
+            }),
             {error, ErrRules}
     end.

View File

@@ -1,6 +1,6 @@
 {application, emqx_mysql, [
     {description, "EMQX MySQL Database Connector"},
-    {vsn, "0.1.1"},
+    {vsn, "0.1.2"},
     {registered, []},
     {applications, [
         kernel,

View File

@@ -294,7 +294,7 @@ init_prepare(State = #{prepare_statement := Prepares, pool_name := PoolName}) ->
         ok ->
             State;
         {error, Reason} ->
-            LogMeta = #{msg => <<"MySQL init prepare statement failed">>, reason => Reason},
+            LogMeta = #{msg => <<"mysql_init_prepare_statement_failed">>, reason => Reason},
             ?SLOG(error, LogMeta),
             %% mark the prepare_statement as failed
             State#{prepare_statement => {error, Prepares}}
@@ -348,7 +348,7 @@ prepare_sql_to_conn_list([Conn | ConnList], PrepareList) ->
 prepare_sql_to_conn(Conn, []) when is_pid(Conn) -> ok;
 prepare_sql_to_conn(Conn, [{Key, SQL} | PrepareList]) when is_pid(Conn) ->
-    LogMeta = #{msg => "MySQL Prepare Statement", name => Key, prepare_sql => SQL},
+    LogMeta = #{msg => "mysql_prepare_statement", name => Key, prepare_sql => SQL},
     ?SLOG(info, LogMeta),
     _ = unprepare_sql_to_conn(Conn, Key),
     case mysql:prepare(Conn, Key, SQL) of
@@ -412,14 +412,14 @@ parse_batch_prepare_sql([{Key, H} | T], Prepares, Tokens, BatchInserts, BatchTks
                         BatchTks#{Key => ParamsTks}
                     );
                 {error, Reason} ->
-                    ?SLOG(error, #{msg => "split sql failed", sql => H, reason => Reason}),
+                    ?SLOG(error, #{msg => "split_sql_failed", sql => H, reason => Reason}),
                     parse_prepare_sql(T, Prepares, Tokens, BatchInserts, BatchTks)
             end;
         Type when is_atom(Type) ->
-            ?SLOG(error, #{msg => "detect sql type unsupported", sql => H, type => Type}),
+            ?SLOG(error, #{msg => "detect_sql_type_unsupported", sql => H, type => Type}),
             parse_prepare_sql(T, Prepares, Tokens, BatchInserts, BatchTks);
         {error, Reason} ->
-            ?SLOG(error, #{msg => "detect sql type failed", sql => H, reason => Reason}),
+            ?SLOG(error, #{msg => "detect_sql_type_failed", sql => H, reason => Reason}),
             parse_prepare_sql(T, Prepares, Tokens, BatchInserts, BatchTks)
     end.

View File

@@ -133,7 +133,7 @@ on_query(
     #{pool_name := PoolName} = State
 ) ->
     ?SLOG(debug, #{
-        msg => "oracle database connector received sql query",
+        msg => "oracle_connector_received_sql_query",
         connector => InstId,
         type => TypeOrKey,
         sql => NameOrSQL,
@@ -158,7 +158,7 @@ on_batch_query(
         connector => InstId,
         first_request => Request,
         state => State,
-        msg => "batch prepare not implemented"
+        msg => "batch_prepare_not_implemented"
     },
     ?SLOG(error, Log),
     {error, {unrecoverable_error, batch_prepare_not_implemented}};
@@ -180,7 +180,7 @@ on_batch_query(
         connector => InstId,
         request => BatchReq,
         state => State,
-        msg => "invalid request"
+        msg => "invalid_request"
     },
     ?SLOG(error, Log),
     {error, {unrecoverable_error, invalid_request}}
@@ -212,7 +212,7 @@ on_sql_query(InstId, PoolName, Type, ApplyMode, NameOrSQL, Data) ->
         #{error => Reason}
     ),
     ?SLOG(error, #{
-        msg => "oracle database connector do sql query failed",
+        msg => "oracle_connector_do_sql_query_failed",
         connector => InstId,
         type => Type,
         sql => NameOrSQL,
@@ -358,7 +358,7 @@ init_prepare(State = #{prepare_sql := Prepares, pool_name := PoolName, params_to
             State#{prepare_sql := Sts};
         Error ->
             LogMeta = #{
-                msg => <<"Oracle Database init prepare statement failed">>, error => Error
+                msg => <<"oracle_init_prepare_statement_failed">>, error => Error
             },
             ?SLOG(error, LogMeta),
             %% mark the prepare_sql as failed
@@ -396,7 +396,7 @@ prepare_sql_to_conn(Conn, Prepares, TokensMap) ->
 prepare_sql_to_conn(Conn, [], _TokensMap, Statements) when is_pid(Conn) -> {ok, Statements};
 prepare_sql_to_conn(Conn, [{Key, SQL} | PrepareList], TokensMap, Statements) when is_pid(Conn) ->
-    LogMeta = #{msg => "Oracle Database Prepare Statement", name => Key, prepare_sql => SQL},
+    LogMeta = #{msg => "oracle_prepare_statement", name => Key, prepare_sql => SQL},
     Tokens = maps:get(Key, TokensMap, []),
     ?SLOG(info, LogMeta),
     case check_if_table_exists(Conn, SQL, Tokens) of

View File

@@ -249,10 +249,10 @@ running(info, {flush_metrics, _Ref}, _Data) ->
 running(info, {'DOWN', _MRef, process, Pid, Reason}, Data0 = #{async_workers := AsyncWorkers0}) when
     is_map_key(Pid, AsyncWorkers0)
 ->
-    ?SLOG(info, #{msg => async_worker_died, state => running, reason => Reason}),
+    ?SLOG(info, #{msg => "async_worker_died", state => running, reason => Reason}),
     handle_async_worker_down(Data0, Pid);
 running(info, Info, _St) ->
-    ?SLOG(error, #{msg => unexpected_msg, state => running, info => Info}),
+    ?SLOG(error, #{msg => "unexpected_msg", state => running, info => Info}),
     keep_state_and_data.
 blocked(enter, _, #{resume_interval := ResumeT} = St0) ->
@@ -282,10 +282,10 @@ blocked(info, {flush_metrics, _Ref}, _Data) ->
 blocked(info, {'DOWN', _MRef, process, Pid, Reason}, Data0 = #{async_workers := AsyncWorkers0}) when
     is_map_key(Pid, AsyncWorkers0)
 ->
-    ?SLOG(info, #{msg => async_worker_died, state => blocked, reason => Reason}),
+    ?SLOG(info, #{msg => "async_worker_died", state => blocked, reason => Reason}),
     handle_async_worker_down(Data0, Pid);
 blocked(info, Info, _Data) ->
-    ?SLOG(error, #{msg => unexpected_msg, state => blocked, info => Info}),
+    ?SLOG(error, #{msg => "unexpected_msg", state => blocked, info => Info}),
     keep_state_and_data.
 terminate(_Reason, #{id := Id, index := Index, queue := Q}) ->
@@ -856,7 +856,7 @@ handle_query_result(Id, Result, HasBeenSent) ->
     {ack | nack, function(), counters()}.
 handle_query_result_pure(_Id, ?RESOURCE_ERROR_M(exception, Msg), _HasBeenSent) ->
     PostFn = fun() ->
-        ?SLOG(error, #{msg => resource_exception, info => Msg}),
+        ?SLOG(error, #{msg => "resource_exception", info => Msg}),
         ok
     end,
     {nack, PostFn, #{}};
@@ -866,19 +866,19 @@ handle_query_result_pure(_Id, ?RESOURCE_ERROR_M(NotWorking, _), _HasBeenSent) wh
     {nack, fun() -> ok end, #{}};
 handle_query_result_pure(Id, ?RESOURCE_ERROR_M(not_found, Msg), _HasBeenSent) ->
     PostFn = fun() ->
-        ?SLOG(error, #{id => Id, msg => resource_not_found, info => Msg}),
+        ?SLOG(error, #{id => Id, msg => "resource_not_found", info => Msg}),
         ok
     end,
     {ack, PostFn, #{dropped_resource_not_found => 1}};
 handle_query_result_pure(Id, ?RESOURCE_ERROR_M(stopped, Msg), _HasBeenSent) ->
     PostFn = fun() ->
-        ?SLOG(error, #{id => Id, msg => resource_stopped, info => Msg}),
+        ?SLOG(error, #{id => Id, msg => "resource_stopped", info => Msg}),
         ok
     end,
     {ack, PostFn, #{dropped_resource_stopped => 1}};
 handle_query_result_pure(Id, ?RESOURCE_ERROR_M(Reason, _), _HasBeenSent) ->
     PostFn = fun() ->
-        ?SLOG(error, #{id => Id, msg => other_resource_error, reason => Reason}),
+        ?SLOG(error, #{id => Id, msg => "other_resource_error", reason => Reason}),
         ok
     end,
     {nack, PostFn, #{}};
@@ -887,7 +887,7 @@ handle_query_result_pure(Id, {error, Reason} = Error, HasBeenSent) ->
         true ->
             PostFn =
                 fun() ->
-                    ?SLOG(error, #{id => Id, msg => unrecoverable_error, reason => Reason}),
+                    ?SLOG(error, #{id => Id, msg => "unrecoverable_error", reason => Reason}),
                     ok
                 end,
             Counters =
@@ -899,7 +899,7 @@ handle_query_result_pure(Id, {error, Reason} = Error, HasBeenSent) ->
         false ->
             PostFn =
                 fun() ->
-                    ?SLOG(error, #{id => Id, msg => send_error, reason => Reason}),
+                    ?SLOG(error, #{id => Id, msg => "send_error", reason => Reason}),
                     ok
                 end,
             {nack, PostFn, #{}}
@@ -925,7 +925,7 @@ handle_query_async_result_pure(Id, {error, Reason} = Error, HasBeenSent) ->
         true ->
             PostFn =
                 fun() ->
-                    ?SLOG(error, #{id => Id, msg => unrecoverable_error, reason => Reason}),
+                    ?SLOG(error, #{id => Id, msg => "unrecoverable_error", reason => Reason}),
                     ok
                 end,
             Counters =
@@ -936,7 +936,7 @@ handle_query_async_result_pure(Id, {error, Reason} = Error, HasBeenSent) ->
             {ack, PostFn, Counters};
         false ->
             PostFn = fun() ->
-                ?SLOG(error, #{id => Id, msg => async_send_error, reason => Reason}),
+                ?SLOG(error, #{id => Id, msg => "async_send_error", reason => Reason}),
                 ok
             end,
             {nack, PostFn, #{}}
@@ -1446,7 +1446,7 @@ append_queue(Id, Index, Q, Queries) ->
             Dropped = length(Items2),
             Counters = #{dropped_queue_full => Dropped},
             ?SLOG(info, #{
-                msg => buffer_worker_overflow,
+                msg => "buffer_worker_overflow",
                 resource_id => Id,
                 worker_index => Index,
                 dropped => Dropped
@@ -1850,7 +1850,7 @@ adjust_batch_time(Id, RequestTTL, BatchTime0) ->
         false ->
             ?SLOG(info, #{
                 id => Id,
-                msg => adjusting_buffer_worker_batch_time,
+                msg => "adjusting_buffer_worker_batch_time",
                 new_batch_time => BatchTime
             });
         true ->

View File

@@ -204,7 +204,7 @@ remove(ResId, ClearMetrics) when is_binary(ResId) ->
         safe_call(ResId, {remove, ClearMetrics}, ?T_OPERATION)
     after
         %% Ensure the supervisor has it removed, otherwise the immediate re-add will see a stale process
-        %% If the 'remove' call babove had succeeded, this is mostly a no-op but still needed to avoid race condition.
+        %% If the 'remove' call above had succeeded, this is mostly a no-op but still needed to avoid race condition.
         %% Otherwise this is a 'infinity' shutdown, so it may take arbitrary long.
         emqx_resource_manager_sup:delete_child(ResId)
     end.
@@ -411,7 +411,7 @@ handle_event(EventType, EventData, State, Data) ->
     ?SLOG(
         error,
         #{
-            msg => ignore_all_other_events,
+            msg => "ignore_all_other_events",
             event_type => EventType,
             event_data => EventData,
             state => State,
@@ -488,7 +488,7 @@ start_resource(Data, From) ->
             {next_state, connecting, update_state(UpdatedData, Data), Actions};
         {error, Reason} = Err ->
             ?SLOG(warning, #{
-                msg => start_resource_failed,
+                msg => "start_resource_failed",
                 id => Data#data.id,
                 reason => Reason
             }),
@@ -555,7 +555,7 @@ handle_connected_health_check(Data) ->
             {keep_state, UpdatedData, health_check_actions(UpdatedData)};
         (Status, UpdatedData) ->
             ?SLOG(warning, #{
-                msg => health_check_failed,
+                msg => "health_check_failed",
                 id => Data#data.id,
                 status => Status
             }),
@@ -631,7 +631,7 @@ parse_health_check_result({error, Error}, Data) ->
     ?SLOG(
         error,
         #{
-            msg => health_check_exception,
+            msg => "health_check_exception",
             resource_id => Data#data.id,
             reason => Error
         }