Merge pull request #11641 from zhongwencool/slog-msg-unify

zhongwencool 2023-09-21 07:32:39 +08:00 committed by GitHub
commit 1b29e4b189
47 changed files with 164 additions and 153 deletions
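The pattern repeated across all 47 files: ?SLOG call sites that logged bare atoms, free-form sentences, io_lib:format/2 results, or ad-hoc map keys are normalized to a single msg key holding a snake_case string, which keeps the structured logs grep-able and machine-parsable. A minimal before/after sketch of the convention, distilled from the hunks below (Stream and E are stand-in variables taken from one of the changed call sites):

    %% before: free-form text under a non-standard key
    ?SLOG(error, #{message => "new stream handoff failed", stream => Stream, error => E}),
    %% after: a single msg key with a snake_case string, same structured fields
    ?SLOG(error, #{msg => "new_stream_handoff_failed", stream => Stream, error => E}),

Along the way the diff also bumps the vsn of every touched application and fixes a handful of typos (connecion, brige_action, infomatoin, babove).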


@@ -91,7 +91,7 @@ check_subscribe(#{clientid := ClientId}, Topic) ->
Res;
{aborted, Reason} ->
?SLOG(warning, #{
- msg => "Cannot check subscribe ~p due to ~p.", topic => Topic, reason => Reason
+ msg => "check_subscribe_aborted", topic => Topic, reason => Reason
}),
deny
end.


@@ -622,7 +622,7 @@ find_referenced_bucket(Id, Type, #{rate := Rate} = Cfg) when Rate =/= infinity -
{ok, Bucket} ->
{ok, Bucket, Cfg};
_ ->
- ?SLOG(error, #{msg => "bucket not found", type => Type, id => Id}),
+ ?SLOG(error, #{msg => "bucket_not_found", type => Type, id => Id}),
{error, invalid_bucket}
end;
%% this is a node-level reference


@@ -640,7 +640,7 @@ esockd_access_rules(StrRules) ->
| Acc
];
_ ->
- ?SLOG(warning, #{msg => "invalid esockd access rule", rule => S}),
+ ?SLOG(warning, #{msg => "invalid_esockd_access_rule", rule => S}),
Acc
end
end,


@@ -91,7 +91,7 @@ closed(_Conn, #{is_peer_acked := _} = Prop, S) ->
?SLOG(debug, Prop),
{stop, normal, S}.
- %% @doc handle the new incoming connecion as the connecion acceptor.
+ %% @doc handle the new incoming connection as the connection acceptor.
-spec new_conn(quicer:connection_handle(), quicer:new_conn_props(), cb_state()) ->
{ok, cb_state()} | {error, any(), cb_state()}.
new_conn(
@@ -183,13 +183,13 @@ new_stream(
ok ->
ok;
E ->
- %% Only log, keep connecion alive.
- ?SLOG(error, #{message => "new stream handoff failed", stream => Stream, error => E})
+ %% Only log, keep connection alive.
+ ?SLOG(error, #{msg => "new_stream_handoff_failed", stream => Stream, error => E})
end,
%% @TODO maybe keep them in `inactive_streams'
{ok, S#{streams := [{NewStreamOwner, Stream} | Streams]}}.
- %% @doc callback for handling remote connecion shutdown.
+ %% @doc callback for handling remote connection shutdown.
-spec shutdown(quicer:connection_handle(), quicer:error_code(), cb_state()) -> cb_ret().
shutdown(Conn, ErrorCode, S) ->
ErrorCode =/= 0 andalso ?SLOG(debug, #{error_code => ErrorCode, state => S}),
@@ -234,7 +234,7 @@ streams_available(_C, {BidirCnt, UnidirCnt}, S) ->
-spec peer_needs_streams(quicer:connection_handle(), undefined, cb_state()) -> cb_ret().
peer_needs_streams(_C, undefined, S) ->
?SLOG(info, #{
- msg => "ignore: peer need more streames", info => maps:with([conn_pid, ctrl_pid], S)
+ msg => "ignore_peer_needs_more_streams", info => maps:with([conn_pid, ctrl_pid], S)
}),
{ok, S}.
@@ -281,7 +281,7 @@ handle_info({'EXIT', Pid, Reason}, #{streams := Streams} = S) ->
->
{ok, S};
true ->
- ?SLOG(info, #{message => "Data stream unexpected exit", reason => Reason}),
+ ?SLOG(info, #{msg => "data_stream_unexpected_exit", reason => Reason}),
{ok, S};
false ->
{stop, unknown_pid_down, S}


@@ -368,7 +368,7 @@ do_handle_call(
ok ->
{reply, ok, NewS};
{error, E} ->
- ?SLOG(error, #{msg => "set stream active failed", error => E}),
+ ?SLOG(error, #{msg => "set_stream_active_failed", error => E}),
{stop, E, NewS}
end;
do_handle_call(_Call, _S) ->


@@ -191,7 +191,7 @@ peer_send_shutdown(Stream, undefined, S) ->
send_complete(_Stream, false, S) ->
{ok, S};
send_complete(_Stream, true = _IsCancelled, S) ->
- ?SLOG(error, #{message => "send cancelled"}),
+ ?SLOG(error, #{msg => "send_cancelled"}),
{ok, S}.
-spec send_shutdown_complete(stream_handle(), boolean(), cb_data()) -> cb_ret().
@@ -202,7 +202,7 @@ send_shutdown_complete(_Stream, _IsGraceful, S) ->
passive(Stream, undefined, S) ->
case quicer:setopt(Stream, active, 10) of
ok -> ok;
- Error -> ?SLOG(error, #{message => "set active error", error => Error})
+ Error -> ?SLOG(error, #{msg => "set_active_error", error => Error})
end,
{ok, S}.


@@ -120,7 +120,7 @@ handle_info({monitor, Pid, long_gc, Info}, State) ->
fun() ->
WarnMsg = io_lib:format("long_gc warning: pid = ~p", [Pid]),
?SLOG(warning, #{
- msg => long_gc,
+ msg => "long_gc",
info => Info,
porcinfo => procinfo(Pid)
}),
@@ -134,7 +134,7 @@ handle_info({monitor, Pid, long_schedule, Info}, State) when is_pid(Pid) ->
fun() ->
WarnMsg = io_lib:format("long_schedule warning: pid = ~p", [Pid]),
?SLOG(warning, #{
- msg => long_schedule,
+ msg => "long_schedule",
info => Info,
procinfo => procinfo(Pid)
}),
@@ -148,7 +148,7 @@ handle_info({monitor, Port, long_schedule, Info}, State) when is_port(Port) ->
fun() ->
WarnMsg = io_lib:format("long_schedule warning: port = ~p", [Port]),
?SLOG(warning, #{
- msg => long_schedule,
+ msg => "long_schedule",
info => Info,
portinfo => portinfo(Port)
}),
@@ -162,7 +162,7 @@ handle_info({monitor, Pid, large_heap, Info}, State) ->
fun() ->
WarnMsg = io_lib:format("large_heap warning: pid = ~p", [Pid]),
?SLOG(warning, #{
- msg => large_heap,
+ msg => "large_heap",
info => Info,
procinfo => procinfo(Pid)
}),
@@ -176,7 +176,7 @@ handle_info({monitor, SusPid, busy_port, Port}, State) ->
fun() ->
WarnMsg = io_lib:format("busy_port warning: suspid = ~p, port = ~p", [SusPid, Port]),
?SLOG(warning, #{
- msg => busy_port,
+ msg => "busy_port",
portinfo => portinfo(Port),
procinfo => procinfo(SusPid)
}),
@@ -190,7 +190,7 @@ handle_info({monitor, SusPid, busy_dist_port, Port}, State) ->
fun() ->
WarnMsg = io_lib:format("busy_dist_port warning: suspid = ~p, port = ~p", [SusPid, Port]),
?SLOG(warning, #{
- msg => busy_dist_port,
+ msg => "busy_dist_port",
portinfo => portinfo(Port),
procinfo => procinfo(SusPid)
}),


@@ -247,14 +247,14 @@ handle_call(check, _From, State) ->
{_, NewState} = handle_info({mnesia_table_event, check}, State),
{reply, ok, NewState};
handle_call(Req, _From, State) ->
- ?SLOG(error, #{unexpected_call => Req}),
+ ?SLOG(error, #{msg => "unexpected_call", req => Req}),
{reply, ok, State}.
handle_cast({delete_tag, Pid, Files}, State = #{monitors := Monitors}) ->
erlang:monitor(process, Pid),
{noreply, State#{monitors => Monitors#{Pid => Files}}};
handle_cast(Msg, State) ->
- ?SLOG(error, #{unexpected_cast => Msg}),
+ ?SLOG(error, #{msg => "unexpected_cast", req => Msg}),
{noreply, State}.
handle_info({'DOWN', _Ref, process, Pid, _Reason}, State = #{monitors := Monitors}) ->
@@ -275,7 +275,7 @@ handle_info({mnesia_table_event, _Events}, State = #{timer := TRef}) ->
emqx_utils:cancel_timer(TRef),
handle_info({timeout, TRef, update_trace}, State);
handle_info(Info, State) ->
- ?SLOG(error, #{unexpected_info => Info}),
+ ?SLOG(error, #{msg => "unexpected_info", req => Info}),
{noreply, State}.
terminate(_Reason, #{timer := TRef}) ->


@@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_authz, [
- {description, "An OTP application"},
- {vsn, "0.1.25"},
+ {description, "emqx authorization application"},
+ {vsn, "0.1.26"},
{registered, []},
{mod, {emqx_authz_app, []}},
{applications, [


@@ -53,7 +53,7 @@ validate(Path0) ->
}),
throw(failed_to_read_acl_file);
{error, Reason} ->
- ?SLOG(alert, #{msg => bad_acl_file_content, path => Path, reason => Reason}),
+ ?SLOG(alert, #{msg => "bad_acl_file_content", path => Path, reason => Reason}),
throw({bad_acl_file_content, Reason})
end,
{ok, Rules}.


@@ -95,7 +95,7 @@ authorize(
case emqx_authz_utils:parse_http_resp_body(ContentType, Body) of
error ->
?SLOG(error, #{
- msg => authz_http_response_incorrect,
+ msg => "authz_http_response_incorrect",
content_type => ContentType,
body => Body
}),
@@ -123,7 +123,7 @@ log_nomtach_msg(Status, Headers, Body) ->
?SLOG(
debug,
#{
- msg => unexpected_authz_http_response,
+ msg => "unexpected_authz_http_response",
status => Status,
content_type => emqx_authz_utils:content_type(Headers),
body => Body


@@ -336,7 +336,7 @@ disable_enable(Action, BridgeType, BridgeName) when
create(BridgeType, BridgeName, RawConf) ->
?SLOG(debug, #{
- brige_action => create,
+ bridge_action => create,
bridge_type => BridgeType,
bridge_name => BridgeName,
bridge_raw_config => emqx_utils:redact(RawConf)
@@ -349,7 +349,7 @@ create(BridgeType, BridgeName, RawConf) ->
remove(BridgeType, BridgeName) ->
?SLOG(debug, #{
- brige_action => remove,
+ bridge_action => remove,
bridge_type => BridgeType,
bridge_name => BridgeName
}),


@@ -174,7 +174,7 @@ create(Type, Name, Conf) ->
create(Type, Name, Conf0, Opts) ->
?SLOG(info, #{
- msg => "create bridge",
+ msg => "create_bridge",
type => Type,
name => Name,
config => emqx_utils:redact(Conf0)
@@ -209,7 +209,7 @@ update(Type, Name, {OldConf, Conf}, Opts) ->
case emqx_utils_maps:if_only_to_toggle_enable(OldConf, Conf) of
false ->
?SLOG(info, #{
- msg => "update bridge",
+ msg => "update_bridge",
type => Type,
name => Name,
config => emqx_utils:redact(Conf)


@@ -1,6 +1,6 @@
{application, emqx_bridge_clickhouse, [
{description, "EMQX Enterprise ClickHouse Bridge"},
- {vsn, "0.2.2"},
+ {vsn, "0.2.3"},
{registered, []},
{applications, [
kernel,


@@ -349,7 +349,7 @@ on_query(
#{pool_name := PoolName} = State
) ->
?SLOG(debug, #{
- msg => "clickhouse connector received sql query",
+ msg => "clickhouse_connector_received_sql_query",
connector => ResourceID,
type => RequestType,
sql => DataOrSQL,
@@ -463,7 +463,7 @@ transform_and_log_clickhouse_result({ok, ResponseCode, Data}, _, _) when
Result;
transform_and_log_clickhouse_result(ClickhouseErrorResult, ResourceID, SQL) ->
?SLOG(error, #{
- msg => "clickhouse connector do sql query failed",
+ msg => "clickhouse_connector_do_sql_query_failed",
connector => ResourceID,
sql => SQL,
reason => ClickhouseErrorResult
@@ -474,7 +474,7 @@ transform_and_log_clickhouse_result(ClickhouseErrorResult, ResourceID, SQL) ->
%% handles all error cases that we need to handle as recoverable_error
true ->
?SLOG(warning, #{
- msg => "clickhouse connector: sql query failed (recoverable)",
+ msg => "clickhouse_connector_sql_query_failed_recoverable",
recoverable_error => true,
connector => ResourceID,
sql => SQL,
@@ -483,7 +483,7 @@ transform_and_log_clickhouse_result(ClickhouseErrorResult, ResourceID, SQL) ->
to_recoverable_error(ClickhouseErrorResult);
false ->
?SLOG(error, #{
- msg => "clickhouse connector: sql query failed (unrecoverable)",
+ msg => "clickhouse_connector_sql_query_failed_unrecoverable",
recoverable_error => false,
connector => ResourceID,
sql => SQL,


@@ -176,7 +176,7 @@ desc(greptimedb) ->
start_client(InstId, Config) ->
ClientConfig = client_config(InstId, Config),
?SLOG(info, #{
- msg => "starting greptimedb connector",
+ msg => "starting_greptimedb_connector",
connector => InstId,
config => emqx_utils:redact(Config),
client_config => emqx_utils:redact(ClientConfig)
@@ -191,7 +191,7 @@ start_client(InstId, Config) ->
E:R:S ->
?tp(greptimedb_connector_start_exception, #{error => {E, R}}),
?SLOG(warning, #{
- msg => "start greptimedb connector error",
+ msg => "start_greptimedb_connector_error",
connector => InstId,
error => E,
reason => emqx_utils:redact(R),
@@ -216,7 +216,7 @@ do_start_client(
write_syntax => to_config(Lines, Precision)
},
?SLOG(info, #{
- msg => "starting greptimedb connector success",
+ msg => "starting_greptimedb_connector_success",
connector => InstId,
client => redact_auth(Client),
state => redact_auth(State)
@@ -239,7 +239,7 @@ do_start_client(
{error, {already_started, Client0}} ->
?tp(greptimedb_connector_start_already_started, #{}),
?SLOG(info, #{
- msg => "restarting greptimedb connector, found already started client",
+ msg => "restarting_greptimedb_connector_found_already_started_client",
connector => InstId,
old_client => redact_auth(Client0)
}),
@@ -321,7 +321,7 @@ do_query(InstId, Client, Points) ->
case greptimedb:write_batch(Client, Points) of
{ok, #{response := {affected_rows, #{value := Rows}}}} ->
?SLOG(debug, #{
- msg => "greptimedb write point success",
+ msg => "greptimedb_write_point_success",
connector => InstId,
points => Points
}),
@@ -337,7 +337,7 @@ do_query(InstId, Client, Points) ->
{error, Reason} = Err ->
?tp(greptimedb_connector_do_query_failure, #{error => Reason}),
?SLOG(error, #{
- msg => "greptimedb write point failed",
+ msg => "greptimedb_write_point_failed",
connector => InstId,
reason => Reason
}),
@@ -422,7 +422,8 @@ parse_batch_data(InstId, BatchData, SyntaxLines) ->
{ok, lists:flatten(Points)};
_ ->
?SLOG(error, #{
- msg => io_lib:format("Greptimedb trans point failed, count: ~p", [Errors]),
+ msg => "greptimedb_trans_point_failed",
+ error_count => Errors,
connector => InstId,
reason => points_trans_failed
}),
@@ -583,7 +584,7 @@ log_error_points(InstId, Errs) ->
lists:foreach(
fun({error, Reason}) ->
?SLOG(error, #{
- msg => "greptimedb trans point failed",
+ msg => "greptimedb_trans_point_failed",
connector => InstId,
reason => Reason
})


@@ -346,7 +346,8 @@ query_by_clientid(Topic, ClientId, Config) ->
BridgeId = emqx_bridge_resource:bridge_id(Type, GreptimedbName),
?SLOG(error, #{
- msg => io_lib:format("Failed to query: ~p, ~p", [Code, Error]),
+ msg => "failed_to_query",
+ code => Code,
connector => BridgeId,
reason => Error
}),


@@ -52,7 +52,7 @@ on_stop(InstId, _State) ->
StopClientRes = hstreamdb:stop_client(Client),
StopProducerRes = hstreamdb:stop_producer(Producer),
?SLOG(info, #{
- msg => "stop hstreamdb connector",
+ msg => "stop_hstreamdb_connector",
connector => InstId,
client => Client,
producer => Producer,
@@ -146,7 +146,7 @@ start_client(InstId, Config) ->
catch
E:R:S ->
Error = #{
- msg => "start hstreamdb connector error",
+ msg => "start_hstreamdb_connector_error",
connector => InstId,
error => E,
reason => R,
@@ -158,7 +158,7 @@ start_client(InstId, Config) ->
do_start_client(InstId, Config = #{url := Server, pool_size := PoolSize, ssl := SSL}) ->
?SLOG(info, #{
- msg => "starting hstreamdb connector: client",
+ msg => "starting_hstreamdb_connector_client",
connector => InstId,
config => Config
}),
@@ -185,7 +185,7 @@ do_start_client(InstId, Config = #{url := Server, pool_size := PoolSize, ssl :=
case is_alive(Client) of
true ->
?SLOG(info, #{
- msg => "hstreamdb connector: client started",
+ msg => "hstreamdb_connector_client_started",
connector => InstId,
client => Client
}),
@@ -196,14 +196,14 @@ do_start_client(InstId, Config = #{url := Server, pool_size := PoolSize, ssl :=
#{error => client_not_alive}
),
?SLOG(error, #{
- msg => "hstreamdb connector: client not alive",
+ msg => "hstreamdb_connector_client_not_alive",
connector => InstId
}),
{error, connect_failed}
end;
{error, {already_started, Pid}} ->
?SLOG(info, #{
- msg => "starting hstreamdb connector: client, find old client. restart client",
+ msg => "starting_hstreamdb_connector_client_find_old_client_restart_client",
old_client_pid => Pid,
old_client_name => ClientName
}),
@@ -211,7 +211,7 @@ do_start_client(InstId, Config = #{url := Server, pool_size := PoolSize, ssl :=
start_client(InstId, Config);
{error, Error} ->
?SLOG(error, #{
- msg => "hstreamdb connector: client failed",
+ msg => "hstreamdb_connector_client_failed",
connector => InstId,
reason => Error
}),
@@ -239,13 +239,13 @@ start_producer(
],
Name = produce_name(InstId),
?SLOG(info, #{
- msg => "starting hstreamdb connector: producer",
+ msg => "starting_hstreamdb_connector_producer",
connector => InstId
}),
case hstreamdb:start_producer(Client, Name, ProducerOptions) of
{ok, Producer} ->
?SLOG(info, #{
- msg => "hstreamdb connector: producer started"
+ msg => "hstreamdb_connector_producer_started"
}),
State = #{
client => Client,
@@ -263,7 +263,7 @@ start_producer(
{error, {already_started, Pid}} ->
?SLOG(info, #{
msg =>
- "starting hstreamdb connector: producer, find old producer. restart producer",
+ "starting_hstreamdb_connector_producer_find_old_producer_restart_producer",
old_producer_pid => Pid,
old_producer_name => Name
}),
@@ -271,7 +271,7 @@ start_producer(
start_producer(InstId, Client, Options);
{error, Reason} ->
?SLOG(error, #{
- msg => "starting hstreamdb connector: producer, failed",
+ msg => "starting_hstreamdb_connector_producer_failed",
reason => Reason
}),
{error, Reason}
@@ -318,7 +318,7 @@ handle_result({ok, Result}, Record, IsBatch) ->
#{result => Result, is_batch => IsBatch}
),
?SLOG(debug, #{
- msg => "HStreamDB producer sync append success",
+ msg => "hstreamdb_producer_sync_append_success",
record => Record,
is_batch => IsBatch
});
@@ -328,7 +328,7 @@ handle_result({error, Reason} = Err, Record, IsBatch) ->
#{error => Reason, is_batch => IsBatch}
),
?SLOG(error, #{
- msg => "HStreamDB producer sync append failed",
+ msg => "hstreamdb_producer_sync_append_failed",
reason => Reason,
record => Record,
is_batch => IsBatch


@@ -1,6 +1,6 @@
{application, emqx_bridge_http, [
{description, "EMQX HTTP Bridge and Connector Application"},
- {vsn, "0.1.2"},
+ {vsn, "0.1.3"},
{registered, []},
{applications, [kernel, stdlib, emqx_connector, emqx_resource, ehttpc]},
{env, []},


@@ -328,7 +328,7 @@ on_query(
{error, {recoverable_error, Reason}};
{error, #{status_code := StatusCode}} ->
?SLOG(error, #{
- msg => "http connector do request, received error response.",
+ msg => "http_connector_do_request_received_error_response",
note => "the body will be redacted due to security reasons",
request => redact_request(NRequest),
connector => InstId,


@@ -1,6 +1,6 @@
{application, emqx_bridge_influxdb, [
{description, "EMQX Enterprise InfluxDB Bridge"},
- {vsn, "0.1.4"},
+ {vsn, "0.1.5"},
{registered, []},
{applications, [
kernel,


@@ -230,7 +230,7 @@ desc(influxdb_api_v2) ->
start_client(InstId, Config) ->
ClientConfig = client_config(InstId, Config),
?SLOG(info, #{
- msg => "starting influxdb connector",
+ msg => "starting_influxdb_connector",
connector => InstId,
config => emqx_utils:redact(Config),
client_config => emqx_utils:redact(ClientConfig)
@@ -245,7 +245,7 @@ start_client(InstId, Config) ->
E:R:S ->
?tp(influxdb_connector_start_exception, #{error => {E, R}}),
?SLOG(warning, #{
- msg => "start influxdb connector error",
+ msg => "start_influxdb_connector_error",
connector => InstId,
error => E,
reason => R,
@@ -271,7 +271,7 @@ do_start_client(
write_syntax => to_config(Lines, Precision)
},
?SLOG(info, #{
- msg => "starting influxdb connector success",
+ msg => "starting_influxdb_connector_success",
connector => InstId,
client => redact_auth(Client),
state => redact_auth(State)
@@ -307,7 +307,7 @@ do_start_client(
{error, {already_started, Client0}} ->
?tp(influxdb_connector_start_already_started, #{}),
?SLOG(info, #{
- msg => "restarting influxdb connector, found already started client",
+ msg => "restarting_influxdb_connector_found_already_started_client",
connector => InstId,
old_client => redact_auth(Client0)
}),
@@ -401,7 +401,7 @@ do_query(InstId, Client, Points) ->
case influxdb:write(Client, Points) of
ok ->
?SLOG(debug, #{
- msg => "influxdb write point success",
+ msg => "influxdb_write_point_success",
connector => InstId,
points => Points
});
@@ -416,7 +416,7 @@ do_query(InstId, Client, Points) ->
{error, Reason} = Err ->
?tp(influxdb_connector_do_query_failure, #{error => Reason}),
?SLOG(error, #{
- msg => "influxdb write point failed",
+ msg => "influxdb_write_point_failed",
connector => InstId,
reason => Reason
}),
@@ -430,7 +430,7 @@ do_query(InstId, Client, Points) ->
do_async_query(InstId, Client, Points, ReplyFunAndArgs) ->
?SLOG(info, #{
- msg => "influxdb write point async",
+ msg => "influxdb_write_point_async",
connector => InstId,
points => Points
}),
@@ -522,7 +522,8 @@ parse_batch_data(InstId, BatchData, SyntaxLines) ->
{ok, lists:flatten(Points)};
_ ->
?SLOG(error, #{
- msg => io_lib:format("InfluxDB trans point failed, count: ~p", [Errors]),
+ msg => "influxdb_trans_point_failed",
+ error_count => Errors,
connector => InstId,
reason => points_trans_failed
}),
@@ -683,7 +684,7 @@ log_error_points(InstId, Errs) ->
lists:foreach(
fun({error, Reason}) ->
?SLOG(error, #{
- msg => "influxdb trans point failed",
+ msg => "influxdb_trans_point_failed",
connector => InstId,
reason => Reason
})


@@ -78,7 +78,7 @@ tcp_keepalive(KeepAlive) ->
Opts;
{error, {unsupported_os, OS}} ->
?SLOG(warning, #{
- msg => "Unsupported operation: set TCP keepalive",
+ msg => "unsupported_operation_set_tcp_keepalive",
os => OS
}),
[]


@@ -1,6 +1,6 @@
{application, emqx_bridge_opents, [
{description, "EMQX Enterprise OpenTSDB Bridge"},
- {vsn, "0.1.2"},
+ {vsn, "0.1.3"},
{registered, []},
{applications, [
kernel,


@@ -112,7 +112,7 @@ on_get_status(_InstanceId, #{server := Server}) ->
ok ->
connected;
{error, Reason} ->
- ?SLOG(error, #{msg => "OpenTSDB lost connection", reason => Reason}),
+ ?SLOG(error, #{msg => "opents_lost_connection", reason => Reason}),
connecting
end,
Result.


@@ -1,6 +1,6 @@
{application, emqx_bridge_rabbitmq, [
{description, "EMQX Enterprise RabbitMQ Bridge"},
- {vsn, "0.1.4"},
+ {vsn, "0.1.5"},
{registered, []},
{applications, [
kernel,


@@ -258,7 +258,7 @@ on_stop(
_State
) ->
?SLOG(info, #{
- msg => "stopping RabbitMQ connector",
+ msg => "stopping_rabbitmq_connector",
connector => ResourceID
}),
stop_clients_and_pool(ResourceID).
@@ -429,7 +429,7 @@ on_query(
} = State
) ->
?SLOG(debug, #{
- msg => "RabbitMQ connector received query",
+ msg => "rabbitmq_connector_received_query",
connector => ResourceID,
type => RequestType,
data => Data,
@@ -453,7 +453,7 @@ on_batch_query(
State
) ->
?SLOG(debug, #{
- msg => "RabbitMQ connector received batch query",
+ msg => "rabbitmq_connector_received_batch_query",
connector => ResourceID,
data => BatchReq,
state => emqx_utils:redact(State)


@@ -456,14 +456,14 @@ parse_sql_template([{Key, H} | T], BatchInsertTks) ->
}
);
{error, Reason} ->
- ?SLOG(error, #{msg => "split sql failed", sql => H, reason => Reason}),
+ ?SLOG(error, #{msg => "split_sql_failed", sql => H, reason => Reason}),
parse_sql_template(T, BatchInsertTks)
end;
Type when is_atom(Type) ->
- ?SLOG(error, #{msg => "detect sql type unsupported", sql => H, type => Type}),
+ ?SLOG(error, #{msg => "detect_sql_type_unsupported", sql => H, type => Type}),
parse_sql_template(T, BatchInsertTks);
{error, Reason} ->
- ?SLOG(error, #{msg => "detect sql type failed", sql => H, reason => Reason}),
+ ?SLOG(error, #{msg => "detect_sql_type_failed", sql => H, reason => Reason}),
parse_sql_template(T, BatchInsertTks)
end;
parse_sql_template([], BatchInsertTks) ->
@@ -491,8 +491,8 @@ apply_template(
{Key, SQL}
end;
apply_template(Query, Templates) ->
- %% TODO: more detail infomatoin
- ?SLOG(error, #{msg => "apply sql template failed", query => Query, templates => Templates}),
+ %% TODO: more detail information
+ ?SLOG(error, #{msg => "apply_sql_template_failed", query => Query, templates => Templates}),
{error, failed_to_apply_sql_template}.
proc_batch_sql(BatchReqs, BatchInserts, Tokens) ->


@@ -1,6 +1,6 @@
{application, emqx_bridge_tdengine, [
{description, "EMQX Enterprise TDEngine Bridge"},
- {vsn, "0.1.4"},
+ {vsn, "0.1.5"},
{registered, []},
{applications, [
kernel,


@@ -148,7 +148,7 @@ on_batch_query(
end;
on_batch_query(InstanceId, BatchReq, State) ->
LogMeta = #{connector => InstanceId, request => BatchReq, state => State},
- ?SLOG(error, LogMeta#{msg => "invalid request"}),
+ ?SLOG(error, LogMeta#{msg => "invalid_request"}),
{error, {unrecoverable_error, invalid_request}}.
on_get_status(_InstanceId, #{pool_name := PoolName}) ->
@@ -253,14 +253,14 @@ parse_batch_prepare_sql([{Key, H} | T], InsertTksMap, BatchTksMap) ->
BatchTksMap#{Key => BatchTks}
);
Result ->
- ?SLOG(error, #{msg => "split sql failed", sql => H, result => Result}),
+ ?SLOG(error, #{msg => "split_sql_failed", sql => H, result => Result}),
parse_batch_prepare_sql(T, InsertTksMap, BatchTksMap)
end;
Type when is_atom(Type) ->
- ?SLOG(error, #{msg => "detect sql type unsupported", sql => H, type => Type}),
+ ?SLOG(error, #{msg => "detect_sql_type_unsupported", sql => H, type => Type}),
parse_batch_prepare_sql(T, InsertTksMap, BatchTksMap);
{error, Reason} ->
- ?SLOG(error, #{msg => "detect sql type failed", sql => H, reason => Reason}),
+ ?SLOG(error, #{msg => "detect_sql_type_failed", sql => H, reason => Reason}),
parse_batch_prepare_sql(T, InsertTksMap, BatchTksMap)
end;
parse_batch_prepare_sql([], InsertTksMap, BatchTksMap) ->


@@ -152,7 +152,7 @@ on_start(
on_stop(InstId, _State) ->
?SLOG(info, #{
- msg => "stopping postgresql connector",
+ msg => "stopping_postgresql_connector",
connector => InstId
}),
emqx_resource_pool:stop(InstId).
@@ -165,7 +165,7 @@ on_query(
#{pool_name := PoolName} = State
) ->
?SLOG(debug, #{
- msg => "postgresql connector received sql query",
+ msg => "postgresql_connector_received_sql_query",
connector => InstId,
type => TypeOrKey,
sql => NameOrSQL,
@@ -200,7 +200,7 @@ on_batch_query(
connector => InstId,
first_request => Request,
state => State,
- msg => "batch prepare not implemented"
+ msg => "batch_prepare_not_implemented"
},
?SLOG(error, Log),
{error, {unrecoverable_error, batch_prepare_not_implemented}};
@@ -220,7 +220,7 @@ on_batch_query(
connector => InstId,
request => BatchReq,
state => State,
- msg => "invalid request"
+ msg => "invalid_request"
},
?SLOG(error, Log),
{error, {unrecoverable_error, invalid_request}}
@@ -247,7 +247,7 @@ on_sql_query(InstId, PoolName, Type, NameOrSQL, Data) ->
#{error => Reason}
),
?SLOG(error, #{
- msg => "postgresql connector do sql query failed",
+ msg => "postgresql_connector_do_sql_query_failed",
connector => InstId,
type => Type,
sql => NameOrSQL,
@@ -272,7 +272,7 @@ on_sql_query(InstId, PoolName, Type, NameOrSQL, Data) ->
catch
error:function_clause:Stacktrace ->
?SLOG(error, #{
- msg => "postgresql connector do sql query failed",
+ msg => "postgresql_connector_do_sql_query_failed",
connector => InstId,
type => Type,
sql => NameOrSQL,
@@ -453,7 +453,7 @@ init_prepare(State = #{prepare_sql := Prepares, pool_name := PoolName}) ->
State#{prepare_statement := Sts};
Error ->
LogMeta = #{
- msg => <<"PostgreSQL init prepare statement failed">>, error => Error
+ msg => <<"postgresql_init_prepare_statement_failed">>, error => Error
},
?SLOG(error, LogMeta),
%% mark the prepare_sql as failed
@@ -492,7 +492,7 @@ prepare_sql_to_conn(Conn, Prepares) ->
prepare_sql_to_conn(Conn, [], Statements) when is_pid(Conn) -> {ok, Statements};
prepare_sql_to_conn(Conn, [{Key, SQL} | PrepareList], Statements) when is_pid(Conn) ->
- LogMeta = #{msg => "PostgreSQL Prepare Statement", name => Key, prepare_sql => SQL},
+ LogMeta = #{msg => "postgresql_prepare_statement", name => Key, prepare_sql => SQL},
?SLOG(info, LogMeta),
case epgsql:parse2(Conn, Key, SQL, []) of
{ok, Statement} ->
@@ -500,10 +500,10 @@ prepare_sql_to_conn(Conn, [{Key, SQL} | PrepareList], Statements) when is_pid(Co
{error, {error, error, _, undefined_table, _, _} = Error} ->
%% Target table is not created
?tp(pgsql_undefined_table, #{}),
- ?SLOG(error, LogMeta#{msg => "PostgreSQL parse failed", error => Error}),
+ ?SLOG(error, LogMeta#{msg => "postgresql_parse_failed", error => Error}),
{error, undefined_table};
{error, Error} = Other ->
- ?SLOG(error, LogMeta#{msg => "PostgreSQL parse failed", error => Error}),
+ ?SLOG(error, LogMeta#{msg => "postgresql_parse_failed", error => Error}),
Other
end.


@@ -220,7 +220,7 @@ login(post, #{body := Params}) ->
Password = maps:get(<<"password">>, Params),
case emqx_dashboard_admin:sign_token(Username, Password) of
{ok, Token} ->
- ?SLOG(info, #{msg => "Dashboard login successfully", username => Username}),
+ ?SLOG(info, #{msg => "dashboard_login_successful", username => Username}),
Version = iolist_to_binary(proplists:get_value(version, emqx_sys:info())),
{200, #{
token => Token,
@@ -228,7 +228,7 @@ login(post, #{body := Params}) ->
license => #{edition => emqx_release:edition()}
}};
{error, R} ->
- ?SLOG(info, #{msg => "Dashboard login failed", username => Username, reason => R}),
+ ?SLOG(info, #{msg => "dashboard_login_failed", username => Username, reason => R}),
{401, ?BAD_USERNAME_OR_PWD, <<"Auth failed">>}
end.
@@ -239,10 +239,10 @@ logout(_, #{
Username = username(Req, Username0),
case emqx_dashboard_admin:destroy_token_by_username(Username, Token) of
ok ->
- ?SLOG(info, #{msg => "Dashboard logout successfully", username => Username0}),
+ ?SLOG(info, #{msg => "dashboard_logout_successful", username => Username0}),
204;
_R ->
- ?SLOG(info, #{msg => "Dashboard logout failed.", username => Username0}),
+ ?SLOG(info, #{msg => "dashboard_logout_failed.", username => Username0}),
{401, ?WRONG_TOKEN_OR_USERNAME, <<"Ensure your token & username">>}
end.
@@ -259,11 +259,11 @@ users(post, #{body := Params}) ->
false ->
case emqx_dashboard_admin:add_user(Username, Password, Role, Desc) of
{ok, Result} ->
- ?SLOG(info, #{msg => "Create dashboard success", username => Username}),
+ ?SLOG(info, #{msg => "create_dashboard_user_success", username => Username}),
{200, filter_result(Result)};
{error, Reason} ->
?SLOG(info, #{
- msg => "Create dashboard failed",
+ msg => "create_dashboard_user_failed",
username => Username,
reason => Reason
}),
@@ -286,7 +286,7 @@ user(put, #{bindings := #{username := Username0}, body := Params} = Req) ->
user(delete, #{bindings := #{username := Username0}, headers := Headers} = Req) ->
case Username0 == emqx_dashboard_admin:default_username() of
true ->
- ?SLOG(info, #{msg => "Dashboard delete admin user failed", username => Username0}),
+ ?SLOG(info, #{msg => "dashboard_delete_admin_user_failed", username => Username0}),
Message = list_to_binary(io_lib:format("Cannot delete user ~p", [Username0])),
{400, ?NOT_ALLOWED, Message};
false ->
@@ -300,7 +300,7 @@ user(delete, #{bindings := #{username := Username0}, headers := Headers} = Req)
{404, ?USER_NOT_FOUND, Reason};
{ok, _} ->
?SLOG(info, #{
- msg => "Dashboard delete admin user", username => Username0
+ msg => "dashboard_delete_admin_user", username => Username0
}),
{204}
end
@@ -340,12 +340,12 @@ is_self_auth_token(Username, Token) ->
end.
change_pwd(post, #{bindings := #{username := Username}, body := Params}) ->
- LogMeta = #{msg => "Dashboard change password", username => binary_to_list(Username)},
+ LogMeta = #{msg => "dashboard_change_password", username => binary_to_list(Username)},
OldPwd = maps:get(<<"old_pwd">>, Params),
NewPwd = maps:get(<<"new_pwd">>, Params),
case ?EMPTY(OldPwd) orelse ?EMPTY(NewPwd) of
true ->
- ?SLOG(error, LogMeta#{result => failed, reason => "password undefined or empty"}),
+ ?SLOG(error, LogMeta#{result => failed, reason => "password_undefined_or_empty"}),
{400, ?BAD_REQUEST, <<"Old password or new password undefined">>};
false ->
case emqx_dashboard_admin:change_password(Username, OldPwd, NewPwd) of


@@ -40,7 +40,7 @@ init() ->
%% Load is incremental, so it can be called multiple times.
%% NOTE: no garbage collection is done, because stale entries are harmless.
load_desc(EtsTab, File) ->
- ?SLOG(info, #{msg => "loading desc", file => File}),
+ ?SLOG(info, #{msg => "loading_desc", file => File}),
{ok, Descs} = hocon:load(File),
["desc", Lang, "hocon"] = string:tokens(filename:basename(File), "."),
Insert = fun(Namespace, Id, Tag, Text) ->


@@ -141,7 +141,7 @@ current_rate(Node) when Node == node() ->
{ok, Rate}
catch
_E:R ->
- ?SLOG(warning, #{msg => "Dashboard monitor error", reason => R}),
+ ?SLOG(warning, #{msg => "dashboard_monitor_error", reason => R}),
%% Rate map 0, ensure api will not crash.
%% When joining cluster, dashboard monitor restart.
Rate0 = [


@@ -121,7 +121,7 @@ login(post, #{bindings := #{backend := Backend}, body := Sign}) ->
Provider = emqx_dashboard_sso:provider(Backend),
case emqx_dashboard_sso:login(Provider, Sign, State) of
{ok, Token} ->
- ?SLOG(info, #{msg => "Dashboard SSO login successfully", request => Sign}),
+ ?SLOG(info, #{msg => "dashboard_sso_login_successful", request => Sign}),
Version = iolist_to_binary(proplists:get_value(version, emqx_sys:info())),
{200, #{
token => Token,
@@ -130,7 +130,7 @@ login(post, #{bindings := #{backend := Backend}, body := Sign}) ->
}};
{error, Reason} ->
?SLOG(info, #{
- msg => "Dashboard SSO login failed",
+ msg => "dashboard_sso_login_failed",
request => Sign,
reason => Reason
}),


@@ -134,7 +134,8 @@ complete(
ok ->
ok;
{error, Reason} ->
- ?SLOG(warning, "filemeta_write_failed", #{
+ ?SLOG(warning, #{
+ msg => "filemeta_write_failed",
path => ManifestFilepath,
meta => Filemeta,
reason => Reason
@@ -294,7 +295,8 @@ try_read_filemeta(Filepath, Info) ->
{ok, Filemeta} ->
Info#{meta => Filemeta};
{error, Reason} ->
- ?SLOG(warning, "filemeta_inaccessible", #{
+ ?SLOG(warning, #{
+ msg => "filemeta_inaccessible",
path => Filepath,
reason => Reason
}),
@@ -305,13 +307,15 @@ mk_export_uri(RelFilepath) ->
emqx_ft_storage_exporter_fs_api:mk_export_uri(node(), RelFilepath).
log_invalid_entry(Options, {_Type, RelFilepath, Fileinfo = #file_info{}, _Stack}) ->
- ?SLOG(notice, "filesystem_object_unexpected", #{
+ ?SLOG(notice, #{
+ msg => "filesystem_object_unexpected",
relpath => RelFilepath,
fileinfo => Fileinfo,
options => Options
});
log_invalid_entry(Options, {_Type, RelFilepath, {error, Reason}, _Stack}) ->
- ?SLOG(warning, "filesystem_object_inaccessible", #{
+ ?SLOG(warning, #{
+ msg => "filesystem_object_inaccessible",
relpath => RelFilepath,
reason => Reason,
options => Options
@@ -346,7 +350,8 @@ list(_Options, Query) ->
Result = list(Query),
case Result of
#{errors := NodeErrors} ->
- ?SLOG(warning, "list_exports_errors", #{
+ ?SLOG(warning, #{
+ msg => "list_exports_errors",
query => Query,
errors => NodeErrors
});
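The four hunks in this file go one step further than a rename: the message had been passed as a separate second argument, and it is folded into the structured map so that every call site ends up with the same ?SLOG(Level, Map) shape. A minimal sketch of that fold, using the filemeta example from the first hunk above:

    %% before: message as a separate argument
    ?SLOG(warning, "filemeta_write_failed", #{path => ManifestFilepath, meta => Filemeta, reason => Reason}),
    %% after: message carried inside the map under the msg key
    ?SLOG(warning, #{msg => "filemeta_write_failed", path => ManifestFilepath, meta => Filemeta, reason => Reason}),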


@@ -893,6 +893,6 @@ convert_certs(Type, SubDir, Conf) ->
{ok, NSSL = #{}} ->
Conf#{Type => NSSL};
{error, Reason} ->
- ?SLOG(error, Reason#{msg => bad_ssl_config}),
+ ?SLOG(error, Reason#{msg => "bad_ssl_config", reason => Reason}),
throw({bad_ssl_config, Reason})
end.


@@ -1,6 +1,6 @@
{application, emqx_gcp_device, [
{description, "Application simplifying migration from GCP IoT Core"},
- {vsn, "0.1.0"},
+ {vsn, "0.1.1"},
{registered, []},
{mod, {emqx_gcp_device_app, []}},
{applications, [


@@ -81,7 +81,7 @@ put_device(FormattedDevice) ->
catch
_Error:Reason ->
?SLOG(error, #{
- msg => "Failed to put device",
+ msg => "failed_to_put_device",
device => FormattedDevice,
reason => Reason
}),
@@ -246,7 +246,7 @@ import_device(Device, {NumImported, NumError}) ->
catch
Error:Reason:Stacktrace ->
?SLOG(error, #{
- msg => "Failed to import device",
+ msg => "failed_to_import_device",
exception => Error,
reason => Reason,
stacktrace => Stacktrace


@@ -305,7 +305,7 @@ global_zone_configs(put, #{body := Body}, _Req) ->
Acc#{Path => RawConf};
{error, Reason} ->
?SLOG(error, #{
- msg => "update global zone failed",
+ msg => "update_global_zone_failed",
reason => Reason,
path => Path,
value => Value


@@ -931,7 +931,7 @@ safe_call_mria(Fun, Args, OnFail) ->
catch
EC:Err:Stack ->
?SLOG(warning, #{
- msg => "Call to mria failed",
+ msg => "call_to_mria_failed",
call => {mria, Fun, Args},
EC => Err,
stacktrace => Stack


@@ -91,7 +91,10 @@ register_hook(Rules) ->
[] ->
ok;
_ ->
- ?SLOG(error, #{rewrite_rule_re_complie_failed => ErrRules}),
+ ?SLOG(error, #{
+ msg => "rewrite_rule_re_compile_failed",
+ error_rules => ErrRules
+ }),
{error, ErrRules}
end.


@@ -1,6 +1,6 @@
{application, emqx_mysql, [
{description, "EMQX MySQL Database Connector"},
- {vsn, "0.1.1"},
+ {vsn, "0.1.2"},
{registered, []},
{applications, [
kernel,


@@ -294,7 +294,7 @@ init_prepare(State = #{prepare_statement := Prepares, pool_name := PoolName}) ->
ok ->
State;
{error, Reason} ->
- LogMeta = #{msg => <<"MySQL init prepare statement failed">>, reason => Reason},
+ LogMeta = #{msg => <<"mysql_init_prepare_statement_failed">>, reason => Reason},
?SLOG(error, LogMeta),
%% mark the prepare_statement as failed
State#{prepare_statement => {error, Prepares}}
@@ -348,7 +348,7 @@ prepare_sql_to_conn_list([Conn | ConnList], PrepareList) ->
prepare_sql_to_conn(Conn, []) when is_pid(Conn) -> ok;
prepare_sql_to_conn(Conn, [{Key, SQL} | PrepareList]) when is_pid(Conn) ->
- LogMeta = #{msg => "MySQL Prepare Statement", name => Key, prepare_sql => SQL},
+ LogMeta = #{msg => "mysql_prepare_statement", name => Key, prepare_sql => SQL},
?SLOG(info, LogMeta),
_ = unprepare_sql_to_conn(Conn, Key),
case mysql:prepare(Conn, Key, SQL) of
@@ -412,14 +412,14 @@ parse_batch_prepare_sql([{Key, H} | T], Prepares, Tokens, BatchInserts, BatchTks
BatchTks#{Key => ParamsTks}
);
{error, Reason} ->
- ?SLOG(error, #{msg => "split sql failed", sql => H, reason => Reason}),
+ ?SLOG(error, #{msg => "split_sql_failed", sql => H, reason => Reason}),
parse_prepare_sql(T, Prepares, Tokens, BatchInserts, BatchTks)
end;
Type when is_atom(Type) ->
- ?SLOG(error, #{msg => "detect sql type unsupported", sql => H, type => Type}),
+ ?SLOG(error, #{msg => "detect_sql_type_unsupported", sql => H, type => Type}),
parse_prepare_sql(T, Prepares, Tokens, BatchInserts, BatchTks);
{error, Reason} ->
- ?SLOG(error, #{msg => "detect sql type failed", sql => H, reason => Reason}),
+ ?SLOG(error, #{msg => "detect_sql_type_failed", sql => H, reason => Reason}),
parse_prepare_sql(T, Prepares, Tokens, BatchInserts, BatchTks)
end.


@@ -133,7 +133,7 @@ on_query(
#{pool_name := PoolName} = State
) ->
?SLOG(debug, #{
- msg => "oracle database connector received sql query",
+ msg => "oracle_connector_received_sql_query",
connector => InstId,
type => TypeOrKey,
sql => NameOrSQL,
@@ -158,7 +158,7 @@ on_batch_query(
connector => InstId,
first_request => Request,
state => State,
- msg => "batch prepare not implemented"
+ msg => "batch_prepare_not_implemented"
},
?SLOG(error, Log),
{error, {unrecoverable_error, batch_prepare_not_implemented}};
@@ -180,7 +180,7 @@ on_batch_query(
connector => InstId,
request => BatchReq,
state => State,
- msg => "invalid request"
+ msg => "invalid_request"
},
?SLOG(error, Log),
{error, {unrecoverable_error, invalid_request}}
@@ -212,7 +212,7 @@ on_sql_query(InstId, PoolName, Type, ApplyMode, NameOrSQL, Data) ->
#{error => Reason}
),
?SLOG(error, #{
- msg => "oracle database connector do sql query failed",
+ msg => "oracle_connector_do_sql_query_failed",
connector => InstId,
type => Type,
sql => NameOrSQL,
@@ -358,7 +358,7 @@ init_prepare(State = #{prepare_sql := Prepares, pool_name := PoolName, params_to
State#{prepare_sql := Sts};
Error ->
LogMeta = #{
- msg => <<"Oracle Database init prepare statement failed">>, error => Error
+ msg => <<"oracle_init_prepare_statement_failed">>, error => Error
},
?SLOG(error, LogMeta),
%% mark the prepare_sql as failed
@@ -396,7 +396,7 @@ prepare_sql_to_conn(Conn, Prepares, TokensMap) ->
prepare_sql_to_conn(Conn, [], _TokensMap, Statements) when is_pid(Conn) -> {ok, Statements};
prepare_sql_to_conn(Conn, [{Key, SQL} | PrepareList], TokensMap, Statements) when is_pid(Conn) ->
- LogMeta = #{msg => "Oracle Database Prepare Statement", name => Key, prepare_sql => SQL},
+ LogMeta = #{msg => "oracle_prepare_statement", name => Key, prepare_sql => SQL},
Tokens = maps:get(Key, TokensMap, []),
?SLOG(info, LogMeta),
case check_if_table_exists(Conn, SQL, Tokens) of


@@ -249,10 +249,10 @@ running(info, {flush_metrics, _Ref}, _Data) ->
running(info, {'DOWN', _MRef, process, Pid, Reason}, Data0 = #{async_workers := AsyncWorkers0}) when
is_map_key(Pid, AsyncWorkers0)
->
- ?SLOG(info, #{msg => async_worker_died, state => running, reason => Reason}),
+ ?SLOG(info, #{msg => "async_worker_died", state => running, reason => Reason}),
handle_async_worker_down(Data0, Pid);
running(info, Info, _St) ->
- ?SLOG(error, #{msg => unexpected_msg, state => running, info => Info}),
+ ?SLOG(error, #{msg => "unexpected_msg", state => running, info => Info}),
keep_state_and_data.
blocked(enter, _, #{resume_interval := ResumeT} = St0) ->
@@ -282,10 +282,10 @@ blocked(info, {flush_metrics, _Ref}, _Data) ->
blocked(info, {'DOWN', _MRef, process, Pid, Reason}, Data0 = #{async_workers := AsyncWorkers0}) when
is_map_key(Pid, AsyncWorkers0)
->
- ?SLOG(info, #{msg => async_worker_died, state => blocked, reason => Reason}),
+ ?SLOG(info, #{msg => "async_worker_died", state => blocked, reason => Reason}),
handle_async_worker_down(Data0, Pid);
blocked(info, Info, _Data) ->
- ?SLOG(error, #{msg => unexpected_msg, state => blocked, info => Info}),
+ ?SLOG(error, #{msg => "unexpected_msg", state => blocked, info => Info}),
keep_state_and_data.
terminate(_Reason, #{id := Id, index := Index, queue := Q}) ->
@@ -856,7 +856,7 @@ handle_query_result(Id, Result, HasBeenSent) ->
{ack | nack, function(), counters()}.
handle_query_result_pure(_Id, ?RESOURCE_ERROR_M(exception, Msg), _HasBeenSent) ->
PostFn = fun() ->
- ?SLOG(error, #{msg => resource_exception, info => Msg}),
+ ?SLOG(error, #{msg => "resource_exception", info => Msg}),
ok
end,
{nack, PostFn, #{}};
@@ -866,19 +866,19 @@ handle_query_result_pure(_Id, ?RESOURCE_ERROR_M(NotWorking, _), _HasBeenSent) wh
{nack, fun() -> ok end, #{}};
handle_query_result_pure(Id, ?RESOURCE_ERROR_M(not_found, Msg), _HasBeenSent) ->
PostFn = fun() ->
- ?SLOG(error, #{id => Id, msg => resource_not_found, info => Msg}),
+ ?SLOG(error, #{id => Id, msg => "resource_not_found", info => Msg}),
ok
end,
{ack, PostFn, #{dropped_resource_not_found => 1}};
handle_query_result_pure(Id, ?RESOURCE_ERROR_M(stopped, Msg), _HasBeenSent) ->
PostFn = fun() ->
- ?SLOG(error, #{id => Id, msg => resource_stopped, info => Msg}),
+ ?SLOG(error, #{id => Id, msg => "resource_stopped", info => Msg}),
ok
end,
{ack, PostFn, #{dropped_resource_stopped => 1}};
handle_query_result_pure(Id, ?RESOURCE_ERROR_M(Reason, _), _HasBeenSent) ->
PostFn = fun() ->
- ?SLOG(error, #{id => Id, msg => other_resource_error, reason => Reason}),
+ ?SLOG(error, #{id => Id, msg => "other_resource_error", reason => Reason}),
ok
end,
{nack, PostFn, #{}};
@@ -887,7 +887,7 @@ handle_query_result_pure(Id, {error, Reason} = Error, HasBeenSent) ->
true ->
PostFn =
fun() ->
- ?SLOG(error, #{id => Id, msg => unrecoverable_error, reason => Reason}),
+ ?SLOG(error, #{id => Id, msg => "unrecoverable_error", reason => Reason}),
ok
end,
Counters =
@@ -899,7 +899,7 @@ handle_query_result_pure(Id, {error, Reason} = Error, HasBeenSent) ->
false ->
PostFn =
fun() ->
- ?SLOG(error, #{id => Id, msg => send_error, reason => Reason}),
+ ?SLOG(error, #{id => Id, msg => "send_error", reason => Reason}),
ok
end,
{nack, PostFn, #{}}
@@ -925,7 +925,7 @@ handle_query_async_result_pure(Id, {error, Reason} = Error, HasBeenSent) ->
true ->
PostFn =
fun() ->
- ?SLOG(error, #{id => Id, msg => unrecoverable_error, reason => Reason}),
+ ?SLOG(error, #{id => Id, msg => "unrecoverable_error", reason => Reason}),
ok
end,
Counters =
@@ -936,7 +936,7 @@ handle_query_async_result_pure(Id, {error, Reason} = Error, HasBeenSent) ->
{ack, PostFn, Counters};
false ->
PostFn = fun() ->
- ?SLOG(error, #{id => Id, msg => async_send_error, reason => Reason}),
+ ?SLOG(error, #{id => Id, msg => "async_send_error", reason => Reason}),
ok
end,
{nack, PostFn, #{}}
@@ -1446,7 +1446,7 @@ append_queue(Id, Index, Q, Queries) ->
Dropped = length(Items2),
Counters = #{dropped_queue_full => Dropped},
?SLOG(info, #{
- msg => buffer_worker_overflow,
+ msg => "buffer_worker_overflow",
resource_id => Id,
worker_index => Index,
dropped => Dropped
@@ -1850,7 +1850,7 @@ adjust_batch_time(Id, RequestTTL, BatchTime0) ->
false ->
?SLOG(info, #{
id => Id,
- msg => adjusting_buffer_worker_batch_time,
+ msg => "adjusting_buffer_worker_batch_time",
new_batch_time => BatchTime
});
true ->


@@ -204,7 +204,7 @@ remove(ResId, ClearMetrics) when is_binary(ResId) ->
safe_call(ResId, {remove, ClearMetrics}, ?T_OPERATION)
after
%% Ensure the supervisor has it removed, otherwise the immediate re-add will see a stale process
- %% If the 'remove' call babove had succeeded, this is mostly a no-op but still needed to avoid race condition.
+ %% If the 'remove' call above had succeeded, this is mostly a no-op but still needed to avoid race condition.
%% Otherwise this is a 'infinity' shutdown, so it may take arbitrary long.
emqx_resource_manager_sup:delete_child(ResId)
end.
@@ -411,7 +411,7 @@ handle_event(EventType, EventData, State, Data) ->
?SLOG(
error,
#{
- msg => ignore_all_other_events,
+ msg => "ignore_all_other_events",
event_type => EventType,
event_data => EventData,
state => State,
@@ -488,7 +488,7 @@ start_resource(Data, From) ->
{next_state, connecting, update_state(UpdatedData, Data), Actions};
{error, Reason} = Err ->
?SLOG(warning, #{
- msg => start_resource_failed,
+ msg => "start_resource_failed",
id => Data#data.id,
reason => Reason
}),
@@ -555,7 +555,7 @@ handle_connected_health_check(Data) ->
{keep_state, UpdatedData, health_check_actions(UpdatedData)};
(Status, UpdatedData) ->
?SLOG(warning, #{
- msg => health_check_failed,
+ msg => "health_check_failed",
id => Data#data.id,
status => Status
}),
@@ -631,7 +631,7 @@ parse_health_check_result({error, Error}, Data) ->
?SLOG(
error,
#{
- msg => health_check_exception,
+ msg => "health_check_exception",
resource_id => Data#data.id,
reason => Error
}