Merge pull request #10298 from emqx/release-50
Merge release-50 back to master
This commit is contained in: commit 9bbca07943
@@ -16,6 +16,9 @@ services:
      GITHUB_REF: ${GITHUB_REF}
    networks:
      - emqx_bridge
+    ports:
+      - 28083:18083
+      - 2883:1883
    volumes:
      - ../..:/emqx
      - /tmp/emqx-ci/emqx-shared-secret:/var/lib/secret
Makefile

@@ -7,7 +7,7 @@ export EMQX_DEFAULT_RUNNER = debian:11-slim
export OTP_VSN ?= $(shell $(CURDIR)/scripts/get-otp-vsn.sh)
export ELIXIR_VSN ?= $(shell $(CURDIR)/scripts/get-elixir-vsn.sh)
export EMQX_DASHBOARD_VERSION ?= v1.2.0
-export EMQX_EE_DASHBOARD_VERSION ?= e1.0.5-beta.3
+export EMQX_EE_DASHBOARD_VERSION ?= e1.0.5
export EMQX_REL_FORM ?= tgz
export QUICER_DOWNLOAD_FROM_RELEASE = 1
ifeq ($(OS),Windows_NT)
@@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_connector, [
    {description, "EMQX Data Integration Connectors"},
-    {vsn, "0.1.17"},
+    {vsn, "0.1.18"},
    {registered, []},
    {mod, {emqx_connector_app, []}},
    {applications, [
@@ -285,7 +285,6 @@ redis_fields() ->
        {database, #{
            type => integer(),
            default => 0,
-            required => true,
            desc => ?DESC("database")
        }},
        {auto_reconnect, fun emqx_connector_schema_lib:auto_reconnect/1}
@@ -319,14 +319,8 @@ get_basic_usage_info() ->
    ReferencedBridges =
        lists:foldl(
            fun(#{actions := Actions, from := Froms}, Acc) ->
-                BridgeIDs0 =
-                    [
-                        BridgeID
-                     || From <- Froms,
-                        {ok, BridgeID} <-
-                            [emqx_bridge_resource:bridge_hookpoint_to_bridge_id(From)]
-                    ],
-                BridgeIDs1 = lists:filter(fun is_binary/1, Actions),
+                BridgeIDs0 = get_referenced_hookpoints(Froms),
+                BridgeIDs1 = get_egress_bridges(Actions),
                tally_referenced_bridges(BridgeIDs0 ++ BridgeIDs1, Acc)
            end,
            #{},
@@ -490,10 +484,8 @@ forwards_to_bridge(Actions, BridgeId) ->
    lists:any(fun(A) -> A =:= BridgeId end, Actions).

references_ingress_bridge(Froms, BridgeId) ->
-    lists:any(
-        fun(ReferenceBridgeId) ->
-            BridgeId =:= ReferenceBridgeId
-        end,
+    lists:member(
+        BridgeId,
        [
            RefBridgeId
         || From <- Froms,
@@ -501,3 +493,14 @@ references_ingress_bridge(Froms, BridgeId) ->
               [emqx_bridge_resource:bridge_hookpoint_to_bridge_id(From)]
        ]
    ).
+
+get_referenced_hookpoints(Froms) ->
+    [
+        BridgeID
+     || From <- Froms,
+        {ok, BridgeID} <-
+            [emqx_bridge_resource:bridge_hookpoint_to_bridge_id(From)]
+    ].
+
+get_egress_bridges(Actions) ->
+    lists:filter(fun is_binary/1, Actions).
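The refactor above replaces the inline list comprehension and filter with two small helpers. Below is a minimal, self-contained sketch of how they behave; the hookpoint parser is a hypothetical stand-in for emqx_bridge_resource:bridge_hookpoint_to_bridge_id/1, and the "$bridges/<type>:<name>" hookpoint form is an assumption for illustration, not something stated in this diff.

    %% bridge_refs_sketch.erl -- illustrative only, not part of this PR.
    -module(bridge_refs_sketch).
    -export([demo/0]).

    %% Hypothetical stand-in for emqx_bridge_resource:bridge_hookpoint_to_bridge_id/1;
    %% the "$bridges/<bridge_id>" form is assumed here.
    hookpoint_to_bridge_id(<<"$bridges/", BridgeId/binary>>) -> {ok, BridgeId};
    hookpoint_to_bridge_id(_Other) -> {error, bad_bridge_hookpoint}.

    %% Same shape as get_referenced_hookpoints/1 above: keep only the inputs
    %% that parse as bridge hookpoints.
    get_referenced_hookpoints(Froms) ->
        [BridgeId || From <- Froms, {ok, BridgeId} <- [hookpoint_to_bridge_id(From)]].

    %% Same shape as get_egress_bridges/1 above: binary actions are treated as
    %% bridge references, non-binary actions are dropped.
    get_egress_bridges(Actions) ->
        lists:filter(fun is_binary/1, Actions).

    demo() ->
        Froms = [<<"$bridges/kafka_consumer:my_source">>, <<"t/some/topic">>],
        Actions = [<<"mqtt:my_egress">>, fun(_Msg) -> ok end],
        %% {[<<"kafka_consumer:my_source">>], [<<"mqtt:my_egress">>]}
        {get_referenced_hookpoints(Froms), get_egress_bridges(Actions)}.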
@@ -0,0 +1 @@
+Fixed the Clickhouse bridge so that a clearer error message is returned when the user clicks the test button in the settings dialog.
@@ -0,0 +1 @@
+Clickhouse 已经修复了一个问题,当用户在设置对话框中点击测试按钮时,错误信息会更清晰。
@@ -216,6 +216,7 @@ emqx_ee_bridge_kafka {
            zh: "Socket 收包缓存大小"
        }
    }
+    # hidden
    socket_nodelay {
        desc {
            en: "When set to 'true', TCP buffer is sent as soon as possible. "
@@ -571,26 +572,27 @@ emqx_ee_bridge_kafka {
            zh: "指定从哪个 Kafka 主题消费消息。"
        }
        label {
-            en: "Kafka topic"
-            zh: "Kafka 主题 "
+            en: "Kafka Topic"
+            zh: "Kafka 主题"
        }
    }
    consumer_max_batch_bytes {
        desc {
-            en: "Maximum bytes to fetch in a batch of messages."
+            en: "Set how many bytes to pull from Kafka in each fetch request. "
+                "Please note that if the configured value is smaller than the message size in Kafka, it may negatively impact the fetch performance."
-            zh: "在一批消息中要取的最大字节数。"
-                "如果该配置小于 Kafka 中消息到大小,则可能会影响消费性能。"
+            zh: "设置每次从 Kafka 拉取数据的字节数。"
+                "如该配置小于 Kafka 消息的大小,可能会影响消费性能。"
        }
        label {
-            en: "Max Bytes"
-            zh: "最大字节数"
+            en: "Fetch Bytes"
+            zh: "拉取字节数"
        }
    }
+    # hidden
    consumer_max_rejoin_attempts {
        desc {
            en: "Maximum number of times allowed for a member to re-join the group. If the consumer group can not reach balance after this configured number of attempts, the consumer group member will restart after a delay."
-            zh: "允许一个成员重新加入小组的最大次数。如果超过改配置次数后仍不能成功加入消费组,则会在延迟一段时间后再重试。"
+            zh: "消费组成员允许重新加入小组的最大次数。如超过该配置次数后仍未能成功加入消费组,则会在等待一段时间后重试。"
        }
        label {
            en: "Max Rejoin Attempts"
@@ -599,10 +601,9 @@ emqx_ee_bridge_kafka {
    }
    consumer_offset_reset_policy {
        desc {
-            en: "Defines from which offset a consumer should start fetching when there"
-                " is no commit history or when the commit history becomes invalid."
-            zh: "当没有主题分区没有偏移量的历史记录,或则历史记录失效后,"
-                "消费者应该使用哪个偏移量重新开始消费"
+            en: "Defines from which offset a consumer should start fetching when there "
+                "is no commit history or when the commit history becomes invalid."
+            zh: "如不存在偏移量历史记录或历史记录失效,消费者应使用哪个偏移量开始消费。"
        }
        label {
            en: "Offset Reset Policy"
@@ -616,13 +617,13 @@ emqx_ee_bridge_kafka {
        }
        label {
            en: "Offset Commit Interval"
-            zh: "偏移承诺间隔"
+            zh: "偏移提交间隔"
        }
    }
    consumer_topic_mapping {
        desc {
            en: "Defines the mapping between Kafka topics and MQTT topics. Must contain at least one item."
-            zh: "指定 Kafka 主题和 MQTT 主题之间的映射。 必须至少包含一个项目。"
+            zh: "指定 Kafka 主题和 MQTT 主题之间的映射关系。 应至少包含一项。"
        }
        label {
            en: "Topic Mapping"
@@ -632,14 +633,14 @@ emqx_ee_bridge_kafka {
    consumer_key_encoding_mode {
        desc {
            en: "Defines how the key from the Kafka message is"
-                " dealt with before being forwarded via MQTT.\n"
+                " encoded before being forwarded via MQTT.\n"
                "<code>none</code> Uses the key from the Kafka message unchanged."
-                " Note: in this case, then the key must be a valid UTF-8 string.\n"
+                " Note: in this case, the key must be a valid UTF-8 string.\n"
                "<code>base64</code> Uses base-64 encoding on the received key."
-            zh: "定义了在通过MQTT转发之前如何处理Kafka消息的 Key。"
-                "<code>none</code> 使用Kafka消息中的 Key 原始值,不进行编码。"
-                " 注意:在这种情况下,Key 必须是一个有效的UTF-8字符串。\n"
-                "<code>base64</code> 对收到的密钥或值使用base-64编码。"
+            zh: "通过 MQTT 转发之前,如何处理 Kafka 消息的 Key。"
+                "<code>none</code> 使用 Kafka 消息中的 Key 原始值,不进行编码。"
+                " 注意:在这种情况下,Key 必须是一个有效的 UTF-8 字符串。\n"
+                "<code>base64</code> 对收到的密钥或值使用 base-64 编码。"
        }
        label {
            en: "Key Encoding Mode"
@@ -649,14 +650,14 @@ emqx_ee_bridge_kafka {
    consumer_value_encoding_mode {
        desc {
            en: "Defines how the value from the Kafka message is"
-                " dealt with before being forwarded via MQTT.\n"
+                " encoded before being forwarded via MQTT.\n"
                "<code>none</code> Uses the value from the Kafka message unchanged."
-                " Note: in this case, then the value must be a valid UTF-8 string.\n"
+                " Note: in this case, the value must be a valid UTF-8 string.\n"
                "<code>base64</code> Uses base-64 encoding on the received value."
-            zh: "定义了在通过MQTT转发之前如何处理Kafka消息的 Value。"
-                "<code>none</code> 使用Kafka消息中的 Value 原始值,不进行编码。"
-                " 注意:在这种情况下,Value 必须是一个有效的UTF-8字符串。\n"
-                "<code>base64</code> 对收到的 Value 使用base-64编码。"
+            zh: "通过 MQTT 转发之前,如何处理 Kafka 消息的 Value。"
+                "<code>none</code> 使用 Kafka 消息中的 Value 原始值,不进行编码。"
+                " 注意:在这种情况下,Value 必须是一个有效的 UTF-8 字符串。\n"
+                "<code>base64</code> 对收到的 Value 使用 base-64 编码。"
        }
        label {
            en: "Value Encoding Mode"
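The descriptions above say that 'none' forwards the Kafka key or value unchanged (so it must be valid UTF-8), while 'base64' re-encodes the bytes before they are forwarded over MQTT. A minimal sketch of that decision, mirroring the documented behaviour rather than the bridge's actual implementation:

    %% encoding_mode_sketch.erl -- illustrative only; mirrors the documented
    %% behaviour of the key/value encoding modes, not the bridge's real code.
    -module(encoding_mode_sketch).
    -export([encode/2]).

    %% 'none': forward the Kafka key/value unchanged, so it must be valid UTF-8.
    encode(none, Bin) when is_binary(Bin) ->
        case unicode:characters_to_binary(Bin, utf8, utf8) of
            Bin -> {ok, Bin};
            _Invalid -> {error, not_valid_utf8}
        end;
    %% 'base64': always safe; the bytes are re-encoded before being forwarded.
    encode(base64, Bin) when is_binary(Bin) ->
        {ok, base64:encode(Bin)}.

For example, encode(none, <<16#ff>>) returns {error, not_valid_utf8}, while encode(base64, <<16#ff>>) returns {ok, <<"/w==">>}.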
@@ -221,17 +221,21 @@ fields(socket_opts) ->
        {sndbuf,
            mk(
                emqx_schema:bytesize(),
-                #{default => <<"1024KB">>, desc => ?DESC(socket_send_buffer)}
+                #{default => <<"1MB">>, desc => ?DESC(socket_send_buffer)}
            )},
        {recbuf,
            mk(
                emqx_schema:bytesize(),
-                #{default => <<"1024KB">>, desc => ?DESC(socket_receive_buffer)}
+                #{default => <<"1MB">>, desc => ?DESC(socket_receive_buffer)}
            )},
        {nodelay,
            mk(
                boolean(),
-                #{default => true, desc => ?DESC(socket_nodelay)}
+                #{
+                    default => true,
+                    hidden => true,
+                    desc => ?DESC(socket_nodelay)
+                }
            )}
    ];
fields(producer_opts) ->
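The buffer defaults above change only in how they are written: assuming the schema's usual binary units (KB = 1024 bytes), <<"1024KB">> and <<"1MB">> denote the same 1,048,576 bytes, and socket_nodelay keeps its default while becoming hidden. A hypothetical parser (not emqx_schema's actual bytesize implementation) makes the equivalence concrete:

    %% bytesize_sketch.erl -- a hypothetical size-string parser for illustration;
    %% it is not emqx_schema's actual bytesize() implementation.
    -module(bytesize_sketch).
    -export([to_bytes/1]).

    %% Parse "<N>KB" / "<N>MB" into bytes, assuming binary units (KB = 1024 bytes).
    to_bytes(Str) when is_binary(Str) ->
        case re:run(Str, <<"^([0-9]+)(KB|MB)$">>, [{capture, all_but_first, binary}]) of
            {match, [N, <<"KB">>]} -> binary_to_integer(N) * 1024;
            {match, [N, <<"MB">>]} -> binary_to_integer(N) * 1024 * 1024;
            nomatch -> error({bad_bytesize, Str})
        end.

    %% Under that assumption, to_bytes(<<"1024KB">>) and to_bytes(<<"1MB">>) both
    %% return 1048576, so the new defaults only change how the value reads.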
@@ -41,14 +41,16 @@ roots() ->
fields(config) ->
    [
        {server, server()}
-        | add_default_username(emqx_connector_schema_lib:relational_db_fields())
+        | adjust_fields(emqx_connector_schema_lib:relational_db_fields())
    ].

-add_default_username(Fields) ->
+adjust_fields(Fields) ->
    lists:map(
        fun
            ({username, OrigUsernameFn}) ->
                {username, add_default_fn(OrigUsernameFn, <<"root">>)};
+            ({password, OrigPasswordFn}) ->
+                {password, make_required_fn(OrigPasswordFn)};
            (Field) ->
                Field
        end,
@@ -61,6 +63,12 @@ add_default_fn(OrigFn, Default) ->
        (Field) -> OrigFn(Field)
    end.

+make_required_fn(OrigFn) ->
+    fun
+        (required) -> true;
+        (Field) -> OrigFn(Field)
+    end.
+
server() ->
    Meta = #{desc => ?DESC("server")},
    emqx_schema:servers_sc(Meta, ?TD_HOST_OPTIONS).
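adjust_fields/1 works by wrapping each field's schema accessor fun: make_required_fn/1 overrides the 'required' key and delegates every other key to the original fun, the same way add_default_fn/2 overrides 'default'. A minimal sketch under that reading, with a hypothetical password field fun standing in for the real emqx_connector_schema_lib definition:

    %% field_adjust_sketch.erl -- minimal sketch of the wrapper pattern above;
    %% password/1 is a hypothetical stand-in, not the real schema definition.
    -module(field_adjust_sketch).
    -export([demo/0]).

    %% Hypothetical original schema accessor for a password field.
    password(type) -> binary;
    password(required) -> false;
    password(desc) -> <<"Connection password">>;
    password(_) -> undefined.

    %% Same shape as make_required_fn/1 above: force 'required' to true and
    %% delegate every other key to the original fun.
    make_required_fn(OrigFn) ->
        fun
            (required) -> true;
            (Field) -> OrigFn(Field)
        end.

    demo() ->
        Password = make_required_fn(fun password/1),
        %% => {true, binary, <<"Connection password">>}
        {Password(required), Password(type), Password(desc)}.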