diff --git a/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_kafka.conf b/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_kafka.conf
index 787a39fdb..651de99e9 100644
--- a/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_kafka.conf
+++ b/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_kafka.conf
@@ -216,6 +216,7 @@ emqx_ee_bridge_kafka {
zh: "Socket 收包缓存大小"
}
}
+ # hidden
socket_nodelay {
desc {
en: "When set to 'true', TCP buffer is sent as soon as possible. "
@@ -571,26 +572,27 @@ emqx_ee_bridge_kafka {
zh: "指定从哪个 Kafka 主题消费消息。"
}
label {
- en: "Kafka topic"
- zh: "Kafka 主题 "
+ en: "Kafka Topic"
+ zh: "Kafka 主题"
}
}
consumer_max_batch_bytes {
desc {
- en: "Maximum bytes to fetch in a batch of messages."
+ en: "Set how many bytes to pull from Kafka in each fetch request. "
"Please note that if the configured value is smaller than the message size in Kafka, it may negatively impact the fetch performance."
- zh: "在一批消息中要取的最大字节数。"
+ zh: "设置每次从 Kafka 拉取数据的字节数。"
"如果该配置小于 Kafka 中消息到大小,则可能会影响消费性能。"
}
label {
- en: "Max Bytes"
- zh: "最大字节数"
+ en: "Fetch Bytes"
+ zh: "拉取字节数"
}
}
+ # hidden
consumer_max_rejoin_attempts {
desc {
en: "Maximum number of times allowed for a member to re-join the group. If the consumer group can not reach balance after this configured number of attempts, the consumer group member will restart after a delay."
- zh: "允许一个成员重新加入小组的最大次数。如果超过改配置次数后仍不能成功加入消费组,则会在延迟一段时间后再重试。"
+ zh: "允许一个消费组成员重新加入小组的最大次数。如果超过改配置次数后仍不能成功加入消费组,则会在延迟一段时间后再重试。"
}
label {
en: "Max Rejoin Attempts"
@@ -599,10 +601,9 @@ emqx_ee_bridge_kafka {
}
consumer_offset_reset_policy {
desc {
- en: "Defines from which offset a consumer should start fetching when there"
- " is no commit history or when the commit history becomes invalid."
- zh: "当没有主题分区没有偏移量的历史记录,或则历史记录失效后,"
- "消费者应该使用哪个偏移量重新开始消费"
+ en: "Defines from which offset a consumer should start fetching when there "
+ "is no commit history or when the commit history becomes invalid."
+ zh: "当没有偏移量的历史记录,或则历史记录失效后,消费者应该使用哪个偏移量重新开始消费。"
}
label {
en: "Offset Reset Policy"
@@ -616,13 +617,13 @@ emqx_ee_bridge_kafka {
}
label {
en: "Offset Commit Interval"
- zh: "偏移承诺间隔"
+ zh: "偏移提交间隔"
}
}
consumer_topic_mapping {
desc {
- en: "Defines the mapping between Kafka topics and MQTT topics. Must contain at least one item."
- zh: "指定 Kafka 主题和 MQTT 主题之间的映射。 必须至少包含一个项目。"
+ en: "Defines the mapping between Kafka topics and MQTT topics. Must contain at least one item."
+ zh: "指定 Kafka 主题和 MQTT 主题之间的映射关系。 必须至少包含一项。"
}
label {
en: "Topic Mapping"
@@ -632,9 +633,9 @@ emqx_ee_bridge_kafka {
consumer_key_encoding_mode {
desc {
en: "Defines how the key from the Kafka message is"
- " dealt with before being forwarded via MQTT.\n"
+ " encoded before being forwarded via MQTT.\n"
"none
Uses the key from the Kafka message unchanged."
- " Note: in this case, then the key must be a valid UTF-8 string.\n"
+ " Note: in this case, the key must be a valid UTF-8 string.\n"
"base64
Uses base-64 encoding on the received key."
zh: "定义了在通过MQTT转发之前如何处理Kafka消息的 Key。"
"none
使用Kafka消息中的 Key 原始值,不进行编码。"
@@ -649,9 +650,9 @@ emqx_ee_bridge_kafka {
consumer_value_encoding_mode {
desc {
en: "Defines how the value from the Kafka message is"
- " dealt with before being forwarded via MQTT.\n"
+ " encoded before being forwarded via MQTT.\n"
"none
Uses the value from the Kafka message unchanged."
- " Note: in this case, then the value must be a valid UTF-8 string.\n"
+ " Note: in this case, the value must be a valid UTF-8 string.\n"
"base64
Uses base-64 encoding on the received value."
zh: "定义了在通过MQTT转发之前如何处理Kafka消息的 Value。"
"none
使用Kafka消息中的 Value 原始值,不进行编码。"
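For reference, a minimal sketch of a consumer bridge configuration exercising the fields documented above. The field names and nesting (bootstrap_hosts, kafka.max_batch_bytes, topic_mapping, key/value encoding mode) are assumptions inferred from the i18n keys in this file, not copied verbatim from the schema:

    bridges.kafka_consumer.my_consumer {
      bootstrap_hosts = "kafka-1.example.com:9092"   # assumed connection field
      kafka {
        max_batch_bytes = "896KB"                    # bytes pulled from Kafka per fetch request
        offset_reset_policy = latest                 # used when there is no (valid) commit history
        offset_commit_interval_seconds = 5           # offset commit interval
      }
      topic_mapping = [
        {kafka_topic = "telemetry", mqtt_topic = "kafka/telemetry", qos = 1, payload_template = "${.}"}
      ]
      key_encoding_mode = none      # key forwarded unchanged; must be valid UTF-8
      value_encoding_mode = base64  # value base-64 encoded before forwarding over MQTT
    }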
diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_kafka.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_kafka.erl
index f623417b2..4a9263134 100644
--- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_kafka.erl
+++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_kafka.erl
@@ -221,17 +221,21 @@ fields(socket_opts) ->
{sndbuf,
mk(
emqx_schema:bytesize(),
- #{default => <<"1024KB">>, desc => ?DESC(socket_send_buffer)}
+ #{default => <<"1MB">>, desc => ?DESC(socket_send_buffer)}
)},
{recbuf,
mk(
emqx_schema:bytesize(),
- #{default => <<"1024KB">>, desc => ?DESC(socket_receive_buffer)}
+ #{default => <<"1MB">>, desc => ?DESC(socket_receive_buffer)}
)},
{nodelay,
mk(
boolean(),
- #{default => true, desc => ?DESC(socket_nodelay)}
+ #{
+ default => true,
+ hidden => true,
+ desc => ?DESC(socket_nodelay)
+ }
)}
];
fields(producer_opts) ->
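For context, a sketch of how the adjusted socket defaults surface in a bridge configuration; the bridge type name and enclosing structure are assumptions, and the socket_opts block normally only needs to be written out when overriding these defaults:

    bridges.kafka.my_producer {
      # (connection and producer settings omitted)
      socket_opts {
        sndbuf = "1MB"   # TCP send buffer; same size as the old "1024KB" default, clearer unit
        recbuf = "1MB"   # TCP receive buffer; same change
        nodelay = true   # default unchanged; the field is now hidden from generated docs
      }
    }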