# emqx/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_kafka.conf

emqx_ee_bridge_kafka {
config_enable {
desc {
en: "Enable (true) or disable (false) this Kafka bridge."
zh: "启用true或停用该falseKafka 数据桥接。"
}
label {
en: "Enable or Disable"
zh: "启用或停用"
}
}
desc_config {
desc {
en: """Configuration for a Kafka bridge."""
zh: """Kafka 桥接配置"""
}
label {
en: "Kafka Bridge Configuration"
zh: "Kafka 桥接配置"
}
}
desc_type {
desc {
en: """The Bridge Type"""
zh: """桥接类型"""
}
label {
en: "Bridge Type"
zh: "桥接类型"
}
}
desc_name {
desc {
en: """Bridge name, used as a human-readable description of the bridge."""
zh: """桥接名字,可读描述"""
}
label {
en: "Bridge Name"
zh: "桥接名字"
}
}
kafka_producer {
desc {
en: "Kafka Producer configuration."
zh: "Kafka Producer 配置。"
}
label {
en: "Kafka Producer"
zh: "Kafka Producer"
}
}
producer_opts {
desc {
en: "Local MQTT data source and Kafka bridge configs."
zh: "本地 MQTT 数据源和 Kafka 桥接的配置。"
}
label {
en: "MQTT to Kafka"
zh: "MQTT 到 Kafka"
}
}
mqtt_topic {
desc {
en: "MQTT topic or topic filter as data source (bridge input). If rule action is used as data source, this config should be left empty, otherwise messages will be duplicated in Kafka."
zh: "MQTT 主题数据源由桥接指定,或留空由规则动作指定。"
}
label {
en: "Source MQTT Topic"
zh: "源 MQTT 主题"
}
}
producer_kafka_opts {
desc {
en: "Kafka producer configs."
zh: "Kafka 生产者参数。"
}
label {
en: "Kafka Producer"
zh: "生产者参数"
}
}
bootstrap_hosts {
desc {
en: "A comma separated list of Kafka <code>host[:port]</code> endpoints to bootstrap the client. Default port number is 9092."
zh: "用逗号分隔的 <code>host[:port]</code> 主机列表。默认端口号为 9092。"
}
label {
en: "Bootstrap Hosts"
zh: "主机列表"
}
}
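# A minimal sketch of how this field is typically set; the host names and the
# surrounding bridge context are hypothetical:
#
#   bootstrap_hosts = "kafka-1.internal:9092,kafka-2.internal:9092"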
connect_timeout {
desc {
en: "Maximum wait time for TCP connection establishment (including authentication time if enabled)."
zh: "建立 TCP 连接时的最大等待时长(若启用认证,这个等待时长也包含完成认证所需时间)。"
}
label {
en: "Connect Timeout"
zh: "连接超时"
}
}
min_metadata_refresh_interval {
desc {
en: "Minimum time interval the client has to wait before refreshing Kafka broker and topic metadata. "
"Setting too small value may add extra load on Kafka."
zh: "刷新 Kafka broker 和 Kafka 主题元数据段最短时间间隔。设置太小可能会增加 Kafka 压力。"
}
label {
en: "Min Metadata Refresh Interval"
zh: "元数据刷新最小间隔"
}
}
metadata_request_timeout {
desc {
en: "Maximum wait time when fetching metadata from Kafka."
zh: "刷新元数据时最大等待时长。"
}
label {
en: "Metadata Request Timeout"
zh: "元数据请求超时"
}
}
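# A hedged example combining the client-level timing fields documented above;
# the duration values are illustrative, not recommended defaults:
#
#   connect_timeout = 5s
#   min_metadata_refresh_interval = 3s
#   metadata_request_timeout = 5s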
authentication {
desc {
en: "Authentication configs."
zh: "认证参数。"
}
label {
en: "Authentication"
zh: "认证"
}
}
socket_opts {
desc {
en: "Extra socket options."
zh: "更多 Socket 参数设置。"
}
label {
en: "Socket Options"
zh: "Socket 参数"
}
}
auth_sasl_mechanism {
desc {
en: "SASL authentication mechanism."
zh: "SASL 认证方法名称。"
}
label {
en: "Mechanism"
zh: "认证方法"
}
}
auth_sasl_username {
desc {
en: "SASL authentication username."
zh: "SASL 认证的用户名。"
}
label {
en: "Username"
zh: "用户名"
}
}
auth_sasl_password {
desc {
en: "SASL authentication password."
zh: "SASL 认证的密码。"
}
label {
en: "Password"
zh: "密码"
}
}
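# A sketch of a username/password authentication block; the exact field names
# are assumptions inferred from the three entries above:
#
#   authentication {
#     mechanism = plain
#     username = "my_user"
#     password = "my_secret"
#   }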
auth_kerberos_principal {
desc {
en: "SASL GSSAPI authentication Kerberos principal. "
"For example <code>client_name@MY.KERBEROS.REALM.MYDOMAIN.COM</code>, "
"NOTE: The realm in use has to be configured in /etc/krb5.conf in EMQX nodes."
zh: "SASL GSSAPI 认证方法的 Kerberos principal"
"例如 <code>client_name@MY.KERBEROS.REALM.MYDOMAIN.COM</code>"
"注意:这里使用的 realm 需要配置在 EMQX 服务器的 /etc/krb5.conf 中"
}
label {
en: "Kerberos Principal"
zh: "Kerberos Principal"
}
}
auth_kerberos_keytab_file {
desc {
en: "SASL GSSAPI authentication Kerberos keytab file path. "
"NOTE: This file has to be placed in EMQX nodes, and the EMQX service runner user requires read permission."
zh: "SASL GSSAPI 认证方法的 Kerberos keytab 文件。"
"注意:该文件需要上传到 EMQX 服务器中,且运行 EMQX 服务的系统账户需要有读取权限。"
}
label {
en: "Kerberos keytab file"
zh: "Kerberos keytab 文件"
}
}
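# A sketch of a GSSAPI/Kerberos authentication block, assuming the field names
# below match the schema; the principal and keytab path are hypothetical:
#
#   authentication {
#     kerberos_principal = "client_name@MY.KERBEROS.REALM.MYDOMAIN.COM"
#     kerberos_keytab_file = "/etc/emqx/client.keytab"
#   }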
socket_send_buffer {
desc {
en: "Fine tune the socket send buffer. The default value is tuned for high throughput."
zh: "TCP socket 的发送缓存调优。默认值是针对高吞吐量的一个推荐值。"
}
label {
en: "Socket Send Buffer Size"
zh: "Socket 发送缓存大小"
}
}
socket_receive_buffer {
desc {
en: "Fine tune the socket receive buffer. The default value is tuned for high throughput."
zh: "TCP socket 的收包缓存调优。默认值是针对高吞吐量的一个推荐值。"
}
label {
en: "Socket Receive Buffer Size"
zh: "Socket 收包缓存大小"
}
}
# hidden
socket_nodelay {
desc {
en: "When set to 'true', TCP buffer is sent as soon as possible. "
"Otherwise, the OS kernel may buffer small TCP packets for a while (40 ms by default)."
zh: "设置true让系统内核立即发送。否则当需要发送的内容很少时可能会有一定延迟默认 40 毫秒)。"
}
label {
en: "No Delay"
zh: "是否关闭延迟发送"
}
}
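# A sketch of a socket_opts block covering the three socket entries above; the
# key names sndbuf/recbuf/nodelay are assumptions about the underlying schema:
#
#   socket_opts {
#     sndbuf = 1MB
#     recbuf = 1MB
#     nodelay = true
#   }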
kafka_topic {
desc {
en: "Kafka topic name"
zh: "Kafka 主题名称"
}
label {
en: "Kafka Topic Name"
zh: "Kafka 主题名称"
}
}
kafka_message {
desc {
en: "Template to render a Kafka message."
zh: "用于生成 Kafka 消息的模版。"
}
label {
en: "Kafka Message Template"
zh: "Kafka 消息模版"
}
}
kafka_message_key {
desc {
en: "Template to render Kafka message key. "
"If the template is rendered into a NULL value (i.e. there is no such data field in Rule Engine context) "
"then Kafka's <code>NULL</code> (but not empty string) is used."
zh: "生成 Kafka 消息 Key 的模版。如果模版生成后为空值,"
"则会使用 Kafka 的 <code>NULL</code> ,而非空字符串。"
}
label {
en: "Message Key"
zh: "消息的 Key"
}
}
kafka_message_value {
desc {
en: "Template to render Kafka message value. "
"If the template is rendered "
"into a NULL value (i.e. there is no such data field in Rule Engine context) "
"then Kafka's <code>NULL</code> (but not empty string) is used."
zh: "生成 Kafka 消息 Value 的模版。如果模版生成后为空值,"
"则会使用 Kafka 的 <code>NULL</code>,而非空字符串。"
}
label {
en: "Message Value"
zh: "消息的 Value"
}
}
kafka_message_timestamp {
desc {
en: "Which timestamp to use. "
"The timestamp is expected to be a millisecond precision Unix epoch "
"which can be in string format, e.g. <code>1661326462115</code> or "
"<code>'1661326462115'</code>. "
"When the desired data field for this template is not found, "
"or if the found data is not a valid integer, "
"the current system timestamp will be used."
zh: "生成 Kafka 消息时间戳的模版。"
"该时间必需是一个整型数值(可以是字符串格式)例如 <code>1661326462115</code> "
"或 <code>'1661326462115'</code>。"
"当所需的输入字段不存在,或不是一个整型时,"
"则会使用当前系统时间。"
}
label {
en: "Message Timestamp"
zh: "消息的时间戳"
}
}
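# A hedged template example for the message key/value/timestamp entries above;
# the nested block name and the ${...} placeholders are illustrative Rule
# Engine fields, not authoritative names:
#
#   message {
#     key = "${clientid}"
#     value = "${payload}"
#     timestamp = "${timestamp}"
#   }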
max_batch_bytes {
desc {
en: "Maximum bytes to collect in a Kafka message batch. "
"Most of the Kafka brokers default to a limit of 1 MB batch size. "
"EMQX's default value is less than 1 MB in order to compensate "
"Kafka message encoding overheads (especially when each individual message is very small). "
"When a single message is over the limit, it is still sent (as a single element batch)."
zh: "最大消息批量字节数。"
"大多数 Kafka 环境的默认最低值是 1 MBEMQX 的默认值比 1 MB 更小是因为需要"
"补偿 Kafka 消息编码所需要的额外字节(尤其是当每条消息都很小的情况下)。"
"当单个消息的大小超过该限制时,它仍然会被发送,(相当于该批量中只有单个消息)。"
}
label {
en: "Max Batch Bytes"
zh: "最大批量字节数"
}
}
compression {
desc {
en: "Compression method."
zh: "压缩方法。"
}
label {
en: "Compression"
zh: "压缩"
}
}
partition_strategy {
desc {
en: "Partition strategy is to tell the producer how to dispatch messages to Kafka partitions.\n\n"
"<code>random</code>: Randomly pick a partition for each message\n"
"<code>key_dispatch</code>: Hash Kafka message key to a partition number"
zh: "设置消息发布时应该如何选择 Kafka 分区。\n\n"
"<code>random</code>: 为每个消息随机选择一个分区。\n"
"<code>key_dispatch</code>: Hash Kafka message key to a partition number"
}
label {
en: "Partition Strategy"
zh: "分区选择策略"
}
}
required_acks {
desc {
en: "Required acknowledgements for Kafka partition leader to wait for its followers "
"before it sends back the acknowledgement to EMQX Kafka producer\n\n"
"<code>all_isr</code>: Require all in-sync replicas to acknowledge.\n"
"<code>leader_only</code>: Require only the partition-leader's acknowledgement.\n"
"<code>none</code>: No need for Kafka to acknowledge at all."
zh: "设置 Kafka leader 在返回给 EMQX 确认之前需要等待多少个 follower 的确认。\n\n"
"<code>all_isr</code>: 需要所有的在线复制者都确认。\n"
"<code>leader_only</code>: 仅需要分区 leader 确认。\n"
"<code>none</code>: 无需 Kafka 回复任何确认。"
}
label {
en: "Required Acks"
zh: "Kafka 确认数量"
}
}
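# Illustrative producer settings tying the dispatch and acknowledgement
# options together; the values shown reflect the semantics described above,
# not recommended defaults:
#
#   partition_strategy = key_dispatch
#   required_acks = all_isr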
partition_count_refresh_interval {
desc {
en: "The time interval for Kafka producer to discover increased number of partitions.\n"
"After the number of partitions is increased in Kafka, EMQX will start taking the \n"
"discovered partitions into account when dispatching messages per <code>partition_strategy</code>."
zh: "配置 Kafka 刷新分区数量的时间间隔。\n"
"EMQX 发现 Kafka 分区数量增加后,会开始按 <code>partition_strategy<code> 配置,把消息发送到新的分区中。"
}
label {
en: "Partition Count Refresh Interval"
zh: "分区数量刷新间隔"
}
}
max_inflight {
desc {
en: "Maximum number of batches allowed for Kafka producer (per-partition) to send before receiving acknowledgement from Kafka. "
"Greater value typically means better throughput. However, there can be a risk of message reordering when this "
"value is greater than 1."
zh: "设置 Kafka 生产者(每个分区一个)在收到 Kafka 的确认前最多发送多少个请求(批量)。"
"调大这个值通常可以增加吞吐量,但是,当该值设置大于 1 时存在消息乱序的风险。"
}
label {
en: "Max Inflight"
zh: "飞行窗口"
}
}
producer_buffer {
desc {
en: "Configure producer message buffer.\n\n"
"Tell Kafka producer how to buffer messages when EMQX has more messages to send than "
"Kafka can keep up, or when Kafka is down."
zh: "配置消息缓存的相关参数。\n\n"
"当 EMQX 需要发送的消息超过 Kafka 处理能力,或者当 Kafka 临时下线时EMQX 内部会将消息缓存起来。"
}
label {
en: "Message Buffer"
zh: "消息缓存"
}
}
buffer_mode {
desc {
en: "Message buffer mode.\n\n"
"<code>memory</code>: Buffer all messages in memory. The messages will be lost in case of EMQX node restart\n"
"<code>disk</code>: Buffer all messages on disk. The messages on disk are able to survive EMQX node restart.\n"
"<code>hybrid</code>: Buffer message in memory first, when up to certain limit "
"(see <code>segment_bytes</code> config for more information), then start offloading "
"messages to disk, Like <code>memory</code> mode, the messages will be lost in case of "
"EMQX node restart."
zh: "消息缓存模式。\n"
"<code>memory</code>: 所有的消息都缓存在内存里。如果 EMQX 服务重启,缓存的消息会丢失。\n"
"<code>disk</code>: 缓存到磁盘上。EMQX 重启后会继续发送重启前未发送完成的消息。\n"
"<code>hybrid</code>: 先将消息缓存在内存中,当内存中的消息堆积超过一定限制"
"(配置项 <code>segment_bytes</code> 描述了该限制)后,后续的消息会缓存到磁盘上。"
"与 <code>memory</code> 模式一样,如果 EMQX 服务重启,缓存的消息会丢失。"
}
label {
en: "Buffer Mode"
zh: "缓存模式"
}
}
buffer_per_partition_limit {
desc {
en: "Number of bytes allowed to buffer for each Kafka partition. "
"When this limit is exceeded, old messages will be dropped in a trade for credits "
"for new messages to be buffered."
zh: "为每个 Kafka 分区设置的最大缓存字节数。当超过这个上限之后,老的消息会被丢弃,"
"为新的消息腾出空间。"
}
label {
en: "Per-partition Buffer Limit"
zh: "Kafka 分区缓存上限"
}
}
buffer_segment_bytes {
desc {
en: "Applicable when buffer mode is set to <code>disk</code> or <code>hybrid</code>.\n"
"This value is to specify the size of each on-disk buffer file."
zh: "当缓存模式是 <code>disk</code> 或 <code>hybrid</code> 时适用。"
"该配置用于指定缓存到磁盘上的文件的大小。"
}
label {
en: "Segment File Bytes"
zh: "缓存文件大小"
}
}
buffer_memory_overload_protection {
desc {
en: "Applicable when buffer mode is set to <code>memory</code>\n"
"EMQX will drop old buffered messages under high memory pressure. "
"The high memory threshold is defined in config <code>sysmon.os.sysmem_high_watermark</code>. "
"NOTE: This config only works on Linux."
zh: "缓存模式是 <code>memory</code> 或 <code>hybrid</code> 时适用。"
"当系统处于高内存压力时,从队列中丢弃旧的消息以减缓内存增长。"
"内存压力值由配置项 <code>sysmon.os.sysmem_high_watermark</code> 决定。"
"注意,该配置仅在 Linux 系统中有效。"
}
label {
en: "Memory Overload Protection"
zh: "内存过载保护"
}
}
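# A hedged sketch of a complete buffer block using the four fields above; the
# key names inside the block and the sizes are assumptions for illustration:
#
#   buffer {
#     mode = hybrid
#     per_partition_limit = 2GB
#     segment_bytes = 100MB
#     memory_overload_protection = true
#   }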
auth_username_password {
desc {
en: "Username/password based authentication."
zh: "基于用户名密码的认证。"
}
label {
en: "Username/password Auth"
zh: "用户名密码认证"
}
}
auth_gssapi_kerberos {
desc {
en: "Use GSSAPI/Kerberos authentication."
zh: "使用 GSSAPI/Kerberos 认证。"
}
label {
en: "GSSAPI/Kerberos"
zh: "GSSAPI/Kerberos"
}
}
kafka_consumer {
desc {
en: "Kafka Consumer configuration."
zh: "Kafka 消费者配置。"
}
label {
en: "Kafka Consumer"
zh: "Kafka 消费者"
}
}
consumer_opts {
desc {
en: "Local MQTT publish and Kafka consumer configs."
zh: "本地 MQTT 转发 和 Kafka 消费者配置。"
}
label {
en: "MQTT to Kafka"
zh: "MQTT 到 Kafka"
}
}
consumer_kafka_opts {
desc {
en: "Kafka consumer configs."
zh: "Kafka消费者配置。"
}
label {
en: "Kafka Consumer"
zh: "Kafka 消费者"
}
}
consumer_mqtt_opts {
desc {
en: "Local MQTT message publish."
zh: "本地 MQTT 消息转发。"
}
label {
en: "MQTT publish"
zh: "MQTT 转发"
}
}
consumer_mqtt_topic {
desc {
en: "Local topic to which consumed Kafka messages should be published to."
zh: "设置 Kafka 消息向哪个本地 MQTT 主题转发消息。"
}
label {
en: "MQTT Topic"
zh: "MQTT主题"
}
}
consumer_mqtt_qos {
desc {
en: "MQTT QoS used to publish messages consumed from Kafka."
zh: "转发 MQTT 消息时使用的 QoS。"
}
label {
en: "QoS"
zh: "QoS"
}
}
consumer_mqtt_payload {
desc {
en: "The template for transforming the incoming Kafka message."
" By default, it will use JSON format to serialize"
" inputs from the Kafka message. Such fields are:\n"
"<code>headers</code>: an object containing string key-value pairs.\n"
"<code>key</code>: Kafka message key (uses the chosen key encoding).\n"
"<code>offset</code>: offset for the message.\n"
"<code>topic</code>: Kafka topic.\n"
"<code>ts</code>: message timestamp.\n"
"<code>ts_type</code>: message timestamp type, which is one of"
" <code>create</code>, <code>append</code> or <code>undefined</code>.\n"
"<code>value</code>: Kafka message value (uses the chosen value encoding).\n"
zh: "用于转换收到的 Kafka 消息的模板。 "
"默认情况下,它将使用 JSON 格式来序列化来自 Kafka 的所有字段。 "
"这些字段包括:"
"<code>headers</code>:一个包含字符串键值对的 JSON 对象。\n"
"<code>key</code>Kafka 消息的键(使用选择的编码方式编码)。\n"
"<code>offset</code>:消息的偏移量。\n"
"<code>topic</code>Kafka 主题。\n"
"<code>ts</code>: 消息的时间戳。\n"
"<code>ts_type</code>:消息的时间戳类型,值可能是:"
" <code>create</code> <code>append</code> 或 <code>undefined</code>。\n"
"<code>value</code>: Kafka 消息值(使用选择的编码方式编码)。\n"
}
label {
en: "MQTT Payload Template"
zh: "MQTT Payload Template"
}
}
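# An example payload template, assuming the same ${...} placeholder syntax as
# the producer templates; it forwards three of the documented fields, and the
# payload_template key name is an assumption:
#
#   payload_template = "{\"offset\": ${offset}, \"key\": \"${key}\", \"value\": \"${value}\"}"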
consumer_kafka_topic {
desc {
en: "Kafka topic to consume from."
zh: "指定从哪个 Kafka 主题消费消息。"
}
label {
en: "Kafka Topic"
zh: "Kafka 主题"
}
}
consumer_max_batch_bytes {
desc {
en: "Set how many bytes to pull from Kafka in each fetch request. "
"Please note that if the configured value is smaller than the message size in Kafka, it may negatively impact the fetch performance."
zh: "设置每次从 Kafka 拉取数据的字节数。"
"如该配置小于 Kafka 消息的大小,可能会影响消费性能。"
}
label {
en: "Fetch Bytes"
zh: "拉取字节数"
}
}
# hidden
consumer_max_rejoin_attempts {
desc {
en: "Maximum number of times allowed for a member to re-join the group. If the consumer group can not reach balance after this configured number of attempts, the consumer group member will restart after a delay."
zh: "消费组成员允许重新加入小组的最大次数。如超过该配置次数后仍未能成功加入消费组,则会在等待一段时间后重试。"
}
label {
en: "Max Rejoin Attempts"
zh: "最大的重新加入尝试"
}
}
consumer_offset_reset_policy {
desc {
en: "Defines from which offset a consumer should start fetching when there "
"is no commit history or when the commit history becomes invalid."
zh: "如不存在偏移量历史记录或历史记录失效,消费者应使用哪个偏移量开始消费。"
}
label {
en: "Offset Reset Policy"
zh: "偏移重置策略"
}
}
consumer_offset_commit_interval_seconds {
desc {
en: "Defines the time interval between two offset commit requests sent for each consumer group."
zh: "指定 Kafka 消费组偏移量提交的时间间隔。"
}
label {
en: "Offset Commit Interval"
zh: "偏移提交间隔"
}
}
consumer_topic_mapping {
desc {
en: "Defines the mapping between Kafka topics and MQTT topics. Must contain at least one item."
zh: "指定 Kafka 主题和 MQTT 主题之间的映射关系。 应至少包含一项。"
}
label {
en: "Topic Mapping"
zh: "主题映射关系"
}
}
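# A minimal sketch of a one-item topic mapping, combining the consumer entries
# described above; field names and topic names are hypothetical:
#
#   topic_mapping = [
#     {
#       kafka_topic = "telemetry"
#       mqtt_topic = "kafka/telemetry"
#       qos = 1
#     }
#   ]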
consumer_key_encoding_mode {
desc {
en: "Defines how the key from the Kafka message is"
" encoded before being forwarded via MQTT.\n"
"<code>none</code> Uses the key from the Kafka message unchanged."
" Note: in this case, the key must be a valid UTF-8 string.\n"
"<code>base64</code> Uses base-64 encoding on the received key."
zh: "通过 MQTT 转发之前,如何处理 Kafka 消息的 Key。"
"<code>none</code> 使用 Kafka 消息中的 Key 原始值,不进行编码。"
" 注意在这种情况下Key 必须是一个有效的 UTF-8 字符串。\n"
"<code>base64</code> 对收到的密钥或值使用 base-64 编码。"
}
label {
en: "Key Encoding Mode"
zh: "Key 编码模式"
}
}
consumer_value_encoding_mode {
desc {
en: "Defines how the value from the Kafka message is"
" encoded before being forwarded via MQTT.\n"
"<code>none</code> Uses the value from the Kafka message unchanged."
" Note: in this case, the value must be a valid UTF-8 string.\n"
"<code>base64</code> Uses base-64 encoding on the received value."
zh: "通过 MQTT 转发之前,如何处理 Kafka 消息的 Value。"
"<code>none</code> 使用 Kafka 消息中的 Value 原始值,不进行编码。"
" 注意在这种情况下Value 必须是一个有效的 UTF-8 字符串。\n"
"<code>base64</code> 对收到的 Value 使用 base-64 编码。"
}
label {
en: "Value Encoding Mode"
zh: "Value 编码模式"
}
}
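# Illustrative encoding settings: keep UTF-8 keys unchanged but base-64 encode
# possibly-binary values before they are published over MQTT (field names are
# assumptions):
#
#   key_encoding_mode = none
#   value_encoding_mode = base64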
}