From 68baa4dcafffd78ff0dee0f35b2c549e5ff27662 Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Thu, 8 Feb 2024 15:44:02 +0100 Subject: [PATCH] docs: fix i18n --- .../src/emqx_bridge_http.app.src | 2 +- .../src/emqx_bridge_http_schema.erl | 1 - rel/i18n/emqx_audit_api.hocon | 48 +++++----- rel/i18n/emqx_authn_ldap_schema.hocon | 2 +- rel/i18n/emqx_auto_subscribe_schema.hocon | 2 +- rel/i18n/emqx_bridge_azure_event_hub.hocon | 55 ++++++------ rel/i18n/emqx_bridge_confluent_producer.hocon | 51 ++++++----- rel/i18n/emqx_bridge_es.hocon | 16 ++-- rel/i18n/emqx_bridge_gcp_pubsub.hocon | 14 +-- rel/i18n/emqx_bridge_greptimedb.hocon | 18 ++-- rel/i18n/emqx_bridge_hstreamdb.hocon | 14 +-- .../emqx_bridge_hstreamdb_connector.hocon | 2 +- rel/i18n/emqx_bridge_http_connector.hocon | 2 +- rel/i18n/emqx_bridge_http_schema.hocon | 26 +++--- rel/i18n/emqx_bridge_iotdb.hocon | 4 +- rel/i18n/emqx_bridge_kafka.hocon | 89 +++++++++---------- rel/i18n/emqx_bridge_kinesis.hocon | 18 ++-- rel/i18n/emqx_bridge_mongodb.hocon | 16 ++-- rel/i18n/emqx_bridge_mqtt_schema.hocon | 2 +- rel/i18n/emqx_bridge_mysql.hocon | 18 ++-- rel/i18n/emqx_bridge_pulsar.hocon | 4 +- rel/i18n/emqx_bridge_redis.hocon | 18 ++-- rel/i18n/emqx_bridge_syskeeper.hocon | 20 ++--- rel/i18n/emqx_bridge_syskeeper_proxy.hocon | 8 +- rel/i18n/emqx_schema.hocon | 4 +- 25 files changed, 226 insertions(+), 228 deletions(-) diff --git a/apps/emqx_bridge_http/src/emqx_bridge_http.app.src b/apps/emqx_bridge_http/src/emqx_bridge_http.app.src index 0c355691b..0876d5737 100644 --- a/apps/emqx_bridge_http/src/emqx_bridge_http.app.src +++ b/apps/emqx_bridge_http/src/emqx_bridge_http.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_http, [ {description, "EMQX HTTP Bridge and Connector Application"}, - {vsn, "0.2.2"}, + {vsn, "0.2.3"}, {registered, []}, {applications, [kernel, stdlib, emqx_resource, ehttpc]}, {env, [{emqx_action_info_modules, [emqx_bridge_http_action_info]}]}, diff --git a/apps/emqx_bridge_http/src/emqx_bridge_http_schema.erl b/apps/emqx_bridge_http/src/emqx_bridge_http_schema.erl index e2ecdd3dc..a6cf17a09 100644 --- a/apps/emqx_bridge_http/src/emqx_bridge_http_schema.erl +++ b/apps/emqx_bridge_http/src/emqx_bridge_http_schema.erl @@ -187,7 +187,6 @@ request_config() -> mk( egress, #{ - desc => ?DESC("config_direction"), required => {false, recursively}, deprecated => {since, "5.0.12"} } diff --git a/rel/i18n/emqx_audit_api.hocon b/rel/i18n/emqx_audit_api.hocon index 40741310e..1f51c8d77 100644 --- a/rel/i18n/emqx_audit_api.hocon +++ b/rel/i18n/emqx_audit_api.hocon @@ -1,57 +1,61 @@ emqx_audit_api { audit_get.desc: -"""Get audit logs based on filter API, empowers users to efficiently +"""Get audit logs with filtering parameters. This feature enables users to efficiently access the desired audit trail data and facilitates auditing, compliance, -troubleshooting, and security analysis""" +troubleshooting, and security analysis.""" audit_get.label: "List audit logs" filter_node.desc: -"Filter logs based on the node name to which the logs are created." +"Filter logs by the node name where the logs were generated." filter_from.desc: -""""Filter logs based on source type, valid values include: -`dashboard`: Dashboard request logs, requiring the use of a jwt_token. -`rest_api`: API KEY request logs. -`cli`: The emqx command line logs. -`erlang_console`: The emqx remote_console run function logs.""" +"""Filter logs by source type. Possible values are: + +- `dashboard`: Dashboard request logs. +- `rest_api`: API KEY request logs. 
+- `cli`: The emqx command line logs.
+- `erlang_console`: The emqx remote_console run function logs."""

 filter_source.desc:
-""""Filter logs based on source, Possible values are:
-The login username when logs are generated from the dashboard.
-The API Keys when logs are generated from the REST API.
-empty string when logs are generated from CLI, Erlang console."""
+"""Filter logs by source. Possible values are:
+
+- The login username to filter logs generated from Dashboard for this specific user.
+- The API Key to filter logs generated from the REST API for this specific API key.
+- An empty string to filter logs generated from CLI or Erlang console."""

 filter_source_ip.desc:
-"Filter logs based on source ip when logs are generated from dashboard and REST API."
+"Filter logs by source IP, applicable for logs generated from Dashboard or REST API operations."

 filter_operation_id.desc:
-"Filter log with swagger's operation_id when logs are generated from dashboard and REST API."
+"Filter logs by Swagger operation_id, applicable for logs generated from Dashboard or REST API operations."

 filter_operation_type.desc:
-"Filter logs with operation type."
+"Filter logs by operation type."

 filter_operation_result.desc:
-"Filter logs with operation result."
+"Filter logs by operation result."

 filter_http_status_code.desc:
-"Filter The HTTP API with response code when logs are generated from dashboard and REST API."
+"""Filter the HTTP API logs by response code, applicable for logs generated from Dashboard or REST API operations."""

 filter_http_method.desc:
-"Filter The HTTP API Request with method when logs are generated from dashboard and REST API."
+"""Filter the HTTP API logs by request method, applicable for logs generated from Dashboard or REST API operations."""

 filter_gte_duration_ms.desc:
-"Filter logs with a duration greater than or equal to given microseconds."
+"""Filter logs by age, selecting those created no earlier than the given duration ago."""

 filter_lte_duration_ms.desc:
-"Filter logs with a duration less than or equal to given microseconds."
+"""Filter logs by age, selecting those created no later than the given duration ago."""

 filter_gte_created_at.desc:
-"Filter logs with a creation time greater than or equal to the given timestamp, rfc3339 or timestamp(millisecond)"
+"""Filter logs by creation time, selecting logs created no earlier than the given timestamp.
+The timestamp can be provided either in rfc3339 string format or as a millisecond epoch timestamp."""

 filter_lte_created_at.desc:
-"Filter logs with a creation time less than or equal to the given timestamp, rfc3339 or timestamp(millisecond)"
+"""Filter logs by creation time, selecting logs created no later than the given timestamp.
+The timestamp can be provided either in rfc3339 string format or as a millisecond epoch timestamp.""" } diff --git a/rel/i18n/emqx_authn_ldap_schema.hocon b/rel/i18n/emqx_authn_ldap_schema.hocon index 40e2882ba..6254eefa3 100644 --- a/rel/i18n/emqx_authn_ldap_schema.hocon +++ b/rel/i18n/emqx_authn_ldap_schema.hocon @@ -4,7 +4,7 @@ ldap.desc: """Configuration of authenticator using LDAP as authentication data source.""" ldap_deprecated.desc: -"""This is a deprecated form, you should avoid using it.""" +"""This is a deprecated form, and you should avoid using it.""" password_attribute.desc: """Indicates which attribute is used to represent the user's password.""" diff --git a/rel/i18n/emqx_auto_subscribe_schema.hocon b/rel/i18n/emqx_auto_subscribe_schema.hocon index e26ed2546..b0c7fdf37 100644 --- a/rel/i18n/emqx_auto_subscribe_schema.hocon +++ b/rel/i18n/emqx_auto_subscribe_schema.hocon @@ -8,7 +8,7 @@ auto_subscribe.label: nl.desc: """Default value 0. -MQTT v3.1.1: if you subscribe to the topic published by yourself, you will receive all messages that you published. +MQTT v3.1.1: if you subscribe to the topic published by yourself, you will receive all messages that you published. MQTT v5: if you set this option as 1 when subscribing, the server will not forward the message you published to you.""" nl.label: diff --git a/rel/i18n/emqx_bridge_azure_event_hub.hocon b/rel/i18n/emqx_bridge_azure_event_hub.hocon index 3d77a508a..a30c37b58 100644 --- a/rel/i18n/emqx_bridge_azure_event_hub.hocon +++ b/rel/i18n/emqx_bridge_azure_event_hub.hocon @@ -45,7 +45,7 @@ socket_receive_buffer.label: """Socket Receive Buffer Size""" socket_tcp_keepalive.desc: -"""Enable TCP keepalive for Azure Event Hubs bridge connections. +"""Enable TCP keepalive. The value is three comma separated numbers in the format of 'Idle,Interval,Probes' - Idle: The number of seconds a connection needs to be idle before the server begins to send out keep-alive probes (Linux default 7200). - Interval: The number of seconds between TCP keep-alive probes (Linux default 75). @@ -111,24 +111,25 @@ max_batch_bytes.label: """Max Batch Bytes""" required_acks.desc: -"""Required acknowledgements for Azure Event Hubs partition leader to wait for its followers before it sends back the acknowledgement to EMQX Azure Event Hubs producer +"""The acknowledgement criteria for the partition leader. It determines the level of confirmation required from partition replicas before sending an acknowledgement back to the producer. all_isr: Require all in-sync replicas to acknowledge. -leader_only: Require only the partition-leader's acknowledgement.""" +leader_only: Require only the partition-leader's acknowledgement. +none: No need for Kafka to acknowledge at all.""" required_acks.label: """Required Acks""" kafka_headers.desc: -"""Please provide a placeholder to be used as Azure Event Hubs Headers
+"""Provide a placeholder for message headers
e.g. ${pub_props}
-Notice that the value of the placeholder must either be an object: +Note that the value of the placeholder must be either an object: {\"foo\": \"bar\"} or an array of key-value pairs: [{\"key\": \"foo\", \"value\": \"bar\"}]""" kafka_headers.label: -"""Azure Event Hubs Headers""" +"""Message Headers""" producer_kafka_ext_headers.desc: """Please provide more key-value pairs for Azure Event Hubs headers
@@ -145,19 +146,19 @@ producer_kafka_ext_header_key.label: """Azure Event Hubs extra header key.""" producer_kafka_ext_header_value.desc: -"""Value of the Azure Event Hubs header. Placeholders in format of ${var} are supported.""" +"""Value of the header. Placeholders in format of ${var} are supported.""" producer_kafka_ext_header_value.label: -"""Value""" +"""Extra Headers Value""" kafka_header_value_encode_mode.desc: -"""Azure Event Hubs headers value encode mode
- - NONE: only add binary values to Azure Event Hubs headers;
- - JSON: only add JSON values to Azure Event Hubs headers, -and encode it to JSON strings before sending.""" +"""The encoding mode for headers. + + - `none`: Add only strings are added as header values + - `json`: Encode header values as JSON string""" kafka_header_value_encode_mode.label: -"""Azure Event Hubs headers value encode mode""" +"""Headers value encode mode""" metadata_request_timeout.desc: """Maximum wait time when fetching metadata from Azure Event Hubs.""" @@ -166,10 +167,10 @@ metadata_request_timeout.label: """Metadata Request Timeout""" desc_type.desc: -"""The Bridge Type""" +"""The action type.""" desc_type.label: -"""Bridge Type""" +"""Action Type""" socket_nodelay.desc: """When set to 'true', TCP buffer is sent as soon as possible. Otherwise, the OS kernel may buffer small TCP packets for a while (40 ms by default).""" @@ -214,7 +215,7 @@ auth_sasl_mechanism.label: """Mechanism""" config_enable.desc: -"""Enable (true) or disable (false) this bridge.""" +"""Enable (true) or disable (false) this config.""" config_enable.label: """Enable or Disable""" @@ -226,7 +227,7 @@ desc_config.label: """Azure Event Hubs Bridge Configuration""" buffer_per_partition_limit.desc: -"""Number of bytes allowed to buffer for each Azure Event Hubs partition. When this limit is exceeded, old messages will be dropped in a trade for credits for new messages to be buffered.""" +"""Number of bytes allowed to buffer for each partition. When this limit is exceeded, older messages will be discarded to make room for new messages to be buffered.""" buffer_per_partition_limit.label: """Per-partition Buffer Limit""" @@ -238,19 +239,19 @@ bootstrap_hosts.label: """Bootstrap Hosts""" kafka_message_key.desc: -"""Template to render Azure Event Hubs message key. If the template is rendered into a NULL value (i.e. there is no such data field in Rule Engine context) then Azure Event Hubs's NULL (but not empty string) is used.""" +"""Template for rendering message key. If the template is rendered into a NULL value (i.e. there is no such data field in Rule Engine context) then NULL (but not empty string) is used.""" kafka_message_key.label: """Message Key""" kafka_message.desc: -"""Template to render an Azure Event Hubs message.""" +"""Template for rendering a message.""" kafka_message.label: """Azure Event Hubs Message Template""" mqtt_topic.desc: -"""MQTT topic or topic filter as data source (bridge input). If rule action is used as data source, this config should be left empty, otherwise messages will be duplicated in Azure Event Hubs.""" +"""MQTT topic or topic filter as data source (action input). If rule action is used as data source, this config should be left empty, otherwise messages will be duplicated in Azure Event Hubs.""" mqtt_topic.label: """Source MQTT Topic""" @@ -262,29 +263,31 @@ kafka_message_value.label: """Message Value""" partition_strategy.desc: -"""Partition strategy is to tell the producer how to dispatch messages to Azure Event Hubs partitions. +"""Partition strategy is to tell the producer how to dispatch messages to partitions. -random: Randomly pick a partition for each message -key_dispatch: Hash Azure Event Hubs message key to a partition number""" +random: Randomly pick a partition for each message. +key_dispatch: Assigns messages to partitions based on a hash of the message key, +ensuring consistent partition for messages with the same key.""" partition_strategy.label: """Partition Strategy""" buffer_segment_bytes.desc: """Applicable when buffer mode is set to disk or hybrid. 
-This value is to specify the size of each on-disk buffer file.""" +This setting specifies the size of each buffer segment file stored on disk.""" buffer_segment_bytes.label: """Segment File Bytes""" max_inflight.desc: -"""Maximum number of batches allowed for Azure Event Hubs producer (per-partition) to send before receiving acknowledgement from Azure Event Hubs. Greater value typically means better throughput. However, there can be a risk of message reordering when this value is greater than 1.""" +"""The maximum number of message batches that the producer can send to each partition before it must wait for an acknowledgement. +Setting a higher number can enhance throughput. However, value above 1 may lead to potential message reordering risks.""" max_inflight.label: """Max Inflight""" compression.desc: -"""Compression method.""" +"""Specify the method of compression.""" compression.label: """Compression""" diff --git a/rel/i18n/emqx_bridge_confluent_producer.hocon b/rel/i18n/emqx_bridge_confluent_producer.hocon index 730f0e371..535d910f0 100644 --- a/rel/i18n/emqx_bridge_confluent_producer.hocon +++ b/rel/i18n/emqx_bridge_confluent_producer.hocon @@ -45,7 +45,7 @@ socket_receive_buffer.label: """Socket Receive Buffer Size""" socket_tcp_keepalive.desc: -"""Enable TCP keepalive for Confluent bridge connections. +"""Enable TCP keepalive. The value is three comma separated numbers in the format of 'Idle,Interval,Probes' - Idle: The number of seconds a connection needs to be idle before the server begins to send out keep-alive probes (Linux default 7200). - Interval: The number of seconds between TCP keep-alive probes (Linux default 75). @@ -111,24 +111,25 @@ max_batch_bytes.label: """Max Batch Bytes""" required_acks.desc: -"""Required acknowledgements for Confluent partition leader to wait for its followers before it sends back the acknowledgement to EMQX Confluent producer +"""The acknowledgement criteria for the partition leader. It determines the level of confirmation required from partition replicas before sending an acknowledgement back to the producer. all_isr: Require all in-sync replicas to acknowledge. -leader_only: Require only the partition-leader's acknowledgement.""" +leader_only: Require only the partition-leader's acknowledgement. +none: No need for Kafka to acknowledge at all.""" required_acks.label: """Required Acks""" kafka_headers.desc: -"""Please provide a placeholder to be used as Confluent Headers
+"""Provide a placeholder for message headers
e.g. ${pub_props}
-Notice that the value of the placeholder must either be an object: +Note that the value of the placeholder must be either an object: {\"foo\": \"bar\"} or an array of key-value pairs: [{\"key\": \"foo\", \"value\": \"bar\"}]""" kafka_headers.label: -"""Confluent Headers""" +"""Message Headers""" producer_kafka_ext_headers.desc: """Please provide more key-value pairs for Confluent headers
@@ -145,19 +146,19 @@ producer_kafka_ext_header_key.label: """Confluent extra header key.""" producer_kafka_ext_header_value.desc: -"""Value of the Confluent header. Placeholders in format of ${var} are supported.""" +"""Value of the header. Placeholders in format of ${var} are supported.""" producer_kafka_ext_header_value.label: -"""Value""" +"""Extra Headers Value""" kafka_header_value_encode_mode.desc: -"""Confluent headers value encode mode
- - NONE: only add binary values to Confluent headers;
- - JSON: only add JSON values to Confluent headers, -and encode it to JSON strings before sending.""" +"""The encoding mode for headers. + + - `none`: Add only strings are added as header values + - `json`: Encode header values as JSON string""" kafka_header_value_encode_mode.label: -"""Confluent headers value encode mode""" +"""Headers value encode mode""" metadata_request_timeout.desc: """Maximum wait time when fetching metadata from Confluent.""" @@ -166,7 +167,7 @@ metadata_request_timeout.label: """Metadata Request Timeout""" desc_type.desc: -"""The Action Type""" +"""The action type.""" desc_type.label: """Action Type""" @@ -214,7 +215,7 @@ auth_sasl_mechanism.label: """Mechanism""" config_enable.desc: -"""Enable (true) or disable (false) this action.""" +"""Enable (true) or disable (false) this config.""" config_enable.label: """Enable or Disable""" @@ -226,7 +227,7 @@ desc_config.label: """Confluent Action Configuration""" buffer_per_partition_limit.desc: -"""Number of bytes allowed to buffer for each Confluent partition. When this limit is exceeded, old messages will be dropped in a trade for credits for new messages to be buffered.""" +"""Number of bytes allowed to buffer for each partition. When this limit is exceeded, older messages will be discarded to make room for new messages to be buffered.""" buffer_per_partition_limit.label: """Per-partition Buffer Limit""" @@ -238,13 +239,13 @@ bootstrap_hosts.label: """Bootstrap Server""" kafka_message_key.desc: -"""Template to render Confluent message key. If the template is rendered into a NULL value (i.e. there is no such data field in Rule Engine context) then Confluent's NULL (but not empty string) is used.""" +"""Template for rendering message key. If the template is rendered into a NULL value (i.e. there is no such data field in Rule Engine context) then NULL (but not empty string) is used.""" kafka_message_key.label: """Message Key""" kafka_message.desc: -"""Template to render a Confluent message.""" +"""Template for rendering a message.""" kafka_message.label: """Confluent Message Template""" @@ -262,29 +263,31 @@ kafka_message_value.label: """Message Value""" partition_strategy.desc: -"""Partition strategy is to tell the producer how to dispatch messages to Confluent partitions. +"""Partition strategy is to tell the producer how to dispatch messages to partitions. -random: Randomly pick a partition for each message -key_dispatch: Hash Confluent message key to a partition number""" +random: Randomly pick a partition for each message. +key_dispatch: Assigns messages to partitions based on a hash of the message key, +ensuring consistent partition for messages with the same key.""" partition_strategy.label: """Partition Strategy""" buffer_segment_bytes.desc: """Applicable when buffer mode is set to disk or hybrid. -This value is to specify the size of each on-disk buffer file.""" +This setting specifies the size of each buffer file stored on disk.""" buffer_segment_bytes.label: """Segment File Bytes""" max_inflight.desc: -"""Maximum number of batches allowed for Confluent producer (per-partition) to send before receiving acknowledgement from Confluent. Greater value typically means better throughput. However, there can be a risk of message reordering when this value is greater than 1.""" +"""The maximum number of message batches that the producer can send to each partition before it must wait for an acknowledgement. +Setting a higher number can enhance throughput. 
However, value above 1 may lead to potential message reordering risks.""" max_inflight.label: """Max Inflight""" compression.desc: -"""Compression method.""" +"""Specify the method of compression.""" compression.label: """Compression""" diff --git a/rel/i18n/emqx_bridge_es.hocon b/rel/i18n/emqx_bridge_es.hocon index 1cff9dbb9..0ed31d95a 100644 --- a/rel/i18n/emqx_bridge_es.hocon +++ b/rel/i18n/emqx_bridge_es.hocon @@ -6,10 +6,10 @@ elasticsearch.label: """ElasticSearch""" config_enable.desc: -"""Enable or disable this bridge""" +"""Enable or disable this action.""" config_enable.label: -"""Enable Or Disable Bridge""" +"""Enable Or Disable Action""" config_authentication.desc: """Authentication configuration""" @@ -24,13 +24,13 @@ auth_basic.label: """Basic auth params""" config_auth_basic_username.desc: -"""The username as configured at the IoTDB REST interface""" +"""The username to authenticate against Elastic Search.""" config_auth_basic_username.label: """HTTP Basic Auth Username""" config_auth_basic_password.desc: -"""The password as configured at the IoTDB REST interface""" +"""The password to authenticate against Elastic Search.""" config_auth_basic_password.label: """HTTP Basic Auth Password""" @@ -69,16 +69,16 @@ config_max_retries.label: """HTTP Request Max Retries""" desc_config.desc: -"""Configuration for Apache IoTDB bridge.""" +"""Configuration for Elastic Search.""" desc_config.label: -"""IoTDB Bridge Configuration""" +"""Elastic Search Action Configuration""" desc_name.desc: -"""Bridge name, used as a human-readable description of the bridge.""" +"""A human-readable identifier.""" desc_name.label: -"""Bridge Name""" +"""Action Name""" config_parameters_index.desc: """Name of index, or index alias to perform the action on. diff --git a/rel/i18n/emqx_bridge_gcp_pubsub.hocon b/rel/i18n/emqx_bridge_gcp_pubsub.hocon index 68a6f8578..b2183acc4 100644 --- a/rel/i18n/emqx_bridge_gcp_pubsub.hocon +++ b/rel/i18n/emqx_bridge_gcp_pubsub.hocon @@ -10,19 +10,19 @@ desc_config.desc: """Configuration for a GCP PubSub bridge.""" desc_config.label: -"""GCP PubSub Bridge Configuration""" +"""GCP PubSub Configuration""" desc_name.desc: -"""Bridge name, used as a human-readable description of the bridge.""" +"""Action name, used as a human-readable identifier.""" desc_name.label: -"""Bridge Name""" +"""Action Name""" desc_type.desc: -"""The Bridge Type""" +"""The action type.""" desc_type.label: -"""Bridge Type""" +"""Action Type""" local_topic.desc: """The MQTT topic filter to be forwarded to GCP PubSub. All MQTT 'PUBLISH' messages with the topic @@ -62,7 +62,7 @@ pipelining.desc: """A positive integer. Whether to send HTTP requests continuously, when set to 1, it means that after each HTTP request is sent, you need to wait for the server to return and then continue to send the next request.""" pipelining.label: -"""HTTP Pipelineing""" +"""HTTP Pipelining""" pool_size.desc: """The pool size.""" @@ -140,7 +140,7 @@ service_account_json.label: } consumer_mqtt_qos { - desc: "MQTT QoS used to publish messages consumed from GCP PubSub." + desc: "MQTT QoS level applied when publishing messages that are consumed from GCP PubSub." 
label: "QoS" } diff --git a/rel/i18n/emqx_bridge_greptimedb.hocon b/rel/i18n/emqx_bridge_greptimedb.hocon index 977e6e064..435074248 100644 --- a/rel/i18n/emqx_bridge_greptimedb.hocon +++ b/rel/i18n/emqx_bridge_greptimedb.hocon @@ -1,33 +1,33 @@ emqx_bridge_greptimedb { config_enable.desc: -"""Enable or disable this bridge.""" +"""Enable or disable this action.""" config_enable.label: -"""Enable Or Disable Bridge""" +"""Enable Or Disable Action""" desc_config.desc: -"""Configuration for an GreptimeDB bridge.""" +"""Configuration for an GreptimeDB action.""" desc_config.label: -"""GreptimeDB Bridge Configuration""" +"""GreptimeDB Action Configuration""" desc_name.desc: -"""Bridge name.""" +"""The name of the action.""" desc_name.label: -"""Bridge Name""" +"""Action Name""" desc_type.desc: -"""The Bridge Type.""" +"""The type of the action.""" desc_type.label: -"""Bridge Type""" +"""Action Type""" local_topic.desc: """The MQTT topic filter to be forwarded to the GreptimeDB. All MQTT 'PUBLISH' messages with the topic matching the local_topic will be forwarded.
-NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also local_topic is +NOTE: If this Sink is used as the action of a rule (EMQX rule engine), and also local_topic is configured, then both the data got from the rule and the MQTT messages that match local_topic will be forwarded.""" diff --git a/rel/i18n/emqx_bridge_hstreamdb.hocon b/rel/i18n/emqx_bridge_hstreamdb.hocon index de9989953..45ec0fdd8 100644 --- a/rel/i18n/emqx_bridge_hstreamdb.hocon +++ b/rel/i18n/emqx_bridge_hstreamdb.hocon @@ -7,10 +7,10 @@ config_direction.label: """Bridge Direction""" desc_config.desc: -"""Configuration for an HStreamDB bridge.""" +"""Configuration for an HStreamDB action.""" desc_config.label: -"""HStreamDB Bridge Configuration""" +"""HStreamDB Action Configuration""" desc_connector.desc: """Generic configuration for the connector.""" @@ -19,21 +19,21 @@ desc_connector.label: """Connector Generic Configuration""" desc_name.desc: -"""Bridge name, used as a human-readable description of the bridge.""" +"""Action name, a human-readable identifier.""" desc_name.label: -"""Bridge Name""" +"""Action Name""" desc_type.desc: -"""The Bridge Type""" +"""The type of the action.""" desc_type.label: -"""Bridge Type""" +"""Action Type""" local_topic.desc: """The MQTT topic filter to be forwarded to the HStreamDB. All MQTT 'PUBLISH' messages with the topic matching the local_topic will be forwarded.
-NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also local_topic is +NOTE: If this action is used as the action of a rule (EMQX rule engine), and also local_topic is configured, then both the data got from the rule and the MQTT messages that match local_topic will be forwarded.""" diff --git a/rel/i18n/emqx_bridge_hstreamdb_connector.hocon b/rel/i18n/emqx_bridge_hstreamdb_connector.hocon index 8f7ac2edb..e00b20921 100644 --- a/rel/i18n/emqx_bridge_hstreamdb_connector.hocon +++ b/rel/i18n/emqx_bridge_hstreamdb_connector.hocon @@ -13,7 +13,7 @@ type.label: """Connector Type""" name.desc: -"""Connector name, used as a human-readable description of the connector.""" +"""A human-readable identifier for the connector.""" name.label: """Connector Name""" diff --git a/rel/i18n/emqx_bridge_http_connector.hocon b/rel/i18n/emqx_bridge_http_connector.hocon index 73dbd448f..2c81fc2ac 100644 --- a/rel/i18n/emqx_bridge_http_connector.hocon +++ b/rel/i18n/emqx_bridge_http_connector.hocon @@ -16,7 +16,7 @@ enable_pipelining.desc: """A positive integer. Whether to send HTTP requests continuously, when set to 1, it means that after each HTTP request is sent, you need to wait for the server to return and then continue to send the next request.""" enable_pipelining.label: -"""HTTP Pipelineing""" +"""HTTP Pipelining""" headers.desc: """List of HTTP headers.""" diff --git a/rel/i18n/emqx_bridge_http_schema.hocon b/rel/i18n/emqx_bridge_http_schema.hocon index 416f77834..84a1837f2 100644 --- a/rel/i18n/emqx_bridge_http_schema.hocon +++ b/rel/i18n/emqx_bridge_http_schema.hocon @@ -12,14 +12,8 @@ Template with variables is allowed.""" config_body.label: """HTTP Body""" -config_direction.desc: -"""Deprecated, The direction of this bridge, MUST be 'egress'""" - -config_direction.label: -"""Bridge Direction""" - config_enable_bridge.desc: -"""Enable or disable this bridge""" +"""Enable or disable this action.""" config_enable_bridge.label: """Enable Or Disable Bridge""" @@ -34,7 +28,7 @@ config_headers.label: config_local_topic.desc: """The MQTT topic filter to be forwarded to the HTTP server. All MQTT 'PUBLISH' messages with the topic matching the local_topic will be forwarded.
-NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also local_topic is +NOTE: If this action is used as the action of a rule (EMQX rule engine), and also local_topic is configured, then both the data got from the rule and the MQTT messages that match local_topic will be forwarded.""" @@ -61,7 +55,7 @@ config_request_timeout.label: """HTTP Request Timeout""" config_url.desc: -"""The URL of the HTTP Bridge.
+"""The URL of the HTTP action.
Template with variables is allowed in the path, but variables cannot be used in the scheme, host, or port part.
For example, http://localhost:9901/${topic} is allowed, but
@@ -69,7 +63,7 @@ For example, http://localhost:9901/${topic} is allowed, but
 http://${host}:${port}/${topic} is not allowed."""

 config_url.label:
-"""HTTP Bridge"""
+"""URL"""

 config_path.desc:
"""The URL path for this Action.
@@ -87,21 +81,21 @@ config_parameters_opts.label: """Parameters""" desc_config.desc: -"""Configuration for an HTTP bridge.""" +"""Configuration for an HTTP action.""" desc_config.label: -"""HTTP Bridge Configuration""" +"""HTTP Action Configuration""" desc_name.desc: -"""Bridge name, used as a human-readable description of the bridge.""" +"""Action name, used as a human-readable identifier.""" desc_name.label: -"""Bridge Name""" +"""Action Name""" desc_type.desc: -"""The Bridge Type""" +"""The action type.""" desc_type.label: -"""Bridge Type""" +"""Action Type""" } diff --git a/rel/i18n/emqx_bridge_iotdb.hocon b/rel/i18n/emqx_bridge_iotdb.hocon index a62b84236..f2664495d 100644 --- a/rel/i18n/emqx_bridge_iotdb.hocon +++ b/rel/i18n/emqx_bridge_iotdb.hocon @@ -66,10 +66,10 @@ desc_config.label: """IoTDB Bridge Configuration""" desc_name.desc: -"""Bridge name, used as a human-readable description of the bridge.""" +"""Action name, a human-readable identifier.""" desc_name.label: -"""Bridge Name""" +"""Action Name""" config_parameters_timestamp.desc: """Timestamp. Placeholders in format of ${var} is supported, the final value can be:
diff --git a/rel/i18n/emqx_bridge_kafka.hocon b/rel/i18n/emqx_bridge_kafka.hocon index 86e417be1..372dfa8bc 100644 --- a/rel/i18n/emqx_bridge_kafka.hocon +++ b/rel/i18n/emqx_bridge_kafka.hocon @@ -6,12 +6,6 @@ connect_timeout.desc: connect_timeout.label: """Connect Timeout""" -producer_opts.desc: -"""Local MQTT data source and Kafka bridge configs.""" - -producer_opts.label: -"""MQTT to Kafka""" - min_metadata_refresh_interval.desc: """Minimum time interval the client has to wait before refreshing Kafka broker and topic metadata. Setting too small value may add extra load on Kafka.""" @@ -45,7 +39,7 @@ socket_receive_buffer.label: """Socket Receive Buffer Size""" socket_tcp_keepalive.desc: -"""Enable TCP keepalive for Kafka bridge connections. +"""Enable TCP keepalive. The value is three comma separated numbers in the format of 'Idle,Interval,Probes' - Idle: The number of seconds a connection needs to be idle before the server begins to send out keep-alive probes (Linux default 7200). - Interval: The number of seconds between TCP keep-alive probes (Linux default 75). @@ -57,10 +51,10 @@ socket_tcp_keepalive.label: """TCP keepalive options""" desc_name.desc: -"""Bridge name, used as a human-readable description of the bridge.""" +"""Action name, used as a human-readable identifier.""" desc_name.label: -"""Bridge Name""" +"""Action Name""" consumer_offset_commit_interval_seconds.desc: """Defines the time interval between two offset commit requests sent for each consumer group.""" @@ -191,7 +185,7 @@ max_batch_bytes.label: """Max Batch Bytes""" required_acks.desc: -"""Required acknowledgements for Kafka partition leader to wait for its followers before it sends back the acknowledgement to EMQX Kafka producer +"""The acknowledgement criteria for the partition leader. It determines the level of confirmation required from partition replicas before sending an acknowledgement back to the producer. all_isr: Require all in-sync replicas to acknowledge. leader_only: Require only the partition-leader's acknowledgement. @@ -201,56 +195,56 @@ required_acks.label: """Required Acks""" kafka_headers.desc: -"""Please provide a placeholder to be used as Kafka Headers
+"""Provide a placeholder for message headers
e.g. ${pub_props}
-Notice that the value of the placeholder must either be an object: +Note that the value of the placeholder must be either an object: {\"foo\": \"bar\"} or an array of key-value pairs: [{\"key\": \"foo\", \"value\": \"bar\"}]""" kafka_headers.label: -"""Kafka Headers""" +"""Message Headers""" producer_kafka_ext_headers.desc: -"""Please provide more key-value pairs for Kafka headers
+"""Provide more key-value pairs for message headers
The key-value pairs here will be combined with the
-value of kafka_headers field before sending to Kafka.
+value of kafka_headers field before producing."""

 producer_kafka_ext_headers.label:
-"""Extra Kafka headers"""
+"""Extra Headers"""

 producer_kafka_ext_header_key.desc:
-"""Key of the Kafka header. Placeholders in format of ${var} are supported."""
+"""Key of the header. Placeholders in format of ${var} are supported."""

 producer_kafka_ext_header_key.label:
-"""Kafka extra header key."""
+"""Extra Headers Key"""

 producer_kafka_ext_header_value.desc:
-"""Value of the Kafka header. Placeholders in format of ${var} are supported."""
+"""Value of the header. Placeholders in format of ${var} are supported."""

 producer_kafka_ext_header_value.label:
-"""Value"""
+"""Extra Headers Value"""

 kafka_header_value_encode_mode.desc:
-"""Kafka headers value encode mode
- - NONE: only add binary values to Kafka headers;
- - JSON: only add JSON values to Kafka headers, -and encode it to JSON strings before sending.""" +"""The encoding mode for headers. + + - `none`: Add only strings are added as header values + - `json`: Encode header values as JSON string""" kafka_header_value_encode_mode.label: -"""Kafka headers value encode mode""" +"""Headers value encode mode""" metadata_request_timeout.desc: -"""Maximum wait time when fetching metadata from Kafka.""" +"""Maximum wait time when fetching topic metadata.""" metadata_request_timeout.label: """Metadata Request Timeout""" desc_type.desc: -"""The Bridge Type""" +"""The Action Type""" desc_type.label: -"""Bridge Type""" +"""Action Type""" socket_nodelay.desc: """When set to 'true', TCP buffer is sent as soon as possible. Otherwise, the OS kernel may buffer small TCP packets for a while (40 ms by default).""" @@ -278,7 +272,7 @@ auth_sasl_mechanism.label: """Mechanism""" config_enable.desc: -"""Enable (true) or disable (false) this Kafka bridge.""" +"""Enable (true) or disable (false) config.""" config_enable.label: """Enable or Disable""" @@ -323,13 +317,13 @@ consumer_value_encoding_mode.label: """Value Encoding Mode""" buffer_per_partition_limit.desc: -"""Number of bytes allowed to buffer for each Kafka partition. When this limit is exceeded, old messages will be dropped in a trade for credits for new messages to be buffered.""" +"""Number of bytes allowed to buffer for each partition. When this limit is exceeded, older messages will be discarded to make room for new messages to be buffered.""" buffer_per_partition_limit.label: """Per-partition Buffer Limit""" bootstrap_hosts.desc: -"""A comma separated list of Kafka host[:port] endpoints to bootstrap the client. Default port number is 9092.""" +"""A comma separated list of Kafka host:port endpoints to bootstrap the client.""" bootstrap_hosts.label: """Bootstrap Hosts""" @@ -341,41 +335,42 @@ consumer_max_rejoin_attempts.label: """Max Rejoin Attempts""" kafka_message_key.desc: -"""Template to render Kafka message key. If the template is rendered into a NULL value (i.e. there is no such data field in Rule Engine context) then Kafka's NULL (but not empty string) is used.""" +"""Template for rendering message key. If the template is rendered into a NULL value (i.e. there is no such data field in Rule Engine context) then NULL (but not empty string) is used.""" kafka_message_key.label: """Message Key""" kafka_message.desc: -"""Template to render a Kafka message.""" +"""Template for rendering a message.""" kafka_message.label: -"""Kafka Message Template""" +"""Message Template""" mqtt_topic.desc: -"""MQTT topic or topic filter as data source (bridge input). If rule action is used as data source, this config should be left empty, otherwise messages will be duplicated in Kafka.""" +"""MQTT topic or topic filter as data source (action input). If rule action is used as data source, this config should be left empty, otherwise messages will be duplicated in Kafka.""" mqtt_topic.label: """Source MQTT Topic""" kafka_message_value.desc: -"""Template to render Kafka message value. If the template is rendered into a NULL value (i.e. there is no such data field in Rule Engine context) then Kafka's NULL (but not empty string) is used.""" +"""Template for rendering Kafka message value. If the template is rendered into a NULL value (i.e. 
there is no such data field in Rule Engine context) then Kafka's NULL (but not empty string) is used.""" kafka_message_value.label: """Message Value""" partition_strategy.desc: -"""Partition strategy is to tell the producer how to dispatch messages to Kafka partitions. +"""Partition strategy is to tell the producer how to dispatch messages to partitions. -random: Randomly pick a partition for each message -key_dispatch: Hash Kafka message key to a partition number""" +random: Randomly pick a partition for each message. +key_dispatch: Assigns messages to partitions based on a hash of the message key, +ensuring consistent partition for messages with the same key.""" partition_strategy.label: """Partition Strategy""" buffer_segment_bytes.desc: """Applicable when buffer mode is set to disk or hybrid. -This value is to specify the size of each on-disk buffer file.""" +This setting specifies the size of each buffer file stored on disk.""" buffer_segment_bytes.label: """Segment File Bytes""" @@ -387,7 +382,8 @@ consumer_kafka_opts.label: """Kafka Consumer""" max_inflight.desc: -"""Maximum number of batches allowed for Kafka producer (per-partition) to send before receiving acknowledgement from Kafka. Greater value typically means better throughput. However, there can be a risk of message reordering when this value is greater than 1.""" +"""The maximum number of message batches that the producer can send to each partition before it must wait for an acknowledgement. +Setting a higher number can enhance throughput. However, value above 1 may lead to potential message reordering risks.""" max_inflight.label: """Max Inflight""" @@ -405,7 +401,7 @@ auth_kerberos_keytab_file.label: """Kerberos keytab file""" compression.desc: -"""Compression method.""" +"""Specify the method of compression.""" compression.label: """Compression""" @@ -417,20 +413,19 @@ query_mode.label: """Query mode""" sync_query_timeout.desc: -"""This parameter defines the timeout limit for synchronous queries. It applies only when the bridge query mode is configured to 'sync'.""" +"""This parameter defines the timeout limit for synchronous queries. 
It applies only when the query mode is configured to 'sync'.""" sync_query_timeout.label: """Synchronous Query Timeout""" - kafka_producer_action.desc: -"""Kafka Producer Action""" +"""Producer Action""" kafka_producer_action.label: -"""Kafka Producer Action""" +"""Producer Action""" ssl_client_opts.desc: -"""TLS/SSL options for Kafka client.""" +"""TLS/SSL options for client.""" ssl_client_opts.label: """TLS/SSL options""" diff --git a/rel/i18n/emqx_bridge_kinesis.hocon b/rel/i18n/emqx_bridge_kinesis.hocon index 188ab82f3..96237fbcf 100644 --- a/rel/i18n/emqx_bridge_kinesis.hocon +++ b/rel/i18n/emqx_bridge_kinesis.hocon @@ -1,28 +1,28 @@ emqx_bridge_kinesis { config_enable.desc: -"""Enable or disable this bridge""" +"""Enable or disable this action""" config_enable.label: -"""Enable Or Disable Bridge""" +"""Enable Or Disable Action""" desc_config.desc: -"""Configuration for an Amazon Kinesis bridge.""" +"""Configuration for an Amazon Kinesis action.""" desc_config.label: -"""Amazon Kinesis Bridge Configuration""" +"""Amazon Kinesis Action Configuration""" desc_name.desc: -"""Bridge name.""" +"""Action name.""" desc_name.label: -"""Bridge Name""" +"""Action Name""" desc_type.desc: -"""The Bridge Type""" +"""The action type.""" desc_type.label: -"""Bridge Type""" +"""Action Type""" pool_size.desc: """The pool size.""" @@ -33,7 +33,7 @@ pool_size.label: local_topic.desc: """The MQTT topic filter to be forwarded to Amazon Kinesis. All MQTT `PUBLISH` messages with the topic matching the `local_topic` will be forwarded.
-NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also `local_topic` is +NOTE: If this action is used as the action of a rule (EMQX rule engine), and also `local_topic` is configured, then both the data got from the rule and the MQTT messages that match `local_topic` will be forwarded.""" diff --git a/rel/i18n/emqx_bridge_mongodb.hocon b/rel/i18n/emqx_bridge_mongodb.hocon index d7c14588b..cb7c5d30e 100644 --- a/rel/i18n/emqx_bridge_mongodb.hocon +++ b/rel/i18n/emqx_bridge_mongodb.hocon @@ -7,25 +7,25 @@ collection.label: """Collection to be used""" desc_config.desc: -"""Configuration for MongoDB Bridge""" +"""Configuration for MongoDB action""" desc_config.label: -"""MongoDB Bridge Configuration""" +"""MongoDB Action Configuration""" desc_name.desc: -"""Bridge name.""" +"""Action name.""" desc_name.label: -"""Bridge Name""" +"""Action Name""" desc_type.desc: -"""The Bridge Type.""" +"""The action type.""" desc_type.label: -"""Bridge Type""" +"""Action Type""" enable.desc: -"""Enable or disable this MongoDB Bridge""" +"""Enable or disable this MongoDB Action""" enable.label: """Enable or disable""" @@ -76,7 +76,7 @@ mongodb_action.desc: """Action to interact with a MongoDB connector""" mqtt_topic.desc: -"""MQTT topic or topic filter as data source (bridge input). If rule action is used as data source, this config should be left empty, otherwise messages will be duplicated in MongoDB.""" +"""MQTT topic or topic filter as data source (action input). If rule action is used as data source, this config should be left empty, otherwise messages will be duplicated in MongoDB.""" mqtt_topic.label: """Source MQTT Topic""" diff --git a/rel/i18n/emqx_bridge_mqtt_schema.hocon b/rel/i18n/emqx_bridge_mqtt_schema.hocon index e05c4fb0a..fc56f6a82 100644 --- a/rel/i18n/emqx_bridge_mqtt_schema.hocon +++ b/rel/i18n/emqx_bridge_mqtt_schema.hocon @@ -7,7 +7,7 @@ config.label: """Config""" desc_name.desc: -"""Bridge name, used as a human-readable description of the bridge.""" +"""Bridge name, used as a human-readable identifier.""" desc_name.label: """Bridge Name""" diff --git a/rel/i18n/emqx_bridge_mysql.hocon b/rel/i18n/emqx_bridge_mysql.hocon index 057b1b145..37b63017a 100644 --- a/rel/i18n/emqx_bridge_mysql.hocon +++ b/rel/i18n/emqx_bridge_mysql.hocon @@ -1,33 +1,33 @@ emqx_bridge_mysql { config_enable.desc: -"""Enable or disable this bridge""" +"""Enable or disable this action""" config_enable.label: -"""Enable Or Disable Bridge""" +"""Enable Or Disable Action""" desc_config.desc: -"""Configuration for an HStreamDB bridge.""" +"""Configuration for a MySQL action.""" desc_config.label: -"""HStreamDB Bridge Configuration""" +"""MySQL Action Configuration""" desc_name.desc: -"""Bridge name, used as a human-readable description of the bridge.""" +"""Action name, used as a human-readable identifier of the action.""" desc_name.label: -"""Bridge Name""" +"""Action Name""" desc_type.desc: -"""The Bridge Type""" +"""The action type""" desc_type.label: -"""Bridge Type""" +"""Action Type""" local_topic.desc: """The MQTT topic filter to be forwarded to MySQL. All MQTT 'PUBLISH' messages with the topic matching the local_topic will be forwarded.
-NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also local_topic is +NOTE: If this action is used as the action of a rule (EMQX rule engine), and also local_topic is configured, then both the data got from the rule and the MQTT messages that match local_topic will be forwarded.""" diff --git a/rel/i18n/emqx_bridge_pulsar.hocon b/rel/i18n/emqx_bridge_pulsar.hocon index d1f5c8b13..e1b6153d3 100644 --- a/rel/i18n/emqx_bridge_pulsar.hocon +++ b/rel/i18n/emqx_bridge_pulsar.hocon @@ -73,8 +73,8 @@ emqx_bridge_pulsar { } desc_name { - desc = "Bridge name, used as a human-readable description of the bridge." - label = "Bridge Name" + desc = "Action name, a human-readable identifier." + label = "Action Name" } desc_type { diff --git a/rel/i18n/emqx_bridge_redis.hocon b/rel/i18n/emqx_bridge_redis.hocon index 5c8a4a941..9845ab7c6 100644 --- a/rel/i18n/emqx_bridge_redis.hocon +++ b/rel/i18n/emqx_bridge_redis.hocon @@ -16,33 +16,33 @@ command_template.label: """Redis Command Template""" config_enable.desc: -"""Enable or disable this bridge""" +"""Enable or disable this action .""" config_enable.label: -"""Enable Or Disable Bridge""" +"""Enable Or Disable Action""" desc_config.desc: -"""Configuration for a Redis bridge.""" +"""Configuration for a Redis action.""" desc_config.label: -"""Redis Bridge Configuration""" +"""Redis Action Configuration""" desc_name.desc: -"""Bridge name, used as a human-readable description of the bridge.""" +"""Action name, used as a human-readable identifier.""" desc_name.label: -"""Bridge Name""" +"""Action Name""" desc_type.desc: -"""The Bridge Type""" +"""The Action Type""" desc_type.label: -"""Bridge Type""" +"""Action Type""" desc_local_topic.desc: """The MQTT topic filter to be forwarded to Redis. All MQTT 'PUBLISH' messages with the topic matching the local_topic will be forwarded.
-NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also local_topic is +NOTE: If this action is used as the action of a rule (EMQX rule engine), and also local_topic is configured, then both the data got from the rule and the MQTT messages that match local_topic will be forwarded.""" diff --git a/rel/i18n/emqx_bridge_syskeeper.hocon b/rel/i18n/emqx_bridge_syskeeper.hocon index dde379f8a..f5799f552 100644 --- a/rel/i18n/emqx_bridge_syskeeper.hocon +++ b/rel/i18n/emqx_bridge_syskeeper.hocon @@ -1,28 +1,28 @@ emqx_bridge_syskeeper { config_enable.desc: -"""Enable or disable this bridge""" +"""Enable or disable this action.""" config_enable.label: -"""Enable Or Disable Bridge""" +"""Enable Or Disable Action""" desc_config.desc: -"""Configuration for a Syskeeper bridge""" +"""Configuration for a Syskeeper action.""" desc_config.label: -"""Syskeeper Bridge Configuration""" +"""Syskeeper Action Configuration""" desc_name.desc: -"""Bridge name.""" +"""Action name.""" desc_name.label: -"""Bridge Name""" +"""Action Name""" desc_type.desc: -"""The Bridge Type""" +"""The action type.""" desc_type.label: -"""Bridge Type""" +"""Action Type""" template.desc: """Template""" @@ -43,13 +43,13 @@ target_qos.label: """Target QoS""" parameters.desc: -"""Syskeeper data bridge parameters""" +"""Syskeeper action parameters""" parameters.label: """Parameters""" mqtt_topic.desc: -"""MQTT topic or topic filter as data source (bridge input). If rule action is used as data source, this config should be left empty, otherwise messages will be duplicated in Syskeeper.""" +"""MQTT topic or topic filter as data source (action input). If rule action is used as data source, this config should be left empty, otherwise messages will be duplicated in Syskeeper.""" mqtt_topic.label: """Source MQTT Topic""" diff --git a/rel/i18n/emqx_bridge_syskeeper_proxy.hocon b/rel/i18n/emqx_bridge_syskeeper_proxy.hocon index dc96486d1..48562a335 100644 --- a/rel/i18n/emqx_bridge_syskeeper_proxy.hocon +++ b/rel/i18n/emqx_bridge_syskeeper_proxy.hocon @@ -13,16 +13,16 @@ desc_config.label: """Syskeeper Proxy Connector Configuration""" desc_name.desc: -"""Bridge name""" +"""Action name""" desc_name.label: -"""Bridge Name""" +"""Action Name""" desc_type.desc: -"""The Bridge Type""" +"""The action type.""" desc_type.label: -"""Bridge Type""" +"""Action Type""" listen.desc: """The listening address for this Syskeeper proxy server""" diff --git a/rel/i18n/emqx_schema.hocon b/rel/i18n/emqx_schema.hocon index af4251328..8f08065ec 100644 --- a/rel/i18n/emqx_schema.hocon +++ b/rel/i18n/emqx_schema.hocon @@ -470,7 +470,7 @@ RSA-PSK-DES-CBC3-SHA,RSA-PSK-RC4-SHA"
NOTE: QUIC listener supports only 'tlsv1.3' ciphers""" ciphers_schema_quic.label: -"""""" +"""QUIC TLS cipher suites""" fields_mqtt_quic_listener_max_bytes_per_key.desc: """Maximum number of bytes to encrypt with a single 1-RTT encryption key before initiating key update. Default: 274877906944""" @@ -1445,7 +1445,7 @@ RSA-PSK-AES256-CBC-SHA,RSA-PSK-AES128-CBC-SHA, RSA-PSK-DES-CBC3-SHA,RSA-PSK-RC4-SHA"""" ciphers_schema_common.label: -"""""" +"""TLS cipher suites""" sys_event_client_disconnected.desc: """Enable to publish client disconnected event messages."""
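For reference, the producer settings whose descriptions are revised above combine roughly as follows in a Kafka producer action, written in HOCON. This is a minimal sketch only: the key names mirror the i18n entries touched in this patch, but the exact nesting (actions vs. bridges, the parameters block, the connector reference) is an assumption and should be checked against the schema of the EMQX release in use.

    # Illustrative sketch only -- key names follow the i18n entries above;
    # the nesting is assumed and may differ between releases.
    actions {
      kafka_producer {
        my_action {
          enable = true
          connector = "my_kafka_connector"
          parameters {
            topic = "telemetry"
            message {
              # Templates; a NULL rendering produces Kafka NULL, not an empty string
              key = "${clientid}"
              value = "${payload}"
            }
            # The placeholder must render to an object or an array of key-value pairs
            kafka_headers = "${pub_props}"
            kafka_ext_headers = [
              {kafka_ext_header_key = "clientid", kafka_ext_header_value = "${clientid}"}
            ]
            kafka_header_value_encode_mode = json
            partition_strategy = key_dispatch
            required_acks = all_isr
          }
        }
      }
    }

With key_dispatch, messages that share the same message key are hashed to the same partition, which preserves per-key ordering at the cost of possible partition skew.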