feat: add Azure Event Hub Producer bridge
Fixes https://emqx.atlassian.net/browse/EMQX-10338
parent 2ac8ba2add
commit 5abe4bed88
@@ -90,7 +90,8 @@
 T == oracle;
 T == iotdb;
 T == kinesis_producer;
-T == greptimedb
+T == greptimedb;
+T == azure_event_hub_producer
 ).

 -define(ROOT_KEY, bridges).
@@ -49,6 +49,12 @@
 update/4
 ]).

+-callback connector_config(ParsedConfig, BridgeName :: atom() | binary()) ->
+    ParsedConfig
+when
+    ParsedConfig :: #{atom() => any()}.
+-optional_callbacks([connector_config/2]).
+
 %% bi-directional bridge with producer/consumer or ingress/egress configs
 -define(IS_BI_DIR_BRIDGE(TYPE),
 (TYPE) =:= <<"mqtt">>
@@ -65,11 +71,15 @@ bridge_to_resource_type(mqtt) -> emqx_bridge_mqtt_connector;
 bridge_to_resource_type(<<"webhook">>) -> emqx_bridge_http_connector;
 bridge_to_resource_type(webhook) -> emqx_bridge_http_connector;
 bridge_to_resource_type(BridgeType) -> emqx_bridge_enterprise:resource_type(BridgeType).

+bridge_impl_module(BridgeType) -> emqx_bridge_enterprise:bridge_impl_module(BridgeType).
 -else.
 bridge_to_resource_type(<<"mqtt">>) -> emqx_bridge_mqtt_connector;
 bridge_to_resource_type(mqtt) -> emqx_bridge_mqtt_connector;
 bridge_to_resource_type(<<"webhook">>) -> emqx_bridge_http_connector;
 bridge_to_resource_type(webhook) -> emqx_bridge_http_connector.

+bridge_impl_module(_BridgeType) -> undefined.
 -endif.

 resource_id(BridgeId) when is_binary(BridgeId) ->
@@ -376,8 +386,17 @@ parse_confs(<<"pulsar_producer">> = _Type, Name, Conf) ->
 Conf#{bridge_name => Name};
 parse_confs(<<"kinesis_producer">> = _Type, Name, Conf) ->
 Conf#{bridge_name => Name};
-parse_confs(_Type, _Name, Conf) ->
-    Conf.
+parse_confs(BridgeType, BridgeName, Config) ->
+    connector_config(BridgeType, BridgeName, Config).
+
+connector_config(BridgeType, BridgeName, Config) ->
+    Mod = bridge_impl_module(BridgeType),
+    case erlang:function_exported(Mod, connector_config, 2) of
+        true ->
+            Mod:connector_config(Config, BridgeName);
+        false ->
+            Config
+    end.

 parse_url(Url) ->
 case string:split(Url, "//", leading) of
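For context: the `parse_confs/3` fallback above now routes every bridge type through the optional `connector_config/2` callback declared in the earlier hunk. A minimal sketch of a bridge module opting in (the module name below is hypothetical; the real implementation added by this commit is `emqx_bridge_azure_event_hub:connector_config/2`):

```erlang
%% Hypothetical module, sketching the optional callback contract.
-module(my_bridge_schema).

-behaviour(emqx_bridge_resource).

-export([connector_config/2]).

%% Receives the parsed bridge config and the bridge name, and returns the
%% (possibly rewritten) config that the underlying connector will see.
connector_config(Config, BridgeName) ->
    Config#{bridge_name => BridgeName}.
```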
@@ -12,6 +12,7 @@
 api_schemas/1,
 examples/1,
 resource_type/1,
+bridge_impl_module/1,
 fields/1
 ]).

@@ -50,7 +51,8 @@ api_schemas(Method) ->
 api_ref(emqx_bridge_iotdb, <<"iotdb">>, Method),
 api_ref(emqx_bridge_rabbitmq, <<"rabbitmq">>, Method),
 api_ref(emqx_bridge_kinesis, <<"kinesis_producer">>, Method ++ "_producer"),
-api_ref(emqx_bridge_greptimedb, <<"greptimedb">>, Method ++ "_grpc_v1")
+api_ref(emqx_bridge_greptimedb, <<"greptimedb">>, Method ++ "_grpc_v1"),
+api_ref(emqx_bridge_azure_event_hub, <<"azure_event_hub_producer">>, Method ++ "_producer")
 ].

 schema_modules() ->
@@ -77,7 +79,8 @@ schema_modules() ->
 emqx_bridge_iotdb,
 emqx_bridge_rabbitmq,
 emqx_bridge_kinesis,
-emqx_bridge_greptimedb
+emqx_bridge_greptimedb,
+emqx_bridge_azure_event_hub
 ].

 examples(Method) ->
@@ -124,7 +127,17 @@ resource_type(oracle) -> emqx_oracle;
 resource_type(iotdb) -> emqx_bridge_iotdb_impl;
 resource_type(rabbitmq) -> emqx_bridge_rabbitmq_connector;
 resource_type(kinesis_producer) -> emqx_bridge_kinesis_impl_producer;
-resource_type(greptimedb) -> emqx_bridge_greptimedb_connector.
+resource_type(greptimedb) -> emqx_bridge_greptimedb_connector;
+%% We use AEH's Kafka interface.
+resource_type(azure_event_hub_producer) -> emqx_bridge_kafka_impl_producer.
+
+%% For bridges that need to override connector configurations.
+bridge_impl_module(BridgeType) when is_binary(BridgeType) ->
+    bridge_impl_module(binary_to_atom(BridgeType, utf8));
+bridge_impl_module(azure_event_hub_producer) ->
+    emqx_bridge_azure_event_hub;
+bridge_impl_module(_BridgeType) ->
+    undefined.

 fields(bridges) ->
 [
@@ -205,8 +218,7 @@ fields(bridges) ->
 influxdb_structs() ++
 redis_structs() ++
 pgsql_structs() ++ clickhouse_structs() ++ sqlserver_structs() ++ rabbitmq_structs() ++
-kinesis_structs() ++
-    greptimedb_structs().
+kinesis_structs() ++ greptimedb_structs() ++ azure_event_hub_structs().

 mongodb_structs() ->
 [
@@ -374,6 +386,16 @@ kafka_producer_converter(Map, Opts) ->
 Map
 ).

+azure_event_hub_producer_converter(undefined, _) ->
+    undefined;
+azure_event_hub_producer_converter(Map, Opts) ->
+    maps:map(
+        fun(_Name, Config) ->
+            emqx_bridge_azure_event_hub:producer_converter(Config, Opts)
+        end,
+        Map
+    ).

 rabbitmq_structs() ->
 [
 {rabbitmq,
@@ -398,6 +420,19 @@ kinesis_structs() ->
 )}
 ].

+azure_event_hub_structs() ->
+    [
+        {azure_event_hub_producer,
+            mk(
+                hoconsc:map(name, ref(emqx_bridge_azure_event_hub, "config_producer")),
+                #{
+                    desc => <<"EMQX Enterprise Config">>,
+                    converter => fun azure_event_hub_producer_converter/2,
+                    required => false
+                }
+            )}
+    ].

 api_ref(Module, Type, Method) ->
 {Type, ref(Module, Method)}.
@@ -336,7 +336,7 @@ t_start_stop(BridgeType, BridgeName, BridgeConfig, StopTracePoint) ->
 ProbeRes0 = probe_bridge_api(
 BridgeType,
 BridgeName,
-BridgeConfig#{<<"resource_opts">> => #{<<"health_check_interval">> => <<"1s">>}}
+BridgeConfig
 ),
 ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0),
 AtomsBefore = erlang:system_info(atom_count),
@@ -344,7 +344,7 @@ t_start_stop(BridgeType, BridgeName, BridgeConfig, StopTracePoint) ->
 ProbeRes1 = probe_bridge_api(
 BridgeType,
 BridgeName,
-BridgeConfig#{<<"resource_opts">> => #{<<"health_check_interval">> => <<"1s">>}}
+BridgeConfig
 ),

 ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1),
@@ -444,7 +444,11 @@ t_on_get_status(Config, Opts) ->
 ),
 emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
 ct:sleep(500),
+?retry(
+    _Interval0 = 200,
+    _Attempts0 = 10,
 ?assertEqual({ok, FailureStatus}, emqx_resource_manager:health_check(ResourceId))
+)
 end),
 %% Check that it recovers itself.
 ?retry(
@@ -0,0 +1,94 @@
Business Source License 1.1

Licensor: Hangzhou EMQ Technologies Co., Ltd.
Licensed Work: EMQX Enterprise Edition
               The Licensed Work is (c) 2023
               Hangzhou EMQ Technologies Co., Ltd.
Additional Use Grant: Students and educators are granted right to copy,
                      modify, and create derivative work for research
                      or education.
Change Date: 2027-02-01
Change License: Apache License, Version 2.0

For information about alternative licensing arrangements for the Software,
please contact Licensor: https://www.emqx.com/en/contact

Notice

The Business Source License (this document, or the “License”) is not an Open
Source license. However, the Licensed Work will eventually be made available
under an Open Source License, as stated in this License.

License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
“Business Source License” is a trademark of MariaDB Corporation Ab.

-----------------------------------------------------------------------------

Business Source License 1.1

Terms

The Licensor hereby grants you the right to copy, modify, create derivative
works, redistribute, and make non-production use of the Licensed Work. The
Licensor may make an Additional Use Grant, above, permitting limited
production use.

Effective on the Change Date, or the fourth anniversary of the first publicly
available distribution of a specific version of the Licensed Work under this
License, whichever comes first, the Licensor hereby grants you rights under
the terms of the Change License, and the rights granted in the paragraph
above terminate.

If your use of the Licensed Work does not comply with the requirements
currently in effect as described in this License, you must purchase a
commercial license from the Licensor, its affiliated entities, or authorized
resellers, or you must refrain from using the Licensed Work.

All copies of the original and modified Licensed Work, and derivative works
of the Licensed Work, are subject to this License. This License applies
separately for each version of the Licensed Work and the Change Date may vary
for each version of the Licensed Work released by Licensor.

You must conspicuously display this License on each original or modified copy
of the Licensed Work. If you receive the Licensed Work in original or
modified form from a third party, the terms and conditions set forth in this
License apply to your use of that work.

Any use of the Licensed Work in violation of this License will automatically
terminate your rights under this License for the current and all other
versions of the Licensed Work.

This License does not grant you any right in any trademark or logo of
Licensor or its affiliates (provided that you may use a trademark or logo of
Licensor as expressly required by this License).

TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
TITLE.

MariaDB hereby grants you permission to use this License’s text to license
your works, and to refer to it using the trademark “Business Source License”,
as long as you comply with the Covenants of Licensor below.

Covenants of Licensor

In consideration of the right to use this License’s text and the “Business
Source License” name and trademark, Licensor covenants to MariaDB, and to all
other recipients of the licensed work to be provided by Licensor:

1. To specify as the Change License the GPL Version 2.0 or any later version,
or a license that is compatible with GPL Version 2.0 or a later version,
where “compatible” means that software provided under the Change License can
be included in a program with software provided under GPL Version 2.0 or a
later version. Licensor may specify additional Change Licenses without
limitation.

2. To either: (a) specify an additional grant of rights to use that does not
impose any additional restriction on the right granted in this License, as
the Additional Use Grant; or (b) insert the text “None”.

3. To specify a Change Date.

4. Not to modify this License in any other way.
@@ -0,0 +1,30 @@
# Azure Event Hub Data Integration Bridge

This application houses the Azure Event Hub (AEH) Producer data
integration bridge for EMQX Enterprise Edition. It provides the means
to connect to Azure Event Hub and publish messages to it via the
Kafka protocol.

Currently, our Kafka Producer library (`wolff`) has its own `replayq`
buffering implementation, so this bridge does not require buffer
workers from `emqx_resource`. It implements the connection management
and interaction without the need for a separate connector app, since
it's not used by the authentication and authorization applications.

# Documentation links

For more information about the Kafka interface for AEH, please see
[the official docs](https://learn.microsoft.com/en-us/azure/event-hubs/azure-event-hubs-kafka-overview).

# Configurations

Please see [Ingest Data into Azure Event Hub](https://www.emqx.io/docs/en/v5.0/data-integration/data-bridge-azure-event-hub.html) for more detailed info; an illustrative snippet follows.
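For illustration only (the host, topic, and password below are placeholders mirroring the test fixtures added in this commit), a minimal bridge config could look like:

```hocon
# Illustrative sketch; not an exhaustive config.
bridges.azure_event_hub_producer.my_producer {
  enable = true
  authentication {
    # The username defaults to "$ConnectionString" in the schema;
    # the password carries the Event Hub connection string.
    password = "Endpoint=..."
  }
  bootstrap_hosts = "mynamespace.servicebus.windows.net:9093"
  kafka {
    topic = "my-event-hub"
    required_acks = all_isr
  }
  local_topic = "t/aeh"
}
```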
# Contributing

Please see our [contributing.md](../../CONTRIBUTING.md).

# License

EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).
@@ -0,0 +1,2 @@
toxiproxy
kafka
@@ -0,0 +1,15 @@
%% -*- mode: erlang; -*-
{erl_opts, [debug_info]}.
{deps, [ {wolff, {git, "https://github.com/kafka4beam/wolff.git", {tag, "1.7.6"}}}
       , {kafka_protocol, {git, "https://github.com/kafka4beam/kafka_protocol.git", {tag, "4.1.3"}}}
       , {brod_gssapi, {git, "https://github.com/kafka4beam/brod_gssapi.git", {tag, "v0.1.0"}}}
       , {brod, {git, "https://github.com/kafka4beam/brod.git", {tag, "3.16.8"}}}
       , {snappyer, "1.2.9"}
       , {emqx_connector, {path, "../../apps/emqx_connector"}}
       , {emqx_resource, {path, "../../apps/emqx_resource"}}
       , {emqx_bridge, {path, "../../apps/emqx_bridge"}}
       ]}.

{shell, [
    {apps, [emqx_bridge_azure_event_hub]}
]}.
@@ -0,0 +1,15 @@
{application, emqx_bridge_azure_event_hub, [
    {description, "EMQX Enterprise Azure Event Hub Bridge"},
    {vsn, "0.1.0"},
    {registered, []},
    {applications, [
        kernel,
        stdlib,
        emqx_resource,
        telemetry,
        wolff
    ]},
    {env, []},
    {modules, []},
    {links, []}
]}.
@@ -0,0 +1,267 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_azure_event_hub).

-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").

-behaviour(hocon_schema).
-behaviour(emqx_bridge_resource).

%% `hocon_schema' API
-export([
    namespace/0,
    roots/0,
    fields/1,
    desc/1
]).

%% emqx_bridge_enterprise "unofficial" API
-export([conn_bridge_examples/1]).

-export([connector_config/2]).

-export([producer_converter/2, host_opts/0]).

-import(hoconsc, [mk/2, enum/1, ref/2]).

%%-------------------------------------------------------------------------------------------------
%% `hocon_schema' API
%%-------------------------------------------------------------------------------------------------

namespace() -> "bridge_azure_event_hub".

roots() -> ["config_producer"].

fields("post_producer") ->
    Fields = override(
        emqx_bridge_kafka:fields("post_producer"),
        producer_overrides()
    ),
    override_documentations(Fields);
fields("config_producer") ->
    Fields = override(
        emqx_bridge_kafka:fields(kafka_producer),
        producer_overrides()
    ),
    override_documentations(Fields);
fields(auth_username_password) ->
    Fields = override(
        emqx_bridge_kafka:fields(auth_username_password),
        auth_overrides()
    ),
    override_documentations(Fields);
fields("ssl_client_opts") ->
    Fields = override(
        emqx_schema:fields("ssl_client_opts"),
        ssl_overrides()
    ),
    override_documentations(Fields);
fields(producer_kafka_opts) ->
    Fields = override(
        emqx_bridge_kafka:fields(producer_kafka_opts),
        kafka_producer_overrides()
    ),
    override_documentations(Fields);
fields(Method) ->
    Fields = emqx_bridge_kafka:fields(Method),
    override_documentations(Fields).

desc(_) ->
    undefined.

conn_bridge_examples(Method) ->
    [
        #{
            <<"azure_event_hub_producer">> => #{
                summary => <<"Azure Event Hub Producer Bridge">>,
                value => values({Method, producer})
            }
        }
    ].

values({get, AEHType}) ->
    values({post, AEHType});
values({post, AEHType}) ->
    maps:merge(values(common_config), values(AEHType));
values({put, AEHType}) ->
    values({post, AEHType});
values(common_config) ->
    #{
        authentication => #{
            password => <<"******">>
        },
        bootstrap_hosts => <<"namespace.servicebus.windows.net:9093">>,
        connect_timeout => <<"5s">>,
        enable => true,
        metadata_request_timeout => <<"4s">>,
        min_metadata_refresh_interval => <<"3s">>,
        socket_opts => #{
            sndbuf => <<"1024KB">>,
            recbuf => <<"1024KB">>,
            nodelay => true,
            tcp_keepalive => <<"none">>
        }
    };
values(producer) ->
    #{
        kafka => #{
            topic => <<"topic">>,
            message => #{
                key => <<"${.clientid}">>,
                value => <<"${.}">>,
                timestamp => <<"${.timestamp}">>
            },
            max_batch_bytes => <<"896KB">>,
            partition_strategy => <<"random">>,
            required_acks => <<"all_isr">>,
            partition_count_refresh_interval => <<"60s">>,
            kafka_headers => <<"${.pub_props}">>,
            kafka_ext_headers => [
                #{
                    kafka_ext_header_key => <<"clientid">>,
                    kafka_ext_header_value => <<"${clientid}">>
                },
                #{
                    kafka_ext_header_key => <<"topic">>,
                    kafka_ext_header_value => <<"${topic}">>
                }
            ],
            kafka_header_value_encode_mode => none,
            max_inflight => 10,
            buffer => #{
                mode => <<"hybrid">>,
                per_partition_limit => <<"2GB">>,
                segment_bytes => <<"100MB">>,
                memory_overload_protection => true
            }
        },
        local_topic => <<"mqtt/local/topic">>
    }.

%%-------------------------------------------------------------------------------------------------
%% `emqx_bridge_resource' API
%%-------------------------------------------------------------------------------------------------

connector_config(Config, BridgeName) ->
    %% Default port for AEH is 9093
    BootstrapHosts0 = maps:get(bootstrap_hosts, Config),
    BootstrapHosts = emqx_schema:parse_servers(
        BootstrapHosts0,
        emqx_bridge_azure_event_hub:host_opts()
    ),
    Config#{bridge_name => BridgeName, bootstrap_hosts := BootstrapHosts}.

%%-------------------------------------------------------------------------------------------------
%% Internal fns
%%-------------------------------------------------------------------------------------------------

ref(Name) ->
    hoconsc:ref(?MODULE, Name).

producer_overrides() ->
    #{
        authentication => mk(ref(auth_username_password), #{default => #{}, required => true}),
        bootstrap_hosts =>
            mk(
                binary(),
                #{
                    required => true,
                    validator => emqx_schema:servers_validator(
                        host_opts(), _Required = true
                    )
                }
            ),
        kafka =>
            mk(ref(producer_kafka_opts), #{
                required => true,
                validator => fun emqx_bridge_kafka:producer_strategy_key_validator/1
            }),
        ssl => mk(ref("ssl_client_opts"), #{default => #{<<"enable">> => true}}),
        type => mk(azure_event_hub_producer, #{required => true})
    }.

auth_overrides() ->
    #{
        mechanism =>
            mk(plain, #{
                required => true,
                default => plain,
                importance => ?IMPORTANCE_HIDDEN
            }),
        username =>
            mk(binary(), #{
                required => true,
                default => <<"$ConnectionString">>,
                importance => ?IMPORTANCE_HIDDEN
            })
    }.

ssl_overrides() ->
    #{
        "cacerts" => mk(boolean(), #{default => true}),
        "enable" => mk(true, #{default => true}),
        "server_name_indication" =>
            mk(
                hoconsc:union([disable, auto, string()]),
                #{
                    example => auto,
                    default => <<"auto">>
                }
            )
    }.

kafka_producer_overrides() ->
    #{
        compression =>
            mk(no_compression, #{
                default => no_compression,
                importance => ?IMPORTANCE_HIDDEN
            }),
        required_acks => mk(enum([all_isr, leader_only]), #{default => all_isr})
    }.

override_documentations(Fields) ->
    lists:map(
        fun({Name, Sc}) ->
            case hocon_schema:field_schema(Sc, desc) of
                ?DESC(emqx_bridge_kafka, Key) ->
                    %% to please dialyzer...
                    Override = #{type => hocon_schema:field_schema(Sc, type), desc => ?DESC(Key)},
                    {Name, hocon_schema:override(Sc, Override)};
                _ ->
                    {Name, Sc}
            end
        end,
        Fields
    ).

override(Fields, Overrides) ->
    lists:map(
        fun({Name, Sc}) ->
            case maps:find(Name, Overrides) of
                {ok, Override} ->
                    {Name, hocon_schema:override(Sc, Override)};
                error ->
                    {Name, Sc}
            end
        end,
        Fields
    ).

producer_converter(undefined, _HoconOpts) ->
    undefined;
producer_converter(
    Opts = #{<<"ssl">> := #{<<"server_name_indication">> := <<"auto">>}}, _HoconOpts
) ->
    %% Azure Event Hub's SNI is just the hostname without the Event Hub Namespace...
    emqx_utils_maps:deep_merge(
        Opts,
        #{<<"ssl">> => #{<<"server_name_indication">> => <<"servicebus.windows.net">>}}
    );
producer_converter(Opts, _HoconOpts) ->
    Opts.

host_opts() ->
    #{default_port => 9093}.
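Worth calling out from the module above: when `server_name_indication` is left at its `auto` default, `producer_converter/2` rewrites it to the bare `servicebus.windows.net` host, since Azure Event Hub serves a certificate for the domain without the namespace prefix. A sketch of the observable behavior (the input map is illustrative):

```erlang
%% Illustrative input; only an "auto" SNI triggers the rewrite.
Conf0 = #{<<"ssl">> => #{<<"server_name_indication">> => <<"auto">>}},
Conf1 = emqx_bridge_azure_event_hub:producer_converter(Conf0, #{}),
#{<<"ssl">> := #{<<"server_name_indication">> := <<"servicebus.windows.net">>}} = Conf1.
%% Explicit values (e.g. <<"disable">> or a custom host) pass through unchanged.
```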
@@ -0,0 +1,283 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_azure_event_hub_producer_SUITE).

-compile(nowarn_export_all).
-compile(export_all).

-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").

-define(BRIDGE_TYPE, azure_event_hub_producer).
-define(BRIDGE_TYPE_BIN, <<"azure_event_hub_producer">>).
-define(APPS, [emqx_resource, emqx_bridge, emqx_rule_engine]).

-import(emqx_common_test_helpers, [on_exit/1]).

%%------------------------------------------------------------------------------
%% CT boilerplate
%%------------------------------------------------------------------------------

all() ->
    emqx_common_test_helpers:all(?MODULE).

init_per_suite(Config) ->
    KafkaHost = os:getenv("KAFKA_SASL_SSL_HOST", "toxiproxy.emqx.net"),
    KafkaPort = list_to_integer(os:getenv("KAFKA_SASL_SSL_PORT", "9295")),
    ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"),
    ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")),
    ProxyName = "kafka_sasl_ssl",
    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
    case emqx_common_test_helpers:is_tcp_server_available(KafkaHost, KafkaPort) of
        true ->
            Apps = emqx_cth_suite:start(
                [
                    emqx_conf,
                    emqx,
                    emqx_management,
                    emqx_resource,
                    emqx_bridge_azure_event_hub,
                    emqx_bridge,
                    {emqx_dashboard, "dashboard.listeners.http { enable = true, bind = 18083 }"}
                ],
                #{work_dir => ?config(priv_dir, Config)}
            ),
            {ok, Api} = emqx_common_test_http:create_default_app(),
            [
                {tc_apps, Apps},
                {api, Api},
                {proxy_name, ProxyName},
                {proxy_host, ProxyHost},
                {proxy_port, ProxyPort},
                {kafka_host, KafkaHost},
                {kafka_port, KafkaPort}
                | Config
            ];
        false ->
            case os:getenv("IS_CI") of
                "yes" ->
                    throw(no_kafka);
                _ ->
                    {skip, no_kafka}
            end
    end.

end_per_suite(Config) ->
    %% emqx_mgmt_api_test_util:end_suite(),
    %% ok = emqx_common_test_helpers:stop_apps([emqx_conf]),
    %% ok = emqx_connector_test_helpers:stop_apps([emqx_bridge, emqx_resource, emqx_rule_engine]),
    %% _ = application:stop(emqx_connector),
    Apps = ?config(tc_apps, Config),
    emqx_cth_suite:stop(Apps),
    ok.

init_per_testcase(TestCase, Config) ->
    common_init_per_testcase(TestCase, Config).

common_init_per_testcase(TestCase, Config) ->
    ct:timetrap(timer:seconds(60)),
    emqx_bridge_testlib:delete_all_bridges(),
    emqx_config:delete_override_conf_files(),
    {Name, BridgeConfig, ExtraConfig} = bridge_config(TestCase, Config),
    KafkaTopic = proplists:get_value(kafka_topic, ExtraConfig),
    ensure_topic(Config, KafkaTopic, _Opts = #{}),
    ok = snabbkaffe:start_trace(),
    ExtraConfig ++
        [
            {bridge_type, ?BRIDGE_TYPE},
            {bridge_name, Name},
            {bridge_config, BridgeConfig}
            | Config
        ].

end_per_testcase(_Testcase, Config) ->
    case proplists:get_bool(skip_does_not_apply, Config) of
        true ->
            ok;
        false ->
            ProxyHost = ?config(proxy_host, Config),
            ProxyPort = ?config(proxy_port, Config),
            emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
            emqx_bridge_testlib:delete_all_bridges(),
            emqx_common_test_helpers:call_janitor(60_000),
            ok = snabbkaffe:stop(),
            ok
    end.

%%------------------------------------------------------------------------------
%% Helper fns
%%------------------------------------------------------------------------------

bridge_config(TestCase, Config) ->
    KafkaHost = ?config(kafka_host, Config),
    KafkaPort = ?config(kafka_port, Config),
    UniqueNum = integer_to_binary(erlang:unique_integer()),
    Name = <<
        (atom_to_binary(TestCase))/binary, UniqueNum/binary
    >>,
    KafkaTopic = iolist_to_binary([atom_to_binary(TestCase), UniqueNum]),
    InnerConfigMap0 =
        #{
            <<"enable">> => true,
            <<"bootstrap_hosts">> => iolist_to_binary([KafkaHost, ":", integer_to_binary(KafkaPort)]),
            <<"authentication">> =>
                #{
                    <<"mechanism">> => <<"plain">>,
                    <<"username">> => <<"emqxuser">>,
                    <<"password">> => <<"password">>
                },
            <<"connect_timeout">> => <<"5s">>,
            <<"kafka">> =>
                #{
                    <<"buffer">> =>
                        #{
                            <<"memory_overload_protection">> => true,
                            <<"mode">> => <<"memory">>,
                            <<"per_partition_limit">> => <<"2GB">>,
                            <<"segment_bytes">> => <<"100MB">>
                        },
                    <<"compression">> => <<"no_compression">>,
                    <<"kafka_header_value_encode_mode">> => <<"none">>,
                    <<"max_batch_bytes">> => <<"896KB">>,
                    <<"max_inflight">> => <<"10">>,
                    <<"message">> =>
                        #{
                            <<"key">> => <<"${.clientid}">>,
                            <<"timestamp">> => <<"${.timestamp}">>,
                            <<"value">> => <<"${.}">>
                        },
                    <<"partition_count_refresh_interval">> => <<"60s">>,
                    <<"partition_strategy">> => <<"random">>,
                    <<"query_mode">> => <<"async">>,
                    <<"required_acks">> => <<"all_isr">>,
                    <<"sync_query_timeout">> => <<"5s">>,
                    <<"topic">> => KafkaTopic
                },
            <<"local_topic">> => <<"t/aeh">>,
            <<"metadata_request_timeout">> => <<"5s">>,
            <<"min_metadata_refresh_interval">> => <<"3s">>,
            <<"socket_opts">> =>
                #{
                    <<"nodelay">> => true,
                    <<"recbuf">> => <<"1024KB">>,
                    <<"sndbuf">> => <<"1024KB">>,
                    <<"tcp_keepalive">> => <<"none">>
                },
            <<"ssl">> =>
                #{
                    <<"cacertfile">> => shared_secret(client_cacertfile),
                    <<"certfile">> => shared_secret(client_certfile),
                    <<"keyfile">> => shared_secret(client_keyfile),
                    <<"ciphers">> => [],
                    <<"depth">> => 10,
                    <<"enable">> => true,
                    <<"hibernate_after">> => <<"5s">>,
                    <<"log_level">> => <<"notice">>,
                    <<"reuse_sessions">> => true,
                    <<"secure_renegotiate">> => true,
                    <<"server_name_indication">> => <<"disable">>,
                    %% currently, it seems our CI kafka certs fail peer verification
                    <<"verify">> => <<"verify_none">>,
                    <<"versions">> => [<<"tlsv1.3">>, <<"tlsv1.2">>]
                }
        },
    InnerConfigMap = serde_roundtrip(InnerConfigMap0),
    ExtraConfig =
        [{kafka_topic, KafkaTopic}],
    {Name, parse_and_check(InnerConfigMap, Name), ExtraConfig}.

%% check it serializes correctly
serde_roundtrip(InnerConfigMap0) ->
    IOList = hocon_pp:do(InnerConfigMap0, #{}),
    {ok, InnerConfigMap} = hocon:binary(IOList),
    InnerConfigMap.

parse_and_check(InnerConfigMap, Name) ->
    TypeBin = ?BRIDGE_TYPE_BIN,
    RawConf = #{<<"bridges">> => #{TypeBin => #{Name => InnerConfigMap}}},
    hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}),
    #{<<"bridges">> := #{TypeBin := #{Name := Config}}} = RawConf,
    ct:pal("config:\n  ~p", [Config]),
    Config.

shared_secret_path() ->
    os:getenv("CI_SHARED_SECRET_PATH", "/var/lib/secret").

shared_secret(client_keyfile) ->
    filename:join([shared_secret_path(), "client.key"]);
shared_secret(client_certfile) ->
    filename:join([shared_secret_path(), "client.crt"]);
shared_secret(client_cacertfile) ->
    filename:join([shared_secret_path(), "ca.crt"]);
shared_secret(rig_keytab) ->
    filename:join([shared_secret_path(), "rig.keytab"]).

ensure_topic(Config, KafkaTopic, Opts) ->
    KafkaHost = ?config(kafka_host, Config),
    KafkaPort = ?config(kafka_port, Config),
    NumPartitions = maps:get(num_partitions, Opts, 3),
    Endpoints = [{KafkaHost, KafkaPort}],
    TopicConfigs = [
        #{
            name => KafkaTopic,
            num_partitions => NumPartitions,
            replication_factor => 1,
            assignments => [],
            configs => []
        }
    ],
    RequestConfig = #{timeout => 5_000},
    ConnConfig =
        #{
            ssl => emqx_tls_lib:to_client_opts(
                #{
                    keyfile => shared_secret(client_keyfile),
                    certfile => shared_secret(client_certfile),
                    cacertfile => shared_secret(client_cacertfile),
                    verify => verify_none,
                    enable => true
                }
            ),
            sasl => {plain, <<"emqxuser">>, <<"password">>}
        },
    case brod:create_topics(Endpoints, TopicConfigs, RequestConfig, ConnConfig) of
        ok -> ok;
        {error, topic_already_exists} -> ok
    end.

make_message() ->
    Time = erlang:unique_integer(),
    BinTime = integer_to_binary(Time),
    Payload = emqx_guid:to_hexstr(emqx_guid:gen()),
    #{
        clientid => BinTime,
        payload => Payload,
        timestamp => Time
    }.

%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------

t_start_stop(Config) ->
    emqx_bridge_testlib:t_start_stop(Config, kafka_producer_stopped),
    ok.

t_create_via_http(Config) ->
    emqx_bridge_testlib:t_create_via_http(Config),
    ok.

t_on_get_status(Config) ->
    emqx_bridge_testlib:t_on_get_status(Config, #{failure_status => connecting}),
    ok.

t_sync_query(Config) ->
    ok = emqx_bridge_testlib:t_sync_query(
        Config,
        fun make_message/0,
        fun(Res) -> ?assertEqual(ok, Res) end,
        emqx_bridge_kafka_impl_producer_sync_query
    ),
    ok.
@@ -0,0 +1,195 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------

-module(emqx_bridge_azure_event_hub_tests).

-include_lib("eunit/include/eunit.hrl").

%%===========================================================================
%% Data Section
%%===========================================================================

%% erlfmt-ignore
aeh_producer_hocon() ->
"""
bridges.azure_event_hub_producer.my_producer {
  enable = true
  authentication {
    password = \"Endpoint=...\"
  }
  bootstrap_hosts = \"emqx.servicebus.windows.net:9093\"
  connect_timeout = 5s
  kafka {
    buffer {
      memory_overload_protection = false
      mode = memory
      per_partition_limit = 2GB
      segment_bytes = 100MB
    }
    compression = no_compression
    kafka_header_value_encode_mode = none
    max_batch_bytes = 896KB
    max_inflight = 10
    message {
      key = \"${.clientid}\"
      timestamp = \"${.timestamp}\"
      value = \"${.}\"
    }
    partition_count_refresh_interval = 60s
    partition_strategy = random
    query_mode = async
    required_acks = all_isr
    sync_query_timeout = 5s
    topic = test
  }
  local_topic = \"t/aeh\"
  metadata_request_timeout = 5s
  min_metadata_refresh_interval = 3s
  socket_opts {
    recbuf = 1024KB
    sndbuf = 1024KB
    tcp_keepalive = none
  }
  ssl {
    ciphers = []
    depth = 10
    hibernate_after = 5s
    log_level = notice
    reuse_sessions = true
    secure_renegotiate = true
    verify = verify_none
    versions = [tlsv1.3, tlsv1.2]
    server_name_indication = auto
  }
}
""".

%%===========================================================================
%% Helper functions
%%===========================================================================

parse(Hocon) ->
    {ok, Conf} = hocon:binary(Hocon),
    Conf.

check(Conf) when is_map(Conf) ->
    hocon_tconf:check_plain(emqx_bridge_schema, Conf).

-define(validation_error(Reason, Value),
    {emqx_bridge_schema, [
        #{
            kind := validation_error,
            reason := Reason,
            value := Value
        }
    ]}
).

-define(ok_config(Cfg), #{
    <<"bridges">> :=
        #{
            <<"azure_event_hub_producer">> :=
                #{
                    <<"my_producer">> :=
                        Cfg
                }
        }
}).

%%===========================================================================
%% Test cases
%%===========================================================================

aeh_producer_test_() ->
    %% ensure this module is loaded when testing only this file
    _ = emqx_bridge_enterprise:module_info(),
    BaseConf = parse(aeh_producer_hocon()),
    Override = fun(Cfg) ->
        emqx_utils_maps:deep_merge(
            BaseConf,
            #{
                <<"bridges">> =>
                    #{
                        <<"azure_event_hub_producer">> =>
                            #{<<"my_producer">> => Cfg}
                    }
            }
        )
    end,
    [
        {"base config",
            ?_assertMatch(
                ?ok_config(
                    #{
                        <<"authentication">> := #{
                            <<"username">> := <<"$ConnectionString">>,
                            <<"mechanism">> := plain
                        },
                        <<"ssl">> := #{
                            <<"enable">> := true,
                            <<"server_name_indication">> := "servicebus.windows.net"
                        }
                    }
                ),
                check(BaseConf)
            )},
        {"sni disabled",
            ?_assertMatch(
                ?ok_config(
                    #{<<"ssl">> := #{<<"server_name_indication">> := "disable"}}
                ),
                check(Override(#{<<"ssl">> => #{<<"server_name_indication">> => <<"disable">>}}))
            )},
        {"custom sni",
            ?_assertMatch(
                ?ok_config(
                    #{
                        <<"ssl">> := #{
                            <<"server_name_indication">> := "custom.servicebus.windows.net"
                        }
                    }
                ),
                check(
                    Override(#{
                        <<"ssl">> => #{
                            <<"server_name_indication">> => <<"custom.servicebus.windows.net">>
                        }
                    })
                )
            )},
        {"ssl disabled",
            ?_assertThrow(
                ?validation_error(#{expected := "true"}, "false"),
                check(Override(#{<<"ssl">> => #{<<"enable">> => <<"false">>}}))
            )},
        {"bad authn mechanism: scram sha256",
            ?_assertThrow(
                ?validation_error(#{expected := "plain"}, "scram_sha_256"),
                check(
                    Override(#{<<"authentication">> => #{<<"mechanism">> => <<"scram_sha_256">>}})
                )
            )},
        {"bad authn mechanism: scram sha512",
            ?_assertThrow(
                ?validation_error(#{expected := "plain"}, "scram_sha_512"),
                check(
                    Override(#{<<"authentication">> => #{<<"mechanism">> => <<"scram_sha_512">>}})
                )
            )},
        {"bad required acks: none",
            ?_assertThrow(
                ?validation_error(not_a_enum_symbol, none),
                check(Override(#{<<"kafka">> => #{<<"required_acks">> => <<"none">>}}))
            )},
        {"bad compression: snappy",
            ?_assertThrow(
                ?validation_error(#{expected := "no_compression"}, "snappy"),
                check(Override(#{<<"kafka">> => #{<<"compression">> => <<"snappy">>}}))
            )},
        {"bad compression: gzip",
            ?_assertThrow(
                ?validation_error(#{expected := "no_compression"}, "gzip"),
                check(Override(#{<<"kafka">> => #{<<"compression">> => <<"gzip">>}}))
            )}
    ].
@@ -136,7 +136,7 @@ bridge_config(TestCase, _TestGroup, Config) ->
 " iotdb_version = \"~s\"\n"
 " pool_size = 1\n"
 " resource_opts = {\n"
-" health_check_interval = \"5s\"\n"
+" health_check_interval = \"1s\"\n"
 " request_ttl = 30s\n"
 " query_mode = \"async\"\n"
 " worker_pool_size = 1\n"
@@ -1,7 +1,7 @@
 %% -*- mode: erlang -*-
 {application, emqx_bridge_kafka, [
 {description, "EMQX Enterprise Kafka Bridge"},
-{vsn, "0.1.5"},
+{vsn, "0.1.6"},
 {registered, [emqx_bridge_kafka_consumer_sup]},
 {applications, [
 kernel,
@@ -29,7 +29,7 @@
 host_opts/0
 ]).

--export([kafka_producer_converter/2]).
+-export([kafka_producer_converter/2, producer_strategy_key_validator/1]).

 %% -------------------------------------------------------------------------------------------------
 %% api
@@ -17,6 +17,9 @@
 %% Parse comma separated host:port list into a [{Host,Port}] list
 hosts(Hosts) when is_binary(Hosts) ->
 hosts(binary_to_list(Hosts));
+hosts([#{hostname := _, port := _} | _] = Servers) ->
+    %% already parsed
+    [{Hostname, Port} || #{hostname := Hostname, port := Port} <- Servers];
 hosts(Hosts) when is_list(Hosts) ->
 kpro:parse_endpoints(Hosts).
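The new clause exists because the AEH bridge's `connector_config/2` (earlier in this commit) pre-parses `bootstrap_hosts` with `emqx_schema:parse_servers/2`, so `hosts/1` may now receive an already-parsed list. A sketch of the two accepted input shapes (the broker names are made up):

```erlang
%% Both shapes normalize to a [{Host, Port}] list.
hosts(<<"broker1:9092,broker2:9092">>),          %% raw string, via kpro:parse_endpoints/1
hosts([#{hostname => "broker1", port => 9092}]). %% pre-parsed by emqx_schema:parse_servers/2
```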
@@ -208,7 +208,9 @@ on_stop(InstanceId, _State) ->
 ok;
 _ ->
 ok
-end.
+end,
+?tp(kafka_producer_stopped, #{instance_id => InstanceId}),
+ok.

 on_query(
 _InstId,
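The `kafka_producer_stopped` trace point added above is what the new AEH suite's `t_start_stop/1` passes to `emqx_bridge_testlib:t_start_stop/2` as its stop trace point. As a hedged sketch (not the testlib's actual internals), a snabbkaffe-based assertion on it could look like:

```erlang
%% Sketch only: assert the stop trace point fired after stopping the bridge.
?check_trace(
    begin
        %% ... create the bridge, then stop/remove it ...
        ok
    end,
    fun(Trace) ->
        ?assertMatch(
            [#{instance_id := _} | _],
            ?of_kind(kafka_producer_stopped, Trace)
        )
    end
).
```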
@@ -228,7 +230,10 @@ on_query(
 headers_val_encode_mode => KafkaHeadersValEncodeMode
 },
 KafkaMessage = render_message(Template, KafkaHeaders, Message),
-?tp(emqx_bridge_kafka_impl_producer_sync_query, KafkaHeaders),
+?tp(
+    emqx_bridge_kafka_impl_producer_sync_query,
+    #{headers_config => KafkaHeaders, instance_id => _InstId}
+),
 try
 {_Partition, _Offset} = wolff:send_sync(Producers, [KafkaMessage], SyncTimeout),
 ok
@@ -263,7 +268,10 @@ on_query_async(
 headers_val_encode_mode => KafkaHeadersValEncodeMode
 },
 KafkaMessage = render_message(Template, KafkaHeaders, Message),
-?tp(emqx_bridge_kafka_impl_producer_async_query, KafkaHeaders),
+?tp(
+    emqx_bridge_kafka_impl_producer_async_query,
+    #{headers_config => KafkaHeaders, instance_id => _InstId}
+),
 %% * Must be a batch because wolff:send and wolff:send_sync are batch APIs
 %% * Must be a single element batch because wolff books calls, but not batch sizes
 %%   for counters and gauges.
@@ -565,6 +565,7 @@ t_send_message_with_headers(Config) ->
 ?assertMatch(
 [
 #{
+headers_config := #{
 ext_headers_tokens := [
 {
 [{str, <<"clientid">>}],
@@ -578,6 +579,7 @@ t_send_message_with_headers(Config) ->
 headers_tokens := [{var, [<<"pub_props">>]}],
 headers_val_encode_mode := json
 }
+}
 ],
 ?of_kind(Kind, Trace)
 )
@@ -102,6 +102,7 @@
 emqx_oracle,
 emqx_bridge_oracle,
 emqx_bridge_rabbitmq,
+emqx_bridge_azure_event_hub,
 emqx_schema_registry,
 emqx_eviction_agent,
 emqx_node_rebalance,
@@ -0,0 +1 @@
Implemented Azure Event Hub Producer data integration bridge.
mix.exs
@@ -193,7 +193,8 @@ defmodule EMQXUmbrella.MixProject do
 :emqx_s3,
 :emqx_schema_registry,
 :emqx_enterprise,
-:emqx_bridge_kinesis
+:emqx_bridge_kinesis,
+:emqx_bridge_azure_event_hub
 ])
 end
@@ -105,6 +105,7 @@ is_community_umbrella_app("apps/emqx_s3") -> false;
 is_community_umbrella_app("apps/emqx_schema_registry") -> false;
 is_community_umbrella_app("apps/emqx_enterprise") -> false;
 is_community_umbrella_app("apps/emqx_bridge_kinesis") -> false;
+is_community_umbrella_app("apps/emqx_bridge_azure_event_hub") -> false;
 is_community_umbrella_app(_) -> true.

 is_jq_supported() ->
@ -0,0 +1,287 @@
|
||||||
|
emqx_bridge_azure_event_hub {
|
||||||
|
|
||||||
|
connect_timeout.desc:
|
||||||
|
"""Maximum wait time for TCP connection establishment (including authentication time if enabled)."""
|
||||||
|
|
||||||
|
connect_timeout.label:
|
||||||
|
"""Connect Timeout"""
|
||||||
|
|
||||||
|
producer_opts.desc:
|
||||||
|
"""Local MQTT data source and Azure Event Hub bridge configs."""
|
||||||
|
|
||||||
|
producer_opts.label:
|
||||||
|
"""MQTT to Azure Event Hub"""
|
||||||
|
|
||||||
|
min_metadata_refresh_interval.desc:
|
||||||
|
"""Minimum time interval the client has to wait before refreshing Azure Event Hub Kafka broker and topic metadata. Setting too small value may add extra load on Azure Event Hub."""
|
||||||
|
|
||||||
|
min_metadata_refresh_interval.label:
|
||||||
|
"""Min Metadata Refresh Interval"""
|
||||||
|
|
||||||
|
kafka_producer.desc:
|
||||||
|
"""Azure Event Hub Producer configuration."""
|
||||||
|
|
||||||
|
kafka_producer.label:
|
||||||
|
"""Azure Event Hub Producer"""
|
||||||
|
|
||||||
|
producer_buffer.desc:
|
||||||
|
"""Configure producer message buffer.
|
||||||
|
|
||||||
|
Tell Azure Event Hub producer how to buffer messages when EMQX has more messages to send than Azure Event Hub can keep up, or when Azure Event Hub is down."""
|
||||||
|
|
||||||
|
producer_buffer.label:
|
||||||
|
"""Message Buffer"""
|
||||||
|
|
||||||
|
socket_send_buffer.desc:
|
||||||
|
"""Fine tune the socket send buffer. The default value is tuned for high throughput."""
|
||||||
|
|
||||||
|
socket_send_buffer.label:
|
||||||
|
"""Socket Send Buffer Size"""
|
||||||
|
|
||||||
|
socket_receive_buffer.desc:
|
||||||
|
"""Fine tune the socket receive buffer. The default value is tuned for high throughput."""
|
||||||
|
|
||||||
|
socket_receive_buffer.label:
|
||||||
|
"""Socket Receive Buffer Size"""
|
||||||
|
|
||||||
|
socket_tcp_keepalive.desc:
|
||||||
|
"""Enable TCP keepalive for Azure Event Hub bridge connections.
|
||||||
|
The value is three comma separated numbers in the format of 'Idle,Interval,Probes'
|
||||||
|
- Idle: The number of seconds a connection needs to be idle before the server begins to send out keep-alive probes (Linux default 7200).
|
||||||
|
- Interval: The number of seconds between TCP keep-alive probes (Linux default 75).
|
||||||
|
- Probes: The maximum number of TCP keep-alive probes to send before giving up and killing the connection if no response is obtained from the other end (Linux default 9).
|
||||||
|
For example "240,30,5" means: TCP keepalive probes are sent after the connection is idle for 240 seconds, and the probes are sent every 30 seconds until a response is received, if it misses 5 consecutive responses, the connection should be closed.
|
||||||
|
Default: 'none'"""
|
||||||
|
|
||||||
|
socket_tcp_keepalive.label:
|
||||||
|
"""TCP keepalive options"""
|
||||||
|
|
||||||
|
desc_name.desc:
|
||||||
|
"""Bridge name, used as a human-readable description of the bridge."""
|
||||||
|
|
||||||
|
desc_name.label:
|
||||||
|
"""Bridge Name"""
|
||||||
|
|
||||||
|
producer_kafka_opts.desc:
|
||||||
|
"""Azure Event Hub producer configs."""
|
||||||
|
|
||||||
|
producer_kafka_opts.label:
|
||||||
|
"""Azure Event Hub Producer"""
|
||||||
|
|
||||||
|
kafka_topic.desc:
|
||||||
|
"""Event Hub name"""
|
||||||
|
|
||||||
|
kafka_topic.label:
|
||||||
|
"""Event Hub Name"""
|
||||||
|
|
||||||
|
kafka_message_timestamp.desc:
|
||||||
|
"""Which timestamp to use. The timestamp is expected to be a millisecond precision Unix epoch which can be in string format, e.g. <code>1661326462115</code> or <code>'1661326462115'</code>. When the desired data field for this template is not found, or if the found data is not a valid integer, the current system timestamp will be used."""
|
||||||
|
|
||||||
|
kafka_message_timestamp.label:
|
||||||
|
"""Message Timestamp"""
|
||||||
|
|
||||||
|
buffer_mode.desc:
|
||||||
|
"""Message buffer mode.
|
||||||
|
|
||||||
|
<code>memory</code>: Buffer all messages in memory. The messages will be lost in case of EMQX node restart
|
||||||
|
<code>disk</code>: Buffer all messages on disk. The messages on disk are able to survive EMQX node restart.
|
||||||
|
<code>hybrid</code>: Buffer message in memory first, when up to certain limit (see <code>segment_bytes</code> config for more information), then start offloading messages to disk, Like <code>memory</code> mode, the messages will be lost in case of EMQX node restart."""
|
||||||
|
|
||||||
|
buffer_mode.label:
|
||||||
|
"""Buffer Mode"""
|
||||||
|
|
||||||
|
socket_opts.desc:
|
||||||
|
"""Extra socket options."""
|
||||||
|
|
||||||
|
socket_opts.label:
|
||||||
|
"""Socket Options"""
|
||||||
|
|
||||||
|
partition_count_refresh_interval.desc:
|
||||||
|
"""The time interval for Azure Event Hub producer to discover increased number of partitions.
|
||||||
|
After the number of partitions is increased in Azure Event Hub, EMQX will start taking the
|
||||||
|
discovered partitions into account when dispatching messages per <code>partition_strategy</code>."""
|
||||||
|
|
||||||
|
partition_count_refresh_interval.label:
|
||||||
|
"""Partition Count Refresh Interval"""
|
||||||
|
|
||||||
|
max_batch_bytes.desc:
|
||||||
|
"""Maximum bytes to collect in a Azure Event Hub message batch. Most of the Kafka brokers default to a limit of 1 MB batch size. EMQX's default value is less than 1 MB in order to compensate Kafka message encoding overheads (especially when each individual message is very small). When a single message is over the limit, it is still sent (as a single element batch)."""

max_batch_bytes.label:
"""Max Batch Bytes"""

required_acks.desc:
"""Required acknowledgements for the Azure Event Hub partition leader to wait for from its followers before it sends back the acknowledgement to the EMQX Azure Event Hub producer.

<code>all_isr</code>: Require all in-sync replicas to acknowledge.
<code>leader_only</code>: Require only the partition leader's acknowledgement."""
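
# Sketch of trading latency for durability with the values listed above:
#
#   kafka { required_acks = all_isr }   # or leader_only for lower latency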

required_acks.label:
"""Required Acks"""

kafka_headers.desc:
"""Please provide a placeholder to be used as Azure Event Hub Headers<br/>
e.g. <code>${pub_props}</code><br/>
Note that the value of the placeholder must either be an object:
<code>{\"foo\": \"bar\"}</code>
or an array of key-value pairs:
<code>[{\"key\": \"foo\", \"value\": \"bar\"}]</code>"""

kafka_headers.label:
"""Azure Event Hub Headers"""

producer_kafka_ext_headers.desc:
"""Please provide more key-value pairs for Azure Event Hub headers<br/>
The key-value pairs here will be combined with the
value of the <code>kafka_headers</code> field before sending to Azure Event Hub."""

producer_kafka_ext_headers.label:
"""Extra Azure Event Hub headers"""

producer_kafka_ext_header_key.desc:
"""Key of the Azure Event Hub header. Placeholders in the format of ${var} are supported."""

producer_kafka_ext_header_key.label:
"""Azure Event Hub extra header key"""

producer_kafka_ext_header_value.desc:
"""Value of the Azure Event Hub header. Placeholders in the format of ${var} are supported."""

producer_kafka_ext_header_value.label:
"""Value"""

kafka_header_value_encode_mode.desc:
"""Azure Event Hub headers value encode mode<br/>
- NONE: only add binary values to Azure Event Hub headers;<br/>
- JSON: only add JSON values to Azure Event Hub headers,
and encode them to JSON strings before sending."""
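
# Sketch (value casing is an assumption): with JSON mode, a value such as
# {"a": 1} is serialized to the string "{\"a\":1}" before sending:
#
#   kafka { kafka_header_value_encode_mode = json }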

kafka_header_value_encode_mode.label:
"""Azure Event Hub headers value encode mode"""

metadata_request_timeout.desc:
"""Maximum wait time when fetching metadata from Azure Event Hub."""

metadata_request_timeout.label:
"""Metadata Request Timeout"""

desc_type.desc:
"""The Bridge Type"""

desc_type.label:
"""Bridge Type"""

socket_nodelay.desc:
"""When set to 'true', the TCP buffer is sent as soon as possible. Otherwise, the OS kernel may buffer small TCP packets for a while (40 ms by default)."""

socket_nodelay.label:
"""No Delay"""

authentication.desc:
"""Authentication configs."""

authentication.label:
"""Authentication"""

buffer_memory_overload_protection.desc:
"""Applicable when buffer mode is set to <code>memory</code>.
EMQX will drop old buffered messages under high memory pressure. The high memory threshold is defined in the config <code>sysmon.os.sysmem_high_watermark</code>. NOTE: This config only works on Linux."""

buffer_memory_overload_protection.label:
"""Memory Overload Protection"""

auth_sasl_mechanism.desc:
"""SASL authentication mechanism."""

auth_sasl_mechanism.label:
"""Mechanism"""

config_enable.desc:
"""Enable (true) or disable (false) this bridge."""

config_enable.label:
"""Enable or Disable"""

desc_config.desc:
"""Configuration for an Azure Event Hub bridge."""

desc_config.label:
"""Azure Event Hub Bridge Configuration"""

buffer_per_partition_limit.desc:
"""Number of bytes allowed to buffer for each Azure Event Hub partition. When this limit is exceeded, old messages will be dropped to make room for new messages to be buffered."""

buffer_per_partition_limit.label:
"""Per-partition Buffer Limit"""

bootstrap_hosts.desc:
"""A comma-separated list of Azure Event Hub Kafka <code>host[:port]</code> namespace endpoints to bootstrap the client. The default port number is 9093."""

bootstrap_hosts.label:
"""Bootstrap Hosts"""

kafka_message_key.desc:
"""Template to render Azure Event Hub message key. If the template is rendered into a NULL value (i.e. there is no such data field in the Rule Engine context) then Azure Event Hub's <code>NULL</code> (but not an empty string) is used."""

kafka_message_key.label:
"""Message Key"""

kafka_message.desc:
"""Template to render an Azure Event Hub message."""

kafka_message.label:
"""Azure Event Hub Message Template"""

mqtt_topic.desc:
"""MQTT topic or topic filter as data source (bridge input). If a rule action is used as the data source, this config should be left empty; otherwise, messages will be duplicated in Azure Event Hub."""

mqtt_topic.label:
"""Source MQTT Topic"""

kafka_message_value.desc:
"""Template to render Azure Event Hub message value. If the template is rendered into a NULL value (i.e. there is no such data field in the Rule Engine context) then Azure Event Hub's <code>NULL</code> (but not an empty string) is used."""

kafka_message_value.label:
"""Message Value"""

partition_strategy.desc:
"""Partition strategy tells the producer how to dispatch messages to Azure Event Hub partitions.

<code>random</code>: Randomly pick a partition for each message.
<code>key_dispatch</code>: Hash the Azure Event Hub message key to a partition number."""
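
# Sketch: key_dispatch keeps messages with the same message key in the same
# partition, preserving per-key ordering:
#
#   kafka { partition_strategy = key_dispatch }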

partition_strategy.label:
"""Partition Strategy"""

buffer_segment_bytes.desc:
"""Applicable when buffer mode is set to <code>disk</code> or <code>hybrid</code>.
This value specifies the size of each on-disk buffer file."""

buffer_segment_bytes.label:
"""Segment File Bytes"""

max_inflight.desc:
"""Maximum number of batches allowed for the Azure Event Hub producer (per partition) to send before receiving acknowledgement from Azure Event Hub. A greater value typically means better throughput. However, there can be a risk of message reordering when this value is greater than 1."""

max_inflight.label:
"""Max Inflight"""

compression.desc:
"""Compression method."""

compression.label:
"""Compression"""

query_mode.desc:
"""Query mode. Optional; either 'sync' or 'async'. Default: 'async'."""

query_mode.label:
"""Query Mode"""

sync_query_timeout.desc:
"""This parameter defines the timeout limit for synchronous queries. It applies only when the bridge query mode is configured to 'sync'."""

sync_query_timeout.label:
"""Synchronous Query Timeout"""

}