feat: hstreamdb bridge with batch query
parent b9bfdfd583
commit 1587f038a5
@@ -32,16 +32,31 @@ conn_bridge_examples(Method) ->
         }
     ].
 
-values(_Method) ->
+values(get) ->
+    values(post);
+values(put) ->
+    values(post);
+values(post) ->
     #{
-        type => hstreamdb,
+        type => <<"hstreamdb">>,
         name => <<"demo">>,
-        connector => <<"hstreamdb:connector">>,
-        enable => true,
-        direction => egress,
-        local_topic => <<"local/topic/#">>,
-        payload => <<"${payload}">>
-    }.
+        direction => <<"egress">>,
+        url => <<"http://127.0.0.1:6570">>,
+        stream => <<"stream">>,
+        %% raw HRecord
+        record_template =>
+            <<"{ \"temperature\": ${payload.temperature}, \"humidity\": ${payload.humidity} }">>,
+        pool_size => 8,
+        %% grpc_timeout => <<"1m">>
+        resource_opts => #{
+            query_mode => sync,
+            batch_size => 100,
+            batch_time => <<"20ms">>
+        },
+        ssl => #{enable => false}
+    };
+values(_) ->
+    #{}.
 
 %% -------------------------------------------------------------------------------------------------
 %% Hocon Schema Definitions
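
Note: the "raw HRecord" template in the example above is rendered per message
with emqx_placeholder, the same pair of calls this commit uses in the connector
code further down. An illustrative Erlang shell sketch, with an invented
message map:

    %% Compile the template once, then render it against a message map.
    Tokens = emqx_placeholder:preproc_tmpl(
        <<"{ \"temperature\": ${payload.temperature}, \"humidity\": ${payload.humidity} }">>
    ),
    emqx_placeholder:proc_tmpl(Tokens, #{payload => #{temperature => 21, humidity => 53}}).
    %% expected: <<"{ \"temperature\": 21, \"humidity\": 53 }">>
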
@@ -50,41 +65,45 @@ namespace() -> "bridge_hstreamdb".
 roots() -> [].
 
 fields("config") ->
+    hstream_bridge_common_fields() ++
+        connector_fields();
+fields("post") ->
+    hstream_bridge_common_fields() ++
+        connector_fields() ++
+        type_name_fields();
+fields("get") ->
+    hstream_bridge_common_fields() ++
+        connector_fields() ++
+        type_name_fields() ++
+        emqx_bridge_schema:status_fields();
+fields("put") ->
+    hstream_bridge_common_fields() ++
+        connector_fields().
+
+hstream_bridge_common_fields() ->
+    emqx_bridge_schema:common_bridge_fields() ++
     [
-        {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})},
         {direction, mk(egress, #{desc => ?DESC("config_direction"), default => egress})},
         {local_topic, mk(binary(), #{desc => ?DESC("local_topic")})},
-        {payload, mk(binary(), #{default => <<"${payload}">>, desc => ?DESC("payload")})},
-        {connector, field(connector)}
-    ];
-fields("post") ->
-    [type_field(), name_field() | fields("config")];
-fields("put") ->
-    fields("config");
-fields("get") ->
-    emqx_bridge_schema:status_fields() ++ fields("post").
+        {record_template,
+            mk(binary(), #{default => <<"${payload}">>, desc => ?DESC("record_template")})}
+    ] ++
+        emqx_resource_schema:fields("resource_opts").
 
-field(connector) ->
-    mk(
-        hoconsc:union([binary(), ref(emqx_bridge_hstreamdb_connector, config)]),
-        #{
-            required => true,
-            example => <<"hstreamdb:demo">>,
-            desc => ?DESC("desc_connector")
-        }
-    ).
+connector_fields() ->
+    emqx_bridge_hstreamdb_connector:fields(config).
 
 desc("config") ->
     ?DESC("desc_config");
 desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" ->
-    ["Configuration for HStream using `", string:to_upper(Method), "` method."];
+    ["Configuration for HStreamDB bridge using `", string:to_upper(Method), "` method."];
 desc(_) ->
     undefined.
 
 %% -------------------------------------------------------------------------------------------------
 %% internal
-type_field() ->
-    {type, mk(enum([hstreamdb]), #{required => true, desc => ?DESC("desc_type")})}.
-
-name_field() ->
-    {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}.
+type_name_fields() ->
+    [
+        {type, mk(enum([hstreamdb]), #{required => true, desc => ?DESC("desc_type")})},
+        {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}
+    ].
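
Note: after this refactor each REST method's field list is composed from one
common core instead of chaining fields("post") through fields("config"). A
quick shell sketch to inspect the composed "post" fields; the module name is
assumed here, since this diff view does not show file names:

    %% "post" = common bridge fields ++ connector fields ++ type/name fields.
    [Name || {Name, _Schema} <- emqx_bridge_hstreamdb:fields("post")].
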
@@ -6,6 +6,7 @@
 -include_lib("hocon/include/hoconsc.hrl").
 -include_lib("typerefl/include/types.hrl").
 -include_lib("emqx/include/logger.hrl").
+-include_lib("snabbkaffe/include/snabbkaffe.hrl").
 
 -import(hoconsc, [mk/2, enum/1]).
 
@@ -17,6 +18,7 @@
     on_start/2,
     on_stop/2,
     on_query/3,
+    on_batch_query/3,
     on_get_status/2
 ]).
 
@@ -28,10 +30,12 @@
     namespace/0,
     roots/0,
     fields/1,
-    desc/1,
-    connector_examples/1
+    desc/1
 ]).
 
+-define(DEFAULT_GRPC_TIMEOUT, timer:seconds(30)).
+-define(DEFAULT_GRPC_TIMEOUT_RAW, <<"30s">>).
+
 %% -------------------------------------------------------------------------------------------------
 %% resource callback
 callback_mode() -> always_sync.
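
Note: the two new defaults are meant to agree: ?DEFAULT_GRPC_TIMEOUT is the
runtime fallback and ?DEFAULT_GRPC_TIMEOUT_RAW is the string form the schema
advertises. timer:seconds/1 is plain stdlib arithmetic:

    1> timer:seconds(30).
    30000
    %% 30000 ms, the same duration the raw default <<"30s">> denotes.
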
@@ -51,13 +55,35 @@ on_stop(InstId, #{client := Client, producer := Producer}) ->
         stop_producer => StopProducerRes
     }).
 
+-define(FAILED_TO_APPLY_HRECORD_TEMPLATE,
+    {error, {unrecoverable_error, failed_to_apply_hrecord_template}}
+).
+
 on_query(
     _InstId,
     {send_message, Data},
-    #{producer := Producer, ordering_key := OrderingKey, payload := Payload}
+    _State = #{
+        producer := Producer, partition_key := PartitionKey, record_template := HRecordTemplate
+    }
 ) ->
-    Record = to_record(OrderingKey, Payload, Data),
-    do_append(Producer, Record).
+    try to_record(PartitionKey, HRecordTemplate, Data) of
+        Record -> append_record(Producer, Record)
+    catch
+        _:_ -> ?FAILED_TO_APPLY_HRECORD_TEMPLATE
+    end.
+
+on_batch_query(
+    _InstId,
+    BatchList,
+    _State = #{
+        producer := Producer, partition_key := PartitionKey, record_template := HRecordTemplate
+    }
+) ->
+    try to_multi_part_records(PartitionKey, HRecordTemplate, BatchList) of
+        Records -> append_record(Producer, Records)
+    catch
+        _:_ -> ?FAILED_TO_APPLY_HRECORD_TEMPLATE
+    end.
 
 on_get_status(_InstId, #{client := Client}) ->
     case is_alive(Client) of
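
Note: both callbacks use `try Expr of ... catch`, which guards only the
template application (to_record/3 or to_multi_part_records/3); a crash raised
by append_record/2 inside the of-clause still propagates instead of being
masked as a template error. A minimal shell check of that semantics:

    1> try error(boom) of V -> V catch _:_ -> caught end.
    caught
    2> try ok of _ -> error(boom) catch _:_ -> caught end.
    ** exception error: boom
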
@@ -87,43 +113,16 @@ fields(config) ->
     [
         {url, mk(binary(), #{required => true, desc => ?DESC("url")})},
         {stream, mk(binary(), #{required => true, desc => ?DESC("stream_name")})},
-        {ordering_key, mk(binary(), #{required => false, desc => ?DESC("ordering_key")})},
-        {pool_size, mk(pos_integer(), #{required => true, desc => ?DESC("pool_size")})}
-    ];
-fields("get") ->
-    fields("post");
-fields("put") ->
-    fields(config);
-fields("post") ->
-    [
-        {type, mk(hstreamdb, #{required => true, desc => ?DESC("type")})},
-        {name, mk(binary(), #{required => true, desc => ?DESC("name")})}
-    ] ++ fields("put").
-
-connector_examples(Method) ->
-    [
-        #{
-            <<"hstreamdb">> => #{
-                summary => <<"HStreamDB Connector">>,
-                value => values(Method)
-            }
-        }
-    ].
-
-values(post) ->
-    maps:merge(values(put), #{name => <<"connector">>});
-values(get) ->
-    values(post);
-values(put) ->
-    #{
-        type => hstreamdb,
-        url => <<"http://127.0.0.1:6570">>,
-        stream => <<"stream1">>,
-        ordering_key => <<"some_key">>,
-        pool_size => 8
-    };
-values(_) ->
-    #{}.
+        {partition_key, mk(binary(), #{required => false, desc => ?DESC("partition_key")})},
+        {pool_size, mk(pos_integer(), #{required => true, desc => ?DESC("pool_size")})},
+        {grpc_timeout, fun grpc_timeout/1}
+    ] ++ emqx_connector_schema_lib:ssl_fields().
+
+grpc_timeout(type) -> emqx_schema:timeout_duration_ms();
+grpc_timeout(desc) -> ?DESC("grpc_timeout");
+grpc_timeout(default) -> ?DEFAULT_GRPC_TIMEOUT_RAW;
+grpc_timeout(required) -> false;
+grpc_timeout(_) -> undefined.
 
 desc(config) ->
     ?DESC("config").
@@ -168,6 +167,10 @@ do_start_client(InstId, Config = #{url := Server, pool_size := PoolSize}) ->
             }),
             start_producer(InstId, Client, Config);
         _ ->
+            ?tp(
+                hstreamdb_connector_start_failed,
+                #{error => client_not_alive}
+            ),
             ?SLOG(error, #{
                 msg => "hstreamdb connector: client not alive",
                 connector => InstId
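
Note: ?tp/2 comes from the snabbkaffe header included earlier; it emits a
structured trace event that test suites can wait for and assert on. A
test-side sketch (hedged: macro usage as commonly seen with snabbkaffe; the
timeout value is invented):

    %% Block until do_start_client/2 reports the failure event.
    ?block_until(#{?snk_kind := hstreamdb_connector_start_failed}, 1000).
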
@@ -202,7 +205,7 @@ is_alive(Client) ->
 start_producer(
     InstId,
     Client,
-    Options = #{stream := Stream, pool_size := PoolSize, egress := #{payload := PayloadBin}}
+    Options = #{stream := Stream, pool_size := PoolSize}
 ) ->
     %% TODO: change these batch options after we have better disk cache.
     BatchSize = maps:get(batch_size, Options, 100),
@@ -212,7 +215,8 @@ start_producer(
         {callback, {?MODULE, on_flush_result, []}},
         {max_records, BatchSize},
         {interval, Interval},
-        {pool_size, PoolSize}
+        {pool_size, PoolSize},
+        {grpc_timeout, maps:get(grpc_timeout, Options, ?DEFAULT_GRPC_TIMEOUT)}
     ],
     Name = produce_name(InstId),
     ?SLOG(info, #{
@@ -224,16 +228,14 @@ start_producer(
             ?SLOG(info, #{
                 msg => "hstreamdb connector: producer started"
             }),
-            EnableBatch = maps:get(enable_batch, Options, false),
-            Payload = emqx_placeholder:preproc_tmpl(PayloadBin),
-            OrderingKeyBin = maps:get(ordering_key, Options, <<"">>),
-            OrderingKey = emqx_placeholder:preproc_tmpl(OrderingKeyBin),
             State = #{
                 client => Client,
                 producer => Producer,
-                enable_batch => EnableBatch,
-                ordering_key => OrderingKey,
-                payload => Payload
+                enable_batch => maps:get(enable_batch, Options, false),
+                partition_key => emqx_placeholder:preproc_tmpl(
+                    maps:get(partition_key, Options, <<"">>)
+                ),
+                record_template => record_template(Options)
             },
             {ok, State};
         {error, {already_started, Pid}} ->
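
Note: both templates are compiled once here with preproc_tmpl/1 and carried in
State, so the per-message path in on_query/on_batch_query only pays for
proc_tmpl/2. The same two-phase pattern applied to a partition key template
(values invented):

    %% Compile once at producer start, render per message.
    PKTokens = emqx_placeholder:preproc_tmpl(<<"${clientid}">>),
    emqx_placeholder:proc_tmpl(PKTokens, #{clientid => <<"c-1">>}).
    %% expected: <<"c-1">>
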
@@ -253,47 +255,53 @@ start_producer(
             {error, Reason}
     end.
 
-to_record(OrderingKeyTmpl, PayloadTmpl, Data) ->
-    OrderingKey = emqx_placeholder:proc_tmpl(OrderingKeyTmpl, Data),
-    Payload = emqx_placeholder:proc_tmpl(PayloadTmpl, Data),
-    to_record(OrderingKey, Payload).
+to_record(PartitionKeyTmpl, HRecordTmpl, Data) ->
+    PartitionKey = emqx_placeholder:proc_tmpl(PartitionKeyTmpl, Data),
+    RawRecord = emqx_placeholder:proc_tmpl(HRecordTmpl, Data),
+    to_record(PartitionKey, RawRecord).
 
-to_record(OrderingKey, Payload) when is_binary(OrderingKey) ->
-    to_record(binary_to_list(OrderingKey), Payload);
-to_record(OrderingKey, Payload) ->
-    hstreamdb:to_record(OrderingKey, raw, Payload).
+to_record(PartitionKey, RawRecord) when is_binary(PartitionKey) ->
+    to_record(binary_to_list(PartitionKey), RawRecord);
+to_record(PartitionKey, RawRecord) ->
+    hstreamdb:to_record(PartitionKey, raw, RawRecord).
 
-do_append(Producer, Record) ->
-    do_append(false, Producer, Record).
+to_multi_part_records(PartitionKeyTmpl, HRecordTmpl, BatchList) ->
+    Records0 = lists:map(
+        fun({send_message, Data}) ->
+            to_record(PartitionKeyTmpl, HRecordTmpl, Data)
+        end,
+        BatchList
+    ),
+    PartitionKeys = proplists:get_keys(Records0),
+    [
+        {PartitionKey, proplists:get_all_values(PartitionKey, Records0)}
+     || PartitionKey <- PartitionKeys
+    ].
 
-%% TODO: this append is async, remove or change it after we have better disk cache.
-% do_append(true, Producer, Record) ->
-%     case hstreamdb:append(Producer, Record) of
-%         ok ->
-%             ?SLOG(debug, #{
-%                 msg => "hstreamdb producer async append success",
-%                 record => Record
-%             });
-%         {error, Reason} = Err ->
-%             ?SLOG(error, #{
-%                 msg => "hstreamdb producer async append failed",
-%                 reason => Reason,
-%                 record => Record
-%             }),
-%             Err
-%     end;
-do_append(false, Producer, Record) ->
-    %% TODO: this append is sync, but it does not support [Record], can only append one Record.
-    %% Change it after we have better dick cache.
+append_record(Producer, MultiPartsRecords) when is_list(MultiPartsRecords) ->
+    lists:foreach(fun(Record) -> append_record(Producer, Record) end, MultiPartsRecords);
+append_record(Producer, Record) when is_tuple(Record) ->
+    do_append_records(false, Producer, Record).
+
+%% TODO: only sync request supported. implement async request later.
+do_append_records(false, Producer, Record) ->
     case hstreamdb:append_flush(Producer, Record) of
-        {ok, _} ->
+        {ok, _Result} ->
+            ?tp(
+                hstreamdb_connector_query_return,
+                #{result => _Result}
+            ),
             ?SLOG(debug, #{
-                msg => "hstreamdb producer sync append success",
+                msg => "HStreamDB producer sync append success",
                 record => Record
             });
         {error, Reason} = Err ->
+            ?tp(
+                hstreamdb_connector_query_return,
+                #{error => Reason}
+            ),
             ?SLOG(error, #{
-                msg => "hstreamdb producer sync append failed",
+                msg => "HStreamDB producer sync append failed",
                 reason => Reason,
                 record => Record
             }),
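
Note: to_multi_part_records/3 leans on each record being a tuple whose first
element is the partition key (see to_record/2 above), so two proplists calls
are enough to bucket a batch by key. Illustrative grouping with invented keys
and records:

    Records0 = [{"k1", r1}, {"k2", r2}, {"k1", r3}],
    Keys = proplists:get_keys(Records0),
    [{K, proplists:get_all_values(K, Records0)} || K <- Keys].
    %% expected: [{"k1", [r1, r3]}, {"k2", [r2]}] (key order may vary)
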
@@ -306,6 +314,11 @@ client_name(InstId) ->
 produce_name(ActionId) ->
     list_to_atom("producer:" ++ to_string(ActionId)).
 
+record_template(#{record_template := RawHRecordTemplate}) ->
+    emqx_placeholder:preproc_tmpl(RawHRecordTemplate);
+record_template(_) ->
+    emqx_placeholder:preproc_tmpl(<<"${payload}">>).
+
 to_string(List) when is_list(List) -> List;
 to_string(Bin) when is_binary(Bin) -> binary_to_list(Bin);
 to_string(Atom) when is_atom(Atom) -> atom_to_list(Atom).
@@ -0,0 +1 @@
+Add HStreamDB bridge support, adapted to HStreamDB `v0.15.0`.
@@ -6,12 +6,6 @@ config_direction.desc:
 config_direction.label:
 """Bridge Direction"""
 
-config_enable.desc:
-"""Enable or disable this bridge"""
-
-config_enable.label:
-"""Enable Or Disable Bridge"""
-
 desc_config.desc:
 """Configuration for an HStreamDB bridge."""
 
@@ -46,10 +40,10 @@ will be forwarded."""
 local_topic.label:
 """Local Topic"""
 
-payload.desc:
-"""The payload to be forwarded to the HStreamDB. Placeholders supported."""
+record_template.desc:
+"""The HStream Record template to be forwarded to HStreamDB. Placeholders supported."""
 
-payload.label:
-"""Payload"""
+record_template.label:
+"""HStream Record"""
 
 }
@@ -6,16 +6,34 @@ config.desc:
 config.label:
 """Connection config"""
 
+type.desc:
+"""The Connector Type."""
+
+type.label:
+"""Connector Type"""
+
 name.desc:
 """Connector name, used as a human-readable description of the connector."""
 
 name.label:
 """Connector Name"""
 
-ordering_key.desc:
+url.desc:
+"""HStreamDB Server URL"""
+
+url.label:
+"""HStreamDB Server URL"""
+
+stream_name.desc:
+"""HStreamDB Stream Name"""
+
+stream_name.label:
+"""HStreamDB Stream Name"""
+
+partition_key.desc:
 """HStreamDB Ordering Key"""
 
-ordering_key.label:
+partition_key.label:
 """HStreamDB Ordering Key"""
 
 pool_size.desc:
@@ -24,22 +42,10 @@ pool_size.desc:
 pool_size.label:
 """HStreamDB Pool Size"""
 
-stream_name.desc:
-"""HStreamDB Stream Name"""
+grpc_timeout.desc:
+"""HStreamDB gRPC Timeout"""
 
-stream_name.label:
-"""HStreamDB Stream Name"""
+grpc_timeout.label:
+"""HStreamDB gRPC Timeout"""
 
-type.desc:
-"""The Connector Type."""
-
-type.label:
-"""Connector Type"""
-
-url.desc:
-"""HStreamDB Server URL"""
-
-url.label:
-"""HStreamDB Server URL"""
-
 }
@@ -270,6 +270,10 @@ hstream
 hstreamDB
 hstream
 hstreamdb
+hrecord
+hRecord
+Hrecord
+HRecord
 SASL
 GSSAPI
 keytab