Merge branch 'master' into sync-release-50-to-master

This commit is contained in:
zhongwencool 2023-04-26 10:54:46 +08:00 committed by GitHub
commit 9d893b49eb
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
496 changed files with 24465 additions and 16504 deletions

View File

@ -7,6 +7,7 @@ INFLUXDB_TAG=2.5.0
TDENGINE_TAG=3.0.2.4 TDENGINE_TAG=3.0.2.4
DYNAMO_TAG=1.21.0 DYNAMO_TAG=1.21.0
CASSANDRA_TAG=3.11.6 CASSANDRA_TAG=3.11.6
OPENTS_TAG=9aa7f88
MS_IMAGE_ADDR=mcr.microsoft.com/mssql/server MS_IMAGE_ADDR=mcr.microsoft.com/mssql/server
SQLSERVER_TAG=2019-CU19-ubuntu-20.04 SQLSERVER_TAG=2019-CU19-ubuntu-20.04

View File

@ -0,0 +1,9 @@
version: '3.9'
services:
opents_server:
container_name: opents
image: petergrace/opentsdb-docker:${OPENTS_TAG}
restart: always
networks:
- emqx_bridge

View File

@ -0,0 +1,32 @@
version: '3'
services:
pulsar:
container_name: pulsar
image: apachepulsar/pulsar:2.11.0
# ports:
# - 6650:6650
# - 8080:8080
networks:
emqx_bridge:
volumes:
- ../../apps/emqx/etc/certs/cert.pem:/etc/certs/server.pem
- ../../apps/emqx/etc/certs/key.pem:/etc/certs/key.pem
- ../../apps/emqx/etc/certs/cacert.pem:/etc/certs/ca.pem
restart: always
command:
- bash
- "-c"
- |
sed -i 's/^advertisedAddress=/#advertisedAddress=/' conf/standalone.conf
sed -ie 's/^brokerServicePort=.*/brokerServicePort=6649/' conf/standalone.conf
sed -i 's/^bindAddress=/#bindAddress=/' conf/standalone.conf
sed -i 's#^bindAddresses=#bindAddresses=plain:pulsar://0.0.0.0:6650,ssl:pulsar+ssl://0.0.0.0:6651,toxiproxy:pulsar://0.0.0.0:6652,toxiproxy_ssl:pulsar+ssl://0.0.0.0:6653#' conf/standalone.conf
sed -i 's#^advertisedAddress=#advertisedAddress=plain:pulsar://pulsar:6650,ssl:pulsar+ssl://pulsar:6651,toxiproxy:pulsar://toxiproxy:6652,toxiproxy_ssl:pulsar+ssl://toxiproxy:6653#' conf/standalone.conf
sed -i 's#^tlsCertificateFilePath=#tlsCertificateFilePath=/etc/certs/server.pem#' conf/standalone.conf
sed -i 's#^tlsTrustCertsFilePath=#tlsTrustCertsFilePath=/etc/certs/ca.pem#' conf/standalone.conf
sed -i 's#^tlsKeyFilePath=#tlsKeyFilePath=/etc/certs/key.pem#' conf/standalone.conf
sed -i 's#^tlsProtocols=#tlsProtocols=TLSv1.3,TLSv1.2#' conf/standalone.conf
sed -i 's#^tlsCiphers=#tlsCiphers=TLS_AES_256_GCM_SHA384#' conf/standalone.conf
echo 'advertisedListeners=plain:pulsar://pulsar:6650,ssl:pulsar+ssl://pulsar:6651,toxiproxy:pulsar://toxiproxy:6652,toxiproxy_ssl:pulsar+ssl://toxiproxy:6653' >> conf/standalone.conf
bin/pulsar standalone -nfw -nss

View File

@ -26,6 +26,7 @@ services:
- 19876:9876 - 19876:9876
- 19042:9042 - 19042:9042
- 19142:9142 - 19142:9142
- 14242:4242
command: command:
- "-host=0.0.0.0" - "-host=0.0.0.0"
- "-config=/config/toxiproxy.json" - "-config=/config/toxiproxy.json"

View File

@ -101,5 +101,23 @@
"listen": "0.0.0.0:1433", "listen": "0.0.0.0:1433",
"upstream": "sqlserver:1433", "upstream": "sqlserver:1433",
"enabled": true "enabled": true
},
{
"name": "opents",
"listen": "0.0.0.0:4242",
"upstream": "opents:4242",
"enabled": true
},
{
"name": "pulsar_plain",
"listen": "0.0.0.0:6652",
"upstream": "pulsar:6652",
"enabled": true
},
{
"name": "pulsar_tls",
"listen": "0.0.0.0:6653",
"upstream": "pulsar:6653",
"enabled": true
} }
] ]

View File

@ -168,6 +168,7 @@ jobs:
REDIS_TAG: "7.0" REDIS_TAG: "7.0"
INFLUXDB_TAG: "2.5.0" INFLUXDB_TAG: "2.5.0"
TDENGINE_TAG: "3.0.2.4" TDENGINE_TAG: "3.0.2.4"
OPENTS_TAG: "9aa7f88"
PROFILE: ${{ matrix.profile }} PROFILE: ${{ matrix.profile }}
CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }} CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }}
run: ./scripts/ct/run.sh --ci --app ${{ matrix.app }} run: ./scripts/ct/run.sh --ci --app ${{ matrix.app }}

View File

@ -1,7 +1,7 @@
Source code in this repository is variously licensed under below licenses. Source code in this repository is variously licensed under below licenses.
For EMQX: Apache License 2.0, see APL.txt, For Default: Apache License 2.0, see APL.txt,
which applies to all source files except for lib-ee sub-directory. which applies to all source files except for folders applied with Business Source License.
For EMQX Enterprise (since version 5.0): Business Source License 1.1, For EMQX Enterprise (since version 5.0): Business Source License 1.1,
see lib-ee/BSL.txt, which applies to source code in lib-ee sub-directory. see apps/emqx_bridge_kafka/BSL.txt as an example, please check license files under sub directory of apps.

View File

@ -6,8 +6,9 @@ export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-24.3.4.2-2
export EMQX_DEFAULT_RUNNER = debian:11-slim export EMQX_DEFAULT_RUNNER = debian:11-slim
export OTP_VSN ?= $(shell $(CURDIR)/scripts/get-otp-vsn.sh) export OTP_VSN ?= $(shell $(CURDIR)/scripts/get-otp-vsn.sh)
export ELIXIR_VSN ?= $(shell $(CURDIR)/scripts/get-elixir-vsn.sh) export ELIXIR_VSN ?= $(shell $(CURDIR)/scripts/get-elixir-vsn.sh)
export EMQX_DASHBOARD_VERSION ?= v1.2.1 export EMQX_DASHBOARD_VERSION ?= v1.2.3
export EMQX_EE_DASHBOARD_VERSION ?= e1.0.6-beta.2 export EMQX_EE_DASHBOARD_VERSION ?= e1.0.6-beta.2
export EMQX_REL_FORM ?= tgz export EMQX_REL_FORM ?= tgz
export QUICER_DOWNLOAD_FROM_RELEASE = 1 export QUICER_DOWNLOAD_FROM_RELEASE = 1
ifeq ($(OS),Windows_NT) ifeq ($(OS),Windows_NT)
@ -89,12 +90,17 @@ APPS=$(shell $(SCRIPTS)/find-apps.sh)
.PHONY: $(APPS:%=%-ct) .PHONY: $(APPS:%=%-ct)
define gen-app-ct-target define gen-app-ct-target
$1-ct: $(REBAR) $1-ct: $(REBAR)
@$(SCRIPTS)/pre-compile.sh $(PROFILE) $(eval SUITES := $(shell $(SCRIPTS)/find-suites.sh $1))
@ENABLE_COVER_COMPILE=1 $(REBAR) ct -c -v \ ifneq ($(SUITES),)
--readable=$(CT_READABLE) \ @$(SCRIPTS)/pre-compile.sh $(PROFILE)
--name $(CT_NODE_NAME) \ @ENABLE_COVER_COMPILE=1 $(REBAR) ct -c -v \
--cover_export_name $(CT_COVER_EXPORT_PREFIX)-$(subst /,-,$1) \ --readable=$(CT_READABLE) \
--suite $(shell $(SCRIPTS)/find-suites.sh $1) --name $(CT_NODE_NAME) \
--cover_export_name $(CT_COVER_EXPORT_PREFIX)-$(subst /,-,$1) \
--suite $(SUITES)
else
@echo 'No suites found for $1'
endif
endef endef
$(foreach app,$(APPS),$(eval $(call gen-app-ct-target,$(app)))) $(foreach app,$(APPS),$(eval $(call gen-app-ct-target,$(app))))
@ -239,7 +245,6 @@ $(foreach zt,$(ALL_DOCKERS),$(eval $(call gen-docker-target,$(zt))))
.PHONY: .PHONY:
merge-config: merge-config:
@$(SCRIPTS)/merge-config.escript @$(SCRIPTS)/merge-config.escript
@$(SCRIPTS)/merge-i18n.escript
## elixir target is to create release packages using Elixir's Mix ## elixir target is to create release packages using Elixir's Mix
.PHONY: $(REL_PROFILES:%=%-elixir) $(PKG_PROFILES:%=%-elixir) .PHONY: $(REL_PROFILES:%=%-elixir) $(PKG_PROFILES:%=%-elixir)

View File

@ -1,4 +1,4 @@
%% This additional config file is used when the config 'cluster.proto_dis' in emqx.conf is set to 'inet_tls'. %% This additional config file is used when the config 'cluster.proto_dist' in emqx.conf is set to 'inet_tls'.
%% Which means the EMQX nodes will connect to each other over TLS. %% Which means the EMQX nodes will connect to each other over TLS.
%% For more information about inter-broker security, see: https://docs.emqx.com/en/enterprise/v5.0/deploy/cluster/security.html %% For more information about inter-broker security, see: https://docs.emqx.com/en/enterprise/v5.0/deploy/cluster/security.html

View File

@ -32,7 +32,7 @@
%% `apps/emqx/src/bpapi/README.md' %% `apps/emqx/src/bpapi/README.md'
%% Community edition %% Community edition
-define(EMQX_RELEASE_CE, "5.0.22"). -define(EMQX_RELEASE_CE, "5.0.23").
%% Enterprise edition %% Enterprise edition
-define(EMQX_RELEASE_EE, "5.0.3-alpha.3"). -define(EMQX_RELEASE_EE, "5.0.3-alpha.3").

View File

@ -57,16 +57,16 @@
-define(ERROR_CODES, [ -define(ERROR_CODES, [
{?BAD_USERNAME_OR_PWD, <<"Bad username or password">>}, {?BAD_USERNAME_OR_PWD, <<"Bad username or password">>},
{?BAD_API_KEY_OR_SECRET, <<"Bad API key or secret">>}, {?BAD_API_KEY_OR_SECRET, <<"Bad API key or secret">>},
{'BAD_REQUEST', <<"Request parameters are not legal">>}, {'BAD_REQUEST', <<"Request parameters are invalid">>},
{'NOT_MATCH', <<"Conditions are not matched">>}, {'NOT_MATCH', <<"Conditions are not matched">>},
{'ALREADY_EXISTS', <<"Resource already existed">>}, {'ALREADY_EXISTS', <<"Resource already existed">>},
{'BAD_CONFIG_SCHEMA', <<"Configuration data is not legal">>}, {'BAD_CONFIG_SCHEMA', <<"Configuration data is invalid">>},
{'BAD_LISTENER_ID', <<"Bad listener ID">>}, {'BAD_LISTENER_ID', <<"Bad listener ID">>},
{'BAD_NODE_NAME', <<"Bad Node Name">>}, {'BAD_NODE_NAME', <<"Bad Node Name">>},
{'BAD_RPC', <<"RPC Failed. Check the cluster status and the requested node status">>}, {'BAD_RPC', <<"RPC Failed. Check the cluster status and the requested node status">>},
{'BAD_TOPIC', <<"Topic syntax error, Topic needs to comply with the MQTT protocol standard">>}, {'BAD_TOPIC', <<"Topic syntax error, Topic needs to comply with the MQTT protocol standard">>},
{'EXCEED_LIMIT', <<"Create resources that exceed the maximum limit or minimum limit">>}, {'EXCEED_LIMIT', <<"Create resources that exceed the maximum limit or minimum limit">>},
{'INVALID_PARAMETER', <<"Request parameters is not legal and exceeds the boundary value">>}, {'INVALID_PARAMETER', <<"Request parameters is invalid and exceeds the boundary value">>},
{'CONFLICT', <<"Conflicting request resources">>}, {'CONFLICT', <<"Conflicting request resources">>},
{'NO_DEFAULT_VALUE', <<"Request parameters do not use default values">>}, {'NO_DEFAULT_VALUE', <<"Request parameters do not use default values">>},
{'DEPENDENCY_EXISTS', <<"Resource is dependent by another resource">>}, {'DEPENDENCY_EXISTS', <<"Resource is dependent by another resource">>},

View File

@ -27,9 +27,9 @@
{gproc, {git, "https://github.com/uwiger/gproc", {tag, "0.8.0"}}}, {gproc, {git, "https://github.com/uwiger/gproc", {tag, "0.8.0"}}},
{cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}}, {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}},
{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.6"}}}, {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.6"}}},
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.14.6"}}}, {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.0"}}},
{gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}}, {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}},
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.38.1"}}}, {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.2"}}},
{emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.2"}}}, {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.2"}}},
{pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}}, {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},
{recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}}, {recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}},

View File

@ -3,7 +3,7 @@
{id, "emqx"}, {id, "emqx"},
{description, "EMQX Core"}, {description, "EMQX Core"},
% strict semver, bump manually! % strict semver, bump manually!
{vsn, "5.0.23"}, {vsn, "5.0.24"},
{modules, []}, {modules, []},
{registered, []}, {registered, []},
{applications, [ {applications, [

View File

@ -30,6 +30,12 @@
stop/0 stop/0
]). ]).
%% Cluster API
-export([
cluster_nodes/1,
running_nodes/0
]).
%% PubSub API %% PubSub API
-export([ -export([
subscribe/1, subscribe/1,
@ -102,6 +108,18 @@ is_running() ->
_ -> true _ -> true
end. end.
%%--------------------------------------------------------------------
%% Cluster API
%%--------------------------------------------------------------------
-spec running_nodes() -> [node()].
running_nodes() ->
mria:running_nodes().
-spec cluster_nodes(all | running | cores | stopped) -> [node()].
cluster_nodes(Type) ->
mria:cluster_nodes(Type).
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% PubSub API %% PubSub API
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------

View File

@ -42,7 +42,9 @@
get_alarms/0, get_alarms/0,
get_alarms/1, get_alarms/1,
format/1, format/1,
format/2 format/2,
safe_activate/3,
safe_deactivate/1
]). ]).
%% gen_server callbacks %% gen_server callbacks
@ -57,7 +59,6 @@
%% Internal exports (RPC) %% Internal exports (RPC)
-export([ -export([
create_activate_alarm/3,
do_get_alarms/0 do_get_alarms/0
]). ]).
@ -123,6 +124,9 @@ activate(Name, Details) ->
activate(Name, Details, Message) -> activate(Name, Details, Message) ->
gen_server:call(?MODULE, {activate_alarm, Name, Details, Message}). gen_server:call(?MODULE, {activate_alarm, Name, Details, Message}).
safe_activate(Name, Details, Message) ->
safe_call({activate_alarm, Name, Details, Message}).
-spec ensure_deactivated(binary() | atom()) -> ok. -spec ensure_deactivated(binary() | atom()) -> ok.
ensure_deactivated(Name) -> ensure_deactivated(Name) ->
ensure_deactivated(Name, no_details). ensure_deactivated(Name, no_details).
@ -155,6 +159,9 @@ deactivate(Name, Details) ->
deactivate(Name, Details, Message) -> deactivate(Name, Details, Message) ->
gen_server:call(?MODULE, {deactivate_alarm, Name, Details, Message}). gen_server:call(?MODULE, {deactivate_alarm, Name, Details, Message}).
safe_deactivate(Name) ->
safe_call({deactivate_alarm, Name, no_details, <<"">>}).
-spec delete_all_deactivated_alarms() -> ok. -spec delete_all_deactivated_alarms() -> ok.
delete_all_deactivated_alarms() -> delete_all_deactivated_alarms() ->
gen_server:call(?MODULE, delete_all_deactivated_alarms). gen_server:call(?MODULE, delete_all_deactivated_alarms).
@ -218,17 +225,12 @@ init([]) ->
{ok, #{}, get_validity_period()}. {ok, #{}, get_validity_period()}.
handle_call({activate_alarm, Name, Details, Message}, _From, State) -> handle_call({activate_alarm, Name, Details, Message}, _From, State) ->
Res = mria:transaction( case create_activate_alarm(Name, Details, Message) of
mria:local_content_shard(), {ok, Alarm} ->
fun ?MODULE:create_activate_alarm/3,
[Name, Details, Message]
),
case Res of
{atomic, Alarm} ->
do_actions(activate, Alarm, emqx:get_config([alarm, actions])), do_actions(activate, Alarm, emqx:get_config([alarm, actions])),
{reply, ok, State, get_validity_period()}; {reply, ok, State, get_validity_period()};
{aborted, Reason} -> Err ->
{reply, Reason, State, get_validity_period()} {reply, Err, State, get_validity_period()}
end; end;
handle_call({deactivate_alarm, Name, Details, Message}, _From, State) -> handle_call({deactivate_alarm, Name, Details, Message}, _From, State) ->
case mnesia:dirty_read(?ACTIVATED_ALARM, Name) of case mnesia:dirty_read(?ACTIVATED_ALARM, Name) of
@ -283,9 +285,9 @@ get_validity_period() ->
emqx:get_config([alarm, validity_period]). emqx:get_config([alarm, validity_period]).
create_activate_alarm(Name, Details, Message) -> create_activate_alarm(Name, Details, Message) ->
case mnesia:read(?ACTIVATED_ALARM, Name) of case mnesia:dirty_read(?ACTIVATED_ALARM, Name) of
[#activated_alarm{name = Name}] -> [#activated_alarm{name = Name}] ->
mnesia:abort({error, already_existed}); {error, already_existed};
[] -> [] ->
Alarm = #activated_alarm{ Alarm = #activated_alarm{
name = Name, name = Name,
@ -293,8 +295,8 @@ create_activate_alarm(Name, Details, Message) ->
message = normalize_message(Name, iolist_to_binary(Message)), message = normalize_message(Name, iolist_to_binary(Message)),
activate_at = erlang:system_time(microsecond) activate_at = erlang:system_time(microsecond)
}, },
ok = mnesia:write(?ACTIVATED_ALARM, Alarm, write), ok = mria:dirty_write(?ACTIVATED_ALARM, Alarm),
Alarm {ok, Alarm}
end. end.
do_get_alarms() -> do_get_alarms() ->
@ -474,3 +476,19 @@ normalize_message(Name, <<"">>) ->
list_to_binary(io_lib:format("~p", [Name])); list_to_binary(io_lib:format("~p", [Name]));
normalize_message(_Name, Message) -> normalize_message(_Name, Message) ->
Message. Message.
safe_call(Req) ->
try
gen_server:call(?MODULE, Req)
catch
_:{timeout, _} = Reason ->
?SLOG(warning, #{msg => "emqx_alarm_safe_call_timeout", reason => Reason}),
{error, timeout};
_:Reason:St ->
?SLOG(error, #{
msg => "emqx_alarm_safe_call_exception",
reason => Reason,
stacktrace => St
}),
{error, Reason}
end.

View File

@ -89,7 +89,7 @@
%% Authentication Data Cache %% Authentication Data Cache
auth_cache :: maybe(map()), auth_cache :: maybe(map()),
%% Quota checkers %% Quota checkers
quota :: maybe(emqx_limiter_container:limiter()), quota :: emqx_limiter_container:limiter(),
%% Timers %% Timers
timers :: #{atom() => disabled | maybe(reference())}, timers :: #{atom() => disabled | maybe(reference())},
%% Conn State %% Conn State
@ -760,7 +760,7 @@ do_publish(
handle_out(disconnect, RC, Channel) handle_out(disconnect, RC, Channel)
end. end.
ensure_quota(_, Channel = #channel{quota = undefined}) -> ensure_quota(_, Channel = #channel{quota = infinity}) ->
Channel; Channel;
ensure_quota(PubRes, Channel = #channel{quota = Limiter}) -> ensure_quota(PubRes, Channel = #channel{quota = Limiter}) ->
Cnt = lists:foldl( Cnt = lists:foldl(

View File

@ -111,7 +111,7 @@
listener :: {Type :: atom(), Name :: atom()}, listener :: {Type :: atom(), Name :: atom()},
%% Limiter %% Limiter
limiter :: maybe(limiter()), limiter :: limiter(),
%% limiter buffer for overload use %% limiter buffer for overload use
limiter_buffer :: queue:queue(pending_req()), limiter_buffer :: queue:queue(pending_req()),
@ -182,10 +182,8 @@
-define(ALARM_SOCK_STATS_KEYS, [send_pend, recv_cnt, recv_oct, send_cnt, send_oct]). -define(ALARM_SOCK_STATS_KEYS, [send_pend, recv_cnt, recv_oct, send_cnt, send_oct]).
-define(ALARM_SOCK_OPTS_KEYS, [high_watermark, high_msgq_watermark, sndbuf, recbuf, buffer]). -define(ALARM_SOCK_OPTS_KEYS, [high_watermark, high_msgq_watermark, sndbuf, recbuf, buffer]).
%% use macro to do compile time limiter's type check -define(LIMITER_BYTES_IN, bytes).
-define(LIMITER_BYTES_IN, bytes_in). -define(LIMITER_MESSAGE_IN, messages).
-define(LIMITER_MESSAGE_IN, message_in).
-define(EMPTY_QUEUE, {[], []}).
-dialyzer({no_match, [info/2]}). -dialyzer({no_match, [info/2]}).
-dialyzer( -dialyzer(
@ -976,55 +974,61 @@ handle_cast(Req, State) ->
list(any()), list(any()),
state() state()
) -> _. ) -> _.
check_limiter(
_Needs,
Data,
WhenOk,
Msgs,
#state{limiter = infinity} = State
) ->
WhenOk(Data, Msgs, State);
check_limiter( check_limiter(
Needs, Needs,
Data, Data,
WhenOk, WhenOk,
Msgs, Msgs,
#state{ #state{limiter_timer = undefined, limiter = Limiter} = State
limiter = Limiter, ) ->
limiter_timer = LimiterTimer, case emqx_limiter_container:check_list(Needs, Limiter) of
limiter_buffer = Cache {ok, Limiter2} ->
} = State WhenOk(Data, Msgs, State#state{limiter = Limiter2});
) when Limiter =/= undefined -> {pause, Time, Limiter2} ->
case LimiterTimer of ?SLOG(debug, #{
undefined -> msg => "pause_time_dueto_rate_limit",
case emqx_limiter_container:check_list(Needs, Limiter) of needs => Needs,
{ok, Limiter2} -> time_in_ms => Time
WhenOk(Data, Msgs, State#state{limiter = Limiter2}); }),
{pause, Time, Limiter2} ->
?SLOG(debug, #{
msg => "pause_time_dueto_rate_limit",
needs => Needs,
time_in_ms => Time
}),
Retry = #retry{ Retry = #retry{
types = [Type || {_, Type} <- Needs], types = [Type || {_, Type} <- Needs],
data = Data, data = Data,
next = WhenOk next = WhenOk
}, },
Limiter3 = emqx_limiter_container:set_retry_context(Retry, Limiter2), Limiter3 = emqx_limiter_container:set_retry_context(Retry, Limiter2),
TRef = start_timer(Time, limit_timeout), TRef = start_timer(Time, limit_timeout),
{ok, State#state{ {ok, State#state{
limiter = Limiter3, limiter = Limiter3,
limiter_timer = TRef limiter_timer = TRef
}}; }};
{drop, Limiter2} -> {drop, Limiter2} ->
{ok, State#state{limiter = Limiter2}} {ok, State#state{limiter = Limiter2}}
end;
_ ->
%% if there has a retry timer,
%% cache the operation and execute it after the retry is over
%% the maximum length of the cache queue is equal to the active_n
New = #pending_req{need = Needs, data = Data, next = WhenOk},
{ok, State#state{limiter_buffer = queue:in(New, Cache)}}
end; end;
check_limiter(_, Data, WhenOk, Msgs, State) -> check_limiter(
WhenOk(Data, Msgs, State). Needs,
Data,
WhenOk,
_Msgs,
#state{limiter_buffer = Cache} = State
) ->
%% if there has a retry timer,
%% cache the operation and execute it after the retry is over
%% the maximum length of the cache queue is equal to the active_n
New = #pending_req{need = Needs, data = Data, next = WhenOk},
{ok, State#state{limiter_buffer = queue:in(New, Cache)}}.
%% try to perform a retry %% try to perform a retry
-spec retry_limiter(state()) -> _. -spec retry_limiter(state()) -> _.

View File

@ -139,7 +139,8 @@ make_token_bucket_limiter(Cfg, Bucket) ->
Cfg#{ Cfg#{
tokens => emqx_limiter_server:get_initial_val(Cfg), tokens => emqx_limiter_server:get_initial_val(Cfg),
lasttime => ?NOW, lasttime => ?NOW,
bucket => Bucket bucket => Bucket,
capacity => emqx_limiter_schema:calc_capacity(Cfg)
}. }.
%%@doc create a limiter server's reference %%@doc create a limiter server's reference

View File

@ -23,6 +23,7 @@
%% API %% API
-export([ -export([
new/3, new/3,
infinity_bucket/0,
check/3, check/3,
try_restore/2, try_restore/2,
available/1 available/1
@ -58,6 +59,10 @@ new(Counter, Index, Rate) ->
rate => Rate rate => Rate
}. }.
-spec infinity_bucket() -> bucket_ref().
infinity_bucket() ->
infinity.
%% @doc check tokens %% @doc check tokens
-spec check(pos_integer(), bucket_ref(), Disivisble :: boolean()) -> -spec check(pos_integer(), bucket_ref(), Disivisble :: boolean()) ->
HasToken :: HasToken ::

View File

@ -34,16 +34,18 @@
-export_type([container/0, check_result/0]). -export_type([container/0, check_result/0]).
-type container() :: #{ -type container() ::
limiter_type() => undefined | limiter(), infinity
%% the retry context of the limiter | #{
retry_key() => limiter_type() => undefined | limiter(),
undefined %% the retry context of the limiter
| retry_context() retry_key() =>
| future(), undefined
%% the retry context of the container | retry_context()
retry_ctx := undefined | any() | future(),
}. %% the retry context of the container
retry_ctx := undefined | any()
}.
-type future() :: pos_integer(). -type future() :: pos_integer().
-type limiter_id() :: emqx_limiter_schema:limiter_id(). -type limiter_id() :: emqx_limiter_schema:limiter_id().
@ -78,7 +80,20 @@ get_limiter_by_types(Id, Types, BucketCfgs) ->
{ok, Limiter} = emqx_limiter_server:connect(Id, Type, BucketCfgs), {ok, Limiter} = emqx_limiter_server:connect(Id, Type, BucketCfgs),
add_new(Type, Limiter, Acc) add_new(Type, Limiter, Acc)
end, end,
lists:foldl(Init, #{retry_ctx => undefined}, Types). Container = lists:foldl(Init, #{retry_ctx => undefined}, Types),
case
lists:all(
fun(Type) ->
maps:get(Type, Container) =:= infinity
end,
Types
)
of
true ->
infinity;
_ ->
Container
end.
-spec add_new(limiter_type(), limiter(), container()) -> container(). -spec add_new(limiter_type(), limiter(), container()) -> container().
add_new(Type, Limiter, Container) -> add_new(Type, Limiter, Container) ->
@ -89,11 +104,15 @@ add_new(Type, Limiter, Container) ->
%% @doc check the specified limiter %% @doc check the specified limiter
-spec check(pos_integer(), limiter_type(), container()) -> check_result(). -spec check(pos_integer(), limiter_type(), container()) -> check_result().
check(_Need, _Type, infinity) ->
{ok, infinity};
check(Need, Type, Container) -> check(Need, Type, Container) ->
check_list([{Need, Type}], Container). check_list([{Need, Type}], Container).
%% @doc check multiple limiters %% @doc check multiple limiters
-spec check_list(list({pos_integer(), limiter_type()}), container()) -> check_result(). -spec check_list(list({pos_integer(), limiter_type()}), container()) -> check_result().
check_list(_Need, infinity) ->
{ok, infinity};
check_list([{Need, Type} | T], Container) -> check_list([{Need, Type} | T], Container) ->
Limiter = maps:get(Type, Container), Limiter = maps:get(Type, Container),
case emqx_htb_limiter:check(Need, Limiter) of case emqx_htb_limiter:check(Need, Limiter) of
@ -121,11 +140,15 @@ check_list([], Container) ->
%% @doc retry the specified limiter %% @doc retry the specified limiter
-spec retry(limiter_type(), container()) -> check_result(). -spec retry(limiter_type(), container()) -> check_result().
retry(_Type, infinity) ->
{ok, infinity};
retry(Type, Container) -> retry(Type, Container) ->
retry_list([Type], Container). retry_list([Type], Container).
%% @doc retry multiple limiters %% @doc retry multiple limiters
-spec retry_list(list(limiter_type()), container()) -> check_result(). -spec retry_list(list(limiter_type()), container()) -> check_result().
retry_list(_Types, infinity) ->
{ok, infinity};
retry_list([Type | T], Container) -> retry_list([Type | T], Container) ->
Key = ?RETRY_KEY(Type), Key = ?RETRY_KEY(Type),
case Container of case Container of

View File

@ -24,6 +24,7 @@
fields/1, fields/1,
to_rate/1, to_rate/1,
to_capacity/1, to_capacity/1,
to_burst/1,
default_period/0, default_period/0,
to_burst_rate/1, to_burst_rate/1,
to_initial/1, to_initial/1,
@ -31,20 +32,20 @@
get_bucket_cfg_path/2, get_bucket_cfg_path/2,
desc/1, desc/1,
types/0, types/0,
infinity_value/0 calc_capacity/1
]). ]).
-define(KILOBYTE, 1024). -define(KILOBYTE, 1024).
-define(BUCKET_KEYS, [ -define(LISTENER_BUCKET_KEYS, [
{bytes_in, bucket_infinity}, bytes,
{message_in, bucket_infinity}, messages,
{connection, bucket_limit}, connection,
{message_routing, bucket_infinity} message_routing
]). ]).
-type limiter_type() :: -type limiter_type() ::
bytes_in bytes
| message_in | messages
| connection | connection
| message_routing | message_routing
%% internal limiter for unclassified resources %% internal limiter for unclassified resources
@ -54,8 +55,10 @@
-type bucket_name() :: atom(). -type bucket_name() :: atom().
-type rate() :: infinity | float(). -type rate() :: infinity | float().
-type burst_rate() :: 0 | float(). -type burst_rate() :: 0 | float().
%% this is a compatible type for the deprecated field and type `capacity`.
-type burst() :: burst_rate().
%% the capacity of the token bucket %% the capacity of the token bucket
-type capacity() :: non_neg_integer(). %%-type capacity() :: non_neg_integer().
%% initial capacity of the token bucket %% initial capacity of the token bucket
-type initial() :: non_neg_integer(). -type initial() :: non_neg_integer().
-type bucket_path() :: list(atom()). -type bucket_path() :: list(atom()).
@ -72,13 +75,13 @@
-typerefl_from_string({rate/0, ?MODULE, to_rate}). -typerefl_from_string({rate/0, ?MODULE, to_rate}).
-typerefl_from_string({burst_rate/0, ?MODULE, to_burst_rate}). -typerefl_from_string({burst_rate/0, ?MODULE, to_burst_rate}).
-typerefl_from_string({capacity/0, ?MODULE, to_capacity}). -typerefl_from_string({burst/0, ?MODULE, to_burst}).
-typerefl_from_string({initial/0, ?MODULE, to_initial}). -typerefl_from_string({initial/0, ?MODULE, to_initial}).
-reflect_type([ -reflect_type([
rate/0, rate/0,
burst_rate/0, burst_rate/0,
capacity/0, burst/0,
initial/0, initial/0,
failure_strategy/0, failure_strategy/0,
bucket_name/0 bucket_name/0
@ -90,14 +93,17 @@
namespace() -> limiter. namespace() -> limiter.
roots() -> [limiter]. roots() ->
[{limiter, hoconsc:mk(hoconsc:ref(?MODULE, limiter), #{importance => ?IMPORTANCE_HIDDEN})}].
fields(limiter) -> fields(limiter) ->
[ [
{Type, {Type,
?HOCON(?R_REF(node_opts), #{ ?HOCON(?R_REF(node_opts), #{
desc => ?DESC(Type), desc => ?DESC(Type),
default => #{} default => #{},
importance => ?IMPORTANCE_HIDDEN,
aliases => alias_of_type(Type)
})} })}
|| Type <- types() || Type <- types()
] ++ ] ++
@ -107,6 +113,7 @@ fields(limiter) ->
?R_REF(client_fields), ?R_REF(client_fields),
#{ #{
desc => ?DESC(client), desc => ?DESC(client),
importance => ?IMPORTANCE_HIDDEN,
default => maps:from_list([ default => maps:from_list([
{erlang:atom_to_binary(Type), #{}} {erlang:atom_to_binary(Type), #{}}
|| Type <- types() || Type <- types()
@ -124,30 +131,18 @@ fields(node_opts) ->
})} })}
]; ];
fields(client_fields) -> fields(client_fields) ->
[ client_fields(types(), #{default => #{}});
{Type, fields(bucket_opts) ->
?HOCON(?R_REF(client_opts), #{ fields_of_bucket(<<"infinity">>);
desc => ?DESC(Type),
default => #{}
})}
|| Type <- types()
];
fields(bucket_infinity) ->
[
{rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => <<"infinity">>})},
{capacity, ?HOCON(capacity(), #{desc => ?DESC(capacity), default => <<"infinity">>})},
{initial, ?HOCON(initial(), #{default => <<"0">>, desc => ?DESC(initial)})}
];
fields(bucket_limit) ->
[
{rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => <<"1000/s">>})},
{capacity, ?HOCON(capacity(), #{desc => ?DESC(capacity), default => <<"1000">>})},
{initial, ?HOCON(initial(), #{default => <<"0">>, desc => ?DESC(initial)})}
];
fields(client_opts) -> fields(client_opts) ->
[ [
{rate, ?HOCON(rate(), #{default => <<"infinity">>, desc => ?DESC(rate)})}, {rate, ?HOCON(rate(), #{default => <<"infinity">>, desc => ?DESC(rate)})},
{initial, ?HOCON(initial(), #{default => <<"0">>, desc => ?DESC(initial)})}, {initial,
?HOCON(initial(), #{
default => <<"0">>,
desc => ?DESC(initial),
importance => ?IMPORTANCE_HIDDEN
})},
%% low_watermark add for emqx_channel and emqx_session %% low_watermark add for emqx_channel and emqx_session
%% both modules consume first and then check %% both modules consume first and then check
%% so we need to use this value to prevent excessive consumption %% so we need to use this value to prevent excessive consumption
@ -157,20 +152,24 @@ fields(client_opts) ->
initial(), initial(),
#{ #{
desc => ?DESC(low_watermark), desc => ?DESC(low_watermark),
default => <<"0">> default => <<"0">>,
importance => ?IMPORTANCE_HIDDEN
} }
)}, )},
{capacity, {burst,
?HOCON(capacity(), #{ ?HOCON(burst(), #{
desc => ?DESC(client_bucket_capacity), desc => ?DESC(burst),
default => <<"infinity">> default => <<"0">>,
importance => ?IMPORTANCE_HIDDEN,
aliases => [capacity]
})}, })},
{divisible, {divisible,
?HOCON( ?HOCON(
boolean(), boolean(),
#{ #{
desc => ?DESC(divisible), desc => ?DESC(divisible),
default => false default => false,
importance => ?IMPORTANCE_HIDDEN
} }
)}, )},
{max_retry_time, {max_retry_time,
@ -178,7 +177,8 @@ fields(client_opts) ->
emqx_schema:duration(), emqx_schema:duration(),
#{ #{
desc => ?DESC(max_retry_time), desc => ?DESC(max_retry_time),
default => <<"10s">> default => <<"10s">>,
importance => ?IMPORTANCE_HIDDEN
} }
)}, )},
{failure_strategy, {failure_strategy,
@ -186,25 +186,24 @@ fields(client_opts) ->
failure_strategy(), failure_strategy(),
#{ #{
desc => ?DESC(failure_strategy), desc => ?DESC(failure_strategy),
default => force default => force,
importance => ?IMPORTANCE_HIDDEN
} }
)} )}
]; ];
fields(listener_fields) -> fields(listener_fields) ->
bucket_fields(?BUCKET_KEYS, listener_client_fields); composite_bucket_fields(?LISTENER_BUCKET_KEYS, listener_client_fields);
fields(listener_client_fields) -> fields(listener_client_fields) ->
client_fields(?BUCKET_KEYS); client_fields(?LISTENER_BUCKET_KEYS, #{required => false});
fields(Type) -> fields(Type) ->
bucket_field(Type). simple_bucket_field(Type).
desc(limiter) -> desc(limiter) ->
"Settings for the rate limiter."; "Settings for the rate limiter.";
desc(node_opts) -> desc(node_opts) ->
"Settings for the limiter of the node level."; "Settings for the limiter of the node level.";
desc(bucket_infinity) -> desc(bucket_opts) ->
"Settings for the bucket."; "Settings for the bucket.";
desc(bucket_limit) ->
desc(bucket_infinity);
desc(client_opts) -> desc(client_opts) ->
"Settings for the client in bucket level."; "Settings for the client in bucket level.";
desc(client_fields) -> desc(client_fields) ->
@ -230,19 +229,12 @@ get_bucket_cfg_path(Type, BucketName) ->
[limiter, Type, bucket, BucketName]. [limiter, Type, bucket, BucketName].
types() -> types() ->
[bytes_in, message_in, connection, message_routing, internal]. [bytes, messages, connection, message_routing, internal].
%%-------------------------------------------------------------------- calc_capacity(#{rate := infinity}) ->
%% Internal functions infinity;
%%-------------------------------------------------------------------- calc_capacity(#{rate := Rate, burst := Burst}) ->
erlang:floor(1000 * Rate / default_period()) + Burst.
%% `infinity` to `infinity_value` rules:
%% 1. all infinity capacity will change to infinity_value
%% 2. if the rate of global and bucket both are `infinity`,
%% use `infinity_value` as bucket rate. see `emqx_limiter_server:get_counter_rate/2`
infinity_value() ->
%% 1 TB
1099511627776.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Internal functions %% Internal functions
@ -251,6 +243,17 @@ infinity_value() ->
to_burst_rate(Str) -> to_burst_rate(Str) ->
to_rate(Str, false, true). to_rate(Str, false, true).
%% The default value of `capacity` is `infinity`,
%% but we have changed `capacity` to `burst` which should not be `infinity`
%% and its default value is 0, so we should convert `infinity` to 0
to_burst(Str) ->
case to_rate(Str, true, true) of
{ok, infinity} ->
{ok, 0};
Any ->
Any
end.
%% rate can be: 10 10MB 10MB/s 10MB/2s infinity %% rate can be: 10 10MB 10MB/s 10MB/2s infinity
%% e.g. the bytes_in regex tree is: %% e.g. the bytes_in regex tree is:
%% %%
@ -335,7 +338,7 @@ to_quota(Str, Regex) ->
{match, [Quota, ""]} -> {match, [Quota, ""]} ->
{ok, erlang:list_to_integer(Quota)}; {ok, erlang:list_to_integer(Quota)};
{match, ""} -> {match, ""} ->
{ok, infinity_value()}; {ok, infinity};
_ -> _ ->
{error, Str} {error, Str}
end end
@ -350,26 +353,33 @@ apply_unit("mb", Val) -> Val * ?KILOBYTE * ?KILOBYTE;
apply_unit("gb", Val) -> Val * ?KILOBYTE * ?KILOBYTE * ?KILOBYTE; apply_unit("gb", Val) -> Val * ?KILOBYTE * ?KILOBYTE * ?KILOBYTE;
apply_unit(Unit, _) -> throw("invalid unit:" ++ Unit). apply_unit(Unit, _) -> throw("invalid unit:" ++ Unit).
bucket_field(Type) when is_atom(Type) -> %% A bucket with only one type
fields(bucket_infinity) ++ simple_bucket_field(Type) when is_atom(Type) ->
fields(bucket_opts) ++
[ [
{client, {client,
?HOCON( ?HOCON(
?R_REF(?MODULE, client_opts), ?R_REF(?MODULE, client_opts),
#{ #{
desc => ?DESC(client), desc => ?DESC(client),
required => false required => false,
importance => importance_of_type(Type),
aliases => alias_of_type(Type)
} }
)} )}
]. ].
bucket_fields(Types, ClientRef) ->
%% A bucket with multi types
composite_bucket_fields(Types, ClientRef) ->
[ [
{Type, {Type,
?HOCON(?R_REF(?MODULE, Opts), #{ ?HOCON(?R_REF(?MODULE, bucket_opts), #{
desc => ?DESC(?MODULE, Type), desc => ?DESC(?MODULE, Type),
required => false required => false,
importance => importance_of_type(Type),
aliases => alias_of_type(Type)
})} })}
|| {Type, Opts} <- Types || Type <- Types
] ++ ] ++
[ [
{client, {client,
@ -382,12 +392,47 @@ bucket_fields(Types, ClientRef) ->
)} )}
]. ].
client_fields(Types) -> fields_of_bucket(Default) ->
[
{rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => Default})},
{burst,
?HOCON(burst(), #{
desc => ?DESC(burst),
default => <<"0">>,
importance => ?IMPORTANCE_HIDDEN,
aliases => [capacity]
})},
{initial,
?HOCON(initial(), #{
default => <<"0">>,
desc => ?DESC(initial),
importance => ?IMPORTANCE_HIDDEN
})}
].
client_fields(Types, Meta) ->
[ [
{Type, {Type,
?HOCON(?R_REF(client_opts), #{ ?HOCON(?R_REF(client_opts), Meta#{
desc => ?DESC(Type), desc => ?DESC(Type),
required => false importance => importance_of_type(Type),
aliases => alias_of_type(Type)
})} })}
|| {Type, _} <- Types || Type <- Types
]. ].
importance_of_type(interval) ->
?IMPORTANCE_HIDDEN;
importance_of_type(message_routing) ->
?IMPORTANCE_HIDDEN;
importance_of_type(connection) ->
?IMPORTANCE_HIDDEN;
importance_of_type(_) ->
?DEFAULT_IMPORTANCE.
alias_of_type(messages) ->
[message_in];
alias_of_type(bytes) ->
[bytes_in];
alias_of_type(_) ->
[].

View File

@ -118,17 +118,24 @@ connect(_Id, _Type, undefined) ->
{ok, emqx_htb_limiter:make_infinity_limiter()}; {ok, emqx_htb_limiter:make_infinity_limiter()};
connect(Id, Type, Cfg) -> connect(Id, Type, Cfg) ->
case find_limiter_cfg(Type, Cfg) of case find_limiter_cfg(Type, Cfg) of
{undefined, _} -> {_ClientCfg, undefined, _NodeCfg} ->
{ok, emqx_htb_limiter:make_infinity_limiter()}; {ok, emqx_htb_limiter:make_infinity_limiter()};
{#{rate := infinity}, #{rate := infinity}, #{rate := infinity}} ->
{ok, emqx_htb_limiter:make_infinity_limiter()};
{ClientCfg, #{rate := infinity}, #{rate := infinity}} ->
{ok,
emqx_htb_limiter:make_token_bucket_limiter(
ClientCfg, emqx_limiter_bucket_ref:infinity_bucket()
)};
{ {
#{ #{rate := CliRate} = ClientCfg,
rate := BucketRate, #{rate := BucketRate} = BucketCfg,
capacity := BucketSize _
},
#{rate := CliRate, capacity := CliSize} = ClientCfg
} -> } ->
case emqx_limiter_manager:find_bucket(Id, Type) of case emqx_limiter_manager:find_bucket(Id, Type) of
{ok, Bucket} -> {ok, Bucket} ->
BucketSize = emqx_limiter_schema:calc_capacity(BucketCfg),
CliSize = emqx_limiter_schema:calc_capacity(ClientCfg),
{ok, {ok,
if if
CliRate < BucketRate orelse CliSize < BucketSize -> CliRate < BucketRate orelse CliSize < BucketSize ->
@ -493,12 +500,14 @@ make_root(#{rate := Rate, burst := Burst}) ->
produced => 0.0 produced => 0.0
}. }.
do_add_bucket(Id, #{rate := Rate, capacity := Capacity} = Cfg, #{buckets := Buckets} = State) -> do_add_bucket(_Id, #{rate := infinity}, #{root := #{rate := infinity}} = State) ->
State;
do_add_bucket(Id, #{rate := Rate} = Cfg, #{buckets := Buckets} = State) ->
case maps:get(Id, Buckets, undefined) of case maps:get(Id, Buckets, undefined) of
undefined -> undefined ->
make_bucket(Id, Cfg, State); make_bucket(Id, Cfg, State);
Bucket -> Bucket ->
Bucket2 = Bucket#{rate := Rate, capacity := Capacity}, Bucket2 = Bucket#{rate := Rate, capacity := emqx_limiter_schema:calc_capacity(Cfg)},
State#{buckets := Buckets#{Id := Bucket2}} State#{buckets := Buckets#{Id := Bucket2}}
end. end.
@ -509,7 +518,7 @@ make_bucket(Id, Cfg, #{index := ?COUNTER_SIZE} = State) ->
}); });
make_bucket( make_bucket(
Id, Id,
#{rate := Rate, capacity := Capacity} = Cfg, #{rate := Rate} = Cfg,
#{type := Type, counter := Counter, index := Index, buckets := Buckets} = State #{type := Type, counter := Counter, index := Index, buckets := Buckets} = State
) -> ) ->
NewIndex = Index + 1, NewIndex = Index + 1,
@ -519,7 +528,7 @@ make_bucket(
rate => Rate, rate => Rate,
obtained => Initial, obtained => Initial,
correction => 0, correction => 0,
capacity => Capacity, capacity => emqx_limiter_schema:calc_capacity(Cfg),
counter => Counter, counter => Counter,
index => NewIndex index => NewIndex
}, },
@ -541,19 +550,14 @@ do_del_bucket(Id, #{type := Type, buckets := Buckets} = State) ->
get_initial_val( get_initial_val(
#{ #{
initial := Initial, initial := Initial,
rate := Rate, rate := Rate
capacity := Capacity
} }
) -> ) ->
%% initial will nevner be infinity(see the emqx_limiter_schema)
InfVal = emqx_limiter_schema:infinity_value(),
if if
Initial > 0 -> Initial > 0 ->
Initial; Initial;
Rate =/= infinity -> Rate =/= infinity ->
erlang:min(Rate, Capacity); Rate;
Capacity =/= infinity andalso Capacity =/= InfVal ->
Capacity;
true -> true ->
0 0
end. end.
@ -568,11 +572,12 @@ call(Type, Msg) ->
end. end.
find_limiter_cfg(Type, #{rate := _} = Cfg) -> find_limiter_cfg(Type, #{rate := _} = Cfg) ->
{Cfg, find_client_cfg(Type, maps:get(client, Cfg, undefined))}; {find_client_cfg(Type, maps:get(client, Cfg, undefined)), Cfg, find_node_cfg(Type)};
find_limiter_cfg(Type, Cfg) -> find_limiter_cfg(Type, Cfg) ->
{ {
find_client_cfg(Type, emqx_utils_maps:deep_get([client, Type], Cfg, undefined)),
maps:get(Type, Cfg, undefined), maps:get(Type, Cfg, undefined),
find_client_cfg(Type, emqx_utils_maps:deep_get([client, Type], Cfg, undefined)) find_node_cfg(Type)
}. }.
find_client_cfg(Type, BucketCfg) -> find_client_cfg(Type, BucketCfg) ->
@ -585,3 +590,6 @@ merge_client_cfg(NodeCfg, undefined) ->
NodeCfg; NodeCfg;
merge_client_cfg(NodeCfg, BucketCfg) -> merge_client_cfg(NodeCfg, BucketCfg) ->
maps:merge(NodeCfg, BucketCfg). maps:merge(NodeCfg, BucketCfg).
find_node_cfg(Type) ->
emqx:get_config([limiter, Type], #{rate => infinity, burst => 0}).

View File

@ -42,7 +42,12 @@
-type ip_port() :: tuple() | integer(). -type ip_port() :: tuple() | integer().
-type cipher() :: map(). -type cipher() :: map().
-type port_number() :: 1..65536. -type port_number() :: 1..65536.
-type server_parse_option() :: #{default_port => port_number(), no_port => boolean()}. -type server_parse_option() :: #{
default_port => port_number(),
no_port => boolean(),
supported_schemes => [string()],
default_scheme => string()
}.
-type url() :: binary(). -type url() :: binary().
-type json_binary() :: binary(). -type json_binary() :: binary().
@ -61,6 +66,12 @@
-typerefl_from_string({url/0, emqx_schema, to_url}). -typerefl_from_string({url/0, emqx_schema, to_url}).
-typerefl_from_string({json_binary/0, emqx_schema, to_json_binary}). -typerefl_from_string({json_binary/0, emqx_schema, to_json_binary}).
-type parsed_server() :: #{
hostname := string(),
port => port_number(),
scheme => string()
}.
-export([ -export([
validate_heap_size/1, validate_heap_size/1,
user_lookup_fun_tr/2, user_lookup_fun_tr/2,
@ -164,7 +175,7 @@ roots(high) ->
} }
)}, )},
{?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME, authentication(global)}, {?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME, authentication(global)},
%% NOTE: authorization schema here is only to keep emqx app prue %% NOTE: authorization schema here is only to keep emqx app pure
%% the full schema for EMQX node is injected in emqx_conf_schema. %% the full schema for EMQX node is injected in emqx_conf_schema.
{?EMQX_AUTHORIZATION_CONFIG_ROOT_NAME, {?EMQX_AUTHORIZATION_CONFIG_ROOT_NAME,
sc( sc(
@ -1489,10 +1500,8 @@ fields("broker") ->
sc( sc(
boolean(), boolean(),
#{ #{
%% TODO: deprecated => {since, "5.1.0"} deprecated => {since, "5.1.0"},
%% in favor of session message re-dispatch at termination importance => ?IMPORTANCE_HIDDEN,
%% we will stop supporting dispatch acks for shared
%% subscriptions.
default => false, default => false,
desc => ?DESC(broker_shared_dispatch_ack_enabled) desc => ?DESC(broker_shared_dispatch_ack_enabled)
} }
@ -2227,6 +2236,7 @@ common_ssl_opts_schema(Defaults) ->
#{ #{
default => AvailableVersions, default => AvailableVersions,
desc => ?DESC(common_ssl_opts_schema_versions), desc => ?DESC(common_ssl_opts_schema_versions),
importance => ?IMPORTANCE_HIGH,
validator => fun(Inputs) -> validate_tls_versions(AvailableVersions, Inputs) end validator => fun(Inputs) -> validate_tls_versions(AvailableVersions, Inputs) end
} }
)}, )},
@ -2897,7 +2907,7 @@ servers_validator(Opts, Required) ->
%% `no_port': by default it's `false', when set to `true', %% `no_port': by default it's `false', when set to `true',
%% a `throw' exception is raised if the port is found. %% a `throw' exception is raised if the port is found.
-spec parse_server(undefined | string() | binary(), server_parse_option()) -> -spec parse_server(undefined | string() | binary(), server_parse_option()) ->
{string(), port_number()}. undefined | parsed_server().
parse_server(Str, Opts) -> parse_server(Str, Opts) ->
case parse_servers(Str, Opts) of case parse_servers(Str, Opts) of
undefined -> undefined ->
@ -2911,7 +2921,7 @@ parse_server(Str, Opts) ->
%% @doc Parse comma separated `host[:port][,host[:port]]' endpoints %% @doc Parse comma separated `host[:port][,host[:port]]' endpoints
%% into a list of `{Host, Port}' tuples or just `Host' string. %% into a list of `{Host, Port}' tuples or just `Host' string.
-spec parse_servers(undefined | string() | binary(), server_parse_option()) -> -spec parse_servers(undefined | string() | binary(), server_parse_option()) ->
[{string(), port_number()}]. undefined | [parsed_server()].
parse_servers(undefined, _Opts) -> parse_servers(undefined, _Opts) ->
%% should not parse 'undefined' as string, %% should not parse 'undefined' as string,
%% not to throw exception either, %% not to throw exception either,
@ -2957,6 +2967,9 @@ split_host_port(Str) ->
do_parse_server(Str, Opts) -> do_parse_server(Str, Opts) ->
DefaultPort = maps:get(default_port, Opts, undefined), DefaultPort = maps:get(default_port, Opts, undefined),
NotExpectingPort = maps:get(no_port, Opts, false), NotExpectingPort = maps:get(no_port, Opts, false),
DefaultScheme = maps:get(default_scheme, Opts, undefined),
SupportedSchemes = maps:get(supported_schemes, Opts, []),
NotExpectingScheme = (not is_list(DefaultScheme)) andalso length(SupportedSchemes) =:= 0,
case is_integer(DefaultPort) andalso NotExpectingPort of case is_integer(DefaultPort) andalso NotExpectingPort of
true -> true ->
%% either provide a default port from schema, %% either provide a default port from schema,
@ -2965,22 +2978,129 @@ do_parse_server(Str, Opts) ->
false -> false ->
ok ok
end, end,
case is_list(DefaultScheme) andalso (not lists:member(DefaultScheme, SupportedSchemes)) of
true ->
%% inconsistent schema
error("bad_schema");
false ->
ok
end,
%% do not split with space, there should be no space allowed between host and port %% do not split with space, there should be no space allowed between host and port
case string:tokens(Str, ":") of Tokens = string:tokens(Str, ":"),
[Hostname, Port] -> Context = #{
NotExpectingPort andalso throw("not_expecting_port_number"), not_expecting_port => NotExpectingPort,
{check_hostname(Hostname), parse_port(Port)}; not_expecting_scheme => NotExpectingScheme,
[Hostname] -> default_port => DefaultPort,
case is_integer(DefaultPort) of default_scheme => DefaultScheme,
true -> opts => Opts
{check_hostname(Hostname), DefaultPort}; },
false when NotExpectingPort -> check_server_parts(Tokens, Context).
check_hostname(Hostname);
false -> check_server_parts([Scheme, "//" ++ Hostname, Port], Context) ->
throw("missing_port_number") #{
end; not_expecting_scheme := NotExpectingScheme,
_ -> not_expecting_port := NotExpectingPort,
throw("bad_host_port") opts := Opts
} = Context,
NotExpectingPort andalso throw("not_expecting_port_number"),
NotExpectingScheme andalso throw("not_expecting_scheme"),
#{
scheme => check_scheme(Scheme, Opts),
hostname => check_hostname(Hostname),
port => parse_port(Port)
};
check_server_parts([Scheme, "//" ++ Hostname], Context) ->
#{
not_expecting_scheme := NotExpectingScheme,
not_expecting_port := NotExpectingPort,
default_port := DefaultPort,
opts := Opts
} = Context,
NotExpectingScheme andalso throw("not_expecting_scheme"),
case is_integer(DefaultPort) of
true ->
#{
scheme => check_scheme(Scheme, Opts),
hostname => check_hostname(Hostname),
port => DefaultPort
};
false when NotExpectingPort ->
#{
scheme => check_scheme(Scheme, Opts),
hostname => check_hostname(Hostname)
};
false ->
throw("missing_port_number")
end;
check_server_parts([Hostname, Port], Context) ->
#{
not_expecting_port := NotExpectingPort,
default_scheme := DefaultScheme
} = Context,
NotExpectingPort andalso throw("not_expecting_port_number"),
case is_list(DefaultScheme) of
false ->
#{
hostname => check_hostname(Hostname),
port => parse_port(Port)
};
true ->
#{
scheme => DefaultScheme,
hostname => check_hostname(Hostname),
port => parse_port(Port)
}
end;
check_server_parts([Hostname], Context) ->
#{
not_expecting_scheme := NotExpectingScheme,
not_expecting_port := NotExpectingPort,
default_port := DefaultPort,
default_scheme := DefaultScheme
} = Context,
case is_integer(DefaultPort) orelse NotExpectingPort of
true ->
ok;
false ->
throw("missing_port_number")
end,
case is_list(DefaultScheme) orelse NotExpectingScheme of
true ->
ok;
false ->
throw("missing_scheme")
end,
case {is_integer(DefaultPort), is_list(DefaultScheme)} of
{true, true} ->
#{
scheme => DefaultScheme,
hostname => check_hostname(Hostname),
port => DefaultPort
};
{true, false} ->
#{
hostname => check_hostname(Hostname),
port => DefaultPort
};
{false, true} ->
#{
scheme => DefaultScheme,
hostname => check_hostname(Hostname)
};
{false, false} ->
#{hostname => check_hostname(Hostname)}
end;
check_server_parts(_Tokens, _Context) ->
throw("bad_host_port").
check_scheme(Str, Opts) ->
SupportedSchemes = maps:get(supported_schemes, Opts, []),
IsSupported = lists:member(Str, SupportedSchemes),
case IsSupported of
true ->
Str;
false ->
throw("unsupported_scheme")
end. end.
check_hostname(Str) -> check_hostname(Str) ->

View File

@ -165,7 +165,7 @@ strategy(Group) ->
-spec ack_enabled() -> boolean(). -spec ack_enabled() -> boolean().
ack_enabled() -> ack_enabled() ->
emqx:get_config([broker, shared_dispatch_ack_enabled]). emqx:get_config([broker, shared_dispatch_ack_enabled], false).
do_dispatch(SubPid, _Group, Topic, Msg, _Type) when SubPid =:= self() -> do_dispatch(SubPid, _Group, Topic, Msg, _Type) when SubPid =:= self() ->
%% Deadlock otherwise %% Deadlock otherwise
@ -181,7 +181,7 @@ do_dispatch(SubPid, _Group, Topic, Msg, retry) ->
do_dispatch(SubPid, Group, Topic, Msg, fresh) -> do_dispatch(SubPid, Group, Topic, Msg, fresh) ->
case ack_enabled() of case ack_enabled() of
true -> true ->
%% FIXME: replace with `emqx_shared_sub_proto:dispatch_with_ack' in 5.2 %% TODO: delete this clase after 5.1.0
do_dispatch_with_ack(SubPid, Group, Topic, Msg); do_dispatch_with_ack(SubPid, Group, Topic, Msg);
false -> false ->
send(SubPid, Topic, {deliver, Topic, Msg}) send(SubPid, Topic, {deliver, Topic, Msg})

View File

@ -90,7 +90,7 @@
listener :: {Type :: atom(), Name :: atom()}, listener :: {Type :: atom(), Name :: atom()},
%% Limiter %% Limiter
limiter :: maybe(container()), limiter :: container(),
%% cache operation when overload %% cache operation when overload
limiter_cache :: queue:queue(cache()), limiter_cache :: queue:queue(cache()),
@ -121,8 +121,8 @@
-define(SOCK_STATS, [recv_oct, recv_cnt, send_oct, send_cnt]). -define(SOCK_STATS, [recv_oct, recv_cnt, send_oct, send_cnt]).
-define(ENABLED(X), (X =/= undefined)). -define(ENABLED(X), (X =/= undefined)).
-define(LIMITER_BYTES_IN, bytes_in). -define(LIMITER_BYTES_IN, bytes).
-define(LIMITER_MESSAGE_IN, message_in). -define(LIMITER_MESSAGE_IN, messages).
-dialyzer({no_match, [info/2]}). -dialyzer({no_match, [info/2]}).
-dialyzer({nowarn_function, [websocket_init/1]}). -dialyzer({nowarn_function, [websocket_init/1]}).
@ -579,54 +579,61 @@ handle_timeout(TRef, TMsg, State) ->
list(any()), list(any()),
state() state()
) -> state(). ) -> state().
check_limiter(
_Needs,
Data,
WhenOk,
Msgs,
#state{limiter = infinity} = State
) ->
WhenOk(Data, Msgs, State);
check_limiter( check_limiter(
Needs, Needs,
Data, Data,
WhenOk, WhenOk,
Msgs, Msgs,
#state{ #state{limiter_timer = undefined, limiter = Limiter} = State
limiter = Limiter,
limiter_timer = LimiterTimer,
limiter_cache = Cache
} = State
) -> ) ->
case LimiterTimer of case emqx_limiter_container:check_list(Needs, Limiter) of
undefined -> {ok, Limiter2} ->
case emqx_limiter_container:check_list(Needs, Limiter) of WhenOk(Data, Msgs, State#state{limiter = Limiter2});
{ok, Limiter2} -> {pause, Time, Limiter2} ->
WhenOk(Data, Msgs, State#state{limiter = Limiter2}); ?SLOG(debug, #{
{pause, Time, Limiter2} -> msg => "pause_time_due_to_rate_limit",
?SLOG(debug, #{ needs => Needs,
msg => "pause_time_due_to_rate_limit", time_in_ms => Time
needs => Needs, }),
time_in_ms => Time
}),
Retry = #retry{ Retry = #retry{
types = [Type || {_, Type} <- Needs], types = [Type || {_, Type} <- Needs],
data = Data, data = Data,
next = WhenOk next = WhenOk
}, },
Limiter3 = emqx_limiter_container:set_retry_context(Retry, Limiter2), Limiter3 = emqx_limiter_container:set_retry_context(Retry, Limiter2),
TRef = start_timer(Time, limit_timeout), TRef = start_timer(Time, limit_timeout),
enqueue( enqueue(
{active, false}, {active, false},
State#state{ State#state{
sockstate = blocked, sockstate = blocked,
limiter = Limiter3, limiter = Limiter3,
limiter_timer = TRef limiter_timer = TRef
} }
); );
{drop, Limiter2} -> {drop, Limiter2} ->
{ok, State#state{limiter = Limiter2}} {ok, State#state{limiter = Limiter2}}
end; end;
_ -> check_limiter(
New = #cache{need = Needs, data = Data, next = WhenOk}, Needs,
State#state{limiter_cache = queue:in(New, Cache)} Data,
end. WhenOk,
_Msgs,
#state{limiter_cache = Cache} = State
) ->
New = #cache{need = Needs, data = Data, next = WhenOk},
State#state{limiter_cache = queue:in(New, Cache)}.
-spec retry_limiter(state()) -> state(). -spec retry_limiter(state()) -> state().
retry_limiter(#state{limiter = Limiter} = State) -> retry_limiter(#state{limiter = Limiter} = State) ->

View File

@ -148,6 +148,14 @@ t_run_hook(_) ->
?assertEqual(3, emqx:run_fold_hook(foldl_filter2_hook, [arg], 1)), ?assertEqual(3, emqx:run_fold_hook(foldl_filter2_hook, [arg], 1)),
?assertEqual(2, emqx:run_fold_hook(foldl_filter2_hook, [arg1], 1)). ?assertEqual(2, emqx:run_fold_hook(foldl_filter2_hook, [arg1], 1)).
t_cluster_nodes(_) ->
Expected = [node()],
?assertEqual(Expected, emqx:running_nodes()),
?assertEqual(Expected, emqx:cluster_nodes(running)),
?assertEqual(Expected, emqx:cluster_nodes(all)),
?assertEqual(Expected, emqx:cluster_nodes(cores)),
?assertEqual([], emqx:cluster_nodes(stopped)).
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Hook fun %% Hook fun
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------

View File

@ -162,8 +162,7 @@ limiter_conf() ->
Make = fun() -> Make = fun() ->
#{ #{
burst => 0, burst => 0,
rate => infinity, rate => infinity
capacity => infinity
} }
end, end,
@ -172,7 +171,7 @@ limiter_conf() ->
Acc#{Name => Make()} Acc#{Name => Make()}
end, end,
#{}, #{},
[bytes_in, message_in, message_routing, connection, internal] [bytes, messages, message_routing, connection, internal]
). ).
stats_conf() -> stats_conf() ->
@ -1258,7 +1257,7 @@ limiter_cfg() ->
Client = #{ Client = #{
rate => 5, rate => 5,
initial => 0, initial => 0,
capacity => 5, burst => 0,
low_watermark => 1, low_watermark => 1,
divisible => false, divisible => false,
max_retry_time => timer:seconds(5), max_retry_time => timer:seconds(5),
@ -1270,7 +1269,7 @@ limiter_cfg() ->
}. }.
bucket_cfg() -> bucket_cfg() ->
#{rate => 10, initial => 0, capacity => 10}. #{rate => 10, initial => 0, burst => 0}.
add_bucket() -> add_bucket() ->
emqx_limiter_server:add_bucket(?MODULE, message_routing, bucket_cfg()). emqx_limiter_server:add_bucket(?MODULE, message_routing, bucket_cfg()).

View File

@ -764,6 +764,7 @@ setup_node(Node, Opts) when is_map(Opts) ->
load_apps => LoadApps, load_apps => LoadApps,
apps => Apps, apps => Apps,
env => Env, env => Env,
join_to => JoinTo,
start_apps => StartApps start_apps => StartApps
} }
] ]

View File

@ -50,7 +50,6 @@ t_fill_default_values(_) ->
}, },
<<"route_batch_clean">> := false, <<"route_batch_clean">> := false,
<<"session_locking_strategy">> := quorum, <<"session_locking_strategy">> := quorum,
<<"shared_dispatch_ack_enabled">> := false,
<<"shared_subscription_strategy">> := round_robin <<"shared_subscription_strategy">> := round_robin
} }
}, },

View File

@ -38,8 +38,6 @@ init_per_suite(Config) ->
ok = meck:new(emqx_cm, [passthrough, no_history, no_link]), ok = meck:new(emqx_cm, [passthrough, no_history, no_link]),
ok = meck:expect(emqx_cm, mark_channel_connected, fun(_) -> ok end), ok = meck:expect(emqx_cm, mark_channel_connected, fun(_) -> ok end),
ok = meck:expect(emqx_cm, mark_channel_disconnected, fun(_) -> ok end), ok = meck:expect(emqx_cm, mark_channel_disconnected, fun(_) -> ok end),
%% Meck Limiter
ok = meck:new(emqx_htb_limiter, [passthrough, no_history, no_link]),
%% Meck Pd %% Meck Pd
ok = meck:new(emqx_pd, [passthrough, no_history, no_link]), ok = meck:new(emqx_pd, [passthrough, no_history, no_link]),
%% Meck Metrics %% Meck Metrics
@ -67,7 +65,6 @@ end_per_suite(_Config) ->
ok = meck:unload(emqx_transport), ok = meck:unload(emqx_transport),
catch meck:unload(emqx_channel), catch meck:unload(emqx_channel),
ok = meck:unload(emqx_cm), ok = meck:unload(emqx_cm),
ok = meck:unload(emqx_htb_limiter),
ok = meck:unload(emqx_pd), ok = meck:unload(emqx_pd),
ok = meck:unload(emqx_metrics), ok = meck:unload(emqx_metrics),
ok = meck:unload(emqx_hooks), ok = meck:unload(emqx_hooks),
@ -421,20 +418,28 @@ t_ensure_rate_limit(_) ->
{ok, [], State1} = emqx_connection:check_limiter([], [], WhenOk, [], st(#{limiter => Limiter})), {ok, [], State1} = emqx_connection:check_limiter([], [], WhenOk, [], st(#{limiter => Limiter})),
?assertEqual(Limiter, emqx_connection:info(limiter, State1)), ?assertEqual(Limiter, emqx_connection:info(limiter, State1)),
ok = meck:new(emqx_htb_limiter, [passthrough, no_history, no_link]),
ok = meck:expect(
emqx_htb_limiter,
make_infinity_limiter,
fun() -> non_infinity end
),
ok = meck:expect( ok = meck:expect(
emqx_htb_limiter, emqx_htb_limiter,
check, check,
fun(_, Client) -> {pause, 3000, undefined, Client} end fun(_, Client) -> {pause, 3000, undefined, Client} end
), ),
{ok, State2} = emqx_connection:check_limiter( {ok, State2} = emqx_connection:check_limiter(
[{1000, bytes_in}], [{1000, bytes}],
[], [],
WhenOk, WhenOk,
[], [],
st(#{limiter => Limiter}) st(#{limiter => init_limiter()})
), ),
meck:unload(emqx_htb_limiter), meck:unload(emqx_htb_limiter),
ok = meck:new(emqx_htb_limiter, [passthrough, no_history, no_link]),
?assertNotEqual(undefined, emqx_connection:info(limiter_timer, State2)). ?assertNotEqual(undefined, emqx_connection:info(limiter_timer, State2)).
t_activate_socket(_) -> t_activate_socket(_) ->
@ -495,6 +500,7 @@ t_get_conn_info(_) ->
end). end).
t_oom_shutdown(init, Config) -> t_oom_shutdown(init, Config) ->
ok = snabbkaffe:stop(),
ok = snabbkaffe:start_trace(), ok = snabbkaffe:start_trace(),
ok = meck:new(emqx_utils, [non_strict, passthrough, no_history, no_link]), ok = meck:new(emqx_utils, [non_strict, passthrough, no_history, no_link]),
meck:expect( meck:expect(
@ -703,31 +709,32 @@ handle_call(Pid, Call, St) -> emqx_connection:handle_call(Pid, Call, St).
-define(LIMITER_ID, 'tcp:default'). -define(LIMITER_ID, 'tcp:default').
init_limiter() -> init_limiter() ->
emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes_in, message_in], limiter_cfg()). emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes, messages], limiter_cfg()).
limiter_cfg() -> limiter_cfg() ->
Infinity = emqx_limiter_schema:infinity_value(),
Cfg = bucket_cfg(), Cfg = bucket_cfg(),
Client = #{ Client = client_cfg(),
rate => Infinity, #{bytes => Cfg, messages => Cfg, client => #{bytes => Client, messages => Client}}.
bucket_cfg() ->
#{rate => infinity, initial => 0, burst => 0}.
client_cfg() ->
#{
rate => infinity,
initial => 0, initial => 0,
capacity => Infinity, burst => 0,
low_watermark => 1, low_watermark => 1,
divisible => false, divisible => false,
max_retry_time => timer:seconds(5), max_retry_time => timer:seconds(5),
failure_strategy => force failure_strategy => force
}, }.
#{bytes_in => Cfg, message_in => Cfg, client => #{bytes_in => Client, message_in => Client}}.
bucket_cfg() ->
Infinity = emqx_limiter_schema:infinity_value(),
#{rate => Infinity, initial => 0, capacity => Infinity}.
add_bucket() -> add_bucket() ->
Cfg = bucket_cfg(), Cfg = bucket_cfg(),
emqx_limiter_server:add_bucket(?LIMITER_ID, bytes_in, Cfg), emqx_limiter_server:add_bucket(?LIMITER_ID, bytes, Cfg),
emqx_limiter_server:add_bucket(?LIMITER_ID, message_in, Cfg). emqx_limiter_server:add_bucket(?LIMITER_ID, messages, Cfg).
del_bucket() -> del_bucket() ->
emqx_limiter_server:del_bucket(?LIMITER_ID, bytes_in), emqx_limiter_server:del_bucket(?LIMITER_ID, bytes),
emqx_limiter_server:del_bucket(?LIMITER_ID, message_in). emqx_limiter_server:del_bucket(?LIMITER_ID, messages).

View File

@ -72,7 +72,7 @@ t_consume(_) ->
Cfg = fun(Cfg) -> Cfg = fun(Cfg) ->
Cfg#{ Cfg#{
rate := 100, rate := 100,
capacity := 100, burst := 0,
initial := 100, initial := 100,
max_retry_time := 1000, max_retry_time := 1000,
failure_strategy := force failure_strategy := force
@ -89,7 +89,7 @@ t_retry(_) ->
Cfg = fun(Cfg) -> Cfg = fun(Cfg) ->
Cfg#{ Cfg#{
rate := 50, rate := 50,
capacity := 200, burst := 150,
initial := 0, initial := 0,
max_retry_time := 1000, max_retry_time := 1000,
failure_strategy := force failure_strategy := force
@ -109,7 +109,7 @@ t_restore(_) ->
Cfg = fun(Cfg) -> Cfg = fun(Cfg) ->
Cfg#{ Cfg#{
rate := 1, rate := 1,
capacity := 200, burst := 199,
initial := 50, initial := 50,
max_retry_time := 100, max_retry_time := 100,
failure_strategy := force failure_strategy := force
@ -129,7 +129,7 @@ t_max_retry_time(_) ->
Cfg = fun(Cfg) -> Cfg = fun(Cfg) ->
Cfg#{ Cfg#{
rate := 1, rate := 1,
capacity := 1, burst := 0,
max_retry_time := 500, max_retry_time := 500,
failure_strategy := drop failure_strategy := drop
} }
@ -139,8 +139,12 @@ t_max_retry_time(_) ->
Begin = ?NOW, Begin = ?NOW,
Result = emqx_htb_limiter:consume(101, Client), Result = emqx_htb_limiter:consume(101, Client),
?assertMatch({drop, _}, Result), ?assertMatch({drop, _}, Result),
Time = ?NOW - Begin, End = ?NOW,
?assert(Time >= 500 andalso Time < 550) Time = End - Begin,
?assert(
Time >= 500 andalso Time < 550,
lists:flatten(io_lib:format("Begin:~p, End:~p, Time:~p~n", [Begin, End, Time]))
)
end, end,
with_per_client(Cfg, Case). with_per_client(Cfg, Case).
@ -150,7 +154,7 @@ t_divisible(_) ->
divisible := true, divisible := true,
rate := ?RATE("1000/1s"), rate := ?RATE("1000/1s"),
initial := 600, initial := 600,
capacity := 600 burst := 0
} }
end, end,
Case = fun(BucketCfg) -> Case = fun(BucketCfg) ->
@ -176,7 +180,7 @@ t_low_watermark(_) ->
low_watermark := 400, low_watermark := 400,
rate := ?RATE("1000/1s"), rate := ?RATE("1000/1s"),
initial := 1000, initial := 1000,
capacity := 1000 burst := 0
} }
end, end,
Case = fun(BucketCfg) -> Case = fun(BucketCfg) ->
@ -201,8 +205,7 @@ t_infinity_client(_) ->
Fun = fun(Cfg) -> Cfg end, Fun = fun(Cfg) -> Cfg end,
Case = fun(Cfg) -> Case = fun(Cfg) ->
Client = connect(Cfg), Client = connect(Cfg),
InfVal = emqx_limiter_schema:infinity_value(), ?assertMatch(infinity, Client),
?assertMatch(#{bucket := #{rate := InfVal}}, Client),
Result = emqx_htb_limiter:check(100000, Client), Result = emqx_htb_limiter:check(100000, Client),
?assertEqual({ok, Client}, Result) ?assertEqual({ok, Client}, Result)
end, end,
@ -212,12 +215,12 @@ t_try_restore_agg(_) ->
Fun = fun(#{client := Cli} = Bucket) -> Fun = fun(#{client := Cli} = Bucket) ->
Bucket2 = Bucket#{ Bucket2 = Bucket#{
rate := 1, rate := 1,
capacity := 200, burst := 199,
initial := 50 initial := 50
}, },
Cli2 = Cli#{ Cli2 = Cli#{
rate := infinity, rate := infinity,
capacity := infinity, burst := 0,
divisible := true, divisible := true,
max_retry_time := 100, max_retry_time := 100,
failure_strategy := force failure_strategy := force
@ -239,11 +242,11 @@ t_short_board(_) ->
Bucket2 = Bucket#{ Bucket2 = Bucket#{
rate := ?RATE("100/1s"), rate := ?RATE("100/1s"),
initial := 0, initial := 0,
capacity := 100 burst := 0
}, },
Cli2 = Cli#{ Cli2 = Cli#{
rate := ?RATE("600/1s"), rate := ?RATE("600/1s"),
capacity := 600, burst := 0,
initial := 600 initial := 600
}, },
Bucket2#{client := Cli2} Bucket2#{client := Cli2}
@ -261,46 +264,45 @@ t_rate(_) ->
Bucket2 = Bucket#{ Bucket2 = Bucket#{
rate := ?RATE("100/100ms"), rate := ?RATE("100/100ms"),
initial := 0, initial := 0,
capacity := infinity burst := 0
}, },
Cli2 = Cli#{ Cli2 = Cli#{
rate := infinity, rate := infinity,
capacity := infinity, burst := 0,
initial := 0 initial := 0
}, },
Bucket2#{client := Cli2} Bucket2#{client := Cli2}
end, end,
Case = fun(Cfg) -> Case = fun(Cfg) ->
Time = 1000,
Client = connect(Cfg), Client = connect(Cfg),
Ts1 = erlang:system_time(millisecond),
C1 = emqx_htb_limiter:available(Client), C1 = emqx_htb_limiter:available(Client),
timer:sleep(1000), timer:sleep(1100),
Ts2 = erlang:system_time(millisecond),
C2 = emqx_htb_limiter:available(Client), C2 = emqx_htb_limiter:available(Client),
ShouldInc = floor((Ts2 - Ts1) / 100) * 100, ShouldInc = floor(Time / 100) * 100,
Inc = C2 - C1, Inc = C2 - C1,
?assert(in_range(Inc, ShouldInc - 100, ShouldInc + 100), "test bucket rate") ?assert(in_range(Inc, ShouldInc - 100, ShouldInc + 100), "test bucket rate")
end, end,
with_bucket(Fun, Case). with_bucket(Fun, Case).
t_capacity(_) -> t_capacity(_) ->
Capacity = 600, Capacity = 1200,
Fun = fun(#{client := Cli} = Bucket) -> Fun = fun(#{client := Cli} = Bucket) ->
Bucket2 = Bucket#{ Bucket2 = Bucket#{
rate := ?RATE("100/100ms"), rate := ?RATE("100/100ms"),
initial := 0, initial := 0,
capacity := 600 burst := 200
}, },
Cli2 = Cli#{ Cli2 = Cli#{
rate := infinity, rate := infinity,
capacity := infinity, burst := 0,
initial := 0 initial := 0
}, },
Bucket2#{client := Cli2} Bucket2#{client := Cli2}
end, end,
Case = fun(Cfg) -> Case = fun(Cfg) ->
Client = connect(Cfg), Client = connect(Cfg),
timer:sleep(1000), timer:sleep(1500),
C1 = emqx_htb_limiter:available(Client), C1 = emqx_htb_limiter:available(Client),
?assertEqual(Capacity, C1, "test bucket capacity") ?assertEqual(Capacity, C1, "test bucket capacity")
end, end,
@ -318,11 +320,11 @@ t_collaborative_alloc(_) ->
Bucket2 = Bucket#{ Bucket2 = Bucket#{
rate := ?RATE("400/1s"), rate := ?RATE("400/1s"),
initial := 0, initial := 0,
capacity := 600 burst := 200
}, },
Cli2 = Cli#{ Cli2 = Cli#{
rate := ?RATE("50"), rate := ?RATE("50"),
capacity := 100, burst := 50,
initial := 100 initial := 100
}, },
Bucket2#{client := Cli2} Bucket2#{client := Cli2}
@ -363,11 +365,11 @@ t_burst(_) ->
Bucket2 = Bucket#{ Bucket2 = Bucket#{
rate := ?RATE("200/1s"), rate := ?RATE("200/1s"),
initial := 0, initial := 0,
capacity := 200 burst := 0
}, },
Cli2 = Cli#{ Cli2 = Cli#{
rate := ?RATE("50/1s"), rate := ?RATE("50/1s"),
capacity := 200, burst := 150,
divisible := true divisible := true
}, },
Bucket2#{client := Cli2} Bucket2#{client := Cli2}
@ -401,11 +403,11 @@ t_limit_global_with_unlimit_other(_) ->
Bucket2 = Bucket#{ Bucket2 = Bucket#{
rate := infinity, rate := infinity,
initial := 0, initial := 0,
capacity := infinity burst := 0
}, },
Cli2 = Cli#{ Cli2 = Cli#{
rate := infinity, rate := infinity,
capacity := infinity, burst := 0,
initial := 0 initial := 0
}, },
Bucket2#{client := Cli2} Bucket2#{client := Cli2}
@ -414,7 +416,7 @@ t_limit_global_with_unlimit_other(_) ->
Case = fun() -> Case = fun() ->
C1 = counters:new(1, []), C1 = counters:new(1, []),
start_client({b1, Bucket}, ?NOW + 2000, C1, 20), start_client({b1, Bucket}, ?NOW + 2000, C1, 20),
timer:sleep(2100), timer:sleep(2200),
check_average_rate(C1, 2, 600) check_average_rate(C1, 2, 600)
end, end,
@ -432,7 +434,7 @@ t_check_container(_) ->
Cfg#{ Cfg#{
rate := ?RATE("1000/1s"), rate := ?RATE("1000/1s"),
initial := 1000, initial := 1000,
capacity := 1000 burst := 0
} }
end, end,
Case = fun(#{client := Client} = BucketCfg) -> Case = fun(#{client := Client} = BucketCfg) ->
@ -565,13 +567,73 @@ t_schema_unit(_) ->
?assertMatch({error, _}, M:to_rate("100MB/1")), ?assertMatch({error, _}, M:to_rate("100MB/1")),
?assertMatch({error, _}, M:to_rate("100/10x")), ?assertMatch({error, _}, M:to_rate("100/10x")),
?assertEqual({ok, emqx_limiter_schema:infinity_value()}, M:to_capacity("infinity")), ?assertEqual({ok, infinity}, M:to_capacity("infinity")),
?assertEqual({ok, 100}, M:to_capacity("100")), ?assertEqual({ok, 100}, M:to_capacity("100")),
?assertEqual({ok, 100 * 1024}, M:to_capacity("100KB")), ?assertEqual({ok, 100 * 1024}, M:to_capacity("100KB")),
?assertEqual({ok, 100 * 1024 * 1024}, M:to_capacity("100MB")), ?assertEqual({ok, 100 * 1024 * 1024}, M:to_capacity("100MB")),
?assertEqual({ok, 100 * 1024 * 1024 * 1024}, M:to_capacity("100GB")), ?assertEqual({ok, 100 * 1024 * 1024 * 1024}, M:to_capacity("100GB")),
ok. ok.
compatibility_for_capacity(_) ->
CfgStr = <<
""
"\n"
"listeners.tcp.default {\n"
" bind = \"0.0.0.0:1883\"\n"
" max_connections = 1024000\n"
" limiter.messages.capacity = infinity\n"
" limiter.client.messages.capacity = infinity\n"
"}\n"
""
>>,
?assertMatch(
#{
messages := #{burst := 0},
client := #{messages := #{burst := 0}}
},
parse_and_check(CfgStr)
).
compatibility_for_message_in(_) ->
CfgStr = <<
""
"\n"
"listeners.tcp.default {\n"
" bind = \"0.0.0.0:1883\"\n"
" max_connections = 1024000\n"
" limiter.message_in.rate = infinity\n"
" limiter.client.message_in.rate = infinity\n"
"}\n"
""
>>,
?assertMatch(
#{
messages := #{rate := infinity},
client := #{messages := #{rate := infinity}}
},
parse_and_check(CfgStr)
).
compatibility_for_bytes_in(_) ->
CfgStr = <<
""
"\n"
"listeners.tcp.default {\n"
" bind = \"0.0.0.0:1883\"\n"
" max_connections = 1024000\n"
" limiter.bytes_in.rate = infinity\n"
" limiter.client.bytes_in.rate = infinity\n"
"}\n"
""
>>,
?assertMatch(
#{
bytes := #{rate := infinity},
client := #{bytes := #{rate := infinity}}
},
parse_and_check(CfgStr)
).
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%%% Internal functions %%% Internal functions
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
@ -748,17 +810,16 @@ connect(Name, Cfg) ->
Limiter. Limiter.
make_limiter_cfg() -> make_limiter_cfg() ->
Infinity = emqx_limiter_schema:infinity_value(),
Client = #{ Client = #{
rate => Infinity, rate => infinity,
initial => 0, initial => 0,
capacity => Infinity, burst => 0,
low_watermark => 0, low_watermark => 0,
divisible => false, divisible => false,
max_retry_time => timer:seconds(5), max_retry_time => timer:seconds(5),
failure_strategy => force failure_strategy => force
}, },
#{client => Client, rate => Infinity, initial => 0, capacity => Infinity}. #{client => Client, rate => infinity, initial => 0, burst => 0}.
add_bucket(Cfg) -> add_bucket(Cfg) ->
add_bucket(?MODULE, Cfg). add_bucket(?MODULE, Cfg).
@ -812,3 +873,7 @@ apply_modifier(Pairs, #{default := Template}) ->
Acc#{N => M(Template)} Acc#{N => M(Template)}
end, end,
lists:foldl(Fun, #{}, Pairs). lists:foldl(Fun, #{}, Pairs).
parse_and_check(ConfigString) ->
ok = emqx_common_test_helpers:load_config(emqx_schema, ConfigString),
emqx:get_config([listeners, tcp, default, limiter]).

View File

@ -219,112 +219,124 @@ parse_server_test_() ->
?T( ?T(
"single server, binary, no port", "single server, binary, no port",
?assertEqual( ?assertEqual(
[{"localhost", DefaultPort}], [#{hostname => "localhost", port => DefaultPort}],
Parse(<<"localhost">>) Parse(<<"localhost">>)
) )
), ),
?T( ?T(
"single server, string, no port", "single server, string, no port",
?assertEqual( ?assertEqual(
[{"localhost", DefaultPort}], [#{hostname => "localhost", port => DefaultPort}],
Parse("localhost") Parse("localhost")
) )
), ),
?T( ?T(
"single server, list(string), no port", "single server, list(string), no port",
?assertEqual( ?assertEqual(
[{"localhost", DefaultPort}], [#{hostname => "localhost", port => DefaultPort}],
Parse(["localhost"]) Parse(["localhost"])
) )
), ),
?T( ?T(
"single server, list(binary), no port", "single server, list(binary), no port",
?assertEqual( ?assertEqual(
[{"localhost", DefaultPort}], [#{hostname => "localhost", port => DefaultPort}],
Parse([<<"localhost">>]) Parse([<<"localhost">>])
) )
), ),
?T( ?T(
"single server, binary, with port", "single server, binary, with port",
?assertEqual( ?assertEqual(
[{"localhost", 9999}], [#{hostname => "localhost", port => 9999}],
Parse(<<"localhost:9999">>) Parse(<<"localhost:9999">>)
) )
), ),
?T( ?T(
"single server, list(string), with port", "single server, list(string), with port",
?assertEqual( ?assertEqual(
[{"localhost", 9999}], [#{hostname => "localhost", port => 9999}],
Parse(["localhost:9999"]) Parse(["localhost:9999"])
) )
), ),
?T( ?T(
"single server, string, with port", "single server, string, with port",
?assertEqual( ?assertEqual(
[{"localhost", 9999}], [#{hostname => "localhost", port => 9999}],
Parse("localhost:9999") Parse("localhost:9999")
) )
), ),
?T( ?T(
"single server, list(binary), with port", "single server, list(binary), with port",
?assertEqual( ?assertEqual(
[{"localhost", 9999}], [#{hostname => "localhost", port => 9999}],
Parse([<<"localhost:9999">>]) Parse([<<"localhost:9999">>])
) )
), ),
?T( ?T(
"multiple servers, string, no port", "multiple servers, string, no port",
?assertEqual( ?assertEqual(
[{"host1", DefaultPort}, {"host2", DefaultPort}], [
#{hostname => "host1", port => DefaultPort},
#{hostname => "host2", port => DefaultPort}
],
Parse("host1, host2") Parse("host1, host2")
) )
), ),
?T( ?T(
"multiple servers, binary, no port", "multiple servers, binary, no port",
?assertEqual( ?assertEqual(
[{"host1", DefaultPort}, {"host2", DefaultPort}], [
#{hostname => "host1", port => DefaultPort},
#{hostname => "host2", port => DefaultPort}
],
Parse(<<"host1, host2,,,">>) Parse(<<"host1, host2,,,">>)
) )
), ),
?T( ?T(
"multiple servers, list(string), no port", "multiple servers, list(string), no port",
?assertEqual( ?assertEqual(
[{"host1", DefaultPort}, {"host2", DefaultPort}], [
#{hostname => "host1", port => DefaultPort},
#{hostname => "host2", port => DefaultPort}
],
Parse(["host1", "host2"]) Parse(["host1", "host2"])
) )
), ),
?T( ?T(
"multiple servers, list(binary), no port", "multiple servers, list(binary), no port",
?assertEqual( ?assertEqual(
[{"host1", DefaultPort}, {"host2", DefaultPort}], [
#{hostname => "host1", port => DefaultPort},
#{hostname => "host2", port => DefaultPort}
],
Parse([<<"host1">>, <<"host2">>]) Parse([<<"host1">>, <<"host2">>])
) )
), ),
?T( ?T(
"multiple servers, string, with port", "multiple servers, string, with port",
?assertEqual( ?assertEqual(
[{"host1", 1234}, {"host2", 2345}], [#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}],
Parse("host1:1234, host2:2345") Parse("host1:1234, host2:2345")
) )
), ),
?T( ?T(
"multiple servers, binary, with port", "multiple servers, binary, with port",
?assertEqual( ?assertEqual(
[{"host1", 1234}, {"host2", 2345}], [#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}],
Parse(<<"host1:1234, host2:2345, ">>) Parse(<<"host1:1234, host2:2345, ">>)
) )
), ),
?T( ?T(
"multiple servers, list(string), with port", "multiple servers, list(string), with port",
?assertEqual( ?assertEqual(
[{"host1", 1234}, {"host2", 2345}], [#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}],
Parse([" host1:1234 ", "host2:2345"]) Parse([" host1:1234 ", "host2:2345"])
) )
), ),
?T( ?T(
"multiple servers, list(binary), with port", "multiple servers, list(binary), with port",
?assertEqual( ?assertEqual(
[{"host1", 1234}, {"host2", 2345}], [#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}],
Parse([<<"host1:1234">>, <<"host2:2345">>]) Parse([<<"host1:1234">>, <<"host2:2345">>])
) )
), ),
@ -350,9 +362,9 @@ parse_server_test_() ->
) )
), ),
?T( ?T(
"multiple servers wihtout port, mixed list(binary|string)", "multiple servers without port, mixed list(binary|string)",
?assertEqual( ?assertEqual(
["host1", "host2"], [#{hostname => "host1"}, #{hostname => "host2"}],
Parse2([<<"host1">>, "host2"], #{no_port => true}) Parse2([<<"host1">>, "host2"], #{no_port => true})
) )
), ),
@ -394,14 +406,18 @@ parse_server_test_() ->
?T( ?T(
"single server map", "single server map",
?assertEqual( ?assertEqual(
[{"host1.domain", 1234}], [#{hostname => "host1.domain", port => 1234}],
HoconParse("host1.domain:1234") HoconParse("host1.domain:1234")
) )
), ),
?T( ?T(
"multiple servers map", "multiple servers map",
?assertEqual( ?assertEqual(
[{"host1.domain", 1234}, {"host2.domain", 2345}, {"host3.domain", 3456}], [
#{hostname => "host1.domain", port => 1234},
#{hostname => "host2.domain", port => 2345},
#{hostname => "host3.domain", port => 3456}
],
HoconParse("host1.domain:1234,host2.domain:2345,host3.domain:3456") HoconParse("host1.domain:1234,host2.domain:2345,host3.domain:3456")
) )
), ),
@ -447,6 +463,171 @@ parse_server_test_() ->
"bad_schema", "bad_schema",
emqx_schema:parse_server("whatever", #{default_port => 10, no_port => true}) emqx_schema:parse_server("whatever", #{default_port => 10, no_port => true})
) )
),
?T(
"scheme, hostname and port",
?assertEqual(
#{scheme => "pulsar+ssl", hostname => "host", port => 6651},
emqx_schema:parse_server(
"pulsar+ssl://host:6651",
#{
default_port => 6650,
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"scheme and hostname, default port",
?assertEqual(
#{scheme => "pulsar", hostname => "host", port => 6650},
emqx_schema:parse_server(
"pulsar://host",
#{
default_port => 6650,
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"scheme and hostname, no port",
?assertEqual(
#{scheme => "pulsar", hostname => "host"},
emqx_schema:parse_server(
"pulsar://host",
#{
no_port => true,
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"scheme and hostname, missing port",
?assertThrow(
"missing_port_number",
emqx_schema:parse_server(
"pulsar://host",
#{
no_port => false,
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"hostname, default scheme, no default port",
?assertEqual(
#{scheme => "pulsar", hostname => "host"},
emqx_schema:parse_server(
"host",
#{
default_scheme => "pulsar",
no_port => true,
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"hostname, default scheme, default port",
?assertEqual(
#{scheme => "pulsar", hostname => "host", port => 6650},
emqx_schema:parse_server(
"host",
#{
default_port => 6650,
default_scheme => "pulsar",
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"just hostname, expecting missing scheme",
?assertThrow(
"missing_scheme",
emqx_schema:parse_server(
"host",
#{
no_port => true,
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"hostname, default scheme, defined port",
?assertEqual(
#{scheme => "pulsar", hostname => "host", port => 6651},
emqx_schema:parse_server(
"host:6651",
#{
default_port => 6650,
default_scheme => "pulsar",
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"inconsistent scheme opts",
?assertError(
"bad_schema",
emqx_schema:parse_server(
"pulsar+ssl://host:6651",
#{
default_port => 6650,
default_scheme => "something",
supported_schemes => ["not", "supported"]
}
)
)
),
?T(
"hostname, default scheme, defined port",
?assertEqual(
#{scheme => "pulsar", hostname => "host", port => 6651},
emqx_schema:parse_server(
"host:6651",
#{
default_port => 6650,
default_scheme => "pulsar",
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"unsupported scheme",
?assertThrow(
"unsupported_scheme",
emqx_schema:parse_server(
"pulsar+quic://host:6651",
#{
default_port => 6650,
supported_schemes => ["pulsar"]
}
)
)
),
?T(
"multiple hostnames with schemes (1)",
?assertEqual(
[
#{scheme => "pulsar", hostname => "host", port => 6649},
#{scheme => "pulsar+ssl", hostname => "other.host", port => 6651},
#{scheme => "pulsar", hostname => "yet.another", port => 6650}
],
emqx_schema:parse_servers(
"pulsar://host:6649, pulsar+ssl://other.host:6651,pulsar://yet.another",
#{
default_port => 6650,
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
) )
]. ].

View File

@ -60,12 +60,12 @@ init(Parent) ->
{ok, #{callbacks => [], owner => Parent}}. {ok, #{callbacks => [], owner => Parent}}.
terminate(_Reason, #{callbacks := Callbacks}) -> terminate(_Reason, #{callbacks := Callbacks}) ->
lists:foreach(fun(Fun) -> catch Fun() end, Callbacks). do_terminate(Callbacks).
handle_call({push, Callback}, _From, State = #{callbacks := Callbacks}) -> handle_call({push, Callback}, _From, State = #{callbacks := Callbacks}) ->
{reply, ok, State#{callbacks := [Callback | Callbacks]}}; {reply, ok, State#{callbacks := [Callback | Callbacks]}};
handle_call(terminate, _From, State = #{callbacks := Callbacks}) -> handle_call(terminate, _From, State = #{callbacks := Callbacks}) ->
lists:foreach(fun(Fun) -> catch Fun() end, Callbacks), do_terminate(Callbacks),
{stop, normal, ok, State}; {stop, normal, ok, State};
handle_call(_Req, _From, State) -> handle_call(_Req, _From, State) ->
{reply, error, State}. {reply, error, State}.
@ -77,3 +77,23 @@ handle_info({'EXIT', Parent, _Reason}, State = #{owner := Parent}) ->
{stop, normal, State}; {stop, normal, State};
handle_info(_Msg, State) -> handle_info(_Msg, State) ->
{noreply, State}. {noreply, State}.
%%----------------------------------------------------------------------------------
%% Internal fns
%%----------------------------------------------------------------------------------
do_terminate(Callbacks) ->
lists:foreach(
fun(Fun) ->
try
Fun()
catch
K:E:S ->
ct:pal("error executing callback ~p: ~p", [Fun, {K, E}]),
ct:pal("stacktrace: ~p", [S]),
ok
end
end,
Callbacks
),
ok.

View File

@ -443,7 +443,12 @@ t_websocket_info_deliver(_) ->
t_websocket_info_timeout_limiter(_) -> t_websocket_info_timeout_limiter(_) ->
Ref = make_ref(), Ref = make_ref(),
LimiterT = init_limiter(), {ok, Rate} = emqx_limiter_schema:to_rate("50MB"),
LimiterT = init_limiter(#{
bytes => bucket_cfg(),
messages => bucket_cfg(),
client => #{bytes => client_cfg(Rate)}
}),
Next = fun emqx_ws_connection:when_msg_in/3, Next = fun emqx_ws_connection:when_msg_in/3,
Limiter = emqx_limiter_container:set_retry_context({retry, [], [], Next}, LimiterT), Limiter = emqx_limiter_container:set_retry_context({retry, [], [], Next}, LimiterT),
Event = {timeout, Ref, limit_timeout}, Event = {timeout, Ref, limit_timeout},
@ -509,16 +514,16 @@ t_handle_timeout_emit_stats(_) ->
t_ensure_rate_limit(_) -> t_ensure_rate_limit(_) ->
{ok, Rate} = emqx_limiter_schema:to_rate("50MB"), {ok, Rate} = emqx_limiter_schema:to_rate("50MB"),
Limiter = init_limiter(#{ Limiter = init_limiter(#{
bytes_in => bucket_cfg(), bytes => bucket_cfg(),
message_in => bucket_cfg(), messages => bucket_cfg(),
client => #{bytes_in => client_cfg(Rate)} client => #{bytes => client_cfg(Rate)}
}), }),
St = st(#{limiter => Limiter}), St = st(#{limiter => Limiter}),
%% must bigger than value in emqx_ratelimit_SUITE %% must bigger than value in emqx_ratelimit_SUITE
{ok, Need} = emqx_limiter_schema:to_capacity("1GB"), {ok, Need} = emqx_limiter_schema:to_capacity("1GB"),
St1 = ?ws_conn:check_limiter( St1 = ?ws_conn:check_limiter(
[{Need, bytes_in}], [{Need, bytes}],
[], [],
fun(_, _, S) -> S end, fun(_, _, S) -> S end,
[], [],
@ -699,23 +704,21 @@ init_limiter() ->
init_limiter(limiter_cfg()). init_limiter(limiter_cfg()).
init_limiter(LimiterCfg) -> init_limiter(LimiterCfg) ->
emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes_in, message_in], LimiterCfg). emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes, messages], LimiterCfg).
limiter_cfg() -> limiter_cfg() ->
Cfg = bucket_cfg(), Cfg = bucket_cfg(),
Client = client_cfg(), Client = client_cfg(),
#{bytes_in => Cfg, message_in => Cfg, client => #{bytes_in => Client, message_in => Client}}. #{bytes => Cfg, messages => Cfg, client => #{bytes => Client, messages => Client}}.
client_cfg() -> client_cfg() ->
Infinity = emqx_limiter_schema:infinity_value(), client_cfg(infinity).
client_cfg(Infinity).
client_cfg(Rate) -> client_cfg(Rate) ->
Infinity = emqx_limiter_schema:infinity_value(),
#{ #{
rate => Rate, rate => Rate,
initial => 0, initial => 0,
capacity => Infinity, burst => 0,
low_watermark => 1, low_watermark => 1,
divisible => false, divisible => false,
max_retry_time => timer:seconds(5), max_retry_time => timer:seconds(5),
@ -723,14 +726,13 @@ client_cfg(Rate) ->
}. }.
bucket_cfg() -> bucket_cfg() ->
Infinity = emqx_limiter_schema:infinity_value(), #{rate => infinity, initial => 0, burst => 0}.
#{rate => Infinity, initial => 0, capacity => Infinity}.
add_bucket() -> add_bucket() ->
Cfg = bucket_cfg(), Cfg = bucket_cfg(),
emqx_limiter_server:add_bucket(?LIMITER_ID, bytes_in, Cfg), emqx_limiter_server:add_bucket(?LIMITER_ID, bytes, Cfg),
emqx_limiter_server:add_bucket(?LIMITER_ID, message_in, Cfg). emqx_limiter_server:add_bucket(?LIMITER_ID, messages, Cfg).
del_bucket() -> del_bucket() ->
emqx_limiter_server:del_bucket(?LIMITER_ID, bytes_in), emqx_limiter_server:del_bucket(?LIMITER_ID, bytes),
emqx_limiter_server:del_bucket(?LIMITER_ID, message_in). emqx_limiter_server:del_bucket(?LIMITER_ID, messages).

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*- %% -*- mode: erlang -*-
{application, emqx_authn, [ {application, emqx_authn, [
{description, "EMQX Authentication"}, {description, "EMQX Authentication"},
{vsn, "0.1.17"}, {vsn, "0.1.18"},
{modules, []}, {modules, []},
{registered, [emqx_authn_sup, emqx_authn_registry]}, {registered, [emqx_authn_sup, emqx_authn_registry]},
{applications, [kernel, stdlib, emqx_resource, emqx_connector, ehttpc, epgsql, mysql, jose]}, {applications, [kernel, stdlib, emqx_resource, emqx_connector, ehttpc, epgsql, mysql, jose]},

View File

@ -28,6 +28,7 @@
parse_sql/2, parse_sql/2,
render_deep/2, render_deep/2,
render_str/2, render_str/2,
render_urlencoded_str/2,
render_sql_params/2, render_sql_params/2,
is_superuser/1, is_superuser/1,
bin/1, bin/1,
@ -129,6 +130,13 @@ render_str(Template, Credential) ->
#{return => full_binary, var_trans => fun handle_var/2} #{return => full_binary, var_trans => fun handle_var/2}
). ).
render_urlencoded_str(Template, Credential) ->
emqx_placeholder:proc_tmpl(
Template,
mapping_credential(Credential),
#{return => full_binary, var_trans => fun urlencode_var/2}
).
render_sql_params(ParamList, Credential) -> render_sql_params(ParamList, Credential) ->
emqx_placeholder:proc_tmpl( emqx_placeholder:proc_tmpl(
ParamList, ParamList,
@ -217,6 +225,11 @@ without_password(Credential, [Name | Rest]) ->
without_password(Credential, Rest) without_password(Credential, Rest)
end. end.
urlencode_var({var, _} = Var, Value) ->
emqx_http_lib:uri_encode(handle_var(Var, Value));
urlencode_var(Var, Value) ->
handle_var(Var, Value).
handle_var({var, _Name}, undefined) -> handle_var({var, _Name}, undefined) ->
<<>>; <<>>;
handle_var({var, <<"peerhost">>}, PeerHost) -> handle_var({var, <<"peerhost">>}, PeerHost) ->

View File

@ -105,14 +105,16 @@ mnesia(boot) ->
%% Hocon Schema %% Hocon Schema
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
namespace() -> "authn-scram-builtin_db". namespace() -> "authn".
tags() -> tags() ->
[<<"Authentication">>]. [<<"Authentication">>].
roots() -> [?CONF_NS]. %% used for config check when the schema module is resolved
roots() ->
[{?CONF_NS, hoconsc:mk(hoconsc:ref(?MODULE, scram))}].
fields(?CONF_NS) -> fields(scram) ->
[ [
{mechanism, emqx_authn_schema:mechanism(scram)}, {mechanism, emqx_authn_schema:mechanism(scram)},
{backend, emqx_authn_schema:backend(built_in_database)}, {backend, emqx_authn_schema:backend(built_in_database)},
@ -120,7 +122,7 @@ fields(?CONF_NS) ->
{iteration_count, fun iteration_count/1} {iteration_count, fun iteration_count/1}
] ++ emqx_authn_schema:common_fields(). ] ++ emqx_authn_schema:common_fields().
desc(?CONF_NS) -> desc(scram) ->
"Settings for Salted Challenge Response Authentication Mechanism\n" "Settings for Salted Challenge Response Authentication Mechanism\n"
"(SCRAM) authentication."; "(SCRAM) authentication.";
desc(_) -> desc(_) ->
@ -141,7 +143,7 @@ iteration_count(_) -> undefined.
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
refs() -> refs() ->
[hoconsc:ref(?MODULE, ?CONF_NS)]. [hoconsc:ref(?MODULE, scram)].
create( create(
AuthenticatorID, AuthenticatorID,

View File

@ -53,34 +53,35 @@
%% Hocon Schema %% Hocon Schema
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
namespace() -> "authn-http". namespace() -> "authn".
tags() -> tags() ->
[<<"Authentication">>]. [<<"Authentication">>].
%% used for config check when the schema module is resolved
roots() -> roots() ->
[ [
{?CONF_NS, {?CONF_NS,
hoconsc:mk( hoconsc:mk(
hoconsc:union(fun union_member_selector/1), hoconsc:union(fun ?MODULE:union_member_selector/1),
#{} #{}
)} )}
]. ].
fields(get) -> fields(http_get) ->
[ [
{method, #{type => get, required => true, desc => ?DESC(method)}}, {method, #{type => get, required => true, desc => ?DESC(method)}},
{headers, fun headers_no_content_type/1} {headers, fun headers_no_content_type/1}
] ++ common_fields(); ] ++ common_fields();
fields(post) -> fields(http_post) ->
[ [
{method, #{type => post, required => true, desc => ?DESC(method)}}, {method, #{type => post, required => true, desc => ?DESC(method)}},
{headers, fun headers/1} {headers, fun headers/1}
] ++ common_fields(). ] ++ common_fields().
desc(get) -> desc(http_get) ->
?DESC(get); ?DESC(get);
desc(post) -> desc(http_post) ->
?DESC(post); ?DESC(post);
desc(_) -> desc(_) ->
undefined. undefined.
@ -158,8 +159,8 @@ request_timeout(_) -> undefined.
refs() -> refs() ->
[ [
hoconsc:ref(?MODULE, get), hoconsc:ref(?MODULE, http_get),
hoconsc:ref(?MODULE, post) hoconsc:ref(?MODULE, http_post)
]. ].
union_member_selector(all_union_members) -> union_member_selector(all_union_members) ->
@ -168,9 +169,9 @@ union_member_selector({value, Value}) ->
refs(Value). refs(Value).
refs(#{<<"method">> := <<"get">>}) -> refs(#{<<"method">> := <<"get">>}) ->
[hoconsc:ref(?MODULE, get)]; [hoconsc:ref(?MODULE, http_get)];
refs(#{<<"method">> := <<"post">>}) -> refs(#{<<"method">> := <<"post">>}) ->
[hoconsc:ref(?MODULE, post)]; [hoconsc:ref(?MODULE, http_post)];
refs(_) -> refs(_) ->
throw(#{ throw(#{
field_name => method, field_name => method,
@ -313,9 +314,9 @@ parse_url(Url) ->
BaseUrl = iolist_to_binary([Scheme, "//", HostPort]), BaseUrl = iolist_to_binary([Scheme, "//", HostPort]),
case string:split(Remaining, "?", leading) of case string:split(Remaining, "?", leading) of
[Path, QueryString] -> [Path, QueryString] ->
{BaseUrl, Path, QueryString}; {BaseUrl, <<"/", Path/binary>>, QueryString};
[Path] -> [Path] ->
{BaseUrl, Path, <<>>} {BaseUrl, <<"/", Path/binary>>, <<>>}
end; end;
[HostPort] -> [HostPort] ->
{iolist_to_binary([Scheme, "//", HostPort]), <<>>, <<>>} {iolist_to_binary([Scheme, "//", HostPort]), <<>>, <<>>}
@ -356,7 +357,7 @@ generate_request(Credential, #{
body_template := BodyTemplate body_template := BodyTemplate
}) -> }) ->
Headers = maps:to_list(Headers0), Headers = maps:to_list(Headers0),
Path = emqx_authn_utils:render_str(BasePathTemplate, Credential), Path = emqx_authn_utils:render_urlencoded_str(BasePathTemplate, Credential),
Query = emqx_authn_utils:render_deep(BaseQueryTemplate, Credential), Query = emqx_authn_utils:render_deep(BaseQueryTemplate, Credential),
Body = emqx_authn_utils:render_deep(BodyTemplate, Credential), Body = emqx_authn_utils:render_deep(BodyTemplate, Credential),
case Method of case Method of
@ -371,9 +372,9 @@ generate_request(Credential, #{
end. end.
append_query(Path, []) -> append_query(Path, []) ->
encode_path(Path); Path;
append_query(Path, Query) -> append_query(Path, Query) ->
encode_path(Path) ++ "?" ++ binary_to_list(qs(Query)). Path ++ "?" ++ binary_to_list(qs(Query)).
qs(KVs) -> qs(KVs) ->
qs(KVs, []). qs(KVs, []).
@ -435,10 +436,6 @@ parse_body(ContentType, _) ->
uri_encode(T) -> uri_encode(T) ->
emqx_http_lib:uri_encode(to_list(T)). emqx_http_lib:uri_encode(to_list(T)).
encode_path(Path) ->
Parts = string:split(Path, "/", all),
lists:flatten(["/" ++ Part || Part <- lists:map(fun uri_encode/1, Parts)]).
request_for_log(Credential, #{url := Url} = State) -> request_for_log(Credential, #{url := Url} = State) ->
SafeCredential = emqx_authn_utils:without_password(Credential), SafeCredential = emqx_authn_utils:without_password(Credential),
case generate_request(SafeCredential, State) of case generate_request(SafeCredential, State) of

View File

@ -35,18 +35,17 @@
callback_mode() -> always_sync. callback_mode() -> always_sync.
on_start(InstId, Opts) -> on_start(InstId, Opts) ->
PoolName = emqx_plugin_libs_pool:pool_name(InstId),
PoolOpts = [ PoolOpts = [
{pool_size, maps:get(pool_size, Opts, ?DEFAULT_POOL_SIZE)}, {pool_size, maps:get(pool_size, Opts, ?DEFAULT_POOL_SIZE)},
{connector_opts, Opts} {connector_opts, Opts}
], ],
case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, PoolOpts) of case emqx_resource_pool:start(InstId, ?MODULE, PoolOpts) of
ok -> {ok, #{pool_name => PoolName}}; ok -> {ok, #{pool_name => InstId}};
{error, Reason} -> {error, Reason} {error, Reason} -> {error, Reason}
end. end.
on_stop(_InstId, #{pool_name := PoolName}) -> on_stop(_InstId, #{pool_name := PoolName}) ->
emqx_plugin_libs_pool:stop_pool(PoolName). emqx_resource_pool:stop(PoolName).
on_query(InstId, get_jwks, #{pool_name := PoolName}) -> on_query(InstId, get_jwks, #{pool_name := PoolName}) ->
Result = ecpool:pick_and_do(PoolName, {emqx_authn_jwks_client, get_jwks, []}, no_handover), Result = ecpool:pick_and_do(PoolName, {emqx_authn_jwks_client, get_jwks, []}, no_handover),
@ -72,18 +71,17 @@ on_query(_InstId, {update, Opts}, #{pool_name := PoolName}) ->
ok. ok.
on_get_status(_InstId, #{pool_name := PoolName}) -> on_get_status(_InstId, #{pool_name := PoolName}) ->
Func = case emqx_resource_pool:health_check_workers(PoolName, fun health_check/1) of
fun(Conn) ->
case emqx_authn_jwks_client:get_jwks(Conn) of
{ok, _} -> true;
_ -> false
end
end,
case emqx_plugin_libs_pool:health_check_ecpool_workers(PoolName, Func) of
true -> connected; true -> connected;
false -> disconnected false -> disconnected
end. end.
health_check(Conn) ->
case emqx_authn_jwks_client:get_jwks(Conn) of
{ok, _} -> true;
_ -> false
end.
connect(Opts) -> connect(Opts) ->
ConnectorOpts = proplists:get_value(connector_opts, Opts), ConnectorOpts = proplists:get_value(connector_opts, Opts),
emqx_authn_jwks_client:start_link(ConnectorOpts). emqx_authn_jwks_client:start_link(ConnectorOpts).

View File

@ -43,36 +43,57 @@
%% Hocon Schema %% Hocon Schema
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
namespace() -> "authn-jwt". namespace() -> "authn".
tags() -> tags() ->
[<<"Authentication">>]. [<<"Authentication">>].
%% used for config check when the schema module is resolved
roots() -> roots() ->
[ [
{?CONF_NS, {?CONF_NS,
hoconsc:mk( hoconsc:mk(
hoconsc:union(fun union_member_selector/1), hoconsc:union(fun ?MODULE:union_member_selector/1),
#{} #{}
)} )}
]. ].
fields('hmac-based') -> fields(jwt_hmac) ->
[ [
{use_jwks, sc(hoconsc:enum([false]), #{required => true, desc => ?DESC(use_jwks)})}, %% for hmac, it's the 'algorithm' field which selects this type
%% use_jwks field can be ignored (kept for backward compatibility)
{use_jwks,
sc(
hoconsc:enum([false]),
#{
required => false,
desc => ?DESC(use_jwks),
importance => ?IMPORTANCE_HIDDEN
}
)},
{algorithm, {algorithm,
sc(hoconsc:enum(['hmac-based']), #{required => true, desc => ?DESC(algorithm)})}, sc(hoconsc:enum(['hmac-based']), #{required => true, desc => ?DESC(algorithm)})},
{secret, fun secret/1}, {secret, fun secret/1},
{secret_base64_encoded, fun secret_base64_encoded/1} {secret_base64_encoded, fun secret_base64_encoded/1}
] ++ common_fields(); ] ++ common_fields();
fields('public-key') -> fields(jwt_public_key) ->
[ [
{use_jwks, sc(hoconsc:enum([false]), #{required => true, desc => ?DESC(use_jwks)})}, %% for public-key, it's the 'algorithm' field which selects this type
%% use_jwks field can be ignored (kept for backward compatibility)
{use_jwks,
sc(
hoconsc:enum([false]),
#{
required => false,
desc => ?DESC(use_jwks),
importance => ?IMPORTANCE_HIDDEN
}
)},
{algorithm, {algorithm,
sc(hoconsc:enum(['public-key']), #{required => true, desc => ?DESC(algorithm)})}, sc(hoconsc:enum(['public-key']), #{required => true, desc => ?DESC(algorithm)})},
{public_key, fun public_key/1} {public_key, fun public_key/1}
] ++ common_fields(); ] ++ common_fields();
fields('jwks') -> fields(jwt_jwks) ->
[ [
{use_jwks, sc(hoconsc:enum([true]), #{required => true, desc => ?DESC(use_jwks)})}, {use_jwks, sc(hoconsc:enum([true]), #{required => true, desc => ?DESC(use_jwks)})},
{endpoint, fun endpoint/1}, {endpoint, fun endpoint/1},
@ -85,12 +106,12 @@ fields('jwks') ->
}} }}
] ++ common_fields(). ] ++ common_fields().
desc('hmac-based') -> desc(jwt_hmac) ->
?DESC('hmac-based'); ?DESC(jwt_hmac);
desc('public-key') -> desc(jwt_public_key) ->
?DESC('public-key'); ?DESC(jwt_public_key);
desc('jwks') -> desc(jwt_jwks) ->
?DESC('jwks'); ?DESC(jwt_jwks);
desc(undefined) -> desc(undefined) ->
undefined. undefined.
@ -160,9 +181,9 @@ from(_) -> undefined.
refs() -> refs() ->
[ [
hoconsc:ref(?MODULE, 'hmac-based'), hoconsc:ref(?MODULE, jwt_hmac),
hoconsc:ref(?MODULE, 'public-key'), hoconsc:ref(?MODULE, jwt_public_key),
hoconsc:ref(?MODULE, 'jwks') hoconsc:ref(?MODULE, jwt_jwks)
]. ].
union_member_selector(all_union_members) -> union_member_selector(all_union_members) ->
@ -179,11 +200,11 @@ boolean(<<"false">>) -> false;
boolean(Other) -> Other. boolean(Other) -> Other.
select_ref(true, _) -> select_ref(true, _) ->
[hoconsc:ref(?MODULE, 'jwks')]; [hoconsc:ref(?MODULE, 'jwt_jwks')];
select_ref(false, #{<<"public_key">> := _}) -> select_ref(false, #{<<"public_key">> := _}) ->
[hoconsc:ref(?MODULE, 'public-key')]; [hoconsc:ref(?MODULE, jwt_public_key)];
select_ref(false, _) -> select_ref(false, _) ->
[hoconsc:ref(?MODULE, 'hmac-based')]; [hoconsc:ref(?MODULE, jwt_hmac)];
select_ref(_, _) -> select_ref(_, _) ->
throw(#{ throw(#{
field_name => use_jwks, field_name => use_jwks,

View File

@ -107,14 +107,16 @@ mnesia(boot) ->
%% Hocon Schema %% Hocon Schema
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
namespace() -> "authn-builtin_db". namespace() -> "authn".
tags() -> tags() ->
[<<"Authentication">>]. [<<"Authentication">>].
roots() -> [?CONF_NS]. %% used for config check when the schema module is resolved
roots() ->
[{?CONF_NS, hoconsc:mk(hoconsc:ref(?MODULE, builtin_db))}].
fields(?CONF_NS) -> fields(builtin_db) ->
[ [
{mechanism, emqx_authn_schema:mechanism(password_based)}, {mechanism, emqx_authn_schema:mechanism(password_based)},
{backend, emqx_authn_schema:backend(built_in_database)}, {backend, emqx_authn_schema:backend(built_in_database)},
@ -122,8 +124,8 @@ fields(?CONF_NS) ->
{password_hash_algorithm, fun emqx_authn_password_hashing:type_rw/1} {password_hash_algorithm, fun emqx_authn_password_hashing:type_rw/1}
] ++ emqx_authn_schema:common_fields(). ] ++ emqx_authn_schema:common_fields().
desc(?CONF_NS) -> desc(builtin_db) ->
?DESC(?CONF_NS); ?DESC(builtin_db);
desc(_) -> desc(_) ->
undefined. undefined.
@ -138,7 +140,7 @@ user_id_type(_) -> undefined.
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
refs() -> refs() ->
[hoconsc:ref(?MODULE, ?CONF_NS)]. [hoconsc:ref(?MODULE, builtin_db)].
create(_AuthenticatorID, Config) -> create(_AuthenticatorID, Config) ->
create(Config). create(Config).

View File

@ -44,32 +44,33 @@
%% Hocon Schema %% Hocon Schema
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
namespace() -> "authn-mongodb". namespace() -> "authn".
tags() -> tags() ->
[<<"Authentication">>]. [<<"Authentication">>].
%% used for config check when the schema module is resolved
roots() -> roots() ->
[ [
{?CONF_NS, {?CONF_NS,
hoconsc:mk( hoconsc:mk(
hoconsc:union(fun union_member_selector/1), hoconsc:union(fun ?MODULE:union_member_selector/1),
#{} #{}
)} )}
]. ].
fields(standalone) -> fields(mongo_single) ->
common_fields() ++ emqx_connector_mongo:fields(single); common_fields() ++ emqx_connector_mongo:fields(single);
fields('replica-set') -> fields(mongo_rs) ->
common_fields() ++ emqx_connector_mongo:fields(rs); common_fields() ++ emqx_connector_mongo:fields(rs);
fields('sharded-cluster') -> fields(mongo_sharded) ->
common_fields() ++ emqx_connector_mongo:fields(sharded). common_fields() ++ emqx_connector_mongo:fields(sharded).
desc(standalone) -> desc(mongo_single) ->
?DESC(standalone); ?DESC(single);
desc('replica-set') -> desc(mongo_rs) ->
?DESC('replica-set'); ?DESC('replica-set');
desc('sharded-cluster') -> desc(mongo_sharded) ->
?DESC('sharded-cluster'); ?DESC('sharded-cluster');
desc(_) -> desc(_) ->
undefined. undefined.
@ -126,9 +127,9 @@ is_superuser_field(_) -> undefined.
refs() -> refs() ->
[ [
hoconsc:ref(?MODULE, standalone), hoconsc:ref(?MODULE, mongo_single),
hoconsc:ref(?MODULE, 'replica-set'), hoconsc:ref(?MODULE, mongo_rs),
hoconsc:ref(?MODULE, 'sharded-cluster') hoconsc:ref(?MODULE, mongo_sharded)
]. ].
create(_AuthenticatorID, Config) -> create(_AuthenticatorID, Config) ->
@ -254,11 +255,11 @@ union_member_selector({value, Value}) ->
refs(Value). refs(Value).
refs(#{<<"mongo_type">> := <<"single">>}) -> refs(#{<<"mongo_type">> := <<"single">>}) ->
[hoconsc:ref(?MODULE, standalone)]; [hoconsc:ref(?MODULE, mongo_single)];
refs(#{<<"mongo_type">> := <<"rs">>}) -> refs(#{<<"mongo_type">> := <<"rs">>}) ->
[hoconsc:ref(?MODULE, 'replica-set')]; [hoconsc:ref(?MODULE, mongo_rs)];
refs(#{<<"mongo_type">> := <<"sharded">>}) -> refs(#{<<"mongo_type">> := <<"sharded">>}) ->
[hoconsc:ref(?MODULE, 'sharded-cluster')]; [hoconsc:ref(?MODULE, mongo_sharded)];
refs(_) -> refs(_) ->
throw(#{ throw(#{
field_name => mongo_type, field_name => mongo_type,

View File

@ -45,14 +45,16 @@
%% Hocon Schema %% Hocon Schema
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
namespace() -> "authn-mysql". namespace() -> "authn".
tags() -> tags() ->
[<<"Authentication">>]. [<<"Authentication">>].
roots() -> [?CONF_NS]. %% used for config check when the schema module is resolved
roots() ->
[{?CONF_NS, hoconsc:mk(hoconsc:ref(?MODULE, mysql))}].
fields(?CONF_NS) -> fields(mysql) ->
[ [
{mechanism, emqx_authn_schema:mechanism(password_based)}, {mechanism, emqx_authn_schema:mechanism(password_based)},
{backend, emqx_authn_schema:backend(mysql)}, {backend, emqx_authn_schema:backend(mysql)},
@ -62,8 +64,8 @@ fields(?CONF_NS) ->
] ++ emqx_authn_schema:common_fields() ++ ] ++ emqx_authn_schema:common_fields() ++
proplists:delete(prepare_statement, emqx_connector_mysql:fields(config)). proplists:delete(prepare_statement, emqx_connector_mysql:fields(config)).
desc(?CONF_NS) -> desc(mysql) ->
?DESC(?CONF_NS); ?DESC(mysql);
desc(_) -> desc(_) ->
undefined. undefined.
@ -82,7 +84,7 @@ query_timeout(_) -> undefined.
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
refs() -> refs() ->
[hoconsc:ref(?MODULE, ?CONF_NS)]. [hoconsc:ref(?MODULE, mysql)].
create(_AuthenticatorID, Config) -> create(_AuthenticatorID, Config) ->
create(Config). create(Config).

View File

@ -49,14 +49,16 @@
%% Hocon Schema %% Hocon Schema
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
namespace() -> "authn-postgresql". namespace() -> "authn".
tags() -> tags() ->
[<<"Authentication">>]. [<<"Authentication">>].
roots() -> [?CONF_NS]. %% used for config check when the schema module is resolved
roots() ->
[{?CONF_NS, hoconsc:mk(hoconsc:ref(?MODULE, postgresql))}].
fields(?CONF_NS) -> fields(postgresql) ->
[ [
{mechanism, emqx_authn_schema:mechanism(password_based)}, {mechanism, emqx_authn_schema:mechanism(password_based)},
{backend, emqx_authn_schema:backend(postgresql)}, {backend, emqx_authn_schema:backend(postgresql)},
@ -66,8 +68,8 @@ fields(?CONF_NS) ->
emqx_authn_schema:common_fields() ++ emqx_authn_schema:common_fields() ++
proplists:delete(prepare_statement, emqx_connector_pgsql:fields(config)). proplists:delete(prepare_statement, emqx_connector_pgsql:fields(config)).
desc(?CONF_NS) -> desc(postgresql) ->
?DESC(?CONF_NS); ?DESC(postgresql);
desc(_) -> desc(_) ->
undefined. undefined.
@ -81,7 +83,7 @@ query(_) -> undefined.
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
refs() -> refs() ->
[hoconsc:ref(?MODULE, ?CONF_NS)]. [hoconsc:ref(?MODULE, postgresql)].
create(_AuthenticatorID, Config) -> create(_AuthenticatorID, Config) ->
create(Config). create(Config).

View File

@ -44,32 +44,33 @@
%% Hocon Schema %% Hocon Schema
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
namespace() -> "authn-redis". namespace() -> "authn".
tags() -> tags() ->
[<<"Authentication">>]. [<<"Authentication">>].
%% used for config check when the schema module is resolved
roots() -> roots() ->
[ [
{?CONF_NS, {?CONF_NS,
hoconsc:mk( hoconsc:mk(
hoconsc:union(fun union_member_selector/1), hoconsc:union(fun ?MODULE:union_member_selector/1),
#{} #{}
)} )}
]. ].
fields(standalone) -> fields(redis_single) ->
common_fields() ++ emqx_connector_redis:fields(single); common_fields() ++ emqx_connector_redis:fields(single);
fields(cluster) -> fields(redis_cluster) ->
common_fields() ++ emqx_connector_redis:fields(cluster); common_fields() ++ emqx_connector_redis:fields(cluster);
fields(sentinel) -> fields(redis_sentinel) ->
common_fields() ++ emqx_connector_redis:fields(sentinel). common_fields() ++ emqx_connector_redis:fields(sentinel).
desc(standalone) -> desc(redis_single) ->
?DESC(standalone); ?DESC(single);
desc(cluster) -> desc(redis_cluster) ->
?DESC(cluster); ?DESC(cluster);
desc(sentinel) -> desc(redis_sentinel) ->
?DESC(sentinel); ?DESC(sentinel);
desc(_) -> desc(_) ->
"". "".
@ -93,9 +94,9 @@ cmd(_) -> undefined.
refs() -> refs() ->
[ [
hoconsc:ref(?MODULE, standalone), hoconsc:ref(?MODULE, redis_single),
hoconsc:ref(?MODULE, cluster), hoconsc:ref(?MODULE, redis_cluster),
hoconsc:ref(?MODULE, sentinel) hoconsc:ref(?MODULE, redis_sentinel)
]. ].
union_member_selector(all_union_members) -> union_member_selector(all_union_members) ->
@ -104,11 +105,11 @@ union_member_selector({value, Value}) ->
refs(Value). refs(Value).
refs(#{<<"redis_type">> := <<"single">>}) -> refs(#{<<"redis_type">> := <<"single">>}) ->
[hoconsc:ref(?MODULE, standalone)]; [hoconsc:ref(?MODULE, redis_single)];
refs(#{<<"redis_type">> := <<"cluster">>}) -> refs(#{<<"redis_type">> := <<"cluster">>}) ->
[hoconsc:ref(?MODULE, cluster)]; [hoconsc:ref(?MODULE, redis_cluster)];
refs(#{<<"redis_type">> := <<"sentinel">>}) -> refs(#{<<"redis_type">> := <<"sentinel">>}) ->
[hoconsc:ref(?MODULE, sentinel)]; [hoconsc:ref(?MODULE, redis_sentinel)];
refs(_) -> refs(_) ->
throw(#{ throw(#{
field_name => redis_type, field_name => redis_type,

View File

@ -47,7 +47,6 @@
}) })
). ).
-define(SERVER_RESPONSE_URLENCODE(Result), ?SERVER_RESPONSE_URLENCODE(Result, false)).
-define(SERVER_RESPONSE_URLENCODE(Result, IsSuperuser), -define(SERVER_RESPONSE_URLENCODE(Result, IsSuperuser),
list_to_binary( list_to_binary(
"result=" ++ "result=" ++
@ -166,6 +165,54 @@ test_user_auth(#{
?GLOBAL ?GLOBAL
). ).
t_authenticate_path_placeholders(_Config) ->
ok = emqx_authn_http_test_server:stop(),
{ok, _} = emqx_authn_http_test_server:start_link(?HTTP_PORT, <<"/[...]">>),
ok = emqx_authn_http_test_server:set_handler(
fun(Req0, State) ->
Req =
case cowboy_req:path(Req0) of
<<"/my/p%20ath//us%20er/auth//">> ->
cowboy_req:reply(
200,
#{<<"content-type">> => <<"application/json">>},
emqx_utils_json:encode(#{result => allow, is_superuser => false}),
Req0
);
Path ->
ct:pal("Unexpected path: ~p", [Path]),
cowboy_req:reply(403, Req0)
end,
{ok, Req, State}
end
),
Credentials = ?CREDENTIALS#{
username => <<"us er">>
},
AuthConfig = maps:merge(
raw_http_auth_config(),
#{
<<"url">> => <<"http://127.0.0.1:32333/my/p%20ath//${username}/auth//">>,
<<"body">> => #{}
}
),
{ok, _} = emqx:update_config(
?PATH,
{create_authenticator, ?GLOBAL, AuthConfig}
),
?assertMatch(
{ok, #{is_superuser := false}},
emqx_access_control:authenticate(Credentials)
),
_ = emqx_authn_test_lib:delete_authenticators(
[authentication],
?GLOBAL
).
t_no_value_for_placeholder(_Config) -> t_no_value_for_placeholder(_Config) ->
Handler = fun(Req0, State) -> Handler = fun(Req0, State) ->
{ok, RawBody, Req1} = cowboy_req:read_body(Req0), {ok, RawBody, Req1} = cowboy_req:read_body(Req0),

View File

@ -107,7 +107,7 @@ t_update_with_invalid_config(_Config) ->
?assertMatch( ?assertMatch(
{error, #{ {error, #{
kind := validation_error, kind := validation_error,
matched_type := "authn-postgresql:authentication", matched_type := "authn:postgresql",
path := "authentication.1.server", path := "authentication.1.server",
reason := required_field reason := required_field
}}, }},

View File

@ -162,7 +162,7 @@ t_create_invalid_config(_Config) ->
?assertMatch( ?assertMatch(
{error, #{ {error, #{
kind := validation_error, kind := validation_error,
matched_type := "authn-redis:standalone", matched_type := "authn:redis_single",
path := "authentication.1.server", path := "authentication.1.server",
reason := required_field reason := required_field
}}, }},

View File

@ -53,7 +53,7 @@ t_check_schema(_Config) ->
?assertThrow( ?assertThrow(
#{ #{
path := "authentication.1.password_hash_algorithm.name", path := "authentication.1.password_hash_algorithm.name",
matched_type := "authn-builtin_db:authentication/authn-hash:simple", matched_type := "authn:builtin_db/authn-hash:simple",
reason := unable_to_convert_to_enum_symbol reason := unable_to_convert_to_enum_symbol
}, },
Check(ConfigNotOk) Check(ConfigNotOk)
@ -72,7 +72,7 @@ t_check_schema(_Config) ->
#{ #{
path := "authentication.1.password_hash_algorithm", path := "authentication.1.password_hash_algorithm",
reason := "algorithm_name_missing", reason := "algorithm_name_missing",
matched_type := "authn-builtin_db:authentication" matched_type := "authn:builtin_db"
}, },
Check(ConfigMissingAlgoName) Check(ConfigMissingAlgoName)
). ).

View File

@ -32,19 +32,19 @@ union_member_selector_mongo_test_() ->
end}, end},
{"single", fun() -> {"single", fun() ->
?assertMatch( ?assertMatch(
?ERR(#{matched_type := "authn-mongodb:standalone"}), ?ERR(#{matched_type := "authn:mongo_single"}),
Check("{mongo_type: single}") Check("{mongo_type: single}")
) )
end}, end},
{"replica-set", fun() -> {"replica-set", fun() ->
?assertMatch( ?assertMatch(
?ERR(#{matched_type := "authn-mongodb:replica-set"}), ?ERR(#{matched_type := "authn:mongo_rs"}),
Check("{mongo_type: rs}") Check("{mongo_type: rs}")
) )
end}, end},
{"sharded", fun() -> {"sharded", fun() ->
?assertMatch( ?assertMatch(
?ERR(#{matched_type := "authn-mongodb:sharded-cluster"}), ?ERR(#{matched_type := "authn:mongo_sharded"}),
Check("{mongo_type: sharded}") Check("{mongo_type: sharded}")
) )
end} end}
@ -61,19 +61,19 @@ union_member_selector_jwt_test_() ->
end}, end},
{"jwks", fun() -> {"jwks", fun() ->
?assertMatch( ?assertMatch(
?ERR(#{matched_type := "authn-jwt:jwks"}), ?ERR(#{matched_type := "authn:jwt_jwks"}),
Check("{use_jwks = true}") Check("{use_jwks = true}")
) )
end}, end},
{"publick-key", fun() -> {"publick-key", fun() ->
?assertMatch( ?assertMatch(
?ERR(#{matched_type := "authn-jwt:public-key"}), ?ERR(#{matched_type := "authn:jwt_public_key"}),
Check("{use_jwks = false, public_key = 1}") Check("{use_jwks = false, public_key = 1}")
) )
end}, end},
{"hmac-based", fun() -> {"hmac-based", fun() ->
?assertMatch( ?assertMatch(
?ERR(#{matched_type := "authn-jwt:hmac-based"}), ?ERR(#{matched_type := "authn:jwt_hmac"}),
Check("{use_jwks = false}") Check("{use_jwks = false}")
) )
end} end}
@ -90,19 +90,19 @@ union_member_selector_redis_test_() ->
end}, end},
{"single", fun() -> {"single", fun() ->
?assertMatch( ?assertMatch(
?ERR(#{matched_type := "authn-redis:standalone"}), ?ERR(#{matched_type := "authn:redis_single"}),
Check("{redis_type = single}") Check("{redis_type = single}")
) )
end}, end},
{"cluster", fun() -> {"cluster", fun() ->
?assertMatch( ?assertMatch(
?ERR(#{matched_type := "authn-redis:cluster"}), ?ERR(#{matched_type := "authn:redis_cluster"}),
Check("{redis_type = cluster}") Check("{redis_type = cluster}")
) )
end}, end},
{"sentinel", fun() -> {"sentinel", fun() ->
?assertMatch( ?assertMatch(
?ERR(#{matched_type := "authn-redis:sentinel"}), ?ERR(#{matched_type := "authn:redis_sentinel"}),
Check("{redis_type = sentinel}") Check("{redis_type = sentinel}")
) )
end} end}
@ -119,13 +119,13 @@ union_member_selector_http_test_() ->
end}, end},
{"get", fun() -> {"get", fun() ->
?assertMatch( ?assertMatch(
?ERR(#{matched_type := "authn-http:get"}), ?ERR(#{matched_type := "authn:http_get"}),
Check("{method = get}") Check("{method = get}")
) )
end}, end},
{"post", fun() -> {"post", fun() ->
?assertMatch( ?assertMatch(
?ERR(#{matched_type := "authn-http:post"}), ?ERR(#{matched_type := "authn:http_post"}),
Check("{method = post}") Check("{method = post}")
) )
end} end}

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*- %% -*- mode: erlang -*-
{application, emqx_authz, [ {application, emqx_authz, [
{description, "An OTP application"}, {description, "An OTP application"},
{vsn, "0.1.17"}, {vsn, "0.1.18"},
{registered, []}, {registered, []},
{mod, {emqx_authz_app, []}}, {mod, {emqx_authz_app, []}},
{applications, [ {applications, [

View File

@ -161,9 +161,9 @@ parse_url(Url) ->
BaseUrl = iolist_to_binary([Scheme, "//", HostPort]), BaseUrl = iolist_to_binary([Scheme, "//", HostPort]),
case string:split(Remaining, "?", leading) of case string:split(Remaining, "?", leading) of
[Path, QueryString] -> [Path, QueryString] ->
{BaseUrl, Path, QueryString}; {BaseUrl, <<"/", Path/binary>>, QueryString};
[Path] -> [Path] ->
{BaseUrl, Path, <<>>} {BaseUrl, <<"/", Path/binary>>, <<>>}
end; end;
[HostPort] -> [HostPort] ->
{iolist_to_binary([Scheme, "//", HostPort]), <<>>, <<>>} {iolist_to_binary([Scheme, "//", HostPort]), <<>>, <<>>}
@ -185,7 +185,7 @@ generate_request(
} }
) -> ) ->
Values = client_vars(Client, PubSub, Topic), Values = client_vars(Client, PubSub, Topic),
Path = emqx_authz_utils:render_str(BasePathTemplate, Values), Path = emqx_authz_utils:render_urlencoded_str(BasePathTemplate, Values),
Query = emqx_authz_utils:render_deep(BaseQueryTemplate, Values), Query = emqx_authz_utils:render_deep(BaseQueryTemplate, Values),
Body = emqx_authz_utils:render_deep(BodyTemplate, Values), Body = emqx_authz_utils:render_deep(BodyTemplate, Values),
case Method of case Method of
@ -202,9 +202,9 @@ generate_request(
end. end.
append_query(Path, []) -> append_query(Path, []) ->
encode_path(Path); to_list(Path);
append_query(Path, Query) -> append_query(Path, Query) ->
encode_path(Path) ++ "?" ++ to_list(query_string(Query)). to_list(Path) ++ "?" ++ to_list(query_string(Query)).
query_string(Body) -> query_string(Body) ->
query_string(Body, []). query_string(Body, []).
@ -222,10 +222,6 @@ query_string([{K, V} | More], Acc) ->
uri_encode(T) -> uri_encode(T) ->
emqx_http_lib:uri_encode(to_list(T)). emqx_http_lib:uri_encode(to_list(T)).
encode_path(Path) ->
Parts = string:split(Path, "/", all),
lists:flatten(["/" ++ Part || Part <- lists:map(fun uri_encode/1, Parts)]).
serialize_body(<<"application/json">>, Body) -> serialize_body(<<"application/json">>, Body) ->
emqx_utils_json:encode(Body); emqx_utils_json:encode(Body);
serialize_body(<<"application/x-www-form-urlencoded">>, Body) -> serialize_body(<<"application/x-www-form-urlencoded">>, Body) ->

View File

@ -185,7 +185,7 @@ match_who(#{peerhost := IpAddress}, {ipaddrs, CIDRs}) ->
match_who(ClientInfo, {'and', Principals}) when is_list(Principals) -> match_who(ClientInfo, {'and', Principals}) when is_list(Principals) ->
lists:foldl( lists:foldl(
fun(Principal, Permission) -> fun(Principal, Permission) ->
match_who(ClientInfo, Principal) andalso Permission Permission andalso match_who(ClientInfo, Principal)
end, end,
true, true,
Principals Principals
@ -193,7 +193,7 @@ match_who(ClientInfo, {'and', Principals}) when is_list(Principals) ->
match_who(ClientInfo, {'or', Principals}) when is_list(Principals) -> match_who(ClientInfo, {'or', Principals}) when is_list(Principals) ->
lists:foldl( lists:foldl(
fun(Principal, Permission) -> fun(Principal, Permission) ->
match_who(ClientInfo, Principal) orelse Permission Permission orelse match_who(ClientInfo, Principal)
end, end,
false, false,
Principals Principals

View File

@ -54,7 +54,7 @@ type_names() ->
file, file,
http_get, http_get,
http_post, http_post,
mnesia, builtin_db,
mongo_single, mongo_single,
mongo_rs, mongo_rs,
mongo_sharded, mongo_sharded,
@ -93,7 +93,7 @@ fields(http_post) ->
{method, method(post)}, {method, method(post)},
{headers, fun headers/1} {headers, fun headers/1}
]; ];
fields(mnesia) -> fields(builtin_db) ->
authz_common_fields(built_in_database); authz_common_fields(built_in_database);
fields(mongo_single) -> fields(mongo_single) ->
authz_common_fields(mongodb) ++ authz_common_fields(mongodb) ++
@ -191,8 +191,8 @@ desc(http_get) ->
?DESC(http_get); ?DESC(http_get);
desc(http_post) -> desc(http_post) ->
?DESC(http_post); ?DESC(http_post);
desc(mnesia) -> desc(builtin_db) ->
?DESC(mnesia); ?DESC(builtin_db);
desc(mongo_single) -> desc(mongo_single) ->
?DESC(mongo_single); ?DESC(mongo_single);
desc(mongo_rs) -> desc(mongo_rs) ->
@ -459,7 +459,7 @@ select_union_member(#{<<"type">> := <<"http">>} = Value) ->
}) })
end; end;
select_union_member(#{<<"type">> := <<"built_in_database">>}) -> select_union_member(#{<<"type">> := <<"built_in_database">>}) ->
?R_REF(mnesia); ?R_REF(builtin_db);
select_union_member(#{<<"type">> := Type}) -> select_union_member(#{<<"type">> := Type}) ->
select_union_member_loop(Type, type_names()); select_union_member_loop(Type, type_names());
select_union_member(_) -> select_union_member(_) ->

View File

@ -16,7 +16,6 @@
-module(emqx_authz_utils). -module(emqx_authz_utils).
-include_lib("emqx/include/emqx_placeholder.hrl").
-include_lib("emqx_authz.hrl"). -include_lib("emqx_authz.hrl").
-export([ -export([
@ -28,6 +27,7 @@
update_config/2, update_config/2,
parse_deep/2, parse_deep/2,
parse_str/2, parse_str/2,
render_urlencoded_str/2,
parse_sql/3, parse_sql/3,
render_deep/2, render_deep/2,
render_str/2, render_str/2,
@ -128,6 +128,13 @@ render_str(Template, Values) ->
#{return => full_binary, var_trans => fun handle_var/2} #{return => full_binary, var_trans => fun handle_var/2}
). ).
render_urlencoded_str(Template, Values) ->
emqx_placeholder:proc_tmpl(
Template,
client_vars(Values),
#{return => full_binary, var_trans => fun urlencode_var/2}
).
render_sql_params(ParamList, Values) -> render_sql_params(ParamList, Values) ->
emqx_placeholder:proc_tmpl( emqx_placeholder:proc_tmpl(
ParamList, ParamList,
@ -181,6 +188,11 @@ convert_client_var({dn, DN}) -> {cert_subject, DN};
convert_client_var({protocol, Proto}) -> {proto_name, Proto}; convert_client_var({protocol, Proto}) -> {proto_name, Proto};
convert_client_var(Other) -> Other. convert_client_var(Other) -> Other.
urlencode_var({var, _} = Var, Value) ->
emqx_http_lib:uri_encode(handle_var(Var, Value));
urlencode_var(Var, Value) ->
handle_var(Var, Value).
handle_var({var, _Name}, undefined) -> handle_var({var, _Name}, undefined) ->
<<>>; <<>>;
handle_var({var, <<"peerhost">>}, IpAddr) -> handle_var({var, <<"peerhost">>}, IpAddr) ->

View File

@ -199,7 +199,7 @@ t_query_params(_Config) ->
peerhost := <<"127.0.0.1">>, peerhost := <<"127.0.0.1">>,
proto_name := <<"MQTT">>, proto_name := <<"MQTT">>,
mountpoint := <<"MOUNTPOINT">>, mountpoint := <<"MOUNTPOINT">>,
topic := <<"t">>, topic := <<"t/1">>,
action := <<"publish">> action := <<"publish">>
} = cowboy_req:match_qs( } = cowboy_req:match_qs(
[ [
@ -241,7 +241,7 @@ t_query_params(_Config) ->
?assertEqual( ?assertEqual(
allow, allow,
emqx_access_control:authorize(ClientInfo, publish, <<"t">>) emqx_access_control:authorize(ClientInfo, publish, <<"t/1">>)
). ).
t_path(_Config) -> t_path(_Config) ->
@ -249,13 +249,13 @@ t_path(_Config) ->
fun(Req0, State) -> fun(Req0, State) ->
?assertEqual( ?assertEqual(
<< <<
"/authz/users/" "/authz/use%20rs/"
"user%20name/" "user%20name/"
"client%20id/" "client%20id/"
"127.0.0.1/" "127.0.0.1/"
"MQTT/" "MQTT/"
"MOUNTPOINT/" "MOUNTPOINT/"
"t/1/" "t%2F1/"
"publish" "publish"
>>, >>,
cowboy_req:path(Req0) cowboy_req:path(Req0)
@ -264,7 +264,7 @@ t_path(_Config) ->
end, end,
#{ #{
<<"url">> => << <<"url">> => <<
"http://127.0.0.1:33333/authz/users/" "http://127.0.0.1:33333/authz/use%20rs/"
"${username}/" "${username}/"
"${clientid}/" "${clientid}/"
"${peerhost}/" "${peerhost}/"

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*- %% -*- mode: erlang -*-
{application, emqx_bridge, [ {application, emqx_bridge, [
{description, "EMQX bridges"}, {description, "EMQX bridges"},
{vsn, "0.1.16"}, {vsn, "0.1.17"},
{registered, [emqx_bridge_sup]}, {registered, [emqx_bridge_sup]},
{mod, {emqx_bridge_app, []}}, {mod, {emqx_bridge_app, []}},
{applications, [ {applications, [

View File

@ -70,7 +70,8 @@
T == dynamo; T == dynamo;
T == rocketmq; T == rocketmq;
T == cassandra; T == cassandra;
T == sqlserver T == sqlserver;
T == pulsar_producer
). ).
load() -> load() ->

View File

@ -64,7 +64,7 @@
{BridgeType, BridgeName} -> {BridgeType, BridgeName} ->
EXPR EXPR
catch catch
throw:{invalid_bridge_id, Reason} -> throw:#{reason := Reason} ->
?NOT_FOUND(<<"Invalid bridge ID, ", Reason/binary>>) ?NOT_FOUND(<<"Invalid bridge ID, ", Reason/binary>>)
end end
). ).
@ -546,6 +546,8 @@ schema("/bridges_probe") ->
case emqx_bridge_resource:create_dry_run(ConnType, maps:remove(<<"type">>, Params1)) of case emqx_bridge_resource:create_dry_run(ConnType, maps:remove(<<"type">>, Params1)) of
ok -> ok ->
?NO_CONTENT; ?NO_CONTENT;
{error, #{kind := validation_error} = Reason} ->
?BAD_REQUEST('TEST_FAILED', map_to_json(Reason));
{error, Reason} when not is_tuple(Reason); element(1, Reason) =/= 'exit' -> {error, Reason} when not is_tuple(Reason); element(1, Reason) =/= 'exit' ->
?BAD_REQUEST('TEST_FAILED', Reason) ?BAD_REQUEST('TEST_FAILED', Reason)
end; end;

View File

@ -87,7 +87,7 @@ parse_bridge_id(BridgeId) ->
[Type, Name] -> [Type, Name] ->
{to_type_atom(Type), validate_name(Name)}; {to_type_atom(Type), validate_name(Name)};
_ -> _ ->
invalid_bridge_id( invalid_data(
<<"should be of pattern {type}:{name}, but got ", BridgeId/binary>> <<"should be of pattern {type}:{name}, but got ", BridgeId/binary>>
) )
end. end.
@ -108,14 +108,14 @@ validate_name(Name0) ->
true -> true ->
Name0; Name0;
false -> false ->
invalid_bridge_id(<<"bad name: ", Name0/binary>>) invalid_data(<<"bad name: ", Name0/binary>>)
end; end;
false -> false ->
invalid_bridge_id(<<"only 0-9a-zA-Z_-. is allowed in name: ", Name0/binary>>) invalid_data(<<"only 0-9a-zA-Z_-. is allowed in name: ", Name0/binary>>)
end. end.
-spec invalid_bridge_id(binary()) -> no_return(). -spec invalid_data(binary()) -> no_return().
invalid_bridge_id(Reason) -> throw({?FUNCTION_NAME, Reason}). invalid_data(Reason) -> throw(#{kind => validation_error, reason => Reason}).
is_id_char(C) when C >= $0 andalso C =< $9 -> true; is_id_char(C) when C >= $0 andalso C =< $9 -> true;
is_id_char(C) when C >= $a andalso C =< $z -> true; is_id_char(C) when C >= $a andalso C =< $z -> true;
@ -130,7 +130,7 @@ to_type_atom(Type) ->
erlang:binary_to_existing_atom(Type, utf8) erlang:binary_to_existing_atom(Type, utf8)
catch catch
_:_ -> _:_ ->
invalid_bridge_id(<<"unknown type: ", Type/binary>>) invalid_data(<<"unknown bridge type: ", Type/binary>>)
end. end.
reset_metrics(ResourceId) -> reset_metrics(ResourceId) ->
@ -243,12 +243,19 @@ create_dry_run(Type, Conf0) ->
{error, Reason} -> {error, Reason} ->
{error, Reason}; {error, Reason};
{ok, ConfNew} -> {ok, ConfNew} ->
ParseConf = parse_confs(bin(Type), TmpPath, ConfNew), try
Res = emqx_resource:create_dry_run_local( ParseConf = parse_confs(bin(Type), TmpPath, ConfNew),
bridge_to_resource_type(Type), ParseConf Res = emqx_resource:create_dry_run_local(
), bridge_to_resource_type(Type), ParseConf
_ = maybe_clear_certs(TmpPath, ConfNew), ),
Res Res
catch
%% validation errors
throw:Reason ->
{error, Reason}
after
_ = maybe_clear_certs(TmpPath, ConfNew)
end
end. end.
remove(BridgeId) -> remove(BridgeId) ->
@ -300,10 +307,18 @@ parse_confs(
max_retries := Retry max_retries := Retry
} = Conf } = Conf
) -> ) ->
{BaseUrl, Path} = parse_url(Url), Url1 = bin(Url),
{ok, BaseUrl2} = emqx_http_lib:uri_parse(BaseUrl), {BaseUrl, Path} = parse_url(Url1),
BaseUrl1 =
case emqx_http_lib:uri_parse(BaseUrl) of
{ok, BUrl} ->
BUrl;
{error, Reason} ->
Reason1 = emqx_utils:readable_error_msg(Reason),
invalid_data(<<"Invalid URL: ", Url1/binary, ", details: ", Reason1/binary>>)
end,
Conf#{ Conf#{
base_url => BaseUrl2, base_url => BaseUrl1,
request => request =>
#{ #{
path => Path, path => Path,
@ -325,6 +340,8 @@ parse_confs(Type, Name, Conf) when ?IS_INGRESS_BRIDGE(Type) ->
%% to hocon; keeping this as just `kafka' for backwards compatibility. %% to hocon; keeping this as just `kafka' for backwards compatibility.
parse_confs(<<"kafka">> = _Type, Name, Conf) -> parse_confs(<<"kafka">> = _Type, Name, Conf) ->
Conf#{bridge_name => Name}; Conf#{bridge_name => Name};
parse_confs(<<"pulsar_producer">> = _Type, Name, Conf) ->
Conf#{bridge_name => Name};
parse_confs(_Type, _Name, Conf) -> parse_confs(_Type, _Name, Conf) ->
Conf. Conf.
@ -338,7 +355,7 @@ parse_url(Url) ->
{iolist_to_binary([Scheme, "//", HostPort]), <<>>} {iolist_to_binary([Scheme, "//", HostPort]), <<>>}
end; end;
[Url] -> [Url] ->
error({invalid_url, Url}) invalid_data(<<"Missing scheme in URL: ", Url/binary>>)
end. end.
str(Bin) when is_binary(Bin) -> binary_to_list(Bin); str(Bin) when is_binary(Bin) -> binary_to_list(Bin);

View File

@ -414,6 +414,18 @@ t_http_crud_apis(Config) ->
}, },
json(maps:get(<<"message">>, PutFail2)) json(maps:get(<<"message">>, PutFail2))
), ),
{ok, 400, _} = request_json(
put,
uri(["bridges", BridgeID]),
?HTTP_BRIDGE(<<"localhost:1234/foo">>, Name),
Config
),
{ok, 400, _} = request_json(
put,
uri(["bridges", BridgeID]),
?HTTP_BRIDGE(<<"htpp://localhost:12341234/foo">>, Name),
Config
),
%% delete the bridge %% delete the bridge
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), Config), {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), Config),
@ -498,6 +510,22 @@ t_http_crud_apis(Config) ->
%% Try create bridge with bad characters as name %% Try create bridge with bad characters as name
{ok, 400, _} = request(post, uri(["bridges"]), ?HTTP_BRIDGE(URL1, <<"隋达"/utf8>>), Config), {ok, 400, _} = request(post, uri(["bridges"]), ?HTTP_BRIDGE(URL1, <<"隋达"/utf8>>), Config),
%% Missing scheme in URL
{ok, 400, _} = request(
post,
uri(["bridges"]),
?HTTP_BRIDGE(<<"localhost:1234/foo">>, <<"missing_url_scheme">>),
Config
),
%% Invalid port
{ok, 400, _} = request(
post,
uri(["bridges"]),
?HTTP_BRIDGE(<<"http://localhost:12341234/foo">>, <<"invalid_port">>),
Config
),
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), Config). {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), Config).
t_http_bridges_local_topic(Config) -> t_http_bridges_local_topic(Config) ->
@ -1016,6 +1044,34 @@ t_bridges_probe(Config) ->
) )
), ),
%% Missing scheme in URL
?assertMatch(
{ok, 400, #{
<<"code">> := <<"TEST_FAILED">>,
<<"message">> := _
}},
request_json(
post,
uri(["bridges_probe"]),
?HTTP_BRIDGE(<<"203.0.113.3:1234/foo">>),
Config
)
),
%% Invalid port
?assertMatch(
{ok, 400, #{
<<"code">> := <<"TEST_FAILED">>,
<<"message">> := _
}},
request_json(
post,
uri(["bridges_probe"]),
?HTTP_BRIDGE(<<"http://203.0.113.3:12341234/foo">>),
Config
)
),
{ok, 204, _} = request( {ok, 204, _} = request(
post, post,
uri(["bridges_probe"]), uri(["bridges_probe"]),

View File

@ -0,0 +1,94 @@
Business Source License 1.1
Licensor: Hangzhou EMQ Technologies Co., Ltd.
Licensed Work: EMQX Enterprise Edition
The Licensed Work is (c) 2023
Hangzhou EMQ Technologies Co., Ltd.
Additional Use Grant: Students and educators are granted right to copy,
modify, and create derivative work for research
or education.
Change Date: 2027-02-01
Change License: Apache License, Version 2.0
For information about alternative licensing arrangements for the Software,
please contact Licensor: https://www.emqx.com/en/contact
Notice
The Business Source License (this document, or the “License”) is not an Open
Source license. However, the Licensed Work will eventually be made available
under an Open Source License, as stated in this License.
License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
“Business Source License” is a trademark of MariaDB Corporation Ab.
-----------------------------------------------------------------------------
Business Source License 1.1
Terms
The Licensor hereby grants you the right to copy, modify, create derivative
works, redistribute, and make non-production use of the Licensed Work. The
Licensor may make an Additional Use Grant, above, permitting limited
production use.
Effective on the Change Date, or the fourth anniversary of the first publicly
available distribution of a specific version of the Licensed Work under this
License, whichever comes first, the Licensor hereby grants you rights under
the terms of the Change License, and the rights granted in the paragraph
above terminate.
If your use of the Licensed Work does not comply with the requirements
currently in effect as described in this License, you must purchase a
commercial license from the Licensor, its affiliated entities, or authorized
resellers, or you must refrain from using the Licensed Work.
All copies of the original and modified Licensed Work, and derivative works
of the Licensed Work, are subject to this License. This License applies
separately for each version of the Licensed Work and the Change Date may vary
for each version of the Licensed Work released by Licensor.
You must conspicuously display this License on each original or modified copy
of the Licensed Work. If you receive the Licensed Work in original or
modified form from a third party, the terms and conditions set forth in this
License apply to your use of that work.
Any use of the Licensed Work in violation of this License will automatically
terminate your rights under this License for the current and all other
versions of the Licensed Work.
This License does not grant you any right in any trademark or logo of
Licensor or its affiliates (provided that you may use a trademark or logo of
Licensor as expressly required by this License).
TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
TITLE.
MariaDB hereby grants you permission to use this Licenses text to license
your works, and to refer to it using the trademark “Business Source License”,
as long as you comply with the Covenants of Licensor below.
Covenants of Licensor
In consideration of the right to use this Licenses text and the “Business
Source License” name and trademark, Licensor covenants to MariaDB, and to all
other recipients of the licensed work to be provided by Licensor:
1. To specify as the Change License the GPL Version 2.0 or any later version,
or a license that is compatible with GPL Version 2.0 or a later version,
where “compatible” means that software provided under the Change License can
be included in a program with software provided under GPL Version 2.0 or a
later version. Licensor may specify additional Change Licenses without
limitation.
2. To either: (a) specify an additional grant of rights to use that does not
impose any additional restriction on the right granted in this License, as
the Additional Use Grant; or (b) insert the text “None”.
3. To specify a Change Date.
4. Not to modify this License in any other way.

View File

@ -0,0 +1,39 @@
# EMQX Cassandra Bridge
[Apache Cassandra](https://github.com/apache/cassandra) is an open-source, distributed
NoSQL database management system that is designed to manage large amounts of structured
and semi-structured data across many commodity servers, providing high availability
with no single point of failure.
It is commonly used in web and mobile applications, IoT, and other systems that
require storing, querying, and analyzing large amounts of data.
The application is used to connect EMQX and Cassandra. User can create a rule
and easily ingest IoT data into Cassandra by leveraging
[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html).
# Documentation
- Refer to [Ingest data into Cassandra](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-cassa.html)
for how to use EMQX dashboard to ingest IoT data into Cassandra.
- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
for the EMQX rules engine introduction.
# HTTP APIs
- Several APIs are provided for bridge management, which includes create bridge,
update bridge, get bridge, stop or restart bridge and list bridges etc.
Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) for more detailed information.
# Contributing
Please see our [contributing.md](../../CONTRIBUTING.md).
# License
EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).

View File

@ -0,0 +1,2 @@
toxiproxy
cassandra

View File

@ -0,0 +1,5 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-define(CASSANDRA_DEFAULT_PORT, 9042).

View File

@ -0,0 +1,11 @@
%% -*- mode: erlang; -*-
{erl_opts, [debug_info]}.
{deps, [ {ecql, {git, "https://github.com/emqx/ecql.git", {tag, "v0.5.1"}}}
, {emqx_connector, {path, "../../apps/emqx_connector"}}
, {emqx_resource, {path, "../../apps/emqx_resource"}}
, {emqx_bridge, {path, "../../apps/emqx_bridge"}}
]}.
{shell, [
{apps, [emqx_bridge_cassandra]}
]}.

View File

@ -0,0 +1,9 @@
{application, emqx_bridge_cassandra, [
{description, "EMQX Enterprise Cassandra Bridge"},
{vsn, "0.1.0"},
{registered, []},
{applications, [kernel, stdlib, ecql]},
{env, []},
{modules, []},
{links, []}
]}.

View File

@ -1,7 +1,7 @@
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. %% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
-module(emqx_ee_bridge_cassa). -module(emqx_bridge_cassandra).
-include_lib("typerefl/include/types.hrl"). -include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl"). -include_lib("hocon/include/hoconsc.hrl").
@ -88,7 +88,7 @@ fields("config") ->
#{desc => ?DESC("local_topic"), default => undefined} #{desc => ?DESC("local_topic"), default => undefined}
)} )}
] ++ emqx_resource_schema:fields("resource_opts") ++ ] ++ emqx_resource_schema:fields("resource_opts") ++
(emqx_ee_connector_cassa:fields(config) -- (emqx_bridge_cassandra_connector:fields(config) --
emqx_connector_schema_lib:prepare_statement_fields()); emqx_connector_schema_lib:prepare_statement_fields());
fields("post") -> fields("post") ->
fields("post", cassandra); fields("post", cassandra);

View File

@ -2,12 +2,12 @@
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. %% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
-module(emqx_ee_connector_cassa). -module(emqx_bridge_cassandra_connector).
-behaviour(emqx_resource). -behaviour(emqx_resource).
-include_lib("emqx_connector/include/emqx_connector.hrl"). -include_lib("emqx_connector/include/emqx_connector.hrl").
-include_lib("emqx_ee_connector/include/emqx_ee_connector.hrl"). -include("emqx_bridge_cassandra.hrl").
-include_lib("typerefl/include/types.hrl"). -include_lib("typerefl/include/types.hrl").
-include_lib("emqx/include/logger.hrl"). -include_lib("emqx/include/logger.hrl").
-include_lib("hocon/include/hoconsc.hrl"). -include_lib("hocon/include/hoconsc.hrl").
@ -44,7 +44,7 @@
-type state() :: -type state() ::
#{ #{
poolname := atom(), pool_name := binary(),
prepare_cql := prepares(), prepare_cql := prepares(),
params_tokens := params_tokens(), params_tokens := params_tokens(),
%% returned by ecql:prepare/2 %% returned by ecql:prepare/2
@ -92,7 +92,7 @@ callback_mode() -> async_if_possible.
on_start( on_start(
InstId, InstId,
#{ #{
servers := Servers, servers := Servers0,
keyspace := Keyspace, keyspace := Keyspace,
username := Username, username := Username,
pool_size := PoolSize, pool_size := PoolSize,
@ -104,9 +104,16 @@ on_start(
connector => InstId, connector => InstId,
config => emqx_utils:redact(Config) config => emqx_utils:redact(Config)
}), }),
Servers =
lists:map(
fun(#{hostname := Host, port := Port}) ->
{Host, Port}
end,
emqx_schema:parse_servers(Servers0, ?DEFAULT_SERVER_OPTION)
),
Options = [ Options = [
{nodes, emqx_schema:parse_servers(Servers, ?DEFAULT_SERVER_OPTION)}, {nodes, Servers},
{username, Username}, {username, Username},
{password, emqx_secret:wrap(maps:get(password, Config, ""))}, {password, emqx_secret:wrap(maps:get(password, Config, ""))},
{keyspace, Keyspace}, {keyspace, Keyspace},
@ -124,14 +131,10 @@ on_start(
false -> false ->
[] []
end, end,
%% use InstaId of binary type as Pool name, which is supported in ecpool. State = parse_prepare_cql(Config),
PoolName = InstId, case emqx_resource_pool:start(InstId, ?MODULE, Options ++ SslOpts) of
Prepares = parse_prepare_cql(Config),
InitState = #{poolname => PoolName, prepare_statement => #{}},
State = maps:merge(InitState, Prepares),
case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Options ++ SslOpts) of
ok -> ok ->
{ok, init_prepare(State)}; {ok, init_prepare(State#{pool_name => InstId, prepare_statement => #{}})};
{error, Reason} -> {error, Reason} ->
?tp( ?tp(
cassandra_connector_start_failed, cassandra_connector_start_failed,
@ -140,12 +143,12 @@ on_start(
{error, Reason} {error, Reason}
end. end.
on_stop(InstId, #{poolname := PoolName}) -> on_stop(InstId, #{pool_name := PoolName}) ->
?SLOG(info, #{ ?SLOG(info, #{
msg => "stopping_cassandra_connector", msg => "stopping_cassandra_connector",
connector => InstId connector => InstId
}), }),
emqx_plugin_libs_pool:stop_pool(PoolName). emqx_resource_pool:stop(PoolName).
-type request() :: -type request() ::
% emqx_bridge.erl % emqx_bridge.erl
@ -184,7 +187,7 @@ do_single_query(
InstId, InstId,
Request, Request,
Async, Async,
#{poolname := PoolName} = State #{pool_name := PoolName} = State
) -> ) ->
{Type, PreparedKeyOrSQL, Params} = parse_request_to_cql(Request), {Type, PreparedKeyOrSQL, Params} = parse_request_to_cql(Request),
?tp( ?tp(
@ -232,7 +235,7 @@ do_batch_query(
InstId, InstId,
Requests, Requests,
Async, Async,
#{poolname := PoolName} = State #{pool_name := PoolName} = State
) -> ) ->
CQLs = CQLs =
lists:map( lists:map(
@ -305,8 +308,8 @@ exec_cql_batch_query(InstId, PoolName, Async, CQLs) ->
Result Result
end. end.
on_get_status(_InstId, #{poolname := Pool} = State) -> on_get_status(_InstId, #{pool_name := PoolName} = State) ->
case emqx_plugin_libs_pool:health_check_ecpool_workers(Pool, fun ?MODULE:do_get_status/1) of case emqx_resource_pool:health_check_workers(PoolName, fun ?MODULE:do_get_status/1) of
true -> true ->
case do_check_prepares(State) of case do_check_prepares(State) of
ok -> ok ->
@ -327,7 +330,7 @@ do_get_status(Conn) ->
do_check_prepares(#{prepare_cql := Prepares}) when is_map(Prepares) -> do_check_prepares(#{prepare_cql := Prepares}) when is_map(Prepares) ->
ok; ok;
do_check_prepares(State = #{poolname := PoolName, prepare_cql := {error, Prepares}}) -> do_check_prepares(State = #{pool_name := PoolName, prepare_cql := {error, Prepares}}) ->
%% retry to prepare %% retry to prepare
case prepare_cql(Prepares, PoolName) of case prepare_cql(Prepares, PoolName) of
{ok, Sts} -> {ok, Sts} ->
@ -397,7 +400,7 @@ parse_prepare_cql([], Prepares, Tokens) ->
params_tokens => Tokens params_tokens => Tokens
}. }.
init_prepare(State = #{prepare_cql := Prepares, poolname := PoolName}) -> init_prepare(State = #{prepare_cql := Prepares, pool_name := PoolName}) ->
case maps:size(Prepares) of case maps:size(Prepares) of
0 -> 0 ->
State; State;
@ -429,17 +432,17 @@ prepare_cql(Prepares, PoolName) ->
end. end.
do_prepare_cql(Prepares, PoolName) -> do_prepare_cql(Prepares, PoolName) ->
do_prepare_cql(ecpool:workers(PoolName), Prepares, PoolName, #{}). do_prepare_cql(ecpool:workers(PoolName), Prepares, #{}).
do_prepare_cql([{_Name, Worker} | T], Prepares, PoolName, _LastSts) -> do_prepare_cql([{_Name, Worker} | T], Prepares, _LastSts) ->
{ok, Conn} = ecpool_worker:client(Worker), {ok, Conn} = ecpool_worker:client(Worker),
case prepare_cql_to_conn(Conn, Prepares) of case prepare_cql_to_conn(Conn, Prepares) of
{ok, Sts} -> {ok, Sts} ->
do_prepare_cql(T, Prepares, PoolName, Sts); do_prepare_cql(T, Prepares, Sts);
Error -> Error ->
Error Error
end; end;
do_prepare_cql([], _Prepares, _PoolName, LastSts) -> do_prepare_cql([], _Prepares, LastSts) ->
{ok, LastSts}. {ok, LastSts}.
prepare_cql_to_conn(Conn, Prepares) -> prepare_cql_to_conn(Conn, Prepares) ->

View File

@ -2,7 +2,7 @@
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. %% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
-module(emqx_ee_bridge_cassa_SUITE). -module(emqx_bridge_cassandra_SUITE).
-compile(nowarn_export_all). -compile(nowarn_export_all).
-compile(export_all). -compile(export_all).
@ -57,7 +57,7 @@
%% CASSA_TCP_HOST=127.0.0.1 CASSA_TCP_PORT=19042 \ %% CASSA_TCP_HOST=127.0.0.1 CASSA_TCP_PORT=19042 \
%% CASSA_TLS_HOST=127.0.0.1 CASSA_TLS_PORT=19142 \ %% CASSA_TLS_HOST=127.0.0.1 CASSA_TLS_PORT=19142 \
%% PROXY_HOST=127.0.0.1 ./rebar3 as test ct -c -v --name ct@127.0.0.1 \ %% PROXY_HOST=127.0.0.1 ./rebar3 as test ct -c -v --name ct@127.0.0.1 \
%% --suite lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_cassa_SUITE.erl %% --suite apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_SUITE.erl
%% %%
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
@ -530,15 +530,16 @@ t_write_failure(Config) ->
fun(Trace0) -> fun(Trace0) ->
ct:pal("trace: ~p", [Trace0]), ct:pal("trace: ~p", [Trace0]),
Trace = ?of_kind(buffer_worker_flush_nack, Trace0), Trace = ?of_kind(buffer_worker_flush_nack, Trace0),
?assertMatch([#{result := {async_return, {error, _}}} | _], Trace), [#{result := Result} | _] = Trace,
[#{result := {async_return, {error, Error}}} | _] = Trace, case Result of
case Error of {async_return, {error, {resource_error, _}}} ->
{resource_error, _} ->
ok; ok;
{recoverable_error, disconnected} -> {async_return, {error, {recoverable_error, disconnected}}} ->
ok;
{error, {resource_error, _}} ->
ok; ok;
_ -> _ ->
ct:fail("unexpected error: ~p", [Error]) ct:fail("unexpected error: ~p", [Result])
end end
end end
), ),
@ -589,7 +590,7 @@ t_missing_data(Config) ->
{ok, _}, {ok, _},
create_bridge(Config) create_bridge(Config)
), ),
%% emqx_ee_connector_cassa will send missed data as a `null` atom %% emqx_bridge_cassandra_connector will send missed data as a `null` atom
%% to ecql driver %% to ecql driver
?check_trace( ?check_trace(
begin begin

View File

@ -2,13 +2,13 @@
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. %% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
-module(emqx_ee_connector_cassa_SUITE). -module(emqx_bridge_cassandra_connector_SUITE).
-compile(nowarn_export_all). -compile(nowarn_export_all).
-compile(export_all). -compile(export_all).
-include("emqx_connector.hrl"). -include("emqx_bridge_cassandra.hrl").
-include("emqx_ee_connector.hrl"). -include("emqx_connector/include/emqx_connector.hrl").
-include_lib("eunit/include/eunit.hrl"). -include_lib("eunit/include/eunit.hrl").
-include_lib("emqx/include/emqx.hrl"). -include_lib("emqx/include/emqx.hrl").
-include_lib("stdlib/include/assert.hrl"). -include_lib("stdlib/include/assert.hrl").
@ -16,7 +16,7 @@
%% Cassandra server defined at `.ci/docker-compose-file/docker-compose-cassandra-tcp.yaml` %% Cassandra server defined at `.ci/docker-compose-file/docker-compose-cassandra-tcp.yaml`
%% You can change it to `127.0.0.1`, if you run this SUITE locally %% You can change it to `127.0.0.1`, if you run this SUITE locally
-define(CASSANDRA_HOST, "cassandra"). -define(CASSANDRA_HOST, "cassandra").
-define(CASSANDRA_RESOURCE_MOD, emqx_ee_connector_cassa). -define(CASSANDRA_RESOURCE_MOD, emqx_bridge_cassandra_connector).
%% This test SUITE requires a running cassandra instance. If you don't want to %% This test SUITE requires a running cassandra instance. If you don't want to
%% bring up the whole CI infrastuctucture with the `scripts/ct/run.sh` script %% bring up the whole CI infrastuctucture with the `scripts/ct/run.sh` script
@ -38,9 +38,14 @@ groups() ->
[]. [].
cassandra_servers() -> cassandra_servers() ->
emqx_schema:parse_servers( lists:map(
iolist_to_binary([?CASSANDRA_HOST, ":", erlang:integer_to_list(?CASSANDRA_DEFAULT_PORT)]), fun(#{hostname := Host, port := Port}) ->
#{default_port => ?CASSANDRA_DEFAULT_PORT} {Host, Port}
end,
emqx_schema:parse_servers(
iolist_to_binary([?CASSANDRA_HOST, ":", erlang:integer_to_list(?CASSANDRA_DEFAULT_PORT)]),
#{default_port => ?CASSANDRA_DEFAULT_PORT}
)
). ).
init_per_suite(Config) -> init_per_suite(Config) ->
@ -101,15 +106,15 @@ show(Label, What) ->
erlang:display({Label, What}), erlang:display({Label, What}),
What. What.
perform_lifecycle_check(PoolName, InitialConfig) -> perform_lifecycle_check(ResourceId, InitialConfig) ->
{ok, #{config := CheckedConfig}} = {ok, #{config := CheckedConfig}} =
emqx_resource:check_config(?CASSANDRA_RESOURCE_MOD, InitialConfig), emqx_resource:check_config(?CASSANDRA_RESOURCE_MOD, InitialConfig),
{ok, #{ {ok, #{
state := #{poolname := ReturnedPoolName} = State, state := #{pool_name := PoolName} = State,
status := InitialStatus status := InitialStatus
}} = }} =
emqx_resource:create_local( emqx_resource:create_local(
PoolName, ResourceId,
?CONNECTOR_RESOURCE_GROUP, ?CONNECTOR_RESOURCE_GROUP,
?CASSANDRA_RESOURCE_MOD, ?CASSANDRA_RESOURCE_MOD,
CheckedConfig, CheckedConfig,
@ -121,45 +126,45 @@ perform_lifecycle_check(PoolName, InitialConfig) ->
state := State, state := State,
status := InitialStatus status := InitialStatus
}} = }} =
emqx_resource:get_instance(PoolName), emqx_resource:get_instance(ResourceId),
?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)),
% % Perform query as further check that the resource is working as expected % % Perform query as further check that the resource is working as expected
(fun() -> (fun() ->
erlang:display({pool_name, PoolName}), erlang:display({pool_name, ResourceId}),
QueryNoParamsResWrapper = emqx_resource:query(PoolName, test_query_no_params()), QueryNoParamsResWrapper = emqx_resource:query(ResourceId, test_query_no_params()),
?assertMatch({ok, _}, QueryNoParamsResWrapper) ?assertMatch({ok, _}, QueryNoParamsResWrapper)
end)(), end)(),
?assertEqual(ok, emqx_resource:stop(PoolName)), ?assertEqual(ok, emqx_resource:stop(ResourceId)),
% Resource will be listed still, but state will be changed and healthcheck will fail % Resource will be listed still, but state will be changed and healthcheck will fail
% as the worker no longer exists. % as the worker no longer exists.
{ok, ?CONNECTOR_RESOURCE_GROUP, #{ {ok, ?CONNECTOR_RESOURCE_GROUP, #{
state := State, state := State,
status := StoppedStatus status := StoppedStatus
}} = }} =
emqx_resource:get_instance(PoolName), emqx_resource:get_instance(ResourceId),
?assertEqual(stopped, StoppedStatus), ?assertEqual(stopped, StoppedStatus),
?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)), ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(ResourceId)),
% Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself. % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself.
?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)),
% Can call stop/1 again on an already stopped instance % Can call stop/1 again on an already stopped instance
?assertEqual(ok, emqx_resource:stop(PoolName)), ?assertEqual(ok, emqx_resource:stop(ResourceId)),
% Make sure it can be restarted and the healthchecks and queries work properly % Make sure it can be restarted and the healthchecks and queries work properly
?assertEqual(ok, emqx_resource:restart(PoolName)), ?assertEqual(ok, emqx_resource:restart(ResourceId)),
% async restart, need to wait resource % async restart, need to wait resource
timer:sleep(500), timer:sleep(500),
{ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} = {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} =
emqx_resource:get_instance(PoolName), emqx_resource:get_instance(ResourceId),
?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)),
(fun() -> (fun() ->
QueryNoParamsResWrapper = QueryNoParamsResWrapper =
emqx_resource:query(PoolName, test_query_no_params()), emqx_resource:query(ResourceId, test_query_no_params()),
?assertMatch({ok, _}, QueryNoParamsResWrapper) ?assertMatch({ok, _}, QueryNoParamsResWrapper)
end)(), end)(),
% Stop and remove the resource in one go. % Stop and remove the resource in one go.
?assertEqual(ok, emqx_resource:remove_local(PoolName)), ?assertEqual(ok, emqx_resource:remove_local(ResourceId)),
?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)),
% Should not even be able to get the resource data out of ets now unlike just stopping. % Should not even be able to get the resource data out of ets now unlike just stopping.
?assertEqual({error, not_found}, emqx_resource:get_instance(PoolName)). ?assertEqual({error, not_found}, emqx_resource:get_instance(ResourceId)).
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% utils %% utils

View File

@ -0,0 +1,94 @@
Business Source License 1.1
Licensor: Hangzhou EMQ Technologies Co., Ltd.
Licensed Work: EMQX Enterprise Edition
The Licensed Work is (c) 2023
Hangzhou EMQ Technologies Co., Ltd.
Additional Use Grant: Students and educators are granted right to copy,
modify, and create derivative work for research
or education.
Change Date: 2027-02-01
Change License: Apache License, Version 2.0
For information about alternative licensing arrangements for the Software,
please contact Licensor: https://www.emqx.com/en/contact
Notice
The Business Source License (this document, or the “License”) is not an Open
Source license. However, the Licensed Work will eventually be made available
under an Open Source License, as stated in this License.
License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
“Business Source License” is a trademark of MariaDB Corporation Ab.
-----------------------------------------------------------------------------
Business Source License 1.1
Terms
The Licensor hereby grants you the right to copy, modify, create derivative
works, redistribute, and make non-production use of the Licensed Work. The
Licensor may make an Additional Use Grant, above, permitting limited
production use.
Effective on the Change Date, or the fourth anniversary of the first publicly
available distribution of a specific version of the Licensed Work under this
License, whichever comes first, the Licensor hereby grants you rights under
the terms of the Change License, and the rights granted in the paragraph
above terminate.
If your use of the Licensed Work does not comply with the requirements
currently in effect as described in this License, you must purchase a
commercial license from the Licensor, its affiliated entities, or authorized
resellers, or you must refrain from using the Licensed Work.
All copies of the original and modified Licensed Work, and derivative works
of the Licensed Work, are subject to this License. This License applies
separately for each version of the Licensed Work and the Change Date may vary
for each version of the Licensed Work released by Licensor.
You must conspicuously display this License on each original or modified copy
of the Licensed Work. If you receive the Licensed Work in original or
modified form from a third party, the terms and conditions set forth in this
License apply to your use of that work.
Any use of the Licensed Work in violation of this License will automatically
terminate your rights under this License for the current and all other
versions of the Licensed Work.
This License does not grant you any right in any trademark or logo of
Licensor or its affiliates (provided that you may use a trademark or logo of
Licensor as expressly required by this License).
TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
TITLE.
MariaDB hereby grants you permission to use this Licenses text to license
your works, and to refer to it using the trademark “Business Source License”,
as long as you comply with the Covenants of Licensor below.
Covenants of Licensor
In consideration of the right to use this Licenses text and the “Business
Source License” name and trademark, Licensor covenants to MariaDB, and to all
other recipients of the licensed work to be provided by Licensor:
1. To specify as the Change License the GPL Version 2.0 or any later version,
or a license that is compatible with GPL Version 2.0 or a later version,
where “compatible” means that software provided under the Change License can
be included in a program with software provided under GPL Version 2.0 or a
later version. Licensor may specify additional Change Licenses without
limitation.
2. To either: (a) specify an additional grant of rights to use that does not
impose any additional restriction on the right granted in this License, as
the Additional Use Grant; or (b) insert the text “None”.
3. To specify a Change Date.
4. Not to modify this License in any other way.

View File

@ -0,0 +1,37 @@
# EMQX ClickHouse Bridge
[ClickHouse](https://github.com/ClickHouse/ClickHouse) is an open-source, column-based
database management system. It is designed for real-time processing of large volumes of
data and is known for its high performance and scalability.
The application is used to connect EMQX and ClickHouse.
User can create a rule and easily ingest IoT data into ClickHouse by leveraging
[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html).
# Documentation
- Refer to [Ingest data into ClickHouse](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-clickhouse.html)
for how to use EMQX dashboard to ingest IoT data into ClickHouse.
- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
for the EMQX rules engine introduction.
# HTTP APIs
- Several APIs are provided for bridge management, which includes create bridge,
update bridge, get bridge, stop or restart bridge and list bridges etc.
Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges)
for more detailed information.
# Contributing
Please see our [contributing.md](../../CONTRIBUTING.md).
# License
EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).

View File

@ -0,0 +1,9 @@
{application, emqx_bridge_clickhouse, [
{description, "EMQX Enterprise ClickHouse Bridge"},
{vsn, "0.1.0"},
{registered, []},
{applications, [kernel, stdlib]},
{env, []},
{modules, []},
{links, []}
]}.

View File

@ -0,0 +1,94 @@
Business Source License 1.1
Licensor: Hangzhou EMQ Technologies Co., Ltd.
Licensed Work: EMQX Enterprise Edition
The Licensed Work is (c) 2023
Hangzhou EMQ Technologies Co., Ltd.
Additional Use Grant: Students and educators are granted right to copy,
modify, and create derivative work for research
or education.
Change Date: 2027-02-01
Change License: Apache License, Version 2.0
For information about alternative licensing arrangements for the Software,
please contact Licensor: https://www.emqx.com/en/contact
Notice
The Business Source License (this document, or the “License”) is not an Open
Source license. However, the Licensed Work will eventually be made available
under an Open Source License, as stated in this License.
License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
“Business Source License” is a trademark of MariaDB Corporation Ab.
-----------------------------------------------------------------------------
Business Source License 1.1
Terms
The Licensor hereby grants you the right to copy, modify, create derivative
works, redistribute, and make non-production use of the Licensed Work. The
Licensor may make an Additional Use Grant, above, permitting limited
production use.
Effective on the Change Date, or the fourth anniversary of the first publicly
available distribution of a specific version of the Licensed Work under this
License, whichever comes first, the Licensor hereby grants you rights under
the terms of the Change License, and the rights granted in the paragraph
above terminate.
If your use of the Licensed Work does not comply with the requirements
currently in effect as described in this License, you must purchase a
commercial license from the Licensor, its affiliated entities, or authorized
resellers, or you must refrain from using the Licensed Work.
All copies of the original and modified Licensed Work, and derivative works
of the Licensed Work, are subject to this License. This License applies
separately for each version of the Licensed Work and the Change Date may vary
for each version of the Licensed Work released by Licensor.
You must conspicuously display this License on each original or modified copy
of the Licensed Work. If you receive the Licensed Work in original or
modified form from a third party, the terms and conditions set forth in this
License apply to your use of that work.
Any use of the Licensed Work in violation of this License will automatically
terminate your rights under this License for the current and all other
versions of the Licensed Work.
This License does not grant you any right in any trademark or logo of
Licensor or its affiliates (provided that you may use a trademark or logo of
Licensor as expressly required by this License).
TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
TITLE.
MariaDB hereby grants you permission to use this Licenses text to license
your works, and to refer to it using the trademark “Business Source License”,
as long as you comply with the Covenants of Licensor below.
Covenants of Licensor
In consideration of the right to use this Licenses text and the “Business
Source License” name and trademark, Licensor covenants to MariaDB, and to all
other recipients of the licensed work to be provided by Licensor:
1. To specify as the Change License the GPL Version 2.0 or any later version,
or a license that is compatible with GPL Version 2.0 or a later version,
where “compatible” means that software provided under the Change License can
be included in a program with software provided under GPL Version 2.0 or a
later version. Licensor may specify additional Change Licenses without
limitation.
2. To either: (a) specify an additional grant of rights to use that does not
impose any additional restriction on the right granted in this License, as
the Additional Use Grant; or (b) insert the text “None”.
3. To specify a Change Date.
4. Not to modify this License in any other way.

View File

@ -0,0 +1,40 @@
# EMQX DynamoDB Bridge
[Dynamodb](https://aws.amazon.com/dynamodb/) is a high-performance NoSQL database
service provided by Amazon that's designed for scalability and low-latency access
to structured data.
It's often used in applications that require fast and reliable access to data,
such as mobile, ad tech, and IoT.
The application is used to connect EMQX and DynamoDB.
User can create a rule and easily ingest IoT data into DynamoDB by leveraging
[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html).
# Documentation
- Refer to [Ingest data into DynamoDB](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-dynamo.html)
for how to use EMQX dashboard to ingest IoT data into DynamoDB.
- Refer to [Rules engine](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
for the EMQX rules engine introduction.
# HTTP APIs
- Several APIs are provided for bridge management, which includes create bridge,
update bridge, get bridge, stop or restart bridge and list bridges etc.
Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges)
for more detailed information.
# Contributing
Please see our [contributing.md](../../CONTRIBUTING.md).
# License
EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).

View File

@ -0,0 +1,9 @@
%% OTP application resource specification for the EMQX Enterprise DynamoDB
%% bridge. This file declares metadata only; there is no application callback
%% module, so no supervision tree is started when the app is loaded.
{application, emqx_bridge_dynamo, [
{description, "EMQX Enterprise Dynamo Bridge"},
{vsn, "0.1.0"},
%% No registered process names: the bridge is driven by the emqx_bridge
%% framework rather than by processes owned by this application.
{registered, []},
%% NOTE(review): only kernel/stdlib are listed; presumably the connector and
%% resource apps are pulled in by the release assembly — confirm against the
%% release/relx configuration.
{applications, [kernel, stdlib]},
{env, []},
{modules, []},
{links, []}
]}.

View File

@ -0,0 +1,94 @@
Business Source License 1.1
Licensor: Hangzhou EMQ Technologies Co., Ltd.
Licensed Work: EMQX Enterprise Edition
The Licensed Work is (c) 2023
Hangzhou EMQ Technologies Co., Ltd.
Additional Use Grant: Students and educators are granted right to copy,
modify, and create derivative work for research
or education.
Change Date: 2027-02-01
Change License: Apache License, Version 2.0
For information about alternative licensing arrangements for the Software,
please contact Licensor: https://www.emqx.com/en/contact
Notice
The Business Source License (this document, or the “License”) is not an Open
Source license. However, the Licensed Work will eventually be made available
under an Open Source License, as stated in this License.
License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
“Business Source License” is a trademark of MariaDB Corporation Ab.
-----------------------------------------------------------------------------
Business Source License 1.1
Terms
The Licensor hereby grants you the right to copy, modify, create derivative
works, redistribute, and make non-production use of the Licensed Work. The
Licensor may make an Additional Use Grant, above, permitting limited
production use.
Effective on the Change Date, or the fourth anniversary of the first publicly
available distribution of a specific version of the Licensed Work under this
License, whichever comes first, the Licensor hereby grants you rights under
the terms of the Change License, and the rights granted in the paragraph
above terminate.
If your use of the Licensed Work does not comply with the requirements
currently in effect as described in this License, you must purchase a
commercial license from the Licensor, its affiliated entities, or authorized
resellers, or you must refrain from using the Licensed Work.
All copies of the original and modified Licensed Work, and derivative works
of the Licensed Work, are subject to this License. This License applies
separately for each version of the Licensed Work and the Change Date may vary
for each version of the Licensed Work released by Licensor.
You must conspicuously display this License on each original or modified copy
of the Licensed Work. If you receive the Licensed Work in original or
modified form from a third party, the terms and conditions set forth in this
License apply to your use of that work.
Any use of the Licensed Work in violation of this License will automatically
terminate your rights under this License for the current and all other
versions of the Licensed Work.
This License does not grant you any right in any trademark or logo of
Licensor or its affiliates (provided that you may use a trademark or logo of
Licensor as expressly required by this License).
TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
TITLE.
MariaDB hereby grants you permission to use this License's text to license
your works, and to refer to it using the trademark “Business Source License”,
as long as you comply with the Covenants of Licensor below.
Covenants of Licensor
In consideration of the right to use this License's text and the “Business
Source License” name and trademark, Licensor covenants to MariaDB, and to all
other recipients of the licensed work to be provided by Licensor:
1. To specify as the Change License the GPL Version 2.0 or any later version,
or a license that is compatible with GPL Version 2.0 or a later version,
where “compatible” means that software provided under the Change License can
be included in a program with software provided under GPL Version 2.0 or a
later version. Licensor may specify additional Change Licenses without
limitation.
2. To either: (a) specify an additional grant of rights to use that does not
impose any additional restriction on the right granted in this License, as
the Additional Use Grant; or (b) insert the text “None”.
3. To specify a Change Date.
4. Not to modify this License in any other way.

View File

@ -0,0 +1,36 @@
# EMQX GCP Pub/Sub Bridge
[Google Cloud Pub/Sub](https://cloud.google.com/pubsub) is a messaging service
provided by Google Cloud Platform (GCP).
The application is used to connect EMQX and GCP Pub/Sub.
User can create a rule and easily ingest IoT data into GCP Pub/Sub by leveraging
[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html).
# Documentation
- Refer to [Ingest data into GCP Pub/Sub](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-gcp-pubsub.html)
for how to use EMQX dashboard to ingest IoT data into GCP Pub/Sub.
- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
for the EMQX rules engine introduction.
# HTTP APIs
- Several APIs are provided for bridge management, which includes create bridge,
update bridge, get bridge, stop or restart bridge and list bridges etc.
Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges)
for more detailed information.
# Contributing
Please see our [contributing.md](../../CONTRIBUTING.md).
# License
EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).

View File

@ -0,0 +1,10 @@
%% -*- mode: erlang; -*-
%% rebar3 build configuration for the emqx_bridge_gcp_pubsub application.
%% Compile with debug_info so dialyzer and the debugger can inspect beams.
{erl_opts, [debug_info]}.
%% Path dependencies resolved inside the umbrella checkout (not hex packages):
%% the bridge builds against the sibling connector/resource/bridge apps.
{deps, [ {emqx_connector, {path, "../../apps/emqx_connector"}}
, {emqx_resource, {path, "../../apps/emqx_resource"}}
, {emqx_bridge, {path, "../../apps/emqx_bridge"}}
]}.
%% Apps booted automatically by `rebar3 shell` for interactive development.
{shell, [
{apps, [emqx_bridge_gcp_pubsub]}
]}.

View File

@ -0,0 +1,13 @@
%% OTP application resource specification for the EMQX Enterprise GCP Pub/Sub
%% bridge. Metadata only; no application callback module is declared, so
%% loading this app starts no processes of its own.
{application, emqx_bridge_gcp_pubsub, [
{description, "EMQX Enterprise GCP Pub/Sub Bridge"},
{vsn, "0.1.0"},
{registered, []},
%% ehttpc is a hard runtime dependency: the connector publishes to the
%% Pub/Sub REST endpoint through ehttpc connection pools.
{applications, [
kernel,
stdlib,
ehttpc
]},
{env, []},
{modules, []},
{links, []}
]}.

View File

@ -2,7 +2,7 @@
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. %% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
-module(emqx_ee_bridge_gcp_pubsub). -module(emqx_bridge_gcp_pubsub).
-include_lib("typerefl/include/types.hrl"). -include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl"). -include_lib("hocon/include/hoconsc.hrl").

View File

@ -2,7 +2,7 @@
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. %% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
-module(emqx_ee_connector_gcp_pubsub). -module(emqx_bridge_gcp_pubsub_connector).
-behaviour(emqx_resource). -behaviour(emqx_resource).
@ -26,9 +26,8 @@
]). ]).
-export([reply_delegator/3]). -export([reply_delegator/3]).
-type bridge_id() :: binary().
-type jwt_worker() :: binary(). -type jwt_worker() :: binary().
-type service_account_json() :: emqx_ee_bridge_gcp_pubsub:service_account_json(). -type service_account_json() :: emqx_bridge_gcp_pubsub:service_account_json().
-type config() :: #{ -type config() :: #{
connect_timeout := emqx_schema:duration_ms(), connect_timeout := emqx_schema:duration_ms(),
max_retries := non_neg_integer(), max_retries := non_neg_integer(),
@ -43,7 +42,7 @@
jwt_worker_id := jwt_worker(), jwt_worker_id := jwt_worker(),
max_retries := non_neg_integer(), max_retries := non_neg_integer(),
payload_template := emqx_plugin_libs_rule:tmpl_token(), payload_template := emqx_plugin_libs_rule:tmpl_token(),
pool_name := atom(), pool_name := binary(),
project_id := binary(), project_id := binary(),
pubsub_topic := binary(), pubsub_topic := binary(),
request_timeout := timer:time() request_timeout := timer:time()
@ -82,7 +81,7 @@ on_start(
%% emulating the emulator behavior %% emulating the emulator behavior
%% https://cloud.google.com/pubsub/docs/emulator %% https://cloud.google.com/pubsub/docs/emulator
HostPort = os:getenv("PUBSUB_EMULATOR_HOST", "pubsub.googleapis.com:443"), HostPort = os:getenv("PUBSUB_EMULATOR_HOST", "pubsub.googleapis.com:443"),
{Host, Port} = emqx_schema:parse_server(HostPort, #{default_port => 443}), #{hostname := Host, port := Port} = emqx_schema:parse_server(HostPort, #{default_port => 443}),
PoolType = random, PoolType = random,
Transport = tls, Transport = tls,
TransportOpts = emqx_tls_lib:to_client_opts(#{enable => true, verify => verify_none}), TransportOpts = emqx_tls_lib:to_client_opts(#{enable => true, verify => verify_none}),
@ -102,14 +101,13 @@ on_start(
jwt_worker_id := JWTWorkerId, jwt_worker_id := JWTWorkerId,
project_id := ProjectId project_id := ProjectId
} = ensure_jwt_worker(InstanceId, Config), } = ensure_jwt_worker(InstanceId, Config),
PoolName = emqx_plugin_libs_pool:pool_name(InstanceId),
State = #{ State = #{
connect_timeout => ConnectTimeout, connect_timeout => ConnectTimeout,
instance_id => InstanceId, instance_id => InstanceId,
jwt_worker_id => JWTWorkerId, jwt_worker_id => JWTWorkerId,
max_retries => MaxRetries, max_retries => MaxRetries,
payload_template => emqx_plugin_libs_rule:preproc_tmpl(PayloadTemplate), payload_template => emqx_plugin_libs_rule:preproc_tmpl(PayloadTemplate),
pool_name => PoolName, pool_name => InstanceId,
project_id => ProjectId, project_id => ProjectId,
pubsub_topic => PubSubTopic, pubsub_topic => PubSubTopic,
request_timeout => RequestTimeout request_timeout => RequestTimeout
@ -118,20 +116,20 @@ on_start(
gcp_pubsub_on_start_before_starting_pool, gcp_pubsub_on_start_before_starting_pool,
#{ #{
instance_id => InstanceId, instance_id => InstanceId,
pool_name => PoolName, pool_name => InstanceId,
pool_opts => PoolOpts pool_opts => PoolOpts
} }
), ),
?tp(gcp_pubsub_starting_ehttpc_pool, #{pool_name => PoolName}), ?tp(gcp_pubsub_starting_ehttpc_pool, #{pool_name => InstanceId}),
case ehttpc_sup:start_pool(PoolName, PoolOpts) of case ehttpc_sup:start_pool(InstanceId, PoolOpts) of
{ok, _} -> {ok, _} ->
{ok, State}; {ok, State};
{error, {already_started, _}} -> {error, {already_started, _}} ->
?tp(gcp_pubsub_ehttpc_pool_already_started, #{pool_name => PoolName}), ?tp(gcp_pubsub_ehttpc_pool_already_started, #{pool_name => InstanceId}),
{ok, State}; {ok, State};
{error, Reason} -> {error, Reason} ->
?tp(gcp_pubsub_ehttpc_pool_start_failure, #{ ?tp(gcp_pubsub_ehttpc_pool_start_failure, #{
pool_name => PoolName, pool_name => InstanceId,
reason => Reason reason => Reason
}), }),
{error, Reason} {error, Reason}
@ -140,10 +138,7 @@ on_start(
-spec on_stop(manager_id(), state()) -> ok | {error, term()}. -spec on_stop(manager_id(), state()) -> ok | {error, term()}.
on_stop( on_stop(
InstanceId, InstanceId,
_State = #{ _State = #{jwt_worker_id := JWTWorkerId, pool_name := PoolName}
jwt_worker_id := JWTWorkerId,
pool_name := PoolName
}
) -> ) ->
?tp(gcp_pubsub_stop, #{instance_id => InstanceId, jwt_worker_id => JWTWorkerId}), ?tp(gcp_pubsub_stop, #{instance_id => InstanceId, jwt_worker_id => JWTWorkerId}),
?SLOG(info, #{ ?SLOG(info, #{
@ -155,7 +150,7 @@ on_stop(
ehttpc_sup:stop_pool(PoolName). ehttpc_sup:stop_pool(PoolName).
-spec on_query( -spec on_query(
bridge_id(), resource_id(),
{send_message, map()}, {send_message, map()},
state() state()
) -> ) ->
@ -163,32 +158,32 @@ on_stop(
| {ok, status_code(), headers(), body()} | {ok, status_code(), headers(), body()}
| {error, {recoverable_error, term()}} | {error, {recoverable_error, term()}}
| {error, term()}. | {error, term()}.
on_query(BridgeId, {send_message, Selected}, State) -> on_query(ResourceId, {send_message, Selected}, State) ->
Requests = [{send_message, Selected}], Requests = [{send_message, Selected}],
?TRACE( ?TRACE(
"QUERY_SYNC", "QUERY_SYNC",
"gcp_pubsub_received", "gcp_pubsub_received",
#{requests => Requests, connector => BridgeId, state => State} #{requests => Requests, connector => ResourceId, state => State}
), ),
do_send_requests_sync(State, Requests, BridgeId). do_send_requests_sync(State, Requests, ResourceId).
-spec on_query_async( -spec on_query_async(
bridge_id(), resource_id(),
{send_message, map()}, {send_message, map()},
{ReplyFun :: function(), Args :: list()}, {ReplyFun :: function(), Args :: list()},
state() state()
) -> {ok, pid()}. ) -> {ok, pid()}.
on_query_async(BridgeId, {send_message, Selected}, ReplyFunAndArgs, State) -> on_query_async(ResourceId, {send_message, Selected}, ReplyFunAndArgs, State) ->
Requests = [{send_message, Selected}], Requests = [{send_message, Selected}],
?TRACE( ?TRACE(
"QUERY_ASYNC", "QUERY_ASYNC",
"gcp_pubsub_received", "gcp_pubsub_received",
#{requests => Requests, connector => BridgeId, state => State} #{requests => Requests, connector => ResourceId, state => State}
), ),
do_send_requests_async(State, Requests, ReplyFunAndArgs, BridgeId). do_send_requests_async(State, Requests, ReplyFunAndArgs, ResourceId).
-spec on_batch_query( -spec on_batch_query(
bridge_id(), resource_id(),
[{send_message, map()}], [{send_message, map()}],
state() state()
) -> ) ->
@ -196,34 +191,30 @@ on_query_async(BridgeId, {send_message, Selected}, ReplyFunAndArgs, State) ->
| {ok, status_code(), headers(), body()} | {ok, status_code(), headers(), body()}
| {error, {recoverable_error, term()}} | {error, {recoverable_error, term()}}
| {error, term()}. | {error, term()}.
on_batch_query(BridgeId, Requests, State) -> on_batch_query(ResourceId, Requests, State) ->
?TRACE( ?TRACE(
"QUERY_SYNC", "QUERY_SYNC",
"gcp_pubsub_received", "gcp_pubsub_received",
#{requests => Requests, connector => BridgeId, state => State} #{requests => Requests, connector => ResourceId, state => State}
), ),
do_send_requests_sync(State, Requests, BridgeId). do_send_requests_sync(State, Requests, ResourceId).
-spec on_batch_query_async( -spec on_batch_query_async(
bridge_id(), resource_id(),
[{send_message, map()}], [{send_message, map()}],
{ReplyFun :: function(), Args :: list()}, {ReplyFun :: function(), Args :: list()},
state() state()
) -> {ok, pid()}. ) -> {ok, pid()}.
on_batch_query_async(BridgeId, Requests, ReplyFunAndArgs, State) -> on_batch_query_async(ResourceId, Requests, ReplyFunAndArgs, State) ->
?TRACE( ?TRACE(
"QUERY_ASYNC", "QUERY_ASYNC",
"gcp_pubsub_received", "gcp_pubsub_received",
#{requests => Requests, connector => BridgeId, state => State} #{requests => Requests, connector => ResourceId, state => State}
), ),
do_send_requests_async(State, Requests, ReplyFunAndArgs, BridgeId). do_send_requests_async(State, Requests, ReplyFunAndArgs, ResourceId).
-spec on_get_status(manager_id(), state()) -> connected | disconnected. -spec on_get_status(manager_id(), state()) -> connected | disconnected.
on_get_status(InstanceId, State) -> on_get_status(InstanceId, #{connect_timeout := Timeout, pool_name := PoolName} = State) ->
#{
connect_timeout := Timeout,
pool_name := PoolName
} = State,
case do_get_status(InstanceId, PoolName, Timeout) of case do_get_status(InstanceId, PoolName, Timeout) of
true -> true ->
connected; connected;
@ -245,8 +236,7 @@ on_get_status(InstanceId, State) ->
project_id := binary() project_id := binary()
}. }.
ensure_jwt_worker(InstanceId, #{ ensure_jwt_worker(InstanceId, #{
service_account_json := ServiceAccountJSON, service_account_json := ServiceAccountJSON
pubsub_topic := PubSubTopic
}) -> }) ->
#{ #{
project_id := ProjectId, project_id := ProjectId,
@ -276,14 +266,8 @@ ensure_jwt_worker(InstanceId, #{
{ok, Worker0} -> {ok, Worker0} ->
Worker0; Worker0;
Error -> Error ->
?tp( ?tp(error, "gcp_pubsub_bridge_jwt_worker_failed_to_start", #{
gcp_pubsub_bridge_jwt_worker_failed_to_start, connector => InstanceId,
#{instance_id => InstanceId, reason => Error}
),
?SLOG(error, #{
msg => "failed_to_start_gcp_pubsub_jwt_worker",
instance_id => InstanceId,
pubsub_topic => PubSubTopic,
reason => Error reason => Error
}), }),
_ = emqx_connector_jwt_sup:ensure_worker_deleted(JWTWorkerId), _ = emqx_connector_jwt_sup:ensure_worker_deleted(JWTWorkerId),
@ -301,26 +285,14 @@ ensure_jwt_worker(InstanceId, #{
demonitor(MRef, [flush]), demonitor(MRef, [flush]),
ok; ok;
{'DOWN', MRef, process, Worker, Reason} -> {'DOWN', MRef, process, Worker, Reason} ->
?tp( ?tp(error, "gcp_pubsub_bridge_jwt_worker_failed_to_start", #{
gcp_pubsub_bridge_jwt_worker_failed_to_start,
#{
resource_id => InstanceId,
reason => Reason
}
),
?SLOG(error, #{
msg => "gcp_pubsub_bridge_jwt_worker_failed_to_start",
connector => InstanceId, connector => InstanceId,
reason => Reason reason => Reason
}), }),
_ = emqx_connector_jwt_sup:ensure_worker_deleted(JWTWorkerId), _ = emqx_connector_jwt_sup:ensure_worker_deleted(JWTWorkerId),
throw(failed_to_start_jwt_worker) throw(failed_to_start_jwt_worker)
after 10_000 -> after 10_000 ->
?tp(gcp_pubsub_bridge_jwt_timeout, #{resource_id => InstanceId}), ?tp(warning, "gcp_pubsub_bridge_jwt_timeout", #{connector => InstanceId}),
?SLOG(warning, #{
msg => "gcp_pubsub_bridge_jwt_timeout",
connector => InstanceId
}),
demonitor(MRef, [flush]), demonitor(MRef, [flush]),
_ = emqx_connector_jwt_sup:ensure_worker_deleted(JWTWorkerId), _ = emqx_connector_jwt_sup:ensure_worker_deleted(JWTWorkerId),
throw(timeout_creating_jwt) throw(timeout_creating_jwt)
@ -569,7 +541,7 @@ reply_delegator(_ResourceId, ReplyFunAndArgs, Result) ->
emqx_resource:apply_reply_fun(ReplyFunAndArgs, Result) emqx_resource:apply_reply_fun(ReplyFunAndArgs, Result)
end. end.
-spec do_get_status(manager_id(), atom(), timer:time()) -> boolean(). -spec do_get_status(manager_id(), binary(), timer:time()) -> boolean().
do_get_status(InstanceId, PoolName, Timeout) -> do_get_status(InstanceId, PoolName, Timeout) ->
Workers = [Worker || {_WorkerName, Worker} <- ehttpc:workers(PoolName)], Workers = [Worker || {_WorkerName, Worker} <- ehttpc:workers(PoolName)],
DoPerWorker = DoPerWorker =

View File

@ -2,7 +2,7 @@
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. %% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
-module(emqx_ee_bridge_gcp_pubsub_SUITE). -module(emqx_bridge_gcp_pubsub_SUITE).
-compile(nowarn_export_all). -compile(nowarn_export_all).
-compile(export_all). -compile(export_all).
@ -70,22 +70,13 @@ init_per_suite(Config) ->
ok = emqx_connector_test_helpers:start_apps([emqx_resource, emqx_bridge, emqx_rule_engine]), ok = emqx_connector_test_helpers:start_apps([emqx_resource, emqx_bridge, emqx_rule_engine]),
{ok, _} = application:ensure_all_started(emqx_connector), {ok, _} = application:ensure_all_started(emqx_connector),
emqx_mgmt_api_test_util:init_suite(), emqx_mgmt_api_test_util:init_suite(),
HTTPHost = "localhost", Config.
HTTPPort = 56000,
HostPort = HTTPHost ++ ":" ++ integer_to_list(HTTPPort),
true = os:putenv("PUBSUB_EMULATOR_HOST", HostPort),
[
{http_host, HTTPHost},
{http_port, HTTPPort}
| Config
].
end_per_suite(_Config) -> end_per_suite(_Config) ->
emqx_mgmt_api_test_util:end_suite(), emqx_mgmt_api_test_util:end_suite(),
ok = emqx_common_test_helpers:stop_apps([emqx_conf]), ok = emqx_common_test_helpers:stop_apps([emqx_conf]),
ok = emqx_connector_test_helpers:stop_apps([emqx_bridge, emqx_resource, emqx_rule_engine]), ok = emqx_connector_test_helpers:stop_apps([emqx_bridge, emqx_resource, emqx_rule_engine]),
_ = application:stop(emqx_connector), _ = application:stop(emqx_connector),
os:unsetenv("PUBSUB_EMULATOR_HOST"),
ok. ok.
init_per_group(sync_query, Config) -> init_per_group(sync_query, Config) ->
@ -113,26 +104,26 @@ init_per_testcase(TestCase, Config0) when
1 -> 1 ->
[{skip_due_to_no_batching, true}]; [{skip_due_to_no_batching, true}];
_ -> _ ->
{ok, _} = start_echo_http_server(),
delete_all_bridges(), delete_all_bridges(),
Tid = install_telemetry_handler(TestCase), Tid = install_telemetry_handler(TestCase),
Config = generate_config(Config0), Config = generate_config(Config0),
put(telemetry_table, Tid), put(telemetry_table, Tid),
[{telemetry_table, Tid} | Config] {ok, HttpServer} = start_echo_http_server(),
[{telemetry_table, Tid}, {http_server, HttpServer} | Config]
end; end;
init_per_testcase(TestCase, Config0) -> init_per_testcase(TestCase, Config0) ->
ct:timetrap({seconds, 30}), ct:timetrap({seconds, 30}),
{ok, _} = start_echo_http_server(), {ok, HttpServer} = start_echo_http_server(),
delete_all_bridges(), delete_all_bridges(),
Tid = install_telemetry_handler(TestCase), Tid = install_telemetry_handler(TestCase),
Config = generate_config(Config0), Config = generate_config(Config0),
put(telemetry_table, Tid), put(telemetry_table, Tid),
[{telemetry_table, Tid} | Config]. [{telemetry_table, Tid}, {http_server, HttpServer} | Config].
end_per_testcase(_TestCase, _Config) -> end_per_testcase(_TestCase, _Config) ->
ok = snabbkaffe:stop(), ok = snabbkaffe:stop(),
delete_all_bridges(), delete_all_bridges(),
ok = emqx_connector_web_hook_server:stop(), ok = stop_echo_http_server(),
emqx_common_test_helpers:call_janitor(), emqx_common_test_helpers:call_janitor(),
ok. ok.
@ -242,7 +233,6 @@ success_http_handler() ->
start_echo_http_server() -> start_echo_http_server() ->
HTTPHost = "localhost", HTTPHost = "localhost",
HTTPPort = 56000,
HTTPPath = <<"/v1/projects/myproject/topics/mytopic:publish">>, HTTPPath = <<"/v1/projects/myproject/topics/mytopic:publish">>,
ServerSSLOpts = ServerSSLOpts =
[ [
@ -250,14 +240,23 @@ start_echo_http_server() ->
{versions, ['tlsv1.2', 'tlsv1.3']}, {versions, ['tlsv1.2', 'tlsv1.3']},
{ciphers, ["ECDHE-RSA-AES256-GCM-SHA384", "TLS_CHACHA20_POLY1305_SHA256"]} {ciphers, ["ECDHE-RSA-AES256-GCM-SHA384", "TLS_CHACHA20_POLY1305_SHA256"]}
] ++ certs(), ] ++ certs(),
{ok, _} = emqx_connector_web_hook_server:start_link(HTTPPort, HTTPPath, ServerSSLOpts), {ok, {HTTPPort, _Pid}} = emqx_connector_web_hook_server:start_link(
random, HTTPPath, ServerSSLOpts
),
ok = emqx_connector_web_hook_server:set_handler(success_http_handler()), ok = emqx_connector_web_hook_server:set_handler(success_http_handler()),
HTTPHost = "localhost",
HostPort = HTTPHost ++ ":" ++ integer_to_list(HTTPPort),
true = os:putenv("PUBSUB_EMULATOR_HOST", HostPort),
{ok, #{ {ok, #{
host_port => HTTPHost ++ ":" ++ integer_to_list(HTTPPort), host_port => HostPort,
host => HTTPHost, host => HTTPHost,
port => HTTPPort port => HTTPPort
}}. }}.
stop_echo_http_server() ->
os:unsetenv("PUBSUB_EMULATOR_HOST"),
ok = emqx_connector_web_hook_server:stop().
certs() -> certs() ->
CertsPath = emqx_common_test_helpers:deps_path(emqx, "etc/certs"), CertsPath = emqx_common_test_helpers:deps_path(emqx, "etc/certs"),
[ [
@ -917,7 +916,7 @@ t_invalid_private_key(Config) ->
#{<<"private_key">> => InvalidPrivateKeyPEM} #{<<"private_key">> => InvalidPrivateKeyPEM}
} }
), ),
#{?snk_kind := gcp_pubsub_bridge_jwt_worker_failed_to_start}, #{?snk_kind := "gcp_pubsub_bridge_jwt_worker_failed_to_start"},
20_000 20_000
), ),
Res Res
@ -928,7 +927,7 @@ t_invalid_private_key(Config) ->
[#{reason := Reason}] when [#{reason := Reason}] when
Reason =:= noproc orelse Reason =:= noproc orelse
Reason =:= {shutdown, {error, empty_key}}, Reason =:= {shutdown, {error, empty_key}},
?of_kind(gcp_pubsub_bridge_jwt_worker_failed_to_start, Trace) ?of_kind("gcp_pubsub_bridge_jwt_worker_failed_to_start", Trace)
), ),
?assertMatch( ?assertMatch(
[#{error := empty_key}], [#{error := empty_key}],
@ -956,14 +955,14 @@ t_jwt_worker_start_timeout(Config) ->
#{<<"private_key">> => InvalidPrivateKeyPEM} #{<<"private_key">> => InvalidPrivateKeyPEM}
} }
), ),
#{?snk_kind := gcp_pubsub_bridge_jwt_timeout}, #{?snk_kind := "gcp_pubsub_bridge_jwt_timeout"},
20_000 20_000
), ),
Res Res
end, end,
fun(Res, Trace) -> fun(Res, Trace) ->
?assertMatch({ok, _}, Res), ?assertMatch({ok, _}, Res),
?assertMatch([_], ?of_kind(gcp_pubsub_bridge_jwt_timeout, Trace)), ?assertMatch([_], ?of_kind("gcp_pubsub_bridge_jwt_timeout", Trace)),
ok ok
end end
), ),
@ -1329,7 +1328,7 @@ t_failed_to_start_jwt_worker(Config) ->
fun(Trace) -> fun(Trace) ->
?assertMatch( ?assertMatch(
[#{reason := {error, restarting}}], [#{reason := {error, restarting}}],
?of_kind(gcp_pubsub_bridge_jwt_worker_failed_to_start, Trace) ?of_kind("gcp_pubsub_bridge_jwt_worker_failed_to_start", Trace)
), ),
ok ok
end end

View File

@ -0,0 +1,94 @@
Business Source License 1.1
Licensor: Hangzhou EMQ Technologies Co., Ltd.
Licensed Work: EMQX Enterprise Edition
The Licensed Work is (c) 2023
Hangzhou EMQ Technologies Co., Ltd.
Additional Use Grant: Students and educators are granted right to copy,
modify, and create derivative work for research
or education.
Change Date: 2027-02-01
Change License: Apache License, Version 2.0
For information about alternative licensing arrangements for the Software,
please contact Licensor: https://www.emqx.com/en/contact
Notice
The Business Source License (this document, or the “License”) is not an Open
Source license. However, the Licensed Work will eventually be made available
under an Open Source License, as stated in this License.
License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
“Business Source License” is a trademark of MariaDB Corporation Ab.
-----------------------------------------------------------------------------
Business Source License 1.1
Terms
The Licensor hereby grants you the right to copy, modify, create derivative
works, redistribute, and make non-production use of the Licensed Work. The
Licensor may make an Additional Use Grant, above, permitting limited
production use.
Effective on the Change Date, or the fourth anniversary of the first publicly
available distribution of a specific version of the Licensed Work under this
License, whichever comes first, the Licensor hereby grants you rights under
the terms of the Change License, and the rights granted in the paragraph
above terminate.
If your use of the Licensed Work does not comply with the requirements
currently in effect as described in this License, you must purchase a
commercial license from the Licensor, its affiliated entities, or authorized
resellers, or you must refrain from using the Licensed Work.
All copies of the original and modified Licensed Work, and derivative works
of the Licensed Work, are subject to this License. This License applies
separately for each version of the Licensed Work and the Change Date may vary
for each version of the Licensed Work released by Licensor.
You must conspicuously display this License on each original or modified copy
of the Licensed Work. If you receive the Licensed Work in original or
modified form from a third party, the terms and conditions set forth in this
License apply to your use of that work.
Any use of the Licensed Work in violation of this License will automatically
terminate your rights under this License for the current and all other
versions of the Licensed Work.
This License does not grant you any right in any trademark or logo of
Licensor or its affiliates (provided that you may use a trademark or logo of
Licensor as expressly required by this License).
TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
TITLE.
MariaDB hereby grants you permission to use this License's text to license
your works, and to refer to it using the trademark “Business Source License”,
as long as you comply with the Covenants of Licensor below.
Covenants of Licensor
In consideration of the right to use this License's text and the “Business
Source License” name and trademark, Licensor covenants to MariaDB, and to all
other recipients of the licensed work to be provided by Licensor:
1. To specify as the Change License the GPL Version 2.0 or any later version,
or a license that is compatible with GPL Version 2.0 or a later version,
where “compatible” means that software provided under the Change License can
be included in a program with software provided under GPL Version 2.0 or a
later version. Licensor may specify additional Change Licenses without
limitation.
2. To either: (a) specify an additional grant of rights to use that does not
impose any additional restriction on the right granted in this License, as
the Additional Use Grant; or (b) insert the text “None”.
3. To specify a Change Date.
4. Not to modify this License in any other way.

View File

@ -0,0 +1,38 @@
# EMQX HStreamDB Bridge
[HStreamDB](https://hstream.io/) is a streaming database purpose-built to ingest,
store, process, and analyze massive data streams. It is a modern data infrastructure
that unifies messaging, stream processing, and storage to help get value out of
your data in real-time.
The application is used to connect EMQX and HStreamDB.
User can create a rule and easily ingest IoT data into HStreamDB by leveraging
[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html).
# Documentation
- Refer to [Ingest data into HStreamDB](todo)
for how to use EMQX dashboard to ingest IoT data into HStreamDB.
- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
for the EMQX rules engine introduction.
# HTTP APIs
- Several APIs are provided for bridge management, which includes create bridge,
update bridge, get bridge, stop or restart bridge and list bridges etc.
Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges)
for more detailed information.
# Contributing
Please see our [contributing.md](../../CONTRIBUTING.md).
# License
EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).

View File

@ -0,0 +1,9 @@
{application, emqx_bridge_hstreamdb, [
{description, "EMQX Enterprise HStreamDB Bridge"},
{vsn, "0.1.0"},
{registered, []},
{applications, [kernel, stdlib]},
{env, []},
{modules, []},
{links, []}
]}.

View File

@ -0,0 +1,94 @@
Business Source License 1.1
Licensor: Hangzhou EMQ Technologies Co., Ltd.
Licensed Work: EMQX Enterprise Edition
The Licensed Work is (c) 2023
Hangzhou EMQ Technologies Co., Ltd.
Additional Use Grant: Students and educators are granted right to copy,
modify, and create derivative work for research
or education.
Change Date: 2027-02-01
Change License: Apache License, Version 2.0
For information about alternative licensing arrangements for the Software,
please contact Licensor: https://www.emqx.com/en/contact
Notice
The Business Source License (this document, or the “License”) is not an Open
Source license. However, the Licensed Work will eventually be made available
under an Open Source License, as stated in this License.
License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
“Business Source License” is a trademark of MariaDB Corporation Ab.
-----------------------------------------------------------------------------
Business Source License 1.1
Terms
The Licensor hereby grants you the right to copy, modify, create derivative
works, redistribute, and make non-production use of the Licensed Work. The
Licensor may make an Additional Use Grant, above, permitting limited
production use.
Effective on the Change Date, or the fourth anniversary of the first publicly
available distribution of a specific version of the Licensed Work under this
License, whichever comes first, the Licensor hereby grants you rights under
the terms of the Change License, and the rights granted in the paragraph
above terminate.
If your use of the Licensed Work does not comply with the requirements
currently in effect as described in this License, you must purchase a
commercial license from the Licensor, its affiliated entities, or authorized
resellers, or you must refrain from using the Licensed Work.
All copies of the original and modified Licensed Work, and derivative works
of the Licensed Work, are subject to this License. This License applies
separately for each version of the Licensed Work and the Change Date may vary
for each version of the Licensed Work released by Licensor.
You must conspicuously display this License on each original or modified copy
of the Licensed Work. If you receive the Licensed Work in original or
modified form from a third party, the terms and conditions set forth in this
License apply to your use of that work.
Any use of the Licensed Work in violation of this License will automatically
terminate your rights under this License for the current and all other
versions of the Licensed Work.
This License does not grant you any right in any trademark or logo of
Licensor or its affiliates (provided that you may use a trademark or logo of
Licensor as expressly required by this License).
TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
TITLE.
MariaDB hereby grants you permission to use this License's text to license
your works, and to refer to it using the trademark “Business Source License”,
as long as you comply with the Covenants of Licensor below.
Covenants of Licensor
In consideration of the right to use this License's text and the "Business
Source License” name and trademark, Licensor covenants to MariaDB, and to all
other recipients of the licensed work to be provided by Licensor:
1. To specify as the Change License the GPL Version 2.0 or any later version,
or a license that is compatible with GPL Version 2.0 or a later version,
where “compatible” means that software provided under the Change License can
be included in a program with software provided under GPL Version 2.0 or a
later version. Licensor may specify additional Change Licenses without
limitation.
2. To either: (a) specify an additional grant of rights to use that does not
impose any additional restriction on the right granted in this License, as
the Additional Use Grant; or (b) insert the text “None”.
3. To specify a Change Date.
4. Not to modify this License in any other way.

View File

@ -0,0 +1,49 @@
# EMQX InfluxDB Bridge
[InfluxDB](https://github.com/influxdata/influxdb) is an open-source time-series
database that is optimized for storing, retrieving, and querying large volumes of
time-stamped data.
It is commonly used for monitoring and analysis of metrics, events, and real-time
analytics.
InfluxDB is designed to be fast, efficient, and scalable, and it has a SQL-like
query language that makes it easy to extract insights from time-series data.
The application is used to connect EMQX and InfluxDB. User can create a rule and
easily ingest IoT data into InfluxDB by leveraging
[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html).
# Documentation
- Refer to [Ingest data into InfluxDB](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-influxdb.html)
for how to use EMQX dashboard to ingest IoT data into InfluxDB.
- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
for the EMQX rules engine introduction.
# HTTP APIs
- Several APIs are provided for bridge management, which includes create bridge,
update bridge, get bridge, stop or restart bridge and list bridges etc.
Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) for more detailed information.
- [Create bridge API doc](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges/paths/~1bridges/post)
list required parameters for creating a InfluxDB bridge.
There are two types of InfluxDB API (`v1` and `v2`), please select the right
version of InfluxDB. Below are several important parameters for `v1`,
- `server`: The IPv4 or IPv6 address or the hostname to connect to.
- `database`: InfluxDB database name
- `write_syntax`: Conf of InfluxDB line protocol to write data points. It is a text-based format that provides the measurement, tag set, field set, and timestamp of a data point, and placeholder supported.
# Contributing
Please see our [contributing.md](../../CONTRIBUTING.md).
# License
EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).

View File

@ -0,0 +1,9 @@
%% OTP application resource file for the EMQX Enterprise InfluxDB bridge.
%% Declares application metadata only; no processes, env, or modules are
%% listed here.
{application, emqx_bridge_influxdb, [
{description, "EMQX Enterprise InfluxDB Bridge"},
%% Application version.
{vsn, "0.1.0"},
%% No registered process names.
{registered, []},
%% Applications that must be started before this one.
{applications, [kernel, stdlib]},
%% No application environment defaults.
{env, []},
%% Module list is empty here — presumably populated by the build tool; confirm.
{modules, []},
{links, []}
]}.

View File

@ -10,10 +10,21 @@ workers from `emqx_resource`. It implements the connection management
and interaction without need for a separate connector app, since it's and interaction without need for a separate connector app, since it's
not used by authentication and authorization applications. not used by authentication and authorization applications.
## Contributing # Documentation links
For more information on Apache Kafka, please see its [official
site](https://kafka.apache.org/).
# Configurations
Please see [our official
documentation](https://www.emqx.io/docs/en/v5.0/data-integration/data-bridge-kafka.html)
for more detailed info.
# Contributing
Please see our [contributing.md](../../CONTRIBUTING.md). Please see our [contributing.md](../../CONTRIBUTING.md).
## License # License
See [BSL](./BSL.txt). EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).

View File

@ -1,6 +1,6 @@
{application, emqx_bridge_kafka, [ {application, emqx_bridge_kafka, [
{description, "EMQX Enterprise Kafka Bridge"}, {description, "EMQX Enterprise Kafka Bridge"},
{vsn, "0.1.0"}, {vsn, "0.1.1"},
{registered, [emqx_bridge_kafka_consumer_sup]}, {registered, [emqx_bridge_kafka_consumer_sup]},
{applications, [ {applications, [
kernel, kernel,

View File

@ -179,7 +179,12 @@ on_get_status(_InstanceID, State) ->
kafka_client_id := ClientID, kafka_client_id := ClientID,
kafka_topics := KafkaTopics kafka_topics := KafkaTopics
} = State, } = State,
do_get_status(State, ClientID, KafkaTopics, SubscriberId). case do_get_status(ClientID, KafkaTopics, SubscriberId) of
{disconnected, Message} ->
{disconnected, State, Message};
Res ->
Res
end.
%%------------------------------------------------------------------------------------- %%-------------------------------------------------------------------------------------
%% `brod_group_subscriber' API %% `brod_group_subscriber' API
@ -376,41 +381,41 @@ stop_client(ClientID) ->
), ),
ok. ok.
do_get_status(State, ClientID, [KafkaTopic | RestTopics], SubscriberId) -> do_get_status(ClientID, [KafkaTopic | RestTopics], SubscriberId) ->
case brod:get_partitions_count(ClientID, KafkaTopic) of case brod:get_partitions_count(ClientID, KafkaTopic) of
{ok, NPartitions} -> {ok, NPartitions} ->
case do_get_status1(ClientID, KafkaTopic, SubscriberId, NPartitions) of case do_get_topic_status(ClientID, KafkaTopic, SubscriberId, NPartitions) of
connected -> do_get_status(State, ClientID, RestTopics, SubscriberId); connected -> do_get_status(ClientID, RestTopics, SubscriberId);
disconnected -> disconnected disconnected -> disconnected
end; end;
{error, {client_down, Context}} -> {error, {client_down, Context}} ->
case infer_client_error(Context) of case infer_client_error(Context) of
auth_error -> auth_error ->
Message = "Authentication error. " ++ ?CLIENT_DOWN_MESSAGE, Message = "Authentication error. " ++ ?CLIENT_DOWN_MESSAGE,
{disconnected, State, Message}; {disconnected, Message};
{auth_error, Message0} -> {auth_error, Message0} ->
Message = binary_to_list(Message0) ++ "; " ++ ?CLIENT_DOWN_MESSAGE, Message = binary_to_list(Message0) ++ "; " ++ ?CLIENT_DOWN_MESSAGE,
{disconnected, State, Message}; {disconnected, Message};
connection_refused -> connection_refused ->
Message = "Connection refused. " ++ ?CLIENT_DOWN_MESSAGE, Message = "Connection refused. " ++ ?CLIENT_DOWN_MESSAGE,
{disconnected, State, Message}; {disconnected, Message};
_ -> _ ->
{disconnected, State, ?CLIENT_DOWN_MESSAGE} {disconnected, ?CLIENT_DOWN_MESSAGE}
end; end;
{error, leader_not_available} -> {error, leader_not_available} ->
Message = Message =
"Leader connection not available. Please check the Kafka topic used," "Leader connection not available. Please check the Kafka topic used,"
" the connection parameters and Kafka cluster health", " the connection parameters and Kafka cluster health",
{disconnected, State, Message}; {disconnected, Message};
_ -> _ ->
disconnected disconnected
end; end;
do_get_status(_State, _ClientID, _KafkaTopics = [], _SubscriberId) -> do_get_status(_ClientID, _KafkaTopics = [], _SubscriberId) ->
connected. connected.
-spec do_get_status1(brod:client_id(), binary(), subscriber_id(), pos_integer()) -> -spec do_get_topic_status(brod:client_id(), binary(), subscriber_id(), pos_integer()) ->
connected | disconnected. connected | disconnected.
do_get_status1(ClientID, KafkaTopic, SubscriberId, NPartitions) -> do_get_topic_status(ClientID, KafkaTopic, SubscriberId, NPartitions) ->
Results = Results =
lists:map( lists:map(
fun(N) -> fun(N) ->

View File

@ -1156,11 +1156,12 @@ t_start_and_consume_ok(Config) ->
), ),
%% Check that the bridge probe API doesn't leak atoms. %% Check that the bridge probe API doesn't leak atoms.
ProbeRes = probe_bridge_api(Config), ProbeRes0 = probe_bridge_api(Config),
?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes), ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0),
AtomsBefore = erlang:system_info(atom_count), AtomsBefore = erlang:system_info(atom_count),
%% Probe again; shouldn't have created more atoms. %% Probe again; shouldn't have created more atoms.
?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes), ProbeRes1 = probe_bridge_api(Config),
?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1),
AtomsAfter = erlang:system_info(atom_count), AtomsAfter = erlang:system_info(atom_count),
?assertEqual(AtomsBefore, AtomsAfter), ?assertEqual(AtomsBefore, AtomsAfter),
@ -1259,11 +1260,12 @@ t_multiple_topic_mappings(Config) ->
{ok, _} = snabbkaffe:receive_events(SRef0), {ok, _} = snabbkaffe:receive_events(SRef0),
%% Check that the bridge probe API doesn't leak atoms. %% Check that the bridge probe API doesn't leak atoms.
ProbeRes = probe_bridge_api(Config), ProbeRes0 = probe_bridge_api(Config),
?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes), ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0),
AtomsBefore = erlang:system_info(atom_count), AtomsBefore = erlang:system_info(atom_count),
%% Probe again; shouldn't have created more atoms. %% Probe again; shouldn't have created more atoms.
?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes), ProbeRes1 = probe_bridge_api(Config),
?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1),
AtomsAfter = erlang:system_info(atom_count), AtomsAfter = erlang:system_info(atom_count),
?assertEqual(AtomsBefore, AtomsAfter), ?assertEqual(AtomsBefore, AtomsAfter),
@ -1473,7 +1475,10 @@ do_t_receive_after_recovery(Config) ->
ResourceId = resource_id(Config), ResourceId = resource_id(Config),
?check_trace( ?check_trace(
begin begin
{ok, _} = create_bridge(Config), {ok, _} = create_bridge(
Config,
#{<<"kafka">> => #{<<"offset_reset_policy">> => <<"earliest">>}}
),
ping_until_healthy(Config, _Period = 1_500, _Timeout0 = 24_000), ping_until_healthy(Config, _Period = 1_500, _Timeout0 = 24_000),
{ok, connected} = emqx_resource_manager:health_check(ResourceId), {ok, connected} = emqx_resource_manager:health_check(ResourceId),
%% 0) ensure each partition commits its offset so it can %% 0) ensure each partition commits its offset so it can

View File

@ -0,0 +1,94 @@
Business Source License 1.1
Licensor: Hangzhou EMQ Technologies Co., Ltd.
Licensed Work: EMQX Enterprise Edition
The Licensed Work is (c) 2023
Hangzhou EMQ Technologies Co., Ltd.
Additional Use Grant: Students and educators are granted right to copy,
modify, and create derivative work for research
or education.
Change Date: 2027-02-01
Change License: Apache License, Version 2.0
For information about alternative licensing arrangements for the Software,
please contact Licensor: https://www.emqx.com/en/contact
Notice
The Business Source License (this document, or the “License”) is not an Open
Source license. However, the Licensed Work will eventually be made available
under an Open Source License, as stated in this License.
License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
“Business Source License” is a trademark of MariaDB Corporation Ab.
-----------------------------------------------------------------------------
Business Source License 1.1
Terms
The Licensor hereby grants you the right to copy, modify, create derivative
works, redistribute, and make non-production use of the Licensed Work. The
Licensor may make an Additional Use Grant, above, permitting limited
production use.
Effective on the Change Date, or the fourth anniversary of the first publicly
available distribution of a specific version of the Licensed Work under this
License, whichever comes first, the Licensor hereby grants you rights under
the terms of the Change License, and the rights granted in the paragraph
above terminate.
If your use of the Licensed Work does not comply with the requirements
currently in effect as described in this License, you must purchase a
commercial license from the Licensor, its affiliated entities, or authorized
resellers, or you must refrain from using the Licensed Work.
All copies of the original and modified Licensed Work, and derivative works
of the Licensed Work, are subject to this License. This License applies
separately for each version of the Licensed Work and the Change Date may vary
for each version of the Licensed Work released by Licensor.
You must conspicuously display this License on each original or modified copy
of the Licensed Work. If you receive the Licensed Work in original or
modified form from a third party, the terms and conditions set forth in this
License apply to your use of that work.
Any use of the Licensed Work in violation of this License will automatically
terminate your rights under this License for the current and all other
versions of the Licensed Work.
This License does not grant you any right in any trademark or logo of
Licensor or its affiliates (provided that you may use a trademark or logo of
Licensor as expressly required by this License).
TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
TITLE.
MariaDB hereby grants you permission to use this License's text to license
your works, and to refer to it using the trademark “Business Source License”,
as long as you comply with the Covenants of Licensor below.
Covenants of Licensor
In consideration of the right to use this License's text and the "Business
Source License” name and trademark, Licensor covenants to MariaDB, and to all
other recipients of the licensed work to be provided by Licensor:
1. To specify as the Change License the GPL Version 2.0 or any later version,
or a license that is compatible with GPL Version 2.0 or a later version,
where “compatible” means that software provided under the Change License can
be included in a program with software provided under GPL Version 2.0 or a
later version. Licensor may specify additional Change Licenses without
limitation.
2. To either: (a) specify an additional grant of rights to use that does not
impose any additional restriction on the right granted in this License, as
the Additional Use Grant; or (b) insert the text “None”.
3. To specify a Change Date.
4. Not to modify this License in any other way.

View File

@ -0,0 +1,36 @@
# EMQX MatrixDB Bridge
[MatrixDB](http://matrixdb.univ-lyon1.fr/) is a biological database focused on
molecular interactions between extracellular proteins and polysaccharides.
The application is used to connect EMQX and MatrixDB.
User can create a rule and easily ingest IoT data into MatrixDB by leveraging
[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html).
# Documentation
- Refer to [Ingest data into MatrixDB](todo)
for how to use EMQX dashboard to ingest IoT data into MatrixDB.
- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
for the EMQX rules engine introduction.
# HTTP APIs
- Several APIs are provided for bridge management, which includes create bridge,
update bridge, get bridge, stop or restart bridge and list bridges etc.
Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges)
for more detailed information.
# Contributing
Please see our [contributing.md](../../CONTRIBUTING.md).
# License
EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).

View File

@ -0,0 +1,9 @@
%% OTP application resource file for the EMQX Enterprise MatrixDB bridge.
%% Declares application metadata only; no processes, env, or modules are
%% listed here.
{application, emqx_bridge_matrix, [
{description, "EMQX Enterprise MatrixDB Bridge"},
%% Application version.
{vsn, "0.1.0"},
%% No registered process names.
{registered, []},
%% Applications that must be started before this one.
{applications, [kernel, stdlib]},
%% No application environment defaults.
{env, []},
%% Module list is empty here — presumably populated by the build tool; confirm.
{modules, []},
{links, []}
]}.

View File

@ -0,0 +1,94 @@
Business Source License 1.1
Licensor: Hangzhou EMQ Technologies Co., Ltd.
Licensed Work: EMQX Enterprise Edition
The Licensed Work is (c) 2023
Hangzhou EMQ Technologies Co., Ltd.
Additional Use Grant: Students and educators are granted right to copy,
modify, and create derivative work for research
or education.
Change Date: 2027-02-01
Change License: Apache License, Version 2.0
For information about alternative licensing arrangements for the Software,
please contact Licensor: https://www.emqx.com/en/contact
Notice
The Business Source License (this document, or the “License”) is not an Open
Source license. However, the Licensed Work will eventually be made available
under an Open Source License, as stated in this License.
License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
“Business Source License” is a trademark of MariaDB Corporation Ab.
-----------------------------------------------------------------------------
Business Source License 1.1
Terms
The Licensor hereby grants you the right to copy, modify, create derivative
works, redistribute, and make non-production use of the Licensed Work. The
Licensor may make an Additional Use Grant, above, permitting limited
production use.
Effective on the Change Date, or the fourth anniversary of the first publicly
available distribution of a specific version of the Licensed Work under this
License, whichever comes first, the Licensor hereby grants you rights under
the terms of the Change License, and the rights granted in the paragraph
above terminate.
If your use of the Licensed Work does not comply with the requirements
currently in effect as described in this License, you must purchase a
commercial license from the Licensor, its affiliated entities, or authorized
resellers, or you must refrain from using the Licensed Work.
All copies of the original and modified Licensed Work, and derivative works
of the Licensed Work, are subject to this License. This License applies
separately for each version of the Licensed Work and the Change Date may vary
for each version of the Licensed Work released by Licensor.
You must conspicuously display this License on each original or modified copy
of the Licensed Work. If you receive the Licensed Work in original or
modified form from a third party, the terms and conditions set forth in this
License apply to your use of that work.
Any use of the Licensed Work in violation of this License will automatically
terminate your rights under this License for the current and all other
versions of the Licensed Work.
This License does not grant you any right in any trademark or logo of
Licensor or its affiliates (provided that you may use a trademark or logo of
Licensor as expressly required by this License).
TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
TITLE.
MariaDB hereby grants you permission to use this License's text to license
your works, and to refer to it using the trademark “Business Source License”,
as long as you comply with the Covenants of Licensor below.
Covenants of Licensor
In consideration of the right to use this License's text and the "Business
Source License” name and trademark, Licensor covenants to MariaDB, and to all
other recipients of the licensed work to be provided by Licensor:
1. To specify as the Change License the GPL Version 2.0 or any later version,
or a license that is compatible with GPL Version 2.0 or a later version,
where “compatible” means that software provided under the Change License can
be included in a program with software provided under GPL Version 2.0 or a
later version. Licensor may specify additional Change Licenses without
limitation.
2. To either: (a) specify an additional grant of rights to use that does not
impose any additional restriction on the right granted in this License, as
the Additional Use Grant; or (b) insert the text “None”.
3. To specify a Change Date.
4. Not to modify this License in any other way.

View File

@ -0,0 +1,39 @@
# EMQX MongoDB Bridge
[MongoDB](https://github.com/mongodb/mongo) is a source-available cross-platform
document-oriented database. It is a NoSQL database that stores flexible JSON-like
documents for faster iteration and better data organization.
It provides high availability and scaling with its built-in replication and sharding
features, and is used in a variety of industries.
The application is used to connect EMQX and MongoDB.
User can create a rule and easily ingest IoT data into MongoDB by leveraging
[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html).
# Documentation
- Refer to [Ingest data into MongoDB](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-mongodb.html)
for how to use EMQX dashboard to ingest IoT data into MongoDB.
- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
for the EMQX rules engine introduction.
# HTTP APIs
- Several APIs are provided for bridge management, which includes create bridge,
update bridge, get bridge, stop or restart bridge and list bridges etc.
Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges)
for more detailed information.
# Contributing
Please see our [contributing.md](../../CONTRIBUTING.md).
# License
EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).

View File

@ -0,0 +1,9 @@
%% OTP application resource file for the EMQX Enterprise MongoDB bridge.
%% Declares application metadata only; no processes, env, or modules are
%% listed here.
{application, emqx_bridge_mongodb, [
{description, "EMQX Enterprise MongoDB Bridge"},
%% Application version.
{vsn, "0.1.0"},
%% No registered process names.
{registered, []},
%% Applications that must be started before this one.
{applications, [kernel, stdlib]},
%% No application environment defaults.
{env, []},
%% Module list is empty here — presumably populated by the build tool; confirm.
{modules, []},
{links, []}
]}.

Some files were not shown because too many files have changed in this diff Show More