Merge remote-tracking branch 'origin/release-51' into 0529-donot-copy-cluster-conf-from-newer-version

Zaiming (Stone) Shi 2023-06-02 16:30:58 +02:00
commit 1ba07e1040
64 changed files with 1390 additions and 600 deletions

View File

@ -29,3 +29,33 @@ services:
# - "18080:18080"
networks:
- emqx_bridge
iotdb_0_13:
container_name: iotdb013
hostname: iotdb013
image: apache/iotdb:0.13.4-node
restart: always
environment:
- enable_rest_service=true
- cn_internal_address=iotdb013
- cn_internal_port=10710
- cn_consensus_port=10720
- cn_target_config_node_list=iotdb013:10710
- dn_rpc_address=iotdb013
- dn_internal_address=iotdb013
- dn_rpc_port=6667
- dn_mpp_data_exchange_port=10740
- dn_schema_region_consensus_port=10750
- dn_data_region_consensus_port=10760
- dn_target_config_node_list=iotdb013:10710
volumes:
- ./iotdb013/iotdb-rest.properties:/iotdb/conf/iotdb-rest.properties
# - ./data:/iotdb/data
# - ./logs:/iotdb/logs
expose:
- "18080"
# IoTDB's REST interface, uncomment for local testing
# ports:
# - "18080:18080"
networks:
- emqx_bridge

View File

@ -46,6 +46,7 @@ services:
# IOTDB
- 14242:4242
- 28080:18080
- 38080:38080
command:
- "-host=0.0.0.0"
- "-config=/config/toxiproxy.json"

View File

@ -0,0 +1,58 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
####################
### REST Service Configuration
####################
# Is the REST service enabled
enable_rest_service=true
# the binding port of the REST service
# rest_service_port=18080
# the default row limit to a REST query response when the rowSize parameter is not given in request
# rest_query_default_row_size_limit=10000
# the expiration time of the user login information cache (in seconds)
# cache_expire_in_seconds=28800
# maximum number of users that can be stored in the user login cache.
# cache_max_num=100
# initial capacity of the user login cache.
# cache_init_num=10
# is SSL enabled
# enable_https=false
# SSL key store path
# key_store_path=
# SSL key store password
# key_store_pwd=
# SSL trust store path
# trust_store_path=
# SSL trust store password.
# trust_store_pwd=
# SSL timeout (in seconds)
# idle_timeout_in_seconds=50000

View File

@ -132,6 +132,12 @@
"upstream": "iotdb:18080",
"enabled": true
},
{
"name": "iotdb013",
"listen": "0.0.0.0:38080",
"upstream": "iotdb013:18080",
"enabled": true
},
{
"name": "minio_tcp",
"listen": "0.0.0.0:19000",

View File

@ -46,15 +46,16 @@ jobs:
name: emqx-ubuntu20.04
path: _packages/emqx/${{ steps.package_file.outputs.PACKAGE_FILE }}
tf_emqx_perf_test:
scenario_1on1:
runs-on: ubuntu-latest
needs:
- prepare
env:
TF_VAR_package_file: ${{ needs.prepare.outputs.PACKAGE_FILE }}
TF_VAR_grafana_api_key: ${{ secrets.TF_EMQX_PERF_TEST_GRAFANA_API_KEY }}
TF_AWS_REGION: eu-north-1
TF_AWS_REGION: eu-west-1
TF_VAR_test_duration: 1800
TF_VAR_prometheus_remote_write_url: ${{ secrets.TF_EMQX_PERF_TEST_PROMETHEUS_REMOTE_WRITE_URL }}
TF_VAR_prometheus_remote_write_region: eu-west-1
steps:
- name: Configure AWS Credentials
@ -62,12 +63,13 @@ jobs:
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_PERF_TEST }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_PERF_TEST }}
aws-region: eu-north-1
aws-region: eu-west-1
- name: Checkout tf-emqx-performance-test
uses: actions/checkout@v3
with:
repository: emqx/tf-emqx-performance-test
path: tf-emqx-performance-test
ref: v0.2.2
- uses: actions/download-artifact@v3
with:
name: emqx-ubuntu20.04
@ -84,9 +86,9 @@ jobs:
TF_VAR_bench_id: "${{ needs.prepare.outputs.BENCH_ID }}/1on1"
TF_VAR_use_emqttb: 1
TF_VAR_use_emqtt_bench: 0
TF_VAR_emqttb_instance_count: 2
TF_VAR_emqttb_instance_type: "c5.large"
TF_VAR_emqttb_scenario: "@pub --topic 't/%n' --pubinterval 10ms --qos 1 --publatency 50ms --size 16 --num-clients 25000 @sub --topic 't/%n' --num-clients 25000"
TF_VAR_emqttb_instance_count: 1
TF_VAR_emqttb_instance_type: "c5.2xlarge"
TF_VAR_emqttb_scenario: "@pubsub_fwd -n 50_000 --pub-qos 1 --sub-qos 1"
TF_VAR_emqx_instance_type: "c5.xlarge"
TF_VAR_emqx_instance_count: 3
run: |
@ -103,10 +105,10 @@ jobs:
- name: Send notification to Slack
uses: slackapi/slack-github-action@v1.23.0
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
SLACK_WEBHOOK_URL: ${{ secrets.TF_EMQX_PERF_TEST_SLACK_URL }}
with:
payload: |
{"text": "Performance test result for 1on1 scenario (50k pub, 50k sub): ${{ job.status }}\nhttps://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Pub message rate*: ${{ steps.scenario_1on1.outputs.PUB_MSG_RATE }}\n*Sub message rate*: ${{ steps.scenario_1on1.outputs.SUB_MSG_RATE }}\nDropped messages: ${{ steps.scenario_1on1.outputs.MESSAGES_DROPPED }}"}
{"text": "Performance test result for 1on1 scenario (50k pub, 50k sub): ${{ job.status }}\nhttps://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Pub message rate*: `${{ steps.scenario_1on1.outputs.PUB_MSG_RATE }}`\n*Sub message rate*: `${{ steps.scenario_1on1.outputs.SUB_MSG_RATE }}`\nDropped messages: `${{ steps.scenario_1on1.outputs.MESSAGES_DROPPED }}`"}
- name: terraform destroy
if: always()
working-directory: ./tf-emqx-performance-test
@ -116,7 +118,85 @@ jobs:
if: success()
with:
name: metrics
path: "./tf-emqx-performance-test/metrics.json"
path: "./tf-emqx-performance-test/*.json"
- uses: actions/upload-artifact@v3
if: failure()
with:
name: terraform
path: |
./tf-emqx-performance-test/.terraform
./tf-emqx-performance-test/*.tfstate
scenario_1m_conns:
runs-on: ubuntu-latest
needs:
- prepare
- scenario_1on1
env:
TF_VAR_package_file: ${{ needs.prepare.outputs.PACKAGE_FILE }}
TF_AWS_REGION: eu-west-1
TF_VAR_test_duration: 1800
TF_VAR_prometheus_remote_write_url: ${{ secrets.TF_EMQX_PERF_TEST_PROMETHEUS_REMOTE_WRITE_URL }}
TF_VAR_prometheus_remote_write_region: eu-west-1
steps:
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_PERF_TEST }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_PERF_TEST }}
aws-region: eu-west-1
- name: Checkout tf-emqx-performance-test
uses: actions/checkout@v3
with:
repository: emqx/tf-emqx-performance-test
path: tf-emqx-performance-test
ref: v0.2.2
- uses: actions/download-artifact@v3
with:
name: emqx-ubuntu20.04
path: tf-emqx-performance-test/
- name: Setup Terraform
uses: hashicorp/setup-terraform@v2
with:
terraform_wrapper: false
- name: 1m conns scenario
id: scenario_1m_conns
working-directory: ./tf-emqx-performance-test
timeout-minutes: 60
env:
TF_VAR_bench_id: "${{ needs.prepare.outputs.BENCH_ID }}/scenario_1m_conns"
TF_VAR_use_emqttb: 1
TF_VAR_use_emqtt_bench: 0
TF_VAR_emqttb_instance_count: 5
TF_VAR_emqttb_instance_type: "c5.xlarge"
TF_VAR_emqttb_scenario: "@conn -N 200_000 --conninterval 1ms"
TF_VAR_emqx_instance_type: "c5.xlarge"
TF_VAR_emqx_instance_count: 5
run: |
terraform init
terraform apply -auto-approve
./wait-emqttb.sh
./fetch-metrics.sh
echo CLIENT_CONNECT=$(cat metrics.json | jq '[.[]."client.connect"] | add') >> $GITHUB_OUTPUT
terraform destroy -auto-approve
- name: Send notification to Slack
uses: slackapi/slack-github-action@v1.23.0
env:
SLACK_WEBHOOK_URL: ${{ secrets.TF_EMQX_PERF_TEST_SLACK_URL }}
with:
payload: |
{"text": "Performance test result for 1m conns: ${{ job.status }}\nhttps://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*client.connect*: `${{ steps.scenario_1m_conns.outputs.CLIENT_CONNECT }}`"}
- name: terraform destroy
if: always()
working-directory: ./tf-emqx-performance-test
run: |
terraform destroy -auto-approve
- uses: actions/upload-artifact@v3
if: success()
with:
name: metrics
path: "./tf-emqx-performance-test/*.json"
- uses: actions/upload-artifact@v3
if: failure()
with:

View File

@ -71,10 +71,10 @@ jobs:
./rebar3 xref
./rebar3 dialyzer
./rebar3 eunit -v
./rebar3 ct -v
./rebar3 ct --name 'test@127.0.0.1' -v --readable=true
./rebar3 proper -d test/props
- uses: actions/upload-artifact@v3
if: failure()
with:
name: logs
name: logs-${{ matrix.runs-on }}
path: apps/emqx/_build/test/logs

View File

@ -16,7 +16,7 @@ endif
# Dashboard version
# from https://github.com/emqx/emqx-dashboard5
export EMQX_DASHBOARD_VERSION ?= v1.2.5-1
export EMQX_EE_DASHBOARD_VERSION ?= e1.0.7
export EMQX_EE_DASHBOARD_VERSION ?= e1.0.8-beta.1
# `:=` should be used here, otherwise the `$(shell ...)` will be executed every time the variable is used
# In make 4.4+, for backward-compatibility the value from the original environment is used.

View File

@ -32,10 +32,10 @@
%% `apps/emqx/src/bpapi/README.md'
%% Opensource edition
-define(EMQX_RELEASE_CE, "5.1.0-alpha.1").
-define(EMQX_RELEASE_CE, "5.1.0-alpha.2").
%% Enterprise edition
-define(EMQX_RELEASE_EE, "5.1.0-alpha.1").
-define(EMQX_RELEASE_EE, "5.1.0-alpha.2").
%% The HTTP API version
-define(EMQX_API_VERSION, "5.0").

View File

@ -190,23 +190,11 @@ find_raw(KeyPath) ->
-spec get_zone_conf(atom(), emqx_utils_maps:config_key_path()) -> term().
get_zone_conf(Zone, KeyPath) ->
case find(?ZONE_CONF_PATH(Zone, KeyPath)) of
%% not found in zones, try to find the global config
{not_found, _, _} ->
?MODULE:get(KeyPath);
{ok, Value} ->
Value
end.
?MODULE:get(?ZONE_CONF_PATH(Zone, KeyPath)).
-spec get_zone_conf(atom(), emqx_utils_maps:config_key_path(), term()) -> term().
get_zone_conf(Zone, KeyPath, Default) ->
case find(?ZONE_CONF_PATH(Zone, KeyPath)) of
%% not found in zones, try to find the global config
{not_found, _, _} ->
?MODULE:get(KeyPath, Default);
{ok, Value} ->
Value
end.
?MODULE:get(?ZONE_CONF_PATH(Zone, KeyPath), Default).
-spec put_zone_conf(atom(), emqx_utils_maps:config_key_path(), term()) -> ok.
put_zone_conf(Zone, KeyPath, Conf) ->
@ -231,6 +219,9 @@ find_listener_conf(Type, Listener, KeyPath) ->
-spec put(map()) -> ok.
put(Config) ->
put_with_order(Config).
put1(Config) ->
maps:fold(
fun(RootName, RootValue, _) ->
?MODULE:put([atom(RootName)], RootValue)
@ -246,8 +237,8 @@ erase(RootName) ->
-spec put(emqx_utils_maps:config_key_path(), term()) -> ok.
put(KeyPath, Config) ->
Putter = fun(Path, Map, Value) ->
emqx_utils_maps:deep_put(Path, Map, Value)
Putter = fun(_Path, Map, Value) ->
maybe_update_zone(KeyPath, Map, Value)
end,
do_put(?CONF, Putter, KeyPath, Config).
@ -340,7 +331,9 @@ init_load(SchemaMod, Conf) when is_list(Conf) orelse is_binary(Conf) ->
%% check configs against the schema
{AppEnvs, CheckedConf} = check_config(SchemaMod, RawConf, #{}),
save_to_app_env(AppEnvs),
ok = save_to_config_map(CheckedConf, RawConf).
ok = save_to_config_map(CheckedConf, RawConf),
maybe_init_default_zone(),
ok.
%% Merge environment variable overrides on top, then merge with overrides.
overlay_v0(SchemaMod, RawConf) when is_map(RawConf) ->
@ -840,3 +833,130 @@ to_atom_conf_path(Path, OnFail) ->
V
end
end.
%% @doc Init zones under root `zones'
%% 1. Ensure one `default' zone, as it is referenced by listeners.
%%    If the default zone is unset, clone all default values from `GlobalDefaults'.
%%    If the default zone is set, its values are merged with `GlobalDefaults'.
%% 2. For any user-defined zones, merge with `GlobalDefaults'.
%%
%% Note: this should be called as a post action after the emqx_config terms (zones and GlobalDefaults)
%% are written to persistent-term (PT) storage during emqx config loading/initialization.
-spec maybe_init_default_zone() -> skip | ok.
maybe_init_default_zone() ->
case emqx_config:get([zones], ?CONFIG_NOT_FOUND_MAGIC) of
?CONFIG_NOT_FOUND_MAGIC ->
skip;
Zones0 when is_map(Zones0) ->
Zones =
case Zones0 of
#{default := _DefaultZone} = Z1 ->
Z1;
Z2 ->
Z2#{default => #{}}
end,
GLD = zone_global_defaults(),
NewZones = maps:map(
fun(_ZoneName, ZoneVal) ->
merge_with_global_defaults(GLD, ZoneVal)
end,
Zones
),
?MODULE:put([zones], NewZones)
end.
-spec merge_with_global_defaults(map(), map()) -> map().
merge_with_global_defaults(GlobalDefaults, ZoneVal) ->
emqx_utils_maps:deep_merge(GlobalDefaults, ZoneVal).
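(Aside, not part of this commit: a minimal sketch of the intended merge semantics, grounded in the test cases added later in this diff. `emqx_utils_maps:deep_merge/2' merges nested maps with the second argument taking precedence, so zone values win and missing keys fall back to the global defaults.)
%% Illustration only; the literal values are assumptions borrowed from the test suite below.
merge_with_global_defaults_example() ->
    GlobalDefaults = #{mqtt => #{max_inflight => 32, retry_interval => 30000}},
    ZoneVal = #{mqtt => #{max_inflight => 1}},
    %% zone override wins, missing keys fall back to the global default
    #{mqtt := #{max_inflight := 1, retry_interval := 30000}} =
        merge_with_global_defaults(GlobalDefaults, ZoneVal),
    ok.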
%% @doc Update zones.
%% 1) When a zone is updated, return the *new* zones value.
%% 2) When a zone global default root is updated, write to PT directly.
%% Zone global defaults are always present in the config map (PT) when updating zones.
-spec maybe_update_zone(runtime_config_key_path(), RootValue :: map(), Val :: term()) ->
NewZoneVal :: map().
maybe_update_zone([zones | T], ZonesValue, Value) ->
%% Note: do not write to PT; return the *new* value instead
NewZonesValue = emqx_utils_maps:deep_put(T, ZonesValue, Value),
ExistingZoneNames = maps:keys(?MODULE:get([zones], #{})),
%% Update only new zones with global defaults
GLD = zone_global_defaults(),
maps:fold(
fun(ZoneName, ZoneValue, Acc) ->
Acc#{ZoneName := merge_with_global_defaults(GLD, ZoneValue)}
end,
NewZonesValue,
maps:without(ExistingZoneNames, NewZonesValue)
);
maybe_update_zone([RootName | T], RootValue, Value) when is_atom(RootName) ->
NewRootValue = emqx_utils_maps:deep_put(T, RootValue, Value),
case is_zone_root(RootName) of
false ->
skip;
true ->
%% When a global default root is updated.
ExistingZones = ?MODULE:get([zones], #{}),
RootNameBin = atom_to_binary(RootName),
NewZones = maps:map(
fun(ZoneName, ZoneVal) ->
BinPath = [<<"zones">>, atom_to_binary(ZoneName), RootNameBin],
case
%% look for user defined value from RAWCONF
?MODULE:get_raw(
BinPath,
?CONFIG_NOT_FOUND_MAGIC
)
of
?CONFIG_NOT_FOUND_MAGIC ->
ZoneVal#{RootName => NewRootValue};
RawUserZoneRoot ->
UserDefinedValues = rawconf_to_conf(
emqx_schema, BinPath, RawUserZoneRoot
),
ZoneVal#{
RootName :=
emqx_utils_maps:deep_merge(
NewRootValue,
UserDefinedValues
)
}
end
end,
ExistingZones
),
persistent_term:put(?PERSIS_KEY(?CONF, zones), NewZones)
end,
NewRootValue.
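(Aside, not part of this commit: the observable effect of this update path, taken from the test cases added later in this diff. Updating a zone-root global default propagates into every zone stored in PT, except where the zone has a user-defined override in the raw config.)
%% Sketch only; assumes emqx.conf contained `zones.myzone.mqtt.retry_interval = 10m'.
zone_propagation_example() ->
    ok = emqx_config:put([mqtt, retry_interval], 900000),
    %% the default zone follows the new global default ...
    900000 = emqx_config:get([zones, default, mqtt, retry_interval]),
    %% ... while the user-defined override in myzone is preserved (10m = 600000 ms).
    600000 = emqx_config:get([zones, myzone, mqtt, retry_interval]),
    ok.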
zone_global_defaults() ->
maps:from_list([{K, ?MODULE:get([K])} || K <- zone_roots()]).
-spec is_zone_root(atom) -> boolean().
is_zone_root(Name) ->
lists:member(Name, zone_roots()).
-spec zone_roots() -> [atom()].
zone_roots() ->
lists:map(fun list_to_atom/1, emqx_zone_schema:roots()).
%%%
%%% @doc During init, ensure the order of puts so that zones are put after the other global defaults.
%%%
put_with_order(#{zones := _Zones} = Conf) ->
put1(maps:without([zones], Conf)),
put1(maps:with([zones], Conf));
put_with_order(Conf) ->
put1(Conf).
%%
%% @doc Helper function that converts a raw conf value to a runtime conf value,
%% using the type info from the schema module.
-spec rawconf_to_conf(module(), RawPath :: [binary()], RawValue :: term()) -> term().
rawconf_to_conf(SchemaModule, RawPath, RawValue) ->
{_, RawUserDefinedValues} =
check_config(
SchemaModule,
emqx_utils_maps:deep_put(RawPath, #{}, RawValue)
),
AtomPath = to_atom_conf_path(RawPath, {raise_error, maybe_update_zone_error}),
emqx_utils_maps:deep_get(AtomPath, RawUserDefinedValues).
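(Aside, not part of this commit: a sketch of what rawconf_to_conf/3 is expected to produce for a user-defined zone root, reusing the `10m' raw value from the test suite below. The checked map may contain additional defaults; only the converted field is shown here.)
%% Hypothetical call; `myzone' and the raw value are assumptions for illustration.
%% rawconf_to_conf(emqx_schema,
%%                 [<<"zones">>, <<"myzone">>, <<"mqtt">>],
%%                 #{<<"retry_interval">> => <<"10m">>})
%% is expected to return a runtime map containing #{retry_interval => 600000}.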

View File

@ -57,7 +57,6 @@
]).
-export([pre_config_update/3, post_config_update/5]).
-export([wait_listener_stopped/1]).
-export([format_bind/1]).

View File

@ -41,8 +41,6 @@
exclusive_subscription => boolean()
}.
-define(MAX_TOPIC_LEVELS, 65535).
-define(PUBCAP_KEYS, [
max_topic_levels,
max_qos_allowed,
@ -154,8 +152,5 @@ get_caps(Zone) ->
get_caps(Keys, Zone) ->
maps:with(
Keys,
maps:merge(
emqx_config:get([mqtt]),
emqx_config:get_zone_conf(Zone, [mqtt])
)
emqx_config:get_zone_conf(Zone, [mqtt])
).

View File

@ -93,7 +93,6 @@ end_per_group(_Group, _Config) ->
emqx_common_test_helpers:stop_apps([]).
init_per_suite(Config) ->
emqx_common_test_helpers:clear_screen(),
Config.
end_per_suite(_Config) ->

View File

@ -27,176 +27,6 @@
all() ->
emqx_common_test_helpers:all(?MODULE).
force_gc_conf() ->
#{bytes => 16777216, count => 16000, enable => true}.
force_shutdown_conf() ->
#{enable => true, max_heap_size => 4194304, max_mailbox_size => 1000}.
rpc_conf() ->
#{
async_batch_size => 256,
authentication_timeout => 5000,
call_receive_timeout => 15000,
connect_timeout => 5000,
mode => async,
port_discovery => stateless,
send_timeout => 5000,
socket_buffer => 1048576,
socket_keepalive_count => 9,
socket_keepalive_idle => 900,
socket_keepalive_interval => 75,
socket_recbuf => 1048576,
socket_sndbuf => 1048576,
tcp_client_num => 1,
tcp_server_port => 5369
}.
mqtt_conf() ->
#{
await_rel_timeout => 300000,
idle_timeout => 15000,
ignore_loop_deliver => false,
keepalive_backoff => 0.75,
max_awaiting_rel => 100,
max_clientid_len => 65535,
max_inflight => 32,
max_mqueue_len => 1000,
max_packet_size => 1048576,
max_qos_allowed => 2,
max_subscriptions => infinity,
max_topic_alias => 65535,
max_topic_levels => 128,
mqueue_default_priority => lowest,
mqueue_priorities => disabled,
mqueue_store_qos0 => true,
peer_cert_as_clientid => disabled,
peer_cert_as_username => disabled,
response_information => [],
retain_available => true,
retry_interval => 30000,
server_keepalive => disabled,
session_expiry_interval => 7200000,
shared_subscription => true,
strict_mode => false,
upgrade_qos => false,
use_username_as_clientid => false,
wildcard_subscription => true
}.
listener_mqtt_tcp_conf() ->
#{
acceptors => 16,
zone => default,
access_rules => ["allow all"],
bind => {{0, 0, 0, 0}, 1883},
max_connections => 1024000,
mountpoint => <<>>,
proxy_protocol => false,
proxy_protocol_timeout => 3000,
tcp_options => #{
active_n => 100,
backlog => 1024,
buffer => 4096,
high_watermark => 1048576,
nodelay => false,
reuseaddr => true,
send_timeout => 15000,
send_timeout_close => true
}
}.
listener_mqtt_ws_conf() ->
#{
acceptors => 16,
zone => default,
access_rules => ["allow all"],
bind => {{0, 0, 0, 0}, 8083},
max_connections => 1024000,
mountpoint => <<>>,
proxy_protocol => false,
proxy_protocol_timeout => 3000,
tcp_options =>
#{
active_n => 100,
backlog => 1024,
buffer => 4096,
high_watermark => 1048576,
nodelay => false,
reuseaddr => true,
send_timeout => 15000,
send_timeout_close => true
},
websocket =>
#{
allow_origin_absence => true,
check_origin_enable => false,
check_origins => [],
compress => false,
deflate_opts =>
#{
client_max_window_bits => 15,
mem_level => 8,
server_max_window_bits => 15
},
fail_if_no_subprotocol => true,
idle_timeout => 86400000,
max_frame_size => infinity,
mqtt_path => "/mqtt",
mqtt_piggyback => multiple,
% should allow uppercase in config
proxy_address_header => "X-Forwarded-For",
proxy_port_header => "x-forwarded-port",
supported_subprotocols =>
["mqtt", "mqtt-v3", "mqtt-v3.1.1", "mqtt-v5"]
}
}.
listeners_conf() ->
#{
tcp => #{default => listener_mqtt_tcp_conf()},
ws => #{default => listener_mqtt_ws_conf()}
}.
limiter_conf() ->
Make = fun() ->
#{
burst => 0,
rate => infinity
}
end,
lists:foldl(
fun(Name, Acc) ->
Acc#{Name => Make()}
end,
#{},
[bytes, messages, message_routing, connection, internal]
).
stats_conf() ->
#{enable => true}.
zone_conf() ->
#{}.
basic_conf() ->
#{
force_gc => force_gc_conf(),
force_shutdown => force_shutdown_conf(),
mqtt => mqtt_conf(),
rpc => rpc_conf(),
stats => stats_conf(),
listeners => listeners_conf(),
zones => zone_conf(),
limiter => limiter_conf()
}.
set_test_listener_confs() ->
Conf = emqx_config:get([], #{}),
emqx_config:put(basic_conf()),
Conf.
%%--------------------------------------------------------------------
%% CT Callbacks
%%--------------------------------------------------------------------
@ -242,14 +72,11 @@ init_per_testcase(_TestCase, Config) ->
fun(_) -> {ok, #{is_superuser => false}} end
),
ok = meck:expect(emqx_access_control, authorize, fun(_, _, _) -> allow end),
%% Set confs
OldConf = set_test_listener_confs(),
emqx_common_test_helpers:start_apps([]),
[{config, OldConf} | Config].
Config.
end_per_testcase(_TestCase, Config) ->
meck:unload([emqx_access_control]),
emqx_config:put(?config(config, Config)),
emqx_common_test_helpers:stop_apps([]),
Config.
@ -885,14 +712,24 @@ t_handle_kicked_publish_will_msg(_) ->
Self = self(),
ok = meck:expect(emqx_broker, publish, fun(M) -> Self ! {pub, M} end),
Msg = emqx_message:make(test, <<"will_topic">>, <<"will_payload">>),
ClientId = test,
WillTopic = <<"will_topic">>,
WillPayload = <<"will_payload">>,
Msg = emqx_message:make(ClientId, WillTopic, WillPayload),
{shutdown, kicked, ok, ?DISCONNECT_PACKET(?RC_ADMINISTRATIVE_ACTION), _} = emqx_channel:handle_call(
kick, channel(#{will_msg => Msg})
),
receive
{pub, Msg} -> ok
after 10_000 -> exit(will_message_not_published)
{pub, RecMsg} ->
?assertEqual(ClientId, RecMsg#message.from, #{msg => Msg}),
?assertEqual(WillTopic, RecMsg#message.topic, #{msg => Msg}),
?assertEqual(WillPayload, RecMsg#message.payload, #{msg => Msg}),
ok
after 5_000 ->
ct:pal("expected message: ~p", [Msg]),
ct:pal("~p mailbox: ~p", [?LINE, process_info(self(), messages)]),
exit(will_message_not_published)
end.
t_handle_call_discard(_) ->

View File

@ -383,7 +383,7 @@ t_certcn_as_clientid_tlsv1_2(_) ->
tls_certcn_as_clientid('tlsv1.2').
t_peercert_preserved_before_connected(_) ->
ok = emqx_config:put_zone_conf(default, [mqtt], #{}),
ok = emqx_config:put_zone_conf(default, [mqtt, peer_cert_as_clientid], false),
ok = emqx_hooks:add(
'client.connect',
{?MODULE, on_hook, ['client.connect', self()]},

View File

@ -756,8 +756,8 @@ start_slave(Name, Opts) when is_map(Opts) ->
throw(Other)
end,
pong = net_adm:ping(Node),
setup_node(Node, Opts),
ok = snabbkaffe:forward_trace(Node),
setup_node(Node, Opts),
Node.
%% Node stopping

View File

@ -19,6 +19,7 @@
-compile(export_all).
-compile(nowarn_export_all).
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
all() -> emqx_common_test_helpers:all(?MODULE).
@ -156,3 +157,302 @@ t_cluster_hocon_backup(C) when is_list(C) ->
FilesSorted
),
ok.
t_init_load_emqx_schema(Config) when is_list(Config) ->
emqx_config:erase_all(),
%% Given empty config file
ConfFile = prepare_conf_file(?FUNCTION_NAME, <<"">>, Config),
application:set_env(emqx, config_files, [ConfFile]),
%% When emqx_schema is loaded
?assertEqual(ok, emqx_config:init_load(emqx_schema)),
%% Then default zone is injected with all global defaults
Default = emqx_config:get([zones, default]),
MQTT = emqx_config:get([mqtt]),
Stats = emqx_config:get([stats]),
FD = emqx_config:get([flapping_detect]),
FS = emqx_config:get([force_shutdown]),
CC = emqx_config:get([conn_congestion]),
FG = emqx_config:get([force_gc]),
OP = emqx_config:get([overload_protection]),
?assertMatch(
#{
mqtt := MQTT,
stats := Stats,
flapping_detect := FD,
force_shutdown := FS,
conn_congestion := CC,
force_gc := FG,
overload_protection := OP
},
Default
).
t_init_zones_load_emqx_schema_no_default_for_none_existing(Config) when is_list(Config) ->
emqx_config:erase_all(),
%% Given empty config file
ConfFile = prepare_conf_file(?FUNCTION_NAME, <<"">>, Config),
application:set_env(emqx, config_files, [ConfFile]),
%% When emqx_schema is loaded
?assertEqual(ok, emqx_config:init_load(emqx_schema)),
%% Then reading a non-existing zone should throw an error
?assertError(
{config_not_found, [zones, no_exists]},
emqx_config:get([zones, no_exists])
).
t_init_zones_load_other_schema(Config) when is_list(Config) ->
emqx_config:erase_all(),
%% Given empty config file
ConfFile = prepare_conf_file(?FUNCTION_NAME, <<"">>, Config),
application:set_env(emqx, config_files, [ConfFile]),
%% When emqx_limiter_schema is loaded, instead of emqx_schema
%% Then the load should succeed
?assertEqual(ok, emqx_config:init_load(emqx_limiter_schema)),
%% Then no zones are loaded.
?assertError(
{config_not_found, [zones]},
emqx_config:get([zones])
),
%% Then no default zone is loaded.
?assertError(
{config_not_found, [zones, default]},
emqx_config:get([zones, default])
).
t_init_zones_with_user_defined_default_zone(Config) when is_list(Config) ->
emqx_config:erase_all(),
%% Given user defined config for default zone
ConfFile = prepare_conf_file(
?FUNCTION_NAME, <<"zones.default.mqtt.max_topic_alias=1024">>, Config
),
application:set_env(emqx, config_files, [ConfFile]),
%% When schema is loaded
?assertEqual(ok, emqx_config:init_load(emqx_schema)),
%% Then user defined value is set
{MqttV, Others} = maps:take(mqtt, emqx_config:get([zones, default])),
{ZGDMQTT, ExpectedOthers} = maps:take(mqtt, zone_global_defaults()),
?assertEqual(ZGDMQTT#{max_topic_alias := 1024}, MqttV),
%% Then others are defaults
?assertEqual(ExpectedOthers, Others).
t_init_zones_with_user_defined_other_zone(Config) ->
emqx_config:erase_all(),
%% Given user defined config for a non-default zone (myzone)
ConfFile = prepare_conf_file(
?FUNCTION_NAME, <<"zones.myzone.mqtt.max_topic_alias=1024">>, Config
),
application:set_env(emqx, config_files, [ConfFile]),
%% When schema is loaded
?assertEqual(ok, emqx_config:init_load(emqx_schema)),
%% Then user defined value is set
{MqttV, Others} = maps:take(mqtt, emqx_config:get([zones, myzone])),
{ZGDMQTT, ExpectedOthers} = maps:take(mqtt, zone_global_defaults()),
?assertEqual(ZGDMQTT#{max_topic_alias := 1024}, MqttV),
%% Then others are defaults
?assertEqual(ExpectedOthers, Others),
%% Then the default zone still has the defaults
?assertEqual(zone_global_defaults(), emqx_config:get([zones, default])).
t_init_zones_with_cust_root_mqtt(Config) when is_list(Config) ->
emqx_config:erase_all(),
%% Given config file with mqtt user overrides
ConfFile = prepare_conf_file(?FUNCTION_NAME, <<"mqtt.retry_interval=10m">>, Config),
application:set_env(emqx, config_files, [ConfFile]),
%% When emqx_schema is loaded
?assertEqual(ok, emqx_config:init_load(emqx_schema)),
%% Then the value is reflected as internal representation in default `zone'
%% and other fields under mqtt are defaults.
GDefaultMqtt = maps:get(mqtt, zone_global_defaults()),
?assertEqual(
GDefaultMqtt#{retry_interval := 600000},
emqx_config:get([zones, default, mqtt])
).
t_default_zone_is_updated_after_global_defaults_updated(Config) when is_list(Config) ->
emqx_config:erase_all(),
%% Given empty emqx conf
ConfFile = prepare_conf_file(?FUNCTION_NAME, <<"">>, Config),
application:set_env(emqx, config_files, [ConfFile]),
?assertEqual(ok, emqx_config:init_load(emqx_schema)),
?assertNotEqual(900000, emqx_config:get([zones, default, mqtt, retry_interval])),
%% When the global default `mqtt.retry_interval' is updated
emqx_config:put([mqtt, retry_interval], 900000),
%% Then the value is reflected in default `zone' and other fields under mqtt are defaults.
GDefaultMqtt = maps:get(mqtt, zone_global_defaults()),
?assertEqual(
GDefaultMqtt#{retry_interval := 900000},
emqx_config:get([zones, default, mqtt])
).
t_myzone_is_updated_after_global_defaults_updated(Config) when is_list(Config) ->
emqx_config:erase_all(),
%% Given emqx conf file with user override in myzone (non-default zone)
ConfFile = prepare_conf_file(?FUNCTION_NAME, <<"zones.myzone.mqtt.max_inflight=32">>, Config),
application:set_env(emqx, config_files, [ConfFile]),
?assertEqual(ok, emqx_config:init_load(emqx_schema)),
?assertNotEqual(900000, emqx_config:get([zones, myzone, mqtt, retry_interval])),
%% When another global default value is updated
emqx_config:put([mqtt, retry_interval], 900000),
%% Then the value is reflected in myzone and the user defined value unchanged.
GDefaultMqtt = maps:get(mqtt, zone_global_defaults()),
?assertEqual(
GDefaultMqtt#{
retry_interval := 900000,
max_inflight := 32
},
emqx_config:get([zones, myzone, mqtt])
),
%% Then the value is reflected in default zone as well.
?assertEqual(
GDefaultMqtt#{retry_interval := 900000},
emqx_config:get([zones, default, mqtt])
).
t_zone_no_user_defined_overrides(Config) when is_list(Config) ->
emqx_config:erase_all(),
%% Given emqx conf file with user specified myzone
ConfFile = prepare_conf_file(
?FUNCTION_NAME, <<"zones.myzone.mqtt.retry_interval=10m">>, Config
),
application:set_env(emqx, config_files, [ConfFile]),
?assertEqual(ok, emqx_config:init_load(emqx_schema)),
?assertEqual(600000, emqx_config:get([zones, myzone, mqtt, retry_interval])),
%% When there is an update in global default
emqx_config:put([mqtt, max_inflight], 2),
%% Then the value is reflected in both default and myzone
?assertMatch(2, emqx_config:get([zones, default, mqtt, max_inflight])),
?assertMatch(2, emqx_config:get([zones, myzone, mqtt, max_inflight])),
%% Then user defined value from config is not overwritten
?assertMatch(600000, emqx_config:get([zones, myzone, mqtt, retry_interval])).
t_zone_no_user_defined_overrides_internal_represent(Config) when is_list(Config) ->
emqx_config:erase_all(),
%% Given emqx conf file with user specified myzone
ConfFile = prepare_conf_file(?FUNCTION_NAME, <<"zones.myzone.mqtt.max_inflight=1">>, Config),
application:set_env(emqx, config_files, [ConfFile]),
?assertEqual(ok, emqx_config:init_load(emqx_schema)),
?assertEqual(1, emqx_config:get([zones, myzone, mqtt, max_inflight])),
%% When there is an update in global default
emqx_config:put([mqtt, max_inflight], 2),
%% Then the value is reflected in default `zone' but not user-defined zone
?assertMatch(2, emqx_config:get([zones, default, mqtt, max_inflight])),
?assertMatch(1, emqx_config:get([zones, myzone, mqtt, max_inflight])).
t_update_global_defaults_no_updates_on_user_overrides(Config) when is_list(Config) ->
emqx_config:erase_all(),
%% Given default zone config in conf file.
ConfFile = prepare_conf_file(?FUNCTION_NAME, <<"zones.default.mqtt.max_inflight=1">>, Config),
application:set_env(emqx, config_files, [ConfFile]),
?assertEqual(ok, emqx_config:init_load(emqx_schema)),
?assertEqual(1, emqx_config:get([zones, default, mqtt, max_inflight])),
%% When there is an update in global default
emqx_config:put([mqtt, max_inflight], 20),
%% Then the value is not reflected in default `zone'
?assertMatch(1, emqx_config:get([zones, default, mqtt, max_inflight])).
t_zone_update_with_new_zone(Config) when is_list(Config) ->
emqx_config:erase_all(),
%% Given an empty conf file is loaded
ConfFile = prepare_conf_file(?FUNCTION_NAME, <<"">>, Config),
application:set_env(emqx, config_files, [ConfFile]),
?assertEqual(ok, emqx_config:init_load(emqx_schema)),
%% When there is an update for creating new zone config
ok = emqx_config:put([zones, myzone, mqtt, max_inflight], 2),
%% Then the value is set and other roots are created with defaults.
GDefaultMqtt = maps:get(mqtt, zone_global_defaults()),
?assertEqual(
GDefaultMqtt#{max_inflight := 2},
emqx_config:get([zones, myzone, mqtt])
).
t_init_zone_with_global_defaults(Config) when is_list(Config) ->
%% Given uninitialized empty config
emqx_config:erase_all(),
Zones = #{myzone => #{mqtt => #{max_inflight => 3}}},
%% When zones are put together with the global defaults via emqx_config:put/1
GlobalDefaults = zone_global_defaults(),
AllConf = maps:put(zones, Zones, GlobalDefaults),
%% Then the put succeeds
?assertEqual(ok, emqx_config:put(AllConf)),
%% Then GlobalDefaults are set
?assertEqual(GlobalDefaults, maps:with(maps:keys(GlobalDefaults), emqx_config:get([]))),
%% Then myzone and the default zone are set
{MqttV, Others} = maps:take(mqtt, emqx_config:get([zones, myzone])),
{ZGDMQTT, ExpectedOthers} = maps:take(mqtt, GlobalDefaults),
?assertEqual(ZGDMQTT#{max_inflight := 3}, MqttV),
%% Then others are defaults
?assertEqual(ExpectedOthers, Others).
%%%
%%% Helpers
%%%
prepare_conf_file(Name, Content, CTConfig) ->
Filename = tc_conf_file(Name, CTConfig),
filelib:ensure_dir(Filename),
ok = file:write_file(Filename, Content),
Filename.
tc_conf_file(TC, Config) ->
DataDir = ?config(data_dir, Config),
filename:join([DataDir, TC, 'emqx.conf']).
zone_global_defaults() ->
#{
conn_congestion =>
#{enable_alarm => true, min_alarm_sustain_duration => 60000},
flapping_detect =>
#{ban_time => 300000, max_count => 15, window_time => disabled},
force_gc =>
#{bytes => 16777216, count => 16000, enable => true},
force_shutdown =>
#{
enable => true,
max_heap_size => 4194304,
max_mailbox_size => 1000
},
mqtt =>
#{
await_rel_timeout => 300000,
exclusive_subscription => false,
idle_timeout => 15000,
ignore_loop_deliver => false,
keepalive_backoff => 0.75,
keepalive_multiplier => 1.5,
max_awaiting_rel => 100,
max_clientid_len => 65535,
max_inflight => 32,
max_mqueue_len => 1000,
max_packet_size => 1048576,
max_qos_allowed => 2,
max_subscriptions => infinity,
max_topic_alias => 65535,
max_topic_levels => 128,
mqueue_default_priority => lowest,
mqueue_priorities => disabled,
mqueue_store_qos0 => true,
peer_cert_as_clientid => disabled,
peer_cert_as_username => disabled,
response_information => [],
retain_available => true,
retry_interval => 30000,
server_keepalive => disabled,
session_expiry_interval => 7200000,
shared_subscription => true,
strict_mode => false,
upgrade_qos => false,
use_username_as_clientid => false,
wildcard_subscription => true
},
overload_protection =>
#{
backoff_delay => 1,
backoff_gc => false,
backoff_hibernation => true,
backoff_new_conn => true,
enable => false
},
stats => #{enable => true}
}.

View File

@ -57,7 +57,6 @@ init_per_suite(Config) ->
ok = meck:expect(emqx_alarm, deactivate, fun(_) -> ok end),
ok = meck:expect(emqx_alarm, deactivate, fun(_, _) -> ok end),
emqx_channel_SUITE:set_test_listener_confs(),
emqx_common_test_helpers:start_apps([]),
Config.

View File

@ -39,7 +39,7 @@ all() -> emqx_common_test_helpers:all(?MODULE).
%%--------------------------------------------------------------------
init_per_suite(Config) ->
emqx_channel_SUITE:set_test_listener_confs(),
emqx_common_test_helpers:start_apps([]),
ok = meck:new(
[emqx_hooks, emqx_metrics, emqx_broker],
[passthrough, no_history, no_link]

View File

@ -21,6 +21,7 @@
-include_lib("emqx/include/emqx.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-define(SUITE, ?MODULE).
@ -46,12 +47,30 @@
all() -> emqx_common_test_helpers:all(?SUITE).
init_per_suite(Config) ->
net_kernel:start(['master@127.0.0.1', longnames]),
DistPid =
case net_kernel:nodename() of
ignored ->
%% calling `net_kernel:start' without `epmd'
%% running will result in a failure.
emqx_common_test_helpers:start_epmd(),
{ok, Pid} = net_kernel:start(['master@127.0.0.1', longnames]),
ct:pal("start epmd, node name: ~p", [node()]),
Pid;
_ ->
undefined
end,
emqx_common_test_helpers:boot_modules(all),
emqx_common_test_helpers:start_apps([]),
Config.
[{dist_pid, DistPid} | Config].
end_per_suite(_Config) ->
end_per_suite(Config) ->
DistPid = ?config(dist_pid, Config),
case DistPid of
Pid when is_pid(Pid) ->
net_kernel:stop();
_ ->
ok
end,
emqx_common_test_helpers:stop_apps([]).
init_per_testcase(Case, Config) ->

View File

@ -34,7 +34,6 @@ all() -> emqx_common_test_helpers:all(?MODULE).
init_per_suite(Config) ->
emqx_common_test_helpers:boot_modules(all),
emqx_channel_SUITE:set_test_listener_confs(),
?check_trace(
?wait_async_action(
emqx_common_test_helpers:start_apps([]),

View File

@ -137,7 +137,6 @@ end_per_testcase(_, Config) ->
Config.
init_per_suite(Config) ->
emqx_channel_SUITE:set_test_listener_confs(),
emqx_common_test_helpers:start_apps([]),
Config.

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_authn, [
{description, "EMQX Authentication"},
{vsn, "0.1.20"},
{vsn, "0.1.21"},
{modules, []},
{registered, [emqx_authn_sup, emqx_authn_registry]},
{applications, [kernel, stdlib, emqx_resource, emqx_connector, ehttpc, epgsql, mysql, jose]},

View File

@ -158,12 +158,15 @@ verify_claims(type) ->
verify_claims(desc) ->
?DESC(?FUNCTION_NAME);
verify_claims(default) ->
#{};
[];
verify_claims(validator) ->
[fun do_check_verify_claims/1];
verify_claims(converter) ->
fun(VerifyClaims) ->
[{to_binary(K), V} || {K, V} <- maps:to_list(VerifyClaims)]
fun
(VerifyClaims) when is_map(VerifyClaims) ->
[{to_binary(K), V} || {K, V} <- maps:to_list(VerifyClaims)];
(VerifyClaims) ->
VerifyClaims
end;
verify_claims(required) ->
false;

View File

@ -42,7 +42,10 @@
list/0
]).
-export([send_message/2]).
-export([
send_message/2,
send_message/4
]).
-export([config_key_path/0]).
@ -199,14 +202,17 @@ send_to_matched_egress_bridges(Topic, Msg) ->
send_message(BridgeId, Message) ->
{BridgeType, BridgeName} = emqx_bridge_resource:parse_bridge_id(BridgeId),
ResId = emqx_bridge_resource:resource_id(BridgeType, BridgeName),
send_message(BridgeType, BridgeName, ResId, Message).
send_message(BridgeType, BridgeName, ResId, Message) ->
case emqx:get_config([bridges, BridgeType, BridgeName], not_found) of
not_found ->
{error, {bridge_not_found, BridgeId}};
{error, bridge_not_found};
#{enable := true} = Config ->
QueryOpts = query_opts(Config),
emqx_resource:query(ResId, {send_message, Message}, QueryOpts);
#{enable := false} ->
{error, {bridge_stopped, BridgeId}}
{error, bridge_stopped}
end.
query_opts(Config) ->

View File

@ -17,6 +17,8 @@
-behaviour(application).
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-export([start/2, stop/1]).
-export([
@ -34,6 +36,7 @@ start(_StartType, _StartArgs) ->
ok = emqx_bridge:load_hook(),
ok = emqx_config_handler:add_handler(?LEAF_NODE_HDLR_PATH, ?MODULE),
ok = emqx_config_handler:add_handler(?TOP_LELVE_HDLR_PATH, emqx_bridge),
?tp(emqx_bridge_app_started, #{}),
{ok, Sup}.
stop(_State) ->

View File

@ -112,7 +112,10 @@ validate_name(Name0, Opts) ->
case lists:all(fun is_id_char/1, Name) of
true ->
case maps:get(atom_name, Opts, true) of
true -> list_to_existing_atom(Name);
% NOTE
% A rule may be created before the bridge, hence `list_to_atom/1` rather than
% `list_to_existing_atom/1`; this is infrequent user input anyway.
true -> list_to_atom(Name);
false -> Name0
end;
false ->

View File

@ -32,7 +32,7 @@ init_per_group(TestGroup, BridgeType, Config) ->
{ok, _} = application:ensure_all_started(emqx_connector),
emqx_mgmt_api_test_util:init_suite(),
UniqueNum = integer_to_binary(erlang:unique_integer([positive])),
MQTTTopic = <<"mqtt/topic/", UniqueNum/binary>>,
MQTTTopic = <<"mqtt/topic/abc", UniqueNum/binary>>,
[
{proxy_host, ProxyHost},
{proxy_port, ProxyPort},
@ -116,6 +116,7 @@ create_bridge(Config, Overrides) ->
Name = ?config(bridge_name, Config),
BridgeConfig0 = ?config(bridge_config, Config),
BridgeConfig = emqx_utils_maps:deep_merge(BridgeConfig0, Overrides),
ct:pal("creating bridge with config: ~p", [BridgeConfig]),
emqx_bridge:create(BridgeType, Name, BridgeConfig).
create_bridge_api(Config) ->
@ -203,7 +204,7 @@ create_rule_and_action_http(BridgeType, RuleTopic, Config) ->
%% Testcases
%%------------------------------------------------------------------------------
t_sync_query(Config, MakeMessageFun, IsSuccessCheck) ->
t_sync_query(Config, MakeMessageFun, IsSuccessCheck, TracePoint) ->
ResourceId = resource_id(Config),
?check_trace(
begin
@ -217,11 +218,13 @@ t_sync_query(Config, MakeMessageFun, IsSuccessCheck) ->
IsSuccessCheck(emqx_resource:simple_sync_query(ResourceId, Message)),
ok
end,
[]
fun(Trace) ->
?assertMatch([#{instance_id := ResourceId}], ?of_kind(TracePoint, Trace))
end
),
ok.
t_async_query(Config, MakeMessageFun, IsSuccessCheck) ->
t_async_query(Config, MakeMessageFun, IsSuccessCheck, TracePoint) ->
ResourceId = resource_id(Config),
ReplyFun =
fun(Pid, Result) ->
@ -236,10 +239,21 @@ t_async_query(Config, MakeMessageFun, IsSuccessCheck) ->
?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
),
Message = {send_message, MakeMessageFun()},
emqx_resource:query(ResourceId, Message, #{async_reply_fun => {ReplyFun, [self()]}}),
?assertMatch(
{ok, {ok, _}},
?wait_async_action(
emqx_resource:query(ResourceId, Message, #{
async_reply_fun => {ReplyFun, [self()]}
}),
#{?snk_kind := TracePoint, instance_id := ResourceId},
5_000
)
),
ok
end,
[]
fun(Trace) ->
?assertMatch([#{instance_id := ResourceId}], ?of_kind(TracePoint, Trace))
end
),
receive
{result, Result} -> IsSuccessCheck(Result)
@ -318,7 +332,7 @@ t_start_stop(Config, StopTracePoint) ->
end,
fun(Trace) ->
%% one for each probe, one for real
?assertMatch([_, _, _], ?of_kind(StopTracePoint, Trace)),
?assertMatch([_, _, #{instance_id := ResourceId}], ?of_kind(StopTracePoint, Trace)),
ok
end
),

View File

@ -1062,10 +1062,15 @@ do_econnrefused_or_timeout_test(Config, Error) ->
fun(Trace) ->
case Error of
econnrefused ->
?assertMatch(
[#{reason := Error, connector := ResourceId} | _],
?of_kind(gcp_pubsub_request_failed, Trace)
);
case ?of_kind(gcp_pubsub_request_failed, Trace) of
[#{reason := Error, connector := ResourceId} | _] ->
ok;
[#{reason := {closed, _Msg}, connector := ResourceId} | _] ->
%% _Msg = "The connection was lost."
ok;
Trace0 ->
error({unexpected_trace, Trace0})
end;
timeout ->
?assertMatch(
[_, _ | _],

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_bridge_iotdb, [
{description, "EMQX Enterprise Apache IoTDB Bridge"},
{vsn, "0.1.1"},
{vsn, "0.1.2"},
{modules, [
emqx_bridge_iotdb,
emqx_bridge_iotdb_impl

View File

@ -23,35 +23,35 @@
scheme := http | https,
host := iolist(),
port := inet:port_number(),
path := '_'
path := _
},
connect_timeout := pos_integer(),
pool_type := random | hash,
pool_size := pos_integer(),
request := undefined | map(),
is_aligned := boolean(),
iotdb_version := binary(),
device_id := binary() | undefined,
atom() => '_'
request => undefined | map(),
is_aligned => boolean(),
iotdb_version => binary(),
device_id => binary() | undefined,
atom() => _
}.
-type state() ::
#{
base_path := '_',
base_path := _,
base_url := #{
scheme := http | https,
host := iolist(),
port := inet:port_number(),
path := '_'
path := _
},
connect_timeout := pos_integer(),
pool_type := random | hash,
pool_size := pos_integer(),
request := undefined | map(),
is_aligned := boolean(),
iotdb_version := binary(),
device_id := binary() | undefined,
atom() => '_'
request => undefined | map(),
is_aligned => boolean(),
iotdb_version => binary(),
device_id => binary() | undefined,
atom() => _
}.
-type manager_id() :: binary().
@ -72,7 +72,7 @@ on_start(InstanceId, Config) ->
instance_id => InstanceId,
request => maps:get(request, State, <<>>)
}),
?tp(iotdb_bridge_started, #{}),
?tp(iotdb_bridge_started, #{instance_id => InstanceId}),
{ok, maps:merge(Config, State)};
{error, Reason} ->
?SLOG(error, #{
@ -104,83 +104,108 @@ on_get_status(InstanceId, State) ->
| {ok, pos_integer(), [term()]}
| {error, term()}.
on_query(InstanceId, {send_message, Message}, State) ->
?tp(iotdb_bridge_on_query, #{instance_id => InstanceId}),
?SLOG(debug, #{
msg => "iotdb_bridge_on_query_called",
instance_id => InstanceId,
send_message => Message,
state => emqx_utils:redact(State)
}),
IoTDBPayload = make_iotdb_insert_request(Message, State),
handle_response(
emqx_connector_http:on_query(
InstanceId, {send_message, IoTDBPayload}, State
)
).
case make_iotdb_insert_request(Message, State) of
{ok, IoTDBPayload} ->
handle_response(
emqx_connector_http:on_query(
InstanceId, {send_message, IoTDBPayload}, State
)
);
Error ->
Error
end.
-spec on_query_async(manager_id(), {send_message, map()}, {function(), [term()]}, state()) ->
{ok, pid()}.
{ok, pid()} | {error, empty_request}.
on_query_async(InstanceId, {send_message, Message}, ReplyFunAndArgs0, State) ->
?tp(iotdb_bridge_on_query_async, #{instance_id => InstanceId}),
?SLOG(debug, #{
msg => "iotdb_bridge_on_query_async_called",
instance_id => InstanceId,
send_message => Message,
state => emqx_utils:redact(State)
}),
IoTDBPayload = make_iotdb_insert_request(Message, State),
ReplyFunAndArgs =
{
fun(Result) ->
Response = handle_response(Result),
emqx_resource:apply_reply_fun(ReplyFunAndArgs0, Response)
end,
[]
},
emqx_connector_http:on_query_async(
InstanceId, {send_message, IoTDBPayload}, ReplyFunAndArgs, State
).
case make_iotdb_insert_request(Message, State) of
{ok, IoTDBPayload} ->
ReplyFunAndArgs =
{
fun(Result) ->
Response = handle_response(Result),
emqx_resource:apply_reply_fun(ReplyFunAndArgs0, Response)
end,
[]
},
emqx_connector_http:on_query_async(
InstanceId, {send_message, IoTDBPayload}, ReplyFunAndArgs, State
);
Error ->
Error
end.
%%--------------------------------------------------------------------
%% Internal Functions
%%--------------------------------------------------------------------
make_parsed_payload(PayloadUnparsed) when is_binary(PayloadUnparsed) ->
emqx_utils_json:decode(PayloadUnparsed, [return_maps]);
make_parsed_payload(PayloadUnparsed) when is_list(PayloadUnparsed) ->
lists:map(fun make_parsed_payload/1, PayloadUnparsed);
make_parsed_payload(
#{
measurement := Measurement,
data_type := DataType,
value := Value
} = Data
) ->
Data#{
<<"measurement">> => Measurement,
<<"data_type">> => DataType,
<<"value">> => Value
}.
get_payload(#{payload := Payload}) ->
Payload;
get_payload(#{<<"payload">> := Payload}) ->
Payload.
parse_payload(ParsedPayload) when is_map(ParsedPayload) ->
ParsedPayload;
parse_payload(UnparsedPayload) when is_binary(UnparsedPayload) ->
emqx_utils_json:decode(UnparsedPayload);
parse_payload(UnparsedPayloads) when is_list(UnparsedPayloads) ->
lists:map(fun parse_payload/1, UnparsedPayloads).
preproc_data_list(DataList) ->
lists:foldl(
fun preproc_data/2,
[],
DataList
).
preproc_data(
#{
<<"measurement">> := Measurement,
<<"data_type">> := DataType,
<<"value">> := Value
} = Data
} = Data,
Acc
) ->
#{
timestamp => emqx_plugin_libs_rule:preproc_tmpl(
maps:get(<<"timestamp">>, Data, <<"now">>)
),
measurement => emqx_plugin_libs_rule:preproc_tmpl(Measurement),
data_type => DataType,
value => emqx_plugin_libs_rule:preproc_tmpl(Value)
}.
[
#{
timestamp => maybe_preproc_tmpl(
maps:get(<<"timestamp">>, Data, <<"now">>)
),
measurement => emqx_plugin_libs_rule:preproc_tmpl(Measurement),
data_type => DataType,
value => maybe_preproc_tmpl(Value)
}
| Acc
];
preproc_data(_NoMatch, Acc) ->
?SLOG(
warning,
#{
msg => "iotdb_bridge_preproc_data_failed",
required_fields => ['measurement', 'data_type', 'value'],
received => _NoMatch
}
),
Acc.
preproc_data_list(DataList) ->
lists:map(
fun preproc_data/1,
DataList
).
maybe_preproc_tmpl(Value) when is_binary(Value) ->
emqx_plugin_libs_rule:preproc_tmpl(Value);
maybe_preproc_tmpl(Value) ->
Value.
proc_data(PreProcessedData, Msg) ->
NowNS = erlang:system_time(nanosecond),
@ -199,9 +224,7 @@ proc_data(PreProcessedData, Msg) ->
}
) ->
#{
timestamp => iot_timestamp(
emqx_plugin_libs_rule:proc_tmpl(TimestampTkn, Msg), Nows
),
timestamp => iot_timestamp(TimestampTkn, Msg, Nows),
measurement => emqx_plugin_libs_rule:proc_tmpl(Measurement, Msg),
data_type => DataType,
value => proc_value(DataType, ValueTkn, Msg)
@ -210,6 +233,11 @@ proc_data(PreProcessedData, Msg) ->
PreProcessedData
).
iot_timestamp(Timestamp, _, _) when is_integer(Timestamp) ->
Timestamp;
iot_timestamp(TimestampTkn, Msg, Nows) ->
iot_timestamp(emqx_plugin_libs_rule:proc_tmpl(TimestampTkn, Msg), Nows).
iot_timestamp(Timestamp, #{now_ms := NowMs}) when
Timestamp =:= <<"now">>; Timestamp =:= <<"now_ms">>; Timestamp =:= <<>>
->
@ -240,6 +268,7 @@ replace_var(Val, _Data) ->
Val.
convert_bool(B) when is_boolean(B) -> B;
convert_bool(null) -> null;
convert_bool(1) -> true;
convert_bool(0) -> false;
convert_bool(<<"1">>) -> true;
@ -249,8 +278,7 @@ convert_bool(<<"True">>) -> true;
convert_bool(<<"TRUE">>) -> true;
convert_bool(<<"false">>) -> false;
convert_bool(<<"False">>) -> false;
convert_bool(<<"FALSE">>) -> false;
convert_bool(undefined) -> null.
convert_bool(<<"FALSE">>) -> false.
convert_int(Int) when is_integer(Int) -> Int;
convert_int(Float) when is_float(Float) -> floor(Float);
@ -276,24 +304,29 @@ convert_float(Str) when is_binary(Str) ->
convert_float(undefined) ->
null.
make_iotdb_insert_request(MessageUnparsedPayload, State) ->
Message = maps:update_with(payload, fun make_parsed_payload/1, MessageUnparsedPayload),
make_iotdb_insert_request(Message, State) ->
Payloads = to_list(parse_payload(get_payload(Message))),
IsAligned = maps:get(is_aligned, State, false),
DeviceId = device_id(Message, State),
IotDBVsn = maps:get(iotdb_version, State, ?VSN_1_1_X),
Payload = make_list(maps:get(payload, Message)),
PreProcessedData = preproc_data_list(Payload),
DataList = proc_data(PreProcessedData, Message),
InitAcc = #{timestamps => [], measurements => [], dtypes => [], values => []},
Rows = replace_dtypes(aggregate_rows(DataList, InitAcc), IotDBVsn),
maps:merge(Rows, #{
iotdb_field_key(is_aligned, IotDBVsn) => IsAligned,
iotdb_field_key(device_id, IotDBVsn) => DeviceId
}).
case {device_id(Message, Payloads, State), preproc_data_list(Payloads)} of
{undefined, _} ->
{error, device_id_missing};
{_, []} ->
{error, invalid_data};
{DeviceId, PreProcessedData} ->
DataList = proc_data(PreProcessedData, Message),
InitAcc = #{timestamps => [], measurements => [], dtypes => [], values => []},
Rows = replace_dtypes(aggregate_rows(DataList, InitAcc), IotDBVsn),
{ok,
maps:merge(Rows, #{
iotdb_field_key(is_aligned, IotDBVsn) => IsAligned,
iotdb_field_key(device_id, IotDBVsn) => DeviceId
})}
end.
replace_dtypes(Rows, IotDBVsn) ->
{Types, Map} = maps:take(dtypes, Rows),
Map#{iotdb_field_key(data_types, IotDBVsn) => Types}.
replace_dtypes(Rows0, IotDBVsn) ->
{Types, Rows} = maps:take(dtypes, Rows0),
Rows#{iotdb_field_key(data_types, IotDBVsn) => Types}.
aggregate_rows(DataList, InitAcc) ->
lists:foldr(
@ -368,24 +401,14 @@ iotdb_field_key(data_types, ?VSN_1_0_X) ->
iotdb_field_key(data_types, ?VSN_0_13_X) ->
<<"dataTypes">>.
make_list(List) when is_list(List) -> List;
make_list(Data) -> [Data].
to_list(List) when is_list(List) -> List;
to_list(Data) -> [Data].
device_id(Message, State) ->
device_id(Message, Payloads, State) ->
case maps:get(device_id, State, undefined) of
undefined ->
case maps:get(payload, Message) of
#{<<"device_id">> := DeviceId} ->
DeviceId;
#{device_id := DeviceId} ->
DeviceId;
_NotFound ->
Topic = maps:get(topic, Message),
case re:replace(Topic, "/", ".", [global, {return, binary}]) of
<<"root.", _/binary>> = Device -> Device;
Device -> <<"root.", Device/binary>>
end
end;
%% [FIXME] there could be conflicting device-ids in the Payloads
maps:get(<<"device_id">>, hd(Payloads), undefined);
DeviceId ->
DeviceIdTkn = emqx_plugin_libs_rule:preproc_tmpl(DeviceId),
emqx_plugin_libs_rule:proc_tmpl(DeviceIdTkn, Message)

View File

@ -6,8 +6,10 @@
-compile(nowarn_export_all).
-compile(export_all).
-include("emqx_bridge_iotdb.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-define(BRIDGE_TYPE_BIN, <<"iotdb">>).
-define(APPS, [emqx_bridge, emqx_resource, emqx_rule_engine, emqx_bridge_iotdb]).
@ -18,13 +20,15 @@
all() ->
[
{group, plain}
{group, plain},
{group, legacy}
].
groups() ->
AllTCs = emqx_common_test_helpers:all(?MODULE),
[
{plain, AllTCs}
{plain, AllTCs},
{legacy, AllTCs}
].
init_per_suite(Config) ->
@ -43,7 +47,32 @@ init_per_group(plain = Type, Config0) ->
[
{bridge_host, Host},
{bridge_port, Port},
{proxy_name, ProxyName}
{proxy_name, ProxyName},
{iotdb_version, ?VSN_1_1_X},
{iotdb_rest_prefix, <<"/rest/v2/">>}
| Config
];
false ->
case os:getenv("IS_CI") of
"yes" ->
throw(no_iotdb);
_ ->
{skip, no_iotdb}
end
end;
init_per_group(legacy = Type, Config0) ->
Host = os:getenv("IOTDB_LEGACY_HOST", "toxiproxy.emqx.net"),
Port = list_to_integer(os:getenv("IOTDB_LEGACY_PORT", "38080")),
ProxyName = "iotdb013",
case emqx_common_test_helpers:is_tcp_server_available(Host, Port) of
true ->
Config = emqx_bridge_testlib:init_per_group(Type, ?BRIDGE_TYPE_BIN, Config0),
[
{bridge_host, Host},
{bridge_port, Port},
{proxy_name, ProxyName},
{iotdb_version, ?VSN_0_13_X},
{iotdb_rest_prefix, <<"/rest/v1/">>}
| Config
];
false ->
@ -58,7 +87,8 @@ init_per_group(_Group, Config) ->
Config.
end_per_group(Group, Config) when
Group =:= plain
Group =:= plain;
Group =:= legacy
->
emqx_bridge_testlib:end_per_group(Config),
ok;
@ -67,7 +97,7 @@ end_per_group(_Group, _Config) ->
init_per_testcase(TestCase, Config0) ->
Config = emqx_bridge_testlib:init_per_testcase(TestCase, Config0, fun bridge_config/3),
reset_service(Config),
iotdb_reset(Config),
Config.
end_per_testcase(TestCase, Config) ->
@ -76,20 +106,23 @@ end_per_testcase(TestCase, Config) ->
%%------------------------------------------------------------------------------
%% Helper fns
%%------------------------------------------------------------------------------
iotdb_server_url(Host, Port) ->
iolist_to_binary([
"http://",
Host,
":",
integer_to_binary(Port)
]).
bridge_config(TestCase, _TestGroup, Config) ->
UniqueNum = integer_to_binary(erlang:unique_integer()),
Host = ?config(bridge_host, Config),
Port = ?config(bridge_port, Config),
Version = ?config(iotdb_version, Config),
Name = <<
(atom_to_binary(TestCase))/binary, UniqueNum/binary
>>,
ServerURL = iolist_to_binary([
"http://",
Host,
":",
integer_to_binary(Port)
]),
ServerURL = iotdb_server_url(Host, Port),
ConfigString =
io_lib:format(
"bridges.iotdb.~s {\n"
@ -99,6 +132,7 @@ bridge_config(TestCase, _TestGroup, Config) ->
" username = \"root\"\n"
" password = \"root\"\n"
" }\n"
"iotdb_version = \"~s\"\n"
" pool_size = 1\n"
" resource_opts = {\n"
" auto_restart_interval = 5000\n"
@ -109,12 +143,54 @@ bridge_config(TestCase, _TestGroup, Config) ->
"}\n",
[
Name,
ServerURL
ServerURL,
Version
]
),
{Name, ConfigString, emqx_bridge_testlib:parse_and_check(Config, ConfigString, Name)}.
reset_service(Config) ->
make_iotdb_payload(DeviceId, Measurement, Type, Value) ->
#{
measurement => s_to_b(Measurement),
data_type => s_to_b(Type),
value => s_to_b(Value),
device_id => DeviceId,
is_aligned => true
}.
make_iotdb_payload(DeviceId, Measurement, Type, Value, Timestamp) ->
Payload = make_iotdb_payload(DeviceId, Measurement, Type, Value),
Payload#{timestamp => Timestamp}.
s_to_b(S) when is_list(S) -> list_to_binary(S);
s_to_b(V) -> V.
make_message_fun(Topic, Payload) ->
fun() ->
MsgId = erlang:unique_integer([positive]),
#{
topic => Topic,
id => MsgId,
payload => emqx_utils_json:encode(Payload),
retain => true
}
end.
iotdb_topic(Config) ->
?config(mqtt_topic, Config).
iotdb_device(Config) ->
Topic = iotdb_topic(Config),
topic_to_iotdb_device(Topic).
topic_to_iotdb_device(Topic) ->
Device = re:replace(Topic, "/", ".", [global, {return, binary}]),
<<"root.", Device/binary>>.
iotdb_request(Config, Path, Body) ->
iotdb_request(Config, Path, Body, #{}).
iotdb_request(Config, Path, Body, Opts) ->
_BridgeConfig =
#{
<<"base_url">> := BaseURL,
@ -125,99 +201,205 @@ reset_service(Config) ->
} =
?config(bridge_config, Config),
ct:pal("bridge config: ~p", [_BridgeConfig]),
Path = <<BaseURL/binary, "/rest/v2/nonQuery">>,
URL = <<BaseURL/binary, Path/binary>>,
BasicToken = base64:encode(<<Username/binary, ":", Password/binary>>),
Headers = [
{"Content-type", "application/json"},
{"Authorization", binary_to_list(BasicToken)}
],
emqx_mgmt_api_test_util:request_api(post, URL, "", Headers, Body, Opts).
iotdb_reset(Config) ->
Device = iotdb_device(Config),
iotdb_reset(Config, Device).
iotdb_reset(Config, Device) ->
Prefix = ?config(iotdb_rest_prefix, Config),
Body = #{sql => <<"delete from ", Device/binary, ".*">>},
{ok, _} = emqx_mgmt_api_test_util:request_api(post, Path, "", Headers, Body, #{}).
{ok, _} = iotdb_request(Config, <<Prefix/binary, "nonQuery">>, Body).
make_iotdb_payload(DeviceId) ->
make_iotdb_payload(DeviceId, "temp", <<"INT32">>, "36").
iotdb_query(Config, Query) ->
Prefix = ?config(iotdb_rest_prefix, Config),
Path = <<Prefix/binary, "query">>,
Opts = #{return_all => true},
Body = #{sql => Query},
iotdb_request(Config, Path, Body, Opts).
make_iotdb_payload(DeviceId, Measurement, Type, Value) ->
#{
measurement => Measurement,
data_type => Type,
value => Value,
device_id => DeviceId,
is_aligned => false
}.
is_success_check({ok, 200, _, Body}) ->
?assert(is_code(200, emqx_utils_json:decode(Body))).
make_message_fun(Topic, Payload) ->
fun() ->
MsgId = erlang:unique_integer([positive]),
#{
topic => Topic,
id => MsgId,
payload => Payload,
retain => true
}
is_code(Code, #{<<"code">> := Code}) -> true;
is_code(_, _) -> false.
is_error_check(Reason) ->
fun(Result) ->
?assertEqual({error, Reason}, Result)
end.
iotdb_device(Config) ->
MQTTTopic = ?config(mqtt_topic, Config),
Device = re:replace(MQTTTopic, "/", ".dev", [global, {return, binary}]),
<<"root.", Device/binary>>.
%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------
t_sync_query_simple(Config) ->
DeviceId = iotdb_device(Config),
Payload = make_iotdb_payload(DeviceId, "temp", <<"INT32">>, "36"),
MakeMessageFun = make_message_fun(DeviceId, Payload),
IsSuccessCheck =
fun(Result) ->
?assertEqual(ok, element(1, Result))
end,
emqx_bridge_testlib:t_sync_query(Config, MakeMessageFun, IsSuccessCheck).
Payload = make_iotdb_payload(DeviceId, "temp", "INT32", "36"),
MakeMessageFun = make_message_fun(iotdb_topic(Config), Payload),
ok = emqx_bridge_testlib:t_sync_query(
Config, MakeMessageFun, fun is_success_check/1, iotdb_bridge_on_query
),
Query = <<"select temp from ", DeviceId/binary>>,
{ok, {{_, 200, _}, _, IoTDBResult}} = iotdb_query(Config, Query),
?assertMatch(
#{<<"values">> := [[36]]},
emqx_utils_json:decode(IoTDBResult)
).
t_async_query(Config) ->
DeviceId = iotdb_device(Config),
Payload = make_iotdb_payload(DeviceId, "temp", <<"INT32">>, "36"),
MakeMessageFun = make_message_fun(DeviceId, Payload),
IsSuccessCheck =
fun(Result) ->
?assertEqual(ok, element(1, Result))
end,
emqx_bridge_testlib:t_async_query(Config, MakeMessageFun, IsSuccessCheck).
Payload = make_iotdb_payload(DeviceId, "temp", "INT32", "36"),
MakeMessageFun = make_message_fun(iotdb_topic(Config), Payload),
ok = emqx_bridge_testlib:t_async_query(
Config, MakeMessageFun, fun is_success_check/1, iotdb_bridge_on_query_async
),
Query = <<"select temp from ", DeviceId/binary>>,
{ok, {{_, 200, _}, _, IoTDBResult}} = iotdb_query(Config, Query),
?assertMatch(
#{<<"values">> := [[36]]},
emqx_utils_json:decode(IoTDBResult)
).
t_sync_query_aggregated(Config) ->
DeviceId = iotdb_device(Config),
Payload = [
make_iotdb_payload(DeviceId, "temp", <<"INT32">>, "36"),
(make_iotdb_payload(DeviceId, "temp", <<"INT32">>, "37"))#{timestamp => <<"mow_us">>},
(make_iotdb_payload(DeviceId, "temp", <<"INT32">>, "38"))#{timestamp => <<"mow_ns">>},
make_iotdb_payload(DeviceId, "charged", <<"BOOLEAN">>, "1"),
make_iotdb_payload(DeviceId, "stoked", <<"BOOLEAN">>, "true"),
make_iotdb_payload(DeviceId, "enriched", <<"BOOLEAN">>, <<"TRUE">>),
make_iotdb_payload(DeviceId, "drained", <<"BOOLEAN">>, "0"),
make_iotdb_payload(DeviceId, "dazzled", <<"BOOLEAN">>, "false"),
make_iotdb_payload(DeviceId, "unplugged", <<"BOOLEAN">>, <<"FALSE">>),
make_iotdb_payload(DeviceId, "weight", <<"FLOAT">>, "87.3"),
make_iotdb_payload(DeviceId, "foo", <<"TEXT">>, <<"bar">>)
make_iotdb_payload(DeviceId, "temp", "INT32", "36", 1685112026290),
make_iotdb_payload(DeviceId, "temp", "INT32", 37, 1685112026291),
make_iotdb_payload(DeviceId, "temp", "INT32", 38.7, 1685112026292),
make_iotdb_payload(DeviceId, "temp", "INT32", "39", <<"1685112026293">>),
make_iotdb_payload(DeviceId, "temp", "INT64", "36", 1685112026294),
make_iotdb_payload(DeviceId, "temp", "INT64", 36, 1685112026295),
make_iotdb_payload(DeviceId, "temp", "INT64", 36.7, 1685112026296),
%% implicit 'now()' timestamp
make_iotdb_payload(DeviceId, "temp", "INT32", "40"),
%% [FIXME] neither nanoseconds nor microseconds seem to be supported by IoTDB
(make_iotdb_payload(DeviceId, "temp", "INT32", "41"))#{timestamp => <<"now_us">>},
(make_iotdb_payload(DeviceId, "temp", "INT32", "42"))#{timestamp => <<"now_ns">>},
make_iotdb_payload(DeviceId, "weight", "FLOAT", "87.3", 1685112026290),
make_iotdb_payload(DeviceId, "weight", "FLOAT", 87.3, 1685112026291),
make_iotdb_payload(DeviceId, "weight", "FLOAT", 87, 1685112026292),
make_iotdb_payload(DeviceId, "weight", "DOUBLE", "87.3", 1685112026293),
make_iotdb_payload(DeviceId, "weight", "DOUBLE", 87.3, 1685112026294),
make_iotdb_payload(DeviceId, "weight", "DOUBLE", 87, 1685112026295),
make_iotdb_payload(DeviceId, "charged", "BOOLEAN", "1", 1685112026300),
make_iotdb_payload(DeviceId, "floated", "BOOLEAN", 1, 1685112026300),
make_iotdb_payload(DeviceId, "started", "BOOLEAN", true, 1685112026300),
make_iotdb_payload(DeviceId, "stoked", "BOOLEAN", "true", 1685112026300),
make_iotdb_payload(DeviceId, "enriched", "BOOLEAN", "TRUE", 1685112026300),
make_iotdb_payload(DeviceId, "gutted", "BOOLEAN", "True", 1685112026300),
make_iotdb_payload(DeviceId, "drained", "BOOLEAN", "0", 1685112026300),
make_iotdb_payload(DeviceId, "toasted", "BOOLEAN", 0, 1685112026300),
make_iotdb_payload(DeviceId, "uncharted", "BOOLEAN", false, 1685112026300),
make_iotdb_payload(DeviceId, "dazzled", "BOOLEAN", "false", 1685112026300),
make_iotdb_payload(DeviceId, "unplugged", "BOOLEAN", "FALSE", 1685112026300),
make_iotdb_payload(DeviceId, "unraveled", "BOOLEAN", "False", 1685112026300),
make_iotdb_payload(DeviceId, "undecided", "BOOLEAN", null, 1685112026300),
make_iotdb_payload(DeviceId, "foo", "TEXT", "bar", 1685112026300)
],
MakeMessageFun = make_message_fun(DeviceId, Payload),
IsSuccessCheck =
fun(Result) ->
?assertEqual(ok, element(1, Result))
end,
emqx_bridge_testlib:t_sync_query(Config, MakeMessageFun, IsSuccessCheck).
MakeMessageFun = make_message_fun(iotdb_topic(Config), Payload),
ok = emqx_bridge_testlib:t_sync_query(
Config, MakeMessageFun, fun is_success_check/1, iotdb_bridge_on_query
),
%% check temp
QueryTemp = <<"select temp from ", DeviceId/binary>>,
{ok, {{_, 200, _}, _, ResultTemp}} = iotdb_query(Config, QueryTemp),
?assertMatch(
#{<<"values">> := [[36, 37, 38, 39, 36, 36, 36, 40, 41, 42]]},
emqx_utils_json:decode(ResultTemp)
),
%% check weight
QueryWeight = <<"select weight from ", DeviceId/binary>>,
{ok, {{_, 200, _}, _, ResultWeight}} = iotdb_query(Config, QueryWeight),
?assertMatch(
#{<<"values">> := [[87.3, 87.3, 87.0, 87.3, 87.3, 87.0]]},
emqx_utils_json:decode(ResultWeight)
),
%% check rest ts = 1685112026300
QueryRest = <<"select * from ", DeviceId/binary, " where time = 1685112026300">>,
{ok, {{_, 200, _}, _, ResultRest}} = iotdb_query(Config, QueryRest),
#{<<"values">> := Values, <<"expressions">> := Expressions} = emqx_utils_json:decode(
ResultRest
),
Results = maps:from_list(lists:zipwith(fun(K, [V]) -> {K, V} end, Expressions, Values)),
Exp = #{
exp(DeviceId, "charged") => true,
exp(DeviceId, "floated") => true,
exp(DeviceId, "started") => true,
exp(DeviceId, "stoked") => true,
exp(DeviceId, "enriched") => true,
exp(DeviceId, "gutted") => true,
exp(DeviceId, "drained") => false,
exp(DeviceId, "toasted") => false,
exp(DeviceId, "uncharted") => false,
exp(DeviceId, "dazzled") => false,
exp(DeviceId, "unplugged") => false,
exp(DeviceId, "unraveled") => false,
exp(DeviceId, "undecided") => null,
exp(DeviceId, "foo") => <<"bar">>,
exp(DeviceId, "temp") => null,
exp(DeviceId, "weight") => null
},
?assertEqual(Exp, Results),
ok.
exp(Dev, M0) ->
M = s_to_b(M0),
<<Dev/binary, ".", M/binary>>.
t_sync_query_fail(Config) ->
DeviceId = iotdb_device(Config),
Payload = make_iotdb_payload(DeviceId, "temp", <<"INT32">>, "Anton"),
MakeMessageFun = make_message_fun(DeviceId, Payload),
Payload = make_iotdb_payload(DeviceId, "temp", "INT32", "Anton"),
MakeMessageFun = make_message_fun(iotdb_topic(Config), Payload),
IsSuccessCheck =
fun(Result) ->
?assertEqual(error, element(1, Result))
end,
emqx_bridge_testlib:t_sync_query(Config, MakeMessageFun, IsSuccessCheck).
emqx_bridge_testlib:t_sync_query(
Config, MakeMessageFun, IsSuccessCheck, iotdb_bridge_on_query
).
t_sync_device_id_missing(Config) ->
emqx_bridge_testlib:t_sync_query(
Config,
make_message_fun(iotdb_topic(Config), #{foo => bar}),
is_error_check(device_id_missing),
iotdb_bridge_on_query
).
t_sync_invalid_data(Config) ->
emqx_bridge_testlib:t_sync_query(
Config,
make_message_fun(iotdb_topic(Config), #{foo => bar, device_id => <<"root.sg27">>}),
is_error_check(invalid_data),
iotdb_bridge_on_query
).
t_async_device_id_missing(Config) ->
emqx_bridge_testlib:t_async_query(
Config,
make_message_fun(iotdb_topic(Config), #{foo => bar}),
is_error_check(device_id_missing),
iotdb_bridge_on_query_async
).
t_async_invalid_data(Config) ->
emqx_bridge_testlib:t_async_query(
Config,
make_message_fun(iotdb_topic(Config), #{foo => bar, device_id => <<"root.sg27">>}),
is_error_check(invalid_data),
iotdb_bridge_on_query_async
).
t_create_via_http(Config) ->
emqx_bridge_testlib:t_create_via_http(Config).
@ -227,3 +409,57 @@ t_start_stop(Config) ->
t_on_get_status(Config) ->
emqx_bridge_testlib:t_on_get_status(Config).
t_device_id(Config) ->
ResourceId = emqx_bridge_testlib:resource_id(Config),
%% Create without device_id configured
?assertMatch({ok, _}, emqx_bridge_testlib:create_bridge(Config)),
?retry(
_Sleep = 1_000,
_Attempts = 20,
?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
),
ConfiguredDevice = <<"root.someOtherDevice234">>,
DeviceId = <<"root.deviceFooBar123">>,
Topic = <<"some/random/topic">>,
iotdb_reset(Config, DeviceId),
iotdb_reset(Config, ConfiguredDevice),
Payload1 = make_iotdb_payload(DeviceId, "test", "BOOLEAN", true),
MessageF1 = make_message_fun(Topic, Payload1),
is_success_check(
emqx_resource:simple_sync_query(ResourceId, {send_message, MessageF1()})
),
{ok, {{_, 200, _}, _, Res1_1}} = iotdb_query(Config, <<"select * from ", DeviceId/binary>>),
ct:pal("device_id result: ~p", [emqx_utils_json:decode(Res1_1)]),
#{<<"values">> := Values1_1} = emqx_utils_json:decode(Res1_1),
?assertNot(is_empty(Values1_1)),
iotdb_reset(Config, DeviceId),
iotdb_reset(Config, ConfiguredDevice),
%% reconfigure bridge with device_id
{ok, _} =
emqx_bridge_testlib:update_bridge_api(Config, #{<<"device_id">> => ConfiguredDevice}),
is_success_check(
emqx_resource:simple_sync_query(ResourceId, {send_message, MessageF1()})
),
%% even though the message contains a device_id, it is not used
{ok, {{_, 200, _}, _, Res2_1}} = iotdb_query(Config, <<"select * from ", DeviceId/binary>>),
#{<<"values">> := Values2_1} = emqx_utils_json:decode(Res2_1),
?assert(is_empty(Values2_1)),
{ok, {{_, 200, _}, _, Res2_2}} = iotdb_query(
Config, <<"select * from ", ConfiguredDevice/binary>>
),
#{<<"values">> := Values2_2} = emqx_utils_json:decode(Res2_2),
?assertNot(is_empty(Values2_2)),
iotdb_reset(Config, DeviceId),
iotdb_reset(Config, ConfiguredDevice),
ok.
is_empty(null) -> true;
is_empty([]) -> true;
is_empty([[]]) -> true;
is_empty(_) -> false.

View File

@ -2078,7 +2078,7 @@ t_resource_manager_crash_after_subscriber_started(Config) ->
10_000
),
case Res of
{error, {config_update_crashed, {killed, _}}} ->
{error, {config_update_crashed, _}} ->
ok;
{ok, _} ->
%% the new manager may have had time to startup
@ -2135,7 +2135,7 @@ t_resource_manager_crash_before_subscriber_started(Config) ->
10_000
),
case Res of
{error, {config_update_crashed, {killed, _}}} ->
{error, {config_update_crashed, _}} ->
ok;
{ok, _} ->
%% the new manager may have had time to startup

View File

@ -83,6 +83,7 @@ start_ingress(ResourceId, Ingress, ClientOpts) ->
{ingress, Ingress},
{client_opts, ClientOpts}
],
ok = emqx_resource:allocate_resource(ResourceId, ingress_pool_name, PoolName),
case emqx_resource_pool:start(PoolName, emqx_bridge_mqtt_ingress, Options) of
ok ->
{ok, #{ingress_pool_name => PoolName}};
@ -132,6 +133,7 @@ start_egress(ResourceId, Egress, ClientOpts) ->
{pool_size, PoolSize},
{client_opts, ClientOpts}
],
ok = emqx_resource:allocate_resource(ResourceId, egress_pool_name, PoolName),
case emqx_resource_pool:start(PoolName, emqx_bridge_mqtt_egress, Options) of
ok ->
{ok, #{
@ -142,13 +144,14 @@ start_egress(ResourceId, Egress, ClientOpts) ->
{error, Reason}
end.
on_stop(ResourceId, State) ->
on_stop(ResourceId, _State) ->
?SLOG(info, #{
msg => "stopping_mqtt_connector",
connector => ResourceId
}),
ok = stop_ingress(State),
ok = stop_egress(State).
Allocated = emqx_resource:get_allocated_resources(ResourceId),
ok = stop_ingress(Allocated),
ok = stop_egress(Allocated).
stop_ingress(#{ingress_pool_name := PoolName}) ->
emqx_resource_pool:stop(PoolName);

View File

@ -203,8 +203,9 @@ oracle_config(TestCase, _ConnectionType, Config) ->
" pool_size = 1\n"
" sql = \"~s\"\n"
" resource_opts = {\n"
" auto_restart_interval = 5000\n"
" request_timeout = 30000\n"
" auto_restart_interval = \"5s\"\n"
" health_check_interval = \"5s\"\n"
" request_timeout = \"30s\"\n"
" query_mode = \"async\"\n"
" enable_batch = true\n"
" batch_size = 3\n"
@ -233,6 +234,11 @@ resource_id(Config) ->
Name = ?config(oracle_name, Config),
emqx_bridge_resource:resource_id(Type, Name).
bridge_id(Config) ->
Type = ?BRIDGE_TYPE_BIN,
Name = ?config(oracle_name, Config),
emqx_bridge_resource:bridge_id(Type, Name).
create_bridge(Config) ->
create_bridge(Config, _Overrides = #{}).
@ -361,6 +367,7 @@ t_batch_sync_query(Config) ->
ProxyHost = ?config(proxy_host, Config),
ProxyName = ?config(proxy_name, Config),
ResourceId = resource_id(Config),
BridgeId = bridge_id(Config),
?check_trace(
begin
?assertMatch({ok, _}, create_bridge_api(Config)),
@ -380,12 +387,12 @@ t_batch_sync_query(Config) ->
% Send 3 async messages while the resource is down. When it comes back, these messages
% will be delivered synchronously. If we tried to send sync messages directly, they
% would be sent asynchronously, as callback_mode is set to async_if_possible.
Message = {send_message, Params},
emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
ct:sleep(1000),
emqx_resource:query(ResourceId, Message),
emqx_resource:query(ResourceId, Message),
emqx_resource:query(ResourceId, Message)
emqx_bridge:send_message(BridgeId, Params),
emqx_bridge:send_message(BridgeId, Params),
emqx_bridge:send_message(BridgeId, Params),
ok
end),
?retry(
_Sleep = 1_000,

View File

@ -1064,9 +1064,17 @@ t_cluster(Config) ->
?check_trace(
begin
Nodes = [N1, N2 | _] = start_cluster(Cluster),
%% wait until bridge app supervisor is up; by that point,
%% `emqx_config_handler:add_handler' has been called and the node should be
%% ready to create bridges.
NumNodes = length(Nodes),
{ok, _} = snabbkaffe:block_until(
?match_n_events(NumNodes, #{?snk_kind := emqx_bridge_app_started}),
15_000
),
{ok, SRef0} = snabbkaffe:subscribe(
?match_event(#{?snk_kind := pulsar_producer_bridge_started}),
length(Nodes),
NumNodes,
15_000
),
{ok, _} = erpc:call(N1, fun() -> create_bridge(Config) end),

View File

@ -1,6 +1,6 @@
{application, emqx_bridge_tdengine, [
{description, "EMQX Enterprise TDEngine Bridge"},
{vsn, "0.1.2"},
{vsn, "0.1.3"},
{registered, []},
{applications, [kernel, stdlib, tdengine]},
{env, []},

View File

@ -108,6 +108,7 @@ on_start(
Prepares = parse_prepare_sql(Config),
State = Prepares#{pool_name => InstanceId, query_opts => query_opts(Config)},
ok = emqx_resource:allocate_resource(InstanceId, pool_name, InstanceId),
case emqx_resource_pool:start(InstanceId, ?MODULE, Options) of
ok ->
{ok, State};
@ -115,12 +116,17 @@ on_start(
Error
end.
on_stop(InstanceId, #{pool_name := PoolName}) ->
on_stop(InstanceId, _State) ->
?SLOG(info, #{
msg => "stopping_tdengine_connector",
connector => InstanceId
}),
emqx_resource_pool:stop(PoolName).
case emqx_resource:get_allocated_resources(InstanceId) of
#{pool_name := PoolName} ->
emqx_resource_pool:stop(PoolName);
_ ->
ok
end.
on_query(InstanceId, {query, SQL}, State) ->
do_query(InstanceId, SQL, State);

View File

@ -219,18 +219,24 @@ on_start(
base_path => BasePath,
request => preprocess_request(maps:get(request, Config, undefined))
},
ok = emqx_resource:allocate_resource(InstId, pool_name, InstId),
case ehttpc_sup:start_pool(InstId, PoolOpts) of
{ok, _} -> {ok, State};
{error, {already_started, _}} -> {ok, State};
{error, Reason} -> {error, Reason}
end.
on_stop(InstId, #{pool_name := PoolName}) ->
on_stop(InstId, _State) ->
?SLOG(info, #{
msg => "stopping_http_connector",
connector => InstId
}),
ehttpc_sup:stop_pool(PoolName).
case emqx_resource:get_allocated_resources(InstId) of
#{pool_name := PoolName} ->
ehttpc_sup:stop_pool(PoolName);
_ ->
ok
end.
on_query(InstId, {send_message, Msg}, State) ->
case maps:get(request, State, undefined) of

View File

@ -97,17 +97,23 @@ on_start(
{pool_size, PoolSize},
{auto_reconnect, ?AUTO_RECONNECT_INTERVAL}
],
ok = emqx_resource:allocate_resource(InstId, pool_name, InstId),
case emqx_resource_pool:start(InstId, ?MODULE, Opts ++ SslOpts) of
ok -> {ok, #{pool_name => InstId}};
{error, Reason} -> {error, Reason}
end.
on_stop(InstId, #{pool_name := PoolName}) ->
on_stop(InstId, _State) ->
?SLOG(info, #{
msg => "stopping_ldap_connector",
connector => InstId
}),
emqx_resource_pool:stop(PoolName).
case emqx_resource:get_allocated_resources(InstId) of
#{pool_name := PoolName} ->
emqx_resource_pool:stop(PoolName);
_ ->
ok
end.
on_query(InstId, {search, Base, Filter, Attributes}, #{pool_name := PoolName} = State) ->
Request = {Base, Filter, Attributes},

View File

@ -183,6 +183,7 @@ on_start(
{worker_options, init_worker_options(maps:to_list(NConfig), SslOpts)}
],
Collection = maps:get(collection, Config, <<"mqtt">>),
ok = emqx_resource:allocate_resource(InstId, pool_name, InstId),
case emqx_resource_pool:start(InstId, ?MODULE, Opts) of
ok ->
{ok, #{
@ -194,12 +195,17 @@ on_start(
{error, Reason}
end.
on_stop(InstId, #{pool_name := PoolName}) ->
on_stop(InstId, _State) ->
?SLOG(info, #{
msg => "stopping_mongodb_connector",
connector => InstId
}),
emqx_resource_pool:stop(PoolName).
case emqx_resource:get_allocated_resources(InstId) of
#{pool_name := PoolName} ->
emqx_resource_pool:stop(PoolName);
_ ->
ok
end.
on_query(
InstId,

View File

@ -124,6 +124,7 @@ on_start(
]
),
State = parse_prepare_sql(Config),
ok = emqx_resource:allocate_resource(InstId, pool_name, InstId),
case emqx_resource_pool:start(InstId, ?MODULE, Options ++ SslOpts) of
ok ->
{ok, init_prepare(State#{pool_name => InstId})};
@ -140,12 +141,17 @@ maybe_add_password_opt(undefined, Options) ->
maybe_add_password_opt(Password, Options) ->
[{password, Password} | Options].
on_stop(InstId, #{pool_name := PoolName}) ->
on_stop(InstId, _State) ->
?SLOG(info, #{
msg => "stopping_mysql_connector",
connector => InstId
}),
emqx_resource_pool:stop(PoolName).
case emqx_resource:get_allocated_resources(InstId) of
#{pool_name := PoolName} ->
emqx_resource_pool:stop(PoolName);
_ ->
ok
end.
on_query(InstId, {TypeOrKey, SQLOrKey}, State) ->
on_query(InstId, {TypeOrKey, SQLOrKey, [], default_timeout}, State);

View File

@ -121,6 +121,7 @@ on_start(
{pool_size, PoolSize}
],
State = parse_prepare_sql(Config),
ok = emqx_resource:allocate_resource(InstId, pool_name, InstId),
case emqx_resource_pool:start(InstId, ?MODULE, Options ++ SslOpts) of
ok ->
{ok, init_prepare(State#{pool_name => InstId, prepare_statement => #{}})};
@ -132,12 +133,17 @@ on_start(
{error, Reason}
end.
on_stop(InstId, #{pool_name := PoolName}) ->
on_stop(InstId, _State) ->
?SLOG(info, #{
msg => "stopping postgresql connector",
connector => InstId
}),
emqx_resource_pool:stop(PoolName).
case emqx_resource:get_allocated_resources(InstId) of
#{pool_name := PoolName} ->
emqx_resource_pool:stop(PoolName);
_ ->
ok
end.
on_query(InstId, {TypeOrKey, NameOrSQL}, State) ->
on_query(InstId, {TypeOrKey, NameOrSQL, []}, State);

View File

@ -160,6 +160,8 @@ on_start(
[{ssl, false}]
end ++ [{sentinel, maps:get(sentinel, Config, undefined)}],
State = #{pool_name => InstId, type => Type},
ok = emqx_resource:allocate_resource(InstId, type, Type),
ok = emqx_resource:allocate_resource(InstId, pool_name, InstId),
case Type of
cluster ->
case eredis_cluster:start_pool(InstId, Opts ++ [{options, Options}]) of
@ -177,14 +179,18 @@ on_start(
end
end.
on_stop(InstId, #{pool_name := PoolName, type := Type}) ->
on_stop(InstId, _State) ->
?SLOG(info, #{
msg => "stopping_redis_connector",
connector => InstId
}),
case Type of
cluster -> eredis_cluster:stop_pool(PoolName);
_ -> emqx_resource_pool:stop(PoolName)
case emqx_resource:get_allocated_resources(InstId) of
#{pool_name := PoolName, type := cluster} ->
eredis_cluster:stop_pool(PoolName);
#{pool_name := PoolName, type := _} ->
emqx_resource_pool:stop(PoolName);
_ ->
ok
end.
on_query(InstId, {cmd, _} = Query, State) ->

View File

@ -25,7 +25,8 @@ wrap_auth_headers_test_() ->
meck:expect(ehttpc_sup, start_pool, 2, {ok, foo}),
meck:expect(ehttpc, request, fun(_, _, Req, _, _) -> {ok, 200, Req} end),
meck:expect(ehttpc_pool, pick_worker, 1, self()),
[ehttpc_sup, ehttpc, ehttpc_pool]
meck:expect(emqx_resource, allocate_resource, 3, ok),
[ehttpc_sup, ehttpc, ehttpc_pool, emqx_resource]
end,
fun meck:unload/1, fun(_) ->
Config = #{

View File

@ -59,6 +59,11 @@ start_listeners(Listeners) ->
scheme => basic,
description =>
<<"Authorize with [API Keys](https://www.emqx.io/docs/en/v5.0/admin/api.html#api-keys)">>
},
'bearerAuth' => #{
type => http,
scheme => bearer,
description => <<"Authorize with Bearer Token">>
}
}
}
@ -111,7 +116,6 @@ stop_listeners(Listeners) ->
begin
case minirest:stop(Name) of
ok ->
_ = emqx_listeners:wait_listener_stopped(Bind),
?ULOG("Stop listener ~ts on ~ts successfully.~n", [
Name, emqx_listeners:format_bind(Bind)
]);

View File

@ -536,6 +536,7 @@ init_prop(Keys, Init, Type) ->
format_prop(deprecated, Value) when is_boolean(Value) -> Value;
format_prop(deprecated, _) -> true;
format_prop(default, []) -> [];
format_prop(_, Schema) -> to_bin(Schema).
trans_required(Spec, true, _) -> Spec#{required => true};
@ -567,18 +568,7 @@ trans_description(Spec, Hocon, Options) ->
Spec;
Desc ->
Desc1 = binary:replace(Desc, [<<"\n">>], <<"<br/>">>, [global]),
maybe_add_summary_from_label(Spec#{description => Desc1}, Hocon, Options)
end.
maybe_add_summary_from_label(Spec, Hocon, Options) ->
Label =
case desc_struct(Hocon) of
?DESC(_, _) = Struct -> get_i18n(<<"label">>, Struct, undefined, Options);
_ -> undefined
end,
case Label of
undefined -> Spec;
_ -> Spec#{summary => Label}
Spec#{description => Desc1}
end.
get_i18n(Tag, ?DESC(Namespace, Id), Default, Options) ->
@ -970,13 +960,13 @@ to_bin(List) when is_list(List) ->
to_bin(Boolean) when is_boolean(Boolean) -> Boolean;
to_bin(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8);
to_bin({Type, Args}) ->
unicode:characters_to_binary(io_lib:format("~ts(~p)", [Type, Args]));
unicode:characters_to_binary(io_lib:format("~ts-~p", [Type, Args]));
to_bin(X) ->
X.
parse_object(PropList = [_ | _], Module, Options) when is_list(PropList) ->
{Props, Required, Refs} = parse_object_loop(PropList, Module, Options),
Object = #{<<"type">> => object, <<"properties">> => Props},
Object = #{<<"type">> => object, <<"properties">> => fix_empty_props(Props)},
case Required of
[] -> {Object, Refs};
_ -> {maps:put(required, Required, Object), Refs}
@ -1012,7 +1002,10 @@ parse_object_loop([{Name, Hocon} | Rest], Module, Options, Props, Required, Refs
HoconType = hocon_schema:field_schema(Hocon, type),
Init0 = init_prop([default | ?DEFAULT_FIELDS], #{}, Hocon),
SchemaToSpec = schema_converter(Options),
Init = trans_desc(Init0, Hocon, SchemaToSpec, NameBin, Options),
Init = maps:remove(
summary,
trans_desc(Init0, Hocon, SchemaToSpec, NameBin, Options)
),
{Prop, Refs1} = SchemaToSpec(HoconType, Module),
NewRequiredAcc =
case is_required(Hocon) of
@ -1039,9 +1032,15 @@ parse_object_loop([{Name, Hocon} | Rest], Module, Options, Props, Required, Refs
%% return true if the field has 'importance' set to 'hidden'
is_hidden(Hocon) ->
hocon_schema:is_hidden(Hocon, #{include_importance_up_from => ?IMPORTANCE_LOW}).
is_required(Hocon) ->
hocon_schema:field_schema(Hocon, required) =:= true.
fix_empty_props([]) ->
#{};
fix_empty_props(Props) ->
Props.
content(ApiSpec) ->
content(ApiSpec, undefined).

View File

@ -20,7 +20,6 @@
-compile(export_all).
-include_lib("eunit/include/eunit.hrl").
-include("emqx_dashboard.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-define(NAME, 'https:dashboard').

View File

@ -25,7 +25,6 @@ all() ->
emqx_common_test_helpers:all(?MODULE).
init_per_suite(Config) ->
emqx_common_test_helpers:load_config(emqx_dashboard_schema, <<"dashboard {}">>),
emqx_mgmt_api_test_util:init_suite([emqx_conf]),
ok = change_i18n_lang(en),
Config.

View File

@ -30,24 +30,11 @@
all() -> emqx_common_test_helpers:all(?MODULE).
init_per_suite(Config) ->
mria:start(),
application:load(emqx_dashboard),
emqx_common_test_helpers:start_apps([emqx_conf, emqx_dashboard], fun set_special_configs/1),
_ = emqx_mgmt_api_test_util:init_suite([emqx_conf]),
Config.
set_special_configs(emqx_dashboard) ->
emqx_dashboard_api_test_helpers:set_default_config(),
ok;
set_special_configs(_) ->
ok.
end_per_suite(Config) ->
end_suite(),
Config.
end_suite() ->
application:unload(emqx_management),
emqx_common_test_helpers:stop_apps([emqx_dashboard]).
emqx_mgmt_api_test_util:end_suite([emqx_conf]).
t_object(_Config) ->
Spec = #{

View File

@ -55,7 +55,7 @@ t_start_no_session(_Config) ->
Opts = #{
clientinfo => #{
clientid => ?CLIENT_ID,
zone => internal
zone => default
},
conninfo => #{
clientid => ?CLIENT_ID,
@ -76,7 +76,7 @@ t_start_no_expire(_Config) ->
Opts = #{
clientinfo => #{
clientid => ?CLIENT_ID,
zone => internal
zone => default
},
conninfo => #{
clientid => ?CLIENT_ID,
@ -97,7 +97,7 @@ t_start_infinite_expire(_Config) ->
Opts = #{
clientinfo => #{
clientid => ?CLIENT_ID,
zone => internal
zone => default
},
conninfo => #{
clientid => ?CLIENT_ID,

View File

@ -369,6 +369,7 @@ schema("/clients/:clientid/keepalive") ->
put => #{
description => ?DESC(set_keepalive_seconds),
tags => ?TAGS,
hidden => true,
parameters => [{clientid, hoconsc:mk(binary(), #{in => path})}],
'requestBody' => hoconsc:mk(hoconsc:ref(?MODULE, keepalive)),
responses => #{

View File

@ -344,9 +344,7 @@ fields(bytes) ->
description => ?DESC(max_response_bytes),
in => query,
required => false,
default => 1000,
minimum => 0,
maximum => ?MAX_SINT32
default => 1000
}
)}
];

View File

@ -137,9 +137,14 @@ t_global_zone(_Config) ->
),
?assertEqual(lists:usort(ZonesKeys), lists:usort(maps:keys(Zones))),
?assertEqual(
emqx_config:get_zone_conf(no_default, [mqtt, max_qos_allowed]),
emqx_config:get_zone_conf(default, [mqtt, max_qos_allowed]),
emqx_utils_maps:deep_get([<<"mqtt">>, <<"max_qos_allowed">>], Zones)
),
?assertError(
{config_not_found, [zones, no_default, mqtt, max_qos_allowed]},
emqx_config:get_zone_conf(no_default, [mqtt, max_qos_allowed])
),
NewZones1 = emqx_utils_maps:deep_put([<<"mqtt">>, <<"max_qos_allowed">>], Zones, 1),
NewZones2 = emqx_utils_maps:deep_remove([<<"mqtt">>, <<"peer_cert_as_clientid">>], NewZones1),
{ok, #{<<"mqtt">> := Res}} = update_global_zone(NewZones2),
@ -151,7 +156,11 @@ t_global_zone(_Config) ->
},
Res
),
?assertEqual(1, emqx_config:get_zone_conf(no_default, [mqtt, max_qos_allowed])),
?assertEqual(1, emqx_config:get_zone_conf(default, [mqtt, max_qos_allowed])),
?assertError(
{config_not_found, [zones, no_default, mqtt, max_qos_allowed]},
emqx_config:get_zone_conf(no_default, [mqtt, max_qos_allowed])
),
%% Make sure the override config is updated, and remove the default value.
?assertMatch(#{<<"max_qos_allowed">> := 1}, read_conf(<<"mqtt">>)),

View File

@ -30,7 +30,6 @@ init_suite(Apps, SetConfigs) when is_function(SetConfigs) ->
init_suite(Apps, SetConfigs, #{}).
init_suite(Apps, SetConfigs, Opts) ->
mria:start(),
application:load(emqx_management),
emqx_common_test_helpers:start_apps(Apps ++ [emqx_dashboard], SetConfigs, Opts),
emqx_common_test_http:create_default_app().
@ -40,9 +39,8 @@ end_suite() ->
end_suite(Apps) ->
emqx_common_test_http:delete_default_app(),
application:unload(emqx_management),
emqx_common_test_helpers:stop_apps(Apps ++ [emqx_dashboard]),
emqx_config:delete_override_conf_files(),
application:unload(emqx_management),
ok.
set_special_configs(emqx_dashboard) ->

View File

@ -2,7 +2,7 @@
{application, emqx_prometheus, [
{description, "Prometheus for EMQX"},
% strict semver, bump manually!
{vsn, "5.0.11"},
{vsn, "5.0.12"},
{modules, []},
{registered, [emqx_prometheus_sup]},
{applications, [kernel, stdlib, prometheus, emqx, emqx_management]},

View File

@ -59,7 +59,7 @@ fields("prometheus") ->
?HOCON(
list({string(), string()}),
#{
default => #{},
default => [],
required => false,
converter => fun ?MODULE:convert_headers/1,
desc => ?DESC(headers)
@ -149,6 +149,8 @@ fields("prometheus") ->
desc("prometheus") -> ?DESC(prometheus);
desc(_) -> undefined.
convert_headers(<<>>) ->
[];
convert_headers(Headers) when is_map(Headers) ->
maps:fold(
fun(K, V, Acc) ->

View File

@ -146,21 +146,25 @@ get_action_mod_func(ActionFunc) when is_binary(ActionFunc) ->
try binary_to_existing_atom(Bin) of
Atom -> Atom
catch
error:badarg -> error({unknown_action_function, ActionFunc})
error:badarg -> validation_error(unknown_action_function)
end
end,
case string:split(ActionFunc, ":", all) of
[Func1] -> {emqx_rule_actions, ToAtom(Func1)};
[Mod1, Func1] -> {ToAtom(Mod1), ToAtom(Func1)};
_ -> error({invalid_action_function, ActionFunc})
_ -> validation_error(invalid_action_function)
end.
assert_function_supported(Mod, Func) ->
case erlang:function_exported(Mod, Func, 3) of
true -> ok;
false -> error({action_function_not_supported, Func})
false -> validation_error(action_function_not_supported)
end.
-spec validation_error(any()) -> no_return().
validation_error(Reason) ->
throw(#{kind => validation_error, reason => Reason}).
pre_process_args(Mod, Func, Args) ->
case erlang:function_exported(Mod, pre_process_action_args, 2) of
true -> Mod:pre_process_action_args(Func, Args);

View File

@ -36,7 +36,6 @@
-export([
create_rule/1,
insert_rule/1,
update_rule/1,
delete_rule/1,
get_rule/1
@ -116,25 +115,30 @@ start_link() ->
post_config_update(_, _Req, NewRules, OldRules, _AppEnvs) ->
#{added := Added, removed := Removed, changed := Updated} =
emqx_utils_maps:diff_maps(NewRules, OldRules),
maps_foreach(
fun({Id, {_Old, New}}) ->
{ok, _} = update_rule(New#{id => bin(Id)})
end,
Updated
),
maps_foreach(
fun({Id, _Rule}) ->
ok = delete_rule(bin(Id))
end,
Removed
),
maps_foreach(
fun({Id, Rule}) ->
{ok, _} = create_rule(Rule#{id => bin(Id)})
end,
Added
),
{ok, get_rules()}.
try
maps_foreach(
fun({Id, {_Old, New}}) ->
{ok, _} = update_rule(New#{id => bin(Id)})
end,
Updated
),
maps_foreach(
fun({Id, _Rule}) ->
ok = delete_rule(bin(Id))
end,
Removed
),
maps_foreach(
fun({Id, Rule}) ->
{ok, _} = create_rule(Rule#{id => bin(Id)})
end,
Added
),
{ok, get_rules()}
catch
throw:#{kind := _} = Error ->
{error, Error}
end.
%%------------------------------------------------------------------------------
%% APIs for rules
@ -154,7 +158,7 @@ load_rules() ->
-spec create_rule(map()) -> {ok, rule()} | {error, term()}.
create_rule(Params) ->
create_rule(Params, now_ms()).
create_rule(Params, maps:get(created_at, Params, now_ms())).
create_rule(Params = #{id := RuleId}, CreatedAt) when is_binary(RuleId) ->
case get_rule(RuleId) of
@ -451,8 +455,9 @@ parse_actions(Actions) ->
do_parse_action(Action) when is_map(Action) ->
emqx_rule_actions:parse_action(Action);
do_parse_action(BridgeChannelId) when is_binary(BridgeChannelId) ->
BridgeChannelId.
do_parse_action(BridgeId) when is_binary(BridgeId) ->
{Type, Name} = emqx_bridge_resource:parse_bridge_id(BridgeId),
{bridge, Type, Name, emqx_bridge_resource:resource_id(Type, Name)}.
get_all_records(Tab) ->
[Rule#{id => Id} || {Id, Rule} <- ets:tab2list(Tab)].
@ -484,7 +489,8 @@ contains_actions(Actions, Mod0, Func0) ->
).
forwards_to_bridge(Actions, BridgeId) ->
lists:any(fun(A) -> A =:= BridgeId end, Actions).
Action = do_parse_action(BridgeId),
lists:any(fun(A) -> A =:= Action end, Actions).
references_ingress_bridge(Froms, BridgeId) ->
lists:member(
@ -506,4 +512,7 @@ get_referenced_hookpoints(Froms) ->
].
get_egress_bridges(Actions) ->
lists:filter(fun is_binary/1, Actions).
[
emqx_bridge_resource:bridge_id(BridgeType, BridgeName)
|| {bridge, BridgeType, BridgeName, _ResId} <- Actions
].

View File

@ -515,13 +515,13 @@ format_datetime(Timestamp, Unit) ->
format_action(Actions) ->
[do_format_action(Act) || Act <- Actions].
do_format_action({bridge, BridgeType, BridgeName, _ResId}) ->
emqx_bridge_resource:bridge_id(BridgeType, BridgeName);
do_format_action(#{mod := Mod, func := Func, args := Args}) ->
#{
function => printable_function_name(Mod, Func),
args => maps:remove(preprocessed_tmpl, Args)
};
do_format_action(BridgeChannelId) when is_binary(BridgeChannelId) ->
BridgeChannelId.
}.
printable_function_name(emqx_rule_actions, Func) ->
Func;

View File

@ -17,7 +17,6 @@
-module(emqx_rule_runtime).
-include("rule_engine.hrl").
-include_lib("emqx/include/emqx.hrl").
-include_lib("emqx/include/logger.hrl").
-include_lib("emqx_resource/include/emqx_resource_errors.hrl").
@ -50,8 +49,6 @@
iolist_to_binary(io_lib:format("_v_~ts_~p_~p", [TYPE, NAME, erlang:system_time()]))
).
-define(ActionMaxRetry, 3).
%%------------------------------------------------------------------------------
%% Apply rules
%%------------------------------------------------------------------------------
@ -348,10 +345,14 @@ handle_action(RuleId, ActId, Selected, Envs) ->
})
end.
do_handle_action(BridgeId, Selected, _Envs) when is_binary(BridgeId) ->
?TRACE("BRIDGE", "bridge_action", #{bridge_id => BridgeId}),
case emqx_bridge:send_message(BridgeId, Selected) of
{error, {Err, _}} when Err == bridge_not_found; Err == bridge_stopped ->
do_handle_action({bridge, BridgeType, BridgeName, ResId}, Selected, _Envs) ->
?TRACE(
"BRIDGE",
"bridge_action",
#{bridge_id => emqx_bridge_resource:bridge_id(BridgeType, BridgeName)}
),
case emqx_bridge:send_message(BridgeType, BridgeName, ResId, Selected) of
{error, Reason} when Reason == bridge_not_found; Reason == bridge_stopped ->
throw(out_of_service);
Result ->
Result

View File

@ -250,14 +250,14 @@ t_kv_store(_) ->
t_add_get_remove_rule(_Config) ->
RuleId0 = <<"rule-debug-0">>,
ok = emqx_rule_engine:insert_rule(make_simple_rule(RuleId0)),
ok = create_rule(make_simple_rule(RuleId0)),
?assertMatch({ok, #{id := RuleId0}}, emqx_rule_engine:get_rule(RuleId0)),
ok = delete_rule(RuleId0),
?assertEqual(not_found, emqx_rule_engine:get_rule(RuleId0)),
RuleId1 = <<"rule-debug-1">>,
Rule1 = make_simple_rule(RuleId1),
ok = emqx_rule_engine:insert_rule(Rule1),
ok = create_rule(Rule1),
?assertMatch({ok, #{id := RuleId1}}, emqx_rule_engine:get_rule(RuleId1)),
ok = delete_rule(Rule1),
?assertEqual(not_found, emqx_rule_engine:get_rule(RuleId1)),
@ -265,7 +265,7 @@ t_add_get_remove_rule(_Config) ->
t_add_get_remove_rules(_Config) ->
delete_rules_by_ids([Id || #{id := Id} <- emqx_rule_engine:get_rules()]),
ok = insert_rules(
ok = create_rules(
[
make_simple_rule(<<"rule-debug-1">>),
make_simple_rule(<<"rule-debug-2">>)
@ -294,7 +294,7 @@ t_create_existing_rule(_Config) ->
t_get_rules_for_topic(_Config) ->
Len0 = length(emqx_rule_engine:get_rules_for_topic(<<"simple/topic">>)),
ok = insert_rules(
ok = create_rules(
[
make_simple_rule(<<"rule-debug-1">>),
make_simple_rule(<<"rule-debug-2">>)
@ -305,12 +305,12 @@ t_get_rules_for_topic(_Config) ->
ok.
t_get_rules_ordered_by_ts(_Config) ->
Now = fun() -> erlang:system_time(nanosecond) end,
ok = insert_rules(
Now = erlang:system_time(microsecond),
ok = create_rules(
[
make_simple_rule_with_ts(<<"rule-debug-0">>, Now()),
make_simple_rule_with_ts(<<"rule-debug-1">>, Now()),
make_simple_rule_with_ts(<<"rule-debug-2">>, Now())
make_simple_rule_with_ts(<<"rule-debug-0">>, Now + 1),
make_simple_rule_with_ts(<<"rule-debug-1">>, Now + 2),
make_simple_rule_with_ts(<<"rule-debug-2">>, Now + 3)
]
),
?assertMatch(
@ -324,23 +324,19 @@ t_get_rules_ordered_by_ts(_Config) ->
t_get_rules_for_topic_2(_Config) ->
Len0 = length(emqx_rule_engine:get_rules_for_topic(<<"simple/1">>)),
ok = insert_rules(
ok = create_rules(
[
make_simple_rule(<<"rule-debug-1">>, <<"select * from \"simple/#\"">>, [<<"simple/#">>]),
make_simple_rule(<<"rule-debug-2">>, <<"select * from \"simple/+\"">>, [<<"simple/+">>]),
make_simple_rule(<<"rule-debug-3">>, <<"select * from \"simple/+/1\"">>, [
<<"simple/+/1">>
]),
make_simple_rule(<<"rule-debug-4">>, <<"select * from \"simple/1\"">>, [<<"simple/1">>]),
make_simple_rule(<<"rule-debug-1">>, _1 = <<"select * from \"simple/#\"">>),
make_simple_rule(<<"rule-debug-2">>, _2 = <<"select * from \"simple/+\"">>),
make_simple_rule(<<"rule-debug-3">>, <<"select * from \"simple/+/1\"">>),
make_simple_rule(<<"rule-debug-4">>, _3 = <<"select * from \"simple/1\"">>),
make_simple_rule(
<<"rule-debug-5">>,
<<"select * from \"simple/2,simple/+,simple/3\"">>,
[<<"simple/2">>, <<"simple/+">>, <<"simple/3">>]
_4 = <<"select * from \"simple/2\", \"simple/+\", \"simple/3\"">>
),
make_simple_rule(
<<"rule-debug-6">>,
<<"select * from \"simple/2,simple/3,simple/4\"">>,
[<<"simple/2">>, <<"simple/3">>, <<"simple/4">>]
<<"select * from \"simple/2\", \"simple/3\", \"simple/4\"">>
)
]
),
@ -367,52 +363,44 @@ t_get_rules_with_same_event(_Config) ->
?assertEqual([], emqx_rule_engine:get_rules_with_same_event(<<"$events/message_delivered">>)),
?assertEqual([], emqx_rule_engine:get_rules_with_same_event(<<"$events/message_acked">>)),
?assertEqual([], emqx_rule_engine:get_rules_with_same_event(<<"$events/message_dropped">>)),
ok = insert_rules(
ok = create_rules(
[
make_simple_rule(<<"r1">>, <<"select * from \"simple/#\"">>, [<<"simple/#">>]),
make_simple_rule(<<"r2">>, <<"select * from \"abc/+\"">>, [<<"abc/+">>]),
make_simple_rule(<<"r1">>, <<"select * from \"simple/#\"">>),
make_simple_rule(<<"r2">>, <<"select * from \"abc/+\"">>),
make_simple_rule(
<<"r3">>,
<<"select * from \"$events/client_connected\"">>,
[<<"$events/client_connected">>]
<<"select * from \"$events/client_connected\"">>
),
make_simple_rule(
<<"r4">>,
<<"select * from \"$events/client_disconnected\"">>,
[<<"$events/client_disconnected">>]
<<"select * from \"$events/client_disconnected\"">>
),
make_simple_rule(
<<"r5">>,
<<"select * from \"$events/session_subscribed\"">>,
[<<"$events/session_subscribed">>]
<<"select * from \"$events/session_subscribed\"">>
),
make_simple_rule(
<<"r6">>,
<<"select * from \"$events/session_unsubscribed\"">>,
[<<"$events/session_unsubscribed">>]
<<"select * from \"$events/session_unsubscribed\"">>
),
make_simple_rule(
<<"r7">>,
<<"select * from \"$events/message_delivered\"">>,
[<<"$events/message_delivered">>]
<<"select * from \"$events/message_delivered\"">>
),
make_simple_rule(
<<"r8">>,
<<"select * from \"$events/message_acked\"">>,
[<<"$events/message_acked">>]
<<"select * from \"$events/message_acked\"">>
),
make_simple_rule(
<<"r9">>,
<<"select * from \"$events/message_dropped\"">>,
[<<"$events/message_dropped">>]
<<"select * from \"$events/message_dropped\"">>
),
make_simple_rule(
<<"r10">>,
<<
"select * from \"t/1, "
"$events/session_subscribed, $events/client_connected\""
>>,
[<<"t/1">>, <<"$events/session_subscribed">>, <<"$events/client_connected">>]
"select * from \"t/1\", "
"\"$events/session_subscribed\", \"$events/client_connected\""
>>
)
]
),
@ -455,23 +443,18 @@ t_get_rules_with_same_event(_Config) ->
t_get_rule_ids_by_action(_) ->
ID = <<"t_get_rule_ids_by_action">>,
Rule1 = #{
enable => false,
id => ID,
sql => <<"SELECT * FROM \"t\"">>,
from => [<<"t">>],
fields => [<<"*">>],
is_foreach => false,
conditions => {},
actions => [
#{mod => emqx_rule_actions, func => console, args => #{}},
#{mod => emqx_rule_actions, func => republish, args => #{}},
#{function => console, args => #{}},
#{function => republish, args => #{}},
<<"mqtt:my_mqtt_bridge">>,
<<"mysql:foo">>
],
description => ID,
created_at => erlang:system_time(millisecond)
},
ok = insert_rules([Rule1]),
ok = create_rules([Rule1]),
?assertMatch(
[ID],
emqx_rule_engine:get_rule_ids_by_action(#{function => <<"emqx_rule_actions:console">>})
@ -2834,26 +2817,20 @@ republish_action(Topic, Payload, UserProperties) ->
make_simple_rule_with_ts(RuleId, Ts) when is_binary(RuleId) ->
SQL = <<"select * from \"simple/topic\"">>,
Topics = [<<"simple/topic">>],
make_simple_rule(RuleId, SQL, Topics, Ts).
make_simple_rule(RuleId, SQL, Ts).
make_simple_rule(RuleId) when is_binary(RuleId) ->
SQL = <<"select * from \"simple/topic\"">>,
Topics = [<<"simple/topic">>],
make_simple_rule(RuleId, SQL, Topics).
make_simple_rule(RuleId, SQL).
make_simple_rule(RuleId, SQL, Topics) when is_binary(RuleId) ->
make_simple_rule(RuleId, SQL, Topics, erlang:system_time(millisecond)).
make_simple_rule(RuleId, SQL) when is_binary(RuleId) ->
make_simple_rule(RuleId, SQL, erlang:system_time(millisecond)).
make_simple_rule(RuleId, SQL, Topics, Ts) when is_binary(RuleId) ->
make_simple_rule(RuleId, SQL, Ts) when is_binary(RuleId) ->
#{
id => RuleId,
sql => SQL,
from => Topics,
fields => [<<"*">>],
is_foreach => false,
conditions => {},
actions => [#{mod => emqx_rule_actions, func => console, args => #{}}],
actions => [#{function => console, args => #{}}],
description => <<"simple rule">>,
created_at => Ts
}.
@ -3233,13 +3210,12 @@ deps_path(App, RelativePath) ->
local_path(RelativePath) ->
deps_path(emqx_rule_engine, RelativePath).
insert_rules(Rules) ->
lists:foreach(
fun(Rule) ->
ok = emqx_rule_engine:insert_rule(Rule)
end,
Rules
).
create_rules(Rules) ->
lists:foreach(fun create_rule/1, Rules).
create_rule(Rule) ->
{ok, _} = emqx_rule_engine:create_rule(Rule),
ok.
delete_rules_by_ids(Ids) ->
lists:foreach(

View File

@ -0,0 +1,8 @@
Refactored some bridges so that resources are no longer leaked when a crash occurs during creation, including:
- TDEngine
- WebHook
- LDAP
- MongoDB
- MySQL
- PostgreSQL
- Redis
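
A minimal sketch of the shared pattern behind these fixes, based on the connector diffs above (build_pool_options/1 is a simplified placeholder, not the actual code): the pool name is registered with emqx_resource:allocate_resource/3 before the pool is started, and on_stop/2 frees whatever was actually allocated instead of relying on the connector state.

%% Sketch only: build_pool_options/1 stands in for the real option setup.
on_start(InstId, Config) ->
    Options = build_pool_options(Config),
    %% Record the pool name up front so it can be cleaned up even if the
    %% pool start (or anything after it) crashes.
    ok = emqx_resource:allocate_resource(InstId, pool_name, InstId),
    case emqx_resource_pool:start(InstId, ?MODULE, Options) of
        ok -> {ok, #{pool_name => InstId}};
        {error, Reason} -> {error, Reason}
    end.

on_stop(InstId, _State) ->
    %% Look up the allocated resources instead of trusting the state, which
    %% may be incomplete if creation crashed halfway through.
    case emqx_resource:get_allocated_resources(InstId) of
        #{pool_name := PoolName} -> emqx_resource_pool:stop(PoolName);
        _ -> ok
    end.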

View File

@ -0,0 +1,2 @@
Reduced the overhead of reading configs per zone.

View File

@ -102,7 +102,6 @@ end_per_group(_Group, _Config) ->
ok.
init_per_suite(Config) ->
emqx_common_test_helpers:clear_screen(),
Config.
end_per_suite(_Config) ->