diff --git a/.ci/docker-compose-file/.env b/.ci/docker-compose-file/.env index bd925e224..3b00b454f 100644 --- a/.ci/docker-compose-file/.env +++ b/.ci/docker-compose-file/.env @@ -1,9 +1,15 @@ MYSQL_TAG=8 -REDIS_TAG=6 +REDIS_TAG=7.0 MONGO_TAG=5 PGSQL_TAG=13 LDAP_TAG=2.4.50 INFLUXDB_TAG=2.5.0 +TDENGINE_TAG=3.0.2.4 +DYNAMO_TAG=1.21.0 +CASSANDRA_TAG=3.11.6 +OPENTS_TAG=9aa7f88 + +MS_IMAGE_ADDR=mcr.microsoft.com/mssql/server +SQLSERVER_TAG=2019-CU19-ubuntu-20.04 TARGET=emqx/emqx -EMQX_TAG=build-alpine-amd64 diff --git a/.ci/docker-compose-file/Makefile.local b/.ci/docker-compose-file/Makefile.local deleted file mode 100644 index ff4f348b0..000000000 --- a/.ci/docker-compose-file/Makefile.local +++ /dev/null @@ -1,64 +0,0 @@ -.PHONY: help up down ct ct-all bash run - -define usage -make -f .ci/docker-compose-file/Makefile.local up -make -f .ci/docker-compose-file/Makefile.local ct CONTAINER=erlang SUITE=apps/emqx_authn/test/emqx_authn_mnesia_SUITE.erl -make -f .ci/docker-compose-file/Makefile.local down -endef -export usage - -help: - @echo "$$usage" - -up: - env \ - MYSQL_TAG=8 \ - REDIS_TAG=6 \ - MONGO_TAG=5 \ - PGSQL_TAG=13 \ - docker compose \ - -f .ci/docker-compose-file/docker-compose.yaml \ - -f .ci/docker-compose-file/docker-compose-mongo-single-tcp.yaml \ - -f .ci/docker-compose-file/docker-compose-mongo-single-tls.yaml \ - -f .ci/docker-compose-file/docker-compose-mysql-tcp.yaml \ - -f .ci/docker-compose-file/docker-compose-mysql-tls.yaml \ - -f .ci/docker-compose-file/docker-compose-pgsql-tcp.yaml \ - -f .ci/docker-compose-file/docker-compose-pgsql-tls.yaml \ - -f .ci/docker-compose-file/docker-compose-redis-single-tcp.yaml \ - -f .ci/docker-compose-file/docker-compose-redis-single-tls.yaml \ - -f .ci/docker-compose-file/docker-compose-redis-sentinel-tcp.yaml \ - -f .ci/docker-compose-file/docker-compose-redis-sentinel-tls.yaml \ - -f .ci/docker-compose-file/docker-compose-redis-cluster-tcp.yaml \ - -f .ci/docker-compose-file/docker-compose-redis-cluster-tls.yaml \ - -f .ci/docker-compose-file/docker-compose-toxiproxy.yaml \ - up -d --build --remove-orphans - -down: - docker compose \ - -f .ci/docker-compose-file/docker-compose.yaml \ - -f .ci/docker-compose-file/docker-compose-mongo-single-tcp.yaml \ - -f .ci/docker-compose-file/docker-compose-mongo-single-tls.yaml \ - -f .ci/docker-compose-file/docker-compose-mysql-tcp.yaml \ - -f .ci/docker-compose-file/docker-compose-mysql-tls.yaml \ - -f .ci/docker-compose-file/docker-compose-pgsql-tcp.yaml \ - -f .ci/docker-compose-file/docker-compose-pgsql-tls.yaml \ - -f .ci/docker-compose-file/docker-compose-redis-single-tcp.yaml \ - -f .ci/docker-compose-file/docker-compose-redis-single-tls.yaml \ - -f .ci/docker-compose-file/docker-compose-redis-sentinel-tcp.yaml \ - -f .ci/docker-compose-file/docker-compose-redis-sentinel-tls.yaml \ - -f .ci/docker-compose-file/docker-compose-redis-cluster-tcp.yaml \ - -f .ci/docker-compose-file/docker-compose-redis-cluster-tls.yaml \ - -f .ci/docker-compose-file/docker-compose-toxiproxy.yaml \ - down --remove-orphans - -ct: - docker exec -i "$(CONTAINER)" bash -c "rebar3 ct --name 'test@127.0.0.1' -v --suite $(SUITE)" - -ct-all: - docker exec -i "$(CONTAINER)" bash -c "make ct" - -bash: - docker exec -it "$(CONTAINER)" bash - -run: - docker exec -it "$(CONTAINER)" bash -c "make run"; diff --git a/.ci/docker-compose-file/cassandra/Dockerfile b/.ci/docker-compose-file/cassandra/Dockerfile new file mode 100644 index 000000000..f974c1b6f --- /dev/null +++ b/.ci/docker-compose-file/cassandra/Dockerfile @@ 
-0,0 +1,4 @@ +ARG CASSANDRA_TAG=3.11.6 +FROM cassandra:${CASSANDRA_TAG} +COPY cassandra.yaml /etc/cassandra/cassandra.yaml +CMD ["cassandra", "-f"] diff --git a/.ci/docker-compose-file/cassandra/cassandra.yaml b/.ci/docker-compose-file/cassandra/cassandra.yaml new file mode 100644 index 000000000..bc1bf3357 --- /dev/null +++ b/.ci/docker-compose-file/cassandra/cassandra.yaml @@ -0,0 +1,1236 @@ +# Cassandra storage config YAML + +# NOTE: +# See http://wiki.apache.org/cassandra/StorageConfiguration for +# full explanations of configuration directives +# /NOTE + +# The name of the cluster. This is mainly used to prevent machines in +# one logical cluster from joining another. +cluster_name: 'Test Cluster' + +# This defines the number of tokens randomly assigned to this node on the ring +# The more tokens, relative to other nodes, the larger the proportion of data +# that this node will store. You probably want all nodes to have the same number +# of tokens assuming they have equal hardware capability. +# +# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, +# and will use the initial_token as described below. +# +# Specifying initial_token will override this setting on the node's initial start, +# on subsequent starts, this setting will apply even if initial token is set. +# +# If you already have a cluster with 1 token per node, and wish to migrate to +# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations +num_tokens: 256 + +# Triggers automatic allocation of num_tokens tokens for this node. The allocation +# algorithm attempts to choose tokens in a way that optimizes replicated load over +# the nodes in the datacenter for the replication strategy used by the specified +# keyspace. +# +# The load assigned to each node will be close to proportional to its number of +# vnodes. +# +# Only supported with the Murmur3Partitioner. +# allocate_tokens_for_keyspace: KEYSPACE + +# initial_token allows you to specify tokens manually. While you can use it with +# vnodes (num_tokens > 1, above) -- in which case you should provide a +# comma-separated list -- it's primarily used when adding nodes to legacy clusters +# that do not have vnodes enabled. +# initial_token: + +# See http://wiki.apache.org/cassandra/HintedHandoff +# May either be "true" or "false" to enable globally +hinted_handoff_enabled: true + +# When hinted_handoff_enabled is true, a black list of data centers that will not +# perform hinted handoff +# hinted_handoff_disabled_datacenters: +# - DC1 +# - DC2 + +# this defines the maximum amount of time a dead host will have hints +# generated. After it has been dead this long, new hints for it will not be +# created until it has been seen alive and gone down again. +max_hint_window_in_ms: 10800000 # 3 hours + +# Maximum throttle in KBs per second, per delivery thread. This will be +# reduced proportionally to the number of nodes in the cluster. (If there +# are two nodes in the cluster, each delivery thread will use the maximum +# rate; if there are three, each will throttle to half of the maximum, +# since we expect two nodes to be delivering hints simultaneously.) +hinted_handoff_throttle_in_kb: 1024 + +# Number of threads with which to deliver hints; +# Consider increasing this number when you have multi-dc deployments, since +# cross-dc handoff tends to be slower +max_hints_delivery_threads: 2 + +# Directory where Cassandra should store hints. +# If not set, the default directory is $CASSANDRA_HOME/data/hints. 
+# hints_directory: /var/lib/cassandra/hints + +# How often hints should be flushed from the internal buffers to disk. +# Will *not* trigger fsync. +hints_flush_period_in_ms: 10000 + +# Maximum size for a single hints file, in megabytes. +max_hints_file_size_in_mb: 128 + +# Compression to apply to the hint files. If omitted, hints files +# will be written uncompressed. LZ4, Snappy, and Deflate compressors +# are supported. +#hints_compression: +# - class_name: LZ4Compressor +# parameters: +# - + +# Maximum throttle in KBs per second, total. This will be +# reduced proportionally to the number of nodes in the cluster. +batchlog_replay_throttle_in_kb: 1024 + +# Authentication backend, implementing IAuthenticator; used to identify users +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, +# PasswordAuthenticator}. +# +# - AllowAllAuthenticator performs no checks - set it to disable authentication. +# - PasswordAuthenticator relies on username/password pairs to authenticate +# users. It keeps usernames and hashed passwords in system_auth.roles table. +# Please increase system_auth keyspace replication factor if you use this authenticator. +# If using PasswordAuthenticator, CassandraRoleManager must also be used (see below) +authenticator: PasswordAuthenticator + +# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, +# CassandraAuthorizer}. +# +# - AllowAllAuthorizer allows any action to any user - set it to disable authorization. +# - CassandraAuthorizer stores permissions in system_auth.role_permissions table. Please +# increase system_auth keyspace replication factor if you use this authorizer. +authorizer: AllowAllAuthorizer + +# Part of the Authentication & Authorization backend, implementing IRoleManager; used +# to maintain grants and memberships between roles. +# Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager, +# which stores role information in the system_auth keyspace. Most functions of the +# IRoleManager require an authenticated login, so unless the configured IAuthenticator +# actually implements authentication, most of this functionality will be unavailable. +# +# - CassandraRoleManager stores role data in the system_auth keyspace. Please +# increase system_auth keyspace replication factor if you use this role manager. +role_manager: CassandraRoleManager + +# Validity period for roles cache (fetching granted roles can be an expensive +# operation depending on the role manager, CassandraRoleManager is one example) +# Granted roles are cached for authenticated sessions in AuthenticatedUser and +# after the period specified here, become eligible for (async) reload. +# Defaults to 2000, set to 0 to disable caching entirely. +# Will be disabled automatically for AllowAllAuthenticator. +roles_validity_in_ms: 2000 + +# Refresh interval for roles cache (if enabled). +# After this interval, cache entries become eligible for refresh. Upon next +# access, an async reload is scheduled and the old value returned until it +# completes. If roles_validity_in_ms is non-zero, then this must be +# also. +# Defaults to the same value as roles_validity_in_ms. +# roles_update_interval_in_ms: 2000 + +# Validity period for permissions cache (fetching permissions can be an +# expensive operation depending on the authorizer, CassandraAuthorizer is +# one example). Defaults to 2000, set to 0 to disable. 
+# Will be disabled automatically for AllowAllAuthorizer. +permissions_validity_in_ms: 2000 + +# Refresh interval for permissions cache (if enabled). +# After this interval, cache entries become eligible for refresh. Upon next +# access, an async reload is scheduled and the old value returned until it +# completes. If permissions_validity_in_ms is non-zero, then this must be +# also. +# Defaults to the same value as permissions_validity_in_ms. +# permissions_update_interval_in_ms: 2000 + +# Validity period for credentials cache. This cache is tightly coupled to +# the provided PasswordAuthenticator implementation of IAuthenticator. If +# another IAuthenticator implementation is configured, this cache will not +# be automatically used and so the following settings will have no effect. +# Please note, credentials are cached in their encrypted form, so while +# activating this cache may reduce the number of queries made to the +# underlying table, it may not bring a significant reduction in the +# latency of individual authentication attempts. +# Defaults to 2000, set to 0 to disable credentials caching. +credentials_validity_in_ms: 2000 + +# Refresh interval for credentials cache (if enabled). +# After this interval, cache entries become eligible for refresh. Upon next +# access, an async reload is scheduled and the old value returned until it +# completes. If credentials_validity_in_ms is non-zero, then this must be +# also. +# Defaults to the same value as credentials_validity_in_ms. +# credentials_update_interval_in_ms: 2000 + +# The partitioner is responsible for distributing groups of rows (by +# partition key) across nodes in the cluster. You should leave this +# alone for new clusters. The partitioner can NOT be changed without +# reloading all data, so when upgrading you should set this to the +# same partitioner you were already using. +# +# Besides Murmur3Partitioner, partitioners included for backwards +# compatibility include RandomPartitioner, ByteOrderedPartitioner, and +# OrderPreservingPartitioner. +# +partitioner: org.apache.cassandra.dht.Murmur3Partitioner + +# Directories where Cassandra should store data on disk. Cassandra +# will spread data evenly across them, subject to the granularity of +# the configured compaction strategy. +# If not set, the default directory is $CASSANDRA_HOME/data/data. +data_file_directories: + - /var/lib/cassandra/data + +# commit log. when running on magnetic HDD, this should be a +# separate spindle than the data directories. +# If not set, the default directory is $CASSANDRA_HOME/data/commitlog. +commitlog_directory: /var/lib/cassandra/commitlog + +# Enable / disable CDC functionality on a per-node basis. This modifies the logic used +# for write path allocation rejection (standard: never reject. cdc: reject Mutation +# containing a CDC-enabled table if at space limit in cdc_raw_directory). +cdc_enabled: false + +# CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the +# segment contains mutations for a CDC-enabled table. This should be placed on a +# separate spindle than the data directories. If not set, the default directory is +# $CASSANDRA_HOME/data/cdc_raw. +# cdc_raw_directory: /var/lib/cassandra/cdc_raw + +# Policy for data disk failures: +# +# die +# shut down gossip and client transports and kill the JVM for any fs errors or +# single-sstable errors, so the node can be replaced. 
+# +# stop_paranoid +# shut down gossip and client transports even for single-sstable errors, +# kill the JVM for errors during startup. +# +# stop +# shut down gossip and client transports, leaving the node effectively dead, but +# can still be inspected via JMX, kill the JVM for errors during startup. +# +# best_effort +# stop using the failed disk and respond to requests based on +# remaining available sstables. This means you WILL see obsolete +# data at CL.ONE! +# +# ignore +# ignore fatal errors and let requests fail, as in pre-1.2 Cassandra +disk_failure_policy: stop + +# Policy for commit disk failures: +# +# die +# shut down gossip and Thrift and kill the JVM, so the node can be replaced. +# +# stop +# shut down gossip and Thrift, leaving the node effectively dead, but +# can still be inspected via JMX. +# +# stop_commit +# shutdown the commit log, letting writes collect but +# continuing to service reads, as in pre-2.0.5 Cassandra +# +# ignore +# ignore fatal errors and let the batches fail +commit_failure_policy: stop + +# Maximum size of the native protocol prepared statement cache +# +# Valid values are either "auto" (omitting the value) or a value greater than 0. +# +# Note that specifying too large a value will result in long-running GCs and possibly +# out-of-memory errors. Keep the value at a small fraction of the heap. +# +# If you constantly see "prepared statements discarded in the last minute because +# cache limit reached" messages, the first step is to investigate the root cause +# of these messages and check whether prepared statements are used correctly - +# i.e. use bind markers for variable parts. +# +# Only change the default value if you really have more prepared statements than +# fit in the cache. In most cases it is not necessary to change this value. +# Constantly re-preparing statements is a performance penalty. +# +# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater +prepared_statements_cache_size_mb: + +# Maximum size of the Thrift prepared statement cache +# +# If you do not use Thrift at all, it is safe to leave this value at "auto". +# +# See description of 'prepared_statements_cache_size_mb' above for more information. +# +# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater +thrift_prepared_statements_cache_size_mb: + +# Maximum size of the key cache in memory. +# +# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the +# minimum, sometimes more. The key cache is fairly tiny for the amount of +# time it saves, so it's worthwhile to use it at large numbers. +# The row cache saves even more time, but must contain the entire row, +# so it is extremely space-intensive. It's best to only use the +# row cache if you have hot rows or static rows. +# +# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup. +# +# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache. +key_cache_size_in_mb: + +# Duration in seconds after which Cassandra should +# save the key cache. Caches are saved to saved_caches_directory as +# specified in this configuration file. +# +# Saved caches greatly improve cold-start speeds, and are relatively cheap in +# terms of I/O for the key cache. Row cache saving is much more expensive and +# has limited use. +# +# Default is 14400 or 4 hours.
+key_cache_save_period: 14400 + +# Number of keys from the key cache to save +# Disabled by default, meaning all keys are going to be saved +# key_cache_keys_to_save: 100 + +# Row cache implementation class name. Available implementations: +# +# org.apache.cassandra.cache.OHCProvider +# Fully off-heap row cache implementation (default). +# +# org.apache.cassandra.cache.SerializingCacheProvider +# This is the row cache implementation available +# in previous releases of Cassandra. +# row_cache_class_name: org.apache.cassandra.cache.OHCProvider + +# Maximum size of the row cache in memory. +# Please note that OHC cache implementation requires some additional off-heap memory to manage +# the map structures and some in-flight memory during operations before/after cache entries can be +# accounted against the cache capacity. This overhead is usually small compared to the whole capacity. +# Do not specify more memory than the system can afford in the worst usual situation and leave some +# headroom for OS block level cache. Never allow your system to swap. +# +# Default value is 0, to disable row caching. +row_cache_size_in_mb: 0 + +# Duration in seconds after which Cassandra should save the row cache. +# Caches are saved to saved_caches_directory as specified in this configuration file. +# +# Saved caches greatly improve cold-start speeds, and are relatively cheap in +# terms of I/O for the key cache. Row cache saving is much more expensive and +# has limited use. +# +# Default is 0 to disable saving the row cache. +row_cache_save_period: 0 + +# Number of keys from the row cache to save. +# Specify 0 (which is the default), meaning all keys are going to be saved +# row_cache_keys_to_save: 100 + +# Maximum size of the counter cache in memory. +# +# Counter cache helps to reduce counter locks' contention for hot counter cells. +# In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before +# write entirely. With RF > 1 a counter cache hit will still help to reduce the duration +# of the lock hold, helping with hot counter cell updates, but will not allow skipping +# the read entirely. Only the local (clock, count) tuple of a counter cell is kept +# in memory, not the whole counter, so it's relatively cheap. +# +# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup. +# +# Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache. +# NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache. +counter_cache_size_in_mb: + +# Duration in seconds after which Cassandra should +# save the counter cache (keys only). Caches are saved to saved_caches_directory as +# specified in this configuration file. +# +# Default is 7200 or 2 hours. +counter_cache_save_period: 7200 + +# Number of keys from the counter cache to save +# Disabled by default, meaning all keys are going to be saved +# counter_cache_keys_to_save: 100 + +# saved caches +# If not set, the default directory is $CASSANDRA_HOME/data/saved_caches. +saved_caches_directory: /var/lib/cassandra/saved_caches + +# commitlog_sync may be either "periodic" or "batch." +# +# When in batch mode, Cassandra won't ack writes until the commit log +# has been fsynced to disk. It will wait +# commitlog_sync_batch_window_in_ms milliseconds between fsyncs. +# This window should be kept short because the writer threads will +# be unable to do extra work while waiting.
(You may need to increase +# concurrent_writes for the same reason.) +# +# commitlog_sync: batch +# commitlog_sync_batch_window_in_ms: 2 +# +# the other option is "periodic" where writes may be acked immediately +# and the CommitLog is simply synced every commitlog_sync_period_in_ms +# milliseconds. +commitlog_sync: periodic +commitlog_sync_period_in_ms: 10000 + +# The size of the individual commitlog file segments. A commitlog +# segment may be archived, deleted, or recycled once all the data +# in it (potentially from each columnfamily in the system) has been +# flushed to sstables. +# +# The default size is 32, which is almost always fine, but if you are +# archiving commitlog segments (see commitlog_archiving.properties), +# then you probably want a finer granularity of archiving; 8 or 16 MB +# is reasonable. +# Max mutation size is also configurable via max_mutation_size_in_kb setting in +# cassandra.yaml. The default is half the size of commitlog_segment_size_in_mb * 1024. +# This should be positive and less than 2048. +# +# NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must +# be set to at least twice the size of max_mutation_size_in_kb / 1024 +# +commitlog_segment_size_in_mb: 32 + +# Compression to apply to the commit log. If omitted, the commit log +# will be written uncompressed. LZ4, Snappy, and Deflate compressors +# are supported. +# commitlog_compression: +# - class_name: LZ4Compressor +# parameters: +# - + +# any class that implements the SeedProvider interface and has a +# constructor that takes a Map of parameters will do. +seed_provider: + # Addresses of hosts that are deemed contact points. + # Cassandra nodes use this list of hosts to find each other and learn + # the topology of the ring. You must change this if you are running + # multiple nodes! + - class_name: org.apache.cassandra.locator.SimpleSeedProvider + parameters: + # seeds is actually a comma-delimited list of addresses. + # Ex: "<ip1>,<ip2>,<ip3>" + - seeds: "127.0.0.1" + +# For workloads with more data than can fit in memory, Cassandra's +# bottleneck will be reads that need to fetch data from +# disk. "concurrent_reads" should be set to (16 * number_of_drives) in +# order to allow the operations to enqueue low enough in the stack +# that the OS and drives can reorder them. Same applies to +# "concurrent_counter_writes", since counter writes read the current +# values before incrementing and writing them back. +# +# On the other hand, since writes are almost never IO bound, the ideal +# number of "concurrent_writes" is dependent on the number of cores in +# your system; (8 * number_of_cores) is a good rule of thumb. +concurrent_reads: 32 +concurrent_writes: 32 +concurrent_counter_writes: 32 + +# For materialized view writes, as there is a read involved, this should +# be limited to the lesser of concurrent reads and concurrent writes. +concurrent_materialized_view_writes: 32 + +# Maximum memory to use for sstable chunk cache and buffer pooling. +# 32MB of this are reserved for pooling buffers, the rest is used as a +# cache that holds uncompressed sstable chunks. +# Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap, +# so is in addition to the memory allocated for heap. The cache also has on-heap +# overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size +# if the default 64k chunk size is used). +# Memory is only allocated when needed.
+# file_cache_size_in_mb: 512 + +# Flag indicating whether to allocate on or off heap when the sstable buffer +# pool is exhausted, that is when it has exceeded the maximum memory +# file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request. + +# buffer_pool_use_heap_if_exhausted: true + +# The strategy for optimizing disk read +# Possible values are: +# ssd (for solid state disks, the default) +# spinning (for spinning disks) +# disk_optimization_strategy: ssd + +# Total permitted memory to use for memtables. Cassandra will stop +# accepting writes when the limit is exceeded until a flush completes, +# and will trigger a flush based on memtable_cleanup_threshold +# If omitted, Cassandra will set both to 1/4 the size of the heap. +memtable_heap_space_in_mb: 2048 +memtable_offheap_space_in_mb: 2048 + +# memtable_cleanup_threshold is deprecated. The default calculation +# is the only reasonable choice. See the comments on memtable_flush_writers +# for more information. +# +# Ratio of occupied non-flushing memtable size to total permitted size +# that will trigger a flush of the largest memtable. Larger mct will +# mean larger flushes and hence less compaction, but also less concurrent +# flush activity which can make it difficult to keep your disks fed +# under heavy write load. +# +# memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1) +# memtable_cleanup_threshold: 0.11 + +# Specify the way Cassandra allocates and manages memtable memory. +# Options are: +# +# heap_buffers +# on heap nio buffers +# +# offheap_buffers +# off heap (direct) nio buffers +# +# offheap_objects +# off heap objects +memtable_allocation_type: heap_buffers + +# Total space to use for commit logs on disk. +# +# If space gets above this value, Cassandra will flush every dirty CF +# in the oldest segment and remove it. So a small total commitlog space +# will tend to cause more flush activity on less-active columnfamilies. +# +# The default value is the smaller of 8192, and 1/4 of the total space +# of the commitlog volume. +# +# commitlog_total_space_in_mb: 8192 + +# This sets the number of memtable flush writer threads per disk +# as well as the total number of memtables that can be flushed concurrently. +# These are generally a combination of compute and IO bound. +# +# Memtable flushing is more CPU efficient than memtable ingest and a single thread +# can keep up with the ingest rate of a whole server on a single fast disk +# until it temporarily becomes IO bound under contention typically with compaction. +# At that point you need multiple flush threads. At some point in the future +# it may become CPU bound all the time. +# +# You can tell if flushing is falling behind using the MemtablePool.BlockedOnAllocation +# metric which should be 0, but will be non-zero if threads are blocked waiting on flushing +# to free memory. +# +# memtable_flush_writers defaults to two for a single data directory. +# This means that two memtables can be flushed concurrently to the single data directory. +# If you have multiple data directories the default is one memtable flushing at a time +# but the flush will use a thread per data directory so you will get two or more writers. +# +# Two is generally enough to flush on a fast disk [array] mounted as a single data directory. +# Adding more flush writers will result in smaller more frequent flushes that introduce more +# compaction overhead. 
+# +# There is a direct tradeoff between number of memtables that can be flushed concurrently +# and flush size and frequency. More is not better; you just need enough flush writers +# to never stall waiting for flushing to free memory. +# +#memtable_flush_writers: 2 + +# Total space to use for change-data-capture logs on disk. +# +# If space gets above this value, Cassandra will throw WriteTimeoutException +# on Mutations including tables with CDC enabled. A CDCCompactor is responsible +# for parsing the raw CDC logs and deleting them when parsing is completed. +# +# The default value is the min of 4096 MB and 1/8th of the total space +# of the drive where cdc_raw_directory resides. +# cdc_total_space_in_mb: 4096 + +# When we hit our cdc_raw limit and the CDCCompactor is either running behind +# or experiencing backpressure, we check at the following interval to see if any +# new space for cdc-tracked tables has been made available. Defaults to 250ms +# cdc_free_space_check_interval_ms: 250 + +# A fixed memory pool size in MB for SSTable index summaries. If left +# empty, this will default to 5% of the heap size. If the memory usage of +# all index summaries exceeds this limit, SSTables with low read rates will +# shrink their index summaries in order to meet this limit. However, this +# is a best-effort process. In extreme conditions Cassandra may need to use +# more than this amount of memory. +index_summary_capacity_in_mb: + +# How frequently index summaries should be resampled. This is done +# periodically to redistribute memory from the fixed-size pool to sstables +# proportional to their recent read rates. Setting to -1 will disable this +# process, leaving existing index summaries at their current sampling level. +index_summary_resize_interval_in_minutes: 60 + +# Whether to, when doing sequential writing, fsync() at intervals in +# order to force the operating system to flush the dirty +# buffers. Enable this to avoid sudden dirty buffer flushing from +# impacting read latencies. Almost always a good idea on SSDs; not +# necessarily on platters. +trickle_fsync: false +trickle_fsync_interval_in_kb: 10240 + +# TCP port, for commands and data +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +storage_port: 7000 + +# SSL port, for encrypted communication. Unused unless enabled in +# encryption_options +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +ssl_storage_port: 7001 + +# Address or interface to bind to and tell other Cassandra nodes to connect to. +# You _must_ change this if you want multiple nodes to be able to communicate! +# +# Set listen_address OR listen_interface, not both. +# +# Leaving it blank leaves it up to InetAddress.getLocalHost(). This +# will always do the Right Thing _if_ the node is properly configured +# (hostname, name resolution, etc), and the Right Thing is to use the +# address associated with the hostname (it might not be). +# +# Setting listen_address to 0.0.0.0 is always wrong. +# +listen_address: localhost + +# Set listen_address OR listen_interface, not both. Interfaces must correspond +# to a single address, IP aliasing is not supported. +# listen_interface: eth0 + +# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address +# you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4 +# address will be used. If true the first ipv6 address will be used.
Defaults to false preferring +# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. +# listen_interface_prefer_ipv6: false + +# Address to broadcast to other Cassandra nodes +# Leaving this blank will set it to the same value as listen_address +# broadcast_address: 1.2.3.4 + +# When using multiple physical network interfaces, set this +# to true to listen on broadcast_address in addition to +# the listen_address, allowing nodes to communicate in both +# interfaces. +# Ignore this property if the network configuration automatically +# routes between the public and private networks such as EC2. +# listen_on_broadcast_address: false + +# Internode authentication backend, implementing IInternodeAuthenticator; +# used to allow/disallow connections from peer nodes. +# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator + +# Whether to start the native transport server. +# Please note that the address on which the native transport is bound is the +# same as the rpc_address. The port however is different and specified below. +start_native_transport: true +# port for the CQL native transport to listen for clients on +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +native_transport_port: 9042 +# Enabling native transport encryption in client_encryption_options allows you to either use +# encryption for the standard port or to use a dedicated, additional port along with the unencrypted +# standard native_transport_port. +# Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption +# for native_transport_port. Setting native_transport_port_ssl to a different value +# from native_transport_port will use encryption for native_transport_port_ssl while +# keeping native_transport_port unencrypted. +native_transport_port_ssl: 9142 +# The maximum threads for handling requests when the native transport is used. +# This is similar to rpc_max_threads though the default differs slightly (and +# there is no native_transport_min_threads, idle threads will always be stopped +# after 30 seconds). +# native_transport_max_threads: 128 +# +# The maximum size of allowed frame. Frame (requests) larger than this will +# be rejected as invalid. The default is 256MB. If you're changing this parameter, +# you may want to adjust max_value_size_in_mb accordingly. This should be positive and less than 2048. +# native_transport_max_frame_size_in_mb: 256 + +# The maximum number of concurrent client connections. +# The default is -1, which means unlimited. +# native_transport_max_concurrent_connections: -1 + +# The maximum number of concurrent client connections per source ip. +# The default is -1, which means unlimited. +# native_transport_max_concurrent_connections_per_ip: -1 + +# Whether to start the thrift rpc server. +start_rpc: true + +# The address or interface to bind the Thrift RPC service and native transport +# server to. +# +# Set rpc_address OR rpc_interface, not both. +# +# Leaving rpc_address blank has the same effect as on listen_address +# (i.e. it will be based on the configured hostname of the node). +# +# Note that unlike listen_address, you can specify 0.0.0.0, but you must also +# set broadcast_rpc_address to a value other than 0.0.0.0. +# +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +rpc_address: 0.0.0.0 + +# Set rpc_address OR rpc_interface, not both. 
Interfaces must correspond +# to a single address, IP aliasing is not supported. +# rpc_interface: eth1 + +# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address +# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 +# address will be used. If true the first ipv6 address will be used. Defaults to false preferring +# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. +# rpc_interface_prefer_ipv6: false + +# port for Thrift to listen for clients on +rpc_port: 9160 + +# RPC address to broadcast to drivers and other Cassandra nodes. This cannot +# be set to 0.0.0.0. If left blank, this will be set to the value of +# rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must +# be set. +broadcast_rpc_address: 1.2.3.4 + +# enable or disable keepalive on rpc/native connections +rpc_keepalive: true + +# Cassandra provides two out-of-the-box options for the RPC Server: +# +# sync +# One thread per thrift connection. For a very large number of clients, memory +# will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size +# per thread, and that will correspond to your use of virtual memory (but physical memory +# may be limited depending on use of stack space). +# +# hsha +# Stands for "half synchronous, half asynchronous." All thrift clients are handled +# asynchronously using a small number of threads that does not vary with the amount +# of thrift clients (and thus scales well to many clients). The rpc requests are still +# synchronous (one thread per active request). If hsha is selected then it is essential +# that rpc_max_threads is changed from the default value of unlimited. +# +# The default is sync because on Windows hsha is about 30% slower. On Linux, +# sync/hsha performance is about the same, with hsha of course using less memory. +# +# Alternatively, can provide your own RPC server by providing the fully-qualified class name +# of an o.a.c.t.TServerFactory that can create an instance of it. +rpc_server_type: sync + +# Uncomment rpc_min|max_thread to set request pool size limits. +# +# Regardless of your choice of RPC server (see above), the number of maximum requests in the +# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync +# RPC server, it also dictates the number of clients that can be connected at all). +# +# The default is unlimited and thus provides no protection against clients overwhelming the server. You are +# encouraged to set a maximum that makes sense for you in production, but do keep in mind that +# rpc_max_threads represents the maximum number of client requests this server may execute concurrently. 
+# +# rpc_min_threads: 16 +# rpc_max_threads: 2048 + +# uncomment to set socket buffer sizes on rpc connections +# rpc_send_buff_size_in_bytes: +# rpc_recv_buff_size_in_bytes: + +# Uncomment to set socket buffer size for internode communication +# Note that when setting this, the buffer size is limited by net.core.wmem_max +# and when not setting it, it is defined by net.ipv4.tcp_wmem +# See also: +# /proc/sys/net/core/wmem_max +# /proc/sys/net/core/rmem_max +# /proc/sys/net/ipv4/tcp_wmem +# /proc/sys/net/ipv4/tcp_rmem +# and 'man tcp' +# internode_send_buff_size_in_bytes: + +# Uncomment to set socket buffer size for internode communication +# Note that when setting this, the buffer size is limited by net.core.wmem_max +# and when not setting it, it is defined by net.ipv4.tcp_wmem +# internode_recv_buff_size_in_bytes: + +# Frame size for thrift (maximum message length). +thrift_framed_transport_size_in_mb: 15 + +# Set to true to have Cassandra create a hard link to each sstable +# flushed or streamed locally in a backups/ subdirectory of the +# keyspace data. Removing these links is the operator's +# responsibility. +incremental_backups: false + +# Whether or not to take a snapshot before each compaction. Be +# careful using this option, since Cassandra won't clean up the +# snapshots for you. Mostly useful if you're paranoid when there +# is a data format change. +snapshot_before_compaction: false + +# Whether or not a snapshot is taken of the data before keyspace truncation +# or dropping of column families. The STRONGLY advised default of true +# should be used to provide data safety. If you set this flag to false, you will +# lose data on truncation or drop. +auto_snapshot: true + +# Granularity of the collation index of rows within a partition. +# Increase if your rows are large, or if you have a very large +# number of rows per partition. The competing goals are these: +# +# - a smaller granularity means more index entries are generated +# and looking up rows within the partition by collation column +# is faster +# - but, Cassandra will keep the collation index in memory for hot +# rows (as part of the key cache), so a larger granularity means +# you can cache more hot rows +column_index_size_in_kb: 64 + +# Per sstable indexed key cache entries (the collation index in memory +# mentioned above) exceeding this size will not be held on heap. +# This means that only partition information is held on heap and the +# index entries are read from disk. +# +# Note that this size refers to the size of the +# serialized index information and not the size of the partition. +column_index_cache_size_in_kb: 2 + +# Number of simultaneous compactions to allow, NOT including +# validation "compactions" for anti-entropy repair. Simultaneous +# compactions can help preserve read performance in a mixed read/write +# workload, by mitigating the tendency of small sstables to accumulate +# during a single long-running compaction. The default is usually +# fine and if you experience problems with compaction running too +# slowly or too fast, you should look at +# compaction_throughput_mb_per_sec first. +# +# concurrent_compactors defaults to the smaller of (number of disks, +# number of cores), with a minimum of 2 and a maximum of 8. +# +# If your data directories are backed by SSD, you should increase this +# to the number of cores. +#concurrent_compactors: 1 + +# Throttles compaction to the given total throughput across the entire +# system.
The faster you insert data, the faster you need to compact in +# order to keep the sstable count down, but in general, setting this to +# 16 to 32 times the rate you are inserting data is more than sufficient. +# Setting this to 0 disables throttling. Note that this accounts for all types +# of compaction, including validation compaction. +compaction_throughput_mb_per_sec: 16 + +# When compacting, the replacement sstable(s) can be opened before they +# are completely written, and used in place of the prior sstables for +# any range that has been written. This helps to smoothly transfer reads +# between the sstables, reducing page cache churn and keeping hot rows hot +sstable_preemptive_open_interval_in_mb: 50 + +# Throttles all outbound streaming file transfers on this node to the +# given total throughput in Mbps. This is necessary because Cassandra does +# mostly sequential IO when streaming data during bootstrap or repair, which +# can lead to saturating the network connection and degrading rpc performance. +# When unset, the default is 200 Mbps or 25 MB/s. +# stream_throughput_outbound_megabits_per_sec: 200 + +# Throttles all streaming file transfer between the datacenters, +# this setting allows users to throttle inter dc stream throughput in addition +# to throttling all network stream traffic as configured with +# stream_throughput_outbound_megabits_per_sec +# When unset, the default is 200 Mbps or 25 MB/s +# inter_dc_stream_throughput_outbound_megabits_per_sec: 200 + +# How long the coordinator should wait for read operations to complete +read_request_timeout_in_ms: 5000 +# How long the coordinator should wait for seq or index scans to complete +range_request_timeout_in_ms: 10000 +# How long the coordinator should wait for writes to complete +write_request_timeout_in_ms: 2000 +# How long the coordinator should wait for counter writes to complete +counter_write_request_timeout_in_ms: 5000 +# How long a coordinator should continue to retry a CAS operation +# that contends with other proposals for the same row +cas_contention_timeout_in_ms: 1000 +# How long the coordinator should wait for truncates to complete +# (This can be much longer, because unless auto_snapshot is disabled +# we need to flush first so we can snapshot before removing the data.) +truncate_request_timeout_in_ms: 60000 +# The default timeout for other, miscellaneous operations +request_timeout_in_ms: 10000 + +# How long before a node logs slow queries. Select queries that take longer than +# this timeout to execute will generate an aggregated log message, so that slow queries +# can be identified. Set this value to zero to disable slow query logging. +slow_query_log_timeout_in_ms: 500 + +# Enable operation timeout information exchange between nodes to accurately +# measure request timeouts. If disabled, replicas will assume that requests +# were forwarded to them instantly by the coordinator, which means that +# under overload conditions we will waste that much extra time processing +# already-timed-out requests. +# +# Warning: before enabling this property make sure ntp is installed +# and the times are synchronized between the nodes. +cross_node_timeout: false + +# Set keep-alive period for streaming +# This node will send a keep-alive message periodically with this period.
+# If the node does not receive a keep-alive message from the peer for +# 2 keep-alive cycles, the stream session times out and fails +# Default value is 300s (5 minutes), which means a stalled stream +# times out in 10 minutes by default +# streaming_keep_alive_period_in_secs: 300 + +# phi value that must be reached for a host to be marked down. +# most users should never need to adjust this. +# phi_convict_threshold: 8 + +# endpoint_snitch -- Set this to a class that implements +# IEndpointSnitch. The snitch has two functions: +# +# - it teaches Cassandra enough about your network topology to route +# requests efficiently +# - it allows Cassandra to spread replicas around your cluster to avoid +# correlated failures. It does this by grouping machines into +# "datacenters" and "racks." Cassandra will do its best not to have +# more than one replica on the same "rack" (which may not actually +# be a physical location) +# +# CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH +# ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss. +# This means that if you start with the default SimpleSnitch, which +# locates every node on "rack1" in "datacenter1", your only options +# if you need to add another datacenter are GossipingPropertyFileSnitch +# (and the older PFS). From there, if you want to migrate to an +# incompatible snitch like Ec2Snitch you can do it by adding new nodes +# under Ec2Snitch (which will locate them in a new "datacenter") and +# decommissioning the old ones. +# +# Out of the box, Cassandra provides: +# +# SimpleSnitch: +# Treats Strategy order as proximity. This can improve cache +# locality when disabling read repair. Only appropriate for +# single-datacenter deployments. +# +# GossipingPropertyFileSnitch +# This should be your go-to snitch for production use. The rack +# and datacenter for the local node are defined in +# cassandra-rackdc.properties and propagated to other nodes via +# gossip. If cassandra-topology.properties exists, it is used as a +# fallback, allowing migration from the PropertyFileSnitch. +# +# PropertyFileSnitch: +# Proximity is determined by rack and data center, which are +# explicitly configured in cassandra-topology.properties. +# +# Ec2Snitch: +# Appropriate for EC2 deployments in a single Region. Loads Region +# and Availability Zone information from the EC2 API. The Region is +# treated as the datacenter, and the Availability Zone as the rack. +# Only private IPs are used, so this will not work across multiple +# Regions. +# +# Ec2MultiRegionSnitch: +# Uses public IPs as broadcast_address to allow cross-region +# connectivity. (Thus, you should set seed addresses to the public +# IP as well.) You will need to open the storage_port or +# ssl_storage_port on the public IP firewall. (For intra-Region +# traffic, Cassandra will switch to the private IP after +# establishing a connection.) +# +# RackInferringSnitch: +# Proximity is determined by rack and data center, which are +# assumed to correspond to the 3rd and 2nd octet of each node's IP +# address, respectively. Unless this happens to match your +# deployment conventions, this is best used as an example of +# writing a custom Snitch class and is provided in that spirit. +# +# You can use a custom Snitch by setting this to the full class name +# of the snitch, which will be assumed to be on your classpath.
+endpoint_snitch: SimpleSnitch + +# controls how often to perform the more expensive part of host score +# calculation +dynamic_snitch_update_interval_in_ms: 100 +# controls how often to reset all host scores, allowing a bad host to +# possibly recover +dynamic_snitch_reset_interval_in_ms: 600000 +# if set greater than zero and read_repair_chance is < 1.0, this will allow +# 'pinning' of replicas to hosts in order to increase cache capacity. +# The badness threshold will control how much worse the pinned host has to be +# before the dynamic snitch will prefer other replicas over it. This is +# expressed as a double which represents a percentage. Thus, a value of +# 0.2 means Cassandra would continue to prefer the static snitch values +# until the pinned host was 20% worse than the fastest. +dynamic_snitch_badness_threshold: 0.1 + +# request_scheduler -- Set this to a class that implements +# RequestScheduler, which will schedule incoming client requests +# according to the specific policy. This is useful for multi-tenancy +# with a single Cassandra cluster. +# NOTE: This is specifically for requests from the client and does +# not affect inter node communication. +# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place +# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of +# client requests to a node with a separate queue for each +# request_scheduler_id. The scheduler is further customized by +# request_scheduler_options as described below. +request_scheduler: org.apache.cassandra.scheduler.NoScheduler + +# Scheduler Options vary based on the type of scheduler +# +# NoScheduler +# Has no options +# +# RoundRobin +# throttle_limit +# The throttle_limit is the number of in-flight +# requests per client. Requests beyond +# that limit are queued up until +# running requests can complete. +# The value of 80 here is twice the number of +# concurrent_reads + concurrent_writes. +# default_weight +# default_weight is optional and allows for +# overriding the default which is 1. +# weights +# Weights are optional and will default to 1 or the +# overridden default_weight. The weight translates into how +# many requests are handled during each turn of the +# RoundRobin, based on the scheduler id. +# +# request_scheduler_options: +# throttle_limit: 80 +# default_weight: 5 +# weights: +# Keyspace1: 1 +# Keyspace2: 5 + +# request_scheduler_id -- An identifier based on which to perform +# the request scheduling. Currently the only valid option is keyspace. +# request_scheduler_id: keyspace + +# Enable or disable inter-node encryption +# JVM defaults for supported SSL socket protocols and cipher suites can +# be replaced using custom encryption options. This is not recommended +# unless you have policies in place that dictate certain settings, or +# need to disable vulnerable ciphers or protocols in case the JVM cannot +# be updated. +# FIPS compliant settings can be configured at JVM level and should not +# involve changing encryption settings here: +# https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html +# *NOTE* No custom encryption options are enabled at the moment +# The available internode options are : all, none, dc, rack +# +# If set to dc cassandra will encrypt the traffic between the DCs +# If set to rack cassandra will encrypt the traffic between the racks +# +# The passwords used in these options must match the passwords used when generating +# the keystore and truststore. 
For instructions on generating these files, see: +# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore +# +server_encryption_options: + internode_encryption: none + keystore: conf/.keystore + keystore_password: cassandra + truststore: conf/.truststore + truststore_password: cassandra + # More advanced defaults below: + # protocol: TLS + # algorithm: SunX509 + # store_type: JKS + # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] + # require_client_auth: false + # require_endpoint_verification: false + +# enable or disable client/server encryption. +client_encryption_options: + enabled: true + # If enabled and optional is set to true, encrypted and unencrypted connections are handled. + optional: false + keystore: /certs/server.jks + keystore_password: my_password + require_client_auth: true + # Set truststore and truststore_password if require_client_auth is true + truststore: /certs/truststore.jks + truststore_password: my_password + # More advanced defaults below: + protocol: TLS + store_type: JKS + cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] + +# internode_compression controls whether traffic between nodes is +# compressed. +# Can be: +# +# all +# all traffic is compressed +# +# dc +# traffic between different datacenters is compressed +# +# none +# nothing is compressed. +internode_compression: dc + +# Enable or disable tcp_nodelay for inter-dc communication. +# Disabling it will result in larger (but fewer) network packets being sent, +# reducing overhead from the TCP protocol itself, at the cost of increasing +# latency if you block for cross-datacenter responses. +inter_dc_tcp_nodelay: false + +# TTL for different trace types used during logging of the repair process. +tracetype_query_ttl: 86400 +tracetype_repair_ttl: 604800 + +# By default, Cassandra logs GC Pauses greater than 200 ms at INFO level +# This threshold can be adjusted to minimize logging if necessary +# gc_log_threshold_in_ms: 200 + +# If unset, all GC Pauses greater than gc_log_threshold_in_ms will log at +# INFO level +# UDFs (user defined functions) are disabled by default. +# As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code. +enable_user_defined_functions: false + +# Enables scripted UDFs (JavaScript UDFs). +# Java UDFs are always enabled if enable_user_defined_functions is true. +# Enable this option to be able to use UDFs with "language javascript" or any custom JSR-223 provider. +# This option has no effect if enable_user_defined_functions is false. +enable_scripted_user_defined_functions: false + +# Enables materialized view creation on this node. +# Materialized views are considered experimental and are not recommended for production use. +enable_materialized_views: true + +# The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation. +# Lowering this value on Windows can provide much tighter latency and better throughput, however +# some virtualized environments may see a negative performance impact from changing this setting +# below their system default. The sysinternals 'clockres' tool can confirm your system's default +# setting.
+windows_timer_interval: 1 + + +# Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from +# a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by +# the "key_alias" is the only key that will be used for encrypt operations; previously used keys +# can still (and should!) be in the keystore and will be used on decrypt operations +# (to handle the case of key rotation). +# +# It is strongly recommended to download and install Java Cryptography Extension (JCE) +# Unlimited Strength Jurisdiction Policy Files for your version of the JDK. +# (current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html) +# +# Currently, only the following file types are supported for transparent data encryption, although +# more are coming in future cassandra releases: commitlog, hints +transparent_data_encryption_options: + enabled: false + chunk_length_kb: 64 + cipher: AES/CBC/PKCS5Padding + key_alias: testing:1 + # CBC IV length for AES needs to be 16 bytes (which is also the default size) + # iv_length: 16 + key_provider: + - class_name: org.apache.cassandra.security.JKSKeyProvider + parameters: + - keystore: conf/.keystore + keystore_password: cassandra + store_type: JCEKS + key_password: cassandra + + +##################### +# SAFETY THRESHOLDS # +##################### + +# When executing a scan, within or across a partition, we need to keep the +# tombstones seen in memory so we can return them to the coordinator, which +# will use them to make sure other replicas also know about the deleted rows. +# With workloads that generate a lot of tombstones, this can cause performance +# problems and even exhaust the server heap. +# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) +# Adjust the thresholds here if you understand the dangers and want to +# scan more tombstones anyway. These thresholds may also be adjusted at runtime +# using the StorageService mbean. +tombstone_warn_threshold: 1000 +tombstone_failure_threshold: 100000 + +# Log WARN on any multiple-partition batch size exceeding this value. 5kb per batch by default. +# Caution should be taken on increasing the size of this threshold as it can lead to node instability. +batch_size_warn_threshold_in_kb: 5 + +# Fail any multiple-partition batch exceeding this value. 50kb (10x warn threshold) by default. +batch_size_fail_threshold_in_kb: 50 + +# Log WARN on any batches not of type LOGGED that span across more partitions than this limit +unlogged_batch_across_partitions_warn_threshold: 10 + +# Log a warning when compacting partitions larger than this value +compaction_large_partition_warning_threshold_mb: 100 + +# GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level +# Adjust the threshold based on your application throughput requirement +# By default, Cassandra logs GC Pauses greater than 200 ms at INFO level +gc_warn_threshold_in_ms: 1000 + +# Maximum size of any value in SSTables. Safety measure to detect SSTable corruption +# early. Any value size larger than this threshold will result in marking an SSTable +# as corrupted. This should be positive and less than 2048. +# max_value_size_in_mb: 256 + +# Back-pressure settings # +# If enabled, the coordinator will apply the back-pressure strategy specified below to each mutation +# sent to replicas, with the aim of reducing pressure on overloaded replicas.
+back_pressure_enabled: false
+# The back-pressure strategy applied.
+# The default implementation, RateBasedBackPressure, takes three arguments:
+# high ratio, factor, and flow type, and uses the ratio between incoming mutation responses and outgoing mutation requests.
+# If below high ratio, outgoing mutations are rate limited according to the incoming rate decreased by the given factor;
+# if above high ratio, the rate limiting is increased by the given factor;
+# such factor is usually best configured between 1 and 10, use larger values for a faster recovery
+# at the expense of potentially more dropped mutations;
+# the rate limiting is applied according to the flow type: if FAST, it's rate limited at the speed of the fastest replica,
+# if SLOW at the speed of the slowest one.
+# New strategies can be added. Implementors need to implement org.apache.cassandra.net.BackpressureStrategy and
+# provide a public constructor accepting a Map<String, Object>.
+back_pressure_strategy:
+    - class_name: org.apache.cassandra.net.RateBasedBackPressure
+      parameters:
+        - high_ratio: 0.90
+          factor: 5
+          flow: FAST
+
+# Coalescing Strategies #
+# Coalescing multiple messages turns out to significantly boost message processing throughput (think doubling or more).
+# On bare metal, the floor for packet processing throughput is high enough that many applications won't notice, but in
+# virtualized environments, the point at which an application can be bound by network packet processing can be
+# surprisingly low compared to the throughput of task processing that is possible inside a VM. It's not that bare metal
+# doesn't benefit from coalescing messages, it's that the number of packets a bare metal network interface can process
+# is sufficient for many applications such that no load starvation is experienced even without coalescing.
+# There are other benefits to coalescing network messages that are harder to isolate with a simple metric like messages
+# per second. By coalescing multiple tasks together, a network thread can process multiple messages for the cost of one
+# trip to read from a socket, and all the task submission work can be done at the same time reducing context switching
+# and increasing cache friendliness of network message processing.
+# See CASSANDRA-8692 for details.
+
+# Strategy to use for coalescing messages in OutboundTcpConnection.
+# Can be fixed, movingaverage, timehorizon, disabled (default).
+# You can also specify a subclass of CoalescingStrategies.CoalescingStrategy by name.
+# otc_coalescing_strategy: DISABLED
+
+# How many microseconds to wait for coalescing. For fixed strategy this is the amount of time after the first
+# message is received before it will be sent with any accompanying messages. For moving average this is the
+# maximum amount of time that will be waited as well as the interval at which messages must arrive on average
+# for coalescing to be enabled.
+# otc_coalescing_window_us: 200
+
+# Do not try to coalesce messages if we already got that many messages. This should be more than 2 and less than 128.
+# otc_coalescing_enough_coalesced_messages: 8
+
+# How many milliseconds to wait between two expiration runs on the backlog (queue) of the OutboundTcpConnection.
+# Expiration is done if messages are piling up in the backlog. Droppable messages are expired to free the memory
+# taken by expired messages. The interval should be between 0 and 1000, and in most installations the default value
+# will be appropriate.
A smaller value could potentially expire messages slightly sooner at the expense of more CPU +# time and queue contention while iterating the backlog of messages. +# An interval of 0 disables any wait time, which is the behavior of former Cassandra versions. +# +# otc_backlog_expiration_interval_ms: 200 diff --git a/.ci/docker-compose-file/certs/README.md b/.ci/docker-compose-file/certs/README.md new file mode 100644 index 000000000..71c389bdd --- /dev/null +++ b/.ci/docker-compose-file/certs/README.md @@ -0,0 +1,23 @@ +Certificate and Key files for testing + +## Cassandra (v3.x) + +### How to convert server PEM to JKS Format + +1. Convert server.crt and server.key to server.p12 + +```bash +openssl pkcs12 -export -in server.crt -inkey server.key -out server.p12 -name "certificate" +``` + +2. Convert server.p12 to server.jks + +```bash +keytool -importkeystore -srckeystore server.p12 -srcstoretype pkcs12 -destkeystore server.jks +``` + +### How to convert CA PEM certificate to truststore.jks + +``` +keytool -import -file ca.pem -keystore truststore.jks +``` diff --git a/.ci/docker-compose-file/certs/client.key b/.ci/docker-compose-file/certs/client.key new file mode 100644 index 000000000..2989d0d78 --- /dev/null +++ b/.ci/docker-compose-file/certs/client.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAzs74tdftT7xGMGXQSoX/nnFkFAOjNtEVOI3bChzR+w6Xwo8Z +OUiOuOjynKvsJeltdmc0L+cbHZh7j+aHuAqVYxavqaqhFneF0f03t17qju9AixoV +JXgNT3ru56aZFa6Ov6NhfZfRirGnbNrg2RhuNeYZ4TYLH7iMR36exNFP83glXwXM +inMd1tsHL7xHLf3KjCbkusA5ncFWcpIUtpuWVn9aAE402dN7BJWfAbkQ4Y3VToR1 +P/T+W6WBldv0i2WlNbfiuAzuapA3EzJwoyTrG2Qyz7EtXM8XZdOZ6oJmW4s7c4V/ +FBT5knNtmXTt78xBBlIPFas5BAJIeV4eADx9MwIDAQABAoIBAQCZTvcynpJuxIxn +vmItjK5U/4wIBjZNIawQk6BoG7tR2JyJ/1jcjTw4OX/4wr450JRz7MfUJweD5hDb +OTMtLLNXlG6+YR4vsIUEiSlvhy5srVH0jG5Wq2t6mxBVq7vaRd/OkshnuU79+Pq7 +iHqclS7GSACxYkXWyxE6wtPh5aTWP8joK/LvYFiOqKPilUnLZ4hBhmL7CRUCZ0ZA +QGNyEhlmiAL+LNKW2RLXPBxlKX21X78ahUQmkkTM0lBK9x6hm4dD3SpLqmZyQQ9M +UfiMbU6XOYlDva/USZzrvTDlRf9uCG9QOsZzngP1aIy8Cq3QHECOeMIPO9WQLMll +SyY+SpyJAoGBAP4fhnbDpQC6ekd9TNoU9GE/FNNNGKLh82GDgnGcWU/oIzv8GlaR +rkEHTb6aRoPpjTxWIjJpScs9kycC+7N3oNo9rub4s5UvllI+EgQ95+j/5fnZx6gO +la8ousLy1hTYu9C0nTWdTV3YtfC0l0opn7Friv5QafNmhSn74DqrH0BHAoGBANBV +/NhBDAH1PHzYA+XuNLYTLv56Q4osmoen17nPnFNWb1TtWblzb0yWp86GGDFcs8CZ +eH0mXCRUzGMSWtOHe4CbIm2brAYXuL2t6+DZ1A22gsnW5avNrosZRS7eN7BE7DDj +5cp9+Es9UWnArzJU7jSWwAtA6o47WHfHU/pqRB21AoGAGx6eKPqEF2nPNuXmV7e4 +xNAIluw5XtiiMpvoRdubpG1vpS0oWmi9oe73mwm30MgR7Ih8qciWuXvewmENH3/6 +yI+gpMGR2K/1aN166rz4jOMSVfGp3wN/cev00m0774mZsZI03M3mvccs031ST/XV +Nwf1E2Ldi747I9nfeiNc+G0CgYEAslFHD1ntiyd6VGkYPQ978nPM/2dqs7OluILC +tHmslfAfbpOQ/ph9JRK2IqDHyEhOWoWBiazxpO8n2Yx2TSNjZBpkh2h8/uIC7+cT +Q+tuAya6H0ReZISx5sEEZC8zfx4fA2Gs53qWsN+U9W1FB1GGaWC2k2tG1+KXwD3N +9UJLdxkCgYBB96dsfT7nXmy0JLUz0rQ4umBje6H5uvuaevWdVMEptHB+O7+6CAse +OVwqlFLQ4QC7s4/P9FQwfr/0uMRInB1aC043Haa1LbiRcRIlSuBDUezK5xidUbz+ +uB/ABkwwEuqW3Ns1+QieJyyfoNYKZ2v0RtYxBuieKOpUCm3oNFZRWg== +-----END RSA PRIVATE KEY----- diff --git a/.ci/docker-compose-file/certs/client.pem b/.ci/docker-compose-file/certs/client.pem new file mode 100644 index 000000000..454ca4797 --- /dev/null +++ b/.ci/docker-compose-file/certs/client.pem @@ -0,0 +1,25 @@ +-----BEGIN CERTIFICATE----- +MIIEMjCCAhoCFCOrAvLNRztbFFcN0zrCQXoj73cHMA0GCSqGSIb3DQEBCwUAMDQx +EjAQBgNVBAoMCUVNUVggVGVzdDEeMBwGA1UEAwwVQ2VydGlmaWNhdGUgQXV0aG9y +aXR5MB4XDTIzMDMxNzA5MzgzMVoXDTMzMDMxNDA5MzgzMVowdzELMAkGA1UEBhMC +U0UxEjAQBgNVBAgMCVN0b2NraG9sbTESMBAGA1UEBwwJU3RvY2tob2xtMRIwEAYD 
+VQQKDAlNeU9yZ05hbWUxGDAWBgNVBAsMD015U2VydmljZUNsaWVudDESMBAGA1UE +AwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzs74 +tdftT7xGMGXQSoX/nnFkFAOjNtEVOI3bChzR+w6Xwo8ZOUiOuOjynKvsJeltdmc0 +L+cbHZh7j+aHuAqVYxavqaqhFneF0f03t17qju9AixoVJXgNT3ru56aZFa6Ov6Nh +fZfRirGnbNrg2RhuNeYZ4TYLH7iMR36exNFP83glXwXMinMd1tsHL7xHLf3KjCbk +usA5ncFWcpIUtpuWVn9aAE402dN7BJWfAbkQ4Y3VToR1P/T+W6WBldv0i2WlNbfi +uAzuapA3EzJwoyTrG2Qyz7EtXM8XZdOZ6oJmW4s7c4V/FBT5knNtmXTt78xBBlIP +Fas5BAJIeV4eADx9MwIDAQABMA0GCSqGSIb3DQEBCwUAA4ICAQBHgfJgMjTgWZXG +eyzIVxaqzWTLxrT7zPy09Mw4qsAl1TfWg9/r8nuskq4bjBQuKm0k9H0HQXz//eFC +Qn85qTHyAmZok6c4ljO2P+kTIl3nkKk5zudmeCTy3W9YBdyWvDXQ/GhbywIfO+1Y +fYA82I5rXVg4c9fUVTNczUFyDNcZzoJoqCS8jwFDtNR0N/fptJN14j8pnYvNV+4c +hZ+pcnhSoz7dD8WjyYCc/QCajJdTyb15i072HxuGmhwltjnwIE/2xfeXCCeUTzsJ +8h4/ABRu9VEqjqDQHepXIflYuVhU38SL0f4ly7neMXmytAbXwGLVM+ME81HG60Bw +8hkfSwKBbEkhUmD6+V1bdUz14I6HjWJt/INtFU+O+MYZbIFt4ep9GKLV3nk97CyL +fwDv5b4WXdC68iWMZqSrADAXr+VG3DgHqpNItj0XmhY6ihmt5tA3Z6IZJj45TShA +vRqTCx3Hf6EO3zf4KCrzaPSSSfVLnGKftA/6oz3bl8EK2e2M44lOspRk4l9k+iBR +sfHPmpiWY0hIiFtd3LD/uGDSBcGkKjU/fLvJZXJpVXwmT9pmK9LzkAPOK1rr97e9 +esHqwe1bo3z7IdeREZ0wdxqGL3BNpm4f1NaIzV/stX+vScau0AyFYXzumjeBIpKa +Gt0A+dZnUfWG6qn5NiRENXxFQSppaA== +-----END CERTIFICATE----- diff --git a/.ci/docker-compose-file/certs/server.jks b/.ci/docker-compose-file/certs/server.jks new file mode 100644 index 000000000..06c2fe184 Binary files /dev/null and b/.ci/docker-compose-file/certs/server.jks differ diff --git a/.ci/docker-compose-file/certs/server.p12 b/.ci/docker-compose-file/certs/server.p12 new file mode 100644 index 000000000..a23d58084 Binary files /dev/null and b/.ci/docker-compose-file/certs/server.p12 differ diff --git a/.ci/docker-compose-file/certs/truststore.jks b/.ci/docker-compose-file/certs/truststore.jks new file mode 100644 index 000000000..5ea593a39 Binary files /dev/null and b/.ci/docker-compose-file/certs/truststore.jks differ diff --git a/.ci/docker-compose-file/clickhouse/config.xml b/.ci/docker-compose-file/clickhouse/config.xml new file mode 100644 index 000000000..085f92a12 --- /dev/null +++ b/.ci/docker-compose-file/clickhouse/config.xml @@ -0,0 +1,678 @@ + + + + + + trace + /var/log/clickhouse-server/clickhouse-server.log + /var/log/clickhouse-server/clickhouse-server.err.log + 1000M + 10 + + + + + + + + + + + + + + false + + false + + + https://6f33034cfe684dd7a3ab9875e57b1c8d@o388870.ingest.sentry.io/5226277 + + + + 8123 + 9000 + 9004 + + + + + + + /etc/clickhouse-server/server.crt + /etc/clickhouse-server/server.key + + /etc/clickhouse-server/dhparam.pem + none + true + true + sslv2,sslv3 + true + + + + true + true + sslv2,sslv3 + true + + + + RejectCertificateHandler + + + + + + + + + 9009 + + + + + + + + + + + + + + + + + + + + 4096 + 3 + + + 100 + + + 0 + + + + 10000 + + + 10 + + + 4194304 + + + 0 + + + + + + 8589934592 + + + 5368709120 + + + + /var/lib/clickhouse/ + + + /var/lib/clickhouse/tmp/ + + + + + + /var/lib/clickhouse/user_files/ + + + /var/lib/clickhouse/access/ + + + /etc/clickhouse-server/users.xml + + + default + + + + + + default + + + + + + + + + true + + + + + + + + + + + + localhost + 9000 + + + + + + + + + localhost + 9000 + + + + + localhost + 9000 + + + + + + + 127.0.0.1 + 9000 + + + + + 127.0.0.2 + 9000 + + + + + + + localhost + 9440 + 1 + + + + + + + localhost + 9000 + + + + + localhost + 1 + + + + + + + + + + + + + + + + + + + + + + + + 3600 + + + + 3600 + + + 60 + + + + + + + + + + + + + system + query_log
+ + toYYYYMM(event_date) + + + + + 7500 +
+ + + + system + trace_log
+ + toYYYYMM(event_date) + 7500 +
+ + + + system + query_thread_log
+ toYYYYMM(event_date) + 7500 +
+ + + + + + + + system + metric_log
+ 7500 + 1000 +
+ + + + system + asynchronous_metric_log
+ + 60000 +
+ + + + + + + + + + + + *_dictionary.xml + + + + + + + + + + /clickhouse/task_queue/ddl + + + + + + + + + + + + + + + + click_cost + any + + 0 + 3600 + + + 86400 + 60 + + + + max + + 0 + 60 + + + 3600 + 300 + + + 86400 + 3600 + + + + + + /var/lib/clickhouse/format_schemas/ + + + + + + + +
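A quick way to sanity-check this ClickHouse setup once `docker compose` brings up the `clickhouse` service defined a few files below (`docker-compose-clickhouse.yaml`): a minimal sketch, assuming the `8123:8123` port mapping from that compose file and the passwordless `default` user these configs leave in place.

```bash
# Liveness probe against the published HTTP port; prints "Ok." when the server is up
curl -s 'http://localhost:8123/ping'

# Run a trivial query over the HTTP interface as the default user (no password configured)
curl -s 'http://localhost:8123/' --data-binary 'SELECT version()'
```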
diff --git a/.ci/docker-compose-file/clickhouse/users.xml b/.ci/docker-compose-file/clickhouse/users.xml new file mode 100644 index 000000000..ced773355 --- /dev/null +++ b/.ci/docker-compose-file/clickhouse/users.xml @@ -0,0 +1,110 @@ + + + + + + + + 10000000000 + + + 0 + + + random + + + + + 1 + + + + + + + + + public + + + + ::/0 + + + + default + + + default + + + + + + + + + + + + + + 3600 + + + 0 + 0 + 0 + 0 + 0 + + + + diff --git a/.ci/docker-compose-file/docker-compose-cassandra.yaml b/.ci/docker-compose-file/docker-compose-cassandra.yaml new file mode 100644 index 000000000..f7143f471 --- /dev/null +++ b/.ci/docker-compose-file/docker-compose-cassandra.yaml @@ -0,0 +1,32 @@ +version: '3.9' + +services: + cassandra_server: + container_name: cassandra + build: + context: ./cassandra + args: + CASSANDRA_TAG: ${CASSANDRA_TAG} + image: emqx-cassandra + restart: always + environment: + CASSANDRA_BROADCAST_ADDRESS: "1.2.3.4" + CASSANDRA_RPC_ADDRESS: "0.0.0.0" + HEAP_NEWSIZE: "128M" + MAX_HEAP_SIZE: "2048M" + volumes: + - ./certs:/certs + #ports: + # - "9042:9042" + # - "9142:9142" + command: + - /bin/bash + - -c + - | + /opt/cassandra/bin/cassandra -f -R > /cassandra.log & + /opt/cassandra/bin/cqlsh -u cassandra -p cassandra -e "CREATE KEYSPACE mqtt WITH REPLICATION = { 'class':'SimpleStrategy','replication_factor':1};" + while [[ $$? -ne 0 ]];do sleep 5; /opt/cassandra/bin/cqlsh -u cassandra -p cassandra -e "CREATE KEYSPACE mqtt WITH REPLICATION = { 'class':'SimpleStrategy','replication_factor':1};"; done + /opt/cassandra/bin/cqlsh -u cassandra -p cassandra -e "describe keyspaces;" + tail -f /cassandra.log + networks: + - emqx_bridge diff --git a/.ci/docker-compose-file/docker-compose-clickhouse.yaml b/.ci/docker-compose-file/docker-compose-clickhouse.yaml new file mode 100644 index 000000000..118f83dc1 --- /dev/null +++ b/.ci/docker-compose-file/docker-compose-clickhouse.yaml @@ -0,0 +1,16 @@ +version: '3.9' + +services: + clickhouse: + container_name: clickhouse + image: clickhouse/clickhouse-server:23.1.2.9-alpine + restart: always + volumes: + - ./clickhouse/users.xml:/etc/clickhouse-server/users.xml + - ./clickhouse/config.xml:/etc/clickhouse-server/config.d/config.xml + expose: + - "8123" + ports: + - "8123:8123" + networks: + - emqx_bridge diff --git a/.ci/docker-compose-file/docker-compose-dynamo.yaml b/.ci/docker-compose-file/docker-compose-dynamo.yaml new file mode 100644 index 000000000..926d6287c --- /dev/null +++ b/.ci/docker-compose-file/docker-compose-dynamo.yaml @@ -0,0 +1,15 @@ +version: '3.9' + +services: + dynamodb-local: + container_name: dynamo + image: amazon/dynamodb-local:${DYNAMO_TAG} + restart: always + ports: + - "8000:8000" + environment: + AWS_ACCESS_KEY_ID: root + AWS_SECRET_ACCESS_KEY: public + AWS_DEFAULT_REGION: us-west-2 + networks: + - emqx_bridge diff --git a/.ci/docker-compose-file/docker-compose-iotdb.yaml b/.ci/docker-compose-file/docker-compose-iotdb.yaml new file mode 100644 index 000000000..2e1ea881e --- /dev/null +++ b/.ci/docker-compose-file/docker-compose-iotdb.yaml @@ -0,0 +1,31 @@ +version: '3.9' + +services: + iotdb: + container_name: iotdb + hostname: iotdb + image: apache/iotdb:1.1.0-standalone + restart: always + environment: + - enable_rest_service=true + - cn_internal_address=iotdb + - cn_internal_port=10710 + - cn_consensus_port=10720 + - cn_target_config_node_list=iotdb:10710 + - dn_rpc_address=iotdb + - dn_internal_address=iotdb + - dn_rpc_port=6667 + - dn_mpp_data_exchange_port=10740 + - dn_schema_region_consensus_port=10750 + - 
dn_data_region_consensus_port=10760 + - dn_target_config_node_list=iotdb:10710 + # volumes: + # - ./data:/iotdb/data + # - ./logs:/iotdb/logs + expose: + - "18080" + # IoTDB's REST interface, uncomment for local testing + # ports: + # - "18080:18080" + networks: + - emqx_bridge diff --git a/.ci/docker-compose-file/docker-compose-kafka.yaml b/.ci/docker-compose-file/docker-compose-kafka.yaml index 9662b174d..bbfb4080a 100644 --- a/.ci/docker-compose-file/docker-compose-kafka.yaml +++ b/.ci/docker-compose-file/docker-compose-kafka.yaml @@ -2,7 +2,7 @@ version: '3.9' services: zookeeper: - image: wurstmeister/zookeeper + image: docker.io/library/zookeeper:3.6 ports: - "2181:2181" container_name: zookeeper @@ -10,51 +10,57 @@ services: networks: emqx_bridge: ssl_cert_gen: - image: fredrikhgrelland/alpine-jdk11-openssl + # see https://github.com/emqx/docker-images + image: ghcr.io/emqx/certgen:latest container_name: ssl_cert_gen + user: "${DOCKER_USER:-root}" volumes: - - emqx-shared-secret:/var/lib/secret - - ./kafka/generate-certs.sh:/bin/generate-certs.sh - entrypoint: /bin/sh - command: /bin/generate-certs.sh + - /tmp/emqx-ci/emqx-shared-secret:/var/lib/secret kdc: hostname: kdc.emqx.net - image: ghcr.io/emqx/emqx-builder/5.0-26:1.13.4-24.3.4.2-1-ubuntu20.04 + image: ghcr.io/emqx/emqx-builder/5.0-33:1.13.4-24.3.4.2-3-ubuntu20.04 container_name: kdc.emqx.net + expose: + - 88 # kdc + - 749 # admin server + # ports: + # - 88:88 + # - 749:749 networks: emqx_bridge: volumes: - - emqx-shared-secret:/var/lib/secret + - /tmp/emqx-ci/emqx-shared-secret:/var/lib/secret - ./kerberos/krb5.conf:/etc/kdc/krb5.conf - ./kerberos/krb5.conf:/etc/krb5.conf - ./kerberos/run.sh:/usr/bin/run.sh command: run.sh kafka_1: - image: wurstmeister/kafka:2.13-2.7.0 - ports: - - "9092:9092" - - "9093:9093" - - "9094:9094" - - "9095:9095" + image: wurstmeister/kafka:2.13-2.8.1 + # ports: + # - "9192-9195:9192-9195" container_name: kafka-1.emqx.net hostname: kafka-1.emqx.net depends_on: - - "kdc" - - "zookeeper" - - "ssl_cert_gen" + kdc: + condition: service_started + zookeeper: + condition: service_started + ssl_cert_gen: + condition: service_completed_successfully environment: KAFKA_BROKER_ID: 1 KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 - KAFKA_LISTENERS: PLAINTEXT://:9092,SASL_PLAINTEXT://:9093,SSL://:9094,SASL_SSL://:9095 - KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-1.emqx.net:9092,SASL_PLAINTEXT://kafka-1.emqx.net:9093,SSL://kafka-1.emqx.net:9094,SASL_SSL://kafka-1.emqx.net:9095 - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,SASL_PLAINTEXT:SASL_PLAINTEXT,SSL:SSL,SASL_SSL:SASL_SSL + KAFKA_LISTENERS: PLAINTEXT://:9092,SASL_PLAINTEXT://:9093,SSL://:9094,SASL_SSL://:9095,LOCAL_PLAINTEXT://:9192,LOCAL_SASL_PLAINTEXT://:9193,LOCAL_SSL://:9194,LOCAL_SASL_SSL://:9195,TOXIPROXY_PLAINTEXT://:9292,TOXIPROXY_SASL_PLAINTEXT://:9293,TOXIPROXY_SSL://:9294,TOXIPROXY_SASL_SSL://:9295 + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-1.emqx.net:9092,SASL_PLAINTEXT://kafka-1.emqx.net:9093,SSL://kafka-1.emqx.net:9094,SASL_SSL://kafka-1.emqx.net:9095,LOCAL_PLAINTEXT://localhost:9192,LOCAL_SASL_PLAINTEXT://localhost:9193,LOCAL_SSL://localhost:9194,LOCAL_SASL_SSL://localhost:9195,TOXIPROXY_PLAINTEXT://toxiproxy.emqx.net:9292,TOXIPROXY_SASL_PLAINTEXT://toxiproxy.emqx.net:9293,TOXIPROXY_SSL://toxiproxy.emqx.net:9294,TOXIPROXY_SASL_SSL://toxiproxy.emqx.net:9295 + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 
PLAINTEXT:PLAINTEXT,SASL_PLAINTEXT:SASL_PLAINTEXT,SSL:SSL,SASL_SSL:SASL_SSL,LOCAL_PLAINTEXT:PLAINTEXT,LOCAL_SASL_PLAINTEXT:SASL_PLAINTEXT,LOCAL_SSL:SSL,LOCAL_SASL_SSL:SASL_SSL,TOXIPROXY_PLAINTEXT:PLAINTEXT,TOXIPROXY_SASL_PLAINTEXT:SASL_PLAINTEXT,TOXIPROXY_SSL:SSL,TOXIPROXY_SASL_SSL:SASL_SSL KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT KAFKA_SASL_ENABLED_MECHANISMS: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512,GSSAPI KAFKA_SASL_KERBEROS_SERVICE_NAME: kafka KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAIN - KAFKA_JMX_OPTS: "-Djava.security.auth.login.config=/etc/kafka/jaas.conf" + KAFKA_OPTS: "-Djava.security.auth.login.config=/etc/kafka/jaas.conf" KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true" KAFKA_CREATE_TOPICS_NG: test-topic-one-partition:1:1,test-topic-two-partitions:2:1,test-topic-three-partitions:3:1, + KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true" KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.auth.SimpleAclAuthorizer KAFKA_SSL_TRUSTSTORE_LOCATION: /var/lib/secret/kafka.truststore.jks KAFKA_SSL_TRUSTSTORE_PASSWORD: password @@ -64,7 +70,7 @@ services: networks: emqx_bridge: volumes: - - emqx-shared-secret:/var/lib/secret + - /tmp/emqx-ci/emqx-shared-secret:/var/lib/secret - ./kafka/jaas.conf:/etc/kafka/jaas.conf - ./kafka/kafka-entrypoint.sh:/bin/kafka-entrypoint.sh - ./kerberos/krb5.conf:/etc/kdc/krb5.conf diff --git a/.ci/docker-compose-file/docker-compose-mysql-tcp.yaml b/.ci/docker-compose-file/docker-compose-mysql-tcp.yaml index 8a4c498df..4578ff94f 100644 --- a/.ci/docker-compose-file/docker-compose-mysql-tcp.yaml +++ b/.ci/docker-compose-file/docker-compose-mysql-tcp.yaml @@ -13,10 +13,12 @@ services: networks: - emqx_bridge command: - --bind-address "::" - --character-set-server=utf8mb4 - --collation-server=utf8mb4_general_ci - --explicit_defaults_for_timestamp=true - --lower_case_table_names=1 - --max_allowed_packet=128M - --skip-symbolic-links + - --bind-address=0.0.0.0 + - --character-set-server=utf8mb4 + - --collation-server=utf8mb4_general_ci + - --lower-case-table-names=1 + - --max-allowed-packet=128M + # Severely limit maximum number of prepared statements the server must permit + # so that we hit potential resource exhaustion earlier in tests. + - --max-prepared-stmt-count=64 + - --skip-symbolic-links diff --git a/.ci/docker-compose-file/docker-compose-mysql-tls.yaml b/.ci/docker-compose-file/docker-compose-mysql-tls.yaml index 47d9ecd83..83fd4658c 100644 --- a/.ci/docker-compose-file/docker-compose-mysql-tls.yaml +++ b/.ci/docker-compose-file/docker-compose-mysql-tls.yaml @@ -23,9 +23,11 @@ services: - --port=3306 - --character-set-server=utf8mb4 - --collation-server=utf8mb4_general_ci - - --explicit_defaults_for_timestamp=true - - --lower_case_table_names=1 - - --max_allowed_packet=128M + - --lower-case-table-names=1 + - --max-allowed-packet=128M + # Severely limit maximum number of prepared statements the server must permit + # so that we hit potential resource exhaustion earlier in tests. 
+ - --max-prepared-stmt-count=64 - --ssl-ca=/etc/certs/ca-cert.pem - --ssl-cert=/etc/certs/server-cert.pem - --ssl-key=/etc/certs/server-key.pem diff --git a/.ci/docker-compose-file/docker-compose-opents.yaml b/.ci/docker-compose-file/docker-compose-opents.yaml new file mode 100644 index 000000000..545aeb015 --- /dev/null +++ b/.ci/docker-compose-file/docker-compose-opents.yaml @@ -0,0 +1,9 @@ +version: '3.9' + +services: + opents_server: + container_name: opents + image: petergrace/opentsdb-docker:${OPENTS_TAG} + restart: always + networks: + - emqx_bridge diff --git a/.ci/docker-compose-file/docker-compose-oracle.yaml b/.ci/docker-compose-file/docker-compose-oracle.yaml new file mode 100644 index 000000000..ea8965846 --- /dev/null +++ b/.ci/docker-compose-file/docker-compose-oracle.yaml @@ -0,0 +1,11 @@ +version: '3.9' + +services: + oracle_server: + container_name: oracle + image: oracleinanutshell/oracle-xe-11g:1.0.0 + restart: always + environment: + ORACLE_DISABLE_ASYNCH_IO: true + networks: + - emqx_bridge diff --git a/.ci/docker-compose-file/docker-compose-pulsar.yaml b/.ci/docker-compose-file/docker-compose-pulsar.yaml new file mode 100644 index 000000000..926000ae4 --- /dev/null +++ b/.ci/docker-compose-file/docker-compose-pulsar.yaml @@ -0,0 +1,32 @@ +version: '3' + +services: + pulsar: + container_name: pulsar + image: apachepulsar/pulsar:2.11.0 + # ports: + # - 6650:6650 + # - 8080:8080 + networks: + emqx_bridge: + volumes: + - ../../apps/emqx/etc/certs/cert.pem:/etc/certs/server.pem + - ../../apps/emqx/etc/certs/key.pem:/etc/certs/key.pem + - ../../apps/emqx/etc/certs/cacert.pem:/etc/certs/ca.pem + restart: always + command: + - bash + - "-c" + - | + sed -i 's/^advertisedAddress=/#advertisedAddress=/' conf/standalone.conf + sed -ie 's/^brokerServicePort=.*/brokerServicePort=6649/' conf/standalone.conf + sed -i 's/^bindAddress=/#bindAddress=/' conf/standalone.conf + sed -i 's#^bindAddresses=#bindAddresses=plain:pulsar://0.0.0.0:6650,ssl:pulsar+ssl://0.0.0.0:6651,toxiproxy:pulsar://0.0.0.0:6652,toxiproxy_ssl:pulsar+ssl://0.0.0.0:6653#' conf/standalone.conf + sed -i 's#^advertisedAddress=#advertisedAddress=plain:pulsar://pulsar:6650,ssl:pulsar+ssl://pulsar:6651,toxiproxy:pulsar://toxiproxy:6652,toxiproxy_ssl:pulsar+ssl://toxiproxy:6653#' conf/standalone.conf + sed -i 's#^tlsCertificateFilePath=#tlsCertificateFilePath=/etc/certs/server.pem#' conf/standalone.conf + sed -i 's#^tlsTrustCertsFilePath=#tlsTrustCertsFilePath=/etc/certs/ca.pem#' conf/standalone.conf + sed -i 's#^tlsKeyFilePath=#tlsKeyFilePath=/etc/certs/key.pem#' conf/standalone.conf + sed -i 's#^tlsProtocols=#tlsProtocols=TLSv1.3,TLSv1.2#' conf/standalone.conf + sed -i 's#^tlsCiphers=#tlsCiphers=TLS_AES_256_GCM_SHA384#' conf/standalone.conf + echo 'advertisedListeners=plain:pulsar://pulsar:6650,ssl:pulsar+ssl://pulsar:6651,toxiproxy:pulsar://toxiproxy:6652,toxiproxy_ssl:pulsar+ssl://toxiproxy:6653' >> conf/standalone.conf + bin/pulsar standalone -nfw -nss diff --git a/.ci/docker-compose-file/docker-compose-rabbitmq.yaml b/.ci/docker-compose-file/docker-compose-rabbitmq.yaml new file mode 100644 index 000000000..76df9d24c --- /dev/null +++ b/.ci/docker-compose-file/docker-compose-rabbitmq.yaml @@ -0,0 +1,17 @@ +version: '3.9' + +services: + rabbitmq: + container_name: rabbitmq + image: rabbitmq:3.11-management + + restart: always + expose: + - "15672" + - "5672" + # We don't want to take ports from the host + # ports: + # - "15672:15672" + # - "5672:5672" + networks: + - emqx_bridge diff --git 
a/.ci/docker-compose-file/docker-compose-redis-cluster-tcp.yaml b/.ci/docker-compose-file/docker-compose-redis-cluster-tcp.yaml index 9c03fc65e..f44a71e14 100644 --- a/.ci/docker-compose-file/docker-compose-redis-cluster-tcp.yaml +++ b/.ci/docker-compose-file/docker-compose-redis-cluster-tcp.yaml @@ -1,11 +1,57 @@ version: '3.9' - services: - redis_cluster: + + redis-cluster-1: &redis-node + container_name: redis-cluster-1 image: redis:${REDIS_TAG} - container_name: redis-cluster volumes: - - ./redis/:/data/conf - command: bash -c "/bin/bash /data/conf/redis.sh --node cluster && tail -f /var/log/redis-server.log" + - ./redis/cluster-tcp:/usr/local/etc/redis + command: redis-server /usr/local/etc/redis/redis.conf networks: - emqx_bridge + + + redis-cluster-2: + <<: *redis-node + container_name: redis-cluster-2 + + redis-cluster-3: + <<: *redis-node + container_name: redis-cluster-3 + + redis-cluster-4: + <<: *redis-node + container_name: redis-cluster-4 + + redis-cluster-5: + <<: *redis-node + container_name: redis-cluster-5 + + redis-cluster-6: + <<: *redis-node + container_name: redis-cluster-6 + + redis-cluster-create: + <<: *redis-node + container_name: redis-cluster-create + command: > + redis-cli + --cluster create + redis-cluster-1:6379 + redis-cluster-2:6379 + redis-cluster-3:6379 + redis-cluster-4:6379 + redis-cluster-5:6379 + redis-cluster-6:6379 + --cluster-replicas 1 + --cluster-yes + --pass "public" + --no-auth-warning + depends_on: + - redis-cluster-1 + - redis-cluster-2 + - redis-cluster-3 + - redis-cluster-4 + - redis-cluster-5 + - redis-cluster-6 + diff --git a/.ci/docker-compose-file/docker-compose-redis-cluster-tls.yaml b/.ci/docker-compose-file/docker-compose-redis-cluster-tls.yaml index bfbf1a4a3..988620acb 100644 --- a/.ci/docker-compose-file/docker-compose-redis-cluster-tls.yaml +++ b/.ci/docker-compose-file/docker-compose-redis-cluster-tls.yaml @@ -1,14 +1,59 @@ version: '3.9' - services: - redis_cluster_tls: - container_name: redis-cluster-tls + + redis-cluster-tls-1: &redis-node + container_name: redis-cluster-tls-1 image: redis:${REDIS_TAG} volumes: - - ../../apps/emqx/etc/certs/cacert.pem:/etc/certs/ca.crt - - ../../apps/emqx/etc/certs/cert.pem:/etc/certs/redis.crt - - ../../apps/emqx/etc/certs/key.pem:/etc/certs/redis.key - - ./redis/:/data/conf - command: bash -c "/bin/bash /data/conf/redis.sh --node cluster --tls-enabled && tail -f /var/log/redis-server.log" + - ./redis/cluster-tls:/usr/local/etc/redis + - ../../apps/emqx/etc/certs:/etc/certs + command: redis-server /usr/local/etc/redis/redis.conf networks: - emqx_bridge + + redis-cluster-tls-2: + <<: *redis-node + container_name: redis-cluster-tls-2 + + redis-cluster-tls-3: + <<: *redis-node + container_name: redis-cluster-tls-3 + + redis-cluster-tls-4: + <<: *redis-node + container_name: redis-cluster-tls-4 + + redis-cluster-tls-5: + <<: *redis-node + container_name: redis-cluster-tls-5 + + redis-cluster-tls-6: + <<: *redis-node + container_name: redis-cluster-tls-6 + + redis-cluster-tls-create: + <<: *redis-node + container_name: redis-cluster-tls-create + command: > + redis-cli + --cluster create + redis-cluster-tls-1:6389 + redis-cluster-tls-2:6389 + redis-cluster-tls-3:6389 + redis-cluster-tls-4:6389 + redis-cluster-tls-5:6389 + redis-cluster-tls-6:6389 + --cluster-replicas 1 + --cluster-yes + --pass "public" + --no-auth-warning + --tls + --insecure + depends_on: + - redis-cluster-tls-1 + - redis-cluster-tls-2 + - redis-cluster-tls-3 + - redis-cluster-tls-4 + - redis-cluster-tls-5 + - redis-cluster-tls-6 
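The one-shot `redis-cluster-create` / `redis-cluster-tls-create` containers above form the cluster by running `redis-cli --cluster create` against the six nodes once they start. A minimal sketch of how the result can be verified, assuming the container names, the `public` password, and the TLS port `6389` from the compose files and redis.conf files in this change:

```bash
# Plain TCP cluster: cluster_state should eventually report "ok"
docker exec redis-cluster-1 \
  redis-cli --pass public --no-auth-warning cluster info | grep cluster_state

# TLS cluster: same check against the TLS port, skipping certificate verification
docker exec redis-cluster-tls-1 \
  redis-cli --pass public --no-auth-warning -p 6389 --tls --insecure \
  cluster info | grep cluster_state
```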
+ diff --git a/.ci/docker-compose-file/docker-compose-redis-sentinel-tcp.yaml b/.ci/docker-compose-file/docker-compose-redis-sentinel-tcp.yaml index 07c6cfb0a..d395edd2b 100644 --- a/.ci/docker-compose-file/docker-compose-redis-sentinel-tcp.yaml +++ b/.ci/docker-compose-file/docker-compose-redis-sentinel-tcp.yaml @@ -1,11 +1,41 @@ -version: '3.9' +version: "3" services: - redis_sentinel_server: + + redis-sentinel-master: + container_name: redis-sentinel-master + image: redis:${REDIS_TAG} + volumes: + - ./redis/sentinel-tcp:/usr/local/etc/redis + command: redis-server /usr/local/etc/redis/master.conf + networks: + - emqx_bridge + + redis-sentinel-slave: + container_name: redis-sentinel-slave + image: redis:${REDIS_TAG} + volumes: + - ./redis/sentinel-tcp:/usr/local/etc/redis + command: redis-server /usr/local/etc/redis/slave.conf + networks: + - emqx_bridge + depends_on: + - redis-sentinel-master + + redis-sentinel: container_name: redis-sentinel image: redis:${REDIS_TAG} volumes: - - ./redis/:/data/conf - command: bash -c "/bin/bash /data/conf/redis.sh --node sentinel && tail -f /var/log/redis-server.log" + - ./redis/sentinel-tcp/sentinel-base.conf:/usr/local/etc/redis/sentinel-base.conf + depends_on: + - redis-sentinel-master + - redis-sentinel-slave + command: > + bash -c "cp -f /usr/local/etc/redis/sentinel-base.conf /usr/local/etc/redis/sentinel.conf && + redis-sentinel /usr/local/etc/redis/sentinel.conf" networks: - emqx_bridge + + + + diff --git a/.ci/docker-compose-file/docker-compose-redis-sentinel-tls.yaml b/.ci/docker-compose-file/docker-compose-redis-sentinel-tls.yaml index b9eaefa9c..d883e2992 100644 --- a/.ci/docker-compose-file/docker-compose-redis-sentinel-tls.yaml +++ b/.ci/docker-compose-file/docker-compose-redis-sentinel-tls.yaml @@ -1,14 +1,44 @@ -version: '3.9' +version: "3" services: - redis_sentinel_server_tls: + + redis-sentinel-tls-master: + container_name: redis-sentinel-tls-master + image: redis:${REDIS_TAG} + volumes: + - ./redis/sentinel-tls:/usr/local/etc/redis + - ../../apps/emqx/etc/certs:/etc/certs + command: redis-server /usr/local/etc/redis/master.conf + networks: + - emqx_bridge + + redis-sentinel-tls-slave: + container_name: redis-sentinel-tls-slave + image: redis:${REDIS_TAG} + volumes: + - ./redis/sentinel-tls:/usr/local/etc/redis + - ../../apps/emqx/etc/certs:/etc/certs + command: redis-server /usr/local/etc/redis/slave.conf + networks: + - emqx_bridge + depends_on: + - redis-sentinel-tls-master + + redis-sentinel-tls: container_name: redis-sentinel-tls image: redis:${REDIS_TAG} volumes: - - ../../apps/emqx/etc/certs/cacert.pem:/etc/certs/ca.crt - - ../../apps/emqx/etc/certs/cert.pem:/etc/certs/redis.crt - - ../../apps/emqx/etc/certs/key.pem:/etc/certs/redis.key - - ./redis/:/data/conf - command: bash -c "/bin/bash /data/conf/redis.sh --node sentinel --tls-enabled && tail -f /var/log/redis-server.log" + - ./redis/sentinel-tls/sentinel-base.conf:/usr/local/etc/redis/sentinel-base.conf + - ../../apps/emqx/etc/certs:/etc/certs + depends_on: + - redis-sentinel-tls-master + - redis-sentinel-tls-slave + command: > + bash -c "cp -f /usr/local/etc/redis/sentinel-base.conf /usr/local/etc/redis/sentinel.conf && + redis-sentinel /usr/local/etc/redis/sentinel.conf" networks: - emqx_bridge + + + + diff --git a/.ci/docker-compose-file/docker-compose-rocketmq.yaml b/.ci/docker-compose-file/docker-compose-rocketmq.yaml new file mode 100644 index 000000000..7e5a2e42e --- /dev/null +++ b/.ci/docker-compose-file/docker-compose-rocketmq.yaml @@ -0,0 +1,34 @@ +version: 
'3.9' + +services: + mqnamesrv: + image: apache/rocketmq:4.9.4 + container_name: rocketmq_namesrv +# ports: +# - 9876:9876 + volumes: + - ./rocketmq/logs:/opt/logs + - ./rocketmq/store:/opt/store + command: ./mqnamesrv + networks: + - emqx_bridge + + mqbroker: + image: apache/rocketmq:4.9.4 + container_name: rocketmq_broker +# ports: +# - 10909:10909 +# - 10911:10911 + volumes: + - ./rocketmq/logs:/opt/logs + - ./rocketmq/store:/opt/store + - ./rocketmq/conf/broker.conf:/etc/rocketmq/broker.conf + environment: + NAMESRV_ADDR: "rocketmq_namesrv:9876" + JAVA_OPTS: " -Duser.home=/opt -Drocketmq.broker.diskSpaceWarningLevelRatio=0.99" + JAVA_OPT_EXT: "-server -Xms512m -Xmx512m -Xmn512m" + command: ./mqbroker -c /etc/rocketmq/broker.conf + depends_on: + - mqnamesrv + networks: + - emqx_bridge diff --git a/.ci/docker-compose-file/docker-compose-sqlserver.yaml b/.ci/docker-compose-file/docker-compose-sqlserver.yaml new file mode 100644 index 000000000..63fcfeecd --- /dev/null +++ b/.ci/docker-compose-file/docker-compose-sqlserver.yaml @@ -0,0 +1,19 @@ +version: '3.9' + +services: + sql_server: + container_name: sqlserver + # See also: + # https://mcr.microsoft.com/en-us/product/mssql/server/about + # https://hub.docker.com/_/microsoft-mssql-server + image: ${MS_IMAGE_ADDR}:${SQLSERVER_TAG} + environment: + # See also: + # https://learn.microsoft.com/en-us/sql/linux/sql-server-linux-configure-environment-variables + ACCEPT_EULA: "Y" + MSSQL_SA_PASSWORD: "mqtt_public1" + restart: always + # ports: + # - "1433:1433" + networks: + - emqx_bridge diff --git a/.ci/docker-compose-file/docker-compose-tdengine-restful.yaml b/.ci/docker-compose-file/docker-compose-tdengine-restful.yaml new file mode 100644 index 000000000..6cd7f2669 --- /dev/null +++ b/.ci/docker-compose-file/docker-compose-tdengine-restful.yaml @@ -0,0 +1,11 @@ +version: '3.9' + +services: + tdengine_server: + container_name: tdengine + image: tdengine/tdengine:${TDENGINE_TAG} + restart: always + ports: + - "6041:6041" + networks: + - emqx_bridge diff --git a/.ci/docker-compose-file/docker-compose-toxiproxy.yaml b/.ci/docker-compose-file/docker-compose-toxiproxy.yaml index 66e7ec308..d91118406 100644 --- a/.ci/docker-compose-file/docker-compose-toxiproxy.yaml +++ b/.ci/docker-compose-file/docker-compose-toxiproxy.yaml @@ -6,15 +6,28 @@ services: image: ghcr.io/shopify/toxiproxy:2.5.0 restart: always networks: - - emqx_bridge + emqx_bridge: + aliases: + - toxiproxy + - toxiproxy.emqx.net volumes: - "./toxiproxy.json:/config/toxiproxy.json" ports: - 8474:8474 - 8086:8086 - 8087:8087 + - 11433:1433 - 13306:3306 - 13307:3307 + - 15432:5432 + - 15433:5433 + - 16041:6041 + - 18000:8000 + - 19876:9876 + - 19042:9042 + - 19142:9142 + - 14242:4242 + - 28080:18080 command: - "-host=0.0.0.0" - "-config=/config/toxiproxy.json" diff --git a/.ci/docker-compose-file/docker-compose.yaml b/.ci/docker-compose-file/docker-compose.yaml index b55b3196f..6f4b7c04b 100644 --- a/.ci/docker-compose-file/docker-compose.yaml +++ b/.ci/docker-compose-file/docker-compose.yaml @@ -3,7 +3,7 @@ version: '3.9' services: erlang: container_name: erlang - image: ${DOCKER_CT_RUNNER_IMAGE:-ghcr.io/emqx/emqx-builder/5.0-26:1.13.4-24.3.4.2-1-ubuntu20.04} + image: ${DOCKER_CT_RUNNER_IMAGE:-ghcr.io/emqx/emqx-builder/5.0-33:1.13.4-24.3.4.2-3-ubuntu20.04} env_file: - conf.env environment: @@ -16,14 +16,18 @@ services: GITHUB_REF: ${GITHUB_REF} networks: - emqx_bridge + ports: + - 28083:18083 + - 2883:1883 volumes: - ../..:/emqx - - emqx-shared-secret:/var/lib/secret + - 
/tmp/emqx-ci/emqx-shared-secret:/var/lib/secret - ./kerberos/krb5.conf:/etc/kdc/krb5.conf - ./kerberos/krb5.conf:/etc/krb5.conf + # - ./odbc/odbcinst.ini:/etc/odbcinst.ini working_dir: /emqx tty: true - user: "${UID_GID}" + user: "${DOCKER_USER:-root}" networks: emqx_bridge: @@ -37,6 +41,3 @@ networks: gateway: 172.100.239.1 - subnet: 2001:3200:3200::/64 gateway: 2001:3200:3200::1 - -volumes: # add this section - emqx-shared-secret: # does not need anything underneath this diff --git a/.ci/docker-compose-file/kafka/generate-certs.sh b/.ci/docker-compose-file/kafka/generate-certs.sh deleted file mode 100755 index 3f1c75550..000000000 --- a/.ci/docker-compose-file/kafka/generate-certs.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/bash - -set -euo pipefail - -set -x - -# Source https://github.com/zmstone/docker-kafka/blob/master/generate-certs.sh - -HOST="*." -DAYS=3650 -PASS="password" - -cd /var/lib/secret/ - -# Delete old files -(rm ca.key ca.crt server.key server.csr server.crt client.key client.csr client.crt server.p12 kafka.keystore.jks kafka.truststore.jks 2>/dev/null || true) - -ls - -echo '== Generate self-signed server and client certificates' -echo '= generate CA' -openssl req -new -x509 -keyout ca.key -out ca.crt -days $DAYS -nodes -subj "/C=SE/ST=Stockholm/L=Stockholm/O=brod/OU=test/CN=$HOST" - -echo '= generate server certificate request' -openssl req -newkey rsa:2048 -sha256 -keyout server.key -out server.csr -days "$DAYS" -nodes -subj "/C=SE/ST=Stockholm/L=Stockholm/O=brod/OU=test/CN=$HOST" - -echo '= sign server certificate' -openssl x509 -req -CA ca.crt -CAkey ca.key -in server.csr -out server.crt -days "$DAYS" -CAcreateserial - -echo '= generate client certificate request' -openssl req -newkey rsa:2048 -sha256 -keyout client.key -out client.csr -days "$DAYS" -nodes -subj "/C=SE/ST=Stockholm/L=Stockholm/O=brod/OU=test/CN=$HOST" - -echo '== sign client certificate' -openssl x509 -req -CA ca.crt -CAkey ca.key -in client.csr -out client.crt -days $DAYS -CAserial ca.srl - -echo '= Convert self-signed certificate to PKCS#12 format' -openssl pkcs12 -export -name "$HOST" -in server.crt -inkey server.key -out server.p12 -CAfile ca.crt -passout pass:"$PASS" - -echo '= Import PKCS#12 into a java keystore' - -echo $PASS | keytool -importkeystore -destkeystore kafka.keystore.jks -srckeystore server.p12 -srcstoretype pkcs12 -alias "$HOST" -storepass "$PASS" - - -echo '= Import CA into java truststore' - -echo yes | keytool -keystore kafka.truststore.jks -alias CARoot -import -file ca.crt -storepass "$PASS" diff --git a/.ci/docker-compose-file/kafka/kafka-entrypoint.sh b/.ci/docker-compose-file/kafka/kafka-entrypoint.sh index 445fd65c9..336a78e74 100755 --- a/.ci/docker-compose-file/kafka/kafka-entrypoint.sh +++ b/.ci/docker-compose-file/kafka/kafka-entrypoint.sh @@ -17,6 +17,7 @@ timeout $TIMEOUT bash -c 'until [ -f /var/lib/secret/kafka.keytab ]; do sleep 1; echo "+++++++ Wait until SSL certs are generated ++++++++" timeout $TIMEOUT bash -c 'until [ -f /var/lib/secret/kafka.truststore.jks ]; do sleep 1; done' +keytool -list -v -keystore /var/lib/secret/kafka.keystore.jks -storepass password sleep 3 diff --git a/.ci/docker-compose-file/odbc/odbcinst.ini b/.ci/docker-compose-file/odbc/odbcinst.ini new file mode 100644 index 000000000..dd0241543 --- /dev/null +++ b/.ci/docker-compose-file/odbc/odbcinst.ini @@ -0,0 +1,9 @@ +[ms-sql] +Description=Microsoft ODBC Driver 17 for SQL Server +Driver=/opt/microsoft/msodbcsql17/lib64/libmsodbcsql-17.10.so.2.1 +UsageCount=1 + +[ODBC Driver 17 for 
SQL Server] +Description=Microsoft ODBC Driver 17 for SQL Server +Driver=/opt/microsoft/msodbcsql17/lib64/libmsodbcsql-17.10.so.2.1 +UsageCount=1 diff --git a/.ci/docker-compose-file/pgsql/Dockerfile b/.ci/docker-compose-file/pgsql/Dockerfile index f26e18d0e..8598d3f33 100644 --- a/.ci/docker-compose-file/pgsql/Dockerfile +++ b/.ci/docker-compose-file/pgsql/Dockerfile @@ -1,7 +1,7 @@ ARG BUILD_FROM=postgres:13 FROM ${BUILD_FROM} ARG POSTGRES_USER=postgres -COPY --chown=$POSTGRES_USER ./pgsql/pg_hba.conf /var/lib/postgresql/pg_hba.conf +COPY --chown=$POSTGRES_USER ./pgsql/pg_hba_tls.conf /var/lib/postgresql/pg_hba.conf COPY --chown=$POSTGRES_USER certs/server.key /var/lib/postgresql/server.key COPY --chown=$POSTGRES_USER certs/server.crt /var/lib/postgresql/server.crt COPY --chown=$POSTGRES_USER certs/ca.crt /var/lib/postgresql/root.crt diff --git a/.ci/docker-compose-file/pgsql/pg_hba_tls.conf b/.ci/docker-compose-file/pgsql/pg_hba_tls.conf new file mode 100644 index 000000000..356afd9a6 --- /dev/null +++ b/.ci/docker-compose-file/pgsql/pg_hba_tls.conf @@ -0,0 +1,8 @@ +# TYPE DATABASE USER CIDR-ADDRESS METHOD +local all all trust +# TODO: also test with `cert`? will require client certs +hostssl all all 0.0.0.0/0 password +hostssl all all ::/0 password + +hostssl all www-data 0.0.0.0/0 cert clientcert=1 +hostssl all postgres 0.0.0.0/0 cert clientcert=1 diff --git a/.ci/docker-compose-file/redis/.gitignore b/.ci/docker-compose-file/redis/.gitignore deleted file mode 100644 index 23ffe8469..000000000 --- a/.ci/docker-compose-file/redis/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -r700?i.log -nodes.700?.conf -*.rdb diff --git a/.ci/docker-compose-file/redis/cluster-tcp/redis.conf b/.ci/docker-compose-file/redis/cluster-tcp/redis.conf new file mode 100644 index 000000000..79a0d8a73 --- /dev/null +++ b/.ci/docker-compose-file/redis/cluster-tcp/redis.conf @@ -0,0 +1,18 @@ +bind :: 0.0.0.0 +port 6379 +requirepass public + +cluster-enabled yes + +masterauth public + +protected-mode no +daemonize no + +loglevel notice +logfile "" + +always-show-logo no +save "" +appendonly no + diff --git a/.ci/docker-compose-file/redis/cluster-tls/redis.conf b/.ci/docker-compose-file/redis/cluster-tls/redis.conf new file mode 100644 index 000000000..3020f46a7 --- /dev/null +++ b/.ci/docker-compose-file/redis/cluster-tls/redis.conf @@ -0,0 +1,28 @@ +bind :: 0.0.0.0 +port 6379 +requirepass public + +cluster-enabled yes + +masterauth public + +tls-port 6389 +tls-cert-file /etc/certs/cert.pem +tls-key-file /etc/certs/key.pem +tls-ca-cert-file /etc/certs/cacert.pem +tls-auth-clients no + +tls-replication yes +tls-cluster yes + + +protected-mode no +daemonize no + +loglevel notice +logfile "" + +always-show-logo no +save "" +appendonly no + diff --git a/.ci/docker-compose-file/redis/redis-tls.conf b/.ci/docker-compose-file/redis/redis-tls.conf deleted file mode 100644 index c503dc2e8..000000000 --- a/.ci/docker-compose-file/redis/redis-tls.conf +++ /dev/null @@ -1,12 +0,0 @@ -daemonize yes -bind 0.0.0.0 :: -logfile /var/log/redis-server.log -protected-mode no -requirepass public -masterauth public - -tls-cert-file /etc/certs/redis.crt -tls-key-file /etc/certs/redis.key -tls-ca-cert-file /etc/certs/ca.crt -tls-replication yes -tls-cluster yes diff --git a/.ci/docker-compose-file/redis/redis.conf b/.ci/docker-compose-file/redis/redis.conf deleted file mode 100644 index 484d9abf9..000000000 --- a/.ci/docker-compose-file/redis/redis.conf +++ /dev/null @@ -1,6 +0,0 @@ -daemonize yes -bind 0.0.0.0 :: -logfile 
/var/log/redis-server.log -protected-mode no -requirepass public -masterauth public diff --git a/.ci/docker-compose-file/redis/redis.sh b/.ci/docker-compose-file/redis/redis.sh deleted file mode 100755 index be6462249..000000000 --- a/.ci/docker-compose-file/redis/redis.sh +++ /dev/null @@ -1,126 +0,0 @@ -#!/bin/bash - -set -x - -LOCAL_IP=$(hostname -i | grep -oE '((25[0-5]|(2[0-4]|1[0-9]|[1-9]|)[0-9])\.){3}(25[0-5]|(2[0-4]|1[0-9]|[1-9]|)[0-9])' | head -n 1) - -node=single -tls=false -while [[ $# -gt 0 ]] -do -key="$1" - -case $key in - -n|--node) - node="$2" - shift # past argument - shift # past value - ;; - --tls-enabled) - tls=true - shift # past argument - ;; - *) - shift # past argument - ;; -esac -done - -rm -f \ - /data/conf/r7000i.log \ - /data/conf/r7001i.log \ - /data/conf/r7002i.log \ - /data/conf/nodes.7000.conf \ - /data/conf/nodes.7001.conf \ - /data/conf/nodes.7002.conf - -if [ "$node" = "cluster" ]; then - if $tls; then - redis-server /data/conf/redis-tls.conf --port 7000 --cluster-config-file /data/conf/nodes.7000.conf \ - --tls-port 8000 --cluster-enabled yes - redis-server /data/conf/redis-tls.conf --port 7001 --cluster-config-file /data/conf/nodes.7001.conf \ - --tls-port 8001 --cluster-enabled yes - redis-server /data/conf/redis-tls.conf --port 7002 --cluster-config-file /data/conf/nodes.7002.conf \ - --tls-port 8002 --cluster-enabled yes - else - redis-server /data/conf/redis.conf --port 7000 --cluster-config-file /data/conf/nodes.7000.conf \ - --cluster-enabled yes - redis-server /data/conf/redis.conf --port 7001 --cluster-config-file /data/conf/nodes.7001.conf \ - --cluster-enabled yes - redis-server /data/conf/redis.conf --port 7002 --cluster-config-file /data/conf/nodes.7002.conf \ - --cluster-enabled yes - fi -elif [ "$node" = "sentinel" ]; then - if $tls; then - redis-server /data/conf/redis-tls.conf --port 7000 --cluster-config-file /data/conf/nodes.7000.conf \ - --tls-port 8000 --cluster-enabled no - redis-server /data/conf/redis-tls.conf --port 7001 --cluster-config-file /data/conf/nodes.7001.conf \ - --tls-port 8001 --cluster-enabled no --slaveof "$LOCAL_IP" 8000 - redis-server /data/conf/redis-tls.conf --port 7002 --cluster-config-file /data/conf/nodes.7002.conf \ - --tls-port 8002 --cluster-enabled no --slaveof "$LOCAL_IP" 8000 - - else - redis-server /data/conf/redis.conf --port 7000 --cluster-config-file /data/conf/nodes.7000.conf \ - --cluster-enabled no - redis-server /data/conf/redis.conf --port 7001 --cluster-config-file /data/conf/nodes.7001.conf \ - --cluster-enabled no --slaveof "$LOCAL_IP" 7000 - redis-server /data/conf/redis.conf --port 7002 --cluster-config-file /data/conf/nodes.7002.conf \ - --cluster-enabled no --slaveof "$LOCAL_IP" 7000 - fi -fi - -REDIS_LOAD_FLG=true - -while $REDIS_LOAD_FLG; -do - sleep 1 - redis-cli --pass public --no-auth-warning -p 7000 info 1> /data/conf/r7000i.log 2> /dev/null - if ! [ -s /data/conf/r7000i.log ]; then - continue - fi - redis-cli --pass public --no-auth-warning -p 7001 info 1> /data/conf/r7001i.log 2> /dev/null - if ! [ -s /data/conf/r7001i.log ]; then - continue - fi - redis-cli --pass public --no-auth-warning -p 7002 info 1> /data/conf/r7002i.log 2> /dev/null; - if ! 
[ -s /data/conf/r7002i.log ]; then - continue - fi - if [ "$node" = "cluster" ] ; then - if $tls; then - yes "yes" | redis-cli --cluster create "$LOCAL_IP:8000" "$LOCAL_IP:8001" "$LOCAL_IP:8002" \ - --pass public --no-auth-warning \ - --tls true --cacert /etc/certs/ca.crt \ - --cert /etc/certs/redis.crt --key /etc/certs/redis.key - else - yes "yes" | redis-cli --cluster create "$LOCAL_IP:7000" "$LOCAL_IP:7001" "$LOCAL_IP:7002" \ - --pass public --no-auth-warning - fi - elif [ "$node" = "sentinel" ]; then - tee /_sentinel.conf>/dev/null << EOF -port 26379 -bind 0.0.0.0 :: -daemonize yes -logfile /var/log/redis-server.log -dir /tmp -EOF - if $tls; then - cat >>/_sentinel.conf<>/_sentinel.conf<> .ci/docker-compose-file/conf.cluster.env @@ -29,7 +29,7 @@ esac is_node_up() { local node="$1" docker exec -i "$node" \ - bash -c "emqx eval-erl \"['emqx@node1.emqx.io','emqx@node2.emqx.io'] = maps:get(running_nodes, ekka_cluster:info()).\"" > /dev/null 2>&1 + bash -c "emqx eval \"['emqx@node1.emqx.io','emqx@node2.emqx.io'] = maps:get(running_nodes, ekka_cluster:info()).\"" > /dev/null 2>&1 } is_node_listening() { diff --git a/.ci/docker-compose-file/toxiproxy.json b/.ci/docker-compose-file/toxiproxy.json index 34bc5b1db..dee3134f5 100644 --- a/.ci/docker-compose-file/toxiproxy.json +++ b/.ci/docker-compose-file/toxiproxy.json @@ -29,5 +29,107 @@ "listen": "0.0.0.0:6379", "upstream": "redis:6379", "enabled": true + }, + { + "name": "pgsql_tcp", + "listen": "0.0.0.0:5432", + "upstream": "pgsql:5432", + "enabled": true + }, + { + "name": "pgsql_tls", + "listen": "0.0.0.0:5433", + "upstream": "pgsql-tls:5432", + "enabled": true + }, + { + "name": "tdengine_restful", + "listen": "0.0.0.0:6041", + "upstream": "tdengine:6041", + "enabled": true + }, + { + "name": "dynamo", + "listen": "0.0.0.0:8000", + "upstream": "dynamo:8000", + "enabled": true + }, + { + "name": "kafka_plain", + "listen": "0.0.0.0:9292", + "upstream": "kafka-1.emqx.net:9292", + "enabled": true + }, + { + "name": "kafka_sasl_plain", + "listen": "0.0.0.0:9293", + "upstream": "kafka-1.emqx.net:9293", + "enabled": true + }, + { + "name": "kafka_ssl", + "listen": "0.0.0.0:9294", + "upstream": "kafka-1.emqx.net:9294", + "enabled": true + }, + { + "name": "kafka_sasl_ssl", + "listen": "0.0.0.0:9295", + "upstream": "kafka-1.emqx.net:9295", + "enabled": true + }, + { + "name": "rocketmq", + "listen": "0.0.0.0:9876", + "upstream": "rocketmq_namesrv:9876", + "enabled": true + }, + { + "name": "cassa_tcp", + "listen": "0.0.0.0:9042", + "upstream": "cassandra:9042", + "enabled": true + }, + { + "name": "cassa_tls", + "listen": "0.0.0.0:9142", + "upstream": "cassandra:9142", + "enabled": true + }, + { + "name": "sqlserver", + "listen": "0.0.0.0:1433", + "upstream": "sqlserver:1433", + "enabled": true + }, + { + "name": "opents", + "listen": "0.0.0.0:4242", + "upstream": "opents:4242", + "enabled": true + }, + { + "name": "pulsar_plain", + "listen": "0.0.0.0:6652", + "upstream": "pulsar:6652", + "enabled": true + }, + { + "name": "pulsar_tls", + "listen": "0.0.0.0:6653", + "upstream": "pulsar:6653", + "enabled": true + }, + { + "name": "oracle", + "listen": "0.0.0.0:1521", + "upstream": "oracle:1521", + "enabled": true + }, + { + "name": "iotdb", + "listen": "0.0.0.0:18080", + "upstream": "iotdb:18080", + "enabled": true } ] diff --git a/.editorconfig b/.editorconfig index c563aa10d..719028b4d 100644 --- a/.editorconfig +++ b/.editorconfig @@ -20,8 +20,3 @@ indent_size = 4 # Tab indentation (no size specified) [Makefile] indent_style = tab - -# Matches 
the exact files either package.json or .travis.yml -[{.travis.yml}] -indent_style = space -indent_size = 2 diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index a353766d4..be6a1e967 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,43 +1,29 @@ ## Default -* @zmstone @ieQu1 @terry-xiaoyu @qzhuyan @HJianBo @zhongwencool - -## MQTT -/apps/emqx_connector/src/mqtt/ @qzhuyan -/apps/emqx/*/*mqtt* @qzhuyan +* @emqx/emqx-review-board ## apps -/apps/emqx/ @lafirest @thalesmg @HJianBo @ieQu1 -/apps/emqx_authn/ @savonarola @JimMoen @HJianBo -/apps/emqx_authz/ @savonarola @JimMoen @HJianBo -/apps/emqx_auto_subscribe/ @thalesmg @HJianBo -/apps/emqx_bridge/ @terry-xiaoyu @thalesmg -/apps/emqx_conf/ @ieQu1 @thalesmg -/apps/emqx_connector/ @terry-xiaoyu @JimMoen -/apps/emqx_dashboard/ @lafirest @JimMoen -/apps/emqx_exhook/ @lafirest @HJianBo @JimMoen -/apps/emqx_gateway/ @HJianBo @lafirest -/apps/emqx_machine/ @thalesmg @terry-xiaoyu @ieQu1 -/apps/emqx_management/ @HJianBo @lafirest @sstrigler -/apps/emqx_modules/ @thalesmg @terry-xiaoyu @HJianBo -/apps/emqx_plugin_libs/ @terry-xiaoyu @lafirest -/apps/emqx_plugins/ @thalesmg @JimMoen @ieQu1 -/apps/emqx_prometheus/ @JimMoen @ieQu1 -/apps/emqx_psk/ @lafirest @thalesmg @terry-xiaoyu -/apps/emqx_resource/ @terry-xiaoyu @thalesmg -/apps/emqx_replay/ @ieQu1 -/apps/emqx_retainer/ @lafirest @ieQu1 @thalesmg -/apps/emqx_rule_engine/ @terry-xiaoyu @HJianBo @kjellwinblad -/apps/emqx_slow_subs/ @lafirest @HJianBo -/apps/emqx_statsd/ @JimMoen @HJianBo - -## other -/lib-ee/ @thalesmg -/bin/ @zmstone @thalesmg @terry-xiaoyu @id -/rel/ @zmstone @thalesmg @id - +/apps/emqx/ @emqx/emqx-review-board @lafirest +/apps/emqx_authn/ @emqx/emqx-review-board @JimMoen @savonarola +/apps/emqx_authz/ @emqx/emqx-review-board @JimMoen @savonarola +/apps/emqx_connector/ @emqx/emqx-review-board @JimMoen +/apps/emqx_dashboard/ @emqx/emqx-review-board @JimMoen @lafirest +/apps/emqx_exhook/ @emqx/emqx-review-board @JimMoen @lafirest +/apps/emqx_gateway/ @emqx/emqx-review-board @lafirest +/apps/emqx_management/ @emqx/emqx-review-board @lafirest @sstrigler +/apps/emqx_plugin_libs/ @emqx/emqx-review-board @lafirest +/apps/emqx_plugins/ @emqx/emqx-review-board @JimMoen +/apps/emqx_prometheus/ @emqx/emqx-review-board @JimMoen +/apps/emqx_psk/ @emqx/emqx-review-board @lafirest +/apps/emqx_retainer/ @emqx/emqx-review-board @lafirest +/apps/emqx_rule_engine/ @emqx/emqx-review-board @kjellwinblad +/apps/emqx_slow_subs/ @emqx/emqx-review-board @lafirest +/apps/emqx_statsd/ @emqx/emqx-review-board @JimMoen +/apps/emqx_replay @emqx/emqx-review-board @ieQu1 ## CI -/.github/ @id -/.ci/ @id -/scripts/ @id -/build @id -/deploy/ @id +/deploy/ @emqx/emqx-review-board @Rory-Z + +## @Meggielqk owns all files in any i18n directory anywhere in the project +/i18n/ @Meggielqk + +## no owner for changelogs, anyone can approve +/changes diff --git a/.github/PULL_REQUEST_TEMPLATE/ci.md b/.github/PULL_REQUEST_TEMPLATE/ci.md deleted file mode 100644 index 764933516..000000000 --- a/.github/PULL_REQUEST_TEMPLATE/ci.md +++ /dev/null @@ -1,7 +0,0 @@ -Fixes - -## PR Checklist -Please convert it to a draft if any of the following conditions are not met. 
Reviewers may skip over until all the items are checked: - -- [ ] If changed package build ci, pass [this action](https://github.com/emqx/emqx/actions/workflows/build_packages.yaml) (manual trigger) -- [ ] Change log has been added to `changes/` dir for user-facing artifacts update diff --git a/.github/PULL_REQUEST_TEMPLATE/doc.md b/.github/PULL_REQUEST_TEMPLATE/doc.md deleted file mode 100644 index af1c9127f..000000000 --- a/.github/PULL_REQUEST_TEMPLATE/doc.md +++ /dev/null @@ -1 +0,0 @@ -Fixes diff --git a/.github/PULL_REQUEST_TEMPLATE/v4.md b/.github/PULL_REQUEST_TEMPLATE/v4.md deleted file mode 100644 index 11b282091..000000000 --- a/.github/PULL_REQUEST_TEMPLATE/v4.md +++ /dev/null @@ -1,12 +0,0 @@ -Fixes - -## PR Checklist -Please convert it to a draft if any of the following conditions are not met. Reviewers may skip over until all the items are checked: - -- [ ] Added tests for the changes -- [ ] Changed lines covered in coverage report -- [ ] Change log has been added to `changes/` dir -- [ ] `appup` files updated (execute `scripts/update-appup.sh emqx`) -- [ ] For internal contributor: there is a jira ticket to track this change -- [ ] If there should be document changes, a PR to emqx-docs.git is sent, or a jira ticket is created to follow up -- [ ] In case of non-backward compatible changes, reviewer should check this item as a write-off, and add details in **Backward Compatibility** section diff --git a/.github/PULL_REQUEST_TEMPLATE/v5.md b/.github/PULL_REQUEST_TEMPLATE/v5.md deleted file mode 100644 index a0db01e1a..000000000 --- a/.github/PULL_REQUEST_TEMPLATE/v5.md +++ /dev/null @@ -1,11 +0,0 @@ -Fixes - -## PR Checklist -Please convert it to a draft if any of the following conditions are not met. Reviewers may skip over until all the items are checked: - -- [ ] Added tests for the changes -- [ ] Changed lines covered in coverage report -- [ ] Change log has been added to `changes/` dir -- [ ] For internal contributor: there is a jira ticket to track this change -- [ ] If there should be document changes, a PR to emqx-docs.git is sent, or a jira ticket is created to follow up -- [ ] Schema changes are backward compatible diff --git a/.github/actions/docker-meta/action.yaml b/.github/actions/docker-meta/action.yaml deleted file mode 100644 index 13ab21da6..000000000 --- a/.github/actions/docker-meta/action.yaml +++ /dev/null @@ -1,81 +0,0 @@ -name: 'Docker meta' -inputs: - profile: - required: true - type: string - registry: - required: true - type: string - arch: - required: true - type: string - otp: - required: true - type: string - elixir: - required: false - type: string - default: '' - builder_base: - required: true - type: string - owner: - required: true - type: string - docker_tags: - required: true - type: string - -outputs: - emqx_name: - description: "EMQX name" - value: ${{ steps.pre-meta.outputs.emqx_name }} - version: - description: "docker image version" - value: ${{ steps.meta.outputs.version }} - tags: - description: "docker image tags" - value: ${{ steps.meta.outputs.tags }} - labels: - description: "docker image labels" - value: ${{ steps.meta.outputs.labels }} - -runs: - using: composite - steps: - - name: prepare for docker/metadata-action - id: pre-meta - shell: bash - run: | - emqx_name=${{ inputs.profile }} - img_suffix=${{ inputs.arch }} - img_labels="org.opencontainers.image.otp.version=${{ inputs.otp }}" - if [ -n "${{ inputs.elixir }}" ]; then - emqx_name="emqx-elixir" - img_suffix="elixir-${{ inputs.arch }}" - 
img_labels="org.opencontainers.image.elixir.version=${{ inputs.elixir }}\n${img_labels}" - fi - if [ "${{ inputs.profile }}" = "emqx" ]; then - img_labels="org.opencontainers.image.edition=Opensource\n${img_labels}" - fi - if [ "${{ inputs.profile }}" = "emqx-enterprise" ]; then - img_labels="org.opencontainers.image.edition=Enterprise\n${img_labels}" - fi - if [[ "${{ inputs.builder_base }}" =~ "alpine" ]]; then - img_suffix="${img_suffix}-alpine" - fi - echo "emqx_name=${emqx_name}" >> $GITHUB_OUTPUT - echo "img_suffix=${img_suffix}" >> $GITHUB_OUTPUT - echo "img_labels=${img_labels}" >> $GITHUB_OUTPUT - echo "img_name=${{ inputs.registry }}/${{ inputs.owner }}/${{ inputs.profile }}" >> $GITHUB_OUTPUT - - uses: docker/metadata-action@v4 - id: meta - with: - images: - ${{ steps.pre-meta.outputs.img_name }} - flavor: | - suffix=-${{ steps.pre-meta.outputs.img_suffix }} - tags: | - type=raw,value=${{ inputs.docker_tags }} - labels: - ${{ steps.pre-meta.outputs.img_labels }} diff --git a/.github/actions/package-macos/action.yaml b/.github/actions/package-macos/action.yaml index 95915dd7d..49d9b3dbf 100644 --- a/.github/actions/package-macos/action.yaml +++ b/.github/actions/package-macos/action.yaml @@ -3,7 +3,7 @@ inputs: profile: # emqx, emqx-enterprise required: true type: string - otp: # 25.1.2-2, 24.3.4.2-1 + otp: # 25.1.2-2, 24.3.4.2-2 required: true type: string os: diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 000000000..7cb91f0d4 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,21 @@ +Fixes + + + +## Summary +copilot:summary + +## PR Checklist +Please convert it to a draft if any of the following conditions are not met. Reviewers may skip over until all the items are checked: + +- [ ] Added tests for the changes +- [ ] Changed lines covered in coverage report +- [ ] Change log has been added to `changes/{ce,ee}/(feat|perf|fix)-.en.md` files +- [ ] For internal contributor: there is a jira ticket to track this change +- [ ] If there should be document changes, a PR to emqx-docs.git is sent, or a jira ticket is created to follow up +- [ ] Schema changes are backward compatible + +## Checklist for CI (.github/workflows) changes + +- [ ] If changed package build workflow, pass [this action](https://github.com/emqx/emqx/actions/workflows/build_packages.yaml) (manual trigger) +- [ ] Change log has been added to `changes/` dir for user-facing artifacts update diff --git a/.github/workflows/apps_version_check.yaml b/.github/workflows/apps_version_check.yaml index 13e26b204..52c467786 100644 --- a/.github/workflows/apps_version_check.yaml +++ b/.github/workflows/apps_version_check.yaml @@ -4,7 +4,7 @@ on: [pull_request] jobs: check_apps_version: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v3 diff --git a/.github/workflows/build_and_push_docker_images.yaml b/.github/workflows/build_and_push_docker_images.yaml index 76238c75f..7c0a5dc87 100644 --- a/.github/workflows/build_and_push_docker_images.yaml +++ b/.github/workflows/build_and_push_docker_images.yaml @@ -9,27 +9,30 @@ on: tags: - v* - e* - release: - types: - - published + - docker-latest-* workflow_dispatch: inputs: branch_or_tag: required: false profile: required: false + default: 'emqx' + is_latest: + required: false + default: false jobs: prepare: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 # prepare source with any OTP version, no need for a matrix - container: 
"ghcr.io/emqx/emqx-builder/5.0-26:1.13.4-24.3.4.2-1-ubuntu20.04" + container: "ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-24.3.4.2-3-ubuntu22.04" outputs: - BUILD_PROFILE: ${{ steps.get_profile.outputs.BUILD_PROFILE }} - IS_DOCKER_LATEST: ${{ steps.get_profile.outputs.IS_DOCKER_LATEST }} + PROFILE: ${{ steps.get_profile.outputs.PROFILE }} + EDITION: ${{ steps.get_profile.outputs.EDITION }} + IS_LATEST: ${{ steps.get_profile.outputs.IS_LATEST }} IS_EXACT_TAG: ${{ steps.get_profile.outputs.IS_EXACT_TAG }} - DOCKER_TAG_VERSION: ${{ steps.get_profile.outputs.DOCKER_TAG_VERSION }} + VERSION: ${{ steps.get_profile.outputs.VERSION }} steps: - uses: actions/checkout@v3 @@ -40,59 +43,61 @@ jobs: - name: Get profiles to build id: get_profile + env: + INPUTS_PROFILE: ${{ github.event.inputs.profile }} run: | cd source - tag=${{ github.ref }} # tag docker-latest-ce or docker-latest-ee if git describe --tags --exact --match 'docker-latest-*' 2>/dev/null; then - echo 'docker_latest=true due to docker-latest-* tag' - docker_latest=true - elif [ "${{ github.event_name }}" = "release" ]; then - echo 'docker_latest=true due to release' - docker_latest=true + echo 'is_latest=true due to docker-latest-* tag' + is_latest=true + elif [ "${{ inputs.is_latest }}" = "true" ]; then + echo 'is_latest=true due to manual input from workflow_dispatch' + is_latest=true else - echo 'docker_latest=false' - docker_latest=false + echo 'is_latest=false' + is_latest=false fi - if git describe --tags --match "[v|e]*" --exact; then + # resolve profile + if git describe --tags --match "v*" --exact; then echo "This is an exact git tag, will publish images" is_exact='true' + PROFILE=emqx + elif git describe --tags --match "e*" --exact; then + echo "This is an exact git tag, will publish images" + is_exact='true' + PROFILE=emqx-enterprise else echo "This is NOT an exact git tag, will not publish images" is_exact='false' fi - case $tag in - refs/tags/v*) - PROFILE='emqx' + + case "${PROFILE:-$INPUTS_PROFILE}" in + emqx) + EDITION='Opensource' ;; - refs/tags/e*) - PROFILE=emqx-enterprise + emqx-enterprise) + EDITION='Enterprise' ;; *) - PROFILE=${{ github.event.inputs.profile }} - case "$PROFILE" in - emqx) - true - ;; - emqx-enterprise) - true - ;; - *) - echo "ERROR: Failed to resolve build profile" - exit 1 - ;; - esac + echo "ERROR: Failed to resolve build profile" + exit 1 ;; esac + VSN="$(./pkg-vsn.sh "$PROFILE")" - echo "Building $PROFILE image with tag $VSN (latest=$docker_latest)" - echo "IS_DOCKER_LATEST=$docker_latest" >> $GITHUB_OUTPUT + echo "Building emqx/$PROFILE:$VSN image (latest=$is_latest)" + echo "Push = $is_exact" + echo "IS_LATEST=$is_latest" >> $GITHUB_OUTPUT echo "IS_EXACT_TAG=$is_exact" >> $GITHUB_OUTPUT - echo "BUILD_PROFILE=$PROFILE" >> $GITHUB_OUTPUT - echo "DOCKER_TAG_VERSION=$VSN" >> $GITHUB_OUTPUT + echo "PROFILE=$PROFILE" >> $GITHUB_OUTPUT + echo "EDITION=$EDITION" >> $GITHUB_OUTPUT + echo "VERSION=$VSN" >> $GITHUB_OUTPUT - name: get_all_deps + env: + PROFILE: ${{ steps.get_profile.outputs.PROFILE }} run: | - make -C source deps-all + PROFILE=$PROFILE make -C source deps-$PROFILE zip -ryq source.zip source/* source/.[^.]* - uses: actions/upload-artifact@v3 with: @@ -100,37 +105,33 @@ jobs: path: source.zip docker: - runs-on: ${{ matrix.arch[1] }} + runs-on: ubuntu-22.04 needs: prepare strategy: fail-fast: false matrix: - arch: - - [amd64, ubuntu-20.04] - - [arm64, aws-arm64] profile: - - ${{ needs.prepare.outputs.BUILD_PROFILE }} + - "${{ needs.prepare.outputs.PROFILE }}" registry: - 'docker.io' - 
'public.ecr.aws' os: - - [alpine3.15.1, "alpine:3.15.1", "deploy/docker/Dockerfile.alpine"] - [debian11, "debian:11-slim", "deploy/docker/Dockerfile"] # NOTE: 'otp' and 'elixir' are to configure emqx-builder image # only support latest otp and elixir, not a matrix builder: - - 5.0-26 # update to latest + - 5.0-34 # update to latest otp: - - 24.3.4.2-1 # switch to 25 once ready to release 5.1 + - 24.3.4.2-3 # switch to 25 once ready to release 5.1 elixir: - - 1.13.4 # update to latest + - 'no_elixir' + - '1.13.4' # update to latest exclude: # TODO: publish enterprise to ecr too? - registry: 'public.ecr.aws' profile: emqx-enterprise + steps: - - uses: AutoModality/action-clean@v1 - if: matrix.arch[1] == 'aws-arm64' - uses: actions/download-artifact@v3 with: name: source @@ -138,16 +139,17 @@ jobs: - name: unzip source code run: unzip -q source.zip + - uses: docker/setup-qemu-action@v2 - uses: docker/setup-buildx-action@v2 - - name: Login for docker. + - name: Login to hub.docker.com uses: docker/login-action@v2 if: matrix.registry == 'docker.io' with: username: ${{ secrets.DOCKER_HUB_USER }} password: ${{ secrets.DOCKER_HUB_TOKEN }} - - name: Login for AWS ECR + - name: Login to AWS ECR uses: docker/login-action@v2 if: matrix.registry == 'public.ecr.aws' with: @@ -156,229 +158,44 @@ jobs: password: ${{ secrets.AWS_SECRET_ACCESS_KEY }} ecr: true - - uses: ./source/.github/actions/docker-meta + - name: prepare for docker/metadata-action + id: pre-meta + shell: bash + run: | + extra_labels= + img_suffix= + if [ "${{ matrix.elixir }}" != 'no_elixir' ]; then + img_suffix="-elixir" + extra_labels="org.opencontainers.image.elixir.version=${{ matrix.elixir }}" + fi + + echo "img_suffix=$img_suffix" >> $GITHUB_OUTPUT + echo "extra_labels=$extra_labels" >> $GITHUB_OUTPUT + + - uses: docker/metadata-action@v4 id: meta with: - profile: ${{ matrix.profile }} - registry: ${{ matrix.registry }} - arch: ${{ matrix.arch[0] }} - otp: ${{ matrix.otp }} - builder_base: ${{ matrix.os[0] }} - owner: ${{ github.repository_owner }} - docker_tags: ${{ needs.prepare.outputs.DOCKER_TAG_VERSION }} + images: | + ${{ matrix.registry }}/${{ github.repository_owner }}/${{ matrix.profile }} + flavor: | + suffix=${{ steps.pre-meta.outputs.img_suffix }} + tags: | + type=raw,value=${{ needs.prepare.outputs.VERSION }} + type=raw,value=latest,enable=${{ needs.prepare.outputs.IS_LATEST }} + labels: | + org.opencontainers.image.otp.version=${{ matrix.otp }} + org.opencontainers.image.edition=${{ needs.prepare.outputs.EDITION }} + ${{ steps.pre-meta.outputs.extra_labels }} - uses: docker/build-push-action@v3 with: push: ${{ needs.prepare.outputs.IS_EXACT_TAG == 'true' || github.repository_owner != 'emqx' }} pull: true no-cache: true - platforms: linux/${{ matrix.arch[0] }} + platforms: linux/amd64,linux/arm64 tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} build-args: | - BUILD_FROM=ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os[0] }} - RUN_FROM=${{ matrix.os[1] }} - EMQX_NAME=${{ steps.meta.outputs.emqx_name }} + EMQX_NAME=${{ matrix.profile }}${{ steps.pre-meta.outputs.img_suffix }} file: source/${{ matrix.os[2] }} context: source - - - name: Docker Hub Description - if: matrix.registry == 'docker.io' - uses: peter-evans/dockerhub-description@v3 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_PASSWORD }} - repository: "emqx/${{ needs.prepare.outputs.BUILD_PROFILE }}" - readme-filepath: 
./source/deploy/docker/README.md - short-description: "The most scalable open-source MQTT broker for IoT, IIoT, connected vehicles, and more." - - docker-elixir: - runs-on: ${{ matrix.arch[1] }} - needs: prepare - # do not build elixir images for ee for now - if: needs.prepare.outputs.BUILD_PROFILE == 'emqx' - - strategy: - fail-fast: false - matrix: - arch: - - [amd64, ubuntu-20.04] - - [arm64, aws-arm64] - profile: - - ${{ needs.prepare.outputs.BUILD_PROFILE }} - registry: - - 'docker.io' - os: - - [debian11, "debian:11-slim", "deploy/docker/Dockerfile"] - builder: - - 5.0-26 # update to latest - otp: - - 25.1.2-2 # update to latest - elixir: - - 1.13.4 # update to latest - - steps: - - uses: AutoModality/action-clean@v1 - if: matrix.arch[1] == 'aws-arm64' - - uses: actions/download-artifact@v3 - with: - name: source - path: . - - name: unzip source code - run: unzip -q source.zip - - - uses: docker/setup-buildx-action@v2 - - - name: Login for docker. - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKER_HUB_USER }} - password: ${{ secrets.DOCKER_HUB_TOKEN }} - - - uses: ./source/.github/actions/docker-meta - id: meta - with: - profile: ${{ matrix.profile }} - registry: ${{ matrix.registry }} - arch: ${{ matrix.arch[0] }} - otp: ${{ matrix.otp }} - elixir: ${{ matrix.elixir }} - builder_base: ${{ matrix.os[0] }} - owner: ${{ github.repository_owner }} - docker_tags: ${{ needs.prepare.outputs.DOCKER_TAG_VERSION }} - - - uses: docker/build-push-action@v3 - with: - push: ${{ needs.prepare.outputs.IS_EXACT_TAG == 'true' || github.repository_owner != 'emqx' }} - pull: true - no-cache: true - platforms: linux/${{ matrix.arch[0] }} - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - build-args: | - BUILD_FROM=ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os[0] }} - RUN_FROM=${{ matrix.os[1] }} - EMQX_NAME=${{ steps.meta.outputs.emqx_name }} - file: source/${{ matrix.os[2] }} - context: source - - docker-push-multi-arch-manifest: - # note, we only run on amd64 - if: needs.prepare.outputs.IS_EXACT_TAG - needs: - - prepare - - docker - runs-on: ${{ matrix.arch[1] }} - strategy: - fail-fast: false - matrix: - arch: - - [amd64, ubuntu-20.04] - profile: - - ${{ needs.prepare.outputs.BUILD_PROFILE }} - os: - - [alpine3.15.1, "alpine:3.15.1", "deploy/docker/Dockerfile.alpine"] - - [debian11, "debian:11-slim", "deploy/docker/Dockerfile"] - # NOTE: only support latest otp version, not a matrix - otp: - - 24.3.4.2-1 # switch to 25 once ready to release 5.1 - registry: - - 'docker.io' - - 'public.ecr.aws' - exclude: - - registry: 'public.ecr.aws' - profile: emqx-enterprise - - steps: - - uses: actions/download-artifact@v3 - with: - name: source - path: . 
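The docker-elixir and docker-push-multi-arch-manifest jobs being removed here built one image per architecture on separate runners and later stitched the per-arch tags together with scripts/docker-create-push-manifests.sh; the rewritten docker job replaces all of that with a single QEMU-backed buildx invocation (platforms: linux/amd64,linux/arm64 above). A rough bash sketch of the before/after, with image tags chosen purely for illustration:

    # Old flow (sketch): push per-arch images, then combine them into one manifest list.
    docker push emqx/emqx:example-amd64
    docker push emqx/emqx:example-arm64
    docker manifest create emqx/emqx:example emqx/emqx:example-amd64 emqx/emqx:example-arm64
    docker manifest push emqx/emqx:example

    # New flow (sketch): one buildx call cross-builds and pushes both platforms.
    docker buildx create --use
    docker buildx build --platform linux/amd64,linux/arm64 \
      -f deploy/docker/Dockerfile -t emqx/emqx:example --push .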
- - - name: unzip source code - run: unzip -q source.zip - - - uses: docker/login-action@v2 - if: matrix.registry == 'docker.io' - with: - username: ${{ secrets.DOCKER_HUB_USER }} - password: ${{ secrets.DOCKER_HUB_TOKEN }} - - - uses: docker/login-action@v2 - if: matrix.registry == 'public.ecr.aws' - with: - registry: public.ecr.aws - username: ${{ secrets.AWS_ACCESS_KEY_ID }} - password: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - ecr: true - - - uses: ./source/.github/actions/docker-meta - id: meta - with: - profile: ${{ matrix.profile }} - registry: ${{ matrix.registry }} - arch: ${{ matrix.arch[0] }} - otp: ${{ matrix.otp }} - builder_base: ${{ matrix.os[0] }} - owner: ${{ github.repository_owner }} - docker_tags: ${{ needs.prepare.outputs.DOCKER_TAG_VERSION }} - - - name: update manifest for multiarch image - working-directory: source - run: | - is_latest="${{ needs.prepare.outputs.IS_DOCKER_LATEST }}" - scripts/docker-create-push-manifests.sh "${{ steps.meta.outputs.tags }}" "$is_latest" - - docker-elixir-push-multi-arch-manifest: - # note, we only run on amd64 - # do not build enterprise elixir images for now - if: needs.prepare.outputs.IS_EXACT_TAG == 'true' && needs.prepare.outputs.BUILD_PROFILE == 'emqx' - needs: - - prepare - - docker-elixir - runs-on: ${{ matrix.arch[1] }} - strategy: - fail-fast: false - matrix: - arch: - - [amd64, ubuntu-20.04] - profile: - - ${{ needs.prepare.outputs.BUILD_PROFILE }} - # NOTE: for docker, only support latest otp version, not a matrix - otp: - - 25.1.2-2 # update to latest - elixir: - - 1.13.4 # update to latest - registry: - - 'docker.io' - - steps: - - uses: actions/download-artifact@v3 - with: - name: source - path: . - - - name: unzip source code - run: unzip -q source.zip - - - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKER_HUB_USER }} - password: ${{ secrets.DOCKER_HUB_TOKEN }} - - - uses: ./source/.github/actions/docker-meta - id: meta - with: - profile: ${{ matrix.profile }} - registry: ${{ matrix.registry }} - arch: ${{ matrix.arch[0] }} - otp: ${{ matrix.otp }} - elixir: ${{ matrix.elixir }} - builder_base: ${{ matrix.os[0] }} - owner: ${{ github.repository_owner }} - docker_tags: ${{ needs.prepare.outputs.DOCKER_TAG_VERSION }} - - - name: update manifest for multiarch image - working-directory: source - run: | - scripts/docker-create-push-manifests.sh "${{ steps.meta.outputs.tags }}" false diff --git a/.github/workflows/build_packages.yaml b/.github/workflows/build_packages.yaml index 36deef717..c88232dc9 100644 --- a/.github/workflows/build_packages.yaml +++ b/.github/workflows/build_packages.yaml @@ -5,8 +5,6 @@ concurrency: cancel-in-progress: true on: - schedule: - - cron: '0 */6 * * *' push: branches: - 'ci/**' @@ -22,24 +20,22 @@ on: jobs: prepare: - runs-on: ubuntu-20.04 - container: ghcr.io/emqx/emqx-builder/5.0-26:1.13.4-24.3.4.2-1-ubuntu20.04 + runs-on: ubuntu-22.04 + container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-24.3.4.2-3-ubuntu22.04 outputs: BUILD_PROFILE: ${{ steps.get_profile.outputs.BUILD_PROFILE }} IS_EXACT_TAG: ${{ steps.get_profile.outputs.IS_EXACT_TAG }} - + VERSION: ${{ steps.get_profile.outputs.VERSION }} steps: - uses: actions/checkout@v3 with: ref: ${{ github.event.inputs.branch_or_tag }} # when input is not given, the event tag is used - path: source fetch-depth: 0 - name: Get profile to build id: get_profile run: | - cd source - git config --global --add safe.directory "$(pwd)" + git config --global --add safe.directory "$GITHUB_WORKSPACE" tag=${{ github.ref }} if git describe --tags 
--match "[v|e]*" --exact; then echo "WARN: This is an exact git tag, will publish release" @@ -75,47 +71,36 @@ jobs: ;; esac echo "BUILD_PROFILE=$PROFILE" >> $GITHUB_OUTPUT - - name: get_all_deps - run: | - make -C source deps-all - zip -ryq source.zip source/* source/.[^.]* - - uses: actions/upload-artifact@v3 - with: - name: source - path: source.zip + echo "VERSION=$(./pkg-vsn.sh $PROFILE)" >> $GITHUB_OUTPUT windows: runs-on: windows-2019 if: startsWith(github.ref_name, 'v') - needs: prepare strategy: fail-fast: false matrix: profile: # for now only CE for windows - emqx steps: - - uses: actions/download-artifact@v3 + - uses: actions/checkout@v3 with: - name: source - path: . - - name: unzip source code - run: Expand-Archive -Path source.zip -DestinationPath ./ + ref: ${{ github.event.inputs.branch_or_tag }} + fetch-depth: 0 + - uses: ilammy/msvc-dev-cmd@v1.12.0 - - uses: emqx/setup-beam@v1.16.1-emqx + - uses: erlef/setup-beam@v1.15.2 with: otp-version: 24.3.4.6 - name: build env: PYTHON: python DIAGNOSTIC: 1 - working-directory: source run: | # ensure crypto app (openssl) erl -eval "erlang:display(crypto:info_lib())" -s init stop make ${{ matrix.profile }}-tgz - name: run emqx timeout-minutes: 5 - working-directory: source run: | ./_build/${{ matrix.profile }}/rel/emqx/bin/emqx start Start-Sleep -s 5 @@ -130,15 +115,7 @@ jobs: if: success() with: name: ${{ matrix.profile }} - path: source/_packages/${{ matrix.profile }}/ - - name: Send notification to Slack - uses: slackapi/slack-github-action@v1.23.0 - if: failure() && github.event_name == 'schedule' - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - with: - payload: | - {"text": "Scheduled run of ${{ github.workflow }}@Windows failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"} + path: _packages/${{ matrix.profile }}/ mac: needs: prepare @@ -148,22 +125,18 @@ jobs: profile: - ${{ needs.prepare.outputs.BUILD_PROFILE }} otp: - - 24.3.4.2-1 + - 24.3.4.2-3 os: - macos-11 + - macos-12 - macos-12-arm64 runs-on: ${{ matrix.os }} steps: - uses: emqx/self-hosted-cleanup-action@v1.0.3 - - uses: actions/download-artifact@v3 + - uses: actions/checkout@v3 with: - name: source - path: . - - name: unzip source code - run: | - ln -s . source - unzip -o -q source.zip - rm source source.zip + ref: ${{ github.event.inputs.branch_or_tag }} + fetch-depth: 0 - uses: ./.github/actions/package-macos with: profile: ${{ matrix.profile }} @@ -178,18 +151,12 @@ jobs: with: name: ${{ matrix.profile }} path: _packages/${{ matrix.profile }}/ - - name: Send notification to Slack - uses: slackapi/slack-github-action@v1.23.0 - if: failure() && github.event_name == 'schedule' - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - with: - payload: | - {"text": "Scheduled run of ${{ github.workflow }}@${{ matrix.os }} failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"} linux: needs: prepare runs-on: ${{ matrix.build_machine }} + # always run in builder container because the host might have the wrong OTP version etc. + # otherwise buildx.sh does not run docker if arch and os matches the target arch and os. 
container: image: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os }}" @@ -199,45 +166,47 @@ jobs: profile: - ${{ needs.prepare.outputs.BUILD_PROFILE }} otp: - - 24.3.4.2-1 + - 24.3.4.2-3 arch: - amd64 - arm64 os: + - ubuntu22.04 - ubuntu20.04 - ubuntu18.04 - debian11 - debian10 + - el9 - el8 - el7 - amzn2 build_machine: - aws-arm64 - - ubuntu-20.04 + - ubuntu-22.04 builder: - - 5.0-26 + - 5.0-34 elixir: - 1.13.4 exclude: - arch: arm64 - build_machine: ubuntu-20.04 + build_machine: ubuntu-22.04 - arch: amd64 build_machine: aws-arm64 include: - profile: emqx - otp: 25.1.2-2 + otp: 25.1.2-3 arch: amd64 - os: ubuntu20.04 - build_machine: ubuntu-20.04 - builder: 5.0-26 + os: ubuntu22.04 + build_machine: ubuntu-22.04 + builder: 5.0-34 elixir: 1.13.4 release_with: elixir - profile: emqx - otp: 25.1.2-2 + otp: 25.1.2-3 arch: amd64 os: amzn2 - build_machine: ubuntu-20.04 - builder: 5.0-26 + build_machine: ubuntu-22.04 + builder: 5.0-34 elixir: 1.13.4 release_with: elixir @@ -248,24 +217,20 @@ jobs: steps: - uses: AutoModality/action-clean@v1 if: matrix.build_machine == 'aws-arm64' - - uses: actions/download-artifact@v3 + + - uses: actions/checkout@v3 with: - name: source - path: . - - name: unzip source code - run: unzip -q source.zip + ref: ${{ github.event.inputs.branch_or_tag }} + fetch-depth: 0 + - name: build emqx packages - working-directory: source env: - BUILDER: ${{ matrix.builder }} ELIXIR: ${{ matrix.elixir }} - OTP: ${{ matrix.otp }} PROFILE: ${{ matrix.profile }} ARCH: ${{ matrix.arch }} - SYSTEM: ${{ matrix.os }} run: | set -eu - git config --global --add safe.directory "/__w/emqx/emqx" + git config --global --add safe.directory "$GITHUB_WORKSPACE" # Align path for CMake caches if [ ! "$PWD" = "/emqx" ]; then ln -s $PWD /emqx @@ -274,7 +239,8 @@ jobs: echo "pwd is $PWD" PKGTYPES="tgz pkg" IS_ELIXIR="no" - if [ ${{ matrix.release_with }} == 'elixir' ]; then + WITH_ELIXIR=${{ matrix.release_with }} + if [ "${WITH_ELIXIR:-}" == 'elixir' ]; then PKGTYPES="tgz" # set Elixir build flag IS_ELIXIR="yes" @@ -286,26 +252,18 @@ jobs: --pkgtype "${PKGTYPE}" \ --arch "${ARCH}" \ --elixir "${IS_ELIXIR}" \ - --builder "ghcr.io/emqx/emqx-builder/${BUILDER}:${ELIXIR}-${OTP}-${SYSTEM}" + --builder "force_host" done - uses: actions/upload-artifact@v3 if: success() with: name: ${{ matrix.profile }} - path: source/_packages/${{ matrix.profile }}/ - - name: Send notification to Slack - uses: slackapi/slack-github-action@v1.23.0 - if: failure() && github.event_name == 'schedule' - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - with: - payload: | - {"text": "Scheduled run of ${{ github.workflow }}@${{ matrix.os }} failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"} + path: _packages/${{ matrix.profile }}/ publish_artifacts: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 needs: [prepare, mac, linux] - if: ${{ needs.prepare.outputs.IS_EXACT_TAG }} + if: needs.prepare.outputs.IS_EXACT_TAG == 'true' strategy: fail-fast: false matrix: @@ -328,7 +286,7 @@ jobs: echo "$(cat $var.sha256) $var" | sha256sum -c || exit 1 done cd - - - uses: aws-actions/configure-aws-credentials@v1-node16 + - uses: aws-actions/configure-aws-credentials@v2 with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} diff --git a/.github/workflows/build_packages_cron.yaml b/.github/workflows/build_packages_cron.yaml new file mode 100644 index 000000000..b7ef44a79 --- /dev/null 
+++ b/.github/workflows/build_packages_cron.yaml @@ -0,0 +1,127 @@ +name: Scheduled build packages + +concurrency: + group: build-${{ github.event_name }}-${{ github.ref }} + cancel-in-progress: true + +on: + schedule: + - cron: '0 */6 * * *' + workflow_dispatch: + +jobs: + linux: + if: github.repository_owner == 'emqx' + runs-on: aws-${{ matrix.arch }} + # always run in builder container because the host might have the wrong OTP version etc. + # otherwise buildx.sh does not run docker if arch and os matches the target arch and os. + container: + image: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os }}" + + strategy: + fail-fast: false + matrix: + profile: + - ['emqx', 'master'] + - ['emqx-enterprise', 'release-50'] + otp: + - 24.3.4.2-3 + arch: + - amd64 + os: + - debian10 + - amzn2 + builder: + - 5.0-34 + elixir: + - 1.13.4 + + defaults: + run: + shell: bash + + steps: + - uses: emqx/self-hosted-cleanup-action@v1.0.3 + - uses: actions/checkout@v3 + with: + ref: ${{ matrix.profile[1] }} + fetch-depth: 0 + + - name: build emqx packages + env: + ELIXIR: ${{ matrix.elixir }} + PROFILE: ${{ matrix.profile[0] }} + ARCH: ${{ matrix.arch }} + run: | + set -eu + git config --global --add safe.directory "$GITHUB_WORKSPACE" + PKGTYPES="tgz pkg" + IS_ELIXIR="no" + for PKGTYPE in ${PKGTYPES}; + do + ./scripts/buildx.sh \ + --profile "${PROFILE}" \ + --pkgtype "${PKGTYPE}" \ + --arch "${ARCH}" \ + --elixir "${IS_ELIXIR}" \ + --builder "force_host" + done + - uses: actions/upload-artifact@v3 + if: success() + with: + name: ${{ matrix.profile[0] }} + path: _packages/${{ matrix.profile[0] }}/ + - name: Send notification to Slack + uses: slackapi/slack-github-action@v1.23.0 + if: failure() + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + with: + payload: | + {"text": "Scheduled build of ${{ matrix.profile[0] }} package for ${{ matrix.os }} failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"} + + mac: + runs-on: ${{ matrix.os }} + if: github.repository_owner == 'emqx' + + strategy: + fail-fast: false + matrix: + profile: + - emqx + branch: + - master + otp: + - 24.3.4.2-3 + os: + - macos-12 + - macos-12-arm64 + + steps: + - uses: emqx/self-hosted-cleanup-action@v1.0.3 + - uses: actions/checkout@v3 + with: + ref: ${{ matrix.branch }} + fetch-depth: 0 + - uses: ./.github/actions/package-macos + with: + profile: ${{ matrix.profile }} + otp: ${{ matrix.otp }} + os: ${{ matrix.os }} + apple_id_password: ${{ secrets.APPLE_ID_PASSWORD }} + apple_developer_identity: ${{ secrets.APPLE_DEVELOPER_IDENTITY }} + apple_developer_id_bundle: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE }} + apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }} + - uses: actions/upload-artifact@v3 + if: success() + with: + name: ${{ matrix.profile }} + path: _packages/${{ matrix.profile }}/ + - name: Send notification to Slack + uses: slackapi/slack-github-action@v1.23.0 + if: failure() + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + with: + payload: | + {"text": "Scheduled build of ${{ matrix.profile }} package for ${{ matrix.os }} failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"} diff --git a/.github/workflows/build_slim_packages.yaml b/.github/workflows/build_slim_packages.yaml index 0e72232bb..06bcb98a2 100644 --- a/.github/workflows/build_slim_packages.yaml +++ b/.github/workflows/build_slim_packages.yaml @@ -8,6 +8,7 @@ on: push: branches: - master + - 
release-50 pull_request: # GitHub pull_request action is by default triggered when # opened reopened or synchronize, @@ -29,13 +30,14 @@ jobs: fail-fast: false matrix: profile: - - ["emqx", "24.3.4.2-1", "el7"] - - ["emqx", "25.1.2-2", "ubuntu20.04"] - - ["emqx-enterprise", "24.3.4.2-1", "ubuntu20.04"] + - ["emqx", "24.3.4.2-3", "el7", "erlang"] + - ["emqx", "25.1.2-3", "ubuntu22.04", "elixir"] + - ["emqx-enterprise", "24.3.4.2-3", "amzn2", "erlang"] + - ["emqx-enterprise", "25.1.2-3", "ubuntu20.04", "erlang"] builder: - - 5.0-26 + - 5.0-34 elixir: - - 1.13.4 + - '1.13.4' container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.profile[1] }}-${{ matrix.profile[2] }}" @@ -52,22 +54,22 @@ jobs: run: | git config --global --add safe.directory "$GITHUB_WORKSPACE" - name: build and test tgz package + if: matrix.profile[3] == 'erlang' run: | make ${EMQX_NAME}-tgz ./scripts/pkg-tests.sh ${EMQX_NAME}-tgz - - name: run static checks - if: contains(matrix.os, 'ubuntu') - run: | - make static_checks - name: build and test deb/rpm packages + if: matrix.profile[3] == 'erlang' run: | make ${EMQX_NAME}-pkg ./scripts/pkg-tests.sh ${EMQX_NAME}-pkg - name: build and test tgz package (Elixir) + if: matrix.profile[3] == 'elixir' run: | make ${EMQX_NAME}-elixir-tgz ./scripts/pkg-tests.sh ${EMQX_NAME}-elixir-tgz - name: build and test deb/rpm packages (Elixir) + if: matrix.profile[3] == 'elixir' run: | make ${EMQX_NAME}-elixir-pkg ./scripts/pkg-tests.sh ${EMQX_NAME}-elixir-pkg @@ -80,7 +82,7 @@ jobs: name: "${{ matrix.profile[0] }}_schema_dump" path: | scripts/spellcheck - _build/${{ matrix.profile[0] }}/lib/emqx_dashboard/priv/www/static/schema.json + _build/docgen/${{ matrix.profile[0] }}/schema-en.json windows: runs-on: windows-2019 @@ -94,7 +96,7 @@ jobs: steps: - uses: actions/checkout@v3 - uses: ilammy/msvc-dev-cmd@v1.12.0 - - uses: emqx/setup-beam@v1.16.1-emqx + - uses: erlef/setup-beam@v1.15.2 with: otp-version: ${{ matrix.otp }} - name: build @@ -130,7 +132,7 @@ jobs: - emqx - emqx-enterprise otp: - - 24.3.4.2-1 + - 24.3.4.2-3 os: - macos-11 - macos-12-arm64 @@ -156,6 +158,59 @@ jobs: name: ${{ matrix.os }} path: _packages/**/* + docker: + runs-on: ubuntu-22.04 + + strategy: + fail-fast: false + matrix: + profile: + - ["emqx", "5.0.16"] + - ["emqx-enterprise", "5.0.1"] + + steps: + - uses: actions/checkout@v3 + - name: prepare + run: | + EMQX_NAME=${{ matrix.profile[0] }} + PKG_VSN=${PKG_VSN:-$(./pkg-vsn.sh $EMQX_NAME)} + EMQX_IMAGE_TAG=emqx/$EMQX_NAME:test + EMQX_IMAGE_OLD_VERSION_TAG=emqx/$EMQX_NAME:${{ matrix.profile[1] }} + echo "EMQX_NAME=$EMQX_NAME" >> $GITHUB_ENV + echo "PKG_VSN=$PKG_VSN" >> $GITHUB_ENV + echo "EMQX_IMAGE_TAG=$EMQX_IMAGE_TAG" >> $GITHUB_ENV + echo "EMQX_IMAGE_OLD_VERSION_TAG=$EMQX_IMAGE_OLD_VERSION_TAG" >> $GITHUB_ENV + - uses: docker/setup-buildx-action@v2 + - name: build and export to Docker + uses: docker/build-push-action@v4 + with: + context: . 
+ file: ./deploy/docker/Dockerfile + load: true + tags: ${{ env.EMQX_IMAGE_TAG }} + build-args: | + EMQX_NAME=${{ env.EMQX_NAME }} + - name: test docker image + run: | + CID=$(docker run -d --rm -P $EMQX_IMAGE_TAG) + HTTP_PORT=$(docker inspect --format='{{(index (index .NetworkSettings.Ports "18083/tcp") 0).HostPort}}' $CID) + ./scripts/test/emqx-smoke-test.sh localhost $HTTP_PORT + docker stop $CID + - name: test two nodes cluster with proto_dist=inet_tls in docker + run: | + ./scripts/test/start-two-nodes-in-docker.sh -P $EMQX_IMAGE_TAG $EMQX_IMAGE_OLD_VERSION_TAG + HTTP_PORT=$(docker inspect --format='{{(index (index .NetworkSettings.Ports "18083/tcp") 0).HostPort}}' haproxy) + ./scripts/test/emqx-smoke-test.sh localhost $HTTP_PORT + # cleanup + ./scripts/test/start-two-nodes-in-docker.sh -c + - name: export docker image + run: | + docker save $EMQX_IMAGE_TAG | gzip > $EMQX_NAME-$PKG_VSN.tar.gz + - uses: actions/upload-artifact@v3 + with: + name: "${{ matrix.profile[0] }}-docker" + path: "${{ env.EMQX_NAME }}-${{ env.PKG_VSN }}.tar.gz" + spellcheck: needs: linux strategy: @@ -172,4 +227,4 @@ jobs: path: /tmp/ - name: Run spellcheck run: | - bash /tmp/scripts/spellcheck/spellcheck.sh /tmp/_build/${{ matrix.profile }}/lib/emqx_dashboard/priv/www/static/schema.json + bash /tmp/scripts/spellcheck/spellcheck.sh /tmp/_build/docgen/${{ matrix.profile }}/schema-en.json diff --git a/.github/workflows/check_deps_integrity.yaml b/.github/workflows/check_deps_integrity.yaml index 59fc01d74..52ebf9efc 100644 --- a/.github/workflows/check_deps_integrity.yaml +++ b/.github/workflows/check_deps_integrity.yaml @@ -1,11 +1,12 @@ name: Check Rebar Dependencies -on: [pull_request, push] +on: + pull_request: jobs: check_deps_integrity: - runs-on: ubuntu-20.04 - container: ghcr.io/emqx/emqx-builder/5.0-26:1.13.4-25.1.2-2-ubuntu20.04 + runs-on: ubuntu-22.04 + container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-25.1.2-3-ubuntu22.04 steps: - uses: actions/checkout@v3 diff --git a/.github/workflows/code_style_check.yaml b/.github/workflows/code_style_check.yaml index 6ab8ea8b6..1508cdd6e 100644 --- a/.github/workflows/code_style_check.yaml +++ b/.github/workflows/code_style_check.yaml @@ -4,8 +4,8 @@ on: [pull_request] jobs: code_style_check: - runs-on: ubuntu-20.04 - container: "ghcr.io/emqx/emqx-builder/5.0-26:1.13.4-25.1.2-2-ubuntu20.04" + runs-on: ubuntu-22.04 + container: "ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-25.1.2-3-ubuntu22.04" steps: - uses: actions/checkout@v3 with: diff --git a/.github/workflows/elixir_apps_check.yaml b/.github/workflows/elixir_apps_check.yaml index 62000f421..7e942f3f3 100644 --- a/.github/workflows/elixir_apps_check.yaml +++ b/.github/workflows/elixir_apps_check.yaml @@ -2,13 +2,14 @@ name: Check Elixir Release Applications -on: [pull_request, push] +on: + pull_request: jobs: elixir_apps_check: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 # just use the latest builder - container: "ghcr.io/emqx/emqx-builder/5.0-26:1.13.4-25.1.2-2-ubuntu20.04" + container: "ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-25.1.2-3-ubuntu22.04" strategy: fail-fast: false @@ -35,6 +36,7 @@ jobs: run: ./scripts/check-elixir-applications.exs env: MIX_ENV: ${{ matrix.profile }} + PROFILE: ${{ matrix.profile }} # - name: check applications started with emqx_machine # run: ./scripts/check-elixir-emqx-machine-boot-discrepancies.exs # env: diff --git a/.github/workflows/elixir_deps_check.yaml b/.github/workflows/elixir_deps_check.yaml index 5e64d69c4..e967c186b 100644 --- 
a/.github/workflows/elixir_deps_check.yaml +++ b/.github/workflows/elixir_deps_check.yaml @@ -2,12 +2,13 @@ name: Elixir Dependency Version Check -on: [pull_request, push] +on: + pull_request: jobs: elixir_deps_check: - runs-on: ubuntu-20.04 - container: ghcr.io/emqx/emqx-builder/5.0-26:1.13.4-25.1.2-2-ubuntu20.04 + runs-on: ubuntu-22.04 + container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-25.1.2-3-ubuntu22.04 steps: - name: Checkout @@ -22,7 +23,18 @@ jobs: mix local.hex --force mix local.rebar --force mix deps.get + # we check only enterprise because `rebar3 tree`, even if an + # enterprise app is excluded from `project_app_dirs` in + # `rebar.config.erl`, will still list dependencies from it. + # Since the enterprise profile is a superset of the + # community one and thus more complete, we use the former. + env: + MIX_ENV: emqx-enterprise + PROFILE: emqx-enterprise - name: check elixir deps run: ./scripts/check-elixir-deps-discrepancies.exs + env: + MIX_ENV: emqx-enterprise + PROFILE: emqx-enterprise ... diff --git a/.github/workflows/elixir_release.yml b/.github/workflows/elixir_release.yml index f53051d1d..9a916d332 100644 --- a/.github/workflows/elixir_release.yml +++ b/.github/workflows/elixir_release.yml @@ -11,13 +11,13 @@ on: jobs: elixir_release_build: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 strategy: matrix: profile: - emqx - emqx-enterprise - container: ghcr.io/emqx/emqx-builder/5.0-26:1.13.4-25.1.2-2-ubuntu20.04 + container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-25.1.2-3-ubuntu22.04 steps: - name: Checkout uses: actions/checkout@v3 diff --git a/.github/workflows/green_master.yaml b/.github/workflows/green_master.yaml new file mode 100644 index 000000000..1161ca7d4 --- /dev/null +++ b/.github/workflows/green_master.yaml @@ -0,0 +1,26 @@ +--- + +name: Keep master green + +on: + schedule: + # run hourly + - cron: "0 * * * *" + workflow_dispatch: + +jobs: + rerun-failed-jobs: + runs-on: ubuntu-22.04 + if: github.repository_owner == 'emqx' + permissions: + checks: read + actions: write + steps: + - uses: actions/checkout@v3 + + - name: run script + shell: bash + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + python3 scripts/rerun-failed-checks.py diff --git a/.github/workflows/performance_test.yaml b/.github/workflows/performance_test.yaml new file mode 100644 index 000000000..6de9ddd21 --- /dev/null +++ b/.github/workflows/performance_test.yaml @@ -0,0 +1,126 @@ +name: Performance Test Suite + +on: + push: + branches: + - 'perf/**' + schedule: + - cron: '0 1 * * *' + workflow_dispatch: + inputs: + ref: + required: false + +jobs: + prepare: + runs-on: ubuntu-latest + if: github.repository_owner == 'emqx' + container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-25.1.2-3-ubuntu20.04 + outputs: + BENCH_ID: ${{ steps.prepare.outputs.BENCH_ID }} + PACKAGE_FILE: ${{ steps.package_file.outputs.PACKAGE_FILE }} + + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + ref: ${{ github.event.inputs.ref }} + - name: Work around https://github.com/actions/checkout/issues/766 + run: | + git config --global --add safe.directory "$GITHUB_WORKSPACE" + - id: prepare + run: | + echo "EMQX_NAME=emqx" >> $GITHUB_ENV + echo "CODE_PATH=$GITHUB_WORKSPACE" >> $GITHUB_ENV + echo "BENCH_ID=$(date --utc +%F)/emqx-$(./pkg-vsn.sh emqx)" >> $GITHUB_OUTPUT + - name: Build deb package + run: | + make ${EMQX_NAME}-pkg + ./scripts/pkg-tests.sh ${EMQX_NAME}-pkg + - name: Get package file name + id: package_file + run: | + echo "PACKAGE_FILE=$(find _packages/emqx -name 'emqx-*.deb' | 
head -n 1 | xargs basename)" >> $GITHUB_OUTPUT + - uses: actions/upload-artifact@v3 + with: + name: emqx-ubuntu20.04 + path: _packages/emqx/${{ steps.package_file.outputs.PACKAGE_FILE }} + + tf_emqx_perf_test: + runs-on: ubuntu-latest + needs: + - prepare + env: + TF_VAR_package_file: ${{ needs.prepare.outputs.PACKAGE_FILE }} + TF_VAR_grafana_api_key: ${{ secrets.TF_EMQX_PERF_TEST_GRAFANA_API_KEY }} + TF_AWS_REGION: eu-north-1 + TF_VAR_test_duration: 1800 + + steps: + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_PERF_TEST }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_PERF_TEST }} + aws-region: eu-north-1 + - name: Checkout tf-emqx-performance-test + uses: actions/checkout@v3 + with: + repository: emqx/tf-emqx-performance-test + path: tf-emqx-performance-test + - uses: actions/download-artifact@v3 + with: + name: emqx-ubuntu20.04 + path: tf-emqx-performance-test/ + - name: Setup Terraform + uses: hashicorp/setup-terraform@v2 + with: + terraform_wrapper: false + - name: 1on1 scenario + id: scenario_1on1 + working-directory: ./tf-emqx-performance-test + timeout-minutes: 60 + env: + TF_VAR_bench_id: "${{ needs.prepare.outputs.BENCH_ID }}/1on1" + TF_VAR_use_emqttb: 1 + TF_VAR_use_emqtt_bench: 0 + TF_VAR_emqttb_instance_count: 2 + TF_VAR_emqttb_instance_type: "c5.large" + TF_VAR_emqttb_scenario: "@pub --topic 't/%n' --pubinterval 10ms --qos 1 --publatency 50ms --size 16 --num-clients 25000 @sub --topic 't/%n' --num-clients 25000" + TF_VAR_emqx_instance_type: "c5.xlarge" + TF_VAR_emqx_instance_count: 3 + run: | + terraform init + terraform apply -auto-approve + ./wait-emqttb.sh + ./fetch-metrics.sh + MESSAGES_RECEIVED=$(cat metrics.json | jq '[.[]."messages.received"] | add') + MESSAGES_SENT=$(cat metrics.json | jq '[.[]."messages.sent"] | add') + echo MESSAGES_DROPPED=$(cat metrics.json | jq '[.[]."messages.dropped"] | add') >> $GITHUB_OUTPUT + echo PUB_MSG_RATE=$(($MESSAGES_RECEIVED / $TF_VAR_test_duration)) >> $GITHUB_OUTPUT + echo SUB_MSG_RATE=$(($MESSAGES_SENT / $TF_VAR_test_duration)) >> $GITHUB_OUTPUT + terraform destroy -auto-approve + - name: Send notification to Slack + uses: slackapi/slack-github-action@v1.23.0 + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + with: + payload: | + {"text": "Performance test result for 1on1 scenario (50k pub, 50k sub): ${{ job.status }}\nhttps://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Pub message rate*: ${{ steps.scenario_1on1.outputs.PUB_MSG_RATE }}\n*Sub message rate*: ${{ steps.scenario_1on1.outputs.SUB_MSG_RATE }}\nDropped messages: ${{ steps.scenario_1on1.outputs.MESSAGES_DROPPED }}"} + - name: terraform destroy + if: always() + working-directory: ./tf-emqx-performance-test + run: | + terraform destroy -auto-approve + - uses: actions/upload-artifact@v3 + if: success() + with: + name: metrics + path: "./tf-emqx-performance-test/metrics.json" + - uses: actions/upload-artifact@v3 + if: failure() + with: + name: terraform + path: | + ./tf-emqx-performance-test/.terraform + ./tf-emqx-performance-test/*.tfstate diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 3c157cf31..30de6f3b1 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -15,11 +15,11 @@ on: jobs: upload: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 strategy: fail-fast: false steps: - - uses: aws-actions/configure-aws-credentials@v1-node16 + - uses: 
aws-actions/configure-aws-credentials@v2 with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} @@ -53,25 +53,15 @@ jobs: BUCKET=${{ secrets.AWS_S3_BUCKET }} OUTPUT_DIR=${{ steps.profile.outputs.s3dir }} aws s3 cp --recursive s3://$BUCKET/$OUTPUT_DIR/${{ github.ref_name }} packages - cd packages - DEFAULT_BEAM_PLATFORM='otp24.3.4.2-1' - # all packages including full-name and default-name are uploaded to s3 - # but we only upload default-name packages (and elixir) as github artifacts - # so we rename (overwrite) non-default packages before uploading - while read -r fname; do - default_fname=$(echo "$fname" | sed "s/-${DEFAULT_BEAM_PLATFORM}//g") - echo "$fname -> $default_fname" - mv -f "$fname" "$default_fname" - done < <(find . -maxdepth 1 -type f | grep -E "emqx(-enterprise)?-5\.[0-9]+\.[0-9]+.*-${DEFAULT_BEAM_PLATFORM}" | grep -v elixir) - uses: alexellis/upload-assets@0.4.0 env: GITHUB_TOKEN: ${{ github.token }} with: asset_paths: '["packages/*"]' - name: update to emqx.io - if: github.event_name == 'release' || inputs.publish_release_artefacts + if: startsWith(github.ref_name, 'v') && (github.event_name == 'release' || inputs.publish_release_artefacts) run: | - set -e -x -u + set -eux curl -w %{http_code} \ --insecure \ -H "Content-Type: application/json" \ @@ -79,14 +69,35 @@ jobs: -X POST \ -d "{\"repo\":\"emqx/emqx\", \"tag\": \"${{ github.ref_name }}\" }" \ ${{ secrets.EMQX_IO_RELEASE_API }} - - name: update homebrew packages - if: steps.profile.outputs.profile == 'emqx' && (github.event_name == 'release' || inputs.publish_release_artefacts) + - name: Push to packagecloud.io + env: + PROFILE: ${{ steps.profile.outputs.profile }} + VERSION: ${{ steps.profile.outputs.version }} + PACKAGECLOUD_TOKEN: ${{ secrets.PACKAGECLOUD_TOKEN }} run: | - if [ -z $(echo $version | grep -oE "(alpha|beta|rc)\.[0-9]") ]; then - curl --silent --show-error \ - -H "Authorization: token ${{ secrets.CI_GIT_TOKEN }}" \ - -H "Accept: application/vnd.github.v3+json" \ - -X POST \ - -d "{\"ref\":\"v1.0.4\",\"inputs\":{\"version\": \"${{ github.ref_name }}\"}}" \ - "https://api.github.com/repos/emqx/emqx-ci-helper/actions/workflows/update_emqx_homebrew.yaml/dispatches" + set -eu + REPO=$PROFILE + if [ $PROFILE = 'emqx-enterprise' ]; then + REPO='emqx-enterprise5' fi + function push() { + docker run -t --rm -e PACKAGECLOUD_TOKEN=$PACKAGECLOUD_TOKEN -v $(pwd)/$2:/w/$2 -w /w ghcr.io/emqx/package_cloud push emqx/$REPO/$1 $2 + } + push "debian/buster" "packages/$PROFILE-$VERSION-debian10-amd64.deb" + push "debian/buster" "packages/$PROFILE-$VERSION-debian10-arm64.deb" + push "debian/bullseye" "packages/$PROFILE-$VERSION-debian11-amd64.deb" + push "debian/bullseye" "packages/$PROFILE-$VERSION-debian11-arm64.deb" + push "ubuntu/bionic" "packages/$PROFILE-$VERSION-ubuntu18.04-amd64.deb" + push "ubuntu/bionic" "packages/$PROFILE-$VERSION-ubuntu18.04-arm64.deb" + push "ubuntu/focal" "packages/$PROFILE-$VERSION-ubuntu20.04-amd64.deb" + push "ubuntu/focal" "packages/$PROFILE-$VERSION-ubuntu20.04-arm64.deb" + push "ubuntu/jammy" "packages/$PROFILE-$VERSION-ubuntu22.04-amd64.deb" + push "ubuntu/jammy" "packages/$PROFILE-$VERSION-ubuntu22.04-arm64.deb" + push "el/6" "packages/$PROFILE-$VERSION-amzn2-amd64.rpm" + push "el/6" "packages/$PROFILE-$VERSION-amzn2-arm64.rpm" + push "el/7" "packages/$PROFILE-$VERSION-el7-amd64.rpm" + push "el/7" "packages/$PROFILE-$VERSION-el7-arm64.rpm" + push "el/8" "packages/$PROFILE-$VERSION-el8-amd64.rpm" + push "el/8" 
"packages/$PROFILE-$VERSION-el8-arm64.rpm" + push "el/9" "packages/$PROFILE-$VERSION-el9-amd64.rpm" + push "el/9" "packages/$PROFILE-$VERSION-el9-arm64.rpm" diff --git a/.github/workflows/run_emqx_app_tests.yaml b/.github/workflows/run_emqx_app_tests.yaml index ddbb14609..551d0d9e6 100644 --- a/.github/workflows/run_emqx_app_tests.yaml +++ b/.github/workflows/run_emqx_app_tests.yaml @@ -12,26 +12,26 @@ jobs: strategy: matrix: builder: - - 5.0-26 + - 5.0-34 otp: - - 24.3.4.2-1 - - 25.1.2-2 + - 24.3.4.2-3 + - 25.1.2-3 # no need to use more than 1 version of Elixir, since tests # run using only Erlang code. This is needed just to specify # the base image. elixir: - 1.13.4 os: - - ubuntu20.04 + - ubuntu22.04 arch: - amd64 runs-on: - aws-amd64 - - ubuntu-20.04 + - ubuntu-22.04 use-self-hosted: - ${{ github.repository_owner == 'emqx' }} exclude: - - runs-on: ubuntu-20.04 + - runs-on: ubuntu-22.04 use-self-hosted: true - runs-on: aws-amd64 use-self-hosted: false diff --git a/.github/workflows/run_fvt_tests.yaml b/.github/workflows/run_fvt_tests.yaml index 9e44034fb..185c76be1 100644 --- a/.github/workflows/run_fvt_tests.yaml +++ b/.github/workflows/run_fvt_tests.yaml @@ -7,16 +7,17 @@ concurrency: on: push: branches: - - '**' + - master + - 'ci/**' tags: - v* pull_request: jobs: prepare: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 # prepare source with any OTP version, no need for a matrix - container: ghcr.io/emqx/emqx-builder/5.0-26:1.13.4-24.3.4.2-1-alpine3.15.1 + container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-24.3.4.2-3-debian11 steps: - uses: actions/checkout@v3 @@ -33,7 +34,7 @@ jobs: path: source.zip docker_test: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 needs: prepare strategy: @@ -47,17 +48,17 @@ jobs: - mnesia - rlog os: - - ["alpine3.15.1", "alpine:3.15.1"] + - ["debian11", "debian:11-slim"] builder: - - 5.0-26 + - 5.0-34 otp: - - 24.3.4.2-1 + - 24.3.4.2-3 elixir: - 1.13.4 arch: - amd64 steps: - - uses: emqx/setup-beam@v1.16.1-emqx + - uses: erlef/setup-beam@v1.15.2 with: otp-version: 24.3.4.6 - uses: actions/download-artifact@v3 @@ -107,7 +108,7 @@ jobs: docker exec node1.emqx.io node_dump helm_test: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 needs: prepare strategy: @@ -122,9 +123,9 @@ jobs: os: - ["debian11", "debian:11-slim"] builder: - - 5.0-26 + - 5.0-34 otp: - - 24.3.4.2-1 + - 24.3.4.2-3 elixir: - 1.13.4 arch: @@ -132,7 +133,7 @@ jobs: # - emqx-enterprise # TODO test enterprise steps: - - uses: emqx/setup-beam@v1.16.1-emqx + - uses: erlef/setup-beam@v1.15.2 with: otp-version: 24.3.4.6 - uses: actions/download-artifact@v3 @@ -166,8 +167,8 @@ jobs: --set image.pullPolicy=Never \ --set image.tag=$EMQX_TAG \ --set emqxAclConfig="" \ - --set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__RETRY_INTERVAL=2s \ - --set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__MAX_TOPIC_ALIAS=10 \ + --set emqxConfig.EMQX_MQTT__RETRY_INTERVAL=2s \ + --set emqxConfig.EMQX_MQTT__MAX_TOPIC_ALIAS=10 \ --set emqxConfig.EMQX_AUTHORIZATION__SOURCES=[] \ --set emqxConfig.EMQX_AUTHORIZATION__NO_MATCH=allow \ deploy/charts/${{ matrix.profile }} \ @@ -184,8 +185,8 @@ jobs: --set image.pullPolicy=Never \ --set image.tag=$EMQX_TAG \ --set emqxAclConfig="" \ - --set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__RETRY_INTERVAL=2s \ - --set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__MAX_TOPIC_ALIAS=10 \ + --set emqxConfig.EMQX_MQTT__RETRY_INTERVAL=2s \ + --set emqxConfig.EMQX_MQTT__MAX_TOPIC_ALIAS=10 \ --set emqxConfig.EMQX_AUTHORIZATION__SOURCES=[] \ --set emqxConfig.EMQX_AUTHORIZATION__NO_MATCH=allow \ deploy/charts/${{ 
matrix.profile }} \ @@ -201,12 +202,25 @@ jobs: echo "waiting emqx started"; sleep 10; done + - name: Get Token + timeout-minutes: 1 + run: | + kubectl port-forward service/${{ matrix.profile }} 18083:18083 > /dev/null & + + while + [ "$(curl --silent -X 'GET' 'http://127.0.0.1:18083/api/v5/status' | tail -n1)" != "emqx is running" ] + do + echo "waiting emqx" + sleep 1 + done + + echo "TOKEN=$(curl --silent -X 'POST' 'http://127.0.0.1:18083/api/v5/login' -H 'accept: application/json' -H 'Content-Type: application/json' -d '{"username": "admin","password": "public"}' | jq -r ".token")" >> $GITHUB_ENV + - name: Check cluster timeout-minutes: 10 run: | - kubectl port-forward service/${{ matrix.profile }} 18083:18083 > /dev/null & while - [ "$(curl --silent --basic -u admin:public -X GET http://127.0.0.1:18083/api/v5/cluster| jq '.nodes|length')" != "3" ]; + [ "$(curl --silent -H "Authorization: Bearer $TOKEN" -X GET http://127.0.0.1:18083/api/v5/cluster| jq '.nodes|length')" != "3" ]; do echo "waiting ${{ matrix.profile }} cluster scale" sleep 1 diff --git a/.github/workflows/run_gitlint.yaml b/.github/workflows/run_gitlint.yaml index 9eb03c0b8..52082c56e 100644 --- a/.github/workflows/run_gitlint.yaml +++ b/.github/workflows/run_gitlint.yaml @@ -4,41 +4,13 @@ on: [pull_request] jobs: run_gitlint: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 steps: - - name: Checkout source code - uses: actions/checkout@v3 - - name: Install gitlint - run: | - sudo apt-get update - sudo apt install gitlint - - name: Set auth header - if: endsWith(github.repository, 'enterprise') - run: | - echo 'AUTH_HEADER<<EOF' >> $GITHUB_ENV - echo "Authorization: token ${{ secrets.CI_GIT_TOKEN }}" >> $GITHUB_ENV - echo 'EOF' >> $GITHUB_ENV + - uses: actions/checkout@v3 + with: + fetch-depth: 0 - name: Run gitlint shell: bash run: | - pr_number=$(echo $GITHUB_REF | awk 'BEGIN { FS = "/" } ; { print $3 }') - messages="$(curl --silent --show-error \ - --header "${{ env.AUTH_HEADER }}" \ - --header "Accept: application/vnd.github.v3+json" \ - "https://api.github.com/repos/${GITHUB_REPOSITORY}/pulls/${pr_number}/commits")" - len=$(echo $messages | jq length) - result=true - for i in $( seq 0 $(($len - 1)) ); do - message=$(echo $messages | jq -r .[$i].commit.message) - echo "commit message: $message" - status=0 - echo $message | gitlint -C ./.github/workflows/.gitlint || status=$? - if [ $status -ne 0 ]; then - result=false - fi - done - if ! ${result} ; then - echo "Some of the commit messages are not structured as The Conventional Commits specification. Please check CONTRIBUTING.md for our process on PR." 
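The replacement below trades the per-commit GitHub API loop for a single containerized gitlint run over the whole PR range; checkout now uses fetch-depth: 0 so the base SHA is available locally. Roughly the same check can be reproduced on a developer machine (the base branch name here is illustrative):

    # Lint every commit between the PR base and HEAD with the repo's gitlint config.
    docker run --ulimit nofile=1024 -v "$(pwd)":/repo -w /repo \
      ghcr.io/emqx/gitlint --commits origin/master..HEAD \
      --config .github/workflows/.gitlint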
- exit 1 - fi - echo "success" + set -ex + docker run --ulimit nofile=1024 -v $(pwd):/repo -w /repo ghcr.io/emqx/gitlint --commits ${{ github.event.pull_request.base.sha }}..$GITHUB_SHA --config .github/workflows/.gitlint diff --git a/.github/workflows/run_jmeter_tests.yaml b/.github/workflows/run_jmeter_tests.yaml index 6eaf4aa75..e402c7fed 100644 --- a/.github/workflows/run_jmeter_tests.yaml +++ b/.github/workflows/run_jmeter_tests.yaml @@ -10,11 +10,11 @@ on: jobs: build_emqx_for_jmeter_tests: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 outputs: version: ${{ steps.build_docker.outputs.version}} steps: - - uses: emqx/setup-beam@v1.16.1-emqx + - uses: erlef/setup-beam@v1.15.2 with: otp-version: 24.3.4.6 - name: download jmeter @@ -44,7 +44,7 @@ jobs: path: ./emqx.tar advanced_feat: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 strategy: fail-fast: false @@ -57,7 +57,7 @@ jobs: needs: build_emqx_for_jmeter_tests steps: - - uses: emqx/setup-beam@v1.16.1-emqx + - uses: erlef/setup-beam@v1.15.2 with: otp-version: 24.3.4.6 - uses: actions/checkout@v3 @@ -92,7 +92,7 @@ jobs: - uses: actions/checkout@v3 with: repository: emqx/emqx-fvt - ref: broker-autotest-v2 + ref: broker-autotest-v5 path: scripts - uses: actions/setup-java@v3 with: @@ -126,7 +126,7 @@ jobs: - name: check logs run: | if cat jmeter_logs/${{ matrix.scripts_type }}.jtl | grep -e 'true' > /dev/null 2>&1; then - echo "check logs filed" + echo "check logs failed" exit 1 fi - uses: actions/upload-artifact@v3 @@ -136,7 +136,7 @@ jobs: path: ./jmeter_logs pgsql_authn_authz: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 strategy: fail-fast: false @@ -153,7 +153,7 @@ jobs: needs: build_emqx_for_jmeter_tests steps: - - uses: emqx/setup-beam@v1.16.1-emqx + - uses: erlef/setup-beam@v1.15.2 with: otp-version: 24.3.4.6 - uses: actions/checkout@v3 @@ -191,7 +191,7 @@ jobs: - uses: actions/checkout@v3 with: repository: emqx/emqx-fvt - ref: broker-autotest-v2 + ref: broker-autotest-v5 path: scripts - uses: actions/setup-java@v3 with: @@ -235,7 +235,7 @@ jobs: - name: check logs run: | if cat jmeter_logs/${{ matrix.scripts_type }}_${{ matrix.pgsql_tag }}.jtl | grep -e 'true' > /dev/null 2>&1; then - echo "check logs filed" + echo "check logs failed" exit 1 fi - uses: actions/upload-artifact@v3 @@ -245,7 +245,7 @@ jobs: path: ./jmeter_logs mysql_authn_authz: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 strategy: fail-fast: false @@ -259,7 +259,7 @@ jobs: needs: build_emqx_for_jmeter_tests steps: - - uses: emqx/setup-beam@v1.16.1-emqx + - uses: erlef/setup-beam@v1.15.2 with: otp-version: 24.3.4.6 - uses: actions/checkout@v3 @@ -297,7 +297,7 @@ jobs: - uses: actions/checkout@v3 with: repository: emqx/emqx-fvt - ref: broker-autotest-v2 + ref: broker-autotest-v5 path: scripts - uses: actions/setup-java@v3 with: @@ -341,7 +341,7 @@ jobs: - name: check logs run: | if cat jmeter_logs/${{ matrix.scripts_type }}_${{ matrix.mysql_tag }}.jtl | grep -e 'true' > /dev/null 2>&1; then - echo "check logs filed" + echo "check logs failed" exit 1 fi - uses: actions/upload-artifact@v3 @@ -351,7 +351,7 @@ jobs: path: ./jmeter_logs JWT_authn: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 strategy: fail-fast: false @@ -361,7 +361,7 @@ jobs: needs: build_emqx_for_jmeter_tests steps: - - uses: emqx/setup-beam@v1.16.1-emqx + - uses: erlef/setup-beam@v1.15.2 with: otp-version: 24.3.4.6 - uses: actions/checkout@v3 @@ -396,7 +396,7 @@ jobs: - uses: actions/checkout@v3 with: repository: emqx/emqx-fvt - ref: broker-autotest-v2 + ref: broker-autotest-v5 path: 
scripts - name: run jwks_server timeout-minutes: 10 @@ -439,7 +439,7 @@ jobs: - name: check logs run: | if cat jmeter_logs/${{ matrix.scripts_type }}.jtl | grep -e 'true' > /dev/null 2>&1; then - echo "check logs filed" + echo "check logs failed" exit 1 fi - uses: actions/upload-artifact@v3 @@ -449,7 +449,7 @@ jobs: path: ./jmeter_logs built_in_database_authn_authz: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 strategy: fail-fast: false @@ -460,7 +460,7 @@ jobs: needs: build_emqx_for_jmeter_tests steps: - - uses: emqx/setup-beam@v1.16.1-emqx + - uses: erlef/setup-beam@v1.15.2 with: otp-version: 24.3.4.6 - uses: actions/checkout@v3 @@ -496,7 +496,7 @@ jobs: - uses: actions/checkout@v3 with: repository: emqx/emqx-fvt - ref: broker-autotest-v2 + ref: broker-autotest-v5 path: scripts - uses: actions/setup-java@v3 with: @@ -531,7 +531,7 @@ jobs: - name: check logs run: | if cat jmeter_logs/${{ matrix.scripts_type }}_${{ matrix.mysql_tag }}.jtl | grep -e 'true' > /dev/null 2>&1; then - echo "check logs filed" + echo "check logs failed" exit 1 fi - uses: actions/upload-artifact@v3 @@ -541,7 +541,7 @@ jobs: path: ./jmeter_logs delete-artifact: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 needs: [advanced_feat,pgsql_authn_authz,JWT_authn,mysql_authn_authz,built_in_database_authn_authz] steps: - uses: geekyeggo/delete-artifact@v2 diff --git a/.github/workflows/run_relup_tests.yaml b/.github/workflows/run_relup_tests.yaml index 0cf7e51ee..2ad7c3345 100644 --- a/.github/workflows/run_relup_tests.yaml +++ b/.github/workflows/run_relup_tests.yaml @@ -4,19 +4,18 @@ concurrency: group: relup-${{ github.event_name }}-${{ github.ref }} cancel-in-progress: true -on: - push: - branches: - - '**' - tags: - - v* - - e* - pull_request: +# on: +# push: +# branches: +# - '**' +# tags: +# - e* +# pull_request: jobs: relup_test_plan: - runs-on: ubuntu-20.04 - container: "ghcr.io/emqx/emqx-builder/5.0-26:1.13.4-24.3.4.2-1-ubuntu20.04" + runs-on: ubuntu-22.04 + container: "ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-24.3.4.2-3-ubuntu22.04" outputs: CUR_EE_VSN: ${{ steps.find-versions.outputs.CUR_EE_VSN }} OLD_VERSIONS: ${{ steps.find-versions.outputs.OLD_VERSIONS }} @@ -43,7 +42,7 @@ jobs: run: | set -x cd emqx - make emqx-tgz + export PROFILE='emqx-enterprise' make emqx-enterprise-tgz - uses: actions/upload-artifact@v3 name: Upload built emqx and test scenario @@ -59,7 +58,7 @@ jobs: needs: - relup_test_plan if: needs.relup_test_plan.outputs.OLD_VERSIONS != '[]' - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 strategy: fail-fast: false matrix: @@ -72,7 +71,7 @@ jobs: shell: bash steps: # setup Erlang to run lux - - uses: emqx/setup-beam@v1.16.1-emqx + - uses: erlef/setup-beam@v1.15.2 with: otp-version: 24.3.4.6 - uses: actions/checkout@v3 diff --git a/.github/workflows/run_test_cases.yaml b/.github/workflows/run_test_cases.yaml index e8534ef68..b82b545df 100644 --- a/.github/workflows/run_test_cases.yaml +++ b/.github/workflows/run_test_cases.yaml @@ -7,227 +7,260 @@ concurrency: on: push: branches: - - '**' + - master + - 'ci/**' tags: - v* - e* pull_request: +env: + IS_CI: "yes" + jobs: - build-matrix: - runs-on: ubuntu-latest - outputs: - prepare: ${{ steps.matrix.outputs.prepare }} - host: ${{ steps.matrix.outputs.host }} - docker: ${{ steps.matrix.outputs.docker }} - runs-on: ${{ steps.runner.outputs.runs-on }} - steps: - - uses: actions/checkout@v3 - - name: Build matrix - id: matrix - run: | - APPS="$(./scripts/find-apps.sh --ci)" - MATRIX="$(echo "${APPS}" | jq -c ' - [ - (.[] | select(.profile == "emqx") 
| . + { - builder: "5.0-26", - otp: "25.1.2-2", - elixir: "1.13.4" - }), - (.[] | select(.profile == "emqx-enterprise") | . + { - builder: "5.0-26", - otp: ["24.3.4.2-1", "25.1.2-2"][], - elixir: "1.13.4" - }) - ] - ')" - echo "${MATRIX}" | jq - MATRIX_PREPARE="$(echo "${MATRIX}" | jq -c 'map({profile, builder, otp, elixir}) | unique')" - MATRIX_HOST="$(echo "${MATRIX}" | jq -c 'map(select(.runner == "host"))')" - MATRIX_DOCKER="$(echo "${MATRIX}" | jq -c 'map(select(.runner == "docker"))')" - echo "prepare=${MATRIX_PREPARE}" | tee -a $GITHUB_OUTPUT - echo "host=${MATRIX_HOST}" | tee -a $GITHUB_OUTPUT - echo "docker=${MATRIX_DOCKER}" | tee -a $GITHUB_OUTPUT - - name: Choose runner host - id: runner - run: | - RUNS_ON="ubuntu-20.04" - ${{ github.repository_owner == 'emqx' }} && RUNS_ON="aws-amd64" - echo "runs-on=${RUNS_ON}" | tee -a $GITHUB_OUTPUT + build-matrix: + runs-on: ubuntu-22.04 + outputs: + prepare: ${{ steps.matrix.outputs.prepare }} + host: ${{ steps.matrix.outputs.host }} + docker: ${{ steps.matrix.outputs.docker }} + runs-on: ${{ steps.runner.outputs.runs-on }} + steps: + - uses: actions/checkout@v3 + - name: Build matrix + id: matrix + run: | + APPS="$(./scripts/find-apps.sh --ci)" + MATRIX="$(echo "${APPS}" | jq -c ' + [ + (.[] | select(.profile == "emqx") | . + { + builder: "5.0-34", + otp: "25.1.2-3", + elixir: "1.13.4" + }), + (.[] | select(.profile == "emqx-enterprise") | . + { + builder: "5.0-34", + otp: ["24.3.4.2-3", "25.1.2-3"][], + elixir: "1.13.4" + }) + ] + ')" + echo "${MATRIX}" | jq + MATRIX_PREPARE="$(echo "${MATRIX}" | jq -c 'map({profile, builder, otp, elixir}) | unique')" + MATRIX_HOST="$(echo "${MATRIX}" | jq -c 'map(select(.runner == "host"))')" + MATRIX_DOCKER="$(echo "${MATRIX}" | jq -c 'map(select(.runner == "docker"))')" + echo "prepare=${MATRIX_PREPARE}" | tee -a $GITHUB_OUTPUT + echo "host=${MATRIX_HOST}" | tee -a $GITHUB_OUTPUT + echo "docker=${MATRIX_DOCKER}" | tee -a $GITHUB_OUTPUT + - name: Choose runner host + id: runner + run: | + RUNS_ON="ubuntu-22.04" + ${{ github.repository_owner == 'emqx' }} && RUNS_ON="aws-amd64" + echo "runs-on=${RUNS_ON}" | tee -a $GITHUB_OUTPUT - prepare: - runs-on: aws-amd64 - needs: [build-matrix] - strategy: - fail-fast: false - matrix: - include: ${{ fromJson(needs.build-matrix.outputs.prepare) }} - container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu20.04" - steps: - - uses: AutoModality/action-clean@v1 - - uses: actions/checkout@v3 - with: - path: source - - name: get_all_deps - working-directory: source - env: - PROFILE: ${{ matrix.profile }} - #DIAGNOSTIC: 1 - run: | - make ensure-rebar3 - # fetch all deps and compile - make ${{ matrix.profile }} - make test-compile - cd .. - zip -ryq source.zip source/* source/.[^.]* - - uses: actions/upload-artifact@v3 - with: - name: source-${{ matrix.profile }}-${{ matrix.otp }} - path: source.zip + prepare: + runs-on: ${{ needs.build-matrix.outputs.runs-on }} + needs: [build-matrix] + strategy: + fail-fast: false + matrix: + include: ${{ fromJson(needs.build-matrix.outputs.prepare) }} + container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu22.04" + steps: + - uses: AutoModality/action-clean@v1 + - uses: actions/checkout@v3 + with: + path: source + - name: get_all_deps + working-directory: source + env: + PROFILE: ${{ matrix.profile }} + run: | + make ensure-rebar3 + # fetch all deps and compile + make ${{ matrix.profile }}-compile + make test-compile + cd .. 
+ zip -ryq source.zip source/* source/.[^.]* + - uses: actions/upload-artifact@v3 + with: + name: source-${{ matrix.profile }}-${{ matrix.otp }} + path: source.zip - eunit_and_proper: - needs: - - build-matrix - - prepare - runs-on: ${{ needs.build-matrix.outputs.runs-on }} - strategy: - fail-fast: false - matrix: - include: ${{ fromJson(needs.build-matrix.outputs.prepare) }} - - defaults: - run: - shell: bash - container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu20.04" - - steps: - - uses: AutoModality/action-clean@v1 - - uses: actions/download-artifact@v3 - with: - name: source-${{ matrix.profile }}-${{ matrix.otp }} - path: . - - name: unzip source code - run: unzip -o -q source.zip - # produces eunit.coverdata - - name: eunit - env: - PROFILE: ${{ matrix.profile }} - CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }} - working-directory: source - run: make eunit - - # produces proper.coverdata - - name: proper - env: - PROFILE: ${{ matrix.profile }} - CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }} - working-directory: source - run: make proper - - - uses: actions/upload-artifact@v3 - with: - name: coverdata - path: source/_build/test/cover - - ct_docker: - needs: - - build-matrix - - prepare - runs-on: ${{ needs.build-matrix.outputs.runs-on }} - strategy: - fail-fast: false - matrix: - include: ${{ fromJson(needs.build-matrix.outputs.docker) }} - - defaults: - run: - shell: bash - - steps: - - uses: AutoModality/action-clean@v1 - - uses: actions/download-artifact@v3 - with: - name: source-${{ matrix.profile }}-${{ matrix.otp }} - path: . - - name: unzip source code - run: unzip -q source.zip - - name: run tests - working-directory: source - env: - DOCKER_CT_RUNNER_IMAGE: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu20.04" - MONGO_TAG: 5 - MYSQL_TAG: 8 - PGSQL_TAG: 13 - REDIS_TAG: 6 - INFLUXDB_TAG: 2.5.0 - PROFILE: ${{ matrix.profile }} - CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }} - run: ./scripts/ct/run.sh --app ${{ matrix.app }} - - uses: actions/upload-artifact@v3 - with: - name: coverdata - path: source/_build/test/cover - - uses: actions/upload-artifact@v3 - if: failure() - with: - name: logs-${{ matrix.profile }}-${{ matrix.prefix }}-${{ matrix.otp }} - path: source/_build/test/logs - - ct: - needs: - - build-matrix - - prepare - runs-on: ${{ needs.build-matrix.outputs.runs-on }} - strategy: - fail-fast: false - matrix: - include: ${{ fromJson(needs.build-matrix.outputs.host) }} - - container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu20.04" - defaults: - run: - shell: bash - - steps: - - uses: AutoModality/action-clean@v1 - - uses: actions/download-artifact@v3 - with: - name: source-${{ matrix.profile }}-${{ matrix.otp }} - path: . 
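A note on the `zip -ryq source.zip source/* source/.[^.]*` step in the `prepare` job above: `source/*` alone misses dotfiles, while a bare `source/.*` would also match `.` and `..`. A minimal sketch of the round-trip the downstream test jobs depend on (the directory and file names here are hypothetical):

```sh
# Hypothetical layout, for illustration only.
mkdir -p /tmp/demo/source && cd /tmp/demo
touch source/Makefile source/.tool-versions

echo source/* source/.[^.]*
# -> source/Makefile source/.tool-versions   (dotfiles included, `.` and `..` excluded)

zip -ryq source.zip source/* source/.[^.]*   # -r recurse, -y keep symlinks as symlinks, -q quiet
unzip -o -q source.zip                       # what the test jobs run after download-artifact
```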
- - name: unzip source code - run: unzip -q source.zip - - # produces $PROFILE-.coverdata - - name: run common test - working-directory: source - env: - PROFILE: ${{ matrix.profile }} - CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }} - run: | - make "${{ matrix.app }}-ct" - - uses: actions/upload-artifact@v3 - with: - name: coverdata - path: source/_build/test/cover - if-no-files-found: warn # do not fail if no coverdata found - - uses: actions/upload-artifact@v3 - if: failure() - with: - name: logs-${{ matrix.profile }}-${{ matrix.prefix }}-${{ matrix.otp }} - path: source/_build/test/logs - - make_cover: - needs: - - eunit_and_proper - - ct - - ct_docker - runs-on: ubuntu-20.04 - container: "ghcr.io/emqx/emqx-builder/5.0-26:1.13.4-24.3.4.2-1-ubuntu20.04" - steps: + static_checks: + needs: + - build-matrix + - prepare + runs-on: ${{ needs.build-matrix.outputs.runs-on }} + strategy: + fail-fast: false + matrix: + include: ${{ fromJson(needs.build-matrix.outputs.prepare) }} + container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu22.04" + steps: - uses: AutoModality/action-clean@v1 - uses: actions/download-artifact@v3 with: - name: source-emqx-enterprise-24.3.4.2-1 + name: source-${{ matrix.profile }}-${{ matrix.otp }} + path: . + - name: unzip source code + run: unzip -o -q source.zip + - uses: actions/cache@v3 + with: + path: "source/emqx_dialyzer_${{ matrix.otp }}_plt" + key: rebar3-dialyzer-plt-${{ matrix.profile }}-${{ matrix.otp }} + - name: run static checks + env: + PROFILE: ${{ matrix.profile }} + working-directory: source + run: make static_checks + + eunit_and_proper: + needs: + - build-matrix + - prepare + runs-on: ${{ needs.build-matrix.outputs.runs-on }} + strategy: + fail-fast: false + matrix: + include: ${{ fromJson(needs.build-matrix.outputs.prepare) }} + + defaults: + run: + shell: bash + container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu22.04" + + steps: + - uses: AutoModality/action-clean@v1 + - uses: actions/download-artifact@v3 + with: + name: source-${{ matrix.profile }}-${{ matrix.otp }} + path: . + - name: unzip source code + run: unzip -o -q source.zip + # produces eunit.coverdata + - name: eunit + env: + PROFILE: ${{ matrix.profile }} + CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }} + working-directory: source + run: make eunit + + # produces proper.coverdata + - name: proper + env: + PROFILE: ${{ matrix.profile }} + CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }} + working-directory: source + run: make proper + + - uses: actions/upload-artifact@v3 + with: + name: coverdata + path: source/_build/test/cover + + ct_docker: + needs: + - build-matrix + - prepare + runs-on: ${{ needs.build-matrix.outputs.runs-on }} + strategy: + fail-fast: false + matrix: + include: ${{ fromJson(needs.build-matrix.outputs.docker) }} + + defaults: + run: + shell: bash + + steps: + - uses: AutoModality/action-clean@v1 + - uses: actions/download-artifact@v3 + with: + name: source-${{ matrix.profile }}-${{ matrix.otp }} + path: . 
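The `actions/cache` step in the new `static_checks` job above keys the dialyzer PLT on profile and OTP version, so a warm cache lets `make static_checks` skip the expensive PLT build. A rough sketch of the effect, assuming the workflow's working-directory layout (the OTP suffix below is just the one used elsewhere in this diff):

```sh
# Sketch only: the PLT path mirrors the cache step's `path:` above.
PLT="source/emqx_dialyzer_24.3.4.2-3_plt"
if [ -f "$PLT" ]; then
  echo "cache hit: dialyzer reuses the PLT and goes straight to analysis"
else
  echo "cache miss: rebar3 rebuilds the PLT first, which dominates the job time"
fi
(cd source && make static_checks)
```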
+ - name: unzip source code + run: unzip -q source.zip + - name: run tests + working-directory: source + env: + DOCKER_CT_RUNNER_IMAGE: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu22.04" + MONGO_TAG: "5" + MYSQL_TAG: "8" + PGSQL_TAG: "13" + REDIS_TAG: "7.0" + INFLUXDB_TAG: "2.5.0" + TDENGINE_TAG: "3.0.2.4" + OPENTS_TAG: "9aa7f88" + PROFILE: ${{ matrix.profile }} + CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }} + run: ./scripts/ct/run.sh --ci --app ${{ matrix.app }} + - uses: actions/upload-artifact@v3 + with: + name: coverdata + path: source/_build/test/cover + - uses: actions/upload-artifact@v3 + if: failure() + with: + name: logs-${{ matrix.profile }}-${{ matrix.prefix }}-${{ matrix.otp }} + path: source/_build/test/logs + + ct: + needs: + - build-matrix + - prepare + runs-on: ${{ needs.build-matrix.outputs.runs-on }} + strategy: + fail-fast: false + matrix: + include: ${{ fromJson(needs.build-matrix.outputs.host) }} + + container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu22.04" + defaults: + run: + shell: bash + + steps: + - uses: AutoModality/action-clean@v1 + - uses: actions/download-artifact@v3 + with: + name: source-${{ matrix.profile }}-${{ matrix.otp }} + path: . + - name: unzip source code + run: unzip -q source.zip + + # produces $PROFILE-.coverdata + - name: run common test + working-directory: source + env: + PROFILE: ${{ matrix.profile }} + CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }} + run: | + make "${{ matrix.app }}-ct" + - uses: actions/upload-artifact@v3 + with: + name: coverdata + path: source/_build/test/cover + if-no-files-found: warn # do not fail if no coverdata found + - uses: actions/upload-artifact@v3 + if: failure() + with: + name: logs-${{ matrix.profile }}-${{ matrix.prefix }}-${{ matrix.otp }} + path: source/_build/test/logs + + make_cover: + needs: + - eunit_and_proper + - ct + - ct_docker + runs-on: ubuntu-22.04 + container: "ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-24.3.4.2-3-ubuntu22.04" + steps: + - uses: AutoModality/action-clean@v1 + - uses: actions/download-artifact@v3 + with: + name: source-emqx-enterprise-24.3.4.2-3 path: . 
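For reference, the `ct_docker` job above can be reproduced locally: the `*_TAG` variables pin the versions of the service containers that `scripts/ct/run.sh` brings up, and `--app` selects which application's suites to run. A hedged example (the app path is illustrative, not taken from this diff):

```sh
# Tags mirror the workflow env above; apps/emqx_authn is an example app path.
export MONGO_TAG=5 MYSQL_TAG=8 PGSQL_TAG=13 REDIS_TAG=7.0 \
       INFLUXDB_TAG=2.5.0 TDENGINE_TAG=3.0.2.4 OPENTS_TAG=9aa7f88
export PROFILE=emqx-enterprise
./scripts/ct/run.sh --ci --app apps/emqx_authn
```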
- name: unzip source code run: unzip -q source.zip @@ -256,15 +289,15 @@ jobs: if: failure() run: cat rebar3.crashdump - # do this in a separate job - upload_coverdata: - needs: make_cover - runs-on: ubuntu-20.04 - steps: - - name: Coveralls Finished - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - curl -v -k https://coveralls.io/webhook \ - --header "Content-Type: application/json" \ - --data "{\"repo_name\":\"$GITHUB_REPOSITORY\",\"repo_token\":\"$GITHUB_TOKEN\",\"payload\":{\"build_num\":$GITHUB_RUN_ID,\"status\":\"done\"}}" || true + # do this in a separate job + upload_coverdata: + needs: make_cover + runs-on: ubuntu-22.04 + steps: + - name: Coveralls Finished + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + curl -v -k https://coveralls.io/webhook \ + --header "Content-Type: application/json" \ + --data "{\"repo_name\":\"$GITHUB_REPOSITORY\",\"repo_token\":\"$GITHUB_TOKEN\",\"payload\":{\"build_num\":$GITHUB_RUN_ID,\"status\":\"done\"}}" || true diff --git a/.github/workflows/shellcheck.yaml b/.github/workflows/shellcheck.yaml index 56a6645e1..7f29572b9 100644 --- a/.github/workflows/shellcheck.yaml +++ b/.github/workflows/shellcheck.yaml @@ -1,10 +1,11 @@ name: Shellcheck -on: [pull_request, push] +on: + pull_request: jobs: shellcheck: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 steps: - name: Checkout source code uses: actions/checkout@v3 diff --git a/.github/workflows/stale.yaml b/.github/workflows/stale.yaml index 32abe1721..6b67c6f3b 100644 --- a/.github/workflows/stale.yaml +++ b/.github/workflows/stale.yaml @@ -10,7 +10,8 @@ on: jobs: stale: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 + if: github.repository_owner == 'emqx' permissions: issues: write pull-requests: none diff --git a/.github/workflows/upload-helm-charts.yaml b/.github/workflows/upload-helm-charts.yaml index 319b50e24..4b18efef8 100644 --- a/.github/workflows/upload-helm-charts.yaml +++ b/.github/workflows/upload-helm-charts.yaml @@ -11,11 +11,11 @@ on: jobs: upload: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 strategy: fail-fast: false steps: - - uses: aws-actions/configure-aws-credentials@v1-node16 + - uses: aws-actions/configure-aws-credentials@v2 with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} diff --git a/.gitignore b/.gitignore index 4780aab38..ceb12182f 100644 --- a/.gitignore +++ b/.gitignore @@ -43,8 +43,7 @@ tmp/ _packages elvis emqx_dialyzer_*_plt -*/emqx_dashboard/priv/www -*/emqx_dashboard/priv/i18n.conf +*/emqx_dashboard/priv/ dist.zip scripts/git-token apps/*/etc/*.all @@ -67,6 +66,9 @@ mix.lock apps/emqx/test/emqx_static_checks_data/master.bpapi # rendered configurations *.conf.rendered +*.conf.rendered.* lux_logs/ /.prepare bom.json +ct_run*/ +apps/emqx_conf/etc/emqx.conf.all.rendered* diff --git a/.tool-versions b/.tool-versions index 0f7c9b32e..b4d8f8675 100644 --- a/.tool-versions +++ b/.tool-versions @@ -1,2 +1,2 @@ -erlang 24.3.4.2-1 +erlang 24.3.4.2-3 elixir 1.13.4-otp-24 diff --git a/APL.txt b/APL.txt index 8dada3eda..dcb926a55 100644 --- a/APL.txt +++ b/APL.txt @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright {yyyy} {name of copyright owner} + Copyright (c) 2016-2023 EMQ Technologies Co., Ltd. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
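The `upload_coverdata` job above exists only to tell Coveralls that all parallel coverage uploads for this run are finished. With the `${{ ... }}` expressions expanded, the webhook call looks roughly like this (the values are illustrative):

```sh
# Illustrative values; in CI they come from GITHUB_REPOSITORY, GITHUB_TOKEN and GITHUB_RUN_ID.
curl https://coveralls.io/webhook \
  --header "Content-Type: application/json" \
  --data '{"repo_name":"emqx/emqx","repo_token":"<token>","payload":{"build_num":4876543210,"status":"done"}}'
```

The trailing `|| true` in the job keeps a flaky webhook from failing the whole pipeline.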
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 118e9a046..272a602e9 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -55,7 +55,7 @@ Must be one of the following: - **chore**: Updating grunt tasks etc; no production code change - **perf**: A code change that improves performance - **test**: Adding missing tests, refactoring tests; no production code change -- **build**: Changes that affect the CI/CD pipeline or build system or external dependencies (example scopes: travis, jenkins, makefile) +- **build**: Changes that affect the CI/CD pipeline or build system or external dependencies (example scopes: jenkins, makefile) - **ci**: Changes provided by DevOps for CI purposes. - **revert**: Reverts a previous commit. diff --git a/LICENSE b/LICENSE index 2a081b135..8ff0a9060 100644 --- a/LICENSE +++ b/LICENSE @@ -1,7 +1,7 @@ Source code in this repository is variously licensed under below licenses. -For EMQX: Apache License 2.0, see APL.txt, -which applies to all source files except for lib-ee sub-directory. +For Default: Apache License 2.0, see APL.txt, +which applies to all source files except for folders covered by the Business Source License. For EMQX Enterprise (since version 5.0): Business Source License 1.1, -see lib-ee/BSL.txt, which applies to source code in lib-ee sub-directory. +see apps/emqx_bridge_kafka/BSL.txt as an example; please check the license files under the sub-directories of apps. diff --git a/Makefile b/Makefile index c2b33786b..1ad4421aa 100644 --- a/Makefile +++ b/Makefile @@ -2,12 +2,8 @@ REBAR = $(CURDIR)/rebar3 BUILD = $(CURDIR)/build SCRIPTS = $(CURDIR)/scripts export EMQX_RELUP ?= true -export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.0-26:1.13.4-24.3.4.2-1-debian11 +export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-24.3.4.2-2-debian11 export EMQX_DEFAULT_RUNNER = debian:11-slim -export OTP_VSN ?= $(shell $(CURDIR)/scripts/get-otp-vsn.sh) -export ELIXIR_VSN ?= $(shell $(CURDIR)/scripts/get-elixir-vsn.sh) -export EMQX_DASHBOARD_VERSION ?= v1.1.4 -export EMQX_EE_DASHBOARD_VERSION ?= e1.0.1-beta.9 export EMQX_REL_FORM ?= tgz export QUICER_DOWNLOAD_FROM_RELEASE = 1 ifeq ($(OS),Windows_NT) @@ -17,6 +13,22 @@ else FIND=find endif +# Dashboard version +# from https://github.com/emqx/emqx-dashboard5 +export EMQX_DASHBOARD_VERSION ?= v1.2.4-1 +export EMQX_EE_DASHBOARD_VERSION ?= e1.0.6 + +# `:=` should be used here, otherwise the `$(shell ...)` will be executed every time the variable is used +# In make 4.4+, for backward-compatibility the value from the original environment is used, +# so the shell script would be executed many times. +# https://github.com/emqx/emqx/pull/10627 +ifeq ($(strip $(OTP_VSN)),) + export OTP_VSN := $(shell $(SCRIPTS)/get-otp-vsn.sh) +endif +ifeq ($(strip $(ELIXIR_VSN)),) + export ELIXIR_VSN := $(shell $(SCRIPTS)/get-elixir-vsn.sh) +endif + PROFILE ?= emqx REL_PROFILES := emqx emqx-enterprise PKG_PROFILES := emqx-pkg emqx-enterprise-pkg @@ -73,24 +85,36 @@ proper: $(REBAR) test-compile: $(REBAR) merge-config $(REBAR) as test compile +.PHONY: $(REL_PROFILES:%=%-compile) +$(REL_PROFILES:%=%-compile): $(REBAR) merge-config + $(REBAR) as $(@:%-compile=%) compile + .PHONY: ct ct: $(REBAR) merge-config @ENABLE_COVER_COMPILE=1 $(REBAR) ct --name $(CT_NODE_NAME) -c -v --cover_export_name $(CT_COVER_EXPORT_PREFIX)-ct +## only check bpapi for enterprise profile because it's a super-set.
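To see why the Makefile comment above insists on `:=` for `OTP_VSN` and `ELIXIR_VSN`: a recursively expanded variable (`=`, or the previous `?=`) re-runs its `$(shell ...)` on every reference, whereas `:=` evaluates it exactly once when the assignment is parsed. A self-contained sketch (the temporary file path is arbitrary; `.RECIPEPREFIX` needs GNU make 3.82+ and is used here only to avoid literal tabs):

```sh
cat > /tmp/lazy.mk <<'EOF'
.RECIPEPREFIX = >
SLOW  = $(shell sleep 1; echo done)   # recursive: re-runs the shell command per expansion
FAST := $(shell sleep 1; echo done)   # simple: runs the shell command once, at parse time
demo:
>@echo $(SLOW) $(SLOW) $(FAST) $(FAST)
EOF
time make -f /tmp/lazy.mk demo   # ~3s total: SLOW expands (and sleeps) twice, FAST only once
```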
.PHONY: static_checks static_checks: - @$(REBAR) as check do dialyzer, xref, ct --suite apps/emqx/test/emqx_static_checks --readable $(CT_READABLE) + @$(REBAR) as check do xref, dialyzer + @if [ "$${PROFILE}" = 'emqx-enterprise' ]; then $(REBAR) ct --suite apps/emqx/test/emqx_static_checks --readable $(CT_READABLE); fi + ./scripts/check-i18n-style.sh APPS=$(shell $(SCRIPTS)/find-apps.sh) .PHONY: $(APPS:%=%-ct) define gen-app-ct-target -$1-ct: $(REBAR) - @$(SCRIPTS)/pre-compile.sh $(PROFILE) - @ENABLE_COVER_COMPILE=1 $(REBAR) ct -c -v \ - --name $(CT_NODE_NAME) \ - --cover_export_name $(CT_COVER_EXPORT_PREFIX)-$(subst /,-,$1) \ - --suite $(shell $(SCRIPTS)/find-suites.sh $1) +$1-ct: $(REBAR) merge-config + $(eval SUITES := $(shell $(SCRIPTS)/find-suites.sh $1)) +ifneq ($(SUITES),) + @ENABLE_COVER_COMPILE=1 $(REBAR) ct -c -v \ + --readable=$(CT_READABLE) \ + --name $(CT_NODE_NAME) \ + --cover_export_name $(CT_COVER_EXPORT_PREFIX)-$(subst /,-,$1) \ + --suite $(SUITES) +else + @echo 'No suites found for $1' +endif endef $(foreach app,$(APPS),$(eval $(call gen-app-ct-target,$(app)))) @@ -103,7 +127,7 @@ endef $(foreach app,$(APPS),$(eval $(call gen-app-prop-target,$(app)))) .PHONY: ct-suite -ct-suite: $(REBAR) +ct-suite: $(REBAR) merge-config ifneq ($(TESTCASE),) ifneq ($(GROUP),) $(REBAR) ct -v --readable=$(CT_READABLE) --name $(CT_NODE_NAME) --suite $(SUITE) --case $(TESTCASE) --group $(GROUP) @@ -130,6 +154,11 @@ COMMON_DEPS := $(REBAR) $(REL_PROFILES:%=%): $(COMMON_DEPS) @$(BUILD) $(@) rel +.PHONY: compile $(PROFILES:%=compile-%) +compile: $(PROFILES:%=compile-%) +$(PROFILES:%=compile-%): + @$(BUILD) $(@:compile-%=%) apps + ## Not calling rebar3 clean because ## 1. rebar3 clean relies on rebar3, meaning it reads config, fetches dependencies etc. ## 2. 
it's slow @@ -148,7 +177,9 @@ $(PROFILES:%=clean-%): .PHONY: clean-all clean-all: @rm -f rebar.lock + @rm -rf deps @rm -rf _build + @rm -f emqx_dialyzer_*_plt .PHONY: deps-all deps-all: $(REBAR) $(PROFILES:%=deps-%) @@ -212,11 +243,15 @@ endef $(foreach pt,$(PKG_PROFILES),$(eval $(call gen-pkg-target,$(pt)))) .PHONY: run -run: $(PROFILE) quickrun +run: compile-$(PROFILE) quickrun .PHONY: quickrun quickrun: - ./_build/$(PROFILE)/rel/emqx/bin/emqx console + ./dev -p $(PROFILE) + +## Take the currently set PROFILE +docker: + @$(BUILD) $(PROFILE) docker ## docker target is to create docker instructions .PHONY: $(REL_PROFILES:%=%-docker) $(REL_PROFILES:%=%-elixir-docker) @@ -230,7 +265,6 @@ $(foreach zt,$(ALL_DOCKERS),$(eval $(call gen-docker-target,$(zt)))) .PHONY: merge-config: @$(SCRIPTS)/merge-config.escript - @$(SCRIPTS)/merge-i18n.escript ## elixir target is to create release packages using Elixir's Mix .PHONY: $(REL_PROFILES:%=%-elixir) $(PKG_PROFILES:%=%-elixir) diff --git a/README-CN.md b/README-CN.md index 7e8cdd8a7..314b34b9a 100644 --- a/README-CN.md +++ b/README-CN.md @@ -1,7 +1,7 @@ # EMQX [![GitHub Release](https://img.shields.io/github/release/emqx/emqx?color=brightgreen&label=Release)](https://github.com/emqx/emqx/releases) -[![Build Status](https://img.shields.io/travis/emqx/emqx?label=Build)](https://travis-ci.org/emqx/emqx) +[![Build Status](https://github.com/emqx/emqx/actions/workflows/run_test_cases.yaml/badge.svg)](https://github.com/emqx/emqx/actions/workflows/run_test_cases.yaml) [![Coverage Status](https://img.shields.io/coveralls/github/emqx/emqx/master?label=Coverage)](https://coveralls.io/github/emqx/emqx?branch=master) [![Docker Pulls](https://img.shields.io/docker/pulls/emqx/emqx?label=Docker%20Pulls)](https://hub.docker.com/r/emqx/emqx) [![Slack](https://img.shields.io/badge/Slack-EMQ-39AE85?logo=slack)](https://slack-invite.emqx.io/) @@ -11,9 +11,6 @@ [![YouTube](https://img.shields.io/badge/Subscribe-EMQ%20中文-FF0000?logo=youtube)](https://www.youtube.com/channel/UCir_r04HIsLjf2qqyZ4A8Cg) - -[English](./README.md) | 简体中文 | [русский](./README-RU.md) - EMQX 是一款全球下载量超千万的大规模分布式物联网 MQTT 服务器,单集群支持 1 亿物联网设备连接,消息分发时延低于 1 毫秒。为高可靠、高性能的物联网实时数据移动、处理和集成提供动力,助力企业构建关键业务的 IoT 平台与应用。 EMQX 自 2013 年在 GitHub 发布开源版本以来,获得了来自 50 多个国家和地区的 20000 余家企业用户的广泛认可,累计连接物联网关键设备超过 1 亿台。 @@ -76,7 +73,7 @@ EMQX Cloud 文档:[docs.emqx.com/zh/cloud/latest/](https://docs.emqx.com/zh/cl 我们选取了各个编程语言中热门的 MQTT 客户端 SDK,并提供代码示例,帮助您快速掌握 MQTT 客户端库的使用。 -- [MQTT X](https://mqttx.app/zh) +- [MQTTX](https://mqttx.app/zh) 优雅的跨平台 MQTT 5.0 客户端工具,提供了桌面端、命令行、Web 三种版本,帮助您更快的开发和调试 MQTT 服务和应用。 diff --git a/README-RU.md b/README-RU.md index 8a35177af..6baf38e2c 100644 --- a/README-RU.md +++ b/README-RU.md @@ -1,7 +1,7 @@ # Брокер EMQX [![GitHub Release](https://img.shields.io/github/release/emqx/emqx?color=brightgreen&label=Release)](https://github.com/emqx/emqx/releases) -[![Build Status](https://img.shields.io/travis/emqx/emqx?label=Build)](https://travis-ci.org/emqx/emqx) +[![Build Status](https://github.com/emqx/emqx/actions/workflows/run_test_cases.yaml/badge.svg)](https://github.com/emqx/emqx/actions/workflows/run_test_cases.yaml) [![Coverage Status](https://img.shields.io/coveralls/github/emqx/emqx/master?label=Coverage)](https://coveralls.io/github/emqx/emqx?branch=master) [![Docker Pulls](https://img.shields.io/docker/pulls/emqx/emqx?label=Docker%20Pulls)](https://hub.docker.com/r/emqx/emqx) [![Slack](https://img.shields.io/badge/Slack-EMQ-39AE85?logo=slack)](https://slack-invite.emqx.io/) @@ -9,7 +9,6 @@ 
[![Twitter](https://img.shields.io/badge/Follow-EMQ-1DA1F2?logo=twitter)](https://twitter.com/EMQTech) [![YouTube](https://img.shields.io/badge/Subscribe-EMQ-FF0000?logo=youtube)](https://www.youtube.com/channel/UC5FjR77ErAxvZENEWzQaO5Q) -[English](./README.md) | [简体中文](./README-CN.md) | русский *EMQX* — это самый масштабируемый и популярный высокопроизводительный MQTT брокер с полностью открытым кодом для интернета вещей, межмашинного взаимодействия и мобильных приложений. EMQX может поддерживать более чем 100 миллионов одновременных соединенией на одном кластере с задержкой в 1 миллисекунду, а также принимать и обрабабывать миллионы MQTT сообщений в секунду. @@ -72,7 +71,7 @@ docker run -d --name emqx -p 1883:1883 -p 8083:8083 -p 8883:8883 -p 8084:8084 -p Мы выбрали популярные SDK клиентов MQTT на различных языках программирования и предоставили примеры кода, которые помогут вам быстро понять, как использовать клиенты MQTT. -- [MQTT X](https://mqttx.app/) +- [MQTTX](https://mqttx.app/) Элегантный кроссплатформенный клиент MQTT 5.0, в виде десктопного приложения, приложения для командной строки и веб-приложения, чтобы помочь вам быстрее разрабатывать и отлаживать службы и приложения MQTT. diff --git a/README.md b/README.md index 1831ced60..28b8cbf43 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ # EMQX [![GitHub Release](https://img.shields.io/github/release/emqx/emqx?color=brightgreen&label=Release)](https://github.com/emqx/emqx/releases) -[![Build Status](https://img.shields.io/travis/emqx/emqx?label=Build)](https://travis-ci.org/emqx/emqx) +[![Build Status](https://github.com/emqx/emqx/actions/workflows/run_test_cases.yaml/badge.svg)](https://github.com/emqx/emqx/actions/workflows/run_test_cases.yaml) [![Coverage Status](https://img.shields.io/coveralls/github/emqx/emqx/master?label=Coverage)](https://coveralls.io/github/emqx/emqx?branch=master) [![Docker Pulls](https://img.shields.io/docker/pulls/emqx/emqx?label=Docker%20Pulls)](https://hub.docker.com/r/emqx/emqx) [![Slack](https://img.shields.io/badge/Slack-EMQ-39AE85?logo=slack)](https://slack-invite.emqx.io/) @@ -10,9 +10,6 @@ [![YouTube](https://img.shields.io/badge/Subscribe-EMQ-FF0000?logo=youtube)](https://www.youtube.com/channel/UC5FjR77ErAxvZENEWzQaO5Q) - -English | [简体中文](./README-CN.md) | [русский](./README-RU.md) - EMQX is the world's most scalable open-source MQTT broker with a high performance that connects 100M+ IoT devices in 1 cluster, while maintaining 1M message per second throughput and sub-millisecond latency. EMQX supports multiple open standard protocols like MQTT, HTTP, QUIC, and WebSocket. It’s 100% compliant with MQTT 5.0 and 3.x standard, and secures bi-directional communication with MQTT over TLS/SSL and various authentication mechanisms. @@ -25,7 +22,7 @@ For more information, please visit [EMQX homepage](https://www.emqx.io/). ## Get Started -#### EMQX Cloud +#### Run EMQX in the Cloud The simplest way to set up EMQX is to create a managed deployment with EMQX Cloud. You can [try EMQX Cloud for free](https://www.emqx.com/en/signup?utm_source=github.com&utm_medium=referral&utm_campaign=emqx-readme-to-cloud&continue=https://cloud-intl.emqx.com/console/deployments/0?oper=new), no credit card required. @@ -62,6 +59,7 @@ For more organised improvement proposals, you can send pull requests to [EIP](ht ## Get Involved - Follow [@EMQTech on Twitter](https://twitter.com/EMQTech). +- Join our [Slack](https://slack-invite.emqx.io/). 
- If you have a specific question, check out our [discussion forums](https://github.com/emqx/emqx/discussions). - For general discussions, join us on the [official Discord](https://discord.gg/xYGf3fQnES) team. - Keep updated on [EMQX YouTube](https://www.youtube.com/channel/UC5FjR77ErAxvZENEWzQaO5Q) by subscribing. @@ -76,7 +74,7 @@ For more organised improvement proposals, you can send pull requests to [EIP](ht We have selected popular MQTT client SDKs in various programming languages and provided code examples to help you quickly understand the use of MQTT clients. -- [MQTT X](https://mqttx.app/) +- [MQTTX](https://mqttx.app/) An elegant cross-platform MQTT 5.0 client tool that provides desktop, command line, and web to help you develop and debug MQTT services and applications faster. diff --git a/apps/emqx/etc/emqx.conf b/apps/emqx/etc/emqx.conf index 43dcfd411..e69de29bb 100644 --- a/apps/emqx/etc/emqx.conf +++ b/apps/emqx/etc/emqx.conf @@ -1,39 +0,0 @@ -listeners.tcp.default { - bind = "0.0.0.0:1883" - max_connections = 1024000 -} - -listeners.ssl.default { - bind = "0.0.0.0:8883" - max_connections = 512000 - ssl_options { - keyfile = "{{ platform_etc_dir }}/certs/key.pem" - certfile = "{{ platform_etc_dir }}/certs/cert.pem" - cacertfile = "{{ platform_etc_dir }}/certs/cacert.pem" - } -} - -listeners.ws.default { - bind = "0.0.0.0:8083" - max_connections = 1024000 - websocket.mqtt_path = "/mqtt" -} - -listeners.wss.default { - bind = "0.0.0.0:8084" - max_connections = 512000 - websocket.mqtt_path = "/mqtt" - ssl_options { - keyfile = "{{ platform_etc_dir }}/certs/key.pem" - certfile = "{{ platform_etc_dir }}/certs/cert.pem" - cacertfile = "{{ platform_etc_dir }}/certs/cacert.pem" - } -} - -# listeners.quic.default { -# enabled = true -# bind = "0.0.0.0:14567" -# max_connections = 1024000 -# keyfile = "{{ platform_etc_dir }}/certs/key.pem" -# certfile = "{{ platform_etc_dir }}/certs/cert.pem" -#} diff --git a/apps/emqx/etc/ssl_dist.conf b/apps/emqx/etc/ssl_dist.conf index 6048ddf60..b4c16e2cc 100644 --- a/apps/emqx/etc/ssl_dist.conf +++ b/apps/emqx/etc/ssl_dist.conf @@ -1,7 +1,9 @@ -%% The options in the {server, Opts} tuple are used when calling ssl:ssl_accept/3, -%% and the options in the {client, Opts} tuple are used when calling ssl:connect/4. -%% -%% More information at: http://erlang.org/doc/apps/ssl/ssl_distribution.html +%% This additional config file is used when the config 'cluster.proto_dist' in emqx.conf is set to 'inet_tls'. +%% Which means the EMQX nodes will connect to each other over TLS. +%% For more information about inter-broker security, see: https://docs.emqx.com/en/enterprise/v5.0/deploy/cluster/security.html + +%% For more information in technical details see: http://erlang.org/doc/apps/ssl/ssl_distribution.html + [{server, [ %{log_level, debug}, %% NOTE: debug level logging impacts performance, and need to set EMQX logging level to 'debug' diff --git a/apps/emqx/etc/vm.args.cloud b/apps/emqx/etc/vm.args.cloud index edd6944c2..34fc19777 100644 --- a/apps/emqx/etc/vm.args.cloud +++ b/apps/emqx/etc/vm.args.cloud @@ -24,9 +24,6 @@ ## Sets the maximum number of atoms the virtual machine can handle. #+t 1048576 -## Set the location of crash dumps -#-env ERL_CRASH_DUMP {{ platform_log_dir }}/crash.dump - ## Set how many times generational garbages collections can be done without ## forcing a fullsweep collection. -env ERL_FULLSWEEP_AFTER 1000 @@ -40,11 +37,6 @@ ## Prevent user from accidentally calling a function from the prompt that could harm a running system. 
-stdlib restricted_shell emqx_restricted_shell -## Specifies the net_kernel tick time in seconds. -## This is the approximate time a connected node may be unresponsive until -## it is considered down and thereby disconnected. --kernel net_ticktime 120 - ## Sets the distribution buffer busy limit (dist_buf_busy_limit). ## Preferably set in `emqx.conf`, #+zdbbl 8192 @@ -121,3 +113,6 @@ ## Mnesia thresholds -mnesia dump_log_write_threshold 5000 -mnesia dump_log_time_threshold 60000 + +## Disable os_mon's disksup by default +-os_mon start_disksup false diff --git a/apps/emqx/i18n/emqx_limiter_i18n.conf b/apps/emqx/i18n/emqx_limiter_i18n.conf deleted file mode 100644 index 3657df694..000000000 --- a/apps/emqx/i18n/emqx_limiter_i18n.conf +++ /dev/null @@ -1,190 +0,0 @@ -emqx_limiter_schema { - - failure_strategy { - desc { - en: """The strategy when all the retries failed.""" - zh: """当所有的重试都失败后的处理策略""" - } - label: { - en: """Failure Strategy""" - zh: """失败策略""" - } - } - - max_retry_time { - desc { - en: """The maximum retry time when acquire failed.""" - zh: """申请失败后,尝试重新申请的时长最大值""" - } - label: { - en: """Max Retry Time""" - zh: """最大重试时间""" - } - } - - divisible { - desc { - en: """Is it possible to split the number of requested tokens?""" - zh: """申请的令牌数是否可以被分割""" - } - label: { - en: """Divisible""" - zh: """是否可分割""" - } - } - - client_bucket_capacity { - desc { - en: """The capacity of per user.""" - zh: """每个使用者的令牌容量上限""" - } - label: { - en: """Capacity""" - zh: """容量""" - } - } - - capacity { - desc { - en: """The capacity of this token bucket.""" - zh: """该令牌桶的容量""" - } - label: { - en: """Capacity""" - zh: """容量""" - } - } - - low_watermark { - desc { - en: """If the remaining tokens are lower than this value, -the check/consume will succeed, but it will be forced to wait for a short period of time.""" - zh: """当桶中剩余的令牌数低于这个值,即使令牌申请成功了,也会被强制暂停一会儿""" - } - label: { - en: """Low Watermark""" - zh: """低水位线""" - } - } - - initial { - desc { - en: """The initial number of tokens for this bucket.""" - zh: """桶中的初始令牌数""" - } - label: { - en: """Initial""" - zh: """初始令牌数""" - } - } - - rate { - desc { - en: """Rate for this bucket.""" - zh: """桶的令牌生成速率""" - } - label: { - en: """Rate""" - zh: """速率""" - } - } - - client { - desc { - en: """The rate limit for each user of the bucket""" - zh: """对桶的每个使用者的速率控制设置""" - } - label: { - en: """Per Client""" - zh: """每个使用者的限制""" - } - } - - bucket_cfg { - desc { - en: """Bucket Configs""" - zh: """桶的配置""" - } - label: { - en: """Buckets""" - zh: """桶的配置""" - } - } - - burst { - desc { - en: """The burst, This value is based on rate.
- This value + rate = the maximum limit that can be achieved when limiter burst.""" - zh: """突发速率。 -突发速率允许短时间内速率超过设置的速率值,突发速率 + 速率 = 当前桶能达到的最大速率值""" - } - label: { - en: """Burst""" - zh: """突发速率""" - } - } - - message_routing { - desc { - en: """The message routing limiter. -This is used to limit the forwarding rate for this EMQX node. -Once the limit is reached, new publish will be refused""" - zh: """消息派发速率控制器。 -这个用来控制当前节点内的消息派发速率,当达到最大速率后,新的推送将会被拒绝""" - } - label: { - en: """Message Routing""" - zh: """消息派发""" - } - } - - connection { - desc { - en: """The connection limiter. -This is used to limit the connection rate for this EMQX node. -Once the limit is reached, new connections will be refused""" - zh: """连接速率控制器。 -这个用来控制当前节点上的连接速率,当达到最大速率后,新的连接将会被拒绝""" - } - label: { - en: """Connection""" - zh: """连接速率""" - } - } - - message_in { - desc { - en: """The message in limiter. -This is used to limit the inbound message numbers for this EMQX node -Once the limit is reached, the restricted client will be slow down even be hung for a while.""" - zh: """流入速率控制器。 -这个用来控制当前节点上的消息流入速率,当达到最大速率后,会话将会被限速甚至被强制挂起一小段时间""" - } - label: { - en: """Message In""" - zh: """消息流入速率""" - } - } - - bytes_in { - desc { - en: """The bytes_in limiter. -This is used to limit the inbound bytes rate for this EMQX node. -Once the limit is reached, the restricted client will be slow down even be hung for a while.""" - zh: """流入字节率控制器。 -这个是用来控制当前节点上的数据流入的字节率,每条消息将会消耗和其二进制大小等量的令牌,当达到最大速率后,会话将会被限速甚至被强制挂起一小段时间""" - } - label: { - en: """Bytes In""" - zh: """流入字节率""" - } - } - - internal { - desc { - en: """Limiter for EMQX internal app.""" - zh: """EMQX 内部功能所用限制器。""" - - } - } -} diff --git a/apps/emqx/i18n/emqx_schema_i18n.conf b/apps/emqx/i18n/emqx_schema_i18n.conf deleted file mode 100644 index 750c0c2cd..000000000 --- a/apps/emqx/i18n/emqx_schema_i18n.conf +++ /dev/null @@ -1,2609 +0,0 @@ -emqx_schema { - - force_shutdown_enable { - desc { - en: "Enable `force_shutdown` feature." - zh: "启用 `force_shutdown` 功能。" - } - label { - en: "Enable `force_shutdown` feature" - zh: "启用 `force_shutdown` 功能" - } - } - - force_shutdown_max_message_queue_len { - desc { - en: "Maximum message queue length." - zh: "消息队列的最大长度。" - } - label { - en: "Maximum mailbox queue length of process." - zh: "进程邮箱消息队列的最大长度" - } - } - - force_shutdown_max_heap_size { - desc { - en: "Total heap size" - zh: "Heap 的总大小。" - } - label { - en: "Total heap size" - zh: "Heap 的总大小" - } - } - - overload_protection_enable { - desc { - en: "React on system overload or not." - zh: "是否对系统过载做出反应。" - } - label { - en: "React on system overload or not" - zh: "是否对系统过载做出反应" - } - } - - overload_protection_backoff_delay { - desc { - en: "When at high load, some unimportant tasks could be delayed for execution, here set the duration in milliseconds precision." - zh: "高负载时,一些不重要的任务可能会延迟执行,在这里设置允许延迟的时间。单位为毫秒。" - } - label { - en: "Delay Time" - zh: "延迟时间" - } - } - - overload_protection_backoff_gc { - desc { - en: "When at high load, skip forceful GC." - zh: "高负载时,跳过强制 GC。" - } - label { - en: "Skip GC" - zh: "跳过GC" - } - } - - overload_protection_backoff_hibernation { - desc { - en: "When at high load, skip process hibernation." - zh: "高负载时,跳过进程休眠。" - } - label { - en: "Skip hibernation" - zh: "跳过休眠" - } - } - - overload_protection_backoff_new_conn { - desc { - en: "When at high load, close new incoming connections." 
- zh: "高负载时,拒绝新进来的客户端连接。" - } - label { - en: "Close new connections" - zh: "关闭新连接" - } - } - - conn_congestion_enable_alarm { - desc { - en: "Enable or disable connection congestion alarm." - zh: "启用或者禁用连接阻塞告警功能。" - } - label { - en: "Enable/disable congestion alarm" - zh: "启用/禁用阻塞告警" - } - } - - conn_congestion_min_alarm_sustain_duration { - desc { - en: "Minimal time before clearing the alarm.
" - "The alarm is cleared only when there's no pending data in
" - "the queue, and at least min_alarm_sustain_duration" - "milliseconds passed since the last time we considered the connection 'congested'.
" - "This is to avoid clearing and raising the alarm again too often." - zh: "清除警报前的最短时间。
" - "只有当队列中没有挂起的数据,并且连接至少被堵塞了 min_alarm_sustain_duration 毫秒时,
" - "报警才会被清除。这是为了避免太频繁地清除和再次发出警报。" - } - label { - en: "Sustain duration" - zh: "告警维持时间" - } - } - - force_gc_enable { - desc { - en: "Enable forced garbage collection." - zh: "启用强制垃圾回收。" - } - label { - en: "Enable forced garbage collection" - zh: "启用强制垃圾回收" - } - } - - force_gc_count { - desc { - en: "GC the process after this many received messages." - zh: "在进程收到多少消息之后,对此进程执行垃圾回收。" - } - label { - en: "Process GC messages num" - zh: "垃圾回收消息数" - } - } - - force_gc_bytes { - desc { - en: "GC the process after specified number of bytes have passed through." - zh: "在进程处理过多少个字节之后,对此进程执行垃圾回收。" - } - label { - en: "Process GC bytes" - zh: "垃圾回收字节数" - } - } - - sysmon_vm_process_check_interval { - desc { - en: "The time interval for the periodic process limit check." - zh: "定期进程限制检查的时间间隔。" - } - label { - en: "Process limit check interval" - zh: "进程限制检查时间" - } - } - - sysmon_vm_process_high_watermark { - desc { - en: "The threshold, as percentage of processes, for how many\n" - " processes can simultaneously exist at the local node before the corresponding\n" - " alarm is raised." - zh: "在发出相应警报之前,本地节点上可以同时存在多少进程的阈值(以进程百分比表示)。" - } - label { - en: "Process high watermark" - zh: "进程数高水位线" - } - } - - sysmon_vm_process_low_watermark { - desc { - en: "The threshold, as percentage of processes, for how many\n" - " processes can simultaneously exist at the local node before the corresponding\n" - " alarm is cleared." - zh: "在清除相应警报之前,本地节点上可以同时存在多少进程的阈值(以进程百分比表示)。" - } - label { - en: "Process low watermark" - zh: "进程数低水位线" - } - } - - sysmon_vm_long_gc { - desc { - en: "Enable Long GC monitoring." - zh: "启用长垃圾回收监控。" - } - label { - en: "Enable Long GC monitoring." - zh: "启用长垃圾回收监控" - } - } - - sysmon_vm_long_schedule { - desc { - en: "Enable Long Schedule monitoring." - zh: "启用长调度监控。" - } - label { - en: "Enable Long Schedule monitoring." - zh: "启用长调度监控" - } - } - - sysmon_vm_large_heap { - desc { - en: "Enable Large Heap monitoring." - zh: "启用大 heap 监控。" - } - label { - en: "Enable Large Heap monitoring." - zh: "启用大 heap 监控" - } - } - - sysmon_vm_busy_dist_port { - desc { - en: "Enable Busy Distribution Port monitoring." - zh: "启用分布式端口过忙监控。" - } - label { - en: "Enable Busy Distribution Port monitoring." - zh: "启用分布式端口过忙监控" - } - } - - sysmon_vm_busy_port { - desc { - en: "Enable Busy Port monitoring." - zh: "启用端口过忙监控。" - } - label { - en: "Enable Busy Port monitoring." - zh: "启用端口过忙监控" - } - } - - sysmon_os_cpu_check_interval { - desc { - en: "The time interval for the periodic CPU check." - zh: "定期 CPU 检查的时间间隔。" - } - label { - en: "The time interval for the periodic CPU check." - zh: "定期 CPU 检查的时间间隔" - } - } - - sysmon_os_cpu_high_watermark { - desc { - en: "The threshold, as percentage of system CPU load,\n" - " for how much system cpu can be used before the corresponding alarm is raised." - zh: "在发出相应警报之前可以使用多少系统 CPU 的阈值,以系统CPU负载的百分比表示。" - } - label { - en: "CPU high watermark" - zh: "CPU 高水位线" - } - } - - sysmon_os_cpu_low_watermark { - desc { - en: "The threshold, as percentage of system CPU load,\n" - " for how much system cpu can be used before the corresponding alarm is cleared." - zh: "在解除相应警报之前可以使用多少系统 CPU 的阈值,以系统CPU负载的百分比表示。" - } - label { - en: "CPU low watermark" - zh: "CPU 低水位线" - } - } - - sysmon_os_mem_check_interval { - desc { - en: "The time interval for the periodic memory check." 
- zh: "定期内存检查的时间间隔。" - } - label { - en: "Mem check interval" - zh: "内存检查间隔" - } - } - - sysmon_os_sysmem_high_watermark { - desc { - en: "The threshold, as percentage of system memory,\n" - " for how much system memory can be allocated before the corresponding alarm is raised." - zh: "在发出相应报警之前可以分配多少系统内存的阈值,以系统内存的百分比表示。" - } - label { - en: "SysMem high wartermark" - zh: "系统内存高水位线" - } - } - - sysmon_os_procmem_high_watermark { - desc { - en: "The threshold, as percentage of system memory,\n" - " for how much system memory can be allocated by one Erlang process before\n" - " the corresponding alarm is raised." - zh: "在发出相应警报之前,一个Erlang进程可以分配多少系统内存的阈值,以系统内存的百分比表示。" - } - label { - en: "ProcMem high wartermark" - zh: "进程内存高水位线" - } - } - - sysmon_top_num_items { - desc { - en: "The number of top processes per monitoring group" - zh: "每个监视组的顶级进程数。" - } - label { - en: "Top num items" - zh: "顶级进程数" - } - } - - sysmon_top_sample_interval { - desc { - en: "Specifies how often process top should be collected" - zh: "指定应收集进程顶部的频率。" - } - label { - en: "Top sample interval" - zh: "取样时间" - } - } - - sysmon_top_max_procs { - desc { - en: "Stop collecting data when the number of processes\n" - "in the VM exceeds this value" - zh: "当 VM 中的进程数超过此值时,停止收集数据。" - } - label { - en: "Max procs" - zh: "最大进程数" - } - } - - sysmon_top_db_hostname { - desc { - en: "Hostname of the PostgreSQL database that collects the data points" - zh: "收集数据点的 PostgreSQL 数据库的主机名。" - } - label { - en: "DB Hostname" - zh: "数据库主机名" - } - } - - sysmon_top_db_port { - desc { - en: "Port of the PostgreSQL database that collects the data points." - zh: "收集数据点的 PostgreSQL 数据库的端口。" - } - label { - en: "DB Port" - zh: "数据库端口" - } - } - - sysmon_top_db_username { - desc { - en: "Username of the PostgreSQL database" - zh: "PostgreSQL 数据库的用户名" - } - label { - en: "DB Username" - zh: "数据库用户名" - } - } - - sysmon_top_db_password { - desc { - en: "EMQX user password in the PostgreSQL database" - zh: "PostgreSQL 数据库的密码" - } - label { - en: "DB Password" - zh: "数据库密码" - } - } - - sysmon_top_db_name { - desc { - en: "PostgreSQL database name" - zh: "PostgreSQL 数据库的数据库名" - } - label { - en: "DB Name" - zh: "数据库名" - } - } - - alarm_actions { - desc { - en: "The actions triggered when the alarm is activated.
" - "Currently, the following actions are supported: log and " - "publish.\n" - "log is to write the alarm to log (console or file).\n" - "publish is to publish the alarm as an MQTT message to " - "the system topics:\n" - "$SYS/brokers/emqx@xx.xx.xx.x/alarms/activate and\n" - "$SYS/brokers/emqx@xx.xx.xx.x/alarms/deactivate" - zh: "警报激活时触发的动作。
" - "目前,支持以下操作:log 和 " - "publish.\n" - "log 将告警写入日志 (控制台或者文件).\n" - "publish 将告警作为 MQTT 消息发布到系统主题:\n" - "$SYS/brokers/emqx@xx.xx.xx.x/alarms/activate and\n" - "$SYS/brokers/emqx@xx.xx.xx.x/alarms/deactivate" - } - label: { - en: "Alarm Actions" - zh: "告警动作" - } - } - - alarm_size_limit { - desc { - en: "The maximum total number of deactivated alarms to keep as history.
" - "When this limit is exceeded, the oldest deactivated alarms are " - "deleted to cap the total number.\n" - zh: "要保留为历史记录的已停用报警的最大总数。当超过此限制时,将删除最旧的停用报警,以限制总数。" - } - label: { - en: "Alarm size limit" - zh: "告警总数限制" - } - } - - alarm_validity_period { - desc { - en: "Retention time of deactivated alarms. Alarms are not deleted immediately\n" - "when deactivated, but after the retention time.\n" - zh: "停用报警的保留时间。报警在停用时不会立即删除,而是在保留时间之后删除。" - } - label: { - en: "Alarm validity period" - zh: "告警保留时间" - } - } - - flapping_detect_enable { - desc { - en: "Enable flapping connection detection feature." - zh: "启用抖动检测功能。" - } - label: { - en: "Enable flapping detection" - zh: "启用抖动检测" - } - } - - flapping_detect_max_count { - desc { - en: "The maximum number of disconnects allowed for a MQTT Client in `window_time`" - zh: "MQTT 客户端在“窗口”时间内允许的最大断开次数。" - } - label: { - en: "Max count" - zh: "最大断开次数" - } - } - - flapping_detect_window_time { - desc { - en: "The time window for flapping detection." - zh: "抖动检测的时间窗口。" - } - label: { - en: "Window time" - zh: "时间窗口" - } - } - - flapping_detect_ban_time { - desc { - en: "How long the flapping clientid will be banned." - zh: "抖动的客户端将会被禁止登录多长时间。" - } - label: { - en: "Ban time" - zh: "禁止登录时长" - } - } - - persistent_session_store_enabled { - desc { - en: "Use the database to store information about persistent sessions.\n" - "This makes it possible to migrate a client connection to another\n" - "cluster node if a node is stopped.\n" - zh: "使用数据库存储有关持久会话的信息。\n" - "这使得在节点停止时,可以将客户端连接迁移到另一个群集节点。" - } - label: { - en: "Enable persistent session store" - zh: "启用持久会话保存" - } - } - - persistent_session_store_backend { - desc { - en: "Database management system used to store information about persistent sessions and messages.\n" - "- `builtin`: Use the embedded database (mria)" - zh: "用于存储持久性会话和信息的数据库管理后端\n" - "- `builtin`: 使用内置的数据库(mria)" - } - label: { - en: "Backend" - zh: "后端类型" - } - } - - persistent_store_on_disc { - desc { - en: "Save information about the persistent sessions on disc.\n" - "If this option is enabled, persistent sessions will survive full restart of the cluster.\n" - "Otherwise, all the data will be stored in RAM, and it will be lost when all the nodes in the cluster are stopped." - zh: "将持久会话数据保存在磁盘上。如果为 false 则存储在内存中。\n" - "如开启, 持久会话数据可在集群重启后恢复。\n" - "如关闭, 数据仅存储在内存中, 则在整个集群停止后丢失。" - } - label: { - en: "Persist on disc" - zh: "持久化在磁盘上" - } - } - - persistent_store_ram_cache { - desc { - en: "Maintain a copy of the data in RAM for faster access." - zh: "在内存中保持一份数据的副本,以便更快地访问。" - } - label: { - en: "RAM cache" - zh: "内存缓存" - } - } - - persistent_session_store_max_retain_undelivered { - desc { - en: "The time messages that was not delivered to a persistent session\n" - "is stored before being garbage collected if the node the previous\n" - "session was handled on restarts of is stopped.\n" - zh: "如果重新启动时处理上一个会话的节点已停止,则未传递到持久会话的消息在垃圾收集之前会被存储。" - } - label: { - en: "Max retain undelivered" - zh: "未投递的消息保留条数" - } - } - - persistent_session_store_message_gc_interval { - desc { - en: "The starting interval for garbage collection of undelivered messages to\n" - "a persistent session. 
This affects how often the \"max_retain_undelivered\"\n" - "is checked for removal.\n" - zh: "将未送达的消息垃圾收集到持久会话的开始间隔。\n" - "这会影响检查 \"max_retain_undelivered\"(最大保留未送达)的删除频率。" - } - label: { - en: "Message GC interval" - zh: "消息清理间隔" - } - } - - persistent_session_store_session_message_gc_interval { - desc { - en: "The starting interval for garbage collection of transient data for\n" - "persistent session messages. This does not affect the lifetime length\n" - "of persistent session messages.\n" - zh: "持久会话消息的临时数据垃圾收集的开始间隔。\n" - "这不会影响持久会话消息的生命周期长度。\n" - } - label: { - en: "Session message GC interval" - zh: "会话消息清理间隔" - } - } - - persistent_session_builtin_session_table { - desc { - en: "Performance tuning options for built-in session table." - zh: "用于内建会话表的性能调优参数。" - } - label: { - en: "Persistent session" - zh: "持久会话" - } - } - - persistent_session_builtin_sess_msg_table { - desc { - en: "Performance tuning options for built-in session messages table." - zh: "优化内置的会话消息表的配置。" - } - label: { - en: "Persistent session messages" - zh: "用于内建会话管理表的性能调优参数" - } - } - - persistent_session_builtin_messages_table { - desc { - en: "Performance tuning options for built-in messages table." - zh: "用于内建消息表的性能调优参数。" - } - label: { - en: "Persistent messages" - zh: "持久化消息" - } - } - - stats_enable { - desc { - en: "Enable/disable statistic data collection." - zh: "启用/禁用统计数据收集功能。" - } - label: { - en: "Enable/disable statistic data collection." - zh: "启用/禁用统计数据收集功能" - } - } - - zones { - desc { - en: """A zone is a set of configs grouped by the zone name. -For flexible configuration mapping, the name can be set to a listener's zone config. -NOTE: A built-in zone named default is auto created and can not be deleted. -""" - zh: """zone 是按name 分组的一组配置。 -对于灵活的配置映射,可以将 name 设置为侦听器的 zone 配置。 -注:名为 default 的内置区域是自动创建的,无法删除。""" - } - } - - mqtt { - desc { - en: """Global MQTT configuration. 
-The configs here work as default values which can be overridden in zone configs -""" - zh: """全局的 MQTT 配置项。 -mqtt 下所有的配置作为全局的默认值存在,它可以被 zone 中的配置覆盖。""" - } - } - - mqtt_idle_timeout { - desc { - en: """After the TCP connection is established, if the MQTT CONNECT packet from the client is not received within the time specified by idle_timeout, the connection will be disconnected.""" - zh: """TCP 连接建立后,如果在 idle_timeout 指定的时间内未收到客户端的 MQTT CONNECT 报文,则连接将被断开。""" - } - label: { - en: """Idle Timeout""" - zh: """空闲超时""" - } - } - - mqtt_max_packet_size { - desc { - en: """Maximum MQTT packet size allowed.""" - zh: """允许的最大 MQTT 报文大小。""" - } - label: { - en: """Max Packet Size""" - zh: """最大报文大小""" - } - } - - mqtt_max_clientid_len { - desc { - en: """Maximum allowed length of MQTT Client ID.""" - zh: """允许的最大 MQTT Client ID 长度。""" - } - label: { - en: """Max Client ID Length""" - zh: """最大 Client ID 长度""" - } - } - - mqtt_max_topic_levels { - desc { - en: """Maximum topic levels allowed.""" - zh: """允许的最大主题层级。""" - } - label: { - en: """Max Topic Levels""" - zh: """最大主题层级""" - } - } - - mqtt_max_qos_allowed { - desc { - en: """Maximum QoS allowed.""" - zh: """允许的最大 QoS 等级。""" - } - label: { - en: """Max QoS""" - zh: """最大 QoS""" - } - } - - mqtt_max_topic_alias { - desc { - en: """Maximum topic alias, 0 means no topic alias supported.""" - zh: """允许的最大主题别名数,0 表示不支持主题别名。""" - } - label: { - en: """Max Topic Alias""" - zh: """最大主题别名""" - } - } - - mqtt_retain_available { - desc { - en: """Whether to enable support for MQTT retained message.""" - zh: """是否启用对 MQTT 保留消息的支持。""" - } - label: { - en: """Retain Available""" - zh: """保留消息可用""" - } - } - - mqtt_wildcard_subscription { - desc { - en: """Whether to enable support for MQTT wildcard subscription.""" - zh: """是否启用对 MQTT 通配符订阅的支持。""" - } - label: { - en: """Wildcard Subscription Available""" - zh: """通配符订阅可用""" - } - } - - mqtt_shared_subscription { - desc { - en: """Whether to enable support for MQTT shared subscription.""" - zh: """是否启用对 MQTT 共享订阅的支持。""" - } - label: { - en: """Shared Subscription Available""" - zh: """共享订阅可用""" - } - } - - mqtt_exclusive_subscription { - desc { - en: """Whether to enable support for MQTT exclusive subscription.""" - zh: """是否启用对 MQTT 排它订阅的支持。""" - } - label: { - en: """Exclusive Subscription""" - zh: """排它订阅""" - } - } - - mqtt_ignore_loop_deliver { - desc { - en: """Ignore loop delivery of messages for MQTT v3.1.1/v3.1.0, similar to No Local subscription option in MQTT 5.0.""" - zh: """是否为 MQTT v3.1.1/v3.1.0 客户端忽略投递自己发布的消息,类似于 MQTT 5.0 中的 No Local 订阅选项。""" - } - label: { - en: """Ignore Loop Deliver""" - zh: """忽略循环投递""" - } - } - - mqtt_strict_mode { - desc { - en: """Parse MQTT messages in strict mode. -When set to true, invalid utf8 strings in for example client ID, topic name, etc. will cause the client to be disconnected""" - zh: """是否以严格模式解析 MQTT 消息。 -当设置为 true 时,例如客户端 ID、主题名称等中的无效 utf8 字符串将导致客户端断开连接。""" - } - label: { - en: """Strict Mode""" - zh: """严格模式""" - } - } - - mqtt_response_information { - desc { - en: """Specify the response information returned to the client. This feature is disabled if is set to \"\". Applies only to clients using MQTT 5.0.""" - zh: """指定返回给客户端的响应信息。如果设置为 \"\",则禁用此功能。仅适用于使用 MQTT 5.0 协议的客户端。""" - } - label: { - en: """Response Information""" - zh: """响应信息""" - } - } - - mqtt_server_keepalive { - desc { - en: """The keep alive that EMQX requires the client to use. If configured as disabled, it means that the keep alive specified by the client will be used. 
Requires Server Keep Alive in MQTT 5.0, so it is only applicable to clients using MQTT 5.0 protocol.""" - zh: """EMQX 要求客户端使用的保活时间,配置为 disabled 表示将使用客户端指定的保活时间。需要用到 MQTT 5.0 中的 Server Keep Alive,因此仅适用于使用 MQTT 5.0 协议的客户端。""" - } - label: { - en: """Server Keep Alive""" - zh: """服务端保持连接""" - } - } - - mqtt_keepalive_backoff { - desc { - en: """The backoff multiplier used by the broker to determine the client keep alive timeout. If EMQX doesn't receive any packet in Keep Alive * Backoff * 2 seconds, EMQX will close the current connection.""" - zh: """Broker 判定客户端保活超时使用的退避乘数。如果 EMQX 在 Keep Alive * Backoff * 2 秒内未收到任何报文,EMQX 将关闭当前连接。""" - } - label: { - en: """Keep Alive Backoff""" - zh: """保持连接退避乘数""" - } - } - - mqtt_max_subscriptions { - desc { - en: """Maximum number of subscriptions allowed per client.""" - zh: """允许每个客户端建立的最大订阅数量。""" - } - label: { - en: """Max Subscriptions""" - zh: """最大订阅数量""" - } - } - - mqtt_upgrade_qos { - desc { - en: """Force upgrade of QoS level according to subscription.""" - zh: """投递消息时,是否根据订阅主题时的 QoS 等级来强制提升派发的消息的 QoS 等级。""" - } - label: { - en: """Upgrade QoS""" - zh: """升级 QoS""" - } - } - - mqtt_max_inflight { - desc { - en: """Maximum number of QoS 1 and QoS 2 messages that are allowed to be delivered simultaneously before completing the acknowledgment.""" - zh: """允许在完成应答前同时投递的 QoS 1 和 QoS 2 消息的最大数量。""" - } - label: { - en: """Max Inflight""" - zh: """最大飞行窗口""" - } - - } - - mqtt_retry_interval { - desc { - en: """Retry interval for QoS 1/2 message delivering.""" - zh: """QoS 1/2 消息的重新投递间隔。""" - } - label: { - en: """Retry Interval""" - zh: """重试间隔""" - } - } - - mqtt_max_awaiting_rel { - desc { - en: """For each publisher session, the maximum number of outstanding QoS 2 messages pending on the client to send PUBREL. After reaching this limit, new QoS 2 PUBLISH requests will be rejected with `147(0x93)` until either PUBREL is received or timed out.""" - zh: """每个发布者的会话中,都存在一个队列来处理客户端发送的 QoS 2 消息。该队列会存储 QoS 2 消息的报文 ID 直到收到客户端的 PUBREL 或超时,达到队列长度的限制后,新的 QoS 2 消息发布会被拒绝,并返回 `147(0x93)` 错误。""" - } - label: { - en: """Max Awaiting PUBREL""" - zh: """PUBREL 等待队列长度""" - } - } - - mqtt_await_rel_timeout { - desc { - en: """For client to broker QoS 2 message, the time limit for the broker to wait before the `PUBREL` message is received. The wait is aborted after timed out, meaning the packet ID is freed for new `PUBLISH` requests. Receiving a stale `PUBREL` causes a warning level log. Note, the message is delivered to subscribers before entering the wait for PUBREL.""" - zh: """客户端发布 QoS 2 消息时,服务器等待 `PUBREL` 的最长时延。超过该时长后服务器会放弃等待,该PACKET ID 会被释放,从而允许后续新的 PUBLISH 消息使用。如果超时后收到 PUBREL,服务器将会产生一条告警日志。注意,向订阅客户端转发消息的动作发生在进入等待之前。""" - } - label: { - en: """Max Awaiting PUBREL TIMEOUT""" - zh: """PUBREL 最大等待时间""" - } - } - - mqtt_session_expiry_interval { - desc { - en: """Specifies how long the session will expire after the connection is disconnected, only for non-MQTT 5.0 connections.""" - zh: """指定会话将在连接断开后多久过期,仅适用于非 MQTT 5.0 的连接。""" - } - label: { - en: """Session Expiry Interval""" - zh: """会话过期间隔""" - } - } - - mqtt_max_mqueue_len { - desc { - en: """Maximum queue length. Enqueued messages when persistent client disconnected, or inflight window is full.""" - zh: """消息队列最大长度。持久客户端断开连接或飞行窗口已满时排队的消息长度。""" - } - label: { - en: """Max Message Queue Length""" - zh: """最大消息队列长度""" - } - } - - mqtt_mqueue_priorities { - desc { - en: """Topic priorities. Priority number [1-255] -There's no priority table by default, hence all messages are treated equal. 
- -**NOTE**: Comma and equal signs are not allowed for priority topic names. -**NOTE**: Messages for topics not in the priority table are treated as either highest or lowest priority depending on the configured value for mqtt.mqueue_default_priority. - -**Examples**: -To configure \"topic/1\" > \"topic/2\": -mqueue_priorities: {\"topic/1\": 10, \"topic/2\": 8} -""" - zh: """主题优先级。取值范围 [1-255] -默认优先级表为空,即所有的主题优先级相同。 - -注:优先主题名称中不支持使用逗号和等号。 -注:不在此列表中的主题,被视为最高/最低优先级,这取决于mqtt.mqueue_default_priority 的配置 - -示例: -配置 \"topic/1\" > \"topic/2\": -mqueue_priorities: {\"topic/1\": 10, \"topic/2\": 8} -""" - } - label: { - en: """Topic Priorities""" - zh: """主题优先级""" - } - } - - mqtt_mqueue_default_priority { - desc { - en: """Default topic priority, which will be used by topics not in Topic Priorities (mqueue_priorities).""" - zh: """默认的主题优先级,不在 主题优先级mqueue_priorities) 中的主题将会使用该优先级。""" - } - label: { - en: """Default Topic Priorities""" - zh: """默认主题优先级""" - } - } - - mqtt_mqueue_store_qos0 { - desc { - en: """Specifies whether to store QoS 0 messages in the message queue while the connection is down but the session remains.""" - zh: """指定在连接断开但会话保持期间,是否需要在消息队列中存储 QoS 0 消息。""" - } - label: { - en: """Store QoS 0 Message""" - zh: """存储 QoS 0 消息""" - } - } - - mqtt_use_username_as_clientid { - desc { - en: """Whether to user Client ID as Username. -This setting takes effect later than Use Peer Certificate as Username (peer_cert_as_username) and Use peer certificate as Client ID (peer_cert_as_clientid). -""" - zh: """是否使用用户名作为客户端 ID。 -此设置的作用时间晚于 使用对端证书作为用户名peer_cert_as_username) 和 使用对端证书作为客户端 IDpeer_cert_as_clientid)。 -""" - } - label: { - en: """Use Username as Client ID""" - zh: """使用用户名作为客户端 ID""" - } - } - - mqtt_peer_cert_as_username { - desc { - en: """Use the CN, DN field in the peer certificate or the entire certificate content as Username. Only works for the TLS connection. -Supported configurations are the following: -- cn: Take the CN field of the certificate as Username -- dn: Take the DN field of the certificate as Username -- crt: Take the content of the DER or PEM certificate as Username -- pem: Convert DER certificate content to PEM format as Username -- md5: Take the MD5 value of the content of the DER or PEM certificate as Username -""" - zh: """使用对端证书中的 CN、DN 字段或整个证书内容来作为用户名。仅适用于 TLS 连接。 -目前支持配置为以下内容: -- cn: 取证书的 CN 字段作为 Username -- dn: 取证书的 DN 字段作为 Username -- crt: 取 DERPEM 证书的内容作为 Username -- pem: 将 DER 证书内容转换为 PEM 格式后作为 Username -- md5: 取 DERPEM 证书的内容的 MD5 值作为 Username -""" - } - label: { - en: """Use Peer Certificate as Username""" - zh: """使用对端证书作为用户名""" - } - } - - mqtt_peer_cert_as_clientid { - desc { - en: """Use the CN, DN field in the peer certificate or the entire certificate content as Client ID. Only works for the TLS connection. 
-Supported configurations are the following: -- cn: Take the CN field of the certificate as Client ID -- dn: Take the DN field of the certificate as Client ID -- crt: Take the content of the DER or PEM certificate as Client ID -- pem: Convert DER certificate content to PEM format as Client ID -- md5: Take the MD5 value of the content of the DER or PEM certificate as Client ID -""" - zh: """使用对端证书中的 CN、DN 字段或整个证书内容来作为客户端 ID。仅适用于 TLS 连接。 -目前支持配置为以下内容: -- cn: 取证书的 CN 字段作为 Client ID -- dn: 取证书的 DN 字段作为 Client ID -- crt: 取 DERPEM 证书的内容作为 Client ID -- pem: 将 DER 证书内容转换为 PEM 格式后作为 Client ID -- md5: 取 DERPEM 证书的内容的 MD5 值作为 Client ID -""" - } - label: { - en: """Use Peer Certificate as Client ID""" - zh: """使用对端证书作为客户端 ID""" - } - } - - broker { - desc { - en: """Message broker options.""" - zh: """Broker 相关配置项。""" - } - } - - broker_enable_session_registry { - desc { - en: """Enable session registry""" - zh: """是否启用 Session Registry""" - } - } - - broker_session_locking_strategy { - desc { - en: """Session locking strategy in a cluster. - - `local`: only lock the session on the current node - - `one`: select only one remote node to lock the session - - `quorum`: select some nodes to lock the session - - `all`: lock the session on all the nodes in the cluster -""" - - zh: """Session 在集群中的锁策略。 - - `loca`:仅锁本节点的 Session; - - `one`:任选一个其它节点加锁; - - `quorum`:选择集群中半数以上的节点加锁; - - `all`:选择所有节点加锁。 -""" - } - } - - broker_shared_subscription_strategy { - desc { - en: """Dispatch strategy for shared subscription. - - `random`: dispatch the message to a random selected subscriber - - `round_robin`: select the subscribers in a round-robin manner - - `sticky`: always use the last selected subscriber to dispatch, until the subscriber disconnects. - - `hash`: select the subscribers by the hash of `clientIds` -""" - - zh: """共享订阅消息派发策略。 - - `random`:随机挑选一个共享订阅者派发; - - `round_robin`:使用 round-robin 策略派发; - - `sticky`:总是使用上次选中的订阅者派发,直到它断开连接; - - `hash`:使用发送者的 Client ID 进行 Hash 来选择订阅者。 -""" - } - } - - broker_shared_dispatch_ack_enabled { - desc { - en: """Deprecated, will be removed in 5.1. -Enable/disable shared dispatch acknowledgement for QoS 1 and QoS 2 messages. -This should allow messages to be dispatched to a different subscriber in the group in case the picked (based on `shared_subscription_strategy`) subscriber is offline. -""" - - zh: """该配置项已废弃,会在 5.1 中移除。 -启用/禁用 QoS 1 和 QoS 2 消息的共享派发确认。 -开启后,允许将消息从未及时回复 ACK 的订阅者 (例如,客户端离线)重新派发给另外一个订阅者。 -""" - } - } - - broker_route_batch_clean { - desc { - en: """Enable batch clean for deleted routes.""" - zh: """是否开启批量清除路由。""" - } - } - - shared_subscription_group_strategy { - desc { - en: """Per group dispatch strategy for shared subscription. -This config is a map from shared subscription group name to the strategy -name. The group name should be of format `[A-Za-z0-9]`. i.e. no -special characters are allowed. -""" - cn: """设置共享订阅组为单位的分发策略。该配置是一个从组名到 -策略名的一个map,组名不得包含 `[A-Za-z0-9]` 之外的特殊字符。 -""" - } - - } - - shared_subscription_strategy_enum { - desc { - en: """Dispatch strategy for shared subscription. -- `random`: dispatch the message to a random selected subscriber -- `round_robin`: select the subscribers in a round-robin manner -- `round_robin_per_group`: select the subscribers in round-robin fashion within each shared subscriber group -- `sticky`: always use the last selected subscriber to dispatch, -until the subscriber disconnects. -- `hash`: select the subscribers by the hash of `clientIds` -- `local`: send to a random local subscriber. 
-subscriber is found, send to a random subscriber cluster-wide
-"""
-      zh: """共享订阅的分发策略名称。
-- `random`:随机选择一个组内成员;
-- `round_robin`:循环选择下一个成员;
-- `round_robin_per_group`:在共享组内循环选择下一个成员;
-- `sticky`:使用上一次选中的成员;
-- `hash`:根据 ClientID 哈希映射到一个成员;
-- `local`:随机分发到节点本地成员,如果本地成员不存在,则随机分发到任意一个成员。
-"""
-
-    }
-  }
-
-  broker_perf_route_lock_type {
-    desc {
-      en: """Performance tuning for subscribing/unsubscribing a wildcard topic.
-Change this parameter only when there are many wildcard topics.
-
-NOTE: when changing from/to `global` lock, it requires all nodes in the cluster to be stopped before the change.
-  - `key`: mnesia transactional updates with per-key locks. Recommended for a single-node setup.
-  - `tab`: mnesia transactional updates with table lock. Recommended for a cluster setup.
-  - `global`: updates are protected with a global lock. Recommended for large clusters.
-"""
-      zh: """通配主题订阅/取消订阅性能调优。
-建议仅当通配符主题较多时才更改此参数。
-
-注:切换为或切换出 `global` 锁时,要求集群中的所有节点在更改之前停止。
-  - `key`:为 Mnesia 事务涉及到的每个 key 上锁,建议单节点时使用。
-  - `tab`:为 Mnesia 事务涉及到的表上锁,建议在集群中使用。
-  - `global`:所有更新操作都被全局锁保护,仅建议在超大规模集群中使用。
-"""
-    }
-  }
-
-  broker_perf_trie_compaction {
-    desc {
-      en: """Enable trie path compaction.
-Enabling it significantly improves wildcard topic subscribe rate, if wildcard topics have unique prefixes like: 'sensor/{{id}}/+/', where ID is unique per subscriber.
-Topic match performance (when publishing) may degrade if messages are mostly published to topics with large number of levels.
-
-NOTE: This is a cluster-wide configuration. It requires all nodes to be stopped before changing it.
-"""
-      zh: """是否开启主题表压缩存储。
-启用它会显著提高通配符主题的订阅速率,前提是通配符主题具有唯一前缀,例如:'sensor/{{id}}/+/',其中每个订阅者的 ID 是唯一的。
-如果消息主要发布到具有大量层级的主题,则主题匹配性能(发布时)可能会降低。
-
-注意:这是一个集群范围的配置。它要求在更改之前停止所有节点。
-"""
-    }
-  }
-
-  sys_topics {
-    desc {
-      en: """System topics configuration."""
-      zh: """系统主题配置。"""
-    }
-  }
-
-  sys_msg_interval {
-    desc {
-      en: """Time interval of publishing `$SYS` messages."""
-      zh: """发送 `$SYS` 主题的间隔时间。"""
-    }
-  }
-
-  sys_heartbeat_interval {
-    desc {
-      en: """Time interval for publishing the following heartbeat messages:
-  - `$SYS/brokers//uptime`
-  - `$SYS/brokers//datetime`
-"""
-      zh: """发送心跳系统消息的间隔时间,它包括:
-  - `$SYS/brokers//uptime`
-  - `$SYS/brokers//datetime`
-"""
-    }
-  }
-
-  sys_event_messages {
-    desc {
-      en: """Client events messages."""
-      zh: """客户端事件消息。"""
-    }
-  }
-
-  sys_event_client_connected {
-    desc {
-      en: """Enable to publish client connected event messages"""
-      zh: """是否开启客户端已连接事件消息。"""
-    }
-  }
-
-  sys_event_client_disconnected {
-    desc {
-      en: """Enable to publish client disconnected event messages."""
-      zh: """是否开启客户端已断开连接事件消息。"""
-    }
-  }
-
-  sys_event_client_subscribed {
-    desc {
-      en: """Enable to publish event message that client subscribed a topic successfully."""
-      zh: """是否开启客户端已成功订阅主题事件消息。"""
-    }
-  }
-
-  sys_event_client_unsubscribed {
-    desc {
-      en: """Enable to publish event message that client unsubscribed a topic successfully."""
-      zh: """是否开启客户端已成功取消订阅主题事件消息。"""
-    }
-  }
-
-
-fields_authorization_no_match {
-  desc {
-    en: """
-Default access control action if the user or client matches no ACL rules,
-or if no such user or client is found by the configurable authorization
-sources such as built_in_database, an HTTP API, or a query against PostgreSQL.
-Find more details in 'authorization.sources' config.
-""" - zh: """ -如果用户或客户端不匹配ACL规则,或者从可配置授权源(比如内置数据库、HTTP API 或 PostgreSQL 等。)内未找 -到此类用户或客户端时,模式的认访问控制操作。 -在“授权”中查找更多详细信息。 -""" - } - label: { - en: "Authorization no match" - zh: "未匹时的默认授权动作" - } -} - -fields_authorization_deny_action { - desc { - en: """The action when the authorization check rejects an operation.""" - zh: """授权检查拒绝操作时的操作。""" - } - label: { - en: "Authorization deny action" - zh: "授权检查拒绝操作时的操作" - } -} - -fields_cache_enable { - desc { - en: """Enable or disable the authorization cache.""" - zh: """启用或禁用授权缓存。""" - } - label: { - en: "Enable or disable the authorization cache." - zh: "启用或禁用授权缓存" - } -} - -fields_cache_max_size { - desc { - en: """Maximum number of cached items.""" - zh: """缓存项的最大数量。""" - } - label: { - en: "Maximum number of cached items." - zh: "缓存项的最大数量" - } -} - -fields_cache_ttl { - desc { - en: """Time to live for the cached data. """ - zh: """缓存数据的生存时间。""" - } - label: { - en: "Time to live for the cached data." - zh: "缓存数据的生存时间。" - } -} - -fields_deflate_opts_level { - desc { - en: """Compression level. """ - zh: """压缩级别""" - } - label: { - en: "Compression level" - zh: "压缩级别" - } -} - -fields_deflate_opts_mem_level { - desc { - en: """ -Specifies the size of the compression state.
-Lower values decrease memory usage per connection. -""" - zh: """ -指定压缩状态的大小
-较低的值会减少每个连接的内存使用。 -""" - } - label: { - en: "Size of the compression state" - zh: "压缩状态大小" - } -} - -fields_deflate_opts_strategy { - desc { - en: """Specifies the compression strategy.""" - zh: """指定压缩策略。""" - } - label: { - en: "compression strategy" - zh: "指定压缩策略" - } -} - -fields_deflate_opts_server_context_takeover { - desc { - en: """Takeover means the compression state is retained between server messages. """ - zh: """接管意味着在服务器消息之间保留压缩状态。""" - } - label: { - en: "Server context takeover" - zh: "服务上下文接管" - } -} - -fields_deflate_opts_client_context_takeover { - desc { - en: """Takeover means the compression state is retained between client messages. """ - zh: """接管意味着在客户端消息之间保留压缩状态。""" - } - label: { - en: "Client context takeover" - zh: "客户端上下文接管" - } -} - -fields_deflate_opts_server_max_window_bits { - desc { - en: """Specifies the size of the compression context for the server.""" - zh: """指定服务器压缩上下文的大小。""" - } - label: { - en: "Server compression max window size" - zh: "服务器压缩窗口大小" - } -} - -fields_deflate_opts_client_max_window_bits { - desc { - en: """Specifies the size of the compression context for the client.""" - zh: """指定客户端压缩上下文的大小。""" - } - label: { - en: "Client compression max window size" - zh: "压缩窗口大小" - } -} - -client_ssl_opts_schema_enable { - desc { - en: """Enable TLS. """ - zh: """启用 TLS。""" - } - label: { - en: "Enable TLS." - zh: "启用 TLS" - } -} - -common_ssl_opts_schema_cacertfile { - desc { - en: """ -Trusted PEM format CA certificates bundle file.
-The certificates in this file are used to verify the TLS peer's certificates. -Append new certificates to the file if new CAs are to be trusted. -There is no need to restart EMQX to have the updated file loaded, because -the system regularly checks if file has been updated (and reload).
-NOTE: invalidating (deleting) a certificate from the file will not affect -already established connections. -""" - zh: """ -受信任的PEM格式 CA 证书捆绑文件
-此文件中的证书用于验证TLS对等方的证书。 -如果要信任新 CA,请将新证书附加到文件中。 -无需重启EMQX即可加载更新的文件,因为系统会定期检查文件是否已更新(并重新加载)
-注意:从文件中失效(删除)证书不会影响已建立的连接。 -""" - } - label: { - en: "CACertfile" - zh: "CA 证书文件" - } -} - -common_ssl_opts_schema_certfile { - desc { - en: """ -PEM format certificates chain file.
-The certificates in this file should be in reversed order of the certificate -issue chain. That is, the host's certificate should be placed in the beginning -of the file, followed by the immediate issuer certificate and so on. -Although the root CA certificate is optional, it should be placed at the end of -the file if it is to be added. -""" - zh: """ -PEM格式证书链文件
-此文件中的证书应与证书颁发链的顺序相反。也就是说,主机的证书应该放在文件的开头,
-然后是直接颁发者 CA 证书,依此类推,一直到根 CA 证书。
-根 CA 证书是可选的,如果想要添加,应加到文件的最末端。
-"""
-  }
-  label: {
-    en: "Certfile"
-    zh: "证书文件"
-  }
-}
-
-common_ssl_opts_schema_keyfile {
-  desc {
-    en: """PEM format private key file. """
-    zh: """PEM 格式的私钥文件。"""
-  }
-  label: {
-    en: "Keyfile"
-    zh: "私钥文件"
-  }
-}
-
-common_ssl_opts_schema_verify {
-  desc {
-    en: """Enable or disable peer verification. """
-    zh: """启用或禁用对等验证。"""
-  }
-  label: {
-    en: "Verify peer"
-    zh: "对等验证"
-  }
-}
-
-common_ssl_opts_schema_reuse_sessions {
-  desc {
-    en: """Enable TLS session reuse. """
-    zh: """启用 TLS 会话重用。"""
-  }
-  label: {
-    en: "TLS session reuse"
-    zh: "TLS 会话重用"
-  }
-}
-
-common_ssl_opts_schema_depth {
-  desc {
-    en: """
-Maximum number of non-self-issued intermediate certificates that can follow the peer certificate in a valid certification path.
-So, if depth is 0 the PEER must be signed by the trusted ROOT-CA directly;
-if 1 the path can be PEER, Intermediate-CA, ROOT-CA;
-if 2 the path can be PEER, Intermediate-CA1, Intermediate-CA2, ROOT-CA.
-""" - zh: """ -在有效的证书路径中,可以跟随对等证书的非自颁发中间证书的最大数量。 -因此,如果深度为0,则对等方必须由受信任的根 CA 直接签名;
-如果是1,路径可以是 PEER、中间 CA、ROOT-CA;
-如果是2,则路径可以是PEER、中间 CA1、中间 CA2、ROOT-CA。 -""" - } - label: { - en: "CACert Depth" - zh: "CA 证书深度" - } -} - -common_ssl_opts_schema_password { - desc { - en: """ -String containing the user's password. -Only used if the private key file is password-protected. -""" - zh: """ -包含用户密码的字符串。 -仅在私钥文件受密码保护时使用。 -""" - } - label: { - en: "Keyfile passphrase" - zh: "秘钥文件密码" - } -} - -common_ssl_opts_schema_versions { - desc { - en: """ -All TLS/DTLS versions to be supported.
-NOTE: PSK ciphers are suppressed by 'tlsv1.3' version config.
-In case PSK cipher suites are intended, make sure to configure -['tlsv1.2', 'tlsv1.1'] here. -""" - zh: """ -支持所有TLS/DTLS版本
- -注:PSK 的 Ciphers 无法在 tlsv1.3 中使用,如果打算使用 PSK 密码套件,请确保这里配置为 ["tlsv1.2","tlsv1.1"]。 -""" - } - label: { - en: "SSL versions" - zh: "SSL 版本" - } -} - -ciphers_schema_common { - desc { - en: """ -This config holds TLS cipher suite names separated by comma, -or as an array of strings. e.g. -"TLS_AES_256_GCM_SHA384,TLS_AES_128_GCM_SHA256" or -["TLS_AES_256_GCM_SHA384","TLS_AES_128_GCM_SHA256"]. -
-Ciphers (and their ordering) define the way in which the
-client and server encrypt information over the network connection.
-Selecting a good cipher suite is critical for the
-application's data security, confidentiality and performance.
-
-The names should be in OpenSSL string format (not RFC format).
-All default values and examples provided by the EMQX config
-documentation are in OpenSSL format.
-
-NOTE: Certain cipher suites are only compatible with
-specific TLS versions ('tlsv1.1', 'tlsv1.2' or 'tlsv1.3');
-incompatible cipher suites will be silently dropped.
-For instance, if only 'tlsv1.3' is given in the versions,
-configuring cipher suites for other versions will have no effect.
-
- -NOTE: PSK ciphers are suppressed by 'tlsv1.3' version config
-If PSK cipher suites are intended, 'tlsv1.3' should be disabled from versions.
-PSK cipher suites: "RSA-PSK-AES256-GCM-SHA384,RSA-PSK-AES256-CBC-SHA384, -RSA-PSK-AES128-GCM-SHA256,RSA-PSK-AES128-CBC-SHA256, -RSA-PSK-AES256-CBC-SHA,RSA-PSK-AES128-CBC-SHA, -RSA-PSK-DES-CBC3-SHA,RSA-PSK-RC4-SHA"
-""" - zh: """ -此配置保存由逗号分隔的 TLS 密码套件名称,或作为字符串数组。例如 -"TLS_AES_256_GCM_SHA384,TLS_AES_128_GCM_SHA256"或 -["TLS_AES_256_GCM_SHA384","TLS_AES_128_GCM_SHA256"]。 -
-密码(及其顺序)定义了客户端和服务器通过网络连接加密信息的方式。 -选择一个好的密码套件对于应用程序的数据安全性、机密性和性能至关重要。 - -名称应为 OpenSSL 字符串格式(而不是 RFC 格式)。 -EMQX 配置文档提供的所有默认值和示例都是 OpenSSL 格式
-注意:某些密码套件仅与特定的 TLS 版本兼容('tlsv1.1'、'tlsv1.2' 或 'tlsv1.3')。
-不兼容的密码套件将被自动删除。
-
-例如,如果 versions 仅配置为 tlsv1.3,则为其他版本配置的密码套件将无效。
-
-注:PSK 的 Ciphers 不支持 tlsv1.3
-如果打算使用 PSK 密码套件,应在 ssl.versions 中禁用 tlsv1.3。
-
-PSK 密码套件: -"RSA-PSK-AES256-GCM-SHA384,RSA-PSK-AES256-CBC-SHA384, -RSA-PSK-AES128-GCM-SHA256,RSA-PSK-AES128-CBC-SHA256, -RSA-PSK-AES256-CBC-SHA,RSA-PSK-AES128-CBC-SHA, -RSA-PSK-DES-CBC3-SHA,RSA-PSK-RC4-SHA"
-""" - } - label: { - en: "" - zh: "" - } -} - -ciphers_schema_quic { - desc { - en: """ -This config holds TLS cipher suite names separated by comma, -or as an array of strings. e.g. -"TLS_AES_256_GCM_SHA384,TLS_AES_128_GCM_SHA256" or -["TLS_AES_256_GCM_SHA384","TLS_AES_128_GCM_SHA256"]. -
-Ciphers (and their ordering) define the way in which the
-client and server encrypt information over the network connection.
-Selecting a good cipher suite is critical for the
-application's data security, confidentiality and performance.
-
-The names should be in OpenSSL string format (not RFC format).
-All default values and examples provided by the EMQX config
-documentation are in OpenSSL format.
-
-NOTE: Certain cipher suites are only compatible with
-specific TLS versions ('tlsv1.1', 'tlsv1.2' or 'tlsv1.3');
-incompatible cipher suites will be silently dropped.
-For instance, if only 'tlsv1.3' is given in the versions,
-configuring cipher suites for other versions will have no effect.
-
- -NOTE: PSK ciphers are suppressed by 'tlsv1.3' version config
-If PSK cipher suites are intended, 'tlsv1.3' should be disabled from versions.
-PSK cipher suites: "RSA-PSK-AES256-GCM-SHA384,RSA-PSK-AES256-CBC-SHA384, -RSA-PSK-AES128-GCM-SHA256,RSA-PSK-AES128-CBC-SHA256, -RSA-PSK-AES256-CBC-SHA,RSA-PSK-AES128-CBC-SHA, -RSA-PSK-DES-CBC3-SHA,RSA-PSK-RC4-SHA"
- -NOTE: QUIC listener supports only 'tlsv1.3' ciphers
-""" - zh: """ -此配置保存由逗号分隔的 TLS 密码套件名称,或作为字符串数组。例如 -"TLS_AES_256_GCM_SHA384,TLS_AES_128_GCM_SHA256"或 -["TLS_AES_256_GCM_SHA384","TLS_AES_128_GCM_SHA256"]。 -
-密码(及其顺序)定义了客户端和服务器通过网络连接加密信息的方式。 -选择一个好的密码套件对于应用程序的数据安全性、机密性和性能至关重要。 - -名称应为 OpenSSL 字符串格式(而不是 RFC 格式)。 -EMQX 配置文档提供的所有默认值和示例都是 OpenSSL 格式
-注意:某些密码套件仅与特定的 TLS 版本兼容('tlsv1.1'、'tlsv1.2' 或 'tlsv1.3')。
-不兼容的密码套件将被自动删除。
-
-例如,如果 versions 仅配置为 tlsv1.3,则为其他版本配置的密码套件将无效。
-
-注:PSK 的 Ciphers 不支持 tlsv1.3
-如果打算使用 PSK 密码套件,应在 ssl.versions 中禁用 tlsv1.3。
-
-PSK 密码套件: -"RSA-PSK-AES256-GCM-SHA384,RSA-PSK-AES256-CBC-SHA384, -RSA-PSK-AES128-GCM-SHA256,RSA-PSK-AES128-CBC-SHA256, -RSA-PSK-AES256-CBC-SHA,RSA-PSK-AES128-CBC-SHA, -RSA-PSK-DES-CBC3-SHA,RSA-PSK-RC4-SHA"
-
-注:QUIC 监听器仅支持 tlsv1.3 的 ciphers
-"""
-  }
-  label: {
-    en: ""
-    zh: ""
-  }
-}
-
-common_ssl_opts_schema_user_lookup_fun {
-  desc {
-    en: """EMQX-internal callback that is used to lookup pre-shared key (PSK) identity. """
-    zh: """用于查找预共享密钥(PSK)标识的 EMQX 内部回调。"""
-  }
-  label: {
-    en: "SSL PSK user lookup fun"
-    zh: "SSL PSK 用户回调"
-  }
-}
-
-common_ssl_opts_schema_secure_renegotiate {
-  desc {
-    en: """
-SSL parameter renegotiation is a feature that allows a client and a server
-to renegotiate the parameters of the SSL connection on the fly.
-RFC 5746 defines a more secure way of doing this. By enabling secure renegotiation,
-you drop support for the insecure renegotiation, which is prone to MitM attacks.
-"""
-    zh: """
-SSL 参数重新协商是一种允许客户端和服务器动态重新协商 SSL 连接参数的功能。
-RFC 5746 定义了一种更安全的方式。启用安全重新协商后,将不再支持不安全的重新协商方式,从而避免其易受 MitM 攻击的问题。
-"""
-  }
-  label: {
-    en: "SSL renegotiate"
-    zh: "SSL 重新协商"
-  }
-}
-
-server_ssl_opts_schema_dhfile {
-  desc {
-    en: """
-Path to a file containing PEM-encoded Diffie-Hellman parameters
-to be used by the server if a cipher suite using Diffie-Hellman
-key exchange is negotiated. If not specified, default parameters
-are used.
-NOTE: The dhfile option is not supported by TLS 1.3. -""" - zh: """ -如果协商使用Diffie-Hellman密钥交换的密码套件,则服务器将使用包含PEM编码的Diffie-Hellman参数的文件的路径。如果未指定,则使用默认参数。
- -注意:TLS 1.3不支持dhfile选项。 -""" - } - label: { - en: "SSL dhfile" - zh: "SSL dhfile" - } -} - -server_ssl_opts_schema_fail_if_no_peer_cert { - desc { - en: """ -Used together with {verify, verify_peer} by an TLS/DTLS server. -If set to true, the server fails if the client does not have a -certificate to send, that is, sends an empty certificate. -If set to false, it fails only if the client sends an invalid -certificate (an empty certificate is considered valid). -""" - zh: """ -TLS/DTLS 服务器与 {verify,verify_peer} 一起使用。 -如果设置为true,则如果客户端没有要发送的证书,即发送空证书,服务器将失败。 -如果设置为false,则仅当客户端发送无效证书(空证书被视为有效证书)时才会失败。 -""" - } - label: { - en: "SSL fail if no peer cert" - zh: "没有证书则 SSL 失败" - } -} - -server_ssl_opts_schema_honor_cipher_order { - desc { - en: """ -An important security setting, it forces the cipher to be set based - on the server-specified order instead of the client-specified order, - hence enforcing the (usually more properly configured) security - ordering of the server administrator. -""" - zh: """ -一个重要的安全设置,它强制根据服务器指定的顺序而不是客户机指定的顺序设置密码,从而强制服务器管理员执行(通常配置得更正确)安全顺序。 -""" - } - label: { - en: "SSL honor cipher order" - zh: "SSL honor cipher order" - } -} - -server_ssl_opts_schema_client_renegotiation { - desc { - en: """ -In protocols that support client-initiated renegotiation, -the cost of resources of such an operation is higher for the server than the client. -This can act as a vector for denial of service attacks. -The SSL application already takes measures to counter-act such attempts, -but client-initiated renegotiation can be strictly disabled by setting this option to false. -The default value is true. Note that disabling renegotiation can result in -long-lived connections becoming unusable due to limits on -the number of messages the underlying cipher suite can encipher. -""" - zh: """ -在支持客户机发起的重新协商的协议中,这种操作的资源成本对于服务器来说高于客户机。 -这可能会成为拒绝服务攻击的载体。 -SSL 应用程序已经采取措施来反击此类尝试,但通过将此选项设置为 false,可以严格禁用客户端发起的重新协商。 -默认值为 true。请注意,由于基础密码套件可以加密的消息数量有限,禁用重新协商可能会导致长期连接变得不可用。 -""" - } - label: { - en: "SSL client renegotiation" - zh: "SSL 客户端冲协商" - } -} - -server_ssl_opts_schema_handshake_timeout { - desc { - en: """ -Maximum time duration allowed for the handshake to complete -""" - zh: """ -握手完成所允许的最长时间 -""" - } - label: { - en: "Handshake timeout" - zh: "握手超时时间" - } -} - -server_ssl_opts_schema_gc_after_handshake { - desc { - en: """ -Memory usage tuning. If enabled, will immediately perform a garbage collection after -the TLS/SSL handshake. -""" - zh: """ -内存使用调优。如果启用,将在TLS/SSL握手完成后立即执行垃圾回收。 -TLS/SSL握手建立后立即进行GC。 -""" - } - label: { - en: "Perform GC after handshake" - zh: "握手后执行GC" - } -} - -fields_listeners_tcp { - desc { - en: """TCP listeners.""" - zh: """TCP 监听器。""" - } - label: { - en: "TCP listeners" - zh: "TCP 监听器" - } -} - -fields_listeners_ssl { - desc { - en: """SSL listeners.""" - zh: """SSL 监听器。""" - } - label: { - en: "SSL listeners" - zh: "SSL 监听器" - } -} - -fields_listeners_ws { - desc { - en: """HTTP websocket listeners.""" - zh: """HTTP websocket 监听器。""" - } - label: { - en: "HTTP websocket listeners" - zh: "HTTP websocket 监听器" - } -} - -fields_listeners_wss { - desc { - en: """HTTPS websocket listeners.""" - zh: """HTTPS websocket 监听器。""" - } - label: { - en: "HTTPS websocket listeners" - zh: "HTTPS websocket 监听器" - } -} - -fields_listeners_quic { - desc { - en: """QUIC listeners.""" - zh: """QUIC 监听器。""" - } - label: { - en: "QUIC listeners" - zh: "QUIC 监听器" - } -} - -fields_listener_enabled { - desc { - en: """Enable listener. 
""" - zh: """启停监听器。""" - } - label: { - en: "Enable listener" - zh: "启停监听器" - } -} - -fields_mqtt_quic_listener_certfile { - desc { - en: """Path to the certificate file.""" - zh: """证书文件。""" - } - label: { - en: "Certificate file" - zh: "证书文件" - } -} - -fields_mqtt_quic_listener_keyfile { - desc { - en: """Path to the secret key file. """ - zh: """私钥文件。""" - } - label: { - en: "Key file" - zh: "私钥文件" - } -} - -fields_mqtt_quic_listener_idle_timeout { - desc { - en: """How long a connection can go idle before it is gracefully shut down. 0 to disable""" - zh: """一个连接在被关闭之前可以空闲多长时间。0表示禁用。""" - } - label: { - en: "Idle Timeout" - zh: "空闲超时时间" - } -} - -fields_mqtt_quic_listener_handshake_idle_timeout { - desc { - en: """How long a handshake can idle before it is discarded. """ - zh: """一个握手在被丢弃之前可以空闲多长时间。""" - } - label: { - en: "Handshake Idle Timeout" - zh: "握手空闲超时时间" - } -} - -fields_mqtt_quic_listener_keep_alive_interval { - desc { - en: """ -How often to send PING frames to keep a connection alive. 0 means disabled. -""" - zh: """ -发送 PING 帧的频率,以保活连接. 设为 0 表示禁用。 -""" - } - label: { - en: "Keep Alive Interval" - zh: "PING 保活频率" - } -} - -base_listener_bind { - desc { - en: """ -IP address and port for the listening socket. -""" - zh: """ -监听套接字的 IP 地址和端口。 -""" - } - label: { - en: "IP address and port" - zh: "IP 地址和端口" - } -} - -base_listener_acceptors { - desc { - en: """The size of the listener's receiving pool.""" - zh: """监听器接收池的大小。""" - } - label: { - en: "Acceptors Num" - zh: "接收器数量" - } -} - -base_listener_max_connections { - desc { - en: """The maximum number of concurrent connections allowed by the listener. """ - zh: """监听器允许的最大并发连接数。""" - } - label: { - en: "Max connections" - zh: "最大并发连接数" - } -} - -base_listener_mountpoint { - desc { - en: """ -When publishing or subscribing, prefix all topics with a mountpoint string. -The prefixed string will be removed from the topic name when the message -is delivered to the subscriber. The mountpoint is a way that users can use -to implement isolation of message routing between different listeners. -For example if a client A subscribes to `t` with `listeners.tcp.\.mountpoint` -set to `some_tenant`, then the client actually subscribes to the topic -`some_tenant/t`. Similarly, if another client B (connected to the same listener -as the client A) sends a message to topic `t`, the message is routed -to all the clients subscribed `some_tenant/t`, so client A will receive the -message, with topic name `t`.
-Set to `""` to disable the feature.
-
-Variables in mountpoint string:
-  - ${clientid}: clientid
-  - ${username}: username
-"""
-    zh: """
-发布或订阅时,请在所有主题前面加上 mountpoint 字符串。
-
-将消息传递给订阅者时,将从主题名称中删除该前缀字符串。挂载点是一种可以用来实现不同监听器之间消息路由隔离的方法。
-
-例如,如果客户端 A 在 listeners.tcp.\.mountpoint 设置为 'some_tenant' 的情况下订阅主题 't',那么客户端实际上订阅的是主题 'some_tenant/t'。
-类似地,如果另一个客户端 B(与客户端 A 连接到同一个监听器)向主题 't' 发送消息,该消息将被路由到所有订阅了 'some_tenant/t' 的客户端,因此客户端 A 将收到主题名为 't' 的消息。
-
-设置为 "" 以禁用该功能。
-
-mountpoint 字符串中的变量:
-- ${clientid}: clientid
-- ${username}: username
-"""
-  }
-  label: {
-    en: "mountpoint"
-    zh: "mountpoint"
-  }
-}
-
-base_listener_zone {
-  desc {
-    en: """
-The configuration zone to which the listener belongs.
-"""
-    zh: """
-监听器所属的配置组。
-"""
-  }
-  label: {
-    en: "Zone"
-    zh: "配置组"
-  }
-}
-
-base_listener_limiter {
-  desc {
-    en: """
-Type of the rate limit.
-"""
-    zh: """
-速率限制类型。
-"""
-  }
-  label: {
-    en: "Type of the rate limit."
-    zh: "速率限制类型"
-  }
-}
-
-base_listener_enable_authn {
-  desc {
-    en: """
-Set true (default) to enable client authentication on this listener; the authentication
-process goes through the configured authentication chain.
-When set to false, any client, with or without authentication information such as username or password, is allowed to log in.
-When set to quick_deny_anonymous, it behaves like when set to true, but clients will be
-denied immediately without going through any authenticators if username is not provided. This is useful to fence off
-anonymous clients early.
-"""
-    zh: """
-配置为 true(默认值)时,对客户端进行身份认证,通过检查配置的认证器链来决定是否允许接入。
-配置为 false 时,将不对客户端做任何认证,任何客户端,不论是否携带用户名等认证信息,都可以接入。
-配置为 quick_deny_anonymous 时,行为与 true 类似,但对未提供用户名的匿名客户端会直接拒绝,不使用任何认证器对其进行身份检查。这样可以尽早拦截匿名客户端。
-"""
-  }
-  label: {
-    en: "Enable authentication"
-    zh: "启用身份认证"
-  }
-}
-
-mqtt_listener_access_rules {
-  desc {
-    en: """
-The access control rules for this listener.
- See: https://github.com/emqtt/esockd#allowdeny
-"""
-    zh: """此监听器的访问控制规则。详情见:https://github.com/emqtt/esockd#allowdeny"""
-  }
-  label: {
-    en: "Access rules"
-    zh: "访问控制规则"
-  }
-}
-
-mqtt_listener_proxy_protocol {
-  desc {
-    en: """
-Enable the Proxy Protocol V1/2 if the EMQX cluster is deployed behind HAProxy or Nginx.
-See: https://www.haproxy.com/blog/haproxy/proxy-protocol/ -""" - zh: """ -如果EMQX集群部署在 HAProxy 或 Nginx 之后,请启用代理协议 V1/2
-详情见: https://www.haproxy.com/blog/haproxy/proxy-protocol/ -""" - } - label: { - en: "Proxy protocol" - zh: "Proxy protocol" - } -} - -mqtt_listener_proxy_protocol_timeout { - desc { - en: """ -Timeout for proxy protocol. EMQX will close the TCP connection if proxy protocol packet is not received within the timeout. -""" - zh: """ -代理协议超时。如果在超时时间内未收到代理协议数据包,EMQX将关闭TCP连接。 -""" - } - label: { - en: "Proxy protocol timeout" - zh: "Proxy protocol 超时时间" - } -} - -global_authentication { - desc { - en: """Default authentication configs for all MQTT listeners. - -For per-listener overrides see authentication in listener configs - -This option can be configured with: -
-- `[]`: The default value, it allows *ALL* logins
-- one: For example {enable:true,backend:\"built_in_database\",mechanism=\"password_based\"}
-- chain: An array of structs.
-
-When a chain is configured, the login credentials are checked against the backends in the configured order, until an 'allow' or 'deny' decision can be made.
-
-If no decision is made after the full chain is exhausted, the login is rejected.
-"""
-    zh: """全局 MQTT 监听器的默认认证配置。如需为单个监听器配置认证,请参考监听器配置中的 authentication 配置。
-
-该配置可以被配置为:
-
-- `[]`:默认值,允许所有的登录请求
-- 配置为单认证器,例如 {enable:true,backend:\"built_in_database\",mechanism=\"password_based\"}
-- 配置为认证器数组
- -当配置为认证链后,登录凭证会按照配置的顺序进行检查,直到做出allowdeny的结果。 - -如果在所有的认证器都执行完后,还是没有结果,登录将被拒绝。 -""" - } -} - -listener_authentication { - desc { - en: """ -Per-listener authentication override. -Authentication can be one single authenticator instance or a chain of authenticators as an array. -When authenticating a login (username, client ID, etc.) the authenticators are checked in the configured order.
- -""" - zh: """ -监听器认证重载。 - -认证配置可以是单个认证器实例,也可以是一个认证器数组组成的认证链。 -执行登录验证时(用户名、客户端 ID 等),将按配置的顺序执行。 -""" - } - label: { - en: "Per-listener authentication override" - zh: "每个监听器的认证覆盖" - } -} - -fields_rate_limit_max_conn_rate { - desc { - en: """Maximum connections per second.""" - zh: """每秒最大连接数。""" - } - label: { - en: "Max connection rate" - zh: "每秒最大连接数" - } -} - -fields_rate_limit_conn_messages_in { - desc { - en: """Message limit for the external MQTT connections.""" - zh: """外部 MQTT 连接的消息限制。""" - } - label: { - en: "connecting messages in" - zh: "外部 MQTT 连接的消息限制" - } -} - -fields_rate_limit_conn_bytes_in { - desc { - en: """ -Limit the rate of receiving packets for a MQTT connection. -The rate is counted by bytes of packets per second. -""" - zh: """ -限制 MQTT 连接接收数据包的速率。 -速率以每秒的数据包字节数计算。 -""" - } - label: { - en: "Connection bytes in" - zh: "数据包速率" - } -} - -client_ssl_opts_schema_server_name_indication { - desc { - en: """ -Specify the host name to be used in TLS Server Name Indication extension.
-For instance, when connecting to "server.example.net", the genuine server -which accepts the connection and performs TLS handshake may differ from the -host the TLS client initially connects to, e.g. when connecting to an IP address -or when the host has multiple resolvable DNS records
-If not specified, it will default to the host name string used
-to establish the connection, unless an IP address is used.
-The host name is then also used in the host name verification of the peer -certificate.
The special value 'disable' prevents the Server Name -Indication extension from being sent and disables the hostname -verification check. -""" - zh: """ -指定要在 TLS 服务器名称指示扩展中使用的主机名。
-例如,当连接到 "server.example.net" 时,接受连接并执行 TLS 握手的真正服务器可能与 TLS 客户端最初连接到的主机不同, -例如,当连接到 IP 地址时,或者当主机具有多个可解析的 DNS 记录时
-如果未指定,则默认为建立连接时所使用的主机名字符串,除非使用的是 IP 地址。
-该主机名随后还会被用于对端证书的主机名验证。
-特殊值 disable 阻止发送服务器名称指示扩展,并禁用主机名验证检查。 -""" - } - label: { - en: "Server Name Indication" - zh: "服务器名称指示" - } -} - -fields_tcp_opts_active_n { - desc { - en: """ -Specify the {active, N} option for this Socket.
-See: https://erlang.org/doc/man/inet.html#setopts-2 -""" - zh: """ -为此套接字指定{active,N}选项
-详情见:https://erlang.org/doc/man/inet.html#setopts-2
-"""
-  }
-  label: {
-    en: "active_n"
-    zh: "active_n"
-  }
-}
-
-fields_tcp_opts_backlog {
-  desc {
-    en: """
-TCP backlog defines the maximum length that the queue of
- pending connections can grow to.
-"""
-    zh: """
-TCP backlog 定义了挂起连接队列可以增长到的最大长度。
-"""
-  }
-  label: {
-    en: "TCP backlog length"
-    zh: "TCP 连接队列长度"
-  }
-}
-
-fields_tcp_opts_send_timeout {
-  desc {
-    en: """The TCP send timeout for the connections. """
-    zh: """连接的 TCP 发送超时。"""
-  }
-  label: {
-    en: "TCP send timeout"
-    zh: "TCP 发送超时"
-  }
-}
-
-fields_tcp_opts_send_timeout_close {
-  desc {
-    en: """
-Close the connection if send timeout.
-"""
-    zh: """
-如果发送超时,则关闭连接。
-"""
-  }
-  label: {
-    en: "TCP send timeout close"
-    zh: "TCP 发送超时关闭连接"
-  }
-}
-
-fields_tcp_opts_recbuf {
-  desc {
-    en: """
-The TCP receive buffer (OS kernel) for the connections.
-"""
-    zh: """
-连接的 TCP 接收缓冲区(OS 内核)。
-"""
-  }
-  label: {
-    en: "TCP receive buffer"
-    zh: "TCP 接收缓冲区"
-  }
-}
-
-fields_tcp_opts_sndbuf {
-  desc {
-    en: """
-The TCP send buffer (OS kernel) for the connections.
-"""
-    zh: """
-连接的 TCP 发送缓冲区(OS 内核)。
-"""
-  }
-  label: {
-    en: "TCP send buffer"
-    zh: "TCP 发送缓冲区"
-  }
-}
-
-fields_tcp_opts_buffer {
-  desc {
-    en: """
-The size of the user-space buffer used by the driver.
-"""
-    zh: """
-驱动程序使用的用户空间缓冲区的大小。
-"""
-  }
-  label: {
-    en: "TCP user-space buffer"
-    zh: "TCP 用户态缓冲区"
-  }
-}
-
-fields_tcp_opts_high_watermark {
-  desc {
-    en: """
-The socket is set to a busy state when the amount of data queued internally
- by the VM socket implementation reaches this limit.
-"""
-    zh: """
-当 VM 套接字实现内部排队的数据量达到此限制时,套接字将设置为忙碌状态。
-"""
-  }
-  label: {
-    en: "TCP high watermark"
-    zh: "TCP 高水位线"
-  }
-}
-
-fields_tcp_opts_nodelay {
-  desc {
-    en: """
-The TCP_NODELAY flag for the connections.
-"""
-    zh: """
-连接的 TCP_NODELAY 标识。
-"""
-  }
-  label: {
-    en: "TCP_NODELAY"
-    zh: "TCP_NODELAY"
-  }
-}
-
-fields_tcp_opts_reuseaddr {
-  desc {
-    en: """
-The SO_REUSEADDR flag for the connections.
-"""
-    zh: """
-连接的 SO_REUSEADDR 标识。
-"""
-  }
-  label: {
-    en: "SO_REUSEADDR"
-    zh: "SO_REUSEADDR"
-  }
-}
-
-fields_trace_payload_encode {
-  desc {
-    en: """
-Determine the format of the payload in the trace file.
-`text`: Text-based protocol or plain text protocol. - It is recommended when payload is JSON encoded.
-`hex`: Binary hexadecimal encode. It is recommended when payload is a custom binary protocol.
-`hidden`: payload is obfuscated as `******` - -""" - zh: """ -确定跟踪文件中有效负载格式的格式。
-`text`:基于文本的协议或纯文本协议。 -建议在有效负载为JSON编码时使用
-`hex`:二进制十六进制编码。当有效负载是自定义二进制协议时,建议使用此选项
-`hidden`:有效负载被模糊化为 `******` -""" - } - label: { - en: "Payload encode" - zh: "有效负载编码" - } -} - -fields_ws_opts_mqtt_path { - desc { - en: """ -WebSocket's MQTT protocol path. So the address of EMQX Broker's WebSocket is: -ws://{ip}:{port}/mqtt -""" - zh: """ -WebSocket 的 MQTT 协议路径。因此,EMQX Broker的WebSocket地址为: -ws://{ip}:{port}/mqtt -""" - } - label: { - en: "WS MQTT Path" - zh: "WS MQTT 路径" - } -} - -fields_ws_opts_mqtt_piggyback { - desc { - en: """ -Whether a WebSocket message is allowed to contain multiple MQTT packets. -""" - zh: """ -WebSocket消息是否允许包含多个 MQTT 数据包。 -""" - } - label: { - en: "MQTT Piggyback" - zh: "MQTT Piggyback" - } -} - -fields_ws_opts_compress { - desc { - en: """ -If true, compress WebSocket messages using zlib.
-The configuration items under deflate_opts belong to the compression-related parameter configuration. -""" - zh: """ -如果 true,则使用zlib 压缩 WebSocket 消息
-deflate_opts 下的配置项属于压缩相关参数配置。 -""" - } - label: { - en: "Ws compress" - zh: "Ws 压缩" - } -} - -fields_ws_opts_idle_timeout { - desc { - en: """ -Close transport-layer connections from the clients that have not sent MQTT CONNECT -message within this interval. -""" - zh: """ -关闭在此间隔内未发送 MQTT CONNECT 消息的客户端的传输层连接。 -""" - } - label: { - en: "WS idle timeout" - zh: "WS 空闲时间" - } -} - -fields_ws_opts_max_frame_size { - desc { - en: """ -The maximum length of a single MQTT packet. -""" - zh: """ -单个 MQTT 数据包的最大长度。 -""" - } - label: { - en: "Max frame size" - zh: "最大数据包长度" - } -} - -fields_ws_opts_fail_if_no_subprotocol { - desc { - en: """ -If true, the server will return an error when - the client does not carry the Sec-WebSocket-Protocol field. -
 Note: WeChat applet needs to disable this verification.
-"""
-    zh: """
-如果为 true,当客户端未携带 Sec-WebSocket-Protocol 字段时,服务器将返回一个错误。
- 注意:微信小程序需要禁用此验证。
-"""
-  }
-  label: {
-    en: "Fail if no subprotocol"
-    zh: "无 subprotocol 则失败"
-  }
-}
-
-fields_ws_opts_supported_subprotocols {
-  desc {
-    en: """
-Comma-separated list of supported subprotocols.
-"""
-    zh: """
-逗号分隔的 subprotocols 支持列表。
-"""
-  }
-  label: {
-    en: "Supported subprotocols"
-    zh: "Subprotocols 支持列表"
-  }
-}
-
-fields_ws_opts_check_origin_enable {
-  desc {
-    en: """
-If true, origin HTTP header will be
- validated against the list of allowed origins configured in check_origins
- parameter.
-"""
-    zh: """
-如果为 true,origin HTTP 头将根据 check_origins 参数中配置的允许来源列表进行验证。
-"""
-  }
-  label: {
-    en: "Check origin"
-    zh: "检查 origin"
-  }
-}
-
-fields_ws_opts_allow_origin_absence {
-  desc {
-    en: """
-If false and check_origin_enable is
- true, the server will reject requests that don't have origin
- HTTP header.
-"""
-    zh: """
-如果设置为 false 且 check_origin_enable 为 true,服务器将拒绝没有 origin HTTP 头的请求。
-"""
-  }
-  label: {
-    en: "Allow origin absence"
-    zh: "允许 origin 缺失"
-  }
-}
-
-fields_ws_opts_check_origins {
-  desc {
-    en: """
-List of allowed origins.
See check_origin_enable. -""" - zh: """ -允许的 origins 列表 -""" - } - label: { - en: "Allowed origins" - zh: "允许的 origins" - } -} - -fields_ws_opts_proxy_address_header { - desc { - en: """ -HTTP header used to pass information about the client IP address. -Relevant when the EMQX cluster is deployed behind a load-balancer. -""" - zh: """ -HTTP 头,用于传递有关客户端 IP 地址的信息。 -当 EMQX 集群部署在负载平衡器后面时,这一点非常重要。 -""" - } - label: { - en: "Proxy address header" - zh: "客户端地址头" - } -} - -fields_ws_opts_proxy_port_header { - desc { - en: """ -HTTP header used to pass information about the client port. -Relevant when the EMQX cluster is deployed behind a load-balancer. -""" - zh: """ -HTTP 头,用于传递有关客户端端口的信息。 -当 EMQX 集群部署在负载平衡器后面时,这一点非常重要。 -""" - } - label: { - en: "Proxy port header" - zh: "客户端端口头" - } -} - -} diff --git a/apps/emqx/include/asserts.hrl b/apps/emqx/include/asserts.hrl new file mode 100644 index 000000000..98d8e72fc --- /dev/null +++ b/apps/emqx/include/asserts.hrl @@ -0,0 +1,31 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +%% This file contains common macros for testing. +%% It must not be used anywhere except in test suites. + +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +-define(assertWaitEvent(Code, EventMatch, Timeout), + ?assertMatch( + {_, {ok, EventMatch}}, + ?wait_async_action( + Code, + EventMatch, + Timeout + ) + ) +). diff --git a/apps/emqx/include/emqx_channel.hrl b/apps/emqx/include/emqx_channel.hrl new file mode 100644 index 000000000..d4362633a --- /dev/null +++ b/apps/emqx/include/emqx_channel.hrl @@ -0,0 +1,42 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2017-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-define(CHANNEL_METRICS, [ + recv_pkt, + recv_msg, + 'recv_msg.qos0', + 'recv_msg.qos1', + 'recv_msg.qos2', + 'recv_msg.dropped', + 'recv_msg.dropped.await_pubrel_timeout', + send_pkt, + send_msg, + 'send_msg.qos0', + 'send_msg.qos1', + 'send_msg.qos2', + 'send_msg.dropped', + 'send_msg.dropped.expired', + 'send_msg.dropped.queue_full', + 'send_msg.dropped.too_large' +]). + +-define(INFO_KEYS, [ + conninfo, + conn_state, + clientinfo, + session, + will_msg +]). 
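The ?assertWaitEvent macro defined in asserts.hrl above couples an expression with a snabbkaffe trace event and fails unless the event is observed within the timeout. A minimal usage sketch, assuming trace collection is running and a trace point emits a `message_dropped` event (the event kind is an illustrative assumption, not taken from this patch):

-module(assert_wait_demo_tests).
-include_lib("emqx/include/asserts.hrl").
-include_lib("eunit/include/eunit.hrl").

publish_event_test() ->
    ok = snabbkaffe:start_trace(),
    try
        %% Fails unless a #{?snk_kind := message_dropped} event is seen
        %% within 1000 ms of evaluating the publish expression.
        ?assertWaitEvent(
            emqx_broker:publish(emqx_message:make(<<"t/1">>, <<"hello">>)),
            #{?snk_kind := message_dropped},
            1000
        )
    after
        snabbkaffe:stop()
    end.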
diff --git a/apps/emqx/include/emqx_hooks.hrl b/apps/emqx/include/emqx_hooks.hrl index 1665492c5..2373b5928 100644 --- a/apps/emqx/include/emqx_hooks.hrl +++ b/apps/emqx/include/emqx_hooks.hrl @@ -34,6 +34,7 @@ -define(HP_BRIDGE, 870). -define(HP_DELAY_PUB, 860). %% apps that can stop the hooks chain from continuing +-define(HP_NODE_REBALANCE, 110). -define(HP_EXHOOK, 100). %% == Lowest Priority = 0, don't change this value as the plugins may depend on it. diff --git a/apps/emqx/include/emqx_placeholder.hrl b/apps/emqx/include/emqx_placeholder.hrl index c4d235caf..d5da3fb18 100644 --- a/apps/emqx/include/emqx_placeholder.hrl +++ b/apps/emqx/include/emqx_placeholder.hrl @@ -17,6 +17,8 @@ -ifndef(EMQX_PLACEHOLDER_HRL). -define(EMQX_PLACEHOLDER_HRL, true). +-define(PH_VAR_THIS, <<"$_THIS_">>). + -define(PH(Type), <<"${", Type/binary, "}">>). %% action: publish/subscribe/all diff --git a/apps/emqx/include/emqx_quic.hrl b/apps/emqx/include/emqx_quic.hrl new file mode 100644 index 000000000..a16784d5d --- /dev/null +++ b/apps/emqx/include/emqx_quic.hrl @@ -0,0 +1,25 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-ifndef(EMQX_QUIC_HRL). +-define(EMQX_QUIC_HRL, true). + +%% MQTT Over QUIC Shutdown Error code. +-define(MQTT_QUIC_CONN_NOERROR, 0). +-define(MQTT_QUIC_CONN_ERROR_CTRL_STREAM_DOWN, 1). +-define(MQTT_QUIC_CONN_ERROR_OVERLOADED, 2). + +-endif. diff --git a/apps/emqx/include/emqx_release.hrl b/apps/emqx/include/emqx_release.hrl index 98b17eae7..2bb5877f1 100644 --- a/apps/emqx/include/emqx_release.hrl +++ b/apps/emqx/include/emqx_release.hrl @@ -32,10 +32,10 @@ %% `apps/emqx/src/bpapi/README.md' %% Community edition --define(EMQX_RELEASE_CE, "5.0.13"). +-define(EMQX_RELEASE_CE, "5.0.25-rc.1"). %% Enterprise edition --define(EMQX_RELEASE_EE, "5.0.0-beta.6"). +-define(EMQX_RELEASE_EE, "5.0.4-alpha.1"). %% the HTTP API version -define(EMQX_API_VERSION, "5.0"). diff --git a/apps/emqx/include/emqx_schema.hrl b/apps/emqx/include/emqx_schema.hrl new file mode 100644 index 000000000..307bb20c5 --- /dev/null +++ b/apps/emqx/include/emqx_schema.hrl @@ -0,0 +1,23 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. 
+%%-------------------------------------------------------------------- +-ifndef(EMQX_SCHEMA_HRL). +-define(EMQX_SCHEMA_HRL, true). + +-define(TOMBSTONE_TYPE, marked_for_deletion). +-define(TOMBSTONE_VALUE, <<"marked_for_deletion">>). +-define(TOMBSTONE_CONFIG_CHANGE_REQ, mark_it_for_deletion). + +-endif. diff --git a/apps/emqx/src/emqx_trace/emqx_trace.hrl b/apps/emqx/include/emqx_trace.hrl similarity index 93% rename from apps/emqx/src/emqx_trace/emqx_trace.hrl rename to apps/emqx/include/emqx_trace.hrl index 096e786dd..62028bcc0 100644 --- a/apps/emqx/src/emqx_trace/emqx_trace.hrl +++ b/apps/emqx/include/emqx_trace.hrl @@ -24,6 +24,8 @@ filter :: emqx_types:topic() | emqx_types:clientid() | emqx_trace:ip_address() | undefined | '_', enable = true :: boolean() | '_', + payload_encode = text :: hex | text | hidden | '_', + extra = #{} :: map() | '_', start_at :: integer() | undefined | '_', end_at :: integer() | undefined | '_' }). diff --git a/apps/emqx/include/http_api.hrl b/apps/emqx/include/http_api.hrl index 858ce96ce..ba1438374 100644 --- a/apps/emqx/include/http_api.hrl +++ b/apps/emqx/include/http_api.hrl @@ -15,10 +15,8 @@ %%-------------------------------------------------------------------- %% HTTP API Auth --define(WRONG_USERNAME_OR_PWD, 'WRONG_USERNAME_OR_PWD'). --define(WRONG_USERNAME_OR_PWD_OR_API_KEY_OR_API_SECRET, - 'WRONG_USERNAME_OR_PWD_OR_API_KEY_OR_API_SECRET' -). +-define(BAD_USERNAME_OR_PWD, 'BAD_USERNAME_OR_PWD'). +-define(BAD_API_KEY_OR_SECRET, 'BAD_API_KEY_OR_SECRET'). %% Bad Request -define(BAD_REQUEST, 'BAD_REQUEST'). @@ -57,18 +55,18 @@ %% All codes -define(ERROR_CODES, [ - {'WRONG_USERNAME_OR_PWD', <<"Wrong username or pwd">>}, - {'WRONG_USERNAME_OR_PWD_OR_API_KEY_OR_API_SECRET', <<"Wrong username & pwd or key & secret">>}, - {'BAD_REQUEST', <<"Request parameters are not legal">>}, + {?BAD_USERNAME_OR_PWD, <<"Bad username or password">>}, + {?BAD_API_KEY_OR_SECRET, <<"Bad API key or secret">>}, + {'BAD_REQUEST', <<"Request parameters are invalid">>}, {'NOT_MATCH', <<"Conditions are not matched">>}, {'ALREADY_EXISTS', <<"Resource already existed">>}, - {'BAD_CONFIG_SCHEMA', <<"Configuration data is not legal">>}, + {'BAD_CONFIG_SCHEMA', <<"Configuration data is invalid">>}, {'BAD_LISTENER_ID', <<"Bad listener ID">>}, {'BAD_NODE_NAME', <<"Bad Node Name">>}, {'BAD_RPC', <<"RPC Failed. Check the cluster status and the requested node status">>}, {'BAD_TOPIC', <<"Topic syntax error, Topic needs to comply with the MQTT protocol standard">>}, {'EXCEED_LIMIT', <<"Create resources that exceed the maximum limit or minimum limit">>}, - {'INVALID_PARAMETER', <<"Request parameters is not legal and exceeds the boundary value">>}, + {'INVALID_PARAMETER', <<"Request parameters is invalid and exceeds the boundary value">>}, {'CONFLICT', <<"Conflicting request resources">>}, {'NO_DEFAULT_VALUE', <<"Request parameters do not use default values">>}, {'DEPENDENCY_EXISTS', <<"Resource is dependent by another resource">>}, diff --git a/apps/emqx/include/logger.hrl b/apps/emqx/include/logger.hrl index e93aa46f4..27ffc6cc0 100644 --- a/apps/emqx/include/logger.hrl +++ b/apps/emqx/include/logger.hrl @@ -48,9 +48,9 @@ -define(TRACE(Level, Tag, Msg, Meta), begin case persistent_term:get(?TRACE_FILTER, []) of [] -> ok; - %% We can't bind filter list to a variablebecause we pollute the calling scope with it. + %% We can't bind filter list to a variable because we pollute the calling scope with it. 
%% We also don't want to wrap the macro body in a fun - %% beacause this adds overhead to the happy path. + %% because this adds overhead to the happy path. %% So evaluate `persistent_term:get` twice. _ -> emqx_trace:log(persistent_term:get(?TRACE_FILTER, []), Msg, (Meta)#{trace_tag => Tag}) end, diff --git a/apps/emqx/priv/bpapi.versions b/apps/emqx/priv/bpapi.versions index 1a1bac140..dceb38c47 100644 --- a/apps/emqx/priv/bpapi.versions +++ b/apps/emqx/priv/bpapi.versions @@ -1,14 +1,19 @@ %% This file is automatically generated by `make static_checks`, do not edit. {emqx,1}. +{emqx,2}. {emqx_authn,1}. {emqx_authz,1}. {emqx_bridge,1}. +{emqx_bridge,2}. +{emqx_bridge,3}. +{emqx_bridge,4}. {emqx_broker,1}. {emqx_cm,1}. {emqx_conf,1}. {emqx_conf,2}. {emqx_dashboard,1}. {emqx_delayed,1}. +{emqx_eviction_agent,1}. {emqx_exhook,1}. {emqx_gateway_api_listeners,1}. {emqx_gateway_cm,1}. @@ -22,8 +27,13 @@ {emqx_mgmt_cluster,1}. {emqx_mgmt_trace,1}. {emqx_mgmt_trace,2}. +{emqx_node_rebalance,1}. +{emqx_node_rebalance_api,1}. +{emqx_node_rebalance_evacuation,1}. +{emqx_node_rebalance_status,1}. {emqx_persistent_session,1}. {emqx_plugin_libs,1}. +{emqx_plugins,1}. {emqx_prometheus,1}. {emqx_resource,1}. {emqx_retainer,1}. diff --git a/apps/emqx/rebar.config b/apps/emqx/rebar.config index 8ff4fea58..97a0c0f31 100644 --- a/apps/emqx/rebar.config +++ b/apps/emqx/rebar.config @@ -18,24 +18,25 @@ ]}. %% Deps here may duplicate with emqx.git root level rebar.config -%% but there not be any descrpancy. +%% but there may not be any discrepancy. %% This rebar.config is necessary because the app may be used as a %% `git_subdir` dependency in other projects. {deps, [ + {emqx_utils, {path, "../emqx_utils"}}, {lc, {git, "https://github.com/emqx/lc.git", {tag, "0.3.2"}}}, - {gproc, {git, "https://github.com/uwiger/gproc", {tag, "0.8.0"}}}, - {jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}}, + {gproc, {git, "https://github.com/emqx/gproc", {tag, "0.9.0.1"}}}, {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}}, - {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.4"}}}, - {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.13.7"}}}, + {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.6"}}}, + {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.1"}}}, {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}}, - {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.33.0"}}}, + {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.6"}}}, + {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.2"}}}, {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}}, {recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}}, - {snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.0"}}} + {snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.8"}}} ]}. -{plugins, [{rebar3_proper, "0.12.1"}]}. +{plugins, [{rebar3_proper, "0.12.1"}, rebar3_path_deps]}. {extra_src_dirs, [{"etc", [recursive]}]}. {profiles, [ {test, [ @@ -43,7 +44,7 @@ {meck, "0.9.2"}, {proper, "1.4.0"}, {bbmustache, "1.10.0"}, - {emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.7.0-rc.1"}}} + {emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.8.5"}}} ]}, {extra_src_dirs, [{"test", [recursive]}]} ]} @@ -58,4 +59,12 @@ {statistics, true} ]}. -{project_plugins, [erlfmt]}. 
+{project_plugins, [ + {erlfmt, [ + {files, [ + "{src,include,test}/*.{hrl,erl,app.src}", + "rebar.config", + "rebar.config.script" + ]} + ]} +]}. diff --git a/apps/emqx/rebar.config.script b/apps/emqx/rebar.config.script index 75f748017..7aadb1f59 100644 --- a/apps/emqx/rebar.config.script +++ b/apps/emqx/rebar.config.script @@ -24,7 +24,20 @@ IsQuicSupp = fun() -> end, Bcrypt = {bcrypt, {git, "https://github.com/emqx/erlang-bcrypt.git", {tag, "0.6.0"}}}, -Quicer = {quicer, {git, "https://github.com/emqx/quic.git", {tag, "0.0.16"}}}. +Quicer = {quicer, {git, "https://github.com/emqx/quic.git", {tag, "0.0.114"}}}. + +Dialyzer = fun(Config) -> + {dialyzer, OldDialyzerConfig} = lists:keyfind(dialyzer, 1, Config), + {plt_extra_apps, OldExtra} = lists:keyfind(plt_extra_apps, 1, OldDialyzerConfig), + Extra = OldExtra ++ [quicer || IsQuicSupp()], + NewDialyzerConfig = [{plt_extra_apps, Extra} | OldDialyzerConfig], + lists:keystore( + dialyzer, + 1, + Config, + {dialyzer, NewDialyzerConfig} + ) +end. ExtraDeps = fun(C) -> {deps, Deps0} = lists:keyfind(deps, 1, C), @@ -43,4 +56,4 @@ ExtraDeps = fun(C) -> ) end, -ExtraDeps(CONFIG). +Dialyzer(ExtraDeps(CONFIG)). diff --git a/apps/emqx/rebar3 b/apps/emqx/rebar3 deleted file mode 100755 index edb85b3c9..000000000 Binary files a/apps/emqx/rebar3 and /dev/null differ diff --git a/apps/emqx/src/config/emqx_config_logger.erl b/apps/emqx/src/config/emqx_config_logger.erl index babf9c431..a7bf54aee 100644 --- a/apps/emqx/src/config/emqx_config_logger.erl +++ b/apps/emqx/src/config/emqx_config_logger.erl @@ -18,7 +18,8 @@ -behaviour(emqx_config_handler). %% API --export([add_handler/0, remove_handler/0]). +-export([tr_handlers/1, tr_level/1]). +-export([add_handler/0, remove_handler/0, refresh_config/0]). -export([post_config_update/5]). -define(LOG, [log]). @@ -31,29 +32,222 @@ remove_handler() -> ok = emqx_config_handler:remove_handler(?LOG), ok. -post_config_update(?LOG, _Req, _NewConf, _OldConf, AppEnvs) -> - Kernel = proplists:get_value(kernel, AppEnvs), - NewHandlers = proplists:get_value(logger, Kernel, []), - Level = proplists:get_value(logger_level, Kernel, warning), - ok = update_log_handlers(NewHandlers), - ok = emqx_logger:set_primary_log_level(Level), - application:set_env(kernel, logger_level, Level), - ok; -post_config_update(_ConfPath, _Req, _NewConf, _OldConf, _AppEnvs) -> +%% refresh logger config when booting, the cluster config may have changed after node start. +%% Kernel's app env is confirmed before the node starts, +%% but we only copy cluster.conf from other node after this node starts, +%% so we need to refresh the logger config after this node starts. +%% It will not affect the logger config when cluster.conf is unchanged. +refresh_config() -> + %% read the checked config + LogConfig = emqx:get_config(?LOG, undefined), + do_refresh_config(#{log => LogConfig}). + +%% this call is shared between initial config refresh at boot +%% and dynamic config update from HTTP API +do_refresh_config(Conf) -> + Handlers = tr_handlers(Conf), + ok = update_log_handlers(Handlers), + Level = tr_level(Conf), + ok = maybe_update_log_level(Level), ok. +%% always refresh config when the override config is changed +post_config_update(?LOG, _Req, NewConf, _OldConf, _AppEnvs) -> + do_refresh_config(#{log => NewConf}). 
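+%% For illustration only: once the node has booted and the cluster
+%% config has been copied over, a single call is expected to bring the
+%% running logger handlers in sync with the checked config (a sketch,
+%% not an exhaustive boot sequence):
+%%
+%%   ok = emqx_config_logger:refresh_config().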
+ +maybe_update_log_level(NewLevel) -> + OldLevel = emqx_logger:get_primary_log_level(), + case OldLevel =:= NewLevel of + true -> + %% no change + ok; + false -> + ok = emqx_logger:set_primary_log_level(NewLevel), + %% also update kernel's logger_level for troubleshooting + %% what is actually in effect is the logger's primary log level + ok = application:set_env(kernel, logger_level, NewLevel), + log_to_console("Config override: log level is set to '~p'~n", [NewLevel]) + end. + +log_to_console(Fmt, Args) -> + io:format(standard_error, Fmt, Args). + update_log_handlers(NewHandlers) -> OldHandlers = application:get_env(kernel, logger, []), - lists:foreach( - fun({handler, HandlerId, _Mod, _Conf}) -> - logger:remove_handler(HandlerId) + NewHandlersIds = lists:map(fun({handler, Id, _Mod, _Conf}) -> Id end, NewHandlers), + OldHandlersIds = lists:map(fun({handler, Id, _Mod, _Conf}) -> Id end, OldHandlers), + Removes = lists:map(fun(Id) -> {removed, Id} end, OldHandlersIds -- NewHandlersIds), + MapFn = fun({handler, Id, Mod, Conf} = Handler) -> + case lists:keyfind(Id, 2, OldHandlers) of + {handler, Id, Mod, Conf} -> + %% no change + false; + {handler, Id, _Mod, _Conf} -> + {true, {updated, Handler}}; + false -> + {true, {enabled, Handler}} + end + end, + AddsAndUpdates = lists:filtermap(MapFn, NewHandlers), + lists:foreach(fun update_log_handler/1, Removes ++ AddsAndUpdates), + ok = application:set_env(kernel, logger, NewHandlers), + ok. + +update_log_handler({removed, Id}) -> + log_to_console("Config override: ~s is removed~n", [id_for_log(Id)]), + logger:remove_handler(Id); +update_log_handler({Action, {handler, Id, Mod, Conf}}) -> + log_to_console("Config override: ~s is ~p~n", [id_for_log(Id), Action]), + % may return {error, {not_found, Id}} + _ = logger:remove_handler(Id), + case logger:add_handler(Id, Mod, Conf) of + ok -> + ok; + %% Don't crash here, otherwise the cluster rpc will retry the wrong handler forever. + {error, Reason} -> + log_to_console( + "Config override: ~s is ~p, but failed to add handler: ~p~n", + [id_for_log(Id), Action, Reason] + ) + end, + ok. + +id_for_log(console) -> "log.console"; +id_for_log(Other) -> "log.file." ++ atom_to_list(Other). + +atom(Id) when is_binary(Id) -> binary_to_atom(Id, utf8); +atom(Id) when is_atom(Id) -> Id. + +%% @doc Translate raw config to app-env compatible log handler configs list. +tr_handlers(Conf) -> + %% mute the default handler + tr_console_handler(Conf) ++ + tr_file_handlers(Conf). + +%% For the default logger that outputs to console +tr_console_handler(Conf) -> + case conf_get("log.console.enable", Conf) of + true -> + ConsoleConf = conf_get("log.console", Conf), + [ + {handler, console, logger_std_h, #{ + level => conf_get("log.console.level", Conf), + config => (log_handler_conf(ConsoleConf))#{type => standard_io}, + formatter => log_formatter(ConsoleConf), + filters => log_filter(ConsoleConf) + }} + ]; + false -> + [] + end. + +%% For the file logger +tr_file_handlers(Conf) -> + Handlers = logger_file_handlers(Conf), + lists:map(fun tr_file_handler/1, Handlers). + +tr_file_handler({HandlerName, SubConf}) -> + {handler, atom(HandlerName), logger_disk_log_h, #{ + level => conf_get("level", SubConf), + config => (log_handler_conf(SubConf))#{ + type => wrap, + file => conf_get("to", SubConf), + max_no_files => conf_get("rotation_count", SubConf), + max_no_bytes => conf_get("rotation_size", SubConf) + }, + formatter => log_formatter(SubConf), + filters => log_filter(SubConf), + filesync_repeat_interval => no_repeat + }}. 
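+%% For illustration, a file handler entry named `default` is translated
+%% by tr_file_handler/1 into a logger_disk_log_h spec roughly of this
+%% shape (level, file path and rotation values below are assumptions,
+%% not defaults taken from this module):
+%%
+%%   {handler, default, logger_disk_log_h, #{
+%%       level => warning,
+%%       config => #{type => wrap, file => "log/emqx.log",
+%%                   max_no_files => 10, max_no_bytes => 52428800},
+%%       filesync_repeat_interval => no_repeat
+%%   }}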
+ +logger_file_handlers(Conf) -> + lists:filter( + fun({_Name, Handler}) -> + conf_get("enable", Handler, false) end, - OldHandlers -- NewHandlers - ), - lists:foreach( - fun({handler, HandlerId, Mod, Conf}) -> - logger:add_handler(HandlerId, Mod, Conf) + maps:to_list(conf_get("log.file", Conf, #{})) + ). + +conf_get(Key, Conf) -> emqx_schema:conf_get(Key, Conf). +conf_get(Key, Conf, Default) -> emqx_schema:conf_get(Key, Conf, Default). + +log_handler_conf(Conf) -> + SycModeQlen = conf_get("sync_mode_qlen", Conf), + DropModeQlen = conf_get("drop_mode_qlen", Conf), + FlushQlen = conf_get("flush_qlen", Conf), + Overkill = conf_get("overload_kill", Conf), + BurstLimit = conf_get("burst_limit", Conf), + #{ + sync_mode_qlen => SycModeQlen, + drop_mode_qlen => DropModeQlen, + flush_qlen => FlushQlen, + overload_kill_enable => conf_get("enable", Overkill), + overload_kill_qlen => conf_get("qlen", Overkill), + overload_kill_mem_size => conf_get("mem_size", Overkill), + overload_kill_restart_after => conf_get("restart_after", Overkill), + burst_limit_enable => conf_get("enable", BurstLimit), + burst_limit_max_count => conf_get("max_count", BurstLimit), + burst_limit_window_time => conf_get("window_time", BurstLimit) + }. + +log_formatter(Conf) -> + CharsLimit = + case conf_get("chars_limit", Conf) of + unlimited -> unlimited; + V when V > 0 -> V end, - NewHandlers -- OldHandlers - ), - application:set_env(kernel, logger, NewHandlers). + TimeOffSet = + case conf_get("time_offset", Conf) of + "system" -> ""; + "utc" -> 0; + OffSetStr -> OffSetStr + end, + SingleLine = conf_get("single_line", Conf), + Depth = conf_get("max_depth", Conf), + do_formatter(conf_get("formatter", Conf), CharsLimit, SingleLine, TimeOffSet, Depth). + +%% helpers +do_formatter(json, CharsLimit, SingleLine, TimeOffSet, Depth) -> + {emqx_logger_jsonfmt, #{ + chars_limit => CharsLimit, + single_line => SingleLine, + time_offset => TimeOffSet, + depth => Depth + }}; +do_formatter(text, CharsLimit, SingleLine, TimeOffSet, Depth) -> + {emqx_logger_textfmt, #{ + template => [time, " [", level, "] ", msg, "\n"], + chars_limit => CharsLimit, + single_line => SingleLine, + time_offset => TimeOffSet, + depth => Depth + }}. + +log_filter(Conf) -> + case conf_get("supervisor_reports", Conf) of + error -> [{drop_progress_reports, {fun logger_filters:progress/2, stop}}]; + progress -> [] + end. + +tr_level(Conf) -> + ConsoleLevel = conf_get("log.console.level", Conf, undefined), + FileLevels = [conf_get("level", SubConf) || {_, SubConf} <- logger_file_handlers(Conf)], + case FileLevels ++ [ConsoleLevel || ConsoleLevel =/= undefined] of + %% warning is the default level we should use + [] -> warning; + Levels -> least_severe_log_level(Levels) + end. + +least_severe_log_level(Levels) -> + hd(sort_log_levels(Levels)). + +sort_log_levels(Levels) -> + lists:sort( + fun(A, B) -> + case logger:compare_levels(A, B) of + R when R == lt; R == eq -> true; + gt -> false + end + end, + Levels + ). diff --git a/apps/emqx/src/emqx.app.src b/apps/emqx/src/emqx.app.src index bd7617e74..5ca8fc797 100644 --- a/apps/emqx/src/emqx.app.src +++ b/apps/emqx/src/emqx.app.src @@ -3,7 +3,7 @@ {id, "emqx"}, {description, "EMQX Core"}, % strict semver, bump manually! 
- {vsn, "5.0.14"}, + {vsn, "5.0.25"}, {modules, []}, {registered, []}, {applications, [ @@ -16,7 +16,6 @@ cowboy, sasl, os_mon, - jiffy, lc, hocon ]}, diff --git a/apps/emqx/src/emqx.appup.src b/apps/emqx/src/emqx.appup.src index d3121c97b..04bf1f428 100644 --- a/apps/emqx/src/emqx.appup.src +++ b/apps/emqx/src/emqx.appup.src @@ -1,33 +1,5 @@ %% -*- mode: erlang -*- %% Unless you know what you are doing, DO NOT edit manually!! {VSN, - [{"5.0.0", - [{load_module,emqx_quic_connection,brutal_purge,soft_purge,[]}, - {load_module,emqx_config,brutal_purge,soft_purge,[]}, - {load_module,emqx_channel,brutal_purge,soft_purge,[]}, - {load_module,emqx_schema,brutal_purge,soft_purge,[]}, - {load_module,emqx_release,brutal_purge,soft_purge,[]}, - {load_module,emqx_authentication,brutal_purge,soft_purge,[]}, - {load_module,emqx_metrics,brutal_purge,soft_purge,[]}, - {add_module,emqx_exclusive_subscription}, - {apply,{emqx_exclusive_subscription,on_add_module,[]}}, - {load_module,emqx_broker,brutal_purge,soft_purge,[]}, - {load_module,emqx_mqtt_caps,brutal_purge,soft_purge,[]}, - {load_module,emqx_topic,brutal_purge,soft_purge,[]}, - {load_module,emqx_relup}]}, - {<<".*">>,[]}], - [{"5.0.0", - [{load_module,emqx_quic_connection,brutal_purge,soft_purge,[]}, - {load_module,emqx_config,brutal_purge,soft_purge,[]}, - {load_module,emqx_channel,brutal_purge,soft_purge,[]}, - {load_module,emqx_schema,brutal_purge,soft_purge,[]}, - {load_module,emqx_release,brutal_purge,soft_purge,[]}, - {load_module,emqx_authentication,brutal_purge,soft_purge,[]}, - {load_module,emqx_metrics,brutal_purge,soft_purge,[]}, - {load_module,emqx_broker,brutal_purge,soft_purge,[]}, - {load_module,emqx_mqtt_caps,brutal_purge,soft_purge,[]}, - {load_module,emqx_topic,brutal_purge,soft_purge,[]}, - {apply,{emqx_exclusive_subscription,on_delete_module,[]}}, - {delete_module,emqx_exclusive_subscription}, - {load_module,emqx_relup}]}, - {<<".*">>,[]}]}. + [{<<".*">>,[]}], + [{<<".*">>,[]}]}. diff --git a/apps/emqx/src/emqx.erl b/apps/emqx/src/emqx.erl index 6e4aa9922..ffee5fba7 100644 --- a/apps/emqx/src/emqx.erl +++ b/apps/emqx/src/emqx.erl @@ -30,6 +30,12 @@ stop/0 ]). +%% Cluster API +-export([ + cluster_nodes/1, + running_nodes/0 +]). + %% PubSub API -export([ subscribe/1, @@ -102,6 +108,18 @@ is_running() -> _ -> true end. +%%-------------------------------------------------------------------- +%% Cluster API +%%-------------------------------------------------------------------- + +-spec running_nodes() -> [node()]. +running_nodes() -> + mria:running_nodes(). + +-spec cluster_nodes(all | running | cores | stopped) -> [node()]. +cluster_nodes(Type) -> + mria:cluster_nodes(Type). + %%-------------------------------------------------------------------- %% PubSub API %%-------------------------------------------------------------------- @@ -164,29 +182,29 @@ run_hook(HookPoint, Args) -> run_fold_hook(HookPoint, Args, Acc) -> emqx_hooks:run_fold(HookPoint, Args, Acc). --spec get_config(emqx_map_lib:config_key_path()) -> term(). +-spec get_config(emqx_utils_maps:config_key_path()) -> term(). get_config(KeyPath) -> emqx_config:get(KeyPath). --spec get_config(emqx_map_lib:config_key_path(), term()) -> term(). +-spec get_config(emqx_utils_maps:config_key_path(), term()) -> term(). get_config(KeyPath, Default) -> emqx_config:get(KeyPath, Default). --spec get_raw_config(emqx_map_lib:config_key_path()) -> term(). +-spec get_raw_config(emqx_utils_maps:config_key_path()) -> term(). get_raw_config(KeyPath) -> emqx_config:get_raw(KeyPath). 
--spec get_raw_config(emqx_map_lib:config_key_path(), term()) -> term(). +-spec get_raw_config(emqx_utils_maps:config_key_path(), term()) -> term(). get_raw_config(KeyPath, Default) -> emqx_config:get_raw(KeyPath, Default). --spec update_config(emqx_map_lib:config_key_path(), emqx_config:update_request()) -> +-spec update_config(emqx_utils_maps:config_key_path(), emqx_config:update_request()) -> {ok, emqx_config:update_result()} | {error, emqx_config:update_error()}. update_config(KeyPath, UpdateReq) -> update_config(KeyPath, UpdateReq, #{}). -spec update_config( - emqx_map_lib:config_key_path(), + emqx_utils_maps:config_key_path(), emqx_config:update_request(), emqx_config:update_opts() ) -> @@ -198,12 +216,12 @@ update_config([RootName | _] = KeyPath, UpdateReq, Opts) -> {{update, UpdateReq}, Opts} ). --spec remove_config(emqx_map_lib:config_key_path()) -> +-spec remove_config(emqx_utils_maps:config_key_path()) -> {ok, emqx_config:update_result()} | {error, emqx_config:update_error()}. remove_config(KeyPath) -> remove_config(KeyPath, #{}). --spec remove_config(emqx_map_lib:config_key_path(), emqx_config:update_opts()) -> +-spec remove_config(emqx_utils_maps:config_key_path(), emqx_config:update_opts()) -> {ok, emqx_config:update_result()} | {error, emqx_config:update_error()}. remove_config([RootName | _] = KeyPath, Opts) -> emqx_config_handler:update_config( @@ -212,7 +230,7 @@ remove_config([RootName | _] = KeyPath, Opts) -> {remove, Opts} ). --spec reset_config(emqx_map_lib:config_key_path(), emqx_config:update_opts()) -> +-spec reset_config(emqx_utils_maps:config_key_path(), emqx_config:update_opts()) -> {ok, emqx_config:update_result()} | {error, emqx_config:update_error()}. reset_config([RootName | _] = KeyPath, Opts) -> case emqx_config:get_default_value(KeyPath) of diff --git a/apps/emqx/src/emqx_alarm.erl b/apps/emqx/src/emqx_alarm.erl index 6122ff596..056f36050 100644 --- a/apps/emqx/src/emqx_alarm.erl +++ b/apps/emqx/src/emqx_alarm.erl @@ -42,7 +42,9 @@ get_alarms/0, get_alarms/1, format/1, - format/2 + format/2, + safe_activate/3, + safe_deactivate/1 ]). %% gen_server callbacks @@ -57,7 +59,6 @@ %% Internal exports (RPC) -export([ - create_activate_alarm/3, do_get_alarms/0 ]). @@ -89,7 +90,7 @@ mnesia(boot) -> ok = mria:create_table( ?ACTIVATED_ALARM, [ - {type, set}, + {type, ordered_set}, {storage, disc_copies}, {local_content, true}, {record_name, activated_alarm}, @@ -123,6 +124,9 @@ activate(Name, Details) -> activate(Name, Details, Message) -> gen_server:call(?MODULE, {activate_alarm, Name, Details, Message}). +safe_activate(Name, Details, Message) -> + safe_call({activate_alarm, Name, Details, Message}). + -spec ensure_deactivated(binary() | atom()) -> ok. ensure_deactivated(Name) -> ensure_deactivated(Name, no_details). @@ -155,6 +159,9 @@ deactivate(Name, Details) -> deactivate(Name, Details, Message) -> gen_server:call(?MODULE, {deactivate_alarm, Name, Details, Message}). +safe_deactivate(Name) -> + safe_call({deactivate_alarm, Name, no_details, <<"">>}). + -spec delete_all_deactivated_alarms() -> ok. delete_all_deactivated_alarms() -> gen_server:call(?MODULE, delete_all_deactivated_alarms). @@ -218,17 +225,12 @@ init([]) -> {ok, #{}, get_validity_period()}. 
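A quick note on the alarm API additions above: safe_activate/3 and safe_deactivate/1 route through the new safe_call/1 helper (added at the end of this file's hunks), so callers on hot paths get an {error, timeout} tuple back instead of exiting when the alarm server is overloaded. A minimal usage sketch; the alarm name, details, and message here are illustrative, not from this diff:

%% Report an alarm from a hot path without risking a caller crash:
report_high_mem(UsageRatio) ->
    _ = emqx_alarm:safe_activate(
        high_mem_usage,
        #{usage => UsageRatio},
        <<"Memory usage is high">>
    ),
    ok.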
 handle_call({activate_alarm, Name, Details, Message}, _From, State) ->
-    Res = mria:transaction(
-        mria:local_content_shard(),
-        fun ?MODULE:create_activate_alarm/3,
-        [Name, Details, Message]
-    ),
-    case Res of
-        {atomic, Alarm} ->
+    case create_activate_alarm(Name, Details, Message) of
+        {ok, Alarm} ->
             do_actions(activate, Alarm, emqx:get_config([alarm, actions])),
             {reply, ok, State, get_validity_period()};
-        {aborted, Reason} ->
-            {reply, Reason, State, get_validity_period()}
+        Err ->
+            {reply, Err, State, get_validity_period()}
     end;
 handle_call({deactivate_alarm, Name, Details, Message}, _From, State) ->
     case mnesia:dirty_read(?ACTIVATED_ALARM, Name) of
@@ -283,9 +285,9 @@ get_validity_period() ->
     emqx:get_config([alarm, validity_period]).
 
 create_activate_alarm(Name, Details, Message) ->
-    case mnesia:read(?ACTIVATED_ALARM, Name) of
+    case mnesia:dirty_read(?ACTIVATED_ALARM, Name) of
         [#activated_alarm{name = Name}] ->
-            mnesia:abort({error, already_existed});
+            {error, already_existed};
         [] ->
             Alarm = #activated_alarm{
                 name = Name,
@@ -293,8 +295,8 @@ create_activate_alarm(Name, Details, Message) ->
                 message = normalize_message(Name, iolist_to_binary(Message)),
                 activate_at = erlang:system_time(microsecond)
             },
-            ok = mnesia:write(?ACTIVATED_ALARM, Alarm, write),
-            Alarm
+            ok = mria:dirty_write(?ACTIVATED_ALARM, Alarm),
+            {ok, Alarm}
     end.
 
 do_get_alarms() ->
@@ -325,19 +327,20 @@ deactivate_alarm(
         false ->
             ok
     end,
+    Now = erlang:system_time(microsecond),
     HistoryAlarm = make_deactivated_alarm(
         ActivateAt,
         Name,
         Details0,
         Msg0,
-        erlang:system_time(microsecond)
+        Now
     ),
     DeActAlarm = make_deactivated_alarm(
         ActivateAt,
         Name,
         Details,
         normalize_message(Name, iolist_to_binary(Message)),
-        erlang:system_time(microsecond)
+        Now
     ),
     mria:dirty_write(?DEACTIVATED_ALARM, HistoryAlarm),
     mria:dirty_delete(?ACTIVATED_ALARM, Name),
@@ -422,7 +425,7 @@ do_actions(deactivate, Alarm = #deactivated_alarm{name = Name}, [log | More]) ->
     do_actions(deactivate, Alarm, More);
 do_actions(Operation, Alarm, [publish | More]) ->
     Topic = topic(Operation),
-    {ok, Payload} = emqx_json:safe_encode(normalize(Alarm)),
+    {ok, Payload} = emqx_utils_json:safe_encode(normalize(Alarm)),
     Message = emqx_message:make(
         ?MODULE,
         0,
@@ -473,3 +476,19 @@ normalize_message(Name, <<"">>) ->
     list_to_binary(io_lib:format("~p", [Name]));
 normalize_message(_Name, Message) ->
     Message.
+
+safe_call(Req) ->
+    try
+        gen_server:call(?MODULE, Req)
+    catch
+        _:{timeout, _} = Reason ->
+            ?SLOG(warning, #{msg => "emqx_alarm_safe_call_timeout", reason => Reason}),
+            {error, timeout};
+        _:Reason:St ->
+            ?SLOG(error, #{
+                msg => "emqx_alarm_safe_call_exception",
+                reason => Reason,
+                stacktrace => St
+            }),
+            {error, Reason}
+    end.
diff --git a/apps/emqx/src/emqx_app.erl b/apps/emqx/src/emqx_app.erl
index 6188d8030..77ece1c60 100644
--- a/apps/emqx/src/emqx_app.erl
+++ b/apps/emqx/src/emqx_app.erl
@@ -72,9 +72,13 @@ set_init_config_load_done() ->
 get_init_config_load_done() ->
     application:get_env(emqx, init_config_load_done, false).
 
+%% @doc Set the transaction ID from which this node should start applying
+%% cluster-RPC transactions after boot. The ID is received from the core node
+%% from which this node just copied the latest config.
 set_init_tnx_id(TnxId) ->
     application:set_env(emqx, cluster_rpc_init_tnx_id, TnxId).
 
+%% @doc Get the transaction ID from which this node should start applying cluster-RPC transactions after boot.
 get_init_tnx_id() ->
     application:get_env(emqx, cluster_rpc_init_tnx_id, -1).
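The two @doc comments directly above describe a handshake between a freshly booted node and a core node. The sketch below shows the intended call order; copy_config_from_core/0 is a hypothetical placeholder for the real config-copy RPC, which is not part of this diff:

%% Boot-time sketch: remember where to resume applying cluster-RPC
%% transactions after copying config from a core node.
boot_sync_config() ->
    {TnxId, _RawConf} = copy_config_from_core(),
    ok = emqx_app:set_init_tnx_id(TnxId),
    %% Later, the cluster-RPC worker reads it back (-1 means "apply from scratch"):
    emqx_app:get_init_tnx_id().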
diff --git a/apps/emqx/src/emqx_authentication.erl b/apps/emqx/src/emqx_authentication.erl index 017bc982d..fe93bed68 100644 --- a/apps/emqx/src/emqx_authentication.erl +++ b/apps/emqx/src/emqx_authentication.erl @@ -661,7 +661,7 @@ do_authenticate( _ -> ok end, - {ok, Result} + {stop, Result} catch Class:Reason:Stacktrace -> ?TRACE_AUTHN(warning, "authenticator_error", #{ @@ -759,9 +759,10 @@ maybe_unhook(State) -> State. do_create_authenticator(AuthenticatorID, #{enable := Enable} = Config, Providers) -> - case maps:get(authn_type(Config), Providers, undefined) of + Type = authn_type(Config), + case maps:get(Type, Providers, undefined) of undefined -> - {error, no_available_provider}; + {error, {no_available_provider_for, Type}}; Provider -> case Provider:create(AuthenticatorID, Config) of {ok, State} -> diff --git a/apps/emqx/src/emqx_authentication_config.erl b/apps/emqx/src/emqx_authentication_config.erl index 98471e152..98c0a19f8 100644 --- a/apps/emqx/src/emqx_authentication_config.erl +++ b/apps/emqx/src/emqx_authentication_config.erl @@ -29,9 +29,13 @@ authn_type/1 ]). --ifdef(TEST). --export([convert_certs/2, convert_certs/3, clear_certs/2]). --endif. +%% Used in emqx_gateway +-export([ + certs_dir/2, + convert_certs/2, + convert_certs/3, + clear_certs/2 +]). -export_type([config/0]). @@ -136,7 +140,7 @@ do_pre_config_update({move_authenticator, _ChainName, AuthenticatorID, Position} ) -> ok | {ok, map()} | {error, term()}. post_config_update(_, UpdateReq, NewConfig, OldConfig, AppEnvs) -> - do_post_config_update(UpdateReq, check_configs(to_list(NewConfig)), OldConfig, AppEnvs). + do_post_config_update(UpdateReq, to_list(NewConfig), OldConfig, AppEnvs). do_post_config_update({create_authenticator, ChainName, Config}, NewConfig, _OldConfig, _AppEnvs) -> NConfig = get_authenticator_config(authenticator_id(Config), NewConfig), @@ -175,56 +179,6 @@ do_post_config_update( ) -> emqx_authentication:move_authenticator(ChainName, AuthenticatorID, Position). -check_configs(Configs) -> - Providers = emqx_authentication:get_providers(), - lists:map(fun(C) -> do_check_config(C, Providers) end, Configs). - -do_check_config(Config, Providers) -> - Type = authn_type(Config), - case maps:get(Type, Providers, false) of - false -> - ?SLOG(warning, #{ - msg => "unknown_authn_type", - type => Type, - providers => Providers - }), - throw({unknown_authn_type, Type}); - Module -> - do_check_config(Type, Config, Module) - end. - -do_check_config(Type, Config, Module) -> - F = - case erlang:function_exported(Module, check_config, 1) of - true -> - fun Module:check_config/1; - false -> - fun(C) -> - Key = list_to_binary(?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME), - AtomKey = list_to_atom(?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME), - R = hocon_tconf:check_plain( - Module, - #{Key => C}, - #{atom_key => true} - ), - maps:get(AtomKey, R) - end - end, - try - F(Config) - catch - C:E:S -> - ?SLOG(warning, #{ - msg => "failed_to_check_config", - config => Config, - type => Type, - exception => C, - reason => E, - stacktrace => S - }), - throw({bad_authenticator_config, #{type => Type, reason => E}}) - end. - to_list(undefined) -> []; to_list(M) when M =:= #{} -> []; to_list(M) when is_map(M) -> [M]; @@ -327,9 +281,9 @@ atom(Bin) -> binary_to_existing_atom(Bin, utf8). certs_dir(ChainName, ConfigOrID) -> DirName = dir(ChainName, ConfigOrID), SubDir = iolist_to_binary(filename:join(["authn", DirName])), - binary:replace(SubDir, <<":">>, <<"-">>, [global]). + emqx_utils:safe_filename(SubDir). 
dir(ChainName, ID) when is_binary(ID) -> - binary:replace(iolist_to_binary([to_bin(ChainName), "-", ID]), <<":">>, <<"-">>); + emqx_utils:safe_filename(iolist_to_binary([to_bin(ChainName), "-", ID])); dir(ChainName, Config) when is_map(Config) -> dir(ChainName, authenticator_id(Config)). diff --git a/apps/emqx/src/emqx_banned.erl b/apps/emqx/src/emqx_banned.erl index 758c570da..a0ccd93d7 100644 --- a/apps/emqx/src/emqx_banned.erl +++ b/apps/emqx/src/emqx_banned.erl @@ -243,7 +243,7 @@ handle_info(Info, State) -> {noreply, State}. terminate(_Reason, #{expiry_timer := TRef}) -> - emqx_misc:cancel_timer(TRef). + emqx_utils:cancel_timer(TRef). code_change(_OldVsn, State, _Extra) -> {ok, State}. @@ -254,10 +254,10 @@ code_change(_OldVsn, State, _Extra) -> -ifdef(TEST). ensure_expiry_timer(State) -> - State#{expiry_timer := emqx_misc:start_timer(10, expire)}. + State#{expiry_timer := emqx_utils:start_timer(10, expire)}. -else. ensure_expiry_timer(State) -> - State#{expiry_timer := emqx_misc:start_timer(timer:minutes(1), expire)}. + State#{expiry_timer := emqx_utils:start_timer(timer:minutes(1), expire)}. -endif. expire_banned_items(Now) -> diff --git a/apps/emqx/src/emqx_batch.erl b/apps/emqx/src/emqx_batch.erl index 2fe09942c..22e812975 100644 --- a/apps/emqx/src/emqx_batch.erl +++ b/apps/emqx/src/emqx_batch.erl @@ -85,7 +85,7 @@ commit(Batch = #batch{batch_q = Q, commit_fun = Commit}) -> reset(Batch). reset(Batch = #batch{linger_timer = TRef}) -> - _ = emqx_misc:cancel_timer(TRef), + _ = emqx_utils:cancel_timer(TRef), Batch#batch{batch_q = [], linger_timer = undefined}. -spec size(batch()) -> non_neg_integer(). diff --git a/apps/emqx/src/emqx_broker.erl b/apps/emqx/src/emqx_broker.erl index 7b8e3dddd..5f7c4aaf5 100644 --- a/apps/emqx/src/emqx_broker.erl +++ b/apps/emqx/src/emqx_broker.erl @@ -71,7 +71,7 @@ code_change/3 ]). --import(emqx_tables, [lookup_value/2, lookup_value/3]). +-import(emqx_utils_ets, [lookup_value/2, lookup_value/3]). -ifdef(TEST). -compile(export_all). @@ -92,7 +92,7 @@ start_link(Pool, Id) -> ok = create_tabs(), gen_server:start_link( - {local, emqx_misc:proc_name(?BROKER, Id)}, + {local, emqx_utils:proc_name(?BROKER, Id)}, ?MODULE, [Pool, Id], [] @@ -106,16 +106,16 @@ start_link(Pool, Id) -> create_tabs() -> TabOpts = [public, {read_concurrency, true}, {write_concurrency, true}], - %% SubOption: {SubPid, Topic} -> SubOption - ok = emqx_tables:new(?SUBOPTION, [set | TabOpts]), + %% SubOption: {Topic, SubPid} -> SubOption + ok = emqx_utils_ets:new(?SUBOPTION, [ordered_set | TabOpts]), %% Subscription: SubPid -> Topic1, Topic2, Topic3, ... %% duplicate_bag: o(1) insert - ok = emqx_tables:new(?SUBSCRIPTION, [duplicate_bag | TabOpts]), + ok = emqx_utils_ets:new(?SUBSCRIPTION, [duplicate_bag | TabOpts]), %% Subscriber: Topic -> SubPid1, SubPid2, SubPid3, ... %% bag: o(n) insert:( - ok = emqx_tables:new(?SUBSCRIBER, [bag | TabOpts]). + ok = emqx_utils_ets:new(?SUBSCRIBER, [bag | TabOpts]). 
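A note on the create_tabs/0 hunk above: the ?SUBOPTION key flips from {SubPid, Topic} to {Topic, SubPid} and the table becomes an ordered_set, so all entries for one topic are contiguous in key order, and a topic-bound select is a range scan rather than a full-table walk. A minimal sketch of the access pattern this enables (?SUBOPTION is this module's table macro; the function name is illustrative):

%% All subscription options for one topic: with {Topic, SubPid} keys in
%% an ordered_set, the partially-bound key below matches a contiguous
%% key range instead of scanning every {SubPid, Topic} entry.
subopts_of(Topic) ->
    ets:select(?SUBOPTION, [{{{Topic, '_'}, '_'}, [], ['$_']}]).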
%%------------------------------------------------------------------------------ %% Subscribe API @@ -136,7 +136,7 @@ subscribe(Topic, SubId, SubOpts0) when is_binary(Topic), ?IS_SUBID(SubId), is_ma SubOpts = maps:merge(?DEFAULT_SUBOPTS, SubOpts0), _ = emqx_trace:subscribe(Topic, SubId, SubOpts), SubPid = self(), - case ets:member(?SUBOPTION, {SubPid, Topic}) of + case subscribed(SubPid, Topic) of %% New false -> ok = emqx_broker_helper:register_sub(SubPid, SubId), @@ -164,16 +164,16 @@ do_subscribe(undefined, Topic, SubPid, SubOpts) -> case emqx_broker_helper:get_sub_shard(SubPid, Topic) of 0 -> true = ets:insert(?SUBSCRIBER, {Topic, SubPid}), - true = ets:insert(?SUBOPTION, {{SubPid, Topic}, SubOpts}), + true = ets:insert(?SUBOPTION, {{Topic, SubPid}, SubOpts}), call(pick(Topic), {subscribe, Topic}); I -> true = ets:insert(?SUBSCRIBER, {{shard, Topic, I}, SubPid}), - true = ets:insert(?SUBOPTION, {{SubPid, Topic}, maps:put(shard, I, SubOpts)}), + true = ets:insert(?SUBOPTION, {{Topic, SubPid}, maps:put(shard, I, SubOpts)}), call(pick({Topic, I}), {subscribe, Topic, I}) end; %% Shared subscription do_subscribe(Group, Topic, SubPid, SubOpts) -> - true = ets:insert(?SUBOPTION, {{SubPid, Topic}, SubOpts}), + true = ets:insert(?SUBOPTION, {{Topic, SubPid}, SubOpts}), emqx_shared_sub:subscribe(Group, Topic, SubPid). %%-------------------------------------------------------------------- @@ -183,7 +183,7 @@ do_subscribe(Group, Topic, SubPid, SubOpts) -> -spec unsubscribe(emqx_types:topic()) -> ok. unsubscribe(Topic) when is_binary(Topic) -> SubPid = self(), - case ets:lookup(?SUBOPTION, {SubPid, Topic}) of + case ets:lookup(?SUBOPTION, {Topic, SubPid}) of [{_, SubOpts}] -> _ = emqx_broker_helper:reclaim_seq(Topic), _ = emqx_trace:unsubscribe(Topic, SubOpts), @@ -193,16 +193,16 @@ unsubscribe(Topic) when is_binary(Topic) -> end. do_unsubscribe(Topic, SubPid, SubOpts) -> - true = ets:delete(?SUBOPTION, {SubPid, Topic}), + true = ets:delete(?SUBOPTION, {Topic, SubPid}), true = ets:delete_object(?SUBSCRIPTION, {SubPid, Topic}), Group = maps:get(share, SubOpts, undefined), - do_unsubscribe(Group, Topic, SubPid, SubOpts), - emqx_exclusive_subscription:unsubscribe(Topic, SubOpts). + do_unsubscribe(Group, Topic, SubPid, SubOpts). do_unsubscribe(undefined, Topic, SubPid, SubOpts) -> case maps:get(shard, SubOpts, 0) of 0 -> true = ets:delete_object(?SUBSCRIBER, {Topic, SubPid}), + emqx_exclusive_subscription:unsubscribe(Topic, SubOpts), cast(pick(Topic), {unsubscribed, Topic}); I -> true = ets:delete_object(?SUBSCRIBER, {{shard, Topic, I}, SubPid}), @@ -362,18 +362,11 @@ subscribers(Shard = {shard, _Topic, _I}) -> subscriber_down(SubPid) -> lists:foreach( fun(Topic) -> - case lookup_value(?SUBOPTION, {SubPid, Topic}) of + case lookup_value(?SUBOPTION, {Topic, SubPid}) of SubOpts when is_map(SubOpts) -> _ = emqx_broker_helper:reclaim_seq(Topic), - true = ets:delete(?SUBOPTION, {SubPid, Topic}), - case maps:get(shard, SubOpts, 0) of - 0 -> - true = ets:delete_object(?SUBSCRIBER, {Topic, SubPid}), - ok = cast(pick(Topic), {unsubscribed, Topic}); - I -> - true = ets:delete_object(?SUBSCRIBER, {{shard, Topic, I}, SubPid}), - ok = cast(pick({Topic, I}), {unsubscribed, Topic, I}) - end; + true = ets:delete(?SUBOPTION, {Topic, SubPid}), + do_unsubscribe(undefined, Topic, SubPid, SubOpts); undefined -> ok end @@ -390,7 +383,7 @@ subscriber_down(SubPid) -> [{emqx_types:topic(), emqx_types:subopts()}]. 
subscriptions(SubPid) when is_pid(SubPid) -> [ - {Topic, lookup_value(?SUBOPTION, {SubPid, Topic}, #{})} + {Topic, lookup_value(?SUBOPTION, {Topic, SubPid}, #{})} || Topic <- lookup_value(?SUBSCRIPTION, SubPid, []) ]; subscriptions(SubId) -> @@ -403,19 +396,19 @@ subscriptions(SubId) -> -spec subscriptions_via_topic(emqx_types:topic()) -> [emqx_types:subopts()]. subscriptions_via_topic(Topic) -> - MatchSpec = [{{{'_', '$1'}, '_'}, [{'=:=', '$1', Topic}], ['$_']}], + MatchSpec = [{{{Topic, '_'}, '_'}, [], ['$_']}], ets:select(?SUBOPTION, MatchSpec). -spec subscribed(pid() | emqx_types:subid(), emqx_types:topic()) -> boolean(). subscribed(SubPid, Topic) when is_pid(SubPid) -> - ets:member(?SUBOPTION, {SubPid, Topic}); + ets:member(?SUBOPTION, {Topic, SubPid}); subscribed(SubId, Topic) when ?IS_SUBID(SubId) -> SubPid = emqx_broker_helper:lookup_subpid(SubId), - ets:member(?SUBOPTION, {SubPid, Topic}). + ets:member(?SUBOPTION, {Topic, SubPid}). -spec get_subopts(pid(), emqx_types:topic()) -> maybe(emqx_types:subopts()). get_subopts(SubPid, Topic) when is_pid(SubPid), is_binary(Topic) -> - lookup_value(?SUBOPTION, {SubPid, Topic}); + lookup_value(?SUBOPTION, {Topic, SubPid}); get_subopts(SubId, Topic) when ?IS_SUBID(SubId) -> case emqx_broker_helper:lookup_subpid(SubId) of SubPid when is_pid(SubPid) -> @@ -430,7 +423,7 @@ set_subopts(Topic, NewOpts) when is_binary(Topic), is_map(NewOpts) -> %% @private set_subopts(SubPid, Topic, NewOpts) -> - Sub = {SubPid, Topic}, + Sub = {Topic, SubPid}, case ets:lookup(?SUBOPTION, Sub) of [{_, OldOpts}] -> ets:insert(?SUBOPTION, {Sub, maps:merge(OldOpts, NewOpts)}); diff --git a/apps/emqx/src/emqx_broker_helper.erl b/apps/emqx/src/emqx_broker_helper.erl index 91b4c4994..06f249678 100644 --- a/apps/emqx/src/emqx_broker_helper.erl +++ b/apps/emqx/src/emqx_broker_helper.erl @@ -73,11 +73,11 @@ register_sub(SubPid, SubId) when is_pid(SubPid) -> -spec lookup_subid(pid()) -> maybe(emqx_types:subid()). lookup_subid(SubPid) when is_pid(SubPid) -> - emqx_tables:lookup_value(?SUBMON, SubPid). + emqx_utils_ets:lookup_value(?SUBMON, SubPid). -spec lookup_subpid(emqx_types:subid()) -> maybe(pid()). lookup_subpid(SubId) -> - emqx_tables:lookup_value(?SUBID, SubId). + emqx_utils_ets:lookup_value(?SUBID, SubId). -spec get_sub_shard(pid(), emqx_types:topic()) -> non_neg_integer(). get_sub_shard(SubPid, Topic) -> @@ -105,15 +105,15 @@ reclaim_seq(Topic) -> init([]) -> %% Helper table - ok = emqx_tables:new(?HELPER, [{read_concurrency, true}]), + ok = emqx_utils_ets:new(?HELPER, [{read_concurrency, true}]), %% Shards: CPU * 32 true = ets:insert(?HELPER, {shards, emqx_vm:schedulers() * 32}), %% SubSeq: Topic -> SeqId ok = emqx_sequence:create(?SUBSEQ), %% SubId: SubId -> SubPid - ok = emqx_tables:new(?SUBID, [public, {read_concurrency, true}, {write_concurrency, true}]), + ok = emqx_utils_ets:new(?SUBID, [public, {read_concurrency, true}, {write_concurrency, true}]), %% SubMon: SubPid -> SubId - ok = emqx_tables:new(?SUBMON, [public, {read_concurrency, true}, {write_concurrency, true}]), + ok = emqx_utils_ets:new(?SUBMON, [public, {read_concurrency, true}, {write_concurrency, true}]), %% Stats timer ok = emqx_stats:update_interval(broker_stats, fun emqx_broker:stats_fun/0), {ok, #{pmon => emqx_pmon:new()}}. @@ -131,7 +131,7 @@ handle_cast(Msg, State) -> {noreply, State}. 
 handle_info({'DOWN', _MRef, process, SubPid, _Reason}, State = #{pmon := PMon}) ->
-    SubPids = [SubPid | emqx_misc:drain_down(?BATCH_SIZE)],
+    SubPids = [SubPid | emqx_utils:drain_down(?BATCH_SIZE)],
     ok = emqx_pool:async_submit(
         fun lists:foreach/2, [fun clean_down/1, SubPids]
     ),
diff --git a/apps/emqx/src/emqx_channel.erl b/apps/emqx/src/emqx_channel.erl
index b6be52c6e..3fb6a5f6b 100644
--- a/apps/emqx/src/emqx_channel.erl
+++ b/apps/emqx/src/emqx_channel.erl
@@ -18,6 +18,7 @@
 -module(emqx_channel).
 
 -include("emqx.hrl").
+-include("emqx_channel.hrl").
 -include("emqx_mqtt.hrl").
 -include("logger.hrl").
 -include("types.hrl").
@@ -57,11 +58,17 @@
     clear_keepalive/1
 ]).
 
+%% Export for emqx_channel implementations
+-export([
+    maybe_nack/1,
+    maybe_mark_as_delivered/2
+]).
+
 %% Exports for CT
 -export([set_field/3]).
 
 -import(
-    emqx_misc,
+    emqx_utils,
     [
         run_fold/3,
         pipeline/3,
@@ -69,7 +76,7 @@
     ]
 ).
 
--export_type([channel/0, opts/0]).
+-export_type([channel/0, opts/0, conn_state/0]).
 
 -record(channel, {
     %% MQTT ConnInfo
@@ -89,7 +96,7 @@
     %% Authentication Data Cache
     auth_cache :: maybe(map()),
     %% Quota checkers
-    quota :: maybe(emqx_limiter_container:limiter()),
+    quota :: emqx_limiter_container:limiter(),
     %% Timers
     timers :: #{atom() => disabled | maybe(reference())},
     %% Conn State
@@ -131,33 +138,6 @@
     quota_timer => expire_quota_limit
 }).
 
--define(CHANNEL_METRICS, [
-    recv_pkt,
-    recv_msg,
-    'recv_msg.qos0',
-    'recv_msg.qos1',
-    'recv_msg.qos2',
-    'recv_msg.dropped',
-    'recv_msg.dropped.await_pubrel_timeout',
-    send_pkt,
-    send_msg,
-    'send_msg.qos0',
-    'send_msg.qos1',
-    'send_msg.qos2',
-    'send_msg.dropped',
-    'send_msg.dropped.expired',
-    'send_msg.dropped.queue_full',
-    'send_msg.dropped.too_large'
-]).
-
--define(INFO_KEYS, [
-    conninfo,
-    conn_state,
-    clientinfo,
-    session,
-    will_msg
-]).
-
 -define(LIMITER_ROUTING, message_routing).
 
 -dialyzer({no_match, [shutdown/4, ensure_timer/2, interval/2]}).
@@ -224,6 +204,8 @@ set_session(Session, Channel = #channel{conninfo = ConnInfo, clientinfo = Client
     Channel#channel{session = Session1}.
 
 -spec stats(channel()) -> emqx_types:stats().
+stats(#channel{session = undefined}) ->
+    emqx_pd:get_counters(?CHANNEL_METRICS);
 stats(#channel{session = Session}) ->
     lists:append(emqx_session:stats(Session), emqx_pd:get_counters(?CHANNEL_METRICS)).
 
@@ -274,7 +256,9 @@ init(
     ),
     {NClientInfo, NConnInfo} = take_ws_cookie(ClientInfo, ConnInfo),
     #channel{
-        conninfo = NConnInfo,
+        %% We remove the peercert because it duplicates what's stored in the socket;
+        %% saving a copy here causes an unnecessary waste of memory (about 1KB per connection).
+        conninfo = maps:put(peercert, undefined, NConnInfo),
         clientinfo = NClientInfo,
         topic_aliases = #{
             inbound => #{},
@@ -618,7 +602,7 @@ process_connect(
             NChannel = Channel#channel{session = Session},
             handle_out(connack, {?RC_SUCCESS, sp(false), AckProps}, ensure_connected(NChannel));
         {ok, #{session := Session, present := true, pendings := Pendings}} ->
-            Pendings1 = lists:usort(lists:append(Pendings, emqx_misc:drain_deliver())),
+            Pendings1 = lists:usort(lists:append(Pendings, emqx_utils:drain_deliver())),
             NChannel = Channel#channel{
                 session = Session,
                 resuming = true,
@@ -756,7 +740,7 @@ do_publish(
             handle_out(disconnect, RC, Channel)
     end.
 
-ensure_quota(_, Channel = #channel{quota = undefined}) -> +ensure_quota(_, Channel = #channel{quota = infinity}) -> Channel; ensure_quota(PubRes, Channel = #channel{quota = Limiter}) -> Cnt = lists:foldl( @@ -1074,10 +1058,12 @@ handle_out(unsuback, {PacketId, _ReasonCodes}, Channel) -> handle_out(disconnect, ReasonCode, Channel) when is_integer(ReasonCode) -> ReasonName = disconnect_reason(ReasonCode), handle_out(disconnect, {ReasonCode, ReasonName}, Channel); -handle_out(disconnect, {ReasonCode, ReasonName}, Channel = ?IS_MQTT_V5) -> - Packet = ?DISCONNECT_PACKET(ReasonCode), +handle_out(disconnect, {ReasonCode, ReasonName}, Channel) -> + handle_out(disconnect, {ReasonCode, ReasonName, #{}}, Channel); +handle_out(disconnect, {ReasonCode, ReasonName, Props}, Channel = ?IS_MQTT_V5) -> + Packet = ?DISCONNECT_PACKET(ReasonCode, Props), {ok, [{outgoing, Packet}, {close, ReasonName}], Channel}; -handle_out(disconnect, {_ReasonCode, ReasonName}, Channel) -> +handle_out(disconnect, {_ReasonCode, ReasonName, _Props}, Channel) -> {ok, {close, ReasonName}, Channel}; handle_out(auth, {ReasonCode, Properties}, Channel) -> {ok, ?AUTH_PACKET(ReasonCode, Properties), Channel}; @@ -1194,13 +1180,19 @@ handle_call( {takeover, 'end'}, Channel = #channel{ session = Session, - pendings = Pendings + pendings = Pendings, + conninfo = #{clientid := ClientId} } ) -> ok = emqx_session:takeover(Session), %% TODO: Should not drain deliver here (side effect) - Delivers = emqx_misc:drain_deliver(), + Delivers = emqx_utils:drain_deliver(), AllPendings = lists:append(Delivers, Pendings), + ?tp( + debug, + emqx_channel_takeover_end, + #{clientid => ClientId} + ), disconnect_and_shutdown(takenover, AllPendings, Channel); handle_call(list_authz_cache, Channel) -> {reply, emqx_authz_cache:list_authz_cache(), Channel}; @@ -1272,6 +1264,8 @@ handle_info(die_if_test = Info, Channel) -> die_if_test_compiled(), ?SLOG(error, #{msg => "unexpected_info", info => Info}), {ok, Channel}; +handle_info({disconnect, ReasonCode, ReasonName, Props}, Channel) -> + handle_out(disconnect, {ReasonCode, ReasonName, Props}, Channel); handle_info(Info, Channel) -> ?SLOG(error, #{msg => "unexpected_info", info => Info}), {ok, Channel}. @@ -1398,7 +1392,7 @@ ensure_timer(Name, Channel = #channel{timers = Timers}) -> ensure_timer(Name, Time, Channel = #channel{timers = Timers}) -> Msg = maps:get(Name, ?TIMER_TABLE), - TRef = emqx_misc:start_timer(Time, Msg), + TRef = emqx_utils:start_timer(Time, Msg), Channel#channel{timers = Timers#{Name => TRef}}. reset_timer(Name, Channel) -> @@ -1427,7 +1421,6 @@ interval(will_timer, #channel{will_msg = WillMsg}) -> -spec terminate(any(), channel()) -> ok. terminate(_, #channel{conn_state = idle} = _Channel) -> - ?tp(channel_terminated, #{channel => _Channel}), ok; terminate(normal, Channel) -> run_terminate_hook(normal, Channel); @@ -1460,10 +1453,8 @@ persist_if_session(#channel{session = Session} = Channel) -> end. run_terminate_hook(_Reason, #channel{session = undefined} = _Channel) -> - ?tp(channel_terminated, #{channel => _Channel}), ok; run_terminate_hook(Reason, #channel{clientinfo = ClientInfo, session = Session} = _Channel) -> - ?tp(channel_terminated, #{channel => _Channel}), emqx_session:terminate(ClientInfo, Reason, Session). 
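Worth spelling out for the disconnect changes in this file: handle_out(disconnect, ...) now threads an optional MQTT v5 properties map, and the new handle_info clause lets any process ask a live channel to emit such a DISCONNECT. A hedged usage sketch; ?RC_USE_ANOTHER_SERVER comes from emqx_mqtt.hrl, and the property values are illustrative:

%% Ask a connected channel to close with DISCONNECT(0x9C) and point the
%% client at another server. Per the non-v5 clause above, clients on
%% MQTT < 5 get a plain close and the properties are dropped.
redirect(ChanPid, ServerRef) ->
    ChanPid ! {disconnect, ?RC_USE_ANOTHER_SERVER, use_another_server,
        #{'Server-Reference' => ServerRef}}.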
%%-------------------------------------------------------------------- @@ -1629,7 +1620,7 @@ check_banned(_ConnPkt, #channel{clientinfo = ClientInfo}) -> %% Flapping count_flapping_event(_ConnPkt, Channel = #channel{clientinfo = ClientInfo = #{zone := Zone}}) -> - emqx_config:get_zone_conf(Zone, [flapping_detect, enable]) andalso + is_integer(emqx_config:get_zone_conf(Zone, [flapping_detect, window_time])) andalso emqx_flapping:detect(ClientInfo), {ok, Channel}. @@ -2044,7 +2035,7 @@ clear_keepalive(Channel = #channel{timers = Timers}) -> undefined -> Channel; TRef -> - emqx_misc:cancel_timer(TRef), + emqx_utils:cancel_timer(TRef), Channel#channel{timers = maps:without([alive_timer], Timers)} end. %%-------------------------------------------------------------------- @@ -2129,17 +2120,24 @@ publish_will_msg( ClientInfo = #{mountpoint := MountPoint}, Msg = #message{topic = Topic} ) -> - case emqx_access_control:authorize(ClientInfo, publish, Topic) of - allow -> - NMsg = emqx_mountpoint:mount(MountPoint, Msg), - _ = emqx_broker:publish(NMsg), - ok; - deny -> + PublishingDisallowed = emqx_access_control:authorize(ClientInfo, publish, Topic) =/= allow, + ClientBanned = emqx_banned:check(ClientInfo), + case PublishingDisallowed orelse ClientBanned of + true -> ?tp( warning, last_will_testament_publish_denied, - #{topic => Topic} + #{ + topic => Topic, + client_banned => ClientBanned, + publishing_disallowed => PublishingDisallowed + } ), + ok; + false -> + NMsg = emqx_mountpoint:mount(MountPoint, Msg), + NMsg2 = NMsg#message{timestamp = erlang:system_time(millisecond)}, + _ = emqx_broker:publish(NMsg2), ok end. @@ -2234,7 +2232,7 @@ get_mqtt_conf(Zone, Key, Default) -> %%-------------------------------------------------------------------- set_field(Name, Value, Channel) -> - Pos = emqx_misc:index_of(Name, record_info(fields, channel)), + Pos = emqx_utils:index_of(Name, record_info(fields, channel)), setelement(Pos + 1, Channel, Value). get_mqueue(#channel{session = Session}) -> diff --git a/apps/emqx/src/emqx_cm.erl b/apps/emqx/src/emqx_cm.erl index 66e9a2aee..66c1db36e 100644 --- a/apps/emqx/src/emqx_cm.erl +++ b/apps/emqx/src/emqx_cm.erl @@ -19,9 +19,12 @@ -behaviour(gen_server). +-include("emqx.hrl"). -include("logger.hrl"). -include("types.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). +-include_lib("stdlib/include/qlc.hrl"). +-include_lib("stdlib/include/ms_transform.hrl"). -export([start_link/0]). @@ -67,7 +70,14 @@ %% Test/debug interface -export([ all_channels/0, - all_client_ids/0 + all_client_ids/0, + get_session_confs/2 +]). + +%% Client management +-export([ + channel_with_session_table/1, + live_connection_table/1 ]). %% gen_server callbacks @@ -152,7 +162,7 @@ start_link() -> insert_channel_info(ClientId, Info, Stats) -> Chan = {ClientId, self()}, true = ets:insert(?CHAN_INFO_TAB, {Chan, Info, Stats}), - ?tp(debug, insert_channel_info, #{client_id => ClientId}), + ?tp(debug, insert_channel_info, #{clientid => ClientId}), ok. 
%% @private @@ -296,9 +306,9 @@ open_session(false, ClientInfo = #{clientid := ClientId}, ConnInfo) -> register_channel(ClientId, Self, ConnInfo), {ok, #{ - session => Session1, + session => clean_session(Session1), present => true, - pendings => Pendings + pendings => clean_pendings(Pendings) }}; {living, ConnMod, ChanPid, Session} -> ok = emqx_session:resume(ClientInfo, Session), @@ -315,9 +325,9 @@ open_session(false, ClientInfo = #{clientid := ClientId}, ConnInfo) -> ), register_channel(ClientId, Self, ConnInfo), {ok, #{ - session => Session1, + session => clean_session(Session1), present => true, - pendings => Pendings + pendings => clean_pendings(Pendings) }}; {error, _} -> CreateSess() @@ -355,6 +365,7 @@ get_session_confs(#{zone := Zone, clientid := ClientId}, #{ max_inflight => MaxInflight, retry_interval => get_mqtt_conf(Zone, retry_interval), await_rel_timeout => get_mqtt_conf(Zone, await_rel_timeout), + max_awaiting_rel => get_mqtt_conf(Zone, max_awaiting_rel), mqueue => mqueue_confs(Zone), %% TODO: Add conf for allowing/disallowing persistent sessions. %% Note that the connection info is already enriched to have @@ -462,23 +473,23 @@ request_stepdown(Action, ConnMod, Pid) -> catch % emqx_ws_connection: call _:noproc -> - ok = ?tp(debug, "session_already_gone", #{pid => Pid, action => Action}), + ok = ?tp(debug, "session_already_gone", #{stale_pid => Pid, action => Action}), {error, noproc}; % emqx_connection: gen_server:call _:{noproc, _} -> - ok = ?tp(debug, "session_already_gone", #{pid => Pid, action => Action}), + ok = ?tp(debug, "session_already_gone", #{stale_pid => Pid, action => Action}), {error, noproc}; _:{shutdown, _} -> - ok = ?tp(debug, "session_already_shutdown", #{pid => Pid, action => Action}), + ok = ?tp(debug, "session_already_shutdown", #{stale_pid => Pid, action => Action}), {error, noproc}; _:{{shutdown, _}, _} -> - ok = ?tp(debug, "session_already_shutdown", #{pid => Pid, action => Action}), + ok = ?tp(debug, "session_already_shutdown", #{stale_pid => Pid, action => Action}), {error, noproc}; _:{timeout, {gen_server, call, _}} -> ?tp( warning, "session_stepdown_request_timeout", - #{pid => Pid, action => Action, stale_channel => stale_channel_info(Pid)} + #{stale_pid => Pid, action => Action, stale_channel => stale_channel_info(Pid)} ), ok = force_kill(Pid), {error, timeout}; @@ -487,7 +498,7 @@ request_stepdown(Action, ConnMod, Pid) -> error, "session_stepdown_request_exception", #{ - pid => Pid, + stale_pid => Pid, action => Action, reason => Error, stacktrace => St, @@ -590,6 +601,40 @@ all_channels() -> Pat = [{{'_', '$1'}, [], ['$1']}], ets:select(?CHAN_TAB, Pat). +%% @doc Get clientinfo for all clients with sessions +channel_with_session_table(ConnModuleList) -> + Ms = ets:fun2ms( + fun({{ClientId, _ChanPid}, Info, _Stats}) -> + {ClientId, Info} + end + ), + Table = ets:table(?CHAN_INFO_TAB, [{traverse, {select, Ms}}]), + ConnModules = sets:from_list(ConnModuleList, [{version, 2}]), + qlc:q([ + {ClientId, ConnState, ConnInfo, ClientInfo} + || {ClientId, #{ + conn_state := ConnState, + clientinfo := ClientInfo, + conninfo := #{clean_start := false, conn_mod := ConnModule} = ConnInfo + }} <- + Table, + sets:is_element(ConnModule, ConnModules) + ]). + +%% @doc Get all local connection query handle +live_connection_table(ConnModules) -> + Ms = lists:map(fun live_connection_ms/1, ConnModules), + Table = ets:table(?CHAN_CONN_TAB, [{traverse, {select, Ms}}]), + qlc:q([{ClientId, ChanPid} || {ClientId, ChanPid} <- Table, is_channel_connected(ChanPid)]). 
+ +live_connection_ms(ConnModule) -> + {{{'$1', '$2'}, ConnModule}, [], [{{'$1', '$2'}}]}. + +is_channel_connected(ChanPid) when node(ChanPid) =:= node() -> + ets:member(?CHAN_LIVE_TAB, ChanPid); +is_channel_connected(_ChanPid) -> + false. + %% @doc Get all registered clientIDs. Debug/test interface all_client_ids() -> Pat = [{{'$1', '_'}, [], ['$1']}], @@ -648,10 +693,10 @@ cast(Msg) -> gen_server:cast(?CM, Msg). init([]) -> TabOpts = [public, {write_concurrency, true}], - ok = emqx_tables:new(?CHAN_TAB, [bag, {read_concurrency, true} | TabOpts]), - ok = emqx_tables:new(?CHAN_CONN_TAB, [bag | TabOpts]), - ok = emqx_tables:new(?CHAN_INFO_TAB, [ordered_set, compressed | TabOpts]), - ok = emqx_tables:new(?CHAN_LIVE_TAB, [ordered_set, {write_concurrency, true} | TabOpts]), + ok = emqx_utils_ets:new(?CHAN_TAB, [bag, {read_concurrency, true} | TabOpts]), + ok = emqx_utils_ets:new(?CHAN_CONN_TAB, [bag | TabOpts]), + ok = emqx_utils_ets:new(?CHAN_INFO_TAB, [ordered_set, compressed | TabOpts]), + ok = emqx_utils_ets:new(?CHAN_LIVE_TAB, [ordered_set, {write_concurrency, true} | TabOpts]), ok = emqx_stats:update_interval(chan_stats, fun ?MODULE:stats_fun/0), State = #{chan_pmon => emqx_pmon:new()}, {ok, State}. @@ -668,8 +713,8 @@ handle_cast(Msg, State) -> {noreply, State}. handle_info({'DOWN', _MRef, process, Pid, _Reason}, State = #{chan_pmon := PMon}) -> - ?tp(emqx_cm_process_down, #{pid => Pid, reason => _Reason}), - ChanPids = [Pid | emqx_misc:drain_down(?BATCH_SIZE)], + ?tp(emqx_cm_process_down, #{stale_pid => Pid, reason => _Reason}), + ChanPids = [Pid | emqx_utils:drain_down(?BATCH_SIZE)], {Items, PMon1} = emqx_pmon:erase_all(ChanPids, PMon), lists:foreach(fun mark_channel_disconnected/1, ChanPids), ok = emqx_pool:async_submit(fun lists:foreach/2, [fun ?MODULE:clean_down/1, Items]), @@ -690,7 +735,8 @@ code_change(_OldVsn, State, _Extra) -> %%-------------------------------------------------------------------- clean_down({ChanPid, ClientId}) -> - do_unregister_channel({ClientId, ChanPid}). + do_unregister_channel({ClientId, ChanPid}), + ok = ?tp(debug, emqx_cm_clean_down, #{client_id => ClientId}). stats_fun() -> lists:foreach(fun update_stats/1, ?CHAN_STATS). @@ -716,12 +762,12 @@ get_chann_conn_mod(ClientId, ChanPid) -> wrap_rpc(emqx_cm_proto_v1:get_chann_conn_mod(ClientId, ChanPid)). mark_channel_connected(ChanPid) -> - ?tp(emqx_cm_connected_client_count_inc, #{}), + ?tp(emqx_cm_connected_client_count_inc, #{chan_pid => ChanPid}), ets:insert_new(?CHAN_LIVE_TAB, {ChanPid, true}), ok. mark_channel_disconnected(ChanPid) -> - ?tp(emqx_cm_connected_client_count_dec, #{}), + ?tp(emqx_cm_connected_client_count_dec, #{chan_pid => ChanPid}), ets:delete(?CHAN_LIVE_TAB, ChanPid), ok. @@ -730,3 +776,14 @@ get_connected_client_count() -> undefined -> 0; Size -> Size end. + +clean_session(Session) -> + emqx_session:filter_queue(fun is_banned_msg/1, Session). + +clean_pendings(Pendings) -> + lists:filter(fun is_banned_msg/1, Pendings). + +is_banned_msg(#message{from = ClientId}) -> + [] =:= emqx_banned:look_up({clientid, ClientId}); +is_banned_msg({deliver, _Topic, Msg}) -> + is_banned_msg(Msg). diff --git a/apps/emqx/src/emqx_config.erl b/apps/emqx/src/emqx_config.erl index 49962e490..630952166 100644 --- a/apps/emqx/src/emqx_config.erl +++ b/apps/emqx/src/emqx_config.erl @@ -18,12 +18,13 @@ -compile({no_auto_import, [get/0, get/1, put/2, erase/1]}). -elvis([{elvis_style, god_modules, disable}]). -include("logger.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). 
-export([ init_load/1, init_load/2, - init_load/3, read_override_conf/1, + has_deprecated_file/0, delete_override_conf_files/0, check_config/2, fill_defaults/1, @@ -32,8 +33,9 @@ save_configs/5, save_to_app_env/1, save_to_config_map/2, - save_to_override_conf/2 + save_to_override_conf/3 ]). +-export([merge_envs/2]). -export([ get_root/1, @@ -86,6 +88,10 @@ remove_handlers/0 ]). +-ifdef(TEST). +-export([erase_all/0]). +-endif. + -include("logger.hrl"). -include_lib("hocon/include/hoconsc.hrl"). @@ -96,6 +102,8 @@ -define(ZONE_CONF_PATH(ZONE, PATH), [zones, ZONE | PATH]). -define(LISTENER_CONF_PATH(TYPE, LISTENER, PATH), [listeners, TYPE, LISTENER | PATH]). +-define(CONFIG_NOT_FOUND_MAGIC, '$0tFound'). + -export_type([ update_request/0, raw_config/0, @@ -137,50 +145,48 @@ -type app_envs() :: [proplists:property()]. %% @doc For the given path, get root value enclosed in a single-key map. --spec get_root(emqx_map_lib:config_key_path()) -> map(). +-spec get_root(emqx_utils_maps:config_key_path()) -> map(). get_root([RootName | _]) -> #{RootName => do_get(?CONF, [RootName], #{})}. %% @doc For the given path, get raw root value enclosed in a single-key map. %% key is ensured to be binary. get_root_raw([RootName | _]) -> - #{bin(RootName) => do_get_raw([RootName], #{})}. + #{bin(RootName) => get_raw([RootName], #{})}. %% @doc Get a config value for the given path. %% The path should at least include root config name. --spec get(emqx_map_lib:config_key_path()) -> term(). +-spec get(emqx_utils_maps:config_key_path()) -> term(). get(KeyPath) -> do_get(?CONF, KeyPath). --spec get(emqx_map_lib:config_key_path(), term()) -> term(). +-spec get(emqx_utils_maps:config_key_path(), term()) -> term(). get(KeyPath, Default) -> do_get(?CONF, KeyPath, Default). --spec find(emqx_map_lib:config_key_path()) -> - {ok, term()} | {not_found, emqx_map_lib:config_key_path(), term()}. +-spec find(emqx_utils_maps:config_key_path()) -> + {ok, term()} | {not_found, emqx_utils_maps:config_key_path(), term()}. find([]) -> - Ref = make_ref(), - case do_get(?CONF, [], Ref) of - Ref -> {not_found, []}; + case do_get(?CONF, [], ?CONFIG_NOT_FOUND_MAGIC) of + ?CONFIG_NOT_FOUND_MAGIC -> {not_found, []}; Res -> {ok, Res} end; find(KeyPath) -> atom_conf_path( KeyPath, - fun(AtomKeyPath) -> emqx_map_lib:deep_find(AtomKeyPath, get_root(KeyPath)) end, + fun(AtomKeyPath) -> emqx_utils_maps:deep_find(AtomKeyPath, get_root(KeyPath)) end, {return, {not_found, KeyPath}} ). --spec find_raw(emqx_map_lib:config_key_path()) -> - {ok, term()} | {not_found, emqx_map_lib:config_key_path(), term()}. +-spec find_raw(emqx_utils_maps:config_key_path()) -> + {ok, term()} | {not_found, emqx_utils_maps:config_key_path(), term()}. find_raw([]) -> - Ref = make_ref(), - case do_get_raw([], Ref) of - Ref -> {not_found, []}; + case do_get_raw([], ?CONFIG_NOT_FOUND_MAGIC) of + ?CONFIG_NOT_FOUND_MAGIC -> {not_found, []}; Res -> {ok, Res} end; find_raw(KeyPath) -> - emqx_map_lib:deep_find([bin(Key) || Key <- KeyPath], get_root_raw(KeyPath)). + emqx_utils_maps:deep_find([bin(Key) || Key <- KeyPath], get_root_raw(KeyPath)). --spec get_zone_conf(atom(), emqx_map_lib:config_key_path()) -> term(). +-spec get_zone_conf(atom(), emqx_utils_maps:config_key_path()) -> term(). get_zone_conf(Zone, KeyPath) -> case find(?ZONE_CONF_PATH(Zone, KeyPath)) of %% not found in zones, try to find the global config @@ -190,7 +196,7 @@ get_zone_conf(Zone, KeyPath) -> Value end. --spec get_zone_conf(atom(), emqx_map_lib:config_key_path(), term()) -> term(). 
+-spec get_zone_conf(atom(), emqx_utils_maps:config_key_path(), term()) -> term(). get_zone_conf(Zone, KeyPath, Default) -> case find(?ZONE_CONF_PATH(Zone, KeyPath)) of %% not found in zones, try to find the global config @@ -200,24 +206,24 @@ get_zone_conf(Zone, KeyPath, Default) -> Value end. --spec put_zone_conf(atom(), emqx_map_lib:config_key_path(), term()) -> ok. +-spec put_zone_conf(atom(), emqx_utils_maps:config_key_path(), term()) -> ok. put_zone_conf(Zone, KeyPath, Conf) -> ?MODULE:put(?ZONE_CONF_PATH(Zone, KeyPath), Conf). --spec get_listener_conf(atom(), atom(), emqx_map_lib:config_key_path()) -> term(). +-spec get_listener_conf(atom(), atom(), emqx_utils_maps:config_key_path()) -> term(). get_listener_conf(Type, Listener, KeyPath) -> ?MODULE:get(?LISTENER_CONF_PATH(Type, Listener, KeyPath)). --spec get_listener_conf(atom(), atom(), emqx_map_lib:config_key_path(), term()) -> term(). +-spec get_listener_conf(atom(), atom(), emqx_utils_maps:config_key_path(), term()) -> term(). get_listener_conf(Type, Listener, KeyPath, Default) -> ?MODULE:get(?LISTENER_CONF_PATH(Type, Listener, KeyPath), Default). --spec put_listener_conf(atom(), atom(), emqx_map_lib:config_key_path(), term()) -> ok. +-spec put_listener_conf(atom(), atom(), emqx_utils_maps:config_key_path(), term()) -> ok. put_listener_conf(Type, Listener, KeyPath, Conf) -> ?MODULE:put(?LISTENER_CONF_PATH(Type, Listener, KeyPath), Conf). --spec find_listener_conf(atom(), atom(), emqx_map_lib:config_key_path()) -> - {ok, term()} | {not_found, emqx_map_lib:config_key_path(), term()}. +-spec find_listener_conf(atom(), atom(), emqx_utils_maps:config_key_path()) -> + {ok, term()} | {not_found, emqx_utils_maps:config_key_path(), term()}. find_listener_conf(Type, Listener, KeyPath) -> find(?LISTENER_CONF_PATH(Type, Listener, KeyPath)). @@ -225,31 +231,31 @@ find_listener_conf(Type, Listener, KeyPath) -> put(Config) -> maps:fold( fun(RootName, RootValue, _) -> - ?MODULE:put([RootName], RootValue) + ?MODULE:put([atom(RootName)], RootValue) end, ok, Config ). erase(RootName) -> - persistent_term:erase(?PERSIS_KEY(?CONF, bin(RootName))), + persistent_term:erase(?PERSIS_KEY(?CONF, atom(RootName))), persistent_term:erase(?PERSIS_KEY(?RAW_CONF, bin(RootName))), ok. --spec put(emqx_map_lib:config_key_path(), term()) -> ok. +-spec put(emqx_utils_maps:config_key_path(), term()) -> ok. put(KeyPath, Config) -> Putter = fun(Path, Map, Value) -> - emqx_map_lib:deep_put(Path, Map, Value) + emqx_utils_maps:deep_put(Path, Map, Value) end, do_put(?CONF, Putter, KeyPath, Config). %% Puts value into configuration even if path doesn't exist %% For paths of non-existing atoms use force_put(KeyPath, Config, unsafe) --spec force_put(emqx_map_lib:config_key_path(), term()) -> ok. +-spec force_put(emqx_utils_maps:config_key_path(), term()) -> ok. force_put(KeyPath, Config) -> force_put(KeyPath, Config, safe). --spec force_put(emqx_map_lib:config_key_path(), term(), safe | unsafe) -> ok. +-spec force_put(emqx_utils_maps:config_key_path(), term(), safe | unsafe) -> ok. force_put(KeyPath0, Config, Safety) -> KeyPath = case Safety of @@ -257,19 +263,19 @@ force_put(KeyPath0, Config, Safety) -> unsafe -> [unsafe_atom(Key) || Key <- KeyPath0] end, Putter = fun(Path, Map, Value) -> - emqx_map_lib:deep_force_put(Path, Map, Value) + emqx_utils_maps:deep_force_put(Path, Map, Value) end, do_put(?CONF, Putter, KeyPath, Config). --spec get_default_value(emqx_map_lib:config_key_path()) -> {ok, term()} | {error, term()}. 
+-spec get_default_value(emqx_utils_maps:config_key_path()) -> {ok, term()} | {error, term()}. get_default_value([RootName | _] = KeyPath) -> BinKeyPath = [bin(Key) || Key <- KeyPath], case find_raw([RootName]) of {ok, RawConf} -> - RawConf1 = emqx_map_lib:deep_remove(BinKeyPath, #{bin(RootName) => RawConf}), + RawConf1 = emqx_utils_maps:deep_remove(BinKeyPath, #{bin(RootName) => RawConf}), try fill_defaults(get_schema_mod(RootName), RawConf1, #{}) of FullConf -> - case emqx_map_lib:deep_find(BinKeyPath, FullConf) of + case emqx_utils_maps:deep_find(BinKeyPath, FullConf) of {not_found, _, _} -> {error, no_default_value}; {ok, Val} -> {ok, Val} end @@ -280,10 +286,12 @@ get_default_value([RootName | _] = KeyPath) -> {error, {rootname_not_found, RootName}} end. --spec get_raw(emqx_map_lib:config_key_path()) -> term(). +-spec get_raw(emqx_utils_maps:config_key_path()) -> term(). +get_raw([Root | T]) when is_atom(Root) -> get_raw([bin(Root) | T]); get_raw(KeyPath) -> do_get_raw(KeyPath). --spec get_raw(emqx_map_lib:config_key_path(), term()) -> term(). +-spec get_raw(emqx_utils_maps:config_key_path(), term()) -> term(). +get_raw([Root | T], Default) when is_atom(Root) -> get_raw([bin(Root) | T], Default); get_raw(KeyPath, Default) -> do_get_raw(KeyPath, Default). -spec put_raw(map()) -> ok. @@ -296,10 +304,10 @@ put_raw(Config) -> hocon_maps:ensure_plain(Config) ). --spec put_raw(emqx_map_lib:config_key_path(), term()) -> ok. +-spec put_raw(emqx_utils_maps:config_key_path(), term()) -> ok. put_raw(KeyPath, Config) -> Putter = fun(Path, Map, Value) -> - emqx_map_lib:deep_force_put(Path, Map, Value) + emqx_utils_maps:deep_force_put(Path, Map, Value) end, do_put(?RAW_CONF, Putter, KeyPath, Config). @@ -308,90 +316,116 @@ put_raw(KeyPath, Config) -> %%============================================================================ init_load(SchemaMod) -> ConfFiles = application:get_env(emqx, config_files, []), - init_load(SchemaMod, ConfFiles, #{raw_with_default => true}). - -init_load(SchemaMod, Opts) when is_map(Opts) -> - ConfFiles = application:get_env(emqx, config_files, []), - init_load(SchemaMod, ConfFiles, Opts); -init_load(SchemaMod, ConfFiles) -> - init_load(SchemaMod, ConfFiles, #{raw_with_default => false}). + init_load(SchemaMod, ConfFiles). %% @doc Initial load of the given config files. %% NOTE: The order of the files is significant, configs from files ordered %% in the rear of the list overrides prior values. -spec init_load(module(), [string()] | binary() | hocon:config()) -> ok. -init_load(SchemaMod, Conf, Opts) when is_list(Conf) orelse is_binary(Conf) -> - init_load(SchemaMod, parse_hocon(Conf), Opts); -init_load(SchemaMod, RawConf, Opts) when is_map(RawConf) -> +init_load(SchemaMod, Conf) when is_list(Conf) orelse is_binary(Conf) -> ok = save_schema_mod_and_names(SchemaMod), - %% Merge environment variable overrides on top + HasDeprecatedFile = has_deprecated_file(), + RawConf0 = load_config_files(HasDeprecatedFile, Conf), + warning_deprecated_root_key(RawConf0), + RawConf1 = + case HasDeprecatedFile of + true -> + overlay_v0(SchemaMod, RawConf0); + false -> + overlay_v1(SchemaMod, RawConf0) + end, + RawConf = fill_defaults_for_all_roots(SchemaMod, RawConf1), + %% check configs against the schema + {AppEnvs, CheckedConf} = check_config(SchemaMod, RawConf, #{}), + save_to_app_env(AppEnvs), + ok = save_to_config_map(CheckedConf, RawConf). + +%% Merge environment variable overrides on top, then merge with overrides. 
+overlay_v0(SchemaMod, RawConf) when is_map(RawConf) -> RawConfWithEnvs = merge_envs(SchemaMod, RawConf), + Overrides = read_override_confs(), + hocon:deep_merge(RawConfWithEnvs, Overrides). + +%% Merge environment variable overrides on top. +overlay_v1(SchemaMod, RawConf) when is_map(RawConf) -> + merge_envs(SchemaMod, RawConf). + +%% @doc Read merged cluster + local overrides. +read_override_confs() -> ClusterOverrides = read_override_conf(#{override_to => cluster}), LocalOverrides = read_override_conf(#{override_to => local}), - Overrides = hocon:deep_merge(ClusterOverrides, LocalOverrides), - RawConfWithOverrides = hocon:deep_merge(RawConfWithEnvs, Overrides), - RootNames = get_root_names(), - RawConfAll = raw_conf_with_default(SchemaMod, RootNames, RawConfWithOverrides, Opts), - %% check configs against the schema - {_AppEnvs, CheckedConf} = check_config(SchemaMod, RawConfAll, #{}), - ok = save_to_config_map(CheckedConf, RawConfAll). + hocon:deep_merge(ClusterOverrides, LocalOverrides). %% keep the raw and non-raw conf has the same keys to make update raw conf easier. -raw_conf_with_default(SchemaMod, RootNames, RawConf, #{raw_with_default := true}) -> - Fun = fun(Name, Acc) -> - case maps:is_key(Name, RawConf) of - true -> - Acc; - false -> - case lists:keyfind(Name, 1, hocon_schema:roots(SchemaMod)) of - false -> - Acc; - {_, {_, Schema}} -> - Acc#{Name => schema_default(Schema)} - end - end - end, - RawDefault = lists:foldl(Fun, #{}, RootNames), - maps:merge(RawConf, fill_defaults(SchemaMod, RawDefault, #{})); -raw_conf_with_default(_SchemaMod, _RootNames, RawConf, _Opts) -> - RawConf. +fill_defaults_for_all_roots(SchemaMod, RawConf0) -> + RootSchemas = hocon_schema:roots(SchemaMod), + %% the roots which are missing from the loaded configs + MissingRoots = lists:filtermap( + fun({BinName, Sc}) -> + case maps:is_key(BinName, RawConf0) orelse is_already_loaded(BinName) of + true -> false; + false -> {true, Sc} + end + end, + RootSchemas + ), + RawConf = lists:foldl( + fun({RootName, Schema}, Acc) -> + Acc#{bin(RootName) => seed_default(Schema)} + end, + RawConf0, + MissingRoots + ), + fill_defaults(RawConf). -schema_default(Schema) -> - case hocon_schema:field_schema(Schema, type) of - ?ARRAY(_) -> - []; - ?LAZY(?ARRAY(_)) -> - []; - ?LAZY(?UNION(Unions)) -> - case [A || ?ARRAY(A) <- Unions] of - [_ | _] -> []; - _ -> #{} - end; - _ -> - #{} +%% So far, this can only return true when testing. +%% e.g. when testing an app, we need to load its config first +%% then start emqx_conf application which will load the +%% possibly empty config again (then filled with defaults). +is_already_loaded(Name) -> + ?MODULE:get_raw([Name], #{}) =/= #{}. + +%% if a root is not found in the raw conf, fill it with default values. +seed_default(Schema) -> + case hocon_schema:field_schema(Schema, default) of + undefined -> + %% so far all roots without a default value are objects + #{}; + Value -> + Value end. -parse_hocon(Conf) -> +load_config_files(HasDeprecatedFile, Conf) -> IncDirs = include_dirs(), - case do_parse_hocon(Conf, IncDirs) of + case do_parse_hocon(HasDeprecatedFile, Conf, IncDirs) of {ok, HoconMap} -> HoconMap; {error, Reason} -> ?SLOG(error, #{ - msg => "failed_to_load_hocon_conf", + msg => "failed_to_load_config_file", reason => Reason, pwd => file:get_cwd(), include_dirs => IncDirs, config_file => Conf }), - error(failed_to_load_hocon_conf) + error(failed_to_load_config_file) end. 
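To make the overlay order above explicit: with the deprecated override files on disk (v0), precedence from lowest to highest is config files, then environment overrides, then cluster override, then local override; on the new scheme (v1), cluster.hocon is loaded first, so the listed config files and then the environment win over it. A compact restatement of v0, relying on hocon:deep_merge/2 giving the right-hand argument precedence (the function name is illustrative):

%% v0 effective config, lowest precedence first (restating the code above):
effective_v0(FilesWithEnvs, ClusterOverride, LocalOverride) ->
    hocon:deep_merge(FilesWithEnvs, hocon:deep_merge(ClusterOverride, LocalOverride)).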
-do_parse_hocon(Conf, IncDirs) ->
+do_parse_hocon(true, Conf, IncDirs) ->
     Opts = #{format => map, include_dirs => IncDirs},
     case is_binary(Conf) of
         true -> hocon:binary(Conf, Opts);
         false -> hocon:files(Conf, Opts)
+    end;
+do_parse_hocon(false, Conf, IncDirs) ->
+    Opts = #{format => map, include_dirs => IncDirs},
+    case is_binary(Conf) of
+        %% only used in tests
+        true ->
+            hocon:binary(Conf, Opts);
+        false ->
+            ClusterFile = cluster_hocon_file(),
+            hocon:files([ClusterFile | Conf], Opts)
     end.
 
 include_dirs() ->
@@ -401,9 +435,7 @@ merge_envs(SchemaMod, RawConf) ->
     Opts = #{
         required => false,
         format => map,
-        apply_override_envs => true,
-        remove_env_meta => true,
-        check_lazy => true
+        apply_override_envs => true
     },
     hocon_tconf:merge_env_overrides(SchemaMod, RawConf, all, Opts).
@@ -416,38 +448,20 @@ check_config(SchemaMod, RawConf, Opts0) ->
     try
         do_check_config(SchemaMod, RawConf, Opts0)
     catch
-        throw:{Schema, Errors} ->
-            compact_errors(Schema, Errors)
+        throw:Errors:Stacktrace ->
+            {error, Reason} = emqx_hocon:compact_errors(Errors, Stacktrace),
+            erlang:raise(throw, Reason, Stacktrace)
     end.
 
-%% HOCON tries to be very informative about all the detailed errors
-%% it's maybe too much when reporting to the user
--spec compact_errors(any(), any()) -> no_return().
-compact_errors(Schema, [Error0 | More]) when is_map(Error0) ->
-    Error1 = Error0#{discarded_errors_count => length(More)},
-    Error =
-        case is_atom(Schema) of
-            true ->
-                Error1#{schema_module => Schema};
-            false ->
-                Error1
-        end,
-    throw(Error);
-compact_errors(Schema, Errors) ->
-    %% unexpected, we need the stacktrace reported, hence error
-    error({Schema, Errors}).
-
 do_check_config(SchemaMod, RawConf, Opts0) ->
     Opts1 = #{
         return_plain => true,
-        format => map,
-        %% Don't check lazy types, such as authenticate
-        check_lazy => false
+        format => map
     },
     Opts = maps:merge(Opts0, Opts1),
     {AppEnvs, CheckedConf} = hocon_tconf:map_translate(SchemaMod, RawConf, Opts),
-    {AppEnvs, emqx_map_lib:unsafe_atom_key_map(CheckedConf)}.
+    {AppEnvs, emqx_utils_maps:unsafe_atom_key_map(CheckedConf)}.
 
 fill_defaults(RawConf) ->
     fill_defaults(RawConf, #{}).
@@ -483,10 +497,12 @@ fill_defaults(SchemaMod, RawConf, Opts0) ->
 %% Delete override config files.
 -spec delete_override_conf_files() -> ok.
 delete_override_conf_files() ->
-    F1 = override_conf_file(#{override_to => local}),
-    F2 = override_conf_file(#{override_to => cluster}),
+    F1 = deprecated_conf_file(#{override_to => local}),
+    F2 = deprecated_conf_file(#{override_to => cluster}),
+    F3 = cluster_hocon_file(),
     ok = ensure_file_deleted(F1),
-    ok = ensure_file_deleted(F2).
+    ok = ensure_file_deleted(F2),
+    ok = ensure_file_deleted(F3).
 
 ensure_file_deleted(F) ->
     case file:delete(F) of
@@ -497,19 +513,33 @@ ensure_file_deleted(F) ->
 
 -spec read_override_conf(map()) -> raw_config().
 read_override_conf(#{} = Opts) ->
-    File = override_conf_file(Opts),
+    File =
+        case has_deprecated_file() of
+            true -> deprecated_conf_file(Opts);
+            false -> cluster_hocon_file()
+        end,
     load_hocon_file(File, map).
 
-override_conf_file(Opts) when is_map(Opts) ->
+%% @doc Return `true' if this node was upgraded from an older version which
+%% used cluster-override.conf for cluster-wide config persistence.
+has_deprecated_file() ->
+    DeprecatedFile = deprecated_conf_file(#{override_to => cluster}),
+    filelib:is_regular(DeprecatedFile).
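The has_deprecated_file/0 predicate above doubles as a per-node migration switch: the mere presence of the old cluster-override.conf keeps a node on the v0 persistence path, and only nodes without it use cluster.hocon. An illustrative probe; the helper name is mine, not part of this change:

%% Which cluster-wide config persistence scheme is this node on?
persistence_scheme() ->
    case emqx_config:has_deprecated_file() of
        true -> deprecated_cluster_override;
        false -> cluster_hocon
    end.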
+ +deprecated_conf_file(Opts) when is_map(Opts) -> Key = case maps:get(override_to, Opts, cluster) of local -> local_override_conf_file; cluster -> cluster_override_conf_file end, application:get_env(emqx, Key, undefined); -override_conf_file(Which) when is_atom(Which) -> +deprecated_conf_file(Which) when is_atom(Which) -> application:get_env(emqx, Which, undefined). +%% The newer version cluster-wide config persistence file. +cluster_hocon_file() -> + application:get_env(emqx, cluster_hocon_file, undefined). + -spec save_schema_mod_and_names(module()) -> ok. save_schema_mod_and_names(SchemaMod) -> RootNames = hocon_schema:root_names(SchemaMod), @@ -522,6 +552,13 @@ save_schema_mod_and_names(SchemaMod) -> names => lists:usort(OldNames ++ RootNames) }). +-ifdef(TEST). +erase_all() -> + Names = get_root_names(), + lists:foreach(fun erase/1, Names), + persistent_term:erase(?PERSIS_SCHEMA_MODS). +-endif. + -spec get_schema_mod() -> #{binary() => atom()}. get_schema_mod() -> maps:get(mods, persistent_term:get(?PERSIS_SCHEMA_MODS, #{mods => #{}})). @@ -534,35 +571,38 @@ get_schema_mod(RootName) -> get_root_names() -> maps:get(names, persistent_term:get(?PERSIS_SCHEMA_MODS, #{names => []})). --spec save_configs(app_envs(), config(), raw_config(), raw_config(), update_opts()) -> ok. -save_configs(_AppEnvs, Conf, RawConf, OverrideConf, Opts) -> - %% We first try to save to override.conf, because saving to files is more error prone +-spec save_configs( + app_envs(), config(), raw_config(), raw_config(), update_opts() +) -> ok. + +save_configs(AppEnvs, Conf, RawConf, OverrideConf, Opts) -> + %% We first try to save to files, because saving to files is more error prone %% than saving into memory. - ok = save_to_override_conf(OverrideConf, Opts), - %% We may need also support hot config update for the apps that use application envs. - %% If that is the case uncomment the following line to update the configs to app env - %save_to_app_env(_AppEnvs), + HasDeprecatedFile = has_deprecated_file(), + ok = save_to_override_conf(HasDeprecatedFile, OverrideConf, Opts), + save_to_app_env(AppEnvs), save_to_config_map(Conf, RawConf). +%% we ignore kernel app env, +%% because the old app env will be used in emqx_config_logger:post_config_update/5 +-define(IGNORE_APPS, [kernel]). + -spec save_to_app_env([tuple()]) -> ok. -save_to_app_env(AppEnvs) -> - lists:foreach( - fun({AppName, Envs}) -> - [application:set_env(AppName, Par, Val) || {Par, Val} <- Envs] - end, - AppEnvs - ). +save_to_app_env(AppEnvs0) -> + AppEnvs = lists:filter(fun({App, _}) -> not lists:member(App, ?IGNORE_APPS) end, AppEnvs0), + application:set_env(AppEnvs). -spec save_to_config_map(config(), raw_config()) -> ok. save_to_config_map(Conf, RawConf) -> ?MODULE:put(Conf), ?MODULE:put_raw(RawConf). --spec save_to_override_conf(raw_config(), update_opts()) -> ok | {error, term()}. -save_to_override_conf(undefined, _) -> +-spec save_to_override_conf(boolean(), raw_config(), update_opts()) -> ok | {error, term()}. 
+save_to_override_conf(_, undefined, _) -> ok; -save_to_override_conf(RawConf, Opts) -> - case override_conf_file(Opts) of +%% TODO: Remove deprecated override conf file when 5.1 +save_to_override_conf(true, RawConf, Opts) -> + case deprecated_conf_file(Opts) of undefined -> ok; FileName -> @@ -578,6 +618,24 @@ save_to_override_conf(RawConf, Opts) -> }), {error, Reason} end + end; +save_to_override_conf(false, RawConf, _Opts) -> + case cluster_hocon_file() of + undefined -> + ok; + FileName -> + ok = filelib:ensure_dir(FileName), + case file:write_file(FileName, hocon_pp:do(RawConf, #{})) of + ok -> + ok; + {error, Reason} -> + ?SLOG(error, #{ + msg => "failed_to_save_conf_file", + filename => FileName, + reason => Reason + }), + {error, Reason} + end end. add_handlers() -> @@ -594,8 +652,16 @@ load_hocon_file(FileName, LoadType) -> case filelib:is_regular(FileName) of true -> Opts = #{include_dirs => include_dirs(), format => LoadType}, - {ok, Raw0} = hocon:load(FileName, Opts), - Raw0; + case hocon:load(FileName, Opts) of + {ok, Raw0} -> + Raw0; + {error, Reason} -> + throw(#{ + msg => failed_to_load_conf, + reason => Reason, + file => FileName + }) + end; false -> #{} end. @@ -607,11 +673,9 @@ do_get_raw(Path, Default) -> do_get(?RAW_CONF, Path, Default). do_get(Type, KeyPath) -> - Ref = make_ref(), - Res = do_get(Type, KeyPath, Ref), - case Res =:= Ref of - true -> error({config_not_found, KeyPath}); - false -> Res + case do_get(Type, KeyPath, ?CONFIG_NOT_FOUND_MAGIC) of + ?CONFIG_NOT_FOUND_MAGIC -> error({config_not_found, KeyPath}); + Res -> Res end. do_get(Type, [], Default) -> @@ -630,9 +694,9 @@ do_get(Type, [], Default) -> false -> AllConf end; do_get(Type, [RootName], Default) -> - persistent_term:get(?PERSIS_KEY(Type, bin(RootName)), Default); + persistent_term:get(?PERSIS_KEY(Type, RootName), Default); do_get(Type, [RootName | KeyPath], Default) -> - RootV = persistent_term:get(?PERSIS_KEY(Type, bin(RootName)), #{}), + RootV = persistent_term:get(?PERSIS_KEY(Type, RootName), #{}), do_deep_get(Type, KeyPath, RootV, Default). do_put(Type, Putter, [], DeepValue) -> @@ -646,16 +710,16 @@ do_put(Type, Putter, [], DeepValue) -> do_put(Type, Putter, [RootName | KeyPath], DeepValue) -> OldValue = do_get(Type, [RootName], #{}), NewValue = do_deep_put(Type, Putter, KeyPath, OldValue, DeepValue), - persistent_term:put(?PERSIS_KEY(Type, bin(RootName)), NewValue). + persistent_term:put(?PERSIS_KEY(Type, RootName), NewValue). do_deep_get(?CONF, KeyPath, Map, Default) -> atom_conf_path( KeyPath, - fun(AtomKeyPath) -> emqx_map_lib:deep_get(AtomKeyPath, Map, Default) end, + fun(AtomKeyPath) -> emqx_utils_maps:deep_get(AtomKeyPath, Map, Default) end, {return, Default} ); do_deep_get(?RAW_CONF, KeyPath, Map, Default) -> - emqx_map_lib:deep_get([bin(Key) || Key <- KeyPath], Map, Default). + emqx_utils_maps:deep_get([bin(Key) || Key <- KeyPath], Map, Default). do_deep_put(?CONF, Putter, KeyPath, Map, Value) -> atom_conf_path( @@ -688,6 +752,22 @@ bin(Bin) when is_binary(Bin) -> Bin; bin(Str) when is_list(Str) -> list_to_binary(Str); bin(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8). +warning_deprecated_root_key(RawConf) -> + case maps:keys(RawConf) -- get_root_names() of + [] -> + ok; + Keys -> + Unknowns = string:join([binary_to_list(K) || K <- Keys], ","), + ?tp(unknown_config_keys, #{unknown_config_keys => Unknowns}), + ?SLOG( + warning, + #{ + msg => "config_key_not_recognized", + unknown_config_keys => Unknowns + } + ) + end. 
+ conf_key(?CONF, RootName) -> atom(RootName); conf_key(?RAW_CONF, RootName) -> diff --git a/apps/emqx/src/emqx_config_handler.erl b/apps/emqx/src/emqx_config_handler.erl index a0a99b62e..0bad19f9e 100644 --- a/apps/emqx/src/emqx_config_handler.erl +++ b/apps/emqx/src/emqx_config_handler.erl @@ -18,6 +18,7 @@ -module(emqx_config_handler). -include("logger.hrl"). +-include("emqx_schema.hrl"). -include_lib("hocon/include/hoconsc.hrl"). -behaviour(gen_server). @@ -43,7 +44,6 @@ terminate/2, code_change/3 ]). --export([is_mutable/3]). -define(MOD, {mod}). -define(WKEY, '?'). @@ -230,26 +230,15 @@ process_update_request([_], _Handlers, {remove, _Opts}) -> process_update_request(ConfKeyPath, _Handlers, {remove, Opts}) -> OldRawConf = emqx_config:get_root_raw(ConfKeyPath), BinKeyPath = bin_path(ConfKeyPath), - case check_permissions(remove, BinKeyPath, OldRawConf, Opts) of - allow -> - NewRawConf = emqx_map_lib:deep_remove(BinKeyPath, OldRawConf), - OverrideConf = remove_from_override_config(BinKeyPath, Opts), - {ok, NewRawConf, OverrideConf, Opts}; - {deny, Reason} -> - {error, {permission_denied, Reason}} - end; + NewRawConf = emqx_utils_maps:deep_remove(BinKeyPath, OldRawConf), + OverrideConf = remove_from_override_config(BinKeyPath, Opts), + {ok, NewRawConf, OverrideConf, Opts}; process_update_request(ConfKeyPath, Handlers, {{update, UpdateReq}, Opts}) -> OldRawConf = emqx_config:get_root_raw(ConfKeyPath), case do_update_config(ConfKeyPath, Handlers, OldRawConf, UpdateReq) of {ok, NewRawConf} -> - BinKeyPath = bin_path(ConfKeyPath), - case check_permissions(update, BinKeyPath, NewRawConf, Opts) of - allow -> - OverrideConf = merge_to_override_config(NewRawConf, Opts), - {ok, NewRawConf, OverrideConf, Opts}; - {deny, Reason} -> - {error, {permission_denied, Reason}} - end; + OverrideConf = merge_to_override_config(NewRawConf, Opts), + {ok, NewRawConf, OverrideConf, Opts}; Error -> Error end. @@ -271,8 +260,10 @@ do_update_config( SubOldRawConf = get_sub_config(ConfKeyBin, OldRawConf), SubHandlers = get_sub_handlers(ConfKey, Handlers), case do_update_config(SubConfKeyPath, SubHandlers, SubOldRawConf, UpdateReq, ConfKeyPath) of - {ok, NewUpdateReq} -> merge_to_old_config(#{ConfKeyBin => NewUpdateReq}, OldRawConf); - Error -> Error + {ok, NewUpdateReq} -> + merge_to_old_config(#{ConfKeyBin => NewUpdateReq}, OldRawConf); + Error -> + Error end. check_and_save_configs( @@ -445,7 +436,7 @@ remove_from_override_config(_BinKeyPath, #{persistent := false}) -> undefined; remove_from_override_config(BinKeyPath, Opts) -> OldConf = emqx_config:read_override_conf(Opts), - emqx_map_lib:deep_remove(BinKeyPath, OldConf). + emqx_utils_maps:deep_remove(BinKeyPath, OldConf). %% apply new config on top of override config merge_to_override_config(_RawConf, #{persistent := false}) -> @@ -457,17 +448,23 @@ merge_to_override_config(RawConf, Opts) -> up_req({remove, _Opts}) -> '$remove'; up_req({{update, Req}, _Opts}) -> Req. -return_change_result(ConfKeyPath, {{update, _Req}, Opts}) -> - #{ - config => emqx_config:get(ConfKeyPath), - raw_config => return_rawconf(ConfKeyPath, Opts) - }; +return_change_result(ConfKeyPath, {{update, Req}, Opts}) -> + case Req =/= ?TOMBSTONE_CONFIG_CHANGE_REQ of + true -> + #{ + config => emqx_config:get(ConfKeyPath), + raw_config => return_rawconf(ConfKeyPath, Opts) + }; + false -> + %% like remove, nothing to return + #{} + end; return_change_result(_ConfKeyPath, {remove, _Opts}) -> #{}. 
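+
+%% Illustrative note (not part of this change): a tombstone update request
+%% (?TOMBSTONE_CONFIG_CHANGE_REQ, from emqx_schema.hrl) is reported like a
+%% removal, so callers get an empty result instead of stale config:
+%%
+%%   return_change_result(ConfKeyPath, {{update, ?TOMBSTONE_CONFIG_CHANGE_REQ}, Opts}).
+%%   %% => #{}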
return_rawconf(ConfKeyPath, #{rawconf_with_defaults := true}) -> FullRawConf = emqx_config:fill_defaults(emqx_config:get_raw([])), - emqx_map_lib:deep_get(bin_path(ConfKeyPath), FullRawConf); + emqx_utils_maps:deep_get(bin_path(ConfKeyPath), FullRawConf); return_rawconf(ConfKeyPath, _) -> emqx_config:get_raw(ConfKeyPath). @@ -485,16 +482,16 @@ atom(Atom) when is_atom(Atom) -> -dialyzer({nowarn_function, do_remove_handler/2}). do_remove_handler(ConfKeyPath, Handlers) -> - NewHandlers = emqx_map_lib:deep_remove(ConfKeyPath ++ [?MOD], Handlers), + NewHandlers = emqx_utils_maps:deep_remove(ConfKeyPath ++ [?MOD], Handlers), remove_empty_leaf(ConfKeyPath, NewHandlers). remove_empty_leaf([], Handlers) -> Handlers; remove_empty_leaf(KeyPath, Handlers) -> - case emqx_map_lib:deep_find(KeyPath, Handlers) =:= {ok, #{}} of + case emqx_utils_maps:deep_find(KeyPath, Handlers) =:= {ok, #{}} of %% empty leaf true -> - Handlers1 = emqx_map_lib:deep_remove(KeyPath, Handlers), + Handlers1 = emqx_utils_maps:deep_remove(KeyPath, Handlers), SubKeyPath = lists:sublist(KeyPath, length(KeyPath) - 1), remove_empty_leaf(SubKeyPath, Handlers1); false -> @@ -511,7 +508,7 @@ assert_callback_function(Mod) -> end, ok. --spec schema(module(), emqx_map_lib:config_key_path()) -> hocon_schema:schema(). +-spec schema(module(), emqx_utils_maps:config_key_path()) -> hocon_schema:schema(). schema(SchemaModule, [RootKey | _]) -> Roots = hocon_schema:roots(SchemaModule), {Field, Translations} = @@ -546,98 +543,3 @@ load_prev_handlers() -> save_handlers(Handlers) -> application:set_env(emqx, ?MODULE, Handlers). - -check_permissions(_Action, _ConfKeyPath, _NewRawConf, #{override_to := local}) -> - allow; -check_permissions(Action, ConfKeyPath, NewRawConf, _Opts) -> - case emqx_map_lib:deep_find(ConfKeyPath, NewRawConf) of - {ok, NewRaw} -> - LocalOverride = emqx_config:read_override_conf(#{override_to => local}), - case emqx_map_lib:deep_find(ConfKeyPath, LocalOverride) of - {ok, LocalRaw} -> - case is_mutable(Action, NewRaw, LocalRaw) of - ok -> - allow; - {error, Error} -> - ?SLOG(error, #{ - msg => "prevent_remove_local_override_conf", - config_key_path => ConfKeyPath, - error => Error - }), - {deny, "Disable changed from local-override.conf"} - end; - {not_found, _, _} -> - allow - end; - {not_found, _, _} -> - allow - end. - -is_mutable(Action, NewRaw, LocalRaw) -> - try - KeyPath = [], - is_mutable(KeyPath, Action, NewRaw, LocalRaw) - catch - throw:Error -> Error - end. - --define(REMOVE_FAILED, "remove_failed"). --define(UPDATE_FAILED, "update_failed"). - -is_mutable(KeyPath, Action, New = #{}, Local = #{}) -> - maps:foreach( - fun(Key, SubLocal) -> - case maps:find(Key, New) of - error -> ok; - {ok, SubNew} -> is_mutable(KeyPath ++ [Key], Action, SubNew, SubLocal) - end - end, - Local - ); -is_mutable(KeyPath, remove, Update, Origin) -> - throw({error, {?REMOVE_FAILED, KeyPath, Update, Origin}}); -is_mutable(_KeyPath, update, Val, Val) -> - ok; -is_mutable(KeyPath, update, Update, Origin) -> - throw({error, {?UPDATE_FAILED, KeyPath, Update, Origin}}). - --ifdef(TEST). --include_lib("eunit/include/eunit.hrl"). 
-
-is_mutable_update_test() ->
-    Action = update,
-    ?assertEqual(ok, is_mutable(Action, #{}, #{})),
-    ?assertEqual(ok, is_mutable(Action, #{a => #{b => #{c => #{}}}}, #{a => #{b => #{c => #{}}}})),
-    ?assertEqual(ok, is_mutable(Action, #{a => #{b => #{c => 1}}}, #{a => #{b => #{c => 1}}})),
-    ?assertEqual(
-        {error, {?UPDATE_FAILED, [a, b, c], 1, 2}},
-        is_mutable(Action, #{a => #{b => #{c => 1}}}, #{a => #{b => #{c => 2}}})
-    ),
-    ?assertEqual(
-        {error, {?UPDATE_FAILED, [a, b, d], 2, 3}},
-        is_mutable(Action, #{a => #{b => #{c => 1, d => 2}}}, #{a => #{b => #{c => 1, d => 3}}})
-    ),
-    ok.
-
-is_mutable_remove_test() ->
-    Action = remove,
-    ?assertEqual(ok, is_mutable(Action, #{}, #{})),
-    ?assertEqual(ok, is_mutable(Action, #{a => #{b => #{c => #{}}}}, #{a1 => #{b => #{c => #{}}}})),
-    ?assertEqual(ok, is_mutable(Action, #{a => #{b => #{c => 1}}}, #{a => #{b1 => #{c => 1}}})),
-    ?assertEqual(ok, is_mutable(Action, #{a => #{b => #{c => 1}}}, #{a => #{b => #{c1 => 1}}})),
-
-    ?assertEqual(
-        {error, {?REMOVE_FAILED, [a, b, c], 1, 1}},
-        is_mutable(Action, #{a => #{b => #{c => 1}}}, #{a => #{b => #{c => 1}}})
-    ),
-    ?assertEqual(
-        {error, {?REMOVE_FAILED, [a, b, c], 1, 2}},
-        is_mutable(Action, #{a => #{b => #{c => 1}}}, #{a => #{b => #{c => 2}}})
-    ),
-    ?assertEqual(
-        {error, {?REMOVE_FAILED, [a, b, c], 1, 1}},
-        is_mutable(Action, #{a => #{b => #{c => 1, d => 2}}}, #{a => #{b => #{c => 1, d => 3}}})
-    ),
-    ok.
-
--endif.
diff --git a/apps/emqx/src/emqx_connection.erl b/apps/emqx/src/emqx_connection.erl
index 0c6481399..79654e510 100644
--- a/apps/emqx/src/emqx_connection.erl
+++ b/apps/emqx/src/emqx_connection.erl
@@ -14,7 +14,13 @@
 %% limitations under the License.
 %%--------------------------------------------------------------------
 
-%% MQTT/TCP|TLS Connection
+%% This module interacts with the transport layer of MQTT.
+%% Supported transports:
+%% - TCP connection
+%% - TCP/TLS connection
+%% - QUIC stream
+%%
+%% For WebSocket connections, see emqx_ws_connection.erl
 -module(emqx_connection).
 
 -include("emqx.hrl").
@@ -71,7 +77,7 @@
 -export([set_field/3]).
 
 -import(
-    emqx_misc,
+    emqx_utils,
     [start_timer/2]
 ).
 
@@ -105,13 +111,16 @@
         listener :: {Type :: atom(), Name :: atom()},
 
         %% Limiter
-        limiter :: maybe(limiter()),
+        limiter :: limiter(),
 
         %% limiter buffer for overload use
         limiter_buffer :: queue:queue(pending_req()),
 
         %% limiter timers
-        limiter_timer :: undefined | reference()
+        limiter_timer :: undefined | reference(),
+
+        %% QUIC connection owner pid, if in use.
+        quic_conn_pid :: maybe(pid())
     }).
 
 -record(retry, {
@@ -173,10 +182,8 @@
 -define(ALARM_SOCK_STATS_KEYS, [send_pend, recv_cnt, recv_oct, send_cnt, send_oct]).
 -define(ALARM_SOCK_OPTS_KEYS, [high_watermark, high_msgq_watermark, sndbuf, recbuf, buffer]).
 
-%% use macro to do compile time limiter's type check
--define(LIMITER_BYTES_IN, bytes_in).
--define(LIMITER_MESSAGE_IN, message_in).
--define(EMPTY_QUEUE, {[], []}).
+-define(LIMITER_BYTES_IN, bytes).
+-define(LIMITER_MESSAGE_IN, messages).
 
 -dialyzer({no_match, [info/2]}).
 -dialyzer(
@@ -189,12 +196,16 @@
     ]}
 ).
 
--spec start_link(
-    esockd:transport(),
-    esockd:socket() | {pid(), quicer:connection_handler()},
-    emqx_channel:opts()
-) ->
-    {ok, pid()}.
+-spec start_link
+    (esockd:transport(), esockd:socket(), emqx_channel:opts()) ->
+        {ok, pid()};
+    (
+        emqx_quic_stream,
+        {ConnOwner :: pid(), quicer:connection_handle(), quicer:new_conn_props()},
+        emqx_quic_connection:cb_state()
+    ) ->
+        {ok, pid()}.
+ start_link(Transport, Socket, Options) -> Args = [self(), Transport, Socket, Options], CPid = proc_lib:spawn_link(?MODULE, init, Args), @@ -247,7 +258,7 @@ stats(#state{ {error, _} -> [] end, ChanStats = emqx_channel:stats(Channel), - ProcStats = emqx_misc:proc_stats(), + ProcStats = emqx_utils:proc_stats(), lists:append([SockStats, ChanStats, ProcStats]). %% @doc Set TCP keepalive socket options to override system defaults. @@ -329,6 +340,7 @@ init_state( }, ParseState = emqx_frame:initial_parse_state(FrameOpts), Serialize = emqx_frame:serialize_opts(), + %% Init Channel Channel = emqx_channel:init(ConnInfo, Opts), GcState = case emqx_config:get_zone_conf(Zone, [force_gc]) of @@ -359,7 +371,9 @@ init_state( zone = Zone, listener = Listener, limiter_buffer = queue:new(), - limiter_timer = undefined + limiter_timer = undefined, + %% for quic streams to inherit + quic_conn_pid = maps:get(conn_pid, Opts, undefined) }. run_loop( @@ -376,7 +390,7 @@ run_loop( emqx_channel:info(zone, Channel), [force_shutdown] ), - emqx_misc:tune_heap_size(ShutdownPolicy), + emqx_utils:tune_heap_size(ShutdownPolicy), case activate_socket(State) of {ok, NState} -> hibernate(Parent, NState); @@ -403,14 +417,19 @@ exit_on_sock_error(Reason) -> recvloop( Parent, State = #state{ - idle_timeout = IdleTimeout, + idle_timeout = IdleTimeout0, zone = Zone } ) -> + IdleTimeout = + case IdleTimeout0 of + infinity -> infinity; + _ -> IdleTimeout0 + 100 + end, receive Msg -> handle_recv(Msg, Parent, State) - after IdleTimeout + 100 -> + after IdleTimeout -> case emqx_olp:backoff_hibernation(Zone) of true -> recvloop(Parent, State); @@ -451,7 +470,7 @@ ensure_stats_timer(_Timeout, State) -> -compile({inline, [cancel_stats_timer/1]}). cancel_stats_timer(State = #state{stats_timer = TRef}) when is_reference(TRef) -> ?tp(debug, cancel_stats_timer, #{}), - ok = emqx_misc:cancel_timer(TRef), + ok = emqx_utils:cancel_timer(TRef), State#state{stats_timer = undefined}; cancel_stats_timer(State) -> State. 
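+
+%% Worked example (illustrative, not part of this change) for the idle
+%% timeout handling in recvloop/2 above:
+%%
+%%   %% zone idle_timeout = 15000 -> the receive waits 15000 + 100 ms
+%%   %% before consulting emqx_olp:backoff_hibernation/1;
+%%   %% idle_timeout = infinity never times out (no hibernation check).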
@@ -471,7 +490,9 @@ process_msg([Msg | More], State) ->
             {ok, Msgs, NState} ->
                 process_msg(append_msg(More, Msgs), NState);
             {stop, Reason, NState} ->
-                {stop, Reason, NState}
+                {stop, Reason, NState};
+            {stop, Reason} ->
+                {stop, Reason, State}
         end
     catch
         exit:normal ->
@@ -502,7 +523,6 @@ append_msg(Q, Msg) ->
 %%--------------------------------------------------------------------
 %% Handle a Msg
-
 handle_msg({'$gen_call', From, Req}, State) ->
     case handle_call(From, Req, State) of
         {reply, Reply, NState} ->
@@ -520,11 +540,10 @@ handle_msg({Inet, _Sock, Data}, State) when Inet == tcp; Inet == ssl ->
     inc_counter(incoming_bytes, Oct),
     ok = emqx_metrics:inc('bytes.received', Oct),
     when_bytes_in(Oct, Data, State);
-handle_msg({quic, Data, _Sock, _, _, _}, State) ->
-    Oct = iolist_size(Data),
-    inc_counter(incoming_bytes, Oct),
-    ok = emqx_metrics:inc('bytes.received', Oct),
-    when_bytes_in(Oct, Data, State);
+handle_msg({quic, Data, _Stream, #{len := Len}}, State) when is_binary(Data) ->
+    inc_counter(incoming_bytes, Len),
+    ok = emqx_metrics:inc('bytes.received', Len),
+    when_bytes_in(Len, Data, State);
 handle_msg(check_cache, #state{limiter_buffer = Cache} = State) ->
     case queue:peek(Cache) of
         empty ->
@@ -537,7 +556,7 @@ handle_msg(
     {incoming, Packet = ?CONNECT_PACKET(ConnPkt)},
     State = #state{idle_timer = IdleTimer}
 ) ->
-    ok = emqx_misc:cancel_timer(IdleTimer),
+    ok = emqx_utils:cancel_timer(IdleTimer),
     Serialize = emqx_frame:serialize_opts(ConnPkt),
     NState = State#state{
         serialize = Serialize,
@@ -545,6 +564,7 @@ handle_msg(
     },
     handle_incoming(Packet, NState);
 handle_msg({incoming, Packet}, State) ->
+    ?TRACE("MQTT", "mqtt_packet_received", #{packet => Packet}),
     handle_incoming(Packet, State);
 handle_msg({outgoing, Packets}, State) ->
     handle_outgoing(Packets, State);
@@ -571,7 +591,7 @@ handle_msg(
     #state{listener = {Type, Listener}} = State
 ) ->
     ActiveN = get_active_n(Type, Listener),
-    Delivers = [Deliver | emqx_misc:drain_deliver(ActiveN)],
+    Delivers = [Deliver | emqx_utils:drain_deliver(ActiveN)],
     with_channel(handle_deliver, [Delivers], State);
 %% Something sent
 handle_msg({inet_reply, _Sock, ok}, State = #state{listener = {Type, Listener}}) ->
@@ -589,9 +609,20 @@ handle_msg({inet_reply, _Sock, {error, Reason}}, State) ->
 handle_msg({connack, ConnAck}, State) ->
     handle_outgoing(ConnAck, State);
 handle_msg({close, Reason}, State) ->
+    %% @FIXME: this could also be a close caused by an application error.
     ?TRACE("SOCKET", "socket_force_closed", #{reason => Reason}),
     handle_info({sock_closed, Reason}, close_socket(State));
-handle_msg({event, connected}, State = #state{channel = Channel}) ->
+handle_msg(
+    {event, connected},
+    State = #state{
+        channel = Channel,
+        serialize = Serialize,
+        parse_state = PS,
+        quic_conn_pid = QuicConnPid
+    }
+) ->
+    QuicConnPid =/= undefined andalso
+        emqx_quic_connection:activate_data_streams(QuicConnPid, {PS, Serialize, Channel}),
     ClientId = emqx_channel:info(clientid, Channel),
     emqx_cm:insert_channel_info(ClientId, info(State), stats(State));
 handle_msg({event, disconnected}, State = #state{channel = Channel}) ->
@@ -648,6 +679,12 @@ maybe_raise_exception(#{
     stacktrace := Stacktrace
 }) ->
     erlang:raise(Exception, Context, Stacktrace);
+maybe_raise_exception({shutdown, normal}) ->
+    ok;
+maybe_raise_exception(normal) ->
+    ok;
+maybe_raise_exception(shutdown) ->
+    ok;
 maybe_raise_exception(Reason) ->
     exit(Reason).
 
@@ -726,6 +763,12 @@ handle_timeout(TRef, Msg, State) ->
 %% Parse incoming data
 -compile({inline, [when_bytes_in/3]}).
 when_bytes_in(Oct, Data, State) ->
+    ?SLOG(debug, #{
+        msg => "raw_bin_received",
+        size => Oct,
+        bin => binary_to_list(binary:encode_hex(Data)),
+        type => "hex"
+    }),
     {Packets, NState} = parse_incoming(Data, [], State),
     Len = erlang:length(Packets),
     check_limiter(
@@ -736,6 +779,7 @@ when_bytes_in(Oct, Data, State) ->
         NState
     ).
 
+%% @doc Return a reversed message list.
 -compile({inline, [next_incoming_msgs/3]}).
 next_incoming_msgs([Packet], Msgs, State) ->
     {ok, [{incoming, Packet} | Msgs], State};
@@ -778,7 +822,6 @@ parse_incoming(Data, Packets, State = #state{parse_state = ParseState}) ->
 
 handle_incoming(Packet, State) when is_record(Packet, mqtt_packet) ->
     ok = inc_incoming_stats(Packet),
-    ?TRACE("MQTT", "mqtt_packet_received", #{packet => Packet}),
     with_channel(handle_in, [Packet], State);
 handle_incoming(FrameError, State) ->
     with_channel(handle_in, [FrameError], State).
@@ -859,6 +902,7 @@ send(IoData, #state{transport = Transport, socket = Socket, channel = Channel})
             ok;
         Error = {error, _Reason} ->
             %% Send an inet_reply to postpone handling the error
+            %% @FIXME: why not just return the error?
             self() ! {inet_reply, Socket, Error},
             ok
     end.
@@ -882,12 +926,14 @@ handle_info({sock_error, Reason}, State) ->
         false -> ok
     end,
     handle_info({sock_closed, Reason}, close_socket(State));
-handle_info({quic, peer_send_shutdown, _Stream}, State) ->
-    handle_info({sock_closed, force}, close_socket(State));
-handle_info({quic, closed, _Channel, ReasonFlag}, State) ->
-    handle_info({sock_closed, ReasonFlag}, State);
-handle_info({quic, closed, _Stream}, State) ->
-    handle_info({sock_closed, force}, State);
+%% handle QUIC control stream events
+handle_info({quic, Event, Handle, Prop}, State) when is_atom(Event) ->
+    case emqx_quic_stream:Event(Handle, Prop, State) of
+        {{continue, Msgs}, NewState} ->
+            {ok, Msgs, NewState};
+        Other ->
+            Other
+    end;
 handle_info(Info, State) ->
     with_channel(handle_info, [Info], State).
 
@@ -928,55 +974,61 @@ handle_cast(Req, State) ->
     list(any()),
     state()
 ) -> _.
+
+check_limiter(
+    _Needs,
+    Data,
+    WhenOk,
+    Msgs,
+    #state{limiter = infinity} = State
+) ->
+    WhenOk(Data, Msgs, State);
 check_limiter(
     Needs,
     Data,
     WhenOk,
     Msgs,
-    #state{
-        limiter = Limiter,
-        limiter_timer = LimiterTimer,
-        limiter_buffer = Cache
-    } = State
-) when Limiter =/= undefined ->
-    case LimiterTimer of
-        undefined ->
-            case emqx_limiter_container:check_list(Needs, Limiter) of
-                {ok, Limiter2} ->
-                    WhenOk(Data, Msgs, State#state{limiter = Limiter2});
-                {pause, Time, Limiter2} ->
-                    ?SLOG(debug, #{
-                        msg => "pause_time_dueto_rate_limit",
-                        needs => Needs,
-                        time_in_ms => Time
-                    }),
+    #state{limiter_timer = undefined, limiter = Limiter} = State
+) ->
+    case emqx_limiter_container:check_list(Needs, Limiter) of
+        {ok, Limiter2} ->
+            WhenOk(Data, Msgs, State#state{limiter = Limiter2});
+        {pause, Time, Limiter2} ->
+            ?SLOG(debug, #{
+                msg => "pause_time_dueto_rate_limit",
+                needs => Needs,
+                time_in_ms => Time
+            }),
 
-                    Retry = #retry{
-                        types = [Type || {_, Type} <- Needs],
-                        data = Data,
-                        next = WhenOk
-                    },
+            Retry = #retry{
+                types = [Type || {_, Type} <- Needs],
+                data = Data,
+                next = WhenOk
+            },
 
-                    Limiter3 = emqx_limiter_container:set_retry_context(Retry, Limiter2),
+            Limiter3 = emqx_limiter_container:set_retry_context(Retry, Limiter2),
 
-                    TRef = start_timer(Time, limit_timeout),
+            TRef = start_timer(Time, limit_timeout),
 
-                    {ok, State#state{
-                        limiter = Limiter3,
-                        limiter_timer = TRef
-                    }};
-                {drop, Limiter2} ->
-                    {ok, State#state{limiter = Limiter2}}
-            end;
-        _ ->
-            %% if there has a retry timer,
-            %% cache the operation and execute it after the retry is over
-            %% the maximum length of the cache queue is equal to the active_n
-            New = #pending_req{need = Needs, data = Data, next = WhenOk},
-            {ok, State#state{limiter_buffer = queue:in(New, Cache)}}
+            {ok, State#state{
+                limiter = Limiter3,
+                limiter_timer = TRef
+            }};
+        {drop, Limiter2} ->
+            {ok, State#state{limiter = Limiter2}}
     end;
-check_limiter(_, Data, WhenOk, Msgs, State) ->
-    WhenOk(Data, Msgs, State).
+check_limiter(
+    Needs,
+    Data,
+    WhenOk,
+    _Msgs,
+    #state{limiter_buffer = Cache} = State
+) ->
+    %% If there is a retry timer,
+    %% cache the operation and execute it after the retry is over.
+    %% The maximum length of the cache queue is equal to the active_n.
+    New = #pending_req{need = Needs, data = Data, next = WhenOk},
+    {ok, State#state{limiter_buffer = queue:in(New, Cache)}}.
 
 %% try to perform a retry
 -spec retry_limiter(state()) -> _.
@@ -1025,7 +1077,7 @@ check_oom(State = #state{channel = Channel}) ->
         emqx_channel:info(zone, Channel), [force_shutdown]
     ),
     ?tp(debug, check_oom, #{policy => ShutdownPolicy}),
-    case emqx_misc:check_oom(ShutdownPolicy) of
+    case emqx_utils:check_oom(ShutdownPolicy) of
         {shutdown, Reason} ->
             %% triggers terminate/2 callback immediately
             erlang:exit({shutdown, Reason});
@@ -1152,7 +1204,7 @@ inc_counter(Key, Inc) ->
 %%--------------------------------------------------------------------
 
 set_field(Name, Value, State) ->
-    Pos = emqx_misc:index_of(Name, record_info(fields, state)),
+    Pos = emqx_utils:index_of(Name, record_info(fields, state)),
     setelement(Pos + 1, State, Value).
 
 get_state(Pid) ->
diff --git a/apps/emqx/src/emqx_const_v1.erl b/apps/emqx/src/emqx_const_v1.erl
new file mode 100644
index 000000000..aef4d5101
--- /dev/null
+++ b/apps/emqx/src/emqx_const_v1.erl
@@ -0,0 +1,24 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% @doc Never update this module, create a v2 instead. +%%-------------------------------------------------------------------- + +-module(emqx_const_v1). + +-export([make_sni_fun/1]). + +make_sni_fun(ListenerID) -> + fun(SN) -> emqx_ocsp_cache:sni_fun(SN, ListenerID) end. diff --git a/apps/emqx/src/emqx_crl_cache.erl b/apps/emqx/src/emqx_crl_cache.erl new file mode 100644 index 000000000..084313420 --- /dev/null +++ b/apps/emqx/src/emqx_crl_cache.erl @@ -0,0 +1,314 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% @doc EMQX CRL cache. +%%-------------------------------------------------------------------- + +-module(emqx_crl_cache). + +%% API +-export([ + start_link/0, + start_link/1, + register_der_crls/2, + refresh/1, + evict/1 +]). + +%% gen_server callbacks +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2 +]). + +%% internal exports +-export([http_get/2]). + +-behaviour(gen_server). + +-include("logger.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +-define(HTTP_TIMEOUT, timer:seconds(15)). +-define(RETRY_TIMEOUT, 5_000). +-ifdef(TEST). +-define(MIN_REFRESH_PERIOD, timer:seconds(5)). +-else. +-define(MIN_REFRESH_PERIOD, timer:minutes(1)). +-endif. +-define(DEFAULT_REFRESH_INTERVAL, timer:minutes(15)). +-define(DEFAULT_CACHE_CAPACITY, 100). + +-record(state, { + refresh_timers = #{} :: #{binary() => timer:tref()}, + refresh_interval = timer:minutes(15) :: timer:time(), + http_timeout = ?HTTP_TIMEOUT :: timer:time(), + %% keeps track of URLs by insertion time + insertion_times = gb_trees:empty() :: gb_trees:tree(timer:time(), url()), + %% the set of cached URLs, for testing if an URL is already + %% registered. + cached_urls = sets:new([{version, 2}]) :: sets:set(url()), + cache_capacity = 100 :: pos_integer(), + %% for future use + extra = #{} :: map() +}). +-type url() :: uri_string:uri_string(). +-type state() :: #state{}. + +%%-------------------------------------------------------------------- +%% API +%%-------------------------------------------------------------------- + +start_link() -> + Config = gather_config(), + start_link(Config). + +start_link(Config = #{cache_capacity := _, refresh_interval := _, http_timeout := _}) -> + gen_server:start_link({local, ?MODULE}, ?MODULE, Config, []). + +-spec refresh(url()) -> ok. 
+refresh(URL) ->
+    gen_server:cast(?MODULE, {refresh, URL}).
+
+-spec evict(url()) -> ok.
+evict(URL) ->
+    gen_server:cast(?MODULE, {evict, URL}).
+
+%% Adds CRLs in DER format to the cache and registers them for periodic
+%% refresh.
+-spec register_der_crls(url(), [public_key:der_encoded()]) -> ok.
+register_der_crls(URL, CRLs) when is_list(CRLs) ->
+    gen_server:cast(?MODULE, {register_der_crls, URL, CRLs}).
+
+%%--------------------------------------------------------------------
+%% gen_server behaviour
+%%--------------------------------------------------------------------
+
+init(Config) ->
+    #{
+        cache_capacity := CacheCapacity,
+        refresh_interval := RefreshIntervalMS,
+        http_timeout := HTTPTimeoutMS
+    } = Config,
+    State = #state{
+        cache_capacity = CacheCapacity,
+        refresh_interval = RefreshIntervalMS,
+        http_timeout = HTTPTimeoutMS
+    },
+    {ok, State}.
+
+handle_call(Call, _From, State) ->
+    {reply, {error, {bad_call, Call}}, State}.
+
+handle_cast({evict, URL}, State0 = #state{refresh_timers = RefreshTimers0}) ->
+    emqx_ssl_crl_cache:delete(URL),
+    MTimer = maps:get(URL, RefreshTimers0, undefined),
+    emqx_utils:cancel_timer(MTimer),
+    RefreshTimers = maps:without([URL], RefreshTimers0),
+    State = State0#state{refresh_timers = RefreshTimers},
+    ?tp(
+        crl_cache_evict,
+        #{url => URL}
+    ),
+    {noreply, State};
+handle_cast({register_der_crls, URL, CRLs}, State0) ->
+    handle_register_der_crls(State0, URL, CRLs);
+handle_cast({refresh, URL}, State0) ->
+    case do_http_fetch_and_cache(URL, State0#state.http_timeout) of
+        {error, Error} ->
+            ?tp(crl_refresh_failure, #{error => Error, url => URL}),
+            ?SLOG(error, #{
+                msg => "failed_to_fetch_crl_response",
+                url => URL,
+                error => Error
+            }),
+            {noreply, ensure_timer(URL, State0, ?RETRY_TIMEOUT)};
+        {ok, _CRLs} ->
+            ?SLOG(debug, #{
+                msg => "fetched_crl_response",
+                url => URL
+            }),
+            {noreply, ensure_timer(URL, State0)}
+    end;
+handle_cast(_Cast, State) ->
+    {noreply, State}.
+
+handle_info(
+    {timeout, TRef, {refresh, URL}},
+    State = #state{
+        refresh_timers = RefreshTimers,
+        http_timeout = HTTPTimeoutMS
+    }
+) ->
+    case maps:get(URL, RefreshTimers, undefined) of
+        TRef ->
+            ?tp(debug, crl_refresh_timer, #{url => URL}),
+            case do_http_fetch_and_cache(URL, HTTPTimeoutMS) of
+                {error, Error} ->
+                    ?SLOG(error, #{
+                        msg => "failed_to_fetch_crl_response",
+                        url => URL,
+                        error => Error
+                    }),
+                    {noreply, ensure_timer(URL, State, ?RETRY_TIMEOUT)};
+                {ok, _CRLs} ->
+                    ?tp(debug, crl_refresh_timer_done, #{url => URL}),
+                    {noreply, ensure_timer(URL, State)}
+            end;
+        _ ->
+            {noreply, State}
+    end;
+handle_info(_Info, State) ->
+    {noreply, State}.
+
+%%--------------------------------------------------------------------
+%% internal functions
+%%--------------------------------------------------------------------
+
+http_get(URL, HTTPTimeout) ->
+    httpc:request(
+        get,
+        {URL, [{"connection", "close"}]},
+        [{timeout, HTTPTimeout}],
+        [{body_format, binary}]
+    ).
+
+do_http_fetch_and_cache(URL, HTTPTimeoutMS) ->
+    ?tp(crl_http_fetch, #{crl_url => URL}),
+    Resp = ?MODULE:http_get(URL, HTTPTimeoutMS),
+    case Resp of
+        {ok, {{_, 200, _}, _, Body}} ->
+            case parse_crls(Body) of
+                error ->
+                    {error, invalid_crl};
+                CRLs ->
+                    %% Note: must ensure it's a string and not a
+                    %% binary because that's what the ssl manager uses
+                    %% when doing lookups.
+                    emqx_ssl_crl_cache:insert(to_string(URL), {der, CRLs}),
+                    ?tp(crl_cache_insert, #{url => URL, crls => CRLs}),
+                    {ok, CRLs}
+            end;
+        {ok, {{_, Code, _}, _, Body}} ->
+            {error, {bad_response, #{code => Code, body => Body}}};
+        {error, Error} ->
+            {error, {http_error, Error}}
+    end.
+
+parse_crls(Bin) ->
+    try
+        [CRL || {'CertificateList', CRL, not_encrypted} <- public_key:pem_decode(Bin)]
+    catch
+        _:_ ->
+            error
+    end.
+
+ensure_timer(URL, State = #state{refresh_interval = Timeout}) ->
+    ensure_timer(URL, State, Timeout).
+
+ensure_timer(URL, State = #state{refresh_timers = RefreshTimers0}, Timeout) ->
+    ?tp(crl_cache_ensure_timer, #{url => URL, timeout => Timeout}),
+    MTimer = maps:get(URL, RefreshTimers0, undefined),
+    emqx_utils:cancel_timer(MTimer),
+    RefreshTimers = RefreshTimers0#{
+        URL => emqx_utils:start_timer(
+            Timeout,
+            {refresh, URL}
+        )
+    },
+    State#state{refresh_timers = RefreshTimers}.
+
+-spec gather_config() ->
+    #{
+        cache_capacity := pos_integer(),
+        refresh_interval := timer:time(),
+        http_timeout := timer:time()
+    }.
+gather_config() ->
+    %% TODO: add a config handler to refresh the config when those
+    %% globals change?
+    CacheCapacity = emqx_config:get([crl_cache, capacity], ?DEFAULT_CACHE_CAPACITY),
+    RefreshIntervalMS0 = emqx_config:get([crl_cache, refresh_interval], ?DEFAULT_REFRESH_INTERVAL),
+    MinimumRefreshInterval = ?MIN_REFRESH_PERIOD,
+    RefreshIntervalMS = max(RefreshIntervalMS0, MinimumRefreshInterval),
+    HTTPTimeoutMS = emqx_config:get([crl_cache, http_timeout], ?HTTP_TIMEOUT),
+    #{
+        cache_capacity => CacheCapacity,
+        refresh_interval => RefreshIntervalMS,
+        http_timeout => HTTPTimeoutMS
+    }.
+
+-spec handle_register_der_crls(state(), url(), [public_key:der_encoded()]) -> {noreply, state()}.
+handle_register_der_crls(State0, URL0, CRLs) ->
+    #state{cached_urls = CachedURLs0} = State0,
+    URL = to_string(URL0),
+    case sets:is_element(URL, CachedURLs0) of
+        true ->
+            {noreply, State0};
+        false ->
+            emqx_ssl_crl_cache:insert(URL, {der, CRLs}),
+            ?tp(debug, new_crl_url_inserted, #{url => URL}),
+            State1 = do_register_url(State0, URL),
+            State2 = handle_cache_overflow(State1),
+            State = ensure_timer(URL, State2),
+            {noreply, State}
+    end.
+
+-spec do_register_url(state(), url()) -> state().
+do_register_url(State0, URL) ->
+    #state{
+        cached_urls = CachedURLs0,
+        insertion_times = InsertionTimes0
+    } = State0,
+    Now = erlang:monotonic_time(),
+    CachedURLs = sets:add_element(URL, CachedURLs0),
+    InsertionTimes = gb_trees:enter(Now, URL, InsertionTimes0),
+    State0#state{
+        cached_urls = CachedURLs,
+        insertion_times = InsertionTimes
+    }.
+
+-spec handle_cache_overflow(state()) -> state().
+handle_cache_overflow(State0) ->
+    #state{
+        cached_urls = CachedURLs0,
+        insertion_times = InsertionTimes0,
+        cache_capacity = CacheCapacity,
+        refresh_timers = RefreshTimers0
+    } = State0,
+    case sets:size(CachedURLs0) > CacheCapacity of
+        false ->
+            State0;
+        true ->
+            {_Time, OldestURL, InsertionTimes} = gb_trees:take_smallest(InsertionTimes0),
+            emqx_ssl_crl_cache:delete(OldestURL),
+            MTimer = maps:get(OldestURL, RefreshTimers0, undefined),
+            emqx_utils:cancel_timer(MTimer),
+            RefreshTimers = maps:remove(OldestURL, RefreshTimers0),
+            CachedURLs = sets:del_element(OldestURL, CachedURLs0),
+            ?tp(debug, crl_cache_overflow, #{oldest_url => OldestURL}),
+            State0#state{
+                insertion_times = InsertionTimes,
+                cached_urls = CachedURLs,
+                refresh_timers = RefreshTimers
+            }
+    end.
+
+to_string(B) when is_binary(B) ->
+    binary_to_list(B);
+to_string(L) when is_list(L) ->
+    L.
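+
+%% Usage sketch (illustrative, not part of this module): registering CRLs
+%% fetched out-of-band; the URL below is an example only:
+%%
+%%   URL = "http://crl.example.com/intermediate.crl",
+%%   {ok, {{_, 200, _}, _, Body}} = emqx_crl_cache:http_get(URL, timer:seconds(15)),
+%%   CRLs = [CRL || {'CertificateList', CRL, not_encrypted} <- public_key:pem_decode(Body)],
+%%   ok = emqx_crl_cache:register_der_crls(URL, CRLs).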
diff --git a/apps/emqx/src/emqx_exclusive_subscription.erl b/apps/emqx/src/emqx_exclusive_subscription.erl index afb6317b7..a1f7f76ae 100644 --- a/apps/emqx/src/emqx_exclusive_subscription.erl +++ b/apps/emqx/src/emqx_exclusive_subscription.erl @@ -32,7 +32,8 @@ -export([ check_subscribe/2, - unsubscribe/2 + unsubscribe/2, + clear/0 ]). %% Internal exports (RPC) @@ -77,7 +78,7 @@ on_add_module() -> mnesia(boot). on_delete_module() -> - mria:clear_table(?EXCLUSIVE_SHARD). + clear(). %%-------------------------------------------------------------------- %% APIs @@ -101,6 +102,9 @@ unsubscribe(Topic, #{is_exclusive := true}) -> unsubscribe(_Topic, _SubOpts) -> ok. +clear() -> + mria:clear_table(?TAB). + %%-------------------------------------------------------------------- %% Internal functions %%-------------------------------------------------------------------- diff --git a/apps/emqx/src/emqx_flapping.erl b/apps/emqx/src/emqx_flapping.erl index 64e4ed6c3..70b1a3232 100644 --- a/apps/emqx/src/emqx_flapping.erl +++ b/apps/emqx/src/emqx_flapping.erl @@ -27,6 +27,10 @@ %% API -export([detect/1]). +-ifdef(TEST). +-export([get_policy/2]). +-endif. + %% gen_server callbacks -export([ init/1, @@ -39,15 +43,6 @@ %% Tab -define(FLAPPING_TAB, ?MODULE). -%% Default Policy --define(FLAPPING_THRESHOLD, 30). --define(FLAPPING_DURATION, 60000). --define(FLAPPING_BANNED_INTERVAL, 300000). --define(DEFAULT_DETECT_POLICY, #{ - max_count => ?FLAPPING_THRESHOLD, - window_time => ?FLAPPING_DURATION, - ban_time => ?FLAPPING_BANNED_INTERVAL -}). -record(flapping, { clientid :: emqx_types:clientid(), @@ -69,7 +64,7 @@ stop() -> gen_server:stop(?MODULE). %% @doc Detect flapping when a MQTT client disconnected. -spec detect(emqx_types:clientinfo()) -> boolean(). detect(#{clientid := ClientId, peerhost := PeerHost, zone := Zone}) -> - Policy = #{max_count := Threshold} = get_policy(Zone), + Policy = #{max_count := Threshold} = get_policy([max_count, window_time, ban_time], Zone), %% The initial flapping record sets the detect_cnt to 0. InitVal = #flapping{ clientid = ClientId, @@ -89,8 +84,22 @@ detect(#{clientid := ClientId, peerhost := PeerHost, zone := Zone}) -> end end. -get_policy(Zone) -> - emqx_config:get_zone_conf(Zone, [flapping_detect]). +get_policy(Keys, Zone) when is_list(Keys) -> + RootKey = flapping_detect, + Conf = emqx_config:get_zone_conf(Zone, [RootKey]), + lists:foldl( + fun(Key, Acc) -> + case maps:find(Key, Conf) of + {ok, V} -> Acc#{Key => V}; + error -> Acc#{Key => emqx_config:get([RootKey, Key])} + end + end, + #{}, + Keys + ); +get_policy(Key, Zone) -> + #{Key := Conf} = get_policy([Key], Zone), + Conf. now_diff(TS) -> erlang:system_time(millisecond) - TS. @@ -99,7 +108,7 @@ now_diff(TS) -> erlang:system_time(millisecond) - TS. %%-------------------------------------------------------------------- init([]) -> - ok = emqx_tables:new(?FLAPPING_TAB, [ + ok = emqx_utils_ets:new(?FLAPPING_TAB, [ public, set, {keypos, #flapping.clientid}, @@ -166,8 +175,7 @@ handle_cast(Msg, State) -> handle_info({timeout, _TRef, {garbage_collect, Zone}}, State) -> Timestamp = - erlang:system_time(millisecond) - - maps:get(window_time, get_policy(Zone)), + erlang:system_time(millisecond) - get_policy(window_time, Zone), MatchSpec = [{{'_', '_', '_', '$1', '_'}, [{'<', '$1', Timestamp}], [true]}], ets:select_delete(?FLAPPING_TAB, MatchSpec), _ = start_timer(Zone), @@ -183,15 +191,19 @@ code_change(_OldVsn, State, _Extra) -> {ok, State}. 
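+
+%% Worked example (illustrative values) for get_policy/2 above: if the zone
+%% config only sets flapping_detect.max_count = 100 and the root config has
+%% flapping_detect.window_time = 60000, then
+%%
+%%   get_policy([max_count, window_time], Zone)
+%%   %% => #{max_count => 100, window_time => 60000}
+%%
+%% i.e. zone-level values win, and missing keys fall back to the root config.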
 start_timer(Zone) ->
-    WindTime = maps:get(window_time, get_policy(Zone)),
-    emqx_misc:start_timer(WindTime, {garbage_collect, Zone}).
+    case get_policy(window_time, Zone) of
+        WindowTime when is_integer(WindowTime) ->
+            emqx_utils:start_timer(WindowTime, {garbage_collect, Zone});
+        disabled ->
+            ok
+    end.
 
 start_timers() ->
-    lists:foreach(
-        fun({Zone, _ZoneConf}) ->
+    maps:foreach(
+        fun(Zone, _ZoneConf) ->
             start_timer(Zone)
         end,
-        maps:to_list(emqx:get_config([zones], #{}))
+        emqx:get_config([zones], #{})
     ).
 
 fmt_host(PeerHost) ->
diff --git a/apps/emqx/src/emqx_guid.erl b/apps/emqx/src/emqx_guid.erl
index fea4e70b0..d313723fb 100644
--- a/apps/emqx/src/emqx_guid.erl
+++ b/apps/emqx/src/emqx_guid.erl
@@ -145,10 +145,10 @@ npid() ->
     NPid.
 
 to_hexstr(I) when byte_size(I) =:= 16 ->
-    emqx_misc:bin_to_hexstr(I, upper).
+    emqx_utils:bin_to_hexstr(I, upper).
 
 from_hexstr(S) when byte_size(S) =:= 32 ->
-    emqx_misc:hexstr_to_bin(S).
+    emqx_utils:hexstr_to_bin(S).
 
 to_base62(<<I:128>>) ->
     emqx_base62:encode(I).
diff --git a/apps/emqx/src/emqx_hocon.erl b/apps/emqx/src/emqx_hocon.erl
index 7e9dbca77..08192e9be 100644
--- a/apps/emqx/src/emqx_hocon.erl
+++ b/apps/emqx/src/emqx_hocon.erl
@@ -20,8 +20,11 @@
 -export([
     format_path/1,
     check/2,
+    check/3,
+    compact_errors/2,
     format_error/1,
-    format_error/2
+    format_error/2,
+    make_schema/1
 ]).
 
 %% @doc Format hocon config field path to dot-separated string in iolist format.
@@ -35,20 +38,23 @@ format_path([Name | Rest]) -> [iol(Name), "." | format_path(Rest)].
 %% Always return plain map with atom keys.
 -spec check(module(), hocon:config() | iodata()) -> {ok, hocon:config()} | {error, any()}.
-check(SchemaModule, Conf) when is_map(Conf) ->
+check(SchemaModule, Conf) ->
     %% TODO: remove required
     %% fields should state required or not in their schema
     Opts = #{atom_key => true, required => false},
+    check(SchemaModule, Conf, Opts).
+
+check(SchemaModule, Conf, Opts) when is_map(Conf) ->
     try
         {ok, hocon_tconf:check_plain(SchemaModule, Conf, Opts)}
     catch
-        throw:Reason ->
-            {error, Reason}
+        throw:Errors:Stacktrace ->
+            compact_errors(Errors, Stacktrace)
     end;
-check(SchemaModule, HoconText) ->
+check(SchemaModule, HoconText, Opts) ->
     case hocon:binary(HoconText, #{format => map}) of
         {ok, MapConfig} ->
-            check(SchemaModule, MapConfig);
+            check(SchemaModule, MapConfig, Opts);
         {error, Reason} ->
             {error, Reason}
     end.
@@ -79,6 +85,9 @@ format_error({_Schema, [#{kind := K} = First | Rest] = All}, Opts) when
 format_error(_Other, _) ->
     false.
 
+make_schema(Fields) ->
+    #{roots => Fields, fields => #{}}.
+
 %% Ensure iolist()
 iol(B) when is_binary(B) -> B;
 iol(A) when is_atom(A) -> atom_to_binary(A, utf8);
@@ -86,3 +95,34 @@ iol(L) when is_list(L) -> L.
 
 no_stacktrace(Map) ->
     maps:without([stacktrace], Map).
+
+%% @doc HOCON tries to be very informative about all the detailed errors;
+%% that can be too much when reporting to the user.
+-spec compact_errors(any(), Stacktrace :: list()) -> {error, any()}.
+compact_errors({SchemaModule, Errors}, Stacktrace) ->
+    compact_errors(SchemaModule, Errors, Stacktrace).
+
+compact_errors(SchemaModule, [Error0 | More], _Stacktrace) when is_map(Error0) ->
+    Error1 =
+        case length(More) of
+            0 ->
+                Error0;
+            N ->
+                Error0#{unshown_errors_count => N}
+        end,
+    Error =
+        case is_atom(SchemaModule) of
+            true ->
+                Error1#{schema_module => SchemaModule};
+            false ->
+                Error1
+        end,
+    {error, Error};
+compact_errors(SchemaModule, Error, Stacktrace) ->
+    %% Unexpected; we need the stacktrace reported.
+    %% If this happens it's a bug in hocon_tconf.
+    {error, #{
+        schema_module => SchemaModule,
+        exception => Error,
+        stacktrace => Stacktrace
+    }}.
diff --git a/apps/emqx/src/emqx_hooks.erl b/apps/emqx/src/emqx_hooks.erl
index 1784d8ea3..0b8dc0941 100644
--- a/apps/emqx/src/emqx_hooks.erl
+++ b/apps/emqx/src/emqx_hooks.erl
@@ -229,7 +229,7 @@ lookup(HookPoint) ->
 %%--------------------------------------------------------------------
 
 init([]) ->
-    ok = emqx_tables:new(?TAB, [{keypos, #hook.name}, {read_concurrency, true}]),
+    ok = emqx_utils_ets:new(?TAB, [{keypos, #hook.name}, {read_concurrency, true}]),
     {ok, #{}}.
 
 handle_call({add, HookPoint, Callback = #callback{action = {M, F, _}}}, _From, State) ->
diff --git a/apps/emqx/src/emqx_kernel_sup.erl b/apps/emqx/src/emqx_kernel_sup.erl
index 21ed8576a..1027ef639 100644
--- a/apps/emqx/src/emqx_kernel_sup.erl
+++ b/apps/emqx/src/emqx_kernel_sup.erl
@@ -35,8 +35,9 @@ init([]) ->
             child_spec(emqx_hooks, worker),
             child_spec(emqx_stats, worker),
             child_spec(emqx_metrics, worker),
-            child_spec(emqx_ctl, worker),
-            child_spec(emqx_authn_authz_metrics_sup, supervisor)
+            child_spec(emqx_authn_authz_metrics_sup, supervisor),
+            child_spec(emqx_ocsp_cache, worker),
+            child_spec(emqx_crl_cache, worker)
         ]
     }}.
 
diff --git a/apps/emqx/src/emqx_limiter/src/emqx_htb_limiter.erl b/apps/emqx/src/emqx_limiter/src/emqx_htb_limiter.erl
index 83bc2ec72..bcd4166af 100644
--- a/apps/emqx/src/emqx_limiter/src/emqx_htb_limiter.erl
+++ b/apps/emqx/src/emqx_limiter/src/emqx_htb_limiter.erl
@@ -22,7 +22,7 @@
 
 %% API
 -export([
-    make_token_bucket_limiter/2,
+    make_local_limiter/2,
     make_ref_limiter/2,
     check/2,
     consume/2,
@@ -32,12 +32,11 @@
     make_future/1,
     available/1
 ]).
--export_type([token_bucket_limiter/0]).
+-export_type([local_limiter/0]).
 
-%% a token bucket limiter with a limiter server's bucket reference
-
-%% the number of tokens currently available
--type token_bucket_limiter() :: #{
+%% A token bucket limiter which may or may not contain a reference to another
+%% limiter, and can be used standalone in a client.
+-type local_limiter() :: #{
     tokens := non_neg_integer(),
     rate := decimal(),
     capacity := decimal(),
@@ -58,12 +57,12 @@
     retry_ctx =>
         undefined
         %% the retry context
-        | retry_context(token_bucket_limiter()),
+        | retry_context(local_limiter()),
     %% allow to add other keys
     atom => any()
 }.
 
-%% a limiter server's bucket reference
+%% A limiter instance which only contains a reference to another limiter (bucket).
 -type ref_limiter() :: #{
     max_retry_time := non_neg_integer(),
     failure_strategy := failure_strategy(),
@@ -88,7 +87,7 @@
 }.
 
 -type bucket() :: emqx_limiter_bucket_ref:bucket_ref().
--type limiter() :: token_bucket_limiter() | ref_limiter() | infinity.
+-type limiter() :: local_limiter() | ref_limiter() | infinity.
 -type millisecond() :: non_neg_integer().
 
 -type pause_type() :: pause | partial.
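+
+%% Illustrative sketch (not part of this change) of the two limiter flavours
+%% above; `Cfg` stands for a limiter_bucket_cfg() map:
+%%
+%%   Bucket = emqx_limiter_bucket_ref:infinity_bucket(),
+%%   %% keeps its own token bucket (optionally backed by a shared bucket):
+%%   Local = emqx_htb_limiter:make_local_limiter(Cfg, Bucket),
+%%   %% holds only a reference to a shared bucket:
+%%   Ref = emqx_htb_limiter:make_ref_limiter(Cfg, Bucket).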
@@ -116,7 +115,7 @@ rate := decimal(), initial := non_neg_integer(), low_watermark := non_neg_integer(), - capacity := decimal(), + burst := decimal(), divisible := boolean(), max_retry_time := non_neg_integer(), failure_strategy := failure_strategy() @@ -134,12 +133,13 @@ %% API %%-------------------------------------------------------------------- %%@doc create a limiter --spec make_token_bucket_limiter(limiter_bucket_cfg(), bucket()) -> _. -make_token_bucket_limiter(Cfg, Bucket) -> +-spec make_local_limiter(limiter_bucket_cfg(), bucket()) -> _. +make_local_limiter(Cfg, Bucket) -> Cfg#{ tokens => emqx_limiter_server:get_initial_val(Cfg), lasttime => ?NOW, - bucket => Bucket + bucket => Bucket, + capacity => emqx_limiter_schema:calc_capacity(Cfg) }. %%@doc create a limiter server's reference @@ -311,8 +311,8 @@ on_failure(throw, Limiter) -> Message = io_lib:format("limiter consume failed, limiter:~p~n", [Limiter]), erlang:throw({rate_check_fail, Message}). --spec do_check_with_parent_limiter(pos_integer(), token_bucket_limiter()) -> - inner_check_result(token_bucket_limiter()). +-spec do_check_with_parent_limiter(pos_integer(), local_limiter()) -> + inner_check_result(local_limiter()). do_check_with_parent_limiter( Need, #{ @@ -335,7 +335,7 @@ do_check_with_parent_limiter( ) end. --spec do_reset(pos_integer(), token_bucket_limiter()) -> inner_check_result(token_bucket_limiter()). +-spec do_reset(pos_integer(), local_limiter()) -> inner_check_result(local_limiter()). do_reset( Need, #{ @@ -375,7 +375,7 @@ return_pause(infinity, PauseType, Fun, Diff, Limiter) -> {PauseType, ?MINIMUM_PAUSE, make_retry_context(Fun, Diff), Limiter}; return_pause(Rate, PauseType, Fun, Diff, Limiter) -> Val = erlang:round(Diff * emqx_limiter_schema:default_period() / Rate), - Pause = emqx_misc:clamp(Val, ?MINIMUM_PAUSE, ?MAXIMUM_PAUSE), + Pause = emqx_utils:clamp(Val, ?MINIMUM_PAUSE, ?MAXIMUM_PAUSE), {PauseType, Pause, make_retry_context(Fun, Diff), Limiter}. -spec make_retry_context(undefined | retry_fun(Limiter), non_neg_integer()) -> diff --git a/apps/emqx/src/emqx_limiter/src/emqx_limiter.app.src b/apps/emqx/src/emqx_limiter/src/emqx_limiter.app.src index 69c1c6fb0..4d3dee84e 100644 --- a/apps/emqx/src/emqx_limiter/src/emqx_limiter.app.src +++ b/apps/emqx/src/emqx_limiter/src/emqx_limiter.app.src @@ -2,7 +2,7 @@ {application, emqx_limiter, [ {description, "EMQX Hierarchical Limiter"}, % strict semver, bump manually! - {vsn, "1.0.0"}, + {vsn, "1.0.1"}, {modules, []}, {registered, [emqx_limiter_sup]}, {applications, [kernel, stdlib, emqx]}, diff --git a/apps/emqx/src/emqx_limiter/src/emqx_limiter_bucket_ref.erl b/apps/emqx/src/emqx_limiter/src/emqx_limiter_bucket_ref.erl index fe30e41e9..139564df7 100644 --- a/apps/emqx/src/emqx_limiter/src/emqx_limiter_bucket_ref.erl +++ b/apps/emqx/src/emqx_limiter/src/emqx_limiter_bucket_ref.erl @@ -23,6 +23,7 @@ %% API -export([ new/3, + infinity_bucket/0, check/3, try_restore/2, available/1 @@ -58,6 +59,10 @@ new(Counter, Index, Rate) -> rate => Rate }. +-spec infinity_bucket() -> bucket_ref(). +infinity_bucket() -> + infinity. 
+ %% @doc check tokens -spec check(pos_integer(), bucket_ref(), Disivisble :: boolean()) -> HasToken :: diff --git a/apps/emqx/src/emqx_limiter/src/emqx_limiter_container.erl b/apps/emqx/src/emqx_limiter/src/emqx_limiter_container.erl index ea02152a9..6a9101a0f 100644 --- a/apps/emqx/src/emqx_limiter/src/emqx_limiter_container.erl +++ b/apps/emqx/src/emqx_limiter/src/emqx_limiter_container.erl @@ -34,16 +34,18 @@ -export_type([container/0, check_result/0]). --type container() :: #{ - limiter_type() => undefined | limiter(), - %% the retry context of the limiter - retry_key() => - undefined - | retry_context() - | future(), - %% the retry context of the container - retry_ctx := undefined | any() -}. +-type container() :: + infinity + | #{ + limiter_type() => undefined | limiter(), + %% the retry context of the limiter + retry_key() => + undefined + | retry_context() + | future(), + %% the retry context of the container + retry_ctx := undefined | any() + }. -type future() :: pos_integer(). -type limiter_id() :: emqx_limiter_schema:limiter_id(). @@ -78,7 +80,20 @@ get_limiter_by_types(Id, Types, BucketCfgs) -> {ok, Limiter} = emqx_limiter_server:connect(Id, Type, BucketCfgs), add_new(Type, Limiter, Acc) end, - lists:foldl(Init, #{retry_ctx => undefined}, Types). + Container = lists:foldl(Init, #{retry_ctx => undefined}, Types), + case + lists:all( + fun(Type) -> + maps:get(Type, Container) =:= infinity + end, + Types + ) + of + true -> + infinity; + _ -> + Container + end. -spec add_new(limiter_type(), limiter(), container()) -> container(). add_new(Type, Limiter, Container) -> @@ -89,11 +104,15 @@ add_new(Type, Limiter, Container) -> %% @doc check the specified limiter -spec check(pos_integer(), limiter_type(), container()) -> check_result(). +check(_Need, _Type, infinity) -> + {ok, infinity}; check(Need, Type, Container) -> check_list([{Need, Type}], Container). %% @doc check multiple limiters -spec check_list(list({pos_integer(), limiter_type()}), container()) -> check_result(). +check_list(_Need, infinity) -> + {ok, infinity}; check_list([{Need, Type} | T], Container) -> Limiter = maps:get(Type, Container), case emqx_htb_limiter:check(Need, Limiter) of @@ -121,11 +140,15 @@ check_list([], Container) -> %% @doc retry the specified limiter -spec retry(limiter_type(), container()) -> check_result(). +retry(_Type, infinity) -> + {ok, infinity}; retry(Type, Container) -> retry_list([Type], Container). %% @doc retry multiple limiters -spec retry_list(list(limiter_type()), container()) -> check_result(). +retry_list(_Types, infinity) -> + {ok, infinity}; retry_list([Type | T], Container) -> Key = ?RETRY_KEY(Type), case Container of diff --git a/apps/emqx/src/emqx_limiter/src/emqx_limiter_manager.erl b/apps/emqx/src/emqx_limiter/src/emqx_limiter_manager.erl index 297bdffb0..afabc2580 100644 --- a/apps/emqx/src/emqx_limiter/src/emqx_limiter_manager.erl +++ b/apps/emqx/src/emqx_limiter/src/emqx_limiter_manager.erl @@ -30,6 +30,12 @@ post_config_update/5 ]). +-export([ + find_root/1, + insert_root/2, + delete_root/1 +]). + -export([ start_server/1, start_server/2, @@ -62,6 +68,7 @@ -define(UID(Id, Type), {Id, Type}). -define(TAB, emqx_limiter_counters). +-define(ROOT_ID, root). %%-------------------------------------------------------------------- %% API @@ -104,15 +111,29 @@ insert_bucket(Id, Type, Bucket) -> ). -spec delete_bucket(limiter_id(), limiter_type()) -> true. -delete_bucket(Type, Id) -> +delete_bucket(Id, Type) -> ets:delete(?TAB, ?UID(Id, Type)). 
+-spec find_root(limiter_type()) ->
+    {ok, bucket_ref()} | undefined.
+find_root(Type) ->
+    find_bucket(?ROOT_ID, Type).
+
+-spec insert_root(
+    limiter_type(),
+    bucket_ref()
+) -> boolean().
+insert_root(Type, Bucket) ->
+    insert_bucket(?ROOT_ID, Type, Bucket).
+
+-spec delete_root(limiter_type()) -> true.
+delete_root(Type) ->
+    delete_bucket(?ROOT_ID, Type).
+
 post_config_update([limiter], _Config, NewConf, _OldConf, _AppEnvs) ->
-    Types = lists:delete(client, maps:keys(NewConf)),
-    _ = [on_post_config_update(Type, NewConf) || Type <- Types],
-    ok;
-post_config_update([limiter, Type], _Config, NewConf, _OldConf, _AppEnvs) ->
-    on_post_config_update(Type, NewConf).
+    Conf = emqx_limiter_schema:convert_node_opts(NewConf),
+    _ = [on_post_config_update(Type, Cfg) || {Type, Cfg} <- maps:to_list(Conf)],
+    ok.
 
 %%--------------------------------------------------------------------
 %% @doc
@@ -256,8 +277,7 @@ format_status(_Opt, Status) ->
 %%--------------------------------------------------------------------
 %% Internal functions
 %%--------------------------------------------------------------------
-on_post_config_update(Type, NewConf) ->
-    Config = maps:get(Type, NewConf),
+on_post_config_update(Type, Config) ->
     case emqx_limiter_server:whereis(Type) of
         undefined ->
             start_server(Type, Config);
diff --git a/apps/emqx/src/emqx_limiter/src/emqx_limiter_schema.erl b/apps/emqx/src/emqx_limiter/src/emqx_limiter_schema.erl
index fa67e1977..667a38396 100644
--- a/apps/emqx/src/emqx_limiter/src/emqx_limiter_schema.erl
+++ b/apps/emqx/src/emqx_limiter/src/emqx_limiter_schema.erl
@@ -24,6 +24,7 @@
     fields/1,
     to_rate/1,
     to_capacity/1,
+    to_burst/1,
     default_period/0,
     to_burst_rate/1,
     to_initial/1,
@@ -31,14 +32,27 @@
     get_bucket_cfg_path/2,
     desc/1,
     types/0,
-    infinity_value/0
+    short_paths/0,
+    calc_capacity/1,
+    extract_with_type/2,
+    default_client_config/0,
+    short_paths_fields/1,
+    get_listener_opts/1,
+    get_node_opts/1,
+    convert_node_opts/1
 ]).
 
 -define(KILOBYTE, 1024).
+-define(LISTENER_BUCKET_KEYS, [
+    bytes,
+    messages,
+    connection,
+    message_routing
+]).
 
 -type limiter_type() ::
-    bytes_in
-    | message_in
+    bytes
+    | messages
     | connection
     | message_routing
     %% internal limiter for unclassified resources
@@ -48,8 +62,10 @@
 -type bucket_name() :: atom().
 -type rate() :: infinity | float().
 -type burst_rate() :: 0 | float().
+%% This is a compatibility type for the deprecated field and type `capacity`.
+-type burst() :: burst_rate().
 %% the capacity of the token bucket
--type capacity() :: non_neg_integer().
+%%-type capacity() :: non_neg_integer().
 %% initial capacity of the token bucket
 -type initial() :: non_neg_integer().
 -type bucket_path() :: list(atom()).
@@ -66,13 +82,13 @@
 -typerefl_from_string({rate/0, ?MODULE, to_rate}).
 -typerefl_from_string({burst_rate/0, ?MODULE, to_burst_rate}).
--typerefl_from_string({capacity/0, ?MODULE, to_capacity}).
+-typerefl_from_string({burst/0, ?MODULE, to_burst}).
 -typerefl_from_string({initial/0, ?MODULE, to_initial}).
 
 -reflect_type([
     rate/0,
     burst_rate/0,
-    capacity/0,
+    burst/0,
     initial/0,
     failure_strategy/0,
     bucket_name/0
@@ -84,58 +100,61 @@
 
 namespace() -> limiter.
 
-roots() -> [limiter].
+roots() ->
+    [
+        {limiter,
+            hoconsc:mk(hoconsc:ref(?MODULE, limiter), #{
+                importance => ?IMPORTANCE_HIDDEN
+            })}
+    ].
 fields(limiter) ->
-    [
-        {Type,
-            ?HOCON(?R_REF(node_opts), #{
-                desc => ?DESC(Type),
-                default => #{}
-            })}
-     || Type <- types()
-    ] ++
+    short_paths_fields(?MODULE) ++
         [
+            {Type,
+                ?HOCON(?R_REF(node_opts), #{
+                    desc => ?DESC(Type),
+                    importance => ?IMPORTANCE_HIDDEN,
+                    required => {false, recursively},
+                    aliases => alias_of_type(Type)
+                })}
+         || Type <- types()
+        ] ++
+        [
+            %% This is an undocumented feature, and it will no longer be supported.
             {client,
                 ?HOCON(
                     ?R_REF(client_fields),
                     #{
                         desc => ?DESC(client),
-                        default => maps:from_list([
-                            {erlang:atom_to_binary(Type), #{}}
-                         || Type <- types()
-                        ])
+                        importance => ?IMPORTANCE_HIDDEN,
+                        required => {false, recursively},
+                        deprecated => {since, "5.0.25"}
                    }
                )}
        ];
 fields(node_opts) ->
     [
-        {rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => "infinity"})},
+        {rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => <<"infinity">>})},
         {burst,
             ?HOCON(burst_rate(), #{
                 desc => ?DESC(burst),
-                default => 0
+                default => <<"0">>
             })}
     ];
 fields(client_fields) ->
-    [
-        {Type,
-            ?HOCON(?R_REF(client_opts), #{
-                desc => ?DESC(Type),
-                default => #{}
-            })}
-     || Type <- types()
-    ];
+    client_fields(types());
 fields(bucket_opts) ->
-    [
-        {rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => "infinity"})},
-        {capacity, ?HOCON(capacity(), #{desc => ?DESC(capacity), default => "infinity"})},
-        {initial, ?HOCON(initial(), #{default => "0", desc => ?DESC(initial)})}
-    ];
+    fields_of_bucket(<<"infinity">>);
 fields(client_opts) ->
     [
-        {rate, ?HOCON(rate(), #{default => "infinity", desc => ?DESC(rate)})},
-        {initial, ?HOCON(initial(), #{default => "0", desc => ?DESC(initial)})},
+        {rate, ?HOCON(rate(), #{default => <<"infinity">>, desc => ?DESC(rate)})},
+        {initial,
+            ?HOCON(initial(), #{
+                default => <<"0">>,
+                desc => ?DESC(initial),
+                importance => ?IMPORTANCE_HIDDEN
+            })},
         %% low_watermark add for emqx_channel and emqx_session
         %% both modules consume first and then check
         %% so we need to use this value to prevent excessive consumption
@@ -145,20 +164,24 @@ fields(client_opts) ->
             initial(),
             #{
                 desc => ?DESC(low_watermark),
-                default => "0"
+                default => <<"0">>,
+                importance => ?IMPORTANCE_HIDDEN
             }
         )},
-        {capacity,
-            ?HOCON(capacity(), #{
-                desc => ?DESC(client_bucket_capacity),
-                default => "infinity"
+        {burst,
+            ?HOCON(burst(), #{
+                desc => ?DESC(burst),
+                default => <<"0">>,
+                importance => ?IMPORTANCE_HIDDEN,
+                aliases => [capacity]
            })},
        {divisible,
            ?HOCON(
                boolean(),
                #{
                    desc => ?DESC(divisible),
-                    default => false
+                    default => false,
+                    importance => ?IMPORTANCE_HIDDEN
                }
            )},
        {max_retry_time,
@@ -166,7 +189,8 @@ fields(client_opts) ->
                emqx_schema:duration(),
                #{
                    desc => ?DESC(max_retry_time),
-                    default => "10s"
+                    default => <<"10s">>,
+                    importance => ?IMPORTANCE_HIDDEN
                }
            )},
        {failure_strategy,
@@ -174,16 +198,25 @@ fields(client_opts) ->
                failure_strategy(),
                #{
                    desc => ?DESC(failure_strategy),
-                    default => force
+                    default => force,
+                    importance => ?IMPORTANCE_HIDDEN
                }
            )}
    ];
 fields(listener_fields) ->
-    bucket_fields([bytes_in, message_in, connection, message_routing], listener_client_fields);
+    composite_bucket_fields(?LISTENER_BUCKET_KEYS, listener_client_fields);
 fields(listener_client_fields) ->
-    client_fields([bytes_in, message_in, connection, message_routing]);
+    client_fields(?LISTENER_BUCKET_KEYS);
 fields(Type) ->
-    bucket_field(Type).
+    simple_bucket_field(Type).
+ +short_paths_fields(DescModule) -> + [ + {Name, + ?HOCON(rate(), #{desc => ?DESC(DescModule, Name), required => false, example => Example})} + || {Name, Example} <- + lists:zip(short_paths(), [<<"1000/s">>, <<"1000/s">>, <<"100MB/s">>]) + ]. desc(limiter) -> "Settings for the rate limiter."; @@ -216,19 +249,84 @@ get_bucket_cfg_path(Type, BucketName) -> [limiter, Type, bucket, BucketName]. types() -> - [bytes_in, message_in, connection, message_routing, internal]. + [bytes, messages, connection, message_routing, internal]. -%%-------------------------------------------------------------------- -%% Internal functions -%%-------------------------------------------------------------------- +short_paths() -> + [max_conn_rate, messages_rate, bytes_rate]. -%% `infinity` to `infinity_value` rules: -%% 1. all infinity capacity will change to infinity_value -%% 2. if the rate of global and bucket both are `infinity`, -%% use `infinity_value` as bucket rate. see `emqx_limiter_server:get_counter_rate/2` -infinity_value() -> - %% 1 TB - 1099511627776. +calc_capacity(#{rate := infinity}) -> + infinity; +calc_capacity(#{rate := Rate, burst := Burst}) -> + erlang:floor(1000 * Rate / default_period()) + Burst. + +extract_with_type(_Type, undefined) -> + undefined; +extract_with_type(Type, #{client := ClientCfg} = BucketCfg) -> + BucketVal = maps:find(Type, BucketCfg), + ClientVal = maps:find(Type, ClientCfg), + merge_client_bucket(Type, ClientVal, BucketVal); +extract_with_type(Type, BucketCfg) -> + BucketVal = maps:find(Type, BucketCfg), + merge_client_bucket(Type, undefined, BucketVal). + +%% Since the client configuration can be absent (undefined), +%% but we still need some basic settings to control the behaviour of the limiter, +%% this helper generates a default client configuration. +%% This is a temporary workaround until we find a better way to simplify it. +default_client_config() -> + #{ + rate => infinity, + initial => 0, + low_watermark => 0, + burst => 0, + divisible => false, + max_retry_time => timer:seconds(10), + failure_strategy => force + }. + +default_bucket_config() -> + #{ + rate => infinity, + burst => 0, + initial => 0 + }. + +get_listener_opts(Conf) -> + Limiter = maps:get(limiter, Conf, undefined), + ShortPaths = maps:with(short_paths(), Conf), + get_listener_opts(Limiter, ShortPaths). + +get_node_opts(Type) -> + Opts = emqx:get_config([limiter, Type], default_bucket_config()), + case type_to_short_path_name(Type) of + undefined -> + Opts; + Name -> + case emqx:get_config([limiter, Name], undefined) of + undefined -> + Opts; + Rate -> + Opts#{rate := Rate} + end + end. + +convert_node_opts(Conf) -> + DefBucket = default_bucket_config(), + ShortPaths = short_paths(), + Fun = fun + %% The `client` in the node options was deprecated + (client, _Value, Acc) -> + Acc; + (Name, Value, Acc) -> + case lists:member(Name, ShortPaths) of + true -> + Type = short_path_name_to_type(Name), + Acc#{Type => DefBucket#{rate => Value}}; + _ -> + Acc#{Name => Value} + end + end, + maps:fold(Fun, #{}, Conf). %%-------------------------------------------------------------------- %% Internal functions %%-------------------------------------------------------------------- to_burst_rate(Str) -> to_rate(Str, false, true).
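A worked sketch of the new `calc_capacity/1` (this assumes `default_period/0` returns 100, i.e. a 100ms token-generation period; the numbers are illustrative only):

%% capacity = floor(1000 * Rate / Period) + Burst
emqx_limiter_schema:calc_capacity(#{rate => 10.0, burst => 50}).
%% -> floor(1000 * 10.0 / 100) + 50 = 150
emqx_limiter_schema:calc_capacity(#{rate => infinity}).
%% -> infinity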
+%% The default value of `capacity` is `infinity`, +%% but we have changed `capacity` to `burst` which should not be `infinity` +%% and its default value is 0, so we should convert `infinity` to 0 +to_burst(Str) -> + case to_rate(Str, true, true) of + {ok, infinity} -> + {ok, 0}; + Any -> + Any + end. + %% rate can be: 10 10MB 10MB/s 10MB/2s infinity %% e.g. the bytes_in regex tree is: %% @@ -321,7 +430,7 @@ to_quota(Str, Regex) -> {match, [Quota, ""]} -> {ok, erlang:list_to_integer(Quota)}; {match, ""} -> - {ok, infinity_value()}; + {ok, infinity}; _ -> {error, Str} end @@ -336,7 +445,8 @@ apply_unit("mb", Val) -> Val * ?KILOBYTE * ?KILOBYTE; apply_unit("gb", Val) -> Val * ?KILOBYTE * ?KILOBYTE * ?KILOBYTE; apply_unit(Unit, _) -> throw("invalid unit:" ++ Unit). -bucket_field(Type) when is_atom(Type) -> +%% A bucket with only one type +simple_bucket_field(Type) when is_atom(Type) -> fields(bucket_opts) ++ [ {client, @@ -344,16 +454,22 @@ bucket_field(Type) when is_atom(Type) -> ?R_REF(?MODULE, client_opts), #{ desc => ?DESC(client), - required => false + required => {false, recursively}, + importance => importance_of_type(Type), + aliases => alias_of_type(Type) } )} ]. -bucket_fields(Types, ClientRef) -> + +%% A bucket with multi types +composite_bucket_fields(Types, ClientRef) -> [ {Type, ?HOCON(?R_REF(?MODULE, bucket_opts), #{ desc => ?DESC(?MODULE, Type), - required => false + required => {false, recursively}, + importance => importance_of_type(Type), + aliases => alias_of_type(Type) })} || Type <- Types ] ++ @@ -363,17 +479,101 @@ bucket_fields(Types, ClientRef) -> ?R_REF(?MODULE, ClientRef), #{ desc => ?DESC(client), - required => false + required => {false, recursively} } )} ]. +fields_of_bucket(Default) -> + [ + {rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => Default})}, + {burst, + ?HOCON(burst(), #{ + desc => ?DESC(burst), + default => <<"0">>, + importance => ?IMPORTANCE_HIDDEN, + aliases => [capacity] + })}, + {initial, + ?HOCON(initial(), #{ + default => <<"0">>, + desc => ?DESC(initial), + importance => ?IMPORTANCE_HIDDEN + })} + ]. + client_fields(Types) -> [ {Type, ?HOCON(?R_REF(client_opts), #{ desc => ?DESC(Type), - required => false + required => false, + importance => importance_of_type(Type), + aliases => alias_of_type(Type) })} || Type <- Types ]. + +importance_of_type(interval) -> + ?IMPORTANCE_HIDDEN; +importance_of_type(message_routing) -> + ?IMPORTANCE_HIDDEN; +importance_of_type(connection) -> + ?IMPORTANCE_HIDDEN; +importance_of_type(_) -> + ?DEFAULT_IMPORTANCE. + +alias_of_type(messages) -> + [message_in]; +alias_of_type(bytes) -> + [bytes_in]; +alias_of_type(_) -> + []. + +merge_client_bucket(Type, {ok, ClientVal}, {ok, BucketVal}) -> + #{Type => BucketVal, client => #{Type => ClientVal}}; +merge_client_bucket(Type, {ok, ClientVal}, _) -> + #{client => #{Type => ClientVal}}; +merge_client_bucket(Type, _, {ok, BucketVal}) -> + #{Type => BucketVal}; +merge_client_bucket(_, _, _) -> + undefined. + +short_path_name_to_type(max_conn_rate) -> + connection; +short_path_name_to_type(messages_rate) -> + messages; +short_path_name_to_type(bytes_rate) -> + bytes. + +type_to_short_path_name(connection) -> + max_conn_rate; +type_to_short_path_name(messages) -> + messages_rate; +type_to_short_path_name(bytes) -> + bytes_rate; +type_to_short_path_name(_) -> + undefined. 
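For illustration, the compatibility behaviour of `to_burst/1` in a minimal sketch (any other valid rate string parses just as for `to_rate/1`):

{ok, 0} = emqx_limiter_schema:to_burst("infinity"),
{ok, _} = emqx_limiter_schema:to_burst("100MB").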
+ +get_listener_opts(Limiter, ShortPaths) when map_size(ShortPaths) =:= 0 -> + Limiter; +get_listener_opts(undefined, ShortPaths) -> + convert_listener_short_paths(ShortPaths); +get_listener_opts(Limiter, ShortPaths) -> + Shorts = convert_listener_short_paths(ShortPaths), + emqx_utils_maps:deep_merge(Limiter, Shorts). + +convert_listener_short_paths(ShortPaths) -> + DefBucket = default_bucket_config(), + DefClient = default_client_config(), + Fun = fun(Name, Rate, Acc) -> + Type = short_path_name_to_type(Name), + case Name of + max_conn_rate -> + Acc#{Type => DefBucket#{rate => Rate}}; + _ -> + Client = maps:get(client, Acc, #{}), + Acc#{client => Client#{Type => DefClient#{rate => Rate}}} + end + end, + maps:fold(Fun, #{}, ShortPaths). diff --git a/apps/emqx/src/emqx_limiter/src/emqx_limiter_server.erl b/apps/emqx/src/emqx_limiter/src/emqx_limiter_server.erl index 44663ceeb..488f47851 100644 --- a/apps/emqx/src/emqx_limiter/src/emqx_limiter_server.erl +++ b/apps/emqx/src/emqx_limiter/src/emqx_limiter_server.erl @@ -59,7 +59,8 @@ burst := rate(), %% token generation interval(second) period := pos_integer(), - produced := float() + produced := float(), + correction := emqx_limiter_decimal:zero_or_float() }. -type bucket() :: #{ @@ -98,6 +99,7 @@ %% minimum coefficient for overloaded limiter -define(OVERLOAD_MIN_ALLOC, 0.3). -define(COUNTER_SIZE, 8). +-define(ROOT_COUNTER_IDX, 1). -export_type([index/0]). -import(emqx_limiter_decimal, [add/2, sub/2, mul/2, put_to_counter/3]). @@ -110,40 +112,24 @@ -spec connect( limiter_id(), limiter_type(), - bucket_name() | #{limiter_type() => bucket_name() | undefined} + hocons:config() | undefined ) -> {ok, emqx_htb_limiter:limiter()} | {error, _}. -%% If no bucket path is set in config, there will be no limit -connect(_Id, _Type, undefined) -> - {ok, emqx_htb_limiter:make_infinity_limiter()}; +%% undefined is the default: no limiter is configured +connect(Id, Type, undefined) -> + create_limiter(Id, Type, undefined, undefined); +connect(Id, Type, #{rate := _} = Cfg) -> + create_limiter(Id, Type, maps:get(client, Cfg, undefined), Cfg); connect(Id, Type, Cfg) -> - case find_limiter_cfg(Type, Cfg) of - {undefined, _} -> - {ok, emqx_htb_limiter:make_infinity_limiter()}; - { - #{ - rate := BucketRate, - capacity := BucketSize - }, - #{rate := CliRate, capacity := CliSize} = ClientCfg - } -> - case emqx_limiter_manager:find_bucket(Id, Type) of - {ok, Bucket} -> - {ok, - if - CliRate < BucketRate orelse CliSize < BucketSize -> - emqx_htb_limiter:make_token_bucket_limiter(ClientCfg, Bucket); - true -> - emqx_htb_limiter:make_ref_limiter(ClientCfg, Bucket) - end}; - undefined -> - ?SLOG(error, #{msg => "bucket_not_found", type => Type, id => Id}), - {error, invalid_bucket} - end - end. + create_limiter( + Id, + Type, + emqx_utils_maps:deep_get([client, Type], Cfg, undefined), + maps:get(Type, Cfg, undefined) + ). -spec add_bucket(limiter_id(), limiter_type(), hocons:config() | undefined) -> ok. -add_bucket(_Id, _Type, undefine) -> +add_bucket(_Id, _Type, undefined) -> ok; add_bucket(Id, Type, Cfg) -> ?CALL(Type, {add_bucket, Id, Cfg}). @@ -281,7 +267,8 @@ handle_info(Info, State) -> Reason :: normal | shutdown | {shutdown, term()} | term(), State :: term() ) -> any(). -terminate(_Reason, _State) -> +terminate(_Reason, #{type := Type}) -> + emqx_limiter_manager:delete_root(Type), ok.
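A minimal sketch of how the listener short paths are expanded by `convert_listener_short_paths/1` (values shown as already parsed by the schema; `max_conn_rate` becomes a `connection` bucket, while `messages_rate` and `bytes_rate` become client-side limits seeded from the default configs above):

emqx_limiter_schema:get_listener_opts(#{max_conn_rate => 1000.0, messages_rate => 10.0}).
%% ->
%% #{
%%     connection => #{rate => 1000.0, burst => 0, initial => 0},
%%     client => #{
%%         messages => #{
%%             rate => 10.0, burst => 0, initial => 0, low_watermark => 0,
%%             divisible => false, max_retry_time => 10000, failure_strategy => force
%%         }
%%     }
%% }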
%%-------------------------------------------------------------------- @@ -336,10 +323,14 @@ oscillation( oscillate(Interval), Ordereds = get_ordered_buckets(Buckets), {Alloced, Buckets2} = transverse(Ordereds, Flow, 0.0, Buckets), - maybe_burst(State#{ - buckets := Buckets2, - root := Root#{produced := Produced + Alloced} - }). + State2 = maybe_adjust_root_tokens( + State#{ + buckets := Buckets2, + root := Root#{produced := Produced + Alloced} + }, + Alloced + ), + maybe_burst(State2). %% @doc horizontal spread -spec transverse( @@ -412,6 +403,24 @@ get_ordered_buckets(Buckets) -> Buckets ). +-spec maybe_adjust_root_tokens(state(), float()) -> state(). +maybe_adjust_root_tokens(#{root := #{rate := infinity}} = State, _Alloced) -> + State; +maybe_adjust_root_tokens(#{root := #{rate := Rate}} = State, Alloced) when Alloced >= Rate -> + State; +maybe_adjust_root_tokens(#{root := #{rate := Rate} = Root, counter := Counter} = State, Alloced) -> + InFlow = Rate - Alloced, + Token = counters:get(Counter, ?ROOT_COUNTER_IDX), + case Token >= Rate of + true -> + State; + _ -> + Available = erlang:min(Rate - Token, InFlow), + {Inc, Root2} = emqx_limiter_correction:add(Available, Root), + counters:add(Counter, ?ROOT_COUNTER_IDX, Inc), + State#{root := Root2} + end. + -spec maybe_burst(state()) -> state(). maybe_burst( #{ @@ -472,15 +481,19 @@ dispatch_burst_to_buckets([], _, Alloced, Buckets) -> -spec init_tree(emqx_limiter_schema:limiter_type()) -> state(). init_tree(Type) when is_atom(Type) -> - Cfg = emqx:get_config([limiter, Type]), + Cfg = emqx_limiter_schema:get_node_opts(Type), init_tree(Type, Cfg). -init_tree(Type, Cfg) -> +init_tree(Type, #{rate := Rate} = Cfg) -> + Counter = counters:new(?COUNTER_SIZE, [write_concurrency]), + RootBucket = emqx_limiter_bucket_ref:new(Counter, ?ROOT_COUNTER_IDX, Rate), + emqx_limiter_manager:insert_root(Type, RootBucket), #{ type => Type, root => make_root(Cfg), - counter => counters:new(?COUNTER_SIZE, [write_concurrency]), - index => 0, + counter => Counter, + %% The first slot is reserved for the root + index => ?ROOT_COUNTER_IDX, buckets => #{} }. @@ -490,15 +503,18 @@ make_root(#{rate := Rate, burst := Burst}) -> rate => Rate, burst => Burst, period => emqx_limiter_schema:default_period(), - produced => 0.0 + produced => 0.0, + correction => 0 }. -do_add_bucket(Id, #{rate := Rate, capacity := Capacity} = Cfg, #{buckets := Buckets} = State) -> +do_add_bucket(_Id, #{rate := infinity}, #{root := #{rate := infinity}} = State) -> + State; +do_add_bucket(Id, #{rate := Rate} = Cfg, #{buckets := Buckets} = State) -> case maps:get(Id, Buckets, undefined) of undefined -> make_bucket(Id, Cfg, State); Bucket -> - Bucket2 = Bucket#{rate := Rate, capacity := Capacity}, + Bucket2 = Bucket#{rate := Rate, capacity := emqx_limiter_schema:calc_capacity(Cfg)}, State#{buckets := Buckets#{Id := Bucket2}} end. 
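A worked numeric sketch of `maybe_adjust_root_tokens/2`:

%% Root rate = 100 tokens/interval, 30 tokens allocated to buckets this round,
%% 50 tokens currently left in the root counter:
%%   InFlow    = 100 - 30 = 70
%%   Token     = 50 < 100, so the root gets topped up
%%   Available = min(100 - 50, 70) = 50
%% The counter is incremented by 50 (via emqx_limiter_correction:add/2, which
%% carries the fractional remainder), so the root never holds more tokens than
%% its configured rate.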
@@ -509,7 +525,7 @@ make_bucket(Id, Cfg, #{index := ?COUNTER_SIZE} = State) -> }); make_bucket( Id, - #{rate := Rate, capacity := Capacity} = Cfg, + #{rate := Rate} = Cfg, #{type := Type, counter := Counter, index := Index, buckets := Buckets} = State ) -> NewIndex = Index + 1, @@ -519,7 +535,7 @@ make_bucket( rate => Rate, obtained => Initial, correction => 0, - capacity => Capacity, + capacity => emqx_limiter_schema:calc_capacity(Cfg), counter => Counter, index => NewIndex }, @@ -541,19 +557,14 @@ do_del_bucket(Id, #{type := Type, buckets := Buckets} = State) -> get_initial_val( #{ initial := Initial, - rate := Rate, - capacity := Capacity + rate := Rate } ) -> - %% initial will nevner be infinity(see the emqx_limiter_schema) - InfVal = emqx_limiter_schema:infinity_value(), if Initial > 0 -> Initial; Rate =/= infinity -> - erlang:min(Rate, Capacity); - Capacity =/= infinity andalso Capacity =/= InfVal -> - Capacity; + Rate; true -> 0 end. @@ -567,21 +578,58 @@ call(Type, Msg) -> gen_server:call(Pid, Msg) end. -find_limiter_cfg(Type, #{rate := _} = Cfg) -> - {Cfg, find_client_cfg(Type, maps:get(client, Cfg, undefined))}; -find_limiter_cfg(Type, Cfg) -> - { - maps:get(Type, Cfg, undefined), - find_client_cfg(Type, emqx_map_lib:deep_get([client, Type], Cfg, undefined)) - }. +create_limiter(Id, Type, #{rate := Rate} = ClientCfg, BucketCfg) when Rate =/= infinity -> + create_limiter_with_client(Id, Type, ClientCfg, BucketCfg); +create_limiter(Id, Type, _, BucketCfg) -> + create_limiter_without_client(Id, Type, BucketCfg). -find_client_cfg(Type, BucketCfg) -> - NodeCfg = emqx:get_config([limiter, client, Type], undefined), - merge_client_cfg(NodeCfg, BucketCfg). +%% create a limiter with the client-level configuration +create_limiter_with_client(Id, Type, ClientCfg, BucketCfg) -> + case find_referenced_bucket(Id, Type, BucketCfg) of + false -> + {ok, emqx_htb_limiter:make_local_limiter(ClientCfg, infinity)}; + {ok, Bucket, RefCfg} -> + create_limiter_with_ref(Bucket, ClientCfg, RefCfg); + Error -> + Error + end. -merge_client_cfg(undefined, BucketCfg) -> - BucketCfg; -merge_client_cfg(NodeCfg, undefined) -> - NodeCfg; -merge_client_cfg(NodeCfg, BucketCfg) -> - maps:merge(NodeCfg, BucketCfg). +%% create a limiter only with the referenced configuration +create_limiter_without_client(Id, Type, BucketCfg) -> + case find_referenced_bucket(Id, Type, BucketCfg) of + false -> + {ok, emqx_htb_limiter:make_infinity_limiter()}; + {ok, Bucket, RefCfg} -> + ClientCfg = emqx_limiter_schema:default_client_config(), + create_limiter_with_ref(Bucket, ClientCfg, RefCfg); + Error -> + Error + end. + +create_limiter_with_ref( + Bucket, + #{rate := CliRate} = ClientCfg, + #{rate := RefRate} +) when CliRate < RefRate -> + {ok, emqx_htb_limiter:make_local_limiter(ClientCfg, Bucket)}; +create_limiter_with_ref(Bucket, ClientCfg, _) -> + {ok, emqx_htb_limiter:make_ref_limiter(ClientCfg, Bucket)}. + +%% this is a listener(server)-level reference +find_referenced_bucket(Id, Type, #{rate := Rate} = Cfg) when Rate =/= infinity -> + case emqx_limiter_manager:find_bucket(Id, Type) of + {ok, Bucket} -> + {ok, Bucket, Cfg}; + _ -> + ?SLOG(error, #{msg => "bucket not found", type => Type, id => Id}), + {error, invalid_bucket} + end; +%% this is a node-level reference +find_referenced_bucket(_Id, Type, _) -> + case emqx_limiter_schema:get_node_opts(Type) of + #{rate := infinity} -> + false; + NodeCfg -> + {ok, Bucket} = emqx_limiter_manager:find_root(Type), + {ok, Bucket, NodeCfg} + end. 
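To summarize the new connect/create_limiter path, a sketch of the possible outcomes (assuming the referenced bucket lookup succeeds):

%% client rate           | referenced bucket           | resulting limiter
%% ----------------------+-----------------------------+----------------------------------------------------------
%% absent or infinity    | none (node rate = infinity) | emqx_htb_limiter:make_infinity_limiter()
%% finite                | none (node rate = infinity) | emqx_htb_limiter:make_local_limiter(ClientCfg, infinity)
%% finite, < bucket rate | listener- or node-level     | emqx_htb_limiter:make_local_limiter(ClientCfg, Bucket)
%% otherwise             | listener- or node-level     | emqx_htb_limiter:make_ref_limiter(ClientCfg, Bucket)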
diff --git a/apps/emqx/src/emqx_limiter/src/emqx_limiter_server_sup.erl b/apps/emqx/src/emqx_limiter/src/emqx_limiter_server_sup.erl index cba11ede2..be9b62d01 100644 --- a/apps/emqx/src/emqx_limiter/src/emqx_limiter_server_sup.erl +++ b/apps/emqx/src/emqx_limiter/src/emqx_limiter_server_sup.erl @@ -86,7 +86,7 @@ init([]) -> %% Internal functions %%--================================================================== make_child(Type) -> - Cfg = emqx:get_config([limiter, Type]), + Cfg = emqx_limiter_schema:get_node_opts(Type), make_child(Type, Cfg). make_child(Type, Cfg) -> diff --git a/apps/emqx/src/emqx_listeners.erl b/apps/emqx/src/emqx_listeners.erl index fb6096e80..2b80000dc 100644 --- a/apps/emqx/src/emqx_listeners.erl +++ b/apps/emqx/src/emqx_listeners.erl @@ -20,6 +20,7 @@ -elvis([{elvis_style, dont_repeat_yourself, #{min_complexity => 10000}}]). -include("emqx_mqtt.hrl"). +-include("emqx_schema.hrl"). -include("logger.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). @@ -33,7 +34,9 @@ is_running/1, current_conns/2, max_conns/2, - id_example/0 + id_example/0, + default_max_conn/0, + shutdown_count/2 ]). -export([ @@ -57,8 +60,15 @@ -export([format_bind/1]). +-ifdef(TEST). +-export([certs_dir/2]). +-endif. + +-type listener_id() :: atom() | binary(). + -define(CONF_KEY_PATH, [listeners, '?', '?']). -define(TYPES_STRING, ["tcp", "ssl", "ws", "wss", "quic"]). +-define(MARK_DEL, ?TOMBSTONE_CONFIG_CHANGE_REQ). -spec id_example() -> atom(). id_example() -> 'tcp:default'. @@ -68,9 +78,7 @@ id_example() -> 'tcp:default'. list_raw() -> [ {listener_id(Type, LName), Type, LConf} - || %% FIXME: quic is not supported update vi dashboard yet - {Type, LName, LConf} <- do_list_raw(), - Type =/= <<"quic">> + || {Type, LName, LConf} <- do_list_raw() ]. list() -> @@ -103,19 +111,22 @@ do_list_raw() -> format_raw_listeners({Type0, Conf}) -> Type = binary_to_atom(Type0), - lists:map( - fun({LName, LConf0}) when is_map(LConf0) -> - Bind = parse_bind(LConf0), - Running = is_running(Type, listener_id(Type, LName), LConf0#{bind => Bind}), - LConf1 = maps:remove(<<"authentication">>, LConf0), - LConf3 = maps:put(<<"running">>, Running, LConf1), - CurrConn = - case Running of - true -> current_conns(Type, LName, Bind); - false -> 0 - end, - LConf4 = maps:put(<<"current_connections">>, CurrConn, LConf3), - {Type0, LName, LConf4} + lists:filtermap( + fun + ({LName, LConf0}) when is_map(LConf0) -> + Bind = parse_bind(LConf0), + Running = is_running(Type, listener_id(Type, LName), LConf0#{bind => Bind}), + LConf1 = maps:remove(<<"authentication">>, LConf0), + LConf2 = maps:put(<<"running">>, Running, LConf1), + CurrConn = + case Running of + true -> current_conns(Type, LName, Bind); + false -> 0 + end, + LConf = maps:put(<<"current_connections">>, CurrConn, LConf2), + {true, {Type0, LName, LConf}}; + ({_LName, _MarkDel}) -> + false end, maps:to_list(Conf) ). @@ -166,6 +177,11 @@ current_conns(Type, Name, ListenOn) when Type == tcp; Type == ssl -> esockd:get_current_connections({listener_id(Type, Name), ListenOn}); current_conns(Type, Name, _ListenOn) when Type =:= ws; Type =:= wss -> proplists:get_value(all_connections, ranch:info(listener_id(Type, Name))); +current_conns(quic, _Name, _ListenOn) -> + case quicer:perf_counters() of + {ok, PerfCnts} -> proplists:get_value(conn_active, PerfCnts); + _ -> 0 + end; current_conns(_, _, _) -> {error, not_support}. @@ -180,6 +196,17 @@ max_conns(Type, Name, _ListenOn) when Type =:= ws; Type =:= wss -> max_conns(_, _, _) -> {error, not_support}. 
+shutdown_count(ID, ListenOn) -> + {ok, #{type := Type, name := Name}} = parse_listener_id(ID), + shutdown_count(Type, Name, ListenOn). + +shutdown_count(Type, Name, ListenOn) when Type == tcp; Type == ssl -> + esockd:get_shutdown_count({listener_id(Type, Name), ListenOn}); +shutdown_count(Type, _Name, _ListenOn) when Type =:= ws; Type =:= wss -> + []; +shutdown_count(_, _, _) -> + {error, not_support}. + %% @doc Start all listeners. -spec start() -> ok. start() -> @@ -188,7 +215,7 @@ start() -> ok = emqx_config_handler:add_handler(?CONF_KEY_PATH, ?MODULE), foreach_listeners(fun start_listener/3). --spec start_listener(atom()) -> ok | {error, term()}. +-spec start_listener(listener_id()) -> ok | {error, term()}. start_listener(ListenerId) -> apply_on_listener(ListenerId, fun start_listener/3). @@ -239,7 +266,7 @@ start_listener(Type, ListenerName, #{bind := Bind} = Conf) -> restart() -> foreach_listeners(fun restart_listener/3). --spec restart_listener(atom()) -> ok | {error, term()}. +-spec restart_listener(listener_id()) -> ok | {error, term()}. restart_listener(ListenerId) -> apply_on_listener(ListenerId, fun restart_listener/3). @@ -264,7 +291,7 @@ stop() -> _ = emqx_config_handler:remove_handler(?CONF_KEY_PATH), foreach_listeners(fun stop_listener/3). --spec stop_listener(atom()) -> ok | {error, term()}. +-spec stop_listener(listener_id()) -> ok | {error, term()}. stop_listener(ListenerId) -> apply_on_listener(ListenerId, fun stop_listener/3). @@ -320,7 +347,8 @@ do_start_listener(Type, ListenerName, #{bind := ListenOn} = Opts) when Type == tcp; Type == ssl -> Id = listener_id(Type, ListenerName), - add_limiter_bucket(Id, Opts), + Limiter = limiter(Opts), + add_limiter_bucket(Id, Limiter), esockd:open( Id, ListenOn, @@ -329,7 +357,7 @@ do_start_listener(Type, ListenerName, #{bind := ListenOn} = Opts) when #{ listener => {Type, ListenerName}, zone => zone(Opts), - limiter => limiter(Opts), + limiter => Limiter, enable_authn => enable_authn(Opts) } ]} @@ -339,9 +367,10 @@ do_start_listener(Type, ListenerName, #{bind := ListenOn} = Opts) when Type == ws; Type == wss -> Id = listener_id(Type, ListenerName), - add_limiter_bucket(Id, Opts), + Limiter = limiter(Opts), + add_limiter_bucket(Id, Limiter), RanchOpts = ranch_opts(Type, ListenOn, Opts), - WsOpts = ws_opts(Type, ListenerName, Opts), + WsOpts = ws_opts(Type, ListenerName, Opts, Limiter), case Type of ws -> cowboy:start_clear(Id, RanchOpts, WsOpts); wss -> cowboy:start_tls(Id, RanchOpts, WsOpts) @@ -363,38 +392,60 @@ do_start_listener(quic, ListenerName, #{bind := Bind} = Opts) -> case [A || {quicer, _, _} = A <- application:which_applications()] of [_] -> DefAcceptors = erlang:system_info(schedulers_online) * 8, - ListenOpts = [ - {cert, maps:get(certfile, Opts)}, - {key, maps:get(keyfile, Opts)}, - {alpn, ["mqtt"]}, - {conn_acceptors, lists:max([DefAcceptors, maps:get(acceptors, Opts, 0)])}, - {keep_alive_interval_ms, maps:get(keep_alive_interval, Opts, 0)}, - {idle_timeout_ms, maps:get(idle_timeout, Opts, 0)}, - {handshake_idle_timeout_ms, maps:get(handshake_idle_timeout, Opts, 10000)}, - {server_resumption_level, 2} - ], + SSLOpts = maps:merge( + maps:with([certfile, keyfile], Opts), + maps:get(ssl_options, Opts, #{}) + ), + ListenOpts = + [ + {certfile, str(maps:get(certfile, SSLOpts))}, + {keyfile, str(maps:get(keyfile, SSLOpts))}, + {alpn, ["mqtt"]}, + {conn_acceptors, lists:max([DefAcceptors, maps:get(acceptors, Opts, 0)])}, + {keep_alive_interval_ms, maps:get(keep_alive_interval, Opts, 0)}, + {idle_timeout_ms, 
maps:get(idle_timeout, Opts, 0)}, + {handshake_idle_timeout_ms, maps:get(handshake_idle_timeout, Opts, 10000)}, + {server_resumption_level, maps:get(server_resumption_level, Opts, 2)}, + {verify, maps:get(verify, SSLOpts, verify_none)} + ] ++ + case maps:get(cacertfile, SSLOpts, undefined) of + undefined -> []; + CaCertFile -> [{cacertfile, str(CaCertFile)}] + end ++ + case maps:get(password, SSLOpts, undefined) of + undefined -> []; + Password -> [{password, str(Password)}] + end ++ + optional_quic_listener_opts(Opts), + Limiter = limiter(Opts), ConnectionOpts = #{ conn_callback => emqx_quic_connection, - peer_unidi_stream_count => 1, - peer_bidi_stream_count => 10, + peer_unidi_stream_count => maps:get(peer_unidi_stream_count, Opts, 1), + peer_bidi_stream_count => maps:get(peer_bidi_stream_count, Opts, 10), zone => zone(Opts), listener => {quic, ListenerName}, - limiter => limiter(Opts) + limiter => Limiter }, - StreamOpts = [{stream_callback, emqx_quic_stream}], + StreamOpts = #{ + stream_callback => emqx_quic_stream, + active => 1 + }, + Id = listener_id(quic, ListenerName), - add_limiter_bucket(Id, Opts), + add_limiter_bucket(Id, Limiter), quicer:start_listener( Id, ListenOn, - {ListenOpts, ConnectionOpts, StreamOpts} + {maps:from_list(ListenOpts), ConnectionOpts, StreamOpts} ); [] -> {ok, {skipped, quic_app_missing}} end. %% Update the listeners at runtime -pre_config_update([listeners, Type, Name], {create, NewConf}, undefined) -> +pre_config_update([listeners, Type, Name], {create, NewConf}, V) when + V =:= undefined orelse V =:= ?TOMBSTONE_VALUE +-> CertsDir = certs_dir(Type, Name), {ok, convert_certs(CertsDir, NewConf)}; pre_config_update([listeners, _Type, _Name], {create, _NewConf}, _RawConf) -> @@ -402,26 +453,31 @@ pre_config_update([listeners, _Type, _Name], {create, _NewConf}, _RawConf) -> pre_config_update([listeners, _Type, _Name], {update, _Request}, undefined) -> {error, not_found}; pre_config_update([listeners, Type, Name], {update, Request}, RawConf) -> - NewConfT = emqx_map_lib:deep_merge(RawConf, Request), + NewConfT = emqx_utils_maps:deep_merge(RawConf, Request), NewConf = ensure_override_limiter_conf(NewConfT, Request), CertsDir = certs_dir(Type, Name), {ok, convert_certs(CertsDir, NewConf)}; pre_config_update([listeners, _Type, _Name], {action, _Action, Updated}, RawConf) -> - NewConf = emqx_map_lib:deep_merge(RawConf, Updated), + NewConf = emqx_utils_maps:deep_merge(RawConf, Updated), {ok, NewConf}; +pre_config_update([listeners, _Type, _Name], ?MARK_DEL, _RawConf) -> + {ok, ?TOMBSTONE_VALUE}; pre_config_update(_Path, _Request, RawConf) -> {ok, RawConf}. 
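For illustration, the new delete/recreate flow with tombstones (a sketch):

%% deleting a listener now writes a tombstone instead of removing the entry:
{ok, ?TOMBSTONE_VALUE} =
    pre_config_update([listeners, tcp, default], ?MARK_DEL, RawConf),
%% ...and a later create over a tombstone is accepted as if the entry were absent:
{ok, _} = pre_config_update([listeners, tcp, default], {create, NewConf}, ?TOMBSTONE_VALUE).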
post_config_update([listeners, Type, Name], {create, _Request}, NewConf, undefined, _AppEnvs) -> start_listener(Type, Name, NewConf); post_config_update([listeners, Type, Name], {update, _Request}, NewConf, OldConf, _AppEnvs) -> + try_clear_ssl_files(certs_dir(Type, Name), NewConf, OldConf), + ok = maybe_unregister_ocsp_stapling_refresh(Type, Name, NewConf), case NewConf of #{enabled := true} -> restart_listener(Type, Name, {OldConf, NewConf}); _ -> ok end; -post_config_update([listeners, _Type, _Name], '$remove', undefined, undefined, _AppEnvs) -> - ok; -post_config_update([listeners, Type, Name], '$remove', undefined, OldConf, _AppEnvs) -> +post_config_update([listeners, Type, Name], Op, _, OldConf, _AppEnvs) when + Op =:= ?MARK_DEL andalso is_map(OldConf) +-> + ok = unregister_ocsp_stapling_refresh(Type, Name), case stop_listener(Type, Name, OldConf) of ok -> _ = emqx_authentication:delete_chain(listener_id(Type, Name)), @@ -434,10 +490,18 @@ post_config_update([listeners, Type, Name], {action, _Action, _}, NewConf, OldCo #{enabled := NewEnabled} = NewConf, #{enabled := OldEnabled} = OldConf, case {NewEnabled, OldEnabled} of - {true, true} -> restart_listener(Type, Name, {OldConf, NewConf}); - {true, false} -> start_listener(Type, Name, NewConf); - {false, true} -> stop_listener(Type, Name, OldConf); - {false, false} -> stop_listener(Type, Name, OldConf) + {true, true} -> + ok = maybe_unregister_ocsp_stapling_refresh(Type, Name, NewConf), + restart_listener(Type, Name, {OldConf, NewConf}); + {true, false} -> + ok = maybe_unregister_ocsp_stapling_refresh(Type, Name, NewConf), + start_listener(Type, Name, NewConf); + {false, true} -> + ok = unregister_ocsp_stapling_refresh(Type, Name), + stop_listener(Type, Name, OldConf); + {false, false} -> + ok = unregister_ocsp_stapling_refresh(Type, Name), + stop_listener(Type, Name, OldConf) end; post_config_update(_Path, _Request, _NewConf, _OldConf, _AppEnvs) -> ok. @@ -446,7 +510,7 @@ esockd_opts(ListenerId, Type, Opts0) -> Opts1 = maps:with([acceptors, max_connections, proxy_protocol, proxy_protocol_timeout], Opts0), Limiter = limiter(Opts0), Opts2 = - case maps:get(connection, Limiter, undefined) of + case emqx_limiter_schema:extract_with_type(connection, Limiter) of undefined -> Opts1; BucketCfg -> @@ -462,17 +526,22 @@ esockd_opts(ListenerId, Type, Opts0) -> }, maps:to_list( case Type of - tcp -> Opts3#{tcp_options => tcp_opts(Opts0)}; - ssl -> Opts3#{ssl_options => ssl_opts(Opts0), tcp_options => tcp_opts(Opts0)} + tcp -> + Opts3#{tcp_options => tcp_opts(Opts0)}; + ssl -> + OptsWithCRL = inject_crl_config(Opts0), + OptsWithSNI = inject_sni_fun(ListenerId, OptsWithCRL), + SSLOpts = ssl_opts(OptsWithSNI), + Opts3#{ssl_options => SSLOpts, tcp_options => tcp_opts(Opts0)} end ). 
-ws_opts(Type, ListenerName, Opts) -> +ws_opts(Type, ListenerName, Opts, Limiter) -> WsPaths = [ - {emqx_map_lib:deep_get([websocket, mqtt_path], Opts, "/mqtt"), emqx_ws_connection, #{ + {emqx_utils_maps:deep_get([websocket, mqtt_path], Opts, "/mqtt"), emqx_ws_connection, #{ zone => zone(Opts), listener => {Type, ListenerName}, - limiter => limiter(Opts), + limiter => Limiter, enable_authn => enable_authn(Opts) }} ], @@ -507,7 +576,7 @@ esockd_access_rules(StrRules) -> [A, CIDR] = string:tokens(S, " "), %% esockd rules only use words 'allow' and 'deny', both are existing %% comparison of strings may be better, but there is a loss of backward compatibility - case emqx_misc:safe_to_existing_atom(A) of + case emqx_utils:safe_to_existing_atom(A) of {ok, Action} -> [ { @@ -529,7 +598,7 @@ esockd_access_rules(StrRules) -> merge_default(Options) -> case lists:keytake(tcp_options, 1, Options) of {value, {tcp_options, TcpOpts}, Options1} -> - [{tcp_options, emqx_misc:merge_opts(?MQTT_SOCKOPTS, TcpOpts)} | Options1]; + [{tcp_options, emqx_utils:merge_opts(?MQTT_SOCKOPTS, TcpOpts)} | Options1]; false -> [{tcp_options, ?MQTT_SOCKOPTS} | Options] end. @@ -570,6 +639,7 @@ format_bind(Bin) when is_binary(Bin) -> listener_id(Type, ListenerName) -> list_to_atom(lists:append([str(Type), ":", str(ListenerName)])). +-spec parse_listener_id(listener_id()) -> {ok, #{type => atom(), name => atom()}} | {error, term()}. parse_listener_id(Id) -> case string:split(str(Id), ":", leading) of [Type, Name] -> @@ -585,28 +655,31 @@ zone(Opts) -> maps:get(zone, Opts, undefined). limiter(Opts) -> - maps:get(limiter, Opts, #{}). + emqx_limiter_schema:get_listener_opts(Opts). -add_limiter_bucket(Id, #{limiter := Limiter}) -> +add_limiter_bucket(_Id, undefined) -> + ok; +add_limiter_bucket(Id, Limiter) -> maps:fold( fun(Type, Cfg, _) -> emqx_limiter_server:add_bucket(Id, Type, Cfg) end, ok, maps:without([client], Limiter) - ); -add_limiter_bucket(_Id, _Cfg) -> - ok. + ). -del_limiter_bucket(Id, #{limiter := Limiters}) -> - lists:foreach( - fun(Type) -> - emqx_limiter_server:del_bucket(Id, Type) - end, - maps:keys(Limiters) - ); -del_limiter_bucket(_Id, _Cfg) -> - ok. +del_limiter_bucket(Id, Conf) -> + case limiter(Conf) of + undefined -> + ok; + Limiter -> + lists:foreach( + fun(Type) -> + emqx_limiter_server:del_bucket(Id, Type) + end, + maps:keys(Limiter) + ) + end. enable_authn(Opts) -> maps:get(enable_authn, Opts, true). @@ -670,7 +743,7 @@ certs_dir(Type, Name) -> iolist_to_binary(filename:join(["listeners", Type, Name])). convert_certs(CertsDir, Conf) -> - case emqx_tls_lib:ensure_ssl_files(CertsDir, maps:get(<<"ssl_options">>, Conf, undefined)) of + case emqx_tls_lib:ensure_ssl_files(CertsDir, get_ssl_options(Conf)) of {ok, undefined} -> Conf; {ok, SSL} -> @@ -681,7 +754,7 @@ convert_certs(CertsDir, Conf) -> end. clear_certs(CertsDir, Conf) -> - OldSSL = maps:get(<<"ssl_options">>, Conf, undefined), + OldSSL = get_ssl_options(Conf), emqx_tls_lib:delete_ssl_files(CertsDir, undefined, OldSSL). filter_stacktrace({Reason, _Stacktrace}) -> Reason; @@ -692,3 +765,112 @@ ensure_override_limiter_conf(Conf, #{<<"limiter">> := Limiter}) -> Conf#{<<"limiter">> => Limiter}; ensure_override_limiter_conf(Conf, _) -> Conf. + +try_clear_ssl_files(CertsDir, NewConf, OldConf) -> + NewSSL = get_ssl_options(NewConf), + OldSSL = get_ssl_options(OldConf), + emqx_tls_lib:delete_ssl_files(CertsDir, NewSSL, OldSSL). 
+ +get_ssl_options(Conf) -> + case maps:find(ssl_options, Conf) of + {ok, SSL} -> + SSL; + error -> + maps:get(<<"ssl_options">>, Conf, undefined) + end. + +%% @doc Get QUIC optional settings for low level tunings. +%% @see quicer:quic_settings() +-spec optional_quic_listener_opts(map()) -> proplists:proplist(). +optional_quic_listener_opts(Conf) when is_map(Conf) -> + maps:to_list( + maps:filter( + fun(Name, _V) -> + lists:member( + Name, + quic_listener_optional_settings() + ) + end, + Conf + ) + ). + +-spec quic_listener_optional_settings() -> [atom()]. +quic_listener_optional_settings() -> + [ + max_bytes_per_key, + %% In conf schema we use handshake_idle_timeout + handshake_idle_timeout_ms, + %% In conf schema we use idle_timeout + idle_timeout_ms, + %% not used since we are the server + %% tls_client_max_send_buffer, + tls_server_max_send_buffer, + stream_recv_window_default, + stream_recv_buffer_default, + conn_flow_control_window, + max_stateless_operations, + initial_window_packets, + send_idle_timeout_ms, + initial_rtt_ms, + max_ack_delay_ms, + disconnect_timeout_ms, + %% In conf schema, we use keep_alive_interval + keep_alive_interval_ms, + %% overwritten by conn opts + peer_bidi_stream_count, + %% overwritten by conn opts + peer_unidi_stream_count, + retry_memory_limit, + load_balancing_mode, + max_operations_per_drain, + send_buffering_enabled, + pacing_enabled, + migration_enabled, + datagram_receive_enabled, + server_resumption_level, + minimum_mtu, + maximum_mtu, + mtu_discovery_search_complete_timeout_us, + mtu_discovery_missing_probe_count, + max_binding_stateless_operations, + stateless_operation_expiration_ms + ]. + +inject_sni_fun(ListenerId, Conf = #{ssl_options := #{ocsp := #{enable_ocsp_stapling := true}}}) -> + emqx_ocsp_cache:inject_sni_fun(ListenerId, Conf); +inject_sni_fun(_ListenerId, Conf) -> + Conf. + +inject_crl_config( + Conf = #{ssl_options := #{enable_crl_check := true} = SSLOpts} +) -> + HTTPTimeout = emqx_config:get([crl_cache, http_timeout], timer:seconds(15)), + Conf#{ + ssl_options := SSLOpts#{ + %% `crl_check => true' doesn't work + crl_check => peer, + crl_cache => {emqx_ssl_crl_cache, {internal, [{http, HTTPTimeout}]}} + } + }; +inject_crl_config(Conf) -> + Conf. + +maybe_unregister_ocsp_stapling_refresh( + ssl = Type, Name, #{ssl_options := #{ocsp := #{enable_ocsp_stapling := false}}} = _Conf +) -> + unregister_ocsp_stapling_refresh(Type, Name), + ok; +maybe_unregister_ocsp_stapling_refresh(_Type, _Name, _Conf) -> + ok. + +unregister_ocsp_stapling_refresh(Type, Name) -> + ListenerId = listener_id(Type, Name), + emqx_ocsp_cache:unregister_listener(ListenerId), + ok. + +%% There is currently an issue with the frontend: +%% infinity is not a good value for it, so we use 5m for now +default_max_conn() -> + %% TODO: <<"infinity">> + 5_000_000. diff --git a/apps/emqx/src/emqx_logger.erl b/apps/emqx/src/emqx_logger.erl index 114a1af49..6087acd8a 100644 --- a/apps/emqx/src/emqx_logger.erl +++ b/apps/emqx/src/emqx_logger.erl @@ -237,7 +237,7 @@ set_log_handler_level(HandlerId, Level) -> end. %% @doc Set both the primary and all handlers level in one command --spec set_log_level(logger:handler_id()) -> ok | {error, term()}. +-spec set_log_level(logger:level()) -> ok | {error, term()}.
set_log_level(Level) -> case set_primary_log_level(Level) of ok -> set_all_log_handlers_level(Level); diff --git a/apps/emqx/src/emqx_logger_jsonfmt.erl b/apps/emqx/src/emqx_logger_jsonfmt.erl index 22cf75153..0e72fd2b5 100644 --- a/apps/emqx/src/emqx_logger_jsonfmt.erl +++ b/apps/emqx/src/emqx_logger_jsonfmt.erl @@ -62,11 +62,11 @@ %% The JSON object is pretty-printed. %% NOTE: do not use this function for logging. best_effort_json(Input) -> - best_effort_json(Input, [space, {indent, 4}]). + best_effort_json(Input, [pretty, force_utf8]). best_effort_json(Input, Opts) -> Config = #{depth => unlimited, single_line => true}, JsonReady = best_effort_json_obj(Input, Config), - jsx:encode(JsonReady, Opts). + emqx_utils_json:encode(JsonReady, Opts). -spec format(logger:log_event(), config()) -> iodata(). format(#{level := Level, msg := Msg, meta := Meta} = Event, Config0) when is_map(Config0) -> @@ -92,7 +92,7 @@ format(Msg, Meta, Config) -> } end, Data = maps:without([report_cb], Data0), - jiffy:encode(json_obj(Data, Config)). + emqx_utils_json:encode(json_obj(Data, Config)). maybe_format_msg({report, Report} = Msg, #{report_cb := Cb} = Meta, Config) -> case is_map(Report) andalso Cb =:= ?DEFAULT_FORMATTER of @@ -378,15 +378,15 @@ p_config() -> best_effort_json_test() -> ?assertEqual( - <<"{}">>, + <<"{\n \n}">>, emqx_logger_jsonfmt:best_effort_json([]) ), ?assertEqual( - <<"{\n \"key\": []\n}">>, + <<"{\n \"key\" : [\n \n ]\n}">>, emqx_logger_jsonfmt:best_effort_json(#{key => []}) ), ?assertEqual( - <<"[\n {\n \"key\": []\n }\n]">>, + <<"[\n {\n \"key\" : [\n \n ]\n }\n]">>, emqx_logger_jsonfmt:best_effort_json([#{key => []}]) ), ok. diff --git a/apps/emqx/src/emqx_logger_textfmt.erl b/apps/emqx/src/emqx_logger_textfmt.erl index 3695929d9..3dce8a2ec 100644 --- a/apps/emqx/src/emqx_logger_textfmt.erl +++ b/apps/emqx/src/emqx_logger_textfmt.erl @@ -22,20 +22,62 @@ check_config(X) -> logger_formatter:check_config(X). -format(#{msg := {report, Report0}, meta := Meta} = Event, Config) when is_map(Report0) -> - Report1 = enrich_report_mfa(Report0, Meta), - Report2 = enrich_report_clientid(Report1, Meta), - Report3 = enrich_report_peername(Report2, Meta), - Report4 = enrich_report_topic(Report3, Meta), - logger_formatter:format(Event#{msg := {report, Report4}}, Config); +format(#{msg := {report, ReportMap}, meta := Meta} = Event, Config) when is_map(ReportMap) -> + ReportList = enrich_report(ReportMap, Meta), + Report = + case is_list_report_acceptable(Meta) of + true -> + ReportList; + false -> + maps:from_list(ReportList) + end, + logger_formatter:format(Event#{msg := {report, Report}}, Config); format(#{msg := {string, String}} = Event, Config) -> format(Event#{msg => {"~ts ", [String]}}, Config); +%% trace format(#{msg := Msg0, meta := Meta} = Event, Config) -> Msg1 = enrich_client_info(Msg0, Meta), Msg2 = enrich_mfa(Msg1, Meta), Msg3 = enrich_topic(Msg2, Meta), logger_formatter:format(Event#{msg := Msg3}, Config). +is_list_report_acceptable(#{report_cb := Cb}) -> + Cb =:= fun logger:format_otp_report/1 orelse Cb =:= fun logger:format_report/1; +is_list_report_acceptable(_) -> + false. + +enrich_report(ReportRaw, Meta) -> + %% clientid and peername always in emqx_conn's process metadata. 
+ %% topic can be put in meta using ?SLOG/3, or put in msg's report by ?SLOG/2 + Topic = + case maps:get(topic, Meta, undefined) of + undefined -> maps:get(topic, ReportRaw, undefined); + Topic0 -> Topic0 + end, + ClientId = maps:get(clientid, Meta, undefined), + Peer = maps:get(peername, Meta, undefined), + MFA = maps:get(mfa, Meta, undefined), + Line = maps:get(line, Meta, undefined), + Msg = maps:get(msg, ReportRaw, undefined), + %% turn it into a list so that the order of the fields is determined + lists:foldl( + fun + ({_, undefined}, Acc) -> Acc; + (Item, Acc) -> [Item | Acc] + end, + maps:to_list(maps:without([topic, msg, clientid], ReportRaw)), + [ + {topic, try_format_unicode(Topic)}, + {clientid, try_format_unicode(ClientId)}, + {peername, Peer}, + {line, Line}, + {mfa, mfa(MFA)}, + {msg, Msg} + ] + ). + +try_format_unicode(undefined) -> + undefined; try_format_unicode(Char) -> List = try @@ -53,30 +95,6 @@ try_format_unicode(Char) -> _ -> List end. -enrich_report_mfa(Report, #{mfa := Mfa, line := Line}) -> - Report#{mfa => mfa(Mfa), line => Line}; -enrich_report_mfa(Report, _) -> - Report. - -enrich_report_clientid(Report, #{clientid := ClientId}) -> - Report#{clientid => try_format_unicode(ClientId)}; -enrich_report_clientid(Report, _) -> - Report. - -enrich_report_peername(Report, #{peername := Peername}) -> - Report#{peername => Peername}; -enrich_report_peername(Report, _) -> - Report. - -%% clientid and peername always in emqx_conn's process metadata. -%% topic can be put in meta using ?SLOG/3, or put in msg's report by ?SLOG/2 -enrich_report_topic(Report, #{topic := Topic}) -> - Report#{topic => try_format_unicode(Topic)}; -enrich_report_topic(Report = #{topic := Topic}, _) -> - Report#{topic => try_format_unicode(Topic)}; -enrich_report_topic(Report, _) -> - Report. - enrich_mfa({Fmt, Args}, #{mfa := Mfa, line := Line}) when is_list(Fmt) -> {Fmt ++ " mfa: ~ts line: ~w", Args ++ [mfa(Mfa), Line]}; enrich_mfa(Msg, _) -> @@ -96,4 +114,5 @@ enrich_topic({Fmt, Args}, #{topic := Topic}) when is_list(Fmt) -> enrich_topic(Msg, _) -> Msg. -mfa({M, F, A}) -> atom_to_list(M) ++ ":" ++ atom_to_list(F) ++ "/" ++ integer_to_list(A). +mfa(undefined) -> undefined; +mfa({M, F, A}) -> [atom_to_list(M), ":", atom_to_list(F), "/" ++ integer_to_list(A)]. diff --git a/apps/emqx/src/emqx_metrics.erl b/apps/emqx/src/emqx_metrics.erl index c2e297623..21a114c0f 100644 --- a/apps/emqx/src/emqx_metrics.erl +++ b/apps/emqx/src/emqx_metrics.erl @@ -541,7 +541,7 @@ init([]) -> CRef = counters:new(?MAX_SIZE, [write_concurrency]), ok = persistent_term:put(?MODULE, CRef), % Create index mapping table - ok = emqx_tables:new(?TAB, [{keypos, 2}, {read_concurrency, true}]), + ok = emqx_utils_ets:new(?TAB, [{keypos, 2}, {read_concurrency, true}]), Metrics = lists:append([ ?BYTES_METRICS, ?PACKET_METRICS, diff --git a/apps/emqx/src/emqx_metrics_worker.erl b/apps/emqx/src/emqx_metrics_worker.erl index 241ba599f..5f41346cb 100644 --- a/apps/emqx/src/emqx_metrics_worker.erl +++ b/apps/emqx/src/emqx_metrics_worker.erl @@ -31,6 +31,7 @@ -export([ inc/3, inc/4, + observe/4, get/3, get_gauge/3, set_gauge/5, @@ -38,6 +39,8 @@ get_gauges/2, delete_gauges/2, get_rate/2, + get_slide/2, + get_slide/3, get_counters/2, create_metrics/3, create_metrics/4, @@ -67,7 +70,16 @@ -define(SAMPLING, 1). -endif. --export_type([metrics/0, handler_name/0, metric_id/0]). +-export_type([metrics/0, handler_name/0, metric_id/0, metric_spec/0]). 
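For illustration, how the extended metric specs fit together (a minimal sketch; `Name` and `Id` stand for any registered metrics worker and handler id, so they are placeholders here; `desugar/1` below turns bare atoms into `{counter, _}` specs, and each slide reserves two counter slots, one for the sum and one for the sample count):

ok = emqx_metrics_worker:create_metrics(Name, Id, [success, {slide, latency}]),
ok = emqx_metrics_worker:inc(Name, Id, success),
ok = emqx_metrics_worker:observe(Name, Id, latency, 37),
#{counters := #{success := 1}, slides := #{latency := _}} =
    emqx_metrics_worker:get_metrics(Name, Id).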
+ +% Default +-type metric_type() :: + %% Simple counter + counter + %% Sliding window average + | slide. + +-type metric_spec() :: {metric_type(), atom()}. -type rate() :: #{ current := float(), @@ -77,6 +89,7 @@ -type metrics() :: #{ counters := #{metric_name() => integer()}, gauges := #{metric_name() => integer()}, + slides := #{metric_name() => number()}, rate := #{metric_name() => rate()} }. -type handler_name() :: atom(). @@ -103,9 +116,22 @@ last5m_smpl = [] :: list() }). +-record(slide_datapoint, { + sum :: non_neg_integer(), + samples :: non_neg_integer(), + time :: non_neg_integer() +}). + +-record(slide, { + %% Total number of samples through the history + n_samples = 0 :: non_neg_integer(), + datapoints = [] :: [#slide_datapoint{}] +}). + -record(state, { metric_ids = sets:new(), - rates :: undefined | #{metric_id() => #rate{}} + rates :: #{metric_id() => #{metric_name() => #rate{}}} | undefined, + slides = #{} :: #{metric_id() => #{metric_name() => #slide{}}} }). %%------------------------------------------------------------------------------ @@ -126,14 +152,18 @@ child_spec(ChldName, Name) -> modules => [emqx_metrics_worker] }. --spec create_metrics(handler_name(), metric_id(), [metric_name()]) -> ok | {error, term()}. +-spec create_metrics(handler_name(), metric_id(), [metric_spec() | metric_name()]) -> + ok | {error, term()}. create_metrics(Name, Id, Metrics) -> - create_metrics(Name, Id, Metrics, Metrics). + Metrics1 = desugar(Metrics), + Counters = filter_counters(Metrics1), + create_metrics(Name, Id, Metrics1, Counters). --spec create_metrics(handler_name(), metric_id(), [metric_name()], [metric_name()]) -> +-spec create_metrics(handler_name(), metric_id(), [metric_spec() | metric_name()], [atom()]) -> ok | {error, term()}. create_metrics(Name, Id, Metrics, RateMetrics) -> - gen_server:call(Name, {create_metrics, Id, Metrics, RateMetrics}). + Metrics1 = desugar(Metrics), + gen_server:call(Name, {create_metrics, Id, Metrics1, RateMetrics}). -spec clear_metrics(handler_name(), metric_id()) -> ok. clear_metrics(Name, Id) -> @@ -156,7 +186,7 @@ get(Name, Id, Metric) -> not_found -> 0; Ref when is_atom(Metric) -> - counters:get(Ref, idx_metric(Name, Id, Metric)); + counters:get(Ref, idx_metric(Name, Id, counter, Metric)); Ref when is_integer(Metric) -> counters:get(Ref, Metric) end. @@ -171,21 +201,37 @@ get_counters(Name, Id) -> fun(_Metric, Index) -> get(Name, Id, Index) end, - get_indexes(Name, Id) + get_indexes(Name, counter, Id) ). +-spec get_slide(handler_name(), metric_id()) -> map(). +get_slide(Name, Id) -> + gen_server:call(Name, {get_slide, Id}). + +%% Get the average for a specified sliding window period. +%% +%% It will only account for the samples recorded in the past `Window' seconds. +-spec get_slide(handler_name(), metric_id(), non_neg_integer()) -> number(). +get_slide(Name, Id, Window) -> + gen_server:call(Name, {get_slide, Id, Window}). + -spec reset_counters(handler_name(), metric_id()) -> ok. reset_counters(Name, Id) -> - Indexes = maps:values(get_indexes(Name, Id)), - Ref = get_ref(Name, Id), - lists:foreach(fun(Idx) -> counters:put(Ref, Idx, 0) end, Indexes). + case get_ref(Name, Id) of + not_found -> + ok; + Ref -> + #{size := Size} = counters:info(Ref), + lists:foreach(fun(Idx) -> counters:put(Ref, Idx, 0) end, lists:seq(1, Size)) + end. -spec get_metrics(handler_name(), metric_id()) -> metrics(). 
get_metrics(Name, Id) -> #{ rate => get_rate(Name, Id), counters => get_counters(Name, Id), - gauges => get_gauges(Name, Id) + gauges => get_gauges(Name, Id), + slides => get_slide(Name, Id) }. -spec inc(handler_name(), metric_id(), atom()) -> ok. @@ -194,7 +240,37 @@ inc(Name, Id, Metric) -> -spec inc(handler_name(), metric_id(), metric_name(), integer()) -> ok. inc(Name, Id, Metric, Val) -> - counters:add(get_ref(Name, Id), idx_metric(Name, Id, Metric), Val). + counters:add(get_ref(Name, Id), idx_metric(Name, Id, counter, Metric), Val). + +%% Add a sample to the slide. +%% +%% Slide is short for "sliding window average" type of metric. +%% +%% It allows monitoring the average of observed values over time, +%% and it's mainly used for performance analysis. For example, it can +%% be used to report the run time of operations. +%% +%% Consider an example: +%% +%% ``` +%% emqx_metrics_worker:create_metrics(Name, Id, [{slide, a}]), +%% emqx_metrics_worker:observe(Name, Id, a, 10), +%% emqx_metrics_worker:observe(Name, Id, a, 30), +%% #{a := 20} = emqx_metrics_worker:get_slide(Name, Id, _Window = 1). +%% ''' +%% +%% After recording 2 samples, this metric becomes 20 (the average of 10 and 30). +%% +%% But after 1 second it becomes 0 again, unless new samples are recorded. +%% +-spec observe(handler_name(), metric_id(), atom(), integer()) -> ok. +observe(Name, Id, Metric, Val) -> + #{ref := CRef, slide := Idx} = maps:get(Id, get_pterm(Name)), + Index = maps:get(Metric, Idx), + %% Update sum: + counters:add(CRef, Index, Val), + %% Update number of samples: + counters:add(CRef, Index + 1, 1). -spec set_gauge(handler_name(), metric_id(), worker_id(), metric_name(), integer()) -> ok. set_gauge(Name, Id, WorkerId, Metric, Val) -> @@ -300,9 +376,9 @@ handle_call({get_rate, Id}, _From, State = #state{rates = Rates}) -> handle_call( {create_metrics, Id, Metrics, RateMetrics}, _From, - State = #state{metric_ids = MIDs, rates = Rates} + State = #state{metric_ids = MIDs, rates = Rates, slides = Slides} ) -> - case RateMetrics -- Metrics of + case RateMetrics -- filter_counters(Metrics) of [] -> RatePerId = maps:from_list([{M, #rate{}} || M <- RateMetrics]), Rate1 = @@ -310,9 +386,11 @@ handle_call( undefined -> #{Id => RatePerId}; _ -> Rates#{Id => RatePerId} end, + Slides1 = Slides#{Id => create_slides(Metrics)}, {reply, create_counters(get_self_name(), Id, Metrics), State#state{ metric_ids = sets:add_element(Id, MIDs), - rates = Rate1 + rates = Rate1, + slides = Slides1 }}; _ -> {reply, {error, not_super_set_of, {RateMetrics, Metrics}}, State} @@ -320,7 +398,7 @@ handle_call( handle_call( {delete_metrics, Id}, _From, - State = #state{metric_ids = MIDs, rates = Rates} + State = #state{metric_ids = MIDs, rates = Rates, slides = Slides} ) -> Name = get_self_name(), delete_counters(Name, Id), @@ -331,29 +409,43 @@ handle_call( case Rates of undefined -> undefined; _ -> maps:remove(Id, Rates) - end + end, + slides = maps:remove(Id, Slides) }}; handle_call( {reset_metrics, Id}, _From, - State = #state{rates = Rates} + State = #state{rates = Rates, slides = Slides} ) -> - Name = get_self_name(), - delete_gauges(Name, Id), - {reply, reset_counters(Name, Id), State#state{ + delete_gauges(get_self_name(), Id), + NewRates = + case Rates of + undefined -> + undefined; + _ -> + ResetRate = + maps:map( + fun(_Key, _Value) -> #rate{} end, + maps:get(Id, Rates, #{}) + ), + maps:put(Id, ResetRate, Rates) + end, + SlideSpecs = [{slide, I} || I <- maps:keys(maps:get(Id, Slides, #{}))], + NewSlides = Slides#{Id =>
create_slides(SlideSpecs)}, + {reply, reset_counters(get_self_name(), Id), State#state{ rates = - case Rates of - undefined -> - undefined; - _ -> - ResetRate = - maps:map( - fun(_Key, _Value) -> #rate{} end, - maps:get(Id, Rates, #{}) - ), - maps:put(Id, ResetRate, Rates) - end + NewRates, + slides = NewSlides }}; +handle_call({get_slide, Id}, _From, State = #state{slides = Slides}) -> + SlidesForID = maps:get(Id, Slides, #{}), + {reply, maps:map(fun(Metric, Slide) -> do_get_slide(Id, Metric, Slide) end, SlidesForID), + State}; +handle_call({get_slide, Id, Window}, _From, State = #state{slides = Slides}) -> + SlidesForID = maps:get(Id, Slides, #{}), + {reply, + maps:map(fun(Metric, Slide) -> do_get_slide(Window, Id, Metric, Slide) end, SlidesForID), + State}; handle_call(_Request, _From, State) -> {reply, ok, State}. @@ -363,7 +455,7 @@ handle_cast(_Msg, State) -> handle_info(ticking, State = #state{rates = undefined}) -> erlang:send_after(timer:seconds(?SAMPLING), self(), ticking), {noreply, State}; -handle_info(ticking, State = #state{rates = Rates0}) -> +handle_info(ticking, State = #state{rates = Rates0, slides = Slides0}) -> Rates = maps:map( fun(Id, RatesPerID) -> @@ -376,8 +468,20 @@ handle_info(ticking, State = #state{rates = Rates0}) -> end, Rates0 ), + Slides = + maps:map( + fun(Id, SlidesPerID) -> + maps:map( + fun(Metric, Slide) -> + update_slide(Id, Metric, Slide) + end, + SlidesPerID + ) + end, + Slides0 + ), erlang:send_after(timer:seconds(?SAMPLING), self(), ticking), - {noreply, State#state{rates = Rates}}; + {noreply, State#state{rates = Rates, slides = Slides}}; handle_info(_Info, State) -> {noreply, State}. @@ -408,17 +512,18 @@ create_counters(_Name, _Id, []) -> error({create_counter_error, must_provide_a_list_of_metrics}); create_counters(Name, Id, Metrics) -> %% backup the old counters - OlderCounters = maps:with(Metrics, get_counters(Name, Id)), + OlderCounters = maps:with(filter_counters(Metrics), get_counters(Name, Id)), %% create the new counter - Size = length(Metrics), - Indexes = maps:from_list(lists:zip(Metrics, lists:seq(1, Size))), + {Size, Indexes} = create_metric_indexes(Metrics), Counters = get_pterm(Name), CntrRef = counters:new(Size, [write_concurrency]), persistent_term:put( ?CntrRef(Name), - Counters#{Id => #{ref => CntrRef, indexes => Indexes}} + Counters#{Id => Indexes#{ref => CntrRef}} ), - %% restore the old counters + %% Restore the old counters. Slides are not restored, since they + %% are periodically zeroed anyway. We do lose some samples in the + %% current interval, but that's acceptable for now. lists:foreach( fun({Metric, N}) -> inc(Name, Id, Metric, N) @@ -426,6 +531,16 @@ create_counters(Name, Id, Metrics) -> maps:to_list(OlderCounters) ). +create_metric_indexes(Metrics) -> + create_metric_indexes(Metrics, 1, [], []). + +create_metric_indexes([], Size, Counters, Slides) -> + {Size, #{counter => maps:from_list(Counters), slide => maps:from_list(Slides)}}; +create_metric_indexes([{counter, Id} | Rest], Index, Counters, Slides) -> + create_metric_indexes(Rest, Index + 1, [{Id, Index} | Counters], Slides); +create_metric_indexes([{slide, Id} | Rest], Index, Counters, Slides) -> + create_metric_indexes(Rest, Index + 2, Counters, [{Id, Index} | Slides]). + delete_counters(Name, Id) -> persistent_term:put(?CntrRef(Name), maps:remove(Id, get_pterm(Name))). @@ -435,12 +550,12 @@ get_ref(Name, Id) -> error -> not_found end. -idx_metric(Name, Id, Metric) -> - maps:get(Metric, get_indexes(Name, Id)). 
+idx_metric(Name, Id, Type, Metric) -> + maps:get(Metric, get_indexes(Name, Type, Id)). -get_indexes(Name, Id) -> +get_indexes(Name, Type, Id) -> case maps:find(Id, get_pterm(Name)) of - {ok, #{indexes := Indexes}} -> Indexes; + {ok, #{Type := Indexes}} -> Indexes; error -> #{} end. @@ -488,6 +603,53 @@ calculate_rate(CurrVal, #rate{ tick = Tick + 1 }. +do_get_slide(Id, Metric, S = #slide{n_samples = NSamples}) -> + #{ + n_samples => NSamples, + current => do_get_slide(2, Id, Metric, S), + last5m => do_get_slide(?SECS_5M, Id, Metric, S) + }. + +do_get_slide(Window, Id, Metric, #slide{datapoints = DP0}) -> + Datapoint = get_slide_datapoint(Id, Metric), + {N, Sum} = get_slide_window(os:system_time(second) - Window, [Datapoint | DP0], 0, 0), + case N > 0 of + true -> Sum div N; + false -> 0 + end. + +get_slide_window(_StartTime, [], N, S) -> + {N, S}; +get_slide_window(StartTime, [#slide_datapoint{time = T} | _], N, S) when T < StartTime -> + {N, S}; +get_slide_window(StartTime, [#slide_datapoint{samples = N, sum = S} | Rest], AccN, AccS) -> + get_slide_window(StartTime, Rest, AccN + N, AccS + S). + +get_slide_datapoint(Id, Metric) -> + Name = get_self_name(), + CRef = get_ref(Name, Id), + Index = idx_metric(Name, Id, slide, Metric), + Total = counters:get(CRef, Index), + N = counters:get(CRef, Index + 1), + #slide_datapoint{ + sum = Total, + samples = N, + time = os:system_time(second) + }. + +update_slide(Id, Metric, Slide0 = #slide{n_samples = NSamples, datapoints = DPs}) -> + Datapoint = get_slide_datapoint(Id, Metric), + %% Reset counters: + Name = get_self_name(), + CRef = get_ref(Name, Id), + Index = idx_metric(Name, Id, slide, Metric), + counters:put(CRef, Index, 0), + counters:put(CRef, Index + 1, 0), + Slide0#slide{ + datapoints = [Datapoint | lists:droplast(DPs)], + n_samples = Datapoint#slide_datapoint.samples + NSamples + }. + format_rates_of_id(RatesPerId) -> maps:map( fun(_Metric, Rates) -> @@ -510,6 +672,27 @@ precision(Float, N) -> Base = math:pow(10, N), round(Float * Base) / Base. +desugar(Metrics) -> + lists:map( + fun + (Atom) when is_atom(Atom) -> + {counter, Atom}; + (Spec = {_, _}) -> + Spec + end, + Metrics + ). + +filter_counters(Metrics) -> + [K || {counter, K} <- Metrics]. + +create_slides(Metrics) -> + EmptyDatapoints = [ + #slide_datapoint{sum = 0, samples = 0, time = 0} + || _ <- lists:seq(1, ?SECS_5M div ?SAMPLING) + ], + maps:from_list([{K, #slide{datapoints = EmptyDatapoints}} || {slide, K} <- Metrics]). + get_self_name() -> {registered_name, Name} = process_info(self(), registered_name), Name. diff --git a/apps/emqx/src/emqx_mqtt_caps.erl b/apps/emqx/src/emqx_mqtt_caps.erl index 1806ede1d..897bb93c4 100644 --- a/apps/emqx/src/emqx_mqtt_caps.erl +++ b/apps/emqx/src/emqx_mqtt_caps.erl @@ -37,7 +37,6 @@ max_qos_allowed => emqx_types:qos(), retain_available => boolean(), wildcard_subscription => boolean(), - subscription_identifiers => boolean(), shared_subscription => boolean(), exclusive_subscription => boolean() }. @@ -58,18 +57,17 @@ exclusive_subscription ]). --define(DEFAULT_CAPS, #{ - max_packet_size => ?MAX_PACKET_SIZE, - max_clientid_len => ?MAX_CLIENTID_LEN, - max_topic_alias => ?MAX_TOPIC_AlIAS, - max_topic_levels => ?MAX_TOPIC_LEVELS, - max_qos_allowed => ?QOS_2, - retain_available => true, - wildcard_subscription => true, - subscription_identifiers => true, - shared_subscription => true, - exclusive_subscription => false -}). 
+-define(DEFAULT_CAPS_KEYS, [ + max_packet_size, + max_clientid_len, + max_topic_alias, + max_topic_levels, + max_qos_allowed, + retain_available, + wildcard_subscription, + shared_subscription, + exclusive_subscription +]). -spec check_pub( emqx_types:zone(), @@ -88,7 +86,7 @@ check_pub(Zone, Flags) when is_map(Flags) -> error -> Flags end, - maps:with(?PUBCAP_KEYS, get_caps(Zone)) + get_caps(?PUBCAP_KEYS, Zone) ). do_check_pub(#{topic_levels := Levels}, #{max_topic_levels := Limit}) when @@ -111,7 +109,7 @@ do_check_pub(_Flags, _Caps) -> ) -> ok_or_error(emqx_types:reason_code()). check_sub(ClientInfo = #{zone := Zone}, Topic, SubOpts) -> - Caps = maps:with(?SUBCAP_KEYS, get_caps(Zone)), + Caps = get_caps(?SUBCAP_KEYS, Zone), Flags = lists:foldl( fun (max_topic_levels, Map) -> @@ -152,10 +150,12 @@ do_check_sub(_Flags, _Caps, _, _) -> ok. get_caps(Zone) -> - lists:foldl( - fun({K, V}, Acc) -> - Acc#{K => emqx_config:get_zone_conf(Zone, [mqtt, K], V)} - end, - #{}, - maps:to_list(?DEFAULT_CAPS) + get_caps(?DEFAULT_CAPS_KEYS, Zone). +get_caps(Keys, Zone) -> + maps:with( + Keys, + maps:merge( + emqx_config:get([mqtt]), + emqx_config:get_zone_conf(Zone, [mqtt]) + ) ). diff --git a/apps/emqx/src/emqx_mqueue.erl b/apps/emqx/src/emqx_mqueue.erl index 494e2b33e..fbf29d754 100644 --- a/apps/emqx/src/emqx_mqueue.erl +++ b/apps/emqx/src/emqx_mqueue.erl @@ -67,7 +67,8 @@ out/1, stats/1, dropped/1, - to_list/1 + to_list/1, + filter/2 ]). -define(NO_PRIORITY_TABLE, disabled). @@ -158,6 +159,19 @@ max_len(#mqueue{max_len = MaxLen}) -> MaxLen. to_list(MQ) -> to_list(MQ, []). +-spec filter(fun((any()) -> boolean()), mqueue()) -> mqueue(). +filter(_Pred, #mqueue{len = 0} = MQ) -> + MQ; +filter(Pred, #mqueue{q = Q, len = Len, dropped = Dropped} = MQ) -> + Q2 = ?PQUEUE:filter(Pred, Q), + case ?PQUEUE:len(Q2) of + Len -> + MQ; + Len2 -> + Diff = Len - Len2, + MQ#mqueue{q = Q2, len = Len2, dropped = Dropped + Diff} + end. + to_list(MQ, Acc) -> case out(MQ) of {empty, _MQ} -> diff --git a/apps/emqx/src/emqx_ocsp_cache.erl b/apps/emqx/src/emqx_ocsp_cache.erl new file mode 100644 index 000000000..ef0411b37 --- /dev/null +++ b/apps/emqx/src/emqx_ocsp_cache.erl @@ -0,0 +1,548 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% @doc EMQX OCSP cache. +%%-------------------------------------------------------------------- + +-module(emqx_ocsp_cache). + +-include("logger.hrl"). +-include_lib("public_key/include/public_key.hrl"). +-include_lib("ssl/src/ssl_handshake.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +-behaviour(gen_server). + +-export([ + start_link/0, + sni_fun/2, + fetch_response/1, + register_listener/2, + unregister_listener/1, + inject_sni_fun/2 +]). + +%% gen_server API +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + code_change/3 +]).
+
+%% internal export; only for mocking in tests
+-export([http_get/2]).
+
+-define(CACHE_TAB, ?MODULE).
+-define(CALL_TIMEOUT, 20_000).
+-define(RETRY_TIMEOUT, 5_000).
+-define(REFRESH_TIMER(LID), {refresh_timer, LID}).
+-ifdef(TEST).
+-define(MIN_REFRESH_INTERVAL, timer:seconds(5)).
+-else.
+-define(MIN_REFRESH_INTERVAL, timer:minutes(1)).
+-endif.
+
+%% Allow usage of OTP certificate record fields (camelCase).
+-elvis([
+    {elvis_style, atom_naming_convention, #{
+        regex => "^([a-z][a-z0-9]*_?)([a-zA-Z0-9]*_?)*$",
+        enclosed_atoms => ".*"
+    }}
+]).
+
+%%--------------------------------------------------------------------
+%% API
+%%--------------------------------------------------------------------
+
+start_link() ->
+    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+sni_fun(_ServerName, ListenerID) ->
+    Res =
+        try
+            fetch_response(ListenerID)
+        catch
+            _:_ -> error
+        end,
+    case Res of
+        {ok, Response} ->
+            [
+                {certificate_status, #certificate_status{
+                    status_type = ?CERTIFICATE_STATUS_TYPE_OCSP,
+                    response = Response
+                }}
+            ];
+        error ->
+            []
+    end.
+
+fetch_response(ListenerID) ->
+    case do_lookup(ListenerID) of
+        {ok, DERResponse} ->
+            {ok, DERResponse};
+        {error, invalid_listener_id} ->
+            error;
+        {error, not_cached} ->
+            ?tp(ocsp_cache_miss, #{listener_id => ListenerID}),
+            ?SLOG(debug, #{
+                msg => "fetching_new_ocsp_response",
+                listener_id => ListenerID
+            }),
+            http_fetch(ListenerID)
+    end.
+
+register_listener(ListenerID, Opts) ->
+    gen_server:call(?MODULE, {register_listener, ListenerID, Opts}, ?CALL_TIMEOUT).
+
+unregister_listener(ListenerID) ->
+    gen_server:cast(?MODULE, {unregister_listener, ListenerID}).
+
+-spec inject_sni_fun(emqx_listeners:listener_id(), map()) -> map().
+inject_sni_fun(ListenerID, Conf0) ->
+    SNIFun = emqx_const_v1:make_sni_fun(ListenerID),
+    Conf = emqx_utils_maps:deep_merge(Conf0, #{ssl_options => #{sni_fun => SNIFun}}),
+    ok = ?MODULE:register_listener(ListenerID, Conf),
+    Conf.
+
+%%--------------------------------------------------------------------
+%% gen_server behaviour
+%%--------------------------------------------------------------------
+
+init(_Args) ->
+    logger:set_process_metadata(#{domain => [emqx, ocsp, cache]}),
+    emqx_utils_ets:new(?CACHE_TAB, [
+        named_table,
+        public,
+        {heir, whereis(emqx_kernel_sup), none},
+        {read_concurrency, true}
+    ]),
+    ?tp(ocsp_cache_init, #{}),
+    {ok, #{}}.
+
+handle_call({http_fetch, ListenerID}, _From, State) ->
+    case do_lookup(ListenerID) of
+        {ok, DERResponse} ->
+            {reply, {ok, DERResponse}, State};
+        {error, invalid_listener_id} ->
+            {reply, error, State};
+        {error, not_cached} ->
+            Conf = undefined,
+            with_refresh_params(ListenerID, Conf, {reply, error, State}, fun(Params) ->
+                case do_http_fetch_and_cache(ListenerID, Params) of
+                    error -> {reply, error, ensure_timer(ListenerID, State, ?RETRY_TIMEOUT)};
+                    {ok, Response} -> {reply, {ok, Response}, ensure_timer(ListenerID, State)}
+                end
+            end)
+    end;
+handle_call({register_listener, ListenerID, Conf}, _From, State0) ->
+    ?SLOG(debug, #{
+        msg => "registering_ocsp_cache",
+        listener_id => ListenerID
+    }),
+    RefreshInterval0 = emqx_utils_maps:deep_get([ssl_options, ocsp, refresh_interval], Conf),
+    RefreshInterval = max(RefreshInterval0, ?MIN_REFRESH_INTERVAL),
+    State = State0#{{refresh_interval, ListenerID} => RefreshInterval},
+    %% we need to pass the config along because this might be called
+    %% during the listener's `post_config_update', hence the config is
+    %% not yet "committed" and accessible when we need it.
+ Message = {refresh, ListenerID, Conf}, + {reply, ok, ensure_timer(ListenerID, Message, State, 0)}; +handle_call(Call, _From, State) -> + {reply, {error, {unknown_call, Call}}, State}. + +handle_cast({unregister_listener, ListenerID}, State0) -> + State2 = + case maps:take(?REFRESH_TIMER(ListenerID), State0) of + error -> + State0; + {TRef, State1} -> + emqx_utils:cancel_timer(TRef), + State1 + end, + State = maps:remove({refresh_interval, ListenerID}, State2), + ?tp(ocsp_cache_listener_unregistered, #{listener_id => ListenerID}), + {noreply, State}; +handle_cast(_Cast, State) -> + {noreply, State}. + +handle_info({timeout, TRef, {refresh, ListenerID}}, State0) -> + case maps:get(?REFRESH_TIMER(ListenerID), State0, undefined) of + TRef -> + ?tp(ocsp_refresh_timer, #{listener_id => ListenerID}), + ?SLOG(debug, #{ + msg => "refreshing_ocsp_response", + listener_id => ListenerID + }), + Conf = undefined, + handle_refresh(ListenerID, Conf, State0); + _ -> + {noreply, State0} + end; +handle_info({timeout, TRef, {refresh, ListenerID, Conf}}, State0) -> + case maps:get(?REFRESH_TIMER(ListenerID), State0, undefined) of + TRef -> + ?tp(ocsp_refresh_timer, #{listener_id => ListenerID}), + ?SLOG(debug, #{ + msg => "refreshing_ocsp_response", + listener_id => ListenerID + }), + handle_refresh(ListenerID, Conf, State0); + _ -> + {noreply, State0} + end; +handle_info(_Info, State) -> + {noreply, State}. + +code_change(_Vsn, State, _Extra) -> + {ok, State}. + +%%-------------------------------------------------------------------- +%% internal functions +%%-------------------------------------------------------------------- + +http_fetch(ListenerID) -> + %% TODO: configurable call timeout? + gen_server:call(?MODULE, {http_fetch, ListenerID}, ?CALL_TIMEOUT). + +with_listener_config(ListenerID, ConfPath, ErrorResp, Fn) -> + case emqx_listeners:parse_listener_id(ListenerID) of + {ok, #{type := Type, name := Name}} -> + case emqx_config:get_listener_conf(Type, Name, ConfPath, not_found) of + not_found -> + ?SLOG(error, #{ + msg => "listener_config_missing", + listener_id => ListenerID + }), + ErrorResp; + Config -> + Fn(Config) + end; + _Err -> + ?SLOG(error, #{ + msg => "listener_id_not_found", + listener_id => ListenerID + }), + ErrorResp + end. + +cache_key(ListenerID) -> + with_listener_config(ListenerID, [ssl_options], error, fun + (#{certfile := ServerCertPemPath}) -> + #'Certificate'{ + tbsCertificate = + #'TBSCertificate'{ + signature = Signature + } + } = read_server_cert(ServerCertPemPath), + {ok, {ocsp_response, Signature}}; + (OtherConfig) -> + ?SLOG(error, #{ + msg => "listener_config_inconsistent", + listener_id => ListenerID, + config => OtherConfig + }), + error + end). + +do_lookup(ListenerID) -> + CacheKey = cache_key(ListenerID), + case CacheKey of + error -> + {error, invalid_listener_id}; + {ok, Key} -> + %% Respond immediately if a concurrent call already fetched it. + case ets:lookup(?CACHE_TAB, Key) of + [{_, DERResponse}] -> + ?tp(ocsp_cache_hit, #{listener_id => ListenerID}), + {ok, DERResponse}; + [] -> + {error, not_cached} + end + end. 
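Side note on the lookup flow above: fetch_response/1 reads the ETS table directly and only falls back to a gen_server call on a miss, so concurrent cache hits never serialize through the cache process. A minimal sketch of that hit/miss decision (illustrative module and names, not part of the patch; the key shape mirrors the {ocsp_response, Signature} tuple built by cache_key/1):

-module(ocsp_lookup_sketch).
-export([lookup/2]).

%% Mirrors do_lookup/1 above: a hit returns the cached DER response,
%% a miss tells the caller to request a serialized HTTP fetch.
lookup(Tab, Key) ->
    case ets:lookup(Tab, Key) of
        [{_, DERResponse}] -> {ok, DERResponse};
        [] -> {error, not_cached}
    end.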
+ +read_server_cert(ServerCertPemPath0) -> + ServerCertPemPath = to_bin(ServerCertPemPath0), + case ets:lookup(ssl_pem_cache, ServerCertPemPath) of + [{_, [{'Certificate', ServerCertDer, _} | _]}] -> + public_key:der_decode('Certificate', ServerCertDer); + [] -> + case file:read_file(ServerCertPemPath) of + {ok, ServerCertPem} -> + [{'Certificate', ServerCertDer, _} | _] = + public_key:pem_decode(ServerCertPem), + public_key:der_decode('Certificate', ServerCertDer); + {error, Error1} -> + error({bad_server_cert_file, Error1}) + end + end. + +handle_refresh(ListenerID, Conf, State0) -> + %% no point in retrying if the config is inconsistent or non + %% existent. + State1 = maps:without([{refresh_interval, ListenerID}, ?REFRESH_TIMER(ListenerID)], State0), + with_refresh_params(ListenerID, Conf, {noreply, State1}, fun(Params) -> + case do_http_fetch_and_cache(ListenerID, Params) of + error -> + ?SLOG(debug, #{ + msg => "failed_to_fetch_ocsp_response", + listener_id => ListenerID + }), + {noreply, ensure_timer(ListenerID, State0, ?RETRY_TIMEOUT)}; + {ok, _Response} -> + ?SLOG(debug, #{ + msg => "fetched_ocsp_response", + listener_id => ListenerID + }), + {noreply, ensure_timer(ListenerID, State0)} + end + end). + +with_refresh_params(ListenerID, Conf, ErrorRet, Fn) -> + case get_refresh_params(ListenerID, Conf) of + error -> + ErrorRet; + {ok, Params} -> + try + Fn(Params) + catch + Kind:Error -> + ?SLOG(error, #{ + msg => "error_fetching_ocsp_response", + listener_id => ListenerID, + error => {Kind, Error} + }), + ErrorRet + end + end. + +get_refresh_params(ListenerID, undefined = _Conf) -> + %% during normal periodic refreshes, we read from the emqx config. + with_listener_config(ListenerID, [ssl_options], error, fun + ( + #{ + ocsp := #{ + issuer_pem := IssuerPemPath, + responder_url := ResponderURL, + refresh_http_timeout := HTTPTimeout + }, + certfile := ServerCertPemPath + } + ) -> + {ok, #{ + issuer_pem => IssuerPemPath, + responder_url => ResponderURL, + refresh_http_timeout => HTTPTimeout, + server_certfile => ServerCertPemPath + }}; + (OtherConfig) -> + ?SLOG(error, #{ + msg => "listener_config_inconsistent", + listener_id => ListenerID, + config => OtherConfig + }), + error + end); +get_refresh_params(_ListenerID, #{ + ssl_options := #{ + ocsp := #{ + issuer_pem := IssuerPemPath, + responder_url := ResponderURL, + refresh_http_timeout := HTTPTimeout + }, + certfile := ServerCertPemPath + } +}) -> + {ok, #{ + issuer_pem => IssuerPemPath, + responder_url => ResponderURL, + refresh_http_timeout => HTTPTimeout, + server_certfile => ServerCertPemPath + }}; +get_refresh_params(_ListenerID, _Conf) -> + error. + +do_http_fetch_and_cache(ListenerID, Params) -> + #{ + issuer_pem := IssuerPemPath, + responder_url := ResponderURL, + refresh_http_timeout := HTTPTimeout, + server_certfile := ServerCertPemPath + } = Params, + IssuerPem = + case file:read_file(IssuerPemPath) of + {ok, IssuerPem0} -> IssuerPem0; + {error, Error0} -> error({bad_issuer_pem_file, Error0}) + end, + ServerCert = read_server_cert(ServerCertPemPath), + Request = build_ocsp_request(IssuerPem, ServerCert), + ?tp(ocsp_http_fetch, #{ + listener_id => ListenerID, + responder_url => ResponderURL, + timeout => HTTPTimeout + }), + RequestURI = iolist_to_binary([ResponderURL, Request]), + Resp = ?MODULE:http_get(RequestURI, HTTPTimeout), + case Resp of + {ok, {{_, 200, _}, _, Body}} -> + ?SLOG(debug, #{ + msg => "caching_ocsp_response", + listener_id => ListenerID + }), + %% if we got this far, the certfile is correct. 
+            {ok, CacheKey} = cache_key(ListenerID),
+            true = ets:insert(?CACHE_TAB, {CacheKey, Body}),
+            ?tp(ocsp_http_fetch_and_cache, #{
+                listener_id => ListenerID,
+                headers => true
+            }),
+            {ok, Body};
+        {ok, {200, Body}} ->
+            ?SLOG(debug, #{
+                msg => "caching_ocsp_response",
+                listener_id => ListenerID
+            }),
+            %% if we got this far, the certfile is correct.
+            {ok, CacheKey} = cache_key(ListenerID),
+            true = ets:insert(?CACHE_TAB, {CacheKey, Body}),
+            ?tp(ocsp_http_fetch_and_cache, #{
+                listener_id => ListenerID,
+                headers => false
+            }),
+            {ok, Body};
+        {ok, {{_, Code, _}, _, Body}} ->
+            ?tp(
+                error,
+                ocsp_http_fetch_bad_code,
+                #{
+                    listener_id => ListenerID,
+                    body => Body,
+                    code => Code,
+                    headers => true
+                }
+            ),
+            ?SLOG(error, #{
+                msg => "error_fetching_ocsp_response",
+                listener_id => ListenerID,
+                code => Code,
+                body => Body
+            }),
+            error;
+        {ok, {Code, Body}} ->
+            ?tp(
+                error,
+                ocsp_http_fetch_bad_code,
+                #{
+                    listener_id => ListenerID,
+                    body => Body,
+                    code => Code,
+                    headers => false
+                }
+            ),
+            ?SLOG(error, #{
+                msg => "error_fetching_ocsp_response",
+                listener_id => ListenerID,
+                code => Code,
+                body => Body
+            }),
+            error;
+        {error, Error} ->
+            ?tp(
+                error,
+                ocsp_http_fetch_error,
+                #{
+                    listener_id => ListenerID,
+                    error => Error
+                }
+            ),
+            ?SLOG(error, #{
+                msg => "error_fetching_ocsp_response",
+                listener_id => ListenerID,
+                error => Error
+            }),
+            error
+    end.
+
+http_get(URL, HTTPTimeout) ->
+    httpc:request(
+        get,
+        {URL, [{"connection", "close"}]},
+        [{timeout, HTTPTimeout}],
+        [{body_format, binary}]
+    ).
+
+ensure_timer(ListenerID, State) ->
+    Timeout = maps:get({refresh_interval, ListenerID}, State, timer:minutes(5)),
+    ensure_timer(ListenerID, State, Timeout).
+
+ensure_timer(ListenerID, State, Timeout) ->
+    ensure_timer(ListenerID, {refresh, ListenerID}, State, Timeout).
+
+ensure_timer(ListenerID, Message, State, Timeout) ->
+    emqx_utils:cancel_timer(maps:get(?REFRESH_TIMER(ListenerID), State, undefined)),
+    State#{
+        ?REFRESH_TIMER(ListenerID) => emqx_utils:start_timer(
+            Timeout,
+            Message
+        )
+    }.
+
+build_ocsp_request(IssuerPem, ServerCert) ->
+    [{'Certificate', IssuerDer, _} | _] = public_key:pem_decode(IssuerPem),
+    #'Certificate'{
+        tbsCertificate =
+            #'TBSCertificate'{
+                serialNumber = SerialNumber,
+                issuer = Issuer
+            }
+    } = ServerCert,
+    #'Certificate'{
+        tbsCertificate =
+            #'TBSCertificate'{
+                subjectPublicKeyInfo =
+                    #'SubjectPublicKeyInfo'{subjectPublicKey = IssuerPublicKeyDer}
+            }
+    } = public_key:der_decode('Certificate', IssuerDer),
+    IssuerDNHash = crypto:hash(sha, public_key:der_encode('Name', Issuer)),
+    IssuerPKHash = crypto:hash(sha, IssuerPublicKeyDer),
+    Req = #'OCSPRequest'{
+        tbsRequest =
+            #'TBSRequest'{
+                version = 0,
+                requestList =
+                    [
+                        #'Request'{
+                            reqCert =
+                                #'CertID'{
+                                    hashAlgorithm =
+                                        #'AlgorithmIdentifier'{
+                                            algorithm = ?'id-sha1',
+                                            %% DER-encoded ASN.1 NULL: the SHA-1
+                                            %% AlgorithmIdentifier carries explicit
+                                            %% NULL parameters.
+                                            parameters = <<5, 0>>
+                                        },
+                                    issuerNameHash = IssuerDNHash,
+                                    issuerKeyHash = IssuerPKHash,
+                                    serialNumber = SerialNumber
+                                }
+                        }
+                    ]
+            }
+    },
+    ReqDer = public_key:der_encode('OCSPRequest', Req),
+    base64:encode_to_string(ReqDer).
+
+to_bin(Str) when is_list(Str) -> list_to_binary(Str);
+to_bin(Bin) when is_binary(Bin) -> Bin.
diff --git a/apps/emqx/src/emqx_os_mon.erl b/apps/emqx/src/emqx_os_mon.erl
index a06f56a4c..144d2bfe5 100644
--- a/apps/emqx/src/emqx_os_mon.erl
+++ b/apps/emqx/src/emqx_os_mon.erl
@@ -23,8 +23,6 @@
 -export([start_link/0]).
-export([ - get_mem_check_interval/0, - set_mem_check_interval/1, get_sysmem_high_watermark/0, set_sysmem_high_watermark/1, get_procmem_high_watermark/0, @@ -46,6 +44,9 @@ terminate/2, code_change/3 ]). +-ifdef(TEST). +-export([is_sysmem_check_supported/0]). +-endif. -include("emqx.hrl"). @@ -61,14 +62,6 @@ update(OS) -> %% API %%-------------------------------------------------------------------- -get_mem_check_interval() -> - memsup:get_check_interval(). - -set_mem_check_interval(Seconds) when Seconds < 60000 -> - memsup:set_check_interval(1); -set_mem_check_interval(Seconds) -> - memsup:set_check_interval(Seconds div 60000). - get_sysmem_high_watermark() -> gen_server:call(?OS_MON, ?FUNCTION_NAME, infinity). @@ -93,9 +86,9 @@ init([]) -> %% memsup is not reliable, ignore memsup:set_sysmem_high_watermark(1.0), SysHW = init_os_monitor(), - _ = start_mem_check_timer(), - _ = start_cpu_check_timer(), - {ok, #{sysmem_high_watermark => SysHW}}. + MemRef = start_mem_check_timer(), + CpuRef = start_cpu_check_timer(), + {ok, #{sysmem_high_watermark => SysHW, mem_time_ref => MemRef, cpu_time_ref => CpuRef}}. init_os_monitor() -> init_os_monitor(emqx:get_config([sysmon, os])). @@ -103,11 +96,9 @@ init_os_monitor() -> init_os_monitor(OS) -> #{ sysmem_high_watermark := SysHW, - procmem_high_watermark := PHW, - mem_check_interval := MCI + procmem_high_watermark := PHW } = OS, set_procmem_high_watermark(PHW), - set_mem_check_interval(MCI), ok = update_mem_alarm_status(SysHW), SysHW. @@ -125,13 +116,15 @@ handle_cast(Msg, State) -> handle_info({timeout, _Timer, mem_check}, #{sysmem_high_watermark := HWM} = State) -> ok = update_mem_alarm_status(HWM), - ok = start_mem_check_timer(), - {noreply, State}; + Ref = start_mem_check_timer(), + {noreply, State#{mem_time_ref => Ref}}; handle_info({timeout, _Timer, cpu_check}, State) -> CPUHighWatermark = emqx:get_config([sysmon, os, cpu_high_watermark]) * 100, CPULowWatermark = emqx:get_config([sysmon, os, cpu_low_watermark]) * 100, - case emqx_vm:cpu_util() of - 0 -> + CPUVal = emqx_vm:cpu_util(), + case CPUVal of + %% 0 or 0.0 + Busy when Busy == 0 -> ok; Busy when Busy > CPUHighWatermark -> _ = emqx_alarm:activate( @@ -156,11 +149,14 @@ handle_info({timeout, _Timer, cpu_check}, State) -> _Busy -> ok end, - ok = start_cpu_check_timer(), - {noreply, State}; -handle_info({monitor_conf_update, OS}, _State) -> + Ref = start_cpu_check_timer(), + {noreply, State#{cpu_time_ref => Ref}}; +handle_info({monitor_conf_update, OS}, State) -> + cancel_outdated_timer(State), SysHW = init_os_monitor(OS), - {noreply, #{sysmem_high_watermark => SysHW}}; + MemRef = start_mem_check_timer(), + CpuRef = start_cpu_check_timer(), + {noreply, #{sysmem_high_watermark => SysHW, mem_time_ref => MemRef, cpu_time_ref => CpuRef}}; handle_info(Info, State) -> ?SLOG(error, #{msg => "unexpected_info", info => Info}), {noreply, State}. @@ -174,11 +170,15 @@ code_change(_OldVsn, State, _Extra) -> %%-------------------------------------------------------------------- %% Internal functions %%-------------------------------------------------------------------- +cancel_outdated_timer(#{mem_time_ref := MemRef, cpu_time_ref := CpuRef}) -> + emqx_utils:cancel_timer(MemRef), + emqx_utils:cancel_timer(CpuRef), + ok. start_cpu_check_timer() -> Interval = emqx:get_config([sysmon, os, cpu_check_interval]), case erlang:system_info(system_architecture) of - "x86_64-pc-linux-musl" -> ok; + "x86_64-pc-linux-musl" -> undefined; _ -> start_timer(Interval, cpu_check) end. 
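This hunk and the next replace fire-and-forget timers with stored references so that the monitor_conf_update handler can cancel stale timers before re-arming new ones. Assuming emqx_utils:start_timer/2 wraps erlang:start_timer/3 the way emqx_misc did, the contract it relies on is plain OTP, sketched below (illustrative function, not part of the patch):

%% Sketch of the assumed timer round trip.
demo_timer() ->
    Ref = erlang:start_timer(5000, self(), mem_check),
    %% Holding Ref lets the owner cancel a pending timer on reconfiguration;
    %% otherwise it fires later as a {timeout, Ref, mem_check} message.
    _ = erlang:cancel_timer(Ref),
    ok.

Returning undefined instead of ok on the skip paths keeps the state map uniform; this presumes emqx_utils:cancel_timer/1 treats undefined as a no-op.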
@@ -191,12 +191,11 @@ start_mem_check_timer() -> true -> start_timer(Interval, mem_check); false -> - ok + undefined end. start_timer(Interval, Msg) -> - _ = emqx_misc:start_timer(Interval, Msg), - ok. + emqx_utils:start_timer(Interval, Msg). update_mem_alarm_status(HWM) when HWM > 1.0 orelse HWM < 0.0 -> ?SLOG(warning, #{msg => "discarded_out_of_range_mem_alarm_threshold", value => HWM}), @@ -223,7 +222,7 @@ do_update_mem_alarm_status(HWM0) -> }, usage_msg(Usage, mem) ); - _ -> + false -> ok = emqx_alarm:ensure_deactivated( high_system_memory_usage, #{ @@ -236,5 +235,5 @@ do_update_mem_alarm_status(HWM0) -> ok. usage_msg(Usage, What) -> - %% devide by 1.0 to ensure float point number + %% divide by 1.0 to ensure float point number iolist_to_binary(io_lib:format("~.2f% ~p usage", [Usage / 1.0, What])). diff --git a/apps/emqx/src/emqx_packet.erl b/apps/emqx/src/emqx_packet.erl index 8f539563e..32bd3df53 100644 --- a/apps/emqx/src/emqx_packet.erl +++ b/apps/emqx/src/emqx_packet.erl @@ -16,6 +16,8 @@ -module(emqx_packet). +-elvis([{elvis_style, no_spec_with_records, disable}]). + -include("emqx.hrl"). -include("emqx_mqtt.hrl"). @@ -475,9 +477,13 @@ format(Packet) -> format(Packet, emqx_trace_handler:payload_encode()). format(#mqtt_packet{header = Header, variable = Variable, payload = Payload}, PayloadEncode) -> HeaderIO = format_header(Header), case format_variable(Variable, Payload, PayloadEncode) of - "" -> HeaderIO; - VarIO -> [HeaderIO, ",", VarIO] - end. + "" -> [HeaderIO, ")"]; + VarIO -> [HeaderIO, ", ", VarIO, ")"] + end; +%% receive a frame error packet, such as {frame_error,frame_too_large} or +%% {frame_error,#{expected => <<"'MQTT' or 'MQIsdp'">>,hint => invalid_proto_name,received => <<"bad_name">>}} +format(FrameError, _PayloadEncode) -> + lists:flatten(io_lib:format("~tp", [FrameError])). format_header(#mqtt_packet_header{ type = Type, @@ -485,14 +491,14 @@ format_header(#mqtt_packet_header{ qos = QoS, retain = Retain }) -> - io_lib:format("~ts(Q~p, R~p, D~p)", [type_name(Type), QoS, i(Retain), i(Dup)]). + io_lib:format("~ts(Q~p, R~p, D~p", [type_name(Type), QoS, i(Retain), i(Dup)]). format_variable(undefined, _, _) -> ""; format_variable(Variable, undefined, PayloadEncode) -> format_variable(Variable, PayloadEncode); format_variable(Variable, Payload, PayloadEncode) -> - [format_variable(Variable, PayloadEncode), format_payload(Payload, PayloadEncode)]. + [format_variable(Variable, PayloadEncode), ", ", format_payload(Payload, PayloadEncode)]. format_variable( #mqtt_packet_connect{ @@ -599,7 +605,8 @@ format_variable(#mqtt_packet_auth{reason_code = ReasonCode}, _) -> format_variable(PacketId, _) when is_integer(PacketId) -> io_lib:format("PacketId=~p", [PacketId]). -format_password(undefined) -> "undefined"; +format_password(undefined) -> ""; +format_password(<<>>) -> ""; format_password(_Password) -> "******". format_payload(Payload, text) -> ["Payload=", io_lib:format("~ts", [Payload])]; diff --git a/apps/emqx/src/emqx_passwd.erl b/apps/emqx/src/emqx_passwd.erl index 0d264f45f..dc940645b 100644 --- a/apps/emqx/src/emqx_passwd.erl +++ b/apps/emqx/src/emqx_passwd.erl @@ -57,22 +57,27 @@ %% APIs %%-------------------------------------------------------------------- --spec check_pass(hash_params(), password_hash(), password()) -> boolean(). -check_pass({pbkdf2, MacFun, Salt, Iterations, DKLength}, PasswordHash, Password) -> +-spec check_pass(hash_params(), password_hash(), password() | undefined) -> boolean(). 
+check_pass(_Algo, _Hash, undefined) ->
+    false;
+check_pass(Algo, Hash, Password) ->
+    do_check_pass(Algo, Hash, Password).
+
+do_check_pass({pbkdf2, MacFun, Salt, Iterations, DKLength}, PasswordHash, Password) ->
     case pbkdf2(MacFun, Password, Salt, Iterations, DKLength) of
         {ok, HashPasswd} ->
             compare_secure(hex(HashPasswd), PasswordHash);
         {error, _Reason} ->
             false
     end;
-check_pass({bcrypt, Salt}, PasswordHash, Password) ->
+do_check_pass({bcrypt, Salt}, PasswordHash, Password) ->
     case bcrypt:hashpw(Password, Salt) of
         {ok, HashPasswd} ->
             compare_secure(list_to_binary(HashPasswd), PasswordHash);
         {error, _Reason} ->
             false
     end;
-check_pass({_SimpleHash, _Salt, _SaltPosition} = HashParams, PasswordHash, Password) ->
+do_check_pass({_SimpleHash, _Salt, _SaltPosition} = HashParams, PasswordHash, Password) ->
     Hash = hash(HashParams, Password),
     compare_secure(Hash, PasswordHash).
diff --git a/apps/emqx/src/emqx_pool.erl b/apps/emqx/src/emqx_pool.erl
index 1691a533a..1cb5f429c 100644
--- a/apps/emqx/src/emqx_pool.erl
+++ b/apps/emqx/src/emqx_pool.erl
@@ -57,7 +57,7 @@
 -spec start_link(atom(), pos_integer()) -> startlink_ret().
 start_link(Pool, Id) ->
     gen_server:start_link(
-        {local, emqx_misc:proc_name(?MODULE, Id)},
+        {local, emqx_utils:proc_name(?MODULE, Id)},
         ?MODULE,
         [Pool, Id],
         [{hibernate_after, 1000}]
diff --git a/apps/emqx/src/emqx_quic_connection.erl b/apps/emqx/src/emqx_quic_connection.erl
index 9a2589a3a..a77ec28f2 100644
--- a/apps/emqx/src/emqx_quic_connection.erl
+++ b/apps/emqx/src/emqx_quic_connection.erl
@@ -14,60 +14,282 @@
 %% limitations under the License.
 %%--------------------------------------------------------------------
 
+%% @doc Implements the QUIC connection owner process.
 -module(emqx_quic_connection).
 
 -ifndef(BUILD_WITHOUT_QUIC).
--include_lib("quicer/include/quicer.hrl").
--else.
--define(QUIC_CONNECTION_SHUTDOWN_FLAG_NONE, 0).
--endif.
-%% Callbacks
+-include("logger.hrl").
+-include_lib("quicer/include/quicer.hrl").
+-include_lib("emqx/include/emqx_quic.hrl").
+
+-behaviour(quicer_connection).
+
 -export([
     init/1,
-    new_conn/2,
-    connected/2,
-    shutdown/2
+    new_conn/3,
+    connected/3,
+    transport_shutdown/3,
+    shutdown/3,
+    closed/3,
+    local_address_changed/3,
+    peer_address_changed/3,
+    streams_available/3,
+    peer_needs_streams/3,
+    resumed/3,
+    new_stream/3
 ]).
 
--type cb_state() :: map() | proplists:proplist().
+-export([activate_data_streams/2]).
 
--spec init(cb_state()) -> cb_state().
-init(ConnOpts) when is_list(ConnOpts) ->
-    init(maps:from_list(ConnOpts));
+-export([
+    handle_call/3,
+    handle_info/2
+]).
+
+-type cb_state() :: #{
+    %% connection owner pid
+    conn_pid := pid(),
+    %% pid of the ctrl stream
+    ctrl_pid := undefined | pid(),
+    %% QUIC connection handle
+    conn := undefined | quicer:connection_handle(),
+    %% Data streams that were handed off from this process;
+    %% these streams can die/close without affecting the connection/session.
+    %% @TODO type?
+    streams := [{pid(), quicer:stream_handle()}],
+    %% New stream opts
+    stream_opts := map(),
+    %% If the connection is resumed from a session ticket
+    is_resumed => boolean(),
+    %% MQTT message serializer config
+    serialize => undefined,
+    _ => _
+}.
+-type cb_ret() :: quicer_lib:cb_ret().
+
+%% @doc Data stream initialization is started in parallel with the control stream; data
+%% streams are blocked until the control stream activates them, once it is accepted as a
+%% legitimate connection.
+%% For security, the initial number of allowed data streams from the client should be
+%% limited by `peer_bidi_stream_count' & `peer_unidi_stream_count'
+-spec activate_data_streams(pid(), {
+    emqx_frame:parse_state(), emqx_frame:serialize_opts(), emqx_channel:channel()
+}) -> ok.
+activate_data_streams(ConnOwner, {PS, Serialize, Channel}) ->
+    gen_server:call(ConnOwner, {activate_data_streams, {PS, Serialize, Channel}}, infinity).
+
+%% @doc connection owner init callback
+-spec init(map()) -> {ok, cb_state()}.
+init(#{stream_opts := SOpts} = S) when is_list(SOpts) ->
+    init(S#{stream_opts := maps:from_list(SOpts)});
 init(ConnOpts) when is_map(ConnOpts) ->
-    ConnOpts.
+    {ok, init_cb_state(ConnOpts)}.
 
--spec new_conn(quicer:connection_handler(), cb_state()) -> {ok, cb_state()} | {error, any()}.
-new_conn(Conn, #{zone := Zone} = S) ->
+-spec closed(quicer:connection_handle(), quicer:conn_closed_props(), cb_state()) ->
+    {stop, normal, cb_state()}.
+closed(_Conn, #{is_peer_acked := _} = Prop, S) ->
+    ?SLOG(debug, Prop),
+    {stop, normal, S}.
+
+%% @doc handle the new incoming connection as the connection acceptor.
+-spec new_conn(quicer:connection_handle(), quicer:new_conn_props(), cb_state()) ->
+    {ok, cb_state()} | {error, any(), cb_state()}.
+new_conn(
+    Conn,
+    #{version := _Vsn} = ConnInfo,
+    #{zone := Zone, conn := undefined, ctrl_pid := undefined} = S
+) ->
     process_flag(trap_exit, true),
+    ?SLOG(debug, ConnInfo),
     case emqx_olp:is_overloaded() andalso is_zone_olp_enabled(Zone) of
         false ->
-            {ok, Pid} = emqx_connection:start_link(emqx_quic_stream, {self(), Conn}, S),
+            %% Start control stream process
+            StartOption = S,
+            {ok, CtrlPid} = emqx_connection:start_link(
+                emqx_quic_stream,
+                {self(), Conn, maps:without([crypto_buffer], ConnInfo)},
+                StartOption
+            ),
             receive
-                {Pid, stream_acceptor_ready} ->
+                {CtrlPid, stream_acceptor_ready} ->
                     ok = quicer:async_handshake(Conn),
-                    {ok, S};
-                {'EXIT', Pid, _Reason} ->
-                    {error, stream_accept_error}
+                    {ok, S#{conn := Conn, ctrl_pid := CtrlPid}};
+                {'EXIT', _Pid, _Reason} ->
+                    {stop, stream_accept_error, S}
            end;
        true ->
            emqx_metrics:inc('olp.new_conn'),
-            {error, overloaded}
+            _ = quicer:async_shutdown_connection(
+                Conn,
+                ?QUIC_CONNECTION_SHUTDOWN_FLAG_NONE,
+                ?MQTT_QUIC_CONN_ERROR_OVERLOADED
+            ),
+            {stop, normal, S}
     end.
 
--spec connected(quicer:connection_handler(), cb_state()) -> {ok, cb_state()} | {error, any()}.
-connected(Conn, #{slow_start := false} = S) ->
-    {ok, _Pid} = emqx_connection:start_link(emqx_quic_stream, Conn, S),
+%% @doc callback when connection is connected.
+-spec connected(quicer:connection_handle(), quicer:connected_props(), cb_state()) ->
+    {ok, cb_state()} | {error, any(), cb_state()}.
+connected(_Conn, Props, S) ->
+    ?SLOG(debug, Props),
+    {ok, S}.
+
+%% @doc callback when connection is resumed from 0-RTT
+-spec resumed(quicer:connection_handle(), SessionData :: binary() | false, cb_state()) -> cb_ret().
+%% Reserved: resume the connection with a callback.
+%% resumed(Conn, Data, #{resumed_callback := ResumeFun} = S) when
+%%     is_function(ResumeFun)
+%% ->
+%%     ResumeFun(Conn, Data, S);
+resumed(_Conn, _Data, S) ->
+    {ok, S#{is_resumed := true}}.
+
+%% @doc callback for handling orphan data streams;
+%% depends on the connection state and the control stream state.
+-spec new_stream(quicer:stream_handle(), quicer:new_stream_props(), cb_state()) -> cb_ret().
+new_stream(
+    Stream,
+    #{is_orphan := true, flags := _Flags} = Props,
+    #{
+        conn := Conn,
+        streams := Streams,
+        stream_opts := SOpts,
+        zone := Zone,
+        limiter := Limiter,
+        parse_state := PS,
+        channel := Channel,
+        serialize := Serialize
+    } = S
+) ->
+    %% Cherry-pick options for data streams
+    SOpts1 = SOpts#{
+        is_local => false,
+        zone => Zone,
+        % unused
+        limiter => Limiter,
+        parse_state => PS,
+        channel => Channel,
+        serialize => Serialize,
+        quic_event_mask => ?QUICER_STREAM_EVENT_MASK_START_COMPLETE
+    },
+    {ok, NewStreamOwner} = quicer_stream:start_link(
+        emqx_quic_data_stream,
+        Stream,
+        Conn,
+        SOpts1,
+        Props
+    ),
+    case quicer:handoff_stream(Stream, NewStreamOwner, {PS, Serialize, Channel}) of
+        ok ->
+            ok;
+        E ->
+            %% Only log; keep the connection alive.
+            ?SLOG(error, #{message => "new stream handoff failed", stream => Stream, error => E})
+    end,
+    %% @TODO maybe keep them in `inactive_streams'
+    {ok, S#{streams := [{NewStreamOwner, Stream} | Streams]}}.
+
+%% @doc callback for handling remote connection shutdown.
+-spec shutdown(quicer:connection_handle(), quicer:error_code(), cb_state()) -> cb_ret().
+shutdown(Conn, ErrorCode, S) ->
+    ErrorCode =/= 0 andalso ?SLOG(debug, #{error_code => ErrorCode, state => S}),
+    _ = quicer:async_shutdown_connection(Conn, ?QUIC_CONNECTION_SHUTDOWN_FLAG_NONE, 0),
+    {ok, S}.
+
+%% @doc callback for handling transport errors, such as idle timeout
+-spec transport_shutdown(quicer:connection_handle(), quicer:transport_shutdown_props(), cb_state()) ->
+    cb_ret().
+transport_shutdown(_C, DownInfo, S) when is_map(DownInfo) ->
+    ?SLOG(debug, DownInfo),
+    {ok, S}.
+
+%% @doc callback for handling peer address changes.
+-spec peer_address_changed(quicer:connection_handle(), quicer:quicer_addr(), cb_state()) -> cb_ret().
+peer_address_changed(_C, _NewAddr, S) ->
+    %% @TODO update conn info in emqx_quic_stream
+    {ok, S}.
+
+%% @doc callback for handling local address changes; currently unused
+-spec local_address_changed(quicer:connection_handle(), quicer:quicer_addr(), cb_state()) ->
+    cb_ret().
+local_address_changed(_C, _NewAddr, S) ->
+    {ok, S}.
+
+%% @doc callback for handling remote stream limit updates
+-spec streams_available(
+    quicer:connection_handle(),
+    {BidirStreams :: non_neg_integer(), UnidirStreams :: non_neg_integer()},
+    cb_state()
+) -> cb_ret().
+streams_available(_C, {BidirCnt, UnidirCnt}, S) ->
+    {ok, S#{
+        peer_bidi_stream_count => BidirCnt,
+        peer_unidi_stream_count => UnidirCnt
+    }}.
+
+%% @doc callback for handling requests when the remote peer wants more streams;
+%% should cope with rate limiting
+%% @TODO this is not going to get triggered in current version
+%% ref: https://github.com/microsoft/msquic/issues/3120
+-spec peer_needs_streams(quicer:connection_handle(), undefined, cb_state()) -> cb_ret().
+peer_needs_streams(_C, undefined, S) ->
+    ?SLOG(info, #{
+        msg => "ignore: peer needs more streams", info => maps:with([conn_pid, ctrl_pid], S)
+    }),
+    {ok, S}.
+
+%% @doc handle API calls
+-spec handle_call(Req :: term(), gen_server:from(), cb_state()) -> cb_ret().
+handle_call(
+    {activate_data_streams, {PS, Serialize, Channel} = ActivateData},
+    _From,
+    #{streams := Streams} = S
+) ->
+    _ = [
+        %% Try to activate streams individually; if activation fails, the stream
+        %% shuts down on its own. We don't care about the return value here.
+        %% Note: this is only used after the control stream passes validation; the data
+        %% streams handled here are assured to be inactive (data processing hasn't started).
+ catch emqx_quic_data_stream:activate_data(OwnerPid, ActivateData) + || {OwnerPid, _Stream} <- Streams + ], + {reply, ok, S#{ + channel := Channel, + serialize := Serialize, + parse_state := PS + }}; +handle_call(_Req, _From, S) -> + {reply, {error, unimpl}, S}. + +%% @doc handle DOWN messages from streams. +handle_info({'EXIT', Pid, Reason}, #{ctrl_pid := Pid, conn := Conn} = S) -> + Code = + case Reason of + normal -> + ?MQTT_QUIC_CONN_NOERROR; + _ -> + ?MQTT_QUIC_CONN_ERROR_CTRL_STREAM_DOWN + end, + _ = quicer:async_shutdown_connection(Conn, ?QUIC_CONNECTION_SHUTDOWN_FLAG_NONE, Code), {ok, S}; -connected(_Conn, S) -> - {ok, S}. - --spec shutdown(quicer:connection_handler(), cb_state()) -> {ok, cb_state()} | {error, any()}. -shutdown(Conn, S) -> - quicer:async_shutdown_connection(Conn, ?QUIC_CONNECTION_SHUTDOWN_FLAG_NONE, 0), - {ok, S}. +handle_info({'EXIT', Pid, Reason}, #{streams := Streams} = S) -> + case proplists:is_defined(Pid, Streams) of + true when + Reason =:= normal orelse + Reason =:= {shutdown, protocol_error} orelse + Reason =:= killed + -> + {ok, S}; + true -> + ?SLOG(info, #{message => "Data stream unexpected exit", reason => Reason}), + {ok, S}; + false -> + {stop, unknown_pid_down, S} + end. +%%% +%%% Internals +%%% -spec is_zone_olp_enabled(emqx_types:zone()) -> boolean(). is_zone_olp_enabled(Zone) -> case emqx_config:get_zone_conf(Zone, [overload_protection]) of @@ -76,3 +298,20 @@ is_zone_olp_enabled(Zone) -> _ -> false end. + +-spec init_cb_state(map()) -> cb_state(). +init_cb_state(#{zone := _Zone} = Map) -> + Map#{ + conn_pid => self(), + ctrl_pid => undefined, + conn => undefined, + streams => [], + parse_state => undefined, + channel => undefined, + serialize => undefined, + is_resumed => false + }. + +%% BUILD_WITHOUT_QUIC +-else. +-endif. diff --git a/apps/emqx/src/emqx_quic_data_stream.erl b/apps/emqx/src/emqx_quic_data_stream.erl new file mode 100644 index 000000000..0b89870a8 --- /dev/null +++ b/apps/emqx/src/emqx_quic_data_stream.erl @@ -0,0 +1,469 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +%% +%% @doc QUIC data stream +%% Following the behaviour of emqx_connection: +%% The MQTT packets and their side effects are handled *atomically*. +%% + +-module(emqx_quic_data_stream). + +-ifndef(BUILD_WITHOUT_QUIC). +-behaviour(quicer_remote_stream). + +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). +-include_lib("quicer/include/quicer.hrl"). +-include("emqx_mqtt.hrl"). +-include("logger.hrl"). + +%% Connection Callbacks +-export([ + init_handoff/4, + post_handoff/3, + send_complete/3, + peer_send_shutdown/3, + peer_send_aborted/3, + peer_receive_aborted/3, + send_shutdown_complete/3, + stream_closed/3, + passive/3 +]). + +-export([handle_stream_data/4]). + +%% gen_server API +-export([activate_data/2]). 
+
+-export([
+    handle_call/3,
+    handle_info/2,
+    handle_continue/2
+]).
+
+-type cb_ret() :: quicer_stream:cb_ret().
+-type cb_state() :: quicer_stream:cb_state().
+-type error_code() :: quicer:error_code().
+-type connection_handle() :: quicer:connection_handle().
+-type stream_handle() :: quicer:stream_handle().
+-type handoff_data() :: {
+    emqx_frame:parse_state() | undefined,
+    emqx_frame:serialize_opts() | undefined,
+    emqx_channel:channel() | undefined
+}.
+%%
+%% @doc Activate data handling.
+%% Note: data handling is disabled until validation over the control stream has finished.
+-spec activate_data(pid(), {
+    emqx_frame:parse_state(), emqx_frame:serialize_opts(), emqx_channel:channel()
+}) -> ok.
+activate_data(StreamPid, {PS, Serialize, Channel}) ->
+    gen_server:call(StreamPid, {activate, {PS, Serialize, Channel}}, infinity).
+
+%%
+%% @doc Handoff from the previous owner, the connection owner process.
+%% Note: unlike the control stream, there is no acceptor for data streams.
+%% The connection owner gets the new stream, spawns a new process, and then
+%% hands the stream over to it.
+%%
+-spec init_handoff(stream_handle(), map(), connection_handle(), quicer:new_stream_props()) ->
+    {ok, cb_state()}.
+init_handoff(
+    Stream,
+    _StreamOpts,
+    Connection,
+    #{is_orphan := true, flags := Flags}
+) ->
+    {ok, init_state(Stream, Connection, Flags)}.
+
+%%
+%% @doc Post-handoff data stream initialization.
+%%
+-spec post_handoff(stream_handle(), handoff_data(), cb_state()) -> cb_ret().
+post_handoff(_Stream, {undefined = _PS, undefined = _Serialize, undefined = _Channel}, S) ->
+    %% When the channel isn't ready yet,
+    %% the data stream should wait for the activate call via ?MODULE:activate_data/2
+    {ok, S};
+post_handoff(Stream, {PS, Serialize, Channel}, S) ->
+    ?tp(debug, ?FUNCTION_NAME, #{channel => Channel, serialize => Serialize}),
+    _ = quicer:setopt(Stream, active, 10),
+    {ok, S#{channel := Channel, serialize := Serialize, parse_state := PS}}.
+
+-spec peer_receive_aborted(stream_handle(), error_code(), cb_state()) -> cb_ret().
+peer_receive_aborted(Stream, ErrorCode, #{is_unidir := _} = S) ->
+    %% we abort send with the same reason
+    _ = quicer:async_shutdown_stream(Stream, ?QUIC_STREAM_SHUTDOWN_FLAG_ABORT, ErrorCode),
+    {ok, S}.
+
+-spec peer_send_aborted(stream_handle(), error_code(), cb_state()) -> cb_ret().
+peer_send_aborted(Stream, ErrorCode, #{is_unidir := _} = S) ->
+    %% we abort receive with the same reason
+    _ = quicer:async_shutdown_stream(Stream, ?QUIC_STREAM_SHUTDOWN_FLAG_ABORT_RECEIVE, ErrorCode),
+    {ok, S}.
+
+-spec peer_send_shutdown(stream_handle(), undefined, cb_state()) -> cb_ret().
+peer_send_shutdown(Stream, undefined, S) ->
+    ok = quicer:async_shutdown_stream(Stream, ?QUIC_STREAM_SHUTDOWN_FLAG_GRACEFUL, 0),
+    {ok, S}.
+
+-spec send_complete(stream_handle(), IsCanceled :: boolean(), cb_state()) -> cb_ret().
+send_complete(_Stream, false, S) ->
+    {ok, S};
+send_complete(_Stream, true = _IsCanceled, S) ->
+    {ok, S}.
+
+-spec send_shutdown_complete(stream_handle(), error_code(), cb_state()) -> cb_ret().
+send_shutdown_complete(_Stream, _Flags, S) ->
+    {ok, S}.
+
+-spec handle_stream_data(stream_handle(), binary(), quicer:recv_data_props(), cb_state()) ->
+    cb_ret().
+handle_stream_data(
+    _Stream,
+    Bin,
+    _Flags,
+    #{
+        is_unidir := false,
+        channel := Channel,
+        parse_state := PS,
+        data_queue := QueuedData,
+        task_queue := TQ
+    } = State
+) when
+    %% assert: stream data is received only after the channel is created
+    Channel =/= undefined
+->
+    {MQTTPackets, NewPS} = parse_incoming(list_to_binary(lists:reverse([Bin | QueuedData])), PS),
+    NewTQ = lists:foldl(
+        fun(Item, Acc) ->
+            queue:in(Item, Acc)
+        end,
+        TQ,
+        [{incoming, P} || P <- lists:reverse(MQTTPackets)]
+    ),
+    {{continue, handle_appl_msg}, State#{parse_state := NewPS, task_queue := NewTQ}}.
+
+-spec passive(stream_handle(), undefined, cb_state()) -> cb_ret().
+passive(Stream, undefined, S) ->
+    _ = quicer:setopt(Stream, active, 10),
+    {ok, S}.
+
+-spec stream_closed(stream_handle(), quicer:stream_closed_props(), cb_state()) -> cb_ret().
+stream_closed(
+    _Stream,
+    #{
+        is_conn_shutdown := IsConnShutdown,
+        is_app_closing := IsAppClosing,
+        is_shutdown_by_app := IsAppShutdown,
+        is_closed_remotely := IsRemote,
+        status := Status,
+        error := Code
+    },
+    S
+) when
+    is_boolean(IsConnShutdown) andalso
+        is_boolean(IsAppClosing) andalso
+        is_boolean(IsAppShutdown) andalso
+        is_boolean(IsRemote) andalso
+        is_atom(Status) andalso
+        is_integer(Code)
+->
+    {stop, normal, S}.
+
+-spec handle_call(Request :: term(), From :: {pid(), term()}, cb_state()) -> cb_ret().
+handle_call(Call, _From, S) ->
+    do_handle_call(Call, S).
+
+-spec handle_continue(Continue :: term(), cb_state()) -> cb_ret().
+handle_continue(handle_appl_msg, #{task_queue := Q} = S) ->
+    case queue:out(Q) of
+        {{value, Item}, Q2} ->
+            do_handle_appl_msg(Item, S#{task_queue := Q2});
+        {empty, _Q} ->
+            {ok, S}
+    end.
+
+%%% Internals
+do_handle_appl_msg(
+    {outgoing, Packets},
+    #{
+        channel := Channel,
+        stream := _Stream,
+        serialize := _Serialize
+    } = S
+) when
+    Channel =/= undefined
+->
+    case handle_outgoing(Packets, S) of
+        {ok, Size} ->
+            ok = emqx_metrics:inc('bytes.sent', Size),
+            {{continue, handle_appl_msg}, S};
+        {error, E1, E2} ->
+            {stop, {E1, E2}, S};
+        {error, E} ->
+            {stop, E, S}
+    end;
+do_handle_appl_msg({incoming, #mqtt_packet{} = Packet}, #{channel := Channel} = S) when
+    Channel =/= undefined
+->
+    ok = inc_incoming_stats(Packet),
+    with_channel(handle_in, [Packet], S);
+do_handle_appl_msg({incoming, {frame_error, _} = FE}, #{channel := Channel} = S) when
+    Channel =/= undefined
+->
+    with_channel(handle_in, [FE], S);
+do_handle_appl_msg({close, Reason}, S) ->
+    %% @TODO shall we abort shutdown or graceful shutdown here?
+    with_channel(handle_info, [{sock_closed, Reason}], S);
+do_handle_appl_msg({event, updated}, S) ->
+    %% Data streams don't care about connection state changes.
+    {{continue, handle_appl_msg}, S}.
+
+handle_info(Deliver = {deliver, _, _}, S) ->
+    Delivers = [Deliver],
+    with_channel(handle_deliver, [Delivers], S);
+handle_info({timeout, Ref, Msg}, S) ->
+    with_channel(handle_timeout, [Ref, Msg], S);
+handle_info(Info, State) ->
+    with_channel(handle_info, [Info], State).
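handle_stream_data/4 above and with_channel/3 below both enqueue work and return {continue, handle_appl_msg}; handle_continue/2 then pops exactly one task per step. The point of the pattern is that the continue callback runs immediately after the current callback returns, before any new mailbox message is taken, so each MQTT packet and its side effects complete atomically while the process can still stop cleanly mid-queue. A stripped-down sketch of just that loop (hypothetical module, not part of the patch):

-module(continue_drain_sketch).
-behaviour(gen_server).
-export([init/1, handle_call/3, handle_cast/2, handle_continue/2]).

init(Tasks) ->
    {ok, queue:from_list(Tasks), {continue, drain}}.

handle_continue(drain, Q0) ->
    case queue:out(Q0) of
        {{value, Task}, Q} ->
            %% one task per step; re-issue the continue for the rest
            io:format("handled ~p~n", [Task]),
            {noreply, Q, {continue, drain}};
        {empty, _} ->
            {noreply, Q0}
    end.

handle_call(_Req, _From, Q) -> {reply, ok, Q}.
handle_cast(_Msg, Q) -> {noreply, Q}.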
+
+with_channel(Fun, Args, #{channel := Channel, task_queue := Q} = S) when
+    Channel =/= undefined
+->
+    case apply(emqx_channel, Fun, Args ++ [Channel]) of
+        ok ->
+            {{continue, handle_appl_msg}, S};
+        {ok, Msgs, NewChannel} when is_list(Msgs) ->
+            {{continue, handle_appl_msg}, S#{
+                task_queue := queue:join(Q, queue:from_list(Msgs)),
+                channel := NewChannel
+            }};
+        {ok, Msg, NewChannel} when is_record(Msg, mqtt_packet) ->
+            {{continue, handle_appl_msg}, S#{
+                task_queue := queue:in({outgoing, Msg}, Q), channel := NewChannel
+            }};
+        %% @FIXME WTH?
+        {ok, {outgoing, _} = Msg, NewChannel} ->
+            {{continue, handle_appl_msg}, S#{task_queue := queue:in(Msg, Q), channel := NewChannel}};
+        {ok, NewChannel} ->
+            {{continue, handle_appl_msg}, S#{channel := NewChannel}};
+        %% @TODO optimisation for shutdown wrap
+        {shutdown, Reason, NewChannel} ->
+            {stop, {shutdown, Reason}, S#{channel := NewChannel}};
+        {shutdown, Reason, Msgs, NewChannel} when is_list(Msgs) ->
+            %% @TODO handle outgoing?
+            {stop, {shutdown, Reason}, S#{
+                channel := NewChannel,
+                task_queue := queue:join(Q, queue:from_list(Msgs))
+            }};
+        {shutdown, Reason, Msg, NewChannel} ->
+            {stop, {shutdown, Reason}, S#{
+                channel := NewChannel,
+                task_queue := queue:in(Msg, Q)
+            }}
+    end.
+
+handle_outgoing(#mqtt_packet{} = P, S) ->
+    handle_outgoing([P], S);
+handle_outgoing(Packets, #{serialize := Serialize, stream := Stream, is_unidir := false}) when
+    is_list(Packets)
+->
+    OutBin = [serialize_packet(P, Serialize) || P <- filter_disallowed_out(Packets)],
+    %% Send data asynchronously, but still get send feedback via {quic, send_complete, ...}
+    Res = quicer:async_send(Stream, OutBin, ?QUICER_SEND_FLAG_SYNC),
+    ?TRACE("MQTT", "mqtt_packet_sent", #{packets => Packets}),
+    [ok = inc_outgoing_stats(P) || P <- Packets],
+    Res.
+
+serialize_packet(Packet, Serialize) ->
+    try emqx_frame:serialize_pkt(Packet, Serialize) of
+        <<>> ->
+            ?SLOG(warning, #{
+                msg => "packet_is_discarded",
+                reason => "frame_is_too_large",
+                packet => emqx_packet:format(Packet, hidden)
+            }),
+            ok = emqx_metrics:inc('delivery.dropped.too_large'),
+            ok = emqx_metrics:inc('delivery.dropped'),
+            ok = inc_outgoing_stats({error, message_too_large}),
+            <<>>;
+        Data ->
+            Data
+    catch
+        %% May never happen.
+        throw:{?FRAME_SERIALIZE_ERROR, Reason} ->
+            ?SLOG(info, #{
+                reason => Reason,
+                input_packet => Packet
+            }),
+            erlang:error({?FRAME_SERIALIZE_ERROR, Reason});
+        error:Reason:Stacktrace ->
+            ?SLOG(error, #{
+                input_packet => Packet,
+                exception => Reason,
+                stacktrace => Stacktrace
+            }),
+            erlang:error(?FRAME_SERIALIZE_ERROR)
+    end.
+
+-spec init_state(
+    quicer:stream_handle(),
+    quicer:connection_handle(),
+    quicer:new_stream_props()
+) ->
+    % @TODO
+    map().
+init_state(Stream, Connection, OpenFlags) ->
+    init_state(Stream, Connection, OpenFlags, undefined).
+
+init_state(Stream, Connection, OpenFlags, PS) ->
+    %% quic stream handle
+    #{
+        stream => Stream,
+        %% quic connection handle
+        conn => Connection,
+        %% if it is a QUIC unidirectional stream
+        is_unidir => quicer:is_unidirectional(OpenFlags),
+        %% Frame parse state
+        parse_state => PS,
+        %% Peer stream handle in a pair, for the unidirectional type only
+        peer_stream => undefined,
+        %% if the stream is locally initiated.
+        is_local => false,
+        %% queue binary data while NOT connected, in reversed order.
+        data_queue => [],
+        %% Channel from the connection;
+        %% `undefined' means the connection is not connected.
+        channel => undefined,
+        %% serialize opts for the connection
+        serialize => undefined,
+        %% Current working queue
+        task_queue => queue:new()
+    }.
+
+-spec do_handle_call(term(), cb_state()) -> cb_ret().
+do_handle_call(
+    {activate, {PS, Serialize, Channel}},
+    #{
+        channel := undefined,
+        stream := Stream,
+        serialize := undefined
+    } = S
+) ->
+    NewS = S#{channel := Channel, serialize := Serialize, parse_state := PS},
+    %% We rely on the QUIC protocol for flow control.
+    case quicer:setopt(Stream, active, true) of
+        ok ->
+            {reply, ok, NewS};
+        {error, E} ->
+            ?SLOG(error, #{msg => "set stream active failed", error => E}),
+            {stop, E, NewS}
+    end;
+do_handle_call(_Call, _S) ->
+    {error, unimpl}.
+
+%% @doc Returns the parsed packets in reversed order.
+parse_incoming(Data, PS) ->
+    try
+        do_parse_incoming(Data, [], PS)
+    catch
+        throw:{?FRAME_PARSE_ERROR, Reason} ->
+            ?SLOG(info, #{
+                reason => Reason,
+                input_bytes => Data
+            }),
+            {[{frame_error, Reason}], PS};
+        error:Reason:Stacktrace ->
+            ?SLOG(error, #{
+                input_bytes => Data,
+                reason => Reason,
+                stacktrace => Stacktrace
+            }),
+            {[{frame_error, Reason}], PS}
+    end.
+
+do_parse_incoming(<<>>, Packets, ParseState) ->
+    {Packets, ParseState};
+do_parse_incoming(Data, Packets, ParseState) ->
+    case emqx_frame:parse(Data, ParseState) of
+        {more, NParseState} ->
+            {Packets, NParseState};
+        {ok, Packet, Rest, NParseState} ->
+            do_parse_incoming(Rest, [Packet | Packets], NParseState)
+    end.
+
+%% The following helpers are copied from emqx_connection
+-compile({inline, [inc_incoming_stats/1]}).
+inc_incoming_stats(Packet = ?PACKET(Type)) ->
+    inc_counter(recv_pkt, 1),
+    case Type =:= ?PUBLISH of
+        true ->
+            inc_counter(recv_msg, 1),
+            inc_qos_stats(recv_msg, Packet),
+            inc_counter(incoming_pubs, 1);
+        false ->
+            ok
+    end,
+    emqx_metrics:inc_recv(Packet).
+
+-compile({inline, [inc_outgoing_stats/1]}).
+inc_outgoing_stats({error, message_too_large}) ->
+    inc_counter('send_msg.dropped', 1),
+    inc_counter('send_msg.dropped.too_large', 1);
+inc_outgoing_stats(Packet = ?PACKET(Type)) ->
+    inc_counter(send_pkt, 1),
+    case Type of
+        ?PUBLISH ->
+            inc_counter(send_msg, 1),
+            inc_counter(outgoing_pubs, 1),
+            inc_qos_stats(send_msg, Packet);
+        _ ->
+            ok
+    end,
+    emqx_metrics:inc_sent(Packet).
+
+inc_counter(Key, Inc) ->
+    _ = emqx_pd:inc_counter(Key, Inc),
+    ok.
+
+inc_qos_stats(Type, Packet) ->
+    case inc_qos_stats_key(Type, emqx_packet:qos(Packet)) of
+        undefined ->
+            ignore;
+        Key ->
+            inc_counter(Key, 1)
+    end.
+
+inc_qos_stats_key(send_msg, ?QOS_0) -> 'send_msg.qos0';
+inc_qos_stats_key(send_msg, ?QOS_1) -> 'send_msg.qos1';
+inc_qos_stats_key(send_msg, ?QOS_2) -> 'send_msg.qos2';
+inc_qos_stats_key(recv_msg, ?QOS_0) -> 'recv_msg.qos0';
+inc_qos_stats_key(recv_msg, ?QOS_1) -> 'recv_msg.qos1';
+inc_qos_stats_key(recv_msg, ?QOS_2) -> 'recv_msg.qos2';
+%% for bad QoS
+inc_qos_stats_key(_, _) -> undefined.
+
+filter_disallowed_out(Packets) ->
+    lists:filter(fun is_datastream_out_pkt/1, Packets).
+
+is_datastream_out_pkt(#mqtt_packet{header = #mqtt_packet_header{type = Type}}) when
+    Type > 2 andalso Type < 12
+->
+    true;
+is_datastream_out_pkt(_) ->
+    false.
+%% BUILD_WITHOUT_QUIC
+-else.
+-endif.
diff --git a/apps/emqx/src/emqx_quic_stream.erl b/apps/emqx/src/emqx_quic_stream.erl
index 567488862..f60345fe9 100644
--- a/apps/emqx/src/emqx_quic_stream.erl
+++ b/apps/emqx/src/emqx_quic_stream.erl
@@ -14,9 +14,18 @@
 %% limitations under the License.
%%-------------------------------------------------------------------- -%% MQTT/QUIC Stream +%% MQTT over QUIC +%% multistreams: This is the control stream. +%% single stream: This is the only main stream. +%% callbacks are from emqx_connection process rather than quicer_stream -module(emqx_quic_stream). +-ifndef(BUILD_WITHOUT_QUIC). + +-behaviour(quicer_remote_stream). + +-include("logger.hrl"). + %% emqx transport Callbacks -export([ type/1, @@ -31,44 +40,84 @@ sockname/1, peercert/1 ]). +-include_lib("quicer/include/quicer.hrl"). +-include_lib("emqx/include/emqx_quic.hrl"). -wait({ConnOwner, Conn}) -> +-type cb_ret() :: quicer_stream:cb_ret(). +-type cb_data() :: quicer_stream:cb_state(). +-type connection_handle() :: quicer:connection_handle(). +-type stream_handle() :: quicer:stream_handle(). + +-export([ + send_complete/3, + peer_send_shutdown/3, + peer_send_aborted/3, + peer_receive_aborted/3, + send_shutdown_complete/3, + stream_closed/3, + passive/3 +]). + +-export_type([socket/0]). + +-opaque socket() :: {quic, connection_handle(), stream_handle(), socket_info()}. + +-type socket_info() :: #{ + is_orphan => boolean(), + ctrl_stream_start_flags => quicer:stream_open_flags(), + %% and quicer:new_conn_props() + _ => _ +}. + +%%% For Accepting New Remote Stream +-spec wait({pid(), connection_handle(), socket_info()}) -> + {ok, socket()} | {error, enotconn}. +wait({ConnOwner, Conn, ConnInfo}) -> {ok, Conn} = quicer:async_accept_stream(Conn, []), ConnOwner ! {self(), stream_acceptor_ready}, receive - %% from msquic - {quic, new_stream, Stream} -> - {ok, {quic, Conn, Stream}}; + %% New incoming stream, this is a *control* stream + {quic, new_stream, Stream, #{is_orphan := IsOrphan, flags := StartFlags}} -> + SocketInfo = ConnInfo#{ + is_orphan => IsOrphan, + ctrl_stream_start_flags => StartFlags + }, + {ok, socket(Conn, Stream, SocketInfo)}; + %% connection closed event for stream acceptor + {quic, closed, undefined, undefined} -> + {error, enotconn}; + %% Connection owner process down {'EXIT', ConnOwner, _Reason} -> {error, enotconn} end. +-spec type(_) -> quic. type(_) -> quic. -peername({quic, Conn, _Stream}) -> +peername({quic, Conn, _Stream, _Info}) -> quicer:peername(Conn). -sockname({quic, Conn, _Stream}) -> +sockname({quic, Conn, _Stream, _Info}) -> quicer:sockname(Conn). peercert(_S) -> %% @todo but unsupported by msquic nossl. -getstat({quic, Conn, _Stream}, Stats) -> +getstat({quic, Conn, _Stream, _Info}, Stats) -> case quicer:getstat(Conn, Stats) of {error, _} -> {error, closed}; Res -> Res end. -setopts(Socket, Opts) -> +setopts({quic, _Conn, Stream, _Info}, Opts) -> lists:foreach( fun ({Opt, V}) when is_atom(Opt) -> - quicer:setopt(Socket, Opt, V); + quicer:setopt(Stream, Opt, V); (Opt) when is_atom(Opt) -> - quicer:setopt(Socket, Opt, true) + quicer:setopt(Stream, Opt, true) end, Opts ), @@ -84,9 +133,18 @@ getopts(_Socket, _Opts) -> {buffer, 80000} ]}. -fast_close({quic, _Conn, Stream}) -> - %% Flush send buffer, gracefully shutdown - quicer:async_shutdown_stream(Stream), +%% @TODO supply some App Error Code from caller +fast_close({ConnOwner, Conn, _ConnInfo}) when is_pid(ConnOwner) -> + %% handshake aborted. 
+ _ = quicer:async_shutdown_connection(Conn, ?QUIC_CONNECTION_SHUTDOWN_FLAG_NONE, 0), + ok; +fast_close({quic, _Conn, Stream, _Info}) -> + %% Force flush + _ = quicer:async_shutdown_stream(Stream), + %% @FIXME Since we shutdown the control stream, we shutdown the connection as well + %% *BUT* Msquic does not flush the send buffer if we shutdown the connection after + %% gracefully shutdown the stream. + % quicer:async_shutdown_connection(Conn, ?QUIC_CONNECTION_SHUTDOWN_FLAG_NONE, 0), ok. -spec ensure_ok_or_exit(atom(), list(term())) -> term(). @@ -102,8 +160,92 @@ ensure_ok_or_exit(Fun, Args = [Sock | _]) when is_atom(Fun), is_list(Args) -> Result end. -async_send({quic, _Conn, Stream}, Data, _Options) -> - case quicer:send(Stream, Data) of +async_send({quic, _Conn, Stream, _Info}, Data, _Options) -> + case quicer:async_send(Stream, Data, ?QUICER_SEND_FLAG_SYNC) of {ok, _Len} -> ok; + {error, X, Y} -> {error, {X, Y}}; Other -> Other end. + +%%% +%%% quicer stream callbacks +%%% + +-spec peer_receive_aborted(stream_handle(), non_neg_integer(), cb_data()) -> cb_ret(). +peer_receive_aborted(Stream, ErrorCode, S) -> + _ = quicer:async_shutdown_stream(Stream, ?QUIC_STREAM_SHUTDOWN_FLAG_ABORT, ErrorCode), + {ok, S}. + +-spec peer_send_aborted(stream_handle(), non_neg_integer(), cb_data()) -> cb_ret(). +peer_send_aborted(Stream, ErrorCode, S) -> + %% we abort receive with same reason + _ = quicer:async_shutdown_stream(Stream, ?QUIC_STREAM_SHUTDOWN_FLAG_ABORT, ErrorCode), + {ok, S}. + +-spec peer_send_shutdown(stream_handle(), undefined, cb_data()) -> cb_ret(). +peer_send_shutdown(Stream, undefined, S) -> + ok = quicer:async_shutdown_stream(Stream, ?QUIC_STREAM_SHUTDOWN_FLAG_GRACEFUL, 0), + {ok, S}. + +-spec send_complete(stream_handle(), boolean(), cb_data()) -> cb_ret(). +send_complete(_Stream, false, S) -> + {ok, S}; +send_complete(_Stream, true = _IsCancelled, S) -> + ?SLOG(error, #{message => "send cancelled"}), + {ok, S}. + +-spec send_shutdown_complete(stream_handle(), boolean(), cb_data()) -> cb_ret(). +send_shutdown_complete(_Stream, _IsGraceful, S) -> + {ok, S}. + +-spec passive(stream_handle(), undefined, cb_data()) -> cb_ret(). +passive(Stream, undefined, S) -> + case quicer:setopt(Stream, active, 10) of + ok -> ok; + Error -> ?SLOG(error, #{message => "set active error", error => Error}) + end, + {ok, S}. + +-spec stream_closed(stream_handle(), quicer:stream_closed_props(), cb_data()) -> + {{continue, term()}, cb_data()}. +stream_closed( + _Stream, + #{ + is_conn_shutdown := IsConnShutdown, + is_app_closing := IsAppClosing, + is_shutdown_by_app := IsAppShutdown, + is_closed_remotely := IsRemote, + status := Status, + error := Code + }, + S +) when + is_boolean(IsConnShutdown) andalso + is_boolean(IsAppClosing) andalso + is_boolean(IsAppShutdown) andalso + is_boolean(IsRemote) andalso + is_atom(Status) andalso + is_integer(Code) +-> + %% For now we fake a sock_closed for + %% emqx_connection:process_msg to append + %% a msg to be processed + Reason = + case Code of + ?MQTT_QUIC_CONN_NOERROR -> + normal; + _ -> + Status + end, + {{continue, {sock_closed, Reason}}, S}. + +%%% +%%% Internals +%%% +-spec socket(connection_handle(), stream_handle(), socket_info()) -> socket(). +socket(Conn, CtrlStream, Info) when is_map(Info) -> + {quic, Conn, CtrlStream, Info}. + +%% BUILD_WITHOUT_QUIC +-else. +-endif. 
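Both new QUIC modules close with the same conditional-build bracket: everything between -ifndef(BUILD_WITHOUT_QUIC) and the trailing -else. is compiled only when QUIC support is enabled, and defining the macro (for example with erlc -DBUILD_WITHOUT_QUIC) reduces the module to an empty stub. A minimal sketch of that skeleton (hypothetical module name):

-module(quic_gated_sketch).
-ifndef(BUILD_WITHOUT_QUIC).
-export([enabled/0]).
%% Compiled only when QUIC support is built in.
enabled() -> true.
%% BUILD_WITHOUT_QUIC
-else.
-endif.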
diff --git a/apps/emqx/src/emqx_release.erl b/apps/emqx/src/emqx_release.erl index 94a0bd9a1..4ecf8598b 100644 --- a/apps/emqx/src/emqx_release.erl +++ b/apps/emqx/src/emqx_release.erl @@ -31,11 +31,21 @@ ce => "EMQX" }). +-define(EMQX_REL_NAME, #{ + ee => <<"Enterprise">>, + ce => <<"Opensource">> +}). + -define(EMQX_REL_VSNS, #{ ee => ?EMQX_RELEASE_EE, ce => ?EMQX_RELEASE_CE }). +-define(EMQX_REL_VSN_PREFIX, #{ + ee => "e", + ce => "v" +}). + %% @doc Return EMQX description. description() -> maps:get(edition(), ?EMQX_DESCS). @@ -46,14 +56,18 @@ description() -> -spec edition() -> ce | ee. -ifdef(EMQX_RELEASE_EDITION). edition() -> ?EMQX_RELEASE_EDITION. -edition_vsn_prefix() -> "e". -edition_longstr() -> <<"Enterprise">>. -else. edition() -> ce. -edition_vsn_prefix() -> "v". -edition_longstr() -> <<"Opensource">>. -endif. +%% @doc Return EMQX version prefix string. +edition_vsn_prefix() -> + maps:get(edition(), ?EMQX_REL_VSN_PREFIX). + +%% @doc Return EMQX edition name, ee => Enterprise ce => Opensource. +edition_longstr() -> + maps:get(edition(), ?EMQX_REL_NAME). + %% @doc Return the release version. version() -> case lists:keyfind(emqx_vsn, 1, ?MODULE:module_info(compile)) of diff --git a/apps/emqx/src/emqx_router.erl b/apps/emqx/src/emqx_router.erl index 7c9cc61b0..42430af5d 100644 --- a/apps/emqx/src/emqx_router.erl +++ b/apps/emqx/src/emqx_router.erl @@ -98,7 +98,7 @@ mnesia(boot) -> -spec start_link(atom(), pos_integer()) -> startlink_ret(). start_link(Pool, Id) -> gen_server:start_link( - {local, emqx_misc:proc_name(?MODULE, Id)}, + {local, emqx_utils:proc_name(?MODULE, Id)}, ?MODULE, [Pool, Id], [{hibernate_after, 1000}] diff --git a/apps/emqx/src/emqx_router_helper.erl b/apps/emqx/src/emqx_router_helper.erl index 6dd479323..4bff98072 100644 --- a/apps/emqx/src/emqx_router_helper.erl +++ b/apps/emqx/src/emqx_router_helper.erl @@ -144,7 +144,7 @@ handle_info({mnesia_table_event, {delete, {?ROUTING_NODE, _Node}, _}}, State) -> %% ignore {noreply, State}; handle_info({mnesia_table_event, Event}, State) -> - ?SLOG(error, #{msg => "unexpected_mnesia_table_event", event => Event}), + ?SLOG(debug, #{msg => "unexpected_mnesia_table_event", event => Event}), {noreply, State}; handle_info({nodedown, Node}, State = #{nodes := Nodes}) -> global:trans( @@ -167,9 +167,15 @@ handle_info(Info, State) -> {noreply, State}. terminate(_Reason, _State) -> - ok = ekka:unmonitor(membership), - emqx_stats:cancel_update(route_stats), - mnesia:unsubscribe({table, ?ROUTING_NODE, simple}). + try + ok = ekka:unmonitor(membership), + emqx_stats:cancel_update(route_stats), + mnesia:unsubscribe({table, ?ROUTING_NODE, simple}) + catch + exit:{noproc, {gen_server, call, [mria_membership, _]}} -> + ?SLOG(warning, #{msg => "mria_membership_down"}), + ok + end. code_change(_OldVsn, State, _Extra) -> {ok, State}. diff --git a/apps/emqx/src/emqx_rpc.erl b/apps/emqx/src/emqx_rpc.erl index e1b5122c4..062bde68b 100644 --- a/apps/emqx/src/emqx_rpc.erl +++ b/apps/emqx/src/emqx_rpc.erl @@ -27,6 +27,8 @@ cast/5, multicall/4, multicall/5, + multicall_on_running/5, + on_running/3, unwrap_erpc/1 ]). @@ -91,6 +93,17 @@ multicall(Nodes, Mod, Fun, Args) -> multicall(Key, Nodes, Mod, Fun, Args) -> gen_rpc:multicall(rpc_nodes([{Key, Node} || Node <- Nodes]), Mod, Fun, Args). +-spec multicall_on_running([node()], module(), atom(), list(), timeout()) -> [term() | {error, _}]. +multicall_on_running(Nodes, Mod, Fun, Args, Timeout) -> + unwrap_erpc(erpc:multicall(Nodes, emqx_rpc, on_running, [Mod, Fun, Args], Timeout)). 
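multicall_on_running/5 above funnels the erpc:multicall/5 results through unwrap_erpc/1 to produce the [term() | {error, _}] shape promised by its spec. A sketch of that normalization under assumed result shapes (the real unwrap_erpc/1 body is not shown in this hunk):

%% erpc:multicall/5 tags each per-node result; unwrapping turns {ok, R}
%% into R and surfaces any other outcome as an error tuple.
unwrap_sketch(Results) ->
    [
        case R of
            {ok, Value} -> Value;
            Error -> {error, Error}
        end
     || R <- Results
    ].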
+ +-spec on_running(module(), atom(), list()) -> term(). +on_running(Mod, Fun, Args) -> + case emqx:is_running() of + true -> apply(Mod, Fun, Args); + false -> error(emqx_down) + end. + -spec cast(node(), module(), atom(), list()) -> cast_result(). cast(Node, Mod, Fun, Args) -> %% Note: using a non-ordered cast here, since the generated key is diff --git a/apps/emqx/src/emqx_schema.erl b/apps/emqx/src/emqx_schema.erl index 4cd78b575..5a66ad5a0 100644 --- a/apps/emqx/src/emqx_schema.erl +++ b/apps/emqx/src/emqx_schema.erl @@ -23,6 +23,7 @@ -dialyzer(no_fail_call). -elvis([{elvis_style, invalid_dynamic_call, disable}]). +-include("emqx_schema.hrl"). -include("emqx_authentication.hrl"). -include("emqx_access_control.hrl"). -include_lib("typerefl/include/types.hrl"). @@ -41,8 +42,15 @@ -type bar_separated_list() :: list(). -type ip_port() :: tuple() | integer(). -type cipher() :: map(). --type port_number() :: 1..65536. --type server_parse_option() :: #{default_port => port_number(), no_port => boolean()}. +-type port_number() :: 1..65535. +-type server_parse_option() :: #{ + default_port => port_number(), + no_port => boolean(), + supported_schemes => [string()], + default_scheme => string() +}. +-type url() :: binary(). +-type json_binary() :: binary(). -typerefl_from_string({duration/0, emqx_schema, to_duration}). -typerefl_from_string({duration_s/0, emqx_schema, to_duration_s}). @@ -56,13 +64,22 @@ -typerefl_from_string({ip_port/0, emqx_schema, to_ip_port}). -typerefl_from_string({cipher/0, emqx_schema, to_erl_cipher_suite}). -typerefl_from_string({comma_separated_atoms/0, emqx_schema, to_comma_separated_atoms}). +-typerefl_from_string({url/0, emqx_schema, to_url}). +-typerefl_from_string({json_binary/0, emqx_schema, to_json_binary}). + +-type parsed_server() :: #{ + hostname := string(), + port => port_number(), + scheme => string() +}. -export([ validate_heap_size/1, user_lookup_fun_tr/2, validate_alarm_actions/1, non_empty_string/1, - validations/0 + validations/0, + naive_env_interpolation/1 ]). -export([qos/0]). @@ -81,7 +98,9 @@ to_bar_separated_list/1, to_ip_port/1, to_erl_cipher_suite/1, - to_comma_separated_atoms/1 + to_comma_separated_atoms/1, + to_url/1, + to_json_binary/1 ]). -export([ @@ -93,6 +112,12 @@ convert_servers/2 ]). +%% tombstone types +-export([ + tombstone_map/2, + get_tombstone_map_value_type/1 +]). + -behaviour(hocon_schema). -reflect_type([ @@ -108,18 +133,29 @@ bar_separated_list/0, ip_port/0, cipher/0, - comma_separated_atoms/0 + comma_separated_atoms/0, + url/0, + json_binary/0, + port_number/0 ]). --export([namespace/0, roots/0, roots/1, fields/1, desc/1]). +-export([namespace/0, roots/0, roots/1, fields/1, desc/1, tags/0]). -export([conf_get/2, conf_get/3, keys/2, filter/1]). -export([server_ssl_opts_schema/2, client_ssl_opts_schema/1, ciphers_schema/1]). +-export([password_converter/2, bin_str_converter/2]). +-export([authz_fields/0]). -export([sc/2, map/2]). -elvis([{elvis_style, god_modules, disable}]). +-define(BIT(Bits), (1 bsl (Bits))). +-define(MAX_UINT(Bits), (?BIT(Bits) - 1)). + namespace() -> broker. +tags() -> + [<<"EMQX">>]. + roots() -> %% TODO change config importance to a field metadata roots(high) ++ roots(medium) ++ roots(low). 
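Note: multicall_on_running/5 in the emqx_rpc hunk above guards remote calls behind emqx:is_running/0, so nodes that are reachable but not (yet) running EMQX surface as {error, _} elements instead of executing the call. A hypothetical caller (module and function names are placeholders):

    %% Sketch: fan a call out to a list of nodes and split the results;
    %% nodes where EMQX is down contribute {error, _} elements.
    Results = emqx_rpc:multicall_on_running(Nodes, some_mod, some_fun, [], 5000),
    {OkResults, Errors} = lists:partition(
        fun({error, _}) -> false;
           (_) -> true
        end,
        Results
    ).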
@@ -129,25 +165,31 @@ roots(high) -> {"listeners", sc( ref("listeners"), - #{} - )}, - {"zones", - sc( - map("name", ref("zone")), - #{desc => ?DESC(zones)} + #{importance => ?IMPORTANCE_HIGH} )}, {"mqtt", sc( ref("mqtt"), - #{desc => ?DESC(mqtt)} + #{ + desc => ?DESC(mqtt), + importance => ?IMPORTANCE_MEDIUM + } + )}, + {"zones", + sc( + map("name", ref("zone")), + #{ + desc => ?DESC(zones), + importance => ?IMPORTANCE_LOW + } )}, {?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME, authentication(global)}, - %% NOTE: authorization schema here is only to keep emqx app prue + %% NOTE: authorization schema here is only to keep emqx app pure %% the full schema for EMQX node is injected in emqx_conf_schema. {?EMQX_AUTHORIZATION_CONFIG_ROOT_NAME, sc( ref(?EMQX_AUTHORIZATION_CONFIG_ROOT_NAME), - #{} + #{importance => ?IMPORTANCE_HIDDEN} )} ]; roots(medium) -> @@ -170,7 +212,7 @@ roots(medium) -> {"overload_protection", sc( ref("overload_protection"), - #{} + #{importance => ?IMPORTANCE_HIDDEN} )} ]; roots(low) -> @@ -183,12 +225,16 @@ roots(low) -> {"conn_congestion", sc( ref("conn_congestion"), - #{} + #{ + importance => ?IMPORTANCE_HIDDEN + } )}, {"stats", sc( ref("stats"), - #{} + #{ + importance => ?IMPORTANCE_HIDDEN + } )}, {"sysmon", sc( @@ -203,17 +249,22 @@ roots(low) -> {"flapping_detect", sc( ref("flapping_detect"), - #{} + #{importance => ?IMPORTANCE_HIDDEN} )}, {"persistent_session_store", sc( ref("persistent_session_store"), - #{} + #{importance => ?IMPORTANCE_HIDDEN} )}, {"trace", sc( ref("trace"), - #{} + #{importance => ?IMPORTANCE_HIDDEN} + )}, + {"crl_cache", + sc( + ref("crl_cache"), + #{importance => ?IMPORTANCE_HIDDEN} )} ]. @@ -263,7 +314,7 @@ fields("persistent_session_store") -> sc( duration(), #{ - default => "1h", + default => <<"1h">>, desc => ?DESC(persistent_session_store_max_retain_undelivered) } )}, @@ -271,7 +322,7 @@ fields("persistent_session_store") -> sc( duration(), #{ - default => "1h", + default => <<"1h">>, desc => ?DESC(persistent_session_store_message_gc_interval) } )}, @@ -279,7 +330,7 @@ fields("persistent_session_store") -> sc( duration(), #{ - default => "1m", + default => <<"1m">>, desc => ?DESC(persistent_session_store_session_message_gc_interval) } )} @@ -318,37 +369,14 @@ fields("stats") -> boolean(), #{ default => true, + importance => ?IMPORTANCE_HIDDEN, desc => ?DESC(stats_enable) } )} ]; fields("authorization") -> - [ - {"no_match", - sc( - hoconsc:enum([allow, deny]), - #{ - default => allow, - required => true, - desc => ?DESC(fields_authorization_no_match) - } - )}, - {"deny_action", - sc( - hoconsc:enum([ignore, disconnect]), - #{ - default => ignore, - required => true, - desc => ?DESC(fields_authorization_deny_action) - } - )}, - {"cache", - sc( - ref(?MODULE, "cache"), - #{} - )} - ]; -fields("cache") -> + authz_fields(); +fields("authz_cache") -> [ {"enable", sc( @@ -371,7 +399,7 @@ fields("cache") -> sc( duration(), #{ - default => "1m", + default => <<"1m">>, desc => ?DESC(fields_cache_ttl) } )} @@ -382,7 +410,7 @@ fields("mqtt") -> sc( hoconsc:union([infinity, duration()]), #{ - default => "15s", + default => <<"15s">>, desc => ?DESC(mqtt_idle_timeout) } )}, @@ -390,7 +418,7 @@ fields("mqtt") -> sc( bytesize(), #{ - default => "1MB", + default => <<"1MB">>, desc => ?DESC(mqtt_max_packet_size) } )}, @@ -526,7 +554,7 @@ fields("mqtt") -> sc( duration(), #{ - default => "30s", + default => <<"30s">>, desc => ?DESC(mqtt_retry_interval) } )}, @@ -542,7 +570,7 @@ fields("mqtt") -> sc( duration(), #{ - default => "300s", + default => <<"300s">>, desc 
=> ?DESC(mqtt_await_rel_timeout) } )}, @@ -550,7 +578,7 @@ fields("mqtt") -> sc( duration(), #{ - default => "2h", + default => <<"2h">>, desc => ?DESC(mqtt_session_expiry_interval) } )}, @@ -612,8 +640,7 @@ fields("mqtt") -> )} ]; fields("zone") -> - Fields = emqx_zone_schema:roots(), - [{F, ref(emqx_zone_schema, F)} || F <- Fields]; + emqx_zone_schema:zone(); fields("flapping_detect") -> [ {"enable", @@ -621,30 +648,32 @@ fields("flapping_detect") -> boolean(), #{ default => false, + deprecated => {since, "5.0.23"}, desc => ?DESC(flapping_detect_enable) } )}, - {"max_count", - sc( - integer(), - #{ - default => 15, - desc => ?DESC(flapping_detect_max_count) - } - )}, {"window_time", sc( - duration(), + hoconsc:union([disabled, duration()]), #{ - default => "1m", + default => disabled, + importance => ?IMPORTANCE_HIGH, desc => ?DESC(flapping_detect_window_time) } )}, + {"max_count", + sc( + non_neg_integer(), + #{ + default => 15, + desc => ?DESC(flapping_detect_max_count) + } + )}, {"ban_time", sc( duration(), #{ - default => "5m", + default => <<"5m">>, desc => ?DESC(flapping_detect_ban_time) } )} @@ -659,19 +688,20 @@ fields("force_shutdown") -> desc => ?DESC(force_shutdown_enable) } )}, - {"max_message_queue_len", + {"max_mailbox_size", sc( range(0, inf), #{ default => 1000, - desc => ?DESC(force_shutdown_max_message_queue_len) + aliases => [max_message_queue_len], + desc => ?DESC(force_shutdown_max_mailbox_size) } )}, {"max_heap_size", sc( wordsize(), #{ - default => "32MB", + default => <<"32MB">>, desc => ?DESC(force_shutdown_max_heap_size), validator => fun ?MODULE:validate_heap_size/1 } @@ -734,7 +764,7 @@ fields("conn_congestion") -> sc( duration(), #{ - default => "1m", + default => <<"1m">>, desc => ?DESC(conn_congestion_min_alarm_sustain_duration) } )} @@ -758,7 +788,7 @@ fields("force_gc") -> sc( bytesize(), #{ - default => "16MB", + default => <<"16MB">>, desc => ?DESC(force_gc_bytes) } )} @@ -767,45 +797,83 @@ fields("listeners") -> [ {"tcp", sc( - map(name, ref("mqtt_tcp_listener")), + tombstone_map(name, ref("mqtt_tcp_listener")), #{ desc => ?DESC(fields_listeners_tcp), + converter => fun(X, _) -> + ensure_default_listener(X, tcp) + end, required => {false, recursively} } )}, {"ssl", sc( - map(name, ref("mqtt_ssl_listener")), + tombstone_map(name, ref("mqtt_ssl_listener")), #{ desc => ?DESC(fields_listeners_ssl), + converter => fun(X, _) -> ensure_default_listener(X, ssl) end, required => {false, recursively} } )}, {"ws", sc( - map(name, ref("mqtt_ws_listener")), + tombstone_map(name, ref("mqtt_ws_listener")), #{ desc => ?DESC(fields_listeners_ws), + converter => fun(X, _) -> ensure_default_listener(X, ws) end, required => {false, recursively} } )}, {"wss", sc( - map(name, ref("mqtt_wss_listener")), + tombstone_map(name, ref("mqtt_wss_listener")), #{ desc => ?DESC(fields_listeners_wss), + converter => fun(X, _) -> ensure_default_listener(X, wss) end, required => {false, recursively} } )}, {"quic", sc( - map(name, ref("mqtt_quic_listener")), + tombstone_map(name, ref("mqtt_quic_listener")), #{ desc => ?DESC(fields_listeners_quic), + converter => fun keep_default_tombstone/2, required => {false, recursively} } )} ]; +fields("crl_cache") -> + %% Note: we make the refresh interval and HTTP timeout global (not + %% per-listener) because multiple SSL listeners might point to the + %% same URL. If they had diverging timeout options, it would be + %% confusing. 
+ [ + {refresh_interval, + sc( + duration(), + #{ + default => <<"15m">>, + desc => ?DESC("crl_cache_refresh_interval") + } + )}, + {http_timeout, + sc( + duration(), + #{ + default => <<"15s">>, + desc => ?DESC("crl_cache_refresh_http_timeout") + } + )}, + {capacity, + sc( + pos_integer(), + #{ + default => 100, + desc => ?DESC("crl_cache_capacity") + } + )} + ]; fields("mqtt_tcp_listener") -> mqtt_listener(1883) ++ [ @@ -826,7 +894,7 @@ fields("mqtt_ssl_listener") -> {"ssl_options", sc( ref("listener_ssl_opts"), - #{} + #{validator => fun mqtt_ssl_listener_ssl_options_validator/1} )} ]; fields("mqtt_ws_listener") -> @@ -864,40 +932,237 @@ fields("mqtt_wss_listener") -> ]; fields("mqtt_quic_listener") -> [ - %% TODO: ensure cacertfile is configurable {"certfile", sc( string(), - #{desc => ?DESC(fields_mqtt_quic_listener_certfile)} + #{ + %% TODO: deprecated => {since, "5.1.0"} + desc => ?DESC(fields_mqtt_quic_listener_certfile), + importance => ?IMPORTANCE_HIDDEN + } )}, {"keyfile", sc( string(), - #{desc => ?DESC(fields_mqtt_quic_listener_keyfile)} + #{ + %% TODO: deprecated => {since, "5.1.0"} + desc => ?DESC(fields_mqtt_quic_listener_keyfile), + importance => ?IMPORTANCE_HIDDEN + } )}, {"ciphers", ciphers_schema(quic)}, + + {"max_bytes_per_key", + quic_lowlevel_settings_uint( + 1, + ?MAX_UINT(64), + ?DESC(fields_mqtt_quic_listener_max_bytes_per_key) + )}, + {"tls_server_max_send_buffer", + quic_lowlevel_settings_uint( + 1, + ?MAX_UINT(32), + ?DESC(fields_mqtt_quic_listener_tls_server_max_send_buffer) + )}, + {"stream_recv_window_default", + quic_lowlevel_settings_uint( + 1, + ?MAX_UINT(32), + ?DESC(fields_mqtt_quic_listener_stream_recv_window_default) + )}, + {"stream_recv_buffer_default", + quic_lowlevel_settings_uint( + 1, + ?MAX_UINT(32), + ?DESC(fields_mqtt_quic_listener_stream_recv_buffer_default) + )}, + {"conn_flow_control_window", + quic_lowlevel_settings_uint( + 1, + ?MAX_UINT(32), + ?DESC(fields_mqtt_quic_listener_conn_flow_control_window) + )}, + {"max_stateless_operations", + quic_lowlevel_settings_uint( + 1, + ?MAX_UINT(32), + ?DESC(fields_mqtt_quic_listener_max_stateless_operations) + )}, + {"initial_window_packets", + quic_lowlevel_settings_uint( + 1, + ?MAX_UINT(32), + ?DESC(fields_mqtt_quic_listener_initial_window_packets) + )}, + {"send_idle_timeout_ms", + quic_lowlevel_settings_uint( + 1, + ?MAX_UINT(32), + ?DESC(fields_mqtt_quic_listener_send_idle_timeout_ms) + )}, + {"initial_rtt_ms", + quic_lowlevel_settings_uint( + 1, + ?MAX_UINT(32), + ?DESC(fields_mqtt_quic_listener_initial_rtt_ms) + )}, + {"max_ack_delay_ms", + quic_lowlevel_settings_uint( + 1, + ?MAX_UINT(32), + ?DESC(fields_mqtt_quic_listener_max_ack_delay_ms) + )}, + {"disconnect_timeout_ms", + quic_lowlevel_settings_uint( + 1, + ?MAX_UINT(32), + ?DESC(fields_mqtt_quic_listener_disconnect_timeout_ms) + )}, {"idle_timeout", sc( duration_ms(), #{ default => 0, - desc => ?DESC(fields_mqtt_quic_listener_idle_timeout) + desc => ?DESC(fields_mqtt_quic_listener_idle_timeout), + %% TODO: deprecated => {since, "5.1.0"} + %% deprecated, use idle_timeout_ms instead + importance => ?IMPORTANCE_HIDDEN } )}, + {"idle_timeout_ms", + quic_lowlevel_settings_uint( + 0, + ?MAX_UINT(64), + ?DESC(fields_mqtt_quic_listener_idle_timeout_ms) + )}, {"handshake_idle_timeout", sc( duration_ms(), #{ - default => "10s", - desc => ?DESC(fields_mqtt_quic_listener_handshake_idle_timeout) + default => <<"10s">>, + desc => ?DESC(fields_mqtt_quic_listener_handshake_idle_timeout), + %% TODO: deprecated => {since, "5.1.0"} + %% use 
handshake_idle_timeout_ms + importance => ?IMPORTANCE_HIDDEN } )}, + {"handshake_idle_timeout_ms", + quic_lowlevel_settings_uint( + 1, + ?MAX_UINT(64), + ?DESC(fields_mqtt_quic_listener_handshake_idle_timeout_ms) + )}, {"keep_alive_interval", sc( duration_ms(), #{ default => 0, - desc => ?DESC(fields_mqtt_quic_listener_keep_alive_interval) + desc => ?DESC(fields_mqtt_quic_listener_keep_alive_interval), + %% TODO: deprecated => {since, "5.1.0"} + %% use keep_alive_interval_ms instead + importance => ?IMPORTANCE_HIDDEN + } + )}, + {"keep_alive_interval_ms", + quic_lowlevel_settings_uint( + 0, + ?MAX_UINT(32), + ?DESC(fields_mqtt_quic_listener_keep_alive_interval_ms) + )}, + {"peer_bidi_stream_count", + quic_lowlevel_settings_uint( + 1, + ?MAX_UINT(16), + ?DESC(fields_mqtt_quic_listener_peer_bidi_stream_count) + )}, + {"peer_unidi_stream_count", + quic_lowlevel_settings_uint( + 0, + ?MAX_UINT(16), + ?DESC(fields_mqtt_quic_listener_peer_unidi_stream_count) + )}, + {"retry_memory_limit", + quic_lowlevel_settings_uint( + 0, + ?MAX_UINT(16), + ?DESC(fields_mqtt_quic_listener_retry_memory_limit) + )}, + {"load_balancing_mode", + quic_lowlevel_settings_uint( + 0, + ?MAX_UINT(16), + ?DESC(fields_mqtt_quic_listener_load_balancing_mode) + )}, + {"max_operations_per_drain", + quic_lowlevel_settings_uint( + 0, + ?MAX_UINT(8), + ?DESC(fields_mqtt_quic_listener_max_operations_per_drain) + )}, + {"send_buffering_enabled", + quic_feature_toggle( + ?DESC(fields_mqtt_quic_listener_send_buffering_enabled) + )}, + {"pacing_enabled", + quic_feature_toggle( + ?DESC(fields_mqtt_quic_listener_pacing_enabled) + )}, + {"migration_enabled", + quic_feature_toggle( + ?DESC(fields_mqtt_quic_listener_migration_enabled) + )}, + {"datagram_receive_enabled", + quic_feature_toggle( + ?DESC(fields_mqtt_quic_listener_datagram_receive_enabled) + )}, + {"server_resumption_level", + quic_lowlevel_settings_uint( + 0, + ?MAX_UINT(8), + ?DESC(fields_mqtt_quic_listener_server_resumption_level) + )}, + {"minimum_mtu", + quic_lowlevel_settings_uint( + 1, + ?MAX_UINT(16), + ?DESC(fields_mqtt_quic_listener_minimum_mtu) + )}, + {"maximum_mtu", + quic_lowlevel_settings_uint( + 1, + ?MAX_UINT(16), + ?DESC(fields_mqtt_quic_listener_maximum_mtu) + )}, + {"mtu_discovery_search_complete_timeout_us", + quic_lowlevel_settings_uint( + 0, + ?MAX_UINT(64), + ?DESC(fields_mqtt_quic_listener_mtu_discovery_search_complete_timeout_us) + )}, + {"mtu_discovery_missing_probe_count", + quic_lowlevel_settings_uint( + 1, + ?MAX_UINT(8), + ?DESC(fields_mqtt_quic_listener_mtu_discovery_missing_probe_count) + )}, + {"max_binding_stateless_operations", + quic_lowlevel_settings_uint( + 0, + ?MAX_UINT(16), + ?DESC(fields_mqtt_quic_listener_max_binding_stateless_operations) + )}, + {"stateless_operation_expiration_ms", + quic_lowlevel_settings_uint( + 0, + ?MAX_UINT(16), + ?DESC(fields_mqtt_quic_listener_stateless_operation_expiration_ms) + )}, + {"ssl_options", + sc( + ref("listener_quic_ssl_opts"), + #{ + required => false, + desc => ?DESC(fields_mqtt_quic_listener_ssl_options) } )} ] ++ base_listener(14567); @@ -907,7 +1172,7 @@ fields("ws_opts") -> sc( string(), #{ - default => "/mqtt", + default => <<"/mqtt">>, desc => ?DESC(fields_ws_opts_mqtt_path) } )}, @@ -931,7 +1196,7 @@ fields("ws_opts") -> sc( duration(), #{ - default => "7200s", + default => <<"7200s">>, desc => ?DESC(fields_ws_opts_idle_timeout) } )}, @@ -955,7 +1220,7 @@ fields("ws_opts") -> sc( comma_separated_list(), #{ - default => "mqtt, mqtt-v3, mqtt-v3.1.1, mqtt-v5", + default => <<"mqtt, 
mqtt-v3, mqtt-v3.1.1, mqtt-v5">>, desc => ?DESC(fields_ws_opts_supported_subprotocols) } )}, @@ -987,7 +1252,7 @@ fields("ws_opts") -> sc( string(), #{ - default => "x-forwarded-for", + default => <<"x-forwarded-for">>, desc => ?DESC(fields_ws_opts_proxy_address_header) } )}, @@ -995,7 +1260,7 @@ fields("ws_opts") -> sc( string(), #{ - default => "x-forwarded-port", + default => <<"x-forwarded-port">>, desc => ?DESC(fields_ws_opts_proxy_port_header) } )}, @@ -1027,7 +1292,7 @@ fields("tcp_opts") -> sc( duration(), #{ - default => "15s", + default => <<"15s">>, desc => ?DESC(fields_tcp_opts_send_timeout) } )}, @@ -1068,7 +1333,7 @@ fields("tcp_opts") -> sc( bytesize(), #{ - default => "1MB", + default => <<"1MB">>, desc => ?DESC(fields_tcp_opts_high_watermark) } )}, @@ -1109,8 +1374,66 @@ fields("listener_wss_opts") -> }, true ); +fields("listener_quic_ssl_opts") -> + %% Mark unsupported TLS options deprecated. + Schema0 = server_ssl_opts_schema(#{}, false), + Schema1 = lists:keydelete("ocsp", 1, Schema0), + lists:map( + fun({Name, Schema}) -> + case is_quic_ssl_opts(Name) of + true -> + {Name, Schema}; + false -> + {Name, Schema#{deprecated => {since, "5.0.20"}}} + end + end, + Schema1 + ); fields("ssl_client_opts") -> client_ssl_opts_schema(#{}); +fields("ocsp") -> + [ + {enable_ocsp_stapling, + sc( + boolean(), + #{ + default => false, + desc => ?DESC("server_ssl_opts_schema_enable_ocsp_stapling") + } + )}, + {responder_url, + sc( + url(), + #{ + required => false, + desc => ?DESC("server_ssl_opts_schema_ocsp_responder_url") + } + )}, + {issuer_pem, + sc( + binary(), + #{ + required => false, + desc => ?DESC("server_ssl_opts_schema_ocsp_issuer_pem") + } + )}, + {refresh_interval, + sc( + duration(), + #{ + default => <<"5m">>, + desc => ?DESC("server_ssl_opts_schema_ocsp_refresh_interval") + } + )}, + {refresh_http_timeout, + sc( + duration(), + #{ + default => <<"15s">>, + desc => ?DESC("server_ssl_opts_schema_ocsp_refresh_http_timeout") + } + )} + ]; fields("deflate_opts") -> [ {"level", @@ -1205,10 +1528,8 @@ fields("broker") -> sc( boolean(), #{ - %% TODO: deprecated => {since, "5.1.0"} - %% in favor of session message re-dispatch at termination - %% we will stop supporting dispatch acks for shared - %% subscriptions. + deprecated => {since, "5.1.0"}, + importance => ?IMPORTANCE_HIDDEN, default => false, desc => ?DESC(broker_shared_dispatch_ack_enabled) } @@ -1224,14 +1545,16 @@ fields("broker") -> {"perf", sc( ref("broker_perf"), - #{} + #{importance => ?IMPORTANCE_HIDDEN} )}, + %% FIXME: Need new design for shared subscription group {"shared_subscription_group", sc( map(name, ref("shared_subscription_group")), #{ example => #{<<"example_group">> => #{<<"strategy">> => <<"random">>}}, - desc => ?DESC(shared_subscription_group_strategy) + desc => ?DESC(shared_subscription_group_strategy), + importance => ?IMPORTANCE_HIDDEN } )} ]; @@ -1279,7 +1602,7 @@ fields("sys_topics") -> sc( hoconsc:union([disabled, duration()]), #{ - default => "1m", + default => <<"1m">>, desc => ?DESC(sys_msg_interval) } )}, @@ -1287,7 +1610,7 @@ fields("sys_topics") -> sc( hoconsc:union([disabled, duration()]), #{ - default => "30s", + default => <<"30s">>, desc => ?DESC(sys_heartbeat_interval) } )}, @@ -1347,7 +1670,9 @@ fields("sysmon") -> {"top", sc( ref("sysmon_top"), - #{} + %% Useful monitoring solution when benchmarking, + %% but hardly common enough for regular users.
+ #{importance => ?IMPORTANCE_HIDDEN} )} ]; fields("sysmon_vm") -> @@ -1356,7 +1681,7 @@ fields("sysmon_vm") -> sc( duration(), #{ - default => "30s", + default => <<"30s">>, desc => ?DESC(sysmon_vm_process_check_interval) } )}, @@ -1364,7 +1689,7 @@ fields("sysmon_vm") -> sc( percent(), #{ - default => "80%", + default => <<"80%">>, desc => ?DESC(sysmon_vm_process_high_watermark) } )}, @@ -1372,7 +1697,7 @@ fields("sysmon_vm") -> sc( percent(), #{ - default => "60%", + default => <<"60%">>, desc => ?DESC(sysmon_vm_process_low_watermark) } )}, @@ -1388,7 +1713,7 @@ fields("sysmon_vm") -> sc( hoconsc:union([disabled, duration()]), #{ - default => "240ms", + default => <<"240ms">>, desc => ?DESC(sysmon_vm_long_schedule) } )}, @@ -1396,7 +1721,7 @@ fields("sysmon_vm") -> sc( hoconsc:union([disabled, bytesize()]), #{ - default => "32MB", + default => <<"32MB">>, desc => ?DESC(sysmon_vm_large_heap) } )}, @@ -1423,7 +1748,7 @@ fields("sysmon_os") -> sc( duration(), #{ - default => "60s", + default => <<"60s">>, desc => ?DESC(sysmon_os_cpu_check_interval) } )}, @@ -1431,7 +1756,7 @@ fields("sysmon_os") -> sc( percent(), #{ - default => "80%", + default => <<"80%">>, desc => ?DESC(sysmon_os_cpu_high_watermark) } )}, @@ -1439,7 +1764,7 @@ fields("sysmon_os") -> sc( percent(), #{ - default => "60%", + default => <<"60%">>, desc => ?DESC(sysmon_os_cpu_low_watermark) } )}, @@ -1447,7 +1772,7 @@ fields("sysmon_os") -> sc( hoconsc:union([disabled, duration()]), #{ - default => "60s", + default => <<"60s">>, desc => ?DESC(sysmon_os_mem_check_interval) } )}, @@ -1455,7 +1780,7 @@ fields("sysmon_os") -> sc( percent(), #{ - default => "70%", + default => <<"70%">>, desc => ?DESC(sysmon_os_sysmem_high_watermark) } )}, @@ -1463,7 +1788,7 @@ fields("sysmon_os") -> sc( percent(), #{ - default => "5%", + default => <<"5%">>, desc => ?DESC(sysmon_os_procmem_high_watermark) } )} @@ -1484,7 +1809,7 @@ fields("sysmon_top") -> emqx_schema:duration(), #{ mapping => "system_monitor.top_sample_interval", - default => "2s", + default => <<"2s">>, desc => ?DESC(sysmon_top_sample_interval) } )}, @@ -1503,7 +1828,7 @@ fields("sysmon_top") -> #{ mapping => "system_monitor.db_hostname", desc => ?DESC(sysmon_top_db_hostname), - default => "" + default => <<>> } )}, {"db_port", @@ -1520,7 +1845,7 @@ fields("sysmon_top") -> string(), #{ mapping => "system_monitor.db_username", - default => "system_monitor", + default => <<"system_monitor">>, desc => ?DESC(sysmon_top_db_username) } )}, @@ -1529,8 +1854,10 @@ fields("sysmon_top") -> binary(), #{ mapping => "system_monitor.db_password", - default => "system_monitor_password", - desc => ?DESC(sysmon_top_db_password) + default => <<"system_monitor_password">>, + desc => ?DESC(sysmon_top_db_password), + converter => fun password_converter/2, + sensitive => true } )}, {"db_name", @@ -1538,7 +1865,7 @@ fields("sysmon_top") -> string(), #{ mapping => "system_monitor.db_name", - default => "postgres", + default => <<"postgres">>, desc => ?DESC(sysmon_top_db_name) } )} @@ -1568,7 +1895,7 @@ fields("alarm") -> sc( duration(), #{ - default => "24h", + default => <<"24h">>, example => "24h", desc => ?DESC(alarm_validity_period) } @@ -1579,6 +1906,8 @@ fields("trace") -> {"payload_encode", sc(hoconsc:enum([hex, text, hidden]), #{ default => text, + deprecated => {since, "5.0.22"}, + importance => ?IMPORTANCE_HIDDEN, desc => ?DESC(fields_trace_payload_encode) })} ]. 
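Note: a recurring pattern in this emqx_schema.erl revision is that HOCON field defaults move from Erlang strings ("1m") to binaries (<<"1m">>), and fields gain importance and deprecated metadata. A minimal sketch of a field written with the new conventions (the field name and desc key are placeholders):

    %% Hypothetical field using the new conventions: binary default,
    %% deprecation marker, and an explicit importance level.
    {"example_interval",
        sc(
            duration(),
            #{
                default => <<"30s">>,
                deprecated => {since, "5.0.22"},
                importance => ?IMPORTANCE_HIDDEN,
                desc => ?DESC(example_interval)
            }
        )}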
@@ -1607,10 +1936,12 @@ mqtt_listener(Bind) -> duration(), #{ desc => ?DESC(mqtt_listener_proxy_protocol_timeout), - default => "3s" + default => <<"3s">> } )}, - {?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME, authentication(listener)} + {?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME, (authentication(listener))#{ + importance => ?IMPORTANCE_HIDDEN + }} ]. base_listener(Bind) -> @@ -1644,7 +1975,7 @@ base_listener(Bind) -> sc( hoconsc:union([infinity, pos_integer()]), #{ - default => infinity, + default => emqx_listeners:default_max_conn(), desc => ?DESC(base_listener_max_connections) } )}, @@ -1672,9 +2003,7 @@ base_listener(Bind) -> ), #{ desc => ?DESC(base_listener_limiter), - default => #{ - <<"connection">> => #{<<"rate">> => <<"1000/s">>, <<"capacity">> => 1000} - } + importance => ?IMPORTANCE_HIDDEN } )}, {"enable_authn", @@ -1685,7 +2014,7 @@ base_listener(Bind) -> default => true } )} - ]. + ] ++ emqx_limiter_schema:short_paths_fields(?MODULE). desc("persistent_session_store") -> "Settings for message persistence."; @@ -1704,7 +2033,7 @@ desc("mqtt") -> "Global MQTT configuration.
" "The configs here work as default values which can be overridden\n" "in zone configs"; -desc("cache") -> +desc("authz_cache") -> "Settings for the authorization cache."; desc("zone") -> "A `Zone` defines a set of configuration items (such as the maximum number of connections)" @@ -1786,6 +2115,12 @@ desc("listener_ssl_opts") -> "Socket options for SSL connections."; desc("listener_wss_opts") -> "Socket options for WebSocket/SSL connections."; +desc("fields_mqtt_quic_listener_certfile") -> + "Path to the certificate file. Will be deprecated in 5.1, use '.ssl_options.certfile' instead."; +desc("fields_mqtt_quic_listener_keyfile") -> + "Path to the secret key file. Will be deprecated in 5.1, use '.ssl_options.keyfile' instead."; +desc("listener_quic_ssl_opts") -> + "TLS options for QUIC transport."; desc("ssl_client_opts") -> "Socket options for SSL clients."; desc("deflate_opts") -> @@ -1826,22 +2161,22 @@ desc("trace") -> "Real-time filtering logs for the ClientID or Topic or IP for debugging."; desc("shared_subscription_group") -> "Per group dispatch strategy for shared subscription"; +desc("ocsp") -> + "Per listener OCSP Stapling configuration."; +desc("crl_cache") -> + "Global CRL cache options."; desc(_) -> undefined. %% utils -spec conf_get(string() | [string()], hocon:config()) -> term(). conf_get(Key, Conf) -> - V = hocon_maps:get(Key, Conf), - case is_binary(V) of - true -> - binary_to_list(V); - false -> - V - end. + ensure_list(hocon_maps:get(Key, Conf)). conf_get(Key, Conf, Default) -> - V = hocon_maps:get(Key, Conf, Default), + ensure_list(hocon_maps:get(Key, Conf, Default)). + +ensure_list(V) -> case is_binary(V) of true -> binary_to_list(V); @@ -1854,18 +2189,18 @@ filter(Opts) -> %% @private This function defines the SSL opts which are commonly used by %% SSL listener and client. --spec common_ssl_opts_schema(map()) -> hocon_schema:field_schema(). -common_ssl_opts_schema(Defaults) -> +-spec common_ssl_opts_schema(map(), server | client) -> hocon_schema:field_schema(). 
+common_ssl_opts_schema(Defaults, Type) -> D = fun(Field) -> maps:get(to_atom(Field), Defaults, undefined) end, Df = fun(Field, Default) -> maps:get(to_atom(Field), Defaults, Default) end, Collection = maps:get(versions, Defaults, tls_all_available), - AvailableVersions = default_tls_vsns(Collection), + DefaultVersions = default_tls_vsns(Collection), [ {"cacertfile", sc( binary(), #{ - default => D("cacertfile"), + default => cert_file("cacert.pem", Type), required => false, desc => ?DESC(common_ssl_opts_schema_cacertfile) } @@ -1874,7 +2209,7 @@ common_ssl_opts_schema(Defaults) -> sc( binary(), #{ - default => D("certfile"), + default => cert_file("cert.pem", Type), required => false, desc => ?DESC(common_ssl_opts_schema_certfile) } @@ -1883,7 +2218,7 @@ common_ssl_opts_schema(Defaults) -> sc( binary(), #{ - default => D("keyfile"), + default => cert_file("key.pem", Type), required => false, desc => ?DESC(common_ssl_opts_schema_keyfile) } @@ -1920,16 +2255,19 @@ common_ssl_opts_schema(Defaults) -> required => false, example => <<"">>, format => <<"password">>, - desc => ?DESC(common_ssl_opts_schema_password) + desc => ?DESC(common_ssl_opts_schema_password), + importance => ?IMPORTANCE_LOW, + converter => fun password_converter/2 } )}, {"versions", sc( hoconsc:array(typerefl:atom()), #{ - default => AvailableVersions, + default => DefaultVersions, desc => ?DESC(common_ssl_opts_schema_versions), - validator => fun(Inputs) -> validate_tls_versions(AvailableVersions, Inputs) end + importance => ?IMPORTANCE_HIGH, + validator => fun(Input) -> validate_tls_versions(Collection, Input) end } )}, {"ciphers", ciphers_schema(D("ciphers"))}, @@ -1939,6 +2277,7 @@ common_ssl_opts_schema(Defaults) -> #{ default => <<"emqx_tls_psk:lookup">>, converter => fun ?MODULE:user_lookup_fun_tr/2, + importance => ?IMPORTANCE_HIDDEN, desc => ?DESC(common_ssl_opts_schema_user_lookup_fun) } )}, @@ -1949,6 +2288,26 @@ common_ssl_opts_schema(Defaults) -> default => Df("secure_renegotiate", true), desc => ?DESC(common_ssl_opts_schema_secure_renegotiate) } + )}, + {"log_level", + sc( + hoconsc:enum([ + emergency, alert, critical, error, warning, notice, info, debug, none, all + ]), + #{ + default => notice, + desc => ?DESC(common_ssl_opts_schema_log_level), + importance => ?IMPORTANCE_LOW + } + )}, + + {"hibernate_after", + sc( + duration(), + #{ + default => Df("hibernate_after", <<"5s">>), + desc => ?DESC(common_ssl_opts_schema_hibernate_after) + } )} ]. 
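Note: common_ssl_opts_schema/2 now takes the listener role (server or client) so that certificate-file defaults can differ per role via cert_file/2, while the D/1 and Df/2 closures still read per-field overrides from the Defaults map. For example, a caller could override a single default and keep the rest (hypothetical call):

    %% Sketch: server-side SSL fields with a custom secure_renegotiate
    %% default; all other defaults stay as defined above.
    Fields = server_ssl_opts_schema(#{secure_renegotiate => false}, true).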
@@ -1957,7 +2316,7 @@ common_ssl_opts_schema(Defaults) -> server_ssl_opts_schema(Defaults, IsRanchListener) -> D = fun(Field) -> maps:get(to_atom(Field), Defaults, undefined) end, Df = fun(Field, Default) -> maps:get(to_atom(Field), Defaults, Default) end, - common_ssl_opts_schema(Defaults) ++ + common_ssl_opts_schema(Defaults, server) ++ [ {"dhfile", sc( @@ -1996,24 +2355,94 @@ server_ssl_opts_schema(Defaults, IsRanchListener) -> sc( duration(), #{ - default => Df("handshake_timeout", "15s"), + default => Df("handshake_timeout", <<"15s">>), desc => ?DESC(server_ssl_opts_schema_handshake_timeout) } )} ] ++ [ - {"gc_after_handshake", - sc(boolean(), #{ - default => false, - desc => ?DESC(server_ssl_opts_schema_gc_after_handshake) - })} - || not IsRanchListener + Field + || not IsRanchListener, + Field <- [ + {gc_after_handshake, + sc(boolean(), #{ + default => false, + desc => ?DESC(server_ssl_opts_schema_gc_after_handshake) + })}, + {ocsp, + sc( + ref("ocsp"), + #{ + required => false, + validator => fun ocsp_inner_validator/1 + } + )}, + {enable_crl_check, + sc( + boolean(), + #{ + default => false, + importance => ?IMPORTANCE_MEDIUM, + desc => ?DESC("server_ssl_opts_schema_enable_crl_check") + } + )} + ] ]. +mqtt_ssl_listener_ssl_options_validator(Conf) -> + Checks = [ + fun ocsp_outer_validator/1, + fun crl_outer_validator/1 + ], + case emqx_utils:pipeline(Checks, Conf, not_used) of + {ok, _, _} -> + ok; + {error, Reason, _NotUsed} -> + {error, Reason} + end. + +ocsp_outer_validator(#{<<"ocsp">> := #{<<"enable_ocsp_stapling">> := true}} = Conf) -> + %% outer mqtt listener ssl server config + ServerCertPemPath = maps:get(<<"certfile">>, Conf, undefined), + case ServerCertPemPath of + undefined -> + {error, "Server certificate must be defined when using OCSP stapling"}; + _ -> + %% check if issuer pem is readable and/or valid? + ok + end; +ocsp_outer_validator(_Conf) -> + ok. + +ocsp_inner_validator(#{enable_ocsp_stapling := _} = Conf) -> + ocsp_inner_validator(emqx_utils_maps:binary_key_map(Conf)); +ocsp_inner_validator(#{<<"enable_ocsp_stapling">> := false} = _Conf) -> + ok; +ocsp_inner_validator(#{<<"enable_ocsp_stapling">> := true} = Conf) -> + assert_required_field( + Conf, <<"responder_url">>, "The responder URL is required for OCSP stapling" + ), + assert_required_field( + Conf, <<"issuer_pem">>, "The issuer PEM path is required for OCSP stapling" + ), + ok. + +crl_outer_validator( + #{<<"enable_crl_check">> := true} = SSLOpts +) -> + case maps:get(<<"verify">>, SSLOpts) of + verify_peer -> + ok; + _ -> + {error, "verify must be verify_peer when CRL check is enabled"} + end; +crl_outer_validator(_SSLOpts) -> + ok. + %% @doc Make schema for SSL client. -spec client_ssl_opts_schema(map()) -> hocon_schema:field_schema(). client_ssl_opts_schema(Defaults) -> - common_ssl_opts_schema(Defaults) ++ + common_ssl_opts_schema(Defaults, client) ++ [ {"enable", sc( @@ -2035,10 +2464,14 @@ client_ssl_opts_schema(Defaults) -> )} ]. -default_tls_vsns(dtls_all_available) -> - emqx_tls_lib:available_versions(dtls); -default_tls_vsns(tls_all_available) -> - emqx_tls_lib:available_versions(tls). +available_tls_vsns(dtls_all_available) -> emqx_tls_lib:available_versions(dtls); +available_tls_vsns(tls_all_available) -> emqx_tls_lib:available_versions(tls). + +outdated_tls_vsn(dtls_all_available) -> [dtlsv1]; +outdated_tls_vsn(tls_all_available) -> ['tlsv1.1', tlsv1]. + +default_tls_vsns(Key) -> + available_tls_vsns(Key) -- outdated_tls_vsn(Key). 
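Note: the default_tls_vsns/1 split above keeps outdated protocol versions available (they can still be listed explicitly in `versions') while dropping them from the defaults. Roughly, as an illustration rather than part of the patch:

    %% default_tls_vsns(tls_all_available) =:=
    %%     emqx_tls_lib:available_versions(tls) -- ['tlsv1.1', tlsv1]
    %% default_tls_vsns(dtls_all_available) =:=
    %%     emqx_tls_lib:available_versions(dtls) -- [dtlsv1]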
-spec ciphers_schema(quic | dtls_all_available | tls_all_available | undefined) -> hocon_schema:field_schema(). @@ -2088,6 +2521,48 @@ do_default_ciphers(_) -> %% otherwise resolve default ciphers list at runtime []. +password_converter(X, Opts) -> + bin_str_converter(X, Opts). + +bin_str_converter(undefined, _) -> + undefined; +bin_str_converter(I, _) when is_integer(I) -> + integer_to_binary(I); +bin_str_converter(X, _) -> + try + iolist_to_binary(X) + catch + _:_ -> + throw("must_quote") + end. + +authz_fields() -> + [ + {"no_match", + sc( + hoconsc:enum([allow, deny]), + #{ + default => allow, + required => true, + desc => ?DESC(fields_authorization_no_match) + } + )}, + {"deny_action", + sc( + hoconsc:enum([ignore, disconnect]), + #{ + default => ignore, + required => true, + desc => ?DESC(fields_authorization_deny_action) + } + )}, + {"cache", + sc( + ref(?MODULE, "authz_cache"), + #{} + )} + ]. + %% @private return a list of keys in a parent field -spec keys(string(), hocon:config()) -> [string()]. keys(Parent, Conf) -> @@ -2169,6 +2644,23 @@ to_comma_separated_binary(Str) -> to_comma_separated_atoms(Str) -> {ok, lists:map(fun to_atom/1, string:tokens(Str, ", "))}. +to_url(Str) -> + case emqx_http_lib:uri_parse(Str) of + {ok, URIMap} -> + URIString = emqx_http_lib:normalize(URIMap), + {ok, iolist_to_binary(URIString)}; + Error -> + Error + end. + +to_json_binary(Str) -> + case emqx_utils_json:safe_decode(Str) of + {ok, _} -> + {ok, iolist_to_binary(Str)}; + Error -> + Error + end. + to_bar_separated_list(Str) -> {ok, string:tokens(Str, "| ")}. @@ -2235,20 +2727,22 @@ to_atom(Str) when is_list(Str) -> to_atom(Bin) when is_binary(Bin) -> binary_to_atom(Bin, utf8). -validate_heap_size(Siz) -> +validate_heap_size(Siz) when is_integer(Siz) -> MaxSiz = case erlang:system_info(wordsize) of % arch_64 - 8 -> - (1 bsl 59) - 1; + 8 -> (1 bsl 59) - 1; % arch_32 - 4 -> - (1 bsl 27) - 1 + 4 -> (1 bsl 27) - 1 end, case Siz > MaxSiz of - true -> error(io_lib:format("force_shutdown_policy: heap-size ~ts is too large", [Siz])); - false -> ok - end. + true -> + {error, #{reason => max_heap_size_too_large, maximum => MaxSiz}}; + false -> + ok + end; +validate_heap_size(_SizStr) -> + {error, invalid_heap_size}. validate_alarm_actions(Actions) -> UnSupported = lists:filter( @@ -2286,7 +2780,8 @@ validate_ciphers(Ciphers) -> Bad -> {error, {bad_ciphers, Bad}} end. -validate_tls_versions(AvailableVersions, Versions) -> +validate_tls_versions(Collection, Versions) -> + AvailableVersions = available_tls_vsns(Collection), case lists:filter(fun(V) -> not lists:member(V, AvailableVersions) end, Versions) of [] -> ok; Vs -> {error, {unsupported_tls_versions, Vs}} @@ -2325,30 +2820,39 @@ str(S) when is_list(S) -> S. authentication(Which) -> - Desc = + {Importance, Desc} = case Which of - global -> ?DESC(global_authentication); - listener -> ?DESC(listener_authentication) + global -> + %% For root level authentication, it is recommended to configure + %% from the dashboard or API. + %% Hence it's considered a low-importance when it comes to + %% configuration importance. + {?IMPORTANCE_LOW, ?DESC(global_authentication)}; + listener -> + {?IMPORTANCE_HIDDEN, ?DESC(listener_authentication)} end, - %% The runtime module injection - %% from EMQX_AUTHENTICATION_SCHEMA_MODULE_PT_KEY - %% is for now only affecting document generation. 
- %% maybe in the future, we can find a more straightforward way to support - %% * document generation (at compile time) - %% * type checks before boot (in bin/emqx config generation) - %% * type checks at runtime (when changing configs via management API) - Type0 = + %% poor man's dependency injection + %% this is due to the fact that authn is implemented outside of the 'emqx' app. + %% so it cannot be a part of emqx_schema since the 'emqx' app is supposed to + %% work standalone. + Type = case persistent_term:get(?EMQX_AUTHENTICATION_SCHEMA_MODULE_PT_KEY, undefined) of - undefined -> hoconsc:array(typerefl:map()); - Module -> Module:root_type() + undefined -> + hoconsc:array(typerefl:map()); + Module -> + Module:root_type() end, - %% It is a lazy type because when handing runtime update requests - %% the config is not checked by emqx_schema, but by the injected schema - Type = hoconsc:lazy(Type0), - #{ - type => Type, - desc => Desc - }. + hoconsc:mk(Type, #{ + desc => Desc, + converter => fun ensure_array/2, + default => [], + importance => Importance + }). + +%% the older version schema allows an individual element (instead of a chain) in the config +ensure_array(undefined, _) -> undefined; +ensure_array(L, _) when is_list(L) -> L; +ensure_array(M, _) -> [M]. -spec qos() -> typerefl:type(). qos() -> @@ -2371,7 +2875,7 @@ non_empty_string(_) -> {error, invalid_string}. servers_sc(Meta0, ParseOpts) -> %% if this field has a default value %% then it is NOT required - %% NOTE: maps:is_key is not the solution beause #{default => undefined} is legit + %% NOTE: maps:is_key is not the solution because #{default => undefined} is legit HasDefault = (maps:get(default, Meta0, undefined) =/= undefined), Required = maps:get(required, Meta0, not HasDefault), Meta = #{ @@ -2424,17 +2928,18 @@ normalize_host_port_str(Str) -> %% NOTE: Validator is called after converter. servers_validator(Opts, Required) -> fun(Str0) -> - Str = str(Str0), - case Str =:= "" orelse Str =:= "undefined" of - true when Required -> - %% it's a required field - %% but value is set to an empty string (from environment override) - %% or when the filed is not set in config file + case str(Str0) of + "" -> + %% Empty string is not allowed even if the field is not required + %% we should remove the field from the config if it's empty + throw("cannot_be_empty"); + "undefined" when Required -> + %% when the field is not set in the config file + %% NOTE: assuming nobody is going to name their server "undefined" throw("cannot_be_empty"); - true -> + "undefined" -> ok; - _ -> + Str -> %% it's valid as long as it can be parsed _ = parse_servers(Str, Opts), ok @@ -2449,7 +2954,7 @@ servers_validator(Opts, Required) -> %% `no_port': by default it's `false', when set to `true', %% a `throw' exception is raised if the port is found. -spec parse_server(undefined | string() | binary(), server_parse_option()) -> - {string(), port_number()}. + undefined | parsed_server(). parse_server(Str, Opts) -> case parse_servers(Str, Opts) of undefined -> @@ -2463,7 +2968,7 @@ parse_server(Str, Opts) -> %% @doc Parse comma separated `host[:port][,host[:port]]' endpoints %% into a list of `{Host, Port}' tuples or just `Host' string. -spec parse_servers(undefined | string() | binary(), server_parse_option()) -> - [{string(), port_number()}]. + undefined | [parsed_server()].
parse_servers(undefined, _Opts) -> %% should not parse 'undefined' as string, %% not to throw exception either, @@ -2509,6 +3014,9 @@ split_host_port(Str) -> do_parse_server(Str, Opts) -> DefaultPort = maps:get(default_port, Opts, undefined), NotExpectingPort = maps:get(no_port, Opts, false), + DefaultScheme = maps:get(default_scheme, Opts, undefined), + SupportedSchemes = maps:get(supported_schemes, Opts, []), + NotExpectingScheme = (not is_list(DefaultScheme)) andalso length(SupportedSchemes) =:= 0, case is_integer(DefaultPort) andalso NotExpectingPort of true -> %% either provide a default port from schema, @@ -2517,22 +3025,129 @@ do_parse_server(Str, Opts) -> false -> ok end, + case is_list(DefaultScheme) andalso (not lists:member(DefaultScheme, SupportedSchemes)) of + true -> + %% inconsistent schema + error("bad_schema"); + false -> + ok + end, %% do not split with space, there should be no space allowed between host and port - case string:tokens(Str, ":") of - [Hostname, Port] -> - NotExpectingPort andalso throw("not_expecting_port_number"), - {check_hostname(Hostname), parse_port(Port)}; - [Hostname] -> - case is_integer(DefaultPort) of - true -> - {check_hostname(Hostname), DefaultPort}; - false when NotExpectingPort -> - check_hostname(Hostname); - false -> - throw("missing_port_number") - end; - _ -> - throw("bad_host_port") + Tokens = string:tokens(Str, ":"), + Context = #{ + not_expecting_port => NotExpectingPort, + not_expecting_scheme => NotExpectingScheme, + default_port => DefaultPort, + default_scheme => DefaultScheme, + opts => Opts + }, + check_server_parts(Tokens, Context). + +check_server_parts([Scheme, "//" ++ Hostname, Port], Context) -> + #{ + not_expecting_scheme := NotExpectingScheme, + not_expecting_port := NotExpectingPort, + opts := Opts + } = Context, + NotExpectingPort andalso throw("not_expecting_port_number"), + NotExpectingScheme andalso throw("not_expecting_scheme"), + #{ + scheme => check_scheme(Scheme, Opts), + hostname => check_hostname(Hostname), + port => parse_port(Port) + }; +check_server_parts([Scheme, "//" ++ Hostname], Context) -> + #{ + not_expecting_scheme := NotExpectingScheme, + not_expecting_port := NotExpectingPort, + default_port := DefaultPort, + opts := Opts + } = Context, + NotExpectingScheme andalso throw("not_expecting_scheme"), + case is_integer(DefaultPort) of + true -> + #{ + scheme => check_scheme(Scheme, Opts), + hostname => check_hostname(Hostname), + port => DefaultPort + }; + false when NotExpectingPort -> + #{ + scheme => check_scheme(Scheme, Opts), + hostname => check_hostname(Hostname) + }; + false -> + throw("missing_port_number") + end; +check_server_parts([Hostname, Port], Context) -> + #{ + not_expecting_port := NotExpectingPort, + default_scheme := DefaultScheme + } = Context, + NotExpectingPort andalso throw("not_expecting_port_number"), + case is_list(DefaultScheme) of + false -> + #{ + hostname => check_hostname(Hostname), + port => parse_port(Port) + }; + true -> + #{ + scheme => DefaultScheme, + hostname => check_hostname(Hostname), + port => parse_port(Port) + } + end; +check_server_parts([Hostname], Context) -> + #{ + not_expecting_scheme := NotExpectingScheme, + not_expecting_port := NotExpectingPort, + default_port := DefaultPort, + default_scheme := DefaultScheme + } = Context, + case is_integer(DefaultPort) orelse NotExpectingPort of + true -> + ok; + false -> + throw("missing_port_number") + end, + case is_list(DefaultScheme) orelse NotExpectingScheme of + true -> + ok; + false -> + 
throw("missing_scheme") + end, + case {is_integer(DefaultPort), is_list(DefaultScheme)} of + {true, true} -> + #{ + scheme => DefaultScheme, + hostname => check_hostname(Hostname), + port => DefaultPort + }; + {true, false} -> + #{ + hostname => check_hostname(Hostname), + port => DefaultPort + }; + {false, true} -> + #{ + scheme => DefaultScheme, + hostname => check_hostname(Hostname) + }; + {false, false} -> + #{hostname => check_hostname(Hostname)} + end; +check_server_parts(_Tokens, _Context) -> + throw("bad_host_port"). + +check_scheme(Str, Opts) -> + SupportedSchemes = maps:get(supported_schemes, Opts, []), + IsSupported = lists:member(Str, SupportedSchemes), + case IsSupported of + true -> + Str; + false -> + throw("unsupported_scheme") end. check_hostname(Str) -> @@ -2579,12 +3194,194 @@ is_port_number(Port) -> end. parse_port(Port) -> - try - P = list_to_integer(string:strip(Port)), - true = (P > 0), - true = (P =< 65535), - P - catch - _:_ -> - throw("bad_port_number") + case string:to_integer(string:strip(Port)) of + {P, ""} when P < 0 -> throw("port_number_must_be_positive"); + {P, ""} when P > 65535 -> throw("port_number_too_large"); + {P, ""} -> P; + _ -> throw("bad_port_number") end. + +quic_feature_toggle(Desc) -> + sc( + %% true, false are for user facing + %% 0, 1 are for internal representation + typerefl:alias("boolean", typerefl:union([true, false, 0, 1])), + #{ + desc => Desc, + importance => ?IMPORTANCE_HIDDEN, + required => false, + converter => fun + (true) -> 1; + (false) -> 0; + (Other) -> Other + end + } + ). + +quic_lowlevel_settings_uint(Low, High, Desc) -> + sc( + range(Low, High), + #{ + required => false, + importance => ?IMPORTANCE_HIDDEN, + desc => Desc + } + ). + +-spec is_quic_ssl_opts(string()) -> boolean(). +is_quic_ssl_opts(Name) -> + lists:member(Name, [ + "cacertfile", + "certfile", + "keyfile", + "verify", + "password" + %% Followings are planned + %% , "hibernate_after" + %% , "fail_if_no_peer_cert" + %% , "handshake_timeout" + %% , "gc_after_handshake" + ]). + +assert_required_field(Conf, Key, ErrorMessage) -> + case maps:get(Key, Conf, undefined) of + undefined -> + throw(ErrorMessage); + _ -> + ok + end. + +default_listener(tcp) -> + #{ + <<"bind">> => <<"0.0.0.0:1883">> + }; +default_listener(ws) -> + #{ + <<"bind">> => <<"0.0.0.0:8083">>, + <<"websocket">> => #{<<"mqtt_path">> => <<"/mqtt">>} + }; +default_listener(SSLListener) -> + %% The env variable is resolved in emqx_tls_lib by calling naive_env_interpolate + SslOptions = #{ + <<"cacertfile">> => cert_file(<<"cacert.pem">>, server), + <<"certfile">> => cert_file(<<"cert.pem">>, server), + <<"keyfile">> => cert_file(<<"key.pem">>, server) + }, + case SSLListener of + ssl -> + #{ + <<"bind">> => <<"0.0.0.0:8883">>, + <<"ssl_options">> => SslOptions + }; + wss -> + #{ + <<"bind">> => <<"0.0.0.0:8084">>, + <<"ssl_options">> => SslOptions, + <<"websocket">> => #{<<"mqtt_path">> => <<"/mqtt">>} + } + end. + +%% @doc This function helps to perform a naive string interpolation which +%% only looks at the first segment of the string and tries to replace it. +%% For example +%% "$MY_FILE_PATH" +%% "${MY_FILE_PATH}" +%% "$ENV_VARIABLE/sub/path" +%% "${ENV_VARIABLE}/sub/path" +%% "${ENV_VARIABLE}\sub\path" # windows +%% This function returns undefined if the input is undefined +%% otherwise always return string. 
+naive_env_interpolation(undefined) -> + undefined; +naive_env_interpolation(Bin) when is_binary(Bin) -> + naive_env_interpolation(unicode:characters_to_list(Bin, utf8)); +naive_env_interpolation("$" ++ Maybe = Original) -> + {Env, Tail} = split_path(Maybe), + case resolve_env(Env) of + {ok, Path} -> + filename:join([Path, Tail]); + error -> + Original + end; +naive_env_interpolation(Other) -> + Other. + +split_path(Path) -> + split_path(Path, []). + +split_path([], Acc) -> + {lists:reverse(Acc), []}; +split_path([Char | Rest], Acc) when Char =:= $/ orelse Char =:= $\\ -> + {lists:reverse(Acc), string:trim(Rest, leading, "/\\")}; +split_path([Char | Rest], Acc) -> + split_path(Rest, [Char | Acc]). + +resolve_env(Name0) -> + Name = string:trim(Name0, both, "{}"), + Value = os:getenv(Name), + case Value =/= false andalso Value =/= "" of + true -> + {ok, Value}; + false -> + special_env(Name) + end. + +-ifdef(TEST). +%% when running tests, we need to mock the env variables +special_env("EMQX_ETC_DIR") -> + {ok, filename:join([code:lib_dir(emqx), etc])}; +special_env("EMQX_LOG_DIR") -> + {ok, "log"}; +special_env(_Name) -> + %% only in tests + error. +-else. +special_env(_Name) -> error. +-endif. + +%% The tombstone atom. +tombstone() -> + ?TOMBSTONE_TYPE. + +%% Make a map type, the value of which is allowed to be 'marked_for_deletion' +%% 'marked_for_deletion' is a special value which means the key is deleted. +%% This is used to support the 'delete' operation in configs, +%% since deleting the key would result in the default value being used. +tombstone_map(Name, Type) -> + %% marked_for_deletion must be the last member of the union + %% because we need the first union member to populate the default values + map(Name, ?UNION([Type, ?TOMBSTONE_TYPE])). + +%% inverse of tombstone_map/2 +get_tombstone_map_value_type(Schema) -> + %% TODO: violation of abstraction, expose an API in hoconsc + %% hoconsc:map_value_type(Schema) + ?MAP(_Name, Union) = hocon_schema:field_schema(Schema, type), + %% TODO: violation of abstraction, fix hoconsc:union_members/1 + ?UNION(Members) = Union, + Tombstone = tombstone(), + [Type, Tombstone] = hoconsc:union_members(Members), + Type. + +%% Keep the 'default' tombstone, but delete others. +keep_default_tombstone(Map, _Opts) when is_map(Map) -> + maps:filter( + fun(Key, Value) -> + Key =:= <<"default">> orelse Value =/= ?TOMBSTONE_VALUE + end, + Map + ); +keep_default_tombstone(Value, _Opts) -> + Value. + +ensure_default_listener(undefined, ListenerType) -> + %% let the schema's default value do its job + #{<<"default">> => default_listener(ListenerType)}; +ensure_default_listener(#{<<"default">> := _} = Map, _ListenerType) -> + keep_default_tombstone(Map, #{}); +ensure_default_listener(Map, ListenerType) -> + NewMap = Map#{<<"default">> => default_listener(ListenerType)}, + keep_default_tombstone(NewMap, #{}). + +cert_file(_File, client) -> undefined; +cert_file(File, server) -> iolist_to_binary(filename:join(["${EMQX_ETC_DIR}", "certs", File])). diff --git a/apps/emqx/src/emqx_sequence.erl b/apps/emqx/src/emqx_sequence.erl index 60596324a..7acc87256 100644 --- a/apps/emqx/src/emqx_sequence.erl +++ b/apps/emqx/src/emqx_sequence.erl @@ -39,7 +39,7 @@ %% @doc Create a sequence. -spec create(name()) -> ok. create(Name) -> - emqx_tables:new(Name, [public, set, {write_concurrency, true}]). + emqx_utils_ets:new(Name, [public, set, {write_concurrency, true}]). %% @doc Next value of the sequence. -spec nextval(name(), key()) -> seqid().
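Note: naive_env_interpolation/1 above substitutes only a leading environment variable, which is exactly what cert_file/2 relies on for the ${EMQX_ETC_DIR} prefix. A usage sketch, assuming EMQX_ETC_DIR=/opt/emqx/etc in the environment (the path is a placeholder):

    %% Expected results (shown as comments):
    %% naive_env_interpolation(<<"${EMQX_ETC_DIR}/certs/cert.pem">>)
    %%     -> "/opt/emqx/etc/certs/cert.pem"
    %% naive_env_interpolation("$UNSET_VAR/certs/cert.pem")
    %%     -> "$UNSET_VAR/certs/cert.pem"  (unresolved input is returned as-is)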
diff --git a/apps/emqx/src/emqx_session.erl b/apps/emqx/src/emqx_session.erl index 2e17190e2..25bee629e 100644 --- a/apps/emqx/src/emqx_session.erl +++ b/apps/emqx/src/emqx_session.erl @@ -82,6 +82,7 @@ deliver/3, enqueue/3, dequeue/2, + filter_queue/2, ignore_local/4, retry/2, terminate/3 @@ -200,7 +201,7 @@ -spec init(options()) -> session(). init(Opts) -> - MaxInflight = maps:get(max_inflight, Opts, 1), + MaxInflight = maps:get(max_inflight, Opts), QueueOpts = maps:merge( #{ max_len => 1000, @@ -211,17 +212,17 @@ init(Opts) -> #session{ id = emqx_guid:gen(), clientid = maps:get(clientid, Opts, <<>>), - is_persistent = maps:get(is_persistent, Opts, false), - max_subscriptions = maps:get(max_subscriptions, Opts, infinity), + is_persistent = maps:get(is_persistent, Opts), + max_subscriptions = maps:get(max_subscriptions, Opts), subscriptions = #{}, - upgrade_qos = maps:get(upgrade_qos, Opts, false), + upgrade_qos = maps:get(upgrade_qos, Opts), inflight = emqx_inflight:new(MaxInflight), mqueue = emqx_mqueue:init(QueueOpts), next_pkt_id = 1, - retry_interval = maps:get(retry_interval, Opts, 30000), + retry_interval = maps:get(retry_interval, Opts), awaiting_rel = #{}, - max_awaiting_rel = maps:get(max_awaiting_rel, Opts, 100), - await_rel_timeout = maps:get(await_rel_timeout, Opts, 300000), + max_awaiting_rel = maps:get(max_awaiting_rel, Opts), + await_rel_timeout = maps:get(await_rel_timeout, Opts), created_at = erlang:system_time(millisecond) }. @@ -290,16 +291,16 @@ stats(Session) -> info(?STATS_KEYS, Session). ignore_local(ClientInfo, Delivers, Subscriber, Session) -> Subs = info(subscriptions, Session), - lists:dropwhile( + lists:filter( fun({deliver, Topic, #message{from = Publisher} = Msg}) -> case maps:find(Topic, Subs) of {ok, #{nl := 1}} when Subscriber =:= Publisher -> ok = emqx_hooks:run('delivery.dropped', [ClientInfo, Msg, no_local]), ok = emqx_metrics:inc('delivery.dropped'), ok = emqx_metrics:inc('delivery.dropped.no_local'), - true; + false; _ -> - false + true end end, Delivers @@ -529,6 +530,9 @@ dequeue(ClientInfo, Cnt, Msgs, Q) -> end end. +filter_queue(Pred, #session{mqueue = Q} = Session) -> + Session#session{mqueue = emqx_mqueue:filter(Pred, Q)}. + acc_cnt(#message{qos = ?QOS_0}, Cnt) -> Cnt; acc_cnt(_Msg, Cnt) -> Cnt - 1. @@ -937,7 +941,7 @@ age(Now, Ts) -> Now - Ts. %%-------------------------------------------------------------------- set_field(Name, Value, Session) -> - Pos = emqx_misc:index_of(Name, record_info(fields, session)), + Pos = emqx_utils:index_of(Name, record_info(fields, session)), setelement(Pos + 1, Session, Value). get_mqueue(#session{mqueue = Q}) -> diff --git a/apps/emqx/src/emqx_session_router.erl b/apps/emqx/src/emqx_session_router.erl index 94f7fb64d..0435ddca3 100644 --- a/apps/emqx/src/emqx_session_router.erl +++ b/apps/emqx/src/emqx_session_router.erl @@ -71,24 +71,15 @@ %%-------------------------------------------------------------------- create_router_tab(disc) -> - ok = mria:create_table(?ROUTE_DISC_TAB, [ - {type, bag}, - {rlog_shard, ?ROUTE_SHARD}, - {storage, disc_copies}, - {record_name, route}, - {attributes, record_info(fields, route)}, - {storage_properties, [ - {ets, [ - {read_concurrency, true}, - {write_concurrency, true} - ]} - ]} - ]); + create_table(?ROUTE_DISC_TAB, disc_copies); create_router_tab(ram) -> - ok = mria:create_table(?ROUTE_RAM_TAB, [ + create_table(?ROUTE_RAM_TAB, ram_copies). 
+ +create_table(Tab, Storage) -> + ok = mria:create_table(Tab, [ {type, bag}, {rlog_shard, ?ROUTE_SHARD}, - {storage, ram_copies}, + {storage, Storage}, {record_name, route}, {attributes, record_info(fields, route)}, {storage_properties, [ @@ -104,7 +95,7 @@ create_router_tab(ram) -> %%-------------------------------------------------------------------- create_init_tab() -> - emqx_tables:new(?SESSION_INIT_TAB, [ + emqx_utils_ets:new(?SESSION_INIT_TAB, [ public, {read_concurrency, true}, {write_concurrency, true} @@ -113,7 +104,7 @@ create_init_tab() -> -spec start_link(atom(), pos_integer()) -> startlink_ret(). start_link(Pool, Id) -> gen_server:start_link( - {local, emqx_misc:proc_name(?MODULE, Id)}, + {local, emqx_utils:proc_name(?MODULE, Id)}, ?MODULE, [Pool, Id], [{hibernate_after, 1000}] @@ -191,7 +182,7 @@ pending(SessionID, MarkerIDs) -> call(pick(SessionID), {pending, SessionID, MarkerIDs}). buffer(SessionID, STopic, Msg) -> - case emqx_tables:lookup_value(?SESSION_INIT_TAB, SessionID) of + case emqx_utils_ets:lookup_value(?SESSION_INIT_TAB, SessionID) of undefined -> ok; Worker -> emqx_session_router_worker:buffer(Worker, STopic, Msg) end. @@ -203,7 +194,7 @@ resume_begin(From, SessionID) when is_pid(From), is_binary(SessionID) -> -spec resume_end(pid(), binary()) -> {'ok', [emqx_types:message()]} | {'error', term()}. resume_end(From, SessionID) when is_pid(From), is_binary(SessionID) -> - case emqx_tables:lookup_value(?SESSION_INIT_TAB, SessionID) of + case emqx_utils_ets:lookup_value(?SESSION_INIT_TAB, SessionID) of undefined -> ?tp(ps_session_not_found, #{sid => SessionID}), {error, not_found}; @@ -258,7 +249,7 @@ handle_cast({delete_routes, SessionID, Subscriptions}, State) -> ok = lists:foreach(Fun, maps:to_list(Subscriptions)), {noreply, State}; handle_cast({resume_end, SessionID, Pid}, State) -> - case emqx_tables:lookup_value(?SESSION_INIT_TAB, SessionID) of + case emqx_utils_ets:lookup_value(?SESSION_INIT_TAB, SessionID) of undefined -> skip; P when P =:= Pid -> ets:delete(?SESSION_INIT_TAB, SessionID); P when is_pid(P) -> skip @@ -292,7 +283,7 @@ init_resume_worker(RemotePid, SessionID, #{pmon := Pmon} = State) -> error; {ok, Pid} -> Pmon1 = emqx_pmon:monitor(Pid, Pmon), - case emqx_tables:lookup_value(?SESSION_INIT_TAB, SessionID) of + case emqx_utils_ets:lookup_value(?SESSION_INIT_TAB, SessionID) of undefined -> {ok, Pid, State#{pmon => Pmon1}}; {_, OldPid} -> diff --git a/apps/emqx/src/emqx_shared_sub.erl b/apps/emqx/src/emqx_shared_sub.erl index 061f2a42f..997364898 100644 --- a/apps/emqx/src/emqx_shared_sub.erl +++ b/apps/emqx/src/emqx_shared_sub.erl @@ -165,7 +165,7 @@ strategy(Group) -> -spec ack_enabled() -> boolean(). ack_enabled() -> - emqx:get_config([broker, shared_dispatch_ack_enabled]). + emqx:get_config([broker, shared_dispatch_ack_enabled], false). 
do_dispatch(SubPid, _Group, Topic, Msg, _Type) when SubPid =:= self() -> %% Deadlock otherwise @@ -181,7 +181,7 @@ do_dispatch(SubPid, _Group, Topic, Msg, retry) -> do_dispatch(SubPid, Group, Topic, Msg, fresh) -> case ack_enabled() of true -> - %% FIXME: replace with `emqx_shared_sub_proto:dispatch_with_ack' in 5.2 + %% TODO: delete this clause after 5.1.0 do_dispatch_with_ack(SubPid, Group, Topic, Msg); false -> send(SubPid, Topic, {deliver, Topic, Msg}) @@ -399,9 +399,11 @@ init([]) -> ok = mria:wait_for_tables([?TAB]), {ok, _} = mnesia:subscribe({table, ?TAB, simple}), {atomic, PMon} = mria:transaction(?SHARED_SUB_SHARD, fun ?MODULE:init_monitors/0), - ok = emqx_tables:new(?SHARED_SUBS, [protected, bag]), - ok = emqx_tables:new(?ALIVE_SUBS, [protected, set, {read_concurrency, true}]), - ok = emqx_tables:new(?SHARED_SUBS_ROUND_ROBIN_COUNTER, [public, set, {write_concurrency, true}]), + ok = emqx_utils_ets:new(?SHARED_SUBS, [protected, bag]), + ok = emqx_utils_ets:new(?ALIVE_SUBS, [protected, set, {read_concurrency, true}]), + ok = emqx_utils_ets:new(?SHARED_SUBS_ROUND_ROBIN_COUNTER, [ + public, set, {write_concurrency, true} + ]), {ok, update_stats(#state{pmon = PMon})}. init_monitors() -> diff --git a/apps/emqx/src/emqx_ssl_crl_cache.erl b/apps/emqx/src/emqx_ssl_crl_cache.erl new file mode 100644 index 000000000..13eccbd83 --- /dev/null +++ b/apps/emqx/src/emqx_ssl_crl_cache.erl @@ -0,0 +1,237 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2015-2022. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% + +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +%%---------------------------------------------------------------------- +%% Based on `otp/lib/ssl/src/ssl_crl_cache.erl' +%%---------------------------------------------------------------------- + +%%---------------------------------------------------------------------- +%% Purpose: Simple default CRL cache +%%---------------------------------------------------------------------- + +-module(emqx_ssl_crl_cache). + +-include_lib("ssl/src/ssl_internal.hrl"). +-include_lib("public_key/include/public_key.hrl"). + +-behaviour(ssl_crl_cache_api). + +-export_type([crl_src/0, uri/0]). +-type crl_src() :: {file, file:filename()} | {der, public_key:der_encoded()}.
+-type uri() :: uri_string:uri_string(). + +-export([lookup/3, select/2, fresh_crl/2]). +-export([insert/1, insert/2, delete/1]). + +%% Allow usage of OTP certificate record fields (camelCase). +-elvis([ + {elvis_style, atom_naming_convention, #{ + regex => "^([a-z][a-z0-9]*_?)([a-zA-Z0-9]*_?)*$", + enclosed_atoms => ".*" + }} +]). + +%%==================================================================== +%% Cache callback API +%%==================================================================== + +lookup( + #'DistributionPoint'{distributionPoint = {fullName, Names}}, + _Issuer, + CRLDbInfo +) -> + get_crls(Names, CRLDbInfo); +lookup(_, _, _) -> + not_available. + +select(GenNames, CRLDbHandle) when is_list(GenNames) -> + lists:flatmap( + fun + ({directoryName, Issuer}) -> + select(Issuer, CRLDbHandle); + (_) -> + [] + end, + GenNames + ); +select(Issuer, {{_Cache, Mapping}, _}) -> + case ssl_pkix_db:lookup(Issuer, Mapping) of + undefined -> + []; + CRLs -> + CRLs + end. + +fresh_crl(#'DistributionPoint'{distributionPoint = {fullName, Names}}, CRL) -> + case get_crls(Names, undefined) of + not_available -> + CRL; + NewCRL -> + NewCRL + end. + +%%==================================================================== +%% API +%%==================================================================== + +insert(CRLs) -> + insert(?NO_DIST_POINT, CRLs). + +insert(URI, {file, File}) when is_list(URI) -> + case file:read_file(File) of + {ok, PemBin} -> + PemEntries = public_key:pem_decode(PemBin), + CRLs = [ + CRL + || {'CertificateList', CRL, not_encrypted} <- + PemEntries + ], + do_insert(URI, CRLs); + Error -> + Error + end; +insert(URI, {der, CRLs}) -> + do_insert(URI, CRLs). + +delete({file, File}) -> + case file:read_file(File) of + {ok, PemBin} -> + PemEntries = public_key:pem_decode(PemBin), + CRLs = [ + CRL + || {'CertificateList', CRL, not_encrypted} <- + PemEntries + ], + ssl_manager:delete_crls({?NO_DIST_POINT, CRLs}); + Error -> + Error + end; +delete({der, CRLs}) -> + ssl_manager:delete_crls({?NO_DIST_POINT, CRLs}); +delete(URI) -> + case uri_string:normalize(URI, [return_map]) of + #{scheme := "http", path := Path} -> + ssl_manager:delete_crls(string:trim(Path, leading, "/")); + _ -> + {error, {only_http_distribution_points_supported, URI}} + end. + +%%-------------------------------------------------------------------- +%%% Internal functions +%%-------------------------------------------------------------------- +do_insert(URI, CRLs) -> + case uri_string:normalize(URI, [return_map]) of + #{scheme := "http", path := Path} -> + ssl_manager:insert_crls(string:trim(Path, leading, "/"), CRLs); + _ -> + {error, {only_http_distribution_points_supported, URI}} + end. + +get_crls([], _) -> + not_available; +get_crls( + [{uniformResourceIdentifier, "http" ++ _ = URL} | Rest], + CRLDbInfo +) -> + case cache_lookup(URL, CRLDbInfo) of + [] -> + handle_http(URL, Rest, CRLDbInfo); + CRLs -> + CRLs + end; +get_crls([_ | Rest], CRLDbInfo) -> + %% unsupported CRL location + get_crls(Rest, CRLDbInfo). + +http_lookup(URL, Rest, CRLDbInfo, Timeout) -> + case application:ensure_started(inets) of + ok -> + http_get(URL, Rest, CRLDbInfo, Timeout); + _ -> + get_crls(Rest, CRLDbInfo) + end. 
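%% Editor's aside (hedged sketch, not part of the vendored module): the ssl
%% application drives this cache during peer verification. It calls lookup/3
%% with the certificate's CRL distribution point; the module tries the
%% ssl_pkix_db cache first and only then falls back to an HTTP fetch.
%% Assuming a PEM-encoded CRL served over HTTP (the URL is hypothetical),
%% the flow is roughly:
%%
%%   DP = #'DistributionPoint'{distributionPoint =
%%       {fullName, [{uniformResourceIdentifier, "http://ca.local/crl.pem"}]}},
%%   emqx_ssl_crl_cache:lookup(DP, Issuer, CRLDbInfo),
%%   %% -> get_crls/2 -> cache_lookup/2, then http_lookup/4 -> http_get/4 below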
+
+http_get(URL, Rest, CRLDbInfo, Timeout) ->
+    case emqx_crl_cache:http_get(URL, Timeout) of
+        {ok, {_Status, _Headers, Body}} ->
+            case Body of
+                <<"-----BEGIN", _/binary>> ->
+                    Pem = public_key:pem_decode(Body),
+                    CRLs = lists:filtermap(
+                        fun
+                            ({'CertificateList', CRL, not_encrypted}) ->
+                                {true, CRL};
+                            (_) ->
+                                false
+                        end,
+                        Pem
+                    ),
+                    emqx_crl_cache:register_der_crls(URL, CRLs),
+                    CRLs;
+                _ ->
+                    try public_key:der_decode('CertificateList', Body) of
+                        _ ->
+                            CRLs = [Body],
+                            emqx_crl_cache:register_der_crls(URL, CRLs),
+                            CRLs
+                    catch
+                        _:_ ->
+                            get_crls(Rest, CRLDbInfo)
+                    end
+            end;
+        {error, _Reason} ->
+            get_crls(Rest, CRLDbInfo)
+    end.
+
+cache_lookup(_, undefined) ->
+    [];
+cache_lookup(URL, {{Cache, _}, _}) ->
+    #{path := Path} = uri_string:normalize(URL, [return_map]),
+    case ssl_pkix_db:lookup(string:trim(Path, leading, "/"), Cache) of
+        undefined ->
+            [];
+        [CRLs] ->
+            CRLs
+    end.
+
+handle_http(URI, Rest, {_, [{http, Timeout}]} = CRLDbInfo) ->
+    CRLs = http_lookup(URI, Rest, CRLDbInfo, Timeout),
+    %% Uncomment to improve performance, but we would first need to
+    %% implement a cache limit and/or cache cleaning to prevent
+    %% DoS attacks
+    %%insert(URI, {der, CRLs}),
+    CRLs;
+handle_http(_, Rest, CRLDbInfo) ->
+    get_crls(Rest, CRLDbInfo).
diff --git a/apps/emqx/src/emqx_stats.erl b/apps/emqx/src/emqx_stats.erl
index ed901d9a9..ef9109e33 100644
--- a/apps/emqx/src/emqx_stats.erl
+++ b/apps/emqx/src/emqx_stats.erl
@@ -201,7 +201,7 @@ cast(Msg) -> gen_server:cast(?SERVER, Msg).
 %%--------------------------------------------------------------------
 init(#{tick_ms := TickMs}) ->
-    ok = emqx_tables:new(?TAB, [public, set, {write_concurrency, true}]),
+    ok = emqx_utils_ets:new(?TAB, [public, set, {write_concurrency, true}]),
     Stats = lists:append([
         ?CONNECTION_STATS,
         ?CHANNEL_STATS,
@@ -213,7 +213,7 @@ init(#{tick_ms := TickMs}) ->
     {ok, start_timer(#state{updates = [], tick_ms = TickMs}), hibernate}.

 start_timer(#state{tick_ms = Ms} = State) ->
-    State#state{timer = emqx_misc:start_timer(Ms, tick)}.
+    State#state{timer = emqx_utils:start_timer(Ms, tick)}.

 handle_call(stop, _From, State) ->
     {stop, normal, ok, State};
@@ -301,7 +301,7 @@ handle_info(Info, State) ->
     {noreply, State}.

 terminate(_Reason, #state{timer = TRef}) ->
-    emqx_misc:cancel_timer(TRef).
+    emqx_utils:cancel_timer(TRef).

 code_change(_OldVsn, State, _Extra) ->
     {ok, State}.
diff --git a/apps/emqx/src/emqx_sys.erl b/apps/emqx/src/emqx_sys.erl
index 81b27d727..509429796 100644
--- a/apps/emqx/src/emqx_sys.erl
+++ b/apps/emqx/src/emqx_sys.erl
@@ -62,7 +62,7 @@
 -endif.

 -import(emqx_topic, [systop/1]).
--import(emqx_misc, [start_timer/2]).
+-import(emqx_utils, [start_timer/2]).

 -record(state, {
     heartbeat :: maybe(reference()),
@@ -211,7 +211,7 @@ handle_info({timeout, TRef, heartbeat}, State = #state{heartbeat = TRef}) ->
 handle_info({timeout, TRef, tick}, State = #state{ticker = TRef, sysdescr = Descr}) ->
     publish_any(version, version()),
     publish_any(sysdescr, Descr),
-    publish_any(brokers, mria_mnesia:running_nodes()),
+    publish_any(brokers, mria:running_nodes()),
     publish_any(stats, emqx_stats:getstats()),
     publish_any(metrics, emqx_metrics:all()),
     {noreply, tick(State), hibernate};
@@ -222,7 +222,7 @@ handle_info(Info, State) ->
 terminate(_Reason, #state{heartbeat = TRef1, ticker = TRef2}) ->
     _ = emqx_config_handler:remove_handler(?CONF_KEY_PATH),
     unload_event_hooks(sys_event_messages()),
-    lists:foreach(fun emqx_misc:cancel_timer/1, [TRef1, TRef2]).
+    lists:foreach(fun emqx_utils:cancel_timer/1, [TRef1, TRef2]).
unload_event_hooks([]) -> ok; @@ -348,7 +348,7 @@ publish(Event, Payload) when Event == unsubscribed -> Topic = event_topic(Event, Payload), - safe_publish(Topic, emqx_json:encode(Payload)). + safe_publish(Topic, emqx_utils_json:encode(Payload)). metric_topic(Name) -> translate_topic("metrics/", Name). diff --git a/apps/emqx/src/emqx_sys_mon.erl b/apps/emqx/src/emqx_sys_mon.erl index 6ff68820e..f1190f586 100644 --- a/apps/emqx/src/emqx_sys_mon.erl +++ b/apps/emqx/src/emqx_sys_mon.erl @@ -77,7 +77,7 @@ init([]) -> {ok, start_timer(#{timer => undefined, events => []})}. start_timer(State) -> - State#{timer := emqx_misc:start_timer(timer:seconds(2), reset)}. + State#{timer := emqx_utils:start_timer(timer:seconds(2), reset)}. sysm_opts(VM) -> sysm_opts(maps:to_list(VM), []). @@ -204,7 +204,7 @@ handle_info(Info, State) -> {noreply, State}. terminate(_Reason, #{timer := TRef}) -> - emqx_misc:cancel_timer(TRef), + emqx_utils:cancel_timer(TRef), ok. code_change(_OldVsn, State, _Extra) -> diff --git a/apps/emqx/src/emqx_tls_lib.erl b/apps/emqx/src/emqx_tls_lib.erl index eb6091f29..2683d2a9d 100644 --- a/apps/emqx/src/emqx_tls_lib.erl +++ b/apps/emqx/src/emqx_tls_lib.erl @@ -47,8 +47,18 @@ -define(IS_TRUE(Val), ((Val =:= true) orelse (Val =:= <<"true">>))). -define(IS_FALSE(Val), ((Val =:= false) orelse (Val =:= <<"false">>))). --define(SSL_FILE_OPT_NAMES, [<<"keyfile">>, <<"certfile">>, <<"cacertfile">>]). --define(SSL_FILE_OPT_NAMES_A, [keyfile, certfile, cacertfile]). +-define(SSL_FILE_OPT_PATHS, [ + [<<"keyfile">>], + [<<"certfile">>], + [<<"cacertfile">>], + [<<"ocsp">>, <<"issuer_pem">>] +]). +-define(SSL_FILE_OPT_PATHS_A, [ + [keyfile], + [certfile], + [cacertfile], + [ocsp, issuer_pem] +]). %% non-empty string -define(IS_STRING(L), (is_list(L) andalso L =/= [] andalso is_integer(hd(L)))). @@ -298,20 +308,22 @@ ensure_ssl_files(Dir, SSL, Opts) -> RequiredKeys = maps:get(required_keys, Opts, []), case ensure_ssl_file_key(SSL, RequiredKeys) of ok -> - Keys = ?SSL_FILE_OPT_NAMES ++ ?SSL_FILE_OPT_NAMES_A, - ensure_ssl_files(Dir, SSL, Keys, Opts); + KeyPaths = ?SSL_FILE_OPT_PATHS ++ ?SSL_FILE_OPT_PATHS_A, + ensure_ssl_files_per_key(Dir, SSL, KeyPaths, Opts); {error, _} = Error -> Error end. -ensure_ssl_files(_Dir, SSL, [], _Opts) -> +ensure_ssl_files_per_key(_Dir, SSL, [], _Opts) -> {ok, SSL}; -ensure_ssl_files(Dir, SSL, [Key | Keys], Opts) -> - case ensure_ssl_file(Dir, Key, SSL, maps:get(Key, SSL, undefined), Opts) of +ensure_ssl_files_per_key(Dir, SSL, [KeyPath | KeyPaths], Opts) -> + case + ensure_ssl_file(Dir, KeyPath, SSL, emqx_utils_maps:deep_get(KeyPath, SSL, undefined), Opts) + of {ok, NewSSL} -> - ensure_ssl_files(Dir, NewSSL, Keys, Opts); + ensure_ssl_files_per_key(Dir, NewSSL, KeyPaths, Opts); {error, Reason} -> - {error, Reason#{which_options => [Key]}} + {error, Reason#{which_options => [KeyPath]}} end. %% @doc Compare old and new config, delete the ones in old but not in new. 
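%% Editor's aside (hedged, not part of the patch): the switch from flat
%% option names (?SSL_FILE_OPT_NAMES) to key paths (?SSL_FILE_OPT_PATHS)
%% is what lets a nested option such as ocsp.issuer_pem share one code
%% path with keyfile/certfile/cacertfile. A minimal sketch of the deep
%% accessors the new code leans on, matching the deep_get/3 and
%% deep_put/3 calls above (the example map and file path are hypothetical):
%%
%%   SSL0 = #{<<"ocsp">> => #{<<"issuer_pem">> => <<"-----BEGIN CERTIFICATE-----\n...">>}},
%%   Pem = emqx_utils_maps:deep_get([<<"ocsp">>, <<"issuer_pem">>], SSL0, undefined),
%%   SSL1 = emqx_utils_maps:deep_put([<<"ocsp">>, <<"issuer_pem">>], SSL0, <<"data/certs/x.pem">>),
%%   undefined = emqx_utils_maps:deep_get([<<"keyfile">>], SSL1, undefined).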
@@ -321,12 +333,12 @@ delete_ssl_files(Dir, NewOpts0, OldOpts0) ->
     {ok, NewOpts} = ensure_ssl_files(Dir, NewOpts0, #{dry_run => DryRun}),
     {ok, OldOpts} = ensure_ssl_files(Dir, OldOpts0, #{dry_run => DryRun}),
     Get = fun
-        (_K, undefined) -> undefined;
-        (K, Opts) -> maps:get(K, Opts, undefined)
+        (_KP, undefined) -> undefined;
+        (KP, Opts) -> emqx_utils_maps:deep_get(KP, Opts, undefined)
     end,
     lists:foreach(
-        fun(Key) -> delete_old_file(Get(Key, NewOpts), Get(Key, OldOpts)) end,
-        ?SSL_FILE_OPT_NAMES ++ ?SSL_FILE_OPT_NAMES_A
+        fun(KeyPath) -> delete_old_file(Get(KeyPath, NewOpts), Get(KeyPath, OldOpts)) end,
+        ?SSL_FILE_OPT_PATHS ++ ?SSL_FILE_OPT_PATHS_A
     ),
     %% try to delete the dir if it is empty
     _ = file:del_dir(pem_dir(Dir)),
@@ -346,29 +358,33 @@ delete_old_file(_New, Old) ->
         ?SLOG(error, #{msg => "failed_to_delete_ssl_file", file_path => Old, reason => Reason})
     end.

-ensure_ssl_file(_Dir, _Key, SSL, undefined, _Opts) ->
+ensure_ssl_file(_Dir, _KeyPath, SSL, undefined, _Opts) ->
     {ok, SSL};
-ensure_ssl_file(Dir, Key, SSL, MaybePem, Opts) ->
+ensure_ssl_file(Dir, KeyPath, SSL, MaybePem, Opts) ->
     case is_valid_string(MaybePem) of
         true ->
             DryRun = maps:get(dry_run, Opts, false),
-            do_ensure_ssl_file(Dir, Key, SSL, MaybePem, DryRun);
+            do_ensure_ssl_file(Dir, KeyPath, SSL, MaybePem, DryRun);
         false ->
             {error, #{reason => invalid_file_path_or_pem_string}}
     end.

-do_ensure_ssl_file(Dir, Key, SSL, MaybePem, DryRun) ->
+do_ensure_ssl_file(Dir, KeyPath, SSL, MaybePem, DryRun) ->
     case is_pem(MaybePem) of
         true ->
-            case save_pem_file(Dir, Key, MaybePem, DryRun) of
-                {ok, Path} -> {ok, SSL#{Key => Path}};
-                {error, Reason} -> {error, Reason}
+            case save_pem_file(Dir, KeyPath, MaybePem, DryRun) of
+                {ok, Path} ->
+                    NewSSL = emqx_utils_maps:deep_put(KeyPath, SSL, Path),
+                    {ok, NewSSL};
+                {error, Reason} ->
+                    {error, Reason}
             end;
         false ->
             case is_valid_pem_file(MaybePem) of
                 true ->
                     {ok, SSL};
-                {error, enoent} when DryRun -> {ok, SSL};
+                {error, enoent} when DryRun ->
+                    {ok, SSL};
                 {error, Reason} ->
                     {error, #{
                         pem_check => invalid_pem,
@@ -398,8 +414,8 @@ is_pem(MaybePem) ->
 %% To make it simple, the file is always overwritten.
 %% Also a potentially half-written PEM file (e.g. due to power outage)
 %% can be corrected with an overwrite.
-save_pem_file(Dir, Key, Pem, DryRun) ->
-    Path = pem_file_name(Dir, Key, Pem),
+save_pem_file(Dir, KeyPath, Pem, DryRun) ->
+    Path = pem_file_name(Dir, KeyPath, Pem),
     case filelib:ensure_dir(Path) of
         ok when DryRun ->
             {ok, Path};
@@ -422,11 +438,14 @@ is_generated_file(Filename) ->
         _ -> false
     end.

-pem_file_name(Dir, Key, Pem) ->
+pem_file_name(Dir, KeyPath, Pem) ->
     <<CK:8/binary, _/binary>> = crypto:hash(md5, Pem),
     Suffix = hex_str(CK),
-    FileName = binary:replace(ensure_bin(Key), <<"file">>, <<"-", Suffix/binary>>),
-    filename:join([pem_dir(Dir), FileName]).
+    Segments = lists:map(fun ensure_bin/1, KeyPath),
+    Filename0 = iolist_to_binary(lists:join(<<"_">>, Segments)),
+    Filename1 = binary:replace(Filename0, <<"file">>, <<>>),
+    Filename = <<Filename1/binary, "-", Suffix/binary>>,
+    filename:join([pem_dir(Dir), Filename]).

 pem_dir(Dir) ->
     filename:join([emqx:mutable_certs_dir(), Dir]).
@@ -453,7 +472,8 @@ hex_str(Bin) ->
     iolist_to_binary([io_lib:format("~2.16.0b", [X]) || <<X:8>> <= Bin]).

 %% @doc Returns 'true' when the file is a valid pem, otherwise {error, Reason}.
-is_valid_pem_file(Path) -> +is_valid_pem_file(Path0) -> + Path = resolve_cert_path_for_read(Path0), case file:read_file(Path) of {ok, Pem} -> is_pem(Pem) orelse {error, not_pem}; {error, Reason} -> {error, Reason} @@ -465,24 +485,26 @@ is_valid_pem_file(Path) -> %% so they are forced to upload a cert file, or use an existing file path. -spec drop_invalid_certs(map()) -> map(). drop_invalid_certs(#{enable := False} = SSL) when ?IS_FALSE(False) -> - maps:without(?SSL_FILE_OPT_NAMES_A, SSL); + lists:foldl(fun emqx_utils_maps:deep_remove/2, SSL, ?SSL_FILE_OPT_PATHS_A); drop_invalid_certs(#{<<"enable">> := False} = SSL) when ?IS_FALSE(False) -> - maps:without(?SSL_FILE_OPT_NAMES, SSL); + lists:foldl(fun emqx_utils_maps:deep_remove/2, SSL, ?SSL_FILE_OPT_PATHS); drop_invalid_certs(#{enable := True} = SSL) when ?IS_TRUE(True) -> - do_drop_invalid_certs(?SSL_FILE_OPT_NAMES_A, SSL); + do_drop_invalid_certs(?SSL_FILE_OPT_PATHS_A, SSL); drop_invalid_certs(#{<<"enable">> := True} = SSL) when ?IS_TRUE(True) -> - do_drop_invalid_certs(?SSL_FILE_OPT_NAMES, SSL). + do_drop_invalid_certs(?SSL_FILE_OPT_PATHS, SSL). do_drop_invalid_certs([], SSL) -> SSL; -do_drop_invalid_certs([Key | Keys], SSL) -> - case maps:get(Key, SSL, undefined) of +do_drop_invalid_certs([KeyPath | KeyPaths], SSL) -> + case emqx_utils_maps:deep_get(KeyPath, SSL, undefined) of undefined -> - do_drop_invalid_certs(Keys, SSL); + do_drop_invalid_certs(KeyPaths, SSL); PemOrPath -> case is_pem(PemOrPath) orelse is_valid_pem_file(PemOrPath) of - true -> do_drop_invalid_certs(Keys, SSL); - {error, _} -> do_drop_invalid_certs(Keys, maps:without([Key], SSL)) + true -> + do_drop_invalid_certs(KeyPaths, SSL); + {error, _} -> + do_drop_invalid_certs(KeyPaths, emqx_utils_maps:deep_remove(KeyPath, SSL)) end end. @@ -492,10 +514,16 @@ do_drop_invalid_certs([Key | Keys], SSL) -> to_server_opts(Type, Opts) -> Versions = integral_versions(Type, maps:get(versions, Opts, undefined)), Ciphers = integral_ciphers(Versions, maps:get(ciphers, Opts, undefined)), - maps:to_list(Opts#{ - ciphers => Ciphers, - versions => Versions - }). + Path = fun(Key) -> resolve_cert_path_for_read_strict(maps:get(Key, Opts, undefined)) end, + filter( + maps:to_list(Opts#{ + keyfile => Path(keyfile), + certfile => Path(certfile), + cacertfile => Path(cacertfile), + ciphers => Ciphers, + versions => Versions + }) + ). %% @doc Convert hocon-checked tls client options (map()) to %% proplist accepted by ssl library. @@ -509,11 +537,12 @@ to_client_opts(Opts) -> to_client_opts(Type, Opts) -> GetD = fun(Key, Default) -> fuzzy_map_get(Key, Opts, Default) end, Get = fun(Key) -> GetD(Key, undefined) end, + Path = fun(Key) -> resolve_cert_path_for_read_strict(Get(Key)) end, case GetD(enable, false) of true -> - KeyFile = ensure_str(Get(keyfile)), - CertFile = ensure_str(Get(certfile)), - CAFile = ensure_str(Get(cacertfile)), + KeyFile = Path(keyfile), + CertFile = Path(certfile), + CAFile = Path(cacertfile), Verify = GetD(verify, verify_none), SNI = ensure_sni(Get(server_name_indication)), Versions = integral_versions(Type, Get(versions)), @@ -535,6 +564,31 @@ to_client_opts(Type, Opts) -> [] end. 
+resolve_cert_path_for_read_strict(Path) -> + case resolve_cert_path_for_read(Path) of + undefined -> + undefined; + ResolvedPath -> + case filelib:is_regular(ResolvedPath) of + true -> + ResolvedPath; + false -> + PathToLog = ensure_str(Path), + LogData = + case PathToLog =:= ResolvedPath of + true -> + #{path => PathToLog}; + false -> + #{path => PathToLog, resolved_path => ResolvedPath} + end, + ?SLOG(error, LogData#{msg => "cert_file_not_found"}), + undefined + end + end. + +resolve_cert_path_for_read(Path) -> + emqx_schema:naive_env_interpolation(Path). + filter([]) -> []; filter([{_, undefined} | T]) -> filter(T); filter([{_, ""} | T]) -> filter(T); @@ -565,9 +619,12 @@ ensure_bin(A) when is_atom(A) -> atom_to_binary(A, utf8). ensure_ssl_file_key(_SSL, []) -> ok; -ensure_ssl_file_key(SSL, RequiredKeys) -> - Filter = fun(Key) -> not maps:is_key(Key, SSL) end, - case lists:filter(Filter, RequiredKeys) of +ensure_ssl_file_key(SSL, RequiredKeyPaths) -> + NotFoundRef = make_ref(), + Filter = fun(KeyPath) -> + NotFoundRef =:= emqx_utils_maps:deep_get(KeyPath, SSL, NotFoundRef) + end, + case lists:filter(Filter, RequiredKeyPaths) of [] -> ok; Miss -> {error, #{reason => ssl_file_option_not_found, which_options => Miss}} end. diff --git a/apps/emqx/src/emqx_trace/emqx_trace.erl b/apps/emqx/src/emqx_trace/emqx_trace.erl index ea6736038..91194772f 100644 --- a/apps/emqx/src/emqx_trace/emqx_trace.erl +++ b/apps/emqx/src/emqx_trace/emqx_trace.erl @@ -21,6 +21,7 @@ -include_lib("emqx/include/logger.hrl"). -include_lib("kernel/include/file.hrl"). -include_lib("snabbkaffe/include/trace.hrl"). +-include_lib("emqx/include/emqx_trace.hrl"). -export([ publish/1, @@ -54,8 +55,6 @@ -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]). --include("emqx_trace.hrl"). - -ifdef(TEST). -export([ log_file/2, @@ -147,7 +146,11 @@ list(Enable) -> -spec create([{Key :: binary(), Value :: binary()}] | #{atom() => binary()}) -> {ok, #?TRACE{}} - | {error, {duplicate_condition, iodata()} | {already_existed, iodata()} | iodata()}. + | {error, + {duplicate_condition, iodata()} + | {already_existed, iodata()} + | {bad_type, any()} + | iodata()}. 
create(Trace) -> case mnesia:table_info(?TRACE, size) < ?MAX_SIZE of true -> @@ -222,14 +225,16 @@ format(Traces) -> init([]) -> erlang:process_flag(trap_exit, true), + Fields = record_info(fields, ?TRACE), ok = mria:create_table(?TRACE, [ {type, set}, {rlog_shard, ?SHARD}, {storage, disc_copies}, {record_name, ?TRACE}, - {attributes, record_info(fields, ?TRACE)} + {attributes, Fields} ]), ok = mria:wait_for_tables([?TRACE]), + maybe_migrate_trace(Fields), {ok, _} = mnesia:subscribe({table, ?TRACE, simple}), ok = filelib:ensure_dir(filename:join([trace_dir(), dummy])), ok = filelib:ensure_dir(filename:join([zip_dir(), dummy])), @@ -267,7 +272,7 @@ handle_info({timeout, TRef, update_trace}, #{timer := TRef} = State) -> ?tp(update_trace_done, #{}), {noreply, State#{timer => NextTRef}}; handle_info({mnesia_table_event, _Events}, State = #{timer := TRef}) -> - emqx_misc:cancel_timer(TRef), + emqx_utils:cancel_timer(TRef), handle_info({timeout, TRef, update_trace}, State); handle_info(Info, State) -> ?SLOG(error, #{unexpected_info => Info}), @@ -275,7 +280,7 @@ handle_info(Info, State) -> terminate(_Reason, #{timer := TRef}) -> _ = mnesia:unsubscribe({table, ?TRACE, simple}), - emqx_misc:cancel_timer(TRef), + emqx_utils:cancel_timer(TRef), stop_all_trace_handler(), update_trace_handler(), _ = file:del_dir_r(zip_dir()), @@ -297,7 +302,7 @@ update_trace(Traces) -> ok = stop_trace(NeedStop, Started), clean_stale_trace_files(), NextTime = find_closest_time(Traces, Now), - emqx_misc:start_timer(NextTime, update_trace). + emqx_utils:start_timer(NextTime, update_trace). stop_all_trace_handler() -> lists:foreach( @@ -358,9 +363,10 @@ start_trace(Trace) -> name = Name, type = Type, filter = Filter, - start_at = Start + start_at = Start, + payload_encode = PayloadEncode } = Trace, - Who = #{name => Name, type => Type, filter => Filter}, + Who = #{name => Name, type => Type, filter => Filter, payload_encode => PayloadEncode}, emqx_trace_handler:install(Who, debug, log_file(Name, Start)). stop_trace(Finished, Started) -> @@ -490,6 +496,8 @@ to_trace(#{type := ip_address, ip_address := Filter} = Trace, Rec) -> end; to_trace(#{type := Type}, _Rec) -> {error, io_lib:format("required ~s field", [Type])}; +to_trace(#{payload_encode := PayloadEncode} = Trace, Rec) -> + to_trace(maps:remove(payload_encode, Trace), Rec#?TRACE{payload_encode = PayloadEncode}); to_trace(#{start_at := StartAt} = Trace, Rec) -> {ok, Sec} = to_system_second(StartAt), to_trace(maps:remove(start_at, Trace), Rec#?TRACE{start_at = Sec}); @@ -573,3 +581,29 @@ filter_cli_handler(Names) -> now_second() -> os:system_time(second). + +maybe_migrate_trace(Fields) -> + case mnesia:table_info(emqx_trace, attributes) =:= Fields of + true -> + ok; + false -> + TransFun = fun(Trace) -> + case Trace of + {?TRACE, Name, Type, Filter, Enable, StartAt, EndAt} -> + #?TRACE{ + name = Name, + type = Type, + filter = Filter, + enable = Enable, + start_at = StartAt, + end_at = EndAt, + payload_encode = text, + extra = #{} + }; + #?TRACE{} -> + Trace + end + end, + {atomic, ok} = mnesia:transform_table(?TRACE, TransFun, Fields, ?TRACE), + ok + end. 
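%% Editor's aside (hedged): maybe_migrate_trace/1 above relies on
%% mnesia:transform_table/4, which rewrites every stored record through the
%% given fun and installs the new attribute list in one operation. So a
%% pre-upgrade row stored as the old 7-field tuple, e.g.
%%
%%   {emqx_trace, <<"t1">>, clientid, <<"c1">>, true, StartAt, EndAt}
%%
%% comes out as the extended record with the new defaults
%% (payload_encode = text, extra = #{}), while rows already in the new
%% shape pass through the #?TRACE{} clause unchanged.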
diff --git a/apps/emqx/src/emqx_trace/emqx_trace_formatter.erl b/apps/emqx/src/emqx_trace/emqx_trace_formatter.erl index c31bc0355..a44237bd0 100644 --- a/apps/emqx/src/emqx_trace/emqx_trace_formatter.erl +++ b/apps/emqx/src/emqx_trace/emqx_trace_formatter.erl @@ -27,7 +27,7 @@ format( #{level := debug, meta := Meta = #{trace_tag := Tag}, msg := Msg}, #{payload_encode := PEncode} ) -> - Time = calendar:system_time_to_rfc3339(erlang:system_time(second)), + Time = calendar:system_time_to_rfc3339(erlang:system_time(microsecond), [{unit, microsecond}]), ClientId = to_iolist(maps:get(clientid, Meta, "")), Peername = maps:get(peername, Meta, ""), MetaBin = format_meta(Meta, PEncode), diff --git a/apps/emqx/src/emqx_trace/emqx_trace_handler.erl b/apps/emqx/src/emqx_trace/emqx_trace_handler.erl index 9c2d2358e..528bc4d42 100644 --- a/apps/emqx/src/emqx_trace/emqx_trace_handler.erl +++ b/apps/emqx/src/emqx_trace/emqx_trace_handler.erl @@ -44,7 +44,8 @@ -type tracer() :: #{ name := binary(), type := clientid | topic | ip_address, - filter := emqx_types:clientid() | emqx_types:topic() | emqx_trace:ip_address() + filter := emqx_types:clientid() | emqx_types:topic() | emqx_trace:ip_address(), + payload_encode := text | hidden | hex }. -define(CONFIG(_LogFile_), #{ @@ -70,7 +71,12 @@ LogFilePath :: string() ) -> ok | {error, term()}. install(Name, Type, Filter, Level, LogFile) -> - Who = #{type => Type, filter => ensure_bin(Filter), name => ensure_bin(Name)}, + Who = #{ + type => Type, + filter => ensure_bin(Filter), + name => ensure_bin(Name), + payload_encode => payload_encode() + }, install(Who, Level, LogFile). -spec install( @@ -160,14 +166,14 @@ filters(#{type := topic, filter := Filter, name := Name}) -> filters(#{type := ip_address, filter := Filter, name := Name}) -> [{ip_address, {fun ?MODULE:filter_ip_address/2, {ensure_list(Filter), Name}}}]. -formatter(#{type := _Type}) -> +formatter(#{type := _Type, payload_encode := PayloadEncode}) -> {emqx_trace_formatter, #{ %% template is for ?SLOG message not ?TRACE. template => [time, " [", level, "] ", msg, "\n"], single_line => true, max_size => unlimited, depth => unlimited, - payload_encode => payload_encode() + payload_encode => PayloadEncode }}. filter_traces(#{id := Id, level := Level, dst := Dst, filters := Filters}, Acc) -> @@ -190,7 +196,7 @@ handler_id(Name, Type) -> do_handler_id(Name, Type) catch _:_ -> - Hash = emqx_misc:bin_to_hexstr(crypto:hash(md5, Name), lower), + Hash = emqx_utils:bin_to_hexstr(crypto:hash(md5, Name), lower), do_handler_id(Hash, Type) end. diff --git a/apps/emqx/src/emqx_types.erl b/apps/emqx/src/emqx_types.erl index 7223da245..96d75daba 100644 --- a/apps/emqx/src/emqx_types.erl +++ b/apps/emqx/src/emqx_types.erl @@ -238,7 +238,7 @@ -type stats() :: [{atom(), term()}]. -type oom_policy() :: #{ - max_message_queue_len => non_neg_integer(), + max_mailbox_size => non_neg_integer(), max_heap_size => non_neg_integer(), enable => boolean() }. diff --git a/apps/emqx/src/emqx_vm.erl b/apps/emqx/src/emqx_vm.erl index cf1a9dc08..0d861f671 100644 --- a/apps/emqx/src/emqx_vm.erl +++ b/apps/emqx/src/emqx_vm.erl @@ -24,7 +24,6 @@ get_system_info/1, get_memory/0, get_memory/2, - mem_info/0, loads/0 ]). @@ -175,9 +174,9 @@ schedulers() -> loads() -> [ - {load1, ftos(avg1() / 256)}, - {load5, ftos(avg5() / 256)}, - {load15, ftos(avg15() / 256)} + {load1, load(avg1())}, + {load5, load(avg5())}, + {load15, load(avg15())} ]. system_info_keys() -> ?SYSTEM_INFO_KEYS. 
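%% Editor's aside (hedged, worked example): the new load/1 truncates the
%% load average to two decimals instead of formatting it as a string.
%% cpu_sup's avg1/5/15 values are the load average multiplied by 256, so
%% for avg1() returning 300:
%%
%%   load(300) =:= floor((300 / 256) * 100) / 100
%%             =:= floor(117.1875) / 100
%%             =:= 1.17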
@@ -226,15 +225,6 @@ convert_allocated_areas({Key, Value1, Value2}) ->
 convert_allocated_areas({Key, Value}) ->
     {Key, Value}.

-mem_info() ->
-    Dataset = memsup:get_system_memory_data(),
-    Total = proplists:get_value(total_memory, Dataset),
-    Free = proplists:get_value(free_memory, Dataset),
-    [{total_memory, Total}, {used_memory, Total - Free}].
-
-ftos(F) ->
-    io_lib:format("~.2f", [F / 1.0]).
-
 %%%% erlang vm scheduler_usage fun copied from recon
 scheduler_usage(Interval) when is_integer(Interval) ->
     %% We start and stop the scheduler_wall_time system flag
@@ -391,18 +381,32 @@ cpu_util() ->
 compat_windows(Fun) ->
     case os:type() of
         {win32, nt} ->
-            0;
+            0.0;
         _Type ->
             case catch Fun() of
+                Val when is_float(Val) -> floor(Val * 100) / 100;
                 Val when is_number(Val) -> Val;
-                _Error -> 0
+                _Error -> 0.0
             end
     end.

-%% @doc Return on which Eralng/OTP the current vm is running.
-%% NOTE: This API reads a file, do not use it in critical code paths.
+load(Avg) ->
+    floor((Avg / 256) * 100) / 100.
+
+%% @doc Return on which Erlang/OTP the current vm is running.
+%% The dashboard's /api/nodes endpoint calls this function frequently,
+%% so we should avoid reading the file every time.
+%% The OTP version never changes at runtime except when ERTS is upgraded,
+%% so we cache it in a persistent term for performance.
 get_otp_version() ->
-    read_otp_version().
+    case persistent_term:get(emqx_otp_version, undefined) of
+        undefined ->
+            OtpVsn = read_otp_version(),
+            persistent_term:put(emqx_otp_version, OtpVsn),
+            OtpVsn;
+        OtpVsn when is_binary(OtpVsn) ->
+            OtpVsn
+    end.

 read_otp_version() ->
     ReleasesDir = filename:join([code:root_dir(), "releases"]),
@@ -416,6 +420,8 @@ read_otp_version() ->
         %% running tests etc.
         OtpMajor = erlang:system_info(otp_release),
         OtpVsnFile = filename:join([ReleasesDir, OtpMajor, "OTP_VERSION"]),
-        {ok, Vsn} = file:read_file(OtpVsnFile),
-        Vsn
+        case file:read_file(OtpVsnFile) of
+            {ok, Vsn} -> Vsn;
+            {error, enoent} -> list_to_binary(OtpMajor)
+        end
     end.
diff --git a/apps/emqx/src/emqx_vm_mon.erl b/apps/emqx/src/emqx_vm_mon.erl
index 5447e94e9..d90d4139b 100644
--- a/apps/emqx/src/emqx_vm_mon.erl
+++ b/apps/emqx/src/emqx_vm_mon.erl
@@ -63,7 +63,7 @@ handle_info({timeout, _Timer, check}, State) ->
     ProcessCount = erlang:system_info(process_count),
     case ProcessCount / erlang:system_info(process_limit) of
         Percent when Percent > ProcHighWatermark ->
-            Usage = io_lib:format("~p%", [Percent * 100]),
+            Usage = usage(Percent),
             Message = [Usage, " process usage"],
             emqx_alarm:activate(
                 too_many_processes,
@@ -75,7 +75,7 @@ handle_info({timeout, _Timer, check}, State) ->
                 Message
             );
         Percent when Percent < ProcLowWatermark ->
-            Usage = io_lib:format("~p%", [Percent * 100]),
+            Usage = usage(Percent),
             Message = [Usage, " process usage"],
             emqx_alarm:ensure_deactivated(
                 too_many_processes,
@@ -107,4 +107,7 @@ code_change(_OldVsn, State, _Extra) ->
 start_check_timer() ->
     Interval = emqx:get_config([sysmon, vm, process_check_interval]),
-    emqx_misc:start_timer(Interval, check).
+    emqx_utils:start_timer(Interval, check).
+
+usage(Percent) ->
+    integer_to_list(floor(Percent * 100)) ++ "%".
diff --git a/apps/emqx/src/emqx_ws_connection.erl b/apps/emqx/src/emqx_ws_connection.erl
index 817c4b505..00fe545eb 100644
--- a/apps/emqx/src/emqx_ws_connection.erl
+++ b/apps/emqx/src/emqx_ws_connection.erl
@@ -52,7 +52,7 @@
 -export([set_field/3]).
-import( - emqx_misc, + emqx_utils, [ maybe_apply/2, start_timer/2 @@ -90,7 +90,7 @@ listener :: {Type :: atom(), Name :: atom()}, %% Limiter - limiter :: maybe(container()), + limiter :: container(), %% cache operation when overload limiter_cache :: queue:queue(cache()), @@ -121,8 +121,8 @@ -define(SOCK_STATS, [recv_oct, recv_cnt, send_oct, send_cnt]). -define(ENABLED(X), (X =/= undefined)). --define(LIMITER_BYTES_IN, bytes_in). --define(LIMITER_MESSAGE_IN, message_in). +-define(LIMITER_BYTES_IN, bytes). +-define(LIMITER_MESSAGE_IN, messages). -dialyzer({no_match, [info/2]}). -dialyzer({nowarn_function, [websocket_init/1]}). @@ -172,7 +172,7 @@ stats(WsPid) when is_pid(WsPid) -> stats(#state{channel = Channel}) -> SockStats = emqx_pd:get_counters(?SOCK_STATS), ChanStats = emqx_channel:stats(Channel), - ProcStats = emqx_misc:proc_stats(), + ProcStats = emqx_utils:proc_stats(), lists:append([SockStats, ChanStats, ProcStats]). %% kick|discard|takeover @@ -340,7 +340,7 @@ tune_heap_size(Channel) -> ) of #{enable := false} -> ok; - ShutdownPolicy -> emqx_misc:tune_heap_size(ShutdownPolicy) + ShutdownPolicy -> emqx_utils:tune_heap_size(ShutdownPolicy) end. get_stats_enable(Zone) -> @@ -399,6 +399,12 @@ get_peer_info(Type, Listener, Req, Opts) -> websocket_handle({binary, Data}, State) when is_list(Data) -> websocket_handle({binary, iolist_to_binary(Data)}, State); websocket_handle({binary, Data}, State) -> + ?SLOG(debug, #{ + msg => "raw_bin_received", + size => iolist_size(Data), + bin => binary_to_list(binary:encode_hex(Data)), + type => "hex" + }), State2 = ensure_stats_timer(State), {Packets, State3} = parse_incoming(Data, [], State2), LenMsg = erlang:length(Packets), @@ -437,6 +443,7 @@ websocket_info({incoming, Packet = ?CONNECT_PACKET(ConnPkt)}, State) -> NState = State#state{serialize = Serialize}, handle_incoming(Packet, cancel_idle_timer(NState)); websocket_info({incoming, Packet}, State) -> + ?TRACE("WS-MQTT", "mqtt_packet_received", #{packet => Packet}), handle_incoming(Packet, State); websocket_info({outgoing, Packets}, State) -> return(enqueue(Packets, State)); @@ -447,7 +454,7 @@ websocket_info( State = #state{listener = {Type, Listener}} ) -> ActiveN = get_active_n(Type, Listener), - Delivers = [Deliver | emqx_misc:drain_deliver(ActiveN)], + Delivers = [Deliver | emqx_utils:drain_deliver(ActiveN)], with_channel(handle_deliver, [Delivers], State); websocket_info( {timeout, _, limit_timeout}, @@ -572,54 +579,61 @@ handle_timeout(TRef, TMsg, State) -> list(any()), state() ) -> state(). 
+check_limiter( + _Needs, + Data, + WhenOk, + Msgs, + #state{limiter = infinity} = State +) -> + WhenOk(Data, Msgs, State); check_limiter( Needs, Data, WhenOk, Msgs, - #state{ - limiter = Limiter, - limiter_timer = LimiterTimer, - limiter_cache = Cache - } = State + #state{limiter_timer = undefined, limiter = Limiter} = State ) -> - case LimiterTimer of - undefined -> - case emqx_limiter_container:check_list(Needs, Limiter) of - {ok, Limiter2} -> - WhenOk(Data, Msgs, State#state{limiter = Limiter2}); - {pause, Time, Limiter2} -> - ?SLOG(debug, #{ - msg => "pause_time_due_to_rate_limit", - needs => Needs, - time_in_ms => Time - }), + case emqx_limiter_container:check_list(Needs, Limiter) of + {ok, Limiter2} -> + WhenOk(Data, Msgs, State#state{limiter = Limiter2}); + {pause, Time, Limiter2} -> + ?SLOG(debug, #{ + msg => "pause_time_due_to_rate_limit", + needs => Needs, + time_in_ms => Time + }), - Retry = #retry{ - types = [Type || {_, Type} <- Needs], - data = Data, - next = WhenOk - }, + Retry = #retry{ + types = [Type || {_, Type} <- Needs], + data = Data, + next = WhenOk + }, - Limiter3 = emqx_limiter_container:set_retry_context(Retry, Limiter2), + Limiter3 = emqx_limiter_container:set_retry_context(Retry, Limiter2), - TRef = start_timer(Time, limit_timeout), + TRef = start_timer(Time, limit_timeout), - enqueue( - {active, false}, - State#state{ - sockstate = blocked, - limiter = Limiter3, - limiter_timer = TRef - } - ); - {drop, Limiter2} -> - {ok, State#state{limiter = Limiter2}} - end; - _ -> - New = #cache{need = Needs, data = Data, next = WhenOk}, - State#state{limiter_cache = queue:in(New, Cache)} - end. + enqueue( + {active, false}, + State#state{ + sockstate = blocked, + limiter = Limiter3, + limiter_timer = TRef + } + ); + {drop, Limiter2} -> + {ok, State#state{limiter = Limiter2}} + end; +check_limiter( + Needs, + Data, + WhenOk, + _Msgs, + #state{limiter_cache = Cache} = State +) -> + New = #cache{need = Needs, data = Data, next = WhenOk}, + State#state{limiter_cache = queue:in(New, Cache)}. -spec retry_limiter(state()) -> state(). retry_limiter(#state{limiter = Limiter} = State) -> @@ -671,7 +685,7 @@ check_oom(State = #state{channel = Channel}) -> #{enable := false} -> State; #{enable := true} -> - case emqx_misc:check_oom(ShutdownPolicy) of + case emqx_utils:check_oom(ShutdownPolicy) of Shutdown = {shutdown, _Reason} -> postpone(Shutdown, State); _Other -> @@ -719,7 +733,6 @@ parse_incoming(Data, Packets, State = #state{parse_state = ParseState}) -> handle_incoming(Packet, State = #state{listener = {Type, Listener}}) when is_record(Packet, mqtt_packet) -> - ?TRACE("WS-MQTT", "mqtt_packet_received", #{packet => Packet}), ok = inc_incoming_stats(Packet), NState = case @@ -907,7 +920,7 @@ inc_qos_stats_key(_, _) -> undefined. %% Cancel idle timer cancel_idle_timer(State = #state{idle_timer = IdleTimer}) -> - ok = emqx_misc:cancel_timer(IdleTimer), + ok = emqx_utils:cancel_timer(IdleTimer), State#state{idle_timer = undefined}. %%-------------------------------------------------------------------- @@ -1040,7 +1053,7 @@ check_max_connection(Type, Listener) -> %%-------------------------------------------------------------------- set_field(Name, Value, State) -> - Pos = emqx_misc:index_of(Name, record_info(fields, state)), + Pos = emqx_utils:index_of(Name, record_info(fields, state)), setelement(Pos + 1, State, Value). 
%% ensure lowercase letters in headers diff --git a/apps/emqx/src/emqx_zone_schema.erl b/apps/emqx/src/emqx_zone_schema.erl index c2595725b..5d6720986 100644 --- a/apps/emqx/src/emqx_zone_schema.erl +++ b/apps/emqx/src/emqx_zone_schema.erl @@ -15,8 +15,10 @@ %%-------------------------------------------------------------------- -module(emqx_zone_schema). +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). --export([namespace/0, roots/0, fields/1, desc/1]). +-export([namespace/0, roots/0, fields/1, desc/1, zone/0, zone_without_hidden/0]). namespace() -> zone. @@ -33,6 +35,32 @@ roots() -> "overload_protection" ]. +zone() -> + Fields = roots(), + Hidden = hidden(), + lists:map( + fun(F) -> + case lists:member(F, Hidden) of + true -> + {F, ?HOCON(?R_REF(F), #{importance => ?IMPORTANCE_HIDDEN})}; + false -> + {F, ?HOCON(?R_REF(F), #{})} + end + end, + Fields + ). + +zone_without_hidden() -> + lists:map(fun(F) -> {F, ?HOCON(?R_REF(F), #{})} end, roots() -- hidden()). + +hidden() -> + [ + "stats", + "overload_protection", + "conn_congestion", + "flapping_detect" + ]. + %% zone schemas are clones from the same name from root level %% only not allowed to have default values. fields(Name) -> diff --git a/apps/emqx/src/persistent_session/emqx_persistent_session.erl b/apps/emqx/src/persistent_session/emqx_persistent_session.erl index c1100cfdb..68f783283 100644 --- a/apps/emqx/src/persistent_session/emqx_persistent_session.erl +++ b/apps/emqx/src/persistent_session/emqx_persistent_session.erl @@ -303,7 +303,7 @@ resume(ClientInfo = #{clientid := ClientID}, ConnInfo, Session) -> %% 3. Notify writers that we are resuming. %% They will buffer new messages. ?tp(ps_notify_writers, #{sid => SessionID}), - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), NodeMarkers = resume_begin(Nodes, SessionID), ?tp(ps_node_markers, #{sid => SessionID, markers => NodeMarkers}), diff --git a/apps/emqx/src/proto/emqx_proto_v2.erl b/apps/emqx/src/proto/emqx_proto_v2.erl new file mode 100644 index 000000000..a11c8a10e --- /dev/null +++ b/apps/emqx/src/proto/emqx_proto_v2.erl @@ -0,0 +1,86 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_proto_v2). + +-behaviour(emqx_bpapi). + +-include("bpapi.hrl"). + +-export([ + introduced_in/0, + + are_running/1, + is_running/1, + + get_alarms/2, + get_stats/1, + get_metrics/1, + + deactivate_alarm/2, + delete_all_deactivated_alarms/1, + + clean_authz_cache/1, + clean_authz_cache/2, + clean_pem_cache/1 +]). + +introduced_in() -> + "5.0.22". + +-spec is_running(node()) -> boolean() | {badrpc, term()}. +is_running(Node) -> + rpc:call(Node, emqx, is_running, []). + +-spec are_running([node()]) -> emqx_rpc:erpc_multicall(boolean()). 
+are_running(Nodes) when is_list(Nodes) -> + erpc:multicall(Nodes, emqx, is_running, []). + +-spec get_alarms(node(), all | activated | deactivated) -> [map()]. +get_alarms(Node, Type) -> + rpc:call(Node, emqx_alarm, get_alarms, [Type]). + +-spec get_stats(node()) -> emqx_stats:stats() | {badrpc, _}. +get_stats(Node) -> + rpc:call(Node, emqx_stats, getstats, []). + +-spec get_metrics(node()) -> [{emqx_metrics:metric_name(), non_neg_integer()}] | {badrpc, _}. +get_metrics(Node) -> + rpc:call(Node, emqx_metrics, all, []). + +-spec clean_authz_cache(node(), emqx_types:clientid()) -> + ok + | {error, not_found} + | {badrpc, _}. +clean_authz_cache(Node, ClientId) -> + rpc:call(Node, emqx_authz_cache, drain_cache, [ClientId]). + +-spec clean_authz_cache(node()) -> ok | {badrpc, _}. +clean_authz_cache(Node) -> + rpc:call(Node, emqx_authz_cache, drain_cache, []). + +-spec clean_pem_cache(node()) -> ok | {badrpc, _}. +clean_pem_cache(Node) -> + rpc:call(Node, ssl_pem_cache, clear, []). + +-spec deactivate_alarm(node(), binary() | atom()) -> + ok | {error, not_found} | {badrpc, _}. +deactivate_alarm(Node, Name) -> + rpc:call(Node, emqx_alarm, deactivate, [Name]). + +-spec delete_all_deactivated_alarms(node()) -> ok | {badrpc, _}. +delete_all_deactivated_alarms(Node) -> + rpc:call(Node, emqx_alarm, delete_all_deactivated_alarms, []). diff --git a/apps/emqx/test/data/certs/certfile b/apps/emqx/test/data/certs/certfile new file mode 100644 index 000000000..a198faf61 --- /dev/null +++ b/apps/emqx/test/data/certs/certfile @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID/jCCAeagAwIBAgIJAKTICmq1Lg6dMA0GCSqGSIb3DQEBCwUAMDQxEjAQBgNV +BAoMCUVNUVggVGVzdDEeMBwGA1UEAwwVQ2VydGlmaWNhdGUgQXV0aG9yaXR5MB4X +DTIxMTIzMDA4NDExMloXDTQ5MDUxNzA4NDExMlowJTESMBAGA1UECgwJRU1RWCBU +ZXN0MQ8wDQYDVQQDDAZjbGllbnQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQDzrujfx6XZTH0MWqLO6kNAeHndUZ+OGaURXvxKMPMF5dA40lxNG6cEzzlq +0Rm61adlv8tF4kRJrs6EnRjEVoMImrdh07vGFdOTYqP01LjiBhErAzyRtSn2X8FT +Te8ExoCRs3x61SPebGY2hOvFxuO6YDPVOSDvbbxvRgqIlM1ZXC8dOvPSSGZ+P8hV +56EPayRthfu1FVptnkW9CyZCRI0gg95Hv8RC7bGG+tuWpkN9ZrRvohhgGR1+bDUi +BNBpncEsSh+UgWaj8KRN8D16H6m/Im6ty467j0at49FvPx5nACL48/ghtYvzgKLc +uKHtokKUuuzebDK/hQxN3mUSAJStAgMBAAGjIjAgMAsGA1UdDwQEAwIFoDARBglg +hkgBhvhCAQEEBAMCB4AwDQYJKoZIhvcNAQELBQADggIBAIlVyPhOpkz3MNzQmjX7 +xgJ3vGPK5uK11n/wfjRwe2qXwZbrI2sYLVtTpUgvLDuP0gB73Vwfu7xAMdue6TRm +CKr9z0lkQsVBtgoqzZCjd4PYLfHm4EhsOMi98OGKU5uOGD4g3yLwQWXHhbYtiZMO +Jsj0hebYveYJt/BYTd1syGQcIcYCyVExWvSWjidfpAqjT6EF7whdubaFtuF2kaGF +IO9yn9rWtXB5yK99uCguEmKhx3fAQxomzqweTu3WRvy9axsUH3WAUW9a4DIBSz2+ +ZSJNheFn5GktgggygJUGYqpSZHooUJW0UBs/8vX6AP+8MtINmqOGZUawmNwLWLOq +wHyVt2YGD5TXjzzsWNSQ4mqXxM6AXniZVZK0yYNjA4ATikX1AtwunyWBR4IjyE/D +FxYPORdZCOtywRFE1R5KLTUq/C8BNGCkYnoO78DJBO+pT0oagkQGQb0CnmC6C1db +4lWzA9K0i4B0PyooZA+gp+5FFgaLuX1DkyeaY1J204QhHR1z/Vcyl5dpqR9hqnYP +t8raLk9ogMDKqKA9iG0wc3CBNckD4sjVWAEeovXhElG55fD21wwhF+AnDCvX8iVK +cBfKV6z6uxfKjGIxc2I643I5DiIn+V3DnPxYyY74Ln1lWFYmt5JREhAxPu42zq74 +e6+eIMYFszB+5gKgt6pa6ZNI +-----END CERTIFICATE----- diff --git a/apps/emqx/test/data/certs/keyfile b/apps/emqx/test/data/certs/keyfile new file mode 100644 index 000000000..2f0af5d41 --- /dev/null +++ b/apps/emqx/test/data/certs/keyfile @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA867o38el2Ux9DFqizupDQHh53VGfjhmlEV78SjDzBeXQONJc +TRunBM85atEZutWnZb/LReJESa7OhJ0YxFaDCJq3YdO7xhXTk2Kj9NS44gYRKwM8 +kbUp9l/BU03vBMaAkbN8etUj3mxmNoTrxcbjumAz1Tkg7228b0YKiJTNWVwvHTrz +0khmfj/IVeehD2skbYX7tRVabZ5FvQsmQkSNIIPeR7/EQu2xhvrblqZDfWa0b6IY 
+YBkdfmw1IgTQaZ3BLEoflIFmo/CkTfA9eh+pvyJurcuOu49GrePRbz8eZwAi+PP4 +IbWL84Ci3Lih7aJClLrs3mwyv4UMTd5lEgCUrQIDAQABAoIBAQDwEbBgznrIwn8r +jZt5x/brbAV7Ea/kOcWSgIaCvQifFdJ2OGAwov5/UXwajNgRZe2d4z7qoUhvYuUY +ZwCAZU6ASpRBr2v9cYFYYURvrqZaHmoJew3P6q/lhl6aqFvC06DUagRHqvXEafyk +13zEAvZVpfNKrBaTawPKiDFWb2qDDc9D6hC07EuJ/DNeehiHvzHrSZSDVV5Ut7Bw +YDm33XygheUPAlHfeCnaixzcs3osiVyFEmVjxcIaM0ZS1NgcSaohSpJHMzvEaohX +e+v9vccraSVlw01AlvFwI2vHYUV8jT6HwglTPKKGOCzK/ace3wPdYSU9qLcqfuHn +EFhNc3tNAoGBAPugLMgbReJg2gpbIPUkYyoMMAAU7llFU1WvPWwXzo1a9EBjBACw +WfCZISNtANXR38zIYXzoH547uXi4YPks1Nne3sYuCDpvuX+iz7fIo4zHf1nFmxH7 +eE6GtQr2ubmuuipTc28S0wBMGT1/KybH0e2NKL6GaOkNDmAI0IbEMBrvAoGBAPfr +Y1QYLhPhan6m5g/5s+bQpKtHfNH9TNkk13HuYu72zNuY3qL2GC7oSadR8vTbRXZg +KQqfaO0IGRcdkSFTq/AEhSSqr2Ld5nPadMbKvSGrSCc1s8rFH97jRVQY56yhM7ti +IW4+6cE8ylCMbdYB6wuduK/GIgNpqoF4xs1i2XojAoGACacBUMPLEH4Kny8TupOk +wi4pgTdMVVxVcAoC3yyincWJbRbfRm99Y79cCBHcYFdmsGJXawU0gUtlN/5KqgRQ +PfNQtGV7p1I12XGTakdmDrZwai8sXao52TlNpJgGU9siBRGicfZU5cQFi9he/WPY +57XshDJ/v8DidkigRysrdT0CgYEA5iuO22tblC+KvK1dGOXeZWO+DhrfwuGlcFBp +CaimB2/w/8vsn2VVTG9yujo2E6hj1CQw1mDrfG0xRim4LTXOgpbfugwRqvuTUmo2 +Ur21XEX2RhjwpEfhcACWxB4fMUG0krrniMA2K6axupi1/KNpQi6bYe3UdFCs8Wld +QSAOAvsCgYBk/X5PmD44DvndE5FShM2w70YOoMr3Cgl5sdwAFUFE9yDuC14UhVxk +oxnYxwtVI9uVVirET+LczP9JEvcvxnN/Xg3tH/qm0WlIxmTxyYrFFIK9j0rqeu9z +blPu56OzNI2VMrR1GbOBLxQINLTIpaacjNJAlr8XOlegdUJsW/Jwqw== +-----END RSA PRIVATE KEY----- diff --git a/apps/emqx/test/data/certs/keyfile2 b/apps/emqx/test/data/certs/keyfile2 new file mode 100644 index 000000000..2b3f30cf6 --- /dev/null +++ b/apps/emqx/test/data/certs/keyfile2 @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAzLiGiSwpxkENtjrzS7pNLblTnWe4HUUFwYyUX0H+3TnvA86X +EX85yZvFjkzB6lLjUkMY+C6UTVXt+mxeSJbUtSKZhX+2yoF/KYh7SaVjug5FqEqO +LvMpDZQEhUWF2W9DG6eUgOfDoX2milSDIe10yG2WBkryipHAfE7l1t+i6Rh3on+v +561LmrbqyBWR/cLp23RN3sHbkf2pb5/ugtU9twdgJr6Lve73rvSeulewL5BzszKD +BrYqr+PBT5+3ItCc55bTsO7M7CzOIL99BlqdvFH7xT0U1+2BFwLe4/8kwphSqyJE +C5oOiQBFnFVNXmFQSV+k7rPr80i1IO++HeJ6KQIDAQABAoIBAGWgvPjfuaU3qizq +uti/FY07USz0zkuJdkANH6LiSjlchzDmn8wJ0pApCjuIE0PV/g9aS8z4opp5q/gD +UBLM/a8mC/xf2EhTXOMrY7i9p/I3H5FZ4ZehEqIw9sWKK9YzC6dw26HabB2BGOnW +5nozPSQ6cp2RGzJ7BIkxSZwPzPnVTgy3OAuPOiJytvK+hGLhsNaT+Y9bNDvplVT2 +ZwYTV8GlHZC+4b2wNROILm0O86v96O+Qd8nn3fXjGHbMsAnONBq10bZS16L4fvkH +5G+W/1PeSXmtZFppdRRDxIW+DWcXK0D48WRliuxcV4eOOxI+a9N2ZJZZiNLQZGwg +w3A8+mECgYEA8HuJFrlRvdoBe2U/EwUtG74dcyy30L4yEBnN5QscXmEEikhaQCfX +Wm6EieMcIB/5I5TQmSw0cmBMeZjSXYoFdoI16/X6yMMuATdxpvhOZGdUGXxhAH+x +xoTUavWZnEqW3fkUU71kT5E2f2i+0zoatFESXHeslJyz85aAYpP92H0CgYEA2e5A +Yozt5eaA1Gyhd8SeptkEU4xPirNUnVQHStpMWUb1kzTNXrPmNWccQ7JpfpG6DcYl +zUF6p6mlzY+zkMiyPQjwEJlhiHM2NlL1QS7td0R8ewgsFoyn8WsBI4RejWrEG9td +EDniuIw+pBFkcWthnTLHwECHdzgquToyTMjrBB0CgYEA28tdGbrZXhcyAZEhHAZA +Gzog+pKlkpEzeonLKIuGKzCrEKRecIK5jrqyQsCjhS0T7ZRnL4g6i0s+umiV5M5w +fcc292pEA1h45L3DD6OlKplSQVTv55/OYS4oY3YEJtf5mfm8vWi9lQeY8sxOlQpn +O+VZTdBHmTC8PGeTAgZXHZUCgYA6Tyv88lYowB7SN2qQgBQu8jvdGtqhcs/99GCr +H3N0I69LPsKAR0QeH8OJPXBKhDUywESXAaEOwS5yrLNP1tMRz5Vj65YUCzeDG3kx +gpvY4IMp7ArX0bSRvJ6mYSFnVxy3k174G3TVCfksrtagHioVBGQ7xUg5ltafjrms +n8l55QKBgQDVzU8tQvBVqY8/1lnw11Vj4fkE/drZHJ5UkdC1eenOfSWhlSLfUJ8j +ds7vEWpRPPoVuPZYeR1y78cyxKe1GBx6Wa2lF5c7xjmiu0xbRnrxYeLolce9/ntp +asClqpnHT8/VJYTD7Kqj0fouTTZf0zkig/y+2XERppd8k+pSKjUCPQ== +-----END RSA PRIVATE KEY----- diff --git a/apps/emqx/test/emqx_SUITE.erl b/apps/emqx/test/emqx_SUITE.erl index dbe8e09a6..64ed2ea19 100644 --- a/apps/emqx/test/emqx_SUITE.erl +++ b/apps/emqx/test/emqx_SUITE.erl @@ -26,6 +26,7 @@ all() -> 
emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> + emqx_common_test_helpers:boot_modules(all), emqx_common_test_helpers:start_apps([]), Config. @@ -147,6 +148,14 @@ t_run_hook(_) -> ?assertEqual(3, emqx:run_fold_hook(foldl_filter2_hook, [arg], 1)), ?assertEqual(2, emqx:run_fold_hook(foldl_filter2_hook, [arg1], 1)). +t_cluster_nodes(_) -> + Expected = [node()], + ?assertEqual(Expected, emqx:running_nodes()), + ?assertEqual(Expected, emqx:cluster_nodes(running)), + ?assertEqual(Expected, emqx:cluster_nodes(all)), + ?assertEqual(Expected, emqx:cluster_nodes(cores)), + ?assertEqual([], emqx:cluster_nodes(stopped)). + %%-------------------------------------------------------------------- %% Hook fun %%-------------------------------------------------------------------- diff --git a/apps/emqx/test/emqx_authentication_SUITE.erl b/apps/emqx/test/emqx_authentication_SUITE.erl index e2e95d7ad..0190ab936 100644 --- a/apps/emqx/test/emqx_authentication_SUITE.erl +++ b/apps/emqx/test/emqx_authentication_SUITE.erl @@ -52,50 +52,10 @@ ) ). -%%------------------------------------------------------------------------------ -%% Hocon Schema -%%------------------------------------------------------------------------------ - -roots() -> - [ - {config, #{ - type => hoconsc:union([ - hoconsc:ref(?MODULE, type1), - hoconsc:ref(?MODULE, type2) - ]) - }} - ]. - -fields(type1) -> - [ - {mechanism, {enum, [password_based]}}, - {backend, {enum, [built_in_database]}}, - {enable, fun enable/1} - ]; -fields(type2) -> - [ - {mechanism, {enum, [password_based]}}, - {backend, {enum, [mysql]}}, - {enable, fun enable/1} - ]. - -enable(type) -> boolean(); -enable(default) -> true; -enable(_) -> undefined. - %%------------------------------------------------------------------------------ %% Callbacks %%------------------------------------------------------------------------------ -check_config(C) -> - #{config := R} = - hocon_tconf:check_plain( - ?MODULE, - #{<<"config">> => C}, - #{atom_key => true} - ), - R. - create(_AuthenticatorID, _Config) -> {ok, #{mark => 1}}. @@ -106,6 +66,10 @@ authenticate(#{username := <<"good">>}, _State) -> {ok, #{is_superuser => true}}; authenticate(#{username := <<"ignore">>}, _State) -> ignore; +authenticate(#{username := <<"emqx_authn_ignore_for_hook_good">>}, _State) -> + ignore; +authenticate(#{username := <<"emqx_authn_ignore_for_hook_bad">>}, _State) -> + ignore; authenticate(#{username := _}, _State) -> {error, bad_username_or_password}. @@ -117,6 +81,10 @@ hook_authenticate(#{username := <<"hook_user_finally_good">>}, _AuthResult) -> {stop, {ok, ?NOT_SUPERUSER}}; hook_authenticate(#{username := <<"hook_user_finally_bad">>}, _AuthResult) -> {stop, {error, invalid_username}}; +hook_authenticate(#{username := <<"emqx_authn_ignore_for_hook_good">>}, _AuthResult) -> + {ok, {ok, ?NOT_SUPERUSER}}; +hook_authenticate(#{username := <<"emqx_authn_ignore_for_hook_bad">>}, _AuthResult) -> + {stop, {error, invalid_username}}; hook_authenticate(_ClientId, AuthResult) -> {ok, AuthResult}. @@ -127,12 +95,17 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> + LogLevel = emqx_logger:get_primary_log_level(), + ok = emqx_logger:set_log_level(debug), application:set_env(ekka, strict_mode, true), + emqx_common_test_helpers:boot_modules(all), emqx_common_test_helpers:start_apps([]), - Config. + [{log_level, LogLevel} | Config]. 
-end_per_suite(_) ->
+end_per_suite(Config) ->
     emqx_common_test_helpers:stop_apps([]),
+    LogLevel = ?config(log_level),
+    emqx_logger:set_log_level(LogLevel),
     ok.

 init_per_testcase(Case, Config) ->
@@ -191,7 +164,7 @@ t_authenticator(Config) when is_list(Config) ->
     % Create an authenticator when the provider does not exist
     ?assertEqual(
-        {error, no_available_provider},
+        {error, {no_available_provider_for, {password_based, built_in_database}}},
         ?AUTHN:create_authenticator(ChainName, AuthenticatorConfig1)
     ),

@@ -326,14 +299,14 @@ t_update_config(Config) when is_list(Config) ->
     ok = register_provider(?config("auth2"), ?MODULE),
     Global = ?config(global),
     AuthenticatorConfig1 = #{
-        <<"mechanism">> => <<"password_based">>,
-        <<"backend">> => <<"built_in_database">>,
-        <<"enable">> => true
+        mechanism => password_based,
+        backend => built_in_database,
+        enable => true
     },
     AuthenticatorConfig2 = #{
-        <<"mechanism">> => <<"password_based">>,
-        <<"backend">> => <<"mysql">>,
-        <<"enable">> => true
+        mechanism => password_based,
+        backend => mysql,
+        enable => true
     },
     ID1 = <<"password_based:built_in_database">>,
     ID2 = <<"password_based:mysql">>,
@@ -594,12 +567,17 @@ t_combine_authn_and_callback(Config) when is_list(Config) ->
     ?assertAuthFailureForUser(bad),
     ?assertAuthFailureForUser(ignore),

-    %% lower-priority hook can overrride auth result,
-    %% because emqx_authentication permits/denies with {ok, ...}
-    ?assertAuthSuccessForUser(hook_user_good),
-    ?assertAuthFailureForUser(hook_user_bad),
-    ?assertAuthSuccessForUser(hook_user_finally_good),
+    %% lower-priority hook can override emqx_authentication result
+    %% for ignored users
+    ?assertAuthSuccessForUser(emqx_authn_ignore_for_hook_good),
+    ?assertAuthFailureForUser(emqx_authn_ignore_for_hook_bad),
+
+    %% lower-priority hook cannot override
+    %% successful/unsuccessful emqx_authentication result
+    ?assertAuthFailureForUser(hook_user_finally_good),
     ?assertAuthFailureForUser(hook_user_finally_bad),
+    ?assertAuthFailureForUser(hook_user_good),
+    ?assertAuthFailureForUser(hook_user_bad),

     ok = unhook();
 t_combine_authn_and_callback({'end', Config}) ->
diff --git a/apps/emqx/test/emqx_banned_SUITE.erl b/apps/emqx/test/emqx_banned_SUITE.erl
index ed22a019a..9419ba4c3 100644
--- a/apps/emqx/test/emqx_banned_SUITE.erl
+++ b/apps/emqx/test/emqx_banned_SUITE.erl
@@ -141,3 +141,89 @@ t_kick(_) ->
     snabbkaffe:stop(),
     emqx_banned:delete(Who),
     ?assertEqual(1, length(?of_kind(kick_session_due_to_banned, Trace))).
+ +t_session_taken(_) -> + erlang:process_flag(trap_exit, true), + Topic = <<"t/banned">>, + ClientId2 = <<"t_session_taken">>, + MsgNum = 3, + Connect = fun() -> + {ok, C} = emqtt:start_link([ + {clientid, <<"client1">>}, + {proto_ver, v5}, + {clean_start, false}, + {properties, #{'Session-Expiry-Interval' => 120}} + ]), + case emqtt:connect(C) of + {ok, _} -> + ok; + {error, econnrefused} -> + throw(mqtt_listener_not_ready) + end, + {ok, _, [0]} = emqtt:subscribe(C, Topic, []), + C + end, + + Publish = fun() -> + lists:foreach( + fun(_) -> + Msg = emqx_message:make(ClientId2, Topic, <<"payload">>), + emqx_broker:safe_publish(Msg) + end, + lists:seq(1, MsgNum) + ) + end, + emqx_common_test_helpers:wait_for( + ?FUNCTION_NAME, + ?LINE, + fun() -> + try + C = Connect(), + emqtt:disconnect(C), + true + catch + throw:mqtt_listener_not_ready -> + false + end + end, + 15_000 + ), + Publish(), + + C2 = Connect(), + ?assertEqual(MsgNum, length(receive_messages(MsgNum + 1))), + ok = emqtt:disconnect(C2), + + Publish(), + + Now = erlang:system_time(second), + Who = {clientid, ClientId2}, + emqx_banned:create(#{ + who => Who, + by => <<"test">>, + reason => <<"test">>, + at => Now, + until => Now + 120 + }), + + C3 = Connect(), + ?assertEqual(0, length(receive_messages(MsgNum + 1))), + emqx_banned:delete(Who), + {ok, #{}, [0]} = emqtt:unsubscribe(C3, Topic), + ok = emqtt:disconnect(C3). + +receive_messages(Count) -> + receive_messages(Count, []). +receive_messages(0, Msgs) -> + Msgs; +receive_messages(Count, Msgs) -> + receive + {publish, Msg} -> + ct:log("Msg: ~p ~n", [Msg]), + receive_messages(Count - 1, [Msg | Msgs]); + Other -> + ct:log("Other Msg: ~p~n", [Other]), + receive_messages(Count, Msgs) + after 1200 -> + Msgs + end. diff --git a/apps/emqx/test/emqx_boot_SUITE.erl b/apps/emqx/test/emqx_boot_SUITE.erl index 7d0a7b4d3..06f08afb8 100644 --- a/apps/emqx/test/emqx_boot_SUITE.erl +++ b/apps/emqx/test/emqx_boot_SUITE.erl @@ -24,19 +24,23 @@ all() -> emqx_common_test_helpers:all(?MODULE). t_is_enabled(_) -> - ok = application:set_env(emqx, boot_modules, all), - ?assert(emqx_boot:is_enabled(router)), - ?assert(emqx_boot:is_enabled(broker)), - ?assert(emqx_boot:is_enabled(listeners)), - ok = application:set_env(emqx, boot_modules, [router]), - ?assert(emqx_boot:is_enabled(router)), - ?assertNot(emqx_boot:is_enabled(broker)), - ?assertNot(emqx_boot:is_enabled(listeners)), - ok = application:set_env(emqx, boot_modules, [router, broker]), - ?assert(emqx_boot:is_enabled(router)), - ?assert(emqx_boot:is_enabled(broker)), - ?assertNot(emqx_boot:is_enabled(listeners)), - ok = application:set_env(emqx, boot_modules, [router, broker, listeners]), - ?assert(emqx_boot:is_enabled(router)), - ?assert(emqx_boot:is_enabled(broker)), - ?assert(emqx_boot:is_enabled(listeners)). 
+ try + ok = application:set_env(emqx, boot_modules, all), + ?assert(emqx_boot:is_enabled(router)), + ?assert(emqx_boot:is_enabled(broker)), + ?assert(emqx_boot:is_enabled(listeners)), + ok = application:set_env(emqx, boot_modules, [router]), + ?assert(emqx_boot:is_enabled(router)), + ?assertNot(emqx_boot:is_enabled(broker)), + ?assertNot(emqx_boot:is_enabled(listeners)), + ok = application:set_env(emqx, boot_modules, [router, broker]), + ?assert(emqx_boot:is_enabled(router)), + ?assert(emqx_boot:is_enabled(broker)), + ?assertNot(emqx_boot:is_enabled(listeners)), + ok = application:set_env(emqx, boot_modules, [router, broker, listeners]), + ?assert(emqx_boot:is_enabled(router)), + ?assert(emqx_boot:is_enabled(broker)), + ?assert(emqx_boot:is_enabled(listeners)) + after + application:set_env(emqx, boot_modules, all) + end. diff --git a/apps/emqx/test/emqx_bpapi_static_checks.erl b/apps/emqx/test/emqx_bpapi_static_checks.erl index f218739fc..34ff149c1 100644 --- a/apps/emqx/test/emqx_bpapi_static_checks.erl +++ b/apps/emqx/test/emqx_bpapi_static_checks.erl @@ -47,7 +47,9 @@ -type param_types() :: #{emqx_bpapi:var_name() => _Type}. %% Applications and modules we wish to ignore in the analysis: --define(IGNORED_APPS, "gen_rpc, recon, redbug, observer_cli, snabbkaffe, ekka, mria"). +-define(IGNORED_APPS, + "gen_rpc, recon, redbug, observer_cli, snabbkaffe, ekka, mria, amqp_client, rabbit_common" +). -define(IGNORED_MODULES, "emqx_rpc"). %% List of known RPC backend modules: -define(RPC_MODULES, "gen_rpc, erpc, rpc, emqx_rpc"). @@ -65,7 +67,7 @@ % Reason: legacy code. A fun and a QC query are % passed in the args, it's futile to try to statically % check it - "emqx_mgmt_api:do_query/2, emqx_mgmt_api:collect_total_from_tail_nodes/3" + "emqx_mgmt_api:do_query/2, emqx_mgmt_api:collect_total_from_tail_nodes/2" ). -define(XREF, myxref). diff --git a/apps/emqx/test/emqx_broker_helper_SUITE.erl b/apps/emqx/test/emqx_broker_helper_SUITE.erl index 7c3e1aa91..65603410a 100644 --- a/apps/emqx/test/emqx_broker_helper_SUITE.erl +++ b/apps/emqx/test/emqx_broker_helper_SUITE.erl @@ -55,11 +55,12 @@ t_register_sub(_) -> ?assertEqual(self(), emqx_broker_helper:lookup_subpid(<<"clientid">>)). t_shard_seq(_) -> - ?assertEqual([], ets:lookup(emqx_subseq, <<"topic">>)), - emqx_broker_helper:create_seq(<<"topic">>), - ?assertEqual([{<<"topic">>, 1}], ets:lookup(emqx_subseq, <<"topic">>)), - emqx_broker_helper:reclaim_seq(<<"topic">>), - ?assertEqual([], ets:lookup(emqx_subseq, <<"topic">>)). + TestTopic = atom_to_list(?FUNCTION_NAME), + ?assertEqual([], ets:lookup(emqx_subseq, TestTopic)), + emqx_broker_helper:create_seq(TestTopic), + ?assertEqual([{TestTopic, 1}], ets:lookup(emqx_subseq, TestTopic)), + emqx_broker_helper:reclaim_seq(TestTopic), + ?assertEqual([], ets:lookup(emqx_subseq, TestTopic)). t_shards_num(_) -> ?assertEqual(emqx_vm:schedulers() * 32, emqx_broker_helper:shards_num()). diff --git a/apps/emqx/test/emqx_channel_SUITE.erl b/apps/emqx/test/emqx_channel_SUITE.erl index c6610c0e2..2b7280b32 100644 --- a/apps/emqx/test/emqx_channel_SUITE.erl +++ b/apps/emqx/test/emqx_channel_SUITE.erl @@ -31,7 +31,7 @@ force_gc_conf() -> #{bytes => 16777216, count => 16000, enable => true}. force_shutdown_conf() -> - #{enable => true, max_heap_size => 4194304, max_message_queue_len => 1000}. + #{enable => true, max_heap_size => 4194304, max_mailbox_size => 1000}. 
rpc_conf() -> #{ @@ -162,8 +162,7 @@ limiter_conf() -> Make = fun() -> #{ burst => 0, - rate => infinity, - capacity => infinity + rate => infinity } end, @@ -172,7 +171,7 @@ limiter_conf() -> Acc#{Name => Make()} end, #{}, - [bytes_in, message_in, message_routing, connection, internal] + [bytes, messages, message_routing, connection, internal] ). stats_conf() -> @@ -268,13 +267,14 @@ t_chan_info(_) -> t_chan_caps(_) -> ?assertMatch( #{ + exclusive_subscription := false, + max_packet_size := 1048576, max_clientid_len := 65535, max_qos_allowed := 2, max_topic_alias := 65535, max_topic_levels := Level, retain_available := true, shared_subscription := true, - subscription_identifiers := true, wildcard_subscription := true } when is_integer(Level), emqx_channel:caps(channel()) @@ -1137,7 +1137,7 @@ t_ws_cookie_init(_) -> %%-------------------------------------------------------------------- t_flapping_detect(_) -> - emqx_config:put_zone_conf(default, [flapping_detect, enable], true), + emqx_config:put_zone_conf(default, [flapping_detect, window_time], 60000), Parent = self(), ok = meck:expect( emqx_cm, @@ -1236,11 +1236,17 @@ connpkt(Props) -> session() -> session(#{}). session(InitFields) when is_map(InitFields) -> + Conf = emqx_cm:get_session_confs( + #{zone => default, clientid => <<"fake-test">>}, #{ + receive_maximum => 0, expiry_interval => 0 + } + ), + Session = emqx_session:init(Conf), maps:fold( - fun(Field, Value, Session) -> - emqx_session:set_field(Field, Value, Session) + fun(Field, Value, SessionAcc) -> + emqx_session:set_field(Field, Value, SessionAcc) end, - emqx_session:init(#{max_inflight => 0}), + Session, InitFields ). @@ -1252,7 +1258,7 @@ limiter_cfg() -> Client = #{ rate => 5, initial => 0, - capacity => 5, + burst => 0, low_watermark => 1, divisible => false, max_retry_time => timer:seconds(5), @@ -1264,7 +1270,7 @@ limiter_cfg() -> }. bucket_cfg() -> - #{rate => 10, initial => 0, capacity => 10}. + #{rate => 10, initial => 0, burst => 0}. add_bucket() -> emqx_limiter_server:add_bucket(?MODULE, message_routing, bucket_cfg()). diff --git a/apps/emqx/test/emqx_client_SUITE.erl b/apps/emqx/test/emqx_client_SUITE.erl index 79c934b47..ca5f53070 100644 --- a/apps/emqx/test/emqx_client_SUITE.erl +++ b/apps/emqx/test/emqx_client_SUITE.erl @@ -24,6 +24,7 @@ -include_lib("emqx/include/emqx_mqtt.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("common_test/include/ct.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). -define(TOPICS, [ <<"TopicA">>, @@ -43,6 +44,8 @@ <<"TopicA/#">> ]). +-define(WAIT(EXPR, ATTEMPTS), ?retry(1000, ATTEMPTS, EXPR)). + all() -> [ {group, mqttv3}, @@ -64,7 +67,8 @@ groups() -> %% t_keepalive, %% t_redelivery_on_reconnect, %% subscribe_failure_test, - t_dollar_topics + t_dollar_topics, + t_sub_non_utf8_topic ]}, {mqttv5, [non_parallel_tests], [t_basic_with_props_v5]}, {others, [non_parallel_tests], [ @@ -85,6 +89,12 @@ init_per_suite(Config) -> end_per_suite(_Config) -> emqx_common_test_helpers:stop_apps([]). +init_per_testcase(_Case, Config) -> + Config. + +end_per_testcase(_Case, _Config) -> + emqx_config:put_zone_conf(default, [mqtt, idle_timeout], 15000). 
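+
+%% The ?WAIT macro defined above delegates to snabbkaffe's
+%% ?retry(Interval, Attempts, Expr), i.e. ?WAIT(EXPR, ATTEMPTS) is
+%% ?retry(1000, ATTEMPTS, EXPR): the expression is re-evaluated until it
+%% stops crashing or the attempts run out. A minimal sketch of the
+%% polling pattern it enables, replacing fixed ct:sleep/1 calls:
+%%
+%%   %% retry every 1000 ms, at most 2 attempts; the match crashes
+%%   %% (and is retried) until the channel is registered in emqx_cm
+%%   ?WAIT(
+%%       #{clientinfo := #{clientid := ClientId}} =
+%%           emqx_cm:get_chan_info(ClientId),
+%%       2
+%%   ),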
+
 %%--------------------------------------------------------------------
 %% Test cases for MQTT v3
 %%--------------------------------------------------------------------
@@ -101,16 +111,35 @@ t_basic_v4(_Config) ->

 t_cm(_) ->
     emqx_config:put_zone_conf(default, [mqtt, idle_timeout], 1000),
-    ClientId = <<"myclient">>,
+    ClientId = atom_to_binary(?FUNCTION_NAME),
     {ok, C} = emqtt:start_link([{clientid, ClientId}]),
     {ok, _} = emqtt:connect(C),
-    ct:sleep(500),
-    #{clientinfo := #{clientid := ClientId}} = emqx_cm:get_chan_info(ClientId),
+    ?WAIT(#{clientinfo := #{clientid := ClientId}} = emqx_cm:get_chan_info(ClientId), 2),
     emqtt:subscribe(C, <<"mytopic">>, 0),
-    ct:sleep(1200),
-    Stats = emqx_cm:get_chan_stats(ClientId),
-    ?assertEqual(1, proplists:get_value(subscriptions_cnt, Stats)),
-    emqx_config:put_zone_conf(default, [mqtt, idle_timeout], 15000).
+    ?WAIT(
+        begin
+            Stats = emqx_cm:get_chan_stats(ClientId),
+            ?assertEqual(1, proplists:get_value(subscriptions_cnt, Stats))
+        end,
+        2
+    ),
+    ok.
+
+t_idle_timeout_infinity(_) ->
+    emqx_config:put_zone_conf(default, [mqtt, idle_timeout], infinity),
+    ClientId = atom_to_binary(?FUNCTION_NAME),
+    {ok, C} = emqtt:start_link([{clientid, ClientId}]),
+    {ok, _} = emqtt:connect(C),
+    ?WAIT(#{clientinfo := #{clientid := ClientId}} = emqx_cm:get_chan_info(ClientId), 2),
+    emqtt:subscribe(C, <<"mytopic">>, 0),
+    ?WAIT(
+        begin
+            Stats = emqx_cm:get_chan_stats(ClientId),
+            ?assertEqual(1, proplists:get_value(subscriptions_cnt, Stats))
+        end,
+        2
+    ),
+    ok.

 t_cm_registry(_) ->
     Children = supervisor:which_children(emqx_cm_sup),
@@ -269,6 +298,36 @@ t_dollar_topics(_) ->
     ok = emqtt:disconnect(C),
     ct:pal("$ topics test succeeded").

+t_sub_non_utf8_topic(_) ->
+    {ok, Socket} = gen_tcp:connect({127, 0, 0, 1}, 1883, [{active, true}, binary]),
+    ConnPacket = emqx_frame:serialize(#mqtt_packet{
+        header = #mqtt_packet_header{type = 1},
+        variable = #mqtt_packet_connect{
+            clientid = <<"abcdefg">>
+        }
+    }),
+    ok = gen_tcp:send(Socket, ConnPacket),
+    receive
+        {tcp, _, _ConnAck = <<32, 2, 0, 0>>} -> ok
+    after 3000 -> ct:fail({connect_ack_not_recv, process_info(self(), messages)})
+    end,
+    SubHeader = <<130, 18, 25, 178>>,
+    SubTopicLen = <<0, 13>>,
+    %% this is not a valid utf8 topic
+    SubTopic = <<128, 10, 10, 12, 178, 159, 162, 47, 115, 1, 1, 1, 1>>,
+    SubQoS = <<1>>,
+    SubPacket = <<SubHeader/binary, SubTopicLen/binary, SubTopic/binary, SubQoS/binary>>,
+    ok = gen_tcp:send(Socket, SubPacket),
+    receive
+        {tcp_closed, _} -> ok
+    after 3000 -> ct:fail({should_get_disconnected, process_info(self(), messages)})
+    end,
+    timer:sleep(1000),
+    ListenerCounts = emqx_listeners:shutdown_count('tcp:default', {{0, 0, 0, 0}, 1883}),
+    TopicInvalidCount = proplists:get_value(topic_filter_invalid, ListenerCounts),
+    ?assert(is_integer(TopicInvalidCount) andalso TopicInvalidCount > 0),
+    ok.
+
 %%--------------------------------------------------------------------
 %% Test cases for MQTT v5
 %%--------------------------------------------------------------------
@@ -362,4 +421,10 @@ tls_certcn_as_clientid(TLSVsn, RequiredTLSVsn) ->
     {ok, _} = emqtt:connect(Client),
     #{clientinfo := #{clientid := CN}} = emqx_cm:get_chan_info(CN),
     confirm_tls_version(Client, RequiredTLSVsn),
+    %% verify that the peercert won't be stored in the conninfo
+    [ChannPid] = emqx_cm:lookup_channels(CN),
+    SysState = sys:get_state(ChannPid),
+    ChannelRecord = lists:keyfind(channel, 1, tuple_to_list(SysState)),
+    ConnInfo = lists:nth(2, tuple_to_list(ChannelRecord)),
+    ?assertMatch(#{peercert := undefined}, ConnInfo),
     emqtt:disconnect(Client).
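+
+%% Byte layout of the hand-crafted SUBSCRIBE packet in
+%% t_sub_non_utf8_topic above (MQTT v3.1.1 framing): 130 = 2#10000010,
+%% i.e. packet type 8 (SUBSCRIBE) with the mandatory 0010 flags; 18 is
+%% the remaining length, which checks out as 2 (packet id) + 2 (topic
+%% length) + 13 (topic bytes) + 1 (QoS); <<25, 178>> is the packet id
+%% 25 * 256 + 178 = 6578; and <<0, 13>> declares the 13-byte topic.
+%% The same arithmetic, as a sketch:
+%%
+%%   RemainingLen = 2 + 2 + byte_size(SubTopic) + 1,    %% = 18
+%%   PacketId = binary:decode_unsigned(<<25, 178>>),    %% = 6578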
diff --git a/apps/emqx/test/emqx_common_test_helpers.erl b/apps/emqx/test/emqx_common_test_helpers.erl index 61196a645..c8ef40925 100644 --- a/apps/emqx/test/emqx_common_test_helpers.erl +++ b/apps/emqx/test/emqx_common_test_helpers.erl @@ -16,23 +16,31 @@ -module(emqx_common_test_helpers). +-include_lib("emqx/include/emqx_authentication.hrl"). + -type special_config_handler() :: fun(). -type apps() :: list(atom()). -export([ all/1, + init_per_testcase/3, + end_per_testcase/3, boot_modules/1, start_apps/1, start_apps/2, + start_apps/3, stop_apps/1, + stop_apps/2, reload/2, app_path/2, proj_root/0, deps_path/2, flush/0, flush/1, - render_and_load_app_config/1 + load/1, + render_and_load_app_config/1, + render_and_load_app_config/2 ]). -export([ @@ -42,16 +50,17 @@ client_ssl_twoway/1, ensure_mnesia_stopped/0, ensure_quic_listener/2, + ensure_quic_listener/3, is_all_tcp_servers_available/1, is_tcp_server_available/2, is_tcp_server_available/3, load_config/2, - load_config/3, not_wait_mqtt_payload/1, read_schema_configs/2, render_config_file/2, wait_for/4, - wait_mqtt_payload/1 + wait_mqtt_payload/1, + select_free_port/1 ]). -export([ @@ -59,14 +68,16 @@ emqx_cluster/2, start_epmd/0, start_slave/2, - stop_slave/1 + stop_slave/1, + listener_port/2 ]). -export([clear_screen/0]). -export([with_mock/4]). -export([ on_exit/1, - call_janitor/0 + call_janitor/0, + call_janitor/1 ]). %% Toxiproxy API @@ -75,6 +86,13 @@ reset_proxy/2 ]). +%% TLS certs API +-export([ + gen_ca/2, + gen_host_cert/3, + gen_host_cert/4 +]). + -define(CERTS_PATH(CertName), filename:join(["etc", "certs", CertName])). -define(MQTT_SSL_CLIENT_CERTS, [ @@ -150,6 +168,19 @@ all(Suite) -> string:substr(atom_to_list(F), 1, 2) == "t_" ]). +init_per_testcase(Module, TestCase, Config) -> + case erlang:function_exported(Module, TestCase, 2) of + true -> Module:TestCase(init, Config); + false -> Config + end. + +end_per_testcase(Module, TestCase, Config) -> + case erlang:function_exported(Module, TestCase, 2) of + true -> Module:TestCase('end', Config); + false -> ok + end, + Config. + %% set emqx app boot modules -spec boot_modules(all | list(atom())) -> ok. boot_modules(Mods) -> @@ -167,16 +198,20 @@ start_apps(Apps) -> application:set_env(system_monitor, db_hostname, ""), ok end, - start_apps(Apps, DefaultHandler). + start_apps(Apps, DefaultHandler, #{}). -spec start_apps(Apps :: apps(), Handler :: special_config_handler()) -> ok. start_apps(Apps, SpecAppConfig) when is_function(SpecAppConfig) -> + start_apps(Apps, SpecAppConfig, #{}). + +-spec start_apps(Apps :: apps(), Handler :: special_config_handler(), map()) -> ok. +start_apps(Apps, SpecAppConfig, Opts) when is_function(SpecAppConfig) -> %% Load all application code to beam vm first %% Because, minirest, ekka etc.. application will scan these modules lists:foreach(fun load/1, [emqx | Apps]), ok = start_ekka(), ok = emqx_ratelimiter_SUITE:load_conf(), - lists:foreach(fun(App) -> start_app(App, SpecAppConfig) end, [emqx | Apps]). + lists:foreach(fun(App) -> start_app(App, SpecAppConfig, Opts) end, [emqx | Apps]). load(App) -> case application:load(App) of @@ -186,36 +221,71 @@ load(App) -> end. render_and_load_app_config(App) -> + render_and_load_app_config(App, #{}). 
+
+render_and_load_app_config(App, Opts) ->
     load(App),
     Schema = app_schema(App),
-    Conf = app_path(App, filename:join(["etc", app_conf_file(App)])),
+    ConfFilePath = maps:get(conf_file_path, Opts, filename:join(["etc", app_conf_file(App)])),
+    Conf = app_path(App, ConfFilePath),
     try
-        do_render_app_config(App, Schema, Conf)
+        do_render_app_config(App, Schema, Conf, Opts)
     catch
         throw:E:St ->
             %% turn throw into error
             error({Conf, E, St})
     end.

-do_render_app_config(App, Schema, ConfigFile) ->
-    Vars = mustache_vars(App),
+do_render_app_config(App, Schema, ConfigFile, Opts) ->
+    Vars = mustache_vars(App, Opts),
     RenderedConfigFile = render_config_file(ConfigFile, Vars),
     read_schema_configs(Schema, RenderedConfigFile),
     force_set_config_file_paths(App, [RenderedConfigFile]),
     copy_certs(App, RenderedConfigFile),
     ok.

-start_app(App, SpecAppConfig) ->
-    render_and_load_app_config(App),
+start_app(App, SpecAppConfig, Opts) ->
+    render_and_load_app_config(App, Opts),
     SpecAppConfig(App),
     case application:ensure_all_started(App) of
         {ok, _} ->
             ok = ensure_dashboard_listeners_started(App),
+            ok = wait_for_app_processes(App),
+            ok = perform_sanity_checks(App),
             ok;
         {error, Reason} ->
             error({failed_to_start_app, App, Reason})
     end.

+wait_for_app_processes(emqx_conf) ->
+    %% emqx_conf app has a gen_server which
+    %% initializes its state asynchronously
+    gen_server:call(emqx_cluster_rpc, dummy),
+    ok;
+wait_for_app_processes(_) ->
+    ok.
+
+%% These are checks to detect inter-suite or inter-testcase flakiness
+%% early. For example, one suite might leave an application running
+%% while stopping others, and then the `application:start/2' callback is
+%% never called again for this application.
+perform_sanity_checks(emqx_rule_engine) ->
+    ensure_config_handler(emqx_rule_engine, [rule_engine, rules]),
+    ok;
+perform_sanity_checks(emqx_bridge) ->
+    ensure_config_handler(emqx_bridge, [bridges]),
+    ok;
+perform_sanity_checks(_App) ->
+    ok.
+
+ensure_config_handler(Module, ConfigPath) ->
+    #{handlers := Handlers} = sys:get_state(emqx_config_handler),
+    case emqx_utils_maps:deep_get(ConfigPath, Handlers, not_found) of
+        #{{mod} := Module} -> ok;
+        _NotFound -> error({config_handler_missing, ConfigPath, Module})
+    end,
+    ok.
+
 app_conf_file(emqx_conf) -> "emqx.conf.all";
 app_conf_file(App) -> atom_to_list(App) ++ ".conf".
@@ -229,12 +299,14 @@ app_schema(App) ->
         no_schema
     end.

-mustache_vars(App) ->
-    [
-        {platform_data_dir, app_path(App, "data")},
-        {platform_etc_dir, app_path(App, "etc")},
-        {platform_log_dir, app_path(App, "log")}
-    ].
+mustache_vars(App, Opts) ->
+    ExtraMustacheVars = maps:get(extra_mustache_vars, Opts, #{}),
+    Defaults = #{
+        node_cookie => atom_to_list(erlang:get_cookie()),
+        platform_data_dir => app_path(App, "data"),
+        platform_etc_dir => app_path(App, "etc")
+    },
+    maps:merge(Defaults, ExtraMustacheVars).

 render_config_file(ConfigFile, Vars0) ->
     Temp =
@@ -242,7 +314,7 @@ render_config_file(ConfigFile, Vars0) ->
         {ok, T} -> T;
         {error, Reason} -> error({failed_to_read_config_template, ConfigFile, Reason})
     end,
-    Vars = [{atom_to_list(N), iolist_to_binary(V)} || {N, V} <- Vars0],
+    Vars = [{atom_to_list(N), iolist_to_binary(V)} || {N, V} <- maps:to_list(Vars0)],
     Targ = bbmustache:render(Temp, Vars),
     NewName = ConfigFile ++ ".rendered",
     ok = file:write_file(NewName, Targ),
@@ -265,7 +337,26 @@ generate_config(SchemaModule, ConfigFile) when is_atom(SchemaModule) ->

 -spec stop_apps(list()) -> ok.
 stop_apps(Apps) ->
+    stop_apps(Apps, #{}).
+
+stop_apps(Apps, Opts) ->
     [application:stop(App) || App <- Apps ++ [emqx, ekka, mria, mnesia]],
+    ok = mria_mnesia:delete_schema(),
+    %% to avoid inter-suite flakiness
+    application:unset_env(emqx, init_config_load_done),
+    persistent_term:erase(?EMQX_AUTHENTICATION_SCHEMA_MODULE_PT_KEY),
+    case Opts of
+        #{erase_all_configs := false} ->
+            %% FIXME: this means inter-suite or inter-test dependencies
+            ok;
+        _ ->
+            emqx_config:erase_all()
+    end,
+    ok = emqx_config:delete_override_conf_files(),
+    application:unset_env(emqx, local_override_conf_file),
+    application:unset_env(emqx, cluster_override_conf_file),
+    application:unset_env(emqx, cluster_hocon_file),
+    application:unset_env(gen_rpc, port_discovery),
     ok.

 proj_root() ->
@@ -310,7 +401,7 @@ safe_relative_path_2(Path) ->
 -spec reload(App :: atom(), SpecAppConfig :: special_config_handler()) -> ok.
 reload(App, SpecAppConfigHandler) ->
     application:stop(App),
-    start_app(App, SpecAppConfigHandler),
+    start_app(App, SpecAppConfigHandler, #{}),
     application:start(App).

 ensure_mnesia_stopped() ->
@@ -408,9 +499,15 @@ catch_call(F) ->
         C:E:S -> {crashed, {C, E, S}}
     end.

-force_set_config_file_paths(emqx_conf, Paths) ->
+force_set_config_file_paths(emqx_conf, [Path] = Paths) ->
+    Bin = iolist_to_binary(io_lib:format("node.config_files = [~p]~n", [Path])),
+    ok = file:write_file(Path, Bin, [append]),
     application:set_env(emqx, config_files, Paths);
 force_set_config_file_paths(emqx, Paths) ->
+    %% we need to init the cluster conf, so we can save the cluster conf to the file
+    application:set_env(emqx, local_override_conf_file, "local_override.conf"),
+    application:set_env(emqx, cluster_override_conf_file, "cluster_override.conf"),
+    application:set_env(emqx, cluster_conf_file, "cluster.hocon"),
     application:set_env(emqx, config_files, Paths);
 force_set_config_file_paths(_, _) ->
     ok.
@@ -423,18 +520,14 @@ copy_certs(emqx_conf, Dest0) ->
 copy_certs(_, _) ->
     ok.

-load_config(SchemaModule, Config, Opts) ->
+load_config(SchemaModule, Config) ->
     ConfigBin =
         case is_map(Config) of
-            true -> jsx:encode(Config);
+            true -> emqx_utils_json:encode(Config);
             false -> Config
         end,
     ok = emqx_config:delete_override_conf_files(),
-    ok = emqx_config:init_load(SchemaModule, ConfigBin, Opts),
-    ok.
-
-load_config(SchemaModule, Config) ->
-    load_config(SchemaModule, Config, #{raw_with_default => false}).
+    ok = emqx_config:init_load(SchemaModule, ConfigBin).

 -spec is_all_tcp_servers_available(Servers) -> Result when
     Servers :: [{Host, Port}],
@@ -447,8 +540,11 @@ is_all_tcp_servers_available(Servers) ->
             is_tcp_server_available(Host, Port)
         end,
     case lists:partition(Fun, Servers) of
-        {_, []} -> true;
-        {_, Unavail} -> ct:print("Unavailable servers: ~p", [Unavail])
+        {_, []} ->
+            true;
+        {_, Unavail} ->
+            ct:pal("Unavailable servers: ~p", [Unavail]),
+            false
     end.

 -spec is_tcp_server_available(
@@ -490,11 +586,14 @@ ensure_dashboard_listeners_started(_App) ->

 -spec ensure_quic_listener(Name :: atom(), UdpPort :: inet:port_number()) -> ok.
 ensure_quic_listener(Name, UdpPort) ->
+    ensure_quic_listener(Name, UdpPort, #{}).
+-spec ensure_quic_listener(Name :: atom(), UdpPort :: inet:port_number(), map()) -> ok.
+ensure_quic_listener(Name, UdpPort, ExtraSettings) -> application:ensure_all_started(quicer), Conf = #{ acceptors => 16, - bind => {{0, 0, 0, 0}, UdpPort}, - certfile => filename:join(code:lib_dir(emqx), "etc/certs/cert.pem"), + bind => UdpPort, + ciphers => [ "TLS_AES_256_GCM_SHA384", @@ -503,16 +602,22 @@ ensure_quic_listener(Name, UdpPort) -> ], enabled => true, idle_timeout => 15000, - keyfile => filename:join(code:lib_dir(emqx), "etc/certs/key.pem"), + ssl_options => #{ + certfile => filename:join(code:lib_dir(emqx), "etc/certs/cert.pem"), + keyfile => filename:join(code:lib_dir(emqx), "etc/certs/key.pem") + }, limiter => #{}, max_connections => 1024000, mountpoint => <<>>, zone => default }, - emqx_config:put([listeners, quic, Name], Conf), - case emqx_listeners:start_listener(quic, Name, Conf) of + + Conf2 = maps:merge(Conf, ExtraSettings), + emqx_config:put([listeners, quic, Name], Conf2), + case emqx_listeners:start_listener(emqx_listeners:listener_id(quic, Name)) of ok -> ok; - {error, {already_started, _Pid}} -> ok + {error, {already_started, _Pid}} -> ok; + Other -> throw(Other) end. %% @@ -536,6 +641,12 @@ ensure_quic_listener(Name, UdpPort) -> %% Whether to execute `emqx_config:init_load(SchemaMod)` %% default: true load_schema => boolean(), + %% If we want to exercise the scenario where a node joins an + %% existing cluster where there has already been some + %% configuration changes (via cluster rpc), then we need to enable + %% autocluster so that the joining node will restart the + %% `emqx_conf' app and correctly catch up the config. + start_autocluster => boolean(), %% Eval by emqx_config:put/2 conf => [{KeyPath :: list(), Val :: term()}], %% Fast option to config listener port @@ -586,25 +697,59 @@ emqx_cluster(Specs0, CommonOpts) -> %% Lower level starting API -spec start_slave(shortname(), node_opts()) -> nodename(). -start_slave(Name, Opts) -> - {ok, Node} = ct_slave:start( - list_to_atom(atom_to_list(Name) ++ "@" ++ host()), - [ - {kill_if_fail, true}, - {monitor_master, true}, - {init_timeout, 10000}, - {startup_timeout, 10000}, - {erl_flags, erl_flags()} - ] - ), - +start_slave(Name, Opts) when is_list(Opts) -> + start_slave(Name, maps:from_list(Opts)); +start_slave(Name, Opts) when is_map(Opts) -> + SlaveMod = maps:get(peer_mod, Opts, ct_slave), + Node = node_name(Name), + put_peer_mod(Node, SlaveMod), + Cookie = atom_to_list(erlang:get_cookie()), + DoStart = + fun() -> + case SlaveMod of + ct_slave -> + ct_slave:start( + Node, + [ + {kill_if_fail, true}, + {monitor_master, true}, + {init_timeout, 20_000}, + {startup_timeout, 20_000}, + {erl_flags, erl_flags()}, + {env, [ + {"HOCON_ENV_OVERRIDE_PREFIX", "EMQX_"}, + {"EMQX_NODE__COOKIE", Cookie} + ]} + ] + ); + slave -> + Env = " -env HOCON_ENV_OVERRIDE_PREFIX EMQX_", + slave:start_link(host(), Name, ebin_path() ++ Env) + end + end, + case DoStart() of + {ok, _} -> + ok; + {error, started_not_connected, _} -> + ok; + Other -> + throw(Other) + end, pong = net_adm:ping(Node), setup_node(Node, Opts), + ok = snabbkaffe:forward_trace(Node), Node. %% Node stopping -stop_slave(Node) -> - ct_slave:stop(Node). +stop_slave(Node0) -> + Node = node_name(Node0), + SlaveMod = get_peer_mod(Node), + erase_peer_mod(Node), + case SlaveMod:stop(Node) of + ok -> ok; + {ok, _} -> ok; + {error, not_started, _} -> ok + end. 
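+
+%% Usage sketch for the start_slave/stop_slave pair above: the peer_mod
+%% option selects the backend used to spawn the node (ct_slave by
+%% default, or plain slave), and the choice is remembered per node in
+%% the process dictionary so that stop_slave/1 can dispatch to the
+%% matching stop function. Node name and remote call here are
+%% hypothetical:
+%%
+%%   Node = emqx_common_test_helpers:start_slave(core1, #{peer_mod => slave}),
+%%   try
+%%       ok = erpc:call(Node, io, format, ["hello from ~p~n", [node()]])
+%%   after
+%%       emqx_common_test_helpers:stop_slave(core1)
+%%   end,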
 %% EPMD starting
 start_epmd() ->
@@ -635,16 +780,50 @@ setup_node(Node, Opts) when is_map(Opts) ->
     ConfigureGenRpc = maps:get(configure_gen_rpc, Opts, true),
     LoadSchema = maps:get(load_schema, Opts, true),
     SchemaMod = maps:get(schema_mod, Opts, emqx_schema),
-    LoadApps = maps:get(load_apps, Opts, [gen_rpc, emqx, ekka, mria] ++ Apps),
+    LoadApps = maps:get(load_apps, Opts, Apps),
     Env = maps:get(env, Opts, []),
     Conf = maps:get(conf, Opts, []),
     ListenerPorts = maps:get(listener_ports, Opts, [
         {Type, listener_port(BasePort, Type)}
      || Type <- [tcp, ssl, ws, wss]
     ]),
+    %% we need a fresh data dir for each peer node to avoid unintended
+    %% successes due to sharing of data in the cluster.
+    PrivDataDir = maps:get(priv_data_dir, Opts, "/tmp"),
+    %% If we want to exercise the scenario where a node joins an
+    %% existing cluster where there has already been some
+    %% configuration changes (via cluster rpc), then we need to enable
+    %% autocluster so that the joining node will restart the
+    %% `emqx_conf' app and correctly catch up the config.
+    StartAutocluster = maps:get(start_autocluster, Opts, false),
+
+    ct:pal(
+        "setting up node ~p:\n ~p",
+        [
+            Node,
+            #{
+                start_autocluster => StartAutocluster,
+                load_apps => LoadApps,
+                apps => Apps,
+                env => Env,
+                join_to => JoinTo,
+                start_apps => StartApps
+            }
+        ]
+    ),
     %% Load env before doing anything to avoid overriding
-    [ok = rpc:call(Node, application, load, [App]) || App <- LoadApps],
+    [ok = erpc:call(Node, ?MODULE, load, [App]) || App <- [gen_rpc, ekka, mria, emqx | LoadApps]],
+
+    %% Ensure a clean mnesia directory for each run to avoid
+    %% inter-test flakiness.
+    MnesiaDataDir = filename:join([
+        PrivDataDir,
+        Node,
+        integer_to_list(erlang:unique_integer()),
+        "mnesia"
+    ]),
+    erpc:call(Node, application, set_env, [mnesia, dir, MnesiaDataDir]),

     %% Needs to be set explicitly because ekka:start() (which calls `gen`) is called without Handler
     %% in emqx_common_test_helpers:start_apps(...)
@@ -657,10 +836,7 @@ setup_node(Node, Opts) when is_map(Opts) ->
     end,

     %% Setting env before starting any applications
-    [
-        ok = rpc:call(Node, application, set_env, [Application, Key, Value])
-     || {Application, Key, Value} <- Env
-    ],
+    set_envs(Node, Env),

     %% Here we start the apps
     EnvHandlerForRpc =
@@ -670,7 +846,20 @@ setup_node(Node, Opts) when is_map(Opts) ->
                 %% Otherwise, configuration gets loaded and all preset env in EnvHandler is lost
                 LoadSchema andalso
                     begin
+                        %% to avoid sharing data between executions and/or
+                        %% nodes. these variables might not be in the
+                        %% config file (e.g.: emqx_ee_conf_schema).
+                        NodeDataDir = filename:join([
+                            PrivDataDir,
+                            node(),
+                            integer_to_list(erlang:unique_integer())
+                        ]),
+                        Cookie = atom_to_list(erlang:get_cookie()),
+                        os:putenv("EMQX_NODE__DATA_DIR", NodeDataDir),
+                        os:putenv("EMQX_NODE__COOKIE", Cookie),
                         emqx_config:init_load(SchemaMod),
+                        os:unsetenv("EMQX_NODE__DATA_DIR"),
+                        os:unsetenv("EMQX_NODE__COOKIE"),
                         application:set_env(emqx, init_config_load_done, true)
                     end,
@@ -697,6 +886,16 @@ setup_node(Node, Opts) when is_map(Opts) ->
         undefined ->
             ok;
         _ ->
+            StartAutocluster andalso
+                begin
+                    %% Note: we need to re-set the env because
+                    %% starting the apps apparently makes some of them
+                    %% get lost... This is particularly useful for
+                    %% setting extra apps to be restarted after
+                    %% joining.
+ set_envs(Node, Env), + ok = erpc:call(Node, emqx_machine_boot, start_autocluster, []) + end, case rpc:call(Node, ekka, join, [JoinTo]) of ok -> ok; @@ -711,8 +910,27 @@ setup_node(Node, Opts) when is_map(Opts) -> %% Helpers +put_peer_mod(Node, SlaveMod) -> + put({?MODULE, Node}, SlaveMod), + ok. + +get_peer_mod(Node) -> + case get({?MODULE, Node}) of + undefined -> ct_slave; + SlaveMod -> SlaveMod + end. + +erase_peer_mod(Node) -> + erase({?MODULE, Node}). + node_name(Name) -> - list_to_atom(lists:concat([Name, "@", host()])). + case string:tokens(atom_to_list(Name), "@") of + [_Name, _Host] -> + %% the name already has a @ + Name; + _ -> + list_to_atom(atom_to_list(Name) ++ "@" ++ host()) + end. gen_node_name(Num) -> list_to_atom("autocluster_node" ++ integer_to_list(Num)). @@ -734,6 +952,14 @@ merge_opts(Opts1, Opts2) -> Opts2 ). +set_envs(Node, Env) -> + lists:foreach( + fun({Application, Key, Value}) -> + ok = rpc:call(Node, application, set_env, [Application, Key, Value]) + end, + Env + ). + erl_flags() -> %% One core and redirecting logs to master "+S 1:1 -master " ++ atom_to_list(node()) ++ " " ++ ebin_path(). @@ -753,6 +979,9 @@ base_port(Number) -> gen_rpc_port(BasePort) -> BasePort - 1. +listener_port(Opts, Type) when is_map(Opts) -> + BasePort = maps:get(base_port, Opts), + listener_port(BasePort, Type); listener_port(BasePort, tcp) -> BasePort; listener_port(BasePort, ssl) -> @@ -855,7 +1084,7 @@ switch_proxy(Switch, Name, ProxyHost, ProxyPort) -> off -> #{<<"enabled">> => false}; on -> #{<<"enabled">> => true} end, - BodyBin = emqx_json:encode(Body), + BodyBin = emqx_utils_json:encode(Body), {ok, {{_, 200, _}, _, _}} = httpc:request( post, {Url, [], "application/json", BodyBin}, @@ -875,7 +1104,7 @@ timeout_proxy(on, Name, ProxyHost, ProxyPort) -> <<"toxicity">> => 1.0, <<"attributes">> => #{<<"timeout">> => 0} }, - BodyBin = emqx_json:encode(Body), + BodyBin = emqx_utils_json:encode(Body), {ok, {{_, 200, _}, _, _}} = httpc:request( post, {Url, [], "application/json", BodyBin}, @@ -910,7 +1139,7 @@ latency_up_proxy(on, Name, ProxyHost, ProxyPort) -> <<"jitter">> => 3_000 } }, - BodyBin = emqx_json:encode(Body), + BodyBin = emqx_utils_json:encode(Body), {ok, {{_, 200, _}, _, _}} = httpc:request( post, {Url, [], "application/json", BodyBin}, @@ -931,14 +1160,115 @@ latency_up_proxy(off, Name, ProxyHost, ProxyPort) -> ). %%------------------------------------------------------------------------------- +%% TLS certs +%%------------------------------------------------------------------------------- +gen_ca(Path, Name) -> + %% Generate ca.pem and ca.key which will be used to generate certs + %% for hosts server and clients + ECKeyFile = filename(Path, "~s-ec.key", [Name]), + filelib:ensure_dir(ECKeyFile), + os:cmd("openssl ecparam -name secp256r1 > " ++ ECKeyFile), + Cmd = lists:flatten( + io_lib:format( + "openssl req -new -x509 -nodes " + "-newkey ec:~s " + "-keyout ~s -out ~s -days 3650 " + "-subj \"/C=SE/O=Internet Widgits Pty Ltd CA\"", + [ + ECKeyFile, + ca_key_name(Path, Name), + ca_cert_name(Path, Name) + ] + ) + ), + os:cmd(Cmd). + +ca_cert_name(Path, Name) -> + filename(Path, "~s.pem", [Name]). +ca_key_name(Path, Name) -> + filename(Path, "~s.key", [Name]). + +gen_host_cert(H, CaName, Path) -> + gen_host_cert(H, CaName, Path, #{}). 
+ +gen_host_cert(H, CaName, Path, Opts) -> + ECKeyFile = filename(Path, "~s-ec.key", [CaName]), + CN = str(H), + HKey = filename(Path, "~s.key", [H]), + HCSR = filename(Path, "~s.csr", [H]), + HPEM = filename(Path, "~s.pem", [H]), + HEXT = filename(Path, "~s.extfile", [H]), + PasswordArg = + case maps:get(password, Opts, undefined) of + undefined -> + " -nodes "; + Password -> + io_lib:format(" -passout pass:'~s' ", [Password]) + end, + CSR_Cmd = + lists:flatten( + io_lib:format( + "openssl req -new ~s -newkey ec:~s " + "-keyout ~s -out ~s " + "-addext \"subjectAltName=DNS:~s\" " + "-addext keyUsage=digitalSignature,keyAgreement " + "-subj \"/C=SE/O=Internet Widgits Pty Ltd/CN=~s\"", + [PasswordArg, ECKeyFile, HKey, HCSR, CN, CN] + ) + ), + create_file( + HEXT, + "keyUsage=digitalSignature,keyAgreement\n" + "subjectAltName=DNS:~s\n", + [CN] + ), + CERT_Cmd = + lists:flatten( + io_lib:format( + "openssl x509 -req " + "-extfile ~s " + "-in ~s -CA ~s -CAkey ~s -CAcreateserial " + "-out ~s -days 500", + [ + HEXT, + HCSR, + ca_cert_name(Path, CaName), + ca_key_name(Path, CaName), + HPEM + ] + ) + ), + ct:pal(os:cmd(CSR_Cmd)), + ct:pal(os:cmd(CERT_Cmd)), + file:delete(HEXT). + +filename(Path, F, A) -> + filename:join(Path, str(io_lib:format(F, A))). + +str(Arg) -> + binary_to_list(iolist_to_binary(Arg)). + +create_file(Filename, Fmt, Args) -> + filelib:ensure_dir(Filename), + {ok, F} = file:open(Filename, [write]), + try + io:format(F, Fmt, Args) + after + file:close(F) + end, + ok. +%%------------------------------------------------------------------------------- %% Testcase teardown utilities %%------------------------------------------------------------------------------- %% stop the janitor gracefully to ensure proper cleanup order and less %% noise in the logs. call_janitor() -> + call_janitor(15_000). + +call_janitor(Timeout) -> Janitor = get_or_spawn_janitor(), - exit(Janitor, normal), + ok = emqx_test_janitor:stop(Janitor, Timeout), ok. get_or_spawn_janitor() -> @@ -954,3 +1284,34 @@ get_or_spawn_janitor() -> on_exit(Fun) -> Janitor = get_or_spawn_janitor(), ok = emqx_test_janitor:push_on_exit_callback(Janitor, Fun). + +%%------------------------------------------------------------------------------- +%% Select a free transport port from the OS +%%------------------------------------------------------------------------------- +%% @doc get unused port from OS +-spec select_free_port(tcp | udp | ssl | quic) -> inets:port_number(). +select_free_port(tcp) -> + select_free_port(gen_tcp, listen); +select_free_port(udp) -> + select_free_port(gen_udp, open); +select_free_port(ssl) -> + select_free_port(tcp); +select_free_port(quic) -> + select_free_port(udp). + +select_free_port(GenModule, Fun) when + GenModule == gen_tcp orelse + GenModule == gen_udp +-> + {ok, S} = GenModule:Fun(0, [{reuseaddr, true}]), + {ok, Port} = inet:port(S), + ok = GenModule:close(S), + case os:type() of + {unix, darwin} -> + %% in MacOS, still get address_in_use after close port + timer:sleep(500); + _ -> + skip + end, + ct:pal("Select free OS port: ~p", [Port]), + Port. diff --git a/apps/emqx/test/emqx_common_test_http.erl b/apps/emqx/test/emqx_common_test_http.erl index 87a35a1e2..e9064715d 100644 --- a/apps/emqx/test/emqx_common_test_http.erl +++ b/apps/emqx/test/emqx_common_test_http.erl @@ -29,6 +29,9 @@ auth_header/2 ]). +-define(DEFAULT_APP_ID, <<"default_appid">>). +-define(DEFAULT_APP_SECRET, <<"default_app_secret">>). + request_api(Method, Url, Auth) -> request_api(Method, Url, [], Auth, []). 
@@ -51,7 +54,7 @@ request_api(Method, Url, QueryParams, Auth, Body, HttpOpts) ->
             [] ->
                 {NewUrl, [Auth]};
             _ ->
-                {NewUrl, [Auth], "application/json", emqx_json:encode(Body)}
+                {NewUrl, [Auth], "application/json", emqx_utils_json:encode(Body)}
         end,
     do_request_api(Method, Request, HttpOpts).
@@ -67,19 +70,25 @@ do_request_api(Method, Request, HttpOpts) ->
     end.

 get_http_data(ResponseBody) ->
-    emqx_json:decode(ResponseBody, [return_maps]).
+    emqx_utils_json:decode(ResponseBody, [return_maps]).

 auth_header(User, Pass) ->
     Encoded = base64:encode_to_string(lists:append([User, ":", Pass])),
     {"Authorization", "Basic " ++ Encoded}.

 default_auth_header() ->
-    AppId = <<"myappid">>,
-    AppSecret = emqx_mgmt_auth:get_appsecret(AppId),
-    auth_header(erlang:binary_to_list(AppId), erlang:binary_to_list(AppSecret)).
+    {ok, #{api_key := APIKey}} = emqx_mgmt_auth:read(?DEFAULT_APP_ID),
+    auth_header(
+        erlang:binary_to_list(APIKey), erlang:binary_to_list(?DEFAULT_APP_SECRET)
+    ).

 create_default_app() ->
-    emqx_mgmt_auth:add_app(<<"myappid">>, <<"test">>).
+    Now = erlang:system_time(second),
+    ExpiredAt = Now + timer:minutes(10),
+    emqx_mgmt_auth:create(
+        ?DEFAULT_APP_ID, ?DEFAULT_APP_SECRET, true, ExpiredAt, <<"default app key for test">>
+    ),
+    ok.

 delete_default_app() ->
-    emqx_mgmt_auth:del_app(<<"myappid">>).
+    emqx_mgmt_auth:delete(?DEFAULT_APP_ID).
diff --git a/apps/emqx/test/emqx_config_SUITE.erl b/apps/emqx/test/emqx_config_SUITE.erl
index fe8a5fed8..a55531c2d 100644
--- a/apps/emqx/test/emqx_config_SUITE.erl
+++ b/apps/emqx/test/emqx_config_SUITE.erl
@@ -19,6 +19,7 @@
 -compile(export_all).
 -compile(nowarn_export_all).
 -include_lib("eunit/include/eunit.hrl").
+-include_lib("snabbkaffe/include/snabbkaffe.hrl").

 all() -> emqx_common_test_helpers:all(?MODULE).
@@ -50,12 +51,48 @@ t_fill_default_values(_) ->
                 },
                 <<"route_batch_clean">> := false,
                 <<"session_locking_strategy">> := quorum,
-                <<"shared_dispatch_ack_enabled">> := false,
                 <<"shared_subscription_strategy">> := round_robin
             }
         },
         WithDefaults
     ),
     %% ensure JSON compatible
-    _ = emqx_json:encode(WithDefaults),
+    _ = emqx_utils_json:encode(WithDefaults),
+    ok.
+
+t_init_load(_Config) ->
+    ConfFile = "./test_emqx.conf",
+    ok = file:write_file(ConfFile, <<"">>),
+    ExpectRootNames = lists:sort(hocon_schema:root_names(emqx_schema)),
+    emqx_config:erase_all(),
+    {ok, DeprecatedFile} = application:get_env(emqx, cluster_override_conf_file),
+    ?assertEqual(false, filelib:is_regular(DeprecatedFile), DeprecatedFile),
+    %% No deprecated file present
+    ok = emqx_config:init_load(emqx_schema, [ConfFile]),
+    ?assertEqual(ExpectRootNames, lists:sort(emqx_config:get_root_names())),
+    ?assertMatch({ok, #{raw_config := 256}}, emqx:update_config([mqtt, max_topic_levels], 256)),
+    emqx_config:erase_all(),
+    %% Deprecated file present
+    ok = file:write_file(DeprecatedFile, <<"{}">>),
+    ok = emqx_config:init_load(emqx_schema, [ConfFile]),
+    ?assertEqual(ExpectRootNames, lists:sort(emqx_config:get_root_names())),
+    ?assertMatch({ok, #{raw_config := 128}}, emqx:update_config([mqtt, max_topic_levels], 128)),
+    ok = file:delete(DeprecatedFile).
+
+t_unknown_root_keys(_) ->
+    ?check_trace(
+        #{timetrap => 1000},
+        begin
+            ok = emqx_config:init_load(
+                emqx_schema, <<"test_1 {}\n test_2 {sub = 100}\n listeners {}">>
+            ),
+            ?block_until(#{?snk_kind := unknown_config_keys})
+        end,
+        fun(Trace) ->
+            ?assertMatch(
                [#{unknown_config_keys := "test_1,test_2"}],
                ?of_kind(unknown_config_keys, Trace)
            )
+        end
+    ),
+    ok.
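+
+%% The trace assertion in t_unknown_root_keys above follows the usual
+%% snabbkaffe shape: run the code under ?check_trace, block until the
+%% event of interest is emitted, then assert on the collected trace.
+%% A minimal sketch of that shape (event kind and field here are
+%% placeholders, not real emqx trace points):
+%%
+%%   ?check_trace(
+%%       begin
+%%           run_code_under_test(),
+%%           ?block_until(#{?snk_kind := some_event}, 1000)
+%%       end,
+%%       fun(Trace) ->
+%%           ?assertMatch([#{field := _}], ?of_kind(some_event, Trace))
+%%       end
+%%   ),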
diff --git a/apps/emqx/test/emqx_config_handler_SUITE.erl b/apps/emqx/test/emqx_config_handler_SUITE.erl index 8126b35c6..e21c1867f 100644 --- a/apps/emqx/test/emqx_config_handler_SUITE.erl +++ b/apps/emqx/test/emqx_config_handler_SUITE.erl @@ -21,8 +21,7 @@ -define(MOD, {mod}). -define(WKEY, '?'). --define(LOCAL_CONF, "/tmp/local-override.conf"). --define(CLUSTER_CONF, "/tmp/cluster-override.conf"). +-define(CLUSTER_CONF, "/tmp/cluster.conf"). -include_lib("eunit/include/eunit.hrl"). -include_lib("common_test/include/ct.hrl"). @@ -38,7 +37,6 @@ end_per_suite(_Config) -> emqx_common_test_helpers:stop_apps([]). init_per_testcase(_Case, Config) -> - _ = file:delete(?LOCAL_CONF), _ = file:delete(?CLUSTER_CONF), Config. @@ -179,7 +177,9 @@ t_sub_key_update_remove(_Config) -> {ok, #{post_config_update => #{emqx_config_handler_SUITE => ok}}}, emqx:remove_config(KeyPath) ), - ?assertError({config_not_found, KeyPath}, emqx:get_raw_config(KeyPath)), + ?assertError( + {config_not_found, [<<"sysmon">>, os, cpu_check_interval]}, emqx:get_raw_config(KeyPath) + ), OSKey = maps:keys(emqx:get_raw_config([sysmon, os])), ?assertEqual(false, lists:member(<<"cpu_check_interval">>, OSKey)), ?assert(length(OSKey) > 0), @@ -200,62 +200,6 @@ t_sub_key_update_remove(_Config) -> ok = emqx_config_handler:remove_handler(KeyPath2), ok. -t_local_override_update_remove(_Config) -> - application:set_env(emqx, local_override_conf_file, ?LOCAL_CONF), - application:set_env(emqx, cluster_override_conf_file, ?CLUSTER_CONF), - KeyPath = [sysmon, os, cpu_high_watermark], - ok = emqx_config_handler:add_handler(KeyPath, ?MODULE), - LocalOpts = #{override_to => local}, - {ok, Res} = emqx:update_config(KeyPath, <<"70%">>, LocalOpts), - ?assertMatch( - #{ - config := 0.7, - post_config_update := #{}, - raw_config := <<"70%">> - }, - Res - ), - ClusterOpts = #{override_to => cluster}, - ?assertMatch( - {error, {permission_denied, _}}, emqx:update_config(KeyPath, <<"71%">>, ClusterOpts) - ), - ?assertMatch(0.7, emqx:get_config(KeyPath)), - - KeyPath2 = [sysmon, os, cpu_low_watermark], - ok = emqx_config_handler:add_handler(KeyPath2, ?MODULE), - ?assertMatch( - {error, {permission_denied, _}}, emqx:update_config(KeyPath2, <<"40%">>, ClusterOpts) - ), - - %% remove - ?assertMatch({error, {permission_denied, _}}, emqx:remove_config(KeyPath)), - ?assertEqual( - {ok, #{post_config_update => #{}}}, - emqx:remove_config(KeyPath, #{override_to => local}) - ), - ?assertEqual( - {ok, #{post_config_update => #{}}}, - emqx:remove_config(KeyPath) - ), - ?assertError({config_not_found, KeyPath}, emqx:get_raw_config(KeyPath)), - OSKey = maps:keys(emqx:get_raw_config([sysmon, os])), - ?assertEqual(false, lists:member(<<"cpu_high_watermark">>, OSKey)), - ?assert(length(OSKey) > 0), - - ?assertEqual( - {ok, #{config => 0.8, post_config_update => #{}, raw_config => <<"80%">>}}, - emqx:reset_config(KeyPath, ClusterOpts) - ), - OSKey1 = maps:keys(emqx:get_raw_config([sysmon, os])), - ?assertEqual(true, lists:member(<<"cpu_high_watermark">>, OSKey1)), - ?assert(length(OSKey1) > 1), - - ok = emqx_config_handler:remove_handler(KeyPath), - ok = emqx_config_handler:remove_handler(KeyPath2), - application:unset_env(emqx, local_override_conf_file), - application:unset_env(emqx, cluster_override_conf_file), - ok. 
- t_check_failed(_Config) -> KeyPath = [sysmon, os, cpu_check_interval], Opts = #{rawconf_with_defaults => true}, @@ -426,9 +370,9 @@ wait_for_new_pid() -> callback_error(FailedPath, Update, Error) -> Opts = #{rawconf_with_defaults => true}, ok = emqx_config_handler:add_handler(FailedPath, ?MODULE), - Old = emqx:get_raw_config(FailedPath), + Old = emqx:get_raw_config(FailedPath, undefined), ?assertEqual(Error, emqx:update_config(FailedPath, Update, Opts)), - New = emqx:get_raw_config(FailedPath), + New = emqx:get_raw_config(FailedPath, undefined), ?assertEqual(Old, New), ok = emqx_config_handler:remove_handler(FailedPath), ok. diff --git a/apps/emqx/test/emqx_connection_SUITE.erl b/apps/emqx/test/emqx_connection_SUITE.erl index 23ddf4008..0692ec8f5 100644 --- a/apps/emqx/test/emqx_connection_SUITE.erl +++ b/apps/emqx/test/emqx_connection_SUITE.erl @@ -38,8 +38,6 @@ init_per_suite(Config) -> ok = meck:new(emqx_cm, [passthrough, no_history, no_link]), ok = meck:expect(emqx_cm, mark_channel_connected, fun(_) -> ok end), ok = meck:expect(emqx_cm, mark_channel_disconnected, fun(_) -> ok end), - %% Meck Limiter - ok = meck:new(emqx_htb_limiter, [passthrough, no_history, no_link]), %% Meck Pd ok = meck:new(emqx_pd, [passthrough, no_history, no_link]), %% Meck Metrics @@ -67,7 +65,6 @@ end_per_suite(_Config) -> ok = meck:unload(emqx_transport), catch meck:unload(emqx_channel), ok = meck:unload(emqx_cm), - ok = meck:unload(emqx_htb_limiter), ok = meck:unload(emqx_pd), ok = meck:unload(emqx_metrics), ok = meck:unload(emqx_hooks), @@ -421,20 +418,28 @@ t_ensure_rate_limit(_) -> {ok, [], State1} = emqx_connection:check_limiter([], [], WhenOk, [], st(#{limiter => Limiter})), ?assertEqual(Limiter, emqx_connection:info(limiter, State1)), + ok = meck:new(emqx_htb_limiter, [passthrough, no_history, no_link]), + + ok = meck:expect( + emqx_htb_limiter, + make_infinity_limiter, + fun() -> non_infinity end + ), + ok = meck:expect( emqx_htb_limiter, check, fun(_, Client) -> {pause, 3000, undefined, Client} end ), {ok, State2} = emqx_connection:check_limiter( - [{1000, bytes_in}], + [{1000, bytes}], [], WhenOk, [], - st(#{limiter => Limiter}) + st(#{limiter => init_limiter()}) ), meck:unload(emqx_htb_limiter), - ok = meck:new(emqx_htb_limiter, [passthrough, no_history, no_link]), + ?assertNotEqual(undefined, emqx_connection:info(limiter_timer, State2)). t_activate_socket(_) -> @@ -495,17 +500,18 @@ t_get_conn_info(_) -> end). t_oom_shutdown(init, Config) -> + ok = snabbkaffe:stop(), ok = snabbkaffe:start_trace(), - ok = meck:new(emqx_misc, [non_strict, passthrough, no_history, no_link]), + ok = meck:new(emqx_utils, [non_strict, passthrough, no_history, no_link]), meck:expect( - emqx_misc, + emqx_utils, check_oom, fun(_) -> {shutdown, "fake_oom"} end ), Config; t_oom_shutdown('end', _Config) -> snabbkaffe:stop(), - meck:unload(emqx_misc), + meck:unload(emqx_utils), ok. t_oom_shutdown(_) -> @@ -673,7 +679,10 @@ channel(InitFields) -> peercert => undefined, mountpoint => undefined }, - Session = emqx_session:init(#{max_inflight => 0}), + Conf = emqx_cm:get_session_confs(ClientInfo, #{ + receive_maximum => 0, expiry_interval => 1000 + }), + Session = emqx_session:init(Conf), maps:fold( fun(Field, Value, Channel) -> emqx_channel:set_field(Field, Value, Channel) @@ -700,31 +709,32 @@ handle_call(Pid, Call, St) -> emqx_connection:handle_call(Pid, Call, St). -define(LIMITER_ID, 'tcp:default'). init_limiter() -> - emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes_in, message_in], limiter_cfg()). 
+ emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes, messages], limiter_cfg()). limiter_cfg() -> - Infinity = emqx_limiter_schema:infinity_value(), Cfg = bucket_cfg(), - Client = #{ - rate => Infinity, + Client = client_cfg(), + #{bytes => Cfg, messages => Cfg, client => #{bytes => Client, messages => Client}}. + +bucket_cfg() -> + #{rate => infinity, initial => 0, burst => 0}. + +client_cfg() -> + #{ + rate => infinity, initial => 0, - capacity => Infinity, + burst => 0, low_watermark => 1, divisible => false, max_retry_time => timer:seconds(5), failure_strategy => force - }, - #{bytes_in => Cfg, message_in => Cfg, client => #{bytes_in => Client, message_in => Client}}. - -bucket_cfg() -> - Infinity = emqx_limiter_schema:infinity_value(), - #{rate => Infinity, initial => 0, capacity => Infinity}. + }. add_bucket() -> Cfg = bucket_cfg(), - emqx_limiter_server:add_bucket(?LIMITER_ID, bytes_in, Cfg), - emqx_limiter_server:add_bucket(?LIMITER_ID, message_in, Cfg). + emqx_limiter_server:add_bucket(?LIMITER_ID, bytes, Cfg), + emqx_limiter_server:add_bucket(?LIMITER_ID, messages, Cfg). del_bucket() -> - emqx_limiter_server:del_bucket(?LIMITER_ID, bytes_in), - emqx_limiter_server:del_bucket(?LIMITER_ID, message_in). + emqx_limiter_server:del_bucket(?LIMITER_ID, bytes), + emqx_limiter_server:del_bucket(?LIMITER_ID, messages). diff --git a/apps/emqx/test/emqx_crl_cache_SUITE.erl b/apps/emqx/test/emqx_crl_cache_SUITE.erl new file mode 100644 index 000000000..1b8abb9c3 --- /dev/null +++ b/apps/emqx/test/emqx_crl_cache_SUITE.erl @@ -0,0 +1,1085 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_crl_cache_SUITE). + +-compile(export_all). +-compile(nowarn_export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +%% from ssl_manager.erl +-record(state, { + session_cache_client, + session_cache_client_cb, + session_lifetime, + certificate_db, + session_validation_timer, + session_cache_client_max, + session_client_invalidator, + options, + client_session_order +}). + +-define(DEFAULT_URL, "http://localhost:9878/intermediate.crl.pem"). + +%%-------------------------------------------------------------------- +%% CT boilerplate +%%-------------------------------------------------------------------- + +all() -> + emqx_common_test_helpers:all(?MODULE). + +init_per_suite(Config) -> + application:load(emqx), + {ok, _} = application:ensure_all_started(ssl), + emqx_config:save_schema_mod_and_names(emqx_schema), + emqx_common_test_helpers:boot_modules(all), + Config. + +end_per_suite(_Config) -> + ok. 
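+
+%% The #state{} record at the top of this suite is copied from OTP's
+%% ssl_manager so that sys:get_state/1 results can be pattern-matched;
+%% get_crl_cache_table/0 further down uses it to pull the ETS table
+%% holding cached CRLs out of the certificate_db field. The lookup, in
+%% isolation (field layout assumed from that record):
+%%
+%%   #state{certificate_db = [_, _, _, {CRLTab, _}]} =
+%%       sys:get_state(ssl_manager),
+%%   ets:tab2list(CRLTab),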
+ +init_per_testcase(TestCase, Config) when + TestCase =:= t_cache; + TestCase =:= t_filled_cache; + TestCase =:= t_revoked +-> + ct:timetrap({seconds, 30}), + DataDir = ?config(data_dir, Config), + CRLFile = filename:join([DataDir, "intermediate-revoked.crl.pem"]), + {ok, CRLPem} = file:read_file(CRLFile), + [{'CertificateList', CRLDer, not_encrypted}] = public_key:pem_decode(CRLPem), + ok = snabbkaffe:start_trace(), + ServerPid = start_crl_server(CRLPem), + IsCached = lists:member(TestCase, [t_filled_cache, t_revoked]), + ok = setup_crl_options(Config, #{is_cached => IsCached}), + [ + {crl_pem, CRLPem}, + {crl_der, CRLDer}, + {http_server, ServerPid} + | Config + ]; +init_per_testcase(t_revoke_then_refresh, Config) -> + ct:timetrap({seconds, 120}), + DataDir = ?config(data_dir, Config), + CRLFileNotRevoked = filename:join([DataDir, "intermediate-not-revoked.crl.pem"]), + {ok, CRLPemNotRevoked} = file:read_file(CRLFileNotRevoked), + [{'CertificateList', CRLDerNotRevoked, not_encrypted}] = public_key:pem_decode( + CRLPemNotRevoked + ), + CRLFileRevoked = filename:join([DataDir, "intermediate-revoked.crl.pem"]), + {ok, CRLPemRevoked} = file:read_file(CRLFileRevoked), + [{'CertificateList', CRLDerRevoked, not_encrypted}] = public_key:pem_decode(CRLPemRevoked), + ok = snabbkaffe:start_trace(), + ServerPid = start_crl_server(CRLPemNotRevoked), + ExtraVars = #{refresh_interval => <<"10s">>}, + ok = setup_crl_options(Config, #{is_cached => true, extra_vars => ExtraVars}), + [ + {crl_pem_not_revoked, CRLPemNotRevoked}, + {crl_der_not_revoked, CRLDerNotRevoked}, + {crl_pem_revoked, CRLPemRevoked}, + {crl_der_revoked, CRLDerRevoked}, + {http_server, ServerPid} + | Config + ]; +init_per_testcase(t_cache_overflow, Config) -> + ct:timetrap({seconds, 120}), + DataDir = ?config(data_dir, Config), + CRLFileRevoked = filename:join([DataDir, "intermediate-revoked.crl.pem"]), + {ok, CRLPemRevoked} = file:read_file(CRLFileRevoked), + ok = snabbkaffe:start_trace(), + ServerPid = start_crl_server(CRLPemRevoked), + ExtraVars = #{cache_capacity => <<"2">>}, + ok = setup_crl_options(Config, #{is_cached => false, extra_vars => ExtraVars}), + [ + {http_server, ServerPid} + | Config + ]; +init_per_testcase(t_not_cached_and_unreachable, Config) -> + ct:timetrap({seconds, 30}), + DataDir = ?config(data_dir, Config), + CRLFile = filename:join([DataDir, "intermediate-revoked.crl.pem"]), + {ok, CRLPem} = file:read_file(CRLFile), + [{'CertificateList', CRLDer, not_encrypted}] = public_key:pem_decode(CRLPem), + ok = snabbkaffe:start_trace(), + application:stop(cowboy), + ok = setup_crl_options(Config, #{is_cached => false}), + [ + {crl_pem, CRLPem}, + {crl_der, CRLDer} + | Config + ]; +init_per_testcase(t_refresh_config, Config) -> + ct:timetrap({seconds, 30}), + DataDir = ?config(data_dir, Config), + CRLFile = filename:join([DataDir, "intermediate-revoked.crl.pem"]), + {ok, CRLPem} = file:read_file(CRLFile), + [{'CertificateList', CRLDer, not_encrypted}] = public_key:pem_decode(CRLPem), + TestPid = self(), + ok = meck:new(emqx_crl_cache, [non_strict, passthrough, no_history, no_link]), + meck:expect( + emqx_crl_cache, + http_get, + fun(URL, _HTTPTimeout) -> + ct:pal("http get crl ~p", [URL]), + TestPid ! 
{http_get, URL}, + {ok, {{"HTTP/1.0", 200, "OK"}, [], CRLPem}} + end + ), + ok = snabbkaffe:start_trace(), + ok = setup_crl_options(Config, #{is_cached => false}), + [ + {crl_pem, CRLPem}, + {crl_der, CRLDer} + | Config + ]; +init_per_testcase(TestCase, Config) when + TestCase =:= t_update_listener; + TestCase =:= t_validations +-> + %% when running emqx standalone tests, we can't use those + %% features. + case does_module_exist(emqx_mgmt_api_test_util) of + true -> + ct:timetrap({seconds, 30}), + DataDir = ?config(data_dir, Config), + PrivDir = ?config(priv_dir, Config), + CRLFile = filename:join([DataDir, "intermediate-revoked.crl.pem"]), + {ok, CRLPem} = file:read_file(CRLFile), + ok = snabbkaffe:start_trace(), + ServerPid = start_crl_server(CRLPem), + ConfFilePath = filename:join([DataDir, "emqx_just_verify.conf"]), + emqx_mgmt_api_test_util:init_suite( + [emqx_conf], + fun emqx_mgmt_api_test_util:set_special_configs/1, + #{ + extra_mustache_vars => #{ + test_data_dir => DataDir, + test_priv_dir => PrivDir + }, + conf_file_path => ConfFilePath + } + ), + [ + {http_server, ServerPid} + | Config + ]; + false -> + [{skip_does_not_apply, true} | Config] + end; +init_per_testcase(_TestCase, Config) -> + ct:timetrap({seconds, 30}), + DataDir = ?config(data_dir, Config), + CRLFile = filename:join([DataDir, "intermediate-revoked.crl.pem"]), + {ok, CRLPem} = file:read_file(CRLFile), + [{'CertificateList', CRLDer, not_encrypted}] = public_key:pem_decode(CRLPem), + TestPid = self(), + ok = meck:new(emqx_crl_cache, [non_strict, passthrough, no_history, no_link]), + meck:expect( + emqx_crl_cache, + http_get, + fun(URL, _HTTPTimeout) -> + ct:pal("http get crl ~p", [URL]), + TestPid ! {http_get, URL}, + {ok, {{"HTTP/1.0", 200, 'OK'}, [], CRLPem}} + end + ), + ok = snabbkaffe:start_trace(), + [ + {crl_pem, CRLPem}, + {crl_der, CRLDer} + | Config + ]. 
+
+end_per_testcase(TestCase, Config) when
+    TestCase =:= t_cache;
+    TestCase =:= t_filled_cache;
+    TestCase =:= t_revoked
+->
+    ServerPid = ?config(http_server, Config),
+    emqx_crl_cache_http_server:stop(ServerPid),
+    emqx_common_test_helpers:stop_apps([]),
+    clear_listeners(),
+    application:stop(cowboy),
+    clear_crl_cache(),
+    ok = snabbkaffe:stop(),
+    ok;
+end_per_testcase(TestCase, Config) when
+    TestCase =:= t_revoke_then_refresh;
+    TestCase =:= t_cache_overflow
+->
+    ServerPid = ?config(http_server, Config),
+    emqx_crl_cache_http_server:stop(ServerPid),
+    emqx_common_test_helpers:stop_apps([]),
+    clear_listeners(),
+    clear_crl_cache(),
+    application:stop(cowboy),
+    ok = snabbkaffe:stop(),
+    ok;
+end_per_testcase(t_not_cached_and_unreachable, _Config) ->
+    emqx_common_test_helpers:stop_apps([]),
+    clear_listeners(),
+    clear_crl_cache(),
+    ok = snabbkaffe:stop(),
+    ok;
+end_per_testcase(t_refresh_config, _Config) ->
+    meck:unload([emqx_crl_cache]),
+    clear_crl_cache(),
+    emqx_common_test_helpers:stop_apps([]),
+    clear_listeners(),
+    clear_crl_cache(),
+    application:stop(cowboy),
+    ok = snabbkaffe:stop(),
+    ok;
+end_per_testcase(TestCase, Config) when
+    TestCase =:= t_update_listener;
+    TestCase =:= t_validations
+->
+    Skip = proplists:get_bool(skip_does_not_apply, Config),
+    case Skip of
+        true ->
+            ok;
+        false ->
+            ServerPid = ?config(http_server, Config),
+            emqx_crl_cache_http_server:stop(ServerPid),
+            emqx_mgmt_api_test_util:end_suite([emqx_conf]),
+            clear_listeners(),
+            ok = snabbkaffe:stop(),
+            clear_crl_cache(),
+            ok
+    end;
+end_per_testcase(_TestCase, _Config) ->
+    meck:unload([emqx_crl_cache]),
+    clear_crl_cache(),
+    ok = snabbkaffe:stop(),
+    ok.
+
+%%--------------------------------------------------------------------
+%% Helper functions
+%%--------------------------------------------------------------------
+
+does_module_exist(Mod) ->
+    case erlang:module_loaded(Mod) of
+        true ->
+            true;
+        false ->
+            case code:ensure_loaded(Mod) of
+                ok ->
+                    true;
+                {module, Mod} ->
+                    true;
+                _ ->
+                    false
+            end
+    end.
+
+clear_listeners() ->
+    emqx_config:put([listeners], #{}),
+    emqx_config:put_raw([listeners], #{}),
+    ok.
+
+assert_http_get(URL) ->
+    receive
+        {http_get, URL} ->
+            ok
+    after 1000 ->
+        ct:pal("mailbox: ~p", [process_info(self(), messages)]),
+        error({should_have_requested, URL})
+    end.
+
+get_crl_cache_table() ->
+    #state{certificate_db = [_, _, _, {Ref, _}]} = sys:get_state(ssl_manager),
+    Ref.
+
+start_crl_server(Port, CRLPem) ->
+    {ok, LSock} = gen_tcp:listen(Port, [binary, {active, true}, {reuseaddr, true}]),
+    spawn_link(fun() -> accept_loop(LSock, CRLPem) end),
+    ok.
+
+accept_loop(LSock, CRLPem) ->
+    case gen_tcp:accept(LSock) of
+        {ok, Sock} ->
+            Worker = spawn_link(fun() -> crl_loop(Sock, CRLPem) end),
+            gen_tcp:controlling_process(Sock, Worker),
+            accept_loop(LSock, CRLPem);
+        {error, Reason} ->
+            error({accept_error, Reason})
+    end.
+
+crl_loop(Sock, CRLPem) ->
+    receive
+        {tcp, Sock, _Data} ->
+            gen_tcp:send(Sock, CRLPem),
+            crl_loop(Sock, CRLPem);
+        _Msg ->
+            ok
+    end.
+
+drain_msgs() ->
+    receive
+        _Msg ->
+            drain_msgs()
+    after 0 ->
+        ok
+    end.
+
+clear_crl_cache() ->
+    %% reset the CRL cache
+    Ref = monitor(process, whereis(ssl_manager)),
+    exit(whereis(ssl_manager), kill),
+    receive
+        {'DOWN', Ref, process, _, _} ->
+            ok
+    after 1_000 ->
+        ct:fail("ssl_manager didn't die")
+    end,
+    ensure_ssl_manager_alive(),
+    ok.
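+
+%% clear_crl_cache/0 above resets OTP's internal CRL table by killing
+%% ssl_manager and letting its supervisor restart it; the monitor makes
+%% the kill synchronous, and ensure_ssl_manager_alive/0 (defined
+%% further down) then polls until the fresh process is registered. The
+%% same kill-and-wait shape, in isolation:
+%%
+%%   Ref = monitor(process, whereis(ssl_manager)),
+%%   exit(whereis(ssl_manager), kill),
+%%   receive
+%%       {'DOWN', Ref, process, _, _} -> ok
+%%   after 1_000 -> error(ssl_manager_still_alive)
+%%   end,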
+
+force_cacertfile(Cacertfile) ->
+    {SSLListeners0, OtherListeners} = lists:partition(
+        fun(#{proto := Proto}) -> Proto =:= ssl end,
+        emqx:get_env(listeners)
+    ),
+    SSLListeners =
+        lists:map(
+            fun(Listener = #{opts := Opts0}) ->
+                SSLOpts0 = proplists:get_value(ssl_options, Opts0),
+                %% it injects some garbage...
+                SSLOpts1 = lists:keydelete(cacertfile, 1, lists:keydelete(cacertfile, 1, SSLOpts0)),
+                SSLOpts2 = [{cacertfile, Cacertfile} | SSLOpts1],
+                Opts1 = lists:keyreplace(ssl_options, 1, Opts0, {ssl_options, SSLOpts2}),
+                Listener#{opts => Opts1}
+            end,
+            SSLListeners0
+        ),
+    application:set_env(emqx, listeners, SSLListeners ++ OtherListeners),
+    ok.
+
+setup_crl_options(Config, #{is_cached := IsCached} = Opts) ->
+    DataDir = ?config(data_dir, Config),
+    ConfFilePath = filename:join([DataDir, "emqx.conf"]),
+    Defaults = #{
+        refresh_interval => <<"11m">>,
+        cache_capacity => <<"100">>,
+        test_data_dir => DataDir
+    },
+    ExtraVars0 = maps:get(extra_vars, Opts, #{}),
+    ExtraVars = maps:merge(Defaults, ExtraVars0),
+    emqx_common_test_helpers:start_apps(
+        [],
+        fun(_) -> ok end,
+        #{
+            extra_mustache_vars => ExtraVars,
+            conf_file_path => ConfFilePath
+        }
+    ),
+    case IsCached of
+        true ->
+            %% wait for the cache to be filled
+            emqx_crl_cache:refresh(?DEFAULT_URL),
+            receive
+                {http_get, <<?DEFAULT_URL>>} -> ok
+            after 1_000 ->
+                ct:pal("mailbox: ~p", [process_info(self(), messages)]),
+                error(crl_cache_not_filled)
+            end;
+        false ->
+            %% ensure cache is empty
+            clear_crl_cache(),
+            ok
+    end,
+    drain_msgs(),
+    ok.
+
+start_crl_server(CRLPem) ->
+    application:ensure_all_started(cowboy),
+    {ok, ServerPid} = emqx_crl_cache_http_server:start_link(self(), 9878, CRLPem, []),
+    receive
+        {ServerPid, ready} -> ok
+    after 1000 -> error(timeout_starting_http_server)
+    end,
+    ServerPid.
+
+request(Method, Url, QueryParams, Body) ->
+    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
+    Opts = #{return_all => true},
+    case emqx_mgmt_api_test_util:request_api(Method, Url, QueryParams, AuthHeader, Body, Opts) of
+        {ok, {Reason, Headers, BodyR}} ->
+            {ok, {Reason, Headers, emqx_utils_json:decode(BodyR, [return_maps])}};
+        Error ->
+            Error
+    end.
+
+get_listener_via_api(ListenerId) ->
+    Path = emqx_mgmt_api_test_util:api_path(["listeners", ListenerId]),
+    request(get, Path, [], []).
+
+update_listener_via_api(ListenerId, NewConfig) ->
+    Path = emqx_mgmt_api_test_util:api_path(["listeners", ListenerId]),
+    request(put, Path, [], NewConfig).
+
+assert_successful_connection(Config) ->
+    assert_successful_connection(Config, default).
+
+assert_successful_connection(Config, ClientNum) ->
+    DataDir = ?config(data_dir, Config),
+    Num =
+        case ClientNum of
+            default -> "";
+            _ -> integer_to_list(ClientNum)
+        end,
+    ClientCert = filename:join(DataDir, "client" ++ Num ++ ".cert.pem"),
+    ClientKey = filename:join(DataDir, "client" ++ Num ++ ".key.pem"),
+    %% 1) At first, the cache is empty, and the CRL is fetched and
+    %% cached on the fly.
+    {ok, C0} = emqtt:start_link([
+        {ssl, true},
+        {ssl_opts, [
+            {certfile, ClientCert},
+            {keyfile, ClientKey}
+        ]},
+        {port, 8883}
+    ]),
+    ?tp_span(
+        mqtt_client_connection,
+        #{client_num => ClientNum},
+        begin
+            {ok, _} = emqtt:connect(C0),
+            emqtt:stop(C0),
+            ok
+        end
+    ).
+
+trace_between(Trace0, Marker1, Marker2) ->
+    {Trace1, [_ | _]} = ?split_trace_at(#{?snk_kind := Marker2}, Trace0),
+    {[_ | _], [_ | Trace2]} = ?split_trace_at(#{?snk_kind := Marker1}, Trace1),
+    Trace2.
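+
+%% trace_between/3 above slices a snabbkaffe trace down to the events
+%% emitted between two ?tp/2 markers, and of_kinds/2 (defined next)
+%% filters that slice to the event kinds under test; together they let
+%% t_cache_overflow assert on each phase of its scenario separately.
+%% Marker names in this sketch are hypothetical:
+%%
+%%   Slice = of_kinds(
+%%       trace_between(Trace, phase_start, phase_end),
+%%       [new_crl_url_inserted, crl_cache_overflow]
+%%   ),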
+ +of_kinds(Trace0, Kinds0) -> + Kinds = sets:from_list(Kinds0, [{version, 2}]), + lists:filter( + fun(#{?snk_kind := K}) -> sets:is_element(K, Kinds) end, + Trace0 + ). + +ensure_ssl_manager_alive() -> + ?retry( + _Sleep0 = 200, + _Attempts0 = 50, + true = is_pid(whereis(ssl_manager)) + ). + +%%-------------------------------------------------------------------- +%% Test cases +%%-------------------------------------------------------------------- + +t_init_empty_urls(_Config) -> + Ref = get_crl_cache_table(), + ?assertEqual([], ets:tab2list(Ref)), + ?assertMatch({ok, _}, emqx_crl_cache:start_link()), + receive + {http_get, _} -> + error(should_not_make_http_request) + after 1000 -> ok + end, + ?assertEqual([], ets:tab2list(Ref)), + ok. + +t_manual_refresh(Config) -> + CRLDer = ?config(crl_der, Config), + Ref = get_crl_cache_table(), + ?assertEqual([], ets:tab2list(Ref)), + {ok, _} = emqx_crl_cache:start_link(), + URL = "http://localhost/crl.pem", + ok = snabbkaffe:start_trace(), + ?wait_async_action( + ?assertEqual(ok, emqx_crl_cache:refresh(URL)), + #{?snk_kind := crl_cache_insert}, + 5_000 + ), + ok = snabbkaffe:stop(), + ?assertEqual( + [{"crl.pem", [CRLDer]}], + ets:tab2list(Ref) + ), + ok. + +t_refresh_request_error(_Config) -> + meck:expect( + emqx_crl_cache, + http_get, + fun(_URL, _HTTPTimeout) -> + {ok, {{"HTTP/1.0", 404, 'Not Found'}, [], <<"not found">>}} + end + ), + {ok, _} = emqx_crl_cache:start_link(), + URL = "http://localhost/crl.pem", + ?check_trace( + ?wait_async_action( + ?assertEqual(ok, emqx_crl_cache:refresh(URL)), + #{?snk_kind := crl_cache_insert}, + 5_000 + ), + fun(Trace) -> + ?assertMatch( + [#{error := {bad_response, #{code := 404}}}], + ?of_kind(crl_refresh_failure, Trace) + ), + ok + end + ), + ok = snabbkaffe:stop(), + ok. + +t_refresh_invalid_response(_Config) -> + meck:expect( + emqx_crl_cache, + http_get, + fun(_URL, _HTTPTimeout) -> + {ok, {{"HTTP/1.0", 200, 'OK'}, [], <<"not a crl">>}} + end + ), + {ok, _} = emqx_crl_cache:start_link(), + URL = "http://localhost/crl.pem", + ?check_trace( + ?wait_async_action( + ?assertEqual(ok, emqx_crl_cache:refresh(URL)), + #{?snk_kind := crl_cache_insert}, + 5_000 + ), + fun(Trace) -> + ?assertMatch( + [#{crls := []}], + ?of_kind(crl_cache_insert, Trace) + ), + ok + end + ), + ok = snabbkaffe:stop(), + ok. + +t_refresh_http_error(_Config) -> + meck:expect( + emqx_crl_cache, + http_get, + fun(_URL, _HTTPTimeout) -> + {error, timeout} + end + ), + {ok, _} = emqx_crl_cache:start_link(), + URL = "http://localhost/crl.pem", + ?check_trace( + ?wait_async_action( + ?assertEqual(ok, emqx_crl_cache:refresh(URL)), + #{?snk_kind := crl_cache_insert}, + 5_000 + ), + fun(Trace) -> + ?assertMatch( + [#{error := {http_error, timeout}}], + ?of_kind(crl_refresh_failure, Trace) + ), + ok + end + ), + ok = snabbkaffe:stop(), + ok. + +t_unknown_messages(_Config) -> + {ok, Server} = emqx_crl_cache:start_link(), + gen_server:call(Server, foo), + gen_server:cast(Server, foo), + Server ! foo, + ok. + +t_evict(_Config) -> + {ok, _} = emqx_crl_cache:start_link(), + URL = "http://localhost/crl.pem", + ?wait_async_action( + ?assertEqual(ok, emqx_crl_cache:refresh(URL)), + #{?snk_kind := crl_cache_insert}, + 5_000 + ), + Ref = get_crl_cache_table(), + ?assertMatch([{"crl.pem", _}], ets:tab2list(Ref)), + {ok, {ok, _}} = ?wait_async_action( + emqx_crl_cache:evict(URL), + #{?snk_kind := crl_cache_evict} + ), + ?assertEqual([], ets:tab2list(Ref)), + ok. 
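+
+%% Several cases above pair an API call with snabbkaffe's
+%% ?wait_async_action(Action, EventMatch, Timeout), which runs Action
+%% and then blocks until a matching trace event fires, avoiding fixed
+%% sleeps. It returns {ActionResult, {ok, Event} | timeout}, hence the
+%% nested-tuple matches. The general shape, with a placeholder event
+%% kind:
+%%
+%%   {ok, {ok, _Event}} = ?wait_async_action(
+%%       ok = some_async_api:call(),
+%%       #{?snk_kind := side_effect_done},
+%%       5_000
+%%   ),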
+
+t_cache(Config) ->
+    DataDir = ?config(data_dir, Config),
+    ClientCert = filename:join(DataDir, "client.cert.pem"),
+    ClientKey = filename:join(DataDir, "client.key.pem"),
+    %% 1) At first, the cache is empty, and the CRL is fetched and
+    %% cached on the fly.
+    {ok, C0} = emqtt:start_link([
+        {ssl, true},
+        {ssl_opts, [
+            {certfile, ClientCert},
+            {keyfile, ClientKey}
+        ]},
+        {port, 8883}
+    ]),
+    {ok, _} = emqtt:connect(C0),
+    receive
+        {http_get, _} -> ok
+    after 500 ->
+        emqtt:stop(C0),
+        error(should_have_checked_server)
+    end,
+    emqtt:stop(C0),
+    %% 2) When another client using the cached CRL URL connects later,
+    %% it uses the cache.
+    {ok, C1} = emqtt:start_link([
+        {ssl, true},
+        {ssl_opts, [
+            {certfile, ClientCert},
+            {keyfile, ClientKey}
+        ]},
+        {port, 8883}
+    ]),
+    {ok, _} = emqtt:connect(C1),
+    receive
+        {http_get, _} ->
+            emqtt:stop(C1),
+            error(should_not_have_checked_server)
+    after 500 -> ok
+    end,
+    emqtt:stop(C1),
+
+    ok.
+
+t_cache_overflow(Config) ->
+    %% the cache capacity is 2 in this test.
+    ?check_trace(
+        begin
+            %% The first and second connections go into the cache
+            ?tp(first_connections, #{}),
+            assert_successful_connection(Config, 1),
+            assert_successful_connection(Config, 2),
+            %% These should be served from the cache
+            ?tp(first_reconnections, #{}),
+            assert_successful_connection(Config, 1),
+            assert_successful_connection(Config, 2),
+            %% A third client connects and evicts the oldest URL (1)
+            ?tp(first_eviction, #{}),
+            assert_successful_connection(Config, 3),
+            assert_successful_connection(Config, 3),
+            %% Client 1 connects again and its URL must be re-cached; this
+            %% time, URL (2) gets evicted
+            ?tp(second_eviction, #{}),
+            assert_successful_connection(Config, 1),
+            %% TODO: force the race condition where the same URL is fetched
+            %% concurrently and registered twice
+            ?tp(test_end, #{}),
+            ok
+        end,
+        fun(Trace) ->
+            URL1 = "http://localhost:9878/intermediate1.crl.pem",
+            URL2 = "http://localhost:9878/intermediate2.crl.pem",
+            URL3 = "http://localhost:9878/intermediate3.crl.pem",
+            Kinds = [
+                mqtt_client_connection,
+                new_crl_url_inserted,
+                crl_cache_ensure_timer,
+                crl_cache_overflow
+            ],
+            Trace1 = of_kinds(
+                trace_between(Trace, first_connections, first_reconnections),
+                Kinds
+            ),
+            ?assertMatch(
+                [
+                    #{
+                        ?snk_kind := mqtt_client_connection,
+                        ?snk_span := start,
+                        client_num := 1
+                    },
+                    #{
+                        ?snk_kind := new_crl_url_inserted,
+                        url := URL1
+                    },
+                    #{
+                        ?snk_kind := crl_cache_ensure_timer,
+                        url := URL1
+                    },
+                    #{
+                        ?snk_kind := mqtt_client_connection,
+                        ?snk_span := {complete, ok},
+                        client_num := 1
+                    },
+                    #{
+                        ?snk_kind := mqtt_client_connection,
+                        ?snk_span := start,
+                        client_num := 2
+                    },
+                    #{
+                        ?snk_kind := new_crl_url_inserted,
+                        url := URL2
+                    },
+                    #{
+                        ?snk_kind := crl_cache_ensure_timer,
+                        url := URL2
+                    },
+                    #{
+                        ?snk_kind := mqtt_client_connection,
+                        ?snk_span := {complete, ok},
+                        client_num := 2
+                    }
+                ],
+                Trace1
+            ),
+            Trace2 = of_kinds(
+                trace_between(Trace, first_reconnections, first_eviction),
+                Kinds
+            ),
+            ?assertMatch(
+                [
+                    #{
+                        ?snk_kind := mqtt_client_connection,
+                        ?snk_span := start,
+                        client_num := 1
+                    },
+                    #{
+                        ?snk_kind := mqtt_client_connection,
+                        ?snk_span := {complete, ok},
+                        client_num := 1
+                    },
+                    #{
+                        ?snk_kind := mqtt_client_connection,
+                        ?snk_span := start,
+                        client_num := 2
+                    },
+                    #{
+                        ?snk_kind := mqtt_client_connection,
+                        ?snk_span := {complete, ok},
+                        client_num := 2
+                    }
+                ],
+                Trace2
+            ),
+            Trace3 = of_kinds(
+                trace_between(Trace, first_eviction, second_eviction),
+                Kinds
+            ),
+            ?assertMatch(
+                [
+                    #{
+                        ?snk_kind := mqtt_client_connection,
+                        ?snk_span := start,
+                        client_num := 3
+                    },
+                    #{
+                        ?snk_kind := new_crl_url_inserted,
+                        url := URL3
+                    },
+                    #{
+                        ?snk_kind := crl_cache_overflow,
+                        oldest_url := URL1
+                    },
+                    #{
+                        ?snk_kind := crl_cache_ensure_timer,
+                        url := URL3
+                    },
+                    #{
+                        ?snk_kind := mqtt_client_connection,
+                        ?snk_span := {complete, ok},
+                        client_num := 3
+                    },
+                    #{
+                        ?snk_kind := mqtt_client_connection,
+                        ?snk_span := start,
+                        client_num := 3
+                    },
+                    #{
+                        ?snk_kind := mqtt_client_connection,
+                        ?snk_span := {complete, ok},
+                        client_num := 3
+                    }
+                ],
+                Trace3
+            ),
+            Trace4 = of_kinds(
+                trace_between(Trace, second_eviction, test_end),
+                Kinds
+            ),
+            ?assertMatch(
+                [
+                    #{
+                        ?snk_kind := mqtt_client_connection,
+                        ?snk_span := start,
+                        client_num := 1
+                    },
+                    #{
+                        ?snk_kind := new_crl_url_inserted,
+                        url := URL1
+                    },
+                    #{
+                        ?snk_kind := crl_cache_overflow,
+                        oldest_url := URL2
+                    },
+                    #{
+                        ?snk_kind := crl_cache_ensure_timer,
+                        url := URL1
+                    },
+                    #{
+                        ?snk_kind := mqtt_client_connection,
+                        ?snk_span := {complete, ok},
+                        client_num := 1
+                    }
+                ],
+                Trace4
+            ),
+            ok
+        end
+    ).
+
+%% check that the URL in the certificate is *not* checked if the cache
+%% contains that URL.
+t_filled_cache(Config) ->
+    DataDir = ?config(data_dir, Config),
+    ClientCert = filename:join(DataDir, "client.cert.pem"),
+    ClientKey = filename:join(DataDir, "client.key.pem"),
+    {ok, C} = emqtt:start_link([
+        {ssl, true},
+        {ssl_opts, [
+            {certfile, ClientCert},
+            {keyfile, ClientKey}
+        ]},
+        {port, 8883}
+    ]),
+    {ok, _} = emqtt:connect(C),
+    receive
+        {http_get, _} ->
+            emqtt:stop(C),
+            error(should_have_used_cache)
+    after 500 -> ok
+    end,
+    emqtt:stop(C),
+    ok.
+
+%% If the CRL is not cached when the client tries to connect and the
+%% CRL server is unreachable, the client will be denied connection.
+t_not_cached_and_unreachable(Config) ->
+    DataDir = ?config(data_dir, Config),
+    ClientCert = filename:join(DataDir, "client.cert.pem"),
+    ClientKey = filename:join(DataDir, "client.key.pem"),
+    {ok, C} = emqtt:start_link([
+        {ssl, true},
+        {ssl_opts, [
+            {certfile, ClientCert},
+            {keyfile, ClientKey}
+        ]},
+        {port, 8883}
+    ]),
+    Ref = get_crl_cache_table(),
+    ?assertEqual([], ets:tab2list(Ref)),
+    process_flag(trap_exit, true),
+    ?assertMatch({error, {{shutdown, {tls_alert, {bad_certificate, _}}}, _}}, emqtt:connect(C)),
+    ok.
+
+t_revoked(Config) ->
+    DataDir = ?config(data_dir, Config),
+    ClientCert = filename:join(DataDir, "client-revoked.cert.pem"),
+    ClientKey = filename:join(DataDir, "client-revoked.key.pem"),
+    {ok, C} = emqtt:start_link([
+        {ssl, true},
+        {ssl_opts, [
+            {certfile, ClientCert},
+            {keyfile, ClientKey}
+        ]},
+        {port, 8883}
+    ]),
+    process_flag(trap_exit, true),
+    Res = emqtt:connect(C),
+    %% apparently, sometimes there's a race condition in
+    %% `emqtt_sock:ssl_upgrade' when it calls
+    %% `ssl:controlling_process' and a badmatch happens at that
+    %% point.
+    case Res of
+        {error, {{shutdown, {tls_alert, {certificate_revoked, _}}}, _}} ->
+            ok;
+        {error, closed} ->
+            %% race condition?
+            ok;
+        _ ->
+            ct:fail("unexpected result: ~p", [Res])
+    end,
+    ok.
+
+t_revoke_then_refresh(Config) ->
+    DataDir = ?config(data_dir, Config),
+    CRLPemRevoked = ?config(crl_pem_revoked, Config),
+    ClientCert = filename:join(DataDir, "client-revoked.cert.pem"),
+    ClientKey = filename:join(DataDir, "client-revoked.key.pem"),
+    {ok, C0} = emqtt:start_link([
+        {ssl, true},
+        {ssl_opts, [
+            {certfile, ClientCert},
+            {keyfile, ClientKey}
+        ]},
+        {port, 8883}
+    ]),
+    %% At first, the CRL contains no revoked entries, so the client
+    %% should be allowed to connect.
+    ?assertMatch({ok, _}, emqtt:connect(C0)),
+    emqtt:stop(C0),
+
+    %% Now we update the CRL on the server and wait for the cache to
+    %% be refreshed.
+    {true, {ok, _}} =
+        ?wait_async_action(
+            emqx_crl_cache_http_server:set_crl(CRLPemRevoked),
+            #{?snk_kind := crl_refresh_timer_done},
+            70_000
+        ),
+
+    %% The *same client* should now be denied connection.
+    {ok, C1} = emqtt:start_link([
+        {ssl, true},
+        {ssl_opts, [
+            {certfile, ClientCert},
+            {keyfile, ClientKey}
+        ]},
+        {port, 8883}
+    ]),
+    process_flag(trap_exit, true),
+    ?assertMatch(
+        {error, {{shutdown, {tls_alert, {certificate_revoked, _}}}, _}}, emqtt:connect(C1)
+    ),
+    ok.
+
+%% Check that we can start with a non-CRL listener and restart it with
+%% the new CRL config.
+t_update_listener(Config) ->
+    case proplists:get_bool(skip_does_not_apply, Config) of
+        true ->
+            ok;
+        false ->
+            do_t_update_listener(Config)
+    end.
+
+do_t_update_listener(Config) ->
+    DataDir = ?config(data_dir, Config),
+    Keyfile = filename:join([DataDir, "server.key.pem"]),
+    Certfile = filename:join([DataDir, "server.cert.pem"]),
+    Cacertfile = filename:join([DataDir, "ca-chain.cert.pem"]),
+    ClientCert = filename:join(DataDir, "client-revoked.cert.pem"),
+    ClientKey = filename:join(DataDir, "client-revoked.key.pem"),
+
+    %% no CRL check at first
+    ListenerId = "ssl:default",
+    {ok, {{_, 200, _}, _, ListenerData0}} = get_listener_via_api(ListenerId),
+    ?assertMatch(
+        #{
+            <<"ssl_options">> :=
+                #{
+                    <<"enable_crl_check">> := false,
+                    <<"verify">> := <<"verify_peer">>
+                }
+        },
+        ListenerData0
+    ),
+    {ok, C0} = emqtt:start_link([
+        {ssl, true},
+        {ssl_opts, [
+            {certfile, ClientCert},
+            {keyfile, ClientKey}
+        ]},
+        {port, 8883}
+    ]),
+    %% At first, the CRL contains no revoked entries, so the client
+    %% should be allowed to connect.
+    ?assertMatch({ok, _}, emqtt:connect(C0)),
+    emqtt:stop(C0),
+
+    %% configure CRL checking
+    CRLConfig =
+        #{
+            <<"ssl_options">> =>
+                #{
+                    <<"keyfile">> => Keyfile,
+                    <<"certfile">> => Certfile,
+                    <<"cacertfile">> => Cacertfile,
+                    <<"enable_crl_check">> => true
+                }
+        },
+    ListenerData1 = emqx_utils_maps:deep_merge(ListenerData0, CRLConfig),
+    {ok, {_, _, ListenerData2}} = update_listener_via_api(ListenerId, ListenerData1),
+    ?assertMatch(
+        #{
+            <<"ssl_options">> :=
+                #{
+                    <<"enable_crl_check">> := true,
+                    <<"verify">> := <<"verify_peer">>
+                }
+        },
+        ListenerData2
+    ),
+
+    %% Now the CRL information should be used to block the connection
+    process_flag(trap_exit, true),
+    {ok, C1} = emqtt:start_link([
+        {ssl, true},
+        {ssl_opts, [
+            {certfile, ClientCert},
+            {keyfile, ClientKey}
+        ]},
+        {port, 8883}
+    ]),
+    ?assertMatch(
+        {error, {{shutdown, {tls_alert, {certificate_revoked, _}}}, _}}, emqtt:connect(C1)
+    ),
+    assert_http_get(<<?DEFAULT_URL>>),
+
+    ok.
+
+t_validations(Config) ->
+    case proplists:get_bool(skip_does_not_apply, Config) of
+        true ->
+            ok;
+        false ->
+            do_t_validations(Config)
+    end.
+ +do_t_validations(_Config) -> + ListenerId = <<"ssl:default">>, + {ok, {{_, 200, _}, _, ListenerData0}} = get_listener_via_api(ListenerId), + + ListenerData1 = + emqx_utils_maps:deep_merge( + ListenerData0, + #{ + <<"ssl_options">> => + #{ + <<"enable_crl_check">> => true, + <<"verify">> => <<"verify_none">> + } + } + ), + {error, {_, _, ResRaw1}} = update_listener_via_api(ListenerId, ListenerData1), + #{<<"code">> := <<"BAD_REQUEST">>, <<"message">> := MsgRaw1} = + emqx_utils_json:decode(ResRaw1, [return_maps]), + ?assertMatch( + #{ + <<"mismatches">> := + #{ + <<"listeners:ssl_not_required_bind">> := + #{ + <<"reason">> := + <<"verify must be verify_peer when CRL check is enabled">> + } + } + }, + emqx_utils_json:decode(MsgRaw1, [return_maps]) + ), + + ok. diff --git a/apps/emqx/test/emqx_crl_cache_SUITE_data/ca-chain.cert.pem b/apps/emqx/test/emqx_crl_cache_SUITE_data/ca-chain.cert.pem new file mode 100644 index 000000000..eaabd2445 --- /dev/null +++ b/apps/emqx/test/emqx_crl_cache_SUITE_data/ca-chain.cert.pem @@ -0,0 +1,68 @@ +-----BEGIN CERTIFICATE----- +MIIF+zCCA+OgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwbzELMAkGA1UEBhMCU0Ux +EjAQBgNVBAgMCVN0b2NraG9sbTESMBAGA1UEBwwJU3RvY2tob2xtMRIwEAYDVQQK +DAlNeU9yZ05hbWUxETAPBgNVBAsMCE15Um9vdENBMREwDwYDVQQDDAhNeVJvb3RD +QTAeFw0yMzAxMTIxMzA4MTZaFw0zMzAxMDkxMzA4MTZaMGsxCzAJBgNVBAYTAlNF +MRIwEAYDVQQIDAlTdG9ja2hvbG0xEjAQBgNVBAoMCU15T3JnTmFtZTEZMBcGA1UE +CwwQTXlJbnRlcm1lZGlhdGVDQTEZMBcGA1UEAwwQTXlJbnRlcm1lZGlhdGVDQTCC +AiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALQG7dMeU/y9HDNHzhydR0bm +wN9UGplqJOJPwqJRaZZcrn9umgJ9SU2il2ceEVxMDwzBWCRKJO5/H9A9k13SqsXM +2c2c9xXfIF1kb820lCm1Uow5hZ/auDjxliNk9kNJDigCRi3QoIs/dVeWzFsgEC2l +gxRqauN2eNFb6/yXY788YALHBsCRV2NFOFXxtPsvLXpD9Q/8EqYsSMuLARRdHVNU +ryaEF5lhShpcuz0TlIuTy2TiuXJUtJ+p7a4Z7friZ6JsrmQWsVQBj44F8TJRHWzW +C7vm9c+dzEX9eqbr5iPL+L4ctMW9Lz6ePcYfIXne6CElusRUf8G+xM1uwovF9bpV ++9IqY7tAu9G1iY9iNtJgNNDKOCcOGKcZCx6Cg1XYOEKReNnUMazvYeqRrrjV5WQ0 +vOcD5zcBRNTXCddCLa7U0guXP9mQrfuk4NTH1Bt77JieTJ8cfDXHwtaKf6aGbmZP +wl1Xi/GuXNUP/xeog78RKyFwBmjt2JKwvWzMpfmH4mEkG9moh2alva+aEz6LIJuP +16g6s0Q6c793/OvUtpNcewHw4Vjn39LD9o6VLp854G4n8dVpUWSbWS+sXD1ZE69H +g/sMNMyq+09ufkbewY8xoCm/rQ1pqDZAVMWsstJEaYu7b/eb7R+RGOj1YECCV/Yp +EZPdDotbSNRkIi2d/a1NAgMBAAGjgaQwgaEwHQYDVR0OBBYEFExwhjsVUom6tQ+S +qq6xMUETvnPzMB8GA1UdIwQYMBaAFD90kfU5pc5l48THu0Ayj9SNpHuhMBIGA1Ud +EwEB/wQIMAYBAf8CAQAwDgYDVR0PAQH/BAQDAgGGMDsGA1UdHwQ0MDIwMKAuoCyG +Kmh0dHA6Ly9sb2NhbGhvc3Q6OTg3OC9pbnRlcm1lZGlhdGUuY3JsLnBlbTANBgkq +hkiG9w0BAQsFAAOCAgEAK6NgdWQYtPNKQNBGjsgtgqTRh+k30iqSO6Y3yE1KGABO +EuQdVqkC2qUIbCB0M0qoV0ab50KNLfU6cbshggW4LDpcMpoQpI05fukNh1jm3ZuZ +0xsB7vlmlsv00tpqmfIl/zykPDynHKOmFh/hJP/KetMy4+wDv4/+xP31UdEj5XvG +HvMtuqOS23A+H6WPU7ol7KzKBnU2zz/xekvPbUD3JqV+ynP5bgbIZHAndd0o9T8e +NFX23Us4cTenU2/ZlOq694bRzGaK+n3Ksz995Nbtzv5fbUgqmf7Mcq4iHGRVtV11 +MRyBrsXZp2vbF63c4hrf2Zd6SWRoaDKRhP2DMhajpH9zZASSTlfejg/ZRO2s+Clh +YrSTkeMAdnRt6i/q4QRcOTCfsX75RFM5v67njvTXsSaSTnAwaPi78tRtf+WSh0EP +VVPzy++BszBVlJ1VAf7soWZHCjZxZ8ZPqVTy5okoHwWQ09WmYe8GfulDh1oj0wbK +3FjN7bODWHJN+bFf5aQfK+tumYKoPG8RXL6QxpEzjFWjxhIMJHHMKfDWnAV1o1+7 +/1/aDzq7MzEYBbrgQR7oE5ZHtyqhCf9LUgw0Kr7/8QWuNAdeDCJzjXRROU0hJczp +dOyfRlLbHmLLmGOnROlx6LsGNQ17zuz6SPi7ei8/ylhykawDOAGkM1+xFakmQhM= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFzzCCA7egAwIBAgIUYjc7hD7/UJ0/VPADfNfp/WpOwRowDQYJKoZIhvcNAQEL +BQAwbzELMAkGA1UEBhMCU0UxEjAQBgNVBAgMCVN0b2NraG9sbTESMBAGA1UEBwwJ +U3RvY2tob2xtMRIwEAYDVQQKDAlNeU9yZ05hbWUxETAPBgNVBAsMCE15Um9vdENB +MREwDwYDVQQDDAhNeVJvb3RDQTAeFw0yMzAxMTIxMzA4MTRaFw00MzAxMDcxMzA4 
+MTRaMG8xCzAJBgNVBAYTAlNFMRIwEAYDVQQIDAlTdG9ja2hvbG0xEjAQBgNVBAcM +CVN0b2NraG9sbTESMBAGA1UECgwJTXlPcmdOYW1lMREwDwYDVQQLDAhNeVJvb3RD +QTERMA8GA1UEAwwITXlSb290Q0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCnBwSOYVJw47IoMHMXTVDtOYvUt3rqsurEhFcB4O8xmf2mmwr6m7s8A5Ft +AvAehg1GvnXT3t/KiyU7BK+acTwcErGyZwS2wvdB0lpHWSpOn/u5y+4ZETvQefcj +ZTdDOM9VN5nutpitgNb+1yL8sqSexfVbY7DnYYvFjOVBYoP/SGvM9jVjCad+0WL3 +FhuD+L8QAxzCieX3n9UMymlFwINQuEc+TDjuNcEqt+0J5EgS1fwzxb2RCVL0TNv4 +9a71hFGCNRj20AeZm99hbdufm7+0AFO7ocV5q43rLrWFUoBzqKPYIjga/cv/UdWZ +c5RLRXw3JDSrCqkf/mOlaEhNPlmWRF9MSus5Da3wuwgGCaVzmrf30rWR5aHHcscG +e+AOgJ4HayvBUQeb6ZlRXc0YlACiLToMKxuyxDyUcDfVEXpUIsDILF8dkiVQxEU3 +j9g6qjXiqPVdNiwpqXfBKObj8vNCzORnoHYs8cCgib3RgDVWeqkDmlSwlZE7CvQh +U4Loj4l7813xxzYEKkVaT1JdXPWu42CG/b4Y/+f4V+3rkJkYzUwndX6kZNksIBai +phmtvKt+CTdP1eAbT+C9AWWF3PT31+BIhuT0u9tR8BVSkXdQB8dG4M/AAJcTo640 +0mdYYOXT153gEKHJuUBm750ZTy+r6NjNvpw8VrMAakJwHqnIdQIDAQABo2MwYTAd +BgNVHQ4EFgQUP3SR9TmlzmXjxMe7QDKP1I2ke6EwHwYDVR0jBBgwFoAUP3SR9Tml +zmXjxMe7QDKP1I2ke6EwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYw +DQYJKoZIhvcNAQELBQADggIBAFMFv4C+I0+xOAb9v6G/IOpfPBZ1ez31EXKJJBra +lulP4nRHQMeb310JS8BIeQ3dl+7+PkSxPABZSwc3jkxdSMvhc+Z4MQtTgos+Qsjs +gH7sTqwWeeQ0lHYxWmkXijrh5OPRZwTKzYQlkcn85BCUXl2KDuNEdiqPbDTao+lc +lA0/UAvC6NCyFKq/jqf4CmW5Kx6yG1v1LaE+IXn7cbIXj+DaehocVXi0wsXqj03Q +DDUHuLHZP+LBsg4e91/0Jy2ekNRTYJifSqr+9ufHl0ZX1pFDZyf396IgZ5CQZ0PJ +nRxZHlCfsxWxmxxdy3FQSE6YwXhdTjjoAa1ApZcKkkt1beJa6/oRLze/ux5x+5q+ +4QczufHd6rjoKBi6BM3FgFQ8As5iNohHXlMHd/xITo1Go3CWw2j9TGH5vzksOElK +B0mcwwt2zwNEjvfytc+tI5jcfGN3tiT5fVHS8hw9dWKevypLL+55Ua9G8ZgDHasT +XFRJHgmnbyFcaAe26D2dSKmhC9u2mHBH+MaI8dj3e7wNBfpxNgp41aFIk+QTmiFW +VXFED6DHQ/Mxq93ACalHdYg18PlIYClbT6Pf2xXBnn33YPhn5xzoTZ+cDH/RpaQp +s0UUTSJT1UTXgtXPnZWQfvKlMjJEIiVFiLEC0sgZRlWuZDRAY0CdZJJxvQp59lqu +cbTm +-----END CERTIFICATE----- diff --git a/apps/emqx/test/emqx_crl_cache_SUITE_data/client-no-dist-points.cert.pem b/apps/emqx/test/emqx_crl_cache_SUITE_data/client-no-dist-points.cert.pem new file mode 100644 index 000000000..038eec790 --- /dev/null +++ b/apps/emqx/test/emqx_crl_cache_SUITE_data/client-no-dist-points.cert.pem @@ -0,0 +1,32 @@ +-----BEGIN CERTIFICATE----- +MIIFdTCCA12gAwIBAgICEAUwDQYJKoZIhvcNAQELBQAwazELMAkGA1UEBhMCU0Ux +EjAQBgNVBAgMCVN0b2NraG9sbTESMBAGA1UECgwJTXlPcmdOYW1lMRkwFwYDVQQL +DBBNeUludGVybWVkaWF0ZUNBMRkwFwYDVQQDDBBNeUludGVybWVkaWF0ZUNBMB4X +DTIzMDExODEyMzY1NloXDTMzMDQyNTEyMzY1NlowgYQxCzAJBgNVBAYTAlNFMRIw +EAYDVQQIDAlTdG9ja2hvbG0xEjAQBgNVBAcMCVN0b2NraG9sbTESMBAGA1UECgwJ +TXlPcmdOYW1lMRkwFwYDVQQLDBBNeUludGVybWVkaWF0ZUNBMR4wHAYDVQQDDBVj +bGllbnQtbm8tZGlzdC1wb2ludHMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQCYQqNF7o20tEwyXphDgtwkZ628baYzQoCmmaufR+5SPQWdTN+GFeApv0dP +4y/ncZV24rgButMo73e4+wPsILwSGhaVIU0mMaCmexyC4W6INBkQsVB5FAd/YM0O +gdxS6A42h9HZTaAJ+4ftgFdOOHiP3lwicXeIYykAE7Y5ikxlnHgi8p1PTLowN4Q+ +AjuXChRzmU16cUEAevZKkTVf7VCcK66aJsxBsxfykkGHhc6qLqmlMt6Te6DPCi/R +KP/kARTDWNEkp6qtpvzByYFYAKPSZxPuryajAC3RLuGNkVSB+PZ6NnZW6ASeTdra +Lwuiwsi5XPBeFb0147naQOBzSGG/AgMBAAGjggEHMIIBAzAJBgNVHRMEAjAAMBEG +CWCGSAGG+EIBAQQEAwIFoDBBBglghkgBhvhCAQ0ENBYyT3BlblNTTCBHZW5lcmF0 +ZWQgQ2xpZW50IENlcnRpZmljYXRlIChubyBDUkwgaW5mbykwHQYDVR0OBBYEFBiV +sjDe46MixvftT/wej1mxGuN7MB8GA1UdIwQYMBaAFExwhjsVUom6tQ+Sqq6xMUET +vnPzMA4GA1UdDwEB/wQEAwIF4DAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUH +AwQwMQYIKwYBBQUHAQEEJTAjMCEGCCsGAQUFBzABhhVodHRwOi8vbG9jYWxob3N0 +Ojk4NzcwDQYJKoZIhvcNAQELBQADggIBAKBEnKYVLFtZb3MI0oMJkrWBssVCq5ja +OYomZ61I13QLEeyPevTSWAcWFQ4zQDF/SWBsXjsrC+JIEjx2xac6XCpxcx3jDUgo +46u/hx2rT8tMKa60hW0V1Dk6w8ZHiCe94BlFLsWFKnn6dVzoJd2u3vgUaleh3uxF 
+hug8XY+wmHd36rO0kVe3DrsqdIdOfhMiJLDxU0cBA79vI5kCvqB8DIwCWtOzkA82 +EPl3Iws5NPhuFAR9u0xOQu0akzmSJFcEGLZ4qfatHD/tZGRduyFvMKy5iIeMzuEs +2etm01tfLHqgKGOKp5LjPm7Aoac/GeVoTvctGF+wayvOuYE7inlGZToz3kQMMzHZ +ZGBBgOhXbR2y74QoFv6DUqmmTRbGfiLYyErA5r881ntgciQi02xrGjoAFntvKb+H +HNB22Qprz16OmdC9dJKF2RhO6Cketdhv65wFWw6xlhRMCWYPY3CI8tWkxS4A4yit +RZQZg3yaeHXMaCAu5HxuqAQXKGjz+7w7N6diwbT7o7CfKk8iHUrGfkQ5nCS0GZ1r +lU1vgKtdzVvJ6HmBrCRcdNqh/L/wdIltwI/52j+TKRtELM1qHuLAYmhcRBW+2wuH +ewaNA9KEgEk6JC+iR8uOBi0ZLkMIm47j+ZLJRJVUfgkVEEFjyiYSFfpwwcgT+/Aw +EczVZOdUEbDM +-----END CERTIFICATE----- diff --git a/apps/emqx/test/emqx_crl_cache_SUITE_data/client-no-dist-points.key.pem b/apps/emqx/test/emqx_crl_cache_SUITE_data/client-no-dist-points.key.pem new file mode 100644 index 000000000..02b865f5e --- /dev/null +++ b/apps/emqx/test/emqx_crl_cache_SUITE_data/client-no-dist-points.key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCYQqNF7o20tEwy +XphDgtwkZ628baYzQoCmmaufR+5SPQWdTN+GFeApv0dP4y/ncZV24rgButMo73e4 ++wPsILwSGhaVIU0mMaCmexyC4W6INBkQsVB5FAd/YM0OgdxS6A42h9HZTaAJ+4ft +gFdOOHiP3lwicXeIYykAE7Y5ikxlnHgi8p1PTLowN4Q+AjuXChRzmU16cUEAevZK +kTVf7VCcK66aJsxBsxfykkGHhc6qLqmlMt6Te6DPCi/RKP/kARTDWNEkp6qtpvzB +yYFYAKPSZxPuryajAC3RLuGNkVSB+PZ6NnZW6ASeTdraLwuiwsi5XPBeFb0147na +QOBzSGG/AgMBAAECggEACSMuozq+vFJ5pCgzIRIQXgruzTkTWU4rZFQijYuGjN7m +oFsFqwlTC45UHEI5FL2nR5wxiMEKfRFp8Or3gEsyni98nXSDKcCesH8A5gXbWUcv +HeZWOv3tuUI47B709vDAMZuTB2R2L0MuFB24n5QaACBLDTIcB05UHpIQRIG9NffH +MhxqFB2kuakp67VekYGZkBCNkqfL3VQZIGRpQC8SvpnRXELqZgI4MyJgvkK6myWj +Vtpwm8YiOQoJHJx4raoVfS2NWTsCwL0M0aXMMtmM2QfMP/xB9OifxnmDDBs7Tie8 +0Wri845xLTCYthaU8B06rhoQdKXoqKmQMoF2doPm8QKBgQDN+0E0PtPkyxIho8pV +CsQnmif91EQQqWxOdkHbE96lT0UKu6ziBSbB4ClRHYil5c8p7INxRpj7pruOY3Kw +MAcacIMMBNhLBJL4R0hr/pwr18WOZxCIMcLHTaCfbVqL71TKp4/6C+GexZfaYJ46 +IZEpLU5RPmD4f9MPIDDm6KcPxwKBgQC9O9TOor93g+A4sU54CGOqvVDrdi5TnGF8 +YdimvUsT20gl2WGX5vq3OohzZi7U8FuxKHWpbgh2efqGLcFsRNFZ/T0ZXX4DDafN +Gzyu/DMVuFO4ccgFJNnl45w3/yFG40kL6yS8kss/iEYu550/uOZ1FjH+kJ0vjV6G +JD8q0PgOSQKBgG2i9cLcSia2nBEBwFlhoKS/ndeyWwRPWZGtykHUoqZ0ufgLiurG ++SkqqnM9eBVta8YR2Ki7fgQ8bApPDqWO+sjs6CPGlGXhqmSydG7fF7sSX1n7q8YC +Tn2M6RjSuOZQ3l37sFvUZSQAYmJfGPkyErTLI6uEu1KpnuqnJMBTR1DTAoGAIGQn +bx9oirqmHM4s0lsNRGKXgVZ/Y4x3G2VcQl5QhZuZY/ErxWaiL87zIF2zUnu6Fj8I +tPHCvRTwDxux6ih1dWPlm3vnX/psaK1q28ELtYIRwpanWEoQiktFqEghmBK7pDCh +3y15YOygptK6lfe+avhboml6nnMiZO+7aEbQzxECgYALuUM4fo1dQYmYuZIqZoFJ +TXGyzMkNGs61SMiD6mW6XgXj5h5T8Q0MdpmHkwsm+z9A/1of5cxkE6d8HCCz+dt5 +tnY7OC0gYB1+gDld8MZgFgP6k0qklreLVhzEz11TbMldifa1EE4VjUDG/NeAEtbq +GbLaw0NhGJtRCgL9Bc7i7g== +-----END PRIVATE KEY----- diff --git a/apps/emqx/test/emqx_crl_cache_SUITE_data/client-revoked.cert.pem b/apps/emqx/test/emqx_crl_cache_SUITE_data/client-revoked.cert.pem new file mode 100644 index 000000000..d0a23bf2f --- /dev/null +++ b/apps/emqx/test/emqx_crl_cache_SUITE_data/client-revoked.cert.pem @@ -0,0 +1,32 @@ +-----BEGIN CERTIFICATE----- +MIIFnDCCA4SgAwIBAgICEAIwDQYJKoZIhvcNAQELBQAwazELMAkGA1UEBhMCU0Ux +EjAQBgNVBAgMCVN0b2NraG9sbTESMBAGA1UECgwJTXlPcmdOYW1lMRkwFwYDVQQL +DBBNeUludGVybWVkaWF0ZUNBMRkwFwYDVQQDDBBNeUludGVybWVkaWF0ZUNBMB4X +DTIzMDExMjEzMDgxNloXDTMzMDQxOTEzMDgxNlowfTELMAkGA1UEBhMCU0UxEjAQ +BgNVBAgMCVN0b2NraG9sbTESMBAGA1UEBwwJU3RvY2tob2xtMRIwEAYDVQQKDAlN +eU9yZ05hbWUxGTAXBgNVBAsMEE15SW50ZXJtZWRpYXRlQ0ExFzAVBgNVBAMMDmNs +aWVudC1yZXZva2VkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAs+R6 +PDtIxVlUoLYbDBbaVcxgoLjnWcvqL8wSqyWuqi/Y3cjuNYCziR9nR5dWajtkBjzJ +HyhgAr6gBVSRt4RRmDXoOcprK3GcpowAr65UAmC4hdH0af6FdKjKCnFw67byUg52 
+f7ueXZ6t/XuuKxlU/f2rjXVwmmnlhBi5EHDkXxvfgWXJekDfsPbW9j0kaCUWCpfj +rzGbfkXqrPkslO41PYlCbPxoiRItJjindFjcQySYvRq7A2uYMGsrxv4n3rzo5NGt +goBmnGj61ii9WOdopcFxKirhIB9zrxC4x0opRfIaF/n1ZXk6NOnaDxu1LTZ18wfC +ZB979ge6pleeKoPf7QIDAQABo4IBNjCCATIwCQYDVR0TBAIwADARBglghkgBhvhC +AQEEBAMCBaAwMwYJYIZIAYb4QgENBCYWJE9wZW5TU0wgR2VuZXJhdGVkIENsaWVu +dCBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUQeItXr3nc6CZ++G9UCoq1YlQ9oowHwYD +VR0jBBgwFoAUTHCGOxVSibq1D5KqrrExQRO+c/MwDgYDVR0PAQH/BAQDAgXgMB0G +A1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDBDA7BgNVHR8ENDAyMDCgLqAshipo +dHRwOi8vbG9jYWxob3N0Ojk4NzgvaW50ZXJtZWRpYXRlLmNybC5wZW0wMQYIKwYB +BQUHAQEEJTAjMCEGCCsGAQUFBzABhhVodHRwOi8vbG9jYWxob3N0Ojk4NzcwDQYJ +KoZIhvcNAQELBQADggIBAIFuhokODd54/1B2JiNyG6FMq/2z8B+UquC2iw3p2pyM +g/Jz4Ouvg6gGwUwmykEua06FRCxx5vJ5ahdhXvKst/zH/0qmYTFNMhNsDy76J/Ot +Ss+VwQ8ddpEG3EIUI9BQxB3xL7z7kRQzploQjakNcDWtDt1BmN05Iy2vz4lnYJky +Kss6ya9jEkNibHekhxJuchJ0fVGlVe74MO7RNDFG7+O3tMlxu0zH/LpW093V7BI2 +snXNAwQBizvWTrDKWLDu5JsX8KKkrmDtFTs9gegnxDCOYdtG5GbbMq+H1SjWUJPV +wiXTF8/eE02s4Jzm7ZAxre4bRt/hAg7xTGmDQ1Hn+LzLn18I9LaW5ZWqSwwpgv+g +Z/jiLO9DJ/y525Cl7DLCpSFoDTWlQXouKhcgALcVay/cXCsZ3oFZCustburLiJi/ +zgBeEk1gVpwljriJLeZifyfWtJx6yfgB/h6fid8XLsGRD+Yc8Tzs8J1LIgi+j4ZT +UzKX3B85Kht/dr43UDMtWOF3edkOMaJu7rcg5tTsK+LIyHtXvebKPVvvA9f27Dz/ +4gmhAwwqS87Xv3FMVhZ03DNOJ6XAF+T6OTEqwYs+iK56IMSl1Jy+bCzo0j5jZVbl +XFwGxUHzM7pfM6PDx657oUxG1QwM/fIWA18F+kY/yigXxq6pYMeAiQsPanOThgHp +-----END CERTIFICATE----- diff --git a/apps/emqx/test/emqx_crl_cache_SUITE_data/client-revoked.key.pem b/apps/emqx/test/emqx_crl_cache_SUITE_data/client-revoked.key.pem new file mode 100644 index 000000000..0b7698da9 --- /dev/null +++ b/apps/emqx/test/emqx_crl_cache_SUITE_data/client-revoked.key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCz5Ho8O0jFWVSg +thsMFtpVzGCguOdZy+ovzBKrJa6qL9jdyO41gLOJH2dHl1ZqO2QGPMkfKGACvqAF +VJG3hFGYNeg5ymsrcZymjACvrlQCYLiF0fRp/oV0qMoKcXDrtvJSDnZ/u55dnq39 +e64rGVT9/auNdXCaaeWEGLkQcORfG9+BZcl6QN+w9tb2PSRoJRYKl+OvMZt+Reqs ++SyU7jU9iUJs/GiJEi0mOKd0WNxDJJi9GrsDa5gwayvG/ifevOjk0a2CgGacaPrW +KL1Y52ilwXEqKuEgH3OvELjHSilF8hoX+fVleTo06doPG7UtNnXzB8JkH3v2B7qm +V54qg9/tAgMBAAECggEAAml+HRgjZ+gEezot3yngSBW7NvR7v6e9DmKDXpGdB7Go +DANBdGyzG5PU9/AGy9pbgzzl6nnJXcgOD7w8TvRifrK8WCgHa1f05IPMj458GGMR +HlQ8HX647eFEgkLWo4Z6tdB1VM2geDtkNFmn8nJ+wgAYgIdSWPOyDOUi+B43ZbIN +eaLWkP2fiX9tcJp41cytW+ng2YIm4s90Nt4FJPNBNzOrhVm35jciId02MmEjCEnr +0YbK9uoMDC2YLg8vhRcjtsUHV2rREkwEAQj8nCWvWWheIwk943d6OicGAD/yebpV +PTjtlZlpIbrovfvuMcoTxJg3WS8LTg/+cNWAX5a3eQKBgQDcRY7nVSJusYyN0Bij +YWc9H47wU+YucaGT25xKe26w1pl6s4fmr1Sc3NcaN2iyUv4BuAvaQzymHe4g9deU +D9Ws/NCQ9EjHJJsklNyn2KCgkSp7oPKhPwyl64XfPdV2gr5AD6MILf7Rkyib5sSf +1WK8i25KatT7M4mCtrBVJYHNpQKBgQDREjwPIaQBPXouVpnHhSwRHfKD0B1a2koq +4VE6Fnf3ogkiGfV9kqXwIfPHL0tfotFraM3FFmld8RcxhKUPr4oj+K9KTxmMD9lm +9Hal0ANXYmHs5a1iHyoNmTpBGHALWLT9fCoeg+EIYabi2+P1c7cDIdUPkEzo4GmI +nCIpv7hGqQKBgEFUC+8GK+EinWoN1tDV+ZWCP5V9fJ43q1E7592bQBgIfZqLlnnP +dEvVn6Ix3sZMoPMHj9Ra7qjh5Zc28ooCLEBS9tSW7uLJM44k7FCHihQ1GaFy+aLj +HTA0aw7rutycKCq9uH+bjKDBgWDDj3tMAS2kOMCvcJ1UCquO3TtTlWzVAoGBAIDN +8yJ/X0NEVNnnkKZTbWq+QILk3LD0e20fk6Nt5Es0ENxpkczjZEglIsM8Z/trnAnI +b71UqWWu+tMPHYIka77tn1DwmpSnzxCW2+Ib3XMgsaP5fHBPMuFd3X3tSFo1NIxW +yrwyE5nOT7rELhUyTTYoydLk2/09BMedKY7/BtDBAoGAXeX1pX74K1i/uWyYKwYZ +sskRueSo9whDJuZWgNiUovArr57eA+oA+bKdFpiE419348bkFF8jNoGFQ6MXMedD +LqHAYIj+ZPIC4+rObHqO5EaIyblgutwx3citkQp7HXDBxojnOKA9mKQXj1vxCaL1 +/1fFNJQCzEqwnKwnhI2MJ28= +-----END PRIVATE KEY----- diff --git a/apps/emqx/test/emqx_crl_cache_SUITE_data/client.cert.pem 
b/apps/emqx/test/emqx_crl_cache_SUITE_data/client.cert.pem new file mode 100644 index 000000000..b37d1b0ba --- /dev/null +++ b/apps/emqx/test/emqx_crl_cache_SUITE_data/client.cert.pem @@ -0,0 +1,32 @@ +-----BEGIN CERTIFICATE----- +MIIFljCCA36gAwIBAgICEAEwDQYJKoZIhvcNAQELBQAwazELMAkGA1UEBhMCU0Ux +EjAQBgNVBAgMCVN0b2NraG9sbTESMBAGA1UECgwJTXlPcmdOYW1lMRkwFwYDVQQL +DBBNeUludGVybWVkaWF0ZUNBMRkwFwYDVQQDDBBNeUludGVybWVkaWF0ZUNBMB4X +DTIzMDExMjEzMDgxNloXDTMzMDQxOTEzMDgxNlowdzELMAkGA1UEBhMCU0UxEjAQ +BgNVBAgMCVN0b2NraG9sbTESMBAGA1UEBwwJU3RvY2tob2xtMRIwEAYDVQQKDAlN +eU9yZ05hbWUxGTAXBgNVBAsMEE15SW50ZXJtZWRpYXRlQ0ExETAPBgNVBAMMCE15 +Q2xpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvGuAShewEo8V +/+aWVO/MuUt92m8K0Ut4nC2gOvpjMjf8mhSSf6KfnxPklsFwP4fdyPOjOiXwCsf3 +1QO5fjVr8to3iGTHhEyZpzRcRqmw1eYJC7iDh3BqtYLAT30R+Kq6Mk+f4tXB5Lp/ +2jXgdi0wshWagCPgJO3CtiwGyE8XSa+Q6EBYwzgh3NFbgYdJma4x+S86Y/5WfmXP +zF//UipsFp4gFUqwGuj6kJrN9NnA1xCiuOxCyN4JuFNMfM/tkeh26jAp0OHhJGsT +s3YiUm9Dpt7Rs7o0so9ov9K+hgDFuQw9HZW3WIJI99M5a9QZ4ZEQqKpABtYBl/Nb +VPXcr+T3fQIDAQABo4IBNjCCATIwCQYDVR0TBAIwADARBglghkgBhvhCAQEEBAMC +BaAwMwYJYIZIAYb4QgENBCYWJE9wZW5TU0wgR2VuZXJhdGVkIENsaWVudCBDZXJ0 +aWZpY2F0ZTAdBgNVHQ4EFgQUOIChBA5aZB0dPWEtALfMIfSopIIwHwYDVR0jBBgw +FoAUTHCGOxVSibq1D5KqrrExQRO+c/MwDgYDVR0PAQH/BAQDAgXgMB0GA1UdJQQW +MBQGCCsGAQUFBwMCBggrBgEFBQcDBDA7BgNVHR8ENDAyMDCgLqAshipodHRwOi8v +bG9jYWxob3N0Ojk4NzgvaW50ZXJtZWRpYXRlLmNybC5wZW0wMQYIKwYBBQUHAQEE +JTAjMCEGCCsGAQUFBzABhhVodHRwOi8vbG9jYWxob3N0Ojk4NzcwDQYJKoZIhvcN +AQELBQADggIBAE0qTL5WIWcxRPU9oTrzJ+oxMTp1JZ7oQdS+ZekLkQ8mP7T6C/Ew +6YftjvkopnHUvn842+PTRXSoEtlFiTccmA60eMAai2tn5asxWBsLIRC9FH3LzOgV +/jgyY7HXuh8XyDBCDD+Sj9QityO+accTHijYAbHPAVBwmZU8nO5D/HsxLjRrCfQf +qf4OQpX3l1ryOi19lqoRXRGwcoZ95dqq3YgTMlLiEqmerQZSR6iSPELw3bcwnAV1 +hoYYzeKps3xhwszCTz2+WaSsUO2sQlcFEsZ9oHex/02UiM4a8W6hGFJl5eojErxH +7MqaSyhwwyX6yt8c75RlNcUThv+4+TLkUTbTnWgC9sFjYfd5KSfAdIMp3jYzw3zw +XEMTX5FaLaOCAfUDttPzn+oNezWZ2UyFTQXQE2CazpRdJoDd04qVg9WLpQxLYRP7 +xSFEHulOPccdAYF2C45yNtJAZyWKfGaAZIxrgEXbMkcdDMlYphpRwpjS8SIBNZ31 +KFE8BczKrg2qO0ywIjanPaRgrFVmeSvBKeU/YLQVx6fZMgOk6vtidLGZLyDXy0Ff +yaZSoj+on++RDz1IXb96Y8scuNlfcYI8QeoNjwiLtf80BV8SRJiG4e/jTvMf/z9L +kWrnDWvx4xkUmxFg4TK42dkNp7sEYBTlVVq9fjKE92ha7FGZRqsxOLNQ +-----END CERTIFICATE----- diff --git a/apps/emqx/test/emqx_crl_cache_SUITE_data/client.key.pem b/apps/emqx/test/emqx_crl_cache_SUITE_data/client.key.pem new file mode 100644 index 000000000..2e767d81f --- /dev/null +++ b/apps/emqx/test/emqx_crl_cache_SUITE_data/client.key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQC8a4BKF7ASjxX/ +5pZU78y5S33abwrRS3icLaA6+mMyN/yaFJJ/op+fE+SWwXA/h93I86M6JfAKx/fV +A7l+NWvy2jeIZMeETJmnNFxGqbDV5gkLuIOHcGq1gsBPfRH4qroyT5/i1cHkun/a +NeB2LTCyFZqAI+Ak7cK2LAbITxdJr5DoQFjDOCHc0VuBh0mZrjH5Lzpj/lZ+Zc/M +X/9SKmwWniAVSrAa6PqQms302cDXEKK47ELI3gm4U0x8z+2R6HbqMCnQ4eEkaxOz +diJSb0Om3tGzujSyj2i/0r6GAMW5DD0dlbdYgkj30zlr1BnhkRCoqkAG1gGX81tU +9dyv5Pd9AgMBAAECggEAAifx6dZKIeNkQ8OaNp5V2IKIPSqBOV4/h/xKMkUZXisV +eDmTCf8du0PR7hfLqrt9xYsGDv+6FQ1/8K231l8qR0tP/6CTl/0ynM4qqEAGeFXN +3h2LvM4liFbdjImechrcwcnVaNKg/DogT5zHUYSMtB/rokaG0VBO3IX/+SGz0aXi +LOLAx6SPaLOVX9GYUCiigTSEDwaQA+F3F6J2fR4u8PrXo+OQUqxjQ/fGXWp+4IfA +6djlpvzO2849/WPB1tL20iLXJlL2OL0UgQNtbKWTjexMe+wgCR5BzCwTyPsQvMwX +YOQrTOwgF3b6O+gLks5wSRT0ivq1sKgzA534+X4M+wKBgQDirPTLlrYobOO8KUpV +LOJU8x9leiRNU9CZWrW/mOw/BXGXikqNWvgL595vvADsjYciuRxSqEE7lClB8Pp9 +20TMlES9orx7gdoQJCodpNV1BuBJhE9YtUiXzWAj+7m3D9LsXM1ewW/2A7Vvopj3 +4zKY7uHAFlo3nXwLOfChG5/i9wKBgQDUy5fPFa58xmn7Elb6x4vmUDHg6P4pf75E 
+XHRQvNA8I7DTrpqfcsF1N4WuJ3Lm//RSpw7bnyqP20GoEfGHu/iCUPf29B7CuXhO +vvD+I8uPdn8EcKUBWV+V0xNQN/gCe0TzrEjAkZcO2Lq0j93R8HVl3BbowxgRvQV9 +GmxQG/boKwKBgFeV8uSzsGEAaiKrZbBxrmaappgEUQCcES8gULfes/JJ/TFL2zCx +ZMTc7CMKZuUAbqXpFtuNbd9CiYqUPYXh8ryF0eXgeqnSa9ruzmMz7NLSPFnLyQkC +yzD0x2BABOuKLrrrxOMHJWbO2g1vq2GlJUjYjNw3BtcUf/iqg6MM1IPTAoGAWYWJ +SSqS7JVAcsrFYt1eIrdsNHVwr565OeM3X9v/Mr3FH1jeXeQWNSz1hU29Ticx7y+u +1YBBlKGmHoHl/bd7lb9ggjkzU7JZRa+YjSIb+i/cwc5t7IJf7xUMk/vnz4tyd5zs +Qm89gJZ2/Y1kwXSKvx53WNbyokvGKlpaZN1O418CgYACliGux77pe4bWeXSFFd9N +50ipxDLVghw1c5AiZn25GR5YHJZaV4R0wmFcHdZvogLKi0jDMPvU69PaiT8eX/A1 +COkxv7jY1vtKlEtb+gugMjMN8wvb2va4kyFamjqnleiZlBSqIF/Y17wBoMvaWgZ0 +bEPCN//ts5hBwgb1TwGrrg== +-----END PRIVATE KEY----- diff --git a/apps/emqx/test/emqx_crl_cache_SUITE_data/client1.cert.pem b/apps/emqx/test/emqx_crl_cache_SUITE_data/client1.cert.pem new file mode 100644 index 000000000..4e41c15bb --- /dev/null +++ b/apps/emqx/test/emqx_crl_cache_SUITE_data/client1.cert.pem @@ -0,0 +1,32 @@ +-----BEGIN CERTIFICATE----- +MIIFljCCA36gAwIBAgICEAowDQYJKoZIhvcNAQELBQAwazELMAkGA1UEBhMCU0Ux +EjAQBgNVBAgMCVN0b2NraG9sbTESMBAGA1UECgwJTXlPcmdOYW1lMRkwFwYDVQQL +DBBNeUludGVybWVkaWF0ZUNBMRkwFwYDVQQDDBBNeUludGVybWVkaWF0ZUNBMB4X +DTIzMDMxNjIwMjAzMloXDTMzMDYyMTIwMjAzMlowdjELMAkGA1UEBhMCU0UxEjAQ +BgNVBAgMCVN0b2NraG9sbTESMBAGA1UEBwwJU3RvY2tob2xtMRIwEAYDVQQKDAlN +eU9yZ05hbWUxGTAXBgNVBAsMEE15SW50ZXJtZWRpYXRlQ0ExEDAOBgNVBAMMB0Ns +aWVudDEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDcDhlEvUIYc9uA +ocOBXt5thKrovs+8V0Eus/WrHMTKBk0Kw4X+7HBaRBoZj2sZpYfN63lVaO75kW4I +uJuorGj5PAXYWJj+4uAsCc95xAN/liCuHJnxE5togWVt8W+z0Zll98RIpiCohqiE +FLDL4X6FREL07GLgQZ/BFORvAwU+Gog05AFh43iZDnJl8MmrG2HBSRXtSZ6vQj9A +NrOSqz5eK4YIHEEsgwTWQmhtNwu3Y+GzrAPWCA4TeYrSRwIrnGh20fOWXkAMldS4 +eRXmBztEsXMGqbe6oYO1QPYOlmoGO8EaaDPJ2sFIuM0zn98Alq3kCnRhM5Bi9RpJ +7IpudIopAgMBAAGjggE3MIIBMzAJBgNVHRMEAjAAMBEGCWCGSAGG+EIBAQQEAwIF +oDAzBglghkgBhvhCAQ0EJhYkT3BlblNTTCBHZW5lcmF0ZWQgQ2xpZW50IENlcnRp +ZmljYXRlMB0GA1UdDgQWBBQoIuXq3wG6JEzAEj9wPe7am0OVgjAfBgNVHSMEGDAW +gBRMcIY7FVKJurUPkqqusTFBE75z8zAOBgNVHQ8BAf8EBAMCBeAwHQYDVR0lBBYw +FAYIKwYBBQUHAwIGCCsGAQUFBwMEMDwGA1UdHwQ1MDMwMaAvoC2GK2h0dHA6Ly9s +b2NhbGhvc3Q6OTg3OC9pbnRlcm1lZGlhdGUxLmNybC5wZW0wMQYIKwYBBQUHAQEE +JTAjMCEGCCsGAQUFBzABhhVodHRwOi8vbG9jYWxob3N0Ojk4NzcwDQYJKoZIhvcN +AQELBQADggIBAHqKYcwkm3ODPD7Mqxq3bsswSXregWfc8tqfIBc5FZg2F+IzhxcJ +kINB0lmcNdLALK6ka0sDs1Nrj1KB96NcHUqE+WY/qPS1Yksr34yFatb1ddlKQ9HK +VRrIsi0ZfjBpHpvoQ0GsLeyRKm7iN/Fm5H9u8rw6RBu0Oe/l20FVSQIDzldYw51L +uV/E9No8ZhdQ2Dffujs8madI7b7I1NMXS+Z1pZ+gYrz6O60tDEprE+rYuYWypURr +fK+DnLLl+KQ+eekTPynw7LRpFzI/1cOMmd4BRnsBHCbCObfNp7WPasemZOEXGIlZ +CQwZS62DYOJE4u4Nz5pSF+JgXfr6X/Im6Y1SV900xVHfoL0GpFDI9k+0Y5ncHfSH ++V9HlRWB3zqQF+yla32XOpBbER0vFDH52gp8/o1ZGg7rr6KrP4QKxnqywNLiAPDX +txaAykZhON7uG8j+Lbjx5Ik91NRn9Fd5NH/vtT33a4uig2TP9EWd7EPcD2z8ONuD +yiK3S37XAnmSKKX4HcCpEb+LedtqQo/+sqWyWXkpKdpkUSozvcYS4J/ob3z9N2IE +qIH5I+Mty1I4EB4W89Pem8DHNq86Lt0Ea6TBtPTV8NwR5aG2vvLzb5lNdpANXYcp +nGr57mTWaHnQh+yqgy66J++k+WokWkAkwE989AvUfNoQ+Jr6cTH8nKo2 +-----END CERTIFICATE----- diff --git a/apps/emqx/test/emqx_crl_cache_SUITE_data/client1.key.pem b/apps/emqx/test/emqx_crl_cache_SUITE_data/client1.key.pem new file mode 100644 index 000000000..b355a3814 --- /dev/null +++ b/apps/emqx/test/emqx_crl_cache_SUITE_data/client1.key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDcDhlEvUIYc9uA +ocOBXt5thKrovs+8V0Eus/WrHMTKBk0Kw4X+7HBaRBoZj2sZpYfN63lVaO75kW4I +uJuorGj5PAXYWJj+4uAsCc95xAN/liCuHJnxE5togWVt8W+z0Zll98RIpiCohqiE 
+FLDL4X6FREL07GLgQZ/BFORvAwU+Gog05AFh43iZDnJl8MmrG2HBSRXtSZ6vQj9A +NrOSqz5eK4YIHEEsgwTWQmhtNwu3Y+GzrAPWCA4TeYrSRwIrnGh20fOWXkAMldS4 +eRXmBztEsXMGqbe6oYO1QPYOlmoGO8EaaDPJ2sFIuM0zn98Alq3kCnRhM5Bi9RpJ +7IpudIopAgMBAAECggEARcly2gnrXDXh9vlWN0EO6UyZpxZcay6AzX7k+k81WZyF +8lPvutjhCL9wR4rkPE3ys6tp31xX7W3hp4JkWynSYLhYYjQ20R7CWTUDR2qScXP7 +CTyo1XuSXaIruKJI+o4OR/g7l46X7NpHtxuYtg/dQAZV9bbB5LzrHSCzEUGz9+17 +jV//cBgLBiMdlbdLuQoGt4NQpBkNrauBVFq7Nq648uKkICmUo3Bzn/dfn3ehB+Zc ++580S+tawYd224j19tFQmd5oK8tfjqKuHenNGjp/gsRoY86N7qAtc3VIQ0yjE6ez +tgREo/ftCb8kGfwRJOAQIeeDamBv+FWNT6QzcOtbwQKBgQDzWhY9BUgI8JVzjYg0 +oWfU90On81BtckKsEo//8MTlgwOD2PnUF0hTZF3RcSPYT+HybouTqHT8EOLBAzqy +1+koH06MnAc/Y2ipaAe2fGaVH3SuXAsV/b8VcWWl4Qx7tYJDhE7sKmdl3/+jHZ7A +hZQzgOQnxxCANBo3pwF9KboDbwKBgQDnfglSpgYdGzFpWp1hZnPl2RKIfX/4M2z2 +s+hVN1tp+1VySPrBRUC3J6hKPQUzzeUzPICclHOnO+kP7jAos/rlJ9VcNdAQTbTL +7Ds9Em1KJTBphE038YbW3e93rydQpukXh50wRD9RI/3F3A/1rKhab92DXZZr6Wqu +JouhNV8f5wKBgQCLQ3XQi/Iyc4QDse5NuETUgoCsX7kaOTZghOr1nFMByV08mfI2 +5vAUES8DigzqYKS8eXjVEqWIDx3FOVThPmCG/ouUOkKHixs9P3SSgVSvaGX81l3d +wu4UlmWGbWkYbsJSYyhLTOUJTwxby7qrEIbEhrGK9gfCZo7OZHucpkF2bwKBgFhl +1qWK5JbExY+XnLWO6/7/b4ZTdkSPTrK+bJ/t7aiA41Yq7CZVjarjJ+6BcrUfkMCK +AArK3Yck55C/wgApCkvrdBwsKHGxWrLsWIqvuLAxl1UTwnD0eCsgwMsRRZAUzLnB +fZLq3MrdVZDywd1suzUdtpbta/11OtmZuoQq31JNAoGAIzmevuPaUZRqnjDpLXAm +Bo11q6CunhG5qZX4wifeZ9Fp5AaQu97F36igZ5/eDxFkDCrFRq6teMiNjRQZSLA3 +5tMBkq6BtN2Ozzm/6D135c4BF14ODHqAMPUy4sXbw5PS/kRFs4fKAH/+LcAOPgyI +N/jJIY1LfM7PzrG2NdyscMU= +-----END PRIVATE KEY----- diff --git a/apps/emqx/test/emqx_crl_cache_SUITE_data/client2.cert.pem b/apps/emqx/test/emqx_crl_cache_SUITE_data/client2.cert.pem new file mode 100644 index 000000000..0cba3fb26 --- /dev/null +++ b/apps/emqx/test/emqx_crl_cache_SUITE_data/client2.cert.pem @@ -0,0 +1,32 @@ +-----BEGIN CERTIFICATE----- +MIIFljCCA36gAwIBAgICEAswDQYJKoZIhvcNAQELBQAwazELMAkGA1UEBhMCU0Ux +EjAQBgNVBAgMCVN0b2NraG9sbTESMBAGA1UECgwJTXlPcmdOYW1lMRkwFwYDVQQL +DBBNeUludGVybWVkaWF0ZUNBMRkwFwYDVQQDDBBNeUludGVybWVkaWF0ZUNBMB4X +DTIzMDMxNjIwMjAzMloXDTMzMDYyMTIwMjAzMlowdjELMAkGA1UEBhMCU0UxEjAQ +BgNVBAgMCVN0b2NraG9sbTESMBAGA1UEBwwJU3RvY2tob2xtMRIwEAYDVQQKDAlN +eU9yZ05hbWUxGTAXBgNVBAsMEE15SW50ZXJtZWRpYXRlQ0ExEDAOBgNVBAMMB0Ns +aWVudDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDFLcCjzNhfY6Sk +2nSdrB/6UPPeTCCH5NBBVLvu1hdlqLS4qEdq8+EjyMDZTtspRtYPkBfjpOrlBWUO +lKyxw2mZOjZ8iWvd4sJaAI/6KZl5X0Rdsg1RjzW03kUdLx9XJCyrYY0YFrT1dgJo +Ih56jk2SJX7wrz0NCJ05VPIdpaOF6CcziA+YhdVHcE6xyHagsYI0JdDWxFZrl9zT +LyhaDgBUN/yUQBnxKzxs8TMT4YVSi73ouh5IP9Xvs52hd6HO8ZGVr+YthQZKo95p +OlwFF+AQWxdDIKoPYUPFo8XMOXvOeQ9iUJarxrYSrelLXtGkaGLBolAvqo/YKE7j +rcJWjRGHAgMBAAGjggE3MIIBMzAJBgNVHRMEAjAAMBEGCWCGSAGG+EIBAQQEAwIF +oDAzBglghkgBhvhCAQ0EJhYkT3BlblNTTCBHZW5lcmF0ZWQgQ2xpZW50IENlcnRp +ZmljYXRlMB0GA1UdDgQWBBTOo9YSgx1h5k/imP7nOfRfzQrRxjAfBgNVHSMEGDAW +gBRMcIY7FVKJurUPkqqusTFBE75z8zAOBgNVHQ8BAf8EBAMCBeAwHQYDVR0lBBYw +FAYIKwYBBQUHAwIGCCsGAQUFBwMEMDwGA1UdHwQ1MDMwMaAvoC2GK2h0dHA6Ly9s +b2NhbGhvc3Q6OTg3OC9pbnRlcm1lZGlhdGUyLmNybC5wZW0wMQYIKwYBBQUHAQEE +JTAjMCEGCCsGAQUFBzABhhVodHRwOi8vbG9jYWxob3N0Ojk4NzcwDQYJKoZIhvcN +AQELBQADggIBAFo91lLqjPY67Wmj2yWxZuTTuUwXdXXUQxL6sEUUnfkECvRhNyBA +eCHkfVopNbXZ5tdLfsUvXF0ulaC76GCK/P7gHOG9D/RJX/85VzhuJcqa4dsEEifg +IiKIG7viYxSA6HFXuyzHvwNco3FqTBHbY46lKf1lWRVLhiAtcwcyPP34/RWcPfQi +6NZfLyitu5U7Z9XVN5wCp8sg0ayaO5Ib2ejIYuBCUddV1gV//tSDf+rKCgtAbm/X +K64Bf3GdaX3h6EhoqMZ+Z2f4XpKSXTabsWAU44xdVxisI82eo+NwT8KleE65GpOv +nPvr/dLq5fQ6VtHbRL3wWqhzB1VKVCtd8a6RE2k8HVWflU3qgwJ+woF19ed921eq +OZxc+KzjsGFyW1D2fPdgoZFmePadSstIME7qtCNEi7D3im01/1KKzE2m/nosrHeW 
+ePjY2YrXu0w47re/N2kBJL2xRbj+fAjBsfNn9RhvQsWheXG6mgg8w1ac6y72ZA2W +72pWoDkgXQMX5XBBj/zMnmwtrX9zTILFjNGFuWMPYgBRI0xOf2FoqqZ67cQ2yTW/ +1T/6Mp0FSh4cIo/ENiNSdvlt3BIo84EyOm3iHHy28Iv5SiFjF0pkwtXlYYvjM3+R +BeWqlPsVCZXcVC1rPVDzfWZE219yghldY4I3QPJ7dlmszi8eI0HtzhTK +-----END CERTIFICATE----- diff --git a/apps/emqx/test/emqx_crl_cache_SUITE_data/client2.key.pem b/apps/emqx/test/emqx_crl_cache_SUITE_data/client2.key.pem new file mode 100644 index 000000000..29196b1e2 --- /dev/null +++ b/apps/emqx/test/emqx_crl_cache_SUITE_data/client2.key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDFLcCjzNhfY6Sk +2nSdrB/6UPPeTCCH5NBBVLvu1hdlqLS4qEdq8+EjyMDZTtspRtYPkBfjpOrlBWUO +lKyxw2mZOjZ8iWvd4sJaAI/6KZl5X0Rdsg1RjzW03kUdLx9XJCyrYY0YFrT1dgJo +Ih56jk2SJX7wrz0NCJ05VPIdpaOF6CcziA+YhdVHcE6xyHagsYI0JdDWxFZrl9zT +LyhaDgBUN/yUQBnxKzxs8TMT4YVSi73ouh5IP9Xvs52hd6HO8ZGVr+YthQZKo95p +OlwFF+AQWxdDIKoPYUPFo8XMOXvOeQ9iUJarxrYSrelLXtGkaGLBolAvqo/YKE7j +rcJWjRGHAgMBAAECggEABJYUCcyJcnbagytBxfnaNQUuAp8AIypFG3kipq0l5Stk +gGaTJq5F4OTGS4ofRsqeu07IgBSAfqJcJH8toPkDQqfvs6ftO1Mso2UzakMOcP51 +Ywxd91Kjm+LKOyHkHGDirPGnutUg/YpLLrrMvTk/bJHDZCM4i/WP1WTREVFjUgl7 +4L6Y53x2Lk5shJJhv0MzTGaoZzQcW0EbhNH1AI6MBv5/CN5m/7/+HCPlHSNKnozl +o3PXD6l0XNfOY2Hi6MgS/Vd70s3VmDT9UCJNsDjdFpKNHmI7vr9FScOLN8EwbqWe +maFa0TPknmPDmVjEGMtgGlJWL7Sm0MpNW+WsEXcDPQKBgQDv3sp0nVML9pxdzX/w +rGebFaZaMYDWmV9w0V1uXYh4ZkpFmqrWkq/QSTGpwIP/x8WH9FBDUZqspLpPBNgG +ft1XhuY34y3hoCxOyRhQcR/1dY+lgCzuN4G4MG3seq/cAXhrmkPljma/iO8KzoRK +Pa+uaKFGHy1vWY2AmOhT20zr4wKBgQDScA3478TFHg9THlSFzqpzvVn5eAvmmrCQ +RMYIZKFWPortqzeJHdA5ShVF1XBBar1yNMid7E7FXqi/P8Oh+E6Nuc7JxyVIJWlV +mcBE1ceTKdZn7A0nuQIaU6hcn7xz/UHmxGur1ZcNQm3diFJ2CPn11lzZlkSZLSCN +V86nndA9DQKBgQCWsUxXPo7xsRhDBdseg/ECyPMdLoRWTTxcT+t2bmRR31FBsQ0q +iDTTkWgV0NAcXJCH/MB/ykB1vXceNVjRm9nKJwFyktI8MLglNsiDoM4HErgPrRqM +/WoNIL+uFNVuTa4tS1jkWjXKlmg2Tc9mJKK92xWWS/frQENZSraKF/eXKQKBgGR9 +ni6CUTTQZgELOtGrHzql8ZFwAj7dH/PE48yeQW0t8KoOWTbhRc4V0pLGmhSjJFSl +YCgJ8JPP4EVz7bgrG1gSou04bFVHiEWYZnh4nhVopTp7Psz5TEfGK2AP5658Ajxx +D/m+xaNPVae0sawsHTGIbE57s8ZyBll41Pa2JfsBAoGBANtS7SOehkflSdry0eAZ +50Ec3CmY+fArC044hQCmXxol5SiTUnXf/OIQH8y+RZUjF4F3PbbrFNLm/J6wuUPw +XUIb4gAL75srakL8DXqyTYfO02aNrFEHhXzMs+GoAnRkFy16IAAFwpjbYSPanfed +PfHCTWz6Y0pGdh1hUJAFV/3v +-----END PRIVATE KEY----- diff --git a/apps/emqx/test/emqx_crl_cache_SUITE_data/client3.cert.pem b/apps/emqx/test/emqx_crl_cache_SUITE_data/client3.cert.pem new file mode 100644 index 000000000..94092fad9 --- /dev/null +++ b/apps/emqx/test/emqx_crl_cache_SUITE_data/client3.cert.pem @@ -0,0 +1,32 @@ +-----BEGIN CERTIFICATE----- +MIIFljCCA36gAwIBAgICEAwwDQYJKoZIhvcNAQELBQAwazELMAkGA1UEBhMCU0Ux +EjAQBgNVBAgMCVN0b2NraG9sbTESMBAGA1UECgwJTXlPcmdOYW1lMRkwFwYDVQQL +DBBNeUludGVybWVkaWF0ZUNBMRkwFwYDVQQDDBBNeUludGVybWVkaWF0ZUNBMB4X +DTIzMDMxNjIwMjAzMloXDTMzMDYyMTIwMjAzMlowdjELMAkGA1UEBhMCU0UxEjAQ +BgNVBAgMCVN0b2NraG9sbTESMBAGA1UEBwwJU3RvY2tob2xtMRIwEAYDVQQKDAlN +eU9yZ05hbWUxGTAXBgNVBAsMEE15SW50ZXJtZWRpYXRlQ0ExEDAOBgNVBAMMB0Ns +aWVudDMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDEOZ6fYNjZDNXX +eOyapHMOMeNeYM3b7vsWXAbiJIt4utVrTS0A+/G640t/U0g8F9jbKgbjEEPtgPJ7 +GltjLWObfqDWKSO2D9/ei2+NauqgiN/HX+dQnSKHob0McXBXvLfrA4tn4braKrbg +p1fZB8bAECuT/bUhVBqWlzrUwDMpqjMJWDab48ixezb2gnc/ePE6wq/d3ecDb0/k +cYWQ0LX4JiQBgaTGhwczyoGfL1z2vx5kJqptK+r0Hc2jNCn6kFvoZUCYjCWgWNxZ +sQk7fObQQkUb/XQyqRaKJBWDyqsNcuK2gOg3LGeolAlgtMiEqGhHv77XdJnJug/w +3OiHpP/7AgMBAAGjggE3MIIBMzAJBgNVHRMEAjAAMBEGCWCGSAGG+EIBAQQEAwIF +oDAzBglghkgBhvhCAQ0EJhYkT3BlblNTTCBHZW5lcmF0ZWQgQ2xpZW50IENlcnRp 
+ZmljYXRlMB0GA1UdDgQWBBRxZFdIkSg6zDZCakXmIest5a6dBzAfBgNVHSMEGDAW +gBRMcIY7FVKJurUPkqqusTFBE75z8zAOBgNVHQ8BAf8EBAMCBeAwHQYDVR0lBBYw +FAYIKwYBBQUHAwIGCCsGAQUFBwMEMDwGA1UdHwQ1MDMwMaAvoC2GK2h0dHA6Ly9s +b2NhbGhvc3Q6OTg3OC9pbnRlcm1lZGlhdGUzLmNybC5wZW0wMQYIKwYBBQUHAQEE +JTAjMCEGCCsGAQUFBzABhhVodHRwOi8vbG9jYWxob3N0Ojk4NzcwDQYJKoZIhvcN +AQELBQADggIBAEntkhiPpQtModUF/ffnxruq+cqopPhIdMXhMD8gtU5e4e7o3EHX +lfZKIbxyw56v6dFPrl4TuHBiBudqIvBCsPtllWKixWvg6FV3CrEeTcg4shUIaJcD +pqv1qHLwS4pue6oau/lb8jv1GuzuBXoMFQwlmiOXO7xXqXjV2GdmkFJCDdB/0BW1 +VHvh0DXgotaxITWKhCpSNB7F7LSvegRwZIAN6JXrLDpue7tgqLqBB1EzpmS6ALbn +uZDdikOs/tGAFB3un/3Gl7jEPL8UGOoSj/H9PUT5AFHrHJDH72+QSXu09agz8RWJ +V939njYFCAxQ8Jt2mOK8BJQDJgPtLfIIb1iYicQV13Eypt8uIUYvp0i0Wq8WxPbq +rOEvQYpcGUsreS5XqZ7y68hgq6ePiR18Fnc3GyTV5o6qT3W7IOvPArTzNV5fFCwM +lx8xSEm+ebJrJPphp6Uc/h8evohvAN8R/Z7FSo9OL6V+F3ywPqWTXaqiIiRc9PS0 +0vxsYZ96EeZY5HzjN6LzHxmkv4KYM5I1qmXlviQlaU+sotp3tzegADlM4K78nUFh +HuXamecEcS73eAgjk+FGqJ9E25B0TLlQMcP6tCKdaUIGn6ZsF5wT87GzqT99wL/5 +foHCYIkyG7ZmAQmoaKBd4q6xqVOUHovmsPza69FuSrsBxoRR39PtAnrY +-----END CERTIFICATE----- diff --git a/apps/emqx/test/emqx_crl_cache_SUITE_data/client3.key.pem b/apps/emqx/test/emqx_crl_cache_SUITE_data/client3.key.pem new file mode 100644 index 000000000..6ede63fd2 --- /dev/null +++ b/apps/emqx/test/emqx_crl_cache_SUITE_data/client3.key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDEOZ6fYNjZDNXX +eOyapHMOMeNeYM3b7vsWXAbiJIt4utVrTS0A+/G640t/U0g8F9jbKgbjEEPtgPJ7 +GltjLWObfqDWKSO2D9/ei2+NauqgiN/HX+dQnSKHob0McXBXvLfrA4tn4braKrbg +p1fZB8bAECuT/bUhVBqWlzrUwDMpqjMJWDab48ixezb2gnc/ePE6wq/d3ecDb0/k +cYWQ0LX4JiQBgaTGhwczyoGfL1z2vx5kJqptK+r0Hc2jNCn6kFvoZUCYjCWgWNxZ +sQk7fObQQkUb/XQyqRaKJBWDyqsNcuK2gOg3LGeolAlgtMiEqGhHv77XdJnJug/w +3OiHpP/7AgMBAAECggEADSe89sig5E63SKAlFXcGw0H2XgqIzDP/TGMnqPvNoYhX +eSXUgxDhBptpB9e9a4RaKwaFxxPjlSXEdYFX9O22YSN1RMMl6Q8Zl9g3edhcDR6W +b7Qbx2x8qj6Rjibnlh8JiFPiaDjN2wUeSDBss/9D98NkKiJ9Ue2YCYmJAOA3B3w9 +2t4Co5+3YrxkdzkvibTQCUSEwHFeB1Nim21126fknMPxyrf+AezRBRc8JNAHqzWb +4QEeMnmIJDOzc3Oh7+P85tNyejOeRm9T7X3EQ0jKXgLYe+HUzXclBQ66b9x9Nc9b +tNn6XkMlLlsQ3f149Th6PtHksH3hM+GF8bMuCp9yxQKBgQDGk0PYPkLqTD8jHjJW +s8wBNhozigZPGaynxdTsD7L6UtDdOl1sSW/jFOj9UIs1duBce9dP1IjFc0jY+Kin +lMLv3qCtk5ZjxRglOoLipR9hdClcM69rDoRZdoQK8KYa+QHcOTSazIp3fnw4gWSX +nscelMfd1rtVP0dOGTuqE/73/QKBgQD8+F5WAi2IOVPHnBxAAOP+6XTs9Ntn1sDi +L5wNgm+QA28aJJ4KRAwdXIc3IFPlHxZI77c2K1L9dKDu9X4UcyZIZYDvGVLuOOt5 +twaRaGuJW03cjbgOWC7rGyfzfZ49YlCZi2YuxERclBkbqgWD9hfa8twUfKNguF2Y +AyiOhohtVwKBgQCJB8zUp7pzhqQ3LrpcHHzWBSi1kjTiVvxPVnSlZfwDRCz/zSv0 +8wRz9tUFIZS/E0ama4tcenTblL+bgpSX+E9BSiclQOiR9su/vQ3fK0Vpccis6LnP +rdflCKT8C68Eg/slppBHloijBzTfpWLuQlJ0JwV5b5ocrKsfGMiUiHH1XQKBgQDg +RnakfEPP7TtY0g+9ssxwOJxAZImM0zmojpsk4wpzvIeovuQap9+xvFHoztFyZhBE +07oz3U8zhE4V7TI9gSVktBEOaf47U914yIqbKd+FJJywODkBBq96I1ZVKn67X0mk +B5GtTrZo+agU/bTsHKdjp0L1KtdSLcJUviAb1Cxp+wKBgDrGqS01CCgxSUwMaZe4 +8HFWp/oMSyVDG9lTSC3uP/VL76zNFI55T3X06Q87hDN3gCJGUOmHzDZ/oCOgM4/S +SU55M4lXeSEdFe84tMXJKOv5JXTkulzBYzATJ5J8DeS/4YZxMKyPDLXX8wgwmU+l +i6Imd3qCPhh5eI3z9eSNDX+6 +-----END PRIVATE KEY----- diff --git a/apps/emqx/test/emqx_crl_cache_SUITE_data/crl.pem b/apps/emqx/test/emqx_crl_cache_SUITE_data/crl.pem new file mode 100644 index 000000000..a119cede2 --- /dev/null +++ b/apps/emqx/test/emqx_crl_cache_SUITE_data/crl.pem @@ -0,0 +1,20 @@ +-----BEGIN X509 CRL----- +MIIDPDCCASQCAQEwDQYJKoZIhvcNAQELBQAwazELMAkGA1UEBhMCU0UxEjAQBgNV +BAgMCVN0b2NraG9sbTESMBAGA1UECgwJTXlPcmdOYW1lMRkwFwYDVQQLDBBNeUlu +dGVybWVkaWF0ZUNBMRkwFwYDVQQDDBBNeUludGVybWVkaWF0ZUNBFw0yMjA3MjAy 
+MDIzNTNaFw0zMjEwMjUyMDIzNTNaMBUwEwICEAIXDTIyMDYxMzEyNDIwNVqgbjBs +MB8GA1UdIwQYMBaAFCuv1TkzC1fSgTfzE1m1u5pRCJsVMDwGA1UdHAQ1MDOgLqAs +hipodHRwOi8vbG9jYWxob3N0Ojk4NzgvaW50ZXJtZWRpYXRlLmNybC5wZW2EAf8w +CwYDVR0UBAQCAhADMA0GCSqGSIb3DQEBCwUAA4ICAQBbWdqRFsIrG6coL6ln1RL+ +uhgW9l3XMmjNlyiYHHNzOgnlBok9xu9UdaVCOKC6GEthWSzSlBY1AZugje57DQQd +RkIJol9am94lKMTjF/qhzFLiSjho8fwZGDGyES5YeZXkLqNMOf6m/drKaI3iofWf +l63qU9jY8dnSrVDkwgCguUL2FTx60v5H9NPxSctQ3VDxDvDj0sTAcHFknQcZbfvY +ZWpOYNS0FAJlQPVK9wUoDxI0LhrWDq5h/T1jcGO34fPT8RUA5HRtFVUevqSuOLWx +WTfTx5oDeMZPJTvHWUcg4yMElHty4tEvtkFxLSYbZqj7qTU+mi/LAN3UKBH/gBEN +y2OsJvFhVRgHf+zPYegf3WzBSoeaXNAJZ4UnRo34P9AL3Mrh+4OOUP++oYRKjWno +pYtAmTrIwEYoLyisEhhZ6aD92f/Op3dIYsxwhHt0n0lKrbTmUfiJUAe7kUZ4PMn4 +Gg/OHlbEDaDxW1dCymjyRGl+3/8kjy7bkYUXCf7w6JBeL2Hw2dFp1Gh13NRjre93 +PYlSOvI6QNisYGscfuYPwefXogVrNjf/ttCksMa51tUk+ylw7ZMZqQjcPPSzmwKc +5CqpnQHfolvRuN0xIVZiAn5V6/MdHm7ocrXxOkzWQyaoNODTq4js8h8eYXgAkt1w +p1PTEFBucGud7uBDE6Ub6A== +-----END X509 CRL----- diff --git a/apps/emqx/test/emqx_crl_cache_SUITE_data/emqx.conf b/apps/emqx/test/emqx_crl_cache_SUITE_data/emqx.conf new file mode 100644 index 000000000..f34ab1456 --- /dev/null +++ b/apps/emqx/test/emqx_crl_cache_SUITE_data/emqx.conf @@ -0,0 +1,12 @@ +crl_cache.refresh_interval = {{ refresh_interval }} +crl_cache.http_timeout = 17s +crl_cache.capacity = {{ cache_capacity }} +listeners.ssl.default { + ssl_options { + keyfile = "{{ test_data_dir }}/server.key.pem" + certfile = "{{ test_data_dir }}/server.cert.pem" + cacertfile = "{{ test_data_dir }}/ca-chain.cert.pem" + verify = verify_peer + enable_crl_check = true + } +} diff --git a/apps/emqx/test/emqx_crl_cache_SUITE_data/emqx_crl_cache_http_server.erl b/apps/emqx/test/emqx_crl_cache_SUITE_data/emqx_crl_cache_http_server.erl new file mode 100644 index 000000000..4e8b989fa --- /dev/null +++ b/apps/emqx/test/emqx_crl_cache_SUITE_data/emqx_crl_cache_http_server.erl @@ -0,0 +1,67 @@ +-module(emqx_crl_cache_http_server). + +-behaviour(gen_server). +-compile([nowarn_export_all, export_all]). + +set_crl(CRLPem) -> + ets:insert(?MODULE, {crl, CRLPem}). + +%%-------------------------------------------------------------------- +%% `gen_server' APIs +%%-------------------------------------------------------------------- + +start_link(Parent, BasePort, CRLPem, Opts) -> + process_flag(trap_exit, true), + stop_http(), + timer:sleep(100), + gen_server:start_link(?MODULE, {Parent, BasePort, CRLPem, Opts}, []). + +init({Parent, BasePort, CRLPem, Opts}) -> + Tab = ets:new(?MODULE, [named_table, ordered_set, public]), + ets:insert(Tab, {crl, CRLPem}), + ok = start_http(Parent, [{port, BasePort} | Opts]), + Parent ! {self(), ready}, + {ok, #{parent => Parent}}. + +handle_call(_Request, _From, State) -> + {reply, ignored, State}. + +handle_cast(_Msg, State) -> + {noreply, State}. + +terminate(_Reason, _State) -> + stop_http(). + +stop(Pid) -> + ok = gen_server:stop(Pid). + +%%-------------------------------------------------------------------- +%% Callbacks +%%-------------------------------------------------------------------- + +start_http(Parent, Opts) -> + {ok, _Pid1} = cowboy:start_clear(http, Opts, #{ + env => #{dispatch => compile_router(Parent)} + }), + ok. + +stop_http() -> + cowboy:stop_listener(http), + ok. + +compile_router(Parent) -> + {ok, _} = application:ensure_all_started(cowboy), + cowboy_router:compile([ + {'_', [{'_', ?MODULE, #{parent => Parent}}]} + ]). 
+ +init(Req, #{parent := Parent} = State) -> + %% assert + <<"GET">> = cowboy_req:method(Req), + [{crl, CRLPem}] = ets:lookup(?MODULE, crl), + Parent ! {http_get, iolist_to_binary(cowboy_req:uri(Req))}, + Reply = reply(Req, CRLPem), + {ok, Reply, State}. + +reply(Req, CRLPem) -> + cowboy_req:reply(200, #{<<"content-type">> => <<"text/plain">>}, CRLPem, Req). diff --git a/apps/emqx/test/emqx_crl_cache_SUITE_data/emqx_just_verify.conf b/apps/emqx/test/emqx_crl_cache_SUITE_data/emqx_just_verify.conf new file mode 100644 index 000000000..8b9549823 --- /dev/null +++ b/apps/emqx/test/emqx_crl_cache_SUITE_data/emqx_just_verify.conf @@ -0,0 +1,12 @@ +node.name = test@127.0.0.1 +node.cookie = emqxsecretcookie +node.data_dir = "{{ test_priv_dir }}" +listeners.ssl.default { + ssl_options { + keyfile = "{{ test_data_dir }}/server.key.pem" + certfile = "{{ test_data_dir }}/server.cert.pem" + cacertfile = "{{ test_data_dir }}/ca-chain.cert.pem" + verify = verify_peer + enable_crl_check = false + } +} diff --git a/apps/emqx/test/emqx_crl_cache_SUITE_data/intermediate-not-revoked.crl.pem b/apps/emqx/test/emqx_crl_cache_SUITE_data/intermediate-not-revoked.crl.pem new file mode 100644 index 000000000..e484b44c0 --- /dev/null +++ b/apps/emqx/test/emqx_crl_cache_SUITE_data/intermediate-not-revoked.crl.pem @@ -0,0 +1,19 @@ +-----BEGIN X509 CRL----- +MIIDJTCCAQ0CAQEwDQYJKoZIhvcNAQELBQAwazELMAkGA1UEBhMCU0UxEjAQBgNV +BAgMCVN0b2NraG9sbTESMBAGA1UECgwJTXlPcmdOYW1lMRkwFwYDVQQLDBBNeUlu +dGVybWVkaWF0ZUNBMRkwFwYDVQQDDBBNeUludGVybWVkaWF0ZUNBFw0yMzAxMTIx +MzA4MTZaFw0zMzAxMDkxMzA4MTZaoG4wbDAfBgNVHSMEGDAWgBRMcIY7FVKJurUP +kqqusTFBE75z8zA8BgNVHRwENTAzoC6gLIYqaHR0cDovL2xvY2FsaG9zdDo5ODc4 +L2ludGVybWVkaWF0ZS5jcmwucGVthAH/MAsGA1UdFAQEAgIQADANBgkqhkiG9w0B +AQsFAAOCAgEAJGOZuqZL4m7zUaRyBrxeT6Tqo+XKz7HeD5zvO4BTNX+0E0CRyki4 +HhIGbxjv2NKWoaUv0HYbGAiZdO4TaPu3w3tm4+pGEDBclBj2KTdbB+4Hlzv956gD +KXZ//ziNwx1SCoxxkxB+TALxReN0shE7Mof9GlB5HPskhLorZgg/pmgJtIykEpsq +QAjJo4aq+f2/L+9dzRM205fVFegtsHvgEVNKz6iK6skt+kDhj/ks9BKsnfCDIGr+ +XnPYwS9yDnnhFdoJ40AQQDtomxggAjfgcSnqtHCxZwKJohuztbSWUgD/4yxzlrwP +Dk1cT/Ajjjqb2dXVOfTLK1VB2168uuouArxZ7KYbXwBjHduYWGGkA6FfkNJO/jpF +SL9qhX3oxcRF3hDhWigN1ZRD7NpDKwVal3Y9tmvO5bWhb5VF+3qv0HGeSGp6V0dp +sjwhIj+78bkUrcXxrivACLAXgSTGonx1uXD+T4P4NCt148dgRAbgd8sUXK5FcgU2 +cdBl8Kv2ZUjEaod5gUzDtf22VGSoO9lHvfHdpG9o2H3wC7s4tyLTidNrduIguJff +IIgc44Y252iV0sOmZ5S0jjTRiF1YUUPy9qA/6bOnr2LohbwbNZv9tDlNj8cdhxUz +cKiS+c7Qsz+YCcrp19QRiJoQae/gUqz7kmUZQgyPmDd+ArE0V+kDZEE= +-----END X509 CRL----- diff --git a/apps/emqx/test/emqx_crl_cache_SUITE_data/intermediate-revoked-no-dp.crl.pem b/apps/emqx/test/emqx_crl_cache_SUITE_data/intermediate-revoked-no-dp.crl.pem new file mode 100644 index 000000000..4d3611d49 --- /dev/null +++ b/apps/emqx/test/emqx_crl_cache_SUITE_data/intermediate-revoked-no-dp.crl.pem @@ -0,0 +1,19 @@ +-----BEGIN X509 CRL----- +MIIC/TCB5gIBATANBgkqhkiG9w0BAQsFADBrMQswCQYDVQQGEwJTRTESMBAGA1UE +CAwJU3RvY2tob2xtMRIwEAYDVQQKDAlNeU9yZ05hbWUxGTAXBgNVBAsMEE15SW50 +ZXJtZWRpYXRlQ0ExGTAXBgNVBAMMEE15SW50ZXJtZWRpYXRlQ0EXDTIzMDExODEz +Mjc1M1oXDTMzMDExNTEzMjc1M1owFTATAgIQAhcNMjMwMTEyMTMwODE2WqAwMC4w +HwYDVR0jBBgwFoAUTHCGOxVSibq1D5KqrrExQRO+c/MwCwYDVR0UBAQCAhACMA0G +CSqGSIb3DQEBCwUAA4ICAQCxoRYDc5MaBpDI+HQUX60+obFeZJdBkPO2wMb6HBQq +e0lZM2ukS+4n5oGhRelsvmEz0qKvnYS6ISpuFzv4Qy6Vaun/KwIYAdXsEQVwDHsu +Br4m1V01igjFnujowwR/7F9oPnZOmBaBdiyYbjgGV0YMF7sOfl4UO2MqI2GSGqVk +63wELT1AXjx31JVoyATQOQkq1A5HKFYLEbFmdF/8lNfbxSCBY2tuJ+uWVQtzjM0y +i+/owz5ez1BZ/Swx8akYhuvs8DVVTbjXydidVSrxt/QEf3+oJCzTA9qFqt4MH7gL 
+6BAglCGtRiYTHqeMHrwddaHF2hzR61lHJlkMCL61yhVuL8WsEJ/AxVX0W3MfQ4Cw +x/A6xIkgqtu+HtQnPyDcJxyaFHtFC+U67nSbEQySFvHfMw42DGdIGojKQCeUer9W +ECFC8OATQwN2h//f8QkY7D0H3k/brrNYDfdFIcCti9iZiFrrPFxO7NbOTfkeKCt3 +7IwYduRc8DWKmS8c7j2re1KkdYnfE1sfwbn3trImkcET5tvDlVCZ1glnBQzk82PS +HvKmSjD2pZI7upfLkoMgMhYyYJhYk7Mw2o4JXuddYGKmmw3bJyHkG/Ot5NAKjb7g +k1QCeWzxO1xXm8PNDDFWMn351twUGDQ/cwrUw0ODeUZpfL0BtTn4YnfCLLTvZDxo +Vg== +-----END X509 CRL----- diff --git a/apps/emqx/test/emqx_crl_cache_SUITE_data/intermediate-revoked.crl.pem b/apps/emqx/test/emqx_crl_cache_SUITE_data/intermediate-revoked.crl.pem new file mode 100644 index 000000000..4c5cdd441 --- /dev/null +++ b/apps/emqx/test/emqx_crl_cache_SUITE_data/intermediate-revoked.crl.pem @@ -0,0 +1,20 @@ +-----BEGIN X509 CRL----- +MIIDPDCCASQCAQEwDQYJKoZIhvcNAQELBQAwazELMAkGA1UEBhMCU0UxEjAQBgNV +BAgMCVN0b2NraG9sbTESMBAGA1UECgwJTXlPcmdOYW1lMRkwFwYDVQQLDBBNeUlu +dGVybWVkaWF0ZUNBMRkwFwYDVQQDDBBNeUludGVybWVkaWF0ZUNBFw0yMzAxMTIx +MzA4MTZaFw0zMzAxMDkxMzA4MTZaMBUwEwICEAIXDTIzMDExMjEzMDgxNlqgbjBs +MB8GA1UdIwQYMBaAFExwhjsVUom6tQ+Sqq6xMUETvnPzMDwGA1UdHAQ1MDOgLqAs +hipodHRwOi8vbG9jYWxob3N0Ojk4NzgvaW50ZXJtZWRpYXRlLmNybC5wZW2EAf8w +CwYDVR0UBAQCAhABMA0GCSqGSIb3DQEBCwUAA4ICAQCPadbaehEqLv4pwqF8em8T +CW8TOQ4Vjz02uiVk9Bo0za1dQqQmwCBA6UE1BcOh+aWzQxBRz56NeUcfhgDxTntG +xLs896N9MHIG6UxpqJH8cH+DXKHsQjvvCjXtiObmBQR1RiG5C1vEMkfzTt/WSrq5 +7blowLDs4NP6YbtqXEyyUkF7DQSUEUuIDWPQdx1f++nSpVaHWW4xpoO4umesaJco +FuxaXQnZpTHHQfqUJVIL2Mmzvez9thgfKTV3vgkYrGiSLW2m2+Tfga30pUc0qaVI +RrBVORVbcu9m1sV0aJyk96b2T/+i2FRR/np4TOcLgckBpHKeK2FH69lHFr0W/71w +CErNTxahoh82Yi8POenu+S1m2sDnrF1FMf+ZG/i2wr0nW6/+zVGQsEOw77Spbmei +dbEchu3iWF1XEO/n4zVBzl6a1o2RyVg+1pItYd5C5bPwcrfZnBrm4WECPxO+6rbW +2/wz9Iku4XznTLqLEpXLAtenAdo73mLGC7riviX7mhcxfN2UjNfLuVGHmG8XwIsM +Lgpr6DKaxHwpHgW3wA3SGJrY5dj0TvGWaoInrNt1cOMnIpoxRNy5+ko71Ubx3yrV +RhbUMggd1GG1ct9uZn82v74RYF6J8Xcxn9vDFJu5LLT5kvfy414kdJeTXKqfKXA/ +atdUgFa0otoccn5FzyUuzg== +-----END X509 CRL----- diff --git a/apps/emqx/test/emqx_crl_cache_SUITE_data/intermediate.crl.pem b/apps/emqx/test/emqx_crl_cache_SUITE_data/intermediate.crl.pem new file mode 100644 index 000000000..a119cede2 --- /dev/null +++ b/apps/emqx/test/emqx_crl_cache_SUITE_data/intermediate.crl.pem @@ -0,0 +1,20 @@ +-----BEGIN X509 CRL----- +MIIDPDCCASQCAQEwDQYJKoZIhvcNAQELBQAwazELMAkGA1UEBhMCU0UxEjAQBgNV +BAgMCVN0b2NraG9sbTESMBAGA1UECgwJTXlPcmdOYW1lMRkwFwYDVQQLDBBNeUlu +dGVybWVkaWF0ZUNBMRkwFwYDVQQDDBBNeUludGVybWVkaWF0ZUNBFw0yMjA3MjAy +MDIzNTNaFw0zMjEwMjUyMDIzNTNaMBUwEwICEAIXDTIyMDYxMzEyNDIwNVqgbjBs +MB8GA1UdIwQYMBaAFCuv1TkzC1fSgTfzE1m1u5pRCJsVMDwGA1UdHAQ1MDOgLqAs +hipodHRwOi8vbG9jYWxob3N0Ojk4NzgvaW50ZXJtZWRpYXRlLmNybC5wZW2EAf8w +CwYDVR0UBAQCAhADMA0GCSqGSIb3DQEBCwUAA4ICAQBbWdqRFsIrG6coL6ln1RL+ +uhgW9l3XMmjNlyiYHHNzOgnlBok9xu9UdaVCOKC6GEthWSzSlBY1AZugje57DQQd +RkIJol9am94lKMTjF/qhzFLiSjho8fwZGDGyES5YeZXkLqNMOf6m/drKaI3iofWf +l63qU9jY8dnSrVDkwgCguUL2FTx60v5H9NPxSctQ3VDxDvDj0sTAcHFknQcZbfvY +ZWpOYNS0FAJlQPVK9wUoDxI0LhrWDq5h/T1jcGO34fPT8RUA5HRtFVUevqSuOLWx +WTfTx5oDeMZPJTvHWUcg4yMElHty4tEvtkFxLSYbZqj7qTU+mi/LAN3UKBH/gBEN +y2OsJvFhVRgHf+zPYegf3WzBSoeaXNAJZ4UnRo34P9AL3Mrh+4OOUP++oYRKjWno +pYtAmTrIwEYoLyisEhhZ6aD92f/Op3dIYsxwhHt0n0lKrbTmUfiJUAe7kUZ4PMn4 +Gg/OHlbEDaDxW1dCymjyRGl+3/8kjy7bkYUXCf7w6JBeL2Hw2dFp1Gh13NRjre93 +PYlSOvI6QNisYGscfuYPwefXogVrNjf/ttCksMa51tUk+ylw7ZMZqQjcPPSzmwKc +5CqpnQHfolvRuN0xIVZiAn5V6/MdHm7ocrXxOkzWQyaoNODTq4js8h8eYXgAkt1w +p1PTEFBucGud7uBDE6Ub6A== +-----END X509 CRL----- diff --git a/apps/emqx/test/emqx_crl_cache_SUITE_data/server.cert.pem b/apps/emqx/test/emqx_crl_cache_SUITE_data/server.cert.pem new 
file mode 100644 index 000000000..38cc63534 --- /dev/null +++ b/apps/emqx/test/emqx_crl_cache_SUITE_data/server.cert.pem @@ -0,0 +1,35 @@ +-----BEGIN CERTIFICATE----- +MIIGCTCCA/GgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwazELMAkGA1UEBhMCU0Ux +EjAQBgNVBAgMCVN0b2NraG9sbTESMBAGA1UECgwJTXlPcmdOYW1lMRkwFwYDVQQL +DBBNeUludGVybWVkaWF0ZUNBMRkwFwYDVQQDDBBNeUludGVybWVkaWF0ZUNBMB4X +DTIzMDExMjEzMDgxNloXDTMzMDQxOTEzMDgxNloweDELMAkGA1UEBhMCU0UxEjAQ +BgNVBAgMCVN0b2NraG9sbTESMBAGA1UEBwwJU3RvY2tob2xtMRIwEAYDVQQKDAlN +eU9yZ05hbWUxGTAXBgNVBAsMEE15SW50ZXJtZWRpYXRlQ0ExEjAQBgNVBAMMCWxv +Y2FsaG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKdU9FaA/n0Z +TXkd10XA9l+UV9xKR65ZTy2ApCFlw2gGWLiUh96a6hX+GQZFUV7ECIDDf+7nC85o +xo1Xyf0rHGABQ0uHlhqSemc12F9APIzRLlQkhtV4vMBBbGQFekje4F9bhY9JQtGd +XJGmwsR+XWo6SUY7K5l9FuSSSRXC0kSYYQfSTPR/LrF6efdHf+ZN4huP7lM2qIFd +afX+qBOI1/Y2LtITo2TaU/hXyKh9wEiuynoq0RZ2KkYQll5cKD9fSD+pW3Xm0XWX +TQy4RZEe3WoYEQsklNw3NC92ocA/PQB9BGNO1fKhzDn6kW2HxDxruDKOuO/meGek +ApCayu3e/I0CAwEAAaOCAagwggGkMAkGA1UdEwQCMAAwEQYJYIZIAYb4QgEBBAQD +AgZAMDMGCWCGSAGG+EIBDQQmFiRPcGVuU1NMIEdlbmVyYXRlZCBTZXJ2ZXIgQ2Vy +dGlmaWNhdGUwHQYDVR0OBBYEFGy5LQPzIelruJl7mL0mtUXM57XhMIGaBgNVHSME +gZIwgY+AFExwhjsVUom6tQ+Sqq6xMUETvnPzoXOkcTBvMQswCQYDVQQGEwJTRTES +MBAGA1UECAwJU3RvY2tob2xtMRIwEAYDVQQHDAlTdG9ja2hvbG0xEjAQBgNVBAoM +CU15T3JnTmFtZTERMA8GA1UECwwITXlSb290Q0ExETAPBgNVBAMMCE15Um9vdENB +ggIQADAOBgNVHQ8BAf8EBAMCBaAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwOwYDVR0f +BDQwMjAwoC6gLIYqaHR0cDovL2xvY2FsaG9zdDo5ODc4L2ludGVybWVkaWF0ZS5j +cmwucGVtMDEGCCsGAQUFBwEBBCUwIzAhBggrBgEFBQcwAYYVaHR0cDovL2xvY2Fs +aG9zdDo5ODc3MA0GCSqGSIb3DQEBCwUAA4ICAQCX3EQgiCVqLhnCNd0pmptxXPxo +l1KyZkpdrFa/NgSqRhkuZSAkszwBDDS/gzkHFKEUhmqs6/UZwN4+Rr3LzrHonBiN +aQ6GeNNXZ/3xAQfUCwjjGmz9Sgw6kaX19Gnk2CjI6xP7T+O5UmsMI9hHUepC9nWa +XX2a0hsO/KOVu5ZZckI16Ek/jxs2/HEN0epYdvjKFAaVmzZZ5PATNjrPQXvPmq2r +x++La+3bXZsrH8P2FhPpM5t/IxKKW/Tlpgz92c2jVSIHF5khSA/MFDC+dk80OFmm +v4ZTPIMuZ//Q+wo0f9P48rsL9D27qS7CA+8pn9wu+cfnBDSt7JD5Yipa1gHz71fy +YTa9qRxIAPpzW2v7TFZE8eSKFUY9ipCeM2BbdmCQGmq4+v36b5TZoyjH4k0UVWGo +Gclos2cic5Vxi8E6hb7b7yZpjEfn/5lbCiGMfAnI6aoOyrWg6keaRA33kaLUEZiK +OgFNbPkjiTV0ZQyLXf7uK9YFhpVzJ0dv0CFNse8rZb7A7PLn8VrV/ZFnJ9rPoawn +t7ZGxC0d5BRSEyEeEgsQdxuY4m8OkE18zwhCkt2Qs3uosOWlIrYmqSEa0i/sPSQP +jiwB4nEdBrf8ZygzuYjT5T9YRSwhVox4spS/Av8Ells5JnkuKAhCVv9gHxYwbj0c +CzyLJgE1z9Tq63m+gQ== +-----END CERTIFICATE----- diff --git a/apps/emqx/test/emqx_crl_cache_SUITE_data/server.key.pem b/apps/emqx/test/emqx_crl_cache_SUITE_data/server.key.pem new file mode 100644 index 000000000..d456ece72 --- /dev/null +++ b/apps/emqx/test/emqx_crl_cache_SUITE_data/server.key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCnVPRWgP59GU15 +HddFwPZflFfcSkeuWU8tgKQhZcNoBli4lIfemuoV/hkGRVFexAiAw3/u5wvOaMaN +V8n9KxxgAUNLh5YaknpnNdhfQDyM0S5UJIbVeLzAQWxkBXpI3uBfW4WPSULRnVyR +psLEfl1qOklGOyuZfRbkkkkVwtJEmGEH0kz0fy6xenn3R3/mTeIbj+5TNqiBXWn1 +/qgTiNf2Ni7SE6Nk2lP4V8iofcBIrsp6KtEWdipGEJZeXCg/X0g/qVt15tF1l00M +uEWRHt1qGBELJJTcNzQvdqHAPz0AfQRjTtXyocw5+pFth8Q8a7gyjrjv5nhnpAKQ +msrt3vyNAgMBAAECggEABnWvIQ/Fw0qQxRYz00uJt1LguW5cqgxklBsdOvTUwFVO +Y4HIZP2R/9tZV/ahF4l10pK5g52DxSoiUB6Ne6qIY+RolqfbUZdKBmX7vmGadM02 +fqUSV3dbwghEiO/1Mo74FnZQB6IKZFEw26aWakN+k7VAUufB3SEJGzXSgHaO63ru +dFGSiYI8U+q+YnhUJjCnmI12fycNfy451TdUQtGZb6pNmm5HRUF6hpAV8Le9LojP +Ql9eacPpsrzU15X5ElCQZ/f9iNh1bplcISuhrULgKUKOvAVrBlEK67uRVy6g98xA +c/rgNLkbL/jZEsAc3/vHAyFgd3lABfwpBGLHej3QgQKBgQDFNYmfBNQr89HC5Zc+ +M6jXcAT/R+0GNczBTfC4iyNemwqsumSSRelNZ748UefKuS3F6Mvb2CBqE2LbB61G +hrnCffG2pARjZ491SefRwghhWWVGLP1p8KliLgOGBehA1REgJb+XULncjuHZuh4O 
+LVn3HVnWGxeBGg+yKa6Z4YQi3QKBgQDZN0O8ZcZY74lRJ0UjscD9mJ1yHlsssZag
+njkX/f0GR/iVpfaIxQNC3gvWUy2LsU0He9sidcB0cfej0j/qZObQyFsCB0+utOgy
++hX7gokV2pes27WICbNWE2lJL4QZRJgvf82OaEy57kfDrm+eK1XaSZTZ10P82C9u
+gAmMnontcQKBgGu29lhY9tqa7jOZ26Yp6Uri8JfO3XPK5u+edqEVvlfqL0Zw+IW8
+kdWpmIqx4f0kcA/tO4v03J+TvycLZmVjKQtGZ0PvCkaRRhY2K9yyMomZnmtaH4BB
+5wKtR1do2pauyg/ZDnDDswD5OfsGYWw08TK8YVlEqu3lIjWZ9rguKVIxAoGAZYUk
+zVqr10ks3pcCA2rCjkPT4lA5wKvHgI4ylPoKVfMxRY/pp4acvZXV5ne9o7pcDBFh
+G7v5FPNnEFPlt4EtN4tMragJH9hBZgHoYEJkG6islweg0lHmVWaBIMlqbfzXO+v5
+gINSyNuLAvP2CvCqEXmubhnkFrpbgMOqsuQuBqECgYB3ss2PDhBF+5qoWgqymFof
+1ovRPuQ9sPjWBn5IrCdoYITDnbBzBZERx7GLs6A/PUlWgST7jkb1PY/TxYSUfXzJ
+SNd47q0mCQ+IUdqUbHgpK9b1ncwLMsnexpYZdHJWRLgnUhOx7OMjJc/4iLCAFCoN
+3KJ7/V1keo7GBHOwnsFcCA==
+-----END PRIVATE KEY-----
diff --git a/apps/emqx/test/emqx_exclusive_sub_SUITE.erl b/apps/emqx/test/emqx_exclusive_sub_SUITE.erl
new file mode 100644
index 000000000..79dfc9de6
--- /dev/null
+++ b/apps/emqx/test/emqx_exclusive_sub_SUITE.erl
@@ -0,0 +1,159 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2018-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%%     http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%--------------------------------------------------------------------
+
+-module(emqx_exclusive_sub_SUITE).
+
+-compile(export_all).
+-compile(nowarn_export_all).
+
+-include_lib("emqx/include/emqx.hrl").
+-include_lib("emqx/include/emqx_mqtt.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-define(EXCLUSIVE_TOPIC, <<"$exclusive/t/1">>).
+-define(NORMAL_TOPIC, <<"t/1">>).
+
+-define(CHECK_SUB(Client, Code), ?CHECK_SUB(Client, ?EXCLUSIVE_TOPIC, Code)).
+-define(CHECK_SUB(Client, Topic, Code),
+    {ok, _, [Code]} = emqtt:subscribe(Client, Topic, [])
+).
+
+all() -> emqx_common_test_helpers:all(?MODULE).
+
+init_per_suite(Config) ->
+    emqx_common_test_helpers:start_apps([]),
+    ok = ekka:start(),
+    OldConf = emqx:get_config([zones], #{}),
+    emqx_config:put_zone_conf(default, [mqtt, exclusive_subscription], true),
+    timer:sleep(50),
+    [{old_conf, OldConf} | Config].
+
+end_per_suite(Config) ->
+    emqx_config:put([zones], proplists:get_value(old_conf, Config)),
+    ekka:stop(),
+    mria:stop(),
+    mria_mnesia:delete_schema(),
+    emqx_common_test_helpers:stop_apps([]).
+
+end_per_testcase(_TestCase, _Config) ->
+    emqx_exclusive_subscription:clear().
+
+t_exclusive_sub(_) ->
+    {ok, C1} = emqtt:start_link([
+        {clientid, <<"client1">>},
+        {clean_start, false},
+        {proto_ver, v5},
+        {properties, #{'Session-Expiry-Interval' => 100}}
+    ]),
+    {ok, _} = emqtt:connect(C1),
+    ?CHECK_SUB(C1, 0),
+
+    {ok, C2} = emqtt:start_link([
+        {clientid, <<"client2">>},
+        {clean_start, false},
+        {proto_ver, v5}
+    ]),
+    {ok, _} = emqtt:connect(C2),
+    ?CHECK_SUB(C2, ?RC_QUOTA_EXCEEDED),
+
+    %% the exclusive lock is kept even while the owner is disconnected
+    ok = emqtt:disconnect(C1),
+    timer:sleep(1000),
+
+    ?CHECK_SUB(C2, ?RC_QUOTA_EXCEEDED),
+
+    ok = emqtt:disconnect(C2).
+
+t_allow_normal_sub(_) ->
+    {ok, C1} = emqtt:start_link([
+        {clientid, <<"client1">>},
+        {proto_ver, v5}
+    ]),
+    {ok, _} = emqtt:connect(C1),
+    ?CHECK_SUB(C1, 0),
+
+    {ok, C2} = emqtt:start_link([
+        {clientid, <<"client2">>},
+        {proto_ver, v5}
+    ]),
+    {ok, _} = emqtt:connect(C2),
+    ?CHECK_SUB(C2, ?NORMAL_TOPIC, 0),
+
+    ok = emqtt:disconnect(C1),
+    ok = emqtt:disconnect(C2).
+
+t_unsub(_) ->
+    {ok, C1} = emqtt:start_link([
+        {clientid, <<"client1">>},
+        {proto_ver, v5}
+    ]),
+    {ok, _} = emqtt:connect(C1),
+    ?CHECK_SUB(C1, 0),
+
+    {ok, C2} = emqtt:start_link([
+        {clientid, <<"client2">>},
+        {proto_ver, v5}
+    ]),
+    {ok, _} = emqtt:connect(C2),
+    ?CHECK_SUB(C2, ?RC_QUOTA_EXCEEDED),
+
+    {ok, #{}, [0]} = emqtt:unsubscribe(C1, ?EXCLUSIVE_TOPIC),
+
+    ?CHECK_SUB(C2, 0),
+
+    ok = emqtt:disconnect(C1),
+    ok = emqtt:disconnect(C2).
+
+t_clean_session(_) ->
+    erlang:process_flag(trap_exit, true),
+    {ok, C1} = emqtt:start_link([
+        {clientid, <<"client1">>},
+        {clean_start, true},
+        {proto_ver, v5},
+        {properties, #{'Session-Expiry-Interval' => 0}}
+    ]),
+    {ok, _} = emqtt:connect(C1),
+    ?CHECK_SUB(C1, 0),
+
+    {ok, C2} = emqtt:start_link([
+        {clientid, <<"client2">>},
+        {proto_ver, v5}
+    ]),
+    {ok, _} = emqtt:connect(C2),
+    ?CHECK_SUB(C2, ?RC_QUOTA_EXCEEDED),
+
+    %% the exclusive subscription is cleaned up automatically when the session is cleaned
+    ok = emqtt:disconnect(C1),
+
+    timer:sleep(1000),
+
+    ?CHECK_SUB(C2, 0),
+
+    ok = emqtt:disconnect(C2).
+
+t_feat_disabled(_) ->
+    OldConf = emqx:get_config([zones], #{}),
+    emqx_config:put_zone_conf(default, [mqtt, exclusive_subscription], false),
+
+    {ok, C1} = emqtt:start_link([
+        {clientid, <<"client1">>},
+        {proto_ver, v5}
+    ]),
+    {ok, _} = emqtt:connect(C1),
+    ?CHECK_SUB(C1, ?RC_TOPIC_FILTER_INVALID),
+    ok = emqtt:disconnect(C1),
+
+    emqx_config:put([zones], OldConf).
diff --git a/apps/emqx/test/emqx_flapping_SUITE.erl b/apps/emqx/test/emqx_flapping_SUITE.erl
index e27ff67e0..877f05995 100644
--- a/apps/emqx/test/emqx_flapping_SUITE.erl
+++ b/apps/emqx/test/emqx_flapping_SUITE.erl
@@ -101,3 +101,21 @@ t_expired_detecting(_) ->
             ets:tab2list(emqx_flapping)
         )
     ).
+
+t_conf_without_window_time(_) ->
+    %% enable is deprecated, so we need to make sure it won't be used.
+    Global = emqx_config:get([flapping_detect]),
+    ?assertNot(maps:is_key(enable, Global)),
+    %% zones don't have default values, so we need to make sure they fall back to the global conf.
+    %% this new_zone will fall back to the global conf.
+    emqx_config:put_zone_conf(new_zone, [flapping_detect], #{}),
+    ?assertEqual(Global, get_policy(new_zone)),
+
+    emqx_config:put_zone_conf(new_zone_1, [flapping_detect], #{window_time => 100}),
+    ?assertEqual(100, emqx_flapping:get_policy(window_time, new_zone_1)),
+    ?assertEqual(maps:get(ban_time, Global), emqx_flapping:get_policy(ban_time, new_zone_1)),
+    ?assertEqual(maps:get(max_count, Global), emqx_flapping:get_policy(max_count, new_zone_1)),
+    ok.
+
+get_policy(Zone) ->
+    emqx_flapping:get_policy([window_time, ban_time, max_count], Zone).
diff --git a/apps/emqx/test/emqx_frame_SUITE.erl b/apps/emqx/test/emqx_frame_SUITE.erl
index 2a8d1bc39..23e8972e9 100644
--- a/apps/emqx/test/emqx_frame_SUITE.erl
+++ b/apps/emqx/test/emqx_frame_SUITE.erl
@@ -670,6 +670,42 @@ t_invalid_clientid(_) ->
         emqx_frame:parse(<<16, 15, 0, 6, 77, 81, 73, 115, 100, 112, 3, 0, 0, 0, 1, 0, 0>>)
     ).
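+%% Byte-level sketch of the CONNECT packet used below (decoded by hand
+%% as a reading aid): 16 = CONNECT, 19 = remaining length, "MQTT" with
+%% protocol level 4, connect flags 130 = 2#10000010 (username flag set,
+%% password flag NOT set, clean start), keepalive 60, clientid <<"a1">>,
+%% username <<"aaa">>. With the password flag clear, the parser must
+%% yield `undefined' for the password.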
+%% for regression: `password` must be `undefined` +t_undefined_password(_) -> + Payload = <<16, 19, 0, 4, 77, 81, 84, 84, 4, 130, 0, 60, 0, 2, 97, 49, 0, 3, 97, 97, 97>>, + {ok, Packet, <<>>, {none, _}} = emqx_frame:parse(Payload), + Password = undefined, + ?assertEqual( + #mqtt_packet{ + header = #mqtt_packet_header{ + type = 1, + dup = false, + qos = 0, + retain = false + }, + variable = #mqtt_packet_connect{ + proto_name = <<"MQTT">>, + proto_ver = 4, + is_bridge = false, + clean_start = true, + will_flag = false, + will_qos = 0, + will_retain = false, + keepalive = 60, + properties = #{}, + clientid = <<"a1">>, + will_props = #{}, + will_topic = undefined, + will_payload = undefined, + username = <<"aaa">>, + password = Password + }, + payload = undefined + }, + Packet + ), + ok. + parse_serialize(Packet) -> parse_serialize(Packet, #{strict_mode => true}). diff --git a/apps/emqx/test/emqx_listeners_SUITE.erl b/apps/emqx/test/emqx_listeners_SUITE.erl index 6a7cd2791..fa0713cf0 100644 --- a/apps/emqx/test/emqx_listeners_SUITE.erl +++ b/apps/emqx/test/emqx_listeners_SUITE.erl @@ -26,6 +26,8 @@ -define(CERTS_PATH(CertName), filename:join(["../../lib/emqx/etc/certs/", CertName])). +-define(SERVER_KEY_PASSWORD, "sErve7r8Key$!"). + all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> @@ -33,6 +35,7 @@ init_per_suite(Config) -> application:ensure_all_started(esockd), application:ensure_all_started(quicer), application:ensure_all_started(cowboy), + generate_tls_certs(Config), lists:foreach(fun set_app_env/1, NewConfig), Config. @@ -44,18 +47,14 @@ init_per_testcase(Case, Config) when Case =:= t_max_conns_tcp; Case =:= t_current_conns_tcp -> catch emqx_config_handler:stop(), + Port = emqx_common_test_helpers:select_free_port(tcp), {ok, _} = emqx_config_handler:start_link(), - case emqx_config:get([listeners], undefined) of - undefined -> ok; - Listeners -> emqx_config:put([listeners], maps:remove(quic, Listeners)) - end, - PrevListeners = emqx_config:get([listeners], #{}), PureListeners = remove_default_limiter(PrevListeners), PureListeners2 = PureListeners#{ tcp => #{ listener_test => #{ - bind => {"127.0.0.1", 9999}, + bind => {"127.0.0.1", Port}, max_connections => 4321, limiter => #{} } @@ -65,19 +64,20 @@ init_per_testcase(Case, Config) when ok = emqx_listeners:start(), [ - {prev_listener_conf, PrevListeners} + {prev_listener_conf, PrevListeners}, + {tcp_port, Port} | Config ]; init_per_testcase(t_wss_conn, Config) -> catch emqx_config_handler:stop(), + Port = emqx_common_test_helpers:select_free_port(ssl), {ok, _} = emqx_config_handler:start_link(), - PrevListeners = emqx_config:get([listeners], #{}), PureListeners = remove_default_limiter(PrevListeners), PureListeners2 = PureListeners#{ wss => #{ listener_test => #{ - bind => {{127, 0, 0, 1}, 9998}, + bind => {{127, 0, 0, 1}, Port}, limiter => #{}, ssl_options => #{ cacertfile => ?CERTS_PATH("cacert.pem"), @@ -91,7 +91,8 @@ init_per_testcase(t_wss_conn, Config) -> ok = emqx_listeners:start(), [ - {prev_listener_conf, PrevListeners} + {prev_listener_conf, PrevListeners}, + {wss_port, Port} | Config ]; init_per_testcase(_, Config) -> @@ -138,18 +139,107 @@ t_restart_listeners(_) -> ok = emqx_listeners:restart(), ok = emqx_listeners:stop(). 
-t_max_conns_tcp(_) ->
+t_restart_listeners_with_hibernate_after_disabled(_Config) ->
+    OldLConf = emqx_config:get([listeners]),
+    maps:foreach(
+        fun(LType, Listeners) ->
+            maps:foreach(
+                fun(Name, Opts) ->
+                    case maps:is_key(ssl_options, Opts) of
+                        true ->
+                            emqx_config:put(
+                                [
+                                    listeners,
+                                    LType,
+                                    Name,
+                                    ssl_options,
+                                    hibernate_after
+                                ],
+                                undefined
+                            );
+                        _ ->
+                            skip
+                    end
+                end,
+                Listeners
+            )
+        end,
+        OldLConf
+    ),
+    ok = emqx_listeners:start(),
+    ok = emqx_listeners:stop(),
+    %% flakiness: eaddrinuse
+    timer:sleep(timer:seconds(2)),
+    ok = emqx_listeners:restart(),
+    ok = emqx_listeners:stop(),
+    emqx_config:put([listeners], OldLConf).
+
+t_max_conns_tcp(Config) ->
     %% Note: Using a string representation for the bind address like
     %% "127.0.0.1" does not work
-    ?assertEqual(4321, emqx_listeners:max_conns('tcp:listener_test', {{127, 0, 0, 1}, 9999})).
+    ?assertEqual(
+        4321,
+        emqx_listeners:max_conns('tcp:listener_test', {{127, 0, 0, 1}, ?config(tcp_port, Config)})
+    ).
 
-t_current_conns_tcp(_) ->
-    ?assertEqual(0, emqx_listeners:current_conns('tcp:listener_test', {{127, 0, 0, 1}, 9999})).
+t_current_conns_tcp(Config) ->
+    ?assertEqual(
+        0,
+        emqx_listeners:current_conns('tcp:listener_test', {
+            {127, 0, 0, 1}, ?config(tcp_port, Config)
+        })
+    ).
 
-t_wss_conn(_) ->
-    {ok, Socket} = ssl:connect({127, 0, 0, 1}, 9998, [{verify, verify_none}], 1000),
+t_wss_conn(Config) ->
+    {ok, Socket} = ssl:connect(
+        {127, 0, 0, 1}, ?config(wss_port, Config), [{verify, verify_none}], 1000
+    ),
     ok = ssl:close(Socket).
 
+t_quic_conn(Config) ->
+    Port = emqx_common_test_helpers:select_free_port(quic),
+    DataDir = ?config(data_dir, Config),
+    SSLOpts = #{
+        password => ?SERVER_KEY_PASSWORD,
+        certfile => filename:join(DataDir, "server-password.pem"),
+        cacertfile => filename:join(DataDir, "ca.pem"),
+        keyfile => filename:join(DataDir, "server-password.key")
+    },
+    emqx_common_test_helpers:ensure_quic_listener(?FUNCTION_NAME, Port, #{ssl_options => SSLOpts}),
+    ct:pal("~p", [emqx_listeners:list()]),
+    {ok, Conn} = quicer:connect(
+        {127, 0, 0, 1},
+        Port,
+        [
+            {verify, verify_none},
+            {alpn, ["mqtt"]}
+        ],
+        1000
+    ),
+    ok = quicer:close_connection(Conn),
+    emqx_listeners:stop_listener(quic, ?FUNCTION_NAME, #{bind => Port}).
+
+t_ssl_password_cert(Config) ->
+    Port = emqx_common_test_helpers:select_free_port(ssl),
+    DataDir = ?config(data_dir, Config),
+    SSLOptsPWD = #{
+        password => ?SERVER_KEY_PASSWORD,
+        certfile => filename:join(DataDir, "server-password.pem"),
+        cacertfile => filename:join(DataDir, "ca.pem"),
+        keyfile => filename:join(DataDir, "server-password.key")
+    },
+    LConf = #{
+        enabled => true,
+        bind => {{127, 0, 0, 1}, Port},
+        mountpoint => <<>>,
+        zone => default,
+        ssl_options => SSLOptsPWD
+    },
+    ok = emqx_listeners:start_listener(ssl, ?FUNCTION_NAME, LConf),
+    {ok, SSLSocket} = ssl:connect("127.0.0.1", Port, [{verify, verify_none}]),
+    ssl:close(SSLSocket),
+    emqx_listeners:stop_listener(ssl, ?FUNCTION_NAME, LConf).
+
 t_format_bind(_) ->
     ?assertEqual(
         ":1883",
@@ -189,8 +279,7 @@ render_config_file() ->
 mustache_vars() ->
     [
         {platform_data_dir, local_path(["data"])},
-        {platform_etc_dir, local_path(["etc"])},
-        {platform_log_dir, local_path(["log"])}
+        {platform_etc_dir, local_path(["etc"])}
     ].
 
 generate_config() ->
@@ -234,3 +323,10 @@ remove_default_limiter(Listeners) ->
         end,
         Listeners
     ).
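+%% Sketch (assumption; the real helper lives in emqx_common_test_helpers):
+%% select_free_port/1 presumably asks the OS for an unused port, which is
+%% why the hard-coded 9998/9999 binds above could be dropped, e.g.:
+%%   {ok, LSock} = gen_tcp:listen(0, [{ip, {127, 0, 0, 1}}]),
+%%   {ok, Port} = inet:port(LSock),
+%%   ok = gen_tcp:close(LSock),
+%%   Port.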
+ +generate_tls_certs(Config) -> + DataDir = ?config(data_dir, Config), + emqx_common_test_helpers:gen_ca(DataDir, "ca"), + emqx_common_test_helpers:gen_host_cert("server-password", "ca", DataDir, #{ + password => ?SERVER_KEY_PASSWORD + }). diff --git a/apps/emqx/test/emqx_logger_SUITE.erl b/apps/emqx/test/emqx_logger_SUITE.erl index c8ff63c75..e8d7d7a34 100644 --- a/apps/emqx/test/emqx_logger_SUITE.erl +++ b/apps/emqx/test/emqx_logger_SUITE.erl @@ -22,7 +22,6 @@ -include_lib("eunit/include/eunit.hrl"). -define(LOGGER, emqx_logger). --define(a, "a"). -define(SUPPORTED_LEVELS, [emergency, alert, critical, error, warning, notice, info, debug]). all() -> emqx_common_test_helpers:all(?MODULE). diff --git a/apps/emqx/test/emqx_metrics_worker_SUITE.erl b/apps/emqx/test/emqx_metrics_worker_SUITE.erl index 113e8650f..194c9cc99 100644 --- a/apps/emqx/test/emqx_metrics_worker_SUITE.erl +++ b/apps/emqx/test/emqx_metrics_worker_SUITE.erl @@ -46,7 +46,7 @@ end_per_testcase(_, _Config) -> ok. t_get_metrics(_) -> - Metrics = [a, b, c], + Metrics = [a, b, c, {slide, d}], Id = <<"testid">>, ok = emqx_metrics_worker:create_metrics(?NAME, Id, Metrics), %% all the metrics are set to zero at start @@ -73,6 +73,8 @@ t_get_metrics(_) -> ok = emqx_metrics_worker:set_gauge(?NAME, Id, worker_id0, inflight, 5), ok = emqx_metrics_worker:set_gauge(?NAME, Id, worker_id1, inflight, 7), ok = emqx_metrics_worker:set_gauge(?NAME, Id, worker_id2, queuing, 9), + ok = emqx_metrics_worker:observe(?NAME, Id, d, 10), + ok = emqx_metrics_worker:observe(?NAME, Id, d, 30), ct:sleep(1500), ?LET( #{ @@ -89,6 +91,9 @@ t_get_metrics(_) -> a := 1, b := 1, c := 2 + } = Counters, + slides := #{ + d := #{n_samples := 2, last5m := 20, current := _} } }, emqx_metrics_worker:get_metrics(?NAME, Id), @@ -100,7 +105,8 @@ t_get_metrics(_) -> ?assert(MaxB > 0), ?assert(MaxC > 0), ?assert(Inflight == 12), - ?assert(Queuing == 9) + ?assert(Queuing == 9), + ?assertNot(maps:is_key(d, Counters)) } ), ok = emqx_metrics_worker:clear_metrics(?NAME, Id). @@ -117,6 +123,7 @@ t_clear_metrics(_Config) -> c := #{current := 0.0, max := 0.0, last5m := 0.0} }, gauges := #{}, + slides := #{}, counters := #{ a := 0, b := 0, @@ -138,14 +145,15 @@ t_clear_metrics(_Config) -> #{ counters => #{}, gauges => #{}, - rate => #{current => 0.0, last5m => 0.0, max => 0.0} + rate => #{current => 0.0, last5m => 0.0, max => 0.0}, + slides => #{} }, emqx_metrics_worker:get_metrics(?NAME, Id) ), ok. 
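+%% Usage sketch for the new {slide, _} metric type exercised below: a
+%% slide accumulates observed samples and reports n_samples/current/last5m
+%% (the numbers update on the worker's periodic tick):
+%%   ok = emqx_metrics_worker:observe(?NAME, Id, d, 10),
+%%   %% ...after the next tick...
+%%   #{d := #{n_samples := 1}} = emqx_metrics_worker:get_slide(?NAME, Id).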
t_reset_metrics(_) -> - Metrics = [a, b, c], + Metrics = [a, b, c, {slide, d}], Id = <<"testid">>, ok = emqx_metrics_worker:create_metrics(?NAME, Id, Metrics), %% all the metrics are set to zero at start @@ -161,6 +169,9 @@ t_reset_metrics(_) -> a := 0, b := 0, c := 0 + }, + slides := #{ + d := #{n_samples := 0, last5m := 0, current := 0} } }, emqx_metrics_worker:get_metrics(?NAME, Id) @@ -172,7 +183,12 @@ t_reset_metrics(_) -> ok = emqx_metrics_worker:set_gauge(?NAME, Id, worker_id0, inflight, 5), ok = emqx_metrics_worker:set_gauge(?NAME, Id, worker_id1, inflight, 7), ok = emqx_metrics_worker:set_gauge(?NAME, Id, worker_id2, queuing, 9), + ok = emqx_metrics_worker:observe(?NAME, Id, d, 100), + ok = emqx_metrics_worker:observe(?NAME, Id, d, 200), ct:sleep(1500), + ?assertMatch( + #{d := #{n_samples := 2}}, emqx_metrics_worker:get_slide(?NAME, <<"testid">>) + ), ok = emqx_metrics_worker:reset_metrics(?NAME, Id), ?LET( #{ @@ -186,6 +202,9 @@ t_reset_metrics(_) -> a := 0, b := 0, c := 0 + }, + slides := #{ + d := #{n_samples := 0, last5m := 0, current := 0} } }, emqx_metrics_worker:get_metrics(?NAME, Id), @@ -202,7 +221,7 @@ t_reset_metrics(_) -> ok = emqx_metrics_worker:clear_metrics(?NAME, Id). t_get_metrics_2(_) -> - Metrics = [a, b, c], + Metrics = [a, b, c, {slide, d}], Id = <<"testid">>, ok = emqx_metrics_worker:create_metrics( ?NAME, diff --git a/apps/emqx/test/emqx_mqtt_SUITE.erl b/apps/emqx/test/emqx_mqtt_SUITE.erl index 287d7fdba..d0162b34b 100644 --- a/apps/emqx/test/emqx_mqtt_SUITE.erl +++ b/apps/emqx/test/emqx_mqtt_SUITE.erl @@ -237,7 +237,7 @@ do_async_set_keepalive() -> {ok, _} = ?block_until( #{ ?snk_kind := insert_channel_info, - client_id := ClientID + clientid := ClientID }, 2000, 100 diff --git a/apps/emqx/test/emqx_mqtt_caps_SUITE.erl b/apps/emqx/test/emqx_mqtt_caps_SUITE.erl index 2ee4b5ffd..297ee7f7d 100644 --- a/apps/emqx/test/emqx_mqtt_caps_SUITE.erl +++ b/apps/emqx/test/emqx_mqtt_caps_SUITE.erl @@ -22,7 +22,16 @@ -include_lib("emqx/include/emqx_mqtt.hrl"). -include_lib("eunit/include/eunit.hrl"). -all() -> emqx_common_test_helpers:all(?MODULE). +all() -> + emqx_common_test_helpers:all(?MODULE). + +init_per_suite(Config) -> + emqx_common_test_helpers:start_apps([]), + Config. + +end_per_suite(_Config) -> + emqx_common_test_helpers:stop_apps([]), + ok. t_check_pub(_) -> OldConf = emqx:get_config([zones], #{}), diff --git a/apps/emqx/test/emqx_mqtt_protocol_v5_SUITE.erl b/apps/emqx/test/emqx_mqtt_protocol_v5_SUITE.erl index 7e97c5bf4..fe608f600 100644 --- a/apps/emqx/test/emqx_mqtt_protocol_v5_SUITE.erl +++ b/apps/emqx/test/emqx_mqtt_protocol_v5_SUITE.erl @@ -829,6 +829,42 @@ t_subscribe_no_local(Config) -> ?assertEqual(1, length(receive_messages(2))), ok = emqtt:disconnect(Client1). 
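+%% MQTT v5 'No Local' recap: a subscription made with {nl, true} must not
+%% receive messages published over the same client's own connection.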
+t_subscribe_no_local_mixed(Config) ->
+    ConnFun = ?config(conn_fun, Config),
+    Topic = nth(1, ?TOPICS),
+    {ok, Client1} = emqtt:start_link([{proto_ver, v5} | Config]),
+    {ok, _} = emqtt:ConnFun(Client1),
+
+    {ok, Client2} = emqtt:start_link([{proto_ver, v5} | Config]),
+    {ok, _} = emqtt:ConnFun(Client2),
+
+    %% Given two clients, where client1 subscribes to the topic with 'no local' set to true
+    {ok, _, [2]} = emqtt:subscribe(Client1, #{}, [{Topic, [{nl, true}, {qos, 2}]}]),
+
+    %% When mixed publish traffic is sent from both clients (Client1 sends 6 and Client2 sends 2)
+    CB = {fun emqtt:sync_publish_result/3, [self(), async_res]},
+    ok = emqtt:publish_async(Client1, Topic, <<"t_subscribe_no_local_mixed1">>, 0, CB),
+    ok = emqtt:publish_async(Client2, Topic, <<"t_subscribe_no_local_mixed2">>, 0, CB),
+    ok = emqtt:publish_async(Client1, Topic, <<"t_subscribe_no_local_mixed3">>, 0, CB),
+    ok = emqtt:publish_async(Client1, Topic, <<"t_subscribe_no_local_mixed4">>, 0, CB),
+    ok = emqtt:publish_async(Client1, Topic, <<"t_subscribe_no_local_mixed5">>, 0, CB),
+    ok = emqtt:publish_async(Client2, Topic, <<"t_subscribe_no_local_mixed6">>, 0, CB),
+    ok = emqtt:publish_async(Client1, Topic, <<"t_subscribe_no_local_mixed7">>, 0, CB),
+    ok = emqtt:publish_async(Client1, Topic, <<"t_subscribe_no_local_mixed8">>, 0, CB),
+    [
+        receive
+            {async_res, Res} -> ?assertEqual(ok, Res)
+        end
+     || _ <- lists:seq(1, 8)
+    ],
+
+    %% Then only the two messages from client 2 are received
+    PubRecvd = receive_messages(9),
+    ct:pal("~p", [PubRecvd]),
+    ?assertEqual(2, length(PubRecvd)),
+    ok = emqtt:disconnect(Client1),
+    ok = emqtt:disconnect(Client2).
+
 t_subscribe_actions(Config) ->
     ConnFun = ?config(conn_fun, Config),
     Topic = nth(1, ?TOPICS),
@@ -905,7 +941,7 @@ t_shared_subscriptions_client_terminates_when_qos_eq_2(Config) ->
         emqtt,
         connected,
         fun
-            (cast, ?PUBLISH_PACKET(?QOS_2, _PacketId), _State) ->
+            (cast, {?PUBLISH_PACKET(?QOS_2, _PacketId), _Via}, _State) ->
                 ok = counters:add(CRef, 1, 1),
                 {stop, {shutdown, for_testing}};
             (Arg1, ARg2, Arg3) ->
diff --git a/apps/emqx/test/emqx_ocsp_cache_SUITE.erl b/apps/emqx/test/emqx_ocsp_cache_SUITE.erl
new file mode 100644
index 000000000..b0ba4f0e2
--- /dev/null
+++ b/apps/emqx/test/emqx_ocsp_cache_SUITE.erl
@@ -0,0 +1,1006 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+
+-module(emqx_ocsp_cache_SUITE).
+
+-compile(export_all).
+-compile(nowarn_export_all).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("snabbkaffe/include/snabbkaffe.hrl").
+
+-include_lib("ssl/src/ssl_handshake.hrl").
+
+-define(CACHE_TAB, emqx_ocsp_cache).
+
+all() ->
+    [{group, openssl}] ++ tests().
+
+tests() ->
+    emqx_common_test_helpers:all(?MODULE) -- openssl_tests().
+
+openssl_tests() ->
+    [t_openssl_client].
+
+groups() ->
+    OpensslTests = openssl_tests(),
+    [
+        {openssl, [
+            {group, tls12},
+            {group, tls13}
+        ]},
+        {tls12, [
+            {group, with_status_request},
+            {group, without_status_request}
+        ]},
+        {tls13, [
+            {group, with_status_request},
+            {group, without_status_request}
+        ]},
+        {with_status_request, [], OpensslTests},
+        {without_status_request, [], OpensslTests}
+    ].
+
+init_per_suite(Config) ->
+    application:load(emqx),
+    emqx_config:save_schema_mod_and_names(emqx_schema),
+    emqx_common_test_helpers:boot_modules(all),
+    Config.
+
+end_per_suite(_Config) ->
+    ok.
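+%% Flow sketch (inferred from the tests below; treat the internals as an
+%% assumption): the cache keeps fetched DER responses in an ETS table
+%% keyed by listener ID, so fetch_response/1 behaves roughly like:
+%%   case ets:lookup(?CACHE_TAB, ListenerID) of
+%%       [{_, Response}] -> {ok, Response};
+%%       [] -> http_fetch_and_cache(ListenerID)  %% hypothetical name
+%%   end.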
+ +init_per_group(tls12, Config) -> + [{tls_vsn, "-tls1_2"} | Config]; +init_per_group(tls13, Config) -> + [{tls_vsn, "-tls1_3"} | Config]; +init_per_group(with_status_request, Config) -> + [{status_request, true} | Config]; +init_per_group(without_status_request, Config) -> + [{status_request, false} | Config]; +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(t_openssl_client, Config) -> + ct:timetrap({seconds, 30}), + DataDir = ?config(data_dir, Config), + Handler = fun(_) -> ok end, + {OCSPResponderPort, OCSPOSPid} = setup_openssl_ocsp(Config), + ConfFilePath = filename:join([DataDir, "openssl_listeners.conf"]), + emqx_common_test_helpers:start_apps( + [], + Handler, + #{ + extra_mustache_vars => #{test_data_dir => DataDir}, + conf_file_path => ConfFilePath + } + ), + ct:sleep(1_000), + [ + {ocsp_responder_port, OCSPResponderPort}, + {ocsp_responder_os_pid, OCSPOSPid} + | Config + ]; +init_per_testcase(TestCase, Config) when + TestCase =:= t_update_listener; + TestCase =:= t_validations +-> + %% when running emqx standalone tests, we can't use those + %% features. + case does_module_exist(emqx_mgmt_api_test_util) of + true -> + ct:timetrap({seconds, 30}), + %% start the listener with the default (non-ocsp) config + TestPid = self(), + ok = meck:new(emqx_ocsp_cache, [non_strict, passthrough, no_history, no_link]), + meck:expect( + emqx_ocsp_cache, + http_get, + fun(URL, _HTTPTimeout) -> + ct:pal("ocsp http request ~p", [URL]), + TestPid ! {http_get, URL}, + {ok, {{"HTTP/1.0", 200, 'OK'}, [], <<"ocsp response">>}} + end + ), + emqx_mgmt_api_test_util:init_suite([emqx_conf]), + snabbkaffe:start_trace(), + Config; + false -> + [{skip_does_not_apply, true} | Config] + end; +init_per_testcase(t_ocsp_responder_error_responses, Config) -> + ct:timetrap({seconds, 30}), + TestPid = self(), + ok = meck:new(emqx_ocsp_cache, [non_strict, passthrough, no_history, no_link]), + meck:expect( + emqx_ocsp_cache, + http_get, + fun(URL, _HTTPTimeout) -> + ct:pal("ocsp http request ~p", [URL]), + TestPid ! {http_get, URL}, + persistent_term:get({?MODULE, http_response}) + end + ), + DataDir = ?config(data_dir, Config), + Type = ssl, + Name = test_ocsp, + ListenerOpts = #{ + ssl_options => + #{ + certfile => filename:join(DataDir, "server.pem"), + ocsp => #{ + enable_ocsp_stapling => true, + responder_url => <<"http://localhost:9877/">>, + issuer_pem => filename:join(DataDir, "ocsp-issuer.pem"), + refresh_http_timeout => 15_000, + refresh_interval => 1_000 + } + } + }, + Conf = #{listeners => #{Type => #{Name => ListenerOpts}}}, + ConfBin = emqx_utils_maps:binary_key_map(Conf), + hocon_tconf:check_plain(emqx_schema, ConfBin, #{required => false, atom_keys => false}), + emqx_config:put_listener_conf(Type, Name, [], ListenerOpts), + snabbkaffe:start_trace(), + _Heir = spawn_dummy_heir(), + {ok, CachePid} = emqx_ocsp_cache:start_link(), + [ + {cache_pid, CachePid} + | Config + ]; +init_per_testcase(_TestCase, Config) -> + ct:timetrap({seconds, 10}), + TestPid = self(), + ok = meck:new(emqx_ocsp_cache, [non_strict, passthrough, no_history, no_link]), + meck:expect( + emqx_ocsp_cache, + http_get, + fun(URL, _HTTPTimeout) -> + TestPid ! 
{http_get, URL}, + {ok, {{"HTTP/1.0", 200, 'OK'}, [], <<"ocsp response">>}} + end + ), + _Heir = spawn_dummy_heir(), + {ok, CachePid} = emqx_ocsp_cache:start_link(), + DataDir = ?config(data_dir, Config), + Type = ssl, + Name = test_ocsp, + ListenerOpts = #{ + ssl_options => + #{ + certfile => filename:join(DataDir, "server.pem"), + ocsp => #{ + enable_ocsp_stapling => true, + responder_url => <<"http://localhost:9877/">>, + issuer_pem => filename:join(DataDir, "ocsp-issuer.pem"), + refresh_http_timeout => 15_000, + refresh_interval => 1_000 + } + } + }, + Conf = #{listeners => #{Type => #{Name => ListenerOpts}}}, + ConfBin = emqx_utils_maps:binary_key_map(Conf), + hocon_tconf:check_plain(emqx_schema, ConfBin, #{required => false, atom_keys => false}), + emqx_config:put_listener_conf(Type, Name, [], ListenerOpts), + snabbkaffe:start_trace(), + [ + {cache_pid, CachePid} + | Config + ]. + +end_per_testcase(t_openssl_client, Config) -> + OCSPResponderOSPid = ?config(ocsp_responder_os_pid, Config), + catch kill_pid(OCSPResponderOSPid), + emqx_common_test_helpers:stop_apps([]), + ok; +end_per_testcase(TestCase, Config) when + TestCase =:= t_update_listener; + TestCase =:= t_validations +-> + Skip = proplists:get_bool(skip_does_not_apply, Config), + case Skip of + true -> + ok; + false -> + emqx_mgmt_api_test_util:end_suite([emqx_conf]), + meck:unload([emqx_ocsp_cache]), + ok + end; +end_per_testcase(t_ocsp_responder_error_responses, Config) -> + CachePid = ?config(cache_pid, Config), + catch gen_server:stop(CachePid), + meck:unload([emqx_ocsp_cache]), + persistent_term:erase({?MODULE, http_response}), + ok; +end_per_testcase(_TestCase, Config) -> + CachePid = ?config(cache_pid, Config), + catch gen_server:stop(CachePid), + meck:unload([emqx_ocsp_cache]), + ok. + +%%-------------------------------------------------------------------- +%% Helper functions +%%-------------------------------------------------------------------- + +%% The real cache makes `emqx_kernel_sup' the heir to its ETS table. +%% In some tests, we don't start the full supervision tree, so we need +%% this dummy process. +spawn_dummy_heir() -> + spawn_link(fun() -> + true = register(emqx_kernel_sup, self()), + receive + stop -> ok + end + end). + +does_module_exist(Mod) -> + case erlang:module_loaded(Mod) of + true -> + true; + false -> + case code:ensure_loaded(Mod) of + ok -> + true; + {module, Mod} -> + true; + _ -> + false + end + end. + +assert_no_http_get() -> + Timeout = 0, + Error = should_be_cached, + assert_no_http_get(Timeout, Error). + +assert_no_http_get(Timeout, Error) -> + receive + {http_get, _URL} -> + error(Error) + after Timeout -> + ok + end. + +assert_http_get(N) -> + assert_http_get(N, 0). + +assert_http_get(0, _Timeout) -> + ok; +assert_http_get(N, Timeout) when N > 0 -> + receive + {http_get, URL} -> + ?assertMatch(<<"http://localhost:9877/", _Request64/binary>>, URL), + ok + after Timeout -> + error({no_http_get, #{mailbox => process_info(self(), messages)}}) + end, + assert_http_get(N - 1, Timeout). 
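+%% Usage note: assert_http_get(N, Timeout) expects N {http_get, URL}
+%% messages within Timeout ms each (i.e. the stubbed responder was
+%% queried N times), while assert_no_http_get/0 fails fast if any
+%% request happened where the cache should have answered.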
+ +openssl_client_command(TLSVsn, RequestStatus, Config) -> + DataDir = ?config(data_dir, Config), + ClientCert = filename:join([DataDir, "client.pem"]), + ClientKey = filename:join([DataDir, "client.key"]), + Cacert = filename:join([DataDir, "ca.pem"]), + Openssl = os:find_executable("openssl"), + StatusOpt = + case RequestStatus of + true -> ["-status"]; + false -> [] + end, + [ + Openssl, + "s_client", + "-connect", + "localhost:8883", + %% needed to trigger `sni_fun' + "-servername", + "localhost", + TLSVsn, + "-CAfile", + Cacert, + "-cert", + ClientCert, + "-key", + ClientKey + ] ++ StatusOpt. + +run_openssl_client(TLSVsn, RequestStatus, Config) -> + Command0 = openssl_client_command(TLSVsn, RequestStatus, Config), + Command = lists:flatten(lists:join(" ", Command0)), + os:cmd(Command). + +%% fixme: for some reason, the port program doesn't return any output +%% when running in OTP 25 using `open_port`, but the `os:cmd` version +%% works fine. +%% the `open_port' version works fine in OTP 24 for some reason. +spawn_openssl_client(TLSVsn, RequestStatus, Config) -> + [Openssl | Args] = openssl_client_command(TLSVsn, RequestStatus, Config), + open_port( + {spawn_executable, Openssl}, + [ + {args, Args}, + binary, + stderr_to_stdout + ] + ). + +spawn_openssl_ocsp_responder(Config) -> + DataDir = ?config(data_dir, Config), + IssuerCert = filename:join([DataDir, "ocsp-issuer.pem"]), + IssuerKey = filename:join([DataDir, "ocsp-issuer.key"]), + Cacert = filename:join([DataDir, "ca.pem"]), + Index = filename:join([DataDir, "index.txt"]), + Openssl = os:find_executable("openssl"), + open_port( + {spawn_executable, Openssl}, + [ + {args, [ + "ocsp", + "-ignore_err", + "-port", + "9877", + "-CA", + Cacert, + "-rkey", + IssuerKey, + "-rsigner", + IssuerCert, + "-index", + Index + ]}, + binary, + stderr_to_stdout + ] + ). + +kill_pid(OSPid) -> + os:cmd("kill -9 " ++ integer_to_list(OSPid)). + +test_ocsp_connection(TLSVsn, WithRequestStatus = true, Config) -> + OCSPOutput = run_openssl_client(TLSVsn, WithRequestStatus, Config), + ?assertMatch( + {match, _}, + re:run(OCSPOutput, "OCSP Response Status: successful"), + #{mailbox => process_info(self(), messages)} + ), + ?assertMatch( + {match, _}, + re:run(OCSPOutput, "Cert Status: good"), + #{mailbox => process_info(self(), messages)} + ), + ok; +test_ocsp_connection(TLSVsn, WithRequestStatus = false, Config) -> + OCSPOutput = run_openssl_client(TLSVsn, WithRequestStatus, Config), + ?assertMatch( + nomatch, + re:run(OCSPOutput, "Cert Status: good", [{capture, none}]), + #{mailbox => process_info(self(), messages)} + ), + ok. + +ensure_port_open(Port) -> + do_ensure_port_open(Port, 10). + +do_ensure_port_open(Port, 0) -> + error({port_not_open, Port}); +do_ensure_port_open(Port, N) when N > 0 -> + Timeout = 1_000, + case gen_tcp:connect("localhost", Port, [], Timeout) of + {ok, Sock} -> + gen_tcp:close(Sock), + ok; + {error, _} -> + ct:sleep(500), + do_ensure_port_open(Port, N - 1) + end. + +get_sni_fun(ListenerID) -> + #{opts := Opts} = emqx_listeners:find_by_id(ListenerID), + SSLOpts = proplists:get_value(ssl_options, Opts), + proplists:get_value(sni_fun, SSLOpts). + +openssl_version() -> + Res0 = string:trim(os:cmd("openssl version"), trailing), + [_, Res] = string:split(Res0, " "), + {match, [Version]} = re:run(Res, "^([^ ]+)", [{capture, first, list}]), + Version. + +setup_openssl_ocsp(Config) -> + OCSPResponderPort = spawn_openssl_ocsp_responder(Config), + {os_pid, OCSPOSPid} = erlang:port_info(OCSPResponderPort, os_pid), + %%%%%%%% Warning!!! 
+ %% Apparently, openssl 3.0.7 introduced a bug in the responder + %% that makes it hang forever if one probes the port with + %% `gen_tcp:open' / `gen_tcp:close'... Comment this out if + %% openssl gets updated in CI or in your local machine. + OpenSSLVersion = openssl_version(), + ct:pal("openssl version: ~p", [OpenSSLVersion]), + case OpenSSLVersion of + "3." ++ _ -> + %% hope that the responder has started... + ok; + _ -> + ensure_port_open(9877) + end, + ct:sleep(1_000), + {OCSPResponderPort, OCSPOSPid}. + +request(Method, Url, QueryParams, Body) -> + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + case emqx_mgmt_api_test_util:request_api(Method, Url, QueryParams, AuthHeader, Body, Opts) of + {ok, {Reason, Headers, BodyR}} -> + {ok, {Reason, Headers, emqx_utils_json:decode(BodyR, [return_maps])}}; + Error -> + Error + end. + +get_listener_via_api(ListenerId) -> + Path = emqx_mgmt_api_test_util:api_path(["listeners", ListenerId]), + request(get, Path, [], []). + +update_listener_via_api(ListenerId, NewConfig) -> + Path = emqx_mgmt_api_test_util:api_path(["listeners", ListenerId]), + request(put, Path, [], NewConfig). + +put_http_response(Response) -> + persistent_term:put({?MODULE, http_response}, Response). + +%%-------------------------------------------------------------------- +%% Test cases +%%-------------------------------------------------------------------- + +t_request_ocsp_response(_Config) -> + ?check_trace( + begin + ListenerID = <<"ssl:test_ocsp">>, + %% not yet cached. + ?assertEqual([], ets:tab2list(?CACHE_TAB)), + ?assertEqual( + {ok, <<"ocsp response">>}, + emqx_ocsp_cache:fetch_response(ListenerID) + ), + assert_http_get(1), + ?assertMatch([{_, <<"ocsp response">>}], ets:tab2list(?CACHE_TAB)), + %% already cached; should not perform request again. + ?assertEqual( + {ok, <<"ocsp response">>}, + emqx_ocsp_cache:fetch_response(ListenerID) + ), + assert_no_http_get(), + ok + end, + fun(Trace) -> + ?assert( + ?strict_causality( + #{?snk_kind := ocsp_cache_miss, listener_id := _ListenerID}, + #{?snk_kind := ocsp_http_fetch_and_cache, listener_id := _ListenerID}, + Trace + ) + ), + ?assertMatch( + [_], + ?of_kind(ocsp_cache_miss, Trace) + ), + ?assertMatch( + [_], + ?of_kind(ocsp_http_fetch_and_cache, Trace) + ), + ?assertMatch( + [_], + ?of_kind(ocsp_cache_hit, Trace) + ), + ok + end + ). + +t_request_ocsp_response_restart_cache(Config) -> + process_flag(trap_exit, true), + CachePid = ?config(cache_pid, Config), + ListenerID = <<"ssl:test_ocsp">>, + ?check_trace( + begin + [] = ets:tab2list(?CACHE_TAB), + {ok, _} = emqx_ocsp_cache:fetch_response(ListenerID), + ?wait_async_action( + begin + Ref = monitor(process, CachePid), + exit(CachePid, kill), + receive + {'DOWN', Ref, process, CachePid, killed} -> + ok + after 1_000 -> + error(cache_not_killed) + end, + {ok, _} = emqx_ocsp_cache:start_link(), + ok + end, + #{?snk_kind := ocsp_cache_init} + ), + {ok, _} = emqx_ocsp_cache:fetch_response(ListenerID), + ok + end, + fun(Trace) -> + %% Only one fetch because the cache table was preserved by + %% its heir ("emqx_kernel_sup"). + ?assertMatch( + [_], + ?of_kind(ocsp_http_fetch_and_cache, Trace) + ), + assert_http_get(1), + ok + end + ). + +t_request_ocsp_response_bad_http_status(_Config) -> + TestPid = self(), + meck:expect( + emqx_ocsp_cache, + http_get, + fun(URL, _HTTPTimeout) -> + TestPid ! {http_get, URL}, + {ok, {{"HTTP/1.0", 404, 'Not Found'}, [], <<"not found">>}} + end + ), + ListenerID = <<"ssl:test_ocsp">>, + %% not yet cached. 
+ ?assertEqual([], ets:tab2list(?CACHE_TAB)), + ?assertEqual( + error, + emqx_ocsp_cache:fetch_response(ListenerID) + ), + assert_http_get(1), + ?assertEqual([], ets:tab2list(?CACHE_TAB)), + ok. + +t_request_ocsp_response_timeout(_Config) -> + TestPid = self(), + meck:expect( + emqx_ocsp_cache, + http_get, + fun(URL, _HTTPTimeout) -> + TestPid ! {http_get, URL}, + {error, timeout} + end + ), + ListenerID = <<"ssl:test_ocsp">>, + %% not yet cached. + ?assertEqual([], ets:tab2list(?CACHE_TAB)), + ?assertEqual( + error, + emqx_ocsp_cache:fetch_response(ListenerID) + ), + assert_http_get(1), + ?assertEqual([], ets:tab2list(?CACHE_TAB)), + ok. + +t_register_listener(_Config) -> + ListenerID = <<"ssl:test_ocsp">>, + Conf = emqx_config:get_listener_conf(ssl, test_ocsp, []), + %% should fetch and cache immediately + {ok, {ok, _}} = + ?wait_async_action( + emqx_ocsp_cache:register_listener(ListenerID, Conf), + #{?snk_kind := ocsp_http_fetch_and_cache, listener_id := ListenerID} + ), + assert_http_get(1), + ?assertMatch([{_, <<"ocsp response">>}], ets:tab2list(?CACHE_TAB)), + ok. + +t_register_twice(_Config) -> + ListenerID = <<"ssl:test_ocsp">>, + Conf = emqx_config:get_listener_conf(ssl, test_ocsp, []), + {ok, {ok, _}} = + ?wait_async_action( + emqx_ocsp_cache:register_listener(ListenerID, Conf), + #{?snk_kind := ocsp_http_fetch_and_cache, listener_id := ListenerID} + ), + assert_http_get(1), + ?assertMatch([{_, <<"ocsp response">>}], ets:tab2list(?CACHE_TAB)), + %% should have no problem in registering the same listener again. + %% this prompts an immediate refresh. + {ok, {ok, _}} = + ?wait_async_action( + emqx_ocsp_cache:register_listener(ListenerID, Conf), + #{?snk_kind := ocsp_http_fetch_and_cache, listener_id := ListenerID} + ), + ok. + +t_refresh_periodically(_Config) -> + ListenerID = <<"ssl:test_ocsp">>, + Conf = emqx_config:get_listener_conf(ssl, test_ocsp, []), + %% should refresh periodically + {ok, SubRef} = + snabbkaffe:subscribe( + fun + (#{?snk_kind := ocsp_http_fetch_and_cache, listener_id := ListenerID0}) -> + ListenerID0 =:= ListenerID; + (_) -> + false + end, + _NEvents = 2, + _Timeout = 10_000 + ), + ok = emqx_ocsp_cache:register_listener(ListenerID, Conf), + ?assertMatch({ok, [_, _]}, snabbkaffe:receive_events(SubRef)), + assert_http_get(2), + ok. + +t_sni_fun_success(_Config) -> + ListenerID = <<"ssl:test_ocsp">>, + ServerName = "localhost", + ?assertEqual( + [ + {certificate_status, #certificate_status{ + status_type = ?CERTIFICATE_STATUS_TYPE_OCSP, + response = <<"ocsp response">> + }} + ], + emqx_ocsp_cache:sni_fun(ServerName, ListenerID) + ), + ok. + +t_sni_fun_http_error(_Config) -> + meck:expect( + emqx_ocsp_cache, + http_get, + fun(_URL, _HTTPTimeout) -> + {error, timeout} + end + ), + ListenerID = <<"ssl:test_ocsp">>, + ServerName = "localhost", + ?assertEqual( + [], + emqx_ocsp_cache:sni_fun(ServerName, ListenerID) + ), + ok. + +%% check that we can start with a non-ocsp stapling listener and +%% restart it with the new ocsp config. +t_update_listener(Config) -> + case proplists:get_bool(skip_does_not_apply, Config) of + true -> + ok; + false -> + do_t_update_listener(Config) + end. 
+ +do_t_update_listener(Config) -> + DataDir = ?config(data_dir, Config), + Keyfile = filename:join([DataDir, "server.key"]), + Certfile = filename:join([DataDir, "server.pem"]), + Cacertfile = filename:join([DataDir, "ca.pem"]), + IssuerPemPath = filename:join([DataDir, "ocsp-issuer.pem"]), + {ok, IssuerPem} = file:read_file(IssuerPemPath), + + %% no ocsp at first + ListenerId = "ssl:default", + {ok, {{_, 200, _}, _, ListenerData0}} = get_listener_via_api(ListenerId), + ?assertMatch( + #{ + <<"enable_ocsp_stapling">> := false, + <<"refresh_http_timeout">> := _, + <<"refresh_interval">> := _ + }, + emqx_utils_maps:deep_get([<<"ssl_options">>, <<"ocsp">>], ListenerData0, undefined) + ), + assert_no_http_get(), + + %% configure ocsp + OCSPConfig = + #{ + <<"ssl_options">> => + #{ + <<"keyfile">> => Keyfile, + <<"certfile">> => Certfile, + <<"cacertfile">> => Cacertfile, + <<"ocsp">> => + #{ + <<"enable_ocsp_stapling">> => true, + %% we use the file contents to check that + %% the API converts that to an internally + %% managed file + <<"issuer_pem">> => IssuerPem, + <<"responder_url">> => <<"http://localhost:9877">>, + %% for quicker testing; min refresh in tests is 5 s. + <<"refresh_interval">> => <<"5s">> + } + } + }, + ListenerData1 = emqx_utils_maps:deep_merge(ListenerData0, OCSPConfig), + {ok, {_, _, ListenerData2}} = update_listener_via_api(ListenerId, ListenerData1), + ?assertMatch( + #{ + <<"ssl_options">> := + #{ + <<"ocsp">> := + #{ + <<"enable_ocsp_stapling">> := true, + <<"issuer_pem">> := _, + <<"responder_url">> := _ + } + } + }, + ListenerData2 + ), + %% issuer pem should have been uploaded and saved to a new + %% location + ?assertNotEqual( + IssuerPemPath, + emqx_utils_maps:deep_get( + [<<"ssl_options">>, <<"ocsp">>, <<"issuer_pem">>], + ListenerData2 + ) + ), + ?assertNotEqual( + IssuerPem, + emqx_utils_maps:deep_get( + [<<"ssl_options">>, <<"ocsp">>, <<"issuer_pem">>], + ListenerData2 + ) + ), + assert_http_get(1, 5_000), + + %% Disable OCSP Stapling; the periodic refreshes should stop + RefreshInterval = emqx_config:get([listeners, ssl, default, ssl_options, ocsp, refresh_interval]), + OCSPConfig1 = + #{ + <<"ssl_options">> => + #{ + <<"ocsp">> => + #{ + <<"enable_ocsp_stapling">> => false + } + } + }, + ListenerData3 = emqx_utils_maps:deep_merge(ListenerData2, OCSPConfig1), + {ok, {_, _, ListenerData4}} = update_listener_via_api(ListenerId, ListenerData3), + ?assertMatch( + #{ + <<"ssl_options">> := + #{ + <<"ocsp">> := + #{ + <<"enable_ocsp_stapling">> := false + } + } + }, + ListenerData4 + ), + + assert_no_http_get(2 * RefreshInterval, should_stop_refreshing), + + ok. + +t_double_unregister(_Config) -> + ListenerID = <<"ssl:test_ocsp">>, + Conf = emqx_config:get_listener_conf(ssl, test_ocsp, []), + ?check_trace( + begin + {ok, {ok, _}} = + ?wait_async_action( + emqx_ocsp_cache:register_listener(ListenerID, Conf), + #{?snk_kind := ocsp_http_fetch_and_cache, listener_id := ListenerID}, + 5_000 + ), + assert_http_get(1), + + {ok, {ok, _}} = + ?wait_async_action( + emqx_ocsp_cache:unregister_listener(ListenerID), + #{?snk_kind := ocsp_cache_listener_unregistered, listener_id := ListenerID}, + 5_000 + ), + + %% Should be idempotent and not crash + {ok, {ok, _}} = + ?wait_async_action( + emqx_ocsp_cache:unregister_listener(ListenerID), + #{?snk_kind := ocsp_cache_listener_unregistered, listener_id := ListenerID}, + 5_000 + ), + ok + end, + [] + ), + + ok. 
+ +t_ocsp_responder_error_responses(_Config) -> + ListenerId = <<"ssl:test_ocsp">>, + Conf = emqx_config:get_listener_conf(ssl, test_ocsp, []), + ?check_trace( + begin + %% successful response without headers + put_http_response({ok, {200, <<"ocsp_response">>}}), + {ok, {ok, _}} = + ?wait_async_action( + emqx_ocsp_cache:register_listener(ListenerId, Conf), + #{?snk_kind := ocsp_http_fetch_and_cache, headers := false}, + 1_000 + ), + + %% error response with headers + put_http_response({ok, {{"HTTP/1.0", 500, "Internal Server Error"}, [], <<"error">>}}), + {ok, {ok, _}} = + ?wait_async_action( + emqx_ocsp_cache:register_listener(ListenerId, Conf), + #{?snk_kind := ocsp_http_fetch_bad_code, code := 500, headers := true}, + 1_000 + ), + + %% error response without headers + put_http_response({ok, {500, <<"error">>}}), + {ok, {ok, _}} = + ?wait_async_action( + emqx_ocsp_cache:register_listener(ListenerId, Conf), + #{?snk_kind := ocsp_http_fetch_bad_code, code := 500, headers := false}, + 1_000 + ), + + %% econnrefused + put_http_response( + {error, + {failed_connect, [ + {to_address, {"localhost", 9877}}, + {inet, [inet], econnrefused} + ]}} + ), + {ok, {ok, _}} = + ?wait_async_action( + emqx_ocsp_cache:register_listener(ListenerId, Conf), + #{?snk_kind := ocsp_http_fetch_error, error := {failed_connect, _}}, + 1_000 + ), + + %% timeout + put_http_response({error, timeout}), + {ok, {ok, _}} = + ?wait_async_action( + emqx_ocsp_cache:register_listener(ListenerId, Conf), + #{?snk_kind := ocsp_http_fetch_error, error := timeout}, + 1_000 + ), + + ok + end, + [] + ), + ok. + +t_unknown_requests(_Config) -> + emqx_ocsp_cache ! unknown, + ?assertEqual(ok, gen_server:cast(emqx_ocsp_cache, unknown)), + ?assertEqual({error, {unknown_call, unknown}}, gen_server:call(emqx_ocsp_cache, unknown)), + ok. + +t_validations(Config) -> + case proplists:get_bool(skip_does_not_apply, Config) of + true -> + ok; + false -> + do_t_validations(Config) + end. 
+ +do_t_validations(_Config) -> + ListenerId = <<"ssl:default">>, + {ok, {{_, 200, _}, _, ListenerData0}} = get_listener_via_api(ListenerId), + + ListenerData1 = + emqx_utils_maps:deep_merge( + ListenerData0, + #{ + <<"ssl_options">> => + #{<<"ocsp">> => #{<<"enable_ocsp_stapling">> => true}} + } + ), + {error, {_, _, ResRaw1}} = update_listener_via_api(ListenerId, ListenerData1), + #{<<"code">> := <<"BAD_REQUEST">>, <<"message">> := MsgRaw1} = + emqx_utils_json:decode(ResRaw1, [return_maps]), + ?assertMatch( + #{ + <<"mismatches">> := + #{ + <<"listeners:ssl_not_required_bind">> := + #{ + <<"reason">> := + <<"The responder URL is required for OCSP stapling">> + } + } + }, + emqx_utils_json:decode(MsgRaw1, [return_maps]) + ), + + ListenerData2 = + emqx_utils_maps:deep_merge( + ListenerData0, + #{ + <<"ssl_options">> => + #{ + <<"ocsp">> => #{ + <<"enable_ocsp_stapling">> => true, + <<"responder_url">> => <<"http://localhost:9877">> + } + } + } + ), + {error, {_, _, ResRaw2}} = update_listener_via_api(ListenerId, ListenerData2), + #{<<"code">> := <<"BAD_REQUEST">>, <<"message">> := MsgRaw2} = + emqx_utils_json:decode(ResRaw2, [return_maps]), + ?assertMatch( + #{ + <<"mismatches">> := + #{ + <<"listeners:ssl_not_required_bind">> := + #{ + <<"reason">> := + <<"The issuer PEM path is required for OCSP stapling">> + } + } + }, + emqx_utils_json:decode(MsgRaw2, [return_maps]) + ), + + ListenerData3a = + emqx_utils_maps:deep_merge( + ListenerData0, + #{ + <<"ssl_options">> => + #{ + <<"ocsp">> => #{ + <<"enable_ocsp_stapling">> => true, + <<"responder_url">> => <<"http://localhost:9877">>, + <<"issuer_pem">> => <<"some_file">> + } + } + } + ), + ListenerData3 = emqx_utils_maps:deep_remove( + [<<"ssl_options">>, <<"certfile">>], ListenerData3a + ), + {error, {_, _, ResRaw3}} = update_listener_via_api(ListenerId, ListenerData3), + #{<<"code">> := <<"BAD_REQUEST">>, <<"message">> := MsgRaw3} = + emqx_utils_json:decode(ResRaw3, [return_maps]), + %% we can't remove certfile now, because it has default value. + ?assertMatch( + <<"{bad_ssl_config,#{file_read => enoent,pem_check => invalid_pem", _/binary>>, + MsgRaw3 + ), + ok. + +t_unknown_error_fetching_ocsp_response(_Config) -> + ListenerID = <<"ssl:test_ocsp">>, + TestPid = self(), + ok = meck:expect( + emqx_ocsp_cache, + http_get, + fun(_RequestURI, _HTTPTimeout) -> + TestPid ! error_raised, + meck:exception(error, something_went_wrong) + end + ), + ?assertEqual(error, emqx_ocsp_cache:fetch_response(ListenerID)), + receive + error_raised -> ok + after 200 -> ct:fail("should have tried to fetch ocsp response") + end, + ok. + +t_openssl_client(Config) -> + TLSVsn = ?config(tls_vsn, Config), + WithStatusRequest = ?config(status_request, Config), + %% ensure ocsp response is already cached. + ListenerID = <<"ssl:default">>, + ?assertMatch( + {ok, _}, + emqx_ocsp_cache:fetch_response(ListenerID), + #{msgs => process_info(self(), messages)} + ), + timer:sleep(500), + test_ocsp_connection(TLSVsn, WithStatusRequest, Config). 
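+%% Manual spot check (mirrors openssl_client_command/3 above; file paths
+%% are placeholders for the suite's data dir):
+%%   openssl s_client -connect localhost:8883 -servername localhost \
+%%     -status -CAfile ca.pem -cert client.pem -key client.key
+%% A working stapling setup prints "OCSP Response Status: successful".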
diff --git a/apps/emqx/test/emqx_ocsp_cache_SUITE_data/ca.pem b/apps/emqx/test/emqx_ocsp_cache_SUITE_data/ca.pem new file mode 100644 index 000000000..eaabd2445 --- /dev/null +++ b/apps/emqx/test/emqx_ocsp_cache_SUITE_data/ca.pem @@ -0,0 +1,68 @@ +-----BEGIN CERTIFICATE----- +MIIF+zCCA+OgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwbzELMAkGA1UEBhMCU0Ux +EjAQBgNVBAgMCVN0b2NraG9sbTESMBAGA1UEBwwJU3RvY2tob2xtMRIwEAYDVQQK +DAlNeU9yZ05hbWUxETAPBgNVBAsMCE15Um9vdENBMREwDwYDVQQDDAhNeVJvb3RD +QTAeFw0yMzAxMTIxMzA4MTZaFw0zMzAxMDkxMzA4MTZaMGsxCzAJBgNVBAYTAlNF +MRIwEAYDVQQIDAlTdG9ja2hvbG0xEjAQBgNVBAoMCU15T3JnTmFtZTEZMBcGA1UE +CwwQTXlJbnRlcm1lZGlhdGVDQTEZMBcGA1UEAwwQTXlJbnRlcm1lZGlhdGVDQTCC +AiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALQG7dMeU/y9HDNHzhydR0bm +wN9UGplqJOJPwqJRaZZcrn9umgJ9SU2il2ceEVxMDwzBWCRKJO5/H9A9k13SqsXM +2c2c9xXfIF1kb820lCm1Uow5hZ/auDjxliNk9kNJDigCRi3QoIs/dVeWzFsgEC2l +gxRqauN2eNFb6/yXY788YALHBsCRV2NFOFXxtPsvLXpD9Q/8EqYsSMuLARRdHVNU +ryaEF5lhShpcuz0TlIuTy2TiuXJUtJ+p7a4Z7friZ6JsrmQWsVQBj44F8TJRHWzW +C7vm9c+dzEX9eqbr5iPL+L4ctMW9Lz6ePcYfIXne6CElusRUf8G+xM1uwovF9bpV ++9IqY7tAu9G1iY9iNtJgNNDKOCcOGKcZCx6Cg1XYOEKReNnUMazvYeqRrrjV5WQ0 +vOcD5zcBRNTXCddCLa7U0guXP9mQrfuk4NTH1Bt77JieTJ8cfDXHwtaKf6aGbmZP +wl1Xi/GuXNUP/xeog78RKyFwBmjt2JKwvWzMpfmH4mEkG9moh2alva+aEz6LIJuP +16g6s0Q6c793/OvUtpNcewHw4Vjn39LD9o6VLp854G4n8dVpUWSbWS+sXD1ZE69H +g/sMNMyq+09ufkbewY8xoCm/rQ1pqDZAVMWsstJEaYu7b/eb7R+RGOj1YECCV/Yp +EZPdDotbSNRkIi2d/a1NAgMBAAGjgaQwgaEwHQYDVR0OBBYEFExwhjsVUom6tQ+S +qq6xMUETvnPzMB8GA1UdIwQYMBaAFD90kfU5pc5l48THu0Ayj9SNpHuhMBIGA1Ud +EwEB/wQIMAYBAf8CAQAwDgYDVR0PAQH/BAQDAgGGMDsGA1UdHwQ0MDIwMKAuoCyG +Kmh0dHA6Ly9sb2NhbGhvc3Q6OTg3OC9pbnRlcm1lZGlhdGUuY3JsLnBlbTANBgkq +hkiG9w0BAQsFAAOCAgEAK6NgdWQYtPNKQNBGjsgtgqTRh+k30iqSO6Y3yE1KGABO +EuQdVqkC2qUIbCB0M0qoV0ab50KNLfU6cbshggW4LDpcMpoQpI05fukNh1jm3ZuZ +0xsB7vlmlsv00tpqmfIl/zykPDynHKOmFh/hJP/KetMy4+wDv4/+xP31UdEj5XvG +HvMtuqOS23A+H6WPU7ol7KzKBnU2zz/xekvPbUD3JqV+ynP5bgbIZHAndd0o9T8e +NFX23Us4cTenU2/ZlOq694bRzGaK+n3Ksz995Nbtzv5fbUgqmf7Mcq4iHGRVtV11 +MRyBrsXZp2vbF63c4hrf2Zd6SWRoaDKRhP2DMhajpH9zZASSTlfejg/ZRO2s+Clh +YrSTkeMAdnRt6i/q4QRcOTCfsX75RFM5v67njvTXsSaSTnAwaPi78tRtf+WSh0EP +VVPzy++BszBVlJ1VAf7soWZHCjZxZ8ZPqVTy5okoHwWQ09WmYe8GfulDh1oj0wbK +3FjN7bODWHJN+bFf5aQfK+tumYKoPG8RXL6QxpEzjFWjxhIMJHHMKfDWnAV1o1+7 +/1/aDzq7MzEYBbrgQR7oE5ZHtyqhCf9LUgw0Kr7/8QWuNAdeDCJzjXRROU0hJczp +dOyfRlLbHmLLmGOnROlx6LsGNQ17zuz6SPi7ei8/ylhykawDOAGkM1+xFakmQhM= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFzzCCA7egAwIBAgIUYjc7hD7/UJ0/VPADfNfp/WpOwRowDQYJKoZIhvcNAQEL +BQAwbzELMAkGA1UEBhMCU0UxEjAQBgNVBAgMCVN0b2NraG9sbTESMBAGA1UEBwwJ +U3RvY2tob2xtMRIwEAYDVQQKDAlNeU9yZ05hbWUxETAPBgNVBAsMCE15Um9vdENB +MREwDwYDVQQDDAhNeVJvb3RDQTAeFw0yMzAxMTIxMzA4MTRaFw00MzAxMDcxMzA4 +MTRaMG8xCzAJBgNVBAYTAlNFMRIwEAYDVQQIDAlTdG9ja2hvbG0xEjAQBgNVBAcM +CVN0b2NraG9sbTESMBAGA1UECgwJTXlPcmdOYW1lMREwDwYDVQQLDAhNeVJvb3RD +QTERMA8GA1UEAwwITXlSb290Q0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCnBwSOYVJw47IoMHMXTVDtOYvUt3rqsurEhFcB4O8xmf2mmwr6m7s8A5Ft +AvAehg1GvnXT3t/KiyU7BK+acTwcErGyZwS2wvdB0lpHWSpOn/u5y+4ZETvQefcj +ZTdDOM9VN5nutpitgNb+1yL8sqSexfVbY7DnYYvFjOVBYoP/SGvM9jVjCad+0WL3 +FhuD+L8QAxzCieX3n9UMymlFwINQuEc+TDjuNcEqt+0J5EgS1fwzxb2RCVL0TNv4 +9a71hFGCNRj20AeZm99hbdufm7+0AFO7ocV5q43rLrWFUoBzqKPYIjga/cv/UdWZ +c5RLRXw3JDSrCqkf/mOlaEhNPlmWRF9MSus5Da3wuwgGCaVzmrf30rWR5aHHcscG +e+AOgJ4HayvBUQeb6ZlRXc0YlACiLToMKxuyxDyUcDfVEXpUIsDILF8dkiVQxEU3 +j9g6qjXiqPVdNiwpqXfBKObj8vNCzORnoHYs8cCgib3RgDVWeqkDmlSwlZE7CvQh +U4Loj4l7813xxzYEKkVaT1JdXPWu42CG/b4Y/+f4V+3rkJkYzUwndX6kZNksIBai 
+phmtvKt+CTdP1eAbT+C9AWWF3PT31+BIhuT0u9tR8BVSkXdQB8dG4M/AAJcTo640 +0mdYYOXT153gEKHJuUBm750ZTy+r6NjNvpw8VrMAakJwHqnIdQIDAQABo2MwYTAd +BgNVHQ4EFgQUP3SR9TmlzmXjxMe7QDKP1I2ke6EwHwYDVR0jBBgwFoAUP3SR9Tml +zmXjxMe7QDKP1I2ke6EwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYw +DQYJKoZIhvcNAQELBQADggIBAFMFv4C+I0+xOAb9v6G/IOpfPBZ1ez31EXKJJBra +lulP4nRHQMeb310JS8BIeQ3dl+7+PkSxPABZSwc3jkxdSMvhc+Z4MQtTgos+Qsjs +gH7sTqwWeeQ0lHYxWmkXijrh5OPRZwTKzYQlkcn85BCUXl2KDuNEdiqPbDTao+lc +lA0/UAvC6NCyFKq/jqf4CmW5Kx6yG1v1LaE+IXn7cbIXj+DaehocVXi0wsXqj03Q +DDUHuLHZP+LBsg4e91/0Jy2ekNRTYJifSqr+9ufHl0ZX1pFDZyf396IgZ5CQZ0PJ +nRxZHlCfsxWxmxxdy3FQSE6YwXhdTjjoAa1ApZcKkkt1beJa6/oRLze/ux5x+5q+ +4QczufHd6rjoKBi6BM3FgFQ8As5iNohHXlMHd/xITo1Go3CWw2j9TGH5vzksOElK +B0mcwwt2zwNEjvfytc+tI5jcfGN3tiT5fVHS8hw9dWKevypLL+55Ua9G8ZgDHasT +XFRJHgmnbyFcaAe26D2dSKmhC9u2mHBH+MaI8dj3e7wNBfpxNgp41aFIk+QTmiFW +VXFED6DHQ/Mxq93ACalHdYg18PlIYClbT6Pf2xXBnn33YPhn5xzoTZ+cDH/RpaQp +s0UUTSJT1UTXgtXPnZWQfvKlMjJEIiVFiLEC0sgZRlWuZDRAY0CdZJJxvQp59lqu +cbTm +-----END CERTIFICATE----- diff --git a/apps/emqx/test/emqx_ocsp_cache_SUITE_data/client.key b/apps/emqx/test/emqx_ocsp_cache_SUITE_data/client.key new file mode 100644 index 000000000..a1c46aa5c --- /dev/null +++ b/apps/emqx/test/emqx_ocsp_cache_SUITE_data/client.key @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCmfZmBAOZJ8xjP +YkpyQxTGZ40vIwOuylwSow12idWN6jcW9g5aIip+B2oKrfzR7PYsxbDodcj/KOpQ +GwCFAujSYgYviiOsmATQ1meNocnnWjAsybw+dSXK/ZjfrVgIaJF7RHaLiDtq5TI4 +b4KjUFyh5NILIc+zfZqoNU6khUF0bcOBAG2BFaBzRf+a/hgZXEPyEnoqFK5J5k+D +DSlKXDbOTEHhXG4QFT1hZataxptD1nTEFRYuzfmh/g4RDvWtawm9YU3j/V0Un7t/ +Taj0fAXNi30TzKOVaVcDrkVtDFHe2hX3lOJd53I5NpS7asaq+aTNytz+I3Bf/a4v +khEgrKpjBSXXm/+Vw5NzsXNwKddSUGywmIbV2YBYnK+0DwhOXLsTPh3pv6931NVx +pifW0nM4Ur6XCDHOPVX/jIZZ819bzAlZZ3BgMTz7pqT9906lmNRQBgSgr+Zaw9gj +VhLg1VDfwF85eanhbzk5ITnffR+s2conZr2g+LEDsq2dJv/sEbYuHBNBkDthn439 +MgNq1nr3PV0hn8pNcgS5ZFUw+fN8403RY9TYLssB/FFYREDCax0j75qL3E7LbZK8 +JfsP8uh1e3PdR64TgtoYoTKuwtIqelmh+ryAWFjaXLPoP/AqYk1VcRCevOXUKw6L +iskdukplk9cy2cPLcm+EP+2Js3B28QIDAQABAoICABxBnVOcZjk/QaLy1N07HtPE +f9zz5Zxc+k7sbuzDHGQzT8m9FXb9LPaKRhhNaqbrP2WeYLW3RdduZ4QUbRxl/8Mz +AUdAu+i/PTP/a4BJaOWztBDp5SG5iqI+s5skxZfZvXUtC6yHQMRV5VXYMRUMHsiY +OADNKn3VT7IEKBZ6ij8bIO7sNmmN1NczllvFC6yEMQDs22B4dZMTvENq8KrO5ztQ +jG7V29Utcact1Oz5X6EeDN+5j3P+n8M7RcJl5lLaI4NJeCl9VvaY3H7Q3J+vy+FU +bvQ1Cz9gqzSz91L4YA3BODC2i0uyK/vjVE9Roimi6HJH34VfWONlv9IRiYgg3eLd +xrWe/qZkxcfrHmgyH0a6fxwpT58T3d6WH0I/HwSbJuVvm2AhLy+7zXdLNRLrlE+n +UfrJDgTwiTPlJA5JzSVGKVBSOVQs9G52aZ0IAvgN9uHHFhhqeJ3naax1q/JtRfDo +O0w5Ga2KjAJDcAQj/Cq5+LMSI1Bxl46db17EFnA//X3Oxhv93CvsTULPiOJ7fdYC +3X7YCJ33a7w4B8+FxmiTYLe+aR6CC8fsu4qYccCctPUje1MzUkw6gvbWSyxkbmW7 +kGTWKx4E/SL4cc+DjoC1h37RtqghDDxtYhA42wWiocDXoKPlWJoIkG1UUO5f6/2N +cKPzQx1f23UTvIRkMYe1AoIBAQDR94YzLncfuY4DhHpqJRjv8xXfOif+ARWicnma +CwePpv80YoQvc7B9rbPA9qZ5EG9eQF62FkTrvCwbAhA5L11aJsXxnSvZREQcdteO +kQPnKXAJbHYh5yto/HhezdtIMmoZCGpHLmsiK20QnRyA0InKsFCKBpi20gFzOKMx +DwuQEoANHIwUscHnansM958eKAolujfjjOeFiK+j4Vd6P0neV8EQTl6A0+R/l5td +l69wySW7tB4xfOon5Y0D+AfGMH3alZs3ymAjBNKZIk+2hKvhDRa7IqwlckwQq6by +Ku25LKeRVt3wOkfJitSDgiEsNA5oJQ90A4ny6hIOAvLWir6tAoIBAQDK/fPVaT7r +7tNjzaMgeQ/VKGUauCMbPC7ST2cEvZMp9YFhdKbl/TwhC8lpJqrsKhXyKNz20FOL +7m8XjHu4mdSs6zaPvkMnUboge9pcnIKeS5nRVsW0CRuSc4A3qhrvBp9av77gIjnr +XJ6RyFihDji1P6RVoylyyR8k/qiZupMg7UK3vbuTpJqARObfaaprOwqVItkJX2vf +XF7qfBCnik1jlZKWZq+9dbhz8KP4KWpKINrwIuvlAQnTJpc15beHxMEt73hxAY3A +n3Iydtm5zsBcOLyLLgySUOsp0zlcAv0iHP3ShsFP2WeQLKR9Qapc58kkJ1lmlu71 +QdahwonpXjXVAoIBAEQnfYc1iPNiTsezg+zad9rDZBEeloaroXMmh3RKKj0l7ub5 
+J4Ejo2FYNeXn6ieX/x5v9I5UcjC21vY5WDzHtBykQ1JnOyl+MEGxDc04IzUwzS4x +57KfkAa3FPdpCMnJm4jeo2jRl3Ly96cR6IOjrWZ+jtYOyBln15KoCsjM4mr0pl4b +Kxk4jgFpHeIaqqqmQoz2gle5kBlXQfQHHFcRHhAvGfsKBUD6Bsyn0IWzy/3nPPlN +wRM9QeCLcZedNiDN8rw2HbkhVs1nLlkIuyk6rXQSxJMf8RMCo9Axd7JZ3uphpU7X +DJmCwXSZPNwnLE9l4ltJ1FdLIscX1Z54tIyRYs0CggEBAIVPgnMFS21myy0gP6Fz +4BH9FWkWxPd97sHvo5hZZ+yGbxGxqmoghPyu4PdNjbLLcN44N+Vfq36aeBrfB+GU +JTfqwUpliXSpF7N9o0pu/tk2jS4N7ojt8k2bzPjBni6cCstuYcyQrbkEep8DFDGx +RUzDHwmevfnEW8/P7qoG/dkB+G7zC91KnKzgkz7mBiWmAK0w1ZhyMkXeQ/d6wvVE +vs5HzJ05kvC5/wklYIn5qPRF34MVbBZZODqTfXrIAmAHt1aTjmWov49hJ348z4BX +Z70pBanh9B+jRM2TCniC/fsJTyiTlyD5hioJJ32bQmcBUfeMYAof1Y78ThityiSY +2oECggEAYdkz6z+1hIMI2nIMtei1n5bLV4bWmS1nkZ3pBSMkbS7VJFAxZ53xJi0S +StSs/bka+akvnYEoFAGhVtiaz4497qnUiquf/aBs4TUHfNGn22/LN5b8vs51ugil +RXejaJjPLqL6jmXz5T4+TJGcH5kL6NDtYkT3IEtv5uWkQkBs0Z1Juf34nVjMbozC +bohyOyCMOLt7HqcUpUtevSK7SXmyU4yd2UyRqFMFPi4RJjxQWFZmNFC5S1PsZBh+ +OOMNAJ1F2h2fC7KdNVBpdoNsOAPxdCNxbwGKiNHwnukvF9uvaDIw3jqKJU3g/Z6j +rkE8Bz5a/iwO+QwdO5Q2cp5+0nm41A== +-----END PRIVATE KEY----- diff --git a/apps/emqx/test/emqx_ocsp_cache_SUITE_data/client.pem b/apps/emqx/test/emqx_ocsp_cache_SUITE_data/client.pem new file mode 100644 index 000000000..06adc2aa3 --- /dev/null +++ b/apps/emqx/test/emqx_ocsp_cache_SUITE_data/client.pem @@ -0,0 +1,38 @@ +-----BEGIN CERTIFICATE----- +MIIGmjCCBIKgAwIBAgICEAYwDQYJKoZIhvcNAQELBQAwazELMAkGA1UEBhMCU0Ux +EjAQBgNVBAgMCVN0b2NraG9sbTESMBAGA1UECgwJTXlPcmdOYW1lMRkwFwYDVQQL +DBBNeUludGVybWVkaWF0ZUNBMRkwFwYDVQQDDBBNeUludGVybWVkaWF0ZUNBMB4X +DTIzMDMwNjE5NTA0N1oXDTMzMDYxMTE5NTA0N1owezELMAkGA1UEBhMCU0UxEjAQ +BgNVBAgMCVN0b2NraG9sbTESMBAGA1UEBwwJU3RvY2tob2xtMRIwEAYDVQQKDAlN +eU9yZ05hbWUxGTAXBgNVBAsMEE15SW50ZXJtZWRpYXRlQ0ExFTATBgNVBAMMDG9j +c3AuY2xpZW50MjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKZ9mYEA +5knzGM9iSnJDFMZnjS8jA67KXBKjDXaJ1Y3qNxb2DloiKn4Hagqt/NHs9izFsOh1 +yP8o6lAbAIUC6NJiBi+KI6yYBNDWZ42hyedaMCzJvD51Jcr9mN+tWAhokXtEdouI +O2rlMjhvgqNQXKHk0gshz7N9mqg1TqSFQXRtw4EAbYEVoHNF/5r+GBlcQ/ISeioU +rknmT4MNKUpcNs5MQeFcbhAVPWFlq1rGm0PWdMQVFi7N+aH+DhEO9a1rCb1hTeP9 +XRSfu39NqPR8Bc2LfRPMo5VpVwOuRW0MUd7aFfeU4l3ncjk2lLtqxqr5pM3K3P4j +cF/9ri+SESCsqmMFJdeb/5XDk3Oxc3Ap11JQbLCYhtXZgFicr7QPCE5cuxM+Hem/ +r3fU1XGmJ9bSczhSvpcIMc49Vf+MhlnzX1vMCVlncGAxPPumpP33TqWY1FAGBKCv +5lrD2CNWEuDVUN/AXzl5qeFvOTkhOd99H6zZyidmvaD4sQOyrZ0m/+wRti4cE0GQ +O2Gfjf0yA2rWevc9XSGfyk1yBLlkVTD583zjTdFj1NguywH8UVhEQMJrHSPvmovc +Tsttkrwl+w/y6HV7c91HrhOC2hihMq7C0ip6WaH6vIBYWNpcs+g/8CpiTVVxEJ68 +5dQrDouKyR26SmWT1zLZw8tyb4Q/7YmzcHbxAgMBAAGjggE2MIIBMjAJBgNVHRME +AjAAMBEGCWCGSAGG+EIBAQQEAwIFoDAzBglghkgBhvhCAQ0EJhYkT3BlblNTTCBH +ZW5lcmF0ZWQgQ2xpZW50IENlcnRpZmljYXRlMB0GA1UdDgQWBBSJ/yia067wCafe +kDCgk+e8PJTCUDAfBgNVHSMEGDAWgBRMcIY7FVKJurUPkqqusTFBE75z8zAOBgNV +HQ8BAf8EBAMCBeAwHQYDVR0lBBYwFAYIKwYBBQUHAwIGCCsGAQUFBwMEMDsGA1Ud +HwQ0MDIwMKAuoCyGKmh0dHA6Ly9sb2NhbGhvc3Q6OTg3OC9pbnRlcm1lZGlhdGUu +Y3JsLnBlbTAxBggrBgEFBQcBAQQlMCMwIQYIKwYBBQUHMAGGFWh0dHA6Ly9sb2Nh +bGhvc3Q6OTg3NzANBgkqhkiG9w0BAQsFAAOCAgEAN2XfYgbrjxC6OWh9UoMLQaDD +59JPxAUBxlRtWzTWqxY2jfT+OwJfDP4e+ef2G1YEG+qyt57ddlm/EwX9IvAvG0D4 +wd4tfItG88IJWKDM3wpT5KYrUsu+PlQTFmGmaWlORK/mRKlmfjbP5CIAcUedvCS9 +j9PkCrbbkklAmp0ULLSLUkYajmfFOkQ+VdGhQ6nAamTeyh2Z2S4dVjsKc8yBViMo +/V6HP56rOvUqiVTcvhZtH7QDptMSTzuJ+AsmreYjwIiTGzYS/i8QVAFuPfXJKEOB +jD5WhUaP/8Snbuft4MxssPAph8okcmxLfb55nw+soNc2oS1wWwKMe7igRelq8vtg +bu00QSEGiY1eq/vFgZh0+Wohy/YeYzhO4Jq40FFpKiVbkLzexpNH/Afj2QrHuZ7y +259uGGfv5tGA+TW6PsckCQknEb5V4V35ZZlbWVRKpuADeNPoDuoYPtc5eOomIkmw +rFz/gPZWSA+4pYEgXgqcaM8+KP0i53eTbWqwy5DVgXiuaTYWU4m1FTsIZ+/nGIqW 
+Dsgqd/D6jivf9Yvm+VFYTZsxIfq5sMdjxSuMBo0nZrzFDpqc6m6fVVoHv5R9Yliw +MbxgmFQ84CKLy7iNKGSGVN2SIr1obMQ0e/t3NiCHib3WKzmZFoNoFCtVzAgsxGmF +Q6rY83JdIPPW4LqZNcE= +-----END CERTIFICATE----- diff --git a/apps/emqx/test/emqx_ocsp_cache_SUITE_data/index.txt b/apps/emqx/test/emqx_ocsp_cache_SUITE_data/index.txt new file mode 100644 index 000000000..76a170dd3 --- /dev/null +++ b/apps/emqx/test/emqx_ocsp_cache_SUITE_data/index.txt @@ -0,0 +1,6 @@ +V 330419130816Z 1000 unknown /C=SE/ST=Stockholm/L=Stockholm/O=MyOrgName/OU=MyIntermediateCA/CN=localhost +V 330419130816Z 1001 unknown /C=SE/ST=Stockholm/L=Stockholm/O=MyOrgName/OU=MyIntermediateCA/CN=MyClient +R 330419130816Z 230112130816Z 1002 unknown /C=SE/ST=Stockholm/L=Stockholm/O=MyOrgName/OU=MyIntermediateCA/CN=client-revoked +V 330419130816Z 1003 unknown /C=SE/ST=Stockholm/L=Stockholm/O=MyOrgName/OU=MyIntermediateCA/CN=ocsp.server +V 330419130816Z 1004 unknown /C=SE/ST=Stockholm/L=Stockholm/O=MyOrgName/OU=MyIntermediateCA/CN=ocsp.client +V 330425123656Z 1005 unknown /C=SE/ST=Stockholm/L=Stockholm/O=MyOrgName/OU=MyIntermediateCA/CN=client-no-dist-points diff --git a/apps/emqx/test/emqx_ocsp_cache_SUITE_data/ocsp-issuer.key b/apps/emqx/test/emqx_ocsp_cache_SUITE_data/ocsp-issuer.key new file mode 100644 index 000000000..511cd5b0a --- /dev/null +++ b/apps/emqx/test/emqx_ocsp_cache_SUITE_data/ocsp-issuer.key @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQC0Bu3THlP8vRwz +R84cnUdG5sDfVBqZaiTiT8KiUWmWXK5/bpoCfUlNopdnHhFcTA8MwVgkSiTufx/Q +PZNd0qrFzNnNnPcV3yBdZG/NtJQptVKMOYWf2rg48ZYjZPZDSQ4oAkYt0KCLP3VX +lsxbIBAtpYMUamrjdnjRW+v8l2O/PGACxwbAkVdjRThV8bT7Ly16Q/UP/BKmLEjL +iwEUXR1TVK8mhBeZYUoaXLs9E5SLk8tk4rlyVLSfqe2uGe364meibK5kFrFUAY+O +BfEyUR1s1gu75vXPncxF/Xqm6+Yjy/i+HLTFvS8+nj3GHyF53ughJbrEVH/BvsTN +bsKLxfW6VfvSKmO7QLvRtYmPYjbSYDTQyjgnDhinGQsegoNV2DhCkXjZ1DGs72Hq +ka641eVkNLznA+c3AUTU1wnXQi2u1NILlz/ZkK37pODUx9Qbe+yYnkyfHHw1x8LW +in+mhm5mT8JdV4vxrlzVD/8XqIO/ESshcAZo7diSsL1szKX5h+JhJBvZqIdmpb2v +mhM+iyCbj9eoOrNEOnO/d/zr1LaTXHsB8OFY59/Sw/aOlS6fOeBuJ/HVaVFkm1kv +rFw9WROvR4P7DDTMqvtPbn5G3sGPMaApv60Naag2QFTFrLLSRGmLu2/3m+0fkRjo +9WBAglf2KRGT3Q6LW0jUZCItnf2tTQIDAQABAoICAAVlH8Nv6TxtvmabBEY/QF+T +krwenR1z3N8bXM3Yer2S0XfoLJ1ee8/jy32/nO2TKfBL6wRLZIfxL1biQYRSR+Pd +m7lZtt3k7edelysm+jm1wV+KacK8n0C1nLY61FZ33gC88LV2xxjlMfMKBd3FPDbh ++ueluMZQSpablprfPpIAkTAEHuOud1v2OxX4RGAyrb44QyPTfguU0CmpZMLjd3mD +1CvnUX27OKlJliLib1UvfKztTnlqqG8QfJr3E/asykZH04IUXAQUd+TdsLi9TZBx +abCb30n1hKWkTwSplSAFgNLRsWkrnjrWKyvAyxQH5hT4OHyhu6JmwScW5qWhrRd3 +ld+pMaKQlOmtrTiRzSeFD2pOHFHvZ3N/1BhH5TGfnTIXKuEja3xdOArCHTBkh/9S +kEZegVIAjoFW+t3gfbz12JzNmDUUX+sWfadBBiwYepTUr2aZQehZM8+dzdSwQeh4 +XcAUC55YgaC2oFCfcc8rD5o+57nlR+7xAjZ/Z61SuUJHrKSRzB6w2PARiEIuYotK +E/CsQfL9tgjoc0aN0uVl8SH+GvKvRWM6LV711ep8w2XoPIAxId3ne/Ktw+wKCrqC +CJsHXIGOi8n0YZLZ6vz/6WrjmY1GdJc1aywQvr5eDFP5g0j3e+WzGBxoCKX8Gah5 +KpA4fcN44s2umsu7WcoBAoIBAQDZyGhtu9rbm3JMJrA9Eyq97niv6axwbhocE/bU +tPwdeWbPdprkK4aQ9UqJwHmVHkAUrGFRsY2iPJFLvdRwvixFYVAf/WLlAepd+HFz +Xit1oX5ouzbcjq2+13zUQpfjXFqfLqVYcu/sW7UFaD3yJEstkhI+ZM6Ci+kLWXN5 ++KOXASGzO8p7WBHFABRMH0bUjRnZy8xX3wdOhAKRFaCalxABodH9wz/cMunzrmEa +uHRsNWIIdWIVle4ZX4QTcsDgJSf5LeDaLtrpMu2AnFafQ2VCAb/jdKdighBsZG3H +Pu6e1fJzSKZEUtWSLMzBoB6R/oNDW9cPhcXWXlNc8QsZ7DAtAoIBAQDTnmUqf8Lo +lWPEQCrfkgQm2Gom/75uj5TnHsQYf2xk3vZNF5UwErD3Ixzh4F1E5ewA1Xvy5t3J +VCOLypiKDlfcZnsMPncdubGMrT575mkpZgsvR/w8u8pd4mFSdyCc/y5TeyfcNFQe +0Ho1NXMH6czutQs3oX+yfaTUr6Oa3brG1SAJQpG53nQI74pMWKHcivI/ytlA26Ki +zxIVzeAzJ/ToVc6MzbObkXjFxrnVlvjsLyGMJEfW2lmny4Gpx1xpc2j3YW8vehfx 
+DalWOJai1mtAo8ieo7CVw+kV2CqL7gJOJ2iNmCKT+IFk4LRtfJxd4wUJz6A/+vWp +o0LMvApAnIWhAoIBAER1S+Zaq9Rmi8pGSxYXxVLI+KULhkodQhXbbLa2YZ3+QIQs +m0noKLe+c3zTxSRLywb0nO7qKkR6V44AkRwTm6T/jwlPRFwKexqo8zi5vF2Qs0TG +vNsd+p3H7RRoDojIyi/JoO4pyyN4PHIDr51DLWKYzSVR2NyOkGYh6zvHHd1k3KwT +unWFXKiZesfm+QPtite8yXJByHE06/2hV8fgfoaU0Ia9boCQfJw+D4Yvv2EYcsWH +6JoydBMDxGe8pcaPx337nvfWzLeLa78G5e/QZq8WD7S3Qbqkefcopp2AOdAyHrGA +f8twYnQ9ouumopVv9OEiqHrXqTXWlsvbdYrjhM0CggEABOEHBhbSAJjJJxIvqt3r ++JVOxT1qP5RR445DCSmO7zhwx1A+4U/dAqWtmcuZeuguK8rAQ9Zs0KJ++08dezlf +bzZxqdOa3XWVkV/BLAwg6pJuuZVYTHIr9UQt6D/U4anEgKo7Pgl60wcNekKUN199 +mRdVfd/cWNoqvbia9gwcrU7moTAGuhlV5YrYTnBQswwFD9F2dtdZhZVunlAT1joa +nGy2CWsItBKDjVPKnxEPBisEA/4mJd786DB5+dcd21SM2/9EF/0hpi4hdFpzpqd4 +65GbI4U0og9VRWqpeHZxWSnxcCpMycqV+SRxJIEV/dgpGpPN5wu7NEEOXjgLqHez +YQKCAQBjwMVQUgn2KZK6Q9Lwe09ZpWTxGMh9mevU3eMA/6awajkE4UVgV8hSVvcG +i3Otn9UMnMhYu+HuU9O9W4zzncH0nRoiwjQr3X0MTT3Lc0rSJNPb/a6pcvysBuvB +wvhQ/dRXbCtmK9VE9ctPa9EO9f9SQRZF2NQsTOkyILdsgISm4zXSBhyT8KkQbiTe +0ToI7qMM73HqLHKOkjA+8jYkE5MTVQaaRXx2JlCeHEsIpH/2Nj1OsmUfn3paL6ZN +3loKhFfGy4onSOJOxoYaI3r6aykTFm7Qyg1xrG+8uFhK/qTOCB22I63LmSLZ1wlY +xBO4CmF79pAcAXvDoRB619Flx5/G +-----END PRIVATE KEY----- diff --git a/apps/emqx/test/emqx_ocsp_cache_SUITE_data/ocsp-issuer.pem b/apps/emqx/test/emqx_ocsp_cache_SUITE_data/ocsp-issuer.pem new file mode 100644 index 000000000..467e4c209 --- /dev/null +++ b/apps/emqx/test/emqx_ocsp_cache_SUITE_data/ocsp-issuer.pem @@ -0,0 +1,34 @@ +-----BEGIN CERTIFICATE----- +MIIF+zCCA+OgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwbzELMAkGA1UEBhMCU0Ux +EjAQBgNVBAgMCVN0b2NraG9sbTESMBAGA1UEBwwJU3RvY2tob2xtMRIwEAYDVQQK +DAlNeU9yZ05hbWUxETAPBgNVBAsMCE15Um9vdENBMREwDwYDVQQDDAhNeVJvb3RD +QTAeFw0yMzAxMTIxMzA4MTZaFw0zMzAxMDkxMzA4MTZaMGsxCzAJBgNVBAYTAlNF +MRIwEAYDVQQIDAlTdG9ja2hvbG0xEjAQBgNVBAoMCU15T3JnTmFtZTEZMBcGA1UE +CwwQTXlJbnRlcm1lZGlhdGVDQTEZMBcGA1UEAwwQTXlJbnRlcm1lZGlhdGVDQTCC +AiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALQG7dMeU/y9HDNHzhydR0bm +wN9UGplqJOJPwqJRaZZcrn9umgJ9SU2il2ceEVxMDwzBWCRKJO5/H9A9k13SqsXM +2c2c9xXfIF1kb820lCm1Uow5hZ/auDjxliNk9kNJDigCRi3QoIs/dVeWzFsgEC2l +gxRqauN2eNFb6/yXY788YALHBsCRV2NFOFXxtPsvLXpD9Q/8EqYsSMuLARRdHVNU +ryaEF5lhShpcuz0TlIuTy2TiuXJUtJ+p7a4Z7friZ6JsrmQWsVQBj44F8TJRHWzW +C7vm9c+dzEX9eqbr5iPL+L4ctMW9Lz6ePcYfIXne6CElusRUf8G+xM1uwovF9bpV ++9IqY7tAu9G1iY9iNtJgNNDKOCcOGKcZCx6Cg1XYOEKReNnUMazvYeqRrrjV5WQ0 +vOcD5zcBRNTXCddCLa7U0guXP9mQrfuk4NTH1Bt77JieTJ8cfDXHwtaKf6aGbmZP +wl1Xi/GuXNUP/xeog78RKyFwBmjt2JKwvWzMpfmH4mEkG9moh2alva+aEz6LIJuP +16g6s0Q6c793/OvUtpNcewHw4Vjn39LD9o6VLp854G4n8dVpUWSbWS+sXD1ZE69H +g/sMNMyq+09ufkbewY8xoCm/rQ1pqDZAVMWsstJEaYu7b/eb7R+RGOj1YECCV/Yp +EZPdDotbSNRkIi2d/a1NAgMBAAGjgaQwgaEwHQYDVR0OBBYEFExwhjsVUom6tQ+S +qq6xMUETvnPzMB8GA1UdIwQYMBaAFD90kfU5pc5l48THu0Ayj9SNpHuhMBIGA1Ud +EwEB/wQIMAYBAf8CAQAwDgYDVR0PAQH/BAQDAgGGMDsGA1UdHwQ0MDIwMKAuoCyG +Kmh0dHA6Ly9sb2NhbGhvc3Q6OTg3OC9pbnRlcm1lZGlhdGUuY3JsLnBlbTANBgkq +hkiG9w0BAQsFAAOCAgEAK6NgdWQYtPNKQNBGjsgtgqTRh+k30iqSO6Y3yE1KGABO +EuQdVqkC2qUIbCB0M0qoV0ab50KNLfU6cbshggW4LDpcMpoQpI05fukNh1jm3ZuZ +0xsB7vlmlsv00tpqmfIl/zykPDynHKOmFh/hJP/KetMy4+wDv4/+xP31UdEj5XvG +HvMtuqOS23A+H6WPU7ol7KzKBnU2zz/xekvPbUD3JqV+ynP5bgbIZHAndd0o9T8e +NFX23Us4cTenU2/ZlOq694bRzGaK+n3Ksz995Nbtzv5fbUgqmf7Mcq4iHGRVtV11 +MRyBrsXZp2vbF63c4hrf2Zd6SWRoaDKRhP2DMhajpH9zZASSTlfejg/ZRO2s+Clh +YrSTkeMAdnRt6i/q4QRcOTCfsX75RFM5v67njvTXsSaSTnAwaPi78tRtf+WSh0EP +VVPzy++BszBVlJ1VAf7soWZHCjZxZ8ZPqVTy5okoHwWQ09WmYe8GfulDh1oj0wbK +3FjN7bODWHJN+bFf5aQfK+tumYKoPG8RXL6QxpEzjFWjxhIMJHHMKfDWnAV1o1+7 +/1/aDzq7MzEYBbrgQR7oE5ZHtyqhCf9LUgw0Kr7/8QWuNAdeDCJzjXRROU0hJczp 
+dOyfRlLbHmLLmGOnROlx6LsGNQ17zuz6SPi7ei8/ylhykawDOAGkM1+xFakmQhM= +-----END CERTIFICATE----- diff --git a/apps/emqx/test/emqx_ocsp_cache_SUITE_data/openssl_listeners.conf b/apps/emqx/test/emqx_ocsp_cache_SUITE_data/openssl_listeners.conf new file mode 100644 index 000000000..d26e12acf --- /dev/null +++ b/apps/emqx/test/emqx_ocsp_cache_SUITE_data/openssl_listeners.conf @@ -0,0 +1,14 @@ +listeners.ssl.default { + bind = "0.0.0.0:8883" + max_connections = 512000 + ssl_options { + keyfile = "{{ test_data_dir }}/server.key" + certfile = "{{ test_data_dir }}/server.pem" + cacertfile = "{{ test_data_dir }}/ca.pem" + ocsp { + enable_ocsp_stapling = true + issuer_pem = "{{ test_data_dir }}/ocsp-issuer.pem" + responder_url = "http://127.0.0.1:9877" + } + } +} diff --git a/apps/emqx/test/emqx_ocsp_cache_SUITE_data/server.key b/apps/emqx/test/emqx_ocsp_cache_SUITE_data/server.key new file mode 100644 index 000000000..d456ece72 --- /dev/null +++ b/apps/emqx/test/emqx_ocsp_cache_SUITE_data/server.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCnVPRWgP59GU15 +HddFwPZflFfcSkeuWU8tgKQhZcNoBli4lIfemuoV/hkGRVFexAiAw3/u5wvOaMaN +V8n9KxxgAUNLh5YaknpnNdhfQDyM0S5UJIbVeLzAQWxkBXpI3uBfW4WPSULRnVyR +psLEfl1qOklGOyuZfRbkkkkVwtJEmGEH0kz0fy6xenn3R3/mTeIbj+5TNqiBXWn1 +/qgTiNf2Ni7SE6Nk2lP4V8iofcBIrsp6KtEWdipGEJZeXCg/X0g/qVt15tF1l00M +uEWRHt1qGBELJJTcNzQvdqHAPz0AfQRjTtXyocw5+pFth8Q8a7gyjrjv5nhnpAKQ +msrt3vyNAgMBAAECggEABnWvIQ/Fw0qQxRYz00uJt1LguW5cqgxklBsdOvTUwFVO +Y4HIZP2R/9tZV/ahF4l10pK5g52DxSoiUB6Ne6qIY+RolqfbUZdKBmX7vmGadM02 +fqUSV3dbwghEiO/1Mo74FnZQB6IKZFEw26aWakN+k7VAUufB3SEJGzXSgHaO63ru +dFGSiYI8U+q+YnhUJjCnmI12fycNfy451TdUQtGZb6pNmm5HRUF6hpAV8Le9LojP +Ql9eacPpsrzU15X5ElCQZ/f9iNh1bplcISuhrULgKUKOvAVrBlEK67uRVy6g98xA +c/rgNLkbL/jZEsAc3/vHAyFgd3lABfwpBGLHej3QgQKBgQDFNYmfBNQr89HC5Zc+ +M6jXcAT/R+0GNczBTfC4iyNemwqsumSSRelNZ748UefKuS3F6Mvb2CBqE2LbB61G +hrnCffG2pARjZ491SefRwghhWWVGLP1p8KliLgOGBehA1REgJb+XULncjuHZuh4O +LVn3HVnWGxeBGg+yKa6Z4YQi3QKBgQDZN0O8ZcZY74lRJ0UjscD9mJ1yHlsssZag +njkX/f0GR/iVpfaIxQNC3gvWUy2LsU0He9sidcB0cfej0j/qZObQyFsCB0+utOgy ++hX7gokV2pes27WICbNWE2lJL4QZRJgvf82OaEy57kfDrm+eK1XaSZTZ10P82C9u +gAmMnontcQKBgGu29lhY9tqa7jOZ26Yp6Uri8JfO3XPK5u+edqEVvlfqL0Zw+IW8 +kdWpmIqx4f0kcA/tO4v03J+TvycLZmVjKQtGZ0PvCkaRRhY2K9yyMomZnmtaH4BB +5wKtR1do2pauyg/ZDnDDswD5OfsGYWw08TK8YVlEqu3lIjWZ9rguKVIxAoGAZYUk +zVqr10ks3pcCA2rCjkPT4lA5wKvHgI4ylPoKVfMxRY/pp4acvZXV5ne9o7pcDBFh +G7v5FPNnEFPlt4EtN4tMragJH9hBZgHoYEJkG6islweg0lHmVWaBIMlqbfzXO+v5 +gINSyNuLAvP2CvCqEXmubhnkFrpbgMOqsuQuBqECgYB3ss2PDhBF+5qoWgqymFof +1ovRPuQ9sPjWBn5IrCdoYITDnbBzBZERx7GLs6A/PUlWgST7jkb1PY/TxYSUfXzJ +SNd47q0mCQ+IUdqUbHgpK9b1ncwLMsnexpYZdHJWRLgnUhOx7OMjJc/4iLCAFCoN +3KJ7/V1keo7GBHOwnsFcCA== +-----END PRIVATE KEY----- diff --git a/apps/emqx/test/emqx_ocsp_cache_SUITE_data/server.pem b/apps/emqx/test/emqx_ocsp_cache_SUITE_data/server.pem new file mode 100644 index 000000000..38cc63534 --- /dev/null +++ b/apps/emqx/test/emqx_ocsp_cache_SUITE_data/server.pem @@ -0,0 +1,35 @@ +-----BEGIN CERTIFICATE----- +MIIGCTCCA/GgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwazELMAkGA1UEBhMCU0Ux +EjAQBgNVBAgMCVN0b2NraG9sbTESMBAGA1UECgwJTXlPcmdOYW1lMRkwFwYDVQQL +DBBNeUludGVybWVkaWF0ZUNBMRkwFwYDVQQDDBBNeUludGVybWVkaWF0ZUNBMB4X +DTIzMDExMjEzMDgxNloXDTMzMDQxOTEzMDgxNloweDELMAkGA1UEBhMCU0UxEjAQ +BgNVBAgMCVN0b2NraG9sbTESMBAGA1UEBwwJU3RvY2tob2xtMRIwEAYDVQQKDAlN +eU9yZ05hbWUxGTAXBgNVBAsMEE15SW50ZXJtZWRpYXRlQ0ExEjAQBgNVBAMMCWxv +Y2FsaG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKdU9FaA/n0Z 
+TXkd10XA9l+UV9xKR65ZTy2ApCFlw2gGWLiUh96a6hX+GQZFUV7ECIDDf+7nC85o +xo1Xyf0rHGABQ0uHlhqSemc12F9APIzRLlQkhtV4vMBBbGQFekje4F9bhY9JQtGd +XJGmwsR+XWo6SUY7K5l9FuSSSRXC0kSYYQfSTPR/LrF6efdHf+ZN4huP7lM2qIFd +afX+qBOI1/Y2LtITo2TaU/hXyKh9wEiuynoq0RZ2KkYQll5cKD9fSD+pW3Xm0XWX +TQy4RZEe3WoYEQsklNw3NC92ocA/PQB9BGNO1fKhzDn6kW2HxDxruDKOuO/meGek +ApCayu3e/I0CAwEAAaOCAagwggGkMAkGA1UdEwQCMAAwEQYJYIZIAYb4QgEBBAQD +AgZAMDMGCWCGSAGG+EIBDQQmFiRPcGVuU1NMIEdlbmVyYXRlZCBTZXJ2ZXIgQ2Vy +dGlmaWNhdGUwHQYDVR0OBBYEFGy5LQPzIelruJl7mL0mtUXM57XhMIGaBgNVHSME +gZIwgY+AFExwhjsVUom6tQ+Sqq6xMUETvnPzoXOkcTBvMQswCQYDVQQGEwJTRTES +MBAGA1UECAwJU3RvY2tob2xtMRIwEAYDVQQHDAlTdG9ja2hvbG0xEjAQBgNVBAoM +CU15T3JnTmFtZTERMA8GA1UECwwITXlSb290Q0ExETAPBgNVBAMMCE15Um9vdENB +ggIQADAOBgNVHQ8BAf8EBAMCBaAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwOwYDVR0f +BDQwMjAwoC6gLIYqaHR0cDovL2xvY2FsaG9zdDo5ODc4L2ludGVybWVkaWF0ZS5j +cmwucGVtMDEGCCsGAQUFBwEBBCUwIzAhBggrBgEFBQcwAYYVaHR0cDovL2xvY2Fs +aG9zdDo5ODc3MA0GCSqGSIb3DQEBCwUAA4ICAQCX3EQgiCVqLhnCNd0pmptxXPxo +l1KyZkpdrFa/NgSqRhkuZSAkszwBDDS/gzkHFKEUhmqs6/UZwN4+Rr3LzrHonBiN +aQ6GeNNXZ/3xAQfUCwjjGmz9Sgw6kaX19Gnk2CjI6xP7T+O5UmsMI9hHUepC9nWa +XX2a0hsO/KOVu5ZZckI16Ek/jxs2/HEN0epYdvjKFAaVmzZZ5PATNjrPQXvPmq2r +x++La+3bXZsrH8P2FhPpM5t/IxKKW/Tlpgz92c2jVSIHF5khSA/MFDC+dk80OFmm +v4ZTPIMuZ//Q+wo0f9P48rsL9D27qS7CA+8pn9wu+cfnBDSt7JD5Yipa1gHz71fy +YTa9qRxIAPpzW2v7TFZE8eSKFUY9ipCeM2BbdmCQGmq4+v36b5TZoyjH4k0UVWGo +Gclos2cic5Vxi8E6hb7b7yZpjEfn/5lbCiGMfAnI6aoOyrWg6keaRA33kaLUEZiK +OgFNbPkjiTV0ZQyLXf7uK9YFhpVzJ0dv0CFNse8rZb7A7PLn8VrV/ZFnJ9rPoawn +t7ZGxC0d5BRSEyEeEgsQdxuY4m8OkE18zwhCkt2Qs3uosOWlIrYmqSEa0i/sPSQP +jiwB4nEdBrf8ZygzuYjT5T9YRSwhVox4spS/Av8Ells5JnkuKAhCVv9gHxYwbj0c +CzyLJgE1z9Tq63m+gQ== +-----END CERTIFICATE----- diff --git a/apps/emqx/test/emqx_olp_SUITE.erl b/apps/emqx/test/emqx_olp_SUITE.erl index b1cfbc0f1..87393686e 100644 --- a/apps/emqx/test/emqx_olp_SUITE.erl +++ b/apps/emqx/test/emqx_olp_SUITE.erl @@ -22,18 +22,23 @@ -include_lib("emqx/include/emqx.hrl"). -include_lib("emqx/include/emqx_mqtt.hrl"). -include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). -include_lib("lc/include/lc.hrl"). all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> emqx_common_test_helpers:start_apps([]), - Config. + OldSch = erlang:system_flag(schedulers_online, 1), + [{old_sch, OldSch} | Config]. -end_per_suite(_Config) -> +end_per_suite(Config) -> + erlang:system_flag(schedulers_online, ?config(old_sch, Config)), emqx_common_test_helpers:stop_apps([]). init_per_testcase(_, Config) -> + emqx_common_test_helpers:boot_modules(all), + emqx_common_test_helpers:start_apps([]), emqx_olp:enable(), case wait_for(fun() -> lc_sup:whereis_runq_flagman() end, 10) of true -> ok; @@ -86,6 +91,7 @@ t_overload_cooldown_conn(Config) -> t_overloaded_conn(Config), timer:sleep(1000), ?assert(not emqx_olp:is_overloaded()), + true = emqx:is_running(node()), {ok, C} = emqtt:start_link([{host, "localhost"}, {clientid, "myclient"}]), ?assertMatch({ok, _Pid}, emqtt:connect(C)), emqtt:stop(C). @@ -93,7 +99,7 @@ t_overload_cooldown_conn(Config) -> -spec burst_runq() -> ParentToKill :: pid(). burst_runq() -> NProc = erlang:system_info(schedulers_online), - spawn(?MODULE, worker_parent, [NProc * 10, {?MODULE, busy_loop, []}]). + spawn(?MODULE, worker_parent, [NProc * 1000, {?MODULE, busy_loop, []}]). 
%% internal helpers worker_parent(N, {M, F, A}) -> diff --git a/apps/emqx/test/emqx_os_mon_SUITE.erl b/apps/emqx/test/emqx_os_mon_SUITE.erl index 8729bbdb6..e76928114 100644 --- a/apps/emqx/test/emqx_os_mon_SUITE.erl +++ b/apps/emqx/test/emqx_os_mon_SUITE.erl @@ -25,32 +25,44 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> emqx_common_test_helpers:boot_modules(all), - emqx_common_test_helpers:start_apps( - [], - fun - (emqx) -> - application:set_env(emqx, os_mon, [ - {cpu_check_interval, 1}, - {cpu_high_watermark, 5}, - {cpu_low_watermark, 80}, - {procmem_high_watermark, 5} - ]); - (_) -> - ok - end - ), + emqx_common_test_helpers:start_apps([]), Config. end_per_suite(_Config) -> emqx_common_test_helpers:stop_apps([]). -t_api(_) -> - ?assertEqual(60000, emqx_os_mon:get_mem_check_interval()), - ?assertEqual(ok, emqx_os_mon:set_mem_check_interval(30000)), - ?assertEqual(60000, emqx_os_mon:get_mem_check_interval()), - ?assertEqual(ok, emqx_os_mon:set_mem_check_interval(122000)), - ?assertEqual(120000, emqx_os_mon:get_mem_check_interval()), +init_per_testcase(t_cpu_check_alarm, Config) -> + SysMon = emqx_config:get([sysmon, os], #{}), + emqx_config:put([sysmon, os], SysMon#{ + cpu_high_watermark => 0.9, + cpu_low_watermark => 0, + %% 200ms + cpu_check_interval => 200 + }), + ok = supervisor:terminate_child(emqx_sys_sup, emqx_os_mon), + {ok, _} = supervisor:restart_child(emqx_sys_sup, emqx_os_mon), + Config; +init_per_testcase(t_sys_mem_check_alarm, Config) -> + case emqx_os_mon:is_sysmem_check_supported() of + true -> + SysMon = emqx_config:get([sysmon, os], #{}), + emqx_config:put([sysmon, os], SysMon#{ + sysmem_high_watermark => 0.51, + %% 200ms + mem_check_interval => 200 + }), + ok = supervisor:terminate_child(emqx_sys_sup, emqx_os_mon), + {ok, _} = supervisor:restart_child(emqx_sys_sup, emqx_os_mon), + Config; + false -> + Config + end; +init_per_testcase(_, Config) -> + emqx_common_test_helpers:boot_modules(all), + emqx_common_test_helpers:start_apps([]), + Config. +t_api(_) -> ?assertEqual(0.7, emqx_os_mon:get_sysmem_high_watermark()), ?assertEqual(ok, emqx_os_mon:set_sysmem_high_watermark(0.8)), ?assertEqual(0.8, emqx_os_mon:get_sysmem_high_watermark()), @@ -67,3 +79,123 @@ t_api(_) -> emqx_os_mon ! ignored, gen_server:stop(emqx_os_mon), ok. + +t_sys_mem_check_disable(Config) -> + case emqx_os_mon:is_sysmem_check_supported() of + true -> do_sys_mem_check_disable(Config); + false -> skip + end. + +do_sys_mem_check_disable(_Config) -> + MemRef0 = maps:get(mem_time_ref, sys:get_state(emqx_os_mon)), + ?assertEqual(true, is_reference(MemRef0), MemRef0), + emqx_config:put([sysmon, os, mem_check_interval], 1000), + emqx_os_mon:update(emqx_config:get([sysmon, os])), + MemRef1 = maps:get(mem_time_ref, sys:get_state(emqx_os_mon)), + ?assertEqual(true, is_reference(MemRef1), {MemRef0, MemRef1}), + ?assertNotEqual(MemRef0, MemRef1), + emqx_config:put([sysmon, os, mem_check_interval], disabled), + emqx_os_mon:update(emqx_config:get([sysmon, os])), + ?assertEqual(undefined, maps:get(mem_time_ref, sys:get_state(emqx_os_mon))), + ok. + +t_sys_mem_check_alarm(Config) -> + case emqx_os_mon:is_sysmem_check_supported() of + true -> do_sys_mem_check_alarm(Config); + false -> skip + end. 
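Both OS-memory cases above gate on emqx_os_mon:is_sysmem_check_supported/0, whose definition is not part of this diff. A minimal sketch of what such a guard presumably checks (an assumption: EMQX's system-memory sampling goes through load_ctl, which only reports usage on Linux):

    %% Hedged sketch, not the actual emqx_os_mon implementation.
    is_sysmem_check_supported() ->
        {unix, linux} =:= os:type().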
+ +do_sys_mem_check_alarm(_Config) -> + emqx_config:put([sysmon, os, mem_check_interval], 200), + emqx_os_mon:update(emqx_config:get([sysmon, os])), + Mem = 0.52345, + Usage = floor(Mem * 10000) / 100, + emqx_common_test_helpers:with_mock( + load_ctl, + get_memory_usage, + fun() -> Mem end, + fun() -> + timer:sleep(500), + Alarms = emqx_alarm:get_alarms(activated), + ?assert( + emqx_vm_mon_SUITE:is_existing( + high_system_memory_usage, emqx_alarm:get_alarms(activated) + ), + #{ + load_ctl_memory => load_ctl:get_memory_usage(), + config => emqx_config:get([sysmon, os]), + process => sys:get_state(emqx_os_mon), + alarms => Alarms + } + ), + [ + #{ + activate_at := _, + activated := true, + deactivate_at := infinity, + details := #{high_watermark := 51.0, usage := RealUsage}, + message := Msg, + name := high_system_memory_usage + } + ] = + lists:filter( + fun + (#{name := high_system_memory_usage}) -> true; + (_) -> false + end, + Alarms + ), + ?assert(RealUsage >= Usage, {RealUsage, Usage}), + ?assert(is_binary(Msg)), + emqx_config:put([sysmon, os, sysmem_high_watermark], 0.99999), + ok = supervisor:terminate_child(emqx_sys_sup, emqx_os_mon), + {ok, _} = supervisor:restart_child(emqx_sys_sup, emqx_os_mon), + timer:sleep(600), + Activated = emqx_alarm:get_alarms(activated), + ?assertNot( + emqx_vm_mon_SUITE:is_existing(high_system_memory_usage, Activated), + #{activated => Activated, process_state => sys:get_state(emqx_os_mon)} + ) + end + ). + +t_cpu_check_alarm(_) -> + CpuUtil = 90.12345, + Usage = floor(CpuUtil * 100) / 100, + emqx_common_test_helpers:with_mock( + cpu_sup, + util, + fun() -> CpuUtil end, + fun() -> + timer:sleep(500), + Alarms = emqx_alarm:get_alarms(activated), + ?assert( + emqx_vm_mon_SUITE:is_existing(high_cpu_usage, emqx_alarm:get_alarms(activated)) + ), + [ + #{ + activate_at := _, + activated := true, + deactivate_at := infinity, + details := #{high_watermark := 90.0, low_watermark := 0, usage := RealUsage}, + message := Msg, + name := high_cpu_usage + } + ] = + lists:filter( + fun + (#{name := high_cpu_usage}) -> true; + (_) -> false + end, + Alarms + ), + ?assert(RealUsage >= Usage, {RealUsage, Usage}), + ?assert(is_binary(Msg)), + emqx_config:put([sysmon, os, cpu_high_watermark], 1), + emqx_config:put([sysmon, os, cpu_low_watermark], 0.96), + timer:sleep(500), + ?assertNot( + emqx_vm_mon_SUITE:is_existing(high_cpu_usage, emqx_alarm:get_alarms(activated)) + ) + end + ). diff --git a/apps/emqx/test/emqx_quic_multistreams_SUITE.erl b/apps/emqx/test/emqx_quic_multistreams_SUITE.erl new file mode 100644 index 000000000..b55a28206 --- /dev/null +++ b/apps/emqx/test/emqx_quic_multistreams_SUITE.erl @@ -0,0 +1,2052 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2021 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- +-module(emqx_quic_multistreams_SUITE). + +-ifndef(BUILD_WITHOUT_QUIC). 
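The -ifndef(BUILD_WITHOUT_QUIC) guard compiles the suite body only when QUIC support is built in. The matching tail of the module falls outside this excerpt; a plausible closing counterpart (assumed), which would leave builds without QUIC with an empty suite:

    -else.
    %% Built without QUIC support: nothing to run.
    all() -> [].
    -endif.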
+ +-compile(export_all). +-compile(nowarn_export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("quicer/include/quicer.hrl"). +-include_lib("emqx/include/emqx_mqtt.hrl"). + +suite() -> + [{timetrap, {seconds, 30}}]. + +all() -> + [ + {group, mstream}, + {group, shutdown}, + {group, misc}, + t_listener_with_lowlevel_settings, + t_listener_inval_settings + ]. + +groups() -> + [ + {mstream, [], [{group, profiles}]}, + + {profiles, [], [ + {group, profile_low_latency}, + {group, profile_max_throughput} + ]}, + {profile_low_latency, [], [ + {group, pub_qos0}, + {group, pub_qos1}, + {group, pub_qos2} + ]}, + {profile_max_throughput, [], [ + {group, pub_qos0}, + {group, pub_qos1}, + {group, pub_qos2} + ]}, + {pub_qos0, [], [ + {group, sub_qos0}, + {group, sub_qos1}, + {group, sub_qos2} + ]}, + {pub_qos1, [], [ + {group, sub_qos0}, + {group, sub_qos1}, + {group, sub_qos2} + ]}, + {pub_qos2, [], [ + {group, sub_qos0}, + {group, sub_qos1}, + {group, sub_qos2} + ]}, + {sub_qos0, [{group, qos}]}, + {sub_qos1, [{group, qos}]}, + {sub_qos2, [{group, qos}]}, + {qos, [ + t_multi_streams_sub, + t_multi_streams_pub_5x100, + t_multi_streams_pub_parallel, + t_multi_streams_pub_parallel_no_blocking, + t_multi_streams_sub_pub_async, + t_multi_streams_sub_pub_sync, + t_multi_streams_unsub, + t_multi_streams_corr_topic, + t_multi_streams_unsub_via_other, + t_multi_streams_dup_sub, + t_multi_streams_packet_boundary, + t_multi_streams_packet_malform, + t_multi_streams_kill_sub_stream, + t_multi_streams_packet_too_large, + t_multi_streams_sub_0_rtt, + t_multi_streams_sub_0_rtt_large_payload, + t_multi_streams_sub_0_rtt_stream_data_cont, + t_conn_change_client_addr + ]}, + + {shutdown, [ + {group, graceful_shutdown}, + {group, abort_recv_shutdown}, + {group, abort_send_shutdown}, + {group, abort_send_recv_shutdown} + ]}, + + {graceful_shutdown, [ + {group, ctrl_stream_shutdown}, + {group, data_stream_shutdown} + ]}, + {abort_recv_shutdown, [ + {group, ctrl_stream_shutdown}, + {group, data_stream_shutdown} + ]}, + {abort_send_shutdown, [ + {group, ctrl_stream_shutdown}, + {group, data_stream_shutdown} + ]}, + {abort_send_recv_shutdown, [ + {group, ctrl_stream_shutdown}, + {group, data_stream_shutdown} + ]}, + + {ctrl_stream_shutdown, [ + t_multi_streams_shutdown_ctrl_stream, + t_multi_streams_shutdown_ctrl_stream_then_reconnect, + t_multi_streams_remote_shutdown, + t_multi_streams_emqx_ctrl_kill, + t_multi_streams_emqx_ctrl_exit_normal, + t_multi_streams_remote_shutdown_with_reconnect + ]}, + + {data_stream_shutdown, [ + t_multi_streams_shutdown_pub_data_stream, + t_multi_streams_shutdown_sub_data_stream + ]}, + {misc, [ + t_conn_silent_close, + t_client_conn_bump_streams, + t_olp_true, + t_olp_reject, + t_conn_resume, + t_conn_without_ctrl_stream + ]} + ]. + +init_per_suite(Config) -> + emqx_common_test_helpers:start_apps([]), + UdpPort = 14567, + start_emqx_quic(UdpPort), + %% Turn off force_shutdown policy. + ShutdownPolicy = emqx_config:get_zone_conf(default, [force_shutdown]), + ct:pal("force shutdown config: ~p", [ShutdownPolicy]), + emqx_config:put_zone_conf(default, [force_shutdown], ShutdownPolicy#{enable := false}), + [{shutdown_policy, ShutdownPolicy}, {port, UdpPort}, {pub_qos, 0}, {sub_qos, 0} | Config]. + +end_per_suite(Config) -> + emqx_config:put_zone_conf(default, [force_shutdown], ?config(shutdown_policy, Config)), + emqx_common_test_helpers:stop_apps([]), + ok. 
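The nested groups above multiply out to a matrix: 2 execution profiles x 3 publisher QoS levels x 3 subscriber QoS levels, so each case listed under qos runs 18 times. Every case derives its expectations through calc_qos/2 and calc_pkt_id/2, defined later in the suite; minimal sketches consistent with MQTT semantics (assumed, not the verbatim helpers):

    %% Effective delivery QoS is the minimum of publish and subscribe QoS.
    calc_qos(PubQos, SubQos) -> min(PubQos, SubQos).

    %% QoS 0 deliveries carry no packet id; otherwise ids count up from 1.
    calc_pkt_id(0, _Nth) -> undefined;
    calc_pkt_id(_RecQos, Nth) -> Nth.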
+ +init_per_group(pub_qos0, Config) -> + [{pub_qos, 0} | Config]; +init_per_group(sub_qos0, Config) -> + [{sub_qos, 0} | Config]; +init_per_group(pub_qos1, Config) -> + [{pub_qos, 1} | Config]; +init_per_group(sub_qos1, Config) -> + [{sub_qos, 1} | Config]; +init_per_group(pub_qos2, Config) -> + [{pub_qos, 2} | Config]; +init_per_group(sub_qos2, Config) -> + [{sub_qos, 2} | Config]; +init_per_group(abort_send_shutdown, Config) -> + [{stream_shutdown_flag, ?QUIC_STREAM_SHUTDOWN_FLAG_ABORT_SEND} | Config]; +init_per_group(abort_recv_shutdown, Config) -> + [{stream_shutdown_flag, ?QUIC_STREAM_SHUTDOWN_FLAG_ABORT_RECEIVE} | Config]; +init_per_group(abort_send_recv_shutdown, Config) -> + [{stream_shutdown_flag, ?QUIC_STREAM_SHUTDOWN_FLAG_ABORT} | Config]; +init_per_group(graceful_shutdown, Config) -> + [{stream_shutdown_flag, ?QUIC_STREAM_SHUTDOWN_FLAG_GRACEFUL} | Config]; +init_per_group(profile_max_throughput, Config) -> + quicer:reg_open(quic_execution_profile_type_max_throughput), + Config; +init_per_group(profile_low_latency, Config) -> + quicer:reg_open(quic_execution_profile_low_latency), + Config; +init_per_group(_, Config) -> + Config. + +end_per_group(_, Config) -> + Config. + +init_per_testcase(_, Config) -> + emqx_common_test_helpers:start_apps([]), + start_emqx_quic(?config(port, Config)), + Config. + +t_quic_sock(Config) -> + Port = 4567, + SslOpts = [ + {cert, certfile(Config)}, + {key, keyfile(Config)}, + {idle_timeout_ms, 10000}, + % QUIC_SERVER_RESUME_AND_ZERORTT + {server_resumption_level, 2}, + {peer_bidi_stream_count, 10}, + {alpn, ["mqtt"]} + ], + Server = quic_server:start_link(Port, SslOpts), + timer:sleep(500), + {ok, Sock} = emqtt_quic:connect( + "localhost", + Port, + [{alpn, ["mqtt"]}, {active, false}], + 3000 + ), + send_and_recv_with(Sock), + ok = emqtt_quic:close(Sock), + quic_server:stop(Server). + +t_quic_sock_fail(_Config) -> + Port = 4567, + Error1 = + {error, + {transport_down, #{ + error => 2, + status => connection_refused + }}}, + Error2 = {error, {transport_down, #{error => 1, status => unreachable}}}, + case + emqtt_quic:connect( + "localhost", + Port, + [{alpn, ["mqtt"]}, {active, false}], + 3000 + ) + of + Error1 -> + ok; + Error2 -> + ok; + Other -> + ct:fail("unexpected return ~p", [Other]) + end. + +t_0_rtt(Config) -> + Port = 4568, + SslOpts = [ + {cert, certfile(Config)}, + {key, keyfile(Config)}, + {idle_timeout_ms, 10000}, + % QUIC_SERVER_RESUME_AND_ZERORTT + {server_resumption_level, 2}, + {peer_bidi_stream_count, 10}, + {alpn, ["mqtt"]} + ], + Server = quic_server:start_link(Port, SslOpts), + timer:sleep(500), + {ok, {quic, Conn, _Stream} = Sock} = emqtt_quic:connect( + "localhost", + Port, + [ + {alpn, ["mqtt"]}, + {active, false}, + {quic_event_mask, 1} + ], + 3000 + ), + send_and_recv_with(Sock), + ok = emqtt_quic:close(Sock), + NST = + receive + {quic, nst_received, Conn, Ticket} -> + Ticket + end, + {ok, Sock2} = emqtt_quic:connect( + "localhost", + Port, + [ + {alpn, ["mqtt"]}, + {active, false}, + {nst, NST} + ], + 3000 + ), + send_and_recv_with(Sock2), + ok = emqtt_quic:close(Sock2), + quic_server:stop(Server). 
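t_quic_sock and the 0-RTT cases point the throwaway quic_server at TLS material via certfile/1 and keyfile/1, which are not shown in this hunk. A plausible definition using the conventional common_test data_dir (an assumption; the helpers may resolve the paths differently):

    certfile(Config) -> filename:join([?config(data_dir, Config), "server.pem"]).
    keyfile(Config) -> filename:join([?config(data_dir, Config), "server.key"]).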
+ +t_0_rtt_fail(Config) -> + Port = 4569, + SslOpts = [ + {cert, certfile(Config)}, + {key, keyfile(Config)}, + {idle_timeout_ms, 10000}, + % QUIC_SERVER_RESUME_AND_ZERORTT + {server_resumption_level, 2}, + {peer_bidi_stream_count, 10}, + {alpn, ["mqtt"]} + ], + Server = quic_server:start_link(Port, SslOpts), + timer:sleep(500), + {ok, {quic, Conn, _Stream} = Sock} = emqtt_quic:connect( + "localhost", + Port, + [ + {alpn, ["mqtt"]}, + {active, false}, + {quic_event_mask, 1} + ], + 3000 + ), + send_and_recv_with(Sock), + ok = emqtt_quic:close(Sock), + <<_Head:16, Left/binary>> = + receive + {quic, nst_received, Conn, Ticket} when is_binary(Ticket) -> + Ticket + end, + + Error = {error, {not_found, invalid_parameter}}, + Error = emqtt_quic:connect( + "localhost", + Port, + [ + {alpn, ["mqtt"]}, + {active, false}, + {nst, Left} + ], + 3000 + ), + quic_server:stop(Server). + +t_multi_streams_sub(Config) -> + PubQos = ?config(pub_qos, Config), + SubQos = ?config(sub_qos, Config), + RecQos = calc_qos(PubQos, SubQos), + Topic = atom_to_binary(?FUNCTION_NAME), + {ok, C} = emqtt:start_link([{proto_ver, v5} | Config]), + {ok, _} = emqtt:quic_connect(C), + {ok, _, [SubQos]} = emqtt:subscribe_via(C, {new_data_stream, []}, #{}, [ + {Topic, [{qos, SubQos}]} + ]), + case emqtt:publish(C, Topic, <<"qos 2 1">>, PubQos) of + ok when PubQos == 0 -> ok; + {ok, _} -> ok + end, + receive + {publish, #{ + client_pid := C, + payload := <<"qos 2 1">>, + qos := RecQos, + topic := Topic + }} -> + ok; + Other -> + ct:fail("unexpected recv ~p", [Other]) + after 100 -> + ct:fail("not received") + end, + ok = emqtt:disconnect(C). + +t_multi_streams_pub_5x100(Config) -> + PubQos = ?config(pub_qos, Config), + SubQos = ?config(sub_qos, Config), + Topic = atom_to_binary(?FUNCTION_NAME), + {ok, C} = emqtt:start_link([{proto_ver, v5} | Config]), + {ok, _} = emqtt:quic_connect(C), + {ok, _, [SubQos]} = emqtt:subscribe_via(C, {new_data_stream, []}, #{}, [ + {Topic, [{qos, SubQos}]} + ]), + + PubVias = lists:map( + fun(_N) -> + {ok, Via} = emqtt:start_data_stream(C, []), + Via + end, + lists:seq(1, 5) + ), + CtrlVia = proplists:get_value(socket, emqtt:info(C)), + [ + begin + case emqtt:publish_via(C, PVia, Topic, #{}, <<"stream data ", N>>, [{qos, PubQos}]) of + ok when PubQos == 0 -> ok; + {ok, _} -> ok + end, + 0 == (N rem 10) andalso timer:sleep(10) + end + || %% also publish on control stream + N <- lists:seq(1, 100), + PVia <- [CtrlVia | PubVias] + ], + ?assert(timeout =/= recv_pub(600)), + ok = emqtt:disconnect(C). 
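recv_pub/1 collects publishes from the test process mailbox, and its call sites pin down the shape: recv_pub(600) above must gather the 6 x 100 publishes, recv_pub(3, [], 1000) later passes an explicit accumulator and per-message timeout, and `timeout = recv_pub(1)` relies on the timeout atom being returned. A sketch consistent with all of those (assumed; the real helper sits further down the file):

    recv_pub(N) -> recv_pub(N, [], 100).

    recv_pub(0, Acc, _Tmo) ->
        lists:reverse(Acc);
    recv_pub(N, Acc, Tmo) ->
        receive
            {publish, _} = Pub -> recv_pub(N - 1, [Pub | Acc], Tmo)
        after Tmo ->
            timeout
        end.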
+ +t_multi_streams_pub_parallel(Config) -> + PubQos = ?config(pub_qos, Config), + SubQos = ?config(sub_qos, Config), + RecQos = calc_qos(PubQos, SubQos), + PktId1 = calc_pkt_id(RecQos, 1), + PktId2 = calc_pkt_id(RecQos, 2), + Topic = atom_to_binary(?FUNCTION_NAME), + {ok, C} = emqtt:start_link([{proto_ver, v5} | Config]), + {ok, _} = emqtt:quic_connect(C), + {ok, _, [SubQos]} = emqtt:subscribe(C, #{}, [{Topic, [{qos, SubQos}]}]), + ok = emqtt:publish_async( + C, + {new_data_stream, []}, + Topic, + <<"stream data 1">>, + [{qos, PubQos}], + undefined + ), + ok = emqtt:publish_async( + C, + {new_data_stream, []}, + Topic, + <<"stream data 2">>, + [{qos, PubQos}], + undefined + ), + PubRecvs = recv_pub(2), + ?assertMatch( + [ + {publish, #{ + client_pid := C, + packet_id := PktId1, + payload := <<"stream data", _/binary>>, + qos := RecQos, + topic := Topic + }}, + {publish, #{ + client_pid := C, + packet_id := PktId2, + payload := <<"stream data", _/binary>>, + qos := RecQos, + topic := Topic + }} + ], + PubRecvs + ), + Payloads = [P || {publish, #{payload := P}} <- PubRecvs], + ?assert( + [<<"stream data 1">>, <<"stream data 2">>] == Payloads orelse + [<<"stream data 2">>, <<"stream data 1">>] == Payloads + ), + ok = emqtt:disconnect(C). + +%% @doc test that a pub stream which sends an incomplete MQTT packet cannot block another pub stream. +t_multi_streams_pub_parallel_no_blocking(Config) -> + PubQos = ?config(pub_qos, Config), + SubQos = ?config(sub_qos, Config), + RecQos = calc_qos(PubQos, SubQos), + PktId2 = calc_pkt_id(RecQos, 1), + Topic = atom_to_binary(?FUNCTION_NAME), + {ok, C} = emqtt:start_link([{proto_ver, v5} | Config]), + {ok, _} = emqtt:quic_connect(C), + {ok, _, [SubQos]} = emqtt:subscribe(C, #{}, [{Topic, [{qos, SubQos}]}]), + Drop = <<"stream data 1">>, + meck:new(emqtt_quic, [passthrough, no_history]), + meck:expect(emqtt_quic, send, fun(Sock, IoList) -> + case lists:last(IoList) == Drop of + true -> + ct:pal("meck dropping ~p", [Drop]), + meck:passthrough([Sock, IoList -- [Drop]]); + false -> + meck:passthrough([Sock, IoList]) + end + end), + ok = emqtt:publish_async( + C, + {new_data_stream, []}, + Topic, + Drop, + [{qos, PubQos}], + undefined + ), + ok = emqtt:publish_async( + C, + {new_data_stream, []}, + Topic, + <<"stream data 2">>, + [{qos, PubQos}], + undefined + ), + PubRecvs = recv_pub(1), + ?assertMatch( + [ + {publish, #{ + client_pid := C, + packet_id := PktId2, + payload := <<"stream data 2">>, + qos := RecQos, + topic := Topic + }} + ], + PubRecvs + ), + meck:unload(emqtt_quic), + ?assertEqual(timeout, recv_pub(1)), + ok = emqtt:disconnect(C).
+ +t_multi_streams_packet_boundary(Config) -> + PubQos = ?config(pub_qos, Config), + SubQos = ?config(sub_qos, Config), + RecQos = calc_qos(PubQos, SubQos), + PktId1 = calc_pkt_id(RecQos, 1), + PktId2 = calc_pkt_id(RecQos, 2), + PktId3 = calc_pkt_id(RecQos, 3), + Topic = atom_to_binary(?FUNCTION_NAME), + + %% make quicer batch jobs + quicer:reg_open(quic_execution_profile_type_max_throughput), + + {ok, C} = emqtt:start_link([{proto_ver, v5} | Config]), + {ok, _} = emqtt:quic_connect(C), + {ok, _, [SubQos]} = emqtt:subscribe(C, #{}, [{Topic, [{qos, SubQos}]}]), + + {ok, PubVia} = emqtt:start_data_stream(C, []), + ok = emqtt:publish_async( + C, + PubVia, + Topic, + <<"stream data 1">>, + [{qos, PubQos}], + undefined + ), + ok = emqtt:publish_async( + C, + PubVia, + Topic, + <<"stream data 2">>, + [{qos, PubQos}], + undefined + ), + ThisFunB = atom_to_binary(?FUNCTION_NAME), + LargePart3 = iolist_to_binary([ + <<ThisFunB/binary, (integer_to_binary(N))/binary>> + || N <- lists:seq(1, 20000) + ]), + ok = emqtt:publish_async( + C, + PubVia, + Topic, + LargePart3, + [{qos, PubQos}], + undefined + ), + PubRecvs = recv_pub(3, [], 1000), + ?assertMatch( + [ + {publish, #{ + client_pid := C, + packet_id := PktId1, + payload := <<"stream data 1">>, + qos := RecQos, + topic := Topic + }}, + {publish, #{ + client_pid := C, + packet_id := PktId2, + payload := <<"stream data 2">>, + qos := RecQos, + topic := Topic + }}, + {publish, #{ + client_pid := C, + packet_id := PktId3, + payload := _LargePart3_TO_BE_CHECKED, + qos := RecQos, + topic := Topic + }} + ], + PubRecvs + ), + {publish, #{payload := LargePart3Recv}} = lists:last(PubRecvs), + CommonLen = binary:longest_common_prefix([LargePart3Recv, LargePart3]), + Size3 = byte_size(LargePart3), + case Size3 - CommonLen of + 0 -> + ok; + Left -> + ct:fail( + "unmatched large payload: offset: ~p ~n send: ~p ~n recv ~p", + [ + CommonLen, + binary:part(LargePart3, {CommonLen, Left}), + binary:part(LargePart3Recv, {CommonLen, Left}) + ] + ) + end, + ok = emqtt:disconnect(C).
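When the large payload mismatches, the case reports the first diverging offset via binary:longest_common_prefix/1, which localizes boundary bugs precisely. A quick shell illustration:

    1> binary:longest_common_prefix([<<"stream data 1">>, <<"stream dXta 1">>]).
    8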
+ +%% @doc test that one malformed stream will not close the entire connection +t_multi_streams_packet_malform(Config) -> + PubQos = ?config(pub_qos, Config), + SubQos = ?config(sub_qos, Config), + RecQos = calc_qos(PubQos, SubQos), + PktId1 = calc_pkt_id(RecQos, 1), + PktId2 = calc_pkt_id(RecQos, 2), + PktId3 = calc_pkt_id(RecQos, 3), + Topic = atom_to_binary(?FUNCTION_NAME), + + %% make quicer batch jobs + quicer:reg_open(quic_execution_profile_type_max_throughput), + + {ok, C} = emqtt:start_link([{proto_ver, v5} | Config]), + {ok, _} = emqtt:quic_connect(C), + {ok, _, [SubQos]} = emqtt:subscribe(C, #{}, [{Topic, [{qos, SubQos}]}]), + + {ok, PubVia} = emqtt:start_data_stream(C, []), + ok = emqtt:publish_async( + C, + PubVia, + Topic, + <<"stream data 1">>, + [{qos, PubQos}], + undefined + ), + + {ok, {quic, _Conn, MalformStream}} = emqtt:start_data_stream(C, []), + {ok, _} = quicer:send(MalformStream, <<0, 0, 0, 0, 0, 0, 0, 0, 0, 0>>), + + ok = emqtt:publish_async( + C, + PubVia, + Topic, + <<"stream data 2">>, + [{qos, PubQos}], + undefined + ), + LargePart3 = binary:copy(atom_to_binary(?FUNCTION_NAME), 2000), + ok = emqtt:publish_async( + C, + PubVia, + Topic, + LargePart3, + [{qos, PubQos}], + undefined + ), + PubRecvs = recv_pub(3), + ?assertMatch( + [ + {publish, #{ + client_pid := C, + packet_id := PktId1, + payload := <<"stream data 1">>, + qos := RecQos, + topic := Topic + }}, + {publish, #{ + client_pid := C, + packet_id := PktId2, + payload := <<"stream data 2">>, + qos := RecQos, + topic := Topic + }}, + {publish, #{ + client_pid := C, + packet_id := PktId3, + payload := LargePart3, + qos := RecQos, + topic := Topic + }} + ], + PubRecvs + ), + + case quicer:send(MalformStream, <<0, 0, 0, 0, 0, 0, 0, 0, 0, 0>>) of + {ok, 10} -> ok; + {error, cancelled} -> ok; + {error, stm_send_error, aborted} -> ok + end, + + ?assert(is_list(emqtt:info(C))), + + {error, stm_send_error, aborted} = quicer:send(MalformStream, <<1, 2, 3, 4, 5, 6, 7, 8, 9, 0>>), + + ?assert(is_list(emqtt:info(C))), + + ok = emqtt:disconnect(C).
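The case expression above tolerates each outcome quicer:send/2 can produce once the peer has aborted the stream. The same tolerance could be wrapped as a helper; a sketch, with the tuple shapes taken from the matches above rather than from quicer documentation:

    safe_send(Stream, Data) ->
        case quicer:send(Stream, Data) of
            {ok, _Len} -> ok;
            {error, cancelled} -> closed;
            {error, stm_send_error, _Reason} -> closed
        end.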
+ +t_multi_streams_packet_too_large(Config) -> + PubQos = ?config(pub_qos, Config), + SubQos = ?config(sub_qos, Config), + Topic = atom_to_binary(?FUNCTION_NAME), + RecQos = calc_qos(PubQos, SubQos), + PktId1 = calc_pkt_id(RecQos, 1), + PktId2 = calc_pkt_id(RecQos, 2), + PktId3 = calc_pkt_id(RecQos, 3), + + OldMax = emqx_config:get_zone_conf(default, [mqtt, max_packet_size]), + emqx_config:put_zone_conf(default, [mqtt, max_packet_size], 1000), + + {ok, C} = emqtt:start_link([{proto_ver, v5} | Config]), + {ok, _} = emqtt:quic_connect(C), + {ok, _, [SubQos]} = emqtt:subscribe(C, #{}, [{Topic, [{qos, SubQos}]}]), + + {ok, PubVia} = emqtt:start_data_stream(C, []), + ok = emqtt:publish_async( + C, + PubVia, + Topic, + <<"stream data 1">>, + [{qos, PubQos}], + undefined + ), + + ok = emqtt:publish_async( + C, + PubVia, + Topic, + <<"stream data 2">>, + [{qos, PubQos}], + undefined + ), + + PubRecvs = recv_pub(2), + ?assertMatch( + [ + {publish, #{ + client_pid := C, + packet_id := PktId1, + payload := <<"stream data 1">>, + qos := RecQos, + topic := Topic + }}, + {publish, #{ + client_pid := C, + packet_id := PktId2, + payload := <<"stream data 2">>, + qos := RecQos, + topic := Topic + }} + ], + PubRecvs + ), + + {ok, PubVia2} = emqtt:start_data_stream(C, []), + ok = emqtt:publish_async( + C, + PubVia2, + Topic, + binary:copy(<<"too large">>, 200), + [{qos, PubQos}], + undefined + ), + ?assert(is_list(emqtt:info(C))), + + timeout = recv_pub(1), + + %% send large payload on stream 1 + ok = emqtt:publish_async( + C, + PubVia, + Topic, + binary:copy(<<"too large">>, 200), + [{qos, PubQos}], + undefined + ), + timeout = recv_pub(1), + ?assert(is_list(emqtt:info(C))), + + %% Connection could be kept + {error, stm_send_error, _} = quicer:send(via_stream(PubVia), <<1>>), + {error, stm_send_error, _} = quicer:send(via_stream(PubVia2), <<1>>), + %% We could send data over new stream + {ok, PubVia3} = emqtt:start_data_stream(C, []), + ok = emqtt:publish_async( + C, + PubVia3, + Topic, + <<"stream data 3">>, + [{qos, PubQos}], + undefined + ), + [ + {publish, #{ + client_pid := C, + packet_id := PktId3, + payload := <<"stream data 3">>, + qos := RecQos, + topic := Topic + }} + ] = recv_pub(1), + + ?assert(is_list(emqtt:info(C))), + + emqx_config:put_zone_conf(default, [mqtt, max_packet_size], OldMax), + ok = emqtt:disconnect(C). + +t_conn_change_client_addr(Config) -> + PubQos = ?config(pub_qos, Config), + SubQos = ?config(sub_qos, Config), + RecQos = calc_qos(PubQos, SubQos), + Topic = atom_to_binary(?FUNCTION_NAME), + {ok, C} = emqtt:start_link([{proto_ver, v5} | Config]), + {ok, _} = emqtt:quic_connect(C), + {ok, _, [SubQos]} = emqtt:subscribe(C, #{}, [{Topic, [{qos, SubQos}]}]), + + {ok, {quic, Conn, _} = PubVia} = emqtt:start_data_stream(C, []), + ok = emqtt:publish_async( + C, + PubVia, + Topic, + <<"stream data 1">>, + [{qos, PubQos}], + undefined + ), + + ?assertMatch( + [ + {publish, #{ + client_pid := C, + packet_id := _PktId1, + payload := <<"stream data 1">>, + qos := RecQos + }} + ], + recv_pub(1) + ), + NewPort = select_port(), + {ok, OldAddr} = quicer:sockname(Conn), + ?assertEqual( + ok, quicer:setopt(Conn, param_conn_local_address, "127.0.0.1:" ++ integer_to_list(NewPort)) + ), + {ok, NewAddr} = quicer:sockname(Conn), + ct:pal("NewAddr: ~p, Old Addr: ~p", [NewAddr, OldAddr]), + ?assertNotEqual(OldAddr, NewAddr), + ?assert(is_list(emqtt:info(C))), + ok = emqtt:disconnect(C). 
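t_multi_streams_packet_too_large unwraps raw stream handles with via_stream/1, and t_conn_change_client_addr picks a fresh local UDP port with select_port/0; neither helper appears in this excerpt. Sketches consistent with their call sites (the bind-port-0 trick is an assumption, though a common one):

    %% A via socket is the tuple {quic, Connection, Stream}; see the
    %% pattern matches elsewhere in the suite.
    via_stream({quic, _Conn, Stream}) -> Stream.

    %% Ask the OS for a free UDP port, then release it for the caller.
    select_port() ->
        {ok, S} = gen_udp:open(0, [{reuseaddr, true}]),
        {ok, Port} = inet:port(S),
        ok = gen_udp:close(S),
        Port.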
+ +t_multi_streams_sub_pub_async(Config) -> + Topic = atom_to_binary(?FUNCTION_NAME), + PubQos = ?config(pub_qos, Config), + SubQos = ?config(sub_qos, Config), + RecQos = calc_qos(PubQos, SubQos), + PktId1 = calc_pkt_id(RecQos, 1), + Topic2 = <<Topic/binary, "_two">>, + {ok, C} = emqtt:start_link([{proto_ver, v5} | Config]), + {ok, _} = emqtt:quic_connect(C), + {ok, _, [SubQos]} = emqtt:subscribe_via(C, {new_data_stream, []}, #{}, [ + {Topic, [{qos, SubQos}]} + ]), + {ok, _, [SubQos]} = emqtt:subscribe_via(C, {new_data_stream, []}, #{}, [ + {Topic2, [{qos, SubQos}]} + ]), + ok = emqtt:publish_async( + C, + {new_data_stream, []}, + Topic, + <<"stream data 1">>, + [{qos, PubQos}], + undefined + ), + ok = emqtt:publish_async( + C, + {new_data_stream, []}, + Topic2, + <<"stream data 2">>, + [{qos, PubQos}], + undefined + ), + PubRecvs = recv_pub(2), + ?assertMatch( + [ + {publish, #{ + client_pid := C, + packet_id := PktId1, + payload := <<"stream data", _/binary>>, + qos := RecQos + }}, + {publish, #{ + client_pid := C, + packet_id := PktId1, + payload := <<"stream data", _/binary>>, + qos := RecQos + }} + ], + PubRecvs + ), + Payloads = [P || {publish, #{payload := P}} <- PubRecvs], + ?assert( + [<<"stream data 1">>, <<"stream data 2">>] == Payloads orelse + [<<"stream data 2">>, <<"stream data 1">>] == Payloads + ), + ok = emqtt:disconnect(C). + +t_multi_streams_sub_pub_sync(Config) -> + PubQos = ?config(pub_qos, Config), + SubQos = ?config(sub_qos, Config), + RecQos = calc_qos(PubQos, SubQos), + PktId1 = calc_pkt_id(RecQos, 1), + Topic = atom_to_binary(?FUNCTION_NAME), + Topic2 = <<Topic/binary, "_two">>, + {ok, C} = emqtt:start_link([{proto_ver, v5} | Config]), + {ok, _} = emqtt:quic_connect(C), + {ok, #{via := SVia1}, [SubQos]} = emqtt:subscribe_via(C, {new_data_stream, []}, #{}, [ + {Topic, [{qos, SubQos}]} + ]), + {ok, #{via := SVia2}, [SubQos]} = emqtt:subscribe_via(C, {new_data_stream, []}, #{}, [ + {Topic2, [{qos, SubQos}]} + ]), + + case + emqtt:publish_via(C, {new_data_stream, []}, Topic, #{}, <<"stream data 3">>, [{qos, PubQos}]) + of + ok when PubQos == 0 -> + Via1 = undefined, + ok; + {ok, #{reason_code := 0, via := Via1}} -> + ok + end, + case + emqtt:publish_via(C, {new_data_stream, []}, Topic2, #{}, <<"stream data 4">>, [ + {qos, PubQos} + ]) + of + ok when PubQos == 0 -> ok; + {ok, #{reason_code := 0, via := Via2}} -> + ?assert(Via1 =/= Via2), + ok + end, + ct:pal("SVia1: ~p, SVia2: ~p", [SVia1, SVia2]), + PubRecvs = recv_pub(2), + ?assertMatch( + [ + {publish, #{ + client_pid := C, + packet_id := PktId1, + payload := <<"stream data 3">>, + qos := RecQos, + via := SVia1 + }}, + {publish, #{ + client_pid := C, + packet_id := PktId1, + payload := <<"stream data 4">>, + qos := RecQos, + via := SVia2 + }} + ], + lists:sort(PubRecvs) + ), + ok = emqtt:disconnect(C).
+ +t_multi_streams_dup_sub(Config) -> + PubQos = ?config(pub_qos, Config), + SubQos = ?config(sub_qos, Config), + RecQos = calc_qos(PubQos, SubQos), + PktId1 = calc_pkt_id(RecQos, 1), + Topic = atom_to_binary(?FUNCTION_NAME), + {ok, C} = emqtt:start_link([{proto_ver, v5} | Config]), + {ok, _} = emqtt:quic_connect(C), + {ok, #{via := SVia1}, [SubQos]} = emqtt:subscribe_via(C, {new_data_stream, []}, #{}, [ + {Topic, [{qos, SubQos}]} + ]), + {ok, #{via := SVia2}, [SubQos]} = emqtt:subscribe_via(C, {new_data_stream, []}, #{}, [ + {Topic, [{qos, SubQos}]} + ]), + + #{data_stream_socks := [{quic, _Conn, SubStream} | _]} = proplists:get_value( + extra, emqtt:info(C) + ), + ?assertEqual(2, length(emqx_broker:subscribers(Topic))), + + case + emqtt:publish_via(C, {new_data_stream, []}, Topic, #{}, <<"stream data 3">>, [{qos, PubQos}]) + of + ok when PubQos == 0 -> + ok; + {ok, #{reason_code := 0, via := _Via1}} -> + ok + end, + PubRecvs = recv_pub(2), + ?assertMatch( + [ + {publish, #{ + client_pid := C, + packet_id := PktId1, + payload := <<"stream data 3">>, + qos := RecQos + }}, + {publish, #{ + client_pid := C, + packet_id := PktId1, + payload := <<"stream data 3">>, + qos := RecQos + }} + ], + lists:sort(PubRecvs) + ), + + RecvVias = [Via || {publish, #{via := Via}} <- PubRecvs], + + ct:pal("~p, ~p, ~n recv from: ~p~n", [SVia1, SVia2, PubRecvs]), + %% Can recv in any order + ?assert([SVia1, SVia2] == RecvVias orelse [SVia2, SVia1] == RecvVias), + + %% Shutdown one stream + quicer:async_shutdown_stream(SubStream, ?QUIC_STREAM_SHUTDOWN_FLAG_GRACEFUL, 500), + timer:sleep(100), + + ?assertEqual(1, length(emqx_broker:subscribers(Topic))), + + ok = emqtt:disconnect(C). + +t_multi_streams_corr_topic(Config) -> + PubQos = ?config(pub_qos, Config), + SubQos = ?config(sub_qos, Config), + RecQos = calc_qos(PubQos, SubQos), + PktId1 = calc_pkt_id(RecQos, 1), + PktId2 = calc_pkt_id(RecQos, 2), + Topic = atom_to_binary(?FUNCTION_NAME), + {ok, C} = emqtt:start_link([{proto_ver, v5} | Config]), + {ok, _} = emqtt:quic_connect(C), + {ok, #{via := SubVia}, [SubQos]} = emqtt:subscribe_via(C, {new_data_stream, []}, #{}, [ + {Topic, [{qos, SubQos}]} + ]), + + case + emqtt:publish_via(C, {new_data_stream, []}, Topic, #{}, <<1, 2, 3, 4, 5>>, [{qos, PubQos}]) + of + ok when PubQos == 0 -> + ok; + {ok, #{reason_code := 0, via := _Via}} -> + ok + end, + + #{data_stream_socks := [PubVia | _]} = proplists:get_value(extra, emqtt:info(C)), + ?assert(PubVia =/= SubVia), + + case emqtt:publish_via(C, PubVia, Topic, #{}, <<6, 7, 8, 9>>, [{qos, PubQos}]) of + ok when PubQos == 0 -> ok; + {ok, #{reason_code := 0, via := PubVia}} -> ok + end, + PubRecvs = recv_pub(2), + ?assertMatch( + [ + {publish, #{ + client_pid := C, + packet_id := PktId1, + payload := <<1, 2, 3, 4, 5>>, + qos := RecQos + }}, + {publish, #{ + client_pid := C, + packet_id := PktId2, + payload := <<6, 7, 8, 9>>, + qos := RecQos + }} + ], + PubRecvs + ), + ok = emqtt:disconnect(C). 
+ +t_multi_streams_unsub(Config) -> + PubQos = ?config(pub_qos, Config), + SubQos = ?config(sub_qos, Config), + RecQos = calc_qos(PubQos, SubQos), + PktId1 = calc_pkt_id(RecQos, 1), + + Topic = atom_to_binary(?FUNCTION_NAME), + {ok, C} = emqtt:start_link([{proto_ver, v5} | Config]), + {ok, _} = emqtt:quic_connect(C), + {ok, #{via := SubVia}, [SubQos]} = emqtt:subscribe_via(C, {new_data_stream, []}, #{}, [ + {Topic, [{qos, SubQos}]} + ]), + case + emqtt:publish_via(C, {new_data_stream, []}, Topic, #{}, <<1, 2, 3, 4, 5>>, [{qos, PubQos}]) + of + ok when PubQos == 0 -> + ok; + {ok, #{reason_code := 0, via := _PVia}} -> + ok + end, + + #{data_stream_socks := [PubVia | _]} = proplists:get_value(extra, emqtt:info(C)), + ?assert(PubVia =/= SubVia), + PubRecvs = recv_pub(1), + ?assertMatch( + [ + {publish, #{ + client_pid := C, + packet_id := PktId1, + payload := <<1, 2, 3, 4, 5>>, + qos := RecQos + }} + ], + PubRecvs + ), + + emqtt:unsubscribe_via(C, SubVia, Topic), + + case emqtt:publish_via(C, PubVia, Topic, #{}, <<6, 7, 8, 9>>, [{qos, PubQos}]) of + ok when PubQos == 0 -> + ok; + {ok, #{reason_code := 16, via := PubVia, reason_code_name := no_matching_subscribers}} -> + ok + end, + + timeout = recv_pub(1), + ok = emqtt:disconnect(C). + +t_multi_streams_kill_sub_stream(Config) -> + PubQos = ?config(pub_qos, Config), + SubQos = ?config(sub_qos, Config), + RecQos = calc_qos(PubQos, SubQos), + PktId1 = calc_pkt_id(RecQos, 1), + + Topic = atom_to_binary(?FUNCTION_NAME), + Topic2 = <<Topic/binary, "_two">>, + {ok, C} = emqtt:start_link([{proto_ver, v5} | Config]), + {ok, _} = emqtt:quic_connect(C), + {ok, #{via := _SVia}, [SubQos]} = emqtt:subscribe_via(C, {new_data_stream, []}, #{}, [ + {Topic, [{qos, SubQos}]} + ]), + {ok, #{via := _SVia2}, [SubQos]} = emqtt:subscribe_via(C, {new_data_stream, []}, #{}, [ + {Topic2, [{qos, SubQos}]} + ]), + [TopicStreamOwner] = emqx_broker:subscribers(Topic), + exit(TopicStreamOwner, kill), + case + emqtt:publish_via(C, {new_data_stream, []}, Topic, #{}, <<1, 2, 3, 4, 5>>, [{qos, PubQos}]) + of + ok when PubQos == 0 -> + ok; + {ok, #{reason_code := Code, via := _PVia}} when Code == 0 orelse Code == 16 -> + ok + end, + + case + emqtt:publish_via(C, {new_data_stream, []}, Topic2, #{}, <<1, 2, 3, 4, 5>>, [{qos, PubQos}]) + of + ok when PubQos == 0 -> + ok; + {ok, #{reason_code := 0, via := _PVia2}} -> + ok + end, + + ?assertMatch( + [ + {publish, #{ + client_pid := C, + packet_id := PktId1, + topic := Topic2, + payload := <<1, 2, 3, 4, 5>>, + qos := RecQos + }} + ], + recv_pub(1) + ), + ?assertEqual(timeout, recv_pub(1)), + ok.
+ +t_multi_streams_unsub_via_other(Config) -> + PubQos = ?config(pub_qos, Config), + SubQos = ?config(sub_qos, Config), + RecQos = calc_qos(PubQos, SubQos), + PktId1 = calc_pkt_id(RecQos, 1), + PktId2 = calc_pkt_id(RecQos, 2), + + Topic = atom_to_binary(?FUNCTION_NAME), + Topic2 = <<Topic/binary, "_two">>, + {ok, C} = emqtt:start_link([{proto_ver, v5} | Config]), + {ok, _} = emqtt:quic_connect(C), + {ok, #{via := _SVia}, [SubQos]} = emqtt:subscribe_via(C, {new_data_stream, []}, #{}, [ + {Topic, [{qos, SubQos}]} + ]), + {ok, #{via := SVia2}, [SubQos]} = emqtt:subscribe_via(C, {new_data_stream, []}, #{}, [ + {Topic2, [{qos, SubQos}]} + ]), + + case + emqtt:publish_via(C, {new_data_stream, []}, Topic, #{}, <<1, 2, 3, 4, 5>>, [{qos, PubQos}]) + of + ok when PubQos == 0 -> ok; + {ok, #{reason_code := 0, via := _PVia}} -> ok + end, + + PubRecvs = recv_pub(1), + ?assertMatch( + [ + {publish, #{ + client_pid := C, + packet_id := PktId1, + payload := <<1, 2, 3, 4, 5>>, + qos := RecQos + }} + ], + PubRecvs + ), + + #{data_stream_socks := [PubVia | _]} = proplists:get_value(extra, emqtt:info(C)), + + %% Unsub topic1 via stream2 should fail with error code 17: "No subscription existed" + {ok, #{via := SVia2}, [17]} = emqtt:unsubscribe_via(C, SVia2, Topic), + + case emqtt:publish_via(C, PubVia, Topic, #{}, <<6, 7, 8, 9>>, [{qos, PubQos}]) of + ok when PubQos == 0 -> ok; + {ok, #{reason_code := 0, via := _PVia2}} -> ok + end, + + PubRecvs2 = recv_pub(1), + ?assertMatch( + [ + {publish, #{ + client_pid := C, + packet_id := PktId2, + payload := <<6, 7, 8, 9>>, + qos := RecQos + }} + ], + PubRecvs2 + ), + ok = emqtt:disconnect(C). + +t_multi_streams_shutdown_pub_data_stream(Config) -> + PubQos = ?config(pub_qos, Config), + SubQos = ?config(sub_qos, Config), + RecQos = calc_qos(PubQos, SubQos), + PktId1 = calc_pkt_id(RecQos, 1), + + Topic = atom_to_binary(?FUNCTION_NAME), + Topic2 = <<Topic/binary, "_two">>, + {ok, C} = emqtt:start_link([{proto_ver, v5} | Config]), + {ok, _} = emqtt:quic_connect(C), + {ok, #{via := SVia}, [SubQos]} = emqtt:subscribe_via(C, {new_data_stream, []}, #{}, [ + {Topic, [{qos, SubQos}]} + ]), + {ok, #{via := SVia2}, [SubQos]} = emqtt:subscribe_via(C, {new_data_stream, []}, #{}, [ + {Topic2, [{qos, SubQos}]} + ]), + + ?assert(SVia =/= SVia2), + + case + emqtt:publish_via(C, {new_data_stream, []}, Topic, #{}, <<1, 2, 3, 4, 5>>, [{qos, PubQos}]) + of + ok when PubQos == 0 -> ok; + {ok, #{reason_code := 0, via := _PVia}} -> ok + end, + + PubRecvs = recv_pub(1), + #{data_stream_socks := [PubVia | _]} = proplists:get_value(extra, emqtt:info(C)), + {quic, _Conn, DataStream} = PubVia, + quicer:shutdown_stream(DataStream, ?config(stream_shutdown_flag, Config), 500, 100), + timer:sleep(500), + %% Still alive + ?assert(is_list(emqtt:info(C))), + ?assertMatch( + [ + {publish, #{ + client_pid := C, + packet_id := PktId1, + payload := <<1, 2, 3, 4, 5>>, + qos := RecQos + }} + ], + PubRecvs + ), + emqtt:stop(C).
+ +t_multi_streams_shutdown_sub_data_stream(Config) -> + PubQos = ?config(pub_qos, Config), + SubQos = ?config(sub_qos, Config), + RecQos = calc_qos(PubQos, SubQos), + PktId1 = calc_pkt_id(RecQos, 1), + + Topic = atom_to_binary(?FUNCTION_NAME), + Topic2 = <<Topic/binary, "_two">>, + {ok, C} = emqtt:start_link([{proto_ver, v5} | Config]), + {ok, _} = emqtt:quic_connect(C), + {ok, #{via := SVia}, [SubQos]} = emqtt:subscribe_via(C, {new_data_stream, []}, #{}, [ + {Topic, [{qos, SubQos}]} + ]), + + {ok, #{via := SVia2}, [SubQos]} = emqtt:subscribe_via(C, {new_data_stream, []}, #{}, [ + {Topic2, [{qos, SubQos}]} + ]), + + ?assert(SVia =/= SVia2), + {quic, _Conn, DataStream} = SVia2, + quicer:shutdown_stream(DataStream, ?QUIC_STREAM_SHUTDOWN_FLAG_ABORT_RECEIVE, 500, 100), + + case + emqtt:publish_via(C, {new_data_stream, []}, Topic, #{}, <<1, 2, 3, 4, 5>>, [{qos, PubQos}]) + of + ok when PubQos == 0 -> ok; + {ok, #{reason_code := 0, via := _PVia}} -> ok + end, + + PubRecvs = recv_pub(1), + ?assertMatch( + [ + {publish, #{ + client_pid := C, + packet_id := PktId1, + payload := <<1, 2, 3, 4, 5>>, + qos := RecQos + }} + ], + PubRecvs + ), + + #{data_stream_socks := [_PubVia | _]} = proplists:get_value(extra, emqtt:info(C)), + timer:sleep(500), + %% Still alive + ?assert(is_list(emqtt:info(C))), + emqtt:stop(C). + +t_multi_streams_shutdown_ctrl_stream(Config) -> + PubQos = ?config(pub_qos, Config), + SubQos = ?config(sub_qos, Config), + RecQos = calc_qos(PubQos, SubQos), + PktId1 = calc_pkt_id(RecQos, 1), + + Topic = atom_to_binary(?FUNCTION_NAME), + Topic2 = <<Topic/binary, "_two">>, + {ok, C} = emqtt:start_link([{proto_ver, v5} | Config]), + unlink(C), + {ok, _} = emqtt:quic_connect(C), + {ok, #{via := _SVia}, [SubQos]} = emqtt:subscribe_via(C, {new_data_stream, []}, #{}, [ + {Topic, [{qos, SubQos}]} + ]), + {ok, #{via := _SVia2}, [SubQos]} = emqtt:subscribe_via(C, {new_data_stream, []}, #{}, [ + {Topic2, [{qos, SubQos}]} + ]), + + case + emqtt:publish_via(C, {new_data_stream, []}, Topic, #{}, <<1, 2, 3, 4, 5>>, [{qos, PubQos}]) + of + ok when PubQos == 0 -> ok; + {ok, #{reason_code := 0, via := _PVia}} -> ok + end, + + PubRecvs = recv_pub(1), + ?assertMatch( + [ + {publish, #{ + client_pid := C, + packet_id := PktId1, + payload := <<1, 2, 3, 4, 5>>, + qos := RecQos + }} + ], + PubRecvs + ), + + {quic, _Conn, Ctrlstream} = proplists:get_value(socket, emqtt:info(C)), + Flag = ?config(stream_shutdown_flag, Config), + AppErrorCode = + case Flag of + ?QUIC_STREAM_SHUTDOWN_FLAG_GRACEFUL -> 0; + _ -> 500 + end, + quicer:shutdown_stream(Ctrlstream, Flag, AppErrorCode, 1000), + timer:sleep(500), + %% Client should be closed + ?assertMatch({'EXIT', {noproc, {gen_statem, call, [_, info, infinity]}}}, catch emqtt:info(C)).
+ +t_multi_streams_shutdown_ctrl_stream_then_reconnect(Config) -> + erlang:process_flag(trap_exit, true), + PubQos = ?config(pub_qos, Config), + SubQos = ?config(sub_qos, Config), + RecQos = calc_qos(PubQos, SubQos), + PktId1 = calc_pkt_id(RecQos, 1), + + Topic = atom_to_binary(?FUNCTION_NAME), + Topic2 = <<Topic/binary, "_two">>, + {ok, C} = emqtt:start_link([ + {proto_ver, v5}, + {reconnect, true}, + {clean_start, false}, + {clientid, atom_to_binary(?FUNCTION_NAME)}, + %% speedup test + {connect_timeout, 5} + | Config + ]), + {ok, _} = emqtt:quic_connect(C), + {ok, #{via := SVia}, [SubQos]} = emqtt:subscribe_via(C, {new_data_stream, []}, #{}, [ + {Topic, [{qos, SubQos}]} + ]), + {ok, #{via := SVia2}, [SubQos]} = emqtt:subscribe_via(C, {new_data_stream, []}, #{}, [ + {Topic2, [{qos, SubQos}]} + ]), + + ?assert(SVia2 =/= SVia), + + case + emqtt:publish_via(C, {new_data_stream, []}, Topic, #{}, <<1, 2, 3, 4, 5>>, [{qos, PubQos}]) + of + ok when PubQos == 0 -> ok; + {ok, #{reason_code := 0, via := _PVia}} -> ok + end, + + PubRecvs = recv_pub(1), + ?assertMatch( + [ + {publish, #{ + client_pid := C, + packet_id := PktId1, + payload := <<1, 2, 3, 4, 5>>, + qos := RecQos + }} + ], + PubRecvs + ), + + {quic, _Conn, Ctrlstream} = proplists:get_value(socket, emqtt:info(C)), + quicer:shutdown_stream(Ctrlstream, ?config(stream_shutdown_flag, Config), 500, 100), + timer:sleep(200), + %% Client should not be closed + ?assert(is_list(emqtt:info(C))), + emqtt:stop(C). + +t_multi_streams_emqx_ctrl_kill(Config) -> + erlang:process_flag(trap_exit, true), + PubQos = ?config(pub_qos, Config), + SubQos = ?config(sub_qos, Config), + RecQos = calc_qos(PubQos, SubQos), + PktId1 = calc_pkt_id(RecQos, 1), + + Topic = atom_to_binary(?FUNCTION_NAME), + Topic2 = <<Topic/binary, "_two">>, + {ok, C} = emqtt:start_link([ + {proto_ver, v5}, + {reconnect, false}, + %% speedup test + {connect_timeout, 5} + | Config + ]), + {ok, _} = emqtt:quic_connect(C), + {ok, #{via := SVia}, [SubQos]} = emqtt:subscribe_via(C, {new_data_stream, []}, #{}, [ + {Topic, [{qos, SubQos}]} + ]), + {ok, #{via := SVia2}, [SubQos]} = emqtt:subscribe_via(C, {new_data_stream, []}, #{}, [ + {Topic2, [{qos, SubQos}]} + ]), + + ?assert(SVia2 =/= SVia), + + case + emqtt:publish_via(C, {new_data_stream, []}, Topic, #{}, <<1, 2, 3, 4, 5>>, [{qos, PubQos}]) + of + ok when PubQos == 0 -> ok; + {ok, #{reason_code := 0, via := _PVia}} -> ok + end, + + PubRecvs = recv_pub(1), + ?assertMatch( + [ + {publish, #{ + client_pid := C, + packet_id := PktId1, + payload := <<1, 2, 3, 4, 5>>, + qos := RecQos + }} + ], + PubRecvs + ), + + ClientId = proplists:get_value(clientid, emqtt:info(C)), + [{ClientId, TransPid}] = ets:lookup(emqx_channel, ClientId), + exit(TransPid, kill), + + %% Client should be closed + assert_client_die(C).
+ +t_multi_streams_emqx_ctrl_exit_normal(Config) -> + erlang:process_flag(trap_exit, true), + PubQos = ?config(pub_qos, Config), + SubQos = ?config(sub_qos, Config), + RecQos = calc_qos(PubQos, SubQos), + PktId1 = calc_pkt_id(RecQos, 1), + + Topic = atom_to_binary(?FUNCTION_NAME), + Topic2 = <<Topic/binary, "_two">>, + {ok, C} = emqtt:start_link([ + {proto_ver, v5}, + {reconnect, false}, + %% speedup test + {connect_timeout, 5} + | Config + ]), + {ok, _} = emqtt:quic_connect(C), + {ok, #{via := SVia}, [SubQos]} = emqtt:subscribe_via(C, {new_data_stream, []}, #{}, [ + {Topic, [{qos, SubQos}]} + ]), + {ok, #{via := SVia2}, [SubQos]} = emqtt:subscribe_via(C, {new_data_stream, []}, #{}, [ + {Topic2, [{qos, SubQos}]} + ]), + + ?assert(SVia2 =/= SVia), + + case + emqtt:publish_via(C, {new_data_stream, []}, Topic, #{}, <<1, 2, 3, 4, 5>>, [{qos, PubQos}]) + of + ok when PubQos == 0 -> ok; + {ok, #{reason_code := 0, via := _PVia}} -> ok + end, + + PubRecvs = recv_pub(1), + ?assertMatch( + [ + {publish, #{ + client_pid := C, + packet_id := PktId1, + payload := <<1, 2, 3, 4, 5>>, + qos := RecQos + }} + ], + PubRecvs + ), + + ClientId = proplists:get_value(clientid, emqtt:info(C)), + [{ClientId, TransPid}] = ets:lookup(emqx_channel, ClientId), + + emqx_connection:stop(TransPid), + %% Client exits normally. + assert_client_die(C). + +t_multi_streams_remote_shutdown(Config) -> + erlang:process_flag(trap_exit, true), + PubQos = ?config(pub_qos, Config), + SubQos = ?config(sub_qos, Config), + RecQos = calc_qos(PubQos, SubQos), + PktId1 = calc_pkt_id(RecQos, 1), + + Topic = atom_to_binary(?FUNCTION_NAME), + Topic2 = <<Topic/binary, "_two">>, + {ok, C} = emqtt:start_link([ + {proto_ver, v5}, + {reconnect, false}, + {clientid, atom_to_binary(?FUNCTION_NAME)}, + %% speedup test + {connect_timeout, 5} + | Config + ]), + {ok, _} = emqtt:quic_connect(C), + {ok, #{via := SVia}, [SubQos]} = emqtt:subscribe_via(C, {new_data_stream, []}, #{}, [ + {Topic, [{qos, SubQos}]} + ]), + {ok, #{via := SVia2}, [SubQos]} = emqtt:subscribe_via(C, {new_data_stream, []}, #{}, [ + {Topic2, [{qos, SubQos}]} + ]), + + ?assert(SVia2 =/= SVia), + + case + emqtt:publish_via(C, {new_data_stream, []}, Topic, #{}, <<1, 2, 3, 4, 5>>, [{qos, PubQos}]) + of + ok when PubQos == 0 -> ok; + {ok, #{reason_code := 0, via := _PVia}} -> ok + end, + + PubRecvs = recv_pub(1), + ?assertMatch( + [ + {publish, #{ + client_pid := C, + packet_id := PktId1, + payload := <<1, 2, 3, 4, 5>>, + qos := RecQos + }} + ], + PubRecvs + ), + + {quic, _Conn, _Ctrlstream} = proplists:get_value(socket, emqtt:info(C)), + + ok = stop_emqx(), + %% Client should be closed + assert_client_die(C, 100, 50).
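assert_client_die/1 (and the assert_client_die/3 variant with explicit poll interval and retry count used just above) has to poll, since the emqtt client exits asynchronously after its transport goes away. A sketch matching both call shapes (the defaults are assumed):

    assert_client_die(C) -> assert_client_die(C, 100, 10).

    assert_client_die(C, Interval, Retries) ->
        case catch emqtt:info(C) of
            {'EXIT', {noproc, _}} ->
                ok;
            _StillAlive when Retries > 0 ->
                timer:sleep(Interval),
                assert_client_die(C, Interval, Retries - 1);
            Info ->
                ct:fail("client ~p still alive: ~p", [C, Info])
        end.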
+ +t_multi_streams_remote_shutdown_with_reconnect(Config) -> + erlang:process_flag(trap_exit, true), + PubQos = ?config(pub_qos, Config), + SubQos = ?config(sub_qos, Config), + RecQos = calc_qos(PubQos, SubQos), + PktId1 = calc_pkt_id(RecQos, 1), + + Topic = atom_to_binary(?FUNCTION_NAME), + Topic2 = <<Topic/binary, "_two">>, + {ok, C} = emqtt:start_link([ + {proto_ver, v5}, + {reconnect, true}, + {clean_start, false}, + {clientid, atom_to_binary(?FUNCTION_NAME)}, + %% speedup test + {connect_timeout, 5} + | Config + ]), + {ok, _} = emqtt:quic_connect(C), + {ok, #{via := SVia}, [SubQos]} = emqtt:subscribe_via(C, {new_data_stream, []}, #{}, [ + {Topic, [{qos, SubQos}]} + ]), + {ok, #{via := SVia2}, [SubQos]} = emqtt:subscribe_via(C, {new_data_stream, []}, #{}, [ + {Topic2, [{qos, SubQos}]} + ]), + + ?assert(SVia2 =/= SVia), + + case + emqtt:publish_via(C, {new_data_stream, []}, Topic, #{}, <<1, 2, 3, 4, 5>>, [{qos, PubQos}]) + of + ok when PubQos == 0 -> ok; + {ok, #{reason_code := 0, via := _PVia}} -> ok + end, + + PubRecvs = recv_pub(1), + ?assertMatch( + [ + {publish, #{ + client_pid := C, + packet_id := PktId1, + payload := <<1, 2, 3, 4, 5>>, + qos := RecQos + }} + ], + PubRecvs + ), + + {quic, _Conn, _Ctrlstream} = proplists:get_value(socket, emqtt:info(C)), + + ok = stop_emqx(), + + timer:sleep(200), + start_emqx_quic(?config(port, Config)), + ?assert(is_list(emqtt:info(C))), + emqtt:stop(C). + +t_conn_silent_close(Config) -> + erlang:process_flag(trap_exit, true), + {ok, C} = emqtt:start_link([ + {proto_ver, v5}, + {connect_timeout, 5} + | Config + ]), + {ok, _} = emqtt:quic_connect(C), + %% quic idle timeout + 1s + timer:sleep(16000), + Topic = atom_to_binary(?FUNCTION_NAME), + ?assertException( + exit, + noproc, + emqtt:publish_via(C, {new_data_stream, []}, Topic, #{}, <<1, 2, 3, 4, 5>>, [{qos, 1}]) + ). + +t_client_conn_bump_streams(Config) -> + {ok, C} = emqtt:start_link([ + {proto_ver, v5}, + {connect_timeout, 5} + | Config + ]), + {ok, _} = emqtt:quic_connect(C), + {quic, Conn, _Stream} = proplists:get_value(socket, emqtt:info(C)), + ok = quicer:setopt(Conn, param_conn_settings, #{peer_unidi_stream_count => 20}). + +t_olp_true(Config) -> + meck:new(emqx_olp, [passthrough, no_history]), + ok = meck:expect(emqx_olp, is_overloaded, fun() -> true end), + {ok, C} = emqtt:start_link([ + {proto_ver, v5}, + {connect_timeout, 5} + | Config + ]), + {ok, _} = emqtt:quic_connect(C), + ok = meck:unload(emqx_olp). + +t_olp_reject(Config) -> + erlang:process_flag(trap_exit, true), + emqx_config:put_zone_conf(default, [overload_protection, enable], true), + meck:new(emqx_olp, [passthrough, no_history]), + ok = meck:expect(emqx_olp, is_overloaded, fun() -> true end), + {ok, C} = emqtt:start_link([ + {proto_ver, v5}, + {connect_timeout, 5} + | Config + ]), + ?assertEqual( + {error, + {transport_down, #{ + error => 346, + status => + user_canceled + }}}, + emqtt:quic_connect(C) + ), + ok = meck:unload(emqx_olp), + emqx_config:put_zone_conf(default, [overload_protection, enable], false). + +t_conn_resume(Config) -> + erlang:process_flag(trap_exit, true), + {ok, C0} = emqtt:start_link([ + {proto_ver, v5}, + {connect_timeout, 5} + | Config + ]), + + {ok, _} = emqtt:quic_connect(C0), + #{nst := NST} = proplists:get_value(extra, emqtt:info(C0)), + emqtt:disconnect(C0), + {ok, C} = emqtt:start_link([ + {proto_ver, v5}, + {connect_timeout, 5}, + {nst, NST} + | Config + ]), + {ok, _} = emqtt:quic_connect(C), + Cid = proplists:get_value(clientid, emqtt:info(C)), + ct:pal("~p~n", [emqx_cm:get_chan_info(Cid)]).
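start_emqx_quic/1, called from init_per_suite, init_per_testcase and the reconnect case above, (re)creates the QUIC listener on the chosen UDP port. Given that t_listener_inval_settings below drives emqx_common_test_helpers:ensure_quic_listener/3, a thin wrapper over a two-argument sibling is a plausible definition (both the arity and the listener name here are assumptions):

    start_emqx_quic(UdpPort) ->
        ok = emqx_common_test_helpers:ensure_quic_listener(mqtt_quic, UdpPort).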
+ +t_conn_without_ctrl_stream(Config) -> + erlang:process_flag(trap_exit, true), + {ok, Conn} = quicer:connect( + {127, 0, 0, 1}, + ?config(port, Config), + [{alpn, ["mqtt"]}, {verify, none}], + 3000 + ), + receive + {quic, transport_shutdown, Conn, _} -> ok + end. + +t_data_stream_race_ctrl_stream(Config) -> + erlang:process_flag(trap_exit, true), + {ok, C0} = emqtt:start_link([ + {proto_ver, v5}, + {connect_timeout, 5} + | Config + ]), + {ok, _} = emqtt:quic_connect(C0), + #{nst := NST} = proplists:get_value(extra, emqtt:info(C0)), + emqtt:disconnect(C0), + {ok, C} = emqtt:start_link([ + {proto_ver, v5}, + {connect_timeout, 5}, + {nst, NST} + | Config + ]), + {ok, _} = emqtt:quic_connect(C), + Cid = proplists:get_value(clientid, emqtt:info(C)), + ct:pal("~p~n", [emqx_cm:get_chan_info(Cid)]). + +t_multi_streams_sub_0_rtt(Config) -> + PubQos = ?config(pub_qos, Config), + SubQos = ?config(sub_qos, Config), + RecQos = calc_qos(PubQos, SubQos), + Topic = atom_to_binary(?FUNCTION_NAME), + {ok, C0} = emqtt:start_link([{proto_ver, v5} | Config]), + {ok, _} = emqtt:quic_connect(C0), + {ok, _, [SubQos]} = emqtt:subscribe_via(C0, {new_data_stream, []}, #{}, [ + {Topic, [{qos, SubQos}]} + ]), + {ok, C} = emqtt:start_link([{proto_ver, v5} | Config]), + ok = emqtt:open_quic_connection(C), + ok = emqtt:quic_mqtt_connect(C), + ok = emqtt:publish_async( + C, + {new_data_stream, []}, + Topic, + #{}, + <<"qos 2 1">>, + [{qos, PubQos}], + infinity, + fun(_) -> ok end + ), + {ok, _} = emqtt:quic_connect(C), + receive + {publish, #{ + client_pid := C0, + payload := <<"qos 2 1">>, + qos := RecQos, + topic := Topic + }} -> + ok; + Other -> + ct:fail("unexpected recv ~p", [Other]) + after 100 -> + ct:fail("not received") + end, + ok = emqtt:disconnect(C), + ok = emqtt:disconnect(C0). + +t_multi_streams_sub_0_rtt_large_payload(Config) -> + PubQos = ?config(pub_qos, Config), + SubQos = ?config(sub_qos, Config), + RecQos = calc_qos(PubQos, SubQos), + Topic = atom_to_binary(?FUNCTION_NAME), + Payload = binary:copy(<<"qos 2 1">>, 1600), + {ok, C0} = emqtt:start_link([{proto_ver, v5} | Config]), + {ok, _} = emqtt:quic_connect(C0), + {ok, _, [SubQos]} = emqtt:subscribe_via(C0, {new_data_stream, []}, #{}, [ + {Topic, [{qos, SubQos}]} + ]), + {ok, C} = emqtt:start_link([{proto_ver, v5} | Config]), + ok = emqtt:open_quic_connection(C), + ok = emqtt:quic_mqtt_connect(C), + ok = emqtt:publish_async( + C, + {new_data_stream, []}, + Topic, + #{}, + Payload, + [{qos, PubQos}], + infinity, + fun(_) -> ok end + ), + {ok, _} = emqtt:quic_connect(C), + receive + {publish, #{ + client_pid := C0, + payload := Payload, + qos := RecQos, + topic := Topic + }} -> + ok; + Other -> + ct:fail("unexpected recv ~p", [Other]) + after 100 -> + ct:fail("not received") + end, + ok = emqtt:disconnect(C), + ok = emqtt:disconnect(C0). 
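+
+%% Note: in the 0-RTT tests above, the publish is queued with publish_async/8
+%% after quic_mqtt_connect/1 but before quic_connect/2 returns, so the data
+%% stream rides the 0-RTT handshake rather than waiting for it to complete.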
+ +%% @doc verify data stream can continue after 0-RTT handshake +t_multi_streams_sub_0_rtt_stream_data_cont(Config) -> + PubQos = ?config(pub_qos, Config), + SubQos = ?config(sub_qos, Config), + RecQos = calc_qos(PubQos, SubQos), + Topic = atom_to_binary(?FUNCTION_NAME), + Payload = binary:copy(<<"qos 2 1">>, 1600), + {ok, C0} = emqtt:start_link([{proto_ver, v5} | Config]), + {ok, _} = emqtt:quic_connect(C0), + {ok, _, [SubQos]} = emqtt:subscribe_via(C0, {new_data_stream, []}, #{}, [ + {Topic, [{qos, SubQos}]} + ]), + {ok, C} = emqtt:start_link([{proto_ver, v5} | Config]), + ok = emqtt:open_quic_connection(C), + ok = emqtt:quic_mqtt_connect(C), + {ok, PubVia} = emqtt:start_data_stream(C, []), + ok = emqtt:publish_async( + C, + PubVia, + Topic, + #{}, + Payload, + [{qos, PubQos}], + infinity, + fun(_) -> ok end + ), + {ok, _} = emqtt:quic_connect(C), + receive + {publish, #{ + client_pid := C0, + payload := Payload, + qos := RecQos, + topic := Topic + }} -> + ok; + Other -> + ct:fail("unexpected recv ~p", [Other]) + after 100 -> + ct:fail("not received") + end, + Payload2 = <<"2nd part", Payload/binary>>, + ok = emqtt:publish_async( + C, + PubVia, + Topic, + #{}, + Payload2, + [{qos, PubQos}], + infinity, + fun(_) -> ok end + ), + receive + {publish, #{ + client_pid := C0, + payload := Payload2, + qos := RecQos, + topic := Topic + }} -> + ok; + Other2 -> + ct:fail("unexpected recv ~p", [Other2]) + after 100 -> + ct:fail("not received") + end, + ok = emqtt:disconnect(C), + ok = emqtt:disconnect(C0). + +t_listener_inval_settings(_Config) -> + LPort = select_port(), + %% too small + LowLevelTunings = #{stream_recv_buffer_default => 1024}, + ?assertThrow( + {error, {failed_to_start, _}}, + emqx_common_test_helpers:ensure_quic_listener(?FUNCTION_NAME, LPort, LowLevelTunings) + ). 
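+
+%% Note: stream_recv_buffer_default = 1024 above is deliberately too small, so
+%% quicer rejects the listener settings and ensure_quic_listener throws
+%% {error, {failed_to_start, _}}.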
+
+t_listener_with_lowlevel_settings(_Config) ->
+    LPort = select_port(),
+    LowLevelTunings = #{
+        max_bytes_per_key => 274877906,
+        %% In conf schema we use handshake_idle_timeout
+        handshake_idle_timeout_ms => 2000,
+        %% In conf schema we use idle_timeout
+        idle_timeout_ms => 20000,
+        %% not used since we are the server
+        %% tls_client_max_send_buffer,
+        tls_server_max_send_buffer => 10240,
+        stream_recv_window_default => 16384 * 2,
+        %% there is a debug assertion: stream_recv_window_default > stream_recv_buffer_default
+        stream_recv_buffer_default => 16384,
+        conn_flow_control_window => 1024,
+        max_stateless_operations => 16,
+        initial_window_packets => 1300,
+        send_idle_timeout_ms => 12000,
+        initial_rtt_ms => 300,
+        max_ack_delay_ms => 6000,
+        disconnect_timeout_ms => 60000,
+        %% In conf schema, we use keep_alive_interval
+        keep_alive_interval_ms => 12000,
+        %% overwritten by conn opts
+        peer_bidi_stream_count => 100,
+        %% overwritten by conn opts
+        peer_unidi_stream_count => 100,
+        retry_memory_limit => 640,
+        load_balancing_mode => 1,
+        max_operations_per_drain => 32,
+        send_buffering_enabled => 1,
+        pacing_enabled => 0,
+        migration_enabled => 0,
+        datagram_receive_enabled => 1,
+        server_resumption_level => 0,
+        minimum_mtu => 1250,
+        maximum_mtu => 1600,
+        mtu_discovery_search_complete_timeout_us => 500000000,
+        mtu_discovery_missing_probe_count => 6,
+        max_binding_stateless_operations => 200,
+        stateless_operation_expiration_ms => 200
+    },
+    ?assertEqual(
+        ok, emqx_common_test_helpers:ensure_quic_listener(?FUNCTION_NAME, LPort, LowLevelTunings)
+    ),
+    timer:sleep(1000),
+    {ok, C} = emqtt:start_link([{proto_ver, v5}, {port, LPort}]),
+    {ok, _} = emqtt:quic_connect(C),
+    {ok, _, _} = emqtt:subscribe(C, <<"test/1/2">>, qos2),
+    {ok, _, [_SubQos]} = emqtt:subscribe_via(C, {new_data_stream, []}, #{}, [
+        {<<"test/1/3">>, [{qos, 2}]}
+    ]),
+    ok = emqtt:disconnect(C).
+
+%%--------------------------------------------------------------------
+%% Helper functions
+%%--------------------------------------------------------------------
+send_and_recv_with(Sock) ->
+    {ok, {IP, _}} = emqtt_quic:sockname(Sock),
+    ?assert(lists:member(tuple_size(IP), [4, 8])),
+    ok = emqtt_quic:send(Sock, <<"ping">>),
+    emqtt_quic:setopts(Sock, [{active, false}]),
+    {ok, <<"pong">>} = emqtt_quic:recv(Sock, 0),
+    ok = emqtt_quic:setopts(Sock, [{active, 100}]),
+    {ok, Stats} = emqtt_quic:getstat(Sock, [send_cnt, recv_cnt]),
+    %% connection level counters, not stream level
+    [{send_cnt, _}, {recv_cnt, _}] = Stats.
+
+certfile(Config) ->
+    filename:join([test_dir(Config), "certs", "test.crt"]).
+
+keyfile(Config) ->
+    filename:join([test_dir(Config), "certs", "test.key"]).
+
+test_dir(Config) ->
+    filename:dirname(filename:dirname(proplists:get_value(data_dir, Config))).
+
+recv_pub(Count) ->
+    recv_pub(Count, [], 100).
+
+recv_pub(Count, Tout) ->
+    recv_pub(Count, [], Tout).
+
+recv_pub(0, Acc, _Tout) ->
+    lists:reverse(Acc);
+recv_pub(Count, Acc, Tout) ->
+    receive
+        {publish, _Prop} = Pub ->
+            recv_pub(Count - 1, [Pub | Acc], Tout)
+    after Tout ->
+        timeout
+    end.
+
+all_tc() ->
+    code:add_patha(filename:join(code:lib_dir(emqx), "ebin/")),
+    emqx_common_test_helpers:all(?MODULE).
+
+-spec calc_qos(0 | 1 | 2, 0 | 1 | 2) -> 0 | 1 | 2.
+calc_qos(PubQos, SubQos) ->
+    if
+        PubQos > SubQos ->
+            SubQos;
+        SubQos > PubQos ->
+            PubQos;
+        true ->
+            PubQos
+    end.
+-spec calc_pkt_id(0 | 1 | 2, non_neg_integer()) -> undefined | non_neg_integer().
+calc_pkt_id(0, _Id) -> + undefined; +calc_pkt_id(1, Id) -> + Id; +calc_pkt_id(2, Id) -> + Id. + +-spec start_emqx_quic(inet:port_number()) -> ok. +start_emqx_quic(UdpPort) -> + emqx_common_test_helpers:start_apps([]), + application:ensure_all_started(quicer), + emqx_common_test_helpers:ensure_quic_listener(?MODULE, UdpPort). + +-spec stop_emqx() -> ok. +stop_emqx() -> + emqx_common_test_helpers:stop_apps([]). + +%% select a random port picked by OS +-spec select_port() -> inet:port_number(). +select_port() -> + emqx_common_test_helpers:select_free_port(quic). + +-spec via_stream({quic, quicer:connection_handle(), quicer:stream_handle()}) -> + quicer:stream_handle(). +via_stream({quic, _Conn, Stream}) -> + Stream. + +assert_client_die(C) -> + assert_client_die(C, 100, 10). +assert_client_die(C, _, 0) -> + ct:fail("Client ~p did not die: stacktrace: ~p", [C, process_info(C, current_stacktrace)]); +assert_client_die(C, Delay, Retries) -> + try emqtt:info(C) of + Info when is_list(Info) -> + timer:sleep(Delay), + assert_client_die(C, Delay, Retries - 1) + catch + exit:Error -> + ct:comment("client die with ~p", [Error]) + end. + +%% BUILD_WITHOUT_QUIC +-else. +-endif. diff --git a/apps/emqx/test/emqx_ratelimiter_SUITE.erl b/apps/emqx/test/emqx_ratelimiter_SUITE.erl index f3b97d517..6f488eaa9 100644 --- a/apps/emqx/test/emqx_ratelimiter_SUITE.erl +++ b/apps/emqx/test/emqx_ratelimiter_SUITE.erl @@ -38,6 +38,7 @@ -define(LOGT(Format, Args), ct:pal("TEST_SUITE: " ++ Format, Args)). -define(RATE(Rate), to_rate(Rate)). -define(NOW, erlang:system_time(millisecond)). +-define(ROOT_COUNTER_IDX, 1). %%-------------------------------------------------------------------- %% Setups @@ -46,7 +47,7 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> - ok = emqx_common_test_helpers:load_config(emqx_limiter_schema, ?BASE_CONF), + load_conf(), emqx_common_test_helpers:start_apps([?APP]), Config. @@ -54,13 +55,15 @@ end_per_suite(_Config) -> emqx_common_test_helpers:stop_apps([?APP]). init_per_testcase(_TestCase, Config) -> + emqx_config:erase(limiter), + load_conf(), Config. end_per_testcase(_TestCase, Config) -> Config. load_conf() -> - emqx_common_test_helpers:load_config(emqx_limiter_schema, ?BASE_CONF). + ok = emqx_common_test_helpers:load_config(emqx_limiter_schema, ?BASE_CONF). init_config() -> emqx_config:init_load(emqx_limiter_schema, ?BASE_CONF). @@ -72,7 +75,7 @@ t_consume(_) -> Cfg = fun(Cfg) -> Cfg#{ rate := 100, - capacity := 100, + burst := 0, initial := 100, max_retry_time := 1000, failure_strategy := force @@ -89,7 +92,7 @@ t_retry(_) -> Cfg = fun(Cfg) -> Cfg#{ rate := 50, - capacity := 200, + burst := 150, initial := 0, max_retry_time := 1000, failure_strategy := force @@ -109,7 +112,7 @@ t_restore(_) -> Cfg = fun(Cfg) -> Cfg#{ rate := 1, - capacity := 200, + burst := 199, initial := 50, max_retry_time := 100, failure_strategy := force @@ -129,7 +132,7 @@ t_max_retry_time(_) -> Cfg = fun(Cfg) -> Cfg#{ rate := 1, - capacity := 1, + burst := 0, max_retry_time := 500, failure_strategy := drop } @@ -139,8 +142,12 @@ t_max_retry_time(_) -> Begin = ?NOW, Result = emqx_htb_limiter:consume(101, Client), ?assertMatch({drop, _}, Result), - Time = ?NOW - Begin, - ?assert(Time >= 500 andalso Time < 550) + End = ?NOW, + Time = End - Begin, + ?assert( + Time >= 500 andalso Time < 550, + lists:flatten(io_lib:format("Begin:~p, End:~p, Time:~p~n", [Begin, End, Time])) + ) end, with_per_client(Cfg, Case). 
@@ -150,7 +157,7 @@ t_divisible(_) -> divisible := true, rate := ?RATE("1000/1s"), initial := 600, - capacity := 600 + burst := 0 } end, Case = fun(BucketCfg) -> @@ -176,7 +183,7 @@ t_low_watermark(_) -> low_watermark := 400, rate := ?RATE("1000/1s"), initial := 1000, - capacity := 1000 + burst := 0 } end, Case = fun(BucketCfg) -> @@ -201,23 +208,22 @@ t_infinity_client(_) -> Fun = fun(Cfg) -> Cfg end, Case = fun(Cfg) -> Client = connect(Cfg), - InfVal = emqx_limiter_schema:infinity_value(), - ?assertMatch(#{bucket := #{rate := InfVal}}, Client), + ?assertMatch(infinity, Client), Result = emqx_htb_limiter:check(100000, Client), ?assertEqual({ok, Client}, Result) end, with_per_client(Fun, Case). -t_try_restore_agg(_) -> +t_try_restore_with_bucket(_) -> Fun = fun(#{client := Cli} = Bucket) -> Bucket2 = Bucket#{ - rate := 1, - capacity := 200, + rate := 100, + burst := 100, initial := 50 }, Cli2 = Cli#{ rate := infinity, - capacity := infinity, + burst := 0, divisible := true, max_retry_time := 100, failure_strategy := force @@ -239,11 +245,11 @@ t_short_board(_) -> Bucket2 = Bucket#{ rate := ?RATE("100/1s"), initial := 0, - capacity := 100 + burst := 0 }, Cli2 = Cli#{ rate := ?RATE("600/1s"), - capacity := 600, + burst := 0, initial := 600 }, Bucket2#{client := Cli2} @@ -261,46 +267,45 @@ t_rate(_) -> Bucket2 = Bucket#{ rate := ?RATE("100/100ms"), initial := 0, - capacity := infinity + burst := 0 }, Cli2 = Cli#{ rate := infinity, - capacity := infinity, + burst := 0, initial := 0 }, Bucket2#{client := Cli2} end, Case = fun(Cfg) -> + Time = 1000, Client = connect(Cfg), - Ts1 = erlang:system_time(millisecond), C1 = emqx_htb_limiter:available(Client), - timer:sleep(1000), - Ts2 = erlang:system_time(millisecond), + timer:sleep(1100), C2 = emqx_htb_limiter:available(Client), - ShouldInc = floor((Ts2 - Ts1) / 100) * 100, + ShouldInc = floor(Time / 100) * 100, Inc = C2 - C1, ?assert(in_range(Inc, ShouldInc - 100, ShouldInc + 100), "test bucket rate") end, with_bucket(Fun, Case). t_capacity(_) -> - Capacity = 600, + Capacity = 1200, Fun = fun(#{client := Cli} = Bucket) -> Bucket2 = Bucket#{ rate := ?RATE("100/100ms"), initial := 0, - capacity := 600 + burst := 200 }, Cli2 = Cli#{ rate := infinity, - capacity := infinity, + burst := 0, initial := 0 }, Bucket2#{client := Cli2} end, Case = fun(Cfg) -> Client = connect(Cfg), - timer:sleep(1000), + timer:sleep(1500), C1 = emqx_htb_limiter:available(Client), ?assertEqual(Capacity, C1, "test bucket capacity") end, @@ -310,19 +315,19 @@ t_capacity(_) -> %% Test Cases Global Level %%-------------------------------------------------------------------- t_collaborative_alloc(_) -> - GlobalMod = fun(#{message_routing := MR} = Cfg) -> - Cfg#{message_routing := MR#{rate := ?RATE("600/1s")}} + GlobalMod = fun(Cfg) -> + Cfg#{message_routing => #{rate => ?RATE("600/1s"), burst => 0}} end, Bucket1 = fun(#{client := Cli} = Bucket) -> Bucket2 = Bucket#{ rate := ?RATE("400/1s"), initial := 0, - capacity := 600 + burst := 200 }, Cli2 = Cli#{ rate := ?RATE("50"), - capacity := 100, + burst := 50, initial := 100 }, Bucket2#{client := Cli2} @@ -350,11 +355,11 @@ t_collaborative_alloc(_) -> ). 
t_burst(_) -> - GlobalMod = fun(#{message_routing := MR} = Cfg) -> + GlobalMod = fun(Cfg) -> Cfg#{ - message_routing := MR#{ - rate := ?RATE("200/1s"), - burst := ?RATE("400/1s") + message_routing => #{ + rate => ?RATE("200/1s"), + burst => ?RATE("400/1s") } } end, @@ -363,11 +368,11 @@ t_burst(_) -> Bucket2 = Bucket#{ rate := ?RATE("200/1s"), initial := 0, - capacity := 200 + burst := 0 }, Cli2 = Cli#{ rate := ?RATE("50/1s"), - capacity := 200, + burst := 150, divisible := true }, Bucket2#{client := Cli2} @@ -392,38 +397,6 @@ t_burst(_) -> Case ). -t_limit_global_with_unlimit_other(_) -> - GlobalMod = fun(#{message_routing := MR} = Cfg) -> - Cfg#{message_routing := MR#{rate := ?RATE("600/1s")}} - end, - - Bucket = fun(#{client := Cli} = Bucket) -> - Bucket2 = Bucket#{ - rate := infinity, - initial := 0, - capacity := infinity - }, - Cli2 = Cli#{ - rate := infinity, - capacity := infinity, - initial := 0 - }, - Bucket2#{client := Cli2} - end, - - Case = fun() -> - C1 = counters:new(1, []), - start_client({b1, Bucket}, ?NOW + 2000, C1, 20), - timer:sleep(2100), - check_average_rate(C1, 2, 600) - end, - - with_global( - GlobalMod, - [{b1, Bucket}], - Case - ). - %%-------------------------------------------------------------------- %% Test Cases container %%-------------------------------------------------------------------- @@ -432,7 +405,7 @@ t_check_container(_) -> Cfg#{ rate := ?RATE("1000/1s"), initial := 1000, - capacity := 1000 + burst := 0 } end, Case = fun(#{client := Client} = BucketCfg) -> @@ -452,38 +425,6 @@ t_check_container(_) -> end, with_per_client(Cfg, Case). -%%-------------------------------------------------------------------- -%% Test Override -%%-------------------------------------------------------------------- -t_bucket_no_client(_) -> - Rate = ?RATE("1/s"), - GlobalMod = fun(#{client := #{message_routing := MR} = Client} = Cfg) -> - Cfg#{client := Client#{message_routing := MR#{rate := Rate}}} - end, - BucketMod = fun(Bucket) -> - maps:remove(client, Bucket) - end, - Case = fun() -> - Limiter = connect(BucketMod(make_limiter_cfg())), - ?assertMatch(#{rate := Rate}, Limiter) - end, - with_global(GlobalMod, [BucketMod], Case). - -t_bucket_client(_) -> - GlobalRate = ?RATE("1/s"), - BucketRate = ?RATE("10/s"), - GlobalMod = fun(#{client := #{message_routing := MR} = Client} = Cfg) -> - Cfg#{client := Client#{message_routing := MR#{rate := GlobalRate}}} - end, - BucketMod = fun(#{client := Client} = Bucket) -> - Bucket#{client := Client#{rate := BucketRate}} - end, - Case = fun() -> - Limiter = connect(BucketMod(make_limiter_cfg())), - ?assertMatch(#{rate := BucketRate}, Limiter) - end, - with_global(GlobalMod, [BucketMod], Case). - %%-------------------------------------------------------------------- %% Test Cases misc %%-------------------------------------------------------------------- @@ -565,13 +506,303 @@ t_schema_unit(_) -> ?assertMatch({error, _}, M:to_rate("100MB/1")), ?assertMatch({error, _}, M:to_rate("100/10x")), - ?assertEqual({ok, emqx_limiter_schema:infinity_value()}, M:to_capacity("infinity")), + ?assertEqual({ok, infinity}, M:to_capacity("infinity")), ?assertEqual({ok, 100}, M:to_capacity("100")), ?assertEqual({ok, 100 * 1024}, M:to_capacity("100KB")), ?assertEqual({ok, 100 * 1024 * 1024}, M:to_capacity("100MB")), ?assertEqual({ok, 100 * 1024 * 1024 * 1024}, M:to_capacity("100GB")), ok. 
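+
+%% The compatibility tests below check that legacy `capacity` settings still
+%% load: the old `capacity` field is replaced by `burst` in the new schema,
+%% with capacity = rate + burst (see the conversions above, e.g. rate 50 with
+%% the old capacity 200 becoming burst 150), and `capacity = infinity` maps
+%% to burst = 0.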
+t_compatibility_for_capacity(_) -> + CfgStr = << + "" + "\n" + "listeners.tcp.default {\n" + " bind = \"0.0.0.0:1883\"\n" + " max_connections = 1024000\n" + " limiter.messages.capacity = infinity\n" + " limiter.client.messages.capacity = infinity\n" + "}\n" + "" + >>, + ?assertMatch( + #{ + messages := #{burst := 0}, + client := #{messages := #{burst := 0}} + }, + parse_and_check(CfgStr) + ). + +t_compatibility_for_message_in(_) -> + CfgStr = << + "" + "\n" + "listeners.tcp.default {\n" + " bind = \"0.0.0.0:1883\"\n" + " max_connections = 1024000\n" + " limiter.message_in.rate = infinity\n" + " limiter.client.message_in.rate = infinity\n" + "}\n" + "" + >>, + ?assertMatch( + #{ + messages := #{rate := infinity}, + client := #{messages := #{rate := infinity}} + }, + parse_and_check(CfgStr) + ). + +t_compatibility_for_bytes_in(_) -> + CfgStr = << + "" + "\n" + "listeners.tcp.default {\n" + " bind = \"0.0.0.0:1883\"\n" + " max_connections = 1024000\n" + " limiter.bytes_in.rate = infinity\n" + " limiter.client.bytes_in.rate = infinity\n" + "}\n" + "" + >>, + ?assertMatch( + #{ + bytes := #{rate := infinity}, + client := #{bytes := #{rate := infinity}} + }, + parse_and_check(CfgStr) + ). + +t_extract_with_type(_) -> + IsOnly = fun + (_Key, Cfg) when map_size(Cfg) =/= 1 -> + false; + (Key, Cfg) -> + maps:is_key(Key, Cfg) + end, + Checker = fun + (Type, #{client := Client} = Cfg) -> + Cfg2 = maps:remove(client, Cfg), + IsOnly(Type, Client) andalso + (IsOnly(Type, Cfg2) orelse + map_size(Cfg2) =:= 0); + (Type, Cfg) -> + IsOnly(Type, Cfg) + end, + ?assertEqual(undefined, emqx_limiter_schema:extract_with_type(messages, undefined)), + ?assert( + Checker( + messages, + emqx_limiter_schema:extract_with_type(messages, #{ + messages => #{rate => 1}, bytes => #{rate => 1} + }) + ) + ), + ?assert( + Checker( + messages, + emqx_limiter_schema:extract_with_type(messages, #{ + messages => #{rate => 1}, + bytes => #{rate => 1}, + client => #{messages => #{rate => 2}} + }) + ) + ), + ?assert( + Checker( + messages, + emqx_limiter_schema:extract_with_type(messages, #{ + client => #{messages => #{rate => 2}, bytes => #{rate => 1}} + }) + ) + ). + +%%-------------------------------------------------------------------- +%% Test Cases Create Instance +%%-------------------------------------------------------------------- +t_create_instance_with_infinity_node(_) -> + emqx_limiter_manager:insert_bucket(?FUNCTION_NAME, bytes, ?FUNCTION_NAME), + Cases = make_create_test_data_with_infinity_node(?FUNCTION_NAME), + lists:foreach( + fun({Cfg, Expected}) -> + {ok, Result} = emqx_limiter_server:connect(?FUNCTION_NAME, bytes, Cfg), + IsMatched = + case is_atom(Expected) of + true -> + Result =:= Expected; + _ -> + Expected(Result) + end, + ?assert( + IsMatched, + lists:flatten( + io_lib:format("Got unexpected:~p~n, Cfg:~p~n", [ + Result, Cfg + ]) + ) + ) + end, + Cases + ), + emqx_limiter_manager:delete_bucket(?FUNCTION_NAME, bytes), + ok. + +t_not_exists_instance(_) -> + Cfg = #{bytes => #{rate => 100, burst => 0, initial => 0}}, + ?assertEqual( + {error, invalid_bucket}, + emqx_limiter_server:connect(?FUNCTION_NAME, bytes, Cfg) + ), + + ?assertEqual( + {ok, infinity}, + emqx_limiter_server:connect(?FUNCTION_NAME, not_exists, Cfg) + ), + ok. 
+ +t_create_instance_with_node(_) -> + GlobalMod = fun(Cfg) -> + Cfg#{ + message_routing => #{rate => ?RATE("200/1s"), burst => 0}, + messages => #{rate => ?RATE("200/1s"), burst => 0} + } + end, + + B1 = fun(Bucket) -> + Bucket#{rate := ?RATE("400/1s")} + end, + + B2 = fun(Bucket) -> + Bucket#{rate := infinity} + end, + + IsRefLimiter = fun + ({ok, #{tokens := _}}, _IsRoot) -> + false; + ({ok, #{bucket := #{index := ?ROOT_COUNTER_IDX}}}, true) -> + true; + ({ok, #{bucket := #{index := Index}}}, false) when Index =/= ?ROOT_COUNTER_IDX -> + true; + (Result, _IsRoot) -> + ct:pal("The result is:~p~n", [Result]), + false + end, + + Case = fun() -> + BucketCfg = make_limiter_cfg(), + + ?assert( + IsRefLimiter(emqx_limiter_server:connect(b1, message_routing, B1(BucketCfg)), false) + ), + ?assert( + IsRefLimiter(emqx_limiter_server:connect(b2, message_routing, B2(BucketCfg)), true) + ), + ?assert(IsRefLimiter(emqx_limiter_server:connect(x, messages, undefined), true)), + ?assertNot(IsRefLimiter(emqx_limiter_server:connect(x, bytes, undefined), false)) + end, + + with_global( + GlobalMod, + [{b1, B1}, {b2, B2}], + Case + ), + ok. + +%%-------------------------------------------------------------------- +%% Test Cases emqx_esockd_htb_limiter +%%-------------------------------------------------------------------- +t_create_esockd_htb_limiter(_) -> + Opts = emqx_esockd_htb_limiter:new_create_options(?FUNCTION_NAME, bytes, undefined), + ?assertMatch( + #{module := _, id := ?FUNCTION_NAME, type := bytes, bucket := undefined}, + Opts + ), + + Limiter = emqx_esockd_htb_limiter:create(Opts), + ?assertMatch( + #{module := _, name := bytes, limiter := infinity}, + Limiter + ), + + ?assertEqual(ok, emqx_esockd_htb_limiter:delete(Limiter)), + ok. + +t_esockd_htb_consume(_) -> + ClientCfg = emqx_limiter_schema:default_client_config(), + Cfg = #{client => #{bytes => ClientCfg#{rate := 50, max_retry_time := 0}}}, + Opts = emqx_esockd_htb_limiter:new_create_options(?FUNCTION_NAME, bytes, Cfg), + Limiter = emqx_esockd_htb_limiter:create(Opts), + + C1R = emqx_esockd_htb_limiter:consume(51, Limiter), + ?assertMatch({pause, _Ms, _Limiter2}, C1R), + + timer:sleep(300), + C2R = emqx_esockd_htb_limiter:consume(50, Limiter), + ?assertMatch({ok, _}, C2R), + ok. + +%%-------------------------------------------------------------------- +%% Test Cases short paths +%%-------------------------------------------------------------------- +t_node_short_paths(_) -> + CfgStr = <<"limiter {max_conn_rate = \"1000\", messages_rate = \"100\", bytes_rate = \"10\"}">>, + ok = emqx_common_test_helpers:load_config(emqx_limiter_schema, CfgStr), + Accessor = fun emqx_limiter_schema:get_node_opts/1, + ?assertMatch(#{rate := 100.0}, Accessor(connection)), + ?assertMatch(#{rate := 10.0}, Accessor(messages)), + ?assertMatch(#{rate := 1.0}, Accessor(bytes)), + ?assertMatch(#{rate := infinity}, Accessor(message_routing)), + ?assertEqual(undefined, emqx:get_config([limiter, connection], undefined)). + +t_compatibility_for_node_short_paths(_) -> + CfgStr = + <<"limiter {max_conn_rate = \"1000\", connection.rate = \"500\", bytes.rate = \"200\"}">>, + ok = emqx_common_test_helpers:load_config(emqx_limiter_schema, CfgStr), + Accessor = fun emqx_limiter_schema:get_node_opts/1, + ?assertMatch(#{rate := 100.0}, Accessor(connection)), + ?assertMatch(#{rate := 20.0}, Accessor(bytes)). 
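+
+%% Note: when a short path and its explicit counterpart are both set (e.g.
+%% max_conn_rate vs connection.rate above), the short path wins. Rates are
+%% checked against the limiter's internal 100ms interval, which is why a
+%% configured "1000" per second is asserted as 100.0.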
+ +t_listener_short_paths(_) -> + CfgStr = << + "" + "listeners.tcp.default {max_conn_rate = \"1000\", messages_rate = \"100\", bytes_rate = \"10\"}" + "" + >>, + ok = emqx_common_test_helpers:load_config(emqx_schema, CfgStr), + ListenerOpt = emqx:get_config([listeners, tcp, default]), + ?assertMatch( + #{ + client := #{ + messages := #{rate := 10.0}, + bytes := #{rate := 1.0} + }, + connection := #{rate := 100.0} + }, + emqx_limiter_schema:get_listener_opts(ListenerOpt) + ). + +t_compatibility_for_listener_short_paths(_) -> + CfgStr = << + "" "listeners.tcp.default {max_conn_rate = \"1000\", limiter.connection.rate = \"500\"}" "" + >>, + ok = emqx_common_test_helpers:load_config(emqx_schema, CfgStr), + ListenerOpt = emqx:get_config([listeners, tcp, default]), + ?assertMatch( + #{ + connection := #{rate := 100.0} + }, + emqx_limiter_schema:get_listener_opts(ListenerOpt) + ). + +t_no_limiter_for_listener(_) -> + CfgStr = <<>>, + ok = emqx_common_test_helpers:load_config(emqx_schema, CfgStr), + ListenerOpt = emqx:get_config([listeners, tcp, default]), + ?assertEqual( + undefined, + emqx_limiter_schema:get_listener_opts(ListenerOpt) + ). + %%-------------------------------------------------------------------- %%% Internal functions %%-------------------------------------------------------------------- @@ -748,17 +979,16 @@ connect(Name, Cfg) -> Limiter. make_limiter_cfg() -> - Infinity = emqx_limiter_schema:infinity_value(), Client = #{ - rate => Infinity, + rate => infinity, initial => 0, - capacity => Infinity, + burst => 0, low_watermark => 0, divisible => false, max_retry_time => timer:seconds(5), failure_strategy => force }, - #{client => Client, rate => Infinity, initial => 0, capacity => Infinity}. + #{client => Client, rate => infinity, initial => 0, burst => 0}. add_bucket(Cfg) -> add_bucket(?MODULE, Cfg). @@ -812,3 +1042,76 @@ apply_modifier(Pairs, #{default := Template}) -> Acc#{N => M(Template)} end, lists:foldl(Fun, #{}, Pairs). + +parse_and_check(ConfigString) -> + ok = emqx_common_test_helpers:load_config(emqx_schema, ConfigString), + emqx:get_config([listeners, tcp, default, limiter]). 
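+
+%% make_create_test_data_with_infinity_node/1 below builds {Config, Expected}
+%% pairs for t_create_instance_with_infinity_node/1: Expected is either the
+%% infinity limiter itself (compared with =:=) or a predicate fun applied to
+%% the connected limiter.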
+
+make_create_test_data_with_infinity_node(FakeInstance) ->
+    Infinity = emqx_htb_limiter:make_infinity_limiter(),
+    ClientCfg = emqx_limiter_schema:default_client_config(),
+    InfinityRef = emqx_limiter_bucket_ref:infinity_bucket(),
+    MkC = fun(Rate) ->
+        #{client => #{bytes => ClientCfg#{rate := Rate}}}
+    end,
+    MkB = fun(Rate) ->
+        #{bytes => #{rate => Rate, burst => 0, initial => 0}}
+    end,
+
+    MkA = fun(Client, Bucket) ->
+        maps:merge(MkC(Client), MkB(Bucket))
+    end,
+    IsRefLimiter = fun(Expected) ->
+        fun
+            (#{tokens := _}) -> false;
+            (#{bucket := Bucket}) -> Bucket =:= Expected;
+            (_) -> false
+        end
+    end,
+
+    IsTokenLimiter = fun(Expected) ->
+        fun
+            (#{tokens := _, bucket := Bucket}) -> Bucket =:= Expected;
+            (_) -> false
+        end
+    end,
+
+    [
+        %% default situation, no limiter setting
+        {undefined, Infinity},
+
+        %% client = undefined bucket = undefined
+        {#{}, Infinity},
+        %% client = undefined bucket = infinity
+        {MkB(infinity), Infinity},
+        %% client = undefined bucket = other
+        {MkB(100), IsRefLimiter(FakeInstance)},
+
+        %% client = infinity bucket = undefined
+        {MkC(infinity), Infinity},
+        %% client = infinity bucket = infinity
+        {MkA(infinity, infinity), Infinity},
+
+        %% client = infinity bucket = other
+        {MkA(infinity, 100), IsRefLimiter(FakeInstance)},
+
+        %% client = other bucket = undefined
+        {MkC(100), IsTokenLimiter(InfinityRef)},
+
+        %% client = other bucket = infinity
+        {MkA(100, infinity), IsTokenLimiter(InfinityRef)},
+
+        %% client = C bucket = B C < B
+        {MkA(100, 1000), IsTokenLimiter(FakeInstance)},
+
+        %% client = C bucket = B C > B
+        {MkA(1000, 100), IsRefLimiter(FakeInstance)}
+    ].
+
+parse_schema(ConfigString) ->
+    {ok, RawConf} = hocon:binary(ConfigString, #{format => map}),
+    hocon_tconf:check_plain(
+        emqx_limiter_schema,
+        RawConf,
+        #{required => false, atom_key => false}
+    ).
diff --git a/apps/emqx/test/emqx_router_SUITE.erl b/apps/emqx/test/emqx_router_SUITE.erl
index 298a33fe8..2db0acf82 100644
--- a/apps/emqx/test/emqx_router_SUITE.erl
+++ b/apps/emqx/test/emqx_router_SUITE.erl
@@ -119,7 +119,7 @@ t_has_routes(_) ->
     ?R:delete_route(<<"devices/+/messages">>).
 
 t_unexpected(_) ->
-    Router = emqx_misc:proc_name(?R, 1),
+    Router = emqx_utils:proc_name(?R, 1),
     ?assertEqual(ignored, gen_server:call(Router, bad_request)),
     ?assertEqual(ok, gen_server:cast(Router, bad_message)),
     Router ! bad_info.
diff --git a/apps/emqx/test/emqx_schema_tests.erl b/apps/emqx/test/emqx_schema_tests.erl index e4fadb192..81991f26e 100644 --- a/apps/emqx/test/emqx_schema_tests.erl +++ b/apps/emqx/test/emqx_schema_tests.erl @@ -153,7 +153,7 @@ ssl_opts_gc_after_handshake_test_rancher_listener_test() -> #{ kind := validation_error, reason := unknown_fields, - unknown := <<"gc_after_handshake">> + unknown := "gc_after_handshake" } ]}, validate(Sc, #{<<"gc_after_handshake">> => true}) @@ -219,112 +219,124 @@ parse_server_test_() -> ?T( "single server, binary, no port", ?assertEqual( - [{"localhost", DefaultPort}], + [#{hostname => "localhost", port => DefaultPort}], Parse(<<"localhost">>) ) ), ?T( "single server, string, no port", ?assertEqual( - [{"localhost", DefaultPort}], + [#{hostname => "localhost", port => DefaultPort}], Parse("localhost") ) ), ?T( "single server, list(string), no port", ?assertEqual( - [{"localhost", DefaultPort}], + [#{hostname => "localhost", port => DefaultPort}], Parse(["localhost"]) ) ), ?T( "single server, list(binary), no port", ?assertEqual( - [{"localhost", DefaultPort}], + [#{hostname => "localhost", port => DefaultPort}], Parse([<<"localhost">>]) ) ), ?T( "single server, binary, with port", ?assertEqual( - [{"localhost", 9999}], + [#{hostname => "localhost", port => 9999}], Parse(<<"localhost:9999">>) ) ), ?T( "single server, list(string), with port", ?assertEqual( - [{"localhost", 9999}], + [#{hostname => "localhost", port => 9999}], Parse(["localhost:9999"]) ) ), ?T( "single server, string, with port", ?assertEqual( - [{"localhost", 9999}], + [#{hostname => "localhost", port => 9999}], Parse("localhost:9999") ) ), ?T( "single server, list(binary), with port", ?assertEqual( - [{"localhost", 9999}], + [#{hostname => "localhost", port => 9999}], Parse([<<"localhost:9999">>]) ) ), ?T( "multiple servers, string, no port", ?assertEqual( - [{"host1", DefaultPort}, {"host2", DefaultPort}], + [ + #{hostname => "host1", port => DefaultPort}, + #{hostname => "host2", port => DefaultPort} + ], Parse("host1, host2") ) ), ?T( "multiple servers, binary, no port", ?assertEqual( - [{"host1", DefaultPort}, {"host2", DefaultPort}], + [ + #{hostname => "host1", port => DefaultPort}, + #{hostname => "host2", port => DefaultPort} + ], Parse(<<"host1, host2,,,">>) ) ), ?T( "multiple servers, list(string), no port", ?assertEqual( - [{"host1", DefaultPort}, {"host2", DefaultPort}], + [ + #{hostname => "host1", port => DefaultPort}, + #{hostname => "host2", port => DefaultPort} + ], Parse(["host1", "host2"]) ) ), ?T( "multiple servers, list(binary), no port", ?assertEqual( - [{"host1", DefaultPort}, {"host2", DefaultPort}], + [ + #{hostname => "host1", port => DefaultPort}, + #{hostname => "host2", port => DefaultPort} + ], Parse([<<"host1">>, <<"host2">>]) ) ), ?T( "multiple servers, string, with port", ?assertEqual( - [{"host1", 1234}, {"host2", 2345}], + [#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}], Parse("host1:1234, host2:2345") ) ), ?T( "multiple servers, binary, with port", ?assertEqual( - [{"host1", 1234}, {"host2", 2345}], + [#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}], Parse(<<"host1:1234, host2:2345, ">>) ) ), ?T( "multiple servers, list(string), with port", ?assertEqual( - [{"host1", 1234}, {"host2", 2345}], + [#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}], Parse([" host1:1234 ", "host2:2345"]) ) ), ?T( "multiple servers, list(binary), with port", ?assertEqual( - [{"host1", 1234}, {"host2", 
2345}], + [#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}], Parse([<<"host1:1234">>, <<"host2:2345">>]) ) ), @@ -350,9 +362,9 @@ parse_server_test_() -> ) ), ?T( - "multiple servers wihtout port, mixed list(binary|string)", + "multiple servers without port, mixed list(binary|string)", ?assertEqual( - ["host1", "host2"], + [#{hostname => "host1"}, #{hostname => "host2"}], Parse2([<<"host1">>, "host2"], #{no_port => true}) ) ), @@ -394,14 +406,18 @@ parse_server_test_() -> ?T( "single server map", ?assertEqual( - [{"host1.domain", 1234}], + [#{hostname => "host1.domain", port => 1234}], HoconParse("host1.domain:1234") ) ), ?T( "multiple servers map", ?assertEqual( - [{"host1.domain", 1234}, {"host2.domain", 2345}, {"host3.domain", 3456}], + [ + #{hostname => "host1.domain", port => 1234}, + #{hostname => "host2.domain", port => 2345}, + #{hostname => "host3.domain", port => 3456} + ], HoconParse("host1.domain:1234,host2.domain:2345,host3.domain:3456") ) ), @@ -447,6 +463,171 @@ parse_server_test_() -> "bad_schema", emqx_schema:parse_server("whatever", #{default_port => 10, no_port => true}) ) + ), + ?T( + "scheme, hostname and port", + ?assertEqual( + #{scheme => "pulsar+ssl", hostname => "host", port => 6651}, + emqx_schema:parse_server( + "pulsar+ssl://host:6651", + #{ + default_port => 6650, + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "scheme and hostname, default port", + ?assertEqual( + #{scheme => "pulsar", hostname => "host", port => 6650}, + emqx_schema:parse_server( + "pulsar://host", + #{ + default_port => 6650, + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "scheme and hostname, no port", + ?assertEqual( + #{scheme => "pulsar", hostname => "host"}, + emqx_schema:parse_server( + "pulsar://host", + #{ + no_port => true, + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "scheme and hostname, missing port", + ?assertThrow( + "missing_port_number", + emqx_schema:parse_server( + "pulsar://host", + #{ + no_port => false, + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "hostname, default scheme, no default port", + ?assertEqual( + #{scheme => "pulsar", hostname => "host"}, + emqx_schema:parse_server( + "host", + #{ + default_scheme => "pulsar", + no_port => true, + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "hostname, default scheme, default port", + ?assertEqual( + #{scheme => "pulsar", hostname => "host", port => 6650}, + emqx_schema:parse_server( + "host", + #{ + default_port => 6650, + default_scheme => "pulsar", + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "just hostname, expecting missing scheme", + ?assertThrow( + "missing_scheme", + emqx_schema:parse_server( + "host", + #{ + no_port => true, + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "hostname, default scheme, defined port", + ?assertEqual( + #{scheme => "pulsar", hostname => "host", port => 6651}, + emqx_schema:parse_server( + "host:6651", + #{ + default_port => 6650, + default_scheme => "pulsar", + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "inconsistent scheme opts", + ?assertError( + "bad_schema", + emqx_schema:parse_server( + "pulsar+ssl://host:6651", + #{ + default_port => 6650, + default_scheme => "something", + supported_schemes => ["not", "supported"] + } + ) + ) + ), + ?T( + "hostname, default scheme, defined port", + ?assertEqual( + #{scheme => "pulsar", hostname => "host", port 
=> 6651}, + emqx_schema:parse_server( + "host:6651", + #{ + default_port => 6650, + default_scheme => "pulsar", + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "unsupported scheme", + ?assertThrow( + "unsupported_scheme", + emqx_schema:parse_server( + "pulsar+quic://host:6651", + #{ + default_port => 6650, + supported_schemes => ["pulsar"] + } + ) + ) + ), + ?T( + "multiple hostnames with schemes (1)", + ?assertEqual( + [ + #{scheme => "pulsar", hostname => "host", port => 6649}, + #{scheme => "pulsar+ssl", hostname => "other.host", port => 6651}, + #{scheme => "pulsar", hostname => "yet.another", port => 6650} + ], + emqx_schema:parse_servers( + "pulsar://host:6649, pulsar+ssl://other.host:6651,pulsar://yet.another", + #{ + default_port => 6650, + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) ) ]. @@ -455,13 +636,139 @@ servers_validator_test() -> NotRequired = emqx_schema:servers_validator(#{}, false), ?assertThrow("cannot_be_empty", Required("")), ?assertThrow("cannot_be_empty", Required(<<>>)), + ?assertThrow("cannot_be_empty", NotRequired("")), + ?assertThrow("cannot_be_empty", NotRequired(<<>>)), ?assertThrow("cannot_be_empty", Required(undefined)), - ?assertEqual(ok, NotRequired("")), - ?assertEqual(ok, NotRequired(<<>>)), ?assertEqual(ok, NotRequired(undefined)), + ?assertEqual(ok, NotRequired("undefined")), ok. converter_invalid_input_test() -> ?assertEqual(undefined, emqx_schema:convert_servers(undefined)), %% 'foo: bar' is a valid HOCON value, but 'bar' is not a port number ?assertThrow("bad_host_port", emqx_schema:convert_servers(#{foo => bar})). + +password_converter_test() -> + ?assertEqual(undefined, emqx_schema:password_converter(undefined, #{})), + ?assertEqual(<<"123">>, emqx_schema:password_converter(123, #{})), + ?assertEqual(<<"123">>, emqx_schema:password_converter(<<"123">>, #{})), + ?assertThrow("must_quote", emqx_schema:password_converter(foobar, #{})), + ok. + +url_type_test_() -> + [ + ?_assertEqual( + {ok, <<"http://some.server/">>}, + typerefl:from_string(emqx_schema:url(), <<"http://some.server/">>) + ), + ?_assertEqual( + {ok, <<"http://192.168.0.1/">>}, + typerefl:from_string(emqx_schema:url(), <<"http://192.168.0.1">>) + ), + ?_assertEqual( + {ok, <<"http://some.server/">>}, + typerefl:from_string(emqx_schema:url(), "http://some.server/") + ), + ?_assertEqual( + {ok, <<"http://some.server/">>}, + typerefl:from_string(emqx_schema:url(), <<"http://some.server">>) + ), + ?_assertEqual( + {ok, <<"http://some.server:9090/">>}, + typerefl:from_string(emqx_schema:url(), <<"http://some.server:9090">>) + ), + ?_assertEqual( + {ok, <<"https://some.server:9090/">>}, + typerefl:from_string(emqx_schema:url(), <<"https://some.server:9090">>) + ), + ?_assertEqual( + {ok, <<"https://some.server:9090/path?q=uery">>}, + typerefl:from_string(emqx_schema:url(), <<"https://some.server:9090/path?q=uery">>) + ), + ?_assertEqual( + {error, {unsupported_scheme, <<"postgres">>}}, + typerefl:from_string(emqx_schema:url(), <<"postgres://some.server:9090">>) + ), + ?_assertEqual( + {error, empty_host_not_allowed}, + typerefl:from_string(emqx_schema:url(), <<"">>) + ) + ]. 
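+
+%% Note: emqx_schema:url() normalizes a URL with an empty path by appending a
+%% trailing "/", and only the http and https schemes are accepted, as asserted
+%% above.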
+
+env_test_() ->
+    Do = fun emqx_schema:naive_env_interpolation/1,
+    [
+        {"undefined", fun() -> ?assertEqual(undefined, Do(undefined)) end},
+        {"full env abs path",
+            with_env_fn(
+                "MY_FILE",
+                "/path/to/my/file",
+                fun() -> ?assertEqual("/path/to/my/file", Do("$MY_FILE")) end
+            )},
+        {"full env relative path",
+            with_env_fn(
+                "MY_FILE",
+                "path/to/my/file",
+                fun() -> ?assertEqual("path/to/my/file", Do("${MY_FILE}")) end
+            )},
+        %% we cannot test windows style file join though
+        {"windows style",
+            with_env_fn(
+                "MY_FILE",
+                "path\\to\\my\\file",
+                fun() -> ?assertEqual("path\\to\\my\\file", Do("$MY_FILE")) end
+            )},
+        {"dir no {}",
+            with_env_fn(
+                "MY_DIR",
+                "/mydir",
+                fun() -> ?assertEqual("/mydir/foobar", Do(<<"$MY_DIR/foobar">>)) end
+            )},
+        {"dir with {}",
+            with_env_fn(
+                "MY_DIR",
+                "/mydir",
+                fun() -> ?assertEqual("/mydir/foobar", Do(<<"${MY_DIR}/foobar">>)) end
+            )},
+        %% a trailing / should not cause the sub path to become absolute
+        {"env dir with trailing /",
+            with_env_fn(
+                "MY_DIR",
+                "/mydir//",
+                fun() -> ?assertEqual("/mydir/foobar", Do(<<"${MY_DIR}/foobar">>)) end
+            )},
+        {"string dir with double /",
+            with_env_fn(
+                "MY_DIR",
+                "/mydir/",
+                fun() -> ?assertEqual("/mydir/foobar", Do(<<"${MY_DIR}//foobar">>)) end
+            )},
+        {"env not found",
+            with_env_fn(
+                "MY_DIR",
+                "/mydir/",
+                fun() -> ?assertEqual("${MY_DIR2}//foobar", Do(<<"${MY_DIR2}//foobar">>)) end
+            )}
+    ].
+
+with_env_fn(Name, Value, F) ->
+    fun() ->
+        with_envs(F, [{Name, Value}])
+    end.
+
+with_envs(Fun, Envs) ->
+    with_envs(Fun, [], Envs).
+
+with_envs(Fun, Args, [{_Name, _Value} | _] = Envs) ->
+    set_envs(Envs),
+    try
+        apply(Fun, Args)
+    after
+        unset_envs(Envs)
+    end.
+
+set_envs([{_Name, _Value} | _] = Envs) ->
+    lists:map(fun({Name, Value}) -> os:putenv(Name, Value) end, Envs).
+
+unset_envs([{_Name, _Value} | _] = Envs) ->
+    lists:map(fun({Name, _}) -> os:unsetenv(Name) end, Envs).
diff --git a/apps/emqx/test/emqx_session_SUITE.erl b/apps/emqx/test/emqx_session_SUITE.erl
index ecc9794d1..21d8f0a2a 100644
--- a/apps/emqx/test/emqx_session_SUITE.erl
+++ b/apps/emqx/test/emqx_session_SUITE.erl
@@ -63,7 +63,12 @@ end_per_testcase(_TestCase, Config) ->
 %%--------------------------------------------------------------------
 
 t_session_init(_) ->
-    Session = emqx_session:init(#{max_inflight => 64}),
+    Conf = emqx_cm:get_session_confs(
+        #{zone => default, clientid => <<"fake-test">>}, #{
+            receive_maximum => 64, expiry_interval => 0
+        }
+    ),
+    Session = emqx_session:init(Conf),
     ?assertEqual(#{}, emqx_session:info(subscriptions, Session)),
     ?assertEqual(0, emqx_session:info(subscriptions_cnt, Session)),
     ?assertEqual(infinity, emqx_session:info(subscriptions_max, Session)),
@@ -459,11 +464,17 @@ mqueue(Opts) ->
 session() -> session(#{}).
 
 session(InitFields) when is_map(InitFields) ->
+    Conf = emqx_cm:get_session_confs(
+        #{zone => default, clientid => <<"fake-test">>}, #{
+            receive_maximum => 0, expiry_interval => 0
+        }
+    ),
+    Session = emqx_session:init(Conf),
     maps:fold(
-        fun(Field, Value, Session) ->
-            emqx_session:set_field(Field, Value, Session)
+        fun(Field, Value, SessionAcc) ->
+            emqx_session:set_field(Field, Value, SessionAcc)
         end,
-        emqx_session:init(#{max_inflight => 0}),
+        Session,
         InitFields
     ).
diff --git a/apps/emqx/test/emqx_test_janitor.erl b/apps/emqx/test/emqx_test_janitor.erl index 07d09aca1..041b03fa7 100644 --- a/apps/emqx/test/emqx_test_janitor.erl +++ b/apps/emqx/test/emqx_test_janitor.erl @@ -30,6 +30,8 @@ %% API -export([ start_link/0, + stop/1, + stop/2, push_on_exit_callback/2 ]). @@ -40,6 +42,12 @@ start_link() -> gen_server:start_link(?MODULE, self(), []). +stop(Server) -> + stop(Server, 15_000). + +stop(Server, Timeout) -> + gen_server:call(Server, terminate, Timeout). + push_on_exit_callback(Server, Callback) when is_function(Callback, 0) -> gen_server:call(Server, {push, Callback}). @@ -52,10 +60,13 @@ init(Parent) -> {ok, #{callbacks => [], owner => Parent}}. terminate(_Reason, #{callbacks := Callbacks}) -> - lists:foreach(fun(Fun) -> Fun() end, Callbacks). + do_terminate(Callbacks). handle_call({push, Callback}, _From, State = #{callbacks := Callbacks}) -> {reply, ok, State#{callbacks := [Callback | Callbacks]}}; +handle_call(terminate, _From, State = #{callbacks := Callbacks}) -> + do_terminate(Callbacks), + {stop, normal, ok, State}; handle_call(_Req, _From, State) -> {reply, error, State}. @@ -66,3 +77,23 @@ handle_info({'EXIT', Parent, _Reason}, State = #{owner := Parent}) -> {stop, normal, State}; handle_info(_Msg, State) -> {noreply, State}. + +%%---------------------------------------------------------------------------------- +%% Internal fns +%%---------------------------------------------------------------------------------- + +do_terminate(Callbacks) -> + lists:foreach( + fun(Fun) -> + try + Fun() + catch + K:E:S -> + ct:pal("error executing callback ~p: ~p", [Fun, {K, E}]), + ct:pal("stacktrace: ~p", [S]), + ok + end + end, + Callbacks + ), + ok. diff --git a/apps/emqx/test/emqx_tls_lib_tests.erl b/apps/emqx/test/emqx_tls_lib_tests.erl index 5a81daf6a..0f5883b10 100644 --- a/apps/emqx/test/emqx_tls_lib_tests.erl +++ b/apps/emqx/test/emqx_tls_lib_tests.erl @@ -117,7 +117,7 @@ ssl_files_failure_test_() -> %% empty string ?assertMatch( {error, #{ - reason := invalid_file_path_or_pem_string, which_options := [<<"keyfile">>] + reason := invalid_file_path_or_pem_string, which_options := [[<<"keyfile">>]] }}, emqx_tls_lib:ensure_ssl_files("/tmp", #{ <<"keyfile">> => <<>>, @@ -128,7 +128,7 @@ ssl_files_failure_test_() -> %% not valid unicode ?assertMatch( {error, #{ - reason := invalid_file_path_or_pem_string, which_options := [<<"keyfile">>] + reason := invalid_file_path_or_pem_string, which_options := [[<<"keyfile">>]] }}, emqx_tls_lib:ensure_ssl_files("/tmp", #{ <<"keyfile">> => <<255, 255>>, @@ -136,6 +136,18 @@ ssl_files_failure_test_() -> <<"cacertfile">> => bin(test_key()) }) ), + ?assertMatch( + {error, #{ + reason := invalid_file_path_or_pem_string, + which_options := [[<<"ocsp">>, <<"issuer_pem">>]] + }}, + emqx_tls_lib:ensure_ssl_files("/tmp", #{ + <<"keyfile">> => bin(test_key()), + <<"certfile">> => bin(test_key()), + <<"cacertfile">> => bin(test_key()), + <<"ocsp">> => #{<<"issuer_pem">> => <<255, 255>>} + }) + ), %% not printable ?assertMatch( {error, #{reason := invalid_file_path_or_pem_string}}, @@ -155,7 +167,8 @@ ssl_files_failure_test_() -> #{ <<"cacertfile">> => bin(TmpFile), <<"keyfile">> => bin(TmpFile), - <<"certfile">> => bin(TmpFile) + <<"certfile">> => bin(TmpFile), + <<"ocsp">> => #{<<"issuer_pem">> => bin(TmpFile)} } ) ) @@ -170,22 +183,29 @@ ssl_files_save_delete_test() -> SSL0 = #{ <<"keyfile">> => Key, <<"certfile">> => Key, - <<"cacertfile">> => Key + <<"cacertfile">> => Key, + <<"ocsp">> => #{<<"issuer_pem">> => Key} }, 
Dir = filename:join(["/tmp", "ssl-test-dir"]), {ok, SSL} = emqx_tls_lib:ensure_ssl_files(Dir, SSL0), - File = maps:get(<<"keyfile">>, SSL), - ?assertMatch(<<"/tmp/ssl-test-dir/key-", _:16/binary>>, File), - ?assertEqual({ok, bin(test_key())}, file:read_file(File)), + FileKey = maps:get(<<"keyfile">>, SSL), + ?assertMatch(<<"/tmp/ssl-test-dir/key-", _:16/binary>>, FileKey), + ?assertEqual({ok, bin(test_key())}, file:read_file(FileKey)), + FileIssuerPem = emqx_utils_maps:deep_get([<<"ocsp">>, <<"issuer_pem">>], SSL), + ?assertMatch(<<"/tmp/ssl-test-dir/ocsp_issuer_pem-", _:16/binary>>, FileIssuerPem), + ?assertEqual({ok, bin(test_key())}, file:read_file(FileIssuerPem)), %% no old file to delete ok = emqx_tls_lib:delete_ssl_files(Dir, SSL, undefined), - ?assertEqual({ok, bin(test_key())}, file:read_file(File)), + ?assertEqual({ok, bin(test_key())}, file:read_file(FileKey)), + ?assertEqual({ok, bin(test_key())}, file:read_file(FileIssuerPem)), %% old and new identical, no delete ok = emqx_tls_lib:delete_ssl_files(Dir, SSL, SSL), - ?assertEqual({ok, bin(test_key())}, file:read_file(File)), + ?assertEqual({ok, bin(test_key())}, file:read_file(FileKey)), + ?assertEqual({ok, bin(test_key())}, file:read_file(FileIssuerPem)), %% new is gone, delete old ok = emqx_tls_lib:delete_ssl_files(Dir, undefined, SSL), - ?assertEqual({error, enoent}, file:read_file(File)), + ?assertEqual({error, enoent}, file:read_file(FileKey)), + ?assertEqual({error, enoent}, file:read_file(FileIssuerPem)), %% test idempotence ok = emqx_tls_lib:delete_ssl_files(Dir, undefined, SSL), ok. @@ -198,7 +218,8 @@ ssl_files_handle_non_generated_file_test() -> SSL0 = #{ <<"keyfile">> => TmpKeyFile, <<"certfile">> => TmpKeyFile, - <<"cacertfile">> => TmpKeyFile + <<"cacertfile">> => TmpKeyFile, + <<"ocsp">> => #{<<"issuer_pem">> => TmpKeyFile} }, Dir = filename:join(["/tmp", "ssl-test-dir-00"]), {ok, SSL2} = emqx_tls_lib:ensure_ssl_files(Dir, SSL0), @@ -208,7 +229,8 @@ ssl_files_handle_non_generated_file_test() -> ok = emqx_tls_lib:delete_ssl_files(Dir, undefined, SSL2), %% verify the file is not delete and not changed, because it is not generated by %% emqx_tls_lib - ?assertEqual({ok, KeyFileContent}, file:read_file(TmpKeyFile)). + ?assertEqual({ok, KeyFileContent}, file:read_file(TmpKeyFile)), + ok = file:delete(TmpKeyFile). ssl_file_replace_test() -> Key1 = bin(test_key()), @@ -216,24 +238,32 @@ ssl_file_replace_test() -> SSL0 = #{ <<"keyfile">> => Key1, <<"certfile">> => Key1, - <<"cacertfile">> => Key1 + <<"cacertfile">> => Key1, + <<"ocsp">> => #{<<"issuer_pem">> => Key1} }, SSL1 = #{ <<"keyfile">> => Key2, <<"certfile">> => Key2, - <<"cacertfile">> => Key2 + <<"cacertfile">> => Key2, + <<"ocsp">> => #{<<"issuer_pem">> => Key2} }, Dir = filename:join(["/tmp", "ssl-test-dir2"]), {ok, SSL2} = emqx_tls_lib:ensure_ssl_files(Dir, SSL0), {ok, SSL3} = emqx_tls_lib:ensure_ssl_files(Dir, SSL1), File1 = maps:get(<<"keyfile">>, SSL2), File2 = maps:get(<<"keyfile">>, SSL3), + IssuerPem1 = emqx_utils_maps:deep_get([<<"ocsp">>, <<"issuer_pem">>], SSL2), + IssuerPem2 = emqx_utils_maps:deep_get([<<"ocsp">>, <<"issuer_pem">>], SSL3), ?assert(filelib:is_regular(File1)), ?assert(filelib:is_regular(File2)), + ?assert(filelib:is_regular(IssuerPem1)), + ?assert(filelib:is_regular(IssuerPem2)), %% delete old file (File1, in SSL2) ok = emqx_tls_lib:delete_ssl_files(Dir, SSL3, SSL2), ?assertNot(filelib:is_regular(File1)), ?assert(filelib:is_regular(File2)), + ?assertNot(filelib:is_regular(IssuerPem1)), + ?assert(filelib:is_regular(IssuerPem2)), ok. 
bin(X) -> iolist_to_binary(X). diff --git a/apps/emqx/test/emqx_trace_SUITE.erl b/apps/emqx/test/emqx_trace_SUITE.erl index c66808132..140ec79ff 100644 --- a/apps/emqx/test/emqx_trace_SUITE.erl +++ b/apps/emqx/test/emqx_trace_SUITE.erl @@ -22,10 +22,9 @@ -include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("emqx/include/emqx.hrl"). +-include_lib("emqx/include/emqx_trace.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). --record(emqx_trace, {name, type, filter, enable = true, start_at, end_at}). - %%-------------------------------------------------------------------- %% Setups %%-------------------------------------------------------------------- @@ -97,7 +96,9 @@ t_base_create_delete(_Config) -> type => clientid, name => <<"name1">>, start_at => Now, - end_at => Now + 30 * 60 + end_at => Now + 30 * 60, + payload_encode => text, + extra => #{} } ], ?assertEqual(ExpectFormat, emqx_trace:format([TraceRec])), @@ -385,6 +386,48 @@ t_find_closed_time(_Config) -> ?assertEqual(1000, emqx_trace:find_closest_time(Traces, Now)), ok. +t_migrate_trace(_Config) -> + build_new_trace_data(), + build_old_trace_data(), + reload(), + Traces = emqx_trace:format(emqx_trace:list()), + ?assertEqual(2, erlang:length(Traces)), + lists:foreach( + fun(#{name := Name, enable := Enable}) -> + ?assertEqual(true, Enable, Name) + end, + Traces + ), + LoggerIds = logger:get_handler_ids(), + lists:foreach( + fun(Id) -> + ?assertEqual(true, lists:member(Id, LoggerIds), LoggerIds) + end, + [ + trace_topic_test_topic_migrate_new, + trace_topic_test_topic_migrate_old + ] + ), + ok. + +build_new_trace_data() -> + Now = erlang:system_time(second), + {ok, _} = emqx_trace:create([ + {<<"name">>, <<"test_topic_migrate_new">>}, + {<<"type">>, topic}, + {<<"topic">>, <<"/test/migrate/new">>}, + {<<"start_at">>, Now - 10} + ]). + +build_old_trace_data() -> + Now = erlang:system_time(second), + OldAttrs = [name, type, filter, enable, start_at, end_at], + {atomic, ok} = mnesia:transform_table(emqx_trace, ignore, OldAttrs, emqx_trace), + OldTrace = + {emqx_trace, <<"test_topic_migrate_old">>, topic, <<"topic">>, true, Now - 10, Now + 100}, + ok = mnesia:dirty_write(OldTrace), + ok. + reload() -> catch ok = gen_server:stop(emqx_trace), {ok, _Pid} = emqx_trace:start_link(). diff --git a/apps/emqx/test/emqx_vm_SUITE.erl b/apps/emqx/test/emqx_vm_SUITE.erl index f9809361b..12f28ed28 100644 --- a/apps/emqx/test/emqx_vm_SUITE.erl +++ b/apps/emqx/test/emqx_vm_SUITE.erl @@ -24,7 +24,24 @@ all() -> emqx_common_test_helpers:all(?MODULE). t_load(_Config) -> - ?assertMatch([{load1, _}, {load5, _}, {load15, _}], emqx_vm:loads()). + lists:foreach( + fun({Avg, LoadKey, Int}) -> + emqx_common_test_helpers:with_mock( + cpu_sup, + Avg, + fun() -> Int end, + fun() -> + Load = proplists:get_value(LoadKey, emqx_vm:loads()), + ?assertEqual(Int / 256, Load) + end + ) + end, + [{avg1, load1, 0}, {avg5, load5, 128}, {avg15, load15, 256}] + ), + ?assertMatch( + [{load1, _}, {load5, _}, {load15, _}], + emqx_vm:loads() + ). t_systeminfo(_Config) -> ?assertEqual( @@ -33,12 +50,6 @@ t_systeminfo(_Config) -> ), ?assertEqual(undefined, emqx_vm:get_system_info(undefined)). -t_mem_info(_Config) -> - application:ensure_all_started(os_mon), - MemInfo = emqx_vm:mem_info(), - [{total_memory, _}, {used_memory, _}] = MemInfo, - application:stop(os_mon). - t_process_info(_Config) -> ProcessInfo = emqx_vm:get_process_info(), ?assertEqual(emqx_vm:process_info_keys(), [K || {K, _V} <- ProcessInfo]). 
diff --git a/apps/emqx/test/emqx_vm_mon_SUITE.erl b/apps/emqx/test/emqx_vm_mon_SUITE.erl
index 140a00010..ceeffafb5 100644
--- a/apps/emqx/test/emqx_vm_mon_SUITE.erl
+++ b/apps/emqx/test/emqx_vm_mon_SUITE.erl
@@ -23,13 +23,13 @@
 all() -> emqx_common_test_helpers:all(?MODULE).
 
-init_per_testcase(t_alarms, Config) ->
+init_per_testcase(t_too_many_processes_alarm, Config) ->
     emqx_common_test_helpers:boot_modules(all),
     emqx_common_test_helpers:start_apps([]),
     emqx_config:put([sysmon, vm], #{
         process_high_watermark => 0,
         process_low_watermark => 0,
-        %% 1s
+        %% 100ms
         process_check_interval => 100
     }),
     ok = supervisor:terminate_child(emqx_sys_sup, emqx_vm_mon),
@@ -43,9 +43,29 @@ init_per_testcase(_, Config) ->
 end_per_testcase(_, _Config) ->
     emqx_common_test_helpers:stop_apps([]).
 
-t_alarms(_) ->
+t_too_many_processes_alarm(_) ->
     timer:sleep(500),
+    Alarms = emqx_alarm:get_alarms(activated),
     ?assert(is_existing(too_many_processes, emqx_alarm:get_alarms(activated))),
+    ?assertMatch(
+        [
+            #{
+                activate_at := _,
+                activated := true,
+                deactivate_at := infinity,
+                details := #{high_watermark := 0, low_watermark := 0, usage := "0%"},
+                message := <<"0% process usage">>,
+                name := too_many_processes
+            }
+        ],
+        lists:filter(
+            fun
+                (#{name := too_many_processes}) -> true;
+                (_) -> false
+            end,
+            Alarms
+        )
+    ),
     emqx_config:put([sysmon, vm, process_high_watermark], 70),
    emqx_config:put([sysmon, vm, process_low_watermark], 60),
     timer:sleep(500),
diff --git a/apps/emqx/test/emqx_ws_connection_SUITE.erl b/apps/emqx/test/emqx_ws_connection_SUITE.erl
index 787491c4b..60abe3d3c 100644
--- a/apps/emqx/test/emqx_ws_connection_SUITE.erl
+++ b/apps/emqx/test/emqx_ws_connection_SUITE.erl
@@ -138,13 +138,13 @@ end_per_testcase(t_ws_non_check_origin, Config) ->
     del_bucket(),
     PrevConfig = ?config(prev_config, Config),
     emqx_config:put_listener_conf(ws, default, [websocket], PrevConfig),
-    emqx_common_test_helpers:stop_apps([]),
+    stop_apps(),
     ok;
 end_per_testcase(_, Config) ->
     del_bucket(),
     PrevConfig = ?config(prev_config, Config),
     emqx_config:put_listener_conf(ws, default, [websocket], PrevConfig),
-    emqx_common_test_helpers:stop_apps([]),
+    stop_apps(),
     Config.
 
 init_per_suite(Config) ->
@@ -156,6 +156,10 @@ end_per_suite(_) ->
     emqx_common_test_helpers:stop_apps([]),
     ok.
 
+%% FIXME: this is a temp fix to let tests share configs.
+stop_apps() ->
+    emqx_common_test_helpers:stop_apps([], #{erase_all_configs => false}).
+ %%-------------------------------------------------------------------- %% Test Cases %%-------------------------------------------------------------------- @@ -443,7 +447,12 @@ t_websocket_info_deliver(_) -> t_websocket_info_timeout_limiter(_) -> Ref = make_ref(), - LimiterT = init_limiter(), + {ok, Rate} = emqx_limiter_schema:to_rate("50MB"), + LimiterT = init_limiter(#{ + bytes => bucket_cfg(), + messages => bucket_cfg(), + client => #{bytes => client_cfg(Rate)} + }), Next = fun emqx_ws_connection:when_msg_in/3, Limiter = emqx_limiter_container:set_retry_context({retry, [], [], Next}, LimiterT), Event = {timeout, Ref, limit_timeout}, @@ -509,16 +518,16 @@ t_handle_timeout_emit_stats(_) -> t_ensure_rate_limit(_) -> {ok, Rate} = emqx_limiter_schema:to_rate("50MB"), Limiter = init_limiter(#{ - bytes_in => bucket_cfg(), - message_in => bucket_cfg(), - client => #{bytes_in => client_cfg(Rate)} + bytes => bucket_cfg(), + messages => bucket_cfg(), + client => #{bytes => client_cfg(Rate)} }), St = st(#{limiter => Limiter}), %% must bigger than value in emqx_ratelimit_SUITE {ok, Need} = emqx_limiter_schema:to_capacity("1GB"), St1 = ?ws_conn:check_limiter( - [{Need, bytes_in}], + [{Need, bytes}], [], fun(_, _, S) -> S end, [], @@ -612,7 +621,10 @@ channel(InitFields) -> peercert => undefined, mountpoint => undefined }, - Session = emqx_session:init(#{max_inflight => 0}), + Conf = emqx_cm:get_session_confs(ClientInfo, #{ + receive_maximum => 0, expiry_interval => 0 + }), + Session = emqx_session:init(Conf), maps:fold( fun(Field, Value, Channel) -> emqx_channel:set_field(Field, Value, Channel) @@ -696,23 +708,21 @@ init_limiter() -> init_limiter(limiter_cfg()). init_limiter(LimiterCfg) -> - emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes_in, message_in], LimiterCfg). + emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes, messages], LimiterCfg). limiter_cfg() -> Cfg = bucket_cfg(), Client = client_cfg(), - #{bytes_in => Cfg, message_in => Cfg, client => #{bytes_in => Client, message_in => Client}}. + #{bytes => Cfg, messages => Cfg, client => #{bytes => Client, messages => Client}}. client_cfg() -> - Infinity = emqx_limiter_schema:infinity_value(), - client_cfg(Infinity). + client_cfg(infinity). client_cfg(Rate) -> - Infinity = emqx_limiter_schema:infinity_value(), #{ rate => Rate, initial => 0, - capacity => Infinity, + burst => 0, low_watermark => 1, divisible => false, max_retry_time => timer:seconds(5), @@ -720,14 +730,13 @@ client_cfg(Rate) -> }. bucket_cfg() -> - Infinity = emqx_limiter_schema:infinity_value(), - #{rate => Infinity, initial => 0, capacity => Infinity}. + #{rate => infinity, initial => 0, burst => 0}. add_bucket() -> Cfg = bucket_cfg(), - emqx_limiter_server:add_bucket(?LIMITER_ID, bytes_in, Cfg), - emqx_limiter_server:add_bucket(?LIMITER_ID, message_in, Cfg). + emqx_limiter_server:add_bucket(?LIMITER_ID, bytes, Cfg), + emqx_limiter_server:add_bucket(?LIMITER_ID, messages, Cfg). del_bucket() -> - emqx_limiter_server:del_bucket(?LIMITER_ID, bytes_in), - emqx_limiter_server:del_bucket(?LIMITER_ID, message_in). + emqx_limiter_server:del_bucket(?LIMITER_ID, bytes), + emqx_limiter_server:del_bucket(?LIMITER_ID, messages). diff --git a/apps/emqx/test/props/prop_emqx_sys.erl b/apps/emqx/test/props/prop_emqx_sys.erl index 5e6c56341..505d729a7 100644 --- a/apps/emqx/test/props/prop_emqx_sys.erl +++ b/apps/emqx/test/props/prop_emqx_sys.erl @@ -30,7 +30,7 @@ emqx_metrics, emqx_stats, emqx_broker, - mria_mnesia, + mria, emqx_hooks, emqx_config_handler ]). 
@@ -109,8 +109,8 @@ do_mock(emqx_broker) -> ); do_mock(emqx_stats) -> meck:expect(emqx_stats, getstats, fun() -> [0] end); -do_mock(mria_mnesia) -> - meck:expect(mria_mnesia, running_nodes, fun() -> [node()] end); +do_mock(mria) -> + meck:expect(mria, running_nodes, fun() -> [node()] end); do_mock(emqx_metrics) -> meck:expect(emqx_metrics, all, fun() -> [{hello, 3}] end); do_mock(emqx_hooks) -> diff --git a/apps/emqx_authn/i18n/emqx_authn_api_i18n.conf b/apps/emqx_authn/i18n/emqx_authn_api_i18n.conf deleted file mode 100644 index da5d6a1d1..000000000 --- a/apps/emqx_authn/i18n/emqx_authn_api_i18n.conf +++ /dev/null @@ -1,217 +0,0 @@ -emqx_authn_api { - - authentication_get { - desc { - en: """List authenticators for global authentication.""" - zh: """列出全局认证链上的认证器。""" - } - } - - authentication_post { - desc { - en: """Create authenticator for global authentication.""" - zh: """为全局认证链创建认证器。""" - } - } - - authentication_id_get { - desc { - en: """Get authenticator from global authentication chain.""" - zh: """获取全局认证链上的指定认证器。""" - } - } - - authentication_id_put { - desc { - en: """Update authenticator from global authentication chain.""" - zh: """更新全局认证链上的指定认证器。""" - } - } - - authentication_id_delete { - desc { - en: """Delete authenticator from global authentication chain.""" - zh: """删除全局认证链上的指定认证器。""" - } - } - - authentication_id_status_get { - desc { - en: """Get authenticator status from global authentication chain.""" - zh: """获取全局认证链上指定认证器的状态。""" - } - } - - listeners_listener_id_authentication_get { - desc { - en: """List authenticators for listener authentication.""" - zh: """列出监听器认证链上的认证器。""" - } - } - - listeners_listener_id_authentication_post { - desc { - en: """Create authenticator for listener authentication.""" - zh: """在监听器认证链上创建认证器。""" - } - } - - listeners_listener_id_authentication_id_get { - desc { - en: """Get authenticator from listener authentication chain.""" - zh: """获取监听器认证链上的指定认证器。""" - } - } - - listeners_listener_id_authentication_id_put { - desc { - en: """Update authenticator from listener authentication chain.""" - zh: """更新监听器认证链上的指定认证器。""" - } - } - - listeners_listener_id_authentication_id_delete { - desc { - en: """Delete authenticator from listener authentication chain.""" - zh: """删除监听器认证链上的指定认证器。""" - } - } - - listeners_listener_id_authentication_id_status_get { - desc { - en: """Get authenticator status from listener authentication chain.""" - zh: """获取监听器认证链上指定认证器的状态。""" - } - } - - authentication_id_position_put { - desc { - en: """Move authenticator in global authentication chain.""" - zh: """更改全局认证链上指定认证器的顺序。""" - } - } - - listeners_listener_id_authentication_id_position_put { - desc { - en: """Move authenticator in listener authentication chain.""" - zh: """更改监听器认证链上指定认证器的顺序。""" - } - } - - authentication_id_users_post { - desc { - en: """Create users for authenticator in global authentication chain.""" - zh: """为全局认证链上的指定认证器创建用户数据。""" - } - } - - authentication_id_users_get { - desc { - en: """List users in authenticator in global authentication chain.""" - zh: """获取全局认证链上指定认证器中的用户数据。""" - } - } - - listeners_listener_id_authentication_id_users_post { - desc { - en: """Create users for authenticator in listener authentication chain.""" - zh: """为监听器认证链上的指定认证器创建用户数据。""" - } - } - - listeners_listener_id_authentication_id_users_get { - desc { - en: """List users in authenticator in listener authentication chain.""" - zh: """列出监听器认证链上指定认证器中的用户数据。""" - } - } - - authentication_id_users_user_id_get { - desc { - en: """Get user from authenticator 
in global authentication chain.""" - zh: """获取全局认证链上指定认证器中的指定用户数据。""" - } - } - - authentication_id_users_user_id_put { - desc { - en: """Update user in authenticator in global authentication chain.""" - zh: """更新全局认证链上指定认证器中的指定用户数据。""" - } - } - - authentication_id_users_user_id_delete { - desc { - en: """Delete user in authenticator in global authentication chain.""" - zh: """删除全局认证链上指定认证器中的指定用户数据。""" - } - } - - listeners_listener_id_authentication_id_users_user_id_get { - desc { - en: """Get user from authenticator in listener authentication chain.""" - zh: """获取监听器认证链上指定认证器中的指定用户数据。""" - } - } - - listeners_listener_id_authentication_id_users_user_id_put { - desc { - en: """Update user in authenticator in listener authentication chain.""" - zh: """更新监听器认证链上指定认证器中的指定用户数据。""" - } - } - - listeners_listener_id_authentication_id_users_user_id_delete { - desc { - en: """Delete user in authenticator in listener authentication chain.""" - zh: """删除监听器认证链上指定认证器中的指定用户数据。""" - } - } - - param_auth_id { - desc { - en: """Authenticator ID.""" - zh: """认证器 ID。""" - } - } - - param_listener_id { - desc { - en: """Listener ID.""" - zh: """监听器 ID。""" - } - } - - param_user_id { - desc { - en: """User ID.""" - zh: """用户 ID。""" - } - } - - param_position { - desc { - en: """Position of authenticator in chain. Possible values are 'front', 'rear', 'before:{other_authenticator}', 'after:{other_authenticator}'.""" - zn: """认证者在链中的位置。可能的值是 'front', 'rear', 'before:{other_authenticator}', 'after:{other_authenticator}'""" - } - } - - like_user_id { - desc { - en: """Fuzzy search user_id (username or clientid).""" - zh: """使用用户 ID (username 或 clientid)模糊查询。""" - } - label { - en: """like_user_id""" - zh: """like_user_id""" - } - } - - is_superuser { - desc { - en: """Is superuser""" - zh: """是否是超级用户""" - } - } - -} diff --git a/apps/emqx_authn/i18n/emqx_authn_http_i18n.conf b/apps/emqx_authn/i18n/emqx_authn_http_i18n.conf deleted file mode 100644 index 129db5054..000000000 --- a/apps/emqx_authn/i18n/emqx_authn_http_i18n.conf +++ /dev/null @@ -1,81 +0,0 @@ -emqx_authn_http { - get { - desc { - en: """Configuration of authenticator using HTTP Server as authentication service (Using GET request).""" - zh: """使用 HTTP Server 作为认证服务的认证器的配置项 (使用 GET 请求)。""" - } - } - - post { - desc { - en: """Configuration of authenticator using HTTP Server as authentication service (Using POST request).""" - zh: """使用 HTTP Server 作为认证服务的认证器的配置项 (使用 POST 请求)。""" - } - } - - method { - desc { - en: """HTTP request method.""" - zh: """HTTP 请求方法。""" - } - label { - en: """Request Method""" - zh: """请求方法""" - } - } - - url { - desc { - en: """URL of the HTTP server.""" - zh: """认证 HTTP 服务器地址。""" - } - label { - en: """URL""" - zh: """URL""" - } - } - - headers { - desc { - en: """List of HTTP Headers.""" - zh: """HTTP Headers 列表""" - } - label { - en: """Headers""" - zh: """请求头""" - } - } - - headers_no_content_type { - desc { - en: """List of HTTP headers (without content-type).""" - zh: """HTTP Headers 列表 (无 content-type) 。""" - } - label { - en: """headers_no_content_type""" - zh: """请求头(无 content-type)""" - } - } - - body { - desc { - en: """HTTP request body.""" - zh: """HTTP request body。""" - } - label { - en: """Request Body""" - zh: """Request Body""" - } - } - - request_timeout { - desc { - en: """HTTP request timeout.""" - zh: """HTTP 请求超时时长。""" - } - label { - en: """Request Timeout""" - zh: """请求超时时间""" - } - } -} diff --git a/apps/emqx_authn/i18n/emqx_authn_jwt_i18n.conf b/apps/emqx_authn/i18n/emqx_authn_jwt_i18n.conf 
deleted file mode 100644 index a420dd7d9..000000000 --- a/apps/emqx_authn/i18n/emqx_authn_jwt_i18n.conf +++ /dev/null @@ -1,237 +0,0 @@ -emqx_authn_jwt { - use_jwks { - desc { - en: """Whether to use JWKS.""" - zh: """是否使用 JWKS。""" - } - label { - en: """Whether to Use JWKS""" - zh: """是否使用 JWKS""" - } - } - - algorithm { - desc { - en: """JWT signing algorithm, Supports HMAC (configured as hmac-based) and RSA, ECDSA (configured as public-key).""" - zh: """JWT 签名算法,支持 HMAC (配置为 hmac-based)和 RSA、ECDSA (配置为 public-key)。""" - } - label { - en: """JWT Signing Algorithm""" - zh: """JWT 签名算法""" - } - } - - public_key { - desc { - en: """The public key used to verify the JWT.""" - zh: """用于验证 JWT 的公钥。""" - } - label { - en: """Public Key""" - zh: """公钥""" - } - } - - secret_base64_encoded { - desc { - en: """Whether secret is base64 encoded.""" - zh: """密钥是否为 Base64 编码。""" - } - label { - en: """Whether Secret is Base64 Encoded""" - zh: """密钥是否为 Base64 编码""" - } - } - - secret { - desc { - en: """The key to verify the JWT using HMAC algorithm.""" - zh: """使用 HMAC 算法时用于验证 JWT 的密钥""" - } - label { - en: """Secret""" - zh: """Secret""" - } - } - - endpoint { - desc { - en: """JWKS endpoint, it's a read-only endpoint that returns the server's public key set in the JWKS format.""" - zh: """JWKS 端点, 它是一个以 JWKS 格式返回服务端的公钥集的只读端点。""" - } - label { - en: """JWKS Endpoint""" - zh: """JWKS Endpoint""" - } - } - - refresh_interval { - desc { - en: """JWKS refresh interval.""" - zh: """JWKS 刷新间隔。""" - } - label { - en: """JWKS Refresh Interval""" - zh: """JWKS 刷新间隔""" - } - } - - cacertfile { - desc { - en: """Path to a file containing PEM-encoded CA certificates.""" - zh: """包含 PEM 编码的 CA 证书的文件的路径。""" - } - label { - en: """CA Certificate File""" - zh: """CA 证书文件""" - } - } - - certfile { - desc { - en: """Path to a file containing the user certificate.""" - zh: """包含用户证书的文件的路径。""" - } - label { - en: """Certificate File""" - zh: """证书文件""" - } - } - - keyfile { - desc { - en: """Path to a file containing the user's private PEM-encoded key.""" - zh: """包含 PEM 编码的用户私钥的文件的路径。""" - } - label { - en: """Key File""" - zh: """私钥文件""" - } - } - - verify { - desc { - en: """Enable or disable SSL peer verification.""" - zh: """指定握手过程中是否校验对端证书。""" - } - label { - en: """Verify""" - zh: """Verify""" - } - } - - server_name_indication { - desc { - en: """Server Name Indication (SNI).""" - zh: """服务器名称指示(SNI)。""" - } - label { - en: """Server Name Indication""" - zh: """服务器名称指示""" - } - } - - verify_claims { - desc { - en: """ -A list of custom claims to validate, which is a list of name/value pairs. -Values can use the following placeholders: -- ${username}: Will be replaced at runtime with Username used by the client when connecting -- ${clientid}: Will be replaced at runtime with Client ID used by the client when connecting -Authentication will verify that the value of claims in the JWT (taken from the Password field) matches what is required in verify_claims. 
-""" - zh: """ -需要验证的自定义声明列表,它是一个名称/值对列表。 -值可以使用以下占位符: -- ${username}: 将在运行时被替换为客户端连接时使用的用户名 -- ${clientid}: 将在运行时被替换为客户端连接时使用的客户端标识符 -认证时将验证 JWT(取自 Password 字段)中 claims 的值是否与 verify_claims 中要求的相匹配。 -""" - } - label { - en: """Verify Claims""" - zh: """Verify Claims""" - } - } - - ssl { - desc { - en: """SSL options.""" - zh: """SSL 选项。""" - } - label { - en: """SSL Options""" - zh: """SSL 选项""" - } - } - - enable { - desc { - en: """Enable/disable SSL.""" - zh: """启用/禁用 SSL。""" - } - label { - en: """Enable/disable SSL""" - zh: """启用/禁用 SSL""" - } - } - - hmac-based { - desc { - en: """Configuration when the JWT for authentication is issued using the HMAC algorithm.""" - zh: """用于认证的 JWT 使用 HMAC 算法签发时的配置。""" - } - } - - public-key { - desc { - en: """Configuration when the JWT for authentication is issued using RSA or ECDSA algorithm.""" - zh: """用于认证的 JWT 使用 RSA 或 ECDSA 算法签发时的配置。""" - } - } - - jwks { - desc { - en: """Configuration when JWTs used for authentication need to be fetched from the JWKS endpoint.""" - zh: """用于认证的 JWTs 需要从 JWKS 端点获取时的配置。""" - } - } - - ssl_disable { - desc { - en: """SSL configuration.""" - zh: """SSL 配置。""" - } - } - - ssl_enable { - desc { - en: """SSL configuration.""" - zh: """SSL 配置。""" - } - } - - acl_claim_name { - desc { - en: """JWT claim name to use for getting ACL rules.""" - zh: """JWT claim name to use for getting ACL rules.""" - } - label { - en: """ACL claim name""" - zh: """ACL claim name""" - } - } - - from { - desc { - en: """Field to take JWT from.""" - zh: """要从中获取 JWT 的字段。""" - } - label { - en: """From Field""" - zh: """源字段""" - } - } - - -} diff --git a/apps/emqx_authn/i18n/emqx_authn_mnesia_i18n.conf b/apps/emqx_authn/i18n/emqx_authn_mnesia_i18n.conf deleted file mode 100644 index 0d07217d9..000000000 --- a/apps/emqx_authn/i18n/emqx_authn_mnesia_i18n.conf +++ /dev/null @@ -1,21 +0,0 @@ -emqx_authn_mnesia { - authentication { - desc { - en: """Configuration of authenticator using built-in database as data source.""" - zh: """使用内置数据库作为认证数据源的认证器的配置项。""" - } - } - - user_id_type { - desc { - en: """Specify whether to use `clientid` or `username` for authentication.""" - zh: """指定使用客户端ID `clientid` 还是用户名 `username` 进行认证。""" - } - - label: { - en: """Authentication ID Type""" - zh: """认证 ID 类型""" - } - } - -} diff --git a/apps/emqx_authn/i18n/emqx_authn_mongodb_i18n.conf b/apps/emqx_authn/i18n/emqx_authn_mongodb_i18n.conf deleted file mode 100644 index 440e6edb1..000000000 --- a/apps/emqx_authn/i18n/emqx_authn_mongodb_i18n.conf +++ /dev/null @@ -1,87 +0,0 @@ -emqx_authn_mongodb { - standalone { - desc { - en: """Configuration of authenticator using MongoDB (Standalone) as authentication data source.""" - zh: """使用 MongoDB (Standalone) 作为认证数据源的认证器的配置项。""" - } - } - - replica-set { - desc { - en: """Configuration of authenticator using MongoDB (Replica Set) as authentication data source.""" - zh: """使用 MongoDB (Replica Set) 作为认证数据源的认证器的配置项。""" - } - } - - sharded-cluster { - desc { - en: """Configuration of authenticator using MongoDB (Sharded Cluster) as authentication data source.""" - zh: """使用 MongoDB (Sharded Cluster) 作为认证数据源的认证器的配置项。""" - } - } - - collection { - desc { - en: """Collection used to store authentication data.""" - zh: """存储认证数据的集合。""" - } - label: { - en: """Collection""" - zh: """集合""" - } - } - - filter { - desc { - en: """ -Conditional expression that defines the filter condition in the query. 
-Filter supports the following placeholders: -- ${username}: Will be replaced at runtime with Username used by the client when connecting -- ${clientid}: Will be replaced at runtime with Client ID used by the client when connecting -""" - zh: """ -在查询中定义过滤条件的条件表达式。 -过滤器支持如下占位符: -- ${username}: 将在运行时被替换为客户端连接时使用的用户名 -- ${clientid}: 将在运行时被替换为客户端连接时使用的客户端标识符 -""" - } - label: { - en: """Filter""" - zh: """过滤器""" - } - } - - password_hash_field { - desc { - en: """Document field that contains password hash.""" - zh: """文档中用于存放密码散列的字段。""" - } - label: { - en: """Password Hash Field""" - zh: """密码散列字段""" - } - } - - salt_field { - desc { - en: """Document field that contains the password salt.""" - zh: """文档中用于存放盐值的字段。""" - } - label: { - en: """Salt Field""" - zh: """盐值字段""" - } - } - - is_superuser_field { - desc { - en: """Document field that defines if the user has superuser privileges.""" - zh: """文档中用于定义用户是否具有超级用户权限的字段。""" - } - label: { - en: """Is Superuser Field""" - zh: """超级用户字段""" - } - } -} diff --git a/apps/emqx_authn/i18n/emqx_authn_mysql_i18n.conf b/apps/emqx_authn/i18n/emqx_authn_mysql_i18n.conf deleted file mode 100644 index 4ddfd5701..000000000 --- a/apps/emqx_authn/i18n/emqx_authn_mysql_i18n.conf +++ /dev/null @@ -1,30 +0,0 @@ -emqx_authn_mysql { - authentication { - desc { - en: """Configuration of authenticator using MySQL as authentication data source.""" - zh: """使用 MySQL 作为认证数据源的认证器的配置项。""" - } - } - - query { - desc { - en: """SQL used to query data for authentication, such as password hash.""" - zh: """用于查询密码散列等用于认证的数据的 SQL 语句。""" - } - label: { - en: """Query""" - zh: """查询语句""" - } - } - - query_timeout { - desc { - en: """Timeout for the SQL query.""" - zh: """SQL 查询的超时时间。""" - } - label: { - en: """Query Timeout""" - zh: """查询超时""" - } - } -} diff --git a/apps/emqx_authn/i18n/emqx_authn_pgsql_i18n.conf b/apps/emqx_authn/i18n/emqx_authn_pgsql_i18n.conf deleted file mode 100644 index 298e38774..000000000 --- a/apps/emqx_authn/i18n/emqx_authn_pgsql_i18n.conf +++ /dev/null @@ -1,19 +0,0 @@ -emqx_authn_pgsql { - authentication { - desc { - en: """Configuration of authenticator using PostgreSQL as authentication data source.""" - zh: """使用 PostgreSQL 作为认证数据源的认证器的配置项。""" - } - } - - query { - desc { - en: """SQL used to query data for authentication, such as password hash.""" - zh: """用于查询密码散列等用于认证的数据的 SQL 语句。""" - } - label: { - en: """Query""" - zh: """查询语句""" - } - } -} diff --git a/apps/emqx_authn/i18n/emqx_authn_redis_i18n.conf b/apps/emqx_authn/i18n/emqx_authn_redis_i18n.conf deleted file mode 100644 index a9cd4a414..000000000 --- a/apps/emqx_authn/i18n/emqx_authn_redis_i18n.conf +++ /dev/null @@ -1,33 +0,0 @@ -emqx_authn_redis { - standalone { - desc { - en: """Configuration of authenticator using Redis (Standalone) as authentication data source.""" - zh: """使用 Redis (Standalone) 作为认证数据源的认证器的配置项。""" - } - } - - cluster { - desc { - en: """Configuration of authenticator using Redis (Cluster) as authentication data source.""" - zh: """使用 Redis (Cluster) 作为认证数据源的认证器的配置项。""" - } - } - - sentinel { - desc { - en: """Configuration of authenticator using Redis (Sentinel) as authentication data source.""" - zh: """使用 Redis (Sentinel) 作为认证数据源的认证器的配置项。""" - } - } - - cmd { - desc { - en: """The Redis Command used to query data for authentication such as password hash, currently only supports HGET and HMGET.""" - zh: """用于查询密码散列等用于认证的数据的 Redis Command,目前仅支持 HGETHMGET。""" - } - label: { - en: """Command""" - zh: """Command""" - } - } -} diff --git 
a/apps/emqx_authn/i18n/emqx_authn_schema_i18n.conf b/apps/emqx_authn/i18n/emqx_authn_schema_i18n.conf deleted file mode 100644 index 917554af0..000000000 --- a/apps/emqx_authn/i18n/emqx_authn_schema_i18n.conf +++ /dev/null @@ -1,243 +0,0 @@ -emqx_authn_schema { - enable { - desc { - en: """Set to true or false to disable this auth provider.""" - zh: """设为 truefalse 以启用或禁用此认证数据源。""" - } - label: { - en: """Enable""" - zh: """启用""" - } - } - - mechanism { - desc { - en: """Authentication mechanism.""" - zh: """认证机制。""" - } - label: { - en: """Authentication Mechanism""" - zh: """认证机制""" - } - } - - backend { - desc { - en: """Backend type.""" - zh: """后端类型。""" - } - label: { - en: """Backend Type""" - zh: """后端类型""" - } - } - - metrics { - desc { - en: """The metrics of the resource.""" - zh: """资源统计指标。""" - } - label: { - en: """Metrics""" - zh: """指标""" - } - } - - node_metrics { - desc { - en: """The metrics of the resource for each node.""" - zh: """每个节点上资源的统计指标。""" - } - label: { - en: """Resource Metrics in Node""" - zh: """节点资源指标""" - } - } - - status { - desc { - en: """The status of the resource.""" - zh: """资源状态。""" - } - label: { - en: """Status""" - zh: """状态""" - } - } - - node_status { - desc { - en: """The status of the resource for each node.""" - zh: """每个节点上资源的状态。""" - } - label: { - en: """Resource Status in Node""" - zh: """节点资源状态""" - } - } - - node_error { - desc { - en: """The error of node.""" - zh: """节点上产生的错误。""" - } - label: { - en: """Error in Node""" - zh: """节点产生的错误""" - } - } - - matched { - desc { - en: """Count of this resource is queried.""" - zh: """请求命中次数。""" - } - label: { - en: """Matched""" - zh: """已命中""" - } - } - - success { - desc { - en: """Count of query success.""" - zh: """请求成功次数。""" - } - label: { - en: """Success""" - zh: """成功""" - } - } - - failed { - desc { - en: """Count of query failed.""" - zh: """请求失败次数。""" - } - label: { - en: """Failed""" - zh: """失败""" - } - } - - rate { - desc { - en: """The rate of matched, times/second.""" - zh: """命中速率,单位:次/秒。""" - } - label: { - en: """Rate""" - zh: """速率""" - } - } - - rate_max { - desc { - en: """The max rate of matched, times/second.""" - zh: """最大命中速率,单位:次/秒。""" - } - label: { - en: """Max Rate""" - zh: """最大速率""" - } - } - - rate_last5m { - desc { - en: """The average rate of matched in the last 5 minutes, times/second.""" - zh: """5分钟内平均命中速率,单位:次/秒。""" - } - label: { - en: """Rate in Last 5min""" - zh: """5分钟内速率""" - } - } - - node { - desc { - en: """Node name.""" - zh: """节点名称。""" - } - label: { - en: """Node Name.""" - zh: """节点名称。""" - } - } - - metrics_nomatch { - desc { - en: """The number of times the instance was ignored when the required authentication information was not found in the current instance.""" - zh: """在当前实例中没有找到需要的认证信息,实例被忽略的次数。""" - } - label: { - en: """Nomatch Times""" - zh: """实例被忽略的次数""" - } - } - - metrics_total { - desc { - en: """The total number of times the current instance was triggered.""" - zh: """当前实例被触发的总次数。""" - } - label: { - en: """Total Triggered Times""" - zh: """当前实例被触发的总次数""" - } - } - - metrics_success { - desc { - en: """The required authentication information is found in the current instance, and the instance returns authentication success.""" - zh: """在当前实例中找到需要的认证信息,并且实例返回认证成功的次数。""" - } - label: { - en: """Authentication Success Times""" - zh: """实例认证成功的次数""" - } - } - - metrics_failed { - desc { - en: """The required authentication information is found in the current instance, and the instance returns authentication failure.""" - zh: 
"""在当前实例中找到需要的认证信息,并且实例返回认证失败的次数。""" - } - label: { - en: """Authentication Failed Times""" - zh: """实例认证失败的次数""" - } - } - - metrics_rate { - desc { - en: """The total rate at which instances are triggered, times/second.""" - zh: """实例被触发的速率。触发速率等于匹配速率 + 忽略速率,单位:次/秒。""" - } - label: { - en: """Total Triggered Rate""" - zh: """实例被触发的速率""" - } - } - - metrics_rate_max { - desc { - en: """The highest trigger rate the instance has ever reached, times/second.""" - zh: """实例曾经达到的最高触发速率,单位:次/秒。""" - } - label: { - en: """Highest Triggered Rate""" - zh: """实例曾经达到的最高触发速率""" - } - } - - metrics_rate_last5m { - desc { - en: """The average trigger rate of the instance within 5 minutes, times/second.""" - zh: """实例5分钟内平均触发速率,单位:次/秒。""" - } - label: { - en: """Average Triggered Rate in Last 5min""" - zh: """实例5分钟内平均触发速率""" - } - } -} diff --git a/apps/emqx_authn/i18n/emqx_authn_user_import_api_i18n.conf b/apps/emqx_authn/i18n/emqx_authn_user_import_api_i18n.conf deleted file mode 100644 index 294897ec1..000000000 --- a/apps/emqx_authn/i18n/emqx_authn_user_import_api_i18n.conf +++ /dev/null @@ -1,17 +0,0 @@ -emqx_authn_user_import_api { - - authentication_id_import_users_post { - desc { - en: """Import users into authenticator in global authentication chain.""" - zh: """为全局认证链上的指定认证器导入用户数据。""" - } - } - - listeners_listener_id_authentication_id_import_users_post { - desc { - en: """Import users into authenticator in listener authentication chain.""" - zh: """为监听器认证链上的指定认证器导入用户数据。""" - } - } - -} diff --git a/apps/emqx_authn/rebar.config b/apps/emqx_authn/rebar.config index 8fd9cea0f..5f0043c39 100644 --- a/apps/emqx_authn/rebar.config +++ b/apps/emqx_authn/rebar.config @@ -2,6 +2,7 @@ {deps, [ {emqx, {path, "../emqx"}}, + {emqx_utils, {path, "../emqx_utils"}}, {emqx_connector, {path, "../emqx_connector"}} ]}. diff --git a/apps/emqx_authn/src/emqx_authn.app.src b/apps/emqx_authn/src/emqx_authn.app.src index ea21e5bdc..3e0cf786e 100644 --- a/apps/emqx_authn/src/emqx_authn.app.src +++ b/apps/emqx_authn/src/emqx_authn.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_authn, [ {description, "EMQX Authentication"}, - {vsn, "0.1.11"}, + {vsn, "0.1.19"}, {modules, []}, {registered, [emqx_authn_sup, emqx_authn_registry]}, {applications, [kernel, stdlib, emqx_resource, emqx_connector, ehttpc, epgsql, mysql, jose]}, diff --git a/apps/emqx_authn/src/emqx_authn.erl b/apps/emqx_authn/src/emqx_authn.erl index 8c8e2efd9..15efeb673 100644 --- a/apps/emqx_authn/src/emqx_authn.erl +++ b/apps/emqx_authn/src/emqx_authn.erl @@ -20,7 +20,6 @@ providers/0, check_config/1, check_config/2, - check_configs/1, %% for telemetry information get_enabled_authns/0 ]). @@ -39,16 +38,6 @@ providers() -> {{scram, built_in_database}, emqx_enhanced_authn_scram_mnesia} ]. -check_configs(CM) when is_map(CM) -> - check_configs([CM]); -check_configs(CL) -> - check_configs(CL, 1). - -check_configs([], _Nth) -> - []; -check_configs([Config | Configs], Nth) -> - [check_config(Config, #{id_for_log => Nth}) | check_configs(Configs, Nth + 1)]. - check_config(Config) -> check_config(Config, #{}). 
@@ -67,21 +56,32 @@ do_check_config(#{<<"mechanism">> := Mec0} = Config, Opts) -> end, case lists:keyfind(Key, 1, providers()) of false -> - throw(#{error => unknown_authn_provider, which => Key}); + Reason = + case Key of + {M, B} -> + #{mechanism => M, backend => B}; + M -> + #{mechanism => M} + end, + throw(Reason#{error => unknown_authn_provider}); {_, ProviderModule} -> - hocon_tconf:check_plain( - ProviderModule, - #{?CONF_NS_BINARY => Config}, - Opts#{atom_key => true} - ) + do_check_config_maybe_throw(ProviderModule, Config, Opts) end; -do_check_config(Config, Opts) when is_map(Config) -> +do_check_config(Config, _Opts) when is_map(Config) -> throw(#{ error => invalid_config, - which => maps:get(id_for_log, Opts, unknown), reason => "mechanism_field_required" }). +do_check_config_maybe_throw(ProviderModule, Config0, Opts) -> + Config = #{?CONF_NS_BINARY => Config0}, + case emqx_hocon:check(ProviderModule, Config, Opts#{atom_key => true}) of + {ok, Checked} -> + Checked; + {error, Reason} -> + throw(Reason) + end. + %% The atoms have to be loaded already, %% which might be an issue for plugins which are loaded after node boot %% but they should really manage their own configs in that case. diff --git a/apps/emqx_authn/src/emqx_authn_api.erl b/apps/emqx_authn/src/emqx_authn_api.erl index 452a7bb90..f46718842 100644 --- a/apps/emqx_authn/src/emqx_authn_api.erl +++ b/apps/emqx_authn/src/emqx_authn_api.erl @@ -228,6 +228,7 @@ schema("/listeners/:listener_id/authentication") -> 'operationId' => listener_authenticators, get => #{ tags => ?API_TAGS_SINGLE, + deprecated => true, description => ?DESC(listeners_listener_id_authentication_get), parameters => [param_listener_id()], responses => #{ @@ -239,6 +240,7 @@ schema("/listeners/:listener_id/authentication") -> }, post => #{ tags => ?API_TAGS_SINGLE, + deprecated => true, description => ?DESC(listeners_listener_id_authentication_post), parameters => [param_listener_id()], 'requestBody' => emqx_dashboard_swagger:schema_with_examples( @@ -260,6 +262,7 @@ schema("/listeners/:listener_id/authentication/:id") -> 'operationId' => listener_authenticator, get => #{ tags => ?API_TAGS_SINGLE, + deprecated => true, description => ?DESC(listeners_listener_id_authentication_id_get), parameters => [param_listener_id(), param_auth_id()], responses => #{ @@ -272,6 +275,7 @@ schema("/listeners/:listener_id/authentication/:id") -> }, put => #{ tags => ?API_TAGS_SINGLE, + deprecated => true, description => ?DESC(listeners_listener_id_authentication_id_put), parameters => [param_listener_id(), param_auth_id()], 'requestBody' => emqx_dashboard_swagger:schema_with_examples( @@ -287,6 +291,7 @@ schema("/listeners/:listener_id/authentication/:id") -> }, delete => #{ tags => ?API_TAGS_SINGLE, + deprecated => true, description => ?DESC(listeners_listener_id_authentication_id_delete), parameters => [param_listener_id(), param_auth_id()], responses => #{ @@ -300,6 +305,7 @@ schema("/listeners/:listener_id/authentication/:id/status") -> 'operationId' => listener_authenticator_status, get => #{ tags => ?API_TAGS_SINGLE, + deprecated => true, description => ?DESC(listeners_listener_id_authentication_id_status_get), parameters => [param_listener_id(), param_auth_id()], responses => #{ @@ -330,6 +336,7 @@ schema("/listeners/:listener_id/authentication/:id/position/:position") -> 'operationId' => listener_authenticator_position, put => #{ tags => ?API_TAGS_SINGLE, + deprecated => true, description => ?DESC(listeners_listener_id_authentication_id_position_put), parameters 
=> [param_listener_id(), param_auth_id(), param_position()], responses => #{ @@ -393,6 +400,7 @@ schema("/listeners/:listener_id/authentication/:id/users") -> 'operationId' => listener_authenticator_users, post => #{ tags => ?API_TAGS_SINGLE, + deprecated => true, description => ?DESC(listeners_listener_id_authentication_id_users_post), parameters => [param_auth_id(), param_listener_id()], 'requestBody' => emqx_dashboard_swagger:schema_with_examples( @@ -410,6 +418,7 @@ schema("/listeners/:listener_id/authentication/:id/users") -> }, get => #{ tags => ?API_TAGS_SINGLE, + deprecated => true, description => ?DESC(listeners_listener_id_authentication_id_users_get), parameters => [ param_listener_id(), @@ -479,6 +488,7 @@ schema("/listeners/:listener_id/authentication/:id/users/:user_id") -> 'operationId' => listener_authenticator_user, get => #{ tags => ?API_TAGS_SINGLE, + deprecated => true, description => ?DESC(listeners_listener_id_authentication_id_users_user_id_get), parameters => [param_listener_id(), param_auth_id(), param_user_id()], responses => #{ @@ -491,6 +501,7 @@ schema("/listeners/:listener_id/authentication/:id/users/:user_id") -> }, put => #{ tags => ?API_TAGS_SINGLE, + deprecated => true, description => ?DESC(listeners_listener_id_authentication_id_users_user_id_put), parameters => [param_listener_id(), param_auth_id(), param_user_id()], 'requestBody' => emqx_dashboard_swagger:schema_with_example( @@ -508,6 +519,7 @@ schema("/listeners/:listener_id/authentication/:id/users/:user_id") -> }, delete => #{ tags => ?API_TAGS_SINGLE, + deprecated => true, description => ?DESC(listeners_listener_id_authentication_id_users_user_id_delete), parameters => [param_listener_id(), param_auth_id(), param_user_id()], responses => #{ @@ -872,8 +884,8 @@ lookup_from_local_node(ChainName, AuthenticatorID) -> case emqx_resource:get_instance(ResourceId) of {error, not_found} -> {error, {NodeId, not_found_resource}}; - {ok, _, #{status := Status, metrics := ResourceMetrics}} -> - {ok, {NodeId, Status, Metrics, ResourceMetrics}} + {ok, _, #{status := Status}} -> + {ok, {NodeId, Status, Metrics, emqx_resource:get_metrics(ResourceId)}} end end; {error, Reason} -> @@ -881,7 +893,7 @@ lookup_from_local_node(ChainName, AuthenticatorID) -> end. lookup_from_all_nodes(ChainName, AuthenticatorID) -> - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), LookupResult = emqx_authn_proto_v1:lookup_from_all_nodes(Nodes, ChainName, AuthenticatorID), case is_ok(LookupResult) of {ok, ResList} -> @@ -929,7 +941,7 @@ aggregate_metrics([]) -> aggregate_metrics([HeadMetrics | AllMetrics]) -> ErrorLogger = fun(Reason) -> ?SLOG(info, #{msg => "bad_metrics_value", error => Reason}) end, Fun = fun(ElemMap, AccMap) -> - emqx_map_lib:best_effort_recursive_sum(AccMap, ElemMap, ErrorLogger) + emqx_utils_maps:best_effort_recursive_sum(AccMap, ElemMap, ErrorLogger) end, lists:foldl(Fun, HeadMetrics, AllMetrics). 
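aggregate_metrics above folds the per-node metric maps into one; emqx_utils_maps:best_effort_recursive_sum/3 (relocated from emqx_map_lib) is assumed here to add numeric leaves key by key, recursing into nested maps and routing non-summable values to the logger callback. An illustrative call with invented values:

%% Illustrative only: two fake per-node metric maps summed pairwise.
A = #{matched => 1, rate => #{max => 5.0}},
B = #{matched => 2, rate => #{max => 7.0}},
ErrorLogger = fun(Reason) -> io:format("bad metrics: ~0p~n", [Reason]) end,
Sum = emqx_utils_maps:best_effort_recursive_sum(A, B, ErrorLogger).
%% expected, under the assumption above: #{matched => 3, rate => #{max => 12.0}}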
@@ -1069,7 +1081,7 @@ update_user(ChainName, AuthenticatorID, UserID, UserInfo0) -> true -> serialize_error({missing_parameter, password}); false -> - UserInfo = emqx_map_lib:safe_atom_key_map(UserInfo0), + UserInfo = emqx_utils_maps:safe_atom_key_map(UserInfo0), case emqx_authentication:update_user(ChainName, AuthenticatorID, UserID, UserInfo) of {ok, User} -> {200, User}; @@ -1232,15 +1244,10 @@ serialize_error({unknown_authn_type, Type}) -> code => <<"BAD_REQUEST">>, message => binfmt("Unknown type '~p'", [Type]) }}; -serialize_error({bad_authenticator_config, Reason}) -> - {400, #{ - code => <<"BAD_REQUEST">>, - message => binfmt("Bad authenticator config ~p", [Reason]) - }}; serialize_error(Reason) -> {400, #{ code => <<"BAD_REQUEST">>, - message => binfmt("~p", [Reason]) + message => binfmt("~0p", [Reason]) }}. parse_position(<<"front">>) -> @@ -1424,14 +1431,14 @@ request_user_create_examples() -> summary => <<"Regular user">>, value => #{ user_id => <<"user1">>, - password => <<"secret">> + password => <<"******">> } }, super_user => #{ summary => <<"Superuser">>, value => #{ user_id => <<"user2">>, - password => <<"secret">>, + password => <<"******">>, is_superuser => true } } @@ -1442,13 +1449,13 @@ request_user_update_examples() -> regular_user => #{ summary => <<"Update regular user">>, value => #{ - password => <<"newsecret">> + password => <<"******">> } }, super_user => #{ summary => <<"Update user and promote to superuser">>, value => #{ - password => <<"newsecret">>, + password => <<"******">>, is_superuser => true } } diff --git a/apps/emqx_authn/src/emqx_authn_app.erl b/apps/emqx_authn/src/emqx_authn_app.erl index 99a141822..5d4be5f41 100644 --- a/apps/emqx_authn/src/emqx_authn_app.erl +++ b/apps/emqx_authn/src/emqx_authn_app.erl @@ -35,6 +35,9 @@ %%------------------------------------------------------------------------------ start(_StartType, _StartArgs) -> + %% required by test cases, ensure the injection of + %% EMQX_AUTHENTICATION_SCHEMA_MODULE_PT_KEY + _ = emqx_conf_schema:roots(), ok = mria_rlog:wait_for_shards([?AUTH_SHARD], infinity), {ok, Sup} = emqx_authn_sup:start_link(), case initialize() of @@ -43,34 +46,23 @@ start(_StartType, _StartArgs) -> end. stop(_State) -> - ok = deinitialize(), - ok. + ok = deinitialize(). %%------------------------------------------------------------------------------ %% Internal functions %%------------------------------------------------------------------------------ initialize() -> - try - ok = ?AUTHN:register_providers(emqx_authn:providers()), - - lists:foreach( - fun({ChainName, RawAuthConfigs}) -> - AuthConfig = emqx_authn:check_configs(RawAuthConfigs), - ?AUTHN:initialize_authentication( - ChainName, - AuthConfig - ) - end, - chain_configs() - ) - of - ok -> ok - catch - throw:Reason -> - ?SLOG(error, #{msg => "failed_to_initialize_authentication", reason => Reason}), - {error, {failed_to_initialize_authentication, Reason}} - end. + ok = ?AUTHN:register_providers(emqx_authn:providers()), + lists:foreach( + fun({ChainName, AuthConfig}) -> + ?AUTHN:initialize_authentication( + ChainName, + AuthConfig + ) + end, + chain_configs() + ). deinitialize() -> ok = ?AUTHN:deregister_providers(provider_types()), @@ -80,20 +72,22 @@ chain_configs() -> [global_chain_config() | listener_chain_configs()]. global_chain_config() -> - {?GLOBAL, emqx:get_raw_config([?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME_BINARY], [])}. + {?GLOBAL, emqx:get_config([?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME_ATOM], [])}. 
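The serialize_error fallback above also switches from ~p to ~0p: for the p control sequence the field width is the maximum line length, and 0 means unlimited, so the reason term is pretty-printed on a single line inside the JSON message field. A quick illustration with a hypothetical reason term:

%% "~p" may wrap a deep term across several lines; "~0p" keeps it on one.
Reason = #{reason => "unsupported_mechanism", mechanism => <<"scram">>},
Message = iolist_to_binary(io_lib:format("~0p", [Reason])).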
listener_chain_configs() -> lists:map( fun({ListenerID, _}) -> - {ListenerID, emqx:get_raw_config(auth_config_path(ListenerID), [])} + {ListenerID, emqx:get_config(auth_config_path(ListenerID), [])} end, emqx_listeners:list() ). auth_config_path(ListenerID) -> - [<<"listeners">>] ++ - binary:split(atom_to_binary(ListenerID), <<":">>) ++ - [?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME_BINARY]. + Names = [ + binary_to_existing_atom(N, utf8) + || N <- binary:split(atom_to_binary(ListenerID), <<":">>) + ], + [listeners] ++ Names ++ [?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME_ATOM]. provider_types() -> lists:map(fun({Type, _Module}) -> Type end, emqx_authn:providers()). diff --git a/apps/emqx_authn/src/emqx_authn_password_hashing.erl b/apps/emqx_authn/src/emqx_authn_password_hashing.erl index b3e90e2cd..4954cd66e 100644 --- a/apps/emqx_authn/src/emqx_authn_password_hashing.erl +++ b/apps/emqx_authn/src/emqx_authn_password_hashing.erl @@ -64,7 +64,7 @@ ]). namespace() -> "authn-hash". -roots() -> [pbkdf2, bcrypt, bcrypt_rw, other_algorithms]. +roots() -> [pbkdf2, bcrypt, bcrypt_rw, simple]. fields(bcrypt_rw) -> fields(bcrypt) ++ @@ -96,7 +96,7 @@ fields(pbkdf2) -> )}, {dk_length, fun dk_length/1} ]; -fields(other_algorithms) -> +fields(simple) -> [ {name, sc( @@ -112,8 +112,8 @@ desc(bcrypt) -> "Settings for bcrypt password hashing algorithm."; desc(pbkdf2) -> "Settings for PBKDF2 password hashing algorithm."; -desc(other_algorithms) -> - "Settings for other password hashing algorithms."; +desc(simple) -> + "Settings for simple algorithms."; desc(_) -> undefined. @@ -231,17 +231,31 @@ check_password(#{name := Other, salt_position := SaltPosition}, Salt, PasswordHa %%------------------------------------------------------------------------------ rw_refs() -> - [ + All = [ hoconsc:ref(?MODULE, bcrypt_rw), hoconsc:ref(?MODULE, pbkdf2), - hoconsc:ref(?MODULE, other_algorithms) - ]. + hoconsc:ref(?MODULE, simple) + ], + fun + (all_union_members) -> All; + ({value, #{<<"name">> := <<"bcrypt">>}}) -> [hoconsc:ref(?MODULE, bcrypt_rw)]; + ({value, #{<<"name">> := <<"pbkdf2">>}}) -> [hoconsc:ref(?MODULE, pbkdf2)]; + ({value, #{<<"name">> := _}}) -> [hoconsc:ref(?MODULE, simple)]; + ({value, _}) -> throw(#{reason => "algorithm_name_missing"}) + end. ro_refs() -> - [ + All = [ hoconsc:ref(?MODULE, bcrypt), hoconsc:ref(?MODULE, pbkdf2), - hoconsc:ref(?MODULE, other_algorithms) - ]. + hoconsc:ref(?MODULE, simple) + ], + fun + (all_union_members) -> All; + ({value, #{<<"name">> := <<"bcrypt">>}}) -> [hoconsc:ref(?MODULE, bcrypt)]; + ({value, #{<<"name">> := <<"pbkdf2">>}}) -> [hoconsc:ref(?MODULE, pbkdf2)]; + ({value, #{<<"name">> := _}}) -> [hoconsc:ref(?MODULE, simple)]; + ({value, _}) -> throw(#{reason => "algorithm_name_missing"}) + end. sc(Type, Meta) -> hoconsc:mk(Type, Meta). diff --git a/apps/emqx_authn/src/emqx_authn_schema.erl b/apps/emqx_authn/src/emqx_authn_schema.erl index 88d8955c5..a7cdaac5f 100644 --- a/apps/emqx_authn/src/emqx_authn_schema.erl +++ b/apps/emqx_authn/src/emqx_authn_schema.erl @@ -18,10 +18,13 @@ -elvis([{elvis_style, invalid_dynamic_call, disable}]). -include_lib("hocon/include/hoconsc.hrl"). +-include("emqx_authn.hrl"). -export([ common_fields/0, roots/0, + validations/0, + tags/0, fields/1, authenticator_type/0, authenticator_type_without_scram/0, @@ -32,6 +35,9 @@ roots() -> []. +tags() -> + [<<"Authentication">>]. + common_fields() -> [{enable, fun enable/1}]. @@ -41,24 +47,79 @@ enable(desc) -> ?DESC(?FUNCTION_NAME); enable(_) -> undefined. 
authenticator_type() -> - hoconsc:union(config_refs([Module || {_AuthnType, Module} <- emqx_authn:providers()])). + hoconsc:union(union_member_selector(emqx_authn:providers())). authenticator_type_without_scram() -> Providers = lists:filtermap( fun - ({{password_based, _Backend}, Mod}) -> - {true, Mod}; - ({jwt, Mod}) -> - {true, Mod}; ({{scram, _Backend}, _Mod}) -> - false + false; + (_) -> + true end, emqx_authn:providers() ), - hoconsc:union(config_refs(Providers)). + hoconsc:union(union_member_selector(Providers)). -config_refs(Modules) -> - lists:append([Module:refs() || Module <- Modules]). +config_refs(Providers) -> + lists:append([Module:refs() || {_, Module} <- Providers]). + +union_member_selector(Providers) -> + Types = config_refs(Providers), + fun + (all_union_members) -> Types; + ({value, Value}) -> select_union_member(Value, Providers) + end. + +select_union_member(#{<<"mechanism">> := _} = Value, Providers0) -> + BackendVal = maps:get(<<"backend">>, Value, undefined), + MechanismVal = maps:get(<<"mechanism">>, Value), + BackendFilterFn = fun + ({{_Mec, Backend}, _Mod}) -> + BackendVal =:= atom_to_binary(Backend); + (_) -> + BackendVal =:= undefined + end, + MechanismFilterFn = fun + ({{Mechanism, _Backend}, _Mod}) -> + MechanismVal =:= atom_to_binary(Mechanism); + ({Mechanism, _Mod}) -> + MechanismVal =:= atom_to_binary(Mechanism) + end, + case lists:filter(BackendFilterFn, Providers0) of + [] -> + throw(#{reason => "unknown_backend", backend => BackendVal}); + Providers1 -> + case lists:filter(MechanismFilterFn, Providers1) of + [] -> + throw(#{ + reason => "unsupported_mechanism", + mechanism => MechanismVal, + backend => BackendVal + }); + [{_, Module}] -> + try_select_union_member(Module, Value) + end + end; +select_union_member(Value, _Providers) when is_map(Value) -> + throw(#{reason => "missing_mechanism_field"}); +select_union_member(Value, _Providers) -> + throw(#{reason => "not_a_struct", value => Value}). + +try_select_union_member(Module, Value) -> + %% some modules have `union_member_selector/1' exported to help selecting + %% the sub-types, they are: + %% emqx_authn_http + %% emqx_authn_jwt + %% emqx_authn_mongodb + %% emqx_authn_redis + try + Module:union_member_selector({value, Value}) + catch + error:undef -> + %% otherwise expect only one member from this module + Module:refs() + end. %% authn is a core functionality however implemented outside of emqx app %% in emqx_schema, 'authentication' is a map() type which is to allow @@ -148,3 +209,27 @@ array(Name) -> array(Name, DescId) -> {Name, ?HOCON(?R_REF(Name), #{desc => ?DESC(DescId)})}. + +validations() -> + [ + {check_http_ssl_opts, fun(Conf) -> + CheckFun = fun emqx_authn_http:check_ssl_opts/1, + validation(Conf, CheckFun) + end}, + {check_http_headers, fun(Conf) -> + CheckFun = fun emqx_authn_http:check_headers/1, + validation(Conf, CheckFun) + end} + ]. + +validation(Conf, CheckFun) when is_map(Conf) -> + validation(hocon_maps:get(?CONF_NS, Conf), CheckFun); +validation(undefined, _) -> + ok; +validation([], _) -> + ok; +validation([AuthN | Tail], CheckFun) -> + case CheckFun(#{?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME_BINARY => AuthN}) of + ok -> validation(Tail, CheckFun); + Error -> Error + end. 
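The selector funs introduced in this file replace flat ref lists inside hoconsc:union/1: hocon calls the fun with all_union_members to enumerate the candidate types, and with {value, RawValue} so the schema can narrow the union to the single matching member or throw a structured error. A minimal sketch of that protocol over a hypothetical two-member union keyed on a kind field:

%% Hypothetical union with members t_a / t_b, selected on "kind";
%% the fun is passed to hoconsc:union/1 in place of a list of refs.
union_selector() ->
    All = [hoconsc:ref(?MODULE, t_a), hoconsc:ref(?MODULE, t_b)],
    fun
        (all_union_members) -> All;
        ({value, #{<<"kind">> := <<"a">>}}) -> [hoconsc:ref(?MODULE, t_a)];
        ({value, #{<<"kind">> := <<"b">>}}) -> [hoconsc:ref(?MODULE, t_b)];
        ({value, _}) -> throw(#{reason => "kind_field_missing"})
    end.

select_union_member above applies the same idea in two passes, filtering providers by backend and then by mechanism before deferring to the provider module's own union_member_selector/1.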
diff --git a/apps/emqx_authn/src/emqx_authn_user_import_api.erl b/apps/emqx_authn/src/emqx_authn_user_import_api.erl index bab25bb78..86cfc6247 100644 --- a/apps/emqx_authn/src/emqx_authn_user_import_api.erl +++ b/apps/emqx_authn/src/emqx_authn_user_import_api.erl @@ -72,6 +72,7 @@ schema("/listeners/:listener_id/authentication/:id/import_users") -> 'operationId' => listener_authenticator_import_users, post => #{ tags => ?API_TAGS_SINGLE, + deprecated => true, description => ?DESC(listeners_listener_id_authentication_id_import_users_post), parameters => [emqx_authn_api:param_listener_id(), emqx_authn_api:param_auth_id()], 'requestBody' => emqx_dashboard_swagger:file_schema(filename), diff --git a/apps/emqx_authn/src/emqx_authn_utils.erl b/apps/emqx_authn/src/emqx_authn_utils.erl index 1352e3daf..12520251e 100644 --- a/apps/emqx_authn/src/emqx_authn_utils.erl +++ b/apps/emqx_authn/src/emqx_authn_utils.erl @@ -28,6 +28,7 @@ parse_sql/2, render_deep/2, render_str/2, + render_urlencoded_str/2, render_sql_params/2, is_superuser/1, bin/1, @@ -129,6 +130,13 @@ render_str(Template, Credential) -> #{return => full_binary, var_trans => fun handle_var/2} ). +render_urlencoded_str(Template, Credential) -> + emqx_placeholder:proc_tmpl( + Template, + mapping_credential(Credential), + #{return => full_binary, var_trans => fun urlencode_var/2} + ). + render_sql_params(ParamList, Credential) -> emqx_placeholder:proc_tmpl( ParamList, @@ -217,6 +225,11 @@ without_password(Credential, [Name | Rest]) -> without_password(Credential, Rest) end. +urlencode_var({var, _} = Var, Value) -> + emqx_http_lib:uri_encode(handle_var(Var, Value)); +urlencode_var(Var, Value) -> + handle_var(Var, Value). + handle_var({var, _Name}, undefined) -> <<>>; handle_var({var, <<"peerhost">>}, PeerHost) -> diff --git a/apps/emqx_authn/src/enhanced_authn/emqx_enhanced_authn_scram_mnesia.erl b/apps/emqx_authn/src/enhanced_authn/emqx_enhanced_authn_scram_mnesia.erl index ba13bd069..b11b89081 100644 --- a/apps/emqx_authn/src/enhanced_authn/emqx_enhanced_authn_scram_mnesia.erl +++ b/apps/emqx_authn/src/enhanced_authn/emqx_enhanced_authn_scram_mnesia.erl @@ -25,6 +25,7 @@ -export([ namespace/0, + tags/0, roots/0, fields/1, desc/1 @@ -93,6 +94,7 @@ mnesia(boot) -> ok = mria:create_table(?TAB, [ {rlog_shard, ?AUTH_SHARD}, + {type, ordered_set}, {storage, disc_copies}, {record_name, user_info}, {attributes, record_info(fields, user_info)}, @@ -103,11 +105,16 @@ mnesia(boot) -> %% Hocon Schema %%------------------------------------------------------------------------------ -namespace() -> "authn-scram-builtin_db". +namespace() -> "authn". -roots() -> [?CONF_NS]. +tags() -> + [<<"Authentication">>]. -fields(?CONF_NS) -> +%% used for config check when the schema module is resolved +roots() -> + [{?CONF_NS, hoconsc:mk(hoconsc:ref(?MODULE, scram))}]. + +fields(scram) -> [ {mechanism, emqx_authn_schema:mechanism(scram)}, {backend, emqx_authn_schema:backend(built_in_database)}, @@ -115,7 +122,7 @@ fields(?CONF_NS) -> {iteration_count, fun iteration_count/1} ] ++ emqx_authn_schema:common_fields(). -desc(?CONF_NS) -> +desc(scram) -> "Settings for Salted Challenge Response Authentication Mechanism\n" "(SCRAM) authentication."; desc(_) -> @@ -136,7 +143,7 @@ iteration_count(_) -> undefined. %%------------------------------------------------------------------------------ refs() -> - [hoconsc:ref(?MODULE, ?CONF_NS)]. + [hoconsc:ref(?MODULE, scram)]. 
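render_urlencoded_str above reuses the placeholder engine but passes each substituted value through emqx_http_lib:uri_encode/1, so credentials interpolated into an HTTP path cannot smuggle in reserved characters, while literal template text is left as written. A hypothetical before/after, assuming percent-encoding of the reserved "/":

%% Hypothetical illustration (not suite code): the value is encoded,
%% the template's own "/" separators are not.
Template = emqx_authn_utils:parse_str(<<"/auth/${username}">>),
Credential = #{username => <<"user/1">>},
Rendered = emqx_authn_utils:render_urlencoded_str(Template, Credential).
%% expected: <<"/auth/user%2F1">>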
create( AuthenticatorID, @@ -163,7 +170,7 @@ authenticate( }, State ) -> - case ensure_auth_method(AuthMethod, State) of + case ensure_auth_method(AuthMethod, AuthData, State) of true -> case AuthCache of #{next_step := client_final} -> @@ -299,11 +306,13 @@ run_fuzzy_filter( %% Internal functions %%------------------------------------------------------------------------------ -ensure_auth_method(<<"SCRAM-SHA-256">>, #{algorithm := sha256}) -> +ensure_auth_method(_AuthMethod, undefined, _State) -> + false; +ensure_auth_method(<<"SCRAM-SHA-256">>, _AuthData, #{algorithm := sha256}) -> true; -ensure_auth_method(<<"SCRAM-SHA-512">>, #{algorithm := sha512}) -> +ensure_auth_method(<<"SCRAM-SHA-512">>, _AuthData, #{algorithm := sha512}) -> true; -ensure_auth_method(_, _) -> +ensure_auth_method(_AuthMethod, _AuthData, _State) -> false. check_client_first_message(Bin, _Cache, #{iteration_count := IterationCount} = State) -> diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_http.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_http.erl index 0a9aaa825..eddad92a3 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_http.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_http.erl @@ -26,6 +26,7 @@ -export([ namespace/0, + tags/0, roots/0, fields/1, desc/1, @@ -37,8 +38,11 @@ headers/1 ]). +-export([check_headers/1, check_ssl_opts/1]). + -export([ refs/0, + union_member_selector/1, create/2, update/2, authenticate/2, @@ -49,31 +53,35 @@ %% Hocon Schema %%------------------------------------------------------------------------------ -namespace() -> "authn-http". +namespace() -> "authn". +tags() -> + [<<"Authentication">>]. + +%% used for config check when the schema module is resolved roots() -> [ {?CONF_NS, hoconsc:mk( - hoconsc:union(refs()), + hoconsc:union(fun ?MODULE:union_member_selector/1), #{} )} ]. -fields(get) -> +fields(http_get) -> [ - {method, #{type => get, required => true, default => get, desc => ?DESC(method)}}, + {method, #{type => get, required => true, desc => ?DESC(method)}}, {headers, fun headers_no_content_type/1} ] ++ common_fields(); -fields(post) -> +fields(http_post) -> [ - {method, #{type => post, required => true, default => post, desc => ?DESC(method)}}, + {method, #{type => post, required => true, desc => ?DESC(method)}}, {headers, fun headers/1} ] ++ common_fields(). -desc(get) -> +desc(http_get) -> ?DESC(get); -desc(post) -> +desc(http_post) -> ?DESC(post); desc(_) -> undefined. @@ -101,8 +109,8 @@ common_fields() -> validations() -> [ - {check_ssl_opts, fun check_ssl_opts/1}, - {check_headers, fun check_headers/1} + {check_ssl_opts, fun ?MODULE:check_ssl_opts/1}, + {check_headers, fun ?MODULE:check_headers/1} ]. url(type) -> binary(); @@ -151,10 +159,25 @@ request_timeout(_) -> undefined. refs() -> [ - hoconsc:ref(?MODULE, get), - hoconsc:ref(?MODULE, post) + hoconsc:ref(?MODULE, http_get), + hoconsc:ref(?MODULE, http_post) ]. +union_member_selector(all_union_members) -> + refs(); +union_member_selector({value, Value}) -> + refs(Value). + +refs(#{<<"method">> := <<"get">>}) -> + [hoconsc:ref(?MODULE, http_get)]; +refs(#{<<"method">> := <<"post">>}) -> + [hoconsc:ref(?MODULE, http_post)]; +refs(_) -> + throw(#{ + field_name => method, + expected => "get | post" + }). + create(_AuthenticatorID, Config) -> create(Config). 
@@ -188,7 +211,7 @@ authenticate( } = State ) -> Request = generate_request(Credential, State), - Response = emqx_resource:query(ResourceId, {Method, Request, RequestTimeout}), + Response = emqx_resource:simple_sync_query(ResourceId, {Method, Request, RequestTimeout}), ?TRACE_AUTHN_PROVIDER("http_response", #{ request => request_for_log(Credential, State), response => response_for_log(Response), @@ -241,21 +264,47 @@ transform_header_name(Headers) -> ). check_ssl_opts(Conf) -> - {BaseUrl, _Path, _Query} = parse_url(get_conf_val("url", Conf)), - case BaseUrl of - <<"https://", _/binary>> -> - case get_conf_val("ssl.enable", Conf) of - true -> ok; - false -> false + case is_backend_http(Conf) of + true -> + Url = get_conf_val("url", Conf), + {BaseUrl, _Path, _Query} = parse_url(Url), + case BaseUrl of + <<"https://", _/binary>> -> + case get_conf_val("ssl.enable", Conf) of + true -> + ok; + false -> + <<"it's required to enable the TLS option to establish an HTTPS connection">> + end; + <<"http://", _/binary>> -> + ok end; - <<"http://", _/binary>> -> + false -> ok end. check_headers(Conf) -> - Method = to_bin(get_conf_val("method", Conf)), - Headers = get_conf_val("headers", Conf), - Method =:= <<"post">> orelse (not maps:is_key(<<"content-type">>, Headers)). + case is_backend_http(Conf) of + true -> + Headers = get_conf_val("headers", Conf), + case to_bin(get_conf_val("method", Conf)) of + <<"post">> -> + ok; + <<"get">> -> + case maps:is_key(<<"content-type">>, Headers) of + false -> ok; + true -> <<"HTTP GET requests cannot include a content-type header.">> + end + end; + false -> + ok + end. + +is_backend_http(Conf) -> + case get_conf_val("backend", Conf) of + http -> true; + _ -> false + end. parse_url(Url) -> case string:split(Url, "//", leading) of @@ -265,9 +314,9 @@ parse_url(Url) -> BaseUrl = iolist_to_binary([Scheme, "//", HostPort]), case string:split(Remaining, "?", leading) of [Path, QueryString] -> - {BaseUrl, Path, QueryString}; + {BaseUrl, <<"/", Path/binary>>, QueryString}; [Path] -> - {BaseUrl, Path, <<>>} + {BaseUrl, <<"/", Path/binary>>, <<>>} end; [HostPort] -> {iolist_to_binary([Scheme, "//", HostPort]), <<>>, <<>>} @@ -290,7 +339,7 @@ parse_config( method => Method, path => Path, headers => ensure_header_name_type(Headers), - base_path_templete => emqx_authn_utils:parse_str(Path), + base_path_template => emqx_authn_utils:parse_str(Path), base_query_template => emqx_authn_utils:parse_deep( cow_qs:parse_qs(to_bin(Query)) ), @@ -303,12 +352,12 @@ parse_config( generate_request(Credential, #{ method := Method, headers := Headers0, - base_path_templete := BasePathTemplate, + base_path_template := BasePathTemplate, base_query_template := BaseQueryTemplate, body_template := BodyTemplate }) -> Headers = maps:to_list(Headers0), - Path = emqx_authn_utils:render_str(BasePathTemplate, Credential), + Path = emqx_authn_utils:render_urlencoded_str(BasePathTemplate, Credential), Query = emqx_authn_utils:render_deep(BaseQueryTemplate, Credential), Body = emqx_authn_utils:render_deep(BodyTemplate, Credential), case Method of @@ -323,9 +372,9 @@ generate_request(Credential, #{ end. append_query(Path, []) -> - encode_path(Path); + Path; append_query(Path, Query) -> - encode_path(Path) ++ "?" ++ binary_to_list(qs(Query)). + Path ++ "?" ++ binary_to_list(qs(Query)). qs(KVs) -> qs(KVs, []). @@ -337,7 +386,7 @@ qs([{K, V} | More], Acc) -> qs(More, [["&", uri_encode(K), "=", uri_encode(V)] | Acc]).
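With the fix above, parse_url/1 always returns the path carrying its leading slash, which append_query/2 now depends on since it no longer re-encodes the path itself. Expected splits under that reading, with hypothetical URLs:

%% parse_url(<<"https://host:8443/auth?q=1">>)
%%   -> {<<"https://host:8443">>, <<"/auth">>, <<"q=1">>}
%% parse_url(<<"https://host:8443/auth">>)
%%   -> {<<"https://host:8443">>, <<"/auth">>, <<>>}
%% parse_url(<<"https://host:8443">>)
%%   -> {<<"https://host:8443">>, <<>>, <<>>}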
serialize_body(<<"application/json">>, Body) -> - emqx_json:encode(Body); + emqx_utils_json:encode(Body); serialize_body(<<"application/x-www-form-urlencoded">>, Body) -> qs(maps:to_list(Body)). @@ -375,7 +424,7 @@ safely_parse_body(ContentType, Body) -> end. parse_body(<<"application/json", _/binary>>, Body) -> - {ok, emqx_json:decode(Body, [return_maps])}; + {ok, emqx_utils_json:decode(Body, [return_maps])}; parse_body(<<"application/x-www-form-urlencoded", _/binary>>, Body) -> Flags = [<<"result">>, <<"is_superuser">>], RawMap = maps:from_list(cow_qs:parse_qs(Body)), @@ -387,10 +436,6 @@ parse_body(ContentType, _) -> uri_encode(T) -> emqx_http_lib:uri_encode(to_list(T)). -encode_path(Path) -> - Parts = string:split(Path, "/", all), - lists:flatten(["/" ++ Part || Part <- lists:map(fun uri_encode/1, Parts)]). - request_for_log(Credential, #{url := Url} = State) -> SafeCredential = emqx_authn_utils:without_password(Credential), case generate_request(SafeCredential, State) of diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_jwks_client.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_jwks_client.erl index 5ee923859..23d939f7d 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_jwks_client.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_jwks_client.erl @@ -99,7 +99,7 @@ handle_info( State1; {StatusLine, Headers, Body} -> try - JWKS = jose_jwk:from(emqx_json:decode(Body, [return_maps])), + JWKS = jose_jwk:from(emqx_utils_json:decode(Body, [return_maps])), {_, JWKs} = JWKS#jose_jwk.keys, State1#{jwks := JWKs} catch diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_jwks_connector.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_jwks_connector.erl index 1fdd7cef7..fe0349b4a 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_jwks_connector.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_jwks_connector.erl @@ -35,18 +35,17 @@ callback_mode() -> always_sync. on_start(InstId, Opts) -> - PoolName = emqx_plugin_libs_pool:pool_name(InstId), PoolOpts = [ {pool_size, maps:get(pool_size, Opts, ?DEFAULT_POOL_SIZE)}, {connector_opts, Opts} ], - case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, PoolOpts) of - ok -> {ok, #{pool_name => PoolName}}; + case emqx_resource_pool:start(InstId, ?MODULE, PoolOpts) of + ok -> {ok, #{pool_name => InstId}}; {error, Reason} -> {error, Reason} end. on_stop(_InstId, #{pool_name := PoolName}) -> - emqx_plugin_libs_pool:stop_pool(PoolName). + emqx_resource_pool:stop(PoolName). on_query(InstId, get_jwks, #{pool_name := PoolName}) -> Result = ecpool:pick_and_do(PoolName, {emqx_authn_jwks_client, get_jwks, []}, no_handover), @@ -72,18 +71,17 @@ on_query(_InstId, {update, Opts}, #{pool_name := PoolName}) -> ok. on_get_status(_InstId, #{pool_name := PoolName}) -> - Func = - fun(Conn) -> - case emqx_authn_jwks_client:get_jwks(Conn) of - {ok, _} -> true; - _ -> false - end - end, - case emqx_plugin_libs_pool:health_check_ecpool_workers(PoolName, Func) of + case emqx_resource_pool:health_check_workers(PoolName, fun health_check/1) of true -> connected; false -> disconnected end. +health_check(Conn) -> + case emqx_authn_jwks_client:get_jwks(Conn) of + {ok, _} -> true; + _ -> false + end. + connect(Opts) -> ConnectorOpts = proplists:get_value(connector_opts, Opts), emqx_authn_jwks_client:start_link(ConnectorOpts). 
diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_jwt.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_jwt.erl index 5709a1fe7..0df9014b8 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_jwt.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_jwt.erl @@ -21,10 +21,10 @@ -include_lib("hocon/include/hoconsc.hrl"). -behaviour(hocon_schema). --behaviour(emqx_authentication). -export([ namespace/0, + tags/0, roots/0, fields/1, desc/1 @@ -32,6 +32,7 @@ -export([ refs/0, + union_member_selector/1, create/2, update/2, authenticate/2, @@ -42,33 +43,57 @@ %% Hocon Schema %%------------------------------------------------------------------------------ -namespace() -> "authn-jwt". +namespace() -> "authn". +tags() -> + [<<"Authentication">>]. + +%% used for config check when the schema module is resolved roots() -> [ {?CONF_NS, hoconsc:mk( - hoconsc:union(refs()), + hoconsc:union(fun ?MODULE:union_member_selector/1), #{} )} ]. -fields('hmac-based') -> +fields(jwt_hmac) -> [ - {use_jwks, sc(hoconsc:enum([false]), #{required => true, desc => ?DESC(use_jwks)})}, + %% for hmac, it's the 'algorithm' field which selects this type + %% use_jwks field can be ignored (kept for backward compatibility) + {use_jwks, + sc( + hoconsc:enum([false]), + #{ + required => false, + desc => ?DESC(use_jwks), + importance => ?IMPORTANCE_HIDDEN + } + )}, {algorithm, sc(hoconsc:enum(['hmac-based']), #{required => true, desc => ?DESC(algorithm)})}, {secret, fun secret/1}, {secret_base64_encoded, fun secret_base64_encoded/1} ] ++ common_fields(); -fields('public-key') -> +fields(jwt_public_key) -> [ - {use_jwks, sc(hoconsc:enum([false]), #{required => true, desc => ?DESC(use_jwks)})}, + %% for public-key, it's the 'algorithm' field which selects this type + %% use_jwks field can be ignored (kept for backward compatibility) + {use_jwks, + sc( + hoconsc:enum([false]), + #{ + required => false, + desc => ?DESC(use_jwks), + importance => ?IMPORTANCE_HIDDEN + } + )}, {algorithm, sc(hoconsc:enum(['public-key']), #{required => true, desc => ?DESC(algorithm)})}, {public_key, fun public_key/1} ] ++ common_fields(); -fields('jwks') -> +fields(jwt_jwks) -> [ {use_jwks, sc(hoconsc:enum([true]), #{required => true, desc => ?DESC(use_jwks)})}, {endpoint, fun endpoint/1}, @@ -81,17 +106,13 @@ fields('jwks') -> }} ] ++ common_fields(). -desc('hmac-based') -> - ?DESC('hmac-based'); -desc('public-key') -> - ?DESC('public-key'); -desc('jwks') -> - ?DESC('jwks'); -desc(ssl_disable) -> - ?DESC(ssl_disable); -desc(ssl_enable) -> - ?DESC(ssl_enable); -desc(_) -> +desc(jwt_hmac) -> + ?DESC(jwt_hmac); +desc(jwt_public_key) -> + ?DESC(jwt_public_key); +desc(jwt_jwks) -> + ?DESC(jwt_jwks); +desc(undefined) -> undefined. common_fields() -> @@ -160,11 +181,36 @@ from(_) -> undefined. refs() -> [ - hoconsc:ref(?MODULE, 'hmac-based'), - hoconsc:ref(?MODULE, 'public-key'), - hoconsc:ref(?MODULE, 'jwks') + hoconsc:ref(?MODULE, jwt_hmac), + hoconsc:ref(?MODULE, jwt_public_key), + hoconsc:ref(?MODULE, jwt_jwks) ]. +union_member_selector(all_union_members) -> + refs(); +union_member_selector({value, V}) -> + UseJWKS = maps:get(<<"use_jwks">>, V, undefined), + select_ref(boolean(UseJWKS), V). + +%% this field is technically a boolean type, +%% but union member selection is done before type casting (by typrefl), +%% so we have to allow strings too +boolean(<<"true">>) -> true; +boolean(<<"false">>) -> false; +boolean(Other) -> Other. 
+ +select_ref(true, _) -> + [hoconsc:ref(?MODULE, 'jwt_jwks')]; +select_ref(false, #{<<"public_key">> := _}) -> + [hoconsc:ref(?MODULE, jwt_public_key)]; +select_ref(false, _) -> + [hoconsc:ref(?MODULE, jwt_hmac)]; +select_ref(_, _) -> + throw(#{ + field_name => use_jwks, + expected => "true | false" + }). + create(_AuthenticatorID, Config) -> create(Config). @@ -183,7 +229,7 @@ update( #{use_jwks := true} = Config, #{jwk_resource := ResourceId} = State ) -> - case emqx_resource:query(ResourceId, {update, connector_opts(Config)}) of + case emqx_resource:simple_sync_query(ResourceId, {update, connector_opts(Config)}) of ok -> case maps:get(verify_claims, Config, undefined) of undefined -> @@ -225,7 +271,7 @@ authenticate( from := From } ) -> - case emqx_resource:query(ResourceId, get_jwks) of + case emqx_resource:simple_sync_query(ResourceId, get_jwks) of {error, Reason} -> ?TRACE_AUTHN_PROVIDER(error, "get_jwks_failed", #{ resource => ResourceId, @@ -382,7 +428,7 @@ do_verify(_JWT, [], _VerifyClaims) -> do_verify(JWT, [JWK | More], VerifyClaims) -> try jose_jws:verify(JWK, JWT) of {true, Payload, _JWT} -> - Claims0 = emqx_json:decode(Payload, [return_maps]), + Claims0 = emqx_utils_json:decode(Payload, [return_maps]), Claims = try_convert_to_num(Claims0, [<<"exp">>, <<"iat">>, <<"nbf">>]), case verify_claims(Claims, VerifyClaims) of ok -> diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_mnesia.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_mnesia.erl index e915744e1..d57e9e00e 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_mnesia.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_mnesia.erl @@ -26,6 +26,7 @@ -export([ namespace/0, + tags/0, roots/0, fields/1, desc/1 @@ -95,6 +96,7 @@ mnesia(boot) -> ok = mria:create_table(?TAB, [ {rlog_shard, ?AUTH_SHARD}, + {type, ordered_set}, {storage, disc_copies}, {record_name, user_info}, {attributes, record_info(fields, user_info)}, @@ -105,11 +107,16 @@ mnesia(boot) -> %% Hocon Schema %%------------------------------------------------------------------------------ -namespace() -> "authn-builtin_db". +namespace() -> "authn". -roots() -> [?CONF_NS]. +tags() -> + [<<"Authentication">>]. -fields(?CONF_NS) -> +%% used for config check when the schema module is resolved +roots() -> + [{?CONF_NS, hoconsc:mk(hoconsc:ref(?MODULE, builtin_db))}]. + +fields(builtin_db) -> [ {mechanism, emqx_authn_schema:mechanism(password_based)}, {backend, emqx_authn_schema:backend(built_in_database)}, @@ -117,8 +124,8 @@ fields(?CONF_NS) -> {password_hash_algorithm, fun emqx_authn_password_hashing:type_rw/1} ] ++ emqx_authn_schema:common_fields(). -desc(?CONF_NS) -> - ?DESC(?CONF_NS); +desc(builtin_db) -> + ?DESC(builtin_db); desc(_) -> undefined. @@ -133,7 +140,7 @@ user_id_type(_) -> undefined. %%------------------------------------------------------------------------------ refs() -> - [hoconsc:ref(?MODULE, ?CONF_NS)]. + [hoconsc:ref(?MODULE, builtin_db)]. create(_AuthenticatorID, Config) -> create(Config). 
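Taken together, union_member_selector/1, boolean/1, and select_ref/2 above form a three-way dispatch on the raw, not-yet-cast config. An illustrative (hypothetical) shell session; the list return values are paraphrased, only the throw shape is literal from the code:

%% > emqx_authn_jwt:union_member_selector({value, #{<<"use_jwks">> => <<"true">>}}).
%% -> [ref to jwt_jwks]
%% > emqx_authn_jwt:union_member_selector({value,
%%       #{<<"use_jwks">> => <<"false">>, <<"public_key">> => <<"...">>}}).
%% -> [ref to jwt_public_key]
%% > emqx_authn_jwt:union_member_selector({value, #{<<"use_jwks">> => <<"false">>}}).
%% -> [ref to jwt_hmac]
%% > emqx_authn_jwt:union_member_selector({value, #{}}).
%% ** exception throw: #{field_name => use_jwks, expected => "true | false"}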
@@ -327,7 +334,7 @@ run_fuzzy_filter( %% Example: data/user-credentials.json import_users_from_json(Bin, #{user_group := UserGroup}) -> - case emqx_json:safe_decode(Bin, [return_maps]) of + case emqx_utils_json:safe_decode(Bin, [return_maps]) of {ok, List} -> trans(fun ?MODULE:import/2, [UserGroup, List]); {error, Reason} -> diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_mongodb.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_mongodb.erl index 3fac0ed7d..1a766b975 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_mongodb.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_mongodb.erl @@ -25,6 +25,7 @@ -export([ namespace/0, + tags/0, roots/0, fields/1, desc/1 @@ -32,6 +33,7 @@ -export([ refs/0, + union_member_selector/1, create/2, update/2, authenticate/2, @@ -42,29 +44,33 @@ %% Hocon Schema %%------------------------------------------------------------------------------ -namespace() -> "authn-mongodb". +namespace() -> "authn". +tags() -> + [<<"Authentication">>]. + +%% used for config check when the schema module is resolved roots() -> [ {?CONF_NS, hoconsc:mk( - hoconsc:union(refs()), + hoconsc:union(fun ?MODULE:union_member_selector/1), #{} )} ]. -fields(standalone) -> +fields(mongo_single) -> common_fields() ++ emqx_connector_mongo:fields(single); -fields('replica-set') -> +fields(mongo_rs) -> common_fields() ++ emqx_connector_mongo:fields(rs); -fields('sharded-cluster') -> +fields(mongo_sharded) -> common_fields() ++ emqx_connector_mongo:fields(sharded). -desc(standalone) -> - ?DESC(standalone); -desc('replica-set') -> +desc(mongo_single) -> + ?DESC(single); +desc(mongo_rs) -> ?DESC('replica-set'); -desc('sharded-cluster') -> +desc(mongo_sharded) -> ?DESC('sharded-cluster'); desc(_) -> undefined. @@ -121,9 +127,9 @@ is_superuser_field(_) -> undefined. refs() -> [ - hoconsc:ref(?MODULE, standalone), - hoconsc:ref(?MODULE, 'replica-set'), - hoconsc:ref(?MODULE, 'sharded-cluster') + hoconsc:ref(?MODULE, mongo_single), + hoconsc:ref(?MODULE, mongo_rs), + hoconsc:ref(?MODULE, mongo_sharded) ]. create(_AuthenticatorID, Config) -> @@ -163,7 +169,7 @@ authenticate( } = State ) -> Filter = emqx_authn_utils:render_deep(FilterTemplate, Credential), - case emqx_resource:query(ResourceId, {find_one, Collection, Filter, #{}}) of + case emqx_resource:simple_sync_query(ResourceId, {find_one, Collection, Filter, #{}}) of {ok, undefined} -> ignore; {error, Reason} -> @@ -242,3 +248,20 @@ is_superuser(Doc, #{is_superuser_field := IsSuperuserField}) -> emqx_authn_utils:is_superuser(#{<<"is_superuser">> => IsSuperuser}); is_superuser(_, _) -> emqx_authn_utils:is_superuser(#{<<"is_superuser">> => false}). + +union_member_selector(all_union_members) -> + refs(); +union_member_selector({value, Value}) -> + refs(Value). + +refs(#{<<"mongo_type">> := <<"single">>}) -> + [hoconsc:ref(?MODULE, mongo_single)]; +refs(#{<<"mongo_type">> := <<"rs">>}) -> + [hoconsc:ref(?MODULE, mongo_rs)]; +refs(#{<<"mongo_type">> := <<"sharded">>}) -> + [hoconsc:ref(?MODULE, mongo_sharded)]; +refs(_) -> + throw(#{ + field_name => mongo_type, + expected => "single | rs | sharded" + }). 
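The MongoDB provider repeats the selector pattern, keyed instead on the connector's own discriminator field mongo_type; the Redis hunks below do the same with redis_type. Probing the error branch (hypothetical shell call; the thrown map is literal from the refs/1 clauses above):

%% > emqx_authn_mongodb:union_member_selector({value, #{}}).
%% ** exception throw: #{field_name => mongo_type,
%%                       expected => "single | rs | sharded"}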
diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_mysql.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_mysql.erl index 68913443f..d8e631885 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_mysql.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_mysql.erl @@ -27,6 +27,7 @@ -export([ namespace/0, + tags/0, roots/0, fields/1, desc/1 @@ -44,11 +45,16 @@ %% Hocon Schema %%------------------------------------------------------------------------------ -namespace() -> "authn-mysql". +namespace() -> "authn". -roots() -> [?CONF_NS]. +tags() -> + [<<"Authentication">>]. -fields(?CONF_NS) -> +%% used for config check when the schema module is resolved +roots() -> + [{?CONF_NS, hoconsc:mk(hoconsc:ref(?MODULE, mysql))}]. + +fields(mysql) -> [ {mechanism, emqx_authn_schema:mechanism(password_based)}, {backend, emqx_authn_schema:backend(mysql)}, @@ -58,8 +64,8 @@ fields(?CONF_NS) -> ] ++ emqx_authn_schema:common_fields() ++ proplists:delete(prepare_statement, emqx_connector_mysql:fields(config)). -desc(?CONF_NS) -> - ?DESC(?CONF_NS); +desc(mysql) -> + ?DESC(mysql); desc(_) -> undefined. @@ -70,7 +76,7 @@ query(_) -> undefined. query_timeout(type) -> emqx_schema:duration_ms(); query_timeout(desc) -> ?DESC(?FUNCTION_NAME); -query_timeout(default) -> "5s"; +query_timeout(default) -> <<"5s">>; query_timeout(_) -> undefined. %%------------------------------------------------------------------------------ @@ -78,7 +84,7 @@ query_timeout(_) -> undefined. %%------------------------------------------------------------------------------ refs() -> - [hoconsc:ref(?MODULE, ?CONF_NS)]. + [hoconsc:ref(?MODULE, mysql)]. create(_AuthenticatorID, Config) -> create(Config). @@ -114,7 +120,9 @@ authenticate( } ) -> Params = emqx_authn_utils:render_sql_params(TmplToken, Credential), - case emqx_resource:query(ResourceId, {prepared_query, ?PREPARE_KEY, Params, Timeout}) of + case + emqx_resource:simple_sync_query(ResourceId, {prepared_query, ?PREPARE_KEY, Params, Timeout}) + of {ok, _Columns, []} -> ignore; {ok, Columns, [Row | _]} -> diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_pgsql.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_pgsql.erl index 1cadf9c56..d9526cc7b 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_pgsql.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_pgsql.erl @@ -26,6 +26,7 @@ -export([ namespace/0, + tags/0, roots/0, fields/1, desc/1 @@ -48,11 +49,16 @@ %% Hocon Schema %%------------------------------------------------------------------------------ -namespace() -> "authn-postgresql". +namespace() -> "authn". -roots() -> [?CONF_NS]. +tags() -> + [<<"Authentication">>]. -fields(?CONF_NS) -> +%% used for config check when the schema module is resolved +roots() -> + [{?CONF_NS, hoconsc:mk(hoconsc:ref(?MODULE, postgresql))}]. + +fields(postgresql) -> [ {mechanism, emqx_authn_schema:mechanism(password_based)}, {backend, emqx_authn_schema:backend(postgresql)}, @@ -62,8 +68,8 @@ fields(?CONF_NS) -> emqx_authn_schema:common_fields() ++ proplists:delete(prepare_statement, emqx_connector_pgsql:fields(config)). -desc(?CONF_NS) -> - ?DESC(?CONF_NS); +desc(postgresql) -> + ?DESC(postgresql); desc(_) -> undefined. @@ -77,7 +83,7 @@ query(_) -> undefined. %%------------------------------------------------------------------------------ refs() -> - [hoconsc:ref(?MODULE, ?CONF_NS)]. + [hoconsc:ref(?MODULE, postgresql)]. create(_AuthenticatorID, Config) -> create(Config). 
@@ -116,7 +122,7 @@ authenticate( } ) -> Params = emqx_authn_utils:render_sql_params(PlaceHolders, Credential), - case emqx_resource:query(ResourceId, {prepared_query, ResourceId, Params}) of + case emqx_resource:simple_sync_query(ResourceId, {prepared_query, ResourceId, Params}) of {ok, _Columns, []} -> ignore; {ok, Columns, [Row | _]} -> diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_redis.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_redis.erl index 0c8fedfb5..27d8c540a 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_redis.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_redis.erl @@ -25,6 +25,7 @@ -export([ namespace/0, + tags/0, roots/0, fields/1, desc/1 @@ -32,6 +33,7 @@ -export([ refs/0, + union_member_selector/1, create/2, update/2, authenticate/2, @@ -42,29 +44,33 @@ %% Hocon Schema %%------------------------------------------------------------------------------ -namespace() -> "authn-redis". +namespace() -> "authn". +tags() -> + [<<"Authentication">>]. + +%% used for config check when the schema module is resolved roots() -> [ {?CONF_NS, hoconsc:mk( - hoconsc:union(refs()), + hoconsc:union(fun ?MODULE:union_member_selector/1), #{} )} ]. -fields(standalone) -> +fields(redis_single) -> common_fields() ++ emqx_connector_redis:fields(single); -fields(cluster) -> +fields(redis_cluster) -> common_fields() ++ emqx_connector_redis:fields(cluster); -fields(sentinel) -> +fields(redis_sentinel) -> common_fields() ++ emqx_connector_redis:fields(sentinel). -desc(standalone) -> - ?DESC(standalone); -desc(cluster) -> +desc(redis_single) -> + ?DESC(single); +desc(redis_cluster) -> ?DESC(cluster); -desc(sentinel) -> +desc(redis_sentinel) -> ?DESC(sentinel); desc(_) -> "". @@ -88,11 +94,28 @@ cmd(_) -> undefined. refs() -> [ - hoconsc:ref(?MODULE, standalone), - hoconsc:ref(?MODULE, cluster), - hoconsc:ref(?MODULE, sentinel) + hoconsc:ref(?MODULE, redis_single), + hoconsc:ref(?MODULE, redis_cluster), + hoconsc:ref(?MODULE, redis_sentinel) ]. +union_member_selector(all_union_members) -> + refs(); +union_member_selector({value, Value}) -> + refs(Value). + +refs(#{<<"redis_type">> := <<"single">>}) -> + [hoconsc:ref(?MODULE, redis_single)]; +refs(#{<<"redis_type">> := <<"cluster">>}) -> + [hoconsc:ref(?MODULE, redis_cluster)]; +refs(#{<<"redis_type">> := <<"sentinel">>}) -> + [hoconsc:ref(?MODULE, redis_sentinel)]; +refs(_) -> + throw(#{ + field_name => redis_type, + expected => "single | cluster | sentinel" + }). + create(_AuthenticatorID, Config) -> create(Config). 
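Across all providers, authenticate/2 now calls emqx_resource:simple_sync_query/2 instead of emqx_resource:query/2, so authentication queries run synchronously against the resource and skip the buffering layer. A condensed sketch of the shared call shape, using the Redis {cmd, _} request form from the hunk below (clause outcomes mirror the surrounding code; the function name is illustrative):

%% Sketch: synchronous resource query on the authenticate hot path.
auth_query(ResourceId, Command) ->
    case emqx_resource:simple_sync_query(ResourceId, {cmd, Command}) of
        {ok, []} -> ignore;               %% no matching user: fall through
        {ok, _Values} = Ok -> Ok;         %% proceed to credential check
        {error, _} = Error -> Error
    end.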
@@ -135,7 +158,7 @@ authenticate( ) -> NKey = emqx_authn_utils:render_str(KeyTemplate, Credential), Command = [CommandName, NKey | Fields], - case emqx_resource:query(ResourceId, {cmd, Command}) of + case emqx_resource:simple_sync_query(ResourceId, {cmd, Command}) of {ok, []} -> ignore; {ok, Values} -> diff --git a/apps/emqx_authn/test/emqx_authn_SUITE.erl b/apps/emqx_authn/test/emqx_authn_SUITE.erl index 3017c3346..4f96ca2dd 100644 --- a/apps/emqx_authn/test/emqx_authn_SUITE.erl +++ b/apps/emqx_authn/test/emqx_authn_SUITE.erl @@ -97,21 +97,109 @@ t_will_message_connection_denied(Config) when is_list(Config) -> {will_topic, <<"lwt">>}, {will_payload, <<"should not be published">>} ]), - snabbkaffe:start_trace(), - ?wait_async_action( - {error, _} = emqtt:connect(Publisher), - #{?snk_kind := channel_terminated} - ), - snabbkaffe:stop(), - + Ref = monitor(process, Publisher), + _ = unlink(Publisher), + {error, _} = emqtt:connect(Publisher), + receive + {'DOWN', Ref, process, Publisher, Reason} -> + ?assertEqual({shutdown, unauthorized_client}, Reason) + after 2000 -> + error(timeout) + end, receive {publish, #{ topic := <<"lwt">>, payload := <<"should not be published">> }} -> ct:fail("should not publish will message") - after 0 -> + after 1000 -> ok end, - ok. + +%% With auth enabled, send CONNECT without password field, +%% expect CONNACK with reason_code=5 and socket close +t_password_undefined({init, Config}) -> + emqx_common_test_helpers:start_apps([emqx_conf, emqx_authn]), + AuthnConfig = #{ + <<"mechanism">> => <<"password_based">>, + <<"backend">> => <<"built_in_database">>, + <<"user_id_type">> => <<"clientid">> + }, + Chain = 'mqtt:global', + emqx:update_config( + [authentication], + {create_authenticator, Chain, AuthnConfig} + ), + Config; +t_password_undefined({'end', _Config}) -> + emqx:update_config( + [authentication], + {delete_authenticator, 'mqtt:global', <<"password_based:built_in_database">>} + ), + emqx_common_test_helpers:stop_apps([emqx_authn, emqx_conf]), + ok; +t_password_undefined(Config) when is_list(Config) -> + Payload = <<16, 19, 0, 4, 77, 81, 84, 84, 4, 130, 0, 60, 0, 2, 97, 49, 0, 3, 97, 97, 97>>, + {ok, Sock} = gen_tcp:connect("localhost", 1883, [binary, {active, true}]), + gen_tcp:send(Sock, Payload), + receive + {tcp, Sock, Bytes} -> + Resp = parse(iolist_to_binary(Bytes)), + ?assertMatch( + #mqtt_packet{ + header = #mqtt_packet_header{type = ?CONNACK}, + variable = #mqtt_packet_connack{ + ack_flags = 0, + reason_code = ?CONNACK_AUTH + }, + payload = undefined + }, + Resp + ) + after 2000 -> + error(timeout) + end, + receive + {tcp_closed, Sock} -> + ok + after 2000 -> + error(timeout) + end, + ok. 
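The hard-coded Payload in t_password_undefined is a well-formed MQTT 3.1.1 CONNECT packet whose password flag is deliberately clear; a byte-by-byte reading per the standard wire format (annotation only, not from the PR):

%% 16            2#00010000   fixed header: packet type CONNECT
%% 19            remaining length
%% 0,4,77,81,84,84            length-prefixed protocol name "MQTT"
%% 4             protocol level 4 (MQTT v3.1.1)
%% 130           2#10000010   connect flags: username set, clean session set,
%%                            password flag NOT set
%% 0,60          keep-alive 60 s
%% 0,2,97,49     client id <<"a1">>
%% 0,3,97,97,97  username  <<"aaa">>   (and no password field follows)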
+ +t_union_selector_errors({init, Config}) -> + Config; +t_union_selector_errors({'end', _Config}) -> + ok; +t_union_selector_errors(Config) when is_list(Config) -> + Conf0 = #{ + <<"mechanism">> => <<"password_based">>, + <<"backend">> => <<"mysql">> + }, + Conf1 = Conf0#{<<"mechanism">> => <<"unknown-atom-xx">>}, + ?assertThrow(#{error := unknown_mechanism}, emqx_authn:check_config(Conf1)), + Conf2 = Conf0#{<<"backend">> => <<"unknown-atom-xx">>}, + ?assertThrow(#{error := unknown_backend}, emqx_authn:check_config(Conf2)), + Conf3 = Conf0#{<<"backend">> => <<"unknown">>, <<"mechanism">> => <<"unknown">>}, + ?assertThrow( + #{ + error := unknown_authn_provider, + backend := unknown, + mechanism := unknown + }, + emqx_authn:check_config(Conf3) + ), + Res = catch (emqx_authn:check_config(#{<<"mechanism">> => <<"unknown">>})), + ?assertEqual( + #{ + error => unknown_authn_provider, + mechanism => unknown + }, + Res + ), + ok. + +parse(Bytes) -> + {ok, Frame, <<>>, {none, _}} = emqx_frame:parse(Bytes), + Frame. diff --git a/apps/emqx_authn/test/emqx_authn_api_SUITE.erl b/apps/emqx_authn/test/emqx_authn_api_SUITE.erl index 1a867b0be..6d9203c95 100644 --- a/apps/emqx_authn/test/emqx_authn_api_SUITE.erl +++ b/apps/emqx_authn/test/emqx_authn_api_SUITE.erl @@ -18,7 +18,8 @@ -compile(nowarn_export_all). -compile(export_all). --import(emqx_dashboard_api_test_helpers, [request/3, uri/1, multipart_formdata_request/3]). +-import(emqx_dashboard_api_test_helpers, [multipart_formdata_request/3]). +-import(emqx_mgmt_api_test_util, [request/3, uri/1]). -include("emqx_authn.hrl"). -include_lib("eunit/include/eunit.hrl"). @@ -28,7 +29,7 @@ -define(assertAuthenticatorsMatch(Guard, Path), (fun() -> {ok, 200, Response} = request(get, uri(Path)), - ?assertMatch(Guard, jiffy:decode(Response, [return_maps])) + ?assertMatch(Guard, emqx_utils_json:decode(Response, [return_maps])) end)() ). @@ -65,9 +66,8 @@ end_per_testcase(_, Config) -> init_per_suite(Config) -> emqx_config:erase(?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME_BINARY), _ = application:load(emqx_conf), - ok = emqx_common_test_helpers:start_apps( - [emqx_authn, emqx_dashboard], - fun set_special_configs/1 + ok = emqx_mgmt_api_test_util:init_suite( + [emqx_conf, emqx_authn] ), ?AUTHN:delete_chain(?GLOBAL), @@ -76,12 +76,7 @@ init_per_suite(Config) -> Config. end_per_suite(_Config) -> - emqx_common_test_helpers:stop_apps([emqx_dashboard, emqx_authn]), - ok. - -set_special_configs(emqx_dashboard) -> - emqx_dashboard_api_test_helpers:set_default_config(); -set_special_configs(_App) -> + emqx_mgmt_api_test_util:end_suite([emqx_authn]), ok. 
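Note the two assertion styles in t_union_selector_errors above: ?assertThrow pattern-matches, so extra keys in the thrown map are tolerated, while the final check uses catch plus ?assertEqual to pin the thrown value exactly. Side by side (shapes lifted from the test):

%% pattern match, extra keys allowed:
%%   ?assertThrow(#{error := unknown_mechanism}, emqx_authn:check_config(Conf1))
%% exact equality, no extra keys:
%%   Res = (catch emqx_authn:check_config(Conf)),
%%   ?assertEqual(#{error => unknown_authn_provider, mechanism => unknown}, Res)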
%%------------------------------------------------------------------------------ @@ -239,7 +234,7 @@ test_authenticator(PathPrefix) -> get, uri(PathPrefix ++ [?CONF_NS, "password_based:http", "status"]) ), - {ok, RList} = emqx_json:safe_decode(Res), + {ok, RList} = emqx_utils_json:safe_decode(Res), Snd = fun({_, Val}) -> Val end, LookupVal = fun LookupV(List, RestJson) -> case List of @@ -358,7 +353,7 @@ test_authenticator_users(PathPrefix) -> <<"success">> := 0, <<"nomatch">> := 1 } - } = jiffy:decode(PageData0, [return_maps]); + } = emqx_utils_json:decode(PageData0, [return_maps]); ["listeners", 'tcp:default'] -> #{ <<"metrics">> := #{ @@ -366,7 +361,7 @@ test_authenticator_users(PathPrefix) -> <<"success">> := 0, <<"nomatch">> := 1 } - } = jiffy:decode(PageData0, [return_maps]) + } = emqx_utils_json:decode(PageData0, [return_maps]) end, InvalidUsers = [ @@ -389,7 +384,7 @@ test_authenticator_users(PathPrefix) -> lists:foreach( fun(User) -> {ok, 201, UserData} = request(post, UsersUri, User), - CreatedUser = jiffy:decode(UserData, [return_maps]), + CreatedUser = emqx_utils_json:decode(UserData, [return_maps]), ?assertMatch(#{<<"user_id">> := _}, CreatedUser) end, ValidUsers @@ -416,7 +411,7 @@ test_authenticator_users(PathPrefix) -> <<"success">> := 1, <<"nomatch">> := 1 } - } = jiffy:decode(PageData01, [return_maps]); + } = emqx_utils_json:decode(PageData01, [return_maps]); ["listeners", 'tcp:default'] -> #{ <<"metrics">> := #{ @@ -424,7 +419,7 @@ test_authenticator_users(PathPrefix) -> <<"success">> := 1, <<"nomatch">> := 1 } - } = jiffy:decode(PageData01, [return_maps]) + } = emqx_utils_json:decode(PageData01, [return_maps]) end, {ok, 200, Page1Data} = request(get, UsersUri ++ "?page=1&limit=2"), @@ -438,7 +433,7 @@ test_authenticator_users(PathPrefix) -> <<"count">> := 3 } } = - jiffy:decode(Page1Data, [return_maps]), + emqx_utils_json:decode(Page1Data, [return_maps]), {ok, 200, Page2Data} = request(get, UsersUri ++ "?page=2&limit=2"), @@ -450,7 +445,7 @@ test_authenticator_users(PathPrefix) -> <<"limit">> := 2, <<"count">> := 3 } - } = jiffy:decode(Page2Data, [return_maps]), + } = emqx_utils_json:decode(Page2Data, [return_maps]), ?assertEqual(2, length(Page1Users)), ?assertEqual(1, length(Page2Users)), @@ -470,7 +465,7 @@ test_authenticator_users(PathPrefix) -> <<"limit">> := 3, <<"count">> := 1 } - } = jiffy:decode(Super1Data, [return_maps]), + } = emqx_utils_json:decode(Super1Data, [return_maps]), ?assertEqual( [<<"u2">>], @@ -487,7 +482,7 @@ test_authenticator_users(PathPrefix) -> <<"limit">> := 3, <<"count">> := 2 } - } = jiffy:decode(Super2Data, [return_maps]), + } = emqx_utils_json:decode(Super2Data, [return_maps]), ?assertEqual( [<<"u1">>, <<"u3">>], @@ -514,7 +509,7 @@ test_authenticator_user(PathPrefix) -> {ok, 200, UserData} = request(get, UsersUri ++ "/u1"), - FetchedUser = jiffy:decode(UserData, [return_maps]), + FetchedUser = emqx_utils_json:decode(UserData, [return_maps]), ?assertMatch(#{<<"user_id">> := <<"u1">>}, FetchedUser), ?assertNotMatch(#{<<"password">> := _}, FetchedUser), diff --git a/apps/emqx_authn/test/emqx_authn_enable_flag_SUITE.erl b/apps/emqx_authn/test/emqx_authn_enable_flag_SUITE.erl index 59865ab41..98215e853 100644 --- a/apps/emqx_authn/test/emqx_authn_enable_flag_SUITE.erl +++ b/apps/emqx_authn/test/emqx_authn_enable_flag_SUITE.erl @@ -42,15 +42,16 @@ init_per_testcase(_Case, Config) -> <<"backend">> => <<"built_in_database">>, <<"user_id_type">> => <<"clientid">> }, - emqx:update_config( + {ok, _} = emqx:update_config( ?PATH, 
{create_authenticator, ?GLOBAL, AuthnConfig} ), - - emqx_conf:update( - [listeners, tcp, listener_authn_enabled], {create, listener_mqtt_tcp_conf(18830, true)}, #{} + {ok, _} = emqx_conf:update( + [listeners, tcp, listener_authn_enabled], + {create, listener_mqtt_tcp_conf(18830, true)}, + #{} ), - emqx_conf:update( + {ok, _} = emqx_conf:update( [listeners, tcp, listener_authn_disabled], {create, listener_mqtt_tcp_conf(18831, false)}, #{} diff --git a/apps/emqx_authn/test/emqx_authn_http_SUITE.erl b/apps/emqx_authn/test/emqx_authn_http_SUITE.erl index 9a3c7c833..b08167a5b 100644 --- a/apps/emqx_authn/test/emqx_authn_http_SUITE.erl +++ b/apps/emqx_authn/test/emqx_authn_http_SUITE.erl @@ -41,13 +41,12 @@ -define(SERVER_RESPONSE_JSON(Result), ?SERVER_RESPONSE_JSON(Result, false)). -define(SERVER_RESPONSE_JSON(Result, IsSuperuser), - jiffy:encode(#{ + emqx_utils_json:encode(#{ result => Result, is_superuser => IsSuperuser }) ). --define(SERVER_RESPONSE_URLENCODE(Result), ?SERVER_RESPONSE_URLENCODE(Result, false)). -define(SERVER_RESPONSE_URLENCODE(Result, IsSuperuser), list_to_binary( "result=" ++ @@ -166,17 +165,65 @@ test_user_auth(#{ ?GLOBAL ). +t_authenticate_path_placeholders(_Config) -> + ok = emqx_authn_http_test_server:stop(), + {ok, _} = emqx_authn_http_test_server:start_link(?HTTP_PORT, <<"/[...]">>), + ok = emqx_authn_http_test_server:set_handler( + fun(Req0, State) -> + Req = + case cowboy_req:path(Req0) of + <<"/my/p%20ath//us%20er/auth//">> -> + cowboy_req:reply( + 200, + #{<<"content-type">> => <<"application/json">>}, + emqx_utils_json:encode(#{result => allow, is_superuser => false}), + Req0 + ); + Path -> + ct:pal("Unexpected path: ~p", [Path]), + cowboy_req:reply(403, Req0) + end, + {ok, Req, State} + end + ), + + Credentials = ?CREDENTIALS#{ + username => <<"us er">> + }, + + AuthConfig = maps:merge( + raw_http_auth_config(), + #{ + <<"url">> => <<"http://127.0.0.1:32333/my/p%20ath//${username}/auth//">>, + <<"body">> => #{} + } + ), + {ok, _} = emqx:update_config( + ?PATH, + {create_authenticator, ?GLOBAL, AuthConfig} + ), + + ?assertMatch( + {ok, #{is_superuser := false}}, + emqx_access_control:authenticate(Credentials) + ), + + _ = emqx_authn_test_lib:delete_authenticators( + [authentication], + ?GLOBAL + ). 
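t_authenticate_path_placeholders pins down the URL-template semantics: already-encoded segments (p%20ath) and empty segments (//) pass through untouched, while the value substituted for ${username} is percent-encoded; presumably this is also why the segment-re-encoding encode_path/1 helper could be dropped in the first hunk of this diff. Expected rendering for this test's credentials:

%% template:  http://127.0.0.1:32333/my/p%20ath//${username}/auth//
%% username:  <<"us er">>
%% rendered request path (the only one the stub handler accepts):
%%   /my/p%20ath//us%20er/auth//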
+ t_no_value_for_placeholder(_Config) -> Handler = fun(Req0, State) -> {ok, RawBody, Req1} = cowboy_req:read_body(Req0), #{ <<"cert_subject">> := <<"">>, <<"cert_common_name">> := <<"">> - } = jiffy:decode(RawBody, [return_maps]), + } = emqx_utils_json:decode(RawBody, [return_maps]), Req = cowboy_req:reply( 200, #{<<"content-type">> => <<"application/json">>}, - jiffy:encode(#{result => allow, is_superuser => false}), + emqx_utils_json:encode(#{result => allow, is_superuser => false}), Req1 ), {ok, Req, State} @@ -444,7 +491,7 @@ samples() -> Req = cowboy_req:reply( 200, #{<<"content-type">> => <<"application/json">>}, - jiffy:encode(#{result => allow, is_superuser => false}), + emqx_utils_json:encode(#{result => allow, is_superuser => false}), Req0 ), {ok, Req, State} @@ -459,7 +506,7 @@ samples() -> Req = cowboy_req:reply( 200, #{<<"content-type">> => <<"application/json">>}, - jiffy:encode(#{result => allow, is_superuser => true}), + emqx_utils_json:encode(#{result => allow, is_superuser => true}), Req0 ), {ok, Req, State} @@ -512,11 +559,11 @@ samples() -> #{ <<"username">> := <<"plain">>, <<"password">> := <<"plain">> - } = jiffy:decode(RawBody, [return_maps]), + } = emqx_utils_json:decode(RawBody, [return_maps]), Req = cowboy_req:reply( 200, #{<<"content-type">> => <<"application/json">>}, - jiffy:encode(#{result => allow, is_superuser => false}), + emqx_utils_json:encode(#{result => allow, is_superuser => false}), Req1 ), {ok, Req, State} @@ -539,7 +586,7 @@ samples() -> Req = cowboy_req:reply( 200, #{<<"content-type">> => <<"application/json">>}, - jiffy:encode(#{result => allow, is_superuser => false}), + emqx_utils_json:encode(#{result => allow, is_superuser => false}), Req1 ), {ok, Req, State} @@ -565,11 +612,11 @@ samples() -> <<"peerhost">> := <<"127.0.0.1">>, <<"cert_subject">> := <<"cert_subject_data">>, <<"cert_common_name">> := <<"cert_common_name_data">> - } = jiffy:decode(RawBody, [return_maps]), + } = emqx_utils_json:decode(RawBody, [return_maps]), Req = cowboy_req:reply( 200, #{<<"content-type">> => <<"application/json">>}, - jiffy:encode(#{result => allow, is_superuser => false}), + emqx_utils_json:encode(#{result => allow, is_superuser => false}), Req1 ), {ok, Req, State} diff --git a/apps/emqx_authn/test/emqx_authn_https_SUITE.erl b/apps/emqx_authn/test/emqx_authn_https_SUITE.erl index 7d51ff425..c4315b69f 100644 --- a/apps/emqx_authn/test/emqx_authn_https_SUITE.erl +++ b/apps/emqx_authn/test/emqx_authn_https_SUITE.erl @@ -168,7 +168,7 @@ cowboy_handler(Req0, State) -> Req = cowboy_req:reply( 200, #{<<"content-type">> => <<"application/json">>}, - jiffy:encode(#{result => allow, is_superuser => false}), + emqx_utils_json:encode(#{result => allow, is_superuser => false}), Req0 ), {ok, Req, State}. diff --git a/apps/emqx_authn/test/emqx_authn_jwt_SUITE.erl b/apps/emqx_authn/test/emqx_authn_jwt_SUITE.erl index 7a51d2bbb..bd18367b6 100644 --- a/apps/emqx_authn/test/emqx_authn_jwt_SUITE.erl +++ b/apps/emqx_authn/test/emqx_authn_jwt_SUITE.erl @@ -37,7 +37,7 @@ init_per_testcase(_, Config) -> init_per_suite(Config) -> _ = application:load(emqx_conf), - emqx_common_test_helpers:start_apps([emqx_authn]), + emqx_common_test_helpers:start_apps([emqx_conf, emqx_authn]), application:ensure_all_started(emqx_resource), application:ensure_all_started(emqx_connector), Config. @@ -467,7 +467,7 @@ jwks_handler(Req0, State) -> Req = cowboy_req:reply( 200, #{<<"content-type">> => <<"application/json">>}, - jiffy:encode(JWKS), + emqx_utils_json:encode(JWKS), Req0 ), {ok, Req, State}. 
diff --git a/apps/emqx_authn/test/emqx_authn_mnesia_SUITE.erl b/apps/emqx_authn/test/emqx_authn_mnesia_SUITE.erl index 8191fe2e9..599eae92e 100644 --- a/apps/emqx_authn/test/emqx_authn_mnesia_SUITE.erl +++ b/apps/emqx_authn/test/emqx_authn_mnesia_SUITE.erl @@ -49,36 +49,6 @@ end_per_testcase(_Case, Config) -> %% Tests %%------------------------------------------------------------------------------ --define(CONF(Conf), #{?CONF_NS_BINARY => Conf}). - -t_check_schema(_Config) -> - ConfigOk = #{ - <<"mechanism">> => <<"password_based">>, - <<"backend">> => <<"built_in_database">>, - <<"user_id_type">> => <<"username">>, - <<"password_hash_algorithm">> => #{ - <<"name">> => <<"bcrypt">>, - <<"salt_rounds">> => <<"6">> - } - }, - - hocon_tconf:check_plain(emqx_authn_mnesia, ?CONF(ConfigOk)), - - ConfigNotOk = #{ - <<"mechanism">> => <<"password_based">>, - <<"backend">> => <<"built_in_database">>, - <<"user_id_type">> => <<"username">>, - <<"password_hash_algorithm">> => #{ - <<"name">> => <<"md6">> - } - }, - - ?assertException( - throw, - {emqx_authn_mnesia, _}, - hocon_tconf:check_plain(emqx_authn_mnesia, ?CONF(ConfigNotOk)) - ). - t_create(_) -> Config0 = config(), @@ -197,7 +167,7 @@ t_list_users(_) -> #{is_superuser := false, user_id := _}, #{is_superuser := false, user_id := _} ], - meta := #{page := 1, limit := 2, count := 3} + meta := #{page := 1, limit := 2, count := 3, hasnext := true} } = emqx_authn_mnesia:list_users( #{<<"page">> => 1, <<"limit">> => 2}, State @@ -205,7 +175,7 @@ t_list_users(_) -> #{ data := [#{is_superuser := false, user_id := _}], - meta := #{page := 2, limit := 2, count := 3} + meta := #{page := 2, limit := 2, count := 3, hasnext := false} } = emqx_authn_mnesia:list_users( #{<<"page">> => 2, <<"limit">> => 2}, State @@ -213,7 +183,7 @@ t_list_users(_) -> #{ data := [#{is_superuser := false, user_id := <<"u3">>}], - meta := #{page := 1, limit := 20, count := 0} + meta := #{page := 1, limit := 20, hasnext := false} } = emqx_authn_mnesia:list_users( #{ <<"page">> => 1, diff --git a/apps/emqx_authn/test/emqx_authn_mysql_SUITE.erl b/apps/emqx_authn/test/emqx_authn_mysql_SUITE.erl index 7681b6ee3..9d2c5cf63 100644 --- a/apps/emqx_authn/test/emqx_authn_mysql_SUITE.erl +++ b/apps/emqx_authn/test/emqx_authn_mysql_SUITE.erl @@ -516,13 +516,13 @@ init_seeds() -> ). q(Sql) -> - emqx_resource:query( + emqx_resource:simple_sync_query( ?MYSQL_RESOURCE, {sql, Sql} ). q(Sql, Params) -> - emqx_resource:query( + emqx_resource:simple_sync_query( ?MYSQL_RESOURCE, {sql, Sql, Params} ). diff --git a/apps/emqx_authn/test/emqx_authn_pgsql_SUITE.erl b/apps/emqx_authn/test/emqx_authn_pgsql_SUITE.erl index 66529525b..075ae5cb7 100644 --- a/apps/emqx_authn/test/emqx_authn_pgsql_SUITE.erl +++ b/apps/emqx_authn/test/emqx_authn_pgsql_SUITE.erl @@ -105,18 +105,12 @@ t_update_with_invalid_config(_Config) -> AuthConfig = raw_pgsql_auth_config(), BadConfig = maps:without([<<"server">>], AuthConfig), ?assertMatch( - {error, - {bad_authenticator_config, #{ - reason := - {emqx_authn_pgsql, [ - #{ - kind := validation_error, - path := "authentication.server", - reason := required_field, - value := undefined - } - ]} - }}}, + {error, #{ + kind := validation_error, + matched_type := "authn:postgresql", + path := "authentication.1.server", + reason := required_field + }}, emqx:update_config( ?PATH, {create_authenticator, ?GLOBAL, BadConfig} @@ -594,13 +588,13 @@ create_user(Values) -> ok. q(Sql) -> - emqx_resource:query( + emqx_resource:simple_sync_query( ?PGSQL_RESOURCE, {query, Sql} ). 
q(Sql, Params) -> - emqx_resource:query( + emqx_resource:simple_sync_query( ?PGSQL_RESOURCE, {query, Sql, Params} ). diff --git a/apps/emqx_authn/test/emqx_authn_redis_SUITE.erl b/apps/emqx_authn/test/emqx_authn_redis_SUITE.erl index 1df1faa4c..1354e06cc 100644 --- a/apps/emqx_authn/test/emqx_authn_redis_SUITE.erl +++ b/apps/emqx_authn/test/emqx_authn_redis_SUITE.erl @@ -160,10 +160,12 @@ t_create_invalid_config(_Config) -> Config0 = raw_redis_auth_config(), Config = maps:without([<<"server">>], Config0), ?assertMatch( - {error, - {bad_authenticator_config, #{ - reason := {emqx_authn_redis, [#{kind := validation_error}]} - }}}, + {error, #{ + kind := validation_error, + matched_type := "authn:redis_single", + path := "authentication.1.server", + reason := required_field + }}, emqx:update_config(?PATH, {create_authenticator, ?GLOBAL, Config}) ), ?assertMatch([], emqx_config:get_raw([authentication])), @@ -588,7 +590,7 @@ init_seeds() -> ). q(Command) -> - emqx_resource:query( + emqx_resource:simple_sync_query( ?REDIS_RESOURCE, {cmd, Command} ). diff --git a/apps/emqx_authn/test/emqx_authn_schema_SUITE.erl b/apps/emqx_authn/test/emqx_authn_schema_SUITE.erl new file mode 100644 index 000000000..3afb8e973 --- /dev/null +++ b/apps/emqx_authn/test/emqx_authn_schema_SUITE.erl @@ -0,0 +1,191 @@ +-module(emqx_authn_schema_SUITE). + +-compile(export_all). +-compile(nowarn_export_all). + +-include_lib("eunit/include/eunit.hrl"). + +-include("emqx_authn.hrl"). + +all() -> + emqx_common_test_helpers:all(?MODULE). + +init_per_suite(Config) -> + _ = application:load(emqx_conf), + emqx_common_test_helpers:start_apps([emqx_authn]), + Config. + +end_per_suite(_) -> + emqx_common_test_helpers:stop_apps([emqx_authn]), + ok. + +init_per_testcase(_Case, Config) -> + {ok, _} = emqx_cluster_rpc:start_link(node(), emqx_cluster_rpc, 1000), + mria:clear_table(emqx_authn_mnesia), + Config. + +end_per_testcase(_Case, Config) -> + Config. + +-define(CONF(Conf), #{?CONF_NS_BINARY => Conf}). + +t_check_schema(_Config) -> + Check = fun(C) -> emqx_config:check_config(emqx_schema, ?CONF(C)) end, + ConfigOk = #{ + <<"mechanism">> => <<"password_based">>, + <<"backend">> => <<"built_in_database">>, + <<"user_id_type">> => <<"username">>, + <<"password_hash_algorithm">> => #{ + <<"name">> => <<"bcrypt">>, + <<"salt_rounds">> => <<"6">> + } + }, + _ = Check(ConfigOk), + + ConfigNotOk = #{ + <<"mechanism">> => <<"password_based">>, + <<"backend">> => <<"built_in_database">>, + <<"user_id_type">> => <<"username">>, + <<"password_hash_algorithm">> => #{ + <<"name">> => <<"md6">> + } + }, + ?assertThrow( + #{ + path := "authentication.1.password_hash_algorithm.name", + matched_type := "authn:builtin_db/authn-hash:simple", + reason := unable_to_convert_to_enum_symbol + }, + Check(ConfigNotOk) + ), + + ConfigMissingAlgoName = #{ + <<"mechanism">> => <<"password_based">>, + <<"backend">> => <<"built_in_database">>, + <<"user_id_type">> => <<"username">>, + <<"password_hash_algorithm">> => #{ + <<"foo">> => <<"bar">> + } + }, + + ?assertThrow( + #{ + path := "authentication.1.password_hash_algorithm", + reason := "algorithm_name_missing", + matched_type := "authn:builtin_db" + }, + Check(ConfigMissingAlgoName) + ). 
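The new emqx_authn_schema_SUITE validates a raw authn entry by wrapping it under the authentication root and running the full emqx_schema check. A condensed sketch of that helper pattern, assuming ?CONF_NS_BINARY from emqx_authn.hrl expands to <<"authentication">> (the function name is illustrative):

%% Sketch of the validation entry point these tests exercise.
check_authn_entry(RawEntry) ->
    Conf = #{<<"authentication">> => RawEntry},   %% i.e. ?CONF(RawEntry)
    {_Mappings, Checked} = emqx_config:check_config(emqx_schema, Conf),
    Checked.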
+ +t_union_member_selector(_) -> + %% default value for authentication + ?assertMatch(#{authentication := []}, check(undefined)), + C1 = #{<<"backend">> => <<"built_in_database">>}, + ?assertThrow( + #{ + path := "authentication.1", + reason := "missing_mechanism_field" + }, + check(C1) + ), + C2 = <<"foobar">>, + ?assertThrow( + #{ + path := "authentication.1", + reason := "not_a_struct", + value := <<"foobar">> + }, + check(C2) + ), + Base = #{ + <<"user_id_type">> => <<"username">>, + <<"password_hash_algorithm">> => #{ + <<"name">> => <<"plain">> + } + }, + BadBackend = Base#{<<"mechanism">> => <<"password_based">>, <<"backend">> => <<"bar">>}, + ?assertThrow( + #{ + reason := "unknown_backend", + backend := <<"bar">> + }, + check(BadBackend) + ), + BadMechanism = Base#{<<"mechanism">> => <<"foo">>, <<"backend">> => <<"built_in_database">>}, + ?assertThrow( + #{ + reason := "unsupported_mechanism", + mechanism := <<"foo">>, + backend := <<"built_in_database">> + }, + check(BadMechanism) + ), + BadCombination = Base#{<<"mechanism">> => <<"scram">>, <<"backend">> => <<"http">>}, + ?assertThrow( + #{ + reason := "unsupported_mechanism", + mechanism := <<"scram">>, + backend := <<"http">> + }, + check(BadCombination) + ), + ok. + +t_http_auth_selector(_) -> + C1 = #{ + <<"mechanism">> => <<"password_based">>, + <<"backend">> => <<"http">> + }, + ?assertThrow( + #{ + field_name := method, + expected := "get | post" + }, + check(C1) + ), + ok. + +t_mongo_auth_selector(_) -> + C1 = #{ + <<"mechanism">> => <<"password_based">>, + <<"backend">> => <<"mongodb">> + }, + ?assertThrow( + #{ + field_name := mongo_type, + expected := "single | rs | sharded" + }, + check(C1) + ), + ok. + +t_redis_auth_selector(_) -> + C1 = #{ + <<"mechanism">> => <<"password_based">>, + <<"backend">> => <<"redis">> + }, + ?assertThrow( + #{ + field_name := redis_type, + expected := "single | cluster | sentinel" + }, + check(C1) + ), + ok. + +t_redis_jwt_selector(_) -> + C1 = #{ + <<"mechanism">> => <<"jwt">> + }, + ?assertThrow( + #{ + field_name := use_jwks, + expected := "true | false" + }, + check(C1) + ), + ok. + +check(C) -> + {_Mappings, Checked} = emqx_config:check_config(emqx_schema, ?CONF(C)), + Checked. diff --git a/apps/emqx_authn/test/emqx_authn_schema_tests.erl b/apps/emqx_authn/test/emqx_authn_schema_tests.erl new file mode 100644 index 000000000..622655b2d --- /dev/null +++ b/apps/emqx_authn/test/emqx_authn_schema_tests.erl @@ -0,0 +1,135 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_authn_schema_tests). + +-include_lib("eunit/include/eunit.hrl"). + +%% schema error +-define(ERR(Reason), {error, Reason}). 
+ +union_member_selector_mongo_test_() -> + Check = fun(Txt) -> check(emqx_authn_mongodb, Txt) end, + [ + {"unknown", fun() -> + ?assertMatch( + ?ERR(#{field_name := mongo_type, expected := _}), + Check("{mongo_type: foobar}") + ) + end}, + {"single", fun() -> + ?assertMatch( + ?ERR(#{matched_type := "authn:mongo_single"}), + Check("{mongo_type: single}") + ) + end}, + {"replica-set", fun() -> + ?assertMatch( + ?ERR(#{matched_type := "authn:mongo_rs"}), + Check("{mongo_type: rs}") + ) + end}, + {"sharded", fun() -> + ?assertMatch( + ?ERR(#{matched_type := "authn:mongo_sharded"}), + Check("{mongo_type: sharded}") + ) + end} + ]. + +union_member_selector_jwt_test_() -> + Check = fun(Txt) -> check(emqx_authn_jwt, Txt) end, + [ + {"unknown", fun() -> + ?assertMatch( + ?ERR(#{field_name := use_jwks, expected := "true | false"}), + Check("{use_jwks = 1}") + ) + end}, + {"jwks", fun() -> + ?assertMatch( + ?ERR(#{matched_type := "authn:jwt_jwks"}), + Check("{use_jwks = true}") + ) + end}, + {"publick-key", fun() -> + ?assertMatch( + ?ERR(#{matched_type := "authn:jwt_public_key"}), + Check("{use_jwks = false, public_key = 1}") + ) + end}, + {"hmac-based", fun() -> + ?assertMatch( + ?ERR(#{matched_type := "authn:jwt_hmac"}), + Check("{use_jwks = false}") + ) + end} + ]. + +union_member_selector_redis_test_() -> + Check = fun(Txt) -> check(emqx_authn_redis, Txt) end, + [ + {"unknown", fun() -> + ?assertMatch( + ?ERR(#{field_name := redis_type, expected := _}), + Check("{redis_type = 1}") + ) + end}, + {"single", fun() -> + ?assertMatch( + ?ERR(#{matched_type := "authn:redis_single"}), + Check("{redis_type = single}") + ) + end}, + {"cluster", fun() -> + ?assertMatch( + ?ERR(#{matched_type := "authn:redis_cluster"}), + Check("{redis_type = cluster}") + ) + end}, + {"sentinel", fun() -> + ?assertMatch( + ?ERR(#{matched_type := "authn:redis_sentinel"}), + Check("{redis_type = sentinel}") + ) + end} + ]. + +union_member_selector_http_test_() -> + Check = fun(Txt) -> check(emqx_authn_http, Txt) end, + [ + {"unknown", fun() -> + ?assertMatch( + ?ERR(#{field_name := method, expected := _}), + Check("{method = 1}") + ) + end}, + {"get", fun() -> + ?assertMatch( + ?ERR(#{matched_type := "authn:http_get"}), + Check("{method = get}") + ) + end}, + {"post", fun() -> + ?assertMatch( + ?ERR(#{matched_type := "authn:http_post"}), + Check("{method = post}") + ) + end} + ]. + +check(Module, HoconConf) -> + emqx_hocon:check(Module, ["authentication= ", HoconConf]). diff --git a/apps/emqx_authn/test/emqx_enhanced_authn_scram_mnesia_SUITE.erl b/apps/emqx_authn/test/emqx_enhanced_authn_scram_mnesia_SUITE.erl index e1a2586cd..f52e895cc 100644 --- a/apps/emqx_authn/test/emqx_enhanced_authn_scram_mnesia_SUITE.erl +++ b/apps/emqx_authn/test/emqx_enhanced_authn_scram_mnesia_SUITE.erl @@ -20,6 +20,7 @@ -compile(nowarn_export_all). -include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). -include_lib("emqx/include/emqx_mqtt.hrl"). -include("emqx_authn.hrl"). @@ -37,9 +38,11 @@ all() -> init_per_suite(Config) -> _ = application:load(emqx_conf), ok = emqx_common_test_helpers:start_apps([emqx_authn]), - Config. + IdleTimeout = emqx_config:get([mqtt, idle_timeout]), + [{idle_timeout, IdleTimeout} | Config]. -end_per_suite(_Config) -> +end_per_suite(Config) -> + ok = emqx_config:put([mqtt, idle_timeout], ?config(idle_timeout, Config)), ok = emqx_common_test_helpers:stop_apps([emqx_authn]). 
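These eunit generators feed HOCON text straight into emqx_hocon:check/2, prefixed with "authentication= " so the fragment lands under the right root. The assertion that matters is matched_type: it shows the selector committed to the intended union member before ordinary field validation failed. Illustrative outcome (error shape per the ?ERR macro above; the elided keys are assumptions):

%% > emqx_hocon:check(emqx_authn_redis, ["authentication= ", "{redis_type = single}"]).
%% {error, #{matched_type => "authn:redis_single", ...}}
%% i.e. the right member was selected; the failure is just the other
%% required connector fields being absent.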
init_per_testcase(_Case, Config) -> @@ -99,6 +102,8 @@ t_authenticate(_Config) -> init_auth(Username, Password, Algorithm), + ok = emqx_config:put([mqtt, idle_timeout], 500), + {ok, Pid} = emqx_authn_mqtt_test_client:start_link("127.0.0.1", 1883), ClientFirstMessage = esasl_scram:client_first_message(Username), @@ -115,6 +120,9 @@ t_authenticate(_Config) -> ok = emqx_authn_mqtt_test_client:send(Pid, ConnectPacket), + %% Intentional sleep to trigger idle timeout for the connection not yet authenticated + ok = ct:sleep(1000), + ?AUTH_PACKET( ?RC_CONTINUE_AUTHENTICATION, #{'Authentication-Data' := ServerFirstMessage} @@ -150,6 +158,28 @@ t_authenticate(_Config) -> ServerFinalMessage, ClientCache#{algorithm => Algorithm} ). +t_authenticate_bad_props(_Config) -> + Algorithm = sha512, + Username = <<"u">>, + Password = <<"p">>, + + init_auth(Username, Password, Algorithm), + + {ok, Pid} = emqx_authn_mqtt_test_client:start_link("127.0.0.1", 1883), + + ConnectPacket = ?CONNECT_PACKET( + #mqtt_packet_connect{ + proto_ver = ?MQTT_PROTO_V5, + properties = #{ + 'Authentication-Method' => <<"SCRAM-SHA-512">> + } + } + ), + + ok = emqx_authn_mqtt_test_client:send(Pid, ConnectPacket), + + ?CONNACK_PACKET(?RC_NOT_AUTHORIZED) = receive_packet(). + t_authenticate_bad_username(_Config) -> Algorithm = sha512, Username = <<"u">>, @@ -300,14 +330,14 @@ t_list_users(_) -> #{ data := [?USER_MAP, ?USER_MAP], - meta := #{page := 1, limit := 2, count := 3} + meta := #{page := 1, limit := 2, count := 3, hasnext := true} } = emqx_enhanced_authn_scram_mnesia:list_users( #{<<"page">> => 1, <<"limit">> => 2}, State ), #{ data := [?USER_MAP], - meta := #{page := 2, limit := 2, count := 3} + meta := #{page := 2, limit := 2, count := 3, hasnext := false} } = emqx_enhanced_authn_scram_mnesia:list_users( #{<<"page">> => 2, <<"limit">> => 2}, State @@ -319,7 +349,7 @@ t_list_users(_) -> is_superuser := _ } ], - meta := #{page := 1, limit := 3, count := 0} + meta := #{page := 1, limit := 3, hasnext := false} } = emqx_enhanced_authn_scram_mnesia:list_users( #{ <<"page">> => 1, diff --git a/apps/emqx_authz/README.md b/apps/emqx_authz/README.md index 8c05f21be..af543e478 100644 --- a/apps/emqx_authz/README.md +++ b/apps/emqx_authz/README.md @@ -15,7 +15,6 @@ authz:{ pool_size: 1 username: root password: public - auto_reconnect: true ssl: { enable: true cacertfile: "etc/certs/cacert.pem" @@ -33,7 +32,6 @@ authz:{ pool_size: 1 username: root password: public - auto_reconnect: true ssl: {enable: false} } sql: "select ipaddress, username, clientid, action, permission, topic from mqtt_authz where ipaddr = ${peerhost} or username = ${username} or username = '$all' or clientid = ${clientid}" @@ -45,7 +43,6 @@ authz:{ database: 0 pool_size: 1 password: public - auto_reconnect: true ssl: {enable: false} } cmd: "HGETALL mqtt_authz:${username}" diff --git a/apps/emqx_authz/etc/acl.conf b/apps/emqx_authz/etc/acl.conf index d39490d46..a64287a4a 100644 --- a/apps/emqx_authz/etc/acl.conf +++ b/apps/emqx_authz/etc/acl.conf @@ -23,7 +23,7 @@ %% -type(rule() :: {permission(), who(), access(), topics()} | {permission(), all}). %%-------------------------------------------------------------------- -{allow, {username, "^dashboard?"}, subscribe, ["$SYS/#"]}. +{allow, {username, {re, "^dashboard$"}}, subscribe, ["$SYS/#"]}. {allow, {ipaddr, "127.0.0.1"}, all, ["$SYS/#", "#"]}. 
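On the acl.conf template fix just above: under the rule syntax shown, a pattern appears to be compiled as a regular expression only when wrapped in the explicit {re, _} tuple, so the bare string "^dashboard?" would be compared literally and could never equal a real username; and even read as a regex it was off, since ? makes the trailing d optional and the missing $ anchor admits any continuation:

%% old pattern, if compiled as a regex, would also accept e.g.:
%%   "dashboar", "dashboards", "dashboard-admin", "dashboarX"
%% new pattern {re, "^dashboard$"} accepts exactly:
%%   "dashboard"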
diff --git a/apps/emqx_authz/etc/emqx_authz.conf b/apps/emqx_authz/etc/emqx_authz.conf index e7fd73498..167b12b3f 100644 --- a/apps/emqx_authz/etc/emqx_authz.conf +++ b/apps/emqx_authz/etc/emqx_authz.conf @@ -1,14 +1,5 @@ authorization { deny_action = ignore no_match = allow - sources = [ - { - type = file - enable = true - # This file is immutable to EMQX. - # Once new rules are created from dashboard UI or HTTP API, - # the file 'data/authz/acl.conf' is used instead of this one - path = "{{ platform_etc_dir }}/acl.conf" - } - ] + cache = { enable = true } } diff --git a/apps/emqx_authz/i18n/emqx_authz_api_cache_i18n.conf b/apps/emqx_authz/i18n/emqx_authz_api_cache_i18n.conf deleted file mode 100644 index 9c620a22d..000000000 --- a/apps/emqx_authz/i18n/emqx_authz_api_cache_i18n.conf +++ /dev/null @@ -1,8 +0,0 @@ -emqx_authz_api_cache { - authorization_cache_delete { - desc { - en: """Clean all authorization cache in the cluster.""" - zh: """清除集群中所有授权数据缓存。""" - } - } -} diff --git a/apps/emqx_authz/i18n/emqx_authz_api_mnesia_i18n.conf b/apps/emqx_authz/i18n/emqx_authz_api_mnesia_i18n.conf deleted file mode 100644 index 50f644097..000000000 --- a/apps/emqx_authz/i18n/emqx_authz_api_mnesia_i18n.conf +++ /dev/null @@ -1,177 +0,0 @@ -emqx_authz_api_mnesia { - users_username_get { - desc { - en: """Show the list of record for username""" - zh: """获取内置数据库中所有用户名类型的规则记录""" - } - } - - users_username_post { - desc { - en: """Add new records for username""" - zh: """添加内置数据库中用户名类型的规则记录""" - } - } - - users_clientid_get { - desc { - en: """Show the list of record for clientid""" - zh: """获取内置数据库中所有客户端标识符类型的规则记录""" - } - } - - users_clientid_post { - desc { - en: """Add new records for clientid""" - zh: """添加内置数据库中客户端标识符类型的规则记录""" - } - } - - - user_username_get { - desc { - en: """Get record info for username""" - zh: """获取内置数据库中指定用户名类型的规则记录""" - } - } - - user_username_put { - desc { - en: """Set record for username""" - zh: """更新内置数据库中指定用户名类型的规则记录""" - } - } - - user_username_delete { - desc { - en: """Delete one record for username""" - zh: """删除内置数据库中指定用户名类型的规则记录""" - } - } - - user_clientid_get { - desc { - en: """Get record info for clientid""" - zh: """获取内置数据库中指定客户端标识符类型的规则记录""" - } - } - - user_clientid_put { - desc { - en: """Set record for clientid""" - zh: """更新内置数据库中指定客户端标识符类型的规则记录""" - } - } - - user_clientid_delete { - desc { - en: """Delete one record for clientid""" - zh: """删除内置数据库中指定客户端标识符类型的规则记录""" - } - } - - - rules_for_all_get { - desc { - en: """Show the list of rules for all""" - zh: """列出为所有客户端启用的规则列表""" - } - } - - rules_for_all_post { - desc { - en: """ -Create/Update the list of rules for all. 
-Set a empty list to clean up rules -""" - zh: """ -创建/更新 为所有客户端启用的规则列表。 -设为空列表以清楚所有规则 -""" - } - } - - purge_all_delete { - desc { - en: """Purge all records for username/clientid/all""" - zh: """清除所有内置数据库中的规则, 用户名/客户端标识符/所有""" - } - } - - fuzzy_username { - desc { - en: """Fuzzy search `username` as substring""" - zh: """使用字串匹配模糊搜索用户名""" - } - label { - en: """fuzzy_username""" - zh: """用户名子串""" - } - } - - fuzzy_clientid { - desc { - en: """Fuzzy search `clientid` as substring""" - zh: """使用字串匹配模糊搜索客户端标识符""" - } - label { - en: """fuzzy_clientid""" - zh: """客户端标识符子串""" - } - } - - topic { - desc { - en: """Rule on specific topic""" - zh: """在指定主题上的规则""" - } - label { - en: """topic""" - zh: """主题""" - } - } - - permission { - desc { - en: """Permission""" - zh: """权限""" - } - label { - en: """permission""" - zh: """权限""" - } - } - - action { - desc { - en: """Authorized action (pub/sub/all)""" - zh: """被授权的行为 (发布/订阅/所有)""" - } - label { - en: """action""" - zh: """行为""" - } - } - - clientid { - desc { - en: """ClientID""" - zh: """客户端标识符""" - } - label { - en: """clientid""" - zh: """客户端标识符""" - } - } - - username { - desc { - en: """Username""" - zh: """用户名""" - } - label { - en: """username""" - zh: """用户名""" - } - } -} diff --git a/apps/emqx_authz/i18n/emqx_authz_api_schema_i18n.conf b/apps/emqx_authz/i18n/emqx_authz_api_schema_i18n.conf deleted file mode 100644 index afec5c109..000000000 --- a/apps/emqx_authz/i18n/emqx_authz_api_schema_i18n.conf +++ /dev/null @@ -1,189 +0,0 @@ -emqx_authz_api_schema { - enable { - desc { - en: """Set to true or false to disable this ACL provider.""" - zh: """设为 truefalse 以启用或禁用此访问控制数据源。""" - } - label { - en: """enable""" - zh: """enable""" - } - } - - type { - desc { - en: """Backend type.""" - zh: """数据后端类型。""" - } - label { - en: """type""" - zh: """type""" - } - } - -#==== authz_file - - rules { - desc { - en: """Authorization static file rules.""" - zh: """静态授权文件规则。""" - } - label { - en: """rules""" - zh: """规则""" - } - } - -#==== authz_http - - method { - desc { - en: """HTTP method.""" - zh: """HTTP 请求方法。""" - } - label { - en: """method""" - zh: """method""" - } - } - - url { - desc { - en: """URL of the auth server.""" - zh: """认证服务器 URL。""" - } - label { - en: """url""" - zh: """url""" - } - } - - headers { - desc { - en: """List of HTTP Headers.""" - zh: """HTTP Headers 列表""" - } - label { - en: """Headers""" - zh: """请求头""" - } - } - - headers_no_content_type { - desc { - en: """List of HTTP headers (without content-type).""" - zh: """HTTP Headers 列表(无 content-type)。""" - } - label { - en: """headers_no_content_type""" - zh: """请求头(无 content-type)""" - } - } - - body { - desc { - en: """HTTP request body.""" - zh: """HTTP 请求体。""" - } - label { - en: """body""" - zh: """请求体""" - } - } - - request_timeout { - desc { - en: """Request timeout.""" - zh: """请求超时时间。""" - } - label { - en: """request_timeout""" - zh: """请求超时""" - } - } - -#==== authz_mnesia - -# only common fields(`enable` and `type`) - -#==== authz_mongo - - collection { - desc { - en: """`MongoDB` collection containing the authorization data.""" - zh: """`MongoDB` 授权数据集。""" - } - label { - en: """collection""" - zh: """数据集""" - } - } - - filter { - desc { - en: """ -Conditional expression that defines the filter condition in the query. 
-Filter supports the following placeholders: - - ${username}: Will be replaced at runtime with Username used by the client when connecting - - ${clientid}: Will be replaced at runtime with Client ID used by the client when connecting -""" - zh: """ -在查询中定义过滤条件的条件表达式。 -过滤器支持如下占位符: -- ${username}: 将在运行时被替换为客户端连接时使用的用户名 -- ${clientid}: 将在运行时被替换为客户端连接时使用的客户端标识符 -""" - } - label { - en: """Filter""" - zh: """过滤器""" - } - } - -#==== authz_mysql - -# `query`, is common field - -#==== authz_pgsql - -# `query`, is common field - -#==== authz_redis - - cmd { - desc { - en: """Database query used to retrieve authorization data.""" - zh: """访问控制数据查询命令。""" - } - label { - en: """cmd""" - zh: """查询命令""" - } - } - -#==== common field for DBs (except mongodb and redis) - - query { - desc { - en: """Database query used to retrieve authorization data.""" - zh: """访问控制数据查询语句。""" - } - label { - en: """query""" - zh: """查询语句""" - } - } - -#==== fields - - position { - desc { - en: """Where to place the source.""" - zh: """认证数据源位置。""" - } - label { - en: """position""" - zh: """位置""" - } - } -} diff --git a/apps/emqx_authz/i18n/emqx_authz_api_settings_i18n.conf b/apps/emqx_authz/i18n/emqx_authz_api_settings_i18n.conf deleted file mode 100644 index b44580b34..000000000 --- a/apps/emqx_authz/i18n/emqx_authz_api_settings_i18n.conf +++ /dev/null @@ -1,15 +0,0 @@ -emqx_authz_api_settings { - authorization_settings_get { - desc { - en: """Get authorization settings""" - zh: """获取授权配置""" - } - } - - authorization_settings_put { - desc { - en: """Update authorization settings""" - zh: """更新授权配置""" - } - } -} diff --git a/apps/emqx_authz/i18n/emqx_authz_api_sources_i18n.conf b/apps/emqx_authz/i18n/emqx_authz_api_sources_i18n.conf deleted file mode 100644 index c5f0eaad4..000000000 --- a/apps/emqx_authz/i18n/emqx_authz_api_sources_i18n.conf +++ /dev/null @@ -1,116 +0,0 @@ -emqx_authz_api_sources { - authorization_sources_get { - desc { - en: """List all authorization sources""" - zh: """列出所有授权数据源""" - } - } - - authorization_sources_post { - desc { - en: """Add a new source""" - zh: """添加授权数据源""" - } - } - - authorization_sources_type_get { - desc { - en: """Get a authorization source""" - zh: """获取指定类型的授权数据源""" - } - } - - authorization_sources_type_put { - desc { - en: """Update source""" - zh: """更新指定类型的授权数据源""" - } - } - - authorization_sources_type_delete { - desc { - en: """Delete source""" - zh: """删除指定类型的授权数据源""" - } - } - - authorization_sources_type_status_get { - desc { - en: """Get a authorization source""" - zh: """获取指定授权数据源的状态""" - } - } - - authorization_sources_type_move_post { - desc { - en: """Change the exection order of sources""" - zh: """更新授权数据源的优先执行顺序""" - } - } - - sources { - desc { - en: """Authorization source""" - zh: """授权数据源列表""" - } - label { - en: """sources""" - zh: """数据源列表""" - } - } - - sources { - desc { - en: """Authorization sources""" - zh: """授权数据源列表""" - } - label { - en: """sources""" - zh: """数据源列表""" - } - } - - source_config { - desc { - en: """Source config""" - zh: """数据源配置""" - } - label { - en: """source_config""" - zh: """数据源配置""" - } - } - - source { - desc { - en: """Authorization source""" - zh: """授权数据源""" - } - label { - en: """source""" - zh: """数据源""" - } - } - - source_config { - desc { - en: """Source config""" - zh: """数据源配置""" - } - label { - en: """source_config""" - zh: """数据源配置""" - } - } - - source_type { - desc { - en: """Authorization type""" - zh: """数据源类型""" - } - label { - en: """source_type""" - zh: """数据源类型""" - } - } -} diff --git 
a/apps/emqx_authz/i18n/emqx_authz_schema_i18n.conf b/apps/emqx_authz/i18n/emqx_authz_schema_i18n.conf deleted file mode 100644 index a10128592..000000000 --- a/apps/emqx_authz/i18n/emqx_authz_schema_i18n.conf +++ /dev/null @@ -1,553 +0,0 @@ -emqx_authz_schema { - sources { - desc { - en: """ -Authorization data sources.
-An array of authorization (ACL) data providers. -It is designed as an array, not a hash-map, so the sources can be -ordered to form a chain of access controls.
- -When authorizing a 'publish' or 'subscribe' action, the configured -sources are checked in order. When checking an ACL source, -in case the client (identified by username or client ID) is not found, -it moves on to the next source. And it stops immediately -once an 'allow' or 'deny' decision is returned.
- -If the client is not found in any of the sources, -the default action configured in 'authorization.no_match' is applied.
- -NOTE: -The source elements are identified by their 'type'. -It is NOT allowed to configure two or more sources of the same type. -""" - zh: """ -授权数据源。
-授权(ACL)数据源的列表。 -它被设计为一个数组,而不是一个散列映射, -所以可以作为链式访问控制。
- -当授权一个 'publish' 或 'subscribe' 行为时, -该配置列表中的所有数据源将按顺序进行检查。 -如果在某个客户端未找到时(使用 ClientID 或 Username)。 -将会移动到下一个数据源。直至得到 'allow' 或 'deny' 的结果。
- -如果在任何数据源中都未找到对应的客户端信息。 -配置的默认行为 ('authorization.no_match') 将生效。
- -注意: -数据源使用 'type' 进行标识。 -使用同一类型的数据源多于一次不被允许。 -""" - } - label { - en: """sources""" - zh: """数据源""" - } - } - - authorization { - desc { - en: """Configuration related to the client authorization.""" - zh: """客户端授权相关配置""" - } - label { - en: """authorization""" - zh: """授权""" - } - } - - enable { - desc { - en: """Set to true or false to disable this ACL provider""" - zh: """设为 truefalse 以启用或禁用此访问控制数据源""" - } - label { - en: """enable""" - zh: """enable""" - } - } - - type { - desc { - en: """Backend type.""" - zh: """数据后端类型""" - } - label { - en: """type""" - zh: """type""" - } - } - -#==== authz_file - - file { - desc { - en: """Authorization using a static file.""" - zh: """使用静态文件授权""" - } - label { - en: """file""" - zh: """文件""" - } - } - - path { - desc { - en: """ -Path to the file which contains the ACL rules. -If the file provisioned before starting EMQX node, -it can be placed anywhere as long as EMQX has read access to it. -That is, EMQX will treat it as read only. - -In case the rule-set is created or updated from EMQX Dashboard or HTTP API, -a new file will be created and placed in `authz` subdirectory inside EMQX's `data_dir`, -and the old file will not be used anymore. -""" - zh: """ -包含 ACL 规则的文件路径。 -如果在启动 EMQX 节点前预先配置该路径, -那么可以将该文件置于任何 EMQX 可以访问到的位置。 - -如果从 EMQX Dashboard 或 HTTP API 创建或修改了规则集, -那么EMQX将会生成一个新的文件并将它存放在 `data_dir` 下的 `authz` 子目录中, -并从此弃用旧的文件。""" - } - label { - en: """path""" - zh: """path""" - } - } - -#==== authz_http - - http_get { - desc { - en: """Authorization using an external HTTP server (via GET requests).""" - zh: """使用外部 HTTP 服务器授权(GET 请求)。""" - } - label { - en: """http_get""" - zh: """http_get""" - } - } - - http_post { - desc { - en: """Authorization using an external HTTP server (via POST requests).""" - zh: """使用外部 HTTP 服务器授权(POST 请求)。""" - } - label { - en: """http_post""" - zh: """http_post""" - } - } - - method { - desc { - en: """HTTP method.""" - zh: """HTTP 请求方法""" - } - label { - en: """method""" - zh: """method""" - } - } - - url { - desc { - en: """URL of the auth server.""" - zh: """授权 HTTP 服务器地址。""" - } - label { - en: """URL""" - zh: """URL""" - } - } - - headers { - desc { - en: """List of HTTP Headers.""" - zh: """HTTP Headers 列表""" - } - label { - en: """Headers""" - zh: """请求头""" - } - } - - headers_no_content_type { - desc { - en: """List of HTTP headers (without content-type).""" - zh: """HTTP Headers 列表 (无 content-type) 。""" - } - label { - en: """headers_no_content_type""" - zh: """请求头(无 content-type)""" - } - } - - body { - desc { - en: """HTTP request body.""" - zh: """HTTP request body。""" - } - label { - en: """Request Body""" - zh: """Request Body""" - } - } - - request_timeout { - desc { - en: """HTTP request timeout.""" - zh: """HTTP 请求超时时长。""" - } - label { - en: """Request Timeout""" - zh: """请求超时时间""" - } - } - -#==== authz_mnesia - - mnesia { - desc { - en: """Authorization using a built-in database (mnesia).""" - zh: """使用内部数据库授权(mnesia)。""" - } - label { - en: """mnesia""" - zh: """mnesia""" - } - } - -#==== authz_mongo - - mongo_single { - desc { - en: """Authorization using a single MongoDB instance.""" - zh: """使用 MongoDB 授权(单实例)。""" - } - label { - en: """mongo_single""" - zh: """mongo_single""" - } - } - - mongo_rs { - desc { - en: """Authorization using a MongoDB replica set.""" - zh: """使用 MongoDB 授权(副本集模式)""" - } - label { - en: """mongo_rs""" - zh: """mongo_rs""" - } - } - - mongo_sharded { - desc { - en: """Authorization using a sharded MongoDB cluster.""" - zh: """使用 MongoDB 授权(分片集群模式)。""" - } - 
label { - en: """mongo_sharded""" - zh: """mongo_sharded""" - } - } - - collection { - desc { - en: """`MongoDB` collection containing the authorization data.""" - zh: """`MongoDB` 授权数据集。""" - } - label { - en: """collection""" - zh: """数据集""" - } - } - - filter { - desc { - en: """ -Conditional expression that defines the filter condition in the query. -Filter supports the following placeholders: - - ${username}: Will be replaced at runtime with Username used by the client when connecting - - ${clientid}: Will be replaced at runtime with Client ID used by the client when connecting -""" - zh: """ -在查询中定义过滤条件的条件表达式。 -过滤器支持如下占位符: -- ${username}:将在运行时被替换为客户端连接时使用的用户名 -- ${clientid}:将在运行时被替换为客户端连接时使用的客户端标识符 -""" - } - label { - en: """Filter""" - zh: """过滤器""" - } - } - -#==== authz_mysql - - mysql { - desc { - en: """Authorization using a MySQL database.""" - zh: """使用 MySQL 数据库授权""" - } - label { - en: """mysql""" - zh: """mysql""" - } - } - -#==== authz_pgsql - - postgresql { - desc { - en: """Authorization using a PostgreSQL database.""" - zh: """使用 PostgreSQL 数据库授权""" - } - label { - en: """postgresql""" - zh: """postgresql""" - } - } - -#==== authz_redis - - redis_single { - desc { - en: """Authorization using a single Redis instance.""" - zh: """使用 Redis 授权(单实例)。""" - } - label { - en: """redis_single""" - zh: """redis_single""" - } - } - - redis_sentinel { - desc { - en: """Authorization using a Redis Sentinel.""" - zh: """使用 Redis 授权(哨兵模式)。""" - } - label { - en: """redis_sentinel""" - zh: """redis_sentinel""" - } - } - - redis_cluster { - desc { - en: """Authorization using a Redis cluster.""" - zh: """使用 Redis 授权(集群模式)。""" - } - label { - en: """redis_cluster""" - zh: """redis_cluster""" - } - } - - cmd { - desc { - en: """Database query used to retrieve authorization data.""" - zh: """访问控制数据查询命令""" - } - label { - en: """cmd""" - zh: """查询命令""" - } - } - -#==== common field for DBs (except redis) - - query { - desc { - en: """Database query used to retrieve authorization data.""" - zh: """访问控制数据查询语句/查询命令。""" - } - label { - en: """query""" - zh: """查询语句""" - } - } - -#==== metrics field - - metrics { - desc { - en: """The metrics of the resource.""" - zh: """资源统计指标。""" - } - label: { - en: """Metrics""" - zh: """指标""" - } - } - - node_metrics { - desc { - en: """The metrics of the resource for each node.""" - zh: """每个节点上资源的统计指标。""" - } - label: { - en: """Resource Metrics in Node""" - zh: """节点资源指标""" - } - } - - status { - desc { - en: """The status of the resource.""" - zh: """资源状态。""" - } - label: { - en: """Status""" - zh: """状态""" - } - } - - node_status { - desc { - en: """The status of the resource for each node.""" - zh: """每个节点上资源的状态。""" - } - label: { - en: """Resource Status in Node""" - zh: """节点资源状态""" - } - } - - node_error { - desc { - en: """The error on the node.""" - zh: """节点上产生的错误。""" - } - label: { - en: """Error in Node""" - zh: """节点产生的错误""" - } - } - - matched { - desc { - en: """Count of times this resource was queried.""" - zh: """请求命中次数。""" - } - label: { - en: """Matched""" - zh: """已命中""" - } - } - - success { - desc { - en: """Count of successful queries.""" - zh: """请求成功次数。""" - } - label: { - en: """Success""" - zh: """成功""" - } - } - - failed { - desc { - en: """Count of failed queries.""" - zh: """请求失败次数。""" - } - label: { - en: """Failed""" - zh: """失败""" - } - } - - rate { - desc { - en: """The rate of matched, times/second.""" - zh: """命中速率,单位:次/秒。""" - } - label: { - en: """Rate""" - zh: """速率""" - } - } - - rate_max { - desc { - en: """The max rate of 
matched, times/second.""" - zh: """最大命中速率,单位:次/秒。""" - } - label: { - en: """Max Rate""" - zh: """最大速率""" - } - } - - rate_last5m { - desc { - en: """The average rate of matched in the last 5 minutes, times/second.""" - zh: """5分钟内平均命中速率,单位:次/秒。""" - } - label: { - en: """Rate in Last 5min""" - zh: """5分钟内速率""" - } - } - - node { - desc { - en: """Node name.""" - zh: """节点名称。""" - } - label: { - en: """Node Name.""" - zh: """节点名称。""" - } - } - - metrics_total { - desc { - en: """The total number of times the authorization rule was triggered.""" - zh: """授权实例被触发的总次数。""" - } - label: { - en: """The Total Number of Times the Authorization Rule was Triggered""" - zh: """授权实例被触发的总次数""" - } - } - - nomatch { - desc { - en: """The number of times that no authorization rules were matched.""" - zh: """没有匹配到任何授权规则的次数。""" - } - label: { - en: """The Number of Times that no Authorization Rules were Matched""" - zh: """没有匹配到任何授权规则的次数""" - } - } - - allow { - desc { - en: """The number of times the authorization was successful.""" - zh: """授权成功的次数。""" - } - label: { - en: """The Number of Times the Authorization was Successful""" - zh: """授权成功次数""" - } - } - - deny { - desc { - en: """The number of authorization failures.""" - zh: """授权失败的次数。""" - } - label: { - en: """The Number of Authorization Failures""" - zh: """授权失败次数""" - } - } -} diff --git a/apps/emqx_authz/rebar.config b/apps/emqx_authz/rebar.config index da2fa7807..9fd61b060 100644 --- a/apps/emqx_authz/rebar.config +++ b/apps/emqx_authz/rebar.config @@ -3,6 +3,7 @@ {erl_opts, [debug_info, nowarn_unused_import]}. {deps, [ {emqx, {path, "../emqx"}}, + {emqx_utils, {path, "../emqx_utils"}}, {emqx_connector, {path, "../emqx_connector"}} ]}. diff --git a/apps/emqx_authz/src/emqx_authz.app.src b/apps/emqx_authz/src/emqx_authz.app.src index c876fbf16..dd0325694 100644 --- a/apps/emqx_authz/src/emqx_authz.app.src +++ b/apps/emqx_authz/src/emqx_authz.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_authz, [ {description, "An OTP application"}, - {vsn, "0.1.10"}, + {vsn, "0.1.19"}, {registered, []}, {mod, {emqx_authz_app, []}}, {applications, [ diff --git a/apps/emqx_authz/src/emqx_authz.erl b/apps/emqx_authz/src/emqx_authz.erl index bf07f3083..682ad7f2e 100644 --- a/apps/emqx_authz/src/emqx_authz.erl +++ b/apps/emqx_authz/src/emqx_authz.erl @@ -20,6 +20,7 @@ -include("emqx_authz.hrl"). -include_lib("emqx/include/logger.hrl"). -include_lib("emqx/include/emqx_hooks.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). -ifdef(TEST). -compile(export_all). @@ -358,6 +359,7 @@ authorize_non_superuser( emqx_metrics:inc(?METRIC_DENY), {stop, #{result => deny, from => AuthzSource}}; nomatch -> + ?tp(authz_non_superuser, #{result => nomatch}), ?SLOG(info, #{ msg => "authorization_failed_nomatch", username => Username, @@ -388,6 +390,12 @@ do_authorize( nomatch -> emqx_metrics_worker:inc(authz_metrics, Type, nomatch), do_authorize(Client, PubSub, Topic, Tail); + %% {matched, allow | deny | ignore} + {matched, ignore} -> + do_authorize(Client, PubSub, Topic, Tail); + ignore -> + do_authorize(Client, PubSub, Topic, Tail); + %% {matched, allow | deny} Matched -> {Matched, Type} catch diff --git a/apps/emqx_authz/src/emqx_authz_api_mnesia.erl b/apps/emqx_authz/src/emqx_authz_api_mnesia.erl index 6a747496c..b39379b43 100644 --- a/apps/emqx_authz/src/emqx_authz_api_mnesia.erl +++ b/apps/emqx_authz/src/emqx_authz_api_mnesia.erl @@ -44,7 +44,7 @@ user/2, client/2, all/2, - purge/2 + rules/2 ]). 
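Note on the `do_authorize/4` hunk above: `ignore` and `{matched, ignore}` results now fall through to the next configured source, exactly like `nomatch`. A minimal, self-contained sketch of that chain traversal (the source funs here are hypothetical stand-ins for the real backends):

```erlang
-module(authz_chain_sketch).
-export([demo/0]).

%% Each "source" is a fun returning {matched, allow | deny} | ignore | nomatch.
%% ignore and nomatch both mean "ask the next source", mirroring the new clauses.
authorize(_Client, []) ->
    nomatch;
authorize(Client, [Source | Rest]) ->
    case Source(Client) of
        {matched, ignore} -> authorize(Client, Rest);
        ignore -> authorize(Client, Rest);
        nomatch -> authorize(Client, Rest);
        {matched, _} = Matched -> Matched
    end.

demo() ->
    Sources = [
        fun(_) -> ignore end, %% e.g. a source that opts out of the decision
        fun(#{username := <<"u1">>}) -> {matched, allow};
           (_) -> nomatch
        end
    ],
    {matched, allow} = authorize(#{username => <<"u1">>}, Sources).
```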
%% query funs @@ -70,19 +70,19 @@ api_spec() -> paths() -> [ - "/authorization/sources/built_in_database/username", - "/authorization/sources/built_in_database/clientid", - "/authorization/sources/built_in_database/username/:username", - "/authorization/sources/built_in_database/clientid/:clientid", - "/authorization/sources/built_in_database/all", - "/authorization/sources/built_in_database/purge-all" + "/authorization/sources/built_in_database/rules/users", + "/authorization/sources/built_in_database/rules/clients", + "/authorization/sources/built_in_database/rules/users/:username", + "/authorization/sources/built_in_database/rules/clients/:clientid", + "/authorization/sources/built_in_database/rules/all", + "/authorization/sources/built_in_database/rules" ]. %%-------------------------------------------------------------------- %% Schema for each URI %%-------------------------------------------------------------------- -schema("/authorization/sources/built_in_database/username") -> +schema("/authorization/sources/built_in_database/rules/users") -> #{ 'operationId' => users, get => @@ -128,7 +128,7 @@ schema("/authorization/sources/built_in_database/username") -> } } }; -schema("/authorization/sources/built_in_database/clientid") -> +schema("/authorization/sources/built_in_database/rules/clients") -> #{ 'operationId' => clients, get => @@ -174,7 +174,7 @@ schema("/authorization/sources/built_in_database/clientid") -> } } }; -schema("/authorization/sources/built_in_database/username/:username") -> +schema("/authorization/sources/built_in_database/rules/users/:username") -> #{ 'operationId' => user, get => @@ -227,7 +227,7 @@ schema("/authorization/sources/built_in_database/username/:username") -> } } }; -schema("/authorization/sources/built_in_database/clientid/:clientid") -> +schema("/authorization/sources/built_in_database/rules/clients/:clientid") -> #{ 'operationId' => client, get => @@ -280,20 +280,20 @@ schema("/authorization/sources/built_in_database/clientid/:clientid") -> } } }; -schema("/authorization/sources/built_in_database/all") -> +schema("/authorization/sources/built_in_database/rules/all") -> #{ 'operationId' => all, get => #{ tags => [<<"authorization">>], - description => ?DESC(rules_for_all_get), + description => ?DESC(rules_all_get), responses => #{200 => swagger_with_example({rules, ?TYPE_REF}, {all, ?PUT_MAP_EXAMPLE})} }, post => #{ tags => [<<"authorization">>], - description => ?DESC(rules_for_all_post), + description => ?DESC(rules_all_post), 'requestBody' => swagger_with_example({rules, ?TYPE_REF}, {all, ?PUT_MAP_EXAMPLE}), responses => @@ -303,15 +303,24 @@ schema("/authorization/sources/built_in_database/all") -> [?BAD_REQUEST], <<"Bad rule schema">> ) } - } - }; -schema("/authorization/sources/built_in_database/purge-all") -> - #{ - 'operationId' => purge, + }, delete => #{ tags => [<<"authorization">>], - description => ?DESC(purge_all_delete), + description => ?DESC(rules_all_delete), + responses => + #{ + 204 => <<"Deleted">> + } + } + }; +schema("/authorization/sources/built_in_database/rules") -> + #{ + 'operationId' => rules, + delete => + #{ + tags => [<<"authorization">>], + description => ?DESC(rules_delete), responses => #{ 204 => <<"Deleted">>, @@ -555,9 +564,12 @@ all(get, _) -> end; all(post, #{body := #{<<"rules">> := Rules}}) -> emqx_authz_mnesia:store_rules(all, format_rules(Rules)), + {204}; +all(delete, _) -> + emqx_authz_mnesia:store_rules(all, []), {204}. 
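The endpoint rename above is worth spelling out: per-user and per-client rule records now live under a common `rules` prefix, `DELETE .../rules/all` clears only the catch-all rule set, and `DELETE .../rules` (next hunk) purges every record. A hypothetical call sequence in the style of the test helpers this PR switches to (`emqx_mgmt_api_test_util`):

```erlang
-import(emqx_mgmt_api_test_util, [request/3, uri/1]).

%% Sketch: exercising the renamed built_in_database rule endpoints.
demo_rules_api() ->
    Base = ["authorization", "sources", "built_in_database", "rules"],
    %% create per-user rules (formerly POST .../built_in_database/username)
    {ok, 204, _} = request(post, uri(Base ++ ["users"]),
                           [#{username => <<"user1">>, rules => []}]),
    %% set, then clear, the catch-all rule set
    {ok, 204, _} = request(post, uri(Base ++ ["all"]), #{rules => []}),
    {ok, 204, _} = request(delete, uri(Base ++ ["all"]), []),
    %% purge everything (formerly DELETE .../purge-all; per the handler below,
    %% rejected with 400 while the source is still enabled)
    {ok, 204, _} = request(delete, uri(Base), []),
    ok.
```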
-purge(delete, _) -> +rules(delete, _) -> case emqx_authz_api_sources:get_raw_source(<<"built_in_database">>) of [#{<<"enable">> := false}] -> ok = emqx_authz_mnesia:purge_rules(), diff --git a/apps/emqx_authz/src/emqx_authz_api_schema.erl b/apps/emqx_authz/src/emqx_authz_api_schema.erl index 44ec0d28a..4adada182 100644 --- a/apps/emqx_authz/src/emqx_authz_api_schema.erl +++ b/apps/emqx_authz/src/emqx_authz_api_schema.erl @@ -108,7 +108,7 @@ authz_http_common_fields() -> })}, {request_timeout, mk_duration("Request timeout", #{ - required => false, default => "30s", desc => ?DESC(request_timeout) + required => false, default => <<"30s">>, desc => ?DESC(request_timeout) })} ] ++ maps:to_list( diff --git a/apps/emqx_authz/src/emqx_authz_api_settings.erl b/apps/emqx_authz/src/emqx_authz_api_settings.erl index 72a2db35c..db915a795 100644 --- a/apps/emqx_authz/src/emqx_authz_api_settings.erl +++ b/apps/emqx_authz/src/emqx_authz_api_settings.erl @@ -64,7 +64,7 @@ schema("/authorization/settings") -> }. ref_authz_schema() -> - proplists:delete(sources, emqx_conf_schema:fields("authorization")). + emqx_schema:authz_fields(). settings(get, _Params) -> {200, authorization_settings()}; @@ -83,4 +83,6 @@ settings(put, #{ {200, authorization_settings()}. authorization_settings() -> - maps:remove(<<"sources">>, emqx:get_raw_config([authorization], #{})). + C = maps:remove(<<"sources">>, emqx:get_raw_config([authorization], #{})), + Schema = emqx_hocon:make_schema(emqx_schema:authz_fields()), + hocon_tconf:make_serializable(Schema, C, #{}). diff --git a/apps/emqx_authz/src/emqx_authz_api_sources.erl b/apps/emqx_authz/src/emqx_authz_api_sources.erl index f5570f1f1..d332f009f 100644 --- a/apps/emqx_authz/src/emqx_authz_api_sources.erl +++ b/apps/emqx_authz/src/emqx_authz_api_sources.erl @@ -47,7 +47,7 @@ -export([ sources/2, source/2, - move_source/2, + source_move/2, aggregate_metrics/1 ]). @@ -164,7 +164,7 @@ schema("/authorization/sources/:type/status") -> }; schema("/authorization/sources/:type/move") -> #{ - 'operationId' => move_source, + 'operationId' => source_move, post => #{ description => ?DESC(authorization_sources_type_move_post), @@ -205,7 +205,7 @@ sources(get, _) -> }, AccIn ) -> - case file:read_file(Path) of + case emqx_authz_file:read_file(Path) of {ok, Rules} -> lists:append(AccIn, [ #{ @@ -230,8 +230,6 @@ sources(get, _) -> get_raw_sources() ), {200, #{sources => Sources}}; -sources(post, #{body := #{<<"type">> := <<"file">>} = Body}) -> - create_authz_file(Body); sources(post, #{body := Body}) -> update_config(?CMD_PREPEND, Body). 
@@ -240,75 +238,99 @@ source(Method, #{bindings := #{type := Type} = Bindings} = Req) when -> source(Method, Req#{bindings => Bindings#{type => atom_to_binary(Type, utf8)}}); source(get, #{bindings := #{type := Type}}) -> - case get_raw_source(Type) of - [] -> - {404, #{code => <<"NOT_FOUND">>, message => <<"Not found: ", Type/binary>>}}; - [#{<<"type">> := <<"file">>, <<"enable">> := Enable, <<"path">> := Path}] -> - case file:read_file(Path) of - {ok, Rules} -> - {200, #{ - type => file, - enable => Enable, - rules => Rules - }}; - {error, Reason} -> - {500, #{ - code => <<"INTERNAL_ERROR">>, - message => bin(Reason) - }} - end; - [Source] -> - {200, Source} - end; -source(put, #{bindings := #{type := <<"file">>}, body := #{<<"type">> := <<"file">>} = Body}) -> - update_authz_file(Body); -source(put, #{bindings := #{type := Type}, body := Body}) -> - update_config({?CMD_REPLACE, Type}, Body); + with_source( + Type, + fun + (#{<<"type">> := <<"file">>, <<"enable">> := Enable, <<"path">> := Path}) -> + case emqx_authz_file:read_file(Path) of + {ok, Rules} -> + {200, #{ + type => file, + enable => Enable, + rules => Rules + }}; + {error, Reason} -> + {500, #{ + code => <<"INTERNAL_ERROR">>, + message => bin(Reason) + }} + end; + (Source) -> + {200, Source} + end + ); +source(put, #{bindings := #{type := Type}, body := #{<<"type">> := Type} = Body}) -> + with_source( + Type, + fun(_) -> + update_config({?CMD_REPLACE, Type}, Body) + end + ); +source(put, #{bindings := #{type := Type}, body := #{<<"type">> := _OtherType}}) -> + with_source( + Type, + fun(_) -> + {400, #{code => <<"BAD_REQUEST">>, message => <<"Type mismatch">>}} + end + ); source(delete, #{bindings := #{type := Type}}) -> - update_config({?CMD_DELETE, Type}, #{}). + with_source( + Type, + fun(_) -> + update_config({?CMD_DELETE, Type}, #{}) + end + ). source_status(get, #{bindings := #{type := Type}}) -> - lookup_from_all_nodes(Type). + with_source( + atom_to_binary(Type, utf8), + fun(_) -> lookup_from_all_nodes(Type) end + ). 
-move_source(Method, #{bindings := #{type := Type} = Bindings} = Req) when +source_move(Method, #{bindings := #{type := Type} = Bindings} = Req) when is_atom(Type) -> - move_source(Method, Req#{bindings => Bindings#{type => atom_to_binary(Type, utf8)}}); -move_source(post, #{bindings := #{type := Type}, body := #{<<"position">> := Position}}) -> - case parse_position(Position) of - {ok, NPosition} -> - try emqx_authz:move(Type, NPosition) of - {ok, _} -> - {204}; - {error, {not_found_source, _Type}} -> - {404, #{ - code => <<"NOT_FOUND">>, - message => <<"source ", Type/binary, " not found">> - }}; - {error, {emqx_conf_schema, _}} -> - {400, #{ - code => <<"BAD_REQUEST">>, - message => <<"BAD_SCHEMA">> - }}; + source_move(Method, Req#{bindings => Bindings#{type => atom_to_binary(Type, utf8)}}); +source_move(post, #{bindings := #{type := Type}, body := #{<<"position">> := Position}}) -> + with_source( + Type, + fun(_Source) -> + case parse_position(Position) of + {ok, NPosition} -> + try emqx_authz:move(Type, NPosition) of + {ok, _} -> + {204}; + {error, {not_found_source, _Type}} -> + {404, #{ + code => <<"NOT_FOUND">>, + message => <<"source ", Type/binary, " not found">> + }}; + {error, {emqx_conf_schema, _}} -> + {400, #{ + code => <<"BAD_REQUEST">>, + message => <<"BAD_SCHEMA">> + }}; + {error, Reason} -> + {400, #{ + code => <<"BAD_REQUEST">>, + message => bin(Reason) + }} + catch + error:{unknown_authz_source_type, Unknown} -> + NUnknown = bin(Unknown), + {400, #{ + code => <<"BAD_REQUEST">>, + message => <<"Unknown authz Source Type: ", NUnknown/binary>> + }} + end; {error, Reason} -> {400, #{ code => <<"BAD_REQUEST">>, message => bin(Reason) }} - catch - error:{unknown_authz_source_type, Unknown} -> - NUnknown = bin(Unknown), - {400, #{ - code => <<"BAD_REQUEST">>, - message => <<"Unknown authz Source Type: ", NUnknown/binary>> - }} - end; - {error, Reason} -> - {400, #{ - code => <<"BAD_REQUEST">>, - message => bin(Reason) - }} - end. + end + end + ). %%-------------------------------------------------------------------- %% Internal functions @@ -322,8 +344,8 @@ lookup_from_local_node(Type) -> case emqx_resource:get_instance(ResourceId) of {error, not_found} -> {error, {NodeId, not_found_resource}}; - {ok, _, #{status := Status, metrics := ResourceMetrics}} -> - {ok, {NodeId, Status, Metrics, ResourceMetrics}} + {ok, _, #{status := Status}} -> + {ok, {NodeId, Status, Metrics, emqx_resource:get_metrics(ResourceId)}} end; _ -> Metrics = emqx_metrics_worker:get_metrics(authz_metrics, Type), @@ -334,7 +356,7 @@ lookup_from_local_node(Type) -> end. lookup_from_all_nodes(Type) -> - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), case is_ok(emqx_authz_proto_v1:lookup_from_all_nodes(Nodes, Type)) of {ok, ResList} -> {StatusMap, MetricsMap, ResourceMetricsMap, ErrorMap} = make_result_map(ResList), @@ -381,7 +403,7 @@ aggregate_metrics([]) -> aggregate_metrics([HeadMetrics | AllMetrics]) -> ErrorLogger = fun(Reason) -> ?SLOG(info, #{msg => "bad_metrics_value", error => Reason}) end, Fun = fun(ElemMap, AccMap) -> - emqx_map_lib:best_effort_recursive_sum(AccMap, ElemMap, ErrorLogger) + emqx_utils_maps:best_effort_recursive_sum(AccMap, ElemMap, ErrorLogger) end, lists:foldl(Fun, HeadMetrics, AllMetrics). 
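`aggregate_metrics/1` now delegates the summing to `emqx_utils_maps:best_effort_recursive_sum/3`. Assuming it adds numeric leaves of similarly-shaped nested maps and routes bad values through the logger fun (an inference from the name and call site, not the real implementation), a toy equivalent:

```erlang
%% Toy stand-in for emqx_utils_maps:best_effort_recursive_sum/3 (an assumption):
%% numeric leaves are added, nested maps recurse, new keys are taken as-is.
recursive_sum(A, B) ->
    maps:fold(
        fun(K, VB, Acc) ->
            case Acc of
                #{K := VA} when is_map(VA), is_map(VB) -> Acc#{K := recursive_sum(VA, VB)};
                #{K := VA} when is_number(VA), is_number(VB) -> Acc#{K := VA + VB};
                _ -> Acc#{K => VB}
            end
        end,
        A,
        B
    ).

%% recursive_sum(#{allow => 1, rate => #{max => 2}},
%%               #{allow => 3, rate => #{max => 5}})
%% => #{allow => 4, rate => #{max => 7}}
```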
@@ -449,7 +471,7 @@ is_ok(ResL) -> get_raw_sources() -> RawSources = emqx:get_raw_config([authorization, sources], []), - Schema = #{roots => emqx_authz_schema:fields("authorization"), fields => #{}}, + Schema = emqx_hocon:make_schema(emqx_authz_schema:authz_fields()), Conf = #{<<"sources">> => RawSources}, #{<<"sources">> := Sources} = hocon_tconf:make_serializable(Schema, Conf, #{}), merge_default_headers(Sources). @@ -484,6 +506,15 @@ get_raw_source(Type) -> get_raw_sources() ). +-spec with_source(binary(), fun((map()) -> term())) -> term(). +with_source(Type, ContF) -> + case get_raw_source(Type) of + [] -> + {404, #{code => <<"NOT_FOUND">>, message => <<"Not found: ", Type/binary>>}}; + [Source] -> + ContF(Source) + end. + update_config(Cmd, Sources) -> case emqx_authz:update(Cmd, Sources) of {ok, _} -> @@ -628,13 +659,3 @@ status_metrics_example() -> } } }. - -create_authz_file(Body) -> - do_update_authz_file(?CMD_PREPEND, Body). - -update_authz_file(Body) -> - do_update_authz_file({?CMD_REPLACE, <<"file">>}, Body). - -do_update_authz_file(Cmd, Body) -> - %% API update will placed in `authz` subdirectory inside EMQX's `data_dir` - update_config(Cmd, Body). diff --git a/apps/emqx_authz/src/emqx_authz_file.erl b/apps/emqx_authz/src/emqx_authz_file.erl index 9aa2d506f..54f1775c6 100644 --- a/apps/emqx_authz/src/emqx_authz_file.erl +++ b/apps/emqx_authz/src/emqx_authz_file.erl @@ -32,13 +32,15 @@ create/1, update/1, destroy/1, - authorize/4 + authorize/4, + read_file/1 ]). description() -> "AuthZ with static rules". -create(#{path := Path} = Source) -> +create(#{path := Path0} = Source) -> + Path = filename(Path0), Rules = case file:consult(Path) of {ok, Terms} -> @@ -47,7 +49,7 @@ create(#{path := Path} = Source) -> ?SLOG(alert, #{ msg => failed_to_read_acl_file, path => Path, - explain => emqx_misc:explain_posix(Reason) + explain => emqx_utils:explain_posix(Reason) }), throw(failed_to_read_acl_file); {error, Reason} -> @@ -63,3 +65,9 @@ destroy(_Source) -> ok. authorize(Client, PubSub, Topic, #{annotations := #{rules := Rules}}) -> emqx_authz_rule:matches(Client, PubSub, Topic, Rules). + +read_file(Path) -> + file:read_file(filename(Path)). + +filename(PathMaybeTemplate) -> + emqx_schema:naive_env_interpolation(PathMaybeTemplate). diff --git a/apps/emqx_authz/src/emqx_authz_http.erl b/apps/emqx_authz/src/emqx_authz_http.erl index ea12214ec..5747e6eeb 100644 --- a/apps/emqx_authz/src/emqx_authz_http.erl +++ b/apps/emqx_authz/src/emqx_authz_http.erl @@ -20,6 +20,7 @@ -include_lib("emqx/include/emqx.hrl"). -include_lib("emqx/include/logger.hrl"). -include_lib("emqx/include/emqx_placeholder.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). -behaviour(emqx_authz). 
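`emqx_authz_file` now resolves the configured path with `emqx_schema:naive_env_interpolation/1`, so a value such as `${EMQX_ETC_DIR}/acl.conf` (the new schema default later in this diff) works out of the box. The file itself is a list of Erlang terms read by `file:consult/1`; an illustrative rule set (topics and usernames are made up):

```erlang
%% Illustrative acl.conf contents, matched top to bottom by emqx_authz_rule.
%% Placeholders such as ${clientid} in topic filters are compiled into
%% patterns (see the emqx_authz_rule changes below).
{allow, {username, "dashboard"}, subscribe, ["$SYS/#"]}.
{allow, all, publish, ["t/${clientid}/#"]}.
{deny, all, subscribe, [{eq, "#"}]}.
{allow, all}.
```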
@@ -81,7 +82,7 @@ authorize( } = Config ) -> Request = generate_request(PubSub, Topic, Client, Config), - case emqx_resource:query(ResourceID, {Method, Request, RequestTimeout}) of + case emqx_resource:simple_sync_query(ResourceID, {Method, Request, RequestTimeout}) of {ok, 204, _Headers} -> {matched, allow}; {ok, 200, Headers, Body} -> @@ -104,6 +105,7 @@ authorize( log_nomtach_msg(Status, Headers, Body), nomatch; {error, Reason} -> + ?tp(authz_http_request_failure, #{error => Reason}), ?SLOG(error, #{ msg => "http_server_query_failed", resource => ResourceID, @@ -159,9 +161,9 @@ parse_url(Url) -> BaseUrl = iolist_to_binary([Scheme, "//", HostPort]), case string:split(Remaining, "?", leading) of [Path, QueryString] -> - {BaseUrl, Path, QueryString}; + {BaseUrl, <<"/", Path/binary>>, QueryString}; [Path] -> - {BaseUrl, Path, <<>>} + {BaseUrl, <<"/", Path/binary>>, <<>>} end; [HostPort] -> {iolist_to_binary([Scheme, "//", HostPort]), <<>>, <<>>} @@ -183,7 +185,7 @@ generate_request( } ) -> Values = client_vars(Client, PubSub, Topic), - Path = emqx_authz_utils:render_str(BasePathTemplate, Values), + Path = emqx_authz_utils:render_urlencoded_str(BasePathTemplate, Values), Query = emqx_authz_utils:render_deep(BaseQueryTemplate, Values), Body = emqx_authz_utils:render_deep(BodyTemplate, Values), case Method of @@ -200,9 +202,9 @@ generate_request( end. append_query(Path, []) -> - encode_path(Path); + to_list(Path); append_query(Path, Query) -> - encode_path(Path) ++ "?" ++ to_list(query_string(Query)). + to_list(Path) ++ "?" ++ to_list(query_string(Query)). query_string(Body) -> query_string(Body, []). @@ -220,12 +222,8 @@ query_string([{K, V} | More], Acc) -> uri_encode(T) -> emqx_http_lib:uri_encode(to_list(T)). -encode_path(Path) -> - Parts = string:split(Path, "/", all), - lists:flatten(["/" ++ Part || Part <- lists:map(fun uri_encode/1, Parts)]). - serialize_body(<<"application/json">>, Body) -> - jsx:encode(Body); + emqx_utils_json:encode(Body); serialize_body(<<"application/x-www-form-urlencoded">>, Body) -> query_string(Body). 
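Two request-building fixes land in this hunk: `parse_url/1` keeps the leading slash on the extracted path, and path placeholders go through `render_urlencoded_str/2`, so reserved characters in substituted values no longer corrupt the request line (the reworked `t_path` test below expects `t%2F1` for topic `t/1`). A rough sketch of the encoding step, reusing `emqx_http_lib:uri_encode/1` as the hunk does (the segment format is simplified relative to `emqx_placeholder` templates):

```erlang
%% Sketch: render a path template, URI-encoding only the substituted values.
render_path(Segments, Vars) ->
    lists:flatten(
        [
            case Seg of
                {var, Name} -> emqx_http_lib:uri_encode(binary_to_list(maps:get(Name, Vars)));
                Literal when is_list(Literal) -> Literal
            end
         || Seg <- Segments
        ]
    ).

%% render_path(["/authz/users/", {var, topic}], #{topic => <<"t/1">>})
%% => "/authz/users/t%2F1"
```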
diff --git a/apps/emqx_authz/src/emqx_authz_mongodb.erl b/apps/emqx_authz/src/emqx_authz_mongodb.erl index 840ce8fb9..8d04a3229 100644 --- a/apps/emqx_authz/src/emqx_authz_mongodb.erl +++ b/apps/emqx_authz/src/emqx_authz_mongodb.erl @@ -79,7 +79,7 @@ authorize( RenderedFilter = emqx_authz_utils:render_deep(FilterTemplate, Client), Result = try - emqx_resource:query(ResourceID, {find, Collection, RenderedFilter, #{}}) + emqx_resource:simple_sync_query(ResourceID, {find, Collection, RenderedFilter, #{}}) catch error:Error -> {error, Error} end, diff --git a/apps/emqx_authz/src/emqx_authz_mysql.erl b/apps/emqx_authz/src/emqx_authz_mysql.erl index bd81a7398..768479e98 100644 --- a/apps/emqx_authz/src/emqx_authz_mysql.erl +++ b/apps/emqx_authz/src/emqx_authz_mysql.erl @@ -82,7 +82,9 @@ authorize( } ) -> RenderParams = emqx_authz_utils:render_sql_params(TmplToken, Client), - case emqx_resource:query(ResourceID, {prepared_query, ?PREPARE_KEY, RenderParams}) of + case + emqx_resource:simple_sync_query(ResourceID, {prepared_query, ?PREPARE_KEY, RenderParams}) + of {ok, _Columns, []} -> nomatch; {ok, Columns, Rows} -> diff --git a/apps/emqx_authz/src/emqx_authz_postgresql.erl b/apps/emqx_authz/src/emqx_authz_postgresql.erl index ae58efee1..05f2315a6 100644 --- a/apps/emqx_authz/src/emqx_authz_postgresql.erl +++ b/apps/emqx_authz/src/emqx_authz_postgresql.erl @@ -87,7 +87,9 @@ authorize( } ) -> RenderedParams = emqx_authz_utils:render_sql_params(Placeholders, Client), - case emqx_resource:query(ResourceID, {prepared_query, ResourceID, RenderedParams}) of + case + emqx_resource:simple_sync_query(ResourceID, {prepared_query, ResourceID, RenderedParams}) + of {ok, _Columns, []} -> nomatch; {ok, Columns, Rows} -> diff --git a/apps/emqx_authz/src/emqx_authz_redis.erl b/apps/emqx_authz/src/emqx_authz_redis.erl index 52469ad43..34c2038ab 100644 --- a/apps/emqx_authz/src/emqx_authz_redis.erl +++ b/apps/emqx_authz/src/emqx_authz_redis.erl @@ -78,7 +78,7 @@ authorize( } ) -> Cmd = emqx_authz_utils:render_deep(CmdTemplate, Client), - case emqx_resource:query(ResourceID, {cmd, Cmd}) of + case emqx_resource:simple_sync_query(ResourceID, {cmd, Cmd}) of {ok, []} -> nomatch; {ok, Rows} -> diff --git a/apps/emqx_authz/src/emqx_authz_rule.erl b/apps/emqx_authz/src/emqx_authz_rule.erl index 4aa0983e6..bdd0904f7 100644 --- a/apps/emqx_authz/src/emqx_authz_rule.erl +++ b/apps/emqx_authz/src/emqx_authz_rule.erl @@ -100,15 +100,17 @@ compile_topic(<<"eq ", Topic/binary>>) -> compile_topic({eq, Topic}) -> {eq, emqx_topic:words(bin(Topic))}; compile_topic(Topic) -> - Words = emqx_topic:words(bin(Topic)), - case pattern(Words) of - true -> {pattern, Words}; - false -> Words + TopicBin = bin(Topic), + case + emqx_placeholder:preproc_tmpl( + TopicBin, + #{placeholders => [?PH_USERNAME, ?PH_CLIENTID]} + ) + of + [{str, _}] -> emqx_topic:words(TopicBin); + Tokens -> {pattern, Tokens} end. -pattern(Words) -> - lists:member(?PH_USERNAME, Words) orelse lists:member(?PH_CLIENTID, Words). 
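The `compile_topic/1` rewrite above means a placeholder no longer has to occupy a whole topic level: any topic whose template pre-processing yields more than one literal token becomes a `{pattern, Tokens}` rule, rendered per client at check time. A sketch of the round trip, using the same `emqx_placeholder` calls as the hunk (the `<<"${clientid}">>` literal stands in for the `?PH_CLIENTID` macro):

```erlang
%% Sketch: placeholder topic compiled to a template, rendered, then matched.
demo() ->
    Tokens = emqx_placeholder:preproc_tmpl(
        <<"t/${clientid}/#">>,
        #{placeholders => [<<"${clientid}">>]}
    ),
    %% at authorize time, match_topics/3 renders the tokens for the client...
    Rendered = emqx_placeholder:proc_tmpl(Tokens, #{clientid => <<"c1">>}),
    %% ...and matches the rendered filter against the requested topic
    true = emqx_topic:match(emqx_topic:words(<<"t/c1/x">>), emqx_topic:words(Rendered)).
```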
- atom(B) when is_binary(B) -> try binary_to_existing_atom(B, utf8) @@ -183,7 +185,7 @@ match_who(#{peerhost := IpAddress}, {ipaddrs, CIDRs}) -> match_who(ClientInfo, {'and', Principals}) when is_list(Principals) -> lists:foldl( fun(Principal, Permission) -> - match_who(ClientInfo, Principal) andalso Permission + Permission andalso match_who(ClientInfo, Principal) end, true, Principals @@ -191,7 +193,7 @@ match_who(ClientInfo, {'and', Principals}) when is_list(Principals) -> match_who(ClientInfo, {'or', Principals}) when is_list(Principals) -> lists:foldl( fun(Principal, Permission) -> - match_who(ClientInfo, Principal) orelse Permission + Permission orelse match_who(ClientInfo, Principal) end, false, Principals @@ -202,8 +204,8 @@ match_who(_, _) -> match_topics(_ClientInfo, _Topic, []) -> false; match_topics(ClientInfo, Topic, [{pattern, PatternFilter} | Filters]) -> - TopicFilter = feed_var(ClientInfo, PatternFilter), - match_topic(emqx_topic:words(Topic), TopicFilter) orelse + TopicFilter = emqx_placeholder:proc_tmpl(PatternFilter, ClientInfo), + match_topic(emqx_topic:words(Topic), emqx_topic:words(TopicFilter)) orelse match_topics(ClientInfo, Topic, Filters); match_topics(ClientInfo, Topic, [TopicFilter | Filters]) -> match_topic(emqx_topic:words(Topic), TopicFilter) orelse @@ -213,18 +215,3 @@ match_topic(Topic, {'eq', TopicFilter}) -> Topic =:= TopicFilter; match_topic(Topic, TopicFilter) -> emqx_topic:match(Topic, TopicFilter). - -feed_var(ClientInfo, Pattern) -> - feed_var(ClientInfo, Pattern, []). -feed_var(_ClientInfo, [], Acc) -> - lists:reverse(Acc); -feed_var(ClientInfo = #{clientid := undefined}, [?PH_CLIENTID | Words], Acc) -> - feed_var(ClientInfo, Words, [?PH_CLIENTID | Acc]); -feed_var(ClientInfo = #{clientid := ClientId}, [?PH_CLIENTID | Words], Acc) -> - feed_var(ClientInfo, Words, [ClientId | Acc]); -feed_var(ClientInfo = #{username := undefined}, [?PH_USERNAME | Words], Acc) -> - feed_var(ClientInfo, Words, [?PH_USERNAME | Acc]); -feed_var(ClientInfo = #{username := Username}, [?PH_USERNAME | Words], Acc) -> - feed_var(ClientInfo, Words, [Username | Acc]); -feed_var(ClientInfo, [W | Words], Acc) -> - feed_var(ClientInfo, Words, [W | Acc]). diff --git a/apps/emqx_authz/src/emqx_authz_schema.erl b/apps/emqx_authz/src/emqx_authz_schema.erl index d03747b84..a2a7c6b52 100644 --- a/apps/emqx_authz/src/emqx_authz_schema.erl +++ b/apps/emqx_authz/src/emqx_authz_schema.erl @@ -33,9 +33,11 @@ -export([ namespace/0, roots/0, + tags/0, fields/1, validations/0, - desc/1 + desc/1, + authz_fields/0 ]). -export([ @@ -47,18 +49,12 @@ %% Hocon Schema %%-------------------------------------------------------------------- -namespace() -> authz. - -%% @doc authorization schema is not exported -%% but directly used by emqx_schema -roots() -> []. - -fields("authorization") -> - Types = [ +type_names() -> + [ file, http_get, http_post, - mnesia, + builtin_db, mongo_single, mongo_rs, mongo_sharded, @@ -67,18 +63,19 @@ fields("authorization") -> redis_single, redis_sentinel, redis_cluster - ], - Unions = [?R_REF(Type) || Type <- Types], - [ - {sources, - ?HOCON( - ?ARRAY(?UNION(Unions)), - #{ - default => [], - desc => ?DESC(sources) - } - )} - ]; + ]. + +namespace() -> authz. + +tags() -> + [<<"Authorization">>]. + +%% @doc authorization schema is not exported +%% but directly used by emqx_schema +roots() -> []. 
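The `match_who/2` operand swap above changes evaluation order only: once the fold over an `'and'`/`'or'` principal list has a decided `Permission`, `Permission andalso match_who(...)` (and its `orelse` twin) skips the remaining principal checks instead of evaluating them and discarding the result. A minimal demonstration that the short-circuit actually happens:

```erlang
%% Count how many principal checks run once the outcome is already decided.
demo() ->
    Counter = counters:new(1, []),
    Check = fun(_) -> counters:add(Counter, 1, 1), false end,
    %% new operand order: the first false decides the whole 'and' fold
    false = lists:foldl(fun(P, Perm) -> Perm andalso Check(P) end, true, [a, b, c, d]),
    1 = counters:get(Counter, 1). %% only the first principal was evaluated
```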
+ +fields("authorization") -> + authz_fields(); fields(file) -> authz_common_fields(file) ++ [{path, ?HOCON(string(), #{required => true, desc => ?DESC(path)})}]; @@ -96,7 +93,7 @@ fields(http_post) -> {method, method(post)}, {headers, fun headers/1} ]; -fields(mnesia) -> +fields(builtin_db) -> authz_common_fields(built_in_database); fields(mongo_single) -> authz_common_fields(mongodb) ++ @@ -194,8 +191,8 @@ desc(http_get) -> ?DESC(http_get); desc(http_post) -> ?DESC(http_post); -desc(mnesia) -> - ?DESC(mnesia); +desc(builtin_db) -> + ?DESC(builtin_db); desc(mongo_single) -> ?DESC(mongo_single); desc(mongo_rs) -> @@ -226,7 +223,7 @@ http_common_fields() -> {url, fun url/1}, {request_timeout, mk_duration("Request timeout", #{ - required => false, default => "30s", desc => ?DESC(request_timeout) + required => false, default => <<"30s">>, desc => ?DESC(request_timeout) })}, {body, ?HOCON(map(), #{required => false, desc => ?DESC(body)})} ] ++ @@ -243,7 +240,7 @@ http_common_fields() -> mongo_common_fields() -> [ {collection, - ?HOCON(atom(), #{ + ?HOCON(binary(), #{ required => true, desc => ?DESC(collection) })}, @@ -340,7 +337,7 @@ check_ssl_opts(Conf) -> (#{<<"url">> := Url} = Source) -> case emqx_authz_http:parse_url(Url) of {<<"https", _/binary>>, _, _} -> - case emqx_map_lib:deep_find([<<"ssl">>, <<"enable">>], Source) of + case emqx_utils_maps:deep_find([<<"ssl">>, <<"enable">>], Source) of {ok, true} -> true; {ok, false} -> throw({ssl_not_enable, Url}); _ -> throw({ssl_enable_not_found, Url}) @@ -408,9 +405,106 @@ common_rate_field() -> ]. method(Method) -> - ?HOCON(Method, #{default => Method, required => true, desc => ?DESC(method)}). + ?HOCON(Method, #{required => true, desc => ?DESC(method)}). array(Ref) -> array(Ref, Ref). array(Ref, DescId) -> ?HOCON(?ARRAY(?R_REF(Ref)), #{desc => ?DESC(DescId)}). + +select_union_member(#{<<"type">> := <<"mongodb">>} = Value) -> + MongoType = maps:get(<<"mongo_type">>, Value, undefined), + case MongoType of + <<"single">> -> + ?R_REF(mongo_single); + <<"rs">> -> + ?R_REF(mongo_rs); + <<"sharded">> -> + ?R_REF(mongo_sharded); + Else -> + throw(#{ + reason => "unknown_mongo_type", + expected => "single | rs | sharded", + got => Else + }) + end; +select_union_member(#{<<"type">> := <<"redis">>} = Value) -> + RedisType = maps:get(<<"redis_type">>, Value, undefined), + case RedisType of + <<"single">> -> + ?R_REF(redis_single); + <<"cluster">> -> + ?R_REF(redis_cluster); + <<"sentinel">> -> + ?R_REF(redis_sentinel); + Else -> + throw(#{ + reason => "unknown_redis_type", + expected => "single | cluster | sentinel", + got => Else + }) + end; +select_union_member(#{<<"type">> := <<"http">>} = Value) -> + RedisType = maps:get(<<"method">>, Value, undefined), + case RedisType of + <<"get">> -> + ?R_REF(http_get); + <<"post">> -> + ?R_REF(http_post); + Else -> + throw(#{ + reason => "unknown_http_method", + expected => "get | post", + got => Else + }) + end; +select_union_member(#{<<"type">> := <<"built_in_database">>}) -> + ?R_REF(builtin_db); +select_union_member(#{<<"type">> := Type}) -> + select_union_member_loop(Type, type_names()); +select_union_member(_) -> + throw("missing_type_field"). + +select_union_member_loop(TypeValue, []) -> + throw(#{ + reason => "unknown_authz_type", + got => TypeValue + }); +select_union_member_loop(TypeValue, [Type | Types]) -> + case TypeValue =:= atom_to_binary(Type) of + true -> + ?R_REF(Type); + false -> + select_union_member_loop(TypeValue, Types) + end. 
+ +authz_fields() -> + Types = [?R_REF(Type) || Type <- type_names()], + UnionMemberSelector = + fun + (all_union_members) -> Types; + %% must return list + ({value, Value}) -> [select_union_member(Value)] + end, + [ + {sources, + ?HOCON( + ?ARRAY(?UNION(UnionMemberSelector)), + #{ + default => [default_authz()], + desc => ?DESC(sources), + %% doc_lift forces a root-level reference instead of nesting sub-structs + extra => #{doc_lift => true}, + %% it is recommended to configure authz sources from dashboard + %% hence the importance level for config is low + importance => ?IMPORTANCE_LOW + } + )} + ]. + +default_authz() -> + #{ + <<"type">> => <<"file">>, + <<"enable">> => true, + <<"path">> => <<"${EMQX_ETC_DIR}/acl.conf">> + }. diff --git a/apps/emqx_authz/src/emqx_authz_utils.erl b/apps/emqx_authz/src/emqx_authz_utils.erl index df77673a2..c01505680 100644 --- a/apps/emqx_authz/src/emqx_authz_utils.erl +++ b/apps/emqx_authz/src/emqx_authz_utils.erl @@ -16,7 +16,6 @@ -module(emqx_authz_utils). --include_lib("emqx/include/emqx_placeholder.hrl"). -include_lib("emqx_authz.hrl"). -export([ @@ -28,6 +27,7 @@ update_config/2, parse_deep/2, parse_str/2, + render_urlencoded_str/2, parse_sql/3, render_deep/2, render_str/2, @@ -128,6 +128,13 @@ render_str(Template, Values) -> #{return => full_binary, var_trans => fun handle_var/2} ). +render_urlencoded_str(Template, Values) -> + emqx_placeholder:proc_tmpl( + Template, + client_vars(Values), + #{return => full_binary, var_trans => fun urlencode_var/2} + ). + render_sql_params(ParamList, Values) -> emqx_placeholder:proc_tmpl( ParamList, @@ -144,7 +151,7 @@ parse_http_resp_body(<<"application/x-www-form-urlencoded", _/binary>>, Body) -> end; parse_http_resp_body(<<"application/json", _/binary>>, Body) -> try - result(emqx_json:decode(Body, [return_maps])) + result(emqx_utils_json:decode(Body, [return_maps])) catch _:_ -> error end. @@ -181,6 +188,11 @@ convert_client_var({dn, DN}) -> {cert_subject, DN}; convert_client_var({protocol, Proto}) -> {proto_name, Proto}; convert_client_var(Other) -> Other. +urlencode_var({var, _} = Var, Value) -> + emqx_http_lib:uri_encode(handle_var(Var, Value)); +urlencode_var(Var, Value) -> + handle_var(Var, Value). + handle_var({var, _Name}, undefined) -> <<>>; handle_var({var, <<"peerhost">>}, IpAddr) -> diff --git a/apps/emqx_authz/test/emqx_authz_SUITE.erl b/apps/emqx_authz/test/emqx_authz_SUITE.erl index b3ce04f43..84b1d903e 100644 --- a/apps/emqx_authz/test/emqx_authz_SUITE.erl +++ b/apps/emqx_authz/test/emqx_authz_SUITE.erl @@ -26,6 +26,8 @@ -include_lib("emqx/include/emqx_placeholder.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). +-import(emqx_common_test_helpers, [on_exit/1]). + all() -> emqx_common_test_helpers:all(?MODULE). 
@@ -65,6 +67,7 @@ end_per_suite(_Config) -> init_per_testcase(TestCase, Config) when TestCase =:= t_subscribe_deny_disconnect_publishes_last_will_testament; + TestCase =:= t_publish_last_will_testament_banned_client_connecting; TestCase =:= t_publish_deny_disconnect_publishes_last_will_testament -> {ok, _} = emqx_authz:update(?CMD_REPLACE, []), @@ -76,11 +79,15 @@ init_per_testcase(_, Config) -> end_per_testcase(TestCase, _Config) when TestCase =:= t_subscribe_deny_disconnect_publishes_last_will_testament; + TestCase =:= t_publish_last_will_testament_banned_client_connecting; TestCase =:= t_publish_deny_disconnect_publishes_last_will_testament -> {ok, _} = emqx:update_config([authorization, deny_action], ignore), + {ok, _} = emqx_authz:update(?CMD_REPLACE, []), + emqx_common_test_helpers:call_janitor(), ok; end_per_testcase(_TestCase, _Config) -> + emqx_common_test_helpers:call_janitor(), ok. set_special_configs(emqx_authz) -> @@ -396,5 +403,63 @@ t_publish_last_will_testament_denied_topic(_Config) -> ok. +%% client is allowed by ACL to publish to its LWT topic, is connected, +%% and then gets banned and kicked out while connected. Should not +%% publish LWT. +t_publish_last_will_testament_banned_client_connecting(_Config) -> + {ok, _} = emqx_authz:update(?CMD_REPLACE, [?SOURCE7]), + Username = <<"some_client">>, + ClientId = <<"some_clientid">>, + LWTPayload = <<"should not be published">>, + LWTTopic = <<"some_client/lwt">>, + ok = emqx:subscribe(<<"some_client/lwt">>), + {ok, C} = emqtt:start_link([ + {clientid, ClientId}, + {username, Username}, + {will_topic, LWTTopic}, + {will_payload, LWTPayload} + ]), + ?assertMatch({ok, _}, emqtt:connect(C)), + + %% Now we ban the client while it is connected. + Now = erlang:system_time(second), + Who = {username, Username}, + emqx_banned:create(#{ + who => Who, + by => <<"test">>, + reason => <<"test">>, + at => Now, + until => Now + 120 + }), + on_exit(fun() -> emqx_banned:delete(Who) end), + %% Now kick it as we do in the ban API. + process_flag(trap_exit, true), + ?check_trace( + begin + ok = emqx_cm:kick_session(ClientId), + receive + {deliver, LWTTopic, #message{payload = LWTPayload}} -> + error(lwt_should_not_be_published_to_forbidden_topic) + after 2_000 -> ok + end, + ok + end, + fun(Trace) -> + ?assertMatch( + [ + #{ + client_banned := true, + publishing_disallowed := false + } + ], + ?of_kind(last_will_testament_publish_denied, Trace) + ), + ok + end + ), + ok = snabbkaffe:stop(), + + ok. + stop_apps(Apps) -> lists:foreach(fun application:stop/1, Apps). diff --git a/apps/emqx_authz/test/emqx_authz_api_cache_SUITE.erl b/apps/emqx_authz/test/emqx_authz_api_cache_SUITE.erl index 24b8fe25e..ab673b225 100644 --- a/apps/emqx_authz/test/emqx_authz_api_cache_SUITE.erl +++ b/apps/emqx_authz/test/emqx_authz_api_cache_SUITE.erl @@ -18,7 +18,7 @@ -compile(nowarn_export_all). -compile(export_all). --import(emqx_dashboard_api_test_helpers, [request/2, uri/1]). +-import(emqx_mgmt_api_test_util, [request/2, uri/1]). -include_lib("eunit/include/eunit.hrl"). -include_lib("common_test/include/ct.hrl"). @@ -32,8 +32,8 @@ groups() -> []. init_per_suite(Config) -> - ok = emqx_common_test_helpers:start_apps( - [emqx_conf, emqx_authz, emqx_dashboard, emqx_management], + ok = emqx_mgmt_api_test_util:init_suite( + [emqx_conf, emqx_authz], fun set_special_configs/1 ), Config. 
@@ -47,7 +47,7 @@ end_per_suite(_Config) -> <<"sources">> => [] } ), - emqx_common_test_helpers:stop_apps([emqx_dashboard, emqx_authz, emqx_conf, emqx_management]), + emqx_mgmt_api_test_util:end_suite([emqx_authz, emqx_conf]), ok. set_special_configs(emqx_dashboard) -> @@ -60,19 +60,19 @@ set_special_configs(emqx_authz) -> set_special_configs(_App) -> ok. -t_clean_cahce(_) -> +t_clean_cache(_) -> {ok, C} = emqtt:start_link([{clientid, <<"emqx0">>}, {username, <<"emqx0">>}]), {ok, _} = emqtt:connect(C), {ok, _, _} = emqtt:subscribe(C, <<"a/b/c">>, 0), ok = emqtt:publish(C, <<"a/b/c">>, <<"{\"x\":1,\"y\":1}">>, 0), {ok, 200, Result3} = request(get, uri(["clients", "emqx0", "authorization", "cache"])), - ?assertEqual(2, length(jsx:decode(Result3))), + ?assertEqual(2, length(emqx_utils_json:decode(Result3))), request(delete, uri(["authorization", "cache"])), {ok, 200, Result4} = request(get, uri(["clients", "emqx0", "authorization", "cache"])), - ?assertEqual(0, length(jsx:decode(Result4))), + ?assertEqual(0, length(emqx_utils_json:decode(Result4))), ok. diff --git a/apps/emqx_authz/test/emqx_authz_api_mnesia_SUITE.erl b/apps/emqx_authz/test/emqx_authz_api_mnesia_SUITE.erl index 62c747433..3775b9a1c 100644 --- a/apps/emqx_authz/test/emqx_authz_api_mnesia_SUITE.erl +++ b/apps/emqx_authz/test/emqx_authz_api_mnesia_SUITE.erl @@ -22,7 +22,7 @@ -include_lib("eunit/include/eunit.hrl"). -include_lib("common_test/include/ct.hrl"). --import(emqx_dashboard_api_test_helpers, [request/3, uri/1]). +-import(emqx_mgmt_api_test_util, [request/3, uri/1]). all() -> emqx_common_test_helpers:all(?MODULE). @@ -31,8 +31,8 @@ groups() -> []. init_per_suite(Config) -> - ok = emqx_common_test_helpers:start_apps( - [emqx_conf, emqx_authz, emqx_dashboard], + ok = emqx_mgmt_api_test_util:init_suite( + [emqx_conf, emqx_authz], fun set_special_configs/1 ), Config. @@ -46,7 +46,7 @@ end_per_suite(_Config) -> <<"sources">> => [] } ), - emqx_common_test_helpers:stop_apps([emqx_dashboard, emqx_authz, emqx_conf]), + emqx_mgmt_api_test_util:end_suite([emqx_authz, emqx_conf]), ok. 
set_special_configs(emqx_dashboard) -> @@ -70,21 +70,21 @@ t_api(_) -> {ok, 204, _} = request( post, - uri(["authorization", "sources", "built_in_database", "username"]), + uri(["authorization", "sources", "built_in_database", "rules", "users"]), [?USERNAME_RULES_EXAMPLE] ), {ok, 409, _} = request( post, - uri(["authorization", "sources", "built_in_database", "username"]), + uri(["authorization", "sources", "built_in_database", "rules", "users"]), [?USERNAME_RULES_EXAMPLE] ), {ok, 200, Request1} = request( get, - uri(["authorization", "sources", "built_in_database", "username"]), + uri(["authorization", "sources", "built_in_database", "rules", "users"]), [] ), #{ @@ -92,9 +92,10 @@ t_api(_) -> <<"meta">> := #{ <<"count">> := 1, <<"limit">> := 100, - <<"page">> := 1 + <<"page">> := 1, + <<"hasnext">> := false } - } = jsx:decode(Request1), + } = emqx_utils_json:decode(Request1), ?assertEqual(3, length(Rules1)), {ok, 200, Request1_1} = @@ -104,164 +105,175 @@ t_api(_) -> "authorization", "sources", "built_in_database", - "username?page=1&limit=20&like_username=noexist" + "rules", + "users?page=1&limit=20&like_username=noexist" ]), [] ), - #{ - <<"data">> := [], - <<"meta">> := #{ - <<"count">> := 0, - <<"limit">> := 20, - <<"page">> := 1 - } - } = jsx:decode(Request1_1), + ?assertEqual( + #{ + <<"data">> => [], + <<"meta">> => #{ + <<"limit">> => 20, + <<"page">> => 1, + <<"hasnext">> => false + } + }, + emqx_utils_json:decode(Request1_1) + ), {ok, 200, Request2} = request( get, - uri(["authorization", "sources", "built_in_database", "username", "user1"]), + uri(["authorization", "sources", "built_in_database", "rules", "users", "user1"]), [] ), - #{<<"username">> := <<"user1">>, <<"rules">> := Rules1} = jsx:decode(Request2), + #{<<"username">> := <<"user1">>, <<"rules">> := Rules1} = emqx_utils_json:decode(Request2), {ok, 204, _} = request( put, - uri(["authorization", "sources", "built_in_database", "username", "user1"]), + uri(["authorization", "sources", "built_in_database", "rules", "users", "user1"]), ?USERNAME_RULES_EXAMPLE#{rules => []} ), {ok, 200, Request3} = request( get, - uri(["authorization", "sources", "built_in_database", "username", "user1"]), + uri(["authorization", "sources", "built_in_database", "rules", "users", "user1"]), [] ), - #{<<"username">> := <<"user1">>, <<"rules">> := Rules2} = jsx:decode(Request3), + #{<<"username">> := <<"user1">>, <<"rules">> := Rules2} = emqx_utils_json:decode(Request3), ?assertEqual(0, length(Rules2)), {ok, 204, _} = request( delete, - uri(["authorization", "sources", "built_in_database", "username", "user1"]), + uri(["authorization", "sources", "built_in_database", "rules", "users", "user1"]), [] ), {ok, 404, _} = request( get, - uri(["authorization", "sources", "built_in_database", "username", "user1"]), + uri(["authorization", "sources", "built_in_database", "rules", "users", "user1"]), [] ), {ok, 404, _} = request( delete, - uri(["authorization", "sources", "built_in_database", "username", "user1"]), + uri(["authorization", "sources", "built_in_database", "rules", "users", "user1"]), [] ), + % ensure that db contain a mix of records + {ok, 204, _} = + request( + post, + uri(["authorization", "sources", "built_in_database", "rules", "users"]), + [?USERNAME_RULES_EXAMPLE] + ), + {ok, 204, _} = request( post, - uri(["authorization", "sources", "built_in_database", "clientid"]), + uri(["authorization", "sources", "built_in_database", "rules", "clients"]), [?CLIENTID_RULES_EXAMPLE] ), {ok, 409, _} = request( post, - uri(["authorization", 
"sources", "built_in_database", "clientid"]), + uri(["authorization", "sources", "built_in_database", "rules", "clients"]), [?CLIENTID_RULES_EXAMPLE] ), {ok, 200, Request4} = request( get, - uri(["authorization", "sources", "built_in_database", "clientid"]), + uri(["authorization", "sources", "built_in_database", "rules", "clients"]), [] ), {ok, 200, Request5} = request( get, - uri(["authorization", "sources", "built_in_database", "clientid", "client1"]), + uri(["authorization", "sources", "built_in_database", "rules", "clients", "client1"]), [] ), #{ <<"data">> := [#{<<"clientid">> := <<"client1">>, <<"rules">> := Rules3}], <<"meta">> := #{<<"count">> := 1, <<"limit">> := 100, <<"page">> := 1} } = - jsx:decode(Request4), - #{<<"clientid">> := <<"client1">>, <<"rules">> := Rules3} = jsx:decode(Request5), + emqx_utils_json:decode(Request4), + #{<<"clientid">> := <<"client1">>, <<"rules">> := Rules3} = emqx_utils_json:decode(Request5), ?assertEqual(3, length(Rules3)), {ok, 204, _} = request( put, - uri(["authorization", "sources", "built_in_database", "clientid", "client1"]), + uri(["authorization", "sources", "built_in_database", "rules", "clients", "client1"]), ?CLIENTID_RULES_EXAMPLE#{rules => []} ), {ok, 200, Request6} = request( get, - uri(["authorization", "sources", "built_in_database", "clientid", "client1"]), + uri(["authorization", "sources", "built_in_database", "rules", "clients", "client1"]), [] ), - #{<<"clientid">> := <<"client1">>, <<"rules">> := Rules4} = jsx:decode(Request6), + #{<<"clientid">> := <<"client1">>, <<"rules">> := Rules4} = emqx_utils_json:decode(Request6), ?assertEqual(0, length(Rules4)), {ok, 204, _} = request( delete, - uri(["authorization", "sources", "built_in_database", "clientid", "client1"]), + uri(["authorization", "sources", "built_in_database", "rules", "clients", "client1"]), [] ), {ok, 404, _} = request( get, - uri(["authorization", "sources", "built_in_database", "clientid", "client1"]), + uri(["authorization", "sources", "built_in_database", "rules", "clients", "client1"]), [] ), {ok, 404, _} = request( delete, - uri(["authorization", "sources", "built_in_database", "clientid", "client1"]), + uri(["authorization", "sources", "built_in_database", "rules", "clients", "client1"]), [] ), {ok, 204, _} = request( post, - uri(["authorization", "sources", "built_in_database", "all"]), + uri(["authorization", "sources", "built_in_database", "rules", "all"]), ?ALL_RULES_EXAMPLE ), {ok, 200, Request7} = request( get, - uri(["authorization", "sources", "built_in_database", "all"]), + uri(["authorization", "sources", "built_in_database", "rules", "all"]), [] ), - #{<<"rules">> := Rules5} = jsx:decode(Request7), + #{<<"rules">> := Rules5} = emqx_utils_json:decode(Request7), ?assertEqual(3, length(Rules5)), {ok, 204, _} = request( - post, - uri(["authorization", "sources", "built_in_database", "all"]), - - ?ALL_RULES_EXAMPLE#{rules => []} + delete, + uri(["authorization", "sources", "built_in_database", "rules", "all"]), + [] ), {ok, 200, Request8} = request( get, - uri(["authorization", "sources", "built_in_database", "all"]), + uri(["authorization", "sources", "built_in_database", "rules", "all"]), [] ), - #{<<"rules">> := Rules6} = jsx:decode(Request8), + #{<<"rules">> := Rules6} = emqx_utils_json:decode(Request8), ?assertEqual(0, length(Rules6)), {ok, 204, _} = request( post, - uri(["authorization", "sources", "built_in_database", "username"]), + uri(["authorization", "sources", "built_in_database", "rules", "users"]), [ #{username => 
erlang:integer_to_binary(N), rules => []} || N <- lists:seq(1, 20) @@ -270,16 +282,16 @@ t_api(_) -> {ok, 200, Request9} = request( get, - uri(["authorization", "sources", "built_in_database", "username?page=2&limit=5"]), + uri(["authorization", "sources", "built_in_database", "rules", "users?page=2&limit=5"]), [] ), - #{<<"data">> := Data1} = jsx:decode(Request9), + #{<<"data">> := Data1} = emqx_utils_json:decode(Request9), ?assertEqual(5, length(Data1)), {ok, 204, _} = request( post, - uri(["authorization", "sources", "built_in_database", "clientid"]), + uri(["authorization", "sources", "built_in_database", "rules", "clients"]), [ #{clientid => erlang:integer_to_binary(N), rules => []} || N <- lists:seq(1, 20) @@ -288,16 +300,16 @@ t_api(_) -> {ok, 200, Request10} = request( get, - uri(["authorization", "sources", "built_in_database", "clientid?limit=5"]), + uri(["authorization", "sources", "built_in_database", "rules", "clients?limit=5"]), [] ), - #{<<"data">> := Data2} = jsx:decode(Request10), + #{<<"data">> := Data2} = emqx_utils_json:decode(Request10), ?assertEqual(5, length(Data2)), {ok, 400, Msg1} = request( delete, - uri(["authorization", "sources", "built_in_database", "purge-all"]), + uri(["authorization", "sources", "built_in_database", "rules"]), [] ), ?assertMatch({match, _}, re:run(Msg1, "must\sbe\sdisabled\sbefore")), @@ -323,7 +335,7 @@ t_api(_) -> {ok, 204, _} = request( delete, - uri(["authorization", "sources", "built_in_database", "purge-all"]), + uri(["authorization", "sources", "built_in_database", "rules"]), [] ), ?assertEqual(0, emqx_authz_mnesia:record_count()), diff --git a/apps/emqx_authz/test/emqx_authz_api_settings_SUITE.erl b/apps/emqx_authz/test/emqx_authz_api_settings_SUITE.erl index 275b04e40..e3412e169 100644 --- a/apps/emqx_authz/test/emqx_authz_api_settings_SUITE.erl +++ b/apps/emqx_authz/test/emqx_authz_api_settings_SUITE.erl @@ -18,7 +18,7 @@ -compile(nowarn_export_all). -compile(export_all). --import(emqx_dashboard_api_test_helpers, [request/3, uri/1]). +-import(emqx_mgmt_api_test_util, [request/3, uri/1]). -include_lib("eunit/include/eunit.hrl"). -include_lib("common_test/include/ct.hrl"). @@ -30,7 +30,7 @@ groups() -> []. init_per_suite(Config) -> - ok = emqx_common_test_helpers:start_apps( + ok = emqx_mgmt_api_test_util:init_suite( [emqx_conf, emqx_authz, emqx_dashboard], fun set_special_configs/1 ), @@ -46,7 +46,7 @@ end_per_suite(_Config) -> } ), ok = stop_apps([emqx_resource]), - emqx_common_test_helpers:stop_apps([emqx_dashboard, emqx_authz, emqx_conf]), + emqx_mgmt_api_test_util:end_suite([emqx_authz, emqx_conf]), ok. set_special_configs(emqx_dashboard) -> @@ -76,7 +76,7 @@ t_api(_) -> {ok, 200, Result1} = request(put, uri(["authorization", "settings"]), Settings1), {ok, 200, Result1} = request(get, uri(["authorization", "settings"]), []), - ?assertEqual(Settings1, jsx:decode(Result1)), + ?assertEqual(Settings1, emqx_utils_json:decode(Result1)), Settings2 = #{ <<"no_match">> => <<"allow">>, @@ -90,7 +90,7 @@ t_api(_) -> {ok, 200, Result2} = request(put, uri(["authorization", "settings"]), Settings2), {ok, 200, Result2} = request(get, uri(["authorization", "settings"]), []), - ?assertEqual(Settings2, jsx:decode(Result2)), + ?assertEqual(Settings2, emqx_utils_json:decode(Result2)), ok. 
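The pagination meta in the mnesia rules API now carries a `hasnext` flag (and, for fuzzy searches, drops the exact count, as the second assertion in `t_api` above shows). A hypothetical pager built on that flag, in the same request/uri style as the suite:

```erlang
-import(emqx_mgmt_api_test_util, [request/3, uri/1]).

%% Hypothetical: collect all per-user rule records by following hasnext.
fetch_all_users(Page, Acc) ->
    Query = "users?page=" ++ integer_to_list(Page) ++ "&limit=100",
    {ok, 200, Body} = request(
        get,
        uri(["authorization", "sources", "built_in_database", "rules", Query]),
        []
    ),
    #{<<"data">> := Data, <<"meta">> := Meta} = emqx_utils_json:decode(Body),
    case Meta of
        #{<<"hasnext">> := true} -> fetch_all_users(Page + 1, Acc ++ Data);
        _ -> Acc ++ Data
    end.
```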
diff --git a/apps/emqx_authz/test/emqx_authz_api_sources_SUITE.erl b/apps/emqx_authz/test/emqx_authz_api_sources_SUITE.erl index 34638d0aa..7a7dbb7e9 100644 --- a/apps/emqx_authz/test/emqx_authz_api_sources_SUITE.erl +++ b/apps/emqx_authz/test/emqx_authz_api_sources_SUITE.erl @@ -18,10 +18,9 @@ -compile(nowarn_export_all). -compile(export_all). --import(emqx_dashboard_api_test_helpers, [request/3, uri/1]). +-import(emqx_mgmt_api_test_util, [request/3, uri/1]). -include_lib("eunit/include/eunit.hrl"). --include_lib("common_test/include/ct.hrl"). -include_lib("emqx/include/emqx_placeholder.hrl"). -define(MONGO_SINGLE_HOST, "mongo"). @@ -115,8 +114,8 @@ init_per_suite(Config) -> end ), - ok = emqx_common_test_helpers:start_apps( - [emqx_conf, emqx_authz, emqx_dashboard], + ok = emqx_mgmt_api_test_util:init_suite( + [emqx_conf, emqx_authz], fun set_special_configs/1 ), ok = start_apps([emqx_resource]), @@ -134,7 +133,7 @@ end_per_suite(_Config) -> %% resource and connector should be stop first, %% or authz_[mysql|pgsql|redis..]_SUITE would be failed ok = stop_apps([emqx_resource]), - emqx_common_test_helpers:stop_apps([emqx_dashboard, emqx_authz, emqx_conf]), + emqx_mgmt_api_test_util:end_suite([emqx_authz, emqx_conf]), meck:unload(emqx_resource), ok. @@ -149,8 +148,8 @@ set_special_configs(_App) -> ok. init_per_testcase(t_api, Config) -> - meck:new(emqx_misc, [non_strict, passthrough, no_history, no_link]), - meck:expect(emqx_misc, gen_id, fun() -> "fake" end), + meck:new(emqx_utils, [non_strict, passthrough, no_history, no_link]), + meck:expect(emqx_utils, gen_id, fun() -> "fake" end), meck:new(emqx, [non_strict, passthrough, no_history, no_link]), meck:expect( @@ -166,7 +165,7 @@ init_per_testcase(_, Config) -> Config. end_per_testcase(t_api, _Config) -> - meck:unload(emqx_misc), + meck:unload(emqx_utils), meck:unload(emqx), ok; end_per_testcase(_, _Config) -> @@ -183,7 +182,7 @@ t_api(_) -> {ok, 404, ErrResult} = request(get, uri(["authorization", "sources", "http"]), []), ?assertMatch( #{<<"code">> := <<"NOT_FOUND">>, <<"message">> := <<"Not found: http">>}, - jsx:decode(ErrResult) + emqx_utils_json:decode(ErrResult, [return_maps]) ), [ @@ -215,7 +214,10 @@ t_api(_) -> ?SOURCE1#{<<"enable">> := false} ), {ok, 200, Result3} = request(get, uri(["authorization", "sources", "http"]), []), - ?assertMatch(#{<<"type">> := <<"http">>, <<"enable">> := false}, jsx:decode(Result3)), + ?assertMatch( + #{<<"type">> := <<"http">>, <<"enable">> := false}, + emqx_utils_json:decode(Result3, [return_maps]) + ), Keyfile = emqx_common_test_helpers:app_path( emqx, @@ -252,7 +254,7 @@ t_api(_) -> <<"total">> := 0, <<"nomatch">> := 0 } - } = jiffy:decode(Status4, [return_maps]), + } = emqx_utils_json:decode(Status4, [return_maps]), ?assertMatch( #{ <<"type">> := <<"mongodb">>, @@ -264,7 +266,7 @@ t_api(_) -> <<"verify">> := <<"verify_none">> } }, - jsx:decode(Result4) + emqx_utils_json:decode(Result4, [return_maps]) ), {ok, Cacert} = file:read_file(Cacertfile), @@ -296,7 +298,7 @@ t_api(_) -> <<"verify">> := <<"verify_none">> } }, - jsx:decode(Result5) + emqx_utils_json:decode(Result5, [return_maps]) ), {ok, 200, Status5_1} = request(get, uri(["authorization", "sources", "mongodb", "status"]), []), @@ -307,7 +309,7 @@ t_api(_) -> <<"total">> := 0, <<"nomatch">> := 0 } - } = jiffy:decode(Status5_1, [return_maps]), + } = emqx_utils_json:decode(Status5_1, [return_maps]), #{ ssl := #{ @@ -332,6 +334,7 @@ t_api(_) -> uri(["authorization", "sources", "postgresql"]), ?SOURCE4#{<<"server">> := <<"fake">>} ), + {ok, 
204, _} = request( put, uri(["authorization", "sources", "redis"]), @@ -343,6 +346,19 @@ t_api(_) -> } ), + {ok, 400, TypeMismatch} = request( + put, + uri(["authorization", "sources", "file"]), + #{<<"type">> => <<"built_in_database">>, <<"enable">> => false} + ), + ?assertMatch( + #{ + <<"code">> := <<"BAD_REQUEST">>, + <<"message">> := <<"Type mismatch", _/binary>> + }, + emqx_utils_json:decode(TypeMismatch, [return_maps]) + ), + lists:foreach( fun(#{<<"type">> := Type}) -> {ok, 204, _} = request( @@ -357,6 +373,43 @@ t_api(_) -> ?assertEqual([], get_sources(Result6)), ?assertEqual([], emqx:get_config([authorization, sources])), + lists:foreach( + fun(#{<<"type">> := Type}) -> + {ok, 404, _} = request( + get, + uri(["authorization", "sources", binary_to_list(Type), "status"]), + [] + ), + {ok, 404, _} = request( + post, + uri(["authorization", "sources", binary_to_list(Type), "move"]), + #{<<"position">> => <<"front">>} + ), + {ok, 404, _} = request( + get, + uri(["authorization", "sources", binary_to_list(Type)]), + [] + ), + {ok, 404, _} = request( + delete, + uri(["authorization", "sources", binary_to_list(Type)]), + [] + ) + end, + Sources + ), + + {ok, 404, _TypeMismatch2} = request( + put, + uri(["authorization", "sources", "file"]), + #{<<"type">> => <<"built_in_database">>, <<"enable">> => false} + ), + {ok, 404, _} = request( + put, + uri(["authorization", "sources", "built_in_database"]), + #{<<"type">> => <<"built_in_database">>, <<"enable">> => false} + ), + {ok, 204, _} = request(post, uri(["authorization", "sources"]), ?SOURCE6), {ok, Client} = emqtt:start_link( @@ -368,7 +421,6 @@ t_api(_) -> ] ), emqtt:connect(Client), - timer:sleep(50), emqtt:publish( Client, @@ -378,17 +430,24 @@ t_api(_) -> [{qos, 1}] ), - {ok, 200, Status5} = request(get, uri(["authorization", "sources", "file", "status"]), []), - #{ - <<"metrics">> := #{ - <<"allow">> := 1, - <<"deny">> := 0, - <<"total">> := 1, - <<"nomatch">> := 0 - } - } = jiffy:decode(Status5, [return_maps]), + snabbkaffe:retry( + 10, + 3, + fun() -> + {ok, 200, Status5} = request( + get, uri(["authorization", "sources", "file", "status"]), [] + ), + #{ + <<"metrics">> := #{ + <<"allow">> := 1, + <<"deny">> := 0, + <<"total">> := 1, + <<"nomatch">> := 0 + } + } = emqx_utils_json:decode(Status5, [return_maps]) + end + ), - timer:sleep(50), emqtt:publish( Client, <<"t2">>, @@ -397,17 +456,24 @@ t_api(_) -> [{qos, 1}] ), - {ok, 200, Status6} = request(get, uri(["authorization", "sources", "file", "status"]), []), - #{ - <<"metrics">> := #{ - <<"allow">> := 2, - <<"deny">> := 0, - <<"total">> := 2, - <<"nomatch">> := 0 - } - } = jiffy:decode(Status6, [return_maps]), + snabbkaffe:retry( + 10, + 3, + fun() -> + {ok, 200, Status6} = request( + get, uri(["authorization", "sources", "file", "status"]), [] + ), + #{ + <<"metrics">> := #{ + <<"allow">> := 2, + <<"deny">> := 0, + <<"total">> := 2, + <<"nomatch">> := 0 + } + } = emqx_utils_json:decode(Status6, [return_maps]) + end + ), - timer:sleep(50), emqtt:publish( Client, <<"t3">>, @@ -416,20 +482,26 @@ t_api(_) -> [{qos, 1}] ), - timer:sleep(50), - {ok, 200, Status7} = request(get, uri(["authorization", "sources", "file", "status"]), []), - #{ - <<"metrics">> := #{ - <<"allow">> := 3, - <<"deny">> := 0, - <<"total">> := 3, - <<"nomatch">> := 0 - } - } = jiffy:decode(Status7, [return_maps]), - + snabbkaffe:retry( + 10, + 3, + fun() -> + {ok, 200, Status7} = request( + get, uri(["authorization", "sources", "file", "status"]), [] + ), + #{ + <<"metrics">> := #{ + <<"allow">> := 3, + 
<<"deny">> := 0, + <<"total">> := 3, + <<"nomatch">> := 0 + } + } = emqx_utils_json:decode(Status7, [return_maps]) + end + ), ok. -t_move_source(_) -> +t_source_move(_) -> {ok, _} = emqx_authz:update(replace, [?SOURCE1, ?SOURCE2, ?SOURCE3, ?SOURCE4, ?SOURCE5]), ?assertMatch( [ @@ -550,7 +622,7 @@ t_aggregate_metrics(_) -> ). get_sources(Result) -> - maps:get(<<"sources">>, jsx:decode(Result), []). + maps:get(<<"sources">>, emqx_utils_json:decode(Result, [return_maps])). data_dir() -> emqx:data_dir(). diff --git a/apps/emqx_authz/test/emqx_authz_file_SUITE.erl b/apps/emqx_authz/test/emqx_authz_file_SUITE.erl index 5b5d2618c..124fe904f 100644 --- a/apps/emqx_authz/test/emqx_authz_file_SUITE.erl +++ b/apps/emqx_authz/test/emqx_authz_file_SUITE.erl @@ -55,7 +55,7 @@ init_per_suite(Config) -> end_per_suite(_Config) -> ok = emqx_authz_test_lib:restore_authorizers(), - ok = emqx_common_test_helpers:stop_apps([emqx_authz]). + ok = emqx_common_test_helpers:stop_apps([emqx_conf, emqx_authz]). init_per_testcase(_TestCase, Config) -> ok = emqx_authz_test_lib:reset_authorizers(), diff --git a/apps/emqx_authz/test/emqx_authz_http_SUITE.erl b/apps/emqx_authz/test/emqx_authz_http_SUITE.erl index b95192cb7..702bf2756 100644 --- a/apps/emqx_authz/test/emqx_authz_http_SUITE.erl +++ b/apps/emqx_authz/test/emqx_authz_http_SUITE.erl @@ -23,6 +23,7 @@ -include_lib("eunit/include/eunit.hrl"). -include_lib("common_test/include/ct.hrl"). -include_lib("emqx/include/emqx_placeholder.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). -define(HTTP_PORT, 33333). -define(HTTP_PATH, "/authz/[...]"). @@ -51,7 +52,7 @@ init_per_suite(Config) -> end_per_suite(_Config) -> ok = emqx_authz_test_lib:restore_authorizers(), ok = stop_apps([emqx_resource, cowboy]), - ok = emqx_common_test_helpers:stop_apps([emqx_authz]). + ok = emqx_common_test_helpers:stop_apps([emqx_conf, emqx_authz]). set_special_configs(emqx_authz) -> ok = emqx_authz_test_lib:reset_authorizers(); @@ -64,7 +65,14 @@ init_per_testcase(_Case, Config) -> Config. end_per_testcase(_Case, _Config) -> - ok = emqx_authz_http_test_server:stop(). + try + ok = emqx_authz_http_test_server:stop() + catch + exit:noproc -> + ok + end, + snabbkaffe:stop(), + ok. %%------------------------------------------------------------------------------ %% Tests @@ -148,7 +156,39 @@ t_response_handling(_Config) -> ?assertEqual( deny, emqx_access_control:authorize(ClientInfo, publish, <<"t">>) - ). + ), + + %% the server cannot be reached; should skip to the next + %% authorizer in the chain. + ok = emqx_authz_http_test_server:stop(), + + ?check_trace( + ?assertEqual( + deny, + emqx_access_control:authorize(ClientInfo, publish, <<"t">>) + ), + fun(Trace) -> + ?assertMatch( + [ + #{ + ?snk_kind := authz_http_request_failure, + error := {recoverable_error, econnrefused} + } + ], + ?of_kind(authz_http_request_failure, Trace) + ), + ?assert( + ?strict_causality( + #{?snk_kind := authz_http_request_failure}, + #{?snk_kind := authz_non_superuser, result := nomatch}, + Trace + ) + ), + ok + end + ), + + ok. t_query_params(_Config) -> ok = setup_handler_and_config( @@ -159,7 +199,7 @@ t_query_params(_Config) -> peerhost := <<"127.0.0.1">>, proto_name := <<"MQTT">>, mountpoint := <<"MOUNTPOINT">>, - topic := <<"t">>, + topic := <<"t/1">>, action := <<"publish">> } = cowboy_req:match_qs( [ @@ -201,7 +241,7 @@ t_query_params(_Config) -> ?assertEqual( allow, - emqx_access_control:authorize(ClientInfo, publish, <<"t">>) + emqx_access_control:authorize(ClientInfo, publish, <<"t/1">>) ). 
t_path(_Config) -> @@ -209,13 +249,13 @@ t_path(_Config) -> fun(Req0, State) -> ?assertEqual( << - "/authz/users/" + "/authz/use%20rs/" "user%20name/" "client%20id/" "127.0.0.1/" "MQTT/" "MOUNTPOINT/" - "t/1/" + "t%2F1/" "publish" >>, cowboy_req:path(Req0) @@ -224,7 +264,7 @@ t_path(_Config) -> end, #{ <<"url">> => << - "http://127.0.0.1:33333/authz/users/" + "http://127.0.0.1:33333/authz/use%20rs/" "${username}/" "${clientid}/" "${peerhost}/" @@ -271,7 +311,7 @@ t_json_body(_Config) -> <<"topic">> := <<"t">>, <<"action">> := <<"publish">> }, - jiffy:decode(RawBody, [return_maps]) + emqx_utils_json:decode(RawBody, [return_maps]) ), {ok, ?AUTHZ_HTTP_RESP(allow, Req1), State} end, @@ -326,7 +366,7 @@ t_placeholder_and_body(_Config) -> <<"CN">> := ?PH_CERT_CN_NAME, <<"CS">> := ?PH_CERT_SUBJECT }, - jiffy:decode(PostVars, [return_maps]) + emqx_utils_json:decode(PostVars, [return_maps]) ), {ok, ?AUTHZ_HTTP_RESP(allow, Req1), State} end, @@ -378,7 +418,7 @@ t_no_value_for_placeholder(_Config) -> #{ <<"mountpoint">> := <<"[]">> }, - jiffy:decode(RawBody, [return_maps]) + emqx_utils_json:decode(RawBody, [return_maps]) ), {ok, ?AUTHZ_HTTP_RESP(allow, Req1), State} end, diff --git a/apps/emqx_authz/test/emqx_authz_mnesia_SUITE.erl b/apps/emqx_authz/test/emqx_authz_mnesia_SUITE.erl index d7b31b5b5..2b7fce309 100644 --- a/apps/emqx_authz/test/emqx_authz_mnesia_SUITE.erl +++ b/apps/emqx_authz/test/emqx_authz_mnesia_SUITE.erl @@ -36,7 +36,7 @@ init_per_suite(Config) -> end_per_suite(_Config) -> ok = emqx_authz_test_lib:restore_authorizers(), - ok = emqx_common_test_helpers:stop_apps([emqx_authz]). + ok = emqx_common_test_helpers:stop_apps([emqx_conf, emqx_authz]). init_per_testcase(_TestCase, Config) -> ok = emqx_authz_test_lib:reset_authorizers(), diff --git a/apps/emqx_authz/test/emqx_authz_mongodb_SUITE.erl b/apps/emqx_authz/test/emqx_authz_mongodb_SUITE.erl index c685e8237..9ffeacf45 100644 --- a/apps/emqx_authz/test/emqx_authz_mongodb_SUITE.erl +++ b/apps/emqx_authz/test/emqx_authz_mongodb_SUITE.erl @@ -50,7 +50,7 @@ init_per_suite(Config) -> end_per_suite(_Config) -> ok = emqx_authz_test_lib:restore_authorizers(), ok = stop_apps([emqx_resource]), - ok = emqx_common_test_helpers:stop_apps([emqx_authz]). + ok = emqx_common_test_helpers:stop_apps([emqx_conf, emqx_authz]). set_special_configs(emqx_authz) -> ok = emqx_authz_test_lib:reset_authorizers(); diff --git a/apps/emqx_authz/test/emqx_authz_mysql_SUITE.erl b/apps/emqx_authz/test/emqx_authz_mysql_SUITE.erl index 38c997f17..d276a2e1b 100644 --- a/apps/emqx_authz/test/emqx_authz_mysql_SUITE.erl +++ b/apps/emqx_authz/test/emqx_authz_mysql_SUITE.erl @@ -57,7 +57,7 @@ end_per_suite(_Config) -> ok = emqx_authz_test_lib:restore_authorizers(), ok = emqx_resource:remove_local(?MYSQL_RESOURCE), ok = stop_apps([emqx_resource]), - ok = emqx_common_test_helpers:stop_apps([emqx_authz]). + ok = emqx_common_test_helpers:stop_apps([emqx_conf, emqx_authz]). init_per_testcase(_TestCase, Config) -> ok = emqx_authz_test_lib:reset_authorizers(), @@ -321,13 +321,13 @@ raw_mysql_authz_config() -> }. q(Sql) -> - emqx_resource:query( + emqx_resource:simple_sync_query( ?MYSQL_RESOURCE, {sql, Sql} ). q(Sql, Params) -> - emqx_resource:query( + emqx_resource:simple_sync_query( ?MYSQL_RESOURCE, {sql, Sql, Params} ). 
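The authz suites' SQL helpers now call `emqx_resource:simple_sync_query/2` instead of `emqx_resource:query/2`, so test seeding no longer goes through the buffered query path. A minimal sketch of a helper in that style, using only the API visible in this diff (the resource id and function name are illustrative):

```erlang
%% Sketch, not part of the patch: a seeding helper in the style of q/2 above.
%% simple_sync_query/2 sends the request directly to the resource, so its
%% effect is observable as soon as the call returns -- no buffer worker in
%% between (assumption based on how these suites use it during setup).
-define(TEST_RESOURCE, <<"my_test_resource">>). %% hypothetical resource id

seed(Sql, Params) ->
    emqx_resource:simple_sync_query(?TEST_RESOURCE, {sql, Sql, Params}).
```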
diff --git a/apps/emqx_authz/test/emqx_authz_postgresql_SUITE.erl b/apps/emqx_authz/test/emqx_authz_postgresql_SUITE.erl index fbe17f92e..0ef21360c 100644 --- a/apps/emqx_authz/test/emqx_authz_postgresql_SUITE.erl +++ b/apps/emqx_authz/test/emqx_authz_postgresql_SUITE.erl @@ -57,7 +57,7 @@ end_per_suite(_Config) -> ok = emqx_authz_test_lib:restore_authorizers(), ok = emqx_resource:remove_local(?PGSQL_RESOURCE), ok = stop_apps([emqx_resource]), - ok = emqx_common_test_helpers:stop_apps([emqx_authz]). + ok = emqx_common_test_helpers:stop_apps([emqx_conf, emqx_authz]). init_per_testcase(_TestCase, Config) -> ok = emqx_authz_test_lib:reset_authorizers(), @@ -326,13 +326,13 @@ raw_pgsql_authz_config() -> }. q(Sql) -> - emqx_resource:query( + emqx_resource:simple_sync_query( ?PGSQL_RESOURCE, {query, Sql} ). insert(Sql, Params) -> - {ok, _} = emqx_resource:query( + {ok, _} = emqx_resource:simple_sync_query( ?PGSQL_RESOURCE, {query, Sql, Params} ), diff --git a/apps/emqx_authz/test/emqx_authz_redis_SUITE.erl b/apps/emqx_authz/test/emqx_authz_redis_SUITE.erl index ae734f20d..d68ea342e 100644 --- a/apps/emqx_authz/test/emqx_authz_redis_SUITE.erl +++ b/apps/emqx_authz/test/emqx_authz_redis_SUITE.erl @@ -58,7 +58,7 @@ end_per_suite(_Config) -> ok = emqx_authz_test_lib:restore_authorizers(), ok = emqx_resource:remove_local(?REDIS_RESOURCE), ok = stop_apps([emqx_resource]), - ok = emqx_common_test_helpers:stop_apps([emqx_authz]). + ok = emqx_common_test_helpers:stop_apps([emqx_conf, emqx_authz]). init_per_testcase(_TestCase, Config) -> ok = emqx_authz_test_lib:reset_authorizers(), @@ -188,8 +188,7 @@ t_create_invalid_config(_Config) -> ?assertMatch( {error, #{ kind := validation_error, - path := "authorization.sources.1", - discarded_errors_count := 0 + path := "authorization.sources.1.server" }}, emqx_authz:update(?CMD_REPLACE, [C]) ). @@ -263,7 +262,7 @@ raw_redis_authz_config() -> }. q(Command) -> - emqx_resource:query( + emqx_resource:simple_sync_query( ?REDIS_RESOURCE, {cmd, Command} ). diff --git a/apps/emqx_authz/test/emqx_authz_rule_SUITE.erl b/apps/emqx_authz/test/emqx_authz_rule_SUITE.erl index 77f8617ee..76e5677ce 100644 --- a/apps/emqx_authz/test/emqx_authz_rule_SUITE.erl +++ b/apps/emqx_authz/test/emqx_authz_rule_SUITE.erl @@ -35,6 +35,7 @@ ]}, publish, [?PH_S_USERNAME, ?PH_S_CLIENTID]} ). +-define(SOURCE6, {allow, {username, "test"}, publish, ["t/foo${username}boo"]}). all() -> emqx_common_test_helpers:all(?MODULE). @@ -80,7 +81,7 @@ t_compile(_) -> {{127, 0, 0, 1}, {127, 0, 0, 1}, 32}, {{192, 168, 1, 0}, {192, 168, 1, 255}, 24} ]}, - subscribe, [{pattern, [?PH_CLIENTID]}]}, + subscribe, [{pattern, [{var, {var, <<"clientid">>}}]}]}, emqx_authz_rule:compile(?SOURCE3) ), @@ -97,9 +98,18 @@ t_compile(_) -> {username, {re_pattern, _, _, _, _}}, {clientid, {re_pattern, _, _, _, _}} ]}, - publish, [{pattern, [?PH_USERNAME]}, {pattern, [?PH_CLIENTID]}]}, + publish, [ + {pattern, [{var, {var, <<"username">>}}]}, {pattern, [{var, {var, <<"clientid">>}}]} + ]}, emqx_authz_rule:compile(?SOURCE5) ), + + ?assertEqual( + {allow, {username, {eq, <<"test">>}}, publish, [ + {pattern, [{str, <<"t/foo">>}, {var, {var, <<"username">>}}, {str, <<"boo">>}]} + ]}, + emqx_authz_rule:compile(?SOURCE6) + ), ok. 
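The new `?SOURCE6` case pins down how topic templates compile: literal segments become `{str, _}` tokens and placeholders become `{var, _}` tokens inside a `pattern`. Restated as a stand-alone sketch (same data as the assertion above):

```erlang
%% The template "t/foo${username}boo" compiles into a token pattern that
%% emqx_authz_rule:match/4 fills in per-client at authorization time.
compiled_template_example() ->
    Rule = {allow, {username, "test"}, publish, ["t/foo${username}boo"]},
    {allow, {username, {eq, <<"test">>}}, publish, [
        {pattern, [
            {str, <<"t/foo">>},
            {var, {var, <<"username">>}},
            {str, <<"boo">>}
        ]}
    ]} = emqx_authz_rule:compile(Rule),
    ok.
```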
t_match(_) -> @@ -307,4 +317,24 @@ t_match(_) -> emqx_authz_rule:compile(?SOURCE5) ) ), + + ?assertEqual( + nomatch, + emqx_authz_rule:match( + ClientInfo1, + publish, + <<"t/foo${username}boo">>, + emqx_authz_rule:compile(?SOURCE6) + ) + ), + + ?assertEqual( + {matched, allow}, + emqx_authz_rule:match( + ClientInfo4, + publish, + <<"t/footestboo">>, + emqx_authz_rule:compile(?SOURCE6) + ) + ), ok. diff --git a/apps/emqx_authz/test/emqx_authz_schema_tests.erl b/apps/emqx_authz/test/emqx_authz_schema_tests.erl new file mode 100644 index 000000000..f7b2e3c10 --- /dev/null +++ b/apps/emqx_authz/test/emqx_authz_schema_tests.erl @@ -0,0 +1,116 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_authz_schema_tests). + +-include_lib("eunit/include/eunit.hrl"). + +bad_authz_type_test() -> + Txt = "[{type: foobar}]", + ?assertThrow( + [ + #{ + reason := "unknown_authz_type", + got := <<"foobar">> + } + ], + check(Txt) + ). + +bad_mongodb_type_test() -> + Txt = "[{type: mongodb, mongo_type: foobar}]", + ?assertThrow( + [ + #{ + reason := "unknown_mongo_type", + got := <<"foobar">> + } + ], + check(Txt) + ). + +missing_mongodb_type_test() -> + Txt = "[{type: mongodb}]", + ?assertThrow( + [ + #{ + reason := "unknown_mongo_type", + got := undefined + } + ], + check(Txt) + ). + +unknown_redis_type_test() -> + Txt = "[{type: redis, redis_type: foobar}]", + ?assertThrow( + [ + #{ + reason := "unknown_redis_type", + got := <<"foobar">> + } + ], + check(Txt) + ). + +missing_redis_type_test() -> + Txt = "[{type: redis}]", + ?assertThrow( + [ + #{ + reason := "unknown_redis_type", + got := undefined + } + ], + check(Txt) + ). + +unknown_http_method_test() -> + Txt = "[{type: http, method: getx}]", + ?assertThrow( + [ + #{ + reason := "unknown_http_method", + got := <<"getx">> + } + ], + check(Txt) + ). + +missing_http_method_test() -> + Txt = "[{type: http, methodx: get}]", + ?assertThrow( + [ + #{ + reason := "unknown_http_method", + got := undefined + } + ], + check(Txt) + ). + +check(Txt0) -> + Txt = ["sources: ", Txt0], + {ok, RawConf} = hocon:binary(Txt), + try + hocon_tconf:check_plain(schema(), RawConf, #{}) + catch + throw:{_Schema, Errors} -> + throw(Errors) + end. + +schema() -> + #{roots => emqx_authz_schema:fields("authorization")}. diff --git a/apps/emqx_auto_subscribe/README.md b/apps/emqx_auto_subscribe/README.md index 96d368715..981e4cb1f 100644 --- a/apps/emqx_auto_subscribe/README.md +++ b/apps/emqx_auto_subscribe/README.md @@ -1,9 +1,54 @@ -emqx_auto_subscribe -===== +# Auto Subscribe -An OTP application +This application can help clients automatically subscribe to topics compiled from user definitions when they connect, and the clients no longer need to send the MQTT `SUBSCRIBE ` request. 
-Build
------
+# How To Use

-    $ rebar3 compile
+Add the following configuration items to the `emqx.conf` file:
+
+```yaml
+auto_subscribe {
+    topics = [
+        {
+            topic = "c/${clientid}"
+        },
+        {
+            topic = "client/${clientid}/username/${username}/host/${host}/port/${port}"
+            qos = 1
+            rh = 0
+            rap = 0
+            nl = 0
+        }
+    ]
+}
+```
+
+This example defines two topic templates. Each is compiled into a concrete topic when the client connects, with placeholders such as `${clientid}` and `${port}` replaced by their actual values.
+
+# Configuration
+
+## Configuration Definition
+
+| Field          | Definition                   | Value Range                                                  | Default |
+| -------------- | ---------------------------- | ------------------------------------------------------------ | ------- |
+| auto_subscribe | Auto subscribe configuration | topics                                                        | topics  |
+| topics         | Subscription options         | Subscription configurations list. See `Subscription Option`  | []      |
+
+## Subscription Option
+
+| Field | Definition | Value Range | Default |
+|-------|------------|-------------|---------|
+| topic | Required. Topic template. | String, placeholders supported | No default value |
+| qos   | Optional. Subscription QoS. | 0, 1, or 2. Refer to the MQTT QoS definition | 0 |
+| rh    | Optional. MQTT 5.0 Retain Handling: whether retained messages are sent when the subscription is created. | 0: Do not send the retained message<br/>1: Send the retained message | 0 |
+| rap   | Optional. MQTT 5.0 Retain As Publish: whether forwarded messages keep the retain flag. | 0: Clear the retain flag<br/>1: Keep the retain flag | 0 |
+| nl    | Optional. MQTT 5.0 No Local: whether messages published by the client itself are forwarded back to it. | 0: Forwarded to self<br/>
1: Not forwarded to self | 0 | + +## Subscription Placeholders + +| Placeholder | Definition | +| ----------- | -------------------------------------- | +| ${clientid} | Client ID | +| ${username} | Client Username | +| ${ip} | Client TCP connection local IP address | +| ${port} | Client TCP connection local Port | diff --git a/apps/emqx_auto_subscribe/i18n/emqx_auto_subscribe_api_i18n.conf b/apps/emqx_auto_subscribe/i18n/emqx_auto_subscribe_api_i18n.conf deleted file mode 100644 index b8d043e9a..000000000 --- a/apps/emqx_auto_subscribe/i18n/emqx_auto_subscribe_api_i18n.conf +++ /dev/null @@ -1,23 +0,0 @@ -emqx_auto_subscribe_api { - list_auto_subscribe_api { - desc { - en: """Get auto subscribe topic list""" - zh: """获取自动订阅主题列表""" - } - } - - update_auto_subscribe_api { - desc { - en: """Update auto subscribe topic list""" - zh: """更新自动订阅主题列表""" - } - } - - update_auto_subscribe_api_response409 { - desc { - en: """Auto Subscribe topics max limit""" - zh: """超出自定订阅主题列表长度限制""" - } - } - -} diff --git a/apps/emqx_auto_subscribe/i18n/emqx_auto_subscribe_i18n.conf b/apps/emqx_auto_subscribe/i18n/emqx_auto_subscribe_i18n.conf deleted file mode 100644 index 57f744e8e..000000000 --- a/apps/emqx_auto_subscribe/i18n/emqx_auto_subscribe_i18n.conf +++ /dev/null @@ -1,85 +0,0 @@ -emqx_auto_subscribe_schema { - auto_subscribe { - desc { - en: """After the device logs in successfully, the subscription is automatically completed for the device through the pre-defined subscription representation. Supports the use of placeholders.""" - zh: """设备登录成功之后,通过预设的订阅表示符,为设备自动完成订阅。支持使用占位符。""" - } - lable { - en: """Auto Subscribe""" - zh: """自动订阅""" - } - } - - topic { - desc { - en: """Topic name, placeholders are supported. For example: client/${clientid}/username/${username}/host/${host}/port/${port} -Required field, and cannot be empty string""" - zh: """订阅标识符,支持使用占位符,例如 client/${clientid}/username/${username}/host/${host}/port/${port} -必填,且不可为空字符串""" - } - label { - en: """Topic""" - zh: """订阅标识符""" - } - } - - qos { - desc { - en: """Default value 0. Quality of service. -At most once (0) -At least once (1) -Exactly once (2)""" - zh: """缺省值为 0,服务质量, -QoS 0:消息最多传递一次,如果当时客户端不可用,则会丢失该消息。 -QoS 1:消息传递至少 1 次。 -QoS 2:消息仅传送一次。""" - } - label { - en: """Quality of Service""" - zh: """服务质量""" - } - } - - rh { - desc { - en: """Default value 0. This option is used to specify whether the server forwards the retained message to the client when establishing a subscription. -Retain Handling is equal to 0, as long as the client successfully subscribes, the server will send the retained message. -Retain Handling is equal to 1, if the client successfully subscribes and this subscription does not exist previously, the server sends the retained message. After all, sometimes the client re-initiate the subscription just to change the QoS, but it does not mean that it wants to receive the reserved messages again. -Retain Handling is equal to 2, even if the client successfully subscribes, the server does not send the retained message.""" - zh: """指定订阅建立时服务端是否向客户端发送保留消息, -可选值 0:只要客户端订阅成功,服务端就发送保留消息。 -可选值 1:客户端订阅成功且该订阅此前不存在,服务端才发送保留消息。毕竟有些时候客户端重新发起订阅可能只是为了改变一下 QoS,并不意味着它想再次接收保留消息。 -可选值 2:即便客户订阅成功,服务端也不会发送保留消息。""" - } - label { - en: """Retain Handling""" - zh: """Retain Handling""" - } - } - - rap { - desc { - en: """Default value 0. This option is used to specify whether the server retains the RETAIN mark when forwarding messages to the client, and this option does not affect the RETAIN mark in the retained message. 
Therefore, when the option Retain As Publish is set to 0, the client will directly distinguish whether this is a normal forwarded message or a retained message according to the RETAIN mark in the message, instead of judging whether this message is the first received after subscribing(the forwarded message may be sent before the retained message, which depends on the specific implementation of different brokers).""" - zh: """缺省值为 0,这一选项用来指定服务端向客户端转发消息时是否要保留其中的 RETAIN 标识,注意这一选项不会影响保留消息中的 RETAIN 标识。因此当 Retain As Publish 选项被设置为 0 时,客户端直接依靠消息中的 RETAIN 标识来区分这是一个正常的转发消息还是一个保留消息,而不是去判断消息是否是自己订阅后收到的第一个消息(转发消息甚至可能会先于保留消息被发送,视不同 Broker 的具体实现而定)。""" - } - label { - en: """Retain As Publish""" - zh: """Retain As Publish""" - } - } - - nl { - desc { - en: """Default value 0. -MQTT v3.1.1: if you subscribe to the topic published by yourself, you will receive all messages that you published. -MQTT v5: if you set this option as 1 when subscribing, the server will not forward the message you published to you.""" - zh: """缺省值为0, -MQTT v3.1.1:如果设备订阅了自己发布消息的主题,那么将收到自己发布的所有消息。 -MQTT v5:如果设备在订阅时将此选项设置为 1,那么服务端将不会向设备转发自己发布的消息""" - } - label { - en: """No Local""" - zh: """No Local""" - } - } -} diff --git a/apps/emqx_auto_subscribe/rebar.config b/apps/emqx_auto_subscribe/rebar.config index 33e077f50..a19783033 100644 --- a/apps/emqx_auto_subscribe/rebar.config +++ b/apps/emqx_auto_subscribe/rebar.config @@ -1,7 +1,10 @@ %% -*- mode: erlang -*- {erl_opts, [debug_info]}. -{deps, [{emqx, {path, "../emqx"}}]}. +{deps, [ + {emqx, {path, "../emqx"}}, + {emqx_utils, {path, "../emqx_utils"}} +]}. {shell, [ {apps, [emqx_auto_subscribe]} diff --git a/apps/emqx_auto_subscribe/src/emqx_auto_subscribe.app.src b/apps/emqx_auto_subscribe/src/emqx_auto_subscribe.app.src index 1c5627a1f..d6f6f4058 100644 --- a/apps/emqx_auto_subscribe/src/emqx_auto_subscribe.app.src +++ b/apps/emqx_auto_subscribe/src/emqx_auto_subscribe.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_auto_subscribe, [ - {description, "An OTP application"}, - {vsn, "0.1.2"}, + {description, "Auto subscribe Application"}, + {vsn, "0.1.4"}, {registered, []}, {mod, {emqx_auto_subscribe_app, []}}, {applications, [ diff --git a/apps/emqx_auto_subscribe/src/emqx_auto_subscribe.erl b/apps/emqx_auto_subscribe/src/emqx_auto_subscribe.erl index 878fc2ad7..7453eabdb 100644 --- a/apps/emqx_auto_subscribe/src/emqx_auto_subscribe.erl +++ b/apps/emqx_auto_subscribe/src/emqx_auto_subscribe.erl @@ -51,8 +51,21 @@ max_limit() -> list() -> format(emqx_conf:get([auto_subscribe, topics], [])). -update(Topics) -> - update_(Topics). +update(Topics) when length(Topics) =< ?MAX_AUTO_SUBSCRIBE -> + case + emqx_conf:update( + [auto_subscribe, topics], + Topics, + #{rawconf_with_defaults => true, override_to => cluster} + ) + of + {ok, #{raw_config := NewTopics}} -> + {ok, NewTopics}; + {error, Reason} -> + {error, Reason} + end; +update(_Topics) -> + {error, quota_exceeded}. post_config_update(_KeyPath, _Req, NewTopics, _OldConf, _AppEnvs) -> Config = emqx_conf:get([auto_subscribe], #{}), @@ -95,22 +108,6 @@ format(Rule = #{topic := Topic}) when is_map(Rule) -> nl => maps:get(nl, Rule, 0) }. -update_(Topics) when length(Topics) =< ?MAX_AUTO_SUBSCRIBE -> - case - emqx_conf:update( - [auto_subscribe, topics], - Topics, - #{rawconf_with_defaults => true, override_to => cluster} - ) - of - {ok, #{raw_config := NewTopics}} -> - {ok, NewTopics}; - {error, Reason} -> - {error, Reason} - end; -update_(_Topics) -> - {error, quota_exceeded}. 
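`update/1` now carries the quota guard directly instead of delegating to the removed `update_/1`; the behaviour is unchanged. A usage sketch, assuming the application is running and that the `?MAX_AUTO_SUBSCRIBE` cap is smaller than the oversized list used here:

```erlang
%% Sketch: within the cap the raw topic list is persisted and returned;
%% over the cap the call is rejected before touching the config.
quota_example() ->
    {ok, _Topics} = emqx_auto_subscribe:update([#{<<"topic">> => <<"c/${clientid}">>}]),
    TooMany = [#{<<"topic">> => integer_to_binary(N)} || N <- lists:seq(1, 1000)],
    {error, quota_exceeded} = emqx_auto_subscribe:update(TooMany).
```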
- update_hook() -> update_hook(emqx_conf:get([auto_subscribe], #{})). diff --git a/apps/emqx_auto_subscribe/src/emqx_auto_subscribe_api.erl b/apps/emqx_auto_subscribe/src/emqx_auto_subscribe_api.erl index f30482d4c..678c8e9b7 100644 --- a/apps/emqx_auto_subscribe/src/emqx_auto_subscribe_api.erl +++ b/apps/emqx_auto_subscribe/src/emqx_auto_subscribe_api.erl @@ -34,7 +34,7 @@ -include_lib("emqx/include/emqx_placeholder.hrl"). api_spec() -> - emqx_dashboard_swagger:spec(?MODULE). + emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true}). paths() -> ["/mqtt/auto_subscribe"]. @@ -46,15 +46,15 @@ schema("/mqtt/auto_subscribe") -> description => ?DESC(list_auto_subscribe_api), tags => [<<"Auto Subscribe">>], responses => #{ - 200 => hoconsc:ref(emqx_auto_subscribe_schema, "auto_subscribe") + 200 => topics() } }, put => #{ description => ?DESC(update_auto_subscribe_api), tags => [<<"Auto Subscribe">>], - 'requestBody' => hoconsc:ref(emqx_auto_subscribe_schema, "auto_subscribe"), + 'requestBody' => topics(), responses => #{ - 200 => hoconsc:ref(emqx_auto_subscribe_schema, "auto_subscribe"), + 200 => topics(), 409 => emqx_dashboard_swagger:error_codes( [?EXCEED_LIMIT], ?DESC(update_auto_subscribe_api_response409) @@ -63,14 +63,17 @@ schema("/mqtt/auto_subscribe") -> } }. +topics() -> + Fields = emqx_auto_subscribe_schema:fields("auto_subscribe"), + {topics, Topics} = lists:keyfind(topics, 1, Fields), + Topics. + %%%============================================================================================== %% api apply auto_subscribe(get, _) -> {200, emqx_auto_subscribe:list()}; -auto_subscribe(put, #{body := #{}}) -> - {400, #{code => ?BAD_REQUEST, message => <<"Request body required">>}}; -auto_subscribe(put, #{body := Params}) -> - case emqx_auto_subscribe:update(Params) of +auto_subscribe(put, #{body := Topics}) when is_list(Topics) -> + case emqx_auto_subscribe:update(Topics) of {error, quota_exceeded} -> Message = list_to_binary( io_lib:format( diff --git a/apps/emqx_auto_subscribe/src/emqx_auto_subscribe_schema.erl b/apps/emqx_auto_subscribe/src/emqx_auto_subscribe_schema.erl index a01e17c1f..98f32c8a4 100644 --- a/apps/emqx_auto_subscribe/src/emqx_auto_subscribe_schema.erl +++ b/apps/emqx_auto_subscribe/src/emqx_auto_subscribe_schema.erl @@ -31,14 +31,17 @@ namespace() -> "auto_subscribe". roots() -> - ["auto_subscribe"]. + [{"auto_subscribe", ?HOCON(?R_REF("auto_subscribe"), #{importance => ?IMPORTANCE_HIDDEN})}]. fields("auto_subscribe") -> [ {topics, ?HOCON( ?ARRAY(?R_REF("topic")), - #{desc => ?DESC(auto_subscribe), default => []} + #{ + desc => ?DESC(auto_subscribe), + default => [] + } )} ]; fields("topic") -> diff --git a/apps/emqx_auto_subscribe/test/emqx_auto_subscribe_SUITE.erl b/apps/emqx_auto_subscribe/test/emqx_auto_subscribe_SUITE.erl index 36c4e708e..9d8d47bf2 100644 --- a/apps/emqx_auto_subscribe/test/emqx_auto_subscribe_SUITE.erl +++ b/apps/emqx_auto_subscribe/test/emqx_auto_subscribe_SUITE.erl @@ -93,9 +93,8 @@ init_per_suite(Config) -> " }" >> ), - emqx_common_test_helpers:start_apps( - [emqx_conf, emqx_dashboard, ?APP], - fun set_special_configs/1 + emqx_mgmt_api_test_util:init_suite( + [emqx_conf, ?APP] ), Config. @@ -111,12 +110,6 @@ end_per_testcase(t_get_basic_usage_info, _Config) -> end_per_testcase(_TestCase, _Config) -> ok. -set_special_configs(emqx_dashboard) -> - emqx_dashboard_api_test_helpers:set_default_config(), - ok; -set_special_configs(_) -> - ok. 
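This deletion is part of a broader move in the changeset: suites that previously started `emqx_dashboard` and set its config by hand now delegate to `emqx_mgmt_api_test_util`. The resulting skeleton, sketched with a placeholder app name:

```erlang
%% Sketch of the setup/teardown pairing these suites converge on.
%% my_app is a placeholder; init_suite/1 and end_suite/1 are the calls
%% visible elsewhere in this diff.
init_per_suite(Config) ->
    emqx_mgmt_api_test_util:init_suite([emqx_conf, my_app]),
    Config.

end_per_suite(_Config) ->
    emqx_mgmt_api_test_util:end_suite([emqx_conf, my_app]).
```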
- topic_config(T) -> #{ topic => T, @@ -132,7 +125,7 @@ end_per_suite(_) -> application:unload(?APP), meck:unload(emqx_resource), meck:unload(emqx_schema), - emqx_common_test_helpers:stop_apps([emqx_dashboard, emqx_conf, ?APP]). + emqx_mgmt_api_test_util:end_suite([emqx_conf, ?APP]). t_auto_subscribe(_) -> emqx_auto_subscribe:update([#{<<"topic">> => Topic} || Topic <- ?TOPICS]), @@ -148,9 +141,35 @@ t_update(_) -> Auth = emqx_mgmt_api_test_util:auth_header_(), Body = [#{topic => ?TOPIC_S}], {ok, Response} = emqx_mgmt_api_test_util:request_api(put, Path, "", Auth, Body), - ResponseMap = emqx_json:decode(Response, [return_maps]), + ResponseMap = emqx_utils_json:decode(Response, [return_maps]), ?assertEqual(1, erlang:length(ResponseMap)), + BadBody1 = #{topic => ?TOPIC_S}, + ?assertMatch( + {error, {"HTTP/1.1", 400, "Bad Request"}}, + emqx_mgmt_api_test_util:request_api(put, Path, "", Auth, BadBody1) + ), + BadBody2 = [#{topic => ?TOPIC_S, qos => 3}], + ?assertMatch( + {error, {"HTTP/1.1", 400, "Bad Request"}}, + emqx_mgmt_api_test_util:request_api(put, Path, "", Auth, BadBody2) + ), + BadBody3 = [#{topic => ?TOPIC_S, rh => 10}], + ?assertMatch( + {error, {"HTTP/1.1", 400, "Bad Request"}}, + emqx_mgmt_api_test_util:request_api(put, Path, "", Auth, BadBody3) + ), + BadBody4 = [#{topic => ?TOPIC_S, rap => -1}], + ?assertMatch( + {error, {"HTTP/1.1", 400, "Bad Request"}}, + emqx_mgmt_api_test_util:request_api(put, Path, "", Auth, BadBody4) + ), + BadBody5 = [#{topic => ?TOPIC_S, nl => -1}], + ?assertMatch( + {error, {"HTTP/1.1", 400, "Bad Request"}}, + emqx_mgmt_api_test_util:request_api(put, Path, "", Auth, BadBody5) + ), + {ok, Client} = emqtt:start_link(#{username => ?CLIENT_USERNAME, clientid => ?CLIENT_ID}), {ok, _} = emqtt:connect(Client), timer:sleep(100), @@ -158,7 +177,7 @@ t_update(_) -> emqtt:disconnect(Client), {ok, GETResponse} = emqx_mgmt_api_test_util:request_api(get, Path), - GETResponseMap = emqx_json:decode(GETResponse, [return_maps]), + GETResponseMap = emqx_utils_json:decode(GETResponse, [return_maps]), ?assertEqual(1, erlang:length(GETResponseMap)), ok. @@ -185,7 +204,7 @@ check_subs(Count) -> check_subs([], []) -> ok; -check_subs([{{_, Topic}, #{subid := ?CLIENT_ID}} | Subs], List) -> +check_subs([{{Topic, _}, #{subid := ?CLIENT_ID}} | Subs], List) -> check_subs(Subs, lists:delete(Topic, List)); check_subs([_ | Subs], List) -> check_subs(Subs, List). diff --git a/apps/emqx_bridge/i18n/emqx_bridge_api.conf b/apps/emqx_bridge/i18n/emqx_bridge_api.conf deleted file mode 100644 index e8bb2403a..000000000 --- a/apps/emqx_bridge/i18n/emqx_bridge_api.conf +++ /dev/null @@ -1,137 +0,0 @@ -emqx_bridge_api { - - desc_param_path_operation_cluster { - desc { - en: """Operations can be one of: enable, disable, start, stop, restart""" - zh: """集群可用操作:启用、禁用、启动、停止、重新启动""" - } - label: { - en: "Cluster Operation" - zh: "集群可用操作" - } - } - - desc_param_path_operation_on_node { - desc { - en: """Operations can be one of: stop, restart""" - zh: """节点可用操作:停止、重新启动""" - } - label: { - en: "Node Operation " - zh: "节点可用操作" - } - } - - desc_param_path_node { - desc { - en: """The node name, e.g. emqx@127.0.0.1""" - zh: """节点名,比如 emqx@127.0.0.1""" - } - label: { - en: "The node name" - zh: "节点名" - } - } - - desc_param_path_id { - desc { - en: """The bridge Id. 
Must be of format {type}:{name}""" - zh: """Bridge ID , 格式为 {type}:{name}""" - } - label: { - en: "Bridge ID" - zh: "Bridge ID" - } - } - - desc_api1 { - desc { - en: """List all created bridges""" - zh: """列出所有 Birdge """ - } - label: { - en: "List All Bridges" - zh: "列出所有 Bridge" - } - } - - desc_api2 { - desc { - en: """Create a new bridge by type and name""" - zh: """通过类型和名字创建 Bridge""" - } - label: { - en: "Create Bridge" - zh: "创建 Bridge" - } - } - - desc_api3 { - desc { - en: """Get a bridge by Id""" - zh: """通过 ID 获取 Bridge""" - } - label: { - en: "Get Bridge" - zh: "获取 Bridge" - } - } - - desc_api4 { - desc { - en: """Update a bridge by Id""" - zh: """通过 ID 更新 Bridge""" - } - label: { - en: "Update Bridge" - zh: "更新 Bridge" - } - } - - desc_api5 { - desc { - en: """Delete a bridge by Id""" - zh: """通过 ID 删除 Bridge""" - } - label: { - en: "Delete Bridge" - zh: "删除 Bridge" - } - } - - desc_api6 { - desc { - en: """Reset a bridge metrics by Id""" - zh: """通过 ID 重置 Bridge 的计数""" - } - label: { - en: "Reset Bridge Metrics" - zh: "重置 Bridge 计数" - } - } - - desc_api7 { - desc { - en: """Enable/Disable/Stop/Restart bridges on all nodes in the cluster.""" - zh: """在集群中的所有节点上启用/禁用/停止/重新启动 Bridge。""" - } - label: { - en: "Cluster Bridge Operate" - zh: "集群 Bridge 操作" - } - } - - desc_api8 { - desc { - en: """Stop/Restart bridges on a specific node. - NOTE: It's not allowed to disable/enable bridges on a single node.""" - zh: """在某个节点上停止/重新启动 Bridge。 -NOTE:不允许在单节点上启用/禁用 Bridge""" - } - label: { - en: "Node Bridge Operate" - zh: "单节点 Bridge 操作" - } - } - -} diff --git a/apps/emqx_bridge/i18n/emqx_bridge_mqtt_schema.conf b/apps/emqx_bridge/i18n/emqx_bridge_mqtt_schema.conf deleted file mode 100644 index b935b360c..000000000 --- a/apps/emqx_bridge/i18n/emqx_bridge_mqtt_schema.conf +++ /dev/null @@ -1,34 +0,0 @@ -emqx_bridge_mqtt_schema { - config { - desc { - en: """The config for MQTT Bridges.""" - zh: """MQTT Bridge 的配置。""" - } - label: { - en: "Config" - zh: "配置" - } - } - desc_type { - desc { - en: """The bridge type.""" - zh: """Bridge 的类型""" - } - label: { - en: "Bridge Type" - zh: "Bridge 类型" - } - } - - desc_name { - desc { - en: """Bridge name, used as a human-readable description of the bridge.""" - zh: """Bridge 名字,Bridge 的可读描述""" - } - label: { - en: "Bridge Name" - zh: "Bridge 名字" - } - } - -} diff --git a/apps/emqx_bridge/i18n/emqx_bridge_schema.conf b/apps/emqx_bridge/i18n/emqx_bridge_schema.conf deleted file mode 100644 index b62a2ee68..000000000 --- a/apps/emqx_bridge/i18n/emqx_bridge_schema.conf +++ /dev/null @@ -1,316 +0,0 @@ -emqx_bridge_schema { - - desc_enable { - desc { - en: """Enable or disable this bridge""" - zh: """启用/禁用 Bridge""" - } - label: { - en: "Enable Or Disable Bridge" - zh: "启用/禁用 Bridge" - } - } - - desc_metrics { - desc { - en: """The metrics of the bridge""" - zh: """Bridge 的计数""" - } - label: { - en: "Bridge Metrics" - zh: "Bridge 计数" - } - } - - desc_node_metrics { - desc { - en: """The metrics of the bridge for each node""" - zh: """每个节点的 Bridge 计数""" - } - label: { - en: "Each Node Bridge Metircs" - zh: "每个节点的 Bridge 计数" - } - } - - desc_status { - desc { - en: """The status of the bridge""" - zh: """Bridge 的状态""" - } - label: { - en: "Bridge Status" - zh: "Bridge 状态" - } - } - - desc_node_status { - desc { - en: """The status of the bridge for each node""" - zh: """每个节点的 Bridge 状态""" - } - label: { - en: "Node Bridge Status" - zh: "每个节点的 Bridge 状态" - } - } - - bridges_webhook { - desc { - en: """WebHook to an HTTP server.""" - zh: """转发消息到 HTTP 服务器的 
WebHook""" - } - label: { - en: "WebHook" - zh: "WebHook" - } - } - - - bridges_mqtt { - desc { - en: """MQTT bridges to/from another MQTT broker""" - zh: """桥接到另一个 MQTT Broker 的 MQTT Bridge""" - } - label: { - en: "MQTT Bridge" - zh: "MQTT Bridge" - } - } - - metric_batching { - desc { - en: """Count of messages that are currently accumulated in memory waiting for sending in one batch.""" - zh: """当前积压在内存里,等待批量发送的消息个数""" - } - label: { - en: "Batched" - zh: "等待批量发送" - } - } - - metric_dropped { - desc { - en: """Count of messages dropped.""" - zh: """被丢弃的消息个数。""" - } - label: { - en: "Dropped" - zh: "丢弃" - } - } - - metric_dropped_other { - desc { - en: """Count of messages dropped due to other reasons.""" - zh: """因为其他原因被丢弃的消息个数。""" - } - label: { - en: "Dropped Other" - zh: "其他丢弃" - } - } - metric_dropped_queue_full { - desc { - en: """Count of messages dropped due to the queue is full.""" - zh: """因为队列已满被丢弃的消息个数。""" - } - label: { - en: "Dropped Queue Full" - zh: "队列已满被丢弃" - } - } - metric_dropped_queue_not_enabled { - desc { - en: """Count of messages dropped due to the queue is not enabled.""" - zh: """因为队列未启用被丢弃的消息个数。""" - } - label: { - en: "Dropped Queue Disabled" - zh: "队列未启用被丢弃" - } - } - metric_dropped_resource_not_found { - desc { - en: """Count of messages dropped due to the resource is not found.""" - zh: """因为资源不存在被丢弃的消息个数。""" - } - label: { - en: "Dropped Resource NotFound" - zh: "资源不存在被丢弃" - } - } - metric_dropped_resource_stopped { - desc { - en: """Count of messages dropped due to the resource is stopped.""" - zh: """因为资源已停用被丢弃的消息个数。""" - } - label: { - en: "Dropped Resource Stopped" - zh: "资源停用被丢弃" - } - } - metric_matched { - desc { - en: """Count of this bridge is matched and queried.""" - zh: """Bridge 被匹配到(被请求)的次数。""" - } - label: { - en: "Matched" - zh: "匹配次数" - } - } - - metric_queuing { - desc { - en: """Count of messages that are currently queuing.""" - zh: """当前被缓存到磁盘队列的消息个数。""" - } - label: { - en: "Queued" - zh: "被缓存" - } - } - metric_retried { - desc { - en: """Times of retried.""" - zh: """重试的次数。""" - } - label: { - en: "Retried" - zh: "已重试" - } - } - - metric_sent_failed { - desc { - en: """Count of messages that sent failed.""" - zh: """发送失败的消息个数。""" - } - label: { - en: "Sent Failed" - zh: "发送失败" - } - } - - metric_sent_inflight { - desc { - en: """Count of messages that were sent asynchronously but ACKs are not yet received.""" - zh: """已异步地发送但没有收到 ACK 的消息个数。""" - } - label: { - en: "Sent Inflight" - zh: "已发送未确认" - } - } - metric_sent_success { - desc { - en: """Count of messages that sent successfully.""" - zh: """已经发送成功的消息个数。""" - } - label: { - en: "Sent Success" - zh: "发送成功" - } - } - - metric_rate { - desc { - en: """The rate of matched, times/second""" - zh: """执行操作的速率,次/秒""" - } - label: { - en: "Rate" - zh: "速率" - } - } - - metric_rate_max { - desc { - en: """The max rate of matched, times/second""" - zh: """执行操作的最大速率,次/秒""" - } - label: { - en: "Max Rate Of Matched" - zh: "执行操作的最大速率" - } - } - - metric_rate_last5m { - desc { - en: """The average rate of matched in the last 5 minutes, times/second""" - zh: """5 分钟平均速率,次/秒""" - } - label: { - en: "Last 5 Minutes Rate" - zh: "5 分钟平均速率" - } - } - - metric_received { - desc { - en: """Count of messages that is received from the remote system.""" - zh: """从远程系统收到的消息个数。""" - } - label: { - en: "Received" - zh: "已接收" - } - } - - desc_bridges { - desc { - en: """Configuration for MQTT bridges.""" - zh: """MQTT Bridge 配置""" - } - label: { - en: "MQTT Bridge Configuration" - zh: "MQTT Bridge 配置" - } - } 
-
-    desc_metrics {
-        desc {
-            en: """Bridge metrics."""
-            zh: """Bridge 计数"""
-        }
-        label: {
-            en: "Bridge Metrics"
-            zh: "Bridge 计数"
-        }
-    }
-
-    desc_node_metrics {
-        desc {
-            en: """Node metrics."""
-            zh: """节点的计数器"""
-        }
-        label: {
-            en: "Node Metrics"
-            zh: "节点的计数器"
-        }
-    }
-
-    desc_node_status {
-        desc {
-            en: """Node status."""
-            zh: """节点的状态"""
-        }
-        label: {
-            en: "Node Status"
-            zh: "节点的状态"
-        }
-    }
-
-    desc_node_name {
-        desc {
-            en: """The node name."""
-            zh: """节点的名字"""
-        }
-        label: {
-            en: "Node Name"
-            zh: "节点名字"
-        }
-    }
-
-}
diff --git a/apps/emqx_bridge/i18n/emqx_bridge_webhook_schema.conf b/apps/emqx_bridge/i18n/emqx_bridge_webhook_schema.conf
deleted file mode 100644
index d9d2d0c40..000000000
--- a/apps/emqx_bridge/i18n/emqx_bridge_webhook_schema.conf
+++ /dev/null
@@ -1,163 +0,0 @@
-emqx_bridge_webhook_schema {
-
-    config_enable {
-        desc {
-            en: """Enable or disable this bridge"""
-            zh: """启用/禁用 Bridge"""
-        }
-        label: {
-            en: "Enable Or Disable Bridge"
-            zh: "启用/禁用 Bridge"
-        }
-    }
-
-    config_url {
-        desc {
-            en: """
-The URL of the HTTP Bridge.<br/>
-Template with variables is allowed in the path, but variables cannot be used in the scheme, host,
-or port part.<br/>
-For example, http://localhost:9901/${topic} is allowed, but
-http://${host}:9901/message or http://localhost:${port}/message
-is not allowed.
-"""
-            zh: """
-HTTP Bridge 的 URL。<br/>
-路径中允许使用带变量的模板,但是 host, port 不允许使用变量模板。<br/>
-例如, http://localhost:9901/${topic} 是允许的,
-但是 http://${host}:9901/message
-或 http://localhost:${port}/message
-不允许。
-"""
-        }
-        label: {
-            en: "HTTP Bridge"
-            zh: "HTTP Bridge"
-        }
-    }
-
-    config_local_topic {
-        desc {
-            en: """
-The MQTT topic filter to be forwarded to the HTTP server. All MQTT 'PUBLISH' messages with the topic
-matching the local_topic will be forwarded.<br/>
-NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also local_topic is
-configured, then both the data got from the rule and the MQTT messages that match local_topic
-will be forwarded.
-"""
-            zh: """
-发送到 'local_topic' 的消息都会转发到 HTTP 服务器。<br/>
-注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发到 HTTP 服务器。
-"""
-        }
-        label: {
-            en: "Local Topic"
-            zh: "本地 Topic"
-        }
-    }
-
-    config_method {
-        desc {
-            en: """
-The method of the HTTP request. All the available methods are: post, put, get, delete.<br/>
-Template with variables is allowed.<br/>
-"""
-            zh: """
-HTTP 请求的方法。 所有可用的方法包括:post、put、get、delete。<br/>
-允许使用带有变量的模板。<br/>
-"""
-        }
-        label: {
-            en: "HTTP Method"
-            zh: "HTTP 请求方法"
-        }
-    }
-
-    config_headers {
-        desc {
-            en: """
-The headers of the HTTP request.<br/>
-Template with variables is allowed.
-"""
-            zh: """
-HTTP 请求的标头。<br/>
-允许使用带有变量的模板。
-"""
-        }
-        label: {
-            en: "HTTP Header"
-            zh: "HTTP 请求标头"
-        }
-    }
-
-    config_body {
-        desc {
-            en: """
-The body of the HTTP request.<br/>
-Template with variables is allowed.
-"""
-            zh: """
-HTTP 请求的正文。<br/>
-允许使用带有变量的模板。""" - } - label: { - en: "HTTP Body" - zh: "HTTP 请求正文" - } - } - - config_request_timeout { - desc { - en: """HTTP request timeout.""" - zh: """HTTP 请求超时""" - } - label: { - en: "HTTP Request Timeout" - zh: "HTTP 请求超时" - } - } - - config_max_retries { - desc { - en: """HTTP request max retry times if failed.""" - zh: """HTTP 请求失败最大重试次数""" - } - label: { - en: "HTTP Request Max Retries" - zh: "HTTP 请求重试次数" - } - } - - desc_type { - desc { - en: """The Bridge Type""" - zh: """Bridge 类型""" - } - label: { - en: "Bridge Type" - zh: "Bridge 类型" - } - } - - desc_name { - desc { - en: """Bridge name, used as a human-readable description of the bridge.""" - zh: """Bridge 名字,Bridge 的可读描述""" - } - label: { - en: "Bridge Name" - zh: "Bridge 名字" - } - } - - desc_config { - desc { - en: """Configuration for an HTTP bridge.""" - zh: """HTTP Bridge 配置""" - } - label: { - en: "HTTP Bridge Configuration" - zh: "HTTP Bridge 配置" - } - } - -} diff --git a/apps/emqx_bridge/include/emqx_bridge.hrl b/apps/emqx_bridge/include/emqx_bridge.hrl index ff639066f..81feef893 100644 --- a/apps/emqx_bridge/include/emqx_bridge.hrl +++ b/apps/emqx_bridge/include/emqx_bridge.hrl @@ -21,16 +21,16 @@ ). -define(METRICS( - Batched, Dropped, DroppedOther, + DroppedExpired, DroppedQueueFull, - DroppedQueueNotEnabled, DroppedResourceNotFound, DroppedResourceStopped, Matched, Queued, Retried, + LateReply, SentFailed, SentInflight, SentSucc, @@ -40,16 +40,16 @@ Rcvd ), #{ - 'batching' => Batched, 'dropped' => Dropped, 'dropped.other' => DroppedOther, + 'dropped.expired' => DroppedExpired, 'dropped.queue_full' => DroppedQueueFull, - 'dropped.queue_not_enabled' => DroppedQueueNotEnabled, 'dropped.resource_not_found' => DroppedResourceNotFound, 'dropped.resource_stopped' => DroppedResourceStopped, 'matched' => Matched, 'queuing' => Queued, 'retried' => Retried, + 'late_reply' => LateReply, 'failed' => SentFailed, 'inflight' => SentInflight, 'success' => SentSucc, @@ -61,16 +61,16 @@ ). -define(metrics( - Batched, Dropped, DroppedOther, + DroppedExpired, DroppedQueueFull, - DroppedQueueNotEnabled, DroppedResourceNotFound, DroppedResourceStopped, Matched, Queued, Retried, + LateReply, SentFailed, SentInflight, SentSucc, @@ -80,16 +80,16 @@ Rcvd ), #{ - 'batching' := Batched, 'dropped' := Dropped, 'dropped.other' := DroppedOther, + 'dropped.expired' := DroppedExpired, 'dropped.queue_full' := DroppedQueueFull, - 'dropped.queue_not_enabled' := DroppedQueueNotEnabled, 'dropped.resource_not_found' := DroppedResourceNotFound, 'dropped.resource_stopped' := DroppedResourceStopped, 'matched' := Matched, 'queuing' := Queued, 'retried' := Retried, + 'late_reply' := LateReply, 'failed' := SentFailed, 'inflight' := SentInflight, 'success' := SentSucc, @@ -99,13 +99,3 @@ received := Rcvd } ). - --define(METRICS_EXAMPLE, #{ - metrics => ?EMPTY_METRICS, - node_metrics => [ - #{ - node => node(), - metrics => ?EMPTY_METRICS - } - ] -}). diff --git a/apps/emqx_bridge/include/emqx_bridge_resource.hrl b/apps/emqx_bridge/include/emqx_bridge_resource.hrl new file mode 100644 index 000000000..fcf1c41a4 --- /dev/null +++ b/apps/emqx_bridge/include/emqx_bridge_resource.hrl @@ -0,0 +1,22 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. 
+%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-ifndef(EMQX_BRIDGE_RESOURCE_HRL). +-define(EMQX_BRIDGE_RESOURCE_HRL, true). + +-define(BRIDGE_HOOKPOINT(BridgeId), <<"$bridges/", BridgeId/binary>>). + +-endif. diff --git a/apps/emqx_bridge/rebar.config b/apps/emqx_bridge/rebar.config index 0a1cbc29b..864c45e9a 100644 --- a/apps/emqx_bridge/rebar.config +++ b/apps/emqx_bridge/rebar.config @@ -1,5 +1,9 @@ {erl_opts, [debug_info]}. -{deps, [{emqx, {path, "../emqx"}}]}. +{deps, [ + {emqx, {path, "../emqx"}}, + {emqx_utils, {path, "../emqx_utils"}}, + {emqx_resource, {path, "../../apps/emqx_resource"}} + ]}. {shell, [ % {config, "config/sys.config"}, diff --git a/apps/emqx_bridge/src/emqx_bridge.app.src b/apps/emqx_bridge/src/emqx_bridge.app.src index 89fb7adaf..e408250be 100644 --- a/apps/emqx_bridge/src/emqx_bridge.app.src +++ b/apps/emqx_bridge/src/emqx_bridge.app.src @@ -1,8 +1,8 @@ %% -*- mode: erlang -*- {application, emqx_bridge, [ {description, "EMQX bridges"}, - {vsn, "0.1.8"}, - {registered, []}, + {vsn, "0.1.18"}, + {registered, [emqx_bridge_sup]}, {mod, {emqx_bridge_app, []}}, {applications, [ kernel, diff --git a/apps/emqx_bridge/src/emqx_bridge.erl b/apps/emqx_bridge/src/emqx_bridge.erl index 20614a344..3aade0369 100644 --- a/apps/emqx_bridge/src/emqx_bridge.erl +++ b/apps/emqx_bridge/src/emqx_bridge.erl @@ -31,9 +31,10 @@ -export([ load/0, + unload/0, lookup/1, lookup/2, - lookup/3, + get_metrics/2, create/3, disable_enable/3, remove/2, @@ -54,9 +55,25 @@ T == gcp_pubsub; T == influxdb_api_v1; T == influxdb_api_v2; + %% TODO: rename this to `kafka_producer' after alias support is + %% added to hocon; keeping this as just `kafka' for backwards + %% compatibility. + T == kafka; T == redis_single; T == redis_sentinel; - T == redis_cluster + T == redis_cluster; + T == clickhouse; + T == pgsql; + T == timescale; + T == matrix; + T == tdengine; + T == dynamo; + T == rocketmq; + T == cassandra; + T == sqlserver; + T == pulsar_producer; + T == oracle; + T == iotdb ). load() -> @@ -65,7 +82,7 @@ load() -> fun({Type, NamedConf}) -> lists:foreach( fun({Name, Conf}) -> - %% fetch opts for `emqx_resource_worker` + %% fetch opts for `emqx_resource_buffer_worker` ResOpts = emqx_resource:fetch_creation_opts(Conf), safe_load_bridge(Type, Name, Conf, ResOpts) end, @@ -75,6 +92,21 @@ load() -> maps:to_list(Bridges) ). +unload() -> + unload_hook(), + Bridges = emqx:get_config([bridges], #{}), + lists:foreach( + fun({Type, NamedConf}) -> + lists:foreach( + fun({Name, _Conf}) -> + _ = emqx_bridge_resource:stop(Type, Name) + end, + maps:to_list(NamedConf) + ) + end, + maps:to_list(Bridges) + ). + safe_load_bridge(Type, Name, Conf, Opts) -> try _Res = emqx_bridge_resource:create(Type, Name, Conf, Opts), @@ -115,12 +147,12 @@ load_hook(Bridges) -> maps:to_list(Bridges) ). 
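The new `emqx_bridge_resource.hrl` turns the bridge hookpoint topic into a shared macro. What it expands to, per the `-define` above:

```erlang
-include_lib("emqx_bridge/include/emqx_bridge_resource.hrl").

%% ?BRIDGE_HOOKPOINT/1 simply prefixes a bridge id with "$bridges/".
hookpoint_example() ->
    <<"$bridges/webhook:my_webhook">> = ?BRIDGE_HOOKPOINT(<<"webhook:my_webhook">>),
    ok.
```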
-do_load_hook(Type, #{local_topic := _}) when ?EGRESS_DIR_BRIDGES(Type) -> +do_load_hook(Type, #{local_topic := LocalTopic}) when + ?EGRESS_DIR_BRIDGES(Type) andalso is_binary(LocalTopic) +-> emqx_hooks:put('message.publish', {?MODULE, on_message_publish, []}, ?HP_BRIDGE); do_load_hook(mqtt, #{egress := #{local := #{topic := _}}}) -> emqx_hooks:put('message.publish', {?MODULE, on_message_publish, []}, ?HP_BRIDGE); -do_load_hook(kafka, #{producer := #{mqtt := #{topic := _}}}) -> - emqx_hooks:put('message.publish', {?MODULE, on_message_publish, []}, ?HP_BRIDGE); do_load_hook(_Type, _Conf) -> ok. @@ -170,12 +202,23 @@ send_message(BridgeId, Message) -> case emqx:get_config([bridges, BridgeType, BridgeName], not_found) of not_found -> {error, {bridge_not_found, BridgeId}}; - #{enable := true} -> - emqx_resource:query(ResId, {send_message, Message}); + #{enable := true} = Config -> + QueryOpts = query_opts(Config), + emqx_resource:query(ResId, {send_message, Message}, QueryOpts); #{enable := false} -> {error, {bridge_stopped, BridgeId}} end. +query_opts(Config) -> + case emqx_utils_maps:deep_get([resource_opts, request_timeout], Config, false) of + Timeout when is_integer(Timeout) -> + %% request_timeout is configured + #{timeout => Timeout}; + _ -> + %% emqx_resource has a default value (15s) + #{} + end. + config_key_path() -> [bridges]. @@ -190,24 +233,25 @@ post_config_update(_, _Req, NewConf, OldConf, _AppEnv) -> ]), ok = unload_hook(), ok = load_hook(NewConf), + ?tp(bridge_post_config_update_done, #{}), Result. list() -> - lists:foldl( - fun({Type, NameAndConf}, Bridges) -> - lists:foldl( - fun({Name, RawConf}, Acc) -> + maps:fold( + fun(Type, NameAndConf, Bridges) -> + maps:fold( + fun(Name, RawConf, Acc) -> case lookup(Type, Name, RawConf) of {error, not_found} -> Acc; {ok, Res} -> [Res | Acc] end end, Bridges, - maps:to_list(NameAndConf) + NameAndConf ) end, [], - maps:to_list(emqx:get_raw_config([bridges], #{})) + emqx:get_raw_config([bridges], #{}) ). lookup(Id) -> @@ -231,8 +275,13 @@ lookup(Type, Name, RawConf) -> }} end. +get_metrics(Type, Name) -> + emqx_resource:get_metrics(emqx_bridge_resource:resource_id(Type, Name)). + maybe_upgrade(mqtt, Config) -> - emqx_bridge_mqtt_config:maybe_upgrade(Config); + emqx_bridge_compatible_config:maybe_upgrade(Config); +maybe_upgrade(webhook, Config) -> + emqx_bridge_compatible_config:webhook_maybe_upgrade(Config); maybe_upgrade(_Other, Config) -> Config. @@ -250,7 +299,7 @@ create(BridgeType, BridgeName, RawConf) -> brige_action => create, bridge_type => BridgeType, bridge_name => BridgeName, - bridge_raw_config => RawConf + bridge_raw_config => emqx_utils:redact(RawConf) }), emqx_conf:update( emqx_bridge:config_key_path() ++ [BridgeType, BridgeName], @@ -321,7 +370,7 @@ perform_bridge_changes([{Action, MapConfs} | Tasks], Result0) -> perform_bridge_changes(Tasks, Result). diff_confs(NewConfs, OldConfs) -> - emqx_map_lib:diff_maps( + emqx_utils_maps:diff_maps( flatten_confs(NewConfs), flatten_confs(OldConfs) ). 
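`send_message/2` now threads a per-bridge timeout into the resource layer via `query_opts/1`. A small sketch of the mapping it implements (the 15 s fallback mentioned in the comment is `emqx_resource`'s own default, not set here):

```erlang
%% Sketch: resource_opts.request_timeout, when it is an integer number of
%% milliseconds, becomes the per-query timeout; anything else defers to
%% emqx_resource's default. query_opts/1 is the private helper above.
query_opts_example() ->
    #{timeout := 5000} = query_opts(#{resource_opts => #{request_timeout => 5000}}),
    #{} = query_opts(#{resource_opts => #{}}),
    ok.
```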
@@ -363,14 +412,17 @@ get_matched_egress_bridges(Topic) -> get_matched_bridge_id(_BType, #{enable := false}, _Topic, _BName, Acc) -> Acc; -get_matched_bridge_id(BType, #{local_topic := Filter}, Topic, BName, Acc) when - ?EGRESS_DIR_BRIDGES(BType) --> - do_get_matched_bridge_id(Topic, Filter, BType, BName, Acc); +get_matched_bridge_id(BType, Conf, Topic, BName, Acc) when ?EGRESS_DIR_BRIDGES(BType) -> + case maps:get(local_topic, Conf, undefined) of + undefined -> + Acc; + Filter -> + do_get_matched_bridge_id(Topic, Filter, BType, BName, Acc) + end; get_matched_bridge_id(mqtt, #{egress := #{local := #{topic := Filter}}}, Topic, BName, Acc) -> do_get_matched_bridge_id(Topic, Filter, mqtt, BName, Acc); -get_matched_bridge_id(kafka, #{producer := #{mqtt := #{topic := Filter}}}, Topic, BName, Acc) -> - do_get_matched_bridge_id(Topic, Filter, kafka, BName, Acc). +get_matched_bridge_id(_BType, _Conf, _Topic, _BName, Acc) -> + Acc. do_get_matched_bridge_id(Topic, Filter, BType, BName, Acc) -> case emqx_topic:match(Topic, Filter) of diff --git a/apps/emqx_bridge/src/emqx_bridge_api.erl b/apps/emqx_bridge/src/emqx_bridge_api.erl index e649e5215..c7e48990b 100644 --- a/apps/emqx_bridge/src/emqx_bridge_api.erl +++ b/apps/emqx_bridge/src/emqx_bridge_api.erl @@ -20,6 +20,7 @@ -include_lib("typerefl/include/types.hrl"). -include_lib("hocon/include/hoconsc.hrl"). -include_lib("emqx/include/logger.hrl"). +-include_lib("emqx_utils/include/emqx_utils_api.hrl"). -include_lib("emqx_bridge/include/emqx_bridge.hrl"). -import(hoconsc, [mk/2, array/1, enum/1]). @@ -36,24 +37,36 @@ -export([ '/bridges'/2, '/bridges/:id'/2, - '/bridges/:id/operation/:operation'/2, - '/nodes/:node/bridges/:id/operation/:operation'/2, - '/bridges/:id/reset_metrics'/2 + '/bridges/:id/enable/:enable'/2, + '/bridges/:id/:operation'/2, + '/nodes/:node/bridges/:id/:operation'/2, + '/bridges/:id/metrics'/2, + '/bridges/:id/metrics/reset'/2, + '/bridges_probe'/2 ]). -export([lookup_from_local_node/2]). +-export([get_metrics_from_local_node/2]). +-define(BRIDGE_NOT_ENABLED, + ?BAD_REQUEST(<<"Forbidden operation, bridge not enabled">>) +). + +-define(BRIDGE_NOT_FOUND(BRIDGE_TYPE, BRIDGE_NAME), + ?NOT_FOUND( + <<"Bridge lookup failed: bridge named '", (bin(BRIDGE_NAME))/binary, "' of type ", + (bin(BRIDGE_TYPE))/binary, " does not exist.">> + ) +). + +%% Don't turn bridge_name to atom, it's maybe not a existing atom. -define(TRY_PARSE_ID(ID, EXPR), - try emqx_bridge_resource:parse_bridge_id(Id) of + try emqx_bridge_resource:parse_bridge_id(Id, #{atom_name => false}) of {BridgeType, BridgeName} -> EXPR catch - throw:{invalid_bridge_id, Reason} -> - {400, - error_msg( - 'INVALID_ID', - <<"Invalid bride ID, ", Reason/binary>> - )} + throw:#{reason := Reason} -> + ?NOT_FOUND(<<"Invalid bridge ID, ", Reason/binary>>) end ). @@ -66,9 +79,12 @@ paths() -> [ "/bridges", "/bridges/:id", - "/bridges/:id/operation/:operation", - "/nodes/:node/bridges/:id/operation/:operation", - "/bridges/:id/reset_metrics" + "/bridges/:id/enable/:enable", + "/bridges/:id/:operation", + "/nodes/:node/bridges/:id/:operation", + "/bridges/:id/metrics", + "/bridges/:id/metrics/reset", + "/bridges_probe" ]. error_schema(Code, Message) when is_atom(Code) -> @@ -87,11 +103,11 @@ get_response_body_schema() -> param_path_operation_cluster() -> {operation, mk( - enum([enable, disable, stop, restart]), + enum([start, stop, restart]), #{ in => path, required => true, - example => <<"restart">>, + example => <<"start">>, desc => ?DESC("desc_param_path_operation_cluster") } )}. 
@@ -99,7 +115,7 @@ param_path_operation_cluster() -> param_path_operation_on_node() -> {operation, mk( - enum([stop, restart]), + enum([start, stop, restart]), #{ in => path, required => true, @@ -132,8 +148,20 @@ param_path_id() -> } )}. +param_path_enable() -> + {enable, + mk( + boolean(), + #{ + in => path, + required => true, + desc => ?DESC("desc_param_path_enable"), + example => true + } + )}. + bridge_info_array_example(Method) -> - [Config || #{value := Config} <- maps:values(bridge_info_examples(Method))]. + lists:map(fun(#{value := Config}) -> Config end, maps:values(bridge_info_examples(Method))). bridge_info_examples(Method) -> maps:merge( @@ -150,12 +178,12 @@ bridge_info_examples(Method) -> ee_bridge_examples(Method) ). +-if(?EMQX_RELEASE_EDITION == ee). ee_bridge_examples(Method) -> - try - emqx_ee_bridge:examples(Method) - catch - _:_ -> #{} - end. + emqx_ee_bridge:examples(Method). +-else. +ee_bridge_examples(_Method) -> #{}. +-endif. info_example(Type, Method) -> maps:merge( @@ -166,27 +194,13 @@ info_example(Type, Method) -> method_example(Type, Method) when Method == get; Method == post -> SType = atom_to_list(Type), SName = SType ++ "_example", - TypeNameExam = #{ + #{ type => bin(SType), name => bin(SName) - }, - maybe_with_metrics_example(TypeNameExam, Method); + }; method_example(_Type, put) -> #{}. -maybe_with_metrics_example(TypeNameExam, get) -> - TypeNameExam#{ - metrics => ?EMPTY_METRICS, - node_metrics => [ - #{ - node => node(), - metrics => ?EMPTY_METRICS - } - ] - }; -maybe_with_metrics_example(TypeNameExam, _) -> - TypeNameExam. - info_example_basic(webhook) -> #{ enable => true, @@ -206,9 +220,8 @@ info_example_basic(webhook) -> health_check_interval => 15000, auto_restart_interval => 15000, query_mode => async, - async_inflight_window => 100, - enable_queue => false, - max_queue_bytes => 100 * 1024 * 1024 + inflight_window => 100, + max_buffer_bytes => 100 * 1024 * 1024 } }; info_example_basic(mqtt) -> @@ -224,7 +237,7 @@ mqtt_main_example() -> server => <<"127.0.0.1:1883">>, proto_ver => <<"v4">>, username => <<"foo">>, - password => <<"bar">>, + password => <<"******">>, clean_start => true, keepalive => <<"300s">>, retry_interval => <<"15s">>, @@ -233,8 +246,7 @@ mqtt_main_example() -> health_check_interval => <<"15s">>, auto_restart_interval => <<"60s">>, query_mode => sync, - enable_queue => false, - max_queue_bytes => 100 * 1024 * 1024 + max_buffer_bytes => 100 * 1024 * 1024 }, ssl => #{ enable => false @@ -271,7 +283,7 @@ schema("/bridges") -> 'operationId' => '/bridges', get => #{ tags => [<<"bridges">>], - summary => <<"List Bridges">>, + summary => <<"List bridges">>, description => ?DESC("desc_api1"), responses => #{ 200 => emqx_dashboard_swagger:schema_with_example( @@ -282,7 +294,7 @@ schema("/bridges") -> }, post => #{ tags => [<<"bridges">>], - summary => <<"Create Bridge">>, + summary => <<"Create bridge">>, description => ?DESC("desc_api2"), 'requestBody' => emqx_dashboard_swagger:schema_with_examples( emqx_bridge_schema:post_request(), @@ -299,7 +311,7 @@ schema("/bridges/:id") -> 'operationId' => '/bridges/:id', get => #{ tags => [<<"bridges">>], - summary => <<"Get Bridge">>, + summary => <<"Get bridge">>, description => ?DESC("desc_api3"), parameters => [param_path_id()], responses => #{ @@ -309,7 +321,7 @@ schema("/bridges/:id") -> }, put => #{ tags => [<<"bridges">>], - summary => <<"Update Bridge">>, + summary => <<"Update bridge">>, description => ?DESC("desc_api4"), parameters => [param_path_id()], 'requestBody' => 
emqx_dashboard_swagger:schema_with_examples( @@ -319,60 +331,98 @@ schema("/bridges/:id") -> responses => #{ 200 => get_response_body_schema(), 404 => error_schema('NOT_FOUND', "Bridge not found"), - 400 => error_schema(['BAD_REQUEST', 'INVALID_ID'], "Update bridge failed") + 400 => error_schema('BAD_REQUEST', "Update bridge failed") } }, delete => #{ tags => [<<"bridges">>], - summary => <<"Delete Bridge">>, + summary => <<"Delete bridge">>, description => ?DESC("desc_api5"), parameters => [param_path_id()], responses => #{ 204 => <<"Bridge deleted">>, - 400 => error_schema(['INVALID_ID'], "Update bridge failed"), - 403 => error_schema('FORBIDDEN_REQUEST', "Forbidden operation"), + 400 => error_schema( + 'BAD_REQUEST', + "Cannot delete bridge while active rules are defined for this bridge" + ), + 404 => error_schema('NOT_FOUND', "Bridge not found"), 503 => error_schema('SERVICE_UNAVAILABLE', "Service unavailable") } } }; -schema("/bridges/:id/reset_metrics") -> +schema("/bridges/:id/metrics") -> #{ - 'operationId' => '/bridges/:id/reset_metrics', - put => #{ + 'operationId' => '/bridges/:id/metrics', + get => #{ tags => [<<"bridges">>], - summary => <<"Reset Bridge Metrics">>, - description => ?DESC("desc_api6"), + summary => <<"Get bridge metrics">>, + description => ?DESC("desc_bridge_metrics"), parameters => [param_path_id()], responses => #{ - 200 => <<"Reset success">>, - 400 => error_schema(['BAD_REQUEST'], "RPC Call Failed") + 200 => emqx_bridge_schema:metrics_fields(), + 404 => error_schema('NOT_FOUND', "Bridge not found") } } }; -schema("/bridges/:id/operation/:operation") -> +schema("/bridges/:id/metrics/reset") -> #{ - 'operationId' => '/bridges/:id/operation/:operation', + 'operationId' => '/bridges/:id/metrics/reset', + put => #{ + tags => [<<"bridges">>], + summary => <<"Reset bridge metrics">>, + description => ?DESC("desc_api6"), + parameters => [param_path_id()], + responses => #{ + 204 => <<"Reset success">>, + 404 => error_schema('NOT_FOUND', "Bridge not found") + } + } + }; +schema("/bridges/:id/enable/:enable") -> + #{ + 'operationId' => '/bridges/:id/enable/:enable', + put => + #{ + tags => [<<"bridges">>], + summary => <<"Enable or disable bridge">>, + desc => ?DESC("desc_enable_bridge"), + parameters => [param_path_id(), param_path_enable()], + responses => + #{ + 204 => <<"Success">>, + 404 => error_schema('NOT_FOUND', "Bridge not found or invalid operation"), + 503 => error_schema('SERVICE_UNAVAILABLE', "Service unavailable") + } + } + }; +schema("/bridges/:id/:operation") -> + #{ + 'operationId' => '/bridges/:id/:operation', post => #{ tags => [<<"bridges">>], - summary => <<"Enable/Disable/Stop/Restart Bridge">>, + summary => <<"Stop or restart bridge">>, description => ?DESC("desc_api7"), parameters => [ param_path_id(), param_path_operation_cluster() ], responses => #{ - 200 => <<"Operation success">>, - 503 => error_schema('SERVICE_UNAVAILABLE', "Service unavailable"), - 400 => error_schema('INVALID_ID', "Bad bridge ID") + 204 => <<"Operation success">>, + 400 => error_schema( + 'BAD_REQUEST', "Problem with configuration of external service" + ), + 404 => error_schema('NOT_FOUND', "Bridge not found or invalid operation"), + 501 => error_schema('NOT_IMPLEMENTED', "Not Implemented"), + 503 => error_schema('SERVICE_UNAVAILABLE', "Service unavailable") } } }; -schema("/nodes/:node/bridges/:id/operation/:operation") -> +schema("/nodes/:node/bridges/:id/:operation") -> #{ - 'operationId' => '/nodes/:node/bridges/:id/operation/:operation', + 'operationId' => 
'/nodes/:node/bridges/:id/:operation', post => #{ tags => [<<"bridges">>], - summary => <<"Stop/Restart Bridge">>, + summary => <<"Stop/restart bridge">>, description => ?DESC("desc_api8"), parameters => [ param_path_node(), @@ -380,134 +430,234 @@ schema("/nodes/:node/bridges/:id/operation/:operation") -> param_path_operation_on_node() ], responses => #{ - 200 => <<"Operation success">>, - 400 => error_schema('INVALID_ID', "Bad bridge ID"), - 403 => error_schema('FORBIDDEN_REQUEST', "forbidden operation"), + 204 => <<"Operation success">>, + 400 => error_schema( + 'BAD_REQUEST', + "Problem with configuration of external service or bridge not enabled" + ), + 404 => error_schema('NOT_FOUND', "Bridge or node not found or invalid operation"), + 501 => error_schema('NOT_IMPLEMENTED', "Not Implemented"), 503 => error_schema('SERVICE_UNAVAILABLE', "Service unavailable") } } + }; +schema("/bridges_probe") -> + #{ + 'operationId' => '/bridges_probe', + post => #{ + tags => [<<"bridges">>], + desc => ?DESC("desc_api9"), + summary => <<"Test creating bridge">>, + 'requestBody' => emqx_dashboard_swagger:schema_with_examples( + emqx_bridge_schema:post_request(), + bridge_info_examples(post) + ), + responses => #{ + 204 => <<"Test bridge OK">>, + 400 => error_schema(['TEST_FAILED'], "bridge test failed") + } + } }. '/bridges'(post, #{body := #{<<"type">> := BridgeType, <<"name">> := BridgeName} = Conf0}) -> - Conf = filter_out_request_body(Conf0), case emqx_bridge:lookup(BridgeType, BridgeName) of {ok, _} -> - {400, error_msg('ALREADY_EXISTS', <<"bridge already exists">>)}; + ?BAD_REQUEST('ALREADY_EXISTS', <<"bridge already exists">>); {error, not_found} -> - case ensure_bridge_created(BridgeType, BridgeName, Conf) of - ok -> lookup_from_all_nodes(BridgeType, BridgeName, 201); - {error, Error} -> {400, Error} - end + Conf = filter_out_request_body(Conf0), + create_bridge(BridgeType, BridgeName, Conf) end; '/bridges'(get, _Params) -> - {200, - zip_bridges([ - [format_resp(Data, Node) || Data <- emqx_bridge_proto_v1:list_bridges(Node)] - || Node <- mria_mnesia:running_nodes() - ])}. + Nodes = mria:running_nodes(), + NodeReplies = emqx_bridge_proto_v4:list_bridges_on_nodes(Nodes), + case is_ok(NodeReplies) of + {ok, NodeBridges} -> + AllBridges = [ + [format_resource(Data, Node) || Data <- Bridges] + || {Node, Bridges} <- lists:zip(Nodes, NodeBridges) + ], + ?OK(zip_bridges(AllBridges)); + {error, Reason} -> + ?INTERNAL_ERROR(Reason) + end. 
'/bridges/:id'(get, #{bindings := #{id := Id}}) -> ?TRY_PARSE_ID(Id, lookup_from_all_nodes(BridgeType, BridgeName, 200)); '/bridges/:id'(put, #{bindings := #{id := Id}, body := Conf0}) -> - Conf = filter_out_request_body(Conf0), + Conf1 = filter_out_request_body(Conf0), ?TRY_PARSE_ID( Id, case emqx_bridge:lookup(BridgeType, BridgeName) of {ok, _} -> - case ensure_bridge_created(BridgeType, BridgeName, Conf) of - ok -> - lookup_from_all_nodes(BridgeType, BridgeName, 200); - {error, Error} -> - {400, Error} - end; + RawConf = emqx:get_raw_config([bridges, BridgeType, BridgeName], #{}), + Conf = deobfuscate(Conf1, RawConf), + update_bridge(BridgeType, BridgeName, Conf); {error, not_found} -> - {404, error_msg('NOT_FOUND', <<"bridge not found">>)} + ?BRIDGE_NOT_FOUND(BridgeType, BridgeName) end ); '/bridges/:id'(delete, #{bindings := #{id := Id}, query_string := Qs}) -> - AlsoDeleteActs = - case maps:get(<<"also_delete_dep_actions">>, Qs, <<"false">>) of - <<"true">> -> true; - true -> true; - _ -> false - end, ?TRY_PARSE_ID( Id, - case emqx_bridge:check_deps_and_remove(BridgeType, BridgeName, AlsoDeleteActs) of + case emqx_bridge:lookup(BridgeType, BridgeName) of {ok, _} -> - 204; - {error, {rules_deps_on_this_bridge, RuleIds}} -> - {403, - error_msg( - 'FORBIDDEN_REQUEST', - {<<"There're some rules dependent on this bridge">>, RuleIds} - )}; - {error, timeout} -> - {503, error_msg('SERVICE_UNAVAILABLE', <<"request timeout">>)}; - {error, Reason} -> - {500, error_msg('INTERNAL_ERROR', Reason)} + AlsoDeleteActs = + case maps:get(<<"also_delete_dep_actions">>, Qs, <<"false">>) of + <<"true">> -> true; + true -> true; + _ -> false + end, + case emqx_bridge:check_deps_and_remove(BridgeType, BridgeName, AlsoDeleteActs) of + {ok, _} -> + ?NO_CONTENT; + {error, {rules_deps_on_this_bridge, RuleIds}} -> + ?BAD_REQUEST( + {<<"Cannot delete bridge while active rules are defined for this bridge">>, + RuleIds} + ); + {error, timeout} -> + ?SERVICE_UNAVAILABLE(<<"request timeout">>); + {error, Reason} -> + ?INTERNAL_ERROR(Reason) + end; + {error, not_found} -> + ?BRIDGE_NOT_FOUND(BridgeType, BridgeName) end ). -'/bridges/:id/reset_metrics'(put, #{bindings := #{id := Id}}) -> +'/bridges/:id/metrics'(get, #{bindings := #{id := Id}}) -> + ?TRY_PARSE_ID(Id, get_metrics_from_all_nodes(BridgeType, BridgeName)). + +'/bridges/:id/metrics/reset'(put, #{bindings := #{id := Id}}) -> ?TRY_PARSE_ID( Id, - case - emqx_bridge_resource:reset_metrics( + begin + ok = emqx_bridge_resource:reset_metrics( emqx_bridge_resource:resource_id(BridgeType, BridgeName) - ) - of - ok -> {200, <<"Reset success">>}; - Reason -> {400, error_msg('BAD_REQUEST', Reason)} + ), + ?NO_CONTENT end ). +'/bridges_probe'(post, Request) -> + RequestMeta = #{module => ?MODULE, method => post, path => "/bridges_probe"}, + case emqx_dashboard_swagger:filter_check_request_and_translate_body(Request, RequestMeta) of + {ok, #{body := #{<<"type">> := ConnType} = Params}} -> + Params1 = maybe_deobfuscate_bridge_probe(Params), + case emqx_bridge_resource:create_dry_run(ConnType, maps:remove(<<"type">>, Params1)) of + ok -> + ?NO_CONTENT; + {error, #{kind := validation_error} = Reason} -> + ?BAD_REQUEST('TEST_FAILED', map_to_json(Reason)); + {error, Reason} when not is_tuple(Reason); element(1, Reason) =/= 'exit' -> + ?BAD_REQUEST('TEST_FAILED', Reason) + end; + BadRequest -> + BadRequest + end. 
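Note: the PUT branch now merges the submitted body with the stored raw config, so secrets the API echoed back in redacted form survive an update round-trip. A small sketch of the intended deobfuscate/2 behaviour (the function is defined near the end of this module's changes); both maps are illustrative:

    %% A password echoed back as "******" is swapped for the stored value;
    %% non-redacted fields are taken from the submitted config.
    Submitted = #{<<"password">> => <<"******">>, <<"server">> => <<"127.0.0.1:1883">>},
    Stored = #{<<"password">> => <<"bar">>, <<"server">> => <<"127.0.0.1:1883">>},
    #{<<"password">> := <<"bar">>, <<"server">> := <<"127.0.0.1:1883">>} =
        deobfuscate(Submitted, Stored).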
+ +maybe_deobfuscate_bridge_probe(#{<<"type">> := BridgeType, <<"name">> := BridgeName} = Params) -> + case emqx_bridge:lookup(BridgeType, BridgeName) of + {ok, _} -> + RawConf = emqx:get_raw_config([bridges, BridgeType, BridgeName], #{}), + deobfuscate(Params, RawConf); + _ -> + %% A bridge may be probed before it's created, so not finding it here is fine + Params + end; +maybe_deobfuscate_bridge_probe(Params) -> + Params. + +get_metrics_from_all_nodes(BridgeType, BridgeName) -> + Nodes = mria:running_nodes(), + Result = do_bpapi_call(all, get_metrics_from_all_nodes, [Nodes, BridgeType, BridgeName]), + case Result of + Metrics when is_list(Metrics) -> + {200, format_bridge_metrics(lists:zip(Nodes, Metrics))}; + {error, Reason} -> + ?INTERNAL_ERROR(Reason) + end. + lookup_from_all_nodes(BridgeType, BridgeName, SuccCode) -> - Nodes = mria_mnesia:running_nodes(), - case is_ok(emqx_bridge_proto_v1:lookup_from_all_nodes(Nodes, BridgeType, BridgeName)) of + Nodes = mria:running_nodes(), + case is_ok(emqx_bridge_proto_v4:lookup_from_all_nodes(Nodes, BridgeType, BridgeName)) of {ok, [{ok, _} | _] = Results} -> {SuccCode, format_bridge_info([R || {ok, R} <- Results])}; {ok, [{error, not_found} | _]} -> - {404, error_msg('NOT_FOUND', <<"not_found">>)}; - {error, ErrL} -> - {500, error_msg('INTERNAL_ERROR', ErrL)} + ?BRIDGE_NOT_FOUND(BridgeType, BridgeName); + {error, Reason} -> + ?INTERNAL_ERROR(Reason) end. lookup_from_local_node(BridgeType, BridgeName) -> case emqx_bridge:lookup(BridgeType, BridgeName) of - {ok, Res} -> {ok, format_resp(Res)}; + {ok, Res} -> {ok, format_resource(Res, node())}; Error -> Error end. -'/bridges/:id/operation/:operation'(post, #{ +create_bridge(BridgeType, BridgeName, Conf) -> + create_or_update_bridge(BridgeType, BridgeName, Conf, 201). + +update_bridge(BridgeType, BridgeName, Conf) -> + create_or_update_bridge(BridgeType, BridgeName, Conf, 200). + +create_or_update_bridge(BridgeType, BridgeName, Conf, HttpStatusCode) -> + case emqx_bridge:create(BridgeType, BridgeName, Conf) of + {ok, _} -> + lookup_from_all_nodes(BridgeType, BridgeName, HttpStatusCode); + {error, #{kind := validation_error} = Reason} -> + ?BAD_REQUEST(map_to_json(Reason)) + end. + +get_metrics_from_local_node(BridgeType, BridgeName) -> + format_metrics(emqx_bridge:get_metrics(BridgeType, BridgeName)). + +'/bridges/:id/enable/:enable'(put, #{bindings := #{id := Id, enable := Enable}}) -> + ?TRY_PARSE_ID( + Id, + case enable_func(Enable) of + invalid -> + ?NOT_FOUND(<<"Invalid operation">>); + OperFunc -> + case emqx_bridge:disable_enable(OperFunc, BridgeType, BridgeName) of + {ok, _} -> + ?NO_CONTENT; + {error, {pre_config_update, _, bridge_not_found}} -> + ?BRIDGE_NOT_FOUND(BridgeType, BridgeName); + {error, {_, _, timeout}} -> + ?SERVICE_UNAVAILABLE(<<"request timeout">>); + {error, timeout} -> + ?SERVICE_UNAVAILABLE(<<"request timeout">>); + {error, Reason} -> + ?INTERNAL_ERROR(Reason) + end + end + ). 
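Note: the new enable endpoint turns what used to be enable/disable operations into a boolean path segment, mapped by enable_func/1 further down in this hunk. An illustrative flow for disabling a bridge (the bridge id is made up):

    %% PUT /bridges/webhook:my_hook/enable/false
    disable = enable_func(<<"false">>),
    %% ... then emqx_bridge:disable_enable(disable, BridgeType, BridgeName)
    %% returns {ok, _} and the handler replies 204.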
+ +'/bridges/:id/:operation'(post, #{ bindings := #{id := Id, operation := Op} }) -> ?TRY_PARSE_ID( Id, - case operation_func(Op) of + case operation_to_all_func(Op) of invalid -> - {400, error_msg('BAD_REQUEST', <<"invalid operation">>)}; - OperFunc when OperFunc == enable; OperFunc == disable -> - case emqx_bridge:disable_enable(OperFunc, BridgeType, BridgeName) of - {ok, _} -> - {200}; - {error, {pre_config_update, _, bridge_not_found}} -> - {404, error_msg('NOT_FOUND', <<"bridge not found">>)}; - {error, {_, _, timeout}} -> - {503, error_msg('SERVICE_UNAVAILABLE', <<"request timeout">>)}; - {error, timeout} -> - {503, error_msg('SERVICE_UNAVAILABLE', <<"request timeout">>)}; - {error, Reason} -> - {500, error_msg('INTERNAL_ERROR', Reason)} - end; + ?NOT_FOUND(<<"Invalid operation: ", Op/binary>>); OperFunc -> - Nodes = mria_mnesia:running_nodes(), - operation_to_all_nodes(Nodes, OperFunc, BridgeType, BridgeName) + try is_enabled_bridge(BridgeType, BridgeName) of + false -> + ?BRIDGE_NOT_ENABLED; + true -> + Nodes = mria:running_nodes(), + call_operation(all, OperFunc, [Nodes, BridgeType, BridgeName]) + catch + throw:not_found -> + ?BRIDGE_NOT_FOUND(BridgeType, BridgeName) + end end ). -'/nodes/:node/bridges/:id/operation/:operation'(post, #{ +'/nodes/:node/bridges/:id/:operation'(post, #{ bindings := #{id := Id, operation := Op, node := Node} }) -> @@ -515,52 +665,49 @@ lookup_from_local_node(BridgeType, BridgeName) -> Id, case node_operation_func(Op) of invalid -> - {400, error_msg('BAD_REQUEST', <<"invalid operation">>)}; + ?NOT_FOUND(<<"Invalid operation: ", Op/binary>>); OperFunc -> - ConfMap = emqx:get_config([bridges, BridgeType, BridgeName]), - case maps:get(enable, ConfMap, false) of + try is_enabled_bridge(BridgeType, BridgeName) of false -> - {403, - error_msg( - 'FORBIDDEN_REQUEST', - <<"forbidden operation: bridge disabled">> - )}; + ?BRIDGE_NOT_ENABLED; true -> - call_operation(Node, OperFunc, BridgeType, BridgeName) + case emqx_utils:safe_to_existing_atom(Node, utf8) of + {ok, TargetNode} -> + call_operation(TargetNode, OperFunc, [ + TargetNode, BridgeType, BridgeName + ]); + {error, _} -> + ?NOT_FOUND(<<"Invalid node name: ", Node/binary>>) + end + catch + throw:not_found -> + ?BRIDGE_NOT_FOUND(BridgeType, BridgeName) end end ). -node_operation_func(<<"stop">>) -> stop_bridge_to_node; +is_enabled_bridge(BridgeType, BridgeName) -> + try emqx:get_config([bridges, BridgeType, BridgeName]) of + ConfMap -> + maps:get(enable, ConfMap, false) + catch + error:{config_not_found, _} -> + throw(not_found) + end. + node_operation_func(<<"restart">>) -> restart_bridge_to_node; +node_operation_func(<<"start">>) -> start_bridge_to_node; +node_operation_func(<<"stop">>) -> stop_bridge_to_node; node_operation_func(_) -> invalid. -operation_func(<<"stop">>) -> stop; -operation_func(<<"restart">>) -> restart; -operation_func(<<"enable">>) -> enable; -operation_func(<<"disable">>) -> disable; -operation_func(_) -> invalid. +operation_to_all_func(<<"restart">>) -> restart_bridges_to_all_nodes; +operation_to_all_func(<<"start">>) -> start_bridges_to_all_nodes; +operation_to_all_func(<<"stop">>) -> stop_bridges_to_all_nodes; +operation_to_all_func(_) -> invalid. 
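Note: operation names arrive as the last path segment and are resolved to bpapi call names before any RPC goes out; unknown operations now produce a 404 instead of a 400. A hypothetical trace for a cluster-wide stop, with BridgeType and BridgeName standing in for the parsed id:

    %% POST /bridges/webhook:my_hook/stop
    stop_bridges_to_all_nodes = operation_to_all_func(<<"stop">>),
    %% For an enabled bridge the handler then effectively runs:
    %%   Nodes = mria:running_nodes(),
    %%   call_operation(all, stop_bridges_to_all_nodes, [Nodes, BridgeType, BridgeName])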
-operation_to_all_nodes(Nodes, OperFunc, BridgeType, BridgeName) -> - RpcFunc = - case OperFunc of - restart -> restart_bridges_to_all_nodes; - stop -> stop_bridges_to_all_nodes - end, - case is_ok(emqx_bridge_proto_v1:RpcFunc(Nodes, BridgeType, BridgeName)) of - {ok, _} -> - {200}; - {error, [timeout | _]} -> - {503, error_msg('SERVICE_UNAVAILABLE', <<"request timeout">>)}; - {error, ErrL} -> - {500, error_msg('INTERNAL_ERROR', ErrL)} - end. - -ensure_bridge_created(BridgeType, BridgeName, Conf) -> - case emqx_bridge:create(BridgeType, BridgeName, Conf) of - {ok, _} -> ok; - {error, Reason} -> {error, error_msg('BAD_REQUEST', Reason)} - end. +enable_func(<<"true">>) -> enable; +enable_func(<<"false">>) -> disable; +enable_func(_) -> invalid. zip_bridges([BridgesFirstNode | _] = BridgesAllNodes) -> lists:foldl( @@ -602,17 +749,21 @@ pick_bridges_by_id(Type, Name, BridgesAllNodes) -> format_bridge_info([FirstBridge | _] = Bridges) -> Res = maps:remove(node, FirstBridge), - NodeStatus = collect_status(Bridges), - NodeMetrics = collect_metrics(Bridges), - Res#{ + NodeStatus = node_status(Bridges), + redact(Res#{ status => aggregate_status(NodeStatus), - node_status => NodeStatus, + node_status => NodeStatus + }). + +format_bridge_metrics(Bridges) -> + NodeMetrics = collect_metrics(Bridges), + #{ metrics => aggregate_metrics(NodeMetrics), node_metrics => NodeMetrics }. -collect_status(Bridges) -> - [maps:with([node, status], B) || B <- Bridges]. +node_status(Bridges) -> + [maps:with([node, status, status_reason], B) || B <- Bridges]. aggregate_status(AllStatus) -> Head = fun([A | _]) -> A end, @@ -624,76 +775,84 @@ aggregate_status(AllStatus) -> end. collect_metrics(Bridges) -> - [maps:with([node, metrics], B) || B <- Bridges]. + [#{node => Node, metrics => Metrics} || {Node, Metrics} <- Bridges]. aggregate_metrics(AllMetrics) -> InitMetrics = ?EMPTY_METRICS, - lists:foldl( - fun( - #{ - metrics := ?metrics( - M1, M2, M3, M4, M5, M6, M7, M8, M9, M10, M11, M12, M13, M14, M15, M16, M17 - ) - }, - ?metrics( - N1, N2, N3, N4, N5, N6, N7, N8, N9, N10, N11, N12, N13, N14, N15, N16, N17 - ) - ) -> - ?METRICS( - M1 + N1, - M2 + N2, - M3 + N3, - M4 + N4, - M5 + N5, - M6 + N6, - M7 + N7, - M8 + N8, - M9 + N9, - M10 + N10, - M11 + N11, - M12 + N12, - M13 + N13, - M14 + N14, - M15 + N15, - M16 + N16, - M17 + N17 - ) - end, - InitMetrics, - AllMetrics + lists:foldl(fun aggregate_metrics/2, InitMetrics, AllMetrics). + +aggregate_metrics( + #{ + metrics := ?metrics( + M1, M2, M3, M4, M5, M6, M7, M8, M9, M10, M11, M12, M13, M14, M15, M16, M17 + ) + }, + ?metrics( + N1, N2, N3, N4, N5, N6, N7, N8, N9, N10, N11, N12, N13, N14, N15, N16, N17 + ) +) -> + ?METRICS( + M1 + N1, + M2 + N2, + M3 + N3, + M4 + N4, + M5 + N5, + M6 + N6, + M7 + N7, + M8 + N8, + M9 + N9, + M10 + N10, + M11 + N11, + M12 + N12, + M13 + N13, + M14 + N14, + M15 + N15, + M16 + N16, + M17 + N17 ). -format_resp(Data) -> - format_resp(Data, node()). - -format_resp( +format_resource( #{ type := Type, name := BridgeName, raw_config := RawConf, - resource_data := #{status := Status, metrics := Metrics} + resource_data := ResourceData }, Node ) -> RawConfFull = fill_defaults(Type, RawConf), - RawConfFull#{ - type => Type, - name => maps:get(<<"name">>, RawConf, BridgeName), - node => Node, - status => Status, - metrics => format_metrics(Metrics) - }. + redact( + maps:merge( + RawConfFull#{ + type => Type, + name => maps:get(<<"name">>, RawConf, BridgeName), + node => Node + }, + format_resource_data(ResourceData) + ) + ). 
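Note: only fragments of aggregate_status/1 survive in this hunk, but together with the status() enum gaining an inconsistent value (see the schema changes further below) the intended rule appears to be: a unanimous node status wins, anything else is reported as inconsistent. A sketch under that assumption, not copied from the module:

    aggregate_status_sketch(NodeStatus) ->
        case lists:usort([maps:get(status, S) || S <- NodeStatus]) of
            [Status] -> Status;
            [_ | _] -> inconsistent
        end.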
+ +format_resource_data(ResData) -> + maps:fold(fun format_resource_data/3, #{}, maps:with([status, error], ResData)). + +format_resource_data(error, undefined, Result) -> + Result; +format_resource_data(error, Error, Result) -> + Result#{status_reason => emqx_utils:readable_error_msg(Error)}; +format_resource_data(K, V, Result) -> + Result#{K => V}. format_metrics(#{ counters := #{ 'dropped' := Dropped, 'dropped.other' := DroppedOther, + 'dropped.expired' := DroppedExpired, 'dropped.queue_full' := DroppedQueueFull, - 'dropped.queue_not_enabled' := DroppedQueueNotEnabled, 'dropped.resource_not_found' := DroppedResourceNotFound, 'dropped.resource_stopped' := DroppedResourceStopped, 'matched' := Matched, 'retried' := Retried, + 'late_reply' := LateReply, 'failed' := SentFailed, 'success' := SentSucc, 'received' := Rcvd @@ -703,20 +862,19 @@ format_metrics(#{ matched := #{current := Rate, last5m := Rate5m, max := RateMax} } }) -> - Batched = maps:get('batching', Gauges, 0), Queued = maps:get('queuing', Gauges, 0), SentInflight = maps:get('inflight', Gauges, 0), ?METRICS( - Batched, Dropped, DroppedOther, + DroppedExpired, DroppedQueueFull, - DroppedQueueNotEnabled, DroppedResourceNotFound, DroppedResourceStopped, Matched, Queued, Retried, + LateReply, SentFailed, SentInflight, SentSucc, @@ -739,6 +897,12 @@ unpack_bridge_conf(Type, PackedConf) -> #{<<"foo">> := RawConf} = maps:get(bin(Type), Bridges), RawConf. +is_ok(ok) -> + ok; +is_ok(OkResult = {ok, _}) -> + OkResult; +is_ok(Error = {error, _}) -> + Error; is_ok(ResL) -> case lists:filter( @@ -751,7 +915,7 @@ is_ok(ResL) -> ) of [] -> {ok, [Res || {ok, Res} <- ResL]}; - ErrL -> {error, ErrL} + ErrL -> hd(ErrL) end. filter_out_request_body(Conf) -> @@ -760,6 +924,7 @@ filter_out_request_body(Conf) -> <<"type">>, <<"name">>, <<"status">>, + <<"status_reason">>, <<"node_status">>, <<"node_metrics">>, <<"metrics">>, @@ -767,9 +932,6 @@ filter_out_request_body(Conf) -> ], maps:without(ExtraConfs, Conf). -error_msg(Code, Msg) -> - #{code => Code, message => emqx_misc:readable_error_msg(Msg)}. - bin(S) when is_list(S) -> list_to_binary(S); bin(S) when is_atom(S) -> @@ -777,32 +939,98 @@ bin(S) when is_atom(S) -> bin(S) when is_binary(S) -> S. -call_operation(Node, OperFunc, BridgeType, BridgeName) -> - case emqx_misc:safe_to_existing_atom(Node, utf8) of - {ok, TargetNode} -> - case - emqx_bridge_proto_v1:OperFunc( - TargetNode, BridgeType, BridgeName - ) - of - ok -> - {200}; - {error, timeout} -> - {503, error_msg('SERVICE_UNAVAILABLE', <<"request timeout">>)}; - {error, {start_pool_failed, Name, Reason}} -> - {503, - error_msg( - 'SERVICE_UNAVAILABLE', - bin( - io_lib:format( - "failed to start ~p pool for reason ~p", - [Name, Reason] - ) - ) - )}; - {error, Reason} -> - {500, error_msg('INTERNAL_ERROR', Reason)} - end; - {error, _} -> - {400, error_msg('INVALID_NODE', <<"invalid node">>)} +call_operation(NodeOrAll, OperFunc, Args = [_Nodes, BridgeType, BridgeName]) -> + case is_ok(do_bpapi_call(NodeOrAll, OperFunc, Args)) of + Ok when Ok =:= ok; is_tuple(Ok), element(1, Ok) =:= ok -> + ?NO_CONTENT; + {error, not_implemented} -> + %% Should only happen if we call `start` on a node that is + %% still on an older bpapi version that doesn't support it. 
+ maybe_try_restart(NodeOrAll, OperFunc, Args); + {error, timeout} -> + ?SERVICE_UNAVAILABLE(<<"Request timeout">>); + {error, {start_pool_failed, Name, Reason}} -> + ?SERVICE_UNAVAILABLE( + bin(io_lib:format("Failed to start ~p pool for reason ~p", [Name, Reason])) + ); + {error, not_found} -> + BridgeId = emqx_bridge_resource:bridge_id(BridgeType, BridgeName), + ?SLOG(warning, #{ + msg => "bridge_inconsistent_in_cluster_for_call_operation", + reason => not_found, + type => BridgeType, + name => BridgeName, + bridge => BridgeId + }), + ?SERVICE_UNAVAILABLE(<<"Bridge not found on remote node: ", BridgeId/binary>>); + {error, {node_not_found, Node}} -> + ?NOT_FOUND(<<"Node not found: ", (atom_to_binary(Node))/binary>>); + {error, Reason} when not is_tuple(Reason); element(1, Reason) =/= 'exit' -> + ?BAD_REQUEST(Reason) end. + +maybe_try_restart(all, start_bridges_to_all_nodes, Args) -> + call_operation(all, restart_bridges_to_all_nodes, Args); +maybe_try_restart(Node, start_bridge_to_node, Args) -> + call_operation(Node, restart_bridge_to_node, Args); +maybe_try_restart(_, _, _) -> + ?NOT_IMPLEMENTED. + +do_bpapi_call(all, Call, Args) -> + maybe_unwrap( + do_bpapi_call_vsn(emqx_bpapi:supported_version(emqx_bridge), Call, Args) + ); +do_bpapi_call(Node, Call, Args) -> + case lists:member(Node, mria:running_nodes()) of + true -> + do_bpapi_call_vsn(emqx_bpapi:supported_version(Node, emqx_bridge), Call, Args); + false -> + {error, {node_not_found, Node}} + end. + +do_bpapi_call_vsn(SupportedVersion, Call, Args) -> + case lists:member(SupportedVersion, supported_versions(Call)) of + true -> + apply(emqx_bridge_proto_v4, Call, Args); + false -> + {error, not_implemented} + end. + +maybe_unwrap({error, not_implemented}) -> + {error, not_implemented}; +maybe_unwrap(RpcMulticallResult) -> + emqx_rpc:unwrap_erpc(RpcMulticallResult). + +supported_versions(start_bridge_to_node) -> [2, 3, 4]; +supported_versions(start_bridges_to_all_nodes) -> [2, 3, 4]; +supported_versions(get_metrics_from_all_nodes) -> [4]; +supported_versions(_Call) -> [1, 2, 3, 4]. + +redact(Term) -> + emqx_utils:redact(Term). + +deobfuscate(NewConf, OldConf) -> + maps:fold( + fun(K, V, Acc) -> + case maps:find(K, OldConf) of + error -> + Acc#{K => V}; + {ok, OldV} when is_map(V), is_map(OldV) -> + Acc#{K => deobfuscate(V, OldV)}; + {ok, OldV} -> + case emqx_utils:is_redacted(K, V) of + true -> + Acc#{K => OldV}; + _ -> + Acc#{K => V} + end + end + end, + #{}, + NewConf + ). + +map_to_json(M) -> + emqx_utils_json:encode( + emqx_utils_maps:jsonable_map(M, fun(K, V) -> {K, emqx_utils_maps:binary_string(V)} end) + ). diff --git a/apps/emqx_bridge/src/emqx_bridge_app.erl b/apps/emqx_bridge/src/emqx_bridge_app.erl index e10034bae..daae15a17 100644 --- a/apps/emqx_bridge/src/emqx_bridge_app.erl +++ b/apps/emqx_bridge/src/emqx_bridge_app.erl @@ -39,7 +39,7 @@ start(_StartType, _StartArgs) -> stop(_State) -> emqx_conf:remove_handler(?LEAF_NODE_HDLR_PATH), emqx_conf:remove_handler(?TOP_LELVE_HDLR_PATH), - ok = emqx_bridge:unload_hook(), + ok = emqx_bridge:unload(), ok. -if(?EMQX_RELEASE_EDITION == ee). diff --git a/apps/emqx_bridge/src/emqx_bridge_resource.erl b/apps/emqx_bridge/src/emqx_bridge_resource.erl index d1ce260c9..0d2feef83 100644 --- a/apps/emqx_bridge/src/emqx_bridge_resource.erl +++ b/apps/emqx_bridge/src/emqx_bridge_resource.erl @@ -14,37 +14,53 @@ %% limitations under the License. %%-------------------------------------------------------------------- -module(emqx_bridge_resource). --include_lib("emqx/include/emqx.hrl"). 
+ +-include("emqx_bridge_resource.hrl"). -include_lib("emqx/include/logger.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). -export([ bridge_to_resource_type/1, resource_id/1, resource_id/2, bridge_id/2, - parse_bridge_id/1 + parse_bridge_id/1, + parse_bridge_id/2, + bridge_hookpoint/1, + bridge_hookpoint_to_bridge_id/1 ]). -export([ create/2, create/3, create/4, + create_dry_run/2, recreate/2, recreate/3, - create_dry_run/2, remove/1, remove/2, remove/4, + reset_metrics/1, + restart/2, + start/2, + stop/2, update/2, update/3, - update/4, - stop/2, - restart/2, - reset_metrics/1 + update/4 ]). %% bi-directional bridge with producer/consumer or ingress/egress configs --define(IS_BI_DIR_BRIDGE(TYPE), TYPE =:= <<"mqtt">>; TYPE =:= <<"kafka">>). +-define(IS_BI_DIR_BRIDGE(TYPE), + (TYPE) =:= <<"mqtt">> +). +-define(IS_INGRESS_BRIDGE(TYPE), + (TYPE) =:= <<"kafka_consumer">> orelse ?IS_BI_DIR_BRIDGE(TYPE) +). + +%% [FIXME] this has no place here, it's used in parse_confs/3, which should +%% rather delegate to a behavior callback than implementing domain knowledge +%% here (reversed dependency) +-define(INSERT_TABLET_PATH, "/rest/v2/insertTablet"). -if(?EMQX_RELEASE_EDITION == ee). bridge_to_resource_type(<<"mqtt">>) -> emqx_connector_mqtt; @@ -71,33 +87,48 @@ bridge_id(BridgeType, BridgeName) -> Type = bin(BridgeType), <>. --spec parse_bridge_id(list() | binary() | atom()) -> {atom(), binary()}. parse_bridge_id(BridgeId) -> + parse_bridge_id(BridgeId, #{atom_name => true}). + +-spec parse_bridge_id(list() | binary() | atom(), #{atom_name => boolean()}) -> + {atom(), atom() | binary()}. +parse_bridge_id(BridgeId, Opts) -> case string:split(bin(BridgeId), ":", all) of [Type, Name] -> - {to_type_atom(Type), validate_name(Name)}; + {to_type_atom(Type), validate_name(Name, Opts)}; _ -> - invalid_bridge_id( - <<"should be of forst {type}:{name}, but got ", BridgeId/binary>> + invalid_data( + <<"should be of pattern {type}:{name}, but got ", BridgeId/binary>> ) end. -validate_name(Name0) -> +bridge_hookpoint(BridgeId) -> + <<"$bridges/", (bin(BridgeId))/binary>>. + +bridge_hookpoint_to_bridge_id(?BRIDGE_HOOKPOINT(BridgeId)) -> + {ok, BridgeId}; +bridge_hookpoint_to_bridge_id(_) -> + {error, bad_bridge_hookpoint}. + +validate_name(Name0, Opts) -> Name = unicode:characters_to_list(Name0, utf8), case is_list(Name) andalso Name =/= [] of true -> case lists:all(fun is_id_char/1, Name) of true -> - Name0; + case maps:get(atom_name, Opts, true) of + true -> list_to_existing_atom(Name); + false -> Name0 + end; false -> - invalid_bridge_id(<<"bad name: ", Name0/binary>>) + invalid_data(<<"bad name: ", Name0/binary>>) end; false -> - invalid_bridge_id(<<"only 0-9a-zA-Z_-. is allowed in name: ", Name0/binary>>) + invalid_data(<<"only 0-9a-zA-Z_-. is allowed in name: ", Name0/binary>>) end. --spec invalid_bridge_id(binary()) -> no_return(). -invalid_bridge_id(Reason) -> throw({?FUNCTION_NAME, Reason}). +-spec invalid_data(binary()) -> no_return(). +invalid_data(Reason) -> throw(#{kind => validation_error, reason => Reason}). is_id_char(C) when C >= $0 andalso C =< $9 -> true; is_id_char(C) when C >= $a andalso C =< $z -> true; @@ -112,18 +143,20 @@ to_type_atom(Type) -> erlang:binary_to_existing_atom(Type, utf8) catch _:_ -> - invalid_bridge_id(<<"unknown type: ", Type/binary>>) + invalid_data(<<"unknown bridge type: ", Type/binary>>) end. reset_metrics(ResourceId) -> emqx_resource:reset_metrics(ResourceId). +restart(Type, Name) -> + emqx_resource:restart(resource_id(Type, Name)). 
+ stop(Type, Name) -> emqx_resource:stop(resource_id(Type, Name)). -%% we don't provide 'start', as we want an already started bridge to be restarted. -restart(Type, Name) -> - emqx_resource:restart(resource_id(Type, Name)). +start(Type, Name) -> + emqx_resource:start(resource_id(Type, Name)). create(BridgeId, Conf) -> {BridgeType, BridgeName} = parse_bridge_id(BridgeId), @@ -132,13 +165,14 @@ create(BridgeId, Conf) -> create(Type, Name, Conf) -> create(Type, Name, Conf, #{}). -create(Type, Name, Conf, Opts) -> +create(Type, Name, Conf, Opts0) -> ?SLOG(info, #{ msg => "create bridge", type => Type, name => Name, - config => Conf + config => emqx_utils:redact(Conf) }), + Opts = override_start_after_created(Conf, Opts0), {ok, _Data} = emqx_resource:create_local( resource_id(Type, Name), <<"emqx_bridge">>, @@ -146,7 +180,7 @@ create(Type, Name, Conf, Opts) -> parse_confs(bin(Type), Name, Conf), Opts ), - maybe_disable_bridge(Type, Name, Conf). + ok. update(BridgeId, {OldConf, Conf}) -> {BridgeType, BridgeName} = parse_bridge_id(BridgeId), @@ -155,7 +189,7 @@ update(BridgeId, {OldConf, Conf}) -> update(Type, Name, {OldConf, Conf}) -> update(Type, Name, {OldConf, Conf}, #{}). -update(Type, Name, {OldConf, Conf}, Opts) -> +update(Type, Name, {OldConf, Conf}, Opts0) -> %% TODO: sometimes its not necessary to restart the bridge connection. %% %% - if the connection related configs like `servers` is updated, we should restart/start @@ -164,23 +198,24 @@ update(Type, Name, {OldConf, Conf}, Opts) -> %% the `method` or `headers` of a WebHook is changed, then the bridge can be updated %% without restarting the bridge. %% - case emqx_map_lib:if_only_to_toggle_enable(OldConf, Conf) of + Opts = override_start_after_created(Conf, Opts0), + case emqx_utils_maps:if_only_to_toggle_enable(OldConf, Conf) of false -> ?SLOG(info, #{ msg => "update bridge", type => Type, name => Name, - config => Conf + config => emqx_utils:redact(Conf) }), case recreate(Type, Name, Conf, Opts) of {ok, _} -> - maybe_disable_bridge(Type, Name, Conf); + ok; {error, not_found} -> ?SLOG(warning, #{ - msg => "updating_a_non-exist_bridge_need_create_a_new_one", + msg => "updating_a_non_existing_bridge", type => Type, name => Name, - config => Conf + config => emqx_utils:redact(Conf) }), create(Type, Name, Conf, Opts); {error, Reason} -> @@ -213,17 +248,27 @@ recreate(Type, Name, Conf, Opts) -> Opts ). -create_dry_run(Type, Conf) -> - TmpPath = iolist_to_binary(["bridges-create-dry-run:", emqx_misc:gen_id(8)]), +create_dry_run(Type, Conf0) -> + TmpPath0 = iolist_to_binary([?TEST_ID_PREFIX, emqx_utils:gen_id(8)]), + TmpPath = emqx_utils:safe_filename(TmpPath0), + Conf = emqx_utils_maps:safe_atom_key_map(Conf0), case emqx_connector_ssl:convert_certs(TmpPath, Conf) of {error, Reason} -> {error, Reason}; {ok, ConfNew} -> - Res = emqx_resource:create_dry_run_local( - bridge_to_resource_type(Type), ConfNew - ), - _ = maybe_clear_certs(TmpPath, ConfNew), - Res + try + ParseConf = parse_confs(bin(Type), TmpPath, ConfNew), + Res = emqx_resource:create_dry_run_local( + bridge_to_resource_type(Type), ParseConf + ), + Res + catch + %% validation errors + throw:Reason -> + {error, Reason} + after + _ = maybe_clear_certs(TmpPath, ConfNew) + end end. remove(BridgeId) -> @@ -242,18 +287,14 @@ remove(Type, Name, _Conf, _Opts) -> {error, Reason} -> {error, Reason} end. -maybe_disable_bridge(Type, Name, Conf) -> - case maps:get(enable, Conf, true) of - false -> stop(Type, Name); - true -> ok - end. 
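Note: create_dry_run/2 now runs the candidate config through parse_confs/3 and catches validation throws, so a probe fails fast on a bad URL instead of leaking a half-created resource. A rough sketch of a call as made from '/bridges_probe'; the config is abbreviated and the exact value formats at this layer are an assumption:

    case emqx_bridge_resource:create_dry_run(<<"webhook">>, #{
        <<"url">> => <<"http://127.0.0.1:9901/hook">>,
        <<"method">> => <<"post">>,
        <<"headers">> => #{},
        <<"request_timeout">> => <<"15s">>,
        <<"max_retries">> => 3
    }) of
        ok -> probe_ok;
        {error, Reason} -> {probe_failed, Reason}
    end.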
-
 maybe_clear_certs(TmpPath, #{ssl := SslConf} = Conf) ->
     %% don't remove the cert files if they are in use
     case is_tmp_path_conf(TmpPath, SslConf) of
         true -> emqx_connector_ssl:clear_certs(TmpPath, Conf);
         false -> ok
-    end.
+    end;
+maybe_clear_certs(_TmpPath, _ConfWithoutSsl) ->
+    ok.

 is_tmp_path_conf(TmpPath, #{certfile := Certfile}) ->
     is_tmp_path(TmpPath, Certfile);
@@ -274,32 +315,70 @@ parse_confs(
     #{
         url := Url,
         method := Method,
-        body := Body,
         headers := Headers,
         request_timeout := ReqTimeout,
         max_retries := Retry
     } = Conf
 ) ->
-    {BaseUrl, Path} = parse_url(Url),
-    {ok, BaseUrl2} = emqx_http_lib:uri_parse(BaseUrl),
+    Url1 = bin(Url),
+    {BaseUrl, Path} = parse_url(Url1),
+    BaseUrl1 =
+        case emqx_http_lib:uri_parse(BaseUrl) of
+            {ok, BUrl} ->
+                BUrl;
+            {error, Reason} ->
+                Reason1 = emqx_utils:readable_error_msg(Reason),
+                invalid_data(<<"Invalid URL: ", Url1/binary, ", details: ", Reason1/binary>>)
+        end,
     Conf#{
-        base_url => BaseUrl2,
+        base_url => BaseUrl1,
         request => #{
             path => Path,
             method => Method,
-            body => Body,
+            body => maps:get(body, Conf, undefined),
            headers => Headers,
             request_timeout => ReqTimeout,
             max_retries => Retry
         }
     };
-parse_confs(Type, Name, Conf) when ?IS_BI_DIR_BRIDGE(Type) ->
+parse_confs(<<"iotdb">>, Name, Conf) ->
+    #{
+        base_url := BaseURL,
+        authentication :=
+            #{
+                username := Username,
+                password := Password
+            }
+    } = Conf,
+    BasicToken = base64:encode(<<Username/binary, ":", Password/binary>>),
+    WebhookConfig =
+        Conf#{
+            method => <<"post">>,
+            url => <<BaseURL/binary, ?INSERT_TABLET_PATH>>,
+            headers => [
+                {<<"Content-type">>, <<"application/json">>},
+                {<<"Authorization">>, BasicToken}
+            ]
+        },
+    parse_confs(
+        <<"webhook">>,
+        Name,
+        WebhookConfig
+    );
+parse_confs(Type, Name, Conf) when ?IS_INGRESS_BRIDGE(Type) ->
     %% For some drivers that can be used as data-sources, we need to provide a
     %% hookpoint. The underlying driver will run `emqx_hooks:run/3` when it
     %% receives a message from the external database.
     BId = bridge_id(Type, Name),
-    Conf#{hookpoint => <<"$bridges/", BId/binary>>, bridge_name => Name};
+    BridgeHookpoint = bridge_hookpoint(BId),
+    Conf#{hookpoint => BridgeHookpoint, bridge_name => Name};
+%% TODO: rename this to `kafka_producer' after alias support is added
+%% to hocon; keeping this as just `kafka' for backwards compatibility.
+parse_confs(<<"kafka">> = _Type, Name, Conf) ->
+    Conf#{bridge_name => Name};
+parse_confs(<<"pulsar_producer">> = _Type, Name, Conf) ->
+    Conf#{bridge_name => Name};
 parse_confs(_Type, _Name, Conf) ->
     Conf.

@@ -313,7 +392,7 @@ parse_url(Url) ->
             {iolist_to_binary([Scheme, "//", HostPort]), <<>>}
         end;
         [Url] ->
-            error({invalid_url, Url})
+            invalid_data(<<"Missing scheme in URL: ", Url/binary>>)
     end.

 str(Bin) when is_binary(Bin) -> binary_to_list(Bin);
@@ -322,3 +401,8 @@ str(Str) when is_list(Str) -> Str.
 bin(Bin) when is_binary(Bin) -> Bin;
 bin(Str) when is_list(Str) -> list_to_binary(Str);
 bin(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8).
+
+override_start_after_created(Config, Opts) ->
+    Enabled = maps:get(enable, Config, true),
+    StartAfterCreated = Enabled andalso maps:get(start_after_created, Opts, Enabled),
+    Opts#{start_after_created => StartAfterCreated}.
diff --git a/apps/emqx_bridge/src/emqx_bridge_sup.erl b/apps/emqx_bridge/src/emqx_bridge_sup.erl
index a5e72a8c6..46a87b74f 100644
--- a/apps/emqx_bridge/src/emqx_bridge_sup.erl
+++ b/apps/emqx_bridge/src/emqx_bridge_sup.erl
@@ -34,5 +34,3 @@ init([]) ->
     },
     ChildSpecs = [],
     {ok, {SupFlags, ChildSpecs}}.
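Note: override_start_after_created/2, added at the end of emqx_bridge_resource above, is what replaces the removed maybe_disable_bridge/3: a bridge created with enable => false is simply never started, rather than started and then stopped again. A quick check of the resulting option:

    #{start_after_created := false} = override_start_after_created(#{enable => false}, #{}),
    #{start_after_created := true} = override_start_after_created(#{enable => true}, #{}).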
- -%% internal functions diff --git a/apps/emqx_bridge/src/proto/emqx_bridge_proto_v1.erl b/apps/emqx_bridge/src/proto/emqx_bridge_proto_v1.erl index 52790ca42..88554893b 100644 --- a/apps/emqx_bridge/src/proto/emqx_bridge_proto_v1.erl +++ b/apps/emqx_bridge/src/proto/emqx_bridge_proto_v1.erl @@ -20,6 +20,7 @@ -export([ introduced_in/0, + deprecated_since/0, list_bridges/1, restart_bridge_to_node/3, @@ -36,6 +37,9 @@ introduced_in() -> "5.0.0". +deprecated_since() -> + "5.0.17". + -spec list_bridges(node()) -> list() | emqx_rpc:badrpc(). list_bridges(Node) -> rpc:call(Node, emqx_bridge, list, [], ?TIMEOUT). diff --git a/apps/emqx_bridge/src/proto/emqx_bridge_proto_v2.erl b/apps/emqx_bridge/src/proto/emqx_bridge_proto_v2.erl new file mode 100644 index 000000000..bcf6ca198 --- /dev/null +++ b/apps/emqx_bridge/src/proto/emqx_bridge_proto_v2.erl @@ -0,0 +1,126 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_proto_v2). + +-behaviour(emqx_bpapi). + +-export([ + introduced_in/0, + deprecated_since/0, + + list_bridges/1, + restart_bridge_to_node/3, + start_bridge_to_node/3, + stop_bridge_to_node/3, + lookup_from_all_nodes/3, + restart_bridges_to_all_nodes/3, + start_bridges_to_all_nodes/3, + stop_bridges_to_all_nodes/3 +]). + +-include_lib("emqx/include/bpapi.hrl"). + +-define(TIMEOUT, 15000). + +introduced_in() -> + "5.0.17". + +deprecated_since() -> + "5.0.21". + +-spec list_bridges(node()) -> list() | emqx_rpc:badrpc(). +list_bridges(Node) -> + rpc:call(Node, emqx_bridge, list, [], ?TIMEOUT). + +-type key() :: atom() | binary() | [byte()]. + +-spec restart_bridge_to_node(node(), key(), key()) -> + term(). +restart_bridge_to_node(Node, BridgeType, BridgeName) -> + rpc:call( + Node, + emqx_bridge_resource, + restart, + [BridgeType, BridgeName], + ?TIMEOUT + ). + +-spec start_bridge_to_node(node(), key(), key()) -> + term(). +start_bridge_to_node(Node, BridgeType, BridgeName) -> + rpc:call( + Node, + emqx_bridge_resource, + start, + [BridgeType, BridgeName], + ?TIMEOUT + ). + +-spec stop_bridge_to_node(node(), key(), key()) -> + term(). +stop_bridge_to_node(Node, BridgeType, BridgeName) -> + rpc:call( + Node, + emqx_bridge_resource, + stop, + [BridgeType, BridgeName], + ?TIMEOUT + ). + +-spec restart_bridges_to_all_nodes([node()], key(), key()) -> + emqx_rpc:erpc_multicall(). +restart_bridges_to_all_nodes(Nodes, BridgeType, BridgeName) -> + erpc:multicall( + Nodes, + emqx_bridge_resource, + restart, + [BridgeType, BridgeName], + ?TIMEOUT + ). + +-spec start_bridges_to_all_nodes([node()], key(), key()) -> + emqx_rpc:erpc_multicall(). +start_bridges_to_all_nodes(Nodes, BridgeType, BridgeName) -> + erpc:multicall( + Nodes, + emqx_bridge_resource, + start, + [BridgeType, BridgeName], + ?TIMEOUT + ). 
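Note: each protocol module pins its lifetime with introduced_in/0 and deprecated_since/0, and the API layer only attempts a call when the negotiated cluster version supports it (see supported_versions/1 in emqx_bridge_api earlier in this diff). A sketch of that gate, assuming a mixed cluster negotiated down to version 3:

    Vsn = 3,  %% stand-in for emqx_bpapi:supported_version(emqx_bridge)
    %% The metrics fan-out exists only in v4, so it degrades to not_implemented:
    false = lists:member(Vsn, supported_versions(get_metrics_from_all_nodes)),
    %% stop is available in every version:
    true = lists:member(Vsn, supported_versions(stop_bridges_to_all_nodes)).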
+ +-spec stop_bridges_to_all_nodes([node()], key(), key()) -> + emqx_rpc:erpc_multicall(). +stop_bridges_to_all_nodes(Nodes, BridgeType, BridgeName) -> + erpc:multicall( + Nodes, + emqx_bridge_resource, + stop, + [BridgeType, BridgeName], + ?TIMEOUT + ). + +-spec lookup_from_all_nodes([node()], key(), key()) -> + emqx_rpc:erpc_multicall(). +lookup_from_all_nodes(Nodes, BridgeType, BridgeName) -> + erpc:multicall( + Nodes, + emqx_bridge_api, + lookup_from_local_node, + [BridgeType, BridgeName], + ?TIMEOUT + ). diff --git a/apps/emqx_bridge/src/proto/emqx_bridge_proto_v3.erl b/apps/emqx_bridge/src/proto/emqx_bridge_proto_v3.erl new file mode 100644 index 000000000..0b496364a --- /dev/null +++ b/apps/emqx_bridge/src/proto/emqx_bridge_proto_v3.erl @@ -0,0 +1,132 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_proto_v3). + +-behaviour(emqx_bpapi). + +-export([ + introduced_in/0, + deprecated_since/0, + + list_bridges/1, + list_bridges_on_nodes/1, + restart_bridge_to_node/3, + start_bridge_to_node/3, + stop_bridge_to_node/3, + lookup_from_all_nodes/3, + restart_bridges_to_all_nodes/3, + start_bridges_to_all_nodes/3, + stop_bridges_to_all_nodes/3 +]). + +-include_lib("emqx/include/bpapi.hrl"). + +-define(TIMEOUT, 15000). + +introduced_in() -> + "5.0.21". + +deprecated_since() -> + "5.0.22". + +-spec list_bridges(node()) -> list() | emqx_rpc:badrpc(). +list_bridges(Node) -> + rpc:call(Node, emqx_bridge, list, [], ?TIMEOUT). + +-spec list_bridges_on_nodes([node()]) -> + emqx_rpc:erpc_multicall([emqx_resource:resource_data()]). +list_bridges_on_nodes(Nodes) -> + erpc:multicall(Nodes, emqx_bridge, list, [], ?TIMEOUT). + +-type key() :: atom() | binary() | [byte()]. + +-spec restart_bridge_to_node(node(), key(), key()) -> + term(). +restart_bridge_to_node(Node, BridgeType, BridgeName) -> + rpc:call( + Node, + emqx_bridge_resource, + restart, + [BridgeType, BridgeName], + ?TIMEOUT + ). + +-spec start_bridge_to_node(node(), key(), key()) -> + term(). +start_bridge_to_node(Node, BridgeType, BridgeName) -> + rpc:call( + Node, + emqx_bridge_resource, + start, + [BridgeType, BridgeName], + ?TIMEOUT + ). + +-spec stop_bridge_to_node(node(), key(), key()) -> + term(). +stop_bridge_to_node(Node, BridgeType, BridgeName) -> + rpc:call( + Node, + emqx_bridge_resource, + stop, + [BridgeType, BridgeName], + ?TIMEOUT + ). + +-spec restart_bridges_to_all_nodes([node()], key(), key()) -> + emqx_rpc:erpc_multicall(). +restart_bridges_to_all_nodes(Nodes, BridgeType, BridgeName) -> + erpc:multicall( + Nodes, + emqx_bridge_resource, + restart, + [BridgeType, BridgeName], + ?TIMEOUT + ). + +-spec start_bridges_to_all_nodes([node()], key(), key()) -> + emqx_rpc:erpc_multicall(). 
+start_bridges_to_all_nodes(Nodes, BridgeType, BridgeName) -> + erpc:multicall( + Nodes, + emqx_bridge_resource, + start, + [BridgeType, BridgeName], + ?TIMEOUT + ). + +-spec stop_bridges_to_all_nodes([node()], key(), key()) -> + emqx_rpc:erpc_multicall(). +stop_bridges_to_all_nodes(Nodes, BridgeType, BridgeName) -> + erpc:multicall( + Nodes, + emqx_bridge_resource, + stop, + [BridgeType, BridgeName], + ?TIMEOUT + ). + +-spec lookup_from_all_nodes([node()], key(), key()) -> + emqx_rpc:erpc_multicall(). +lookup_from_all_nodes(Nodes, BridgeType, BridgeName) -> + erpc:multicall( + Nodes, + emqx_bridge_api, + lookup_from_local_node, + [BridgeType, BridgeName], + ?TIMEOUT + ). diff --git a/apps/emqx_bridge/src/proto/emqx_bridge_proto_v4.erl b/apps/emqx_bridge/src/proto/emqx_bridge_proto_v4.erl new file mode 100644 index 000000000..937065e41 --- /dev/null +++ b/apps/emqx_bridge/src/proto/emqx_bridge_proto_v4.erl @@ -0,0 +1,135 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_proto_v4). + +-behaviour(emqx_bpapi). + +-export([ + introduced_in/0, + + list_bridges_on_nodes/1, + restart_bridge_to_node/3, + start_bridge_to_node/3, + stop_bridge_to_node/3, + lookup_from_all_nodes/3, + get_metrics_from_all_nodes/3, + restart_bridges_to_all_nodes/3, + start_bridges_to_all_nodes/3, + stop_bridges_to_all_nodes/3 +]). + +-include_lib("emqx/include/bpapi.hrl"). + +-define(TIMEOUT, 15000). + +introduced_in() -> + "5.0.22". + +-spec list_bridges_on_nodes([node()]) -> + emqx_rpc:erpc_multicall([emqx_resource:resource_data()]). +list_bridges_on_nodes(Nodes) -> + erpc:multicall(Nodes, emqx_bridge, list, [], ?TIMEOUT). + +-type key() :: atom() | binary() | [byte()]. + +-spec restart_bridge_to_node(node(), key(), key()) -> + term(). +restart_bridge_to_node(Node, BridgeType, BridgeName) -> + rpc:call( + Node, + emqx_bridge_resource, + restart, + [BridgeType, BridgeName], + ?TIMEOUT + ). + +-spec start_bridge_to_node(node(), key(), key()) -> + term(). +start_bridge_to_node(Node, BridgeType, BridgeName) -> + rpc:call( + Node, + emqx_bridge_resource, + start, + [BridgeType, BridgeName], + ?TIMEOUT + ). + +-spec stop_bridge_to_node(node(), key(), key()) -> + term(). +stop_bridge_to_node(Node, BridgeType, BridgeName) -> + rpc:call( + Node, + emqx_bridge_resource, + stop, + [BridgeType, BridgeName], + ?TIMEOUT + ). + +-spec restart_bridges_to_all_nodes([node()], key(), key()) -> + emqx_rpc:erpc_multicall(). +restart_bridges_to_all_nodes(Nodes, BridgeType, BridgeName) -> + erpc:multicall( + Nodes, + emqx_bridge_resource, + restart, + [BridgeType, BridgeName], + ?TIMEOUT + ). + +-spec start_bridges_to_all_nodes([node()], key(), key()) -> + emqx_rpc:erpc_multicall(). 
+start_bridges_to_all_nodes(Nodes, BridgeType, BridgeName) -> + erpc:multicall( + Nodes, + emqx_bridge_resource, + start, + [BridgeType, BridgeName], + ?TIMEOUT + ). + +-spec stop_bridges_to_all_nodes([node()], key(), key()) -> + emqx_rpc:erpc_multicall(). +stop_bridges_to_all_nodes(Nodes, BridgeType, BridgeName) -> + erpc:multicall( + Nodes, + emqx_bridge_resource, + stop, + [BridgeType, BridgeName], + ?TIMEOUT + ). + +-spec lookup_from_all_nodes([node()], key(), key()) -> + emqx_rpc:erpc_multicall(). +lookup_from_all_nodes(Nodes, BridgeType, BridgeName) -> + erpc:multicall( + Nodes, + emqx_bridge_api, + lookup_from_local_node, + [BridgeType, BridgeName], + ?TIMEOUT + ). + +-spec get_metrics_from_all_nodes([node()], key(), key()) -> + emqx_rpc:erpc_multicall(emqx_metrics_worker:metrics()). +get_metrics_from_all_nodes(Nodes, BridgeType, BridgeName) -> + erpc:multicall( + Nodes, + emqx_bridge_api, + get_metrics_from_local_node, + [BridgeType, BridgeName], + ?TIMEOUT + ). diff --git a/apps/emqx_bridge/src/schema/emqx_bridge_mqtt_config.erl b/apps/emqx_bridge/src/schema/emqx_bridge_compatible_config.erl similarity index 80% rename from apps/emqx_bridge/src/schema/emqx_bridge_mqtt_config.erl rename to apps/emqx_bridge/src/schema/emqx_bridge_compatible_config.erl index 4e35e38aa..595b75ecf 100644 --- a/apps/emqx_bridge/src/schema/emqx_bridge_mqtt_config.erl +++ b/apps/emqx_bridge/src/schema/emqx_bridge_compatible_config.erl @@ -15,22 +15,23 @@ %%-------------------------------------------------------------------- %% @doc This module was created to convert old version (from v5.0.0 to v5.0.11) -%% mqtt connector configs to newer version (developed for enterprise edition). --module(emqx_bridge_mqtt_config). +%% mqtt/webhook connector configs to newer version (developed for enterprise edition). +-module(emqx_bridge_compatible_config). -export([ - upgrade_pre_ee/1, - maybe_upgrade/1 + upgrade_pre_ee/2, + maybe_upgrade/1, + webhook_maybe_upgrade/1 ]). -upgrade_pre_ee(undefined) -> +upgrade_pre_ee(undefined, _UpgradeFunc) -> undefined; -upgrade_pre_ee(Conf0) when is_map(Conf0) -> - maps:from_list(upgrade_pre_ee(maps:to_list(Conf0))); -upgrade_pre_ee([]) -> +upgrade_pre_ee(Conf0, UpgradeFunc) when is_map(Conf0) -> + maps:from_list(upgrade_pre_ee(maps:to_list(Conf0), UpgradeFunc)); +upgrade_pre_ee([], _UpgradeFunc) -> []; -upgrade_pre_ee([{Name, Config} | Bridges]) -> - [{Name, maybe_upgrade(Config)} | upgrade_pre_ee(Bridges)]. +upgrade_pre_ee([{Name, Config} | Bridges], UpgradeFunc) -> + [{Name, UpgradeFunc(Config)} | upgrade_pre_ee(Bridges, UpgradeFunc)]. maybe_upgrade(#{<<"connector">> := _} = Config0) -> Config1 = up(Config0), @@ -39,6 +40,12 @@ maybe_upgrade(#{<<"connector">> := _} = Config0) -> maybe_upgrade(NewVersion) -> NewVersion. +webhook_maybe_upgrade(#{<<"direction">> := _} = Config0) -> + Config1 = maps:remove(<<"direction">>, Config0), + Config1#{<<"resource_opts">> => default_resource_opts()}; +webhook_maybe_upgrade(NewVersion) -> + NewVersion. + binary_key({K, V}) -> {atom_to_binary(K, utf8), V}. 
@@ -65,7 +72,6 @@ up(#{<<"connector">> := Connector} = Config) -> Cn(proto_ver, <<"v4">>), Cn(server, undefined), Cn(retry_interval, <<"15s">>), - Cn(reconnect_interval, <<"15s">>), Cn(ssl, default_ssl()), {enable, Enable}, {resource_opts, default_resource_opts()}, @@ -80,11 +86,10 @@ default_ssl() -> default_resource_opts() -> #{ - <<"async_inflight_window">> => 100, + <<"inflight_window">> => 100, <<"auto_restart_interval">> => <<"60s">>, - <<"enable_queue">> => false, <<"health_check_interval">> => <<"15s">>, - <<"max_queue_bytes">> => <<"1GB">>, + <<"max_buffer_bytes">> => <<"1GB">>, <<"query_mode">> => <<"sync">>, %% there is only one underlying MQTT connection %% doesn't make a lot of sense to have a large pool diff --git a/apps/emqx_bridge/src/schema/emqx_bridge_mqtt_schema.erl b/apps/emqx_bridge/src/schema/emqx_bridge_mqtt_schema.erl index 4665a3bc5..5cd1693c7 100644 --- a/apps/emqx_bridge/src/schema/emqx_bridge_mqtt_schema.erl +++ b/apps/emqx_bridge/src/schema/emqx_bridge_mqtt_schema.erl @@ -51,7 +51,7 @@ fields("post") -> fields("put") -> fields("config"); fields("get") -> - emqx_bridge_schema:metrics_status_fields() ++ fields("config"). + emqx_bridge_schema:status_fields() ++ fields("config"). desc("config") -> ?DESC("config"); diff --git a/apps/emqx_bridge/src/schema/emqx_bridge_schema.erl b/apps/emqx_bridge/src/schema/emqx_bridge_schema.erl index c2358da51..f58805b6b 100644 --- a/apps/emqx_bridge/src/schema/emqx_bridge_schema.erl +++ b/apps/emqx_bridge/src/schema/emqx_bridge_schema.erl @@ -17,10 +17,11 @@ -include_lib("typerefl/include/types.hrl"). -include_lib("hocon/include/hoconsc.hrl"). +-include_lib("emqx/include/logger.hrl"). -import(hoconsc, [mk/2, ref/2]). --export([roots/0, fields/1, desc/1, namespace/0]). +-export([roots/0, fields/1, desc/1, namespace/0, tags/0]). -export([ get_response/0, @@ -30,7 +31,8 @@ -export([ common_bridge_fields/0, - metrics_status_fields/0 + status_fields/0, + metrics_fields/0 ]). %%====================================================================================== @@ -55,8 +57,8 @@ api_schema(Method) -> EE = ee_api_schemas(Method), hoconsc:union(Broker ++ EE). +-if(?EMQX_RELEASE_EDITION == ee). ee_api_schemas(Method) -> - %% must ensure the app is loaded before checking if fn is defined. ensure_loaded(emqx_ee_bridge, emqx_ee_bridge), case erlang:function_exported(emqx_ee_bridge, api_schemas, 1) of true -> emqx_ee_bridge:api_schemas(Method); @@ -64,13 +66,31 @@ ee_api_schemas(Method) -> end. ee_fields_bridges() -> - %% must ensure the app is loaded before checking if fn is defined. ensure_loaded(emqx_ee_bridge, emqx_ee_bridge), case erlang:function_exported(emqx_ee_bridge, fields, 1) of true -> emqx_ee_bridge:fields(bridges); false -> [] end. +%% must ensure the app is loaded before checking if fn is defined. +ensure_loaded(App, Mod) -> + try + _ = application:load(App), + _ = Mod:module_info(), + ok + catch + _:_ -> + ok + end. + +-else. + +ee_api_schemas(_) -> []. + +ee_fields_bridges() -> []. + +-endif. + common_bridge_fields() -> [ {enable, @@ -83,19 +103,29 @@ common_bridge_fields() -> )} ]. -metrics_status_fields() -> +status_fields() -> + [ + {"status", mk(status(), #{desc => ?DESC("desc_status")})}, + {"status_reason", + mk(binary(), #{ + required => false, + desc => ?DESC("desc_status_reason"), + example => <<"Connection refused">> + })}, + {"node_status", + mk( + hoconsc:array(ref(?MODULE, "node_status")), + #{desc => ?DESC("desc_node_status")} + )} + ]. 
+ +metrics_fields() -> [ {"metrics", mk(ref(?MODULE, "metrics"), #{desc => ?DESC("desc_metrics")})}, {"node_metrics", mk( hoconsc:array(ref(?MODULE, "node_metrics")), #{desc => ?DESC("desc_node_metrics")} - )}, - {"status", mk(status(), #{desc => ?DESC("desc_status")})}, - {"node_status", - mk( - hoconsc:array(ref(?MODULE, "node_status")), - #{desc => ?DESC("desc_node_status")} )} ]. @@ -104,7 +134,10 @@ metrics_status_fields() -> namespace() -> "bridge". -roots() -> [bridges]. +tags() -> + [<<"Bridge">>]. + +roots() -> [{bridges, ?HOCON(?R_REF(bridges), #{importance => ?IMPORTANCE_LOW})}]. fields(bridges) -> [ @@ -113,7 +146,8 @@ fields(bridges) -> hoconsc:map(name, ref(emqx_bridge_webhook_schema, "config")), #{ desc => ?DESC("bridges_webhook"), - required => false + required => false, + converter => fun webhook_bridge_converter/2 } )}, {mqtt, @@ -122,18 +156,19 @@ fields(bridges) -> #{ desc => ?DESC("bridges_mqtt"), required => false, - converter => fun emqx_bridge_mqtt_config:upgrade_pre_ee/1 + converter => fun(X, _HoconOpts) -> + emqx_bridge_compatible_config:upgrade_pre_ee( + X, fun emqx_bridge_compatible_config:maybe_upgrade/1 + ) + end } )} ] ++ ee_fields_bridges(); fields("metrics") -> [ - {"batching", mk(integer(), #{desc => ?DESC("metric_batching")})}, {"dropped", mk(integer(), #{desc => ?DESC("metric_dropped")})}, {"dropped.other", mk(integer(), #{desc => ?DESC("metric_dropped_other")})}, {"dropped.queue_full", mk(integer(), #{desc => ?DESC("metric_dropped_queue_full")})}, - {"dropped.queue_not_enabled", - mk(integer(), #{desc => ?DESC("metric_dropped_queue_not_enabled")})}, {"dropped.resource_not_found", mk(integer(), #{desc => ?DESC("metric_dropped_resource_not_found")})}, {"dropped.resource_stopped", @@ -142,7 +177,7 @@ fields("metrics") -> {"queuing", mk(integer(), #{desc => ?DESC("metric_queuing")})}, {"retried", mk(integer(), #{desc => ?DESC("metric_retried")})}, {"failed", mk(integer(), #{desc => ?DESC("metric_sent_failed")})}, - {"inflight", mk(integer(), #{desc => ?DESC("metric_sent_inflight")})}, + {"inflight", mk(integer(), #{desc => ?DESC("metric_inflight")})}, {"success", mk(integer(), #{desc => ?DESC("metric_sent_success")})}, {"rate", mk(float(), #{desc => ?DESC("metric_rate")})}, {"rate_max", mk(float(), #{desc => ?DESC("metric_rate_max")})}, @@ -161,7 +196,13 @@ fields("node_metrics") -> fields("node_status") -> [ node_name(), - {"status", mk(status(), #{})} + {"status", mk(status(), #{})}, + {"status_reason", + mk(binary(), #{ + required => false, + desc => ?DESC("desc_status_reason"), + example => <<"Connection refused">> + })} ]. desc(bridges) -> @@ -176,21 +217,57 @@ desc(_) -> undefined. status() -> - hoconsc:enum([connected, disconnected, connecting]). + hoconsc:enum([connected, disconnected, connecting, inconsistent]). node_name() -> {"node", mk(binary(), #{desc => ?DESC("desc_node_name"), example => "emqx@127.0.0.1"})}. 
-%%================================================================================================= -%% Internal fns -%%================================================================================================= - -ensure_loaded(App, Mod) -> - try - _ = application:load(App), - _ = Mod:module_info(), - ok - catch - _:_ -> - ok +webhook_bridge_converter(Conf0, _HoconOpts) -> + Conf1 = emqx_bridge_compatible_config:upgrade_pre_ee( + Conf0, fun emqx_bridge_compatible_config:webhook_maybe_upgrade/1 + ), + case Conf1 of + undefined -> + undefined; + _ -> + maps:map( + fun(_Name, Conf) -> + do_convert_webhook_config(Conf) + end, + Conf1 + ) end. + +do_convert_webhook_config( + #{<<"request_timeout">> := ReqT, <<"resource_opts">> := #{<<"request_timeout">> := ReqT}} = Conf +) -> + %% ok: same values + Conf; +do_convert_webhook_config( + #{ + <<"request_timeout">> := ReqTRootRaw, + <<"resource_opts">> := #{<<"request_timeout">> := ReqTResourceRaw} + } = Conf0 +) -> + %% different values; we set them to the same, if they are valid + %% durations + MReqTRoot = emqx_schema:to_duration_ms(ReqTRootRaw), + MReqTResource = emqx_schema:to_duration_ms(ReqTResourceRaw), + case {MReqTRoot, MReqTResource} of + {{ok, ReqTRoot}, {ok, ReqTResource}} -> + {_Parsed, ReqTRaw} = max({ReqTRoot, ReqTRootRaw}, {ReqTResource, ReqTResourceRaw}), + Conf1 = emqx_utils_maps:deep_merge( + Conf0, + #{ + <<"request_timeout">> => ReqTRaw, + <<"resource_opts">> => #{<<"request_timeout">> => ReqTRaw} + } + ), + Conf1; + _ -> + %% invalid values; let the type checker complain about + %% that. + Conf0 + end; +do_convert_webhook_config(Conf) -> + Conf. diff --git a/apps/emqx_bridge/src/schema/emqx_bridge_webhook_schema.erl b/apps/emqx_bridge/src/schema/emqx_bridge_webhook_schema.erl index a41fc35f5..1540f77bf 100644 --- a/apps/emqx_bridge/src/schema/emqx_bridge_webhook_schema.erl +++ b/apps/emqx_bridge/src/schema/emqx_bridge_webhook_schema.erl @@ -38,7 +38,7 @@ fields("post") -> fields("put") -> fields("config"); fields("get") -> - emqx_bridge_schema:metrics_status_fields() ++ fields("post"); + emqx_bridge_schema:status_fields() ++ fields("post"); fields("creation_opts") -> lists:filter( fun({K, _V}) -> @@ -81,6 +81,15 @@ request_config() -> desc => ?DESC("config_url") } )}, + {direction, + mk( + egress, + #{ + desc => ?DESC("config_direction"), + required => {false, recursively}, + deprecated => {since, "5.0.12"} + } + )}, {local_topic, mk( binary(), @@ -115,7 +124,7 @@ request_config() -> mk( binary(), #{ - default => <<"${payload}">>, + default => undefined, desc => ?DESC("config_body") } )}, diff --git a/apps/emqx_bridge/test/emqx_bridge_SUITE.erl b/apps/emqx_bridge/test/emqx_bridge_SUITE.erl index ed4807d12..a8864bf00 100644 --- a/apps/emqx_bridge/test/emqx_bridge_SUITE.erl +++ b/apps/emqx_bridge/test/emqx_bridge_SUITE.erl @@ -141,8 +141,7 @@ setup_fake_telemetry_data() -> } } }, - Opts = #{raw_with_default => true}, - ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, Conf, Opts), + ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, Conf), ok = snabbkaffe:start_trace(), Predicate = fun(#{?snk_kind := K}) -> K =:= emqx_bridge_loaded end, diff --git a/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl b/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl index 74e712c6f..d55b92138 100644 --- a/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl +++ b/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl @@ -18,12 +18,15 @@ -compile(nowarn_export_all). -compile(export_all). 
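Note: stepping back to the schema hunk above, do_convert_webhook_config/1 reconciles a root-level request_timeout with the one nested under resource_opts by keeping the larger valid duration in both places. A worked sketch with illustrative values:

    Conf = #{
        <<"request_timeout">> => <<"15s">>,
        <<"resource_opts">> => #{<<"request_timeout">> => <<"30s">>}
    },
    #{
        <<"request_timeout">> := <<"30s">>,
        <<"resource_opts">> := #{<<"request_timeout">> := <<"30s">>}
    } = do_convert_webhook_config(Conf).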
--import(emqx_dashboard_api_test_helpers, [request/4, uri/1]). +-import(emqx_mgmt_api_test_util, [uri/1]). -include_lib("eunit/include/eunit.hrl"). -include_lib("common_test/include/ct.hrl"). --define(CONF_DEFAULT, <<"bridges: {}">>). --define(BRIDGE_TYPE, <<"webhook">>). +-include_lib("snabbkaffe/include/test_macros.hrl"). + +-define(SUITE_APPS, [emqx_conf, emqx_authn, emqx_management, emqx_rule_engine, emqx_bridge]). + +-define(BRIDGE_TYPE_HTTP, <<"webhook">>). -define(BRIDGE_NAME, (atom_to_binary(?FUNCTION_NAME))). -define(URL(PORT, PATH), list_to_binary( @@ -33,62 +36,173 @@ ) ) ). --define(HTTP_BRIDGE(URL, TYPE, NAME), #{ +-define(BRIDGE(NAME, TYPE), #{ + <<"ssl">> => #{<<"enable">> => false}, <<"type">> => TYPE, - <<"name">> => NAME, + <<"name">> => NAME +}). + +-define(BRIDGE_TYPE_MQTT, <<"mqtt">>). +-define(MQTT_BRIDGE(SERVER, NAME), ?BRIDGE(NAME, ?BRIDGE_TYPE_MQTT)#{ + <<"server">> => SERVER, + <<"username">> => <<"user1">>, + <<"password">> => <<"">>, + <<"proto_ver">> => <<"v5">> +}). +-define(MQTT_BRIDGE(SERVER), ?MQTT_BRIDGE(SERVER, <<"mqtt_egress_test_bridge">>)). + +-define(HTTP_BRIDGE(URL, NAME), ?BRIDGE(NAME, ?BRIDGE_TYPE_HTTP)#{ <<"url">> => URL, <<"local_topic">> => <<"emqx_webhook/#">>, <<"method">> => <<"post">>, - <<"ssl">> => #{<<"enable">> => false}, <<"body">> => <<"${payload}">>, <<"headers">> => #{ - <<"content-type">> => <<"application/json">> + % NOTE + % The Pascal-Case is important here. + % The reason is kinda ridiculous: `emqx_bridge_resource:create_dry_run/2` converts + % bridge config keys into atoms, and the atom 'Content-Type' exists in the ERTS + % when this happens (while the 'content-type' does not). + <<"Content-Type">> => <<"application/json">> } }). +-define(HTTP_BRIDGE(URL), ?HTTP_BRIDGE(URL, ?BRIDGE_NAME)). all() -> - emqx_common_test_helpers:all(?MODULE). + [ + {group, single}, + {group, cluster} + ]. groups() -> - []. + SingleOnlyTests = [ + t_broken_bpapi_vsn, + t_old_bpapi_vsn, + t_bridges_probe + ], + [ + {single, [], emqx_common_test_helpers:all(?MODULE)}, + {cluster, [], emqx_common_test_helpers:all(?MODULE) -- SingleOnlyTests} + ]. suite() -> [{timetrap, {seconds, 60}}]. init_per_suite(Config) -> - _ = application:load(emqx_conf), - %% some testcases (may from other app) already get emqx_connector started - _ = application:stop(emqx_resource), - _ = application:stop(emqx_connector), - ok = emqx_common_test_helpers:start_apps( - [emqx_rule_engine, emqx_bridge, emqx_dashboard], - fun set_special_configs/1 - ), - ok = emqx_common_test_helpers:load_config( - emqx_rule_engine_schema, - <<"rule_engine {rules {}}">> - ), - ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, ?CONF_DEFAULT), Config. end_per_suite(_Config) -> - emqx_common_test_helpers:stop_apps([emqx_rule_engine, emqx_bridge, emqx_dashboard]), ok. 
-set_special_configs(emqx_dashboard) ->
-    emqx_dashboard_api_test_helpers:set_default_config(<<"bridge_admin">>);
-set_special_configs(_) ->
+init_per_group(cluster, Config) ->
+    Cluster = mk_cluster_specs(Config),
+    ct:pal("Starting ~p", [Cluster]),
+    Nodes = [
+        emqx_common_test_helpers:start_slave(Name, Opts)
+     || {Name, Opts} <- Cluster
+    ],
+    [NodePrimary | NodesRest] = Nodes,
+    ok = erpc:call(NodePrimary, fun() -> init_node(primary) end),
+    _ = [ok = erpc:call(Node, fun() -> init_node(regular) end) || Node <- NodesRest],
+    [{group, cluster}, {cluster_nodes, Nodes}, {api_node, NodePrimary} | Config];
+init_per_group(_, Config) ->
+    ok = emqx_mgmt_api_test_util:init_suite(?SUITE_APPS),
+    ok = load_suite_config(emqx_rule_engine),
+    ok = load_suite_config(emqx_bridge),
+    [{group, single}, {api_node, node()} | Config].
+
+mk_cluster_specs(Config) ->
+    Specs = [
+        {core, emqx_bridge_api_SUITE1, #{}},
+        {core, emqx_bridge_api_SUITE2, #{}}
+    ],
+    CommonOpts = #{
+        env => [{emqx, boot_modules, [broker]}],
+        apps => [],
+        % NOTE
+        % We need to start all these apps _after_ the cluster becomes stable, in
+        % `init_node/1`. This is because the usual startup order is broken in a
+        % very subtle way:
+        % 1. The node starts apps including `mria` and `emqx_conf`, which starts `emqx_cluster_rpc`.
+        % 2. `emqx_cluster_rpc` sets up a mnesia table subscription during initialization.
+        % 3. In the meantime `mria` joins the cluster and notices it should restart.
+        % 4. The mnesia subscription gets lost during the restarts (god knows why).
+        % Yet we need to _load_ the apps beforehand, so that mria / mnesia know which
+        % tables should be created in the cluster.
+        % TODO
+        % We should probably hide these intricacies behind `emqx_common_test_helpers`.
+        load_apps => ?SUITE_APPS ++ [emqx_dashboard],
+        env_handler => fun load_suite_config/1,
+        load_schema => false,
+        priv_data_dir => ?config(priv_dir, Config)
+    },
+    emqx_common_test_helpers:emqx_cluster(Specs, CommonOpts).
+
+init_node(Type) ->
+    ok = emqx_common_test_helpers:start_apps(?SUITE_APPS, fun load_suite_config/1),
+    case Type of
+        primary ->
+            ok = emqx_config:put(
+                [dashboard, listeners],
+                #{http => #{enable => true, bind => 18083}}
+            ),
+            ok = emqx_dashboard:start_listeners(),
+            ready = emqx_dashboard_listener:regenerate_minirest_dispatch(),
+            emqx_common_test_http:create_default_app();
+        regular ->
+            ok
+    end.
+
+load_suite_config(emqx_rule_engine) ->
+    ok = emqx_common_test_helpers:load_config(
+        emqx_rule_engine_schema,
+        <<"rule_engine { rules {} }">>
+    );
+load_suite_config(emqx_bridge) ->
+    ok = emqx_common_test_helpers:load_config(
+        emqx_bridge_schema,
+        <<"bridges {}">>
+    );
+load_suite_config(_) ->
     ok.
 
+end_per_group(cluster, Config) ->
+    ok = lists:foreach(
+        fun(Node) ->
+            _ = erpc:call(Node, emqx_common_test_helpers, stop_apps, [?SUITE_APPS]),
+            emqx_common_test_helpers:stop_slave(Node)
+        end,
+        ?config(cluster_nodes, Config)
+    );
+end_per_group(_, _Config) ->
+    emqx_mgmt_api_test_util:end_suite(?SUITE_APPS),
+    ok.
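A condensed, schematic view of the ordering that NOTE argues for: applications are only loaded before the node joins the cluster (so mria/mnesia know the tables), and started once the cluster is stable, via init_node/1 as above. This is a sketch, not the helpers' real API:

%% Schematic only: the load / join / start phases kept apart.
boot_cluster_node(Name, Opts) ->
    %% phase 1: start_slave/2 boots the node with ?SUITE_APPS merely *loaded*
    %% (load_apps above), so mria/mnesia already know the cluster's tables;
    Node = emqx_common_test_helpers:start_slave(Name, Opts),
    %% phase 2: only after the cluster has settled are the apps *started*,
    %% so emqx_cluster_rpc's mnesia subscription is not lost in a mria restart.
    ok = erpc:call(Node, fun() -> init_node(regular) end),
    Node.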
+
+init_per_testcase(t_broken_bpapi_vsn, Config) ->
+    meck:new(emqx_bpapi, [passthrough]),
+    meck:expect(emqx_bpapi, supported_version, 1, -1),
+    meck:expect(emqx_bpapi, supported_version, 2, -1),
+    init_per_testcase(common, Config);
+init_per_testcase(t_old_bpapi_vsn, Config) ->
+    meck:new(emqx_bpapi, [passthrough]),
+    meck:expect(emqx_bpapi, supported_version, 1, 1),
+    meck:expect(emqx_bpapi, supported_version, 2, 1),
+    init_per_testcase(common, Config);
 init_per_testcase(_, Config) ->
-    {ok, _} = emqx_cluster_rpc:start_link(node(), emqx_cluster_rpc, 1000),
     {Port, Sock, Acceptor} = start_http_server(fun handle_fun_200_ok/2),
     [{port, Port}, {sock, Sock}, {acceptor, Acceptor} | Config].
+
+end_per_testcase(t_broken_bpapi_vsn, Config) ->
+    meck:unload([emqx_bpapi]),
+    end_per_testcase(common, Config);
+end_per_testcase(t_old_bpapi_vsn, Config) ->
+    meck:unload([emqx_bpapi]),
+    end_per_testcase(common, Config);
 end_per_testcase(_, Config) ->
     Sock = ?config(sock, Config),
     Acceptor = ?config(acceptor, Config),
-    stop_http_server(Sock, Acceptor),
-    clear_resources(),
+    Node = ?config(api_node, Config),
+    ok = emqx_common_test_helpers:call_janitor(),
+    ok = stop_http_server(Sock, Acceptor),
+    ok = erpc:call(Node, fun clear_resources/0),
     ok.
 
 clear_resources() ->
@@ -117,17 +231,13 @@ stop_http_server(Sock, Acceptor) ->
     gen_tcp:close(Sock).
 
 listen_on_random_port() ->
-    Min = 1024,
-    Max = 65000,
-    rand:seed(exsplus, erlang:timestamp()),
-    Port = rand:uniform(Max - Min) + Min,
-    case
-        gen_tcp:listen(Port, [
-            binary, {active, false}, {packet, raw}, {reuseaddr, true}, {backlog, 1000}
-        ])
-    of
-        {ok, Sock} -> {Port, Sock};
-        {error, eaddrinuse} -> listen_on_random_port()
+    SockOpts = [binary, {active, false}, {packet, raw}, {reuseaddr, true}, {backlog, 1000}],
+    case gen_tcp:listen(0, SockOpts) of
+        {ok, Sock} ->
+            {ok, Port} = inet:port(Sock),
+            {Port, Sock};
+        {error, Reason} when Reason /= eaddrinuse ->
+            {error, Reason}
     end.
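The rewritten listen_on_random_port/0 swaps manual port probing for the portable idiom of binding port 0 and asking the kernel which port it picked. The same idea as a tiny standalone helper:

%% Minimal sketch: let the kernel pick a free port, hand it to Fun, and close
%% the listener afterwards whatever happens.
with_random_port(Fun) ->
    {ok, LSock} = gen_tcp:listen(0, [binary, {active, false}, {reuseaddr, true}]),
    {ok, Port} = inet:port(LSock),
    try
        Fun(Port, LSock)
    after
        gen_tcp:close(LSock)
    end.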
accept_loop(Sock, HandleFun, Parent) -> @@ -174,34 +284,36 @@ parse_http_request(ReqStr0) -> t_http_crud_apis(Config) -> Port = ?config(port, Config), %% assert we there's no bridges at first - {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), + {ok, 200, []} = request_json(get, uri(["bridges"]), Config), + + {ok, 404, _} = request(get, uri(["bridges", "foo"]), Config), + {ok, 404, _} = request(get, uri(["bridges", "webhook:foo"]), Config), %% then we add a webhook bridge, using POST %% POST /bridges/ will create a bridge URL1 = ?URL(Port, "path1"), Name = ?BRIDGE_NAME, - {ok, 201, Bridge} = request( - post, - uri(["bridges"]), - ?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, Name) + ?assertMatch( + {ok, 201, #{ + <<"type">> := ?BRIDGE_TYPE_HTTP, + <<"name">> := Name, + <<"enable">> := true, + <<"status">> := _, + <<"node_status">> := [_ | _], + <<"url">> := URL1 + }}, + request_json( + post, + uri(["bridges"]), + ?HTTP_BRIDGE(URL1, Name), + Config + ) ), - %ct:pal("---bridge: ~p", [Bridge]), - #{ - <<"type">> := ?BRIDGE_TYPE, - <<"name">> := Name, - <<"enable">> := true, - <<"status">> := _, - <<"node_status">> := [_ | _], - <<"metrics">> := _, - <<"node_metrics">> := [_ | _], - <<"url">> := URL1 - } = jsx:decode(Bridge), - - BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name), + BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_HTTP, Name), %% send an message to emqx and the message should be forwarded to the HTTP server Body = <<"my msg">>, - emqx:publish(emqx_message:make(<<"emqx_webhook/1">>, Body)), + _ = publish_message(<<"emqx_webhook/1">>, Body, Config), ?assert( receive {http_server, received, #{ @@ -219,61 +331,53 @@ t_http_crud_apis(Config) -> ), %% update the request-path of the bridge URL2 = ?URL(Port, "path2"), - {ok, 200, Bridge2} = request( - put, - uri(["bridges", BridgeID]), - ?HTTP_BRIDGE(URL2, ?BRIDGE_TYPE, Name) - ), ?assertMatch( - #{ - <<"type">> := ?BRIDGE_TYPE, + {ok, 200, #{ + <<"type">> := ?BRIDGE_TYPE_HTTP, <<"name">> := Name, <<"enable">> := true, <<"status">> := _, <<"node_status">> := [_ | _], - <<"metrics">> := _, - <<"node_metrics">> := [_ | _], <<"url">> := URL2 - }, - jsx:decode(Bridge2) + }}, + request_json( + put, + uri(["bridges", BridgeID]), + ?HTTP_BRIDGE(URL2, Name), + Config + ) ), %% list all bridges again, assert Bridge2 is in it - {ok, 200, Bridge2Str} = request(get, uri(["bridges"]), []), ?assertMatch( - [ + {ok, 200, [ #{ - <<"type">> := ?BRIDGE_TYPE, + <<"type">> := ?BRIDGE_TYPE_HTTP, <<"name">> := Name, <<"enable">> := true, <<"status">> := _, <<"node_status">> := [_ | _], - <<"metrics">> := _, - <<"node_metrics">> := [_ | _], <<"url">> := URL2 } - ], - jsx:decode(Bridge2Str) + ]}, + request_json(get, uri(["bridges"]), Config) ), %% get the bridge by id - {ok, 200, Bridge3Str} = request(get, uri(["bridges", BridgeID]), []), ?assertMatch( - #{ - <<"type">> := ?BRIDGE_TYPE, + {ok, 200, #{ + <<"type">> := ?BRIDGE_TYPE_HTTP, <<"name">> := Name, <<"enable">> := true, <<"status">> := _, <<"node_status">> := [_ | _], - <<"metrics">> := _, - <<"node_metrics">> := [_ | _], <<"url">> := URL2 - }, - jsx:decode(Bridge3Str) + }}, + request_json(get, uri(["bridges", BridgeID]), Config) ), %% send an message to emqx again, check the path has been changed - emqx:publish(emqx_message:make(<<"emqx_webhook/1">>, Body)), + _ = publish_message(<<"emqx_webhook/1">>, Body, Config), ?assert( receive {http_server, received, #{path := <<"/path2">>}} -> @@ -286,41 +390,211 @@ t_http_crud_apis(Config) -> end ), - %% delete the bridge - {ok, 204, <<>>} = 
request(delete, uri(["bridges", BridgeID]), []), - {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), - - %% update a deleted bridge returns an error - {ok, 404, ErrMsg2} = request( + %% Test bad updates + {ok, 400, PutFail1} = request_json( put, uri(["bridges", BridgeID]), - ?HTTP_BRIDGE(URL2, ?BRIDGE_TYPE, Name) + maps:remove(<<"url">>, ?HTTP_BRIDGE(URL2, Name)), + Config + ), + ?assertMatch( + #{<<"reason">> := <<"required_field">>}, + json(maps:get(<<"message">>, PutFail1)) + ), + {ok, 400, PutFail2} = request_json( + put, + uri(["bridges", BridgeID]), + maps:put(<<"curl">>, URL2, maps:remove(<<"url">>, ?HTTP_BRIDGE(URL2, Name))), + Config ), ?assertMatch( #{ - <<"code">> := _, - <<"message">> := <<"bridge not found">> + <<"reason">> := <<"unknown_fields">>, + <<"unknown">> := <<"curl">> }, - jsx:decode(ErrMsg2) + json(maps:get(<<"message">>, PutFail2)) ), - ok. + {ok, 400, _} = request_json( + put, + uri(["bridges", BridgeID]), + ?HTTP_BRIDGE(<<"localhost:1234/foo">>, Name), + Config + ), + {ok, 400, _} = request_json( + put, + uri(["bridges", BridgeID]), + ?HTTP_BRIDGE(<<"htpp://localhost:12341234/foo">>, Name), + Config + ), + + %% delete the bridge + {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), Config), + {ok, 200, []} = request_json(get, uri(["bridges"]), Config), + + %% update a deleted bridge returns an error + ?assertMatch( + {ok, 404, #{ + <<"code">> := <<"NOT_FOUND">>, + <<"message">> := _ + }}, + request_json( + put, + uri(["bridges", BridgeID]), + ?HTTP_BRIDGE(URL2, Name), + Config + ) + ), + + %% try delete bad bridge id + ?assertMatch( + {ok, 404, #{ + <<"code">> := <<"NOT_FOUND">>, + <<"message">> := <<"Invalid bridge ID", _/binary>> + }}, + request_json(delete, uri(["bridges", "foo"]), Config) + ), + + %% Deleting a non-existing bridge should result in an error + ?assertMatch( + {ok, 404, #{ + <<"code">> := <<"NOT_FOUND">>, + <<"message">> := _ + }}, + request_json(delete, uri(["bridges", BridgeID]), Config) + ), + + %% Create non working bridge + BrokenURL = ?URL(Port + 1, "/foo"), + {ok, 201, BrokenBridge} = request( + post, + uri(["bridges"]), + ?HTTP_BRIDGE(BrokenURL, Name), + fun json/1, + Config + ), + ?assertMatch( + #{ + <<"type">> := ?BRIDGE_TYPE_HTTP, + <<"name">> := Name, + <<"enable">> := true, + <<"status">> := <<"disconnected">>, + <<"status_reason">> := <<"Connection refused">>, + <<"node_status">> := [ + #{ + <<"status">> := <<"disconnected">>, + <<"status_reason">> := <<"Connection refused">> + } + | _ + ], + <<"url">> := BrokenURL + }, + BrokenBridge + ), + + {ok, 200, FixedBridge} = request_json( + put, + uri(["bridges", BridgeID]), + ?HTTP_BRIDGE(URL1), + Config + ), + ?assertMatch( + #{ + <<"status">> := <<"connected">>, + <<"node_status">> := [FixedNodeStatus = #{<<"status">> := <<"connected">>} | _] + } when + not is_map_key(<<"status_reason">>, FixedBridge) andalso + not is_map_key(<<"status_reason">>, FixedNodeStatus), + FixedBridge + ), + + %% Try create bridge with bad characters as name + {ok, 400, _} = request(post, uri(["bridges"]), ?HTTP_BRIDGE(URL1, <<"隋达"/utf8>>), Config), + + %% Missing scheme in URL + {ok, 400, _} = request( + post, + uri(["bridges"]), + ?HTTP_BRIDGE(<<"localhost:1234/foo">>, <<"missing_url_scheme">>), + Config + ), + + %% Invalid port + {ok, 400, _} = request( + post, + uri(["bridges"]), + ?HTTP_BRIDGE(<<"http://localhost:12341234/foo">>, <<"invalid_port">>), + Config + ), + + {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), Config). 
+
+t_http_bridges_local_topic(Config) ->
+    Port = ?config(port, Config),
+    %% assert there are no bridges at first
+    {ok, 200, []} = request_json(get, uri(["bridges"]), Config),
+
+    %% then we add a webhook bridge, using POST
+    %% POST /bridges/ will create a bridge
+    URL1 = ?URL(Port, "path1"),
+    Name1 = <<"t_http_bridges_with_local_topic1">>,
+    Name2 = <<"t_http_bridges_without_local_topic1">>,
+    %% create one http bridge with local_topic
+    {ok, 201, _} = request(
+        post,
+        uri(["bridges"]),
+        ?HTTP_BRIDGE(URL1, Name1),
+        Config
+    ),
+    %% and we create another one without local_topic
+    {ok, 201, _} = request(
+        post,
+        uri(["bridges"]),
+        maps:remove(<<"local_topic">>, ?HTTP_BRIDGE(URL1, Name2)),
+        Config
+    ),
+    BridgeID1 = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_HTTP, Name1),
+    BridgeID2 = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_HTTP, Name2),
+    %% Send a message to emqx; it should be forwarded to the HTTP server.
+    %% This verifies that we can have 2 bridges, with and without local_topic fields,
+    %% at the same time.
+    Body = <<"my msg">>,
+    _ = publish_message(<<"emqx_webhook/1">>, Body, Config),
+    ?assert(
+        receive
+            {http_server, received, #{
+                method := <<"POST">>,
+                path := <<"/path1">>,
+                body := Body
+            }} ->
+                true;
+            Msg ->
+                ct:pal("error: http got unexpected request: ~p", [Msg]),
+                false
+        after 100 ->
+            false
+        end
+    ),
+    %% delete the bridges
+    {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID1]), Config),
+    {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID2]), Config).
 
 t_check_dependent_actions_on_delete(Config) ->
     Port = ?config(port, Config),
     %% assert we there's no bridges at first
-    {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
+    {ok, 200, []} = request_json(get, uri(["bridges"]), Config),
     %% then we add a webhook bridge, using POST
     %% POST /bridges/ will create a bridge
     URL1 = ?URL(Port, "path1"),
     Name = <<"t_http_crud_apis">>,
-    BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name),
+    BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_HTTP, Name),
     {ok, 201, _} = request(
         post,
         uri(["bridges"]),
-        ?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, Name)
+        ?HTTP_BRIDGE(URL1, Name),
+        Config
     ),
-    {ok, 201, Rule} = request(
+    {ok, 201, #{<<"id">> := RuleId}} = request_json(
         post,
         uri(["rules"]),
         #{
@@ -328,34 +602,36 @@ t_check_dependent_actions_on_delete(Config) ->
             <<"enable">> => true,
             <<"actions">> => [BridgeID],
             <<"sql">> => <<"SELECT * from \"t\"">>
-        }
+        },
+        Config
+    ),
+    %% deleting the bridge should fail because there is a rule that depends on it
+    {ok, 400, _} = request(
+        delete, uri(["bridges", BridgeID]) ++ "?also_delete_dep_actions=false", Config
     ),
-    #{<<"id">> := RuleId} = jsx:decode(Rule),
-    %% delete the bridge should fail because there is a rule depenents on it
-    {ok, 403, _} = request(delete, uri(["bridges", BridgeID]), []),
     %% delete the rule first
-    {ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), []),
+    {ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), Config),
     %% then delete the bridge is OK
-    {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), []),
-    {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
-    ok.
+    {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), Config),
+    {ok, 200, []} = request_json(get, uri(["bridges"]), Config).
 t_cascade_delete_actions(Config) ->
     Port = ?config(port, Config),
     %% assert we there's no bridges at first
-    {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
+    {ok, 200, []} = request_json(get, uri(["bridges"]), Config),
 
     %% then we add a webhook bridge, using POST
     %% POST /bridges/ will create a bridge
     URL1 = ?URL(Port, "path1"),
     Name = <<"t_http_crud_apis">>,
-    BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name),
+    BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_HTTP, Name),
     {ok, 201, _} = request(
         post,
         uri(["bridges"]),
-        ?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, Name)
+        ?HTTP_BRIDGE(URL1, Name),
+        Config
     ),
-    {ok, 201, Rule} = request(
+    {ok, 201, #{<<"id">> := RuleId}} = request_json(
         post,
         uri(["rules"]),
         #{
@@ -363,22 +639,97 @@ t_cascade_delete_actions(Config) ->
             <<"enable">> => true,
             <<"actions">> => [BridgeID],
             <<"sql">> => <<"SELECT * from \"t\"">>
-        }
-    ),
-    #{<<"id">> := RuleId} = jsx:decode(Rule),
-    %% delete the bridge will also delete the actions from the rules
-    {ok, 204, _} = request(delete, uri(["bridges", BridgeID]) ++ "?also_delete_dep_actions", []),
-    {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
-    {ok, 200, Rule1} = request(get, uri(["rules", RuleId]), []),
-    ?assertMatch(
-        #{
-            <<"actions">> := []
         },
-        jsx:decode(Rule1)
+        Config
     ),
-    {ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), []),
+    %% deleting the bridge also deletes its actions from the rules
+    {ok, 204, _} = request(
+        delete,
+        uri(["bridges", BridgeID]) ++ "?also_delete_dep_actions=true",
+        Config
+    ),
+    {ok, 200, []} = request_json(get, uri(["bridges"]), Config),
+    ?assertMatch(
+        {ok, 200, #{<<"actions">> := []}},
+        request_json(get, uri(["rules", RuleId]), Config)
+    ),
+    {ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), Config),
+
+    {ok, 201, _} = request(
+        post,
+        uri(["bridges"]),
+        ?HTTP_BRIDGE(URL1, Name),
+        Config
+    ),
+    {ok, 201, _} = request(
+        post,
+        uri(["rules"]),
+        #{
+            <<"name">> => <<"t_http_crud_apis">>,
+            <<"enable">> => true,
+            <<"actions">> => [BridgeID],
+            <<"sql">> => <<"SELECT * from \"t\"">>
+        },
+        Config
+    ),
+
+    {ok, 204, _} = request(
+        delete,
+        uri(["bridges", BridgeID]) ++ "?also_delete_dep_actions",
+        Config
+    ),
+    {ok, 200, []} = request_json(get, uri(["bridges"]), Config).
+
+t_broken_bpapi_vsn(Config) ->
+    Port = ?config(port, Config),
+    URL1 = ?URL(Port, "abc"),
+    Name = <<"t_bad_bpapi_vsn">>,
+    {ok, 201, _Bridge} = request(
+        post,
+        uri(["bridges"]),
+        ?HTTP_BRIDGE(URL1, Name),
+        Config
+    ),
+    BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_HTTP, Name),
+    %% a broken bpapi version results in 501 (not implemented)
+    {ok, 501, <<>>} = request(post, {operation, cluster, start, BridgeID}, Config),
+    {ok, 501, <<>>} = request(post, {operation, node, start, BridgeID}, Config),
    ok.
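Together, t_broken_bpapi_vsn above and t_old_bpapi_vsn just below pin the compatibility matrix: protocol v1 redirects start to restart (204), while a nonsensical version surfaces as 501. A hypothetical dispatch sketch consistent with those assertions (not the actual emqx_bridge_api code):

%% Hypothetical sketch shaped purely by the two tests' assertions.
map_operation(Vsn, start) when is_integer(Vsn), Vsn >= 1, Vsn < 2 ->
    %% old but valid protocol: fall back to the operation it does know
    {ok, restart};
map_operation(Vsn, Op) when is_integer(Vsn), Vsn >= 1 ->
    {ok, Op};
map_operation(_BadVsn, _Op) ->
    %% rendered as HTTP 501 by the API layer
    {error, not_implemented}.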
+t_old_bpapi_vsn(Config) -> + Port = ?config(port, Config), + URL1 = ?URL(Port, "abc"), + Name = <<"t_bad_bpapi_vsn">>, + {ok, 201, _Bridge} = request( + post, + uri(["bridges"]), + ?HTTP_BRIDGE(URL1, Name), + Config + ), + BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_HTTP, Name), + {ok, 204, <<>>} = request(post, {operation, cluster, stop, BridgeID}, Config), + {ok, 204, <<>>} = request(post, {operation, node, stop, BridgeID}, Config), + %% still works since we redirect to 'restart' + {ok, 204, <<>>} = request(post, {operation, cluster, start, BridgeID}, Config), + {ok, 204, <<>>} = request(post, {operation, node, start, BridgeID}, Config), + {ok, 204, <<>>} = request(post, {operation, cluster, restart, BridgeID}, Config), + {ok, 204, <<>>} = request(post, {operation, node, restart, BridgeID}, Config), + ok. + +t_start_bridge_unknown_node(Config) -> + {ok, 404, _} = + request( + post, + uri(["nodes", "thisbetterbenotanatomyet", "bridges", "webhook:foo", start]), + Config + ), + {ok, 404, _} = + request( + post, + uri(["nodes", "undefined", "bridges", "webhook:foo", start]), + Config + ). + t_start_stop_bridges_node(Config) -> do_start_stop_bridges(node, Config). @@ -387,140 +738,626 @@ t_start_stop_bridges_cluster(Config) -> do_start_stop_bridges(Type, Config) -> %% assert we there's no bridges at first - {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), + {ok, 200, []} = request_json(get, uri(["bridges"]), Config), Port = ?config(port, Config), URL1 = ?URL(Port, "abc"), Name = atom_to_binary(Type), - {ok, 201, Bridge} = request( + ?assertMatch( + {ok, 201, #{ + <<"type">> := ?BRIDGE_TYPE_HTTP, + <<"name">> := Name, + <<"enable">> := true, + <<"status">> := <<"connected">>, + <<"node_status">> := [_ | _], + <<"url">> := URL1 + }}, + request_json( + post, + uri(["bridges"]), + ?HTTP_BRIDGE(URL1, Name), + Config + ) + ), + + BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_HTTP, Name), + ExpectedStatus = + case ?config(group, Config) of + cluster when Type == node -> + <<"inconsistent">>; + _ -> + <<"stopped">> + end, + + %% stop it + {ok, 204, <<>>} = request(post, {operation, Type, stop, BridgeID}, Config), + ?assertMatch( + {ok, 200, #{<<"status">> := ExpectedStatus}}, + request_json(get, uri(["bridges", BridgeID]), Config) + ), + %% start again + {ok, 204, <<>>} = request(post, {operation, Type, start, BridgeID}, Config), + ?assertMatch( + {ok, 200, #{<<"status">> := <<"connected">>}}, + request_json(get, uri(["bridges", BridgeID]), Config) + ), + %% start a started bridge + {ok, 204, <<>>} = request(post, {operation, Type, start, BridgeID}, Config), + ?assertMatch( + {ok, 200, #{<<"status">> := <<"connected">>}}, + request_json(get, uri(["bridges", BridgeID]), Config) + ), + %% restart an already started bridge + {ok, 204, <<>>} = request(post, {operation, Type, restart, BridgeID}, Config), + ?assertMatch( + {ok, 200, #{<<"status">> := <<"connected">>}}, + request_json(get, uri(["bridges", BridgeID]), Config) + ), + %% stop it again + {ok, 204, <<>>} = request(post, {operation, Type, stop, BridgeID}, Config), + %% restart a stopped bridge + {ok, 204, <<>>} = request(post, {operation, Type, restart, BridgeID}, Config), + ?assertMatch( + {ok, 200, #{<<"status">> := <<"connected">>}}, + request_json(get, uri(["bridges", BridgeID]), Config) + ), + + {ok, 404, _} = request(post, {operation, Type, invalidop, BridgeID}, Config), + + %% delete the bridge + {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), Config), + {ok, 200, []} = request_json(get, 
uri(["bridges"]), Config), + + %% Fail parse-id check + {ok, 404, _} = request(post, {operation, Type, start, <<"wreckbook_fugazi">>}, Config), + %% Looks ok but doesn't exist + {ok, 404, _} = request(post, {operation, Type, start, <<"webhook:cptn_hook">>}, Config), + + %% Create broken bridge + {ListenPort, Sock} = listen_on_random_port(), + %% Connecting to this endpoint should always timeout + BadServer = iolist_to_binary(io_lib:format("localhost:~B", [ListenPort])), + BadName = <<"bad_", (atom_to_binary(Type))/binary>>, + ?assertMatch( + {ok, 201, #{ + <<"type">> := ?BRIDGE_TYPE_MQTT, + <<"name">> := BadName, + <<"enable">> := true, + <<"server">> := BadServer, + <<"status">> := <<"connecting">>, + <<"node_status">> := [_ | _] + }}, + request_json( + post, + uri(["bridges"]), + ?MQTT_BRIDGE(BadServer, BadName), + Config + ) + ), + BadBridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_MQTT, BadName), + ?assertMatch( + {ok, SC, _} when SC == 500 orelse SC == 503, + request(post, {operation, Type, start, BadBridgeID}, Config) + ), + ok = gen_tcp:close(Sock), + ok. + +t_start_stop_inconsistent_bridge_node(Config) -> + start_stop_inconsistent_bridge(node, Config). + +t_start_stop_inconsistent_bridge_cluster(Config) -> + start_stop_inconsistent_bridge(cluster, Config). + +start_stop_inconsistent_bridge(Type, Config) -> + Port = ?config(port, Config), + URL = ?URL(Port, "abc"), + Node = ?config(api_node, Config), + + erpc:call(Node, fun() -> + meck:new(emqx_bridge_resource, [passthrough, no_link]), + meck:expect( + emqx_bridge_resource, + stop, + fun + (_, <<"bridge_not_found">>) -> {error, not_found}; + (BridgeType, Name) -> meck:passthrough([BridgeType, Name]) + end + ) + end), + + emqx_common_test_helpers:on_exit(fun() -> + erpc:call(Node, fun() -> + meck:unload([emqx_bridge_resource]) + end) + end), + + {ok, 201, _Bridge} = request( post, uri(["bridges"]), - ?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, Name) + ?HTTP_BRIDGE(URL, <<"bridge_not_found">>), + Config ), - %ct:pal("the bridge ==== ~p", [Bridge]), - #{ - <<"type">> := ?BRIDGE_TYPE, - <<"name">> := Name, - <<"enable">> := true, - <<"status">> := <<"connected">>, - <<"node_status">> := [_ | _], - <<"metrics">> := _, - <<"node_metrics">> := [_ | _], - <<"url">> := URL1 - } = jsx:decode(Bridge), - BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name), - %% stop it - {ok, 200, <<>>} = request(post, operation_path(Type, stop, BridgeID), <<"">>), - {ok, 200, Bridge2} = request(get, uri(["bridges", BridgeID]), []), - ?assertMatch(#{<<"status">> := <<"stopped">>}, jsx:decode(Bridge2)), - %% start again - {ok, 200, <<>>} = request(post, operation_path(Type, restart, BridgeID), <<"">>), - {ok, 200, Bridge3} = request(get, uri(["bridges", BridgeID]), []), - ?assertMatch(#{<<"status">> := <<"connected">>}, jsx:decode(Bridge3)), - %% restart an already started bridge - {ok, 200, <<>>} = request(post, operation_path(Type, restart, BridgeID), <<"">>), - {ok, 200, Bridge3} = request(get, uri(["bridges", BridgeID]), []), - ?assertMatch(#{<<"status">> := <<"connected">>}, jsx:decode(Bridge3)), - %% stop it again - {ok, 200, <<>>} = request(post, operation_path(Type, stop, BridgeID), <<"">>), - %% restart a stopped bridge - {ok, 200, <<>>} = request(post, operation_path(Type, restart, BridgeID), <<"">>), - {ok, 200, Bridge4} = request(get, uri(["bridges", BridgeID]), []), - ?assertMatch(#{<<"status">> := <<"connected">>}, jsx:decode(Bridge4)), - %% delete the bridge - {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), []), - {ok, 200, 
<<"[]">>} = request(get, uri(["bridges"]), []). + {ok, 503, _} = request( + post, {operation, Type, stop, <<"webhook:bridge_not_found">>}, Config + ). t_enable_disable_bridges(Config) -> %% assert we there's no bridges at first - {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), + {ok, 200, []} = request_json(get, uri(["bridges"]), Config), Name = ?BRIDGE_NAME, Port = ?config(port, Config), URL1 = ?URL(Port, "abc"), - {ok, 201, Bridge} = request( - post, - uri(["bridges"]), - ?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, Name) + ?assertMatch( + {ok, 201, #{ + <<"type">> := ?BRIDGE_TYPE_HTTP, + <<"name">> := Name, + <<"enable">> := true, + <<"status">> := <<"connected">>, + <<"node_status">> := [_ | _], + <<"url">> := URL1 + }}, + request_json( + post, + uri(["bridges"]), + ?HTTP_BRIDGE(URL1, Name), + Config + ) ), - %ct:pal("the bridge ==== ~p", [Bridge]), - #{ - <<"type">> := ?BRIDGE_TYPE, - <<"name">> := Name, - <<"enable">> := true, - <<"status">> := <<"connected">>, - <<"node_status">> := [_ | _], - <<"metrics">> := _, - <<"node_metrics">> := [_ | _], - <<"url">> := URL1 - } = jsx:decode(Bridge), - BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name), + BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_HTTP, Name), %% disable it - {ok, 200, <<>>} = request(post, operation_path(cluster, disable, BridgeID), <<"">>), - {ok, 200, Bridge2} = request(get, uri(["bridges", BridgeID]), []), - ?assertMatch(#{<<"status">> := <<"stopped">>}, jsx:decode(Bridge2)), + {ok, 204, <<>>} = request(put, enable_path(false, BridgeID), Config), + ?assertMatch( + {ok, 200, #{<<"status">> := <<"stopped">>}}, + request_json(get, uri(["bridges", BridgeID]), Config) + ), %% enable again - {ok, 200, <<>>} = request(post, operation_path(cluster, enable, BridgeID), <<"">>), - {ok, 200, Bridge3} = request(get, uri(["bridges", BridgeID]), []), - ?assertMatch(#{<<"status">> := <<"connected">>}, jsx:decode(Bridge3)), + {ok, 204, <<>>} = request(put, enable_path(true, BridgeID), Config), + ?assertMatch( + {ok, 200, #{<<"status">> := <<"connected">>}}, + request_json(get, uri(["bridges", BridgeID]), Config) + ), %% enable an already started bridge - {ok, 200, <<>>} = request(post, operation_path(cluster, enable, BridgeID), <<"">>), - {ok, 200, Bridge3} = request(get, uri(["bridges", BridgeID]), []), - ?assertMatch(#{<<"status">> := <<"connected">>}, jsx:decode(Bridge3)), + {ok, 204, <<>>} = request(put, enable_path(true, BridgeID), Config), + ?assertMatch( + {ok, 200, #{<<"status">> := <<"connected">>}}, + request_json(get, uri(["bridges", BridgeID]), Config) + ), %% disable it again - {ok, 200, <<>>} = request(post, operation_path(cluster, disable, BridgeID), <<"">>), + {ok, 204, <<>>} = request(put, enable_path(false, BridgeID), Config), - {ok, 403, Res} = request(post, operation_path(node, restart, BridgeID), <<"">>), + %% bad param + {ok, 404, _} = request(put, enable_path(foo, BridgeID), Config), + {ok, 404, _} = request(put, enable_path(true, "foo"), Config), + {ok, 404, _} = request(put, enable_path(true, "webhook:foo"), Config), + + {ok, 400, Res} = request(post, {operation, node, start, BridgeID}, <<>>, fun json/1, Config), ?assertEqual( - <<"{\"code\":\"FORBIDDEN_REQUEST\",\"message\":\"forbidden operation: bridge disabled\"}">>, + #{ + <<"code">> => <<"BAD_REQUEST">>, + <<"message">> => <<"Forbidden operation, bridge not enabled">> + }, Res ), + {ok, 400, Res} = request(post, {operation, cluster, start, BridgeID}, <<>>, fun json/1, Config), %% enable a stopped bridge - {ok, 200, <<>>} = request(post, 
operation_path(cluster, enable, BridgeID), <<"">>), - {ok, 200, Bridge4} = request(get, uri(["bridges", BridgeID]), []), - ?assertMatch(#{<<"status">> := <<"connected">>}, jsx:decode(Bridge4)), + {ok, 204, <<>>} = request(put, enable_path(true, BridgeID), Config), + ?assertMatch( + {ok, 200, #{<<"status">> := <<"connected">>}}, + request_json(get, uri(["bridges", BridgeID]), Config) + ), %% delete the bridge - {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), []), - {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []). + {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), Config), + {ok, 200, []} = request_json(get, uri(["bridges"]), Config). t_reset_bridges(Config) -> - %% assert we there's no bridges at first - {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), + %% assert there's no bridges at first + {ok, 200, []} = request_json(get, uri(["bridges"]), Config), Name = ?BRIDGE_NAME, Port = ?config(port, Config), URL1 = ?URL(Port, "abc"), - {ok, 201, Bridge} = request( - post, - uri(["bridges"]), - ?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, Name) + ?assertMatch( + {ok, 201, #{ + <<"type">> := ?BRIDGE_TYPE_HTTP, + <<"name">> := Name, + <<"enable">> := true, + <<"status">> := <<"connected">>, + <<"node_status">> := [_ | _], + <<"url">> := URL1 + }}, + request_json( + post, + uri(["bridges"]), + ?HTTP_BRIDGE(URL1, Name), + Config + ) ), - %ct:pal("the bridge ==== ~p", [Bridge]), - #{ - <<"type">> := ?BRIDGE_TYPE, - <<"name">> := Name, - <<"enable">> := true, - <<"status">> := <<"connected">>, - <<"node_status">> := [_ | _], - <<"metrics">> := _, - <<"node_metrics">> := [_ | _], - <<"url">> := URL1 - } = jsx:decode(Bridge), - BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name), - {ok, 200, <<"Reset success">>} = request(put, uri(["bridges", BridgeID, "reset_metrics"]), []), + BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_HTTP, Name), + {ok, 204, <<>>} = request(put, uri(["bridges", BridgeID, "metrics/reset"]), Config), %% delete the bridge - {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), []), - {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []). + {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), Config), + {ok, 200, []} = request_json(get, uri(["bridges"]), Config). -request(Method, Url, Body) -> - request(<<"bridge_admin">>, Method, Url, Body). +t_with_redact_update(Config) -> + Name = <<"redact_update">>, + Type = <<"mqtt">>, + Password = <<"123456">>, + Template = #{ + <<"type">> => Type, + <<"name">> => Name, + <<"server">> => <<"127.0.0.1:1883">>, + <<"username">> => <<"test">>, + <<"password">> => Password, + <<"ingress">> => + #{<<"remote">> => #{<<"topic">> => <<"t/#">>}} + }, -operation_path(node, Oper, BridgeID) -> - uri(["nodes", node(), "bridges", BridgeID, "operation", Oper]); -operation_path(cluster, Oper, BridgeID) -> - uri(["bridges", BridgeID, "operation", Oper]). + {ok, 201, _} = request( + post, + uri(["bridges"]), + Template, + Config + ), + + %% update with redacted config + BridgeConf = emqx_utils:redact(Template), + BridgeID = emqx_bridge_resource:bridge_id(Type, Name), + {ok, 200, _} = request(put, uri(["bridges", BridgeID]), BridgeConf, Config), + ?assertEqual( + Password, + get_raw_config([bridges, Type, Name, password], Config) + ), + ok. 
+ +t_bridges_probe(Config) -> + Port = ?config(port, Config), + URL = ?URL(Port, "some_path"), + + {ok, 204, <<>>} = request( + post, + uri(["bridges_probe"]), + ?HTTP_BRIDGE(URL), + Config + ), + + %% second time with same name is ok since no real bridge created + {ok, 204, <<>>} = request( + post, + uri(["bridges_probe"]), + ?HTTP_BRIDGE(URL), + Config + ), + + ?assertMatch( + {ok, 400, #{ + <<"code">> := <<"TEST_FAILED">>, + <<"message">> := _ + }}, + request_json( + post, + uri(["bridges_probe"]), + ?HTTP_BRIDGE(<<"http://203.0.113.3:1234/foo">>), + Config + ) + ), + + %% Missing scheme in URL + ?assertMatch( + {ok, 400, #{ + <<"code">> := <<"TEST_FAILED">>, + <<"message">> := _ + }}, + request_json( + post, + uri(["bridges_probe"]), + ?HTTP_BRIDGE(<<"203.0.113.3:1234/foo">>), + Config + ) + ), + + %% Invalid port + ?assertMatch( + {ok, 400, #{ + <<"code">> := <<"TEST_FAILED">>, + <<"message">> := _ + }}, + request_json( + post, + uri(["bridges_probe"]), + ?HTTP_BRIDGE(<<"http://203.0.113.3:12341234/foo">>), + Config + ) + ), + + {ok, 204, _} = request( + post, + uri(["bridges_probe"]), + ?MQTT_BRIDGE(<<"127.0.0.1:1883">>), + Config + ), + + ?assertMatch( + {ok, 400, #{ + <<"code">> := <<"TEST_FAILED">>, + <<"message">> := <<"Connection refused">> + }}, + request_json( + post, + uri(["bridges_probe"]), + ?MQTT_BRIDGE(<<"127.0.0.1:2883">>), + Config + ) + ), + + ?assertMatch( + {ok, 400, #{ + <<"code">> := <<"TEST_FAILED">>, + <<"message">> := <<"Could not resolve host">> + }}, + request_json( + post, + uri(["bridges_probe"]), + ?MQTT_BRIDGE(<<"nohost:2883">>), + Config + ) + ), + + AuthnConfig = #{ + <<"mechanism">> => <<"password_based">>, + <<"backend">> => <<"built_in_database">>, + <<"user_id_type">> => <<"username">> + }, + Chain = 'mqtt:global', + {ok, _} = update_config( + [authentication], + {create_authenticator, Chain, AuthnConfig}, + Config + ), + User = #{user_id => <<"u">>, password => <<"p">>}, + AuthenticatorID = <<"password_based:built_in_database">>, + {ok, _} = add_user_auth( + Chain, + AuthenticatorID, + User, + Config + ), + + emqx_common_test_helpers:on_exit(fun() -> + delete_user_auth(Chain, AuthenticatorID, User, Config) + end), + + ?assertMatch( + {ok, 400, #{ + <<"code">> := <<"TEST_FAILED">>, + <<"message">> := <<"Unauthorized client">> + }}, + request_json( + post, + uri(["bridges_probe"]), + ?MQTT_BRIDGE(<<"127.0.0.1:1883">>)#{<<"proto_ver">> => <<"v4">>}, + Config + ) + ), + + ?assertMatch( + {ok, 400, #{ + <<"code">> := <<"TEST_FAILED">>, + <<"message">> := <<"Bad username or password">> + }}, + request_json( + post, + uri(["bridges_probe"]), + ?MQTT_BRIDGE(<<"127.0.0.1:1883">>)#{ + <<"proto_ver">> => <<"v4">>, + <<"password">> => <<"mySecret">>, + <<"username">> => <<"u">> + }, + Config + ) + ), + + ?assertMatch( + {ok, 400, #{ + <<"code">> := <<"TEST_FAILED">>, + <<"message">> := <<"Not authorized">> + }}, + request_json( + post, + uri(["bridges_probe"]), + ?MQTT_BRIDGE(<<"127.0.0.1:1883">>), + Config + ) + ), + + ?assertMatch( + {ok, 400, #{<<"code">> := <<"BAD_REQUEST">>}}, + request_json( + post, + uri(["bridges_probe"]), + ?BRIDGE(<<"bad_bridge">>, <<"unknown_type">>), + Config + ) + ), + ok. 
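Since /bridges_probe only performs a dry run, probing is idempotent and leaves no bridge behind; this suite's own helpers make that easy to assert (sketch):

%% Probe twice under the same name, then confirm nothing was created.
assert_probe_is_stateless(URL, Config) ->
    {ok, 204, <<>>} = request(post, uri(["bridges_probe"]), ?HTTP_BRIDGE(URL), Config),
    {ok, 204, <<>>} = request(post, uri(["bridges_probe"]), ?HTTP_BRIDGE(URL), Config),
    {ok, 200, []} = request_json(get, uri(["bridges"]), Config).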
+
+t_metrics(Config) ->
+    Port = ?config(port, Config),
+    %% assert there are no bridges at first
+    {ok, 200, []} = request_json(get, uri(["bridges"]), Config),
+
+    %% then we add a webhook bridge, using POST
+    %% POST /bridges/ will create a bridge
+    URL1 = ?URL(Port, "path1"),
+    Name = ?BRIDGE_NAME,
+    ?assertMatch(
+        {ok, 201,
+            Bridge = #{
+                <<"type">> := ?BRIDGE_TYPE_HTTP,
+                <<"name">> := Name,
+                <<"enable">> := true,
+                <<"status">> := _,
+                <<"node_status">> := [_ | _],
+                <<"url">> := URL1
+            }} when
+            %% assert that the bridge response no longer contains metrics
+            not is_map_key(<<"metrics">>, Bridge) andalso
+                not is_map_key(<<"node_metrics">>, Bridge),
+        request_json(
+            post,
+            uri(["bridges"]),
+            ?HTTP_BRIDGE(URL1, Name),
+            Config
+        )
+    ),
+
+    BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_HTTP, Name),
+
+    %% check for empty bridge metrics
+    ?assertMatch(
+        {ok, 200, #{
+            <<"metrics">> := #{<<"success">> := 0},
+            <<"node_metrics">> := [_ | _]
+        }},
+        request_json(get, uri(["bridges", BridgeID, "metrics"]), Config)
+    ),
+
+    %% check that the bridge doesn't contain metrics anymore
+    {ok, 200, Bridge} = request_json(get, uri(["bridges", BridgeID]), Config),
+    ?assertNot(maps:is_key(<<"metrics">>, Bridge)),
+    ?assertNot(maps:is_key(<<"node_metrics">>, Bridge)),
+
+    %% send a message to emqx; it should be forwarded to the HTTP server
+    Body = <<"my msg">>,
+    _ = publish_message(<<"emqx_webhook/1">>, Body, Config),
+    ?assert(
+        receive
+            {http_server, received, #{
+                method := <<"POST">>,
+                path := <<"/path1">>,
+                body := Body
+            }} ->
+                true;
+            Msg ->
+                ct:pal("error: http got unexpected request: ~p", [Msg]),
+                false
+        after 100 ->
+            false
+        end
+    ),
+
+    %% check for non-empty bridge metrics
+    ?assertMatch(
+        {ok, 200, #{
+            <<"metrics">> := #{<<"success">> := _},
+            <<"node_metrics">> := [_ | _]
+        }},
+        request_json(get, uri(["bridges", BridgeID, "metrics"]), Config)
+    ),
+
+    %% check for absence of metrics when listing all bridges
+    {ok, 200, Bridges} = request_json(get, uri(["bridges"]), Config),
+    ?assertNotMatch(
+        [
+            #{
+                <<"metrics">> := #{},
+                <<"node_metrics">> := [_ | _]
+            }
+        ],
+        Bridges
+    ),
+    ok.
+
+%% request_timeout in the bridge root should match request_timeout in
+%% resource_opts.
+t_inconsistent_webhook_request_timeouts(Config) ->
+    Port = ?config(port, Config),
+    URL1 = ?URL(Port, "path1"),
+    Name = ?BRIDGE_NAME,
+    BadBridgeParams =
+        emqx_utils_maps:deep_merge(
+            ?HTTP_BRIDGE(URL1, Name),
+            #{
+                <<"request_timeout">> => <<"1s">>,
+                <<"resource_opts">> => #{<<"request_timeout">> => <<"2s">>}
+            }
+        ),
+    ?assertMatch(
+        {ok, 201, #{
+            %% note: same value on both fields
+            <<"request_timeout">> := <<"2s">>,
+            <<"resource_opts">> := #{<<"request_timeout">> := <<"2s">>}
+        }},
+        request_json(
+            post,
+            uri(["bridges"]),
+            BadBridgeParams,
+            Config
+        )
+    ),
+    ok.
+
+%%
+
+request(Method, URL, Config) ->
+    request(Method, URL, [], Config).
+
+request(Method, {operation, Type, Op, BridgeID}, Body, Config) ->
+    URL = operation_path(Type, Op, BridgeID, Config),
+    request(Method, URL, Body, Config);
+request(Method, URL, Body, Config) ->
+    Opts = #{compatible_mode => true, httpc_req_opts => [{body_format, binary}]},
+    emqx_mgmt_api_test_util:request_api(Method, URL, [], auth_header(Config), Body, Opts).
+
+request(Method, URL, Body, Decoder, Config) ->
+    case request(Method, URL, Body, Config) of
+        {ok, Code, Response} ->
+            {ok, Code, Decoder(Response)};
+        Otherwise ->
+            Otherwise
+    end.
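Note how request/4 overloads its URL argument: an {operation, Scope, Op, BridgeID} tuple is first resolved via operation_path/4 (defined just below), so test bodies can name the operation instead of spelling the path. Both calls in this hedged snippet should hit the same endpoint:

%% Equivalent ways to stop a bridge cluster-wide (BridgeID and Config as
%% elsewhere in this suite):
{ok, 204, <<>>} = request(post, {operation, cluster, stop, BridgeID}, Config),
{ok, 204, <<>>} = request(post, uri(["bridges", BridgeID, "stop"]), Config),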
+ +request_json(Method, URLLike, Config) -> + request(Method, URLLike, [], fun json/1, Config). + +request_json(Method, URLLike, Body, Config) -> + request(Method, URLLike, Body, fun json/1, Config). + +auth_header(Config) -> + erpc:call(?config(api_node, Config), emqx_common_test_http, default_auth_header, []). + +operation_path(node, Oper, BridgeID, Config) -> + uri(["nodes", ?config(api_node, Config), "bridges", BridgeID, Oper]); +operation_path(cluster, Oper, BridgeID, _Config) -> + uri(["bridges", BridgeID, Oper]). + +enable_path(Enable, BridgeID) -> + uri(["bridges", BridgeID, "enable", Enable]). + +publish_message(Topic, Body, Config) -> + Node = ?config(api_node, Config), + erpc:call(Node, emqx, publish, [emqx_message:make(Topic, Body)]). + +update_config(Path, Value, Config) -> + Node = ?config(api_node, Config), + erpc:call(Node, emqx, update_config, [Path, Value]). + +get_raw_config(Path, Config) -> + Node = ?config(api_node, Config), + erpc:call(Node, emqx, get_raw_config, [Path]). + +add_user_auth(Chain, AuthenticatorID, User, Config) -> + Node = ?config(api_node, Config), + erpc:call(Node, emqx_authentication, add_user, [Chain, AuthenticatorID, User]). + +delete_user_auth(Chain, AuthenticatorID, User, Config) -> + Node = ?config(api_node, Config), + erpc:call(Node, emqx_authentication, delete_user, [Chain, AuthenticatorID, User]). str(S) when is_list(S) -> S; str(S) when is_binary(S) -> binary_to_list(S). + +json(B) when is_binary(B) -> + emqx_utils_json:decode(B, [return_maps]). diff --git a/apps/emqx_bridge/test/emqx_bridge_mqtt_config_tests.erl b/apps/emqx_bridge/test/emqx_bridge_compatible_config_tests.erl similarity index 58% rename from apps/emqx_bridge/test/emqx_bridge_mqtt_config_tests.erl rename to apps/emqx_bridge/test/emqx_bridge_compatible_config_tests.erl index 90723252d..08bee15f3 100644 --- a/apps/emqx_bridge/test/emqx_bridge_mqtt_config_tests.erl +++ b/apps/emqx_bridge/test/emqx_bridge_compatible_config_tests.erl @@ -13,7 +13,7 @@ %% limitations under the License. %%-------------------------------------------------------------------- --module(emqx_bridge_mqtt_config_tests). +-module(emqx_bridge_compatible_config_tests). -include_lib("eunit/include/eunit.hrl"). @@ -26,30 +26,75 @@ empty_config_test() -> %% ensure webhook config can be checked webhook_config_test() -> - Conf = parse(webhook_v5011_hocon()), + Conf1 = parse(webhook_v5011_hocon()), + Conf2 = parse(full_webhook_v5011_hocon()), + Conf3 = parse(full_webhook_v5019_hocon()), + ?assertMatch( #{ - <<"bridges">> := - #{ - <<"webhook">> := #{ - <<"the_name">> := - #{ - <<"method">> := get, - <<"body">> := <<"${payload}">> - } - } + <<"bridges">> := #{ + <<"webhook">> := #{ + <<"the_name">> := + #{ + <<"method">> := get, + <<"body">> := <<"${payload}">> + } } + } }, - check(Conf) + check(Conf1) ), + + ?assertMatch( + #{ + <<"bridges">> := #{ + <<"webhook">> := #{ + <<"the_name">> := + #{ + <<"method">> := get, + <<"body">> := <<"${payload}">> + } + } + } + }, + check(Conf2) + ), + + %% the converter should pick the greater of the two + %% request_timeouts and place them in the root and inside + %% resource_opts. + ?assertMatch( + #{ + <<"bridges">> := #{ + <<"webhook">> := #{ + <<"the_name">> := + #{ + <<"method">> := get, + <<"request_timeout">> := 60_000, + <<"resource_opts">> := #{<<"request_timeout">> := 60_000}, + <<"body">> := <<"${payload}">> + } + } + } + }, + check(Conf3) + ), + ok. 
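The third assertion above is the heart of the new converter coverage; distilled into a hypothetical extra EUnit case (reusing this module's parse/1, check/1 and fixture):

%% Hypothetical extra case pinning the same invariant: after check/1 the root
%% and resource_opts timeouts agree, and equal the max of the two inputs
%% ("1m" = 60000 ms wins over "7s" = 7000 ms).
webhook_timeout_reconciled_test() ->
    #{<<"bridges">> := #{<<"webhook">> := #{<<"the_name">> := Bridge}}} =
        check(parse(full_webhook_v5019_hocon())),
    ?assertEqual(60000, maps:get(<<"request_timeout">>, Bridge)),
    ?assertEqual(
        60000,
        maps:get(<<"request_timeout">>, maps:get(<<"resource_opts">>, Bridge))
    ).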
up(#{<<"bridges">> := Bridges0} = Conf0) -> Bridges = up(Bridges0), Conf0#{<<"bridges">> := Bridges}; up(#{<<"mqtt">> := MqttBridges0} = Bridges) -> - MqttBridges = emqx_bridge_mqtt_config:upgrade_pre_ee(MqttBridges0), - Bridges#{<<"mqtt">> := MqttBridges}. + MqttBridges = emqx_bridge_compatible_config:upgrade_pre_ee( + MqttBridges0, fun emqx_bridge_compatible_config:maybe_upgrade/1 + ), + Bridges#{<<"mqtt">> := MqttBridges}; +up(#{<<"webhook">> := WebhookBridges0} = Bridges) -> + WebhookBridges = emqx_bridge_compatible_config:upgrade_pre_ee( + WebhookBridges0, fun emqx_bridge_compatible_config:webhook_maybe_upgrade/1 + ), + Bridges#{<<"webhook">> := WebhookBridges}. parse(HOCON) -> {ok, Conf} = hocon:binary(HOCON), @@ -84,7 +129,7 @@ assert_upgraded1(Map) -> ?assert(maps:is_key(<<"ssl">>, Map)). check(Conf) when is_map(Conf) -> - hocon_tconf:check_plain(emqx_bridge_schema, Conf). + hocon_tconf:check_plain(emqx_bridge_schema, Conf, #{required => false}). %% erlfmt-ignore %% this is config generated from v5.0.11 @@ -100,7 +145,7 @@ bridges{ max_retries = 3 method = \"get\" pool_size = 4 - request_timeout = \"5s\" + request_timeout = \"15s\" ssl {enable = false, verify = \"verify_peer\"} url = \"http://localhost:8080\" } @@ -108,6 +153,73 @@ bridges{ } """. +full_webhook_v5011_hocon() -> + "" + "\n" + "bridges{\n" + " webhook {\n" + " the_name{\n" + " body = \"${payload}\"\n" + " connect_timeout = \"5s\"\n" + " direction = \"egress\"\n" + " enable_pipelining = 100\n" + " headers {\"content-type\" = \"application/json\"}\n" + " max_retries = 3\n" + " method = \"get\"\n" + " pool_size = 4\n" + " pool_type = \"random\"\n" + " request_timeout = \"5s\"\n" + " ssl {\n" + " ciphers = \"\"\n" + " depth = 10\n" + " enable = false\n" + " reuse_sessions = true\n" + " secure_renegotiate = true\n" + " user_lookup_fun = \"emqx_tls_psk:lookup\"\n" + " verify = \"verify_peer\"\n" + " versions = [\"tlsv1.3\", \"tlsv1.2\", \"tlsv1.1\", \"tlsv1\"]\n" + " }\n" + " url = \"http://localhost:8080\"\n" + " }\n" + " }\n" + "}\n" + "". + +%% does not contain direction +full_webhook_v5019_hocon() -> + "" + "\n" + "bridges{\n" + " webhook {\n" + " the_name{\n" + " body = \"${payload}\"\n" + " connect_timeout = \"5s\"\n" + " enable_pipelining = 100\n" + " headers {\"content-type\" = \"application/json\"}\n" + " max_retries = 3\n" + " method = \"get\"\n" + " pool_size = 4\n" + " pool_type = \"random\"\n" + " request_timeout = \"1m\"\n" + " resource_opts = {\n" + " request_timeout = \"7s\"\n" + " }\n" + " ssl {\n" + " ciphers = \"\"\n" + " depth = 10\n" + " enable = false\n" + " reuse_sessions = true\n" + " secure_renegotiate = true\n" + " user_lookup_fun = \"emqx_tls_psk:lookup\"\n" + " verify = \"verify_peer\"\n" + " versions = [\"tlsv1.3\", \"tlsv1.2\", \"tlsv1.1\", \"tlsv1\"]\n" + " }\n" + " url = \"http://localhost:8080\"\n" + " }\n" + " }\n" + "}\n" + "". 
+ %% erlfmt-ignore %% this is a generated from v5.0.11 mqtt_v5011_hocon() -> @@ -121,6 +233,7 @@ bridges { keepalive = \"60s\" mode = cluster_shareload proto_ver = \"v4\" + reconnect_interval = \"15s\" server = \"localhost:1883\" ssl {enable = false, verify = \"verify_peer\"} } @@ -138,6 +251,7 @@ bridges { keepalive = \"60s\" mode = \"cluster_shareload\" proto_ver = \"v4\" + reconnect_interval = \"15s\" server = \"localhost:1883\" ssl {enable = false, verify = \"verify_peer\"} } @@ -168,7 +282,6 @@ bridges { mode = \"cluster_shareload\" password = \"\" proto_ver = \"v5\" - reconnect_interval = \"15s\" replayq {offload = false, seg_bytes = \"100MB\"} retry_interval = \"12s\" server = \"localhost:1883\" @@ -201,7 +314,6 @@ bridges { mode = \"cluster_shareload\" password = \"\" proto_ver = \"v4\" - reconnect_interval = \"15s\" replayq {offload = false, seg_bytes = \"100MB\"} retry_interval = \"44s\" server = \"localhost:1883\" diff --git a/apps/emqx_bridge/test/emqx_bridge_mqtt_SUITE.erl b/apps/emqx_bridge/test/emqx_bridge_mqtt_SUITE.erl index 90d19eccf..bd5cda3f0 100644 --- a/apps/emqx_bridge/test/emqx_bridge_mqtt_SUITE.erl +++ b/apps/emqx_bridge/test/emqx_bridge_mqtt_SUITE.erl @@ -19,7 +19,6 @@ -compile(export_all). -import(emqx_dashboard_api_test_helpers, [request/4, uri/1]). --import(emqx_common_test_helpers, [on_exit/1]). -include("emqx/include/emqx.hrl"). -include_lib("eunit/include/eunit.hrl"). @@ -32,7 +31,6 @@ -define(BRIDGE_CONF_DEFAULT, <<"bridges: {}">>). -define(TYPE_MQTT, <<"mqtt">>). --define(NAME_MQTT, <<"my_mqtt_bridge">>). -define(BRIDGE_NAME_INGRESS, <<"ingress_mqtt_bridge">>). -define(BRIDGE_NAME_EGRESS, <<"egress_mqtt_bridge">>). @@ -53,7 +51,7 @@ -define(INGRESS_CONF, #{ <<"remote">> => #{ <<"topic">> => <>, - <<"qos">> => 2 + <<"qos">> => 1 }, <<"local">> => #{ <<"topic">> => <>, @@ -75,6 +73,47 @@ } }). +-define(INGRESS_CONF_NO_PAYLOAD_TEMPLATE, #{ + <<"remote">> => #{ + <<"topic">> => <>, + <<"qos">> => 1 + }, + <<"local">> => #{ + <<"topic">> => <>, + <<"qos">> => <<"${qos}">>, + <<"retain">> => <<"${retain}">> + } +}). + +-define(EGRESS_CONF_NO_PAYLOAD_TEMPLATE, #{ + <<"local">> => #{ + <<"topic">> => <> + }, + <<"remote">> => #{ + <<"topic">> => <>, + <<"qos">> => <<"${qos}">>, + <<"retain">> => <<"${retain}">> + } +}). + +-define(assertMetrics(Pat, BridgeID), + ?assertMetrics(Pat, true, BridgeID) +). +-define(assertMetrics(Pat, Guard, BridgeID), + ?assertMatch( + #{ + <<"metrics">> := Pat, + <<"node_metrics">> := [ + #{ + <<"node">> := _, + <<"metrics">> := Pat + } + ] + } when Guard, + request_bridge_metrics(BridgeID) + ) +). + inspect(Selected, _Envs, _Args) -> persistent_term:put(?MODULE, #{inspect => Selected}). @@ -122,10 +161,12 @@ set_special_configs(_) -> init_per_testcase(_, Config) -> {ok, _} = emqx_cluster_rpc:start_link(node(), emqx_cluster_rpc, 1000), + ok = snabbkaffe:start_trace(), Config. + end_per_testcase(_, _Config) -> clear_resources(), - emqx_common_test_helpers:call_janitor(), + snabbkaffe:stop(), ok. 
clear_resources() -> @@ -151,7 +192,7 @@ t_mqtt_conn_bridge_ingress(_) -> {ok, 201, Bridge} = request( post, uri(["bridges"]), - ?SERVER_CONF(User1)#{ + ServerConf = ?SERVER_CONF(User1)#{ <<"type">> => ?TYPE_MQTT, <<"name">> => ?BRIDGE_NAME_INGRESS, <<"ingress">> => ?INGRESS_CONF @@ -160,9 +201,22 @@ t_mqtt_conn_bridge_ingress(_) -> #{ <<"type">> := ?TYPE_MQTT, <<"name">> := ?BRIDGE_NAME_INGRESS - } = jsx:decode(Bridge), + } = emqx_utils_json:decode(Bridge), + BridgeIDIngress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_INGRESS), + %% try to create the bridge again + ?assertMatch( + {ok, 400, _}, + request(post, uri(["bridges"]), ServerConf) + ), + + %% try to reconfigure the bridge + ?assertMatch( + {ok, 200, _}, + request(put, uri(["bridges", BridgeIDIngress]), ServerConf) + ), + %% we now test if the bridge works as expected RemoteTopic = <>, LocalTopic = <>, @@ -173,34 +227,98 @@ t_mqtt_conn_bridge_ingress(_) -> %% the remote broker is also the local one. emqx:publish(emqx_message:make(RemoteTopic, Payload)), %% we should receive a message on the local broker, with specified topic - ?assert( - receive - {deliver, LocalTopic, #message{payload = Payload}} -> - ct:pal("local broker got message: ~p on topic ~p", [Payload, LocalTopic]), - true; - Msg -> - ct:pal("Msg: ~p", [Msg]), - false - after 100 -> - false - end - ), + assert_mqtt_msg_received(LocalTopic, Payload), %% verify the metrics of the bridge - {ok, 200, BridgeStr} = request(get, uri(["bridges", BridgeIDIngress]), []), + ?assertMetrics( + #{<<"matched">> := 0, <<"received">> := 1}, + BridgeIDIngress + ), + + %% delete the bridge + {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDIngress]), []), + {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), + + ok. + +t_mqtt_egress_bridge_ignores_clean_start(_) -> + BridgeName = atom_to_binary(?FUNCTION_NAME), + BridgeID = create_bridge( + ?SERVER_CONF(<<"user1">>)#{ + <<"type">> => ?TYPE_MQTT, + <<"name">> => BridgeName, + <<"egress">> => ?EGRESS_CONF, + <<"clean_start">> => false + } + ), + + {ok, _, #{state := #{name := WorkerName}}} = + emqx_resource:get_instance(emqx_bridge_resource:resource_id(BridgeID)), ?assertMatch( - #{ - <<"metrics">> := #{<<"matched">> := 0, <<"received">> := 1}, - <<"node_metrics">> := - [ - #{ - <<"node">> := _, - <<"metrics">> := - #{<<"matched">> := 0, <<"received">> := 1} - } - ] - }, - jsx:decode(BridgeStr) + #{clean_start := true}, + maps:from_list(emqx_connector_mqtt_worker:info(WorkerName)) + ), + + %% delete the bridge + {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), []), + + ok. + +t_mqtt_conn_bridge_ingress_downgrades_qos_2(_) -> + BridgeName = atom_to_binary(?FUNCTION_NAME), + BridgeID = create_bridge( + ?SERVER_CONF(<<"user1">>)#{ + <<"type">> => ?TYPE_MQTT, + <<"name">> => BridgeName, + <<"ingress">> => emqx_utils_maps:deep_merge( + ?INGRESS_CONF, + #{<<"remote">> => #{<<"qos">> => 2}} + ) + } + ), + + RemoteTopic = <>, + LocalTopic = <>, + Payload = <<"whatqos">>, + emqx:subscribe(LocalTopic), + emqx:publish(emqx_message:make(undefined, _QoS = 2, RemoteTopic, Payload)), + + %% we should receive a message on the local broker, with specified topic + Msg = assert_mqtt_msg_received(LocalTopic, Payload), + ?assertMatch(#message{qos = 1}, Msg), + + %% delete the bridge + {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), []), + + ok. 
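assert_mqtt_msg_received/1,2 replaces the copy-pasted receive blocks throughout this suite, but its definition falls outside these hunks. A plausible minimal implementation, consistent with call sites that expect the #message{} record back (a sketch, not the suite's actual helper):

%% Wait for a delivery on Topic and return the #message{} so callers can
%% inspect qos, from, payload, etc. '_' means "any payload".
assert_mqtt_msg_received(Topic) ->
    assert_mqtt_msg_received(Topic, '_').

assert_mqtt_msg_received(Topic, Payload) ->
    receive
        {deliver, Topic, #message{} = Msg} when
            Payload =:= '_' orelse Msg#message.payload =:= Payload
        ->
            Msg
    after 300 ->
        ct:fail("no message received on topic ~p within 300 ms", [Topic])
    end.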
+ +t_mqtt_conn_bridge_ingress_no_payload_template(_) -> + User1 = <<"user1">>, + BridgeIDIngress = create_bridge( + ?SERVER_CONF(User1)#{ + <<"type">> => ?TYPE_MQTT, + <<"name">> => ?BRIDGE_NAME_INGRESS, + <<"ingress">> => ?INGRESS_CONF_NO_PAYLOAD_TEMPLATE + } + ), + + %% we now test if the bridge works as expected + RemoteTopic = <>, + LocalTopic = <>, + Payload = <<"hello">>, + emqx:subscribe(LocalTopic), + timer:sleep(100), + %% PUBLISH a message to the 'remote' broker, as we have only one broker, + %% the remote broker is also the local one. + emqx:publish(emqx_message:make(RemoteTopic, Payload)), + %% we should receive a message on the local broker, with specified topic + Msg = assert_mqtt_msg_received(LocalTopic), + ?assertMatch(#{<<"payload">> := Payload}, emqx_utils_json:decode(Msg#message.payload)), + + %% verify the metrics of the bridge + ?assertMetrics( + #{<<"matched">> := 0, <<"received">> := 1}, + BridgeIDIngress ), %% delete the bridge @@ -212,63 +330,41 @@ t_mqtt_conn_bridge_ingress(_) -> t_mqtt_conn_bridge_egress(_) -> %% then we add a mqtt connector, using POST User1 = <<"user1">>, - - {ok, 201, Bridge} = request( - post, - uri(["bridges"]), + BridgeIDEgress = create_bridge( ?SERVER_CONF(User1)#{ <<"type">> => ?TYPE_MQTT, <<"name">> => ?BRIDGE_NAME_EGRESS, <<"egress">> => ?EGRESS_CONF } ), - #{ - <<"type">> := ?TYPE_MQTT, - <<"name">> := ?BRIDGE_NAME_EGRESS - } = jsx:decode(Bridge), - BridgeIDEgress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_EGRESS), ResourceID = emqx_bridge_resource:resource_id(?TYPE_MQTT, ?BRIDGE_NAME_EGRESS), + %% we now test if the bridge works as expected LocalTopic = <>, RemoteTopic = <>, Payload = <<"hello">>, emqx:subscribe(RemoteTopic), - timer:sleep(100), - %% PUBLISH a message to the 'local' broker, as we have only one broker, - %% the remote broker is also the local one. - emqx:publish(emqx_message:make(LocalTopic, Payload)), - %% we should receive a message on the "remote" broker, with specified topic - ?assert( - receive - {deliver, RemoteTopic, #message{payload = Payload, from = From}} -> - ct:pal("local broker got message: ~p on topic ~p", [Payload, RemoteTopic]), - Size = byte_size(ResourceID), - ?assertMatch(<>, From), - true; - Msg -> - ct:pal("Msg: ~p", [Msg]), - false - after 100 -> - false - end + ?wait_async_action( + %% PUBLISH a message to the 'local' broker, as we have only one broker, + %% the remote broker is also the local one. + emqx:publish(emqx_message:make(LocalTopic, Payload)), + #{?snk_kind := buffer_worker_flush_ack} ), + %% we should receive a message on the "remote" broker, with specified topic + Msg = assert_mqtt_msg_received(RemoteTopic, Payload), + Size = byte_size(ResourceID), + ?assertMatch(<>, Msg#message.from), + %% verify the metrics of the bridge - {ok, 200, BridgeStr} = request(get, uri(["bridges", BridgeIDEgress]), []), - ?assertMatch( - #{ - <<"metrics">> := #{<<"matched">> := 1, <<"success">> := 1, <<"failed">> := 0}, - <<"node_metrics">> := - [ - #{ - <<"node">> := _, - <<"metrics">> := - #{<<"matched">> := 1, <<"success">> := 1, <<"failed">> := 0} - } - ] - }, - jsx:decode(BridgeStr) + ?retry( + _Interval = 200, + _Attempts = 5, + ?assertMetrics( + #{<<"matched">> := 1, <<"success">> := 1, <<"failed">> := 0}, + BridgeIDEgress + ) ), %% delete the bridge @@ -276,11 +372,57 @@ t_mqtt_conn_bridge_egress(_) -> {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), ok. 
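Two synchronization tools carry the egress assertions: snabbkaffe's ?wait_async_action blocks until the buffer worker acks the flush, and ?retry polls the metrics, which are bumped asynchronously even after the ack. For readers without the macro at hand, ?retry(Interval, Attempts, Assertion) amounts to roughly this plain function:

%% Roughly what ?retry/3 expands to: re-run the assertion until it stops
%% throwing or the attempts run out (the last attempt is allowed to crash).
retry(_Interval, 1, Fun) ->
    Fun();
retry(Interval, Attempts, Fun) when Attempts > 1 ->
    try
        Fun()
    catch
        _:_ ->
            timer:sleep(Interval),
            retry(Interval, Attempts - 1, Fun)
    end.

%% retry(200, 5, fun() ->
%%     ?assertMetrics(#{<<"matched">> := 1}, BridgeIDEgress)
%% end).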
+t_mqtt_conn_bridge_egress_no_payload_template(_) ->
+    %% we add an mqtt bridge, using POST
+    User1 = <<"user1">>,
+
+    BridgeIDEgress = create_bridge(
+        ?SERVER_CONF(User1)#{
+            <<"type">> => ?TYPE_MQTT,
+            <<"name">> => ?BRIDGE_NAME_EGRESS,
+            <<"egress">> => ?EGRESS_CONF_NO_PAYLOAD_TEMPLATE
+        }
+    ),
+    ResourceID = emqx_bridge_resource:resource_id(?TYPE_MQTT, ?BRIDGE_NAME_EGRESS),
+
+    %% we now test if the bridge works as expected
+    LocalTopic = <>,
+    RemoteTopic = <>,
+    Payload = <<"hello">>,
+    emqx:subscribe(RemoteTopic),
+
+    ?wait_async_action(
+        %% PUBLISH a message to the 'local' broker, as we have only one broker,
+        %% the remote broker is also the local one.
+        emqx:publish(emqx_message:make(LocalTopic, Payload)),
+        #{?snk_kind := buffer_worker_flush_ack}
+    ),
+
+    %% we should receive a message on the "remote" broker, with specified topic
+    Msg = assert_mqtt_msg_received(RemoteTopic),
+    %% with no payload template, the payload carries all message fields output
+    %% by the rule engine, encoded as a JSON binary
+    ?assertMatch(<>, Msg#message.from),
+    ?assertMatch(#{<<"payload">> := Payload}, emqx_utils_json:decode(Msg#message.payload)),
+
+    %% verify the metrics of the bridge
+    ?retry(
+        _Interval = 200,
+        _Attempts = 5,
+        ?assertMetrics(
+            #{<<"matched">> := 1, <<"success">> := 1, <<"failed">> := 0},
+            BridgeIDEgress
+        )
+    ),
+
+    %% delete the bridge
+    {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []),
+    {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
+
+    ok.
+
 t_egress_custom_clientid_prefix(_Config) ->
     User1 = <<"user1">>,
-    {ok, 201, Bridge} = request(
-        post,
-        uri(["bridges"]),
+    BridgeIDEgress = create_bridge(
         ?SERVER_CONF(User1)#{
             <<"clientid_prefix">> => <<"my-custom-prefix">>,
             <<"type">> => ?TYPE_MQTT,
@@ -288,11 +430,6 @@ t_egress_custom_clientid_prefix(_Config) ->
             <<"egress">> => ?EGRESS_CONF
         }
     ),
-    #{
-        <<"type">> := ?TYPE_MQTT,
-        <<"name">> := ?BRIDGE_NAME_EGRESS
-    } = jsx:decode(Bridge),
-    BridgeIDEgress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_EGRESS),
     ResourceID = emqx_bridge_resource:resource_id(?TYPE_MQTT, ?BRIDGE_NAME_EGRESS),
     LocalTopic = <>,
     RemoteTopic = <>,
@@ -301,60 +438,36 @@
     timer:sleep(100),
     emqx:publish(emqx_message:make(LocalTopic, Payload)),
 
-    receive
-        {deliver, RemoteTopic, #message{from = From}} ->
-            Size = byte_size(ResourceID),
-            ?assertMatch(<<"my-custom-prefix:", _ResouceID:Size/binary, _/binary>>, From),
-            ok
-    after 1000 ->
-        ct:fail("should have published message")
-    end,
+    Msg = assert_mqtt_msg_received(RemoteTopic, Payload),
+    Size = byte_size(ResourceID),
+    ?assertMatch(<<"my-custom-prefix:", _ResouceID:Size/binary, _/binary>>, Msg#message.from),
 
     {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []),
-    {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
-
     ok.
 t_mqtt_conn_bridge_ingress_and_egress(_) ->
     User1 = <<"user1">>,
-    %% create an MQTT bridge, using POST
-    {ok, 201, Bridge} = request(
-        post,
-        uri(["bridges"]),
+    BridgeIDIngress = create_bridge(
         ?SERVER_CONF(User1)#{
             <<"type">> => ?TYPE_MQTT,
             <<"name">> => ?BRIDGE_NAME_INGRESS,
             <<"ingress">> => ?INGRESS_CONF
         }
     ),
-
-    #{
-        <<"type">> := ?TYPE_MQTT,
-        <<"name">> := ?BRIDGE_NAME_INGRESS
-    } = jsx:decode(Bridge),
-    BridgeIDIngress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_INGRESS),
-    {ok, 201, Bridge2} = request(
-        post,
-        uri(["bridges"]),
+    BridgeIDEgress = create_bridge(
         ?SERVER_CONF(User1)#{
             <<"type">> => ?TYPE_MQTT,
             <<"name">> => ?BRIDGE_NAME_EGRESS,
             <<"egress">> => ?EGRESS_CONF
         }
     ),
-    #{
-        <<"type">> := ?TYPE_MQTT,
-        <<"name">> := ?BRIDGE_NAME_EGRESS
-    } = jsx:decode(Bridge2),
-    BridgeIDEgress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_EGRESS),

     %% we now test if the bridge works as expected
     LocalTopic = <<?EGRESS_LOCAL_TOPIC, "/1">>,
     RemoteTopic = <<?EGRESS_REMOTE_TOPIC, "/", LocalTopic/binary>>,
     Payload = <<"hello">>,
     emqx:subscribe(RemoteTopic),

-    {ok, 200, BridgeStr1} = request(get, uri(["bridges", BridgeIDEgress]), []),
     #{
         <<"metrics">> := #{
             <<"matched">> := CntMatched1, <<"success">> := CntSuccess1, <<"failed">> := 0
@@ -371,29 +484,20 @@ t_mqtt_conn_bridge_ingress_and_egress(_) ->
                 }
             }
         ]
-    } = jsx:decode(BridgeStr1),
-    timer:sleep(100),
-    %% PUBLISH a message to the 'local' broker, as we have only one broker,
-    %% the remote broker is also the local one.
-    emqx:publish(emqx_message:make(LocalTopic, Payload)),
+    } = request_bridge_metrics(BridgeIDEgress),
+
+    ?wait_async_action(
+        %% PUBLISH a message to the 'local' broker, as we have only one broker,
+        %% the remote broker is also the local one.
+        emqx:publish(emqx_message:make(LocalTopic, Payload)),
+        #{?snk_kind := buffer_worker_flush_ack}
+    ),

     %% we should receive a message on the "remote" broker, with specified topic
-    ?assert(
-        receive
-            {deliver, RemoteTopic, #message{payload = Payload}} ->
-                ct:pal("local broker got message: ~p on topic ~p", [Payload, RemoteTopic]),
-                true;
-            Msg ->
-                ct:pal("Msg: ~p", [Msg]),
-                false
-        after 100 ->
-            false
-        end
-    ),
+    assert_mqtt_msg_received(RemoteTopic, Payload),

     %% verify the metrics of the bridge
     timer:sleep(1000),
-    {ok, 200, BridgeStr2} = request(get, uri(["bridges", BridgeIDEgress]), []),
     #{
         <<"metrics">> := #{
             <<"matched">> := CntMatched2, <<"success">> := CntSuccess2, <<"failed">> := 0
@@ -410,7 +514,7 @@ t_mqtt_conn_bridge_ingress_and_egress(_) ->
             }
         }
         ]
-    } = jsx:decode(BridgeStr2),
+    } = request_bridge_metrics(BridgeIDEgress),
     ?assertEqual(CntMatched2, CntMatched1 + 1),
     ?assertEqual(CntSuccess2, CntSuccess1 + 1),
     ?assertEqual(NodeCntMatched2, NodeCntMatched1 + 1),
@@ -423,16 +527,13 @@ t_mqtt_conn_bridge_ingress_and_egress(_) ->
     ok.

 t_ingress_mqtt_bridge_with_rules(_) ->
-    {ok, 201, _} = request(
-        post,
-        uri(["bridges"]),
+    BridgeIDIngress = create_bridge(
         ?SERVER_CONF(<<"user1">>)#{
             <<"type">> => ?TYPE_MQTT,
             <<"name">> => ?BRIDGE_NAME_INGRESS,
             <<"ingress">> => ?INGRESS_CONF
         }
     ),
-    BridgeIDIngress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_INGRESS),

     {ok, 201, Rule} = request(
         post,
@@ -444,7 +545,7 @@ t_ingress_mqtt_bridge_with_rules(_) ->
             <<"sql">> => <<"SELECT * from \"$bridges/", BridgeIDIngress/binary, "\"">>
         }
     ),
-    #{<<"id">> := RuleId} = jsx:decode(Rule),
+    #{<<"id">> := RuleId} = emqx_utils_json:decode(Rule),

     %% we now test if the bridge works as expected

@@ -457,22 +558,11 @@ t_ingress_mqtt_bridge_with_rules(_) ->
     %% the remote broker is also the local one.
     emqx:publish(emqx_message:make(RemoteTopic, Payload)),
     %% we should receive a message on the local broker, with specified topic
-    ?assert(
-        receive
-            {deliver, LocalTopic, #message{payload = Payload}} ->
-                ct:pal("local broker got message: ~p on topic ~p", [Payload, LocalTopic]),
-                true;
-            Msg ->
-                ct:pal("Msg: ~p", [Msg]),
-                false
-        after 100 ->
-            false
-        end
-    ),
+    assert_mqtt_msg_received(LocalTopic, Payload),
     %% and also the rule should be matched, with matched + 1:
     {ok, 200, Rule1} = request(get, uri(["rules", RuleId]), []),
     {ok, 200, Metrics} = request(get, uri(["rules", RuleId, "metrics"]), []),
-    ?assertMatch(#{<<"id">> := RuleId}, jsx:decode(Rule1)),
+    ?assertMatch(#{<<"id">> := RuleId}, emqx_utils_json:decode(Rule1)),
     ?assertMatch(
         #{
             <<"metrics">> := #{
@@ -491,7 +581,7 @@ t_ingress_mqtt_bridge_with_rules(_) ->
                 <<"actions.failed.unknown">> := 0
             }
         },
-        jsx:decode(Metrics)
+        emqx_utils_json:decode(Metrics)
     ),

     %% we also check if the actions of the rule are triggered
@@ -513,37 +603,22 @@ t_ingress_mqtt_bridge_with_rules(_) ->
     ),

     %% verify the metrics of the bridge
-    {ok, 200, BridgeStr} = request(get, uri(["bridges", BridgeIDIngress]), []),
-    ?assertMatch(
-        #{
-            <<"metrics">> := #{<<"matched">> := 0, <<"received">> := 1},
-            <<"node_metrics">> :=
-                [
-                    #{
-                        <<"node">> := _,
-                        <<"metrics">> :=
-                            #{<<"matched">> := 0, <<"received">> := 1}
-                    }
-                ]
-        },
-        jsx:decode(BridgeStr)
+    ?assertMetrics(
+        #{<<"matched">> := 0, <<"received">> := 1},
+        BridgeIDIngress
     ),

     {ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), []),
     {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDIngress]), []).

 t_egress_mqtt_bridge_with_rules(_) ->
-    {ok, 201, Bridge} = request(
-        post,
-        uri(["bridges"]),
+    BridgeIDEgress = create_bridge(
         ?SERVER_CONF(<<"user1">>)#{
             <<"type">> => ?TYPE_MQTT,
             <<"name">> => ?BRIDGE_NAME_EGRESS,
             <<"egress">> => ?EGRESS_CONF
         }
     ),
-    #{<<"type">> := ?TYPE_MQTT, <<"name">> := ?BRIDGE_NAME_EGRESS} = jsx:decode(Bridge),
-    BridgeIDEgress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_EGRESS),

     {ok, 201, Rule} = request(
         post,
@@ -555,7 +630,7 @@ t_egress_mqtt_bridge_with_rules(_) ->
             <<"sql">> => <<"SELECT * from \"t/1\"">>
         }
     ),
-    #{<<"id">> := RuleId} = jsx:decode(Rule),
+    #{<<"id">> := RuleId} = emqx_utils_json:decode(Rule),

     %% we now test if the bridge works as expected
     LocalTopic = <<?EGRESS_LOCAL_TOPIC, "/1">>,
@@ -567,18 +642,7 @@ t_egress_mqtt_bridge_with_rules(_) ->
     %% the remote broker is also the local one.
     emqx:publish(emqx_message:make(LocalTopic, Payload)),
     %% we should receive a message on the "remote" broker, with specified topic
-    ?assert(
-        receive
-            {deliver, RemoteTopic, #message{payload = Payload}} ->
-                ct:pal("remote broker got message: ~p on topic ~p", [Payload, RemoteTopic]),
-                true;
-            Msg ->
-                ct:pal("Msg: ~p", [Msg]),
-                false
-        after 100 ->
-            false
-        end
-    ),
+    assert_mqtt_msg_received(RemoteTopic, Payload),
     emqx:unsubscribe(RemoteTopic),

     %% PUBLISH a message to the rule.
@@ -589,7 +653,7 @@ t_egress_mqtt_bridge_with_rules(_) ->
     timer:sleep(100),
     emqx:publish(emqx_message:make(RuleTopic, Payload2)),
     {ok, 200, Rule1} = request(get, uri(["rules", RuleId]), []),
-    ?assertMatch(#{<<"id">> := RuleId, <<"name">> := _}, jsx:decode(Rule1)),
+    ?assertMatch(#{<<"id">> := RuleId, <<"name">> := _}, emqx_utils_json:decode(Rule1)),
     {ok, 200, Metrics} = request(get, uri(["rules", RuleId, "metrics"]), []),
     ?assertMatch(
         #{
@@ -609,39 +673,16 @@ t_egress_mqtt_bridge_with_rules(_) ->
                 <<"actions.failed.unknown">> := 0
             }
         },
-        jsx:decode(Metrics)
+        emqx_utils_json:decode(Metrics)
     ),

     %% we should receive a message on the "remote" broker, with specified topic
-    ?assert(
-        receive
-            {deliver, RemoteTopic2, #message{payload = Payload2}} ->
-                ct:pal("remote broker got message: ~p on topic ~p", [Payload2, RemoteTopic2]),
-                true;
-            Msg ->
-                ct:pal("Msg: ~p", [Msg]),
-                false
-        after 100 ->
-            false
-        end
-    ),
+    assert_mqtt_msg_received(RemoteTopic2, Payload2),

     %% verify the metrics of the bridge
-    {ok, 200, BridgeStr} = request(get, uri(["bridges", BridgeIDEgress]), []),
-    ?assertMatch(
-        #{
-            <<"metrics">> := #{<<"matched">> := 2, <<"success">> := 2, <<"failed">> := 0},
-            <<"node_metrics">> :=
-                [
-                    #{
-                        <<"node">> := _,
-                        <<"metrics">> := #{
-                            <<"matched">> := 2, <<"success">> := 2, <<"failed">> := 0
-                        }
-                    }
-                ]
-        },
-        jsx:decode(BridgeStr)
+    ?assertMetrics(
+        #{<<"matched">> := 2, <<"success">> := 2, <<"failed">> := 0},
+        BridgeIDEgress
     ),

     {ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), []),
@@ -650,81 +691,58 @@ t_egress_mqtt_bridge_with_rules(_) ->

 t_mqtt_conn_bridge_egress_reconnect(_) ->
     %% then we add an MQTT connector, using POST
     User1 = <<"user1">>,
-
-    {ok, 201, Bridge} = request(
-        post,
-        uri(["bridges"]),
+    BridgeIDEgress = create_bridge(
         ?SERVER_CONF(User1)#{
             <<"type">> => ?TYPE_MQTT,
             <<"name">> => ?BRIDGE_NAME_EGRESS,
             <<"egress">> => ?EGRESS_CONF,
-            %% to make it reconnect quickly
-            <<"reconnect_interval">> => <<"1s">>,
             <<"resource_opts">> => #{
                 <<"worker_pool_size">> => 2,
-                <<"enable_queue">> => true,
                 <<"query_mode">> => <<"sync">>,
+                %% using a long time so we can test recovery
+                <<"request_timeout">> => <<"15s">>,
                 %% to make it check the health quickly
-                <<"health_check_interval">> => <<"0.5s">>
+                <<"health_check_interval">> => <<"0.5s">>,
+                %% to make it reconnect quickly
+                <<"auto_restart_interval">> => <<"1s">>
             }
         }
     ),
-    #{
-        <<"type">> := ?TYPE_MQTT,
-        <<"name">> := ?BRIDGE_NAME_EGRESS
-    } = jsx:decode(Bridge),
-    BridgeIDEgress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_EGRESS),
-    on_exit(fun() ->
-        %% delete the bridge
-        {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []),
-        {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
-        ok
-    end),
+
     %% we now test if the bridge works as expected
     LocalTopic = <<?EGRESS_LOCAL_TOPIC, "/1">>,
     RemoteTopic = <<?EGRESS_REMOTE_TOPIC, "/", LocalTopic/binary>>,
     Payload0 = <<"hello">>,
     emqx:subscribe(RemoteTopic),
-    timer:sleep(100),
-    %% PUBLISH a message to the 'local' broker, as we have only one broker,
-    %% the remote broker is also the local one.
-    emqx:publish(emqx_message:make(LocalTopic, Payload0)),
+
+    ?wait_async_action(
+        %% PUBLISH a message to the 'local' broker, as we have only one broker,
+        %% the remote broker is also the local one.
+ emqx:publish(emqx_message:make(LocalTopic, Payload0)), + #{?snk_kind := buffer_worker_flush_ack} + ), %% we should receive a message on the "remote" broker, with specified topic assert_mqtt_msg_received(RemoteTopic, Payload0), %% verify the metrics of the bridge - {ok, 200, BridgeStr} = request(get, uri(["bridges", BridgeIDEgress]), []), - ?assertMatch( - #{ - <<"metrics">> := #{<<"matched">> := 1, <<"success">> := 1, <<"failed">> := 0}, - <<"node_metrics">> := - [ - #{ - <<"node">> := _, - <<"metrics">> := - #{<<"matched">> := 1, <<"success">> := 1, <<"failed">> := 0} - } - ] - }, - jsx:decode(BridgeStr) + ?assertMetrics( + #{<<"matched">> := 1, <<"success">> := 1, <<"failed">> := 0}, + BridgeIDEgress ), %% stop the listener 1883 to make the bridge disconnected ok = emqx_listeners:stop_listener('tcp:default'), ct:sleep(1500), - %% PUBLISH 2 messages to the 'local' broker, the message should - ok = snabbkaffe:start_trace(), + %% PUBLISH 2 messages to the 'local' broker, the messages should + %% be enqueued and the resource will block {ok, SRef} = snabbkaffe:subscribe( fun - ( - #{ - ?snk_kind := call_query_enter, - query := {query, _From, {send_message, #{}}, _Sent} - } - ) -> + (#{?snk_kind := buffer_worker_retry_inflight_failed}) -> + true; + (#{?snk_kind := buffer_worker_flush_nack}) -> true; (_) -> false @@ -734,66 +752,176 @@ t_mqtt_conn_bridge_egress_reconnect(_) -> ), Payload1 = <<"hello2">>, Payload2 = <<"hello3">>, - emqx:publish(emqx_message:make(LocalTopic, Payload1)), - emqx:publish(emqx_message:make(LocalTopic, Payload2)), + %% We need to do it in other processes because it'll block due to + %% the long timeout + spawn(fun() -> emqx:publish(emqx_message:make(LocalTopic, Payload1)) end), + spawn(fun() -> emqx:publish(emqx_message:make(LocalTopic, Payload2)) end), {ok, _} = snabbkaffe:receive_events(SRef), - ok = snabbkaffe:stop(), %% verify the metrics of the bridge, the message should be queued - {ok, 200, BridgeStr1} = request(get, uri(["bridges", BridgeIDEgress]), []), - Decoded1 = jsx:decode(BridgeStr1), ?assertMatch( - Status when (Status == <<"connected">> orelse Status == <<"connecting">>), - maps:get(<<"status">>, Decoded1) + #{<<"status">> := Status} when + Status == <<"connecting">> orelse Status == <<"disconnected">>, + request_bridge(BridgeIDEgress) ), %% matched >= 3 because of possible retries. - ?assertMatch( + ?assertMetrics( #{ <<"matched">> := Matched, <<"success">> := 1, <<"failed">> := 0, - <<"queuing">> := 2 - } when Matched >= 3, - maps:get(<<"metrics">>, Decoded1) + <<"queuing">> := Queuing, + <<"inflight">> := Inflight + }, + Matched >= 3 andalso Inflight + Queuing == 2, + BridgeIDEgress ), %% start the listener 1883 to make the bridge reconnected ok = emqx_listeners:start_listener('tcp:default'), timer:sleep(1500), %% verify the metrics of the bridge, the 2 queued messages should have been sent - {ok, 200, BridgeStr2} = request(get, uri(["bridges", BridgeIDEgress]), []), + ?assertMatch(#{<<"status">> := <<"connected">>}, request_bridge(BridgeIDEgress)), %% matched >= 3 because of possible retries. 
-    ?assertMatch(
+    ?assertMetrics(
         #{
-            <<"status">> := <<"connected">>,
-            <<"metrics">> := #{
-                <<"matched">> := Matched,
-                <<"success">> := 3,
-                <<"failed">> := 0,
-                <<"queuing">> := 0,
-                <<"retried">> := _
-            }
-        } when Matched >= 3,
-        jsx:decode(BridgeStr2)
+            <<"matched">> := Matched,
+            <<"success">> := 3,
+            <<"failed">> := 0,
+            <<"queuing">> := 0,
+            <<"retried">> := _
+        },
+        Matched >= 3,
+        BridgeIDEgress
     ),
     %% also verify the 2 messages have been sent to the remote broker
     assert_mqtt_msg_received(RemoteTopic, Payload1),
     assert_mqtt_msg_received(RemoteTopic, Payload2),
     ok.

+t_mqtt_conn_bridge_egress_async_reconnect(_) ->
+    User1 = <<"user1">>,
+    BridgeIDEgress = create_bridge(
+        ?SERVER_CONF(User1)#{
+            <<"type">> => ?TYPE_MQTT,
+            <<"name">> => ?BRIDGE_NAME_EGRESS,
+            <<"egress">> => ?EGRESS_CONF,
+            <<"resource_opts">> => #{
+                <<"worker_pool_size">> => 2,
+                <<"query_mode">> => <<"async">>,
+                %% using a long time so we can test recovery
+                <<"request_timeout">> => <<"15s">>,
+                %% to make it check the health quickly
+                <<"health_check_interval">> => <<"0.5s">>,
+                %% to make it reconnect quickly
+                <<"auto_restart_interval">> => <<"1s">>
+            }
+        }
+    ),
+
+    Self = self(),
+    LocalTopic = <<?EGRESS_LOCAL_TOPIC, "/1">>,
+    RemoteTopic = <<?EGRESS_REMOTE_TOPIC, "/", LocalTopic/binary>>,
+    emqx:subscribe(RemoteTopic),
+
+    Publisher = start_publisher(LocalTopic, 200, Self),
+    ct:sleep(1000),
+
+    %% stop the listener 1883 to make the bridge disconnected
+    ok = emqx_listeners:stop_listener('tcp:default'),
+    ct:sleep(1500),
+    ?assertMatch(
+        #{<<"status">> := Status} when
+            Status == <<"connecting">> orelse Status == <<"disconnected">>,
+        request_bridge(BridgeIDEgress)
+    ),
+
+    %% start the listener 1883 to make the bridge reconnected
+    ok = emqx_listeners:start_listener('tcp:default'),
+    timer:sleep(1500),
+    ?assertMatch(
+        #{<<"status">> := <<"connected">>},
+        request_bridge(BridgeIDEgress)
+    ),
+
+    N = stop_publisher(Publisher),
+
+    %% all those messages should eventually be delivered
+    [
+        assert_mqtt_msg_received(RemoteTopic, Payload)
+     || I <- lists:seq(1, N),
+        Payload <- [integer_to_binary(I)]
+    ],
+
+    ok.
+
+start_publisher(Topic, Interval, CtrlPid) ->
+    spawn_link(fun() -> publisher(Topic, 1, Interval, CtrlPid) end).
+
+stop_publisher(Pid) ->
+    _ = Pid ! {self(), stop},
+    receive
+        {Pid, N} -> N
+    after 1_000 -> ct:fail("publisher ~p did not stop", [Pid])
+    end.
+
+publisher(Topic, N, Delay, CtrlPid) ->
+    _ = emqx:publish(emqx_message:make(Topic, integer_to_binary(N))),
+    receive
+        {CtrlPid, stop} ->
+            CtrlPid ! {self(), N}
+    after Delay ->
+        publisher(Topic, N + 1, Delay, CtrlPid)
+    end.
+
+%%
+
+assert_mqtt_msg_received(Topic) ->
+    assert_mqtt_msg_received(Topic, '_', 200).
+
 assert_mqtt_msg_received(Topic, Payload) ->
-    ?assert(
-        receive
-            {deliver, Topic, #message{payload = Payload}} ->
-                ct:pal("Got mqtt message: ~p on topic ~p", [Payload, Topic]),
-                true;
-            Msg ->
-                ct:pal("Unexpected Msg: ~p", [Msg]),
-                false
-        after 100 ->
-            false
-        end
-    ).
+    assert_mqtt_msg_received(Topic, Payload, 200).
+
+assert_mqtt_msg_received(Topic, Payload, Timeout) ->
+    receive
+        {deliver, Topic, Msg = #message{}} when Payload == '_' ->
+            ct:pal("received mqtt ~p on topic ~p", [Msg, Topic]),
+            Msg;
+        {deliver, Topic, Msg = #message{payload = Payload}} ->
+            ct:pal("received mqtt ~p on topic ~p", [Msg, Topic]),
+            Msg
+    after Timeout ->
+        {messages, Messages} = process_info(self(), messages),
+        ct:fail("timeout waiting ~p ms for ~p on topic '~s', messages = ~0p", [
+            Timeout,
+            Payload,
+            Topic,
+            Messages
+        ])
+    end.
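+
+%% Usage note: assert_mqtt_msg_received/1 accepts any payload ('_') and
+%% assert_mqtt_msg_received/2 a specific one, both with the default 200 ms
+%% timeout; pass a third argument for slower paths, e.g.:
+%%   Msg = assert_mqtt_msg_received(RemoteTopic, Payload, 5_000)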
+ +create_bridge(Config = #{<<"type">> := Type, <<"name">> := Name}) -> + {ok, 201, Bridge} = request( + post, + uri(["bridges"]), + Config + ), + ?assertMatch( + #{ + <<"type">> := Type, + <<"name">> := Name + }, + emqx_utils_json:decode(Bridge) + ), + emqx_bridge_resource:bridge_id(Type, Name). + +request_bridge(BridgeID) -> + {ok, 200, Bridge} = request(get, uri(["bridges", BridgeID]), []), + emqx_utils_json:decode(Bridge). + +request_bridge_metrics(BridgeID) -> + {ok, 200, BridgeMetrics} = request(get, uri(["bridges", BridgeID, "metrics"]), []), + emqx_utils_json:decode(BridgeMetrics). request(Method, Url, Body) -> request(<<"connector_admin">>, Method, Url, Body). diff --git a/apps/emqx_bridge/test/emqx_bridge_resource_tests.erl b/apps/emqx_bridge/test/emqx_bridge_resource_tests.erl new file mode 100644 index 000000000..a8a83ff6a --- /dev/null +++ b/apps/emqx_bridge/test/emqx_bridge_resource_tests.erl @@ -0,0 +1,33 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- +-module(emqx_bridge_resource_tests). + +-include_lib("eunit/include/eunit.hrl"). + +bridge_hookpoint_test_() -> + BridgeId = emqx_bridge_resource:bridge_id(type, name), + BridgeHookpoint = emqx_bridge_resource:bridge_hookpoint(BridgeId), + [ + ?_assertEqual(<<"$bridges/type:name">>, BridgeHookpoint), + ?_assertEqual( + {ok, BridgeId}, + emqx_bridge_resource:bridge_hookpoint_to_bridge_id(BridgeHookpoint) + ), + ?_assertEqual( + {error, bad_bridge_hookpoint}, + emqx_bridge_resource:bridge_hookpoint_to_bridge_id(BridgeId) + ) + ]. diff --git a/apps/emqx_bridge/test/emqx_bridge_testlib.erl b/apps/emqx_bridge/test/emqx_bridge_testlib.erl new file mode 100644 index 000000000..47f29aa36 --- /dev/null +++ b/apps/emqx_bridge/test/emqx_bridge_testlib.erl @@ -0,0 +1,350 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_testlib). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +%% ct setup helpers + +init_per_suite(Config, Apps) -> + [{start_apps, Apps} | Config]. + +end_per_suite(Config) -> + emqx_mgmt_api_test_util:end_suite(), + ok = emqx_common_test_helpers:stop_apps([emqx_conf]), + ok = emqx_connector_test_helpers:stop_apps(lists:reverse(?config(start_apps, Config))), + _ = application:stop(emqx_connector), + ok. 
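+
+%% A concrete bridge suite typically delegates its setup/teardown to these
+%% helpers; a minimal sketch (the app list below is illustrative):
+%%   init_per_suite(Config) ->
+%%       emqx_bridge_testlib:init_per_suite(Config, [emqx_resource]).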
+ +init_per_group(TestGroup, BridgeType, Config) -> + ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"), + ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + application:load(emqx_bridge), + ok = emqx_common_test_helpers:start_apps([emqx_conf]), + ok = emqx_connector_test_helpers:start_apps(?config(start_apps, Config)), + {ok, _} = application:ensure_all_started(emqx_connector), + emqx_mgmt_api_test_util:init_suite(), + UniqueNum = integer_to_binary(erlang:unique_integer([positive])), + MQTTTopic = <<"mqtt/topic/", UniqueNum/binary>>, + [ + {proxy_host, ProxyHost}, + {proxy_port, ProxyPort}, + {mqtt_topic, MQTTTopic}, + {test_group, TestGroup}, + {bridge_type, BridgeType} + | Config + ]. + +end_per_group(Config) -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + delete_all_bridges(), + ok. + +init_per_testcase(TestCase, Config0, BridgeConfigCb) -> + ct:timetrap(timer:seconds(60)), + delete_all_bridges(), + UniqueNum = integer_to_binary(erlang:unique_integer()), + BridgeTopic = + << + (atom_to_binary(TestCase))/binary, + UniqueNum/binary + >>, + TestGroup = ?config(test_group, Config0), + Config = [{bridge_topic, BridgeTopic} | Config0], + {Name, ConfigString, BridgeConfig} = BridgeConfigCb( + TestCase, TestGroup, Config + ), + ok = snabbkaffe:start_trace(), + [ + {bridge_name, Name}, + {bridge_config_string, ConfigString}, + {bridge_config, BridgeConfig} + | Config + ]. + +end_per_testcase(_Testcase, Config) -> + case proplists:get_bool(skip_does_not_apply, Config) of + true -> + ok; + false -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + delete_all_bridges(), + %% in CI, apparently this needs more time since the + %% machines struggle with all the containers running... + emqx_common_test_helpers:call_janitor(60_000), + ok = snabbkaffe:stop(), + ok + end. + +delete_all_bridges() -> + lists:foreach( + fun(#{name := Name, type := Type}) -> + emqx_bridge:remove(Type, Name) + end, + emqx_bridge:list() + ). + +%% test helpers +parse_and_check(Config, ConfigString, Name) -> + BridgeType = ?config(bridge_type, Config), + {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), + hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}), + #{<<"bridges">> := #{BridgeType := #{Name := BridgeConfig}}} = RawConf, + BridgeConfig. + +resource_id(Config) -> + BridgeType = ?config(bridge_type, Config), + Name = ?config(bridge_name, Config), + emqx_bridge_resource:resource_id(BridgeType, Name). + +create_bridge(Config) -> + create_bridge(Config, _Overrides = #{}). + +create_bridge(Config, Overrides) -> + BridgeType = ?config(bridge_type, Config), + Name = ?config(bridge_name, Config), + BridgeConfig0 = ?config(bridge_config, Config), + BridgeConfig = emqx_utils_maps:deep_merge(BridgeConfig0, Overrides), + emqx_bridge:create(BridgeType, Name, BridgeConfig). + +create_bridge_api(Config) -> + create_bridge_api(Config, _Overrides = #{}). 
+ +create_bridge_api(Config, Overrides) -> + BridgeType = ?config(bridge_type, Config), + Name = ?config(bridge_name, Config), + BridgeConfig0 = ?config(bridge_config, Config), + BridgeConfig = emqx_utils_maps:deep_merge(BridgeConfig0, Overrides), + Params = BridgeConfig#{<<"type">> => BridgeType, <<"name">> => Name}, + Path = emqx_mgmt_api_test_util:api_path(["bridges"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("creating bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of + {ok, {Status, Headers, Body0}} -> + {ok, {Status, Headers, emqx_utils_json:decode(Body0, [return_maps])}}; + Error -> + Error + end, + ct:pal("bridge create result: ~p", [Res]), + Res. + +update_bridge_api(Config) -> + update_bridge_api(Config, _Overrides = #{}). + +update_bridge_api(Config, Overrides) -> + BridgeType = ?config(bridge_type, Config), + Name = ?config(bridge_name, Config), + BridgeConfig0 = ?config(bridge_config, Config), + BridgeConfig = emqx_utils_maps:deep_merge(BridgeConfig0, Overrides), + BridgeId = emqx_bridge_resource:bridge_id(BridgeType, Name), + Params = BridgeConfig#{<<"type">> => BridgeType, <<"name">> => Name}, + Path = emqx_mgmt_api_test_util:api_path(["bridges", BridgeId]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("updating bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(put, Path, "", AuthHeader, Params, Opts) of + {ok, {_Status, _Headers, Body0}} -> {ok, emqx_utils_json:decode(Body0, [return_maps])}; + Error -> Error + end, + ct:pal("bridge update result: ~p", [Res]), + Res. + +probe_bridge_api(Config) -> + probe_bridge_api(Config, _Overrides = #{}). + +probe_bridge_api(Config, _Overrides) -> + BridgeType = ?config(bridge_type, Config), + Name = ?config(bridge_name, Config), + BridgeConfig = ?config(bridge_config, Config), + Params = BridgeConfig#{<<"type">> => BridgeType, <<"name">> => Name}, + Path = emqx_mgmt_api_test_util:api_path(["bridges_probe"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("probing bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of + {ok, {{_, 204, _}, _Headers, _Body0} = Res0} -> {ok, Res0}; + Error -> Error + end, + ct:pal("bridge probe result: ~p", [Res]), + Res. + +create_rule_and_action_http(BridgeType, RuleTopic, Config) -> + BridgeName = ?config(bridge_name, Config), + BridgeId = emqx_bridge_resource:bridge_id(BridgeType, BridgeName), + Params = #{ + enable => true, + sql => <<"SELECT * FROM \"", RuleTopic/binary, "\"">>, + actions => [BridgeId] + }, + Path = emqx_mgmt_api_test_util:api_path(["rules"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + ct:pal("rule action params: ~p", [Params]), + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of + {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; + Error -> Error + end. 
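+
+%% Example usage (sketch; the bridge type and rule topic are illustrative):
+%%   {ok, #{<<"id">> := RuleId}} =
+%%       create_rule_and_action_http(<<"webhook">>, <<"some/topic">>, Config)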
+ +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ + +t_sync_query(Config, MakeMessageFun, IsSuccessCheck) -> + ResourceId = resource_id(Config), + ?check_trace( + begin + ?assertMatch({ok, _}, create_bridge_api(Config)), + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + Message = {send_message, MakeMessageFun()}, + IsSuccessCheck(emqx_resource:simple_sync_query(ResourceId, Message)), + ok + end, + [] + ), + ok. + +t_async_query(Config, MakeMessageFun, IsSuccessCheck) -> + ResourceId = resource_id(Config), + ReplyFun = + fun(Pid, Result) -> + Pid ! {result, Result} + end, + ?check_trace( + begin + ?assertMatch({ok, _}, create_bridge_api(Config)), + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + Message = {send_message, MakeMessageFun()}, + emqx_resource:query(ResourceId, Message, #{async_reply_fun => {ReplyFun, [self()]}}), + ok + end, + [] + ), + receive + {result, Result} -> IsSuccessCheck(Result) + after 5_000 -> + throw(timeout) + end, + ok. + +t_create_via_http(Config) -> + ?check_trace( + begin + ?assertMatch({ok, _}, create_bridge_api(Config)), + + %% lightweight matrix testing some configs + ?assertMatch( + {ok, _}, + update_bridge_api( + Config + ) + ), + ?assertMatch( + {ok, _}, + update_bridge_api( + Config + ) + ), + ok + end, + [] + ), + ok. + +t_start_stop(Config, StopTracePoint) -> + BridgeType = ?config(bridge_type, Config), + BridgeName = ?config(bridge_name, Config), + ResourceId = resource_id(Config), + ?check_trace( + begin + ?assertMatch({ok, _}, create_bridge(Config)), + %% Since the connection process is async, we give it some time to + %% stabilize and avoid flakiness. + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + + %% Check that the bridge probe API doesn't leak atoms. + ProbeRes0 = probe_bridge_api( + Config, + #{<<"resource_opts">> => #{<<"health_check_interval">> => <<"1s">>}} + ), + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0), + AtomsBefore = erlang:system_info(atom_count), + %% Probe again; shouldn't have created more atoms. + ProbeRes1 = probe_bridge_api( + Config, + #{<<"resource_opts">> => #{<<"health_check_interval">> => <<"1s">>}} + ), + + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1), + AtomsAfter = erlang:system_info(atom_count), + ?assertEqual(AtomsBefore, AtomsAfter), + + %% Now stop the bridge. + ?assertMatch( + {{ok, _}, {ok, _}}, + ?wait_async_action( + emqx_bridge:disable_enable(disable, BridgeType, BridgeName), + #{?snk_kind := StopTracePoint}, + 5_000 + ) + ), + + ok + end, + fun(Trace) -> + %% one for each probe, one for real + ?assertMatch([_, _, _], ?of_kind(StopTracePoint, Trace)), + ok + end + ), + ok. + +t_on_get_status(Config) -> + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + ResourceId = resource_id(Config), + ?assertMatch({ok, _}, create_bridge(Config)), + %% Since the connection process is async, we give it some time to + %% stabilize and avoid flakiness. 
+    ?retry(
+        _Sleep = 1_000,
+        _Attempts = 20,
+        ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
+    ),
+    emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
+        ct:sleep(500),
+        ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId))
+    end),
+    %% Check that it recovers itself.
+    ?retry(
+        _Sleep = 1_000,
+        _Attempts = 20,
+        ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
+    ),
+    ok.
diff --git a/apps/emqx_bridge/test/emqx_bridge_webhook_SUITE.erl b/apps/emqx_bridge/test/emqx_bridge_webhook_SUITE.erl
new file mode 100644
index 000000000..f08c87b6e
--- /dev/null
+++ b/apps/emqx_bridge/test/emqx_bridge_webhook_SUITE.erl
@@ -0,0 +1,268 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%--------------------------------------------------------------------
+
+-module(emqx_bridge_webhook_SUITE).
+
+%% This suite should contain testcases that are specific to the webhook
+%% bridge. There are also some test cases that implicitly test the webhook
+%% bridge in emqx_bridge_api_SUITE
+
+-compile(nowarn_export_all).
+-compile(export_all).
+
+-import(emqx_mgmt_api_test_util, [request/3, uri/1]).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("common_test/include/ct.hrl").
+
+all() ->
+    emqx_common_test_helpers:all(?MODULE).
+
+groups() ->
+    [].
+
+init_per_suite(_Config) ->
+    emqx_common_test_helpers:render_and_load_app_config(emqx_conf),
+    ok = emqx_common_test_helpers:start_apps([emqx_conf, emqx_bridge]),
+    ok = emqx_connector_test_helpers:start_apps([emqx_resource]),
+    {ok, _} = application:ensure_all_started(emqx_connector),
+    [].
+
+end_per_suite(_Config) ->
+    ok = emqx_config:put([bridges], #{}),
+    ok = emqx_config:put_raw([bridges], #{}),
+    ok = emqx_common_test_helpers:stop_apps([emqx_conf, emqx_bridge]),
+    ok = emqx_connector_test_helpers:stop_apps([emqx_resource]),
+    _ = application:stop(emqx_connector),
+    _ = application:stop(emqx_bridge),
+    ok.
+
+suite() ->
+    [{timetrap, {seconds, 60}}].
+
+%%------------------------------------------------------------------------------
+%% HTTP server for testing
+%% (Originally copied from emqx_bridge_api_SUITE)
+%%------------------------------------------------------------------------------
+start_http_server(HTTPServerConfig) ->
+    ct:pal("Start server\n"),
+    process_flag(trap_exit, true),
+    Parent = self(),
+    {ok, {Port, Sock}} = listen_on_random_port(),
+    Acceptor = spawn(fun() ->
+        accept_loop(Sock, Parent, HTTPServerConfig)
+    end),
+    timer:sleep(100),
+    #{port => Port, sock => Sock, acceptor => Acceptor}.
+
+stop_http_server(#{sock := Sock, acceptor := Acceptor}) ->
+    ct:pal("Stop server\n"),
+    exit(Acceptor, kill),
+    gen_tcp:close(Sock).
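+
+%% Every request the server parses is reported to the process that called
+%% start_http_server/1, so tests can collect requests with a plain receive:
+%%   receive {http_server, received, Req} -> Req after 1_000 -> no_request end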
+
+listen_on_random_port() ->
+    SockOpts = [binary, {active, false}, {packet, raw}, {reuseaddr, true}, {backlog, 1000}],
+    case gen_tcp:listen(0, SockOpts) of
+        {ok, Sock} ->
+            {ok, Port} = inet:port(Sock),
+            {ok, {Port, Sock}};
+        {error, Reason} when Reason =/= eaddrinuse ->
+            {error, Reason}
+    end.
+
+accept_loop(Sock, Parent, HTTPServerConfig) ->
+    process_flag(trap_exit, true),
+    case gen_tcp:accept(Sock) of
+        {ok, Conn} ->
+            spawn(fun() -> handle_fun_200_ok(Conn, Parent, HTTPServerConfig, <<>>) end),
+            %%gen_tcp:controlling_process(Conn, Handler),
+            accept_loop(Sock, Parent, HTTPServerConfig);
+        {error, closed} ->
+            %% socket owner died
+            ok
+    end.
+
+make_response(CodeStr, Str) ->
+    B = iolist_to_binary(Str),
+    iolist_to_binary(
+        io_lib:fwrite(
+            "HTTP/1.0 ~s\r\nContent-Type: text/html\r\nContent-Length: ~p\r\n\r\n~s",
+            [CodeStr, size(B), B]
+        )
+    ).
+
+handle_fun_200_ok(Conn, Parent, HTTPServerConfig, Acc) ->
+    ResponseDelayMS = maps:get(response_delay_ms, HTTPServerConfig, 0),
+    ct:pal("Waiting for request~n"),
+    case gen_tcp:recv(Conn, 0) of
+        {ok, ReqStr} ->
+            ct:pal("The http handler got request: ~p", [ReqStr]),
+            case parse_http_request(<<Acc/binary, ReqStr/binary>>) of
+                {ok, incomplete, NewAcc} ->
+                    handle_fun_200_ok(Conn, Parent, HTTPServerConfig, NewAcc);
+                {ok, Req, NewAcc} ->
+                    timer:sleep(ResponseDelayMS),
+                    Parent ! {http_server, received, Req},
+                    gen_tcp:send(Conn, make_response("200 OK", "Request OK")),
+                    handle_fun_200_ok(Conn, Parent, HTTPServerConfig, NewAcc)
+            end;
+        {error, closed} ->
+            ct:pal("http connection closed");
+        {error, Reason} ->
+            ct:pal("the http handler recv error: ~p", [Reason]),
+            timer:sleep(100),
+            gen_tcp:close(Conn)
+    end.
+
+parse_http_request(ReqStr) ->
+    try
+        parse_http_request_assertive(ReqStr)
+    catch
+        _:_ ->
+            {ok, incomplete, ReqStr}
+    end.
+
+parse_http_request_assertive(ReqStr0) ->
+    %% find body length
+    [_, LengthStr0] = string:split(ReqStr0, "content-length:"),
+    [LengthStr, _] = string:split(LengthStr0, "\r\n"),
+    Length = binary_to_integer(string:trim(LengthStr, both)),
+    %% split between multiple requests
+    [Method, ReqStr1] = string:split(ReqStr0, " ", leading),
+    [Path, ReqStr2] = string:split(ReqStr1, " ", leading),
+    [_ProtoVsn, ReqStr3] = string:split(ReqStr2, "\r\n", leading),
+    [_HeaderStr, Rest] = string:split(ReqStr3, "\r\n\r\n", leading),
+    <<Body:Length/binary, Remain/binary>> = Rest,
+    {ok, #{method => Method, path => Path, body => Body}, Remain}.
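+
+%% For example:
+%%   parse_http_request(<<"POST / HTTP/1.1\r\ncontent-length: 2\r\n\r\nhi">>)
+%% yields
+%%   {ok, #{method => <<"POST">>, path => <<"/">>, body => <<"hi">>}, <<>>}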
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Helper functions
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+bridge_async_config(#{port := Port} = Config) ->
+    Type = maps:get(type, Config, <<"webhook">>),
+    Name = maps:get(name, Config, atom_to_binary(?MODULE)),
+    PoolSize = maps:get(pool_size, Config, 1),
+    QueryMode = maps:get(query_mode, Config, "async"),
+    ConnectTimeout = maps:get(connect_timeout, Config, 1),
+    RequestTimeout = maps:get(request_timeout, Config, 10000),
+    ResourceRequestTimeout = maps:get(resource_request_timeout, Config, "infinity"),
+    ConfigString = io_lib:format(
+        "bridges.~s.~s {\n"
+        "  url = \"http://localhost:~p\"\n"
+        "  connect_timeout = \"~ps\"\n"
+        "  enable = true\n"
+        "  enable_pipelining = 100\n"
+        "  max_retries = 2\n"
+        "  method = \"post\"\n"
+        "  pool_size = ~p\n"
+        "  pool_type = \"random\"\n"
+        "  request_timeout = \"~ps\"\n"
+        "  body = \"${id}\""
+        "  resource_opts {\n"
+        "    inflight_window = 100\n"
+        "    auto_restart_interval = \"60s\"\n"
+        "    health_check_interval = \"15s\"\n"
+        "    max_buffer_bytes = \"1GB\"\n"
+        "    query_mode = \"~s\"\n"
+        "    request_timeout = \"~s\"\n"
+        "    start_after_created = \"true\"\n"
+        "    start_timeout = \"5s\"\n"
+        "    worker_pool_size = \"1\"\n"
+        "  }\n"
+        "  ssl {\n"
+        "    enable = false\n"
+        "  }\n"
+        "}\n",
+        [
+            Type,
+            Name,
+            Port,
+            ConnectTimeout,
+            PoolSize,
+            RequestTimeout,
+            QueryMode,
+            ResourceRequestTimeout
+        ]
+    ),
+    ct:pal(ConfigString),
+    parse_and_check(ConfigString, Type, Name).
+
+parse_and_check(ConfigString, BridgeType, Name) ->
+    {ok, RawConf} = hocon:binary(ConfigString, #{format => map}),
+    hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}),
+    #{<<"bridges">> := #{BridgeType := #{Name := RetConfig}}} = RawConf,
+    RetConfig.
+
+make_bridge(Config) ->
+    Type = <<"webhook">>,
+    Name = atom_to_binary(?MODULE),
+    BridgeConfig = bridge_async_config(Config#{
+        name => Name,
+        type => Type
+    }),
+    {ok, _} = emqx_bridge:create(
+        Type,
+        Name,
+        BridgeConfig
+    ),
+    emqx_bridge_resource:bridge_id(Type, Name).
+
+%%------------------------------------------------------------------------------
+%% Testcases
+%%------------------------------------------------------------------------------
+
+%% This test ensures that https://emqx.atlassian.net/browse/CI-62 is fixed.
+%% When the connection timed out, all the queued requests were dropped.
+t_send_async_connection_timeout(_Config) ->
+    ResponseDelayMS = 90,
+    #{port := Port} = Server = start_http_server(#{response_delay_ms => 900}),
+    % Port = 9000,
+    BridgeID = make_bridge(#{
+        port => Port,
+        pool_size => 1,
+        query_mode => "async",
+        connect_timeout => ResponseDelayMS * 2,
+        request_timeout => 10000,
+        resource_request_timeout => "infinity"
+    }),
+    NumberOfMessagesToSend = 10,
+    [
+        emqx_bridge:send_message(BridgeID, #{<<"id">> => Id})
+     || Id <- lists:seq(1, NumberOfMessagesToSend)
+    ],
+    %% Make sure the server receives all messages
+    ct:pal("Sent messages\n"),
+    MessageIDs = maps:from_keys(lists:seq(1, NumberOfMessagesToSend), void),
+    receive_request_notifications(MessageIDs, ResponseDelayMS),
+    stop_http_server(Server),
+    ok.
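+
+%% Waits until every message ID has been seen by the HTTP server at least
+%% once; duplicates are tolerated, since the bridge may retry after a timeout.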
+receive_request_notifications(MessageIDs, _ResponseDelay) when map_size(MessageIDs) =:= 0 ->
+    ok;
+receive_request_notifications(MessageIDs, ResponseDelay) ->
+    receive
+        {http_server, received, Req} ->
+            RemainingMessageIDs = remove_message_id(MessageIDs, Req),
+            receive_request_notifications(RemainingMessageIDs, ResponseDelay)
+    after (30 * 1000) ->
+        ct:pal("Waited too long but did not get any message\n"),
+        ct:fail("Not all requests reached the server at least once")
+    end.
+
+remove_message_id(MessageIDs, #{body := IDBin}) ->
+    ID = erlang:binary_to_integer(IDBin),
+    %% It is acceptable to get the same message more than once
+    maps:without([ID], MessageIDs).
diff --git a/apps/emqx_bridge_cassandra/BSL.txt b/apps/emqx_bridge_cassandra/BSL.txt
new file mode 100644
index 000000000..0acc0e696
--- /dev/null
+++ b/apps/emqx_bridge_cassandra/BSL.txt
@@ -0,0 +1,94 @@
+Business Source License 1.1
+
+Licensor:            Hangzhou EMQ Technologies Co., Ltd.
+Licensed Work:       EMQX Enterprise Edition
+                     The Licensed Work is (c) 2023
+                     Hangzhou EMQ Technologies Co., Ltd.
+Additional Use Grant: Students and educators are granted right to copy,
+                      modify, and create derivative work for research
+                      or education.
+Change Date:         2027-02-01
+Change License:      Apache License, Version 2.0
+
+For information about alternative licensing arrangements for the Software,
+please contact Licensor: https://www.emqx.com/en/contact
+
+Notice
+
+The Business Source License (this document, or the “License”) is not an Open
+Source license. However, the Licensed Work will eventually be made available
+under an Open Source License, as stated in this License.
+
+License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
+“Business Source License” is a trademark of MariaDB Corporation Ab.
+
+-----------------------------------------------------------------------------
+
+Business Source License 1.1
+
+Terms
+
+The Licensor hereby grants you the right to copy, modify, create derivative
+works, redistribute, and make non-production use of the Licensed Work. The
+Licensor may make an Additional Use Grant, above, permitting limited
+production use.
+
+Effective on the Change Date, or the fourth anniversary of the first publicly
+available distribution of a specific version of the Licensed Work under this
+License, whichever comes first, the Licensor hereby grants you rights under
+the terms of the Change License, and the rights granted in the paragraph
+above terminate.
+
+If your use of the Licensed Work does not comply with the requirements
+currently in effect as described in this License, you must purchase a
+commercial license from the Licensor, its affiliated entities, or authorized
+resellers, or you must refrain from using the Licensed Work.
+
+All copies of the original and modified Licensed Work, and derivative works
+of the Licensed Work, are subject to this License. This License applies
+separately for each version of the Licensed Work and the Change Date may vary
+for each version of the Licensed Work released by Licensor.
+
+You must conspicuously display this License on each original or modified copy
+of the Licensed Work. If you receive the Licensed Work in original or
+modified form from a third party, the terms and conditions set forth in this
+License apply to your use of that work.
+
+Any use of the Licensed Work in violation of this License will automatically
+terminate your rights under this License for the current and all other
+versions of the Licensed Work.
+
+This License does not grant you any right in any trademark or logo of
+Licensor or its affiliates (provided that you may use a trademark or logo of
+Licensor as expressly required by this License).
+
+TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
+AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
+EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
+TITLE.
+
+MariaDB hereby grants you permission to use this License’s text to license
+your works, and to refer to it using the trademark “Business Source License”,
+as long as you comply with the Covenants of Licensor below.
+
+Covenants of Licensor
+
+In consideration of the right to use this License’s text and the “Business
+Source License” name and trademark, Licensor covenants to MariaDB, and to all
+other recipients of the licensed work to be provided by Licensor:
+
+1. To specify as the Change License the GPL Version 2.0 or any later version,
+   or a license that is compatible with GPL Version 2.0 or a later version,
+   where “compatible” means that software provided under the Change License can
+   be included in a program with software provided under GPL Version 2.0 or a
+   later version. Licensor may specify additional Change Licenses without
+   limitation.
+
+2. To either: (a) specify an additional grant of rights to use that does not
+   impose any additional restriction on the right granted in this License, as
+   the Additional Use Grant; or (b) insert the text “None”.
+
+3. To specify a Change Date.
+
+4. Not to modify this License in any other way.
diff --git a/apps/emqx_bridge_cassandra/README.md b/apps/emqx_bridge_cassandra/README.md
new file mode 100644
index 000000000..c5a2609a5
--- /dev/null
+++ b/apps/emqx_bridge_cassandra/README.md
@@ -0,0 +1,41 @@
+# EMQX Cassandra Bridge
+
+[Apache Cassandra](https://github.com/apache/cassandra) is an open-source, distributed
+NoSQL database management system that is designed to manage large amounts of structured
+and semi-structured data across many commodity servers, providing high availability
+with no single point of failure.
+It is commonly used in web and mobile applications, IoT, and other systems that
+require storing, querying, and analyzing large amounts of data.
+
+This application connects EMQX to Cassandra. Users can create a rule and
+easily ingest IoT data into Cassandra by leveraging
+[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html);
+see the sketch at the end of this README.
+
+
+
+# HTTP APIs
+
+- Several APIs are provided for bridge management; they include creating,
+  updating, getting, stopping or restarting, and listing bridges.
+
+  Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) for more detailed information.
+
+
+# Contributing
+
+Please see our [contributing.md](../../CONTRIBUTING.md).
+
+
+# License
+
+EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).
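+
+# Example
+
+A minimal end-to-end sketch (the rule topic, keyspace, and table below are
+illustrative): create a rule that selects from an MQTT topic, e.g.
+`SELECT * FROM "t/#"`, attach the Cassandra bridge as its action, and use a
+`cql` template such as the bridge's default:
+
+```sql
+insert into mqtt_msg(topic, msgid, sender, qos, payload, arrived, retain)
+values (${topic}, ${id}, ${clientid}, ${qos}, ${payload}, ${timestamp}, ${flags.retain})
+```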
+ diff --git a/apps/emqx_bridge_cassandra/docker-ct b/apps/emqx_bridge_cassandra/docker-ct new file mode 100644 index 000000000..2626b4068 --- /dev/null +++ b/apps/emqx_bridge_cassandra/docker-ct @@ -0,0 +1,2 @@ +toxiproxy +cassandra diff --git a/lib-ee/emqx_ee_connector/include/emqx_ee_connector.hrl b/apps/emqx_bridge_cassandra/include/emqx_bridge_cassandra.hrl similarity index 53% rename from lib-ee/emqx_ee_connector/include/emqx_ee_connector.hrl rename to apps/emqx_bridge_cassandra/include/emqx_bridge_cassandra.hrl index 4b6fbbd92..eef7c5d2b 100644 --- a/lib-ee/emqx_ee_connector/include/emqx_ee_connector.hrl +++ b/apps/emqx_bridge_cassandra/include/emqx_bridge_cassandra.hrl @@ -1,5 +1,5 @@ %%-------------------------------------------------------------------- -%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. -%%------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- --define(INFLUXDB_DEFAULT_PORT, 8086). +-define(CASSANDRA_DEFAULT_PORT, 9042). diff --git a/apps/emqx_bridge_cassandra/rebar.config b/apps/emqx_bridge_cassandra/rebar.config new file mode 100644 index 000000000..b8bfc7dd6 --- /dev/null +++ b/apps/emqx_bridge_cassandra/rebar.config @@ -0,0 +1,11 @@ +%% -*- mode: erlang; -*- +{erl_opts, [debug_info]}. +{deps, [ {ecql, {git, "https://github.com/emqx/ecql.git", {tag, "v0.5.1"}}} + , {emqx_connector, {path, "../../apps/emqx_connector"}} + , {emqx_resource, {path, "../../apps/emqx_resource"}} + , {emqx_bridge, {path, "../../apps/emqx_bridge"}} + ]}. + +{shell, [ + {apps, [emqx_bridge_cassandra]} +]}. diff --git a/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.app.src b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.app.src new file mode 100644 index 000000000..1bde274f3 --- /dev/null +++ b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_cassandra, [ + {description, "EMQX Enterprise Cassandra Bridge"}, + {vsn, "0.1.1"}, + {registered, []}, + {applications, [kernel, stdlib, ecql]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.erl b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.erl new file mode 100644 index 000000000..e8f7d50ce --- /dev/null +++ b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.erl @@ -0,0 +1,117 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_cassandra). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("emqx_bridge/include/emqx_bridge.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +%% schema examples +-export([ + conn_bridge_examples/1, + values/2, + fields/2 +]). + +%% schema +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +-define(DEFAULT_CQL, << + "insert into mqtt_msg(topic, msgid, sender, qos, payload, arrived, retain) " + "values (${topic}, ${id}, ${clientid}, ${qos}, ${payload}, ${timestamp}, ${flags.retain})" +>>). 
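+
+%% The ${...} placeholders above are rule-engine message fields; the template
+%% is pre-processed into a prepared statement and the fields are bound as
+%% parameters at query time (see emqx_bridge_cassandra_connector).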
+ +%%-------------------------------------------------------------------- +%% schema examples + +conn_bridge_examples(Method) -> + [ + #{ + <<"cassandra">> => #{ + summary => <<"Cassandra Bridge">>, + value => values(Method, cassandra) + } + } + ]. + +%% no difference in get/post/put method +values(_Method, Type) -> + #{ + enable => true, + type => Type, + name => <<"foo">>, + servers => <<"127.0.0.1:9042">>, + keyspace => <<"mqtt">>, + pool_size => 8, + username => <<"root">>, + password => <<"******">>, + cql => ?DEFAULT_CQL, + local_topic => <<"local/topic/#">>, + resource_opts => #{ + worker_pool_size => 8, + health_check_interval => ?HEALTHCHECK_INTERVAL_RAW, + auto_restart_interval => ?AUTO_RESTART_INTERVAL_RAW, + batch_size => ?DEFAULT_BATCH_SIZE, + batch_time => ?DEFAULT_BATCH_TIME, + query_mode => sync, + max_buffer_bytes => ?DEFAULT_BUFFER_BYTES + } + }. + +%%-------------------------------------------------------------------- +%% schema + +namespace() -> "bridge_cassa". + +roots() -> []. + +fields("config") -> + [ + {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, + {cql, + mk( + binary(), + #{desc => ?DESC("cql_template"), default => ?DEFAULT_CQL, format => <<"sql">>} + )}, + {local_topic, + mk( + binary(), + #{desc => ?DESC("local_topic"), default => undefined} + )} + ] ++ emqx_resource_schema:fields("resource_opts") ++ + (emqx_bridge_cassandra_connector:fields(config) -- + emqx_connector_schema_lib:prepare_statement_fields()); +fields("post") -> + fields("post", cassandra); +fields("put") -> + fields("config"); +fields("get") -> + emqx_bridge_schema:status_fields() ++ fields("post"). + +fields("post", Type) -> + [type_field(Type), name_field() | fields("config")]. + +desc("config") -> + ?DESC("desc_config"); +desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> + ["Configuration for Cassandra using `", string:to_upper(Method), "` method."]; +desc(_) -> + undefined. + +%%-------------------------------------------------------------------- +%% utils + +type_field(Type) -> + {type, mk(enum([Type]), #{required => true, desc => ?DESC("desc_type")})}. + +name_field() -> + {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. diff --git a/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_connector.erl b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_connector.erl new file mode 100644 index 000000000..a3032a9df --- /dev/null +++ b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_connector.erl @@ -0,0 +1,513 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_cassandra_connector). + +-behaviour(emqx_resource). + +-include_lib("emqx_connector/include/emqx_connector.hrl"). +-include("emqx_bridge_cassandra.hrl"). +-include_lib("typerefl/include/types.hrl"). +-include_lib("emqx/include/logger.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +%% schema +-export([roots/0, fields/1]). + +%% callbacks of behaviour emqx_resource +-export([ + callback_mode/0, + on_start/2, + on_stop/2, + on_query/3, + on_query_async/4, + on_batch_query/3, + on_batch_query_async/4, + on_get_status/2 +]). + +%% callbacks of ecpool +-export([ + connect/1, + prepare_cql_to_conn/2 +]). + +%% callbacks for query executing +-export([query/4, prepared_query/4, batch_query/3]). 
+ +-export([do_get_status/1]). + +-type prepares() :: #{atom() => binary()}. +-type params_tokens() :: #{atom() => list()}. + +-type state() :: + #{ + pool_name := binary(), + prepare_cql := prepares(), + params_tokens := params_tokens(), + %% returned by ecql:prepare/2 + prepare_statement := binary() + }. + +-define(DEFAULT_SERVER_OPTION, #{default_port => ?CASSANDRA_DEFAULT_PORT}). + +%%-------------------------------------------------------------------- +%% schema + +roots() -> + [{config, #{type => hoconsc:ref(?MODULE, config)}}]. + +fields(config) -> + cassandra_db_fields() ++ + emqx_connector_schema_lib:ssl_fields() ++ + emqx_connector_schema_lib:prepare_statement_fields(). + +cassandra_db_fields() -> + [ + {servers, servers()}, + {keyspace, fun keyspace/1}, + {pool_size, fun emqx_connector_schema_lib:pool_size/1}, + {username, fun emqx_connector_schema_lib:username/1}, + {password, fun emqx_connector_schema_lib:password/1}, + {auto_reconnect, fun emqx_connector_schema_lib:auto_reconnect/1} + ]. + +servers() -> + Meta = #{desc => ?DESC("servers")}, + emqx_schema:servers_sc(Meta, ?DEFAULT_SERVER_OPTION). + +keyspace(type) -> binary(); +keyspace(desc) -> ?DESC("keyspace"); +keyspace(required) -> true; +keyspace(_) -> undefined. + +%%-------------------------------------------------------------------- +%% callbacks for emqx_resource + +callback_mode() -> async_if_possible. + +-spec on_start(binary(), hoconsc:config()) -> {ok, state()} | {error, _}. +on_start( + InstId, + #{ + servers := Servers0, + keyspace := Keyspace, + username := Username, + pool_size := PoolSize, + ssl := SSL + } = Config +) -> + ?SLOG(info, #{ + msg => "starting_cassandra_connector", + connector => InstId, + config => emqx_utils:redact(Config) + }), + Servers = + lists:map( + fun(#{hostname := Host, port := Port}) -> + {Host, Port} + end, + emqx_schema:parse_servers(Servers0, ?DEFAULT_SERVER_OPTION) + ), + + Options = [ + {nodes, Servers}, + {username, Username}, + {password, emqx_secret:wrap(maps:get(password, Config, ""))}, + {keyspace, Keyspace}, + {auto_reconnect, ?AUTO_RECONNECT_INTERVAL}, + {pool_size, PoolSize} + ], + + SslOpts = + case maps:get(enable, SSL) of + true -> + [ + %% note: type defined at ecql:option/0 + {ssl, emqx_tls_lib:to_client_opts(SSL)} + ]; + false -> + [] + end, + State = parse_prepare_cql(Config), + case emqx_resource_pool:start(InstId, ?MODULE, Options ++ SslOpts) of + ok -> + {ok, init_prepare(State#{pool_name => InstId, prepare_statement => #{}})}; + {error, Reason} -> + ?tp( + cassandra_connector_start_failed, + #{error => Reason} + ), + {error, Reason} + end. + +on_stop(InstId, #{pool_name := PoolName}) -> + ?SLOG(info, #{ + msg => "stopping_cassandra_connector", + connector => InstId + }), + emqx_resource_pool:stop(PoolName). + +-type request() :: + % emqx_bridge.erl + {send_message, Params :: map()} + % common query + | {query, SQL :: binary()} + | {query, SQL :: binary(), Params :: map()}. + +-spec on_query( + emqx_resource:resource_id(), + request(), + state() +) -> ok | {ok, ecql:cql_result()} | {error, {recoverable_error | unrecoverable_error, term()}}. +on_query( + InstId, + Request, + State +) -> + do_single_query(InstId, Request, sync, State). + +-spec on_query_async( + emqx_resource:resource_id(), + request(), + {function(), list()}, + state() +) -> ok | {error, {recoverable_error | unrecoverable_error, term()}}. +on_query_async( + InstId, + Request, + Callback, + State +) -> + do_single_query(InstId, Request, {async, Callback}, State). 
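+
+%% For reference, a raw (non-templated) CQL query takes the same path; a
+%% minimal sketch (the resource id is assumed to be bound elsewhere):
+%%   {ok, _} = emqx_resource:simple_sync_query(
+%%       ResourceId, {query, <<"SELECT now() FROM system.local">>}
+%%   )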
+do_single_query(
+    InstId,
+    Request,
+    Async,
+    #{pool_name := PoolName} = State
+) ->
+    {Type, PreparedKeyOrSQL, Params} = parse_request_to_cql(Request),
+    ?tp(
+        debug,
+        cassandra_connector_received_cql_query,
+        #{
+            connector => InstId,
+            type => Type,
+            params => Params,
+            prepared_key_or_cql => PreparedKeyOrSQL,
+            state => State
+        }
+    ),
+    {PreparedKeyOrSQL1, Data} = proc_cql_params(Type, PreparedKeyOrSQL, Params, State),
+    Res = exec_cql_query(InstId, PoolName, Type, Async, PreparedKeyOrSQL1, Data),
+    handle_result(Res).
+
+-spec on_batch_query(
+    emqx_resource:resource_id(),
+    [request()],
+    state()
+) -> ok | {error, {recoverable_error | unrecoverable_error, term()}}.
+on_batch_query(
+    InstId,
+    Requests,
+    State
+) ->
+    do_batch_query(InstId, Requests, sync, State).
+
+-spec on_batch_query_async(
+    emqx_resource:resource_id(),
+    [request()],
+    {function(), list()},
+    state()
+) -> ok | {error, {recoverable_error | unrecoverable_error, term()}}.
+on_batch_query_async(
+    InstId,
+    Requests,
+    Callback,
+    State
+) ->
+    do_batch_query(InstId, Requests, {async, Callback}, State).
+
+do_batch_query(
+    InstId,
+    Requests,
+    Async,
+    #{pool_name := PoolName} = State
+) ->
+    CQLs =
+        lists:map(
+            fun(Request) ->
+                {Type, PreparedKeyOrSQL, Params} = parse_request_to_cql(Request),
+                proc_cql_params(Type, PreparedKeyOrSQL, Params, State)
+            end,
+            Requests
+        ),
+    ?tp(
+        debug,
+        cassandra_connector_received_cql_batch_query,
+        #{
+            connector => InstId,
+            cqls => CQLs,
+            state => State
+        }
+    ),
+    Res = exec_cql_batch_query(InstId, PoolName, Async, CQLs),
+    handle_result(Res).
+
+parse_request_to_cql({send_message, Params}) ->
+    {prepared_query, _Key = send_message, Params};
+parse_request_to_cql({query, SQL}) ->
+    parse_request_to_cql({query, SQL, #{}});
+parse_request_to_cql({query, SQL, Params}) ->
+    {query, SQL, Params}.
+
+proc_cql_params(
+    prepared_query,
+    PreparedKey0,
+    Params,
+    #{prepare_statement := Prepares, params_tokens := ParamsTokens}
+) ->
+    %% assert
+    _PreparedKey = maps:get(PreparedKey0, Prepares),
+    Tokens = maps:get(PreparedKey0, ParamsTokens),
+    {PreparedKey0, assign_type_for_params(emqx_plugin_libs_rule:proc_sql(Tokens, Params))};
+proc_cql_params(query, SQL, Params, _State) ->
+    {SQL1, Tokens} = emqx_plugin_libs_rule:preproc_sql(SQL, '?'),
+    {SQL1, assign_type_for_params(emqx_plugin_libs_rule:proc_sql(Tokens, Params))}.
+
+exec_cql_query(InstId, PoolName, Type, Async, PreparedKey, Data) when
+    Type == query; Type == prepared_query
+->
+    case exec(PoolName, {?MODULE, Type, [Async, PreparedKey, Data]}) of
+        {error, Reason} = Result ->
+            ?tp(
+                error,
+                cassandra_connector_query_return,
+                #{connector => InstId, error => Reason}
+            ),
+            Result;
+        Result ->
+            ?tp(debug, cassandra_connector_query_return, #{result => Result}),
+            Result
+    end.
+
+exec_cql_batch_query(InstId, PoolName, Async, CQLs) ->
+    case exec(PoolName, {?MODULE, batch_query, [Async, CQLs]}) of
+        {error, Reason} = Result ->
+            ?tp(
+                error,
+                cassandra_connector_query_return,
+                #{connector => InstId, error => Reason}
+            ),
+            Result;
+        Result ->
+            ?tp(debug, cassandra_connector_query_return, #{result => Result}),
+            Result
+    end.
+
+%% Pick one of the pool members to do the query.
+%% Using the 'no_handover' strategy,
+%% meaning the buffer worker does the gen_server call or gen_server cast
+%% towards the connection process.
+exec(PoolName, Query) ->
+    ecpool:pick_and_do(PoolName, Query, no_handover).
+ +on_get_status(_InstId, #{pool_name := PoolName} = State) -> + case emqx_resource_pool:health_check_workers(PoolName, fun ?MODULE:do_get_status/1) of + true -> + case do_check_prepares(State) of + ok -> + connected; + {ok, NState} -> + %% return new state with prepared statements + {connected, NState}; + false -> + %% do not log error, it is logged in prepare_cql_to_conn + connecting + end; + false -> + connecting + end. + +do_get_status(Conn) -> + ok == element(1, ecql:query(Conn, "SELECT count(1) AS T FROM system.local")). + +do_check_prepares(#{prepare_cql := Prepares}) when is_map(Prepares) -> + ok; +do_check_prepares(State = #{pool_name := PoolName, prepare_cql := {error, Prepares}}) -> + %% retry to prepare + case prepare_cql(Prepares, PoolName) of + {ok, Sts} -> + %% remove the error + {ok, State#{prepare_cql => Prepares, prepare_statement := Sts}}; + _Error -> + false + end. + +%%-------------------------------------------------------------------- +%% callbacks query + +query(Conn, sync, CQL, Params) -> + ecql:query(Conn, CQL, Params); +query(Conn, {async, Callback}, CQL, Params) -> + ok = ecql:async_query(Conn, CQL, Params, one, Callback), + %% return the connection pid for buffer worker to monitor + {ok, Conn}. + +prepared_query(Conn, sync, PreparedKey, Params) -> + ecql:execute(Conn, PreparedKey, Params); +prepared_query(Conn, {async, Callback}, PreparedKey, Params) -> + ok = ecql:async_execute(Conn, PreparedKey, Params, Callback), + %% return the connection pid for buffer worker to monitor + {ok, Conn}. + +batch_query(Conn, sync, Rows) -> + ecql:batch(Conn, Rows); +batch_query(Conn, {async, Callback}, Rows) -> + ok = ecql:async_batch(Conn, Rows, Callback), + %% return the connection pid for buffer worker to monitor + {ok, Conn}. + +%%-------------------------------------------------------------------- +%% callbacks for ecpool + +connect(Opts) -> + case ecql:connect(conn_opts(Opts)) of + {ok, _Conn} = Ok -> + Ok; + {error, Reason} -> + {error, Reason} + end. + +conn_opts(Opts) -> + conn_opts(Opts, []). + +conn_opts([], Acc) -> + Acc; +conn_opts([{password, Password} | Opts], Acc) -> + conn_opts(Opts, [{password, emqx_secret:unwrap(Password)} | Acc]); +conn_opts([Opt | Opts], Acc) -> + conn_opts(Opts, [Opt | Acc]). + +%%-------------------------------------------------------------------- +%% prepare + +%% XXX: hardcode +%% note: the `cql` param is passed by emqx_ee_bridge_cassa +parse_prepare_cql(#{cql := SQL}) -> + parse_prepare_cql([{send_message, SQL}], #{}, #{}); +parse_prepare_cql(_) -> + #{prepare_cql => #{}, params_tokens => #{}}. + +parse_prepare_cql([{Key, H} | T], Prepares, Tokens) -> + {PrepareSQL, ParamsTokens} = emqx_plugin_libs_rule:preproc_sql(H, '?'), + parse_prepare_cql( + T, Prepares#{Key => PrepareSQL}, Tokens#{Key => ParamsTokens} + ); +parse_prepare_cql([], Prepares, Tokens) -> + #{ + prepare_cql => Prepares, + params_tokens => Tokens + }. + +init_prepare(State = #{prepare_cql := Prepares, pool_name := PoolName}) -> + case maps:size(Prepares) of + 0 -> + State; + _ -> + case prepare_cql(Prepares, PoolName) of + {ok, Sts} -> + State#{prepare_statement := Sts}; + Error -> + ?tp( + error, + cassandra_prepare_cql_failed, + #{prepares => Prepares, reason => Error} + ), + %% mark the prepare_cql as failed + State#{prepare_cql => {error, Prepares}} + end + end. 
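+
+%% Illustrative state shapes around statement preparation (keys as defined in
+%% state(); the statement reference value is hypothetical). After a successful
+%% init_prepare/1:
+%%
+%%   #{prepare_cql => #{send_message => <<"insert into ... values (?, ?, ?)">>},
+%%     prepare_statement => #{send_message => StmtRef},
+%%     ...}
+%%
+%% On failure, prepare_cql is set to {error, Prepares}, which is exactly what
+%% do_check_prepares/1 above pattern-matches on to retry preparation during
+%% periodic health checks.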
+ +prepare_cql(Prepares, PoolName) when is_map(Prepares) -> + prepare_cql(maps:to_list(Prepares), PoolName); +prepare_cql(Prepares, PoolName) -> + case do_prepare_cql(Prepares, PoolName) of + {ok, _Sts} = Ok -> + %% prepare for reconnect + ecpool:add_reconnect_callback(PoolName, {?MODULE, prepare_cql_to_conn, [Prepares]}), + Ok; + Error -> + Error + end. + +do_prepare_cql(Prepares, PoolName) -> + do_prepare_cql(ecpool:workers(PoolName), Prepares, #{}). + +do_prepare_cql([{_Name, Worker} | T], Prepares, _LastSts) -> + {ok, Conn} = ecpool_worker:client(Worker), + case prepare_cql_to_conn(Conn, Prepares) of + {ok, Sts} -> + do_prepare_cql(T, Prepares, Sts); + Error -> + Error + end; +do_prepare_cql([], _Prepares, LastSts) -> + {ok, LastSts}. + +prepare_cql_to_conn(Conn, Prepares) -> + prepare_cql_to_conn(Conn, Prepares, #{}). + +prepare_cql_to_conn(Conn, [], Statements) when is_pid(Conn) -> {ok, Statements}; +prepare_cql_to_conn(Conn, [{Key, SQL} | PrepareList], Statements) when is_pid(Conn) -> + ?SLOG(info, #{msg => "cassandra_prepare_cql", name => Key, prepare_cql => SQL}), + case ecql:prepare(Conn, Key, SQL) of + {ok, Statement} -> + prepare_cql_to_conn(Conn, PrepareList, Statements#{Key => Statement}); + {error, Error} = Other -> + ?SLOG(error, #{ + msg => "cassandra_prepare_cql_failed", + worker_pid => Conn, + name => Key, + prepare_cql => SQL, + error => Error + }), + Other + end. + +handle_result({error, disconnected}) -> + {error, {recoverable_error, disconnected}}; +handle_result({error, Error}) -> + {error, {unrecoverable_error, Error}}; +handle_result(Res) -> + Res. + +%%-------------------------------------------------------------------- +%% utils + +%% see ecql driver requirements +assign_type_for_params(Params) -> + assign_type_for_params(Params, []). + +assign_type_for_params([], Acc) -> + lists:reverse(Acc); +assign_type_for_params([Param | More], Acc) -> + assign_type_for_params(More, [maybe_assign_type(Param) | Acc]). + +maybe_assign_type(true) -> + {int, 1}; +maybe_assign_type(false) -> + {int, 0}; +maybe_assign_type(V) when is_binary(V); is_list(V); is_atom(V) -> V; +maybe_assign_type(V) when is_integer(V) -> + %% The max value of signed int(4) is 2147483647 + case V > 2147483647 orelse V < -2147483647 of + true -> {bigint, V}; + false -> {int, V} + end; +maybe_assign_type(V) when is_float(V) -> {double, V}; +maybe_assign_type(V) -> + V. diff --git a/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_SUITE.erl b/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_SUITE.erl new file mode 100644 index 000000000..79220321e --- /dev/null +++ b/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_SUITE.erl @@ -0,0 +1,659 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_cassandra_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +% SQL definitions +-define(SQL_BRIDGE, + "insert into mqtt_msg_test(topic, payload, arrived) " + "values (${topic}, ${payload}, ${timestamp})" +). +-define(SQL_CREATE_TABLE, + "" + "\n" + "CREATE TABLE mqtt.mqtt_msg_test (\n" + " topic text,\n" + " payload text,\n" + " arrived timestamp,\n" + " PRIMARY KEY (topic)\n" + ");\n" + "" +). +-define(SQL_DROP_TABLE, "DROP TABLE mqtt.mqtt_msg_test"). 
+-define(SQL_DELETE, "TRUNCATE mqtt.mqtt_msg_test").
+-define(SQL_SELECT, "SELECT payload FROM mqtt.mqtt_msg_test").
+
+% DB defaults
+-define(CASSA_KEYSPACE, "mqtt").
+-define(CASSA_USERNAME, "cassandra").
+-define(CASSA_PASSWORD, "cassandra").
+-define(BATCH_SIZE, 10).
+
+%% cert files for client
+-define(CERT_ROOT,
+    filename:join([emqx_common_test_helpers:proj_root(), ".ci", "docker-compose-file", "certs"])
+).
+
+-define(CAFILE, filename:join(?CERT_ROOT, ["ca.crt"])).
+-define(CERTFILE, filename:join(?CERT_ROOT, ["client.pem"])).
+-define(KEYFILE, filename:join(?CERT_ROOT, ["client.key"])).
+
+%% How to run it locally:
+%%  1. Start all deps services
+%%     sudo docker compose -f .ci/docker-compose-file/docker-compose.yaml \
+%%                         -f .ci/docker-compose-file/docker-compose-cassandra.yaml \
+%%                         -f .ci/docker-compose-file/docker-compose-toxiproxy.yaml \
+%%                         up --build
+%%
+%%  2. Run the test cases with the special environment variables
+%%     CASSA_TCP_HOST=127.0.0.1 CASSA_TCP_PORT=19042 \
+%%     CASSA_TLS_HOST=127.0.0.1 CASSA_TLS_PORT=19142 \
+%%     PROXY_HOST=127.0.0.1 ./rebar3 as test ct -c -v --name ct@127.0.0.1 \
+%%       --suite apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_SUITE.erl
+%%
+
+%%------------------------------------------------------------------------------
+%% CT boilerplate
+%%------------------------------------------------------------------------------
+
+all() ->
+    [
+        {group, tcp},
+        {group, tls}
+    ].
+
+groups() ->
+    TCs = emqx_common_test_helpers:all(?MODULE),
+    NonBatchCases = [t_write_timeout, t_simple_sql_query],
+    QueryModeGroups = [{group, async}, {group, sync}],
+    BatchingGroups = [
+        {group, with_batch},
+        {group, without_batch}
+    ],
+    [
+        {tcp, QueryModeGroups},
+        {tls, QueryModeGroups},
+        {async, BatchingGroups},
+        {sync, BatchingGroups},
+        {with_batch, TCs -- NonBatchCases},
+        {without_batch, TCs}
+    ].
+
+init_per_group(tcp, Config) ->
+    Host = os:getenv("CASSA_TCP_HOST", "toxiproxy"),
+    Port = list_to_integer(os:getenv("CASSA_TCP_PORT", "9042")),
+    [
+        {cassa_host, Host},
+        {cassa_port, Port},
+        {enable_tls, false},
+        {proxy_name, "cassa_tcp"}
+        | Config
+    ];
+init_per_group(tls, Config) ->
+    Host = os:getenv("CASSA_TLS_HOST", "toxiproxy"),
+    Port = list_to_integer(os:getenv("CASSA_TLS_PORT", "9142")),
+    [
+        {cassa_host, Host},
+        {cassa_port, Port},
+        {enable_tls, true},
+        {proxy_name, "cassa_tls"}
+        | Config
+    ];
+init_per_group(async, Config) ->
+    [{query_mode, async} | Config];
+init_per_group(sync, Config) ->
+    [{query_mode, sync} | Config];
+init_per_group(with_batch, Config0) ->
+    Config = [{enable_batch, true} | Config0],
+    common_init(Config);
+init_per_group(without_batch, Config0) ->
+    Config = [{enable_batch, false} | Config0],
+    common_init(Config);
+init_per_group(_Group, Config) ->
+    Config.
+
+end_per_group(Group, Config) when
+    Group == with_batch; Group == without_batch
+->
+    connect_and_drop_table(Config),
+    ProxyHost = ?config(proxy_host, Config),
+    ProxyPort = ?config(proxy_port, Config),
+    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
+    ok;
+end_per_group(_Group, _Config) ->
+    ok.
+
+init_per_suite(Config) ->
+    Config.
+
+end_per_suite(_Config) ->
+    emqx_mgmt_api_test_util:end_suite(),
+    ok = emqx_common_test_helpers:stop_apps([emqx_bridge, emqx_conf]),
+    ok.
+
+init_per_testcase(_Testcase, Config) ->
+    connect_and_clear_table(Config),
+    delete_bridge(Config),
+    snabbkaffe:start_trace(),
+    Config.
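+
+%% For orientation: with the groups() definition above, every test case is run
+%% once per combination of transport, query mode and batching, i.e. a concrete
+%% execution path expands as, for example,
+%%
+%%   tcp -> async -> with_batch -> t_setup_via_config_and_publish
+%%
+%% except that the NonBatchCases are excluded from the with_batch group.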
+
+end_per_testcase(_Testcase, Config) ->
+    ProxyHost = ?config(proxy_host, Config),
+    ProxyPort = ?config(proxy_port, Config),
+    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
+    ok = snabbkaffe:stop(),
+    connect_and_clear_table(Config),
+    delete_bridge(Config),
+    ok.
+
+%%------------------------------------------------------------------------------
+%% Helper fns
+%%------------------------------------------------------------------------------
+
+common_init(Config0) ->
+    ct:pal("common_init: ~p~n", [Config0]),
+    BridgeType = proplists:get_value(bridge_type, Config0, <<"cassandra">>),
+    Host = ?config(cassa_host, Config0),
+    Port = ?config(cassa_port, Config0),
+    case emqx_common_test_helpers:is_tcp_server_available(Host, Port) of
+        true ->
+            % Setup toxiproxy
+            ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"),
+            ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")),
+            emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
+            % Ensure EE bridge module is loaded
+            _ = application:load(emqx_ee_bridge),
+            _ = emqx_ee_bridge:module_info(),
+            ok = emqx_common_test_helpers:start_apps([emqx_conf, emqx_bridge]),
+            emqx_mgmt_api_test_util:init_suite(),
+            % Connect to cassandra directly and create the table
+            catch connect_and_drop_table(Config0),
+            connect_and_create_table(Config0),
+            {Name, CassaConf} = cassa_config(BridgeType, Config0),
+            Config =
+                [
+                    {cassa_config, CassaConf},
+                    {cassa_bridge_type, BridgeType},
+                    {cassa_name, Name},
+                    {proxy_host, ProxyHost},
+                    {proxy_port, ProxyPort}
+                    | Config0
+                ],
+            Config;
+        false ->
+            case os:getenv("IS_CI") of
+                "yes" ->
+                    throw(no_cassandra);
+                _ ->
+                    {skip, no_cassandra}
+            end
+    end.
+
+cassa_config(BridgeType, Config) ->
+    Port = integer_to_list(?config(cassa_port, Config)),
+    Server = ?config(cassa_host, Config) ++ ":" ++ Port,
+    Name = atom_to_binary(?MODULE),
+    BatchSize =
+        case ?config(enable_batch, Config) of
+            true -> ?BATCH_SIZE;
+            false -> 1
+        end,
+    QueryMode = ?config(query_mode, Config),
+    TlsEnabled = ?config(enable_tls, Config),
+    ConfigString =
+        io_lib:format(
+            "bridges.~s.~s {\n"
+            "  enable = true\n"
+            "  servers = ~p\n"
+            "  keyspace = ~p\n"
+            "  username = ~p\n"
+            "  password = ~p\n"
+            "  cql = ~p\n"
+            "  resource_opts = {\n"
+            "    request_timeout = 500ms\n"
+            "    batch_size = ~b\n"
+            "    query_mode = ~s\n"
+            "  }\n"
+            "  ssl = {\n"
+            "    enable = ~w\n"
+            "    cacertfile = \"~s\"\n"
+            "    certfile = \"~s\"\n"
+            "    keyfile = \"~s\"\n"
+            "    server_name_indication = disable\n"
+            "  }\n"
+            "}",
+            [
+                BridgeType,
+                Name,
+                Server,
+                ?CASSA_KEYSPACE,
+                ?CASSA_USERNAME,
+                ?CASSA_PASSWORD,
+                ?SQL_BRIDGE,
+                BatchSize,
+                QueryMode,
+                TlsEnabled,
+                ?CAFILE,
+                ?CERTFILE,
+                ?KEYFILE
+            ]
+        ),
+    {Name, parse_and_check(ConfigString, BridgeType, Name)}.
+
+parse_and_check(ConfigString, BridgeType, Name) ->
+    {ok, RawConf} = hocon:binary(ConfigString, #{format => map}),
+    hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}),
+    #{<<"bridges">> := #{BridgeType := #{Name := Config}}} = RawConf,
+    Config.
+
+create_bridge(Config) ->
+    create_bridge(Config, _Overrides = #{}).
+
+create_bridge(Config, Overrides) ->
+    BridgeType = ?config(cassa_bridge_type, Config),
+    Name = ?config(cassa_name, Config),
+    BridgeConfig0 = ?config(cassa_config, Config),
+    BridgeConfig = emqx_utils_maps:deep_merge(BridgeConfig0, Overrides),
+    emqx_bridge:create(BridgeType, Name, BridgeConfig).
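+
+%% Example use of the Overrides argument (a deep merge into the generated
+%% bridge config; keys are binaries, as in the parsed HOCON):
+%%
+%%   create_bridge(Config, #{<<"resource_opts">> => #{<<"batch_size">> => 1}})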
+ +delete_bridge(Config) -> + BridgeType = ?config(cassa_bridge_type, Config), + Name = ?config(cassa_name, Config), + emqx_bridge:remove(BridgeType, Name). + +create_bridge_http(Params) -> + Path = emqx_mgmt_api_test_util:api_path(["bridges"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of + {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; + Error -> Error + end. + +bridges_probe_http(Params) -> + Path = emqx_mgmt_api_test_util:api_path(["bridges_probe"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of + {ok, _} -> ok; + Error -> Error + end. + +send_message(Config, Payload) -> + Name = ?config(cassa_name, Config), + BridgeType = ?config(cassa_bridge_type, Config), + BridgeID = emqx_bridge_resource:bridge_id(BridgeType, Name), + emqx_bridge:send_message(BridgeID, Payload). + +query_resource(Config, Request) -> + Name = ?config(cassa_name, Config), + BridgeType = ?config(cassa_bridge_type, Config), + ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), + emqx_resource:query(ResourceID, Request, #{timeout => 1_000}). + +query_resource_async(Config, Request) -> + Name = ?config(cassa_name, Config), + BridgeType = ?config(cassa_bridge_type, Config), + Ref = alias([reply]), + AsyncReplyFun = fun(Result) -> Ref ! {result, Ref, Result} end, + ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), + Return = emqx_resource:query(ResourceID, Request, #{ + timeout => 500, async_reply_fun => {AsyncReplyFun, []} + }), + {Return, Ref}. + +receive_result(Ref, Timeout) when is_reference(Ref) -> + receive + {result, Ref, Result} -> + {ok, Result}; + {Ref, Result} -> + {ok, Result} + after Timeout -> + timeout + end. + +connect_direct_cassa(Config) -> + Opts = #{ + nodes => [{?config(cassa_host, Config), ?config(cassa_port, Config)}], + username => ?CASSA_USERNAME, + password => ?CASSA_PASSWORD, + keyspace => ?CASSA_KEYSPACE + }, + SslOpts = + case ?config(enable_tls, Config) of + true -> + Opts#{ + ssl => emqx_tls_lib:to_client_opts( + #{ + enable => true, + cacertfile => ?CAFILE, + certfile => ?CERTFILE, + keyfile => ?KEYFILE + } + ) + }; + false -> + Opts + end, + {ok, Con} = ecql:connect(maps:to_list(SslOpts)), + Con. + +% These funs connect and then stop the cassandra connection +connect_and_create_table(Config) -> + with_direct_conn(Config, fun(Conn) -> + {ok, _} = ecql:query(Conn, ?SQL_CREATE_TABLE) + end). + +connect_and_drop_table(Config) -> + with_direct_conn(Config, fun(Conn) -> + {ok, _} = ecql:query(Conn, ?SQL_DROP_TABLE) + end). + +connect_and_clear_table(Config) -> + with_direct_conn(Config, fun(Conn) -> + ok = ecql:query(Conn, ?SQL_DELETE) + end). + +connect_and_get_payload(Config) -> + with_direct_conn(Config, fun(Conn) -> + {ok, {_Keyspace, _ColsSpec, [[Result]]}} = ecql:query(Conn, ?SQL_SELECT), + Result + end). + +with_direct_conn(Config, Fn) -> + Conn = connect_direct_cassa(Config), + try + Fn(Conn) + after + ok = ecql:close(Conn) + end. 
+ +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ + +t_setup_via_config_and_publish(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + Val = integer_to_binary(erlang:unique_integer()), + SentData = #{ + topic => atom_to_binary(?FUNCTION_NAME), + payload => Val, + timestamp => 1668602148000 + }, + ?check_trace( + begin + ?wait_async_action( + ?assertEqual(ok, send_message(Config, SentData)), + #{?snk_kind := cassandra_connector_query_return}, + 10_000 + ), + ?assertMatch( + Val, + connect_and_get_payload(Config) + ), + ok + end, + fun(Trace0) -> + Trace = ?of_kind(cassandra_connector_query_return, Trace0), + ?assertMatch([#{result := {ok, _Pid}}], Trace), + ok + end + ), + ok. + +t_setup_via_http_api_and_publish(Config) -> + BridgeType = ?config(cassa_bridge_type, Config), + Name = ?config(cassa_name, Config), + BridgeConfig0 = ?config(cassa_config, Config), + BridgeConfig = BridgeConfig0#{ + <<"name">> => Name, + <<"type">> => BridgeType + }, + ?assertMatch( + {ok, _}, + create_bridge_http(BridgeConfig) + ), + Val = integer_to_binary(erlang:unique_integer()), + SentData = #{ + topic => atom_to_binary(?FUNCTION_NAME), + payload => Val, + timestamp => 1668602148000 + }, + ?check_trace( + begin + ?wait_async_action( + ?assertEqual(ok, send_message(Config, SentData)), + #{?snk_kind := cassandra_connector_query_return}, + 10_000 + ), + ?assertMatch( + Val, + connect_and_get_payload(Config) + ), + ok + end, + fun(Trace0) -> + Trace = ?of_kind(cassandra_connector_query_return, Trace0), + ?assertMatch([#{result := {ok, _Pid}}], Trace), + ok + end + ), + ok. + +t_get_status(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + + Name = ?config(cassa_name, Config), + BridgeType = ?config(cassa_bridge_type, Config), + ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), + + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceID)), + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ?assertMatch( + {ok, Status} when Status =:= disconnected orelse Status =:= connecting, + emqx_resource_manager:health_check(ResourceID) + ) + end), + ok. + +t_bridges_probe_via_http(Config) -> + BridgeType = ?config(cassa_bridge_type, Config), + Name = ?config(cassa_name, Config), + BridgeConfig0 = ?config(cassa_config, Config), + BridgeConfig = BridgeConfig0#{ + <<"name">> => Name, + <<"type">> => BridgeType + }, + ?assertMatch(ok, bridges_probe_http(BridgeConfig)), + + ok. + +t_create_disconnected(Config) -> + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + ?check_trace( + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ?assertMatch({ok, _}, create_bridge(Config)) + end), + fun(Trace) -> + ?assertMatch( + [#{error := {start_pool_failed, _, _}}], + ?of_kind(cassandra_connector_start_failed, Trace) + ), + ok + end + ), + ok. 
+ +t_write_failure(Config) -> + ProxyName = ?config(proxy_name, Config), + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + QueryMode = ?config(query_mode, Config), + {ok, _} = create_bridge(Config), + Val = integer_to_binary(erlang:unique_integer()), + SentData = #{ + topic => atom_to_binary(?FUNCTION_NAME), + payload => Val, + timestamp => 1668602148000 + }, + ?check_trace( + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + {_, {ok, _}} = + ?wait_async_action( + case QueryMode of + sync -> + ?assertMatch({error, _}, send_message(Config, SentData)); + async -> + send_message(Config, SentData) + end, + #{?snk_kind := buffer_worker_flush_nack}, + 10_000 + ) + end), + fun(Trace0) -> + ct:pal("trace: ~p", [Trace0]), + Trace = ?of_kind(buffer_worker_flush_nack, Trace0), + [#{result := Result} | _] = Trace, + case Result of + {async_return, {error, {resource_error, _}}} -> + ok; + {async_return, {error, {recoverable_error, disconnected}}} -> + ok; + {error, {resource_error, _}} -> + ok; + _ -> + ct:fail("unexpected error: ~p", [Result]) + end + end + ), + ok. + +%% This test doesn't work with batch enabled since it is not possible +%% to set the timeout directly for batch queries +%% +%% XXX: parameter with request timeout is not supported yet. +%% +%t_write_timeout(Config) -> +% ProxyName = ?config(proxy_name, Config), +% ProxyPort = ?config(proxy_port, Config), +% ProxyHost = ?config(proxy_host, Config), +% {ok, _} = create_bridge(Config), +% Val = integer_to_binary(erlang:unique_integer()), +% SentData = #{payload => Val, timestamp => 1668602148000}, +% Timeout = 1000, +% emqx_common_test_helpers:with_failure(timeout, ProxyName, ProxyHost, ProxyPort, fun() -> +% ?assertMatch( +% {error, {resource_error, #{reason := timeout}}}, +% query_resource(Config, {send_message, SentData, [], Timeout}) +% ) +% end), +% ok. + +t_simple_sql_query(Config) -> + QueryMode = ?config(query_mode, Config), + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + Request = {query, <<"SELECT count(1) AS T FROM system.local">>}, + Result = + case QueryMode of + sync -> + query_resource(Config, Request); + async -> + {_, Ref} = query_resource_async(Config, Request), + {ok, Res} = receive_result(Ref, 2_000), + Res + end, + ?assertMatch({ok, {<<"system.local">>, _, [[1]]}}, Result), + ok. + +t_missing_data(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + %% emqx_bridge_cassandra_connector will send missed data as a `null` atom + %% to ecql driver + ?check_trace( + begin + ?wait_async_action( + send_message(Config, #{}), + #{?snk_kind := handle_async_reply, result := {error, {8704, _}}}, + 10_000 + ), + ok + end, + fun(Trace0) -> + %% 1. ecql driver will return `ok` first in async query + Trace = ?of_kind(cassandra_connector_query_return, Trace0), + ?assertMatch([#{result := {ok, _Pid}}], Trace), + %% 2. then it will return an error in callback function + Trace1 = ?of_kind(handle_async_reply, Trace0), + ?assertMatch([#{result := {error, {8704, _}}}], Trace1), + ok + end + ), + ok. 
+
+t_bad_sql_parameter(Config) ->
+    QueryMode = ?config(query_mode, Config),
+    ?assertMatch(
+        {ok, _},
+        create_bridge(
+            Config,
+            #{
+                <<"resource_opts">> => #{
+                    <<"request_timeout">> => 500,
+                    <<"resume_interval">> => 100,
+                    <<"health_check_interval">> => 100
+                }
+            }
+        )
+    ),
+    Request = {query, <<"">>, [bad_parameter]},
+    Result =
+        case QueryMode of
+            sync ->
+                query_resource(Config, Request);
+            async ->
+                {_, Ref} = query_resource_async(Config, Request),
+                case receive_result(Ref, 5_000) of
+                    {ok, Res} ->
+                        Res;
+                    timeout ->
+                        ct:pal("mailbox:\n  ~p", [process_info(self(), messages)]),
+                        ct:fail("no response received")
+                end
+        end,
+    ?assertMatch({error, _}, Result),
+    ok.
+
+t_nasty_sql_string(Config) ->
+    ?assertMatch({ok, _}, create_bridge(Config)),
+    Payload = list_to_binary(lists:seq(1, 127)),
+    Message = #{
+        topic => atom_to_binary(?FUNCTION_NAME),
+        payload => Payload,
+        timestamp => erlang:system_time(millisecond)
+    },
+    %% XXX: why ok instead of {ok, AffectedLines}?
+    ?assertEqual(ok, send_message(Config, Message)),
+    ?assertEqual(Payload, connect_and_get_payload(Config)).
diff --git a/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_connector_SUITE.erl b/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_connector_SUITE.erl
new file mode 100644
index 000000000..452db33a7
--- /dev/null
+++ b/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_connector_SUITE.erl
@@ -0,0 +1,194 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+
+-module(emqx_bridge_cassandra_connector_SUITE).
+
+-compile(nowarn_export_all).
+-compile(export_all).
+
+-include("emqx_bridge_cassandra.hrl").
+-include("emqx_connector/include/emqx_connector.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("emqx/include/emqx.hrl").
+-include_lib("stdlib/include/assert.hrl").
+
+%% Cassandra server defined at `.ci/docker-compose-file/docker-compose-cassandra-tcp.yaml`
+%% You can change it to `127.0.0.1` if you run this SUITE locally
+-define(CASSANDRA_HOST, "cassandra").
+-define(CASSANDRA_RESOURCE_MOD, emqx_bridge_cassandra_connector).
+
+%% This test SUITE requires a running cassandra instance. If you don't want to
+%% bring up the whole CI infrastructure with the `scripts/ct/run.sh` script
+%% you can create a cassandra instance with the following command (execute it
+%% from the root of the EMQX directory). You also need to set ?CASSANDRA_HOST and
+%% ?CASSANDRA_DEFAULT_PORT to appropriate values.
+%%
+%% sudo docker run --rm -d --name cassandra --network host cassandra:3.11.14
+
+%% Cassandra default username & password once `authenticator: PasswordAuthenticator`
+%% is enabled in the cassandra config
+-define(CASSA_USERNAME, <<"cassandra">>).
+-define(CASSA_PASSWORD, <<"cassandra">>).
+
+all() ->
+    emqx_common_test_helpers:all(?MODULE).
+
+groups() ->
+    [].
+
+cassandra_servers() ->
+    lists:map(
+        fun(#{hostname := Host, port := Port}) ->
+            {Host, Port}
+        end,
+        emqx_schema:parse_servers(
+            iolist_to_binary([?CASSANDRA_HOST, ":", erlang:integer_to_list(?CASSANDRA_DEFAULT_PORT)]),
+            #{default_port => ?CASSANDRA_DEFAULT_PORT}
+        )
+    ).
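+
+%% Illustrative: for the defaults above, emqx_schema:parse_servers/2 yields a
+%% list of maps such as [#{hostname => "cassandra", port => 9042}] (assuming
+%% ?CASSANDRA_DEFAULT_PORT is the standard 9042), which the fun in
+%% cassandra_servers/0 flattens into ecql node tuples: [{"cassandra", 9042}].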
+ +init_per_suite(Config) -> + case + emqx_common_test_helpers:is_tcp_server_available(?CASSANDRA_HOST, ?CASSANDRA_DEFAULT_PORT) + of + true -> + ok = emqx_common_test_helpers:start_apps([emqx_conf]), + ok = emqx_connector_test_helpers:start_apps([emqx_resource]), + {ok, _} = application:ensure_all_started(emqx_connector), + {ok, _} = application:ensure_all_started(emqx_ee_connector), + %% keyspace `mqtt` must be created in advance + {ok, Conn} = + ecql:connect([ + {nodes, cassandra_servers()}, + {username, ?CASSA_USERNAME}, + {password, ?CASSA_PASSWORD}, + {keyspace, "mqtt"} + ]), + ecql:close(Conn), + Config; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_cassandra); + _ -> + {skip, no_cassandra} + end + end. + +end_per_suite(_Config) -> + ok = emqx_common_test_helpers:stop_apps([emqx_conf]), + ok = emqx_connector_test_helpers:stop_apps([emqx_resource]), + _ = application:stop(emqx_connector), + _ = application:stop(emqx_ee_connector). + +init_per_testcase(_, Config) -> + Config. + +end_per_testcase(_, _Config) -> + ok. + +%%-------------------------------------------------------------------- +%% cases +%%-------------------------------------------------------------------- + +t_lifecycle(_Config) -> + perform_lifecycle_check( + <<"emqx_connector_cassandra_SUITE">>, + cassandra_config() + ). + +show(X) -> + erlang:display(X), + X. + +show(Label, What) -> + erlang:display({Label, What}), + What. + +perform_lifecycle_check(ResourceId, InitialConfig) -> + {ok, #{config := CheckedConfig}} = + emqx_resource:check_config(?CASSANDRA_RESOURCE_MOD, InitialConfig), + {ok, #{ + state := #{pool_name := PoolName} = State, + status := InitialStatus + }} = + emqx_resource:create_local( + ResourceId, + ?CONNECTOR_RESOURCE_GROUP, + ?CASSANDRA_RESOURCE_MOD, + CheckedConfig, + #{} + ), + ?assertEqual(InitialStatus, connected), + % Instance should match the state and status of the just started resource + {ok, ?CONNECTOR_RESOURCE_GROUP, #{ + state := State, + status := InitialStatus + }} = + emqx_resource:get_instance(ResourceId), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)), + % % Perform query as further check that the resource is working as expected + (fun() -> + erlang:display({pool_name, ResourceId}), + QueryNoParamsResWrapper = emqx_resource:query(ResourceId, test_query_no_params()), + ?assertMatch({ok, _}, QueryNoParamsResWrapper) + end)(), + ?assertEqual(ok, emqx_resource:stop(ResourceId)), + % Resource will be listed still, but state will be changed and healthcheck will fail + % as the worker no longer exists. + {ok, ?CONNECTOR_RESOURCE_GROUP, #{ + state := State, + status := StoppedStatus + }} = + emqx_resource:get_instance(ResourceId), + ?assertEqual(stopped, StoppedStatus), + ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(ResourceId)), + % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself. 
+ ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), + % Can call stop/1 again on an already stopped instance + ?assertEqual(ok, emqx_resource:stop(ResourceId)), + % Make sure it can be restarted and the healthchecks and queries work properly + ?assertEqual(ok, emqx_resource:restart(ResourceId)), + % async restart, need to wait resource + timer:sleep(500), + {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} = + emqx_resource:get_instance(ResourceId), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)), + (fun() -> + QueryNoParamsResWrapper = + emqx_resource:query(ResourceId, test_query_no_params()), + ?assertMatch({ok, _}, QueryNoParamsResWrapper) + end)(), + % Stop and remove the resource in one go. + ?assertEqual(ok, emqx_resource:remove_local(ResourceId)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), + % Should not even be able to get the resource data out of ets now unlike just stopping. + ?assertEqual({error, not_found}, emqx_resource:get_instance(ResourceId)). + +%%-------------------------------------------------------------------- +%% utils +%%-------------------------------------------------------------------- + +cassandra_config() -> + Config = + #{ + auto_reconnect => true, + keyspace => <<"mqtt">>, + username => ?CASSA_USERNAME, + password => ?CASSA_PASSWORD, + pool_size => 8, + servers => iolist_to_binary( + io_lib:format( + "~s:~b", + [ + ?CASSANDRA_HOST, + ?CASSANDRA_DEFAULT_PORT + ] + ) + ) + }, + #{<<"config">> => Config}. + +test_query_no_params() -> + {query, <<"SELECT count(1) AS T FROM system.local">>}. diff --git a/apps/emqx_bridge_clickhouse/BSL.txt b/apps/emqx_bridge_clickhouse/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_clickhouse/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. 
+ +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_clickhouse/README.md b/apps/emqx_bridge_clickhouse/README.md new file mode 100644 index 000000000..ff870e87d --- /dev/null +++ b/apps/emqx_bridge_clickhouse/README.md @@ -0,0 +1,37 @@ +# EMQX ClickHouse Bridge + +[ClickHouse](https://github.com/ClickHouse/ClickHouse) is an open-source, column-based +database management system. It is designed for real-time processing of large volumes of +data and is known for its high performance and scalability. + +The application is used to connect EMQX and ClickHouse. +User can create a rule and easily ingest IoT data into ClickHouse by leveraging +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). 
+ + +# Documentation + +- Refer to [Ingest data into ClickHouse](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-clickhouse.html) + for how to use EMQX dashboard to ingest IoT data into ClickHouse. + +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) + for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse.app.src b/apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse.app.src new file mode 100644 index 000000000..a0b409d5b --- /dev/null +++ b/apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_clickhouse, [ + {description, "EMQX Enterprise ClickHouse Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_dynamo/BSL.txt b/apps/emqx_bridge_dynamo/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_dynamo/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. 
This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_dynamo/README.md b/apps/emqx_bridge_dynamo/README.md new file mode 100644 index 000000000..48dcb781d --- /dev/null +++ b/apps/emqx_bridge_dynamo/README.md @@ -0,0 +1,40 @@ +# EMQX DynamoDB Bridge + +[Dynamodb](https://aws.amazon.com/dynamodb/) is a high-performance NoSQL database +service provided by Amazon that's designed for scalability and low-latency access +to structured data. + +It's often used in applications that require fast and reliable access to data, +such as mobile, ad tech, and IoT. + +The application is used to connect EMQX and DynamoDB. +User can create a rule and easily ingest IoT data into DynamoDB by leveraging +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + +# Documentation + +- Refer to [Ingest data into DynamoDB](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-dynamo.html) + for how to use EMQX dashboard to ingest IoT data into DynamoDB. + +- Refer to [Rules engine](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. 
+ + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) + for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_dynamo/docker-ct b/apps/emqx_bridge_dynamo/docker-ct new file mode 100644 index 000000000..b63325b8b --- /dev/null +++ b/apps/emqx_bridge_dynamo/docker-ct @@ -0,0 +1,2 @@ +toxiproxy +dynamo diff --git a/apps/emqx_bridge_dynamo/priv/dynamo/mqtt_acked.json b/apps/emqx_bridge_dynamo/priv/dynamo/mqtt_acked.json new file mode 100644 index 000000000..6ede088a4 --- /dev/null +++ b/apps/emqx_bridge_dynamo/priv/dynamo/mqtt_acked.json @@ -0,0 +1,15 @@ +{ + "TableName": "mqtt_acked", + "KeySchema": [ + { "AttributeName": "topic", "KeyType": "HASH" }, + { "AttributeName": "clientid", "KeyType": "RANGE" } + ], + "AttributeDefinitions": [ + { "AttributeName": "topic", "AttributeType": "S" }, + { "AttributeName": "clientid", "AttributeType": "S" } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": 5, + "WriteCapacityUnits": 5 + } +} diff --git a/apps/emqx_bridge_dynamo/priv/dynamo/mqtt_client.json b/apps/emqx_bridge_dynamo/priv/dynamo/mqtt_client.json new file mode 100644 index 000000000..ce1b7d267 --- /dev/null +++ b/apps/emqx_bridge_dynamo/priv/dynamo/mqtt_client.json @@ -0,0 +1,13 @@ +{ + "TableName": "mqtt_client", + "KeySchema": [ + { "AttributeName": "clientid", "KeyType": "HASH" } + ], + "AttributeDefinitions": [ + { "AttributeName": "clientid", "AttributeType": "S" } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": 5, + "WriteCapacityUnits": 5 + } +} diff --git a/apps/emqx_bridge_dynamo/priv/dynamo/mqtt_clientid_msg_map.json b/apps/emqx_bridge_dynamo/priv/dynamo/mqtt_clientid_msg_map.json new file mode 100644 index 000000000..fd703c664 --- /dev/null +++ b/apps/emqx_bridge_dynamo/priv/dynamo/mqtt_clientid_msg_map.json @@ -0,0 +1,13 @@ +{ + "TableName": "mqtt_clientid_msg_map", + "KeySchema": [ + { "AttributeName": "clientid", "KeyType": "HASH" } + ], + "AttributeDefinitions": [ + { "AttributeName": "clientid", "AttributeType": "S" } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": 5, + "WriteCapacityUnits": 5 + } +} diff --git a/apps/emqx_bridge_dynamo/priv/dynamo/mqtt_msg.json b/apps/emqx_bridge_dynamo/priv/dynamo/mqtt_msg.json new file mode 100644 index 000000000..ad94b8f72 --- /dev/null +++ b/apps/emqx_bridge_dynamo/priv/dynamo/mqtt_msg.json @@ -0,0 +1,13 @@ +{ + "TableName": "mqtt_msg", + "KeySchema": [ + { "AttributeName": "id", "KeyType": "HASH" } + ], + "AttributeDefinitions": [ + { "AttributeName": "id", "AttributeType": "S" } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": 5, + "WriteCapacityUnits": 5 + } +} diff --git a/apps/emqx_bridge_dynamo/priv/dynamo/mqtt_retain.json b/apps/emqx_bridge_dynamo/priv/dynamo/mqtt_retain.json new file mode 100644 index 000000000..2a0af2e86 --- /dev/null +++ b/apps/emqx_bridge_dynamo/priv/dynamo/mqtt_retain.json @@ -0,0 +1,13 @@ +{ + "TableName": "mqtt_retain", + "KeySchema": [ + { "AttributeName": "topic", "KeyType": "HASH" } + ], + "AttributeDefinitions": [ + { "AttributeName": "topic", "AttributeType": "S" } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": 5, + "WriteCapacityUnits": 5 + } +} diff --git 
a/apps/emqx_bridge_dynamo/priv/dynamo/mqtt_sub.json b/apps/emqx_bridge_dynamo/priv/dynamo/mqtt_sub.json new file mode 100644 index 000000000..9a559f048 --- /dev/null +++ b/apps/emqx_bridge_dynamo/priv/dynamo/mqtt_sub.json @@ -0,0 +1,16 @@ +{ + "TableName": "mqtt_sub", + "KeySchema": [ + { "AttributeName": "clientid", "KeyType": "HASH" }, + { "AttributeName": "topic", "KeyType": "RANGE" } + ], + "AttributeDefinitions": [ + { "AttributeName": "clientid", "AttributeType": "S" }, + { "AttributeName": "topic", "AttributeType": "S" } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": 5, + "WriteCapacityUnits": 5 + } +} + diff --git a/apps/emqx_bridge_dynamo/priv/dynamo/mqtt_topic_msg_map.json b/apps/emqx_bridge_dynamo/priv/dynamo/mqtt_topic_msg_map.json new file mode 100644 index 000000000..effd4b4b9 --- /dev/null +++ b/apps/emqx_bridge_dynamo/priv/dynamo/mqtt_topic_msg_map.json @@ -0,0 +1,13 @@ +{ + "TableName": "mqtt_topic_msg_map", + "KeySchema": [ + { "AttributeName": "topic", "KeyType": "HASH" } + ], + "AttributeDefinitions": [ + { "AttributeName": "topic", "AttributeType": "S" } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": 5, + "WriteCapacityUnits": 5 + } +} diff --git a/apps/emqx_bridge_dynamo/rebar.config b/apps/emqx_bridge_dynamo/rebar.config new file mode 100644 index 000000000..fbccb5c9a --- /dev/null +++ b/apps/emqx_bridge_dynamo/rebar.config @@ -0,0 +1,11 @@ +%% -*- mode: erlang; -*- +{erl_opts, [debug_info]}. +{deps, [ {erlcloud, {git, "https://github.com/emqx/erlcloud.git", {tag, "3.5.16-emqx-1"}}} + , {emqx_connector, {path, "../../apps/emqx_connector"}} + , {emqx_resource, {path, "../../apps/emqx_resource"}} + , {emqx_bridge, {path, "../../apps/emqx_bridge"}} + ]}. + +{shell, [ + {apps, [emqx_bridge_dynamo]} +]}. diff --git a/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo.app.src b/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo.app.src new file mode 100644 index 000000000..2d2e299d2 --- /dev/null +++ b/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_dynamo, [ + {description, "EMQX Enterprise Dynamo Bridge"}, + {vsn, "0.1.1"}, + {registered, []}, + {applications, [kernel, stdlib, erlcloud]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo.erl b/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo.erl new file mode 100644 index 000000000..251e79ca2 --- /dev/null +++ b/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo.erl @@ -0,0 +1,118 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_dynamo). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("emqx_bridge/include/emqx_bridge.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-export([ + conn_bridge_examples/1, + values/1 +]). + +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +-define(DEFAULT_TEMPLATE, <<>>). + +%% ------------------------------------------------------------------------------------------------- +%% api + +conn_bridge_examples(Method) -> + [ + #{ + <<"dynamo">> => #{ + summary => <<"DynamoDB Bridge">>, + value => values(Method) + } + } + ]. 
+
+values(_Method) ->
+    #{
+        enable => true,
+        type => dynamo,
+        name => <<"foo">>,
+        url => <<"http://127.0.0.1:8000">>,
+        table => <<"mqtt">>,
+        pool_size => 8,
+        aws_access_key_id => <<"root">>,
+        aws_secret_access_key => <<"******">>,
+        template => ?DEFAULT_TEMPLATE,
+        local_topic => <<"local/topic/#">>,
+        resource_opts => #{
+            worker_pool_size => 8,
+            health_check_interval => ?HEALTHCHECK_INTERVAL_RAW,
+            auto_restart_interval => ?AUTO_RESTART_INTERVAL_RAW,
+            batch_size => ?DEFAULT_BATCH_SIZE,
+            batch_time => ?DEFAULT_BATCH_TIME,
+            query_mode => sync,
+            max_buffer_bytes => ?DEFAULT_BUFFER_BYTES
+        }
+    }.
+
+%% -------------------------------------------------------------------------------------------------
+%% Hocon Schema Definitions
+namespace() -> "bridge_dynamo".
+
+roots() -> [].
+
+fields("config") ->
+    [
+        {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})},
+        {template,
+            mk(
+                binary(),
+                #{desc => ?DESC("template"), default => ?DEFAULT_TEMPLATE}
+            )},
+        {local_topic,
+            mk(
+                binary(),
+                #{desc => ?DESC("local_topic"), default => undefined}
+            )},
+        {resource_opts,
+            mk(
+                ref(?MODULE, "creation_opts"),
+                #{
+                    required => false,
+                    default => #{},
+                    desc => ?DESC(emqx_resource_schema, <<"resource_opts">>)
+                }
+            )}
+    ] ++
+        (emqx_bridge_dynamo_connector:fields(config) --
+            emqx_connector_schema_lib:prepare_statement_fields());
+fields("creation_opts") ->
+    emqx_resource_schema:fields("creation_opts");
+fields("post") ->
+    [type_field(), name_field() | fields("config")];
+fields("put") ->
+    fields("config");
+fields("get") ->
+    emqx_bridge_schema:status_fields() ++ fields("post").
+
+desc("config") ->
+    ?DESC("desc_config");
+desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" ->
+    ["Configuration for DynamoDB using `", string:to_upper(Method), "` method."];
+desc("creation_opts" = Name) ->
+    emqx_resource_schema:desc(Name);
+desc(_) ->
+    undefined.
+
+%% -------------------------------------------------------------------------------------------------
+
+type_field() ->
+    {type, mk(enum([dynamo]), #{required => true, desc => ?DESC("desc_type")})}.
+
+name_field() ->
+    {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}.
diff --git a/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo_connector.erl b/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo_connector.erl
new file mode 100644
index 000000000..981c31090
--- /dev/null
+++ b/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo_connector.erl
@@ -0,0 +1,219 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+
+-module(emqx_bridge_dynamo_connector).
+
+-behaviour(emqx_resource).
+
+-include_lib("emqx_resource/include/emqx_resource.hrl").
+-include_lib("typerefl/include/types.hrl").
+-include_lib("emqx/include/logger.hrl").
+-include_lib("snabbkaffe/include/snabbkaffe.hrl").
+-include_lib("hocon/include/hoconsc.hrl").
+
+-export([roots/0, fields/1]).
+
+%% `emqx_resource' API
+-export([
+    callback_mode/0,
+    is_buffer_supported/0,
+    on_start/2,
+    on_stop/2,
+    on_query/3,
+    on_batch_query/3,
+    on_get_status/2
+]).
+
+-export([
+    connect/1
+]).
+
+-import(hoconsc, [mk/2, enum/1, ref/2]).
+
+%%=====================================================================
+%% Hocon schema
+roots() ->
+    [{config, #{type => hoconsc:ref(?MODULE, config)}}].
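+
+%% A minimal config map accepted by this schema (illustrative values, matching
+%% the example in values/1 of the bridge schema and the fields declared below):
+%%
+%%   #{url => <<"http://127.0.0.1:8000">>, table => <<"mqtt">>,
+%%     aws_access_key_id => <<"root">>, aws_secret_access_key => <<"******">>,
+%%     pool_size => 8, auto_reconnect => true}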
+
+fields(config) ->
+    [
+        {url, mk(binary(), #{required => true, desc => ?DESC("url")})},
+        {table, mk(binary(), #{required => true, desc => ?DESC("table")})},
+        {aws_access_key_id,
+            mk(
+                binary(),
+                #{required => true, desc => ?DESC("aws_access_key_id")}
+            )},
+        {aws_secret_access_key,
+            mk(
+                binary(),
+                #{
+                    required => true,
+                    desc => ?DESC("aws_secret_access_key"),
+                    sensitive => true
+                }
+            )},
+        {pool_size, fun emqx_connector_schema_lib:pool_size/1},
+        {auto_reconnect, fun emqx_connector_schema_lib:auto_reconnect/1}
+    ].
+
+%%========================================================================================
+%% `emqx_resource' API
+%%========================================================================================
+
+callback_mode() -> always_sync.
+
+is_buffer_supported() -> false.
+
+on_start(
+    InstanceId,
+    #{
+        url := Url,
+        aws_access_key_id := AccessKeyID,
+        aws_secret_access_key := SecretAccessKey,
+        table := Table,
+        pool_size := PoolSize
+    } = Config
+) ->
+    ?SLOG(info, #{
+        msg => "starting_dynamo_connector",
+        connector => InstanceId,
+        config => redact(Config)
+    }),
+
+    {Schema, Server, DefaultPort} = get_host_info(to_str(Url)),
+    #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, #{
+        default_port => DefaultPort
+    }),
+
+    Options = [
+        {config, #{
+            host => Host,
+            port => Port,
+            aws_access_key_id => to_str(AccessKeyID),
+            aws_secret_access_key => to_str(SecretAccessKey),
+            schema => Schema
+        }},
+        {pool_size, PoolSize}
+    ],
+
+    Templates = parse_template(Config),
+    State = #{
+        pool_name => InstanceId,
+        table => Table,
+        templates => Templates
+    },
+    case emqx_resource_pool:start(InstanceId, ?MODULE, Options) of
+        ok ->
+            {ok, State};
+        Error ->
+            Error
+    end.
+
+on_stop(InstanceId, #{pool_name := PoolName}) ->
+    ?SLOG(info, #{
+        msg => "stopping_dynamo_connector",
+        connector => InstanceId
+    }),
+    emqx_resource_pool:stop(PoolName).
+
+on_query(InstanceId, Query, State) ->
+    do_query(InstanceId, Query, State).
+
+%% we only support batch insert
+on_batch_query(InstanceId, [{send_message, _} | _] = Query, State) ->
+    do_query(InstanceId, Query, State);
+on_batch_query(_InstanceId, Query, _State) ->
+    {error, {unrecoverable_error, {invalid_request, Query}}}.
+
+on_get_status(_InstanceId, #{pool_name := Pool}) ->
+    Health = emqx_resource_pool:health_check_workers(
+        Pool, {emqx_bridge_dynamo_connector_client, is_connected, []}
+    ),
+    status_result(Health).
+
+status_result(_Status = true) -> connected;
+status_result(_Status = false) -> connecting.
+
+%%========================================================================================
+%% Helper fns
+%%========================================================================================
+
+do_query(
+    InstanceId,
+    Query,
+    #{pool_name := PoolName, templates := Templates, table := Table} = State
+) ->
+    ?TRACE(
+        "QUERY",
+        "dynamo_connector_received",
+        #{connector => InstanceId, query => Query, state => State}
+    ),
+    Result = ecpool:pick_and_do(
+        PoolName,
+        {emqx_bridge_dynamo_connector_client, query, [Table, Query, Templates]},
+        no_handover
+    ),
+
+    case Result of
+        {error, Reason} ->
+            ?tp(
+                dynamo_connector_query_return,
+                #{error => Reason}
+            ),
+            ?SLOG(error, #{
+                msg => "dynamo_connector_do_query_failed",
+                connector => InstanceId,
+                query => Query,
+                reason => Reason
+            }),
+            Result;
+        _ ->
+            ?tp(
+                dynamo_connector_query_return,
+                #{result => Result}
+            ),
+            Result
+    end.
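+
+%% Illustrative dispatch of the two query paths above (payloads hypothetical):
+%%
+%%   on_query(Id, {send_message, #{clientid => <<"c1">>}}, State)
+%%   on_batch_query(Id, [{send_message, M1}, {send_message, M2}], State)
+%%
+%% Any batch whose first element is not a send_message tuple is rejected with
+%% {error, {unrecoverable_error, {invalid_request, Query}}}.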
+
+connect(Opts) ->
+    Options = proplists:get_value(config, Opts),
+    {ok, _Pid} = Result = emqx_bridge_dynamo_connector_client:start_link(Options),
+    Result.
+
+parse_template(Config) ->
+    Templates =
+        case maps:get(template, Config, undefined) of
+            undefined -> #{};
+            <<>> -> #{};
+            Template -> #{send_message => Template}
+        end,
+
+    parse_template(maps:to_list(Templates), #{}).
+
+parse_template([{Key, H} | T], Templates) ->
+    ParamsTks = emqx_plugin_libs_rule:preproc_tmpl(H),
+    parse_template(
+        T,
+        Templates#{Key => ParamsTks}
+    );
+parse_template([], Templates) ->
+    Templates.
+
+to_str(List) when is_list(List) ->
+    List;
+to_str(Bin) when is_binary(Bin) ->
+    erlang:binary_to_list(Bin).
+
+get_host_info("http://" ++ Server) ->
+    {"http://", Server, 80};
+get_host_info("https://" ++ Server) ->
+    {"https://", Server, 443};
+get_host_info(Server) ->
+    {"http://", Server, 80}.
+
+redact(Data) ->
+    emqx_utils:redact(Data, fun(Any) -> Any =:= aws_secret_access_key end).
diff --git a/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo_connector_client.erl b/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo_connector_client.erl
new file mode 100644
index 000000000..faaef9df4
--- /dev/null
+++ b/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo_connector_client.erl
@@ -0,0 +1,186 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+
+-module(emqx_bridge_dynamo_connector_client).
+
+-behaviour(gen_server).
+
+%% API
+-export([
+    start_link/1,
+    is_connected/1,
+    query/4
+]).
+
+%% gen_server callbacks
+-export([
+    init/1,
+    handle_call/3,
+    handle_cast/2,
+    handle_info/2,
+    terminate/2,
+    code_change/3,
+    format_status/2
+]).
+
+-ifdef(TEST).
+-export([execute/2]).
+-endif.
+
+%% The default timeout for DynamoDB REST API calls is 10 seconds,
+%% while the default `gen_server:call` timeout is only 5 seconds,
+%% so we pass an explicit timeout to `gen_server:call`.
+-define(HEALTH_CHECK_TIMEOUT, 10000).
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+is_connected(Pid) ->
+    try
+        gen_server:call(Pid, is_connected, ?HEALTH_CHECK_TIMEOUT)
+    catch
+        _:_ ->
+            false
+    end.
+
+query(Pid, Table, Query, Templates) ->
+    gen_server:call(Pid, {query, Table, Query, Templates}, infinity).
+
+%%--------------------------------------------------------------------
+%% @doc
+%% Starts the bridge worker which transfers data to DynamoDB
+%% @end
+%%--------------------------------------------------------------------
+start_link(Options) ->
+    gen_server:start_link(?MODULE, Options, []).
+
+%%%===================================================================
+%%% gen_server callbacks
+%%%===================================================================
+
+%% Initialize the DynamoDB data bridge
+init(#{
+    aws_access_key_id := AccessKeyID,
+    aws_secret_access_key := SecretAccessKey,
+    host := Host,
+    port := Port,
+    schema := Schema
+}) ->
+    erlcloud_ddb2:configure(AccessKeyID, SecretAccessKey, Host, Port, Schema),
+    {ok, #{}}.
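Editor's note: `erlcloud_ddb2:configure/5` stores the resulting `aws_config` in the calling process's dictionary (the test suite below asserts on `get(aws_config)`), which is why every pooled client process configures itself in `init/1`. A standalone connectivity probe, mirroring the `is_connected` check handled below, could look like this (credentials, host, and port are illustrative only):

```erlang
%% Sketch: a direct connectivity probe against a local DynamoDB endpoint,
%% using the same calls as the client's is_connected health check.
probe() ->
    erlcloud_ddb2:configure("root", "public", "127.0.0.1", 8000, "http://"),
    case erlcloud_ddb2:list_tables([{limit, 1}]) of
        {ok, _Tables} -> true;
        _ -> false
    end.
```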
+
+handle_call(is_connected, _From, State) ->
+    IsConnected =
+        case erlcloud_ddb2:list_tables([{limit, 1}]) of
+            {ok, _} ->
+                true;
+            _ ->
+                false
+        end,
+    {reply, IsConnected, State};
+handle_call({query, Table, Query, Templates}, _From, State) ->
+    Result = do_query(Table, Query, Templates),
+    {reply, Result, State};
+handle_call(_Request, _From, State) ->
+    {reply, ok, State}.
+
+handle_cast({query, Table, Query, Templates, {ReplyFun, [Context]}}, State) ->
+    Result = do_query(Table, Query, Templates),
+    ReplyFun(Context, Result),
+    {noreply, State};
+handle_cast(_Request, State) ->
+    {noreply, State}.
+
+handle_info(_Info, State) ->
+    {noreply, State}.
+
+terminate(_Reason, _State) ->
+    ok.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+-spec format_status(
+    Opt :: normal | terminate,
+    Status :: list()
+) -> Status :: term().
+format_status(_Opt, Status) ->
+    Status.
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+do_query(Table, Query0, Templates) ->
+    try
+        Query = apply_template(Query0, Templates),
+        execute(Query, Table)
+    catch
+        _Type:Reason ->
+            {error, {unrecoverable_error, {invalid_request, Reason}}}
+    end.
+
+%% some simple query commands for authn/authz or tests
+execute({insert_item, Msg}, Table) ->
+    Item = convert_to_item(Msg),
+    erlcloud_ddb2:put_item(Table, Item);
+execute({delete_item, Key}, Table) ->
+    erlcloud_ddb2:delete_item(Table, Key);
+execute({get_item, Key}, Table) ->
+    erlcloud_ddb2:get_item(Table, Key);
+%% commands for data bridge query or batch query
+execute({send_message, Msg}, Table) ->
+    Item = convert_to_item(Msg),
+    erlcloud_ddb2:put_item(Table, Item);
+execute([{put, _} | _] = Msgs, Table) ->
+    %% type of batch_write_item argument :: batch_write_item_request_items()
+    %% batch_write_item_request_items() :: maybe_list(batch_write_item_request_item())
+    %% batch_write_item_request_item() :: {table_name(), list(batch_write_item_request())}
+    %% batch_write_item_request() :: {put, item()} | {delete, key()}
+    erlcloud_ddb2:batch_write_item({Table, Msgs}).
+
+apply_template({Key, Msg} = Req, Templates) ->
+    case maps:get(Key, Templates, undefined) of
+        undefined ->
+            Req;
+        Template ->
+            {Key, emqx_plugin_libs_rule:proc_tmpl(Template, Msg)}
+    end;
+%% There is no batch delete yet, so:
+%% 1. we can simply replace `send_message` with `put`
+%% 2. we convert the message to an in_item() here, rather than when calling
+%%    `batch_write_item`, which saves an extra traversal of the list
+apply_template([{send_message, _Msg} | _] = Msgs, Templates) ->
+    lists:map(
+        fun(Req) ->
+            {_, Msg} = apply_template(Req, Templates),
+            {put, convert_to_item(Msg)}
+        end,
+        Msgs
+    ).
+
+convert_to_item(Msg) when is_map(Msg), map_size(Msg) > 0 ->
+    maps:fold(
+        fun
+            (_K, <<>>, AccIn) ->
+                AccIn;
+            (K, V, AccIn) ->
+                [{convert2binary(K), convert2binary(V)} | AccIn]
+        end,
+        [],
+        Msg
+    );
+convert_to_item(MsgBin) when is_binary(MsgBin) ->
+    Msg = emqx_utils_json:decode(MsgBin),
+    convert_to_item(Msg);
+convert_to_item(Item) ->
+    erlang:throw({invalid_item, Item}).
+
+convert2binary(Value) when is_atom(Value) ->
+    erlang:atom_to_binary(Value, utf8);
+convert2binary(Value) when is_binary(Value); is_number(Value) ->
+    Value;
+convert2binary(Value) when is_list(Value) ->
+    unicode:characters_to_binary(Value);
+convert2binary(Value) when is_map(Value) ->
+    emqx_utils_json:encode(Value).
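Editor's note: to make the item mapping concrete, here is a local, illustrative paraphrase of what `convert_to_item/1` above does to a message map (the real function is private to the client module): empty binary values are dropped and atom keys are binarised.

```erlang
%% Illustration only -- a local copy of the convert_to_item/1 fold above:
item_example() ->
    Msg = #{id => <<"0001">>, payload => <<"HELLO">>, skipped => <<>>},
    maps:fold(
        fun
            (_K, <<>>, Acc) -> Acc;  %% empty values are dropped
            (K, V, Acc) -> [{erlang:atom_to_binary(K, utf8), V} | Acc]
        end,
        [],
        Msg
    ).
%% => [{<<"payload">>, <<"HELLO">>}, {<<"id">>, <<"0001">>}] (fold order may vary)
```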
diff --git a/apps/emqx_bridge_dynamo/test/emqx_bridge_dynamo_SUITE.erl b/apps/emqx_bridge_dynamo/test/emqx_bridge_dynamo_SUITE.erl
new file mode 100644
index 000000000..da87f6047
--- /dev/null
+++ b/apps/emqx_bridge_dynamo/test/emqx_bridge_dynamo_SUITE.erl
@@ -0,0 +1,431 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+
+-module(emqx_bridge_dynamo_SUITE).
+
+-compile(nowarn_export_all).
+-compile(export_all).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("snabbkaffe/include/snabbkaffe.hrl").
+
+% DB defaults
+-define(TABLE, "mqtt").
+-define(TABLE_BIN, to_bin(?TABLE)).
+-define(ACCESS_KEY_ID, "root").
+-define(SECRET_ACCESS_KEY, "public").
+-define(HOST, "dynamo").
+-define(PORT, 8000).
+-define(SCHEMA, "http://").
+-define(BATCH_SIZE, 10).
+-define(PAYLOAD, <<"HELLO">>).
+
+-define(GET_CONFIG(KEY__, CFG__), proplists:get_value(KEY__, CFG__)).
+
+%% How to run it locally (all commands are run in the $PROJ_ROOT dir):
+%% run ct in a docker container with the helper script:
+%% ```bash
+%% ./scripts/ct/run.sh --ci --app apps/emqx_bridge_dynamo -- \
+%%   --name 'test@127.0.0.1' -c -v --readable true \
+%%   --suite apps/emqx_bridge_dynamo/test/emqx_bridge_dynamo_SUITE.erl
+%% ```
+
+%%------------------------------------------------------------------------------
+%% CT boilerplate
+%%------------------------------------------------------------------------------
+
+all() ->
+    [
+        {group, with_batch},
+        {group, without_batch},
+        {group, flaky}
+    ].
+
+groups() ->
+    TCs0 = emqx_common_test_helpers:all(?MODULE),
+
+    %% Due to the poorly implemented driver (or other reasons), mixing
+    %% these cases with the others makes this suite flaky.
+    Flaky = [t_get_status, t_write_failure],
+    TCs = TCs0 -- Flaky,
+
+    [
+        {with_batch, TCs},
+        {without_batch, TCs},
+        {flaky, Flaky}
+    ].
+
+init_per_group(with_batch, Config0) ->
+    Config = [{batch_size, ?BATCH_SIZE} | Config0],
+    common_init(Config);
+init_per_group(without_batch, Config0) ->
+    Config = [{batch_size, 1} | Config0],
+    common_init(Config);
+init_per_group(flaky, Config0) ->
+    Config = [{batch_size, 1} | Config0],
+    common_init(Config);
+init_per_group(_Group, Config) ->
+    Config.
+
+end_per_group(Group, Config) when Group =:= with_batch; Group =:= without_batch ->
+    ProxyHost = ?config(proxy_host, Config),
+    ProxyPort = ?config(proxy_port, Config),
+    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
+    ok;
+end_per_group(Group, Config) when Group =:= flaky ->
+    ProxyHost = ?config(proxy_host, Config),
+    ProxyPort = ?config(proxy_port, Config),
+    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
+    timer:sleep(1000),
+    ok;
+end_per_group(_Group, _Config) ->
+    ok.
+
+init_per_suite(Config) ->
+    Config.
+
+end_per_suite(_Config) ->
+    emqx_mgmt_api_test_util:end_suite(),
+    ok = emqx_common_test_helpers:stop_apps([emqx_bridge, emqx_conf]),
+    ok.
+
+init_per_testcase(TestCase, Config) ->
+    create_table(Config),
+    ok = snabbkaffe:start_trace(),
+    [{dynamo_name, atom_to_binary(TestCase)} | Config].
+
+end_per_testcase(_Testcase, Config) ->
+    ProxyHost = ?config(proxy_host, Config),
+    ProxyPort = ?config(proxy_port, Config),
+    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
+    ok = snabbkaffe:stop(),
+    delete_table(Config),
+    delete_all_bridges(),
+    ok.
+ +%%------------------------------------------------------------------------------ +%% Helper fns +%%------------------------------------------------------------------------------ + +common_init(ConfigT) -> + Host = os:getenv("DYNAMO_HOST", "toxiproxy"), + Port = list_to_integer(os:getenv("DYNAMO_PORT", "8000")), + + Config0 = [ + {host, Host}, + {port, Port}, + {query_mode, sync}, + {proxy_name, "dynamo"} + | ConfigT + ], + + BridgeType = proplists:get_value(bridge_type, Config0, <<"dynamo">>), + case emqx_common_test_helpers:is_tcp_server_available(Host, Port) of + true -> + % Setup toxiproxy + ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"), + ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + % Ensure EE bridge module is loaded + _ = application:load(emqx_ee_bridge), + _ = emqx_ee_bridge:module_info(), + ok = emqx_common_test_helpers:start_apps([emqx_conf, emqx_bridge]), + emqx_mgmt_api_test_util:init_suite(), + % setup dynamo + setup_dynamo(Config0), + {Name, TDConf} = dynamo_config(BridgeType, Config0), + Config = + [ + {dynamo_config, TDConf}, + {dynamo_bridge_type, BridgeType}, + {dynamo_name, Name}, + {proxy_host, ProxyHost}, + {proxy_port, ProxyPort} + | Config0 + ], + Config; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_dynamo); + _ -> + {skip, no_dynamo} + end + end. + +dynamo_config(BridgeType, Config) -> + Port = integer_to_list(?GET_CONFIG(port, Config)), + Url = "http://" ++ ?GET_CONFIG(host, Config) ++ ":" ++ Port, + Name = atom_to_binary(?MODULE), + BatchSize = ?GET_CONFIG(batch_size, Config), + QueryMode = ?GET_CONFIG(query_mode, Config), + ConfigString = + io_lib:format( + "bridges.~s.~s {\n" + " enable = true\n" + " url = ~p\n" + " table = ~p\n" + " aws_access_key_id = ~p\n" + " aws_secret_access_key = ~p\n" + " resource_opts = {\n" + " request_timeout = 500ms\n" + " batch_size = ~b\n" + " query_mode = ~s\n" + " }\n" + "}", + [ + BridgeType, + Name, + Url, + ?TABLE, + ?ACCESS_KEY_ID, + ?SECRET_ACCESS_KEY, + BatchSize, + QueryMode + ] + ), + {Name, parse_and_check(ConfigString, BridgeType, Name)}. + +parse_and_check(ConfigString, BridgeType, Name) -> + {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), + hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}), + #{<<"bridges">> := #{BridgeType := #{Name := Config}}} = RawConf, + Config. + +create_bridge(Config) -> + create_bridge(Config, _Overrides = #{}). + +create_bridge(Config, Overrides) -> + BridgeType = ?config(dynamo_bridge_type, Config), + Name = ?config(dynamo_name, Config), + DynamoConfig0 = ?config(dynamo_config, Config), + DynamoConfig = emqx_utils_maps:deep_merge(DynamoConfig0, Overrides), + emqx_bridge:create(BridgeType, Name, DynamoConfig). + +delete_all_bridges() -> + lists:foreach( + fun(#{name := Name, type := Type}) -> + emqx_bridge:remove(Type, Name) + end, + emqx_bridge:list() + ). + +create_bridge_http(Params) -> + Path = emqx_mgmt_api_test_util:api_path(["bridges"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of + {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; + Error -> Error + end. + +send_message(Config, Payload) -> + Name = ?config(dynamo_name, Config), + BridgeType = ?config(dynamo_bridge_type, Config), + BridgeID = emqx_bridge_resource:bridge_id(BridgeType, Name), + emqx_bridge:send_message(BridgeID, Payload). 
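Editor's note: `send_message/2` above addresses the bridge by id; a bridge id is assembled by `emqx_bridge_resource:bridge_id/2` from the type and name (of the form `<<"Type:Name">>`, to the best of my knowledge of EMQX 5.x). A hedged illustration with a hypothetical bridge name:

```erlang
%% Illustration only (bridge name hypothetical): publishing a message map
%% to a bridge by id, as send_message/2 above does.
send_example() ->
    BridgeID = emqx_bridge_resource:bridge_id(<<"dynamo">>, <<"my_dynamo_bridge">>),
    emqx_bridge:send_message(BridgeID, #{id => <<"0001">>, payload => <<"HELLO">>}).
```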
+
+query_resource(Config, Request) ->
+    Name = ?config(dynamo_name, Config),
+    BridgeType = ?config(dynamo_bridge_type, Config),
+    ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name),
+    emqx_resource:query(ResourceID, Request, #{timeout => 1_000}).
+
+%% create a table, using apps/emqx_bridge_dynamo/priv/dynamo/mqtt_msg.json as the template
+create_table(Config) ->
+    directly_setup_dynamo(),
+    delete_table(Config),
+    ?assertMatch(
+        {ok, _},
+        erlcloud_ddb2:create_table(
+            ?TABLE_BIN,
+            [{<<"id">>, s}],
+            <<"id">>,
+            [{provisioned_throughput, {5, 5}}]
+        )
+    ).
+
+delete_table(_Config) ->
+    erlcloud_ddb2:delete_table(?TABLE_BIN).
+
+setup_dynamo(Config) ->
+    Host = ?GET_CONFIG(host, Config),
+    Port = ?GET_CONFIG(port, Config),
+    erlcloud_ddb2:configure(?ACCESS_KEY_ID, ?SECRET_ACCESS_KEY, Host, Port, ?SCHEMA).
+
+directly_setup_dynamo() ->
+    erlcloud_ddb2:configure(?ACCESS_KEY_ID, ?SECRET_ACCESS_KEY, ?HOST, ?PORT, ?SCHEMA).
+
+directly_query(Query) ->
+    directly_setup_dynamo(),
+    emqx_bridge_dynamo_connector_client:execute(Query, ?TABLE_BIN).
+
+directly_get_payload(Key) ->
+    case directly_query({get_item, {<<"id">>, Key}}) of
+        {ok, Values} ->
+            proplists:get_value(<<"payload">>, Values, {error, {invalid_item, Values}});
+        Error ->
+            Error
+    end.
+
+%%------------------------------------------------------------------------------
+%% Testcases
+%%------------------------------------------------------------------------------
+
+t_setup_via_config_and_publish(Config) ->
+    ?assertNotEqual(undefined, get(aws_config)),
+    create_table(Config),
+    ?assertMatch(
+        {ok, _},
+        create_bridge(Config)
+    ),
+    MsgId = emqx_utils:gen_id(),
+    SentData = #{id => MsgId, payload => ?PAYLOAD},
+    ?check_trace(
+        begin
+            ?wait_async_action(
+                ?assertMatch(
+                    {ok, _}, send_message(Config, SentData)
+                ),
+                #{?snk_kind := dynamo_connector_query_return},
+                10_000
+            ),
+            ?assertMatch(
+                ?PAYLOAD,
+                directly_get_payload(MsgId)
+            ),
+            ok
+        end,
+        fun(Trace0) ->
+            Trace = ?of_kind(dynamo_connector_query_return, Trace0),
+            ?assertMatch([#{result := {ok, _}}], Trace),
+            ok
+        end
+    ),
+    ok.
+
+t_setup_via_http_api_and_publish(Config) ->
+    BridgeType = ?config(dynamo_bridge_type, Config),
+    Name = ?config(dynamo_name, Config),
+    DynamoConfig0 = ?config(dynamo_config, Config),
+    DynamoConfig = DynamoConfig0#{
+        <<"name">> => Name,
+        <<"type">> => BridgeType
+    },
+    ?assertMatch(
+        {ok, _},
+        create_bridge_http(DynamoConfig)
+    ),
+    MsgId = emqx_utils:gen_id(),
+    SentData = #{id => MsgId, payload => ?PAYLOAD},
+    ?check_trace(
+        begin
+            ?wait_async_action(
+                ?assertMatch(
+                    {ok, _}, send_message(Config, SentData)
+                ),
+                #{?snk_kind := dynamo_connector_query_return},
+                10_000
+            ),
+            ?assertMatch(
+                ?PAYLOAD,
+                directly_get_payload(MsgId)
+            ),
+            ok
+        end,
+        fun(Trace0) ->
+            Trace = ?of_kind(dynamo_connector_query_return, Trace0),
+            ?assertMatch([#{result := {ok, _}}], Trace),
+            ok
+        end
+    ),
+    ok.
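Editor's note: the helpers above boil down to plain `erlcloud_ddb2` calls; a condensed sketch of the round-trip they perform (endpoint, table, and item values are the suite's test fixtures, not production settings):

```erlang
%% Sketch: the direct erlcloud_ddb2 round-trip behind create_table/1 and
%% directly_get_payload/1 above, against the compose "dynamo" service.
direct_roundtrip() ->
    erlcloud_ddb2:configure("root", "public", "dynamo", 8000, "http://"),
    {ok, _} = erlcloud_ddb2:create_table(
        <<"mqtt">>, [{<<"id">>, s}], <<"id">>, [{provisioned_throughput, {5, 5}}]
    ),
    {ok, _} = erlcloud_ddb2:put_item(
        <<"mqtt">>, [{<<"id">>, <<"k">>}, {<<"payload">>, <<"v">>}]
    ),
    erlcloud_ddb2:get_item(<<"mqtt">>, {<<"id">>, <<"k">>}).
```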
+ +t_get_status(Config) -> + {{ok, _}, {ok, _}} = + ?wait_async_action( + create_bridge(Config), + #{?snk_kind := resource_connected_enter}, + 20_000 + ), + + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + + Name = ?config(dynamo_name, Config), + BridgeType = ?config(dynamo_bridge_type, Config), + ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), + + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceID)), + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + case emqx_resource_manager:health_check(ResourceID) of + {ok, Status} when Status =:= disconnected orelse Status =:= connecting -> + ok; + {error, timeout} -> + ok; + Other -> + ?assert( + false, lists:flatten(io_lib:format("invalid health check result:~p~n", [Other])) + ) + end + end), + ok. + +t_write_failure(Config) -> + ProxyName = ?config(proxy_name, Config), + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + {{ok, _}, {ok, _}} = + ?wait_async_action( + create_bridge(Config), + #{?snk_kind := resource_connected_enter}, + 20_000 + ), + SentData = #{id => emqx_utils:gen_id(), payload => ?PAYLOAD}, + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ?assertMatch( + {error, {resource_error, #{reason := timeout}}}, send_message(Config, SentData) + ) + end), + ok. + +t_simple_query(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + Request = {get_item, {<<"id">>, <<"not_exists">>}}, + Result = query_resource(Config, Request), + case ?GET_CONFIG(batch_size, Config) of + ?BATCH_SIZE -> + ?assertMatch({error, {unrecoverable_error, {invalid_request, _}}}, Result); + 1 -> + ?assertMatch({ok, []}, Result) + end, + ok. + +t_missing_data(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + Result = send_message(Config, #{}), + ?assertMatch({error, {unrecoverable_error, {invalid_request, _}}}, Result), + ok. + +t_bad_parameter(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + Request = {insert_item, bad_parameter}, + Result = query_resource(Config, Request), + ?assertMatch({error, {unrecoverable_error, {invalid_request, _}}}, Result), + ok. + +to_bin(List) when is_list(List) -> + unicode:characters_to_binary(List, utf8); +to_bin(Bin) when is_binary(Bin) -> + Bin. diff --git a/apps/emqx_bridge_gcp_pubsub/BSL.txt b/apps/emqx_bridge_gcp_pubsub/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_gcp_pubsub/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. 
+ +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. 
diff --git a/apps/emqx_bridge_gcp_pubsub/README.md b/apps/emqx_bridge_gcp_pubsub/README.md
new file mode 100644
index 000000000..e33c5ab15
--- /dev/null
+++ b/apps/emqx_bridge_gcp_pubsub/README.md
@@ -0,0 +1,36 @@
+# EMQX GCP Pub/Sub Bridge
+
+[Google Cloud Pub/Sub](https://cloud.google.com/pubsub) is a messaging service
+provided by Google Cloud Platform (GCP).
+
+This application connects EMQX to GCP Pub/Sub.
+Users can create a rule and easily ingest IoT data into GCP Pub/Sub by leveraging
+[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html).
+
+
+# Documentation
+
+- Refer to [Ingest data into GCP Pub/Sub](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-gcp-pubsub.html)
+  for how to use the EMQX dashboard to ingest IoT data into GCP Pub/Sub.
+
+- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
+  for an introduction to the EMQX rules engine.
+
+
+# HTTP APIs
+
+- Several APIs are provided for bridge management, including creating,
+  updating, fetching, stopping or restarting, and listing bridges.
+
+  Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges)
+  for more detailed information.
+
+
+# Contributing
+
+Please see our [contributing.md](../../CONTRIBUTING.md).
+
+
+# License
+
+EMQ Business Source License 1.1; see [LICENSE](BSL.txt).
diff --git a/apps/emqx_bridge_gcp_pubsub/rebar.config b/apps/emqx_bridge_gcp_pubsub/rebar.config
new file mode 100644
index 000000000..2fd264fc0
--- /dev/null
+++ b/apps/emqx_bridge_gcp_pubsub/rebar.config
@@ -0,0 +1,10 @@
+%% -*- mode: erlang; -*-
+{erl_opts, [debug_info]}.
+{deps, [ {emqx_connector, {path, "../../apps/emqx_connector"}}
+       , {emqx_resource, {path, "../../apps/emqx_resource"}}
+       , {emqx_bridge, {path, "../../apps/emqx_bridge"}}
+       ]}.
+
+{shell, [
+    {apps, [emqx_bridge_gcp_pubsub]}
+]}.
diff --git a/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.app.src b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.app.src
new file mode 100644
index 000000000..2b3d359d3
--- /dev/null
+++ b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.app.src
@@ -0,0 +1,13 @@
+{application, emqx_bridge_gcp_pubsub, [
+    {description, "EMQX Enterprise GCP Pub/Sub Bridge"},
+    {vsn, "0.1.1"},
+    {registered, []},
+    {applications, [
+        kernel,
+        stdlib,
+        ehttpc
+    ]},
+    {env, []},
+    {modules, []},
+    {links, []}
+]}.
diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_gcp_pubsub.erl b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.erl
similarity index 86%
rename from lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_gcp_pubsub.erl
rename to apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.erl
index 760aba9e1..70109a0ea 100644
--- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_gcp_pubsub.erl
+++ b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.erl
@@ -2,9 +2,8 @@
 %% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
 %%--------------------------------------------------------------------
 
--module(emqx_ee_bridge_gcp_pubsub).
+-module(emqx_bridge_gcp_pubsub).
 
--include_lib("emqx_bridge/include/emqx_bridge.hrl").
 -include_lib("typerefl/include/types.hrl").
 -include_lib("hocon/include/hoconsc.hrl").
@@ -50,7 +49,7 @@ fields(bridge_config) -> sc( emqx_schema:duration_ms(), #{ - default => "15s", + default => <<"15s">>, desc => ?DESC("connect_timeout") } )}, @@ -84,7 +83,8 @@ fields(bridge_config) -> emqx_schema:duration_ms(), #{ required => false, - default => "15s", + deprecated => {since, "e5.0.1"}, + default => <<"15s">>, desc => ?DESC("request_timeout") } )}, @@ -124,7 +124,7 @@ fields(bridge_config) -> )} ]; fields("get") -> - emqx_bridge_schema:metrics_status_fields() ++ fields("post"); + emqx_bridge_schema:status_fields() ++ fields("post"); fields("post") -> [type_field(), name_field() | fields("config")]; fields("put") -> @@ -145,39 +145,35 @@ conn_bridge_examples(Method) -> } ]. -values(get) -> - maps:merge(values(post), ?METRICS_EXAMPLE); -values(post) -> +values(_Method) -> #{ - <<"pubsub_topic">> => <<"mytopic">>, - <<"service_account_json">> => + pubsub_topic => <<"mytopic">>, + service_account_json => #{ - <<"auth_provider_x509_cert_url">> => + auth_provider_x509_cert_url => <<"https://www.googleapis.com/oauth2/v1/certs">>, - <<"auth_uri">> => + auth_uri => <<"https://accounts.google.com/o/oauth2/auth">>, - <<"client_email">> => + client_email => <<"test@myproject.iam.gserviceaccount.com">>, - <<"client_id">> => <<"123812831923812319190">>, - <<"client_x509_cert_url">> => + client_id => <<"123812831923812319190">>, + client_x509_cert_url => << "https://www.googleapis.com/robot/v1/" "metadata/x509/test%40myproject.iam.gserviceaccount.com" >>, - <<"private_key">> => + private_key => << "-----BEGIN PRIVATE KEY-----\n" "MIIEvQI..." >>, - <<"private_key_id">> => <<"kid">>, - <<"project_id">> => <<"myproject">>, - <<"token_uri">> => + private_key_id => <<"kid">>, + project_id => <<"myproject">>, + token_uri => <<"https://oauth2.googleapis.com/token">>, - <<"type">> => <<"service_account">> + type => <<"service_account">> } - }; -values(put) -> - values(post). + }. %%------------------------------------------------------------------------------------------------- %% Helper fns diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_gcp_pubsub.erl b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_connector.erl similarity index 77% rename from lib-ee/emqx_ee_connector/src/emqx_ee_connector_gcp_pubsub.erl rename to apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_connector.erl index 37d193edf..be5e56e85 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_gcp_pubsub.erl +++ b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_connector.erl @@ -2,7 +2,7 @@ %% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- --module(emqx_ee_connector_gcp_pubsub). +-module(emqx_bridge_gcp_pubsub_connector). -behaviour(emqx_resource). @@ -26,24 +26,22 @@ ]). -export([reply_delegator/3]). --type bridge_id() :: binary(). -type jwt_worker() :: binary(). --type service_account_json() :: emqx_ee_bridge_gcp_pubsub:service_account_json(). +-type service_account_json() :: emqx_bridge_gcp_pubsub:service_account_json(). -type config() :: #{ connect_timeout := emqx_schema:duration_ms(), max_retries := non_neg_integer(), pubsub_topic := binary(), - request_timeout := emqx_schema:duration_ms(), + resource_opts := #{request_timeout := emqx_schema:duration_ms(), any() => term()}, service_account_json := service_account_json(), any() => term() }. 
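Editor's note: while reading the renamed connector below, it helps to keep the wire format in mind, which this change does not alter. `encode_payload/2` and `to_pubsub_request/1` further down build a JSON body of base64-encoded messages, POSTed to the `:publish` endpoint assembled by `publish_path/1`. A minimal sketch of the body construction (the payload value is illustrative):

```erlang
%% Sketch (illustrative payload): the request body shape produced by
%% encode_payload/2 and to_pubsub_request/1, POSTed to
%% /v1/projects/<project>/topics/<topic>:publish
body_example() ->
    Payloads = [#{data => base64:encode(<<"hello">>)}],
    emqx_utils_json:encode(#{messages => Payloads}).
```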
-type state() :: #{ connect_timeout := timer:time(), - instance_id := manager_id(), jwt_worker_id := jwt_worker(), max_retries := non_neg_integer(), payload_template := emqx_plugin_libs_rule:tmpl_token(), - pool_name := atom(), + pool_name := binary(), project_id := binary(), pubsub_topic := binary(), request_timeout := timer:time() @@ -58,36 +56,35 @@ %% emqx_resource API %%------------------------------------------------------------------------------------------------- -%% TODO: check is_buffer_supported() -> false. callback_mode() -> async_if_possible. --spec on_start(manager_id(), config()) -> {ok, state()} | {error, term()}. +-spec on_start(resource_id(), config()) -> {ok, state()} | {error, term()}. on_start( - InstanceId, + ResourceId, #{ connect_timeout := ConnectTimeout, max_retries := MaxRetries, payload_template := PayloadTemplate, pool_size := PoolSize, pubsub_topic := PubSubTopic, - request_timeout := RequestTimeout + resource_opts := #{request_timeout := RequestTimeout} } = Config ) -> ?SLOG(info, #{ msg => "starting_gcp_pubsub_bridge", - connector => InstanceId, + connector => ResourceId, config => Config }), %% emulating the emulator behavior %% https://cloud.google.com/pubsub/docs/emulator HostPort = os:getenv("PUBSUB_EMULATOR_HOST", "pubsub.googleapis.com:443"), - {Host, Port} = emqx_schema:parse_server(HostPort, #{default_port => 443}), + #{hostname := Host, port := Port} = emqx_schema:parse_server(HostPort, #{default_port => 443}), PoolType = random, Transport = tls, TransportOpts = emqx_tls_lib:to_client_opts(#{enable => true, verify => verify_none}), - NTransportOpts = emqx_misc:ipv6_probe(TransportOpts), + NTransportOpts = emqx_utils:ipv6_probe(TransportOpts), PoolOpts = [ {host, Host}, {port, Port}, @@ -102,15 +99,13 @@ on_start( #{ jwt_worker_id := JWTWorkerId, project_id := ProjectId - } = ensure_jwt_worker(InstanceId, Config), - PoolName = emqx_plugin_libs_pool:pool_name(InstanceId), + } = ensure_jwt_worker(ResourceId, Config), State = #{ connect_timeout => ConnectTimeout, - instance_id => InstanceId, jwt_worker_id => JWTWorkerId, max_retries => MaxRetries, payload_template => emqx_plugin_libs_rule:preproc_tmpl(PayloadTemplate), - pool_name => PoolName, + pool_name => ResourceId, project_id => ProjectId, pubsub_topic => PubSubTopic, request_timeout => RequestTimeout @@ -118,45 +113,42 @@ on_start( ?tp( gcp_pubsub_on_start_before_starting_pool, #{ - instance_id => InstanceId, - pool_name => PoolName, + resource_id => ResourceId, + pool_name => ResourceId, pool_opts => PoolOpts } ), - ?tp(gcp_pubsub_starting_ehttpc_pool, #{pool_name => PoolName}), - case ehttpc_sup:start_pool(PoolName, PoolOpts) of + ?tp(gcp_pubsub_starting_ehttpc_pool, #{pool_name => ResourceId}), + case ehttpc_sup:start_pool(ResourceId, PoolOpts) of {ok, _} -> {ok, State}; {error, {already_started, _}} -> - ?tp(gcp_pubsub_ehttpc_pool_already_started, #{pool_name => PoolName}), + ?tp(gcp_pubsub_ehttpc_pool_already_started, #{pool_name => ResourceId}), {ok, State}; {error, Reason} -> ?tp(gcp_pubsub_ehttpc_pool_start_failure, #{ - pool_name => PoolName, + pool_name => ResourceId, reason => Reason }), {error, Reason} end. --spec on_stop(manager_id(), state()) -> ok | {error, term()}. +-spec on_stop(resource_id(), state()) -> ok | {error, term()}. 
on_stop( - InstanceId, - _State = #{ - jwt_worker_id := JWTWorkerId, - pool_name := PoolName - } + ResourceId, + _State = #{jwt_worker_id := JWTWorkerId} ) -> - ?tp(gcp_pubsub_stop, #{instance_id => InstanceId, jwt_worker_id => JWTWorkerId}), + ?tp(gcp_pubsub_stop, #{resource_id => ResourceId, jwt_worker_id => JWTWorkerId}), ?SLOG(info, #{ msg => "stopping_gcp_pubsub_bridge", - connector => InstanceId + connector => ResourceId }), emqx_connector_jwt_sup:ensure_worker_deleted(JWTWorkerId), - emqx_connector_jwt:delete_jwt(?JWT_TABLE, InstanceId), - ehttpc_sup:stop_pool(PoolName). + emqx_connector_jwt:delete_jwt(?JWT_TABLE, ResourceId), + ehttpc_sup:stop_pool(ResourceId). -spec on_query( - bridge_id(), + resource_id(), {send_message, map()}, state() ) -> @@ -164,32 +156,32 @@ on_stop( | {ok, status_code(), headers(), body()} | {error, {recoverable_error, term()}} | {error, term()}. -on_query(BridgeId, {send_message, Selected}, State) -> +on_query(ResourceId, {send_message, Selected}, State) -> Requests = [{send_message, Selected}], ?TRACE( "QUERY_SYNC", "gcp_pubsub_received", - #{requests => Requests, connector => BridgeId, state => State} + #{requests => Requests, connector => ResourceId, state => State} ), - do_send_requests_sync(State, Requests, BridgeId). + do_send_requests_sync(State, Requests, ResourceId). -spec on_query_async( - bridge_id(), + resource_id(), {send_message, map()}, {ReplyFun :: function(), Args :: list()}, state() -) -> ok. -on_query_async(BridgeId, {send_message, Selected}, ReplyFunAndArgs, State) -> +) -> {ok, pid()}. +on_query_async(ResourceId, {send_message, Selected}, ReplyFunAndArgs, State) -> Requests = [{send_message, Selected}], ?TRACE( "QUERY_ASYNC", "gcp_pubsub_received", - #{requests => Requests, connector => BridgeId, state => State} + #{requests => Requests, connector => ResourceId, state => State} ), - do_send_requests_async(State, Requests, ReplyFunAndArgs, BridgeId). + do_send_requests_async(State, Requests, ReplyFunAndArgs, ResourceId). -spec on_batch_query( - bridge_id(), + resource_id(), [{send_message, map()}], state() ) -> @@ -197,35 +189,31 @@ on_query_async(BridgeId, {send_message, Selected}, ReplyFunAndArgs, State) -> | {ok, status_code(), headers(), body()} | {error, {recoverable_error, term()}} | {error, term()}. -on_batch_query(BridgeId, Requests, State) -> +on_batch_query(ResourceId, Requests, State) -> ?TRACE( "QUERY_SYNC", "gcp_pubsub_received", - #{requests => Requests, connector => BridgeId, state => State} + #{requests => Requests, connector => ResourceId, state => State} ), - do_send_requests_sync(State, Requests, BridgeId). + do_send_requests_sync(State, Requests, ResourceId). -spec on_batch_query_async( - bridge_id(), + resource_id(), [{send_message, map()}], {ReplyFun :: function(), Args :: list()}, state() -) -> ok. -on_batch_query_async(BridgeId, Requests, ReplyFunAndArgs, State) -> +) -> {ok, pid()}. +on_batch_query_async(ResourceId, Requests, ReplyFunAndArgs, State) -> ?TRACE( "QUERY_ASYNC", "gcp_pubsub_received", - #{requests => Requests, connector => BridgeId, state => State} + #{requests => Requests, connector => ResourceId, state => State} ), - do_send_requests_async(State, Requests, ReplyFunAndArgs, BridgeId). + do_send_requests_async(State, Requests, ReplyFunAndArgs, ResourceId). --spec on_get_status(manager_id(), state()) -> connected | disconnected. 
-on_get_status(InstanceId, State) -> - #{ - connect_timeout := Timeout, - pool_name := PoolName - } = State, - case do_get_status(InstanceId, PoolName, Timeout) of +-spec on_get_status(resource_id(), state()) -> connected | disconnected. +on_get_status(ResourceId, #{connect_timeout := Timeout} = State) -> + case do_get_status(ResourceId, Timeout) of true -> connected; false -> @@ -240,14 +228,13 @@ on_get_status(InstanceId, State) -> %% Helper fns %%------------------------------------------------------------------------------------------------- --spec ensure_jwt_worker(manager_id(), config()) -> +-spec ensure_jwt_worker(resource_id(), config()) -> #{ jwt_worker_id := jwt_worker(), project_id := binary() }. -ensure_jwt_worker(InstanceId, #{ - service_account_json := ServiceAccountJSON, - pubsub_topic := PubSubTopic +ensure_jwt_worker(ResourceId, #{ + service_account_json := ServiceAccountJSON }) -> #{ project_id := ProjectId, @@ -261,7 +248,7 @@ ensure_jwt_worker(InstanceId, #{ Alg = <<"RS256">>, Config = #{ private_key => PrivateKeyPEM, - resource_id => InstanceId, + resource_id => ResourceId, expiration => ExpirationMS, table => ?JWT_TABLE, iss => ServiceAccountEmail, @@ -271,20 +258,14 @@ ensure_jwt_worker(InstanceId, #{ alg => Alg }, - JWTWorkerId = <<"gcp_pubsub_jwt_worker:", InstanceId/binary>>, + JWTWorkerId = <<"gcp_pubsub_jwt_worker:", ResourceId/binary>>, Worker = case emqx_connector_jwt_sup:ensure_worker_present(JWTWorkerId, Config) of {ok, Worker0} -> Worker0; Error -> - ?tp( - gcp_pubsub_bridge_jwt_worker_failed_to_start, - #{instance_id => InstanceId, reason => Error} - ), - ?SLOG(error, #{ - msg => "failed_to_start_gcp_pubsub_jwt_worker", - instance_id => InstanceId, - pubsub_topic => PubSubTopic, + ?tp(error, "gcp_pubsub_bridge_jwt_worker_failed_to_start", #{ + connector => ResourceId, reason => Error }), _ = emqx_connector_jwt_sup:ensure_worker_deleted(JWTWorkerId), @@ -298,30 +279,18 @@ ensure_jwt_worker(InstanceId, #{ %% produced by the worker. receive {Ref, token_created} -> - ?tp(gcp_pubsub_bridge_jwt_created, #{resource_id => InstanceId}), + ?tp(gcp_pubsub_bridge_jwt_created, #{resource_id => ResourceId}), demonitor(MRef, [flush]), ok; {'DOWN', MRef, process, Worker, Reason} -> - ?tp( - gcp_pubsub_bridge_jwt_worker_failed_to_start, - #{ - resource_id => InstanceId, - reason => Reason - } - ), - ?SLOG(error, #{ - msg => "gcp_pubsub_bridge_jwt_worker_failed_to_start", - connector => InstanceId, + ?tp(error, "gcp_pubsub_bridge_jwt_worker_failed_to_start", #{ + connector => ResourceId, reason => Reason }), _ = emqx_connector_jwt_sup:ensure_worker_deleted(JWTWorkerId), throw(failed_to_start_jwt_worker) after 10_000 -> - ?tp(gcp_pubsub_bridge_jwt_timeout, #{resource_id => InstanceId}), - ?SLOG(warning, #{ - msg => "gcp_pubsub_bridge_jwt_timeout", - connector => InstanceId - }), + ?tp(warning, "gcp_pubsub_bridge_jwt_timeout", #{connector => ResourceId}), demonitor(MRef, [flush]), _ = emqx_connector_jwt_sup:ensure_worker_deleted(JWTWorkerId), throw(timeout_creating_jwt) @@ -335,14 +304,14 @@ ensure_jwt_worker(InstanceId, #{ encode_payload(_State = #{payload_template := PayloadTemplate}, Selected) -> Interpolated = case PayloadTemplate of - [] -> emqx_json:encode(Selected); + [] -> emqx_utils_json:encode(Selected); _ -> emqx_plugin_libs_rule:proc_tmpl(PayloadTemplate, Selected) end, #{data => base64:encode(Interpolated)}. -spec to_pubsub_request([#{data := binary()}]) -> binary(). to_pubsub_request(Payloads) -> - emqx_json:encode(#{messages => Payloads}). 
+ emqx_utils_json:encode(#{messages => Payloads}). -spec publish_path(state()) -> binary(). publish_path( @@ -354,8 +323,8 @@ publish_path( <<"/v1/projects/", ProjectId/binary, "/topics/", PubSubTopic/binary, ":publish">>. -spec get_jwt_authorization_header(resource_id()) -> [{binary(), binary()}]. -get_jwt_authorization_header(InstanceId) -> - case emqx_connector_jwt:lookup_jwt(?JWT_TABLE, InstanceId) of +get_jwt_authorization_header(ResourceId) -> + case emqx_connector_jwt:lookup_jwt(?JWT_TABLE, ResourceId) of %% Since we synchronize the JWT creation during resource start %% (see `on_start/2'), this will be always be populated. {ok, JWT} -> @@ -374,7 +343,6 @@ get_jwt_authorization_header(InstanceId) -> do_send_requests_sync(State, Requests, ResourceId) -> #{ pool_name := PoolName, - instance_id := InstanceId, max_retries := MaxRetries, request_timeout := RequestTimeout } = State, @@ -382,12 +350,11 @@ do_send_requests_sync(State, Requests, ResourceId) -> gcp_pubsub_bridge_do_send_requests, #{ query_mode => sync, - instance_id => InstanceId, resource_id => ResourceId, requests => Requests } ), - Headers = get_jwt_authorization_header(InstanceId), + Headers = get_jwt_authorization_header(ResourceId), Payloads = lists:map( fun({send_message, Selected}) -> @@ -496,23 +463,21 @@ do_send_requests_sync(State, Requests, ResourceId) -> [{send_message, map()}], {ReplyFun :: function(), Args :: list()}, resource_id() -) -> ok. +) -> {ok, pid()}. do_send_requests_async(State, Requests, ReplyFunAndArgs, ResourceId) -> #{ pool_name := PoolName, - instance_id := InstanceId, request_timeout := RequestTimeout } = State, ?tp( gcp_pubsub_bridge_do_send_requests, #{ query_mode => async, - instance_id => InstanceId, resource_id => ResourceId, requests => Requests } ), - Headers = get_jwt_authorization_header(InstanceId), + Headers = get_jwt_authorization_header(ResourceId), Payloads = lists:map( fun({send_message, Selected}) -> @@ -531,7 +496,8 @@ do_send_requests_async(State, Requests, ReplyFunAndArgs, ResourceId) -> Request, RequestTimeout, {fun ?MODULE:reply_delegator/3, [ResourceId, ReplyFunAndArgs]} - ). + ), + {ok, Worker}. -spec reply_delegator( resource_id(), @@ -569,9 +535,9 @@ reply_delegator(_ResourceId, ReplyFunAndArgs, Result) -> emqx_resource:apply_reply_fun(ReplyFunAndArgs, Result) end. --spec do_get_status(manager_id(), atom(), timer:time()) -> boolean(). -do_get_status(InstanceId, PoolName, Timeout) -> - Workers = [Worker || {_WorkerName, Worker} <- ehttpc:workers(PoolName)], +-spec do_get_status(resource_id(), timer:time()) -> boolean(). 
+do_get_status(ResourceId, Timeout) -> + Workers = [Worker || {_WorkerName, Worker} <- ehttpc:workers(ResourceId)], DoPerWorker = fun(Worker) -> case ehttpc:health_check(Worker, Timeout) of @@ -580,14 +546,14 @@ do_get_status(InstanceId, PoolName, Timeout) -> {error, Reason} -> ?SLOG(error, #{ msg => "ehttpc_health_check_failed", - instance_id => InstanceId, + connector => ResourceId, reason => Reason, worker => Worker }), false end end, - try emqx_misc:pmap(DoPerWorker, Workers, Timeout) of + try emqx_utils:pmap(DoPerWorker, Workers, Timeout) of [_ | _] = Status -> lists:all(fun(St) -> St =:= true end, Status); [] -> diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_gcp_pubsub_SUITE.erl b/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_SUITE.erl similarity index 86% rename from lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_gcp_pubsub_SUITE.erl rename to apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_SUITE.erl index a8ba91175..55527bf1f 100644 --- a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_gcp_pubsub_SUITE.erl +++ b/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_SUITE.erl @@ -2,7 +2,7 @@ %% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- --module(emqx_ee_bridge_gcp_pubsub_SUITE). +-module(emqx_bridge_gcp_pubsub_SUITE). -compile(nowarn_export_all). -compile(export_all). @@ -38,18 +38,12 @@ groups() -> {group, sync_query}, {group, async_query} ], - QueueGroups = [ - {group, queue_enabled}, - {group, queue_disabled} - ], ResourceGroups = [{group, gcp_pubsub}], [ {with_batch, SynchronyGroups}, {without_batch, SynchronyGroups}, - {sync_query, QueueGroups}, - {async_query, QueueGroups}, - {queue_enabled, ResourceGroups}, - {queue_disabled, ResourceGroups}, + {sync_query, ResourceGroups}, + {async_query, ResourceGroups}, {gcp_pubsub, MatrixTCs} ]. @@ -76,22 +70,13 @@ init_per_suite(Config) -> ok = emqx_connector_test_helpers:start_apps([emqx_resource, emqx_bridge, emqx_rule_engine]), {ok, _} = application:ensure_all_started(emqx_connector), emqx_mgmt_api_test_util:init_suite(), - HTTPHost = "localhost", - HTTPPort = 56000, - HostPort = HTTPHost ++ ":" ++ integer_to_list(HTTPPort), - true = os:putenv("PUBSUB_EMULATOR_HOST", HostPort), - [ - {http_host, HTTPHost}, - {http_port, HTTPPort} - | Config - ]. + Config. end_per_suite(_Config) -> emqx_mgmt_api_test_util:end_suite(), ok = emqx_common_test_helpers:stop_apps([emqx_conf]), ok = emqx_connector_test_helpers:stop_apps([emqx_bridge, emqx_resource, emqx_rule_engine]), _ = application:stop(emqx_connector), - os:unsetenv("PUBSUB_EMULATOR_HOST"), ok. init_per_group(sync_query, Config) -> @@ -99,13 +84,9 @@ init_per_group(sync_query, Config) -> init_per_group(async_query, Config) -> [{query_mode, async} | Config]; init_per_group(with_batch, Config) -> - [{enable_batch, true} | Config]; + [{batch_size, 100} | Config]; init_per_group(without_batch, Config) -> - [{enable_batch, false} | Config]; -init_per_group(queue_enabled, Config) -> - [{enable_queue, true} | Config]; -init_per_group(queue_disabled, Config) -> - [{enable_queue, false} | Config]; + [{batch_size, 1} | Config]; init_per_group(_Group, Config) -> Config. 
@@ -118,29 +99,31 @@ end_per_group(_Group, _Config) -> init_per_testcase(TestCase, Config0) when TestCase =:= t_publish_success_batch -> - case ?config(enable_batch, Config0) of - true -> - {ok, _} = start_echo_http_server(), + ct:timetrap({seconds, 30}), + case ?config(batch_size, Config0) of + 1 -> + [{skip_due_to_no_batching, true}]; + _ -> delete_all_bridges(), Tid = install_telemetry_handler(TestCase), Config = generate_config(Config0), put(telemetry_table, Tid), - [{telemetry_table, Tid} | Config]; - false -> - {skip, no_batching} + {ok, HttpServer} = start_echo_http_server(), + [{telemetry_table, Tid}, {http_server, HttpServer} | Config] end; init_per_testcase(TestCase, Config0) -> - {ok, _} = start_echo_http_server(), + ct:timetrap({seconds, 30}), + {ok, HttpServer} = start_echo_http_server(), delete_all_bridges(), Tid = install_telemetry_handler(TestCase), Config = generate_config(Config0), put(telemetry_table, Tid), - [{telemetry_table, Tid} | Config]. + [{telemetry_table, Tid}, {http_server, HttpServer} | Config]. end_per_testcase(_TestCase, _Config) -> ok = snabbkaffe:stop(), delete_all_bridges(), - ok = emqx_connector_web_hook_server:stop(), + ok = stop_echo_http_server(), emqx_common_test_helpers:call_janitor(), ok. @@ -189,7 +172,7 @@ create_bridge(Config, GCPPubSubConfigOverrides) -> TypeBin = ?BRIDGE_TYPE_BIN, Name = ?config(gcp_pubsub_name, Config), GCPPubSubConfig0 = ?config(gcp_pubsub_config, Config), - GCPPubSubConfig = emqx_map_lib:deep_merge(GCPPubSubConfig0, GCPPubSubConfigOverrides), + GCPPubSubConfig = emqx_utils_maps:deep_merge(GCPPubSubConfig0, GCPPubSubConfigOverrides), ct:pal("creating bridge: ~p", [GCPPubSubConfig]), Res = emqx_bridge:create(TypeBin, Name, GCPPubSubConfig), ct:pal("bridge creation result: ~p", [Res]), @@ -202,17 +185,21 @@ create_bridge_http(Config, GCPPubSubConfigOverrides) -> TypeBin = ?BRIDGE_TYPE_BIN, Name = ?config(gcp_pubsub_name, Config), GCPPubSubConfig0 = ?config(gcp_pubsub_config, Config), - GCPPubSubConfig = emqx_map_lib:deep_merge(GCPPubSubConfig0, GCPPubSubConfigOverrides), + GCPPubSubConfig = emqx_utils_maps:deep_merge(GCPPubSubConfig0, GCPPubSubConfigOverrides), Params = GCPPubSubConfig#{<<"type">> => TypeBin, <<"name">> => Name}, Path = emqx_mgmt_api_test_util:api_path(["bridges"]), AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + ProbePath = emqx_mgmt_api_test_util:api_path(["bridges_probe"]), + ProbeResult = emqx_mgmt_api_test_util:request_api(post, ProbePath, "", AuthHeader, Params), ct:pal("creating bridge (via http): ~p", [Params]), + ct:pal("probe result: ~p", [ProbeResult]), Res = case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of - {ok, Res0} -> {ok, emqx_json:decode(Res0, [return_maps])}; + {ok, Res0} -> {ok, emqx_utils_json:decode(Res0, [return_maps])}; Error -> Error end, ct:pal("bridge creation result: ~p", [Res]), + ?assertEqual(element(1, ProbeResult), element(1, Res)), Res. create_rule_and_action_http(Config) -> @@ -226,7 +213,7 @@ create_rule_and_action_http(Config) -> Path = emqx_mgmt_api_test_util:api_path(["rules"]), AuthHeader = emqx_mgmt_api_test_util:auth_header_(), case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of - {ok, Res} -> {ok, emqx_json:decode(Res, [return_maps])}; + {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; Error -> Error end. 
@@ -238,7 +225,7 @@ success_http_handler() -> Rep = cowboy_req:reply( 200, #{<<"content-type">> => <<"application/json">>}, - jiffy:encode(#{messageIds => [<<"6058891368195201">>]}), + emqx_utils_json:encode(#{messageIds => [<<"6058891368195201">>]}), Req ), {ok, Rep, State} @@ -246,7 +233,6 @@ success_http_handler() -> start_echo_http_server() -> HTTPHost = "localhost", - HTTPPort = 56000, HTTPPath = <<"/v1/projects/myproject/topics/mytopic:publish">>, ServerSSLOpts = [ @@ -254,14 +240,23 @@ start_echo_http_server() -> {versions, ['tlsv1.2', 'tlsv1.3']}, {ciphers, ["ECDHE-RSA-AES256-GCM-SHA384", "TLS_CHACHA20_POLY1305_SHA256"]} ] ++ certs(), - {ok, _} = emqx_connector_web_hook_server:start_link(HTTPPort, HTTPPath, ServerSSLOpts), + {ok, {HTTPPort, _Pid}} = emqx_connector_web_hook_server:start_link( + random, HTTPPath, ServerSSLOpts + ), ok = emqx_connector_web_hook_server:set_handler(success_http_handler()), + HTTPHost = "localhost", + HostPort = HTTPHost ++ ":" ++ integer_to_list(HTTPPort), + true = os:putenv("PUBSUB_EMULATOR_HOST", HostPort), {ok, #{ - host_port => HTTPHost ++ ":" ++ integer_to_list(HTTPPort), + host_port => HostPort, host => HTTPHost, port => HTTPPort }}. +stop_echo_http_server() -> + os:unsetenv("PUBSUB_EMULATOR_HOST"), + ok = emqx_connector_web_hook_server:stop(). + certs() -> CertsPath = emqx_common_test_helpers:deps_path(emqx, "etc/certs"), [ @@ -271,16 +266,14 @@ certs() -> ]. gcp_pubsub_config(Config) -> - EnableBatch = proplists:get_value(enable_batch, Config, true), QueryMode = proplists:get_value(query_mode, Config, sync), - EnableQueue = proplists:get_value(enable_queue, Config, false), BatchSize = proplists:get_value(batch_size, Config, 100), BatchTime = proplists:get_value(batch_time, Config, <<"20ms">>), PayloadTemplate = proplists:get_value(payload_template, Config, ""), PubSubTopic = proplists:get_value(pubsub_topic, Config, <<"mytopic">>), PipelineSize = proplists:get_value(pipeline_size, Config, 100), ServiceAccountJSON = proplists:get_value(pubsub_topic, Config, generate_service_account_json()), - ServiceAccountJSONStr = emqx_json:encode(ServiceAccountJSON), + ServiceAccountJSONStr = emqx_utils_json:encode(ServiceAccountJSON), GUID = emqx_guid:to_hexstr(emqx_guid:gen()), Name = <<(atom_to_binary(?MODULE))/binary, (GUID)/binary>>, ConfigString = @@ -288,17 +281,15 @@ gcp_pubsub_config(Config) -> "bridges.gcp_pubsub.~s {\n" " enable = true\n" " connect_timeout = 1s\n" - " request_timeout = 1s\n" " service_account_json = ~s\n" " payload_template = ~p\n" " pubsub_topic = ~s\n" " pool_size = 1\n" " pipelining = ~b\n" " resource_opts = {\n" + " request_timeout = 500ms\n" " worker_pool_size = 1\n" - " enable_batch = ~p\n" " query_mode = ~s\n" - " enable_queue = ~p\n" " batch_size = ~b\n" " batch_time = \"~s\"\n" " }\n" @@ -309,9 +300,7 @@ gcp_pubsub_config(Config) -> PayloadTemplate, PubSubTopic, PipelineSize, - EnableBatch, QueryMode, - EnableQueue, BatchSize, BatchTime ] @@ -358,13 +347,13 @@ service_account_json(PrivateKeyPEM) -> metrics_mapping() -> #{ - batching => fun emqx_resource_metrics:batching_get/1, dropped => fun emqx_resource_metrics:dropped_get/1, + dropped_expired => fun emqx_resource_metrics:dropped_expired_get/1, dropped_other => fun emqx_resource_metrics:dropped_other_get/1, dropped_queue_full => fun emqx_resource_metrics:dropped_queue_full_get/1, - dropped_queue_not_enabled => fun emqx_resource_metrics:dropped_queue_not_enabled_get/1, dropped_resource_not_found => fun emqx_resource_metrics:dropped_resource_not_found_get/1, 
dropped_resource_stopped => fun emqx_resource_metrics:dropped_resource_stopped_get/1, + late_reply => fun emqx_resource_metrics:late_reply_get/1, failed => fun emqx_resource_metrics:failed_get/1, inflight => fun emqx_resource_metrics:inflight_get/1, matched => fun emqx_resource_metrics:matched_get/1, @@ -473,7 +462,7 @@ assert_valid_request_headers(Headers, ServiceAccountJSON) -> end. assert_valid_request_body(Body) -> - BodyMap = emqx_json:decode(Body, [return_maps]), + BodyMap = emqx_utils_json:decode(Body, [return_maps]), ?assertMatch(#{<<"messages">> := [_ | _]}, BodyMap), #{<<"messages">> := Messages} = BodyMap, lists:map( @@ -481,7 +470,7 @@ assert_valid_request_body(Body) -> ?assertMatch(#{<<"data">> := <<_/binary>>}, Msg), #{<<"data">> := Content64} = Msg, Content = base64:decode(Content64), - Decoded = emqx_json:decode(Content, [return_maps]), + Decoded = emqx_utils_json:decode(Content, [return_maps]), ct:pal("decoded payload: ~p", [Decoded]), ?assert(is_map(Decoded)), Decoded @@ -524,6 +513,32 @@ install_telemetry_handler(TestCase) -> end), Tid. +wait_until_gauge_is(GaugeName, ExpectedValue, Timeout) -> + Events = receive_all_events(GaugeName, Timeout), + case length(Events) > 0 andalso lists:last(Events) of + #{measurements := #{gauge_set := ExpectedValue}} -> + ok; + #{measurements := #{gauge_set := Value}} -> + ct:pal("events: ~p", [Events]), + ct:fail( + "gauge ~p didn't reach expected value ~p; last value: ~p", + [GaugeName, ExpectedValue, Value] + ); + false -> + ct:pal("no ~p gauge events received!", [GaugeName]) + end. + +receive_all_events(EventName, Timeout) -> + receive_all_events(EventName, Timeout, []). + +receive_all_events(EventName, Timeout, Acc) -> + receive + {telemetry, #{name := [_, _, EventName]} = Event} -> + receive_all_events(EventName, Timeout, [Event | Acc]) + after Timeout -> + lists:reverse(Acc) + end. + wait_telemetry_event(TelemetryTable, EventName, ResourceId) -> wait_telemetry_event(TelemetryTable, EventName, ResourceId, #{timeout => 5_000, n_events => 1}). @@ -600,9 +615,10 @@ t_publish_success(Config) -> ResourceId, #{n_events => ExpectedInflightEvents, timeout => 5_000} ), + wait_until_gauge_is(queuing, 0, 500), + wait_until_gauge_is(inflight, 0, 500), assert_metrics( #{ - batching => 0, dropped => 0, failed => 0, inflight => 0, @@ -649,9 +665,10 @@ t_publish_success_local_topic(Config) -> ResourceId, #{n_events => ExpectedInflightEvents, timeout => 5_000} ), + wait_until_gauge_is(queuing, 0, 500), + wait_until_gauge_is(inflight, 0, 500), assert_metrics( #{ - batching => 0, dropped => 0, failed => 0, inflight => 0, @@ -669,7 +686,7 @@ t_create_via_http(Config) -> create_bridge_http(Config), fun(Res, Trace) -> ?assertMatch({ok, _}, Res), - ?assertMatch([_], ?of_kind(gcp_pubsub_bridge_jwt_created, Trace)), + ?assertMatch([_, _], ?of_kind(gcp_pubsub_bridge_jwt_created, Trace)), ok end ), @@ -736,9 +753,10 @@ t_publish_templated(Config) -> ResourceId, #{n_events => ExpectedInflightEvents, timeout => 5_000} ), + wait_until_gauge_is(queuing, 0, 500), + wait_until_gauge_is(inflight, 0, 500), assert_metrics( #{ - batching => 0, dropped => 0, failed => 0, inflight => 0, @@ -752,6 +770,15 @@ t_publish_templated(Config) -> ok. t_publish_success_batch(Config) -> + case proplists:get_bool(skip_due_to_no_batching, Config) of + true -> + ct:pal("this test case is skipped due to non-applicable config"), + ok; + false -> + test_publish_success_batch(Config) + end. 
+ +test_publish_success_batch(Config) -> ResourceId = ?config(resource_id, Config), ServiceAccountJSON = ?config(service_account_json, Config), TelemetryTable = ?config(telemetry_table, Config), @@ -781,7 +808,7 @@ t_publish_success_batch(Config) -> %% making 1-sized batches. also important to note that the pool %% size for the resource (replayq buffering) must be set to 1 to %% avoid further segmentation of batches. - emqx_misc:pmap(fun emqx:publish/1, Messages), + emqx_utils:pmap(fun emqx:publish/1, Messages), DecodedMessages0 = assert_http_request(ServiceAccountJSON), ?assertEqual(BatchSize, length(DecodedMessages0)), DecodedMessages1 = assert_http_request(ServiceAccountJSON), @@ -803,13 +830,14 @@ t_publish_success_batch(Config) -> ResourceId, #{timeout => 15_000, n_events => NumMessages} ), + wait_until_gauge_is(queuing, 0, _Timeout = 400), + wait_until_gauge_is(inflight, 0, _Timeout = 400), assert_metrics( #{ - batching => 0, dropped => 0, failed => 0, inflight => 0, - matched => NumMessages div BatchSize, + matched => NumMessages, queuing => 0, retried => 0, success => NumMessages @@ -821,7 +849,6 @@ t_publish_success_batch(Config) -> t_not_a_json(Config) -> ?assertMatch( {error, #{ - discarded_errors_count := 0, kind := validation_error, reason := #{exception := {error, {badmap, "not a json"}}}, %% should be censored as it contains secrets @@ -839,7 +866,6 @@ t_not_a_json(Config) -> t_not_of_service_account_type(Config) -> ?assertMatch( {error, #{ - discarded_errors_count := 0, kind := validation_error, reason := {wrong_type, <<"not a service account">>}, %% should be censored as it contains secrets @@ -858,7 +884,6 @@ t_json_missing_fields(Config) -> GCPPubSubConfig0 = ?config(gcp_pubsub_config, Config), ?assertMatch( {error, #{ - discarded_errors_count := 0, kind := validation_error, reason := {missing_keys, [ @@ -891,7 +916,7 @@ t_invalid_private_key(Config) -> #{<<"private_key">> => InvalidPrivateKeyPEM} } ), - #{?snk_kind := gcp_pubsub_bridge_jwt_worker_failed_to_start}, + #{?snk_kind := "gcp_pubsub_bridge_jwt_worker_failed_to_start"}, 20_000 ), Res @@ -902,7 +927,7 @@ t_invalid_private_key(Config) -> [#{reason := Reason}] when Reason =:= noproc orelse Reason =:= {shutdown, {error, empty_key}}, - ?of_kind(gcp_pubsub_bridge_jwt_worker_failed_to_start, Trace) + ?of_kind("gcp_pubsub_bridge_jwt_worker_failed_to_start", Trace) ), ?assertMatch( [#{error := empty_key}], @@ -930,14 +955,14 @@ t_jwt_worker_start_timeout(Config) -> #{<<"private_key">> => InvalidPrivateKeyPEM} } ), - #{?snk_kind := gcp_pubsub_bridge_jwt_timeout}, + #{?snk_kind := "gcp_pubsub_bridge_jwt_timeout"}, 20_000 ), Res end, fun(Res, Trace) -> ?assertMatch({ok, _}, Res), - ?assertMatch([_], ?of_kind(gcp_pubsub_bridge_jwt_timeout, Trace)), + ?assertMatch([_], ?of_kind("gcp_pubsub_bridge_jwt_timeout", Trace)), ok end ), @@ -947,7 +972,13 @@ t_publish_econnrefused(Config) -> ResourceId = ?config(resource_id, Config), %% set pipelining to 1 so that one of the 2 requests is `pending' %% in ehttpc. - {ok, _} = create_bridge(Config, #{<<"pipelining">> => 1}), + {ok, _} = create_bridge( + Config, + #{ + <<"pipelining">> => 1, + <<"resource_opts">> => #{<<"resume_interval">> => <<"15s">>} + } + ), {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config), on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), assert_empty_metrics(ResourceId), @@ -961,7 +992,10 @@ t_publish_timeout(Config) -> %% requests are done separately. 
{ok, _} = create_bridge(Config, #{ <<"pipelining">> => 1, - <<"resource_opts">> => #{<<"batch_size">> => 1} + <<"resource_opts">> => #{ + <<"batch_size">> => 1, + <<"resume_interval">> => <<"15s">> + } }), {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config), on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), @@ -979,7 +1013,7 @@ t_publish_timeout(Config) -> Rep = cowboy_req:reply( 200, #{<<"content-type">> => <<"application/json">>}, - jiffy:encode(#{messageIds => [<<"6058891368195201">>]}), + emqx_utils_json:encode(#{messageIds => [<<"6058891368195201">>]}), Req ), {ok, Rep, State} @@ -988,8 +1022,6 @@ t_publish_timeout(Config) -> do_econnrefused_or_timeout_test(Config, timeout). do_econnrefused_or_timeout_test(Config, Error) -> - EnableQueue = ?config(enable_queue, Config), - QueryMode = ?config(query_mode, Config), ResourceId = ?config(resource_id, Config), TelemetryTable = ?config(telemetry_table, Config), Topic = <<"t/topic">>, @@ -997,15 +1029,8 @@ do_econnrefused_or_timeout_test(Config, Error) -> Message = emqx_message:make(Topic, Payload), ?check_trace( begin - case {QueryMode, Error} of - {sync, _} -> - {_, {ok, _}} = - ?wait_async_action( - emqx:publish(Message), - #{?snk_kind := gcp_pubsub_request_failed, recoverable_error := true}, - 15_000 - ); - {async, econnrefused} -> + case Error of + econnrefused -> %% at the time of writing, async requests %% are never considered expired by ehttpc %% (even if they arrive late, or never @@ -1025,7 +1050,7 @@ do_econnrefused_or_timeout_test(Config, Error) -> }, 15_000 ); - {async, timeout} -> + timeout -> %% at the time of writing, async requests %% are never considered expired by ehttpc %% (even if they arrive late, or never @@ -1043,18 +1068,13 @@ do_econnrefused_or_timeout_test(Config, Error) -> end end, fun(Trace) -> - case {QueryMode, Error} of - {sync, _} -> + case Error of + econnrefused -> ?assertMatch( [#{reason := Error, connector := ResourceId} | _], ?of_kind(gcp_pubsub_request_failed, Trace) ); - {async, econnrefused} -> - ?assertMatch( - [#{reason := Error, connector := ResourceId} | _], - ?of_kind(gcp_pubsub_request_failed, Trace) - ); - {async, timeout} -> + timeout -> ?assertMatch( [_, _ | _], ?of_kind(gcp_pubsub_response, Trace) @@ -1064,80 +1084,45 @@ do_econnrefused_or_timeout_test(Config, Error) -> end ), - case {Error, QueryMode, EnableQueue} of - {_, sync, false} -> - wait_telemetry_event(TelemetryTable, dropped_queue_not_enabled, ResourceId, #{ - timeout => 10_000, - n_events => 1 - }), - assert_metrics( - #{ - batching => 0, - dropped => 1, - dropped_queue_not_enabled => 1, - failed => 0, - inflight => 0, - matched => 1, - queuing => 0, - retried => 0, - success => 0 - }, - ResourceId - ); + case Error of %% apparently, async with disabled queue doesn't mark the %% message as dropped; and since it never considers the %% response expired, this succeeds. - {econnrefused, async, _} -> + econnrefused -> wait_telemetry_event(TelemetryTable, queuing, ResourceId, #{ timeout => 10_000, n_events => 1 }), + %% even waiting, hard to avoid flakiness... simpler to just sleep + %% a bit until stabilization. 
+ ct:sleep(200), CurrentMetrics = current_metrics(ResourceId), RecordedEvents = ets:tab2list(TelemetryTable), ct:pal("telemetry events: ~p", [RecordedEvents]), ?assertMatch( #{ - batching := 0, dropped := Dropped, - failed := 0, + failed := Failed, inflight := Inflight, matched := Matched, queuing := Queueing, retried := 0, success := 0 - } when Matched >= 1 andalso Inflight + Queueing + Dropped =< 2, + } when Matched >= 1 andalso Inflight + Queueing + Dropped + Failed =< 2, CurrentMetrics ); - {timeout, async, _} -> - wait_telemetry_event(TelemetryTable, success, ResourceId, #{ - timeout => 10_000, n_events => 2 - }), + timeout -> + wait_until_gauge_is(inflight, 0, _Timeout = 400), + wait_until_gauge_is(queuing, 0, _Timeout = 400), assert_metrics( #{ - batching => 0, dropped => 0, failed => 0, inflight => 0, matched => 2, queuing => 0, retried => 0, - success => 2 - }, - ResourceId - ); - {_, sync, true} -> - wait_telemetry_event(TelemetryTable, queuing, ResourceId, #{ - timeout => 10_000, n_events => 2 - }), - assert_metrics( - #{ - batching => 0, - dropped => 0, - failed => 0, - inflight => 0, - matched => 1, - queuing => 1, - retried => 0, - success => 0 + success => 0, + late_reply => 2 }, ResourceId ) @@ -1194,7 +1179,7 @@ t_failure_with_body(Config) -> Rep = cowboy_req:reply( 400, #{<<"content-type">> => <<"application/json">>}, - jiffy:encode(#{}), + emqx_utils_json:encode(#{}), Req ), {ok, Rep, State} @@ -1263,8 +1248,6 @@ t_failure_no_body(Config) -> t_unrecoverable_error(Config) -> ResourceId = ?config(resource_id, Config), - TelemetryTable = ?config(telemetry_table, Config), - QueryMode = ?config(query_mode, Config), TestPid = self(), FailureNoBodyHandler = fun(Req0, State) -> @@ -1295,52 +1278,32 @@ t_unrecoverable_error(Config) -> Message = emqx_message:make(Topic, Payload), ?check_trace( {_, {ok, _}} = - case QueryMode of - sync -> - ?wait_async_action( - emqx:publish(Message), - #{?snk_kind := gcp_pubsub_request_failed}, - 5_000 - ); - async -> - ?wait_async_action( - emqx:publish(Message), - #{?snk_kind := gcp_pubsub_response}, - 5_000 - ) - end, + ?wait_async_action( + emqx:publish(Message), + #{?snk_kind := gcp_pubsub_response}, + 5_000 + ), fun(Trace) -> - case QueryMode of - sync -> - ?assertMatch( - [#{reason := killed}], - ?of_kind(gcp_pubsub_request_failed, Trace) - ); - async -> - ?assertMatch( - [#{response := {error, killed}}], - ?of_kind(gcp_pubsub_response, Trace) - ) - end, + ?assertMatch( + [#{response := {error, killed}}], + ?of_kind(gcp_pubsub_response, Trace) + ), ok end ), - wait_telemetry_event(TelemetryTable, failed, ResourceId), - ExpectedInflightEvents = - case QueryMode of - sync -> 1; - async -> 3 - end, - wait_telemetry_event( - TelemetryTable, - inflight, - ResourceId, - #{n_events => ExpectedInflightEvents, timeout => 5_000} - ), + + wait_until_gauge_is(queuing, 0, _Timeout = 400), + %% TODO: once temporary clause in + %% `emqx_resource_buffer_worker:is_unrecoverable_error' + %% that marks all unknown errors as unrecoverable is + %% removed, this inflight should be 1, because we retry if + %% the worker is killed. + wait_until_gauge_is(inflight, 0, _Timeout = 400), assert_metrics( #{ - batching => 0, dropped => 0, + %% FIXME: see comment above; failed should be 0 + %% and inflight should be 1. 
failed => 1, inflight => 0, matched => 1, @@ -1365,7 +1328,7 @@ t_failed_to_start_jwt_worker(Config) -> fun(Trace) -> ?assertMatch( [#{reason := {error, restarting}}], - ?of_kind(gcp_pubsub_bridge_jwt_worker_failed_to_start, Trace) + ?of_kind("gcp_pubsub_bridge_jwt_worker_failed_to_start", Trace) ), ok end diff --git a/apps/emqx_bridge_hstreamdb/BSL.txt b/apps/emqx_bridge_hstreamdb/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_hstreamdb/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. 
LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_hstreamdb/README.md b/apps/emqx_bridge_hstreamdb/README.md new file mode 100644 index 000000000..520817e82 --- /dev/null +++ b/apps/emqx_bridge_hstreamdb/README.md @@ -0,0 +1,40 @@ +# EMQX HStreamDB Bridge + +[HStreamDB](https://hstream.io/) is a streaming database purpose-built to ingest, +store, process, and analyze massive data streams. It is a modern data infrastructure +that unifies messaging, stream processing, and storage to help get value out of +your data in real time. + +The application is used to connect EMQX and HStreamDB. +Users can create a rule and easily ingest IoT data into HStreamDB by leveraging +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + + +# HTTP APIs + +- Several APIs are provided for bridge management, which include creating, + updating, getting, stopping or restarting, and listing bridges. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) + for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_hstreamdb/src/emqx_bridge_hstreamdb.app.src b/apps/emqx_bridge_hstreamdb/src/emqx_bridge_hstreamdb.app.src new file mode 100644 index 000000000..1cb3742b3 --- /dev/null +++ b/apps/emqx_bridge_hstreamdb/src/emqx_bridge_hstreamdb.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_hstreamdb, [ + {description, "EMQX Enterprise HStreamDB Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_influxdb/BSL.txt b/apps/emqx_bridge_influxdb/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_influxdb/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd.
+Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. 
To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_influxdb/README.md b/apps/emqx_bridge_influxdb/README.md new file mode 100644 index 000000000..fe0f14600 --- /dev/null +++ b/apps/emqx_bridge_influxdb/README.md @@ -0,0 +1,49 @@ +# EMQX InfluxDB Bridge + +[InfluxDB](https://github.com/influxdata/influxdb) is an open-source time-series +database that is optimized for storing, retrieving, and querying large volumes of +time-stamped data. +It is commonly used for monitoring and analysis of metrics, events, and real-time +analytics. +InfluxDB is designed to be fast, efficient, and scalable, and it has a SQL-like +query language that makes it easy to extract insights from time-series data. + +The application is used to connect EMQX and InfluxDB. Users can create a rule and +easily ingest IoT data into InfluxDB by leveraging +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + +# Documentation + +- Refer to [Ingest data into InfluxDB](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-influxdb.html) + for how to use EMQX dashboard to ingest IoT data into InfluxDB. + +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which include creating, + updating, getting, stopping or restarting, and listing bridges. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) for more detailed information. + +- The [Create bridge API doc](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges/paths/~1bridges/post) + lists the required parameters for creating an InfluxDB bridge. + There are two types of InfluxDB API (`v1` and `v2`); please select the right + version for your InfluxDB. Below are several important parameters for `v1`: + - `server`: The IPv4 or IPv6 address or the hostname to connect to. + - `database`: The InfluxDB database name. + - `write_syntax`: Configuration of the InfluxDB line protocol for writing data points. It is a text-based format that provides the measurement, tag set, field set, and timestamp of a data point; placeholders are supported. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). + diff --git a/apps/emqx_bridge_influxdb/docker-ct b/apps/emqx_bridge_influxdb/docker-ct new file mode 100644 index 000000000..ef579c036 --- /dev/null +++ b/apps/emqx_bridge_influxdb/docker-ct @@ -0,0 +1,2 @@ +toxiproxy +influxdb diff --git a/apps/emqx_bridge_influxdb/rebar.config b/apps/emqx_bridge_influxdb/rebar.config new file mode 100644 index 000000000..0b11423c4 --- /dev/null +++ b/apps/emqx_bridge_influxdb/rebar.config @@ -0,0 +1,8 @@ +{erl_opts, [debug_info]}.
+ +{deps, [ + {influxdb, {git, "https://github.com/emqx/influxdb-client-erl", {tag, "1.1.9"}}}, + {emqx_connector, {path, "../../apps/emqx_connector"}}, + {emqx_resource, {path, "../../apps/emqx_resource"}}, + {emqx_bridge, {path, "../../apps/emqx_bridge"}} +]}. diff --git a/apps/emqx_bridge_influxdb/src/emqx_bridge_influxdb.app.src b/apps/emqx_bridge_influxdb/src/emqx_bridge_influxdb.app.src new file mode 100644 index 000000000..14d881399 --- /dev/null +++ b/apps/emqx_bridge_influxdb/src/emqx_bridge_influxdb.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_influxdb, [ + {description, "EMQX Enterprise InfluxDB Bridge"}, + {vsn, "0.1.1"}, + {registered, []}, + {applications, [kernel, stdlib, influxdb]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_influxdb/src/emqx_bridge_influxdb.erl b/apps/emqx_bridge_influxdb/src/emqx_bridge_influxdb.erl new file mode 100644 index 000000000..c2a04e93d --- /dev/null +++ b/apps/emqx_bridge_influxdb/src/emqx_bridge_influxdb.erl @@ -0,0 +1,322 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_influxdb). + +-include_lib("emqx/include/logger.hrl"). +-include_lib("emqx_connector/include/emqx_connector.hrl"). +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-export([ + conn_bridge_examples/1 +]). + +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +-type write_syntax() :: list(). +-reflect_type([write_syntax/0]). +-typerefl_from_string({write_syntax/0, ?MODULE, to_influx_lines}). +-export([to_influx_lines/1]). + +%% ------------------------------------------------------------------------------------------------- +%% api + +conn_bridge_examples(Method) -> + [ + #{ + <<"influxdb_api_v1">> => #{ + summary => <<"InfluxDB HTTP API V1 Bridge">>, + value => values("influxdb_api_v1", Method) + } + }, + #{ + <<"influxdb_api_v2">> => #{ + summary => <<"InfluxDB HTTP API V2 Bridge">>, + value => values("influxdb_api_v2", Method) + } + } + ]. + +values(Protocol, get) -> + values(Protocol, post); +values("influxdb_api_v2", post) -> + SupportUint = <<"uint_value=${payload.uint_key}u,">>, + TypeOpts = #{ + bucket => <<"example_bucket">>, + org => <<"example_org">>, + token => <<"example_token">>, + server => <<"127.0.0.1:8086">> + }, + values(common, "influxdb_api_v2", SupportUint, TypeOpts); +values("influxdb_api_v1", post) -> + SupportUint = <<>>, + TypeOpts = #{ + database => <<"example_database">>, + username => <<"example_username">>, + password => <<"******">>, + server => <<"127.0.0.1:8086">> + }, + values(common, "influxdb_api_v1", SupportUint, TypeOpts); +values(Protocol, put) -> + values(Protocol, post). + +values(common, Protocol, SupportUint, TypeOpts) -> + CommonConfigs = #{ + type => list_to_atom(Protocol), + name => <<"demo">>, + enable => true, + local_topic => <<"local/topic/#">>, + write_syntax => + <<"${topic},clientid=${clientid}", " ", "payload=${payload},", + "${clientid}_int_value=${payload.int_key}i,", SupportUint/binary, + "bool=${payload.bool}">>, + precision => ms, + resource_opts => #{ + batch_size => 100, + batch_time => <<"20ms">> + }, + server => <<"127.0.0.1:8086">>, + ssl => #{enable => false} + }, + maps:merge(TypeOpts, CommonConfigs).
+ +%% ------------------------------------------------------------------------------------------------- +%% Hocon Schema Definitions +namespace() -> "bridge_influxdb". + +roots() -> []. + +fields("post_api_v1") -> + method_fields(post, influxdb_api_v1); +fields("post_api_v2") -> + method_fields(post, influxdb_api_v2); +fields("put_api_v1") -> + method_fields(put, influxdb_api_v1); +fields("put_api_v2") -> + method_fields(put, influxdb_api_v2); +fields("get_api_v1") -> + method_fields(get, influxdb_api_v1); +fields("get_api_v2") -> + method_fields(get, influxdb_api_v2); +fields(Type) when + Type == influxdb_api_v1 orelse Type == influxdb_api_v2 +-> + influxdb_bridge_common_fields() ++ + connector_fields(Type). + +method_fields(post, ConnectorType) -> + influxdb_bridge_common_fields() ++ + connector_fields(ConnectorType) ++ + type_name_fields(ConnectorType); +method_fields(get, ConnectorType) -> + influxdb_bridge_common_fields() ++ + connector_fields(ConnectorType) ++ + type_name_fields(ConnectorType) ++ + emqx_bridge_schema:status_fields(); +method_fields(put, ConnectorType) -> + influxdb_bridge_common_fields() ++ + connector_fields(ConnectorType). + +influxdb_bridge_common_fields() -> + emqx_bridge_schema:common_bridge_fields() ++ + [ + {local_topic, mk(binary(), #{desc => ?DESC("local_topic")})}, + {write_syntax, fun write_syntax/1} + ] ++ + emqx_resource_schema:fields("resource_opts"). + +connector_fields(Type) -> + emqx_bridge_influxdb_connector:fields(Type). + +type_name_fields(Type) -> + [ + {type, mk(Type, #{required => true, desc => ?DESC("desc_type")})}, + {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})} + ]. + +desc("config") -> + ?DESC("desc_config"); +desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> + ["Configuration for InfluxDB using `", string:to_upper(Method), "` method."]; +desc(influxdb_api_v1) -> + ?DESC(emqx_bridge_influxdb_connector, "influxdb_api_v1"); +desc(influxdb_api_v2) -> + ?DESC(emqx_bridge_influxdb_connector, "influxdb_api_v2"); +desc(_) -> + undefined. + +write_syntax(type) -> + ?MODULE:write_syntax(); +write_syntax(required) -> + true; +write_syntax(validator) -> + [?NOT_EMPTY("the value of the field 'write_syntax' cannot be empty")]; +write_syntax(converter) -> + fun to_influx_lines/1; +write_syntax(desc) -> + ?DESC("write_syntax"); +write_syntax(format) -> + <<"sql">>; +write_syntax(_) -> + undefined. + +to_influx_lines(RawLines) -> + try + influx_lines(str(RawLines), []) + catch + _:Reason:Stacktrace -> + Msg = lists:flatten( + io_lib:format("Unable to parse InfluxDB line protocol: ~p", [RawLines]) + ), + ?SLOG(error, #{msg => Msg, error_reason => Reason, stacktrace => Stacktrace}), + throw(Msg) + end. + +-define(MEASUREMENT_ESC_CHARS, [$,, $\s]). +-define(TAG_FIELD_KEY_ESC_CHARS, [$,, $=, $\s]). +-define(FIELD_VAL_ESC_CHARS, [$", $\\]). + % Common separator for both tags and fields +-define(SEP, $\s). +-define(MEASUREMENT_TAG_SEP, $,). +-define(KEY_SEP, $=). +-define(VAL_SEP, $,). +-define(NON_EMPTY, [_ | _]). + +influx_lines([] = _RawLines, Acc) -> + ?NON_EMPTY = lists:reverse(Acc); +influx_lines(RawLines, Acc) -> + {Acc1, RawLines1} = influx_line(string:trim(RawLines, leading, "\s\n"), Acc), + influx_lines(RawLines1, Acc1).
+ +influx_line([], Acc) -> + {Acc, []}; +influx_line(Line, Acc) -> + {?NON_EMPTY = Measurement, Line1} = measurement(Line), + {Tags, Line2} = tags(Line1), + {?NON_EMPTY = Fields, Line3} = influx_fields(Line2), + {Timestamp, Line4} = timestamp(Line3), + { + [ + #{ + measurement => Measurement, + tags => Tags, + fields => Fields, + timestamp => Timestamp + } + | Acc + ], + Line4 + }. + +measurement(Line) -> + unescape(?MEASUREMENT_ESC_CHARS, [?MEASUREMENT_TAG_SEP, ?SEP], Line, []). + +tags([?MEASUREMENT_TAG_SEP | Line]) -> + tags1(Line, []); +tags(Line) -> + {[], Line}. + +%% An empty line is invalid, as fields are required after tags; +%% we need to break the recursion here and fail later when parsing fields +tags1([] = Line, Acc) -> + {lists:reverse(Acc), Line}; +%% Matching a non-empty Acc treats lines like "m, field=field_val" as invalid +tags1([?SEP | _] = Line, ?NON_EMPTY = Acc) -> + {lists:reverse(Acc), Line}; +tags1(Line, Acc) -> + {Tag, Line1} = tag(Line), + tags1(Line1, [Tag | Acc]). + +tag(Line) -> + {?NON_EMPTY = Key, Line1} = key(Line), + {?NON_EMPTY = Val, Line2} = tag_val(Line1), + {{Key, Val}, Line2}. + +tag_val(Line) -> + {Val, Line1} = unescape(?TAG_FIELD_KEY_ESC_CHARS, [?VAL_SEP, ?SEP], Line, []), + {Val, strip_l(Line1, ?VAL_SEP)}. + +influx_fields([?SEP | Line]) -> + fields1(string:trim(Line, leading, "\s"), []). + +%% The timestamp is optional, so fields may be at the very end of the line +fields1([Ch | _] = Line, Acc) when Ch =:= ?SEP; Ch =:= $\n -> + {lists:reverse(Acc), Line}; +fields1([] = Line, Acc) -> + {lists:reverse(Acc), Line}; +fields1(Line, Acc) -> + {Field, Line1} = field(Line), + fields1(Line1, [Field | Acc]). + +field(Line) -> + {?NON_EMPTY = Key, Line1} = key(Line), + {Val, Line2} = field_val(Line1), + {{Key, Val}, Line2}. + +field_val([$" | Line]) -> + {Val, [$" | Line1]} = unescape(?FIELD_VAL_ESC_CHARS, [$"], Line, []), + %% A quoted val can be empty + {Val, strip_l(Line1, ?VAL_SEP)}; +field_val(Line) -> + %% An unquoted value should not be un-escaped according to the InfluxDB protocol, + %% as it can only hold a float, integer, uinteger or boolean value. + %% However, as templates are possible, un-escaping is applied here, + %% which also helps to detect some invalid lines, e.g.: "m,tag=1 field= ${timestamp}" + {Val, Line1} = unescape(?TAG_FIELD_KEY_ESC_CHARS, [?VAL_SEP, ?SEP, $\n], Line, []), + {?NON_EMPTY = Val, strip_l(Line1, ?VAL_SEP)}. + +timestamp([?SEP | Line]) -> + Line1 = string:trim(Line, leading, "\s"), + %% Similarly to an unquoted field value, un-escape the timestamp to validate and handle + %% potentially escaped characters in a template + {T, Line2} = unescape(?TAG_FIELD_KEY_ESC_CHARS, [?SEP, $\n], Line1, []), + {timestamp1(T), Line2}; +timestamp(Line) -> + {undefined, Line}. + +timestamp1(?NON_EMPTY = Ts) -> Ts; +timestamp1(_Ts) -> undefined. + +%% Common for both tag and field keys +key(Line) -> + {Key, Line1} = unescape(?TAG_FIELD_KEY_ESC_CHARS, [?KEY_SEP], Line, []), + {Key, strip_l(Line1, ?KEY_SEP)}. + +%% Only strip a character between pairs; don't strip it (and let it fail) +%% if the char to be stripped is at the end, e.g.: m,tag=val, field=val +strip_l([Ch, Ch1 | Str], Ch) when Ch1 =/= ?SEP -> + [Ch1 | Str]; +strip_l(Str, _Ch) -> + Str.
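%% To make the parser above concrete, a hypothetical eunit-style check
%% (illustration only, not part of the change set; assumes eunit's assert
%% macros are in scope). Keys and values come back as character lists, and
%% ${...} placeholders are kept verbatim for later template preprocessing:
to_influx_lines_example_test() ->
    ?assertEqual(
        [
            #{
                measurement => "m",
                tags => [{"tag", "${clientid}"}],
                fields => [{"f", "${payload.f}"}],
                timestamp => "${timestamp}"
            }
        ],
        to_influx_lines(<<"m,tag=${clientid} f=${payload.f} ${timestamp}">>)
    ).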
+ +unescape(EscapeChars, SepChars, [$\\, Char | T], Acc) -> + ShouldEscapeBackslash = lists:member($\\, EscapeChars), + Acc1 = + case lists:member(Char, EscapeChars) of + true -> [Char | Acc]; + false when not ShouldEscapeBackslash -> [Char, $\\ | Acc] + end, + unescape(EscapeChars, SepChars, T, Acc1); +unescape(EscapeChars, SepChars, [Char | T] = L, Acc) -> + IsEscapeChar = lists:member(Char, EscapeChars), + case lists:member(Char, SepChars) of + true -> {lists:reverse(Acc), L}; + false when not IsEscapeChar -> unescape(EscapeChars, SepChars, T, [Char | Acc]) + end; +unescape(_EscapeChars, _SepChars, [] = L, Acc) -> + {lists:reverse(Acc), L}. + +str(A) when is_atom(A) -> + atom_to_list(A); +str(B) when is_binary(B) -> + binary_to_list(B); +str(S) when is_list(S) -> + S. diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_influxdb.erl b/apps/emqx_bridge_influxdb/src/emqx_bridge_influxdb_connector.erl similarity index 63% rename from lib-ee/emqx_ee_connector/src/emqx_ee_connector_influxdb.erl rename to apps/emqx_bridge_influxdb/src/emqx_bridge_influxdb_connector.erl index 7974bf028..2f65f7902 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_influxdb.erl +++ b/apps/emqx_bridge_influxdb/src/emqx_bridge_influxdb_connector.erl @@ -1,9 +1,8 @@ %%-------------------------------------------------------------------- %% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- --module(emqx_ee_connector_influxdb). +-module(emqx_bridge_influxdb_connector). --include("emqx_ee_connector.hrl"). -include_lib("emqx_connector/include/emqx_connector.hrl"). -include_lib("hocon/include/hoconsc.hrl"). @@ -26,18 +25,29 @@ on_batch_query_async/4, on_get_status/2 ]). +-export([reply_callback/2]). -export([ + roots/0, namespace/0, fields/1, desc/1 ]). +%% exported only for tests +-export([is_unrecoverable_error/1]). + +-type ts_precision() :: ns | us | ms | s. + +-define(INFLUXDB_DEFAULT_PORT, 8086). + %% InfluxDB server addresses don't need extra parsing -define(INFLUXDB_HOST_OPTIONS, #{ default_port => ?INFLUXDB_DEFAULT_PORT }). + +-define(DEFAULT_TIMESTAMP_TMPL, "${timestamp}"). + %% ------------------------------------------------------------------------------------------------- %% resource callback callback_mode() -> async_if_possible. @@ -56,13 +66,13 @@ on_query(InstId, {send_message, Data}, _State = #{write_syntax := SyntaxLines, c #{points => Points, batch => false, mode => sync} ), do_query(InstId, Client, Points); - {error, ErrorPoints} = Err -> + {error, ErrorPoints} -> ?tp( influxdb_connector_send_query_error, #{batch => false, mode => sync, error => ErrorPoints} ), log_error_points(InstId, ErrorPoints), - Err + {error, {unrecoverable_error, ErrorPoints}} end. %% If transforming any of the batched data to points fails, the whole batch fails. @@ -80,7 +90,7 @@ on_batch_query(InstId, BatchData, _State = #{write_syntax := SyntaxLines, client influxdb_connector_send_query_error, #{batch => true, mode => sync, error => Reason} ), - {error, Reason} + {error, {unrecoverable_error, Reason}} end. on_query_async( @@ -123,7 +133,7 @@ on_batch_query_async( influxdb_connector_send_query_error, #{batch => true, mode => async, error => Reason} ), - {error, Reason} + {error, {unrecoverable_error, Reason}} end. on_get_status(_InstId, #{client := Client}) -> @@ -138,22 +148,42 @@ on_get_status(_InstId, #{client := Client}) -> %% schema namespace() -> connector_influxdb.
+roots() -> + [ + {config, #{ + type => hoconsc:union( + [ + hoconsc:ref(?MODULE, influxdb_api_v1), + hoconsc:ref(?MODULE, influxdb_api_v2) + ] + ) + }} + ]. + fields(common) -> [ {server, server()}, {precision, - mk(enum([ns, us, ms, s, m, h]), #{ + %% InfluxDB only supports these 4 precisions: + %% See "https://github.com/influxdata/influxdb/blob/ + %% 6b607288439a991261307518913eb6d4e280e0a7/models/points.go#L487" for + %% more information. + mk(enum([ns, us, ms, s]), #{ required => false, default => ms, desc => ?DESC("precision") })} ]; -fields(influxdb_udp) -> - fields(common); fields(influxdb_api_v1) -> fields(common) ++ [ {database, mk(binary(), #{required => true, desc => ?DESC("database")})}, {username, mk(binary(), #{desc => ?DESC("username")})}, - {password, mk(binary(), #{desc => ?DESC("password"), format => <<"password">>})} + {password, + mk(binary(), #{ + desc => ?DESC("password"), + format => <<"password">>, + sensitive => true, + converter => fun emqx_schema:password_converter/2 + })} ] ++ emqx_connector_schema_lib:ssl_fields(); fields(influxdb_api_v2) -> fields(common) ++ @@ -174,8 +204,6 @@ server() -> desc(common) -> ?DESC("common"); -desc(influxdb_udp) -> - ?DESC("influxdb_udp"); desc(influxdb_api_v1) -> ?DESC("influxdb_api_v1"); desc(influxdb_api_v2) -> @@ -189,15 +217,15 @@ start_client(InstId, Config) -> ?SLOG(info, #{ msg => "starting influxdb connector", connector => InstId, - config => Config, - client_config => ClientConfig + config => emqx_utils:redact(Config), + client_config => emqx_utils:redact(ClientConfig) }), try do_start_client(InstId, ClientConfig, Config) catch E:R:S -> ?tp(influxdb_connector_start_exception, #{error => {E, R}}), - ?SLOG(error, #{ + ?SLOG(warning, #{ msg => "start influxdb connector error", connector => InstId, error => E, @@ -210,33 +238,36 @@ start_client(InstId, Config) -> do_start_client( InstId, ClientConfig, - Config = #{ - write_syntax := Lines - } + Config = #{write_syntax := Lines} ) -> + Precision = maps:get(precision, Config, ms), case influxdb:start_client(ClientConfig) of {ok, Client} -> - case influxdb:is_alive(Client) of + case influxdb:is_alive(Client, true) of true -> State = #{ client => Client, - write_syntax => to_config(Lines) + write_syntax => to_config(Lines, Precision) }, ?SLOG(info, #{ msg => "starting influxdb connector success", connector => InstId, - client => Client, - state => State + client => redact_auth(Client), + state => redact_auth(State) }), {ok, State}; - false -> - ?tp(influxdb_connector_start_failed, #{error => influxdb_client_not_alive}), - ?SLOG(error, #{ - msg => "starting influxdb connector failed", - connector => InstId, - client => Client, - reason => "client is not alive" + {false, Reason} -> + ?tp(influxdb_connector_start_failed, #{ + error => influxdb_client_not_alive, reason => Reason }), + ?SLOG(warning, #{ + msg => "failed_to_start_influxdb_connector", + connector => InstId, + client => redact_auth(Client), + reason => Reason + }), + %% stop the client so it doesn't leak + _ = influxdb:stop_client(Client), {error, influxdb_client_not_alive} end; {error, {already_started, Client0}} -> @@ -244,14 +275,14 @@ do_start_client( ?SLOG(info, #{ msg => "restarting influxdb connector, found already started client", connector => InstId, - old_client => Client0 + old_client => redact_auth(Client0) }), _ = influxdb:stop_client(Client0), do_start_client(InstId, ClientConfig, Config); {error, Reason} -> ?tp(influxdb_connector_start_failed, #{error => Reason}), - ?SLOG(error, #{ - msg => "starting influxdb connector
failed", + ?SLOG(warning, #{ + msg => "failed_to_start_influxdb_connector", connector => InstId, reason => Reason }), @@ -264,29 +295,28 @@ client_config( server := Server } ) -> - {Host, Port} = emqx_schema:parse_server(Server, ?INFLUXDB_HOST_OPTIONS), + #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, ?INFLUXDB_HOST_OPTIONS), [ {host, str(Host)}, {port, Port}, {pool_size, erlang:system_info(schedulers)}, - {pool, binary_to_atom(InstId, utf8)}, + {pool, InstId}, {precision, atom_to_binary(maps:get(precision, Config, ms), utf8)} ] ++ protocol_config(Config). %% api v1 config -protocol_config(#{ - username := Username, - password := Password, - database := DB, - ssl := SSL -}) -> +protocol_config( + #{ + database := DB, + ssl := SSL + } = Config +) -> [ {protocol, http}, {version, v1}, - {username, str(Username)}, - {password, str(Password)}, {database, str(DB)} - ] ++ ssl_config(SSL); + ] ++ username(Config) ++ + password(Config) ++ ssl_config(SSL); %% api v2 config protocol_config(#{ bucket := Bucket, @@ -300,12 +330,7 @@ protocol_config(#{ {bucket, str(Bucket)}, {org, str(Org)}, {token, Token} - ] ++ ssl_config(SSL); -%% udp config -protocol_config(_) -> - [ - {protocol, udp} - ]. + ] ++ ssl_config(SSL). ssl_config(#{enable := false}) -> [ @@ -314,8 +339,27 @@ ssl_config(#{enable := false}) -> ssl_config(SSL = #{enable := true}) -> [ {https_enabled, true}, - {transport, ssl} - ] ++ maps:to_list(maps:remove(enable, SSL)). + {transport, ssl}, + {transport_opts, emqx_tls_lib:to_client_opts(SSL)} + ]. + +username(#{username := Username}) -> + [{username, str(Username)}]; +username(_) -> + []. + +password(#{password := Password}) -> + [{password, str(Password)}]; +password(_) -> + []. + +redact_auth(Term) -> + emqx_utils:redact(Term, fun is_auth_key/1). + +is_auth_key(Key) when is_binary(Key) -> + string:equal("authorization", Key, true); +is_auth_key(_) -> + false. %% ------------------------------------------------------------------------------------------------- %% Query @@ -334,7 +378,12 @@ do_query(InstId, Client, Points) -> connector => InstId, reason => Reason }), - Err + case is_unrecoverable_error(Err) of + true -> + {error, {unrecoverable_error, Reason}}; + false -> + {error, {recoverable_error, Reason}} + end end. do_async_query(InstId, Client, Points, ReplyFunAndArgs) -> @@ -343,35 +392,60 @@ do_async_query(InstId, Client, Points, ReplyFunAndArgs) -> connector => InstId, points => Points }), - ok = influxdb:write_async(Client, Points, ReplyFunAndArgs). + WrappedReplyFunAndArgs = {fun ?MODULE:reply_callback/2, [ReplyFunAndArgs]}, + {ok, _WorkerPid} = influxdb:write_async(Client, Points, WrappedReplyFunAndArgs). + +reply_callback(ReplyFunAndArgs, {error, Reason} = Error) -> + case is_unrecoverable_error(Error) of + true -> + Result = {error, {unrecoverable_error, Reason}}, + emqx_resource:apply_reply_fun(ReplyFunAndArgs, Result); + false -> + Result = {error, {recoverable_error, Reason}}, + emqx_resource:apply_reply_fun(ReplyFunAndArgs, Result) + end; +reply_callback(ReplyFunAndArgs, Result) -> + emqx_resource:apply_reply_fun(ReplyFunAndArgs, Result). %% ------------------------------------------------------------------------------------------------- %% Tags & Fields Config Trans -to_config(Lines) -> - to_config(Lines, []). +to_config(Lines, Precision) -> + to_config(Lines, [], Precision). 
-to_config([], Acc) -> +to_config([], Acc, _Precision) -> lists:reverse(Acc); -to_config( - [ - #{ - measurement := Measurement, - timestamp := Timestamp, - tags := Tags, - fields := Fields - } - | Rest - ], - Acc -) -> - Res = #{ - measurement => emqx_plugin_libs_rule:preproc_tmpl(Measurement), - timestamp => emqx_plugin_libs_rule:preproc_tmpl(Timestamp), - tags => to_kv_config(Tags), - fields => to_kv_config(Fields) +to_config([Item0 | Rest], Acc, Precision) -> + Ts0 = maps:get(timestamp, Item0, undefined), + {Ts, FromPrecision, ToPrecision} = preproc_tmpl_timestamp(Ts0, Precision), + Item = #{ + measurement => emqx_plugin_libs_rule:preproc_tmpl(maps:get(measurement, Item0)), + timestamp => Ts, + precision => {FromPrecision, ToPrecision}, + tags => to_kv_config(maps:get(tags, Item0)), + fields => to_kv_config(maps:get(fields, Item0)) }, - to_config(Rest, [Res | Acc]). + to_config(Rest, [Item | Acc], Precision). + +%% pre-process the timestamp template +%% returns a tuple of three elements: +%% 1. The timestamp template itself. +%% 2. The source timestamp precision (ms if the template ${timestamp} is used). +%% 3. The target timestamp precision (configured for the client). +preproc_tmpl_timestamp(undefined, Precision) -> + %% not configured, we default it to the message timestamp + preproc_tmpl_timestamp(?DEFAULT_TIMESTAMP_TMPL, Precision); +preproc_tmpl_timestamp(Ts, Precision) when is_integer(Ts) -> + %% a constant value is used, which is quite unusual, but we have to handle it specially +    {Ts, Precision, Precision}; +preproc_tmpl_timestamp(Ts, Precision) when is_list(Ts) -> + preproc_tmpl_timestamp(iolist_to_binary(Ts), Precision); +preproc_tmpl_timestamp(<<?DEFAULT_TIMESTAMP_TMPL>> = Ts, Precision) -> + {emqx_plugin_libs_rule:preproc_tmpl(Ts), ms, Precision}; +preproc_tmpl_timestamp(Ts, Precision) when is_binary(Ts) -> + %% a placeholder is in use, e.g. ${payload.my_timestamp}; + %% we can only hope the value will be of the same precision as in the configs + {emqx_plugin_libs_rule:preproc_tmpl(Ts), Precision, Precision}. to_kv_config(KVfields) -> maps:fold(fun to_maps_config/3, #{}, proplists:to_map(KVfields)). @@ -414,7 +488,8 @@ parse_batch_data(InstId, BatchData, SyntaxLines) -> fields := [{binary(), binary()}], measurement := binary(), tags := [{binary(), binary()}], - timestamp := binary() + timestamp := emqx_plugin_libs_rule:tmpl_token() | integer(), + precision := {From :: ts_precision(), To :: ts_precision()} } ]) -> {ok, [map()]} | {error, term()}.
data_to_points(Data, SyntaxLines) -> @@ -430,46 +505,71 @@ lines_to_points(_, [], Points, ErrorPoints) -> %% ignore trans succeeded points {error, ErrorPoints} end; -lines_to_points( - Data, - [ - #{ - measurement := Measurement, - timestamp := Timestamp, - tags := Tags, - fields := Fields - } - | Rest - ], - ResultPointsAcc, - ErrorPointsAcc -) -> +lines_to_points(Data, [#{timestamp := Ts} = Item | Rest], ResultPointsAcc, ErrorPointsAcc) when + is_list(Ts) +-> TransOptions = #{return => rawlist, var_trans => fun data_filter/1}, - case emqx_plugin_libs_rule:proc_tmpl(Timestamp, Data, TransOptions) of - [TimestampInt] when is_integer(TimestampInt) -> - {_, EncodedTags} = maps:fold(fun maps_config_to_data/3, {Data, #{}}, Tags), - {_, EncodedFields} = maps:fold(fun maps_config_to_data/3, {Data, #{}}, Fields), - Point = #{ - measurement => emqx_plugin_libs_rule:proc_tmpl(Measurement, Data), - timestamp => TimestampInt, - tags => EncodedTags, - fields => EncodedFields - }, - case map_size(EncodedFields) =:= 0 of - true -> - %% influxdb client doesn't like empty field maps... - lines_to_points(Data, Rest, ResultPointsAcc, [ - {error, no_fields} | ErrorPointsAcc - ]); - false -> - lines_to_points(Data, Rest, [Point | ResultPointsAcc], ErrorPointsAcc) - end; - BadTimestamp -> + case parse_timestamp(emqx_plugin_libs_rule:proc_tmpl(Ts, Data, TransOptions)) of + {ok, TsInt} -> + Item1 = Item#{timestamp => TsInt}, + continue_lines_to_points(Data, Item1, Rest, ResultPointsAcc, ErrorPointsAcc); + {error, BadTs} -> lines_to_points(Data, Rest, ResultPointsAcc, [ - {error, {bad_timestamp, BadTimestamp}} | ErrorPointsAcc + {error, {bad_timestamp, BadTs}} | ErrorPointsAcc ]) + end; +lines_to_points(Data, [#{timestamp := Ts} = Item | Rest], ResultPointsAcc, ErrorPointsAcc) when + is_integer(Ts) +-> + continue_lines_to_points(Data, Item, Rest, ResultPointsAcc, ErrorPointsAcc). + +parse_timestamp([TsInt]) when is_integer(TsInt) -> + {ok, TsInt}; +parse_timestamp([TsBin]) -> + try + {ok, binary_to_integer(TsBin)} + catch + _:_ -> + {error, TsBin} end. +continue_lines_to_points(Data, Item, Rest, ResultPointsAcc, ErrorPointsAcc) -> + case line_to_point(Data, Item) of + #{fields := Fields} when map_size(Fields) =:= 0 -> + %% influxdb client doesn't like empty field maps... + ErrorPointsAcc1 = [{error, no_fields} | ErrorPointsAcc], + lines_to_points(Data, Rest, ResultPointsAcc, ErrorPointsAcc1); + Point -> + lines_to_points(Data, Rest, [Point | ResultPointsAcc], ErrorPointsAcc) + end. + +line_to_point( + Data, + #{ + measurement := Measurement, + tags := Tags, + fields := Fields, + timestamp := Ts, + precision := Precision + } = Item +) -> + {_, EncodedTags} = maps:fold(fun maps_config_to_data/3, {Data, #{}}, Tags), + {_, EncodedFields} = maps:fold(fun maps_config_to_data/3, {Data, #{}}, Fields), + maps:without([precision], Item#{ + measurement => emqx_plugin_libs_rule:proc_tmpl(Measurement, Data), + tags => EncodedTags, + fields => EncodedFields, + timestamp => maybe_convert_time_unit(Ts, Precision) + }). + +maybe_convert_time_unit(Ts, {FromPrecision, ToPrecision}) -> + erlang:convert_time_unit(Ts, time_unit(FromPrecision), time_unit(ToPrecision)). + +time_unit(s) -> second; +time_unit(ms) -> millisecond; +time_unit(us) -> microsecond; +time_unit(ns) -> nanosecond. 
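%% A hypothetical eunit-style check (illustration only) of the precision
%% handling above: with the default ${timestamp} template the source is in
%% milliseconds, so a client configured for nanosecond precision gets the
%% value scaled by a factor of 1_000_000:
maybe_convert_time_unit_example_test() ->
    ?assertEqual(
        1_677_600_000_000_000_000,
        maybe_convert_time_unit(1_677_600_000_000, {ms, ns})
    ).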
+ maps_config_to_data(K, V, {Data, Res}) -> KTransOptions = #{return => rawlist, var_trans => fun key_filter/1}, VTransOptions = #{return => rawlist, var_trans => fun data_filter/1}, @@ -557,6 +657,11 @@ str(B) when is_binary(B) -> str(S) when is_list(S) -> S. +is_unrecoverable_error({error, {unrecoverable_error, _}}) -> + true; +is_unrecoverable_error(_) -> + false. + %%=================================================================== %% eunit tests %%=================================================================== @@ -564,6 +669,13 @@ str(S) when is_list(S) -> -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). +is_auth_key_test_() -> + [ + ?_assert(is_auth_key(<<"Authorization">>)), + ?_assertNot(is_auth_key(<<"Something">>)), + ?_assertNot(is_auth_key(89)) + ]. + %% for coverage desc_test_() -> [ @@ -571,10 +683,6 @@ desc_test_() -> {desc, _, _}, desc(common) ), - ?_assertMatch( - {desc, _, _}, - desc(influxdb_udp) - ), ?_assertMatch( {desc, _, _}, desc(influxdb_api_v1) diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_influxdb_SUITE.erl b/apps/emqx_bridge_influxdb/test/emqx_bridge_influxdb_SUITE.erl similarity index 83% rename from lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_influxdb_SUITE.erl rename to apps/emqx_bridge_influxdb/test/emqx_bridge_influxdb_SUITE.erl index f2037ba14..825721052 100644 --- a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_influxdb_SUITE.erl +++ b/apps/emqx_bridge_influxdb/test/emqx_bridge_influxdb_SUITE.erl @@ -1,7 +1,7 @@ %%-------------------------------------------------------------------- %% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- --module(emqx_ee_bridge_influxdb_SUITE). +-module(emqx_bridge_influxdb_SUITE). -compile(nowarn_export_all). -compile(export_all). @@ -204,9 +204,9 @@ init_per_group(sync_query, Config) -> init_per_group(async_query, Config) -> [{query_mode, async} | Config]; init_per_group(with_batch, Config) -> - [{enable_batch, true} | Config]; + [{batch_size, 100} | Config]; init_per_group(without_batch, Config) -> - [{enable_batch, false} | Config]; + [{batch_size, 1} | Config]; init_per_group(_Group, Config) -> Config. @@ -261,7 +261,6 @@ example_write_syntax() -> "${undef_key}=\"hard-coded-value\",", "bool=${payload.bool}">>. 
influxdb_config(apiv1 = Type, InfluxDBHost, InfluxDBPort, Config) -> - EnableBatch = proplists:get_value(enable_batch, Config, true), BatchSize = proplists:get_value(batch_size, Config, 100), QueryMode = proplists:get_value(query_mode, Config, sync), UseTLS = proplists:get_value(use_tls, Config, false), @@ -278,7 +277,7 @@ influxdb_config(apiv1 = Type, InfluxDBHost, InfluxDBPort, Config) -> " precision = ns\n" " write_syntax = \"~s\"\n" " resource_opts = {\n" - " enable_batch = ~p\n" + " request_timeout = 1s\n" " query_mode = ~s\n" " batch_size = ~b\n" " }\n" @@ -292,7 +291,6 @@ influxdb_config(apiv1 = Type, InfluxDBHost, InfluxDBPort, Config) -> InfluxDBHost, InfluxDBPort, WriteSyntax, - EnableBatch, QueryMode, BatchSize, UseTLS @@ -300,7 +298,6 @@ influxdb_config(apiv1 = Type, InfluxDBHost, InfluxDBPort, Config) -> ), {Name, ConfigString, parse_and_check(ConfigString, Type, Name)}; influxdb_config(apiv2 = Type, InfluxDBHost, InfluxDBPort, Config) -> - EnableBatch = proplists:get_value(enable_batch, Config, true), BatchSize = proplists:get_value(batch_size, Config, 100), QueryMode = proplists:get_value(query_mode, Config, sync), UseTLS = proplists:get_value(use_tls, Config, false), @@ -317,7 +314,7 @@ influxdb_config(apiv2 = Type, InfluxDBHost, InfluxDBPort, Config) -> " precision = ns\n" " write_syntax = \"~s\"\n" " resource_opts = {\n" - " enable_batch = ~p\n" + " request_timeout = 1s\n" " query_mode = ~s\n" " batch_size = ~b\n" " }\n" @@ -331,7 +328,6 @@ influxdb_config(apiv2 = Type, InfluxDBHost, InfluxDBPort, Config) -> InfluxDBHost, InfluxDBPort, WriteSyntax, - EnableBatch, QueryMode, BatchSize, UseTLS @@ -358,7 +354,7 @@ create_bridge(Config, Overrides) -> Type = influxdb_type_bin(?config(influxdb_type, Config)), Name = ?config(influxdb_name, Config), InfluxDBConfig0 = ?config(influxdb_config, Config), - InfluxDBConfig = emqx_map_lib:deep_merge(InfluxDBConfig0, Overrides), + InfluxDBConfig = emqx_utils_maps:deep_merge(InfluxDBConfig0, Overrides), emqx_bridge:create(Type, Name, InfluxDBConfig). delete_bridge(Config) -> @@ -394,11 +390,11 @@ create_rule_and_action_http(Config, Overrides) -> sql => <<"SELECT * FROM \"t/topic\"">>, actions => [BridgeId] }, - Params = emqx_map_lib:deep_merge(Params0, Overrides), + Params = emqx_utils_maps:deep_merge(Params0, Overrides), Path = emqx_mgmt_api_test_util:api_path(["rules"]), AuthHeader = emqx_mgmt_api_test_util:auth_header_(), case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of - {ok, Res} -> {ok, emqx_json:decode(Res, [return_maps])}; + {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; Error -> Error end. @@ -439,7 +435,7 @@ query_by_clientid(ClientId, Config) -> {"Content-Type", "application/json"} ], Body = - emqx_json:encode(#{ + emqx_utils_json:encode(#{ query => Query, dialect => #{ header => true, @@ -506,11 +502,6 @@ resource_id(Config) -> Name = ?config(influxdb_name, Config), emqx_bridge_resource:resource_id(Type, Name). -instance_id(Config) -> - ResourceId = resource_id(Config), - [{_, InstanceId}] = ets:lookup(emqx_resource_manager, {owner, ResourceId}), - InstanceId. 
- %%------------------------------------------------------------------------------ %% Testcases %%------------------------------------------------------------------------------ @@ -531,15 +522,17 @@ t_start_ok(Config) -> SentData = #{ <<"clientid">> => ClientId, <<"topic">> => atom_to_binary(?FUNCTION_NAME), - <<"timestamp">> => erlang:system_time(nanosecond), - <<"payload">> => Payload + <<"payload">> => Payload, + <<"timestamp">> => erlang:system_time(millisecond) }, ?check_trace( begin - ?assertEqual(ok, send_message(Config, SentData)), case QueryMode of - async -> ct:sleep(500); - sync -> ok + async -> + ?assertMatch(ok, send_message(Config, SentData)), + ct:sleep(500); + sync -> + ?assertMatch({ok, 204, _}, send_message(Config, SentData)) end, PersistedData = query_by_clientid(ClientId, Config), Expected = #{ @@ -547,7 +540,7 @@ t_start_ok(Config) -> int_value => <<"-123">>, uint_value => <<"123">>, float_value => <<"24.5">>, - payload => emqx_json:encode(Payload) + payload => emqx_utils_json:encode(Payload) }, assert_persisted_data(ClientId, Expected, PersistedData), ok @@ -583,14 +576,14 @@ t_start_already_started(Config) -> {ok, _}, create_bridge(Config) ), - InstanceId = instance_id(Config), + ResourceId = resource_id(Config), TypeAtom = binary_to_atom(Type), NameAtom = binary_to_atom(Name), {ok, #{bridges := #{TypeAtom := #{NameAtom := InfluxDBConfigMap}}}} = emqx_hocon:check( emqx_bridge_schema, InfluxDBConfigString ), ?check_trace( - emqx_ee_connector_influxdb:on_start(InstanceId, InfluxDBConfigMap), + emqx_bridge_influxdb_connector:on_start(ResourceId, InfluxDBConfigMap), fun(Result, Trace) -> ?assertMatch({ok, _}, Result), ?assertMatch([_], ?of_kind(influxdb_connector_start_already_started, Trace)), @@ -668,6 +661,57 @@ t_start_ok_no_subject_tags_write_syntax(Config) -> ), ok. +t_const_timestamp(Config) -> + QueryMode = ?config(query_mode, Config), + Const = erlang:system_time(nanosecond), + ConstBin = integer_to_binary(Const), + TsStr = iolist_to_binary( + calendar:system_time_to_rfc3339(Const, [{unit, nanosecond}, {offset, "Z"}]) + ), + ?assertMatch( + {ok, _}, + create_bridge( + Config, + #{ + <<"write_syntax">> => + <<"mqtt,clientid=${clientid} foo=${payload.foo}i,bar=5i ", ConstBin/binary>> + } + ) + ), + ClientId = emqx_guid:to_hexstr(emqx_guid:gen()), + Payload = #{<<"foo">> => 123}, + SentData = #{ + <<"clientid">> => ClientId, + <<"topic">> => atom_to_binary(?FUNCTION_NAME), + <<"payload">> => Payload, + <<"timestamp">> => erlang:system_time(millisecond) + }, + case QueryMode of + async -> + ?assertMatch(ok, send_message(Config, SentData)), + ct:sleep(500); + sync -> + ?assertMatch({ok, 204, _}, send_message(Config, SentData)) + end, + PersistedData = query_by_clientid(ClientId, Config), + Expected = #{foo => <<"123">>}, + assert_persisted_data(ClientId, Expected, PersistedData), + TimeReturned0 = maps:get(<<"_time">>, maps:get(<<"foo">>, PersistedData)), + TimeReturned = pad_zero(TimeReturned0), + ?assertEqual(TsStr, TimeReturned). + +%% influxdb returns timestamps without trailing zeros such as +%% "2023-02-28T17:21:51.63678163Z" +%% while the standard should be +%% "2023-02-28T17:21:51.636781630Z" +pad_zero(BinTs) -> + StrTs = binary_to_list(BinTs), + [Nano | Rest] = lists:reverse(string:tokens(StrTs, ".")), + [$Z | NanoNum] = lists:reverse(Nano), + Padding = lists:duplicate(10 - length(Nano), $0), + NewNano = lists:reverse(NanoNum) ++ Padding ++ "Z", + iolist_to_binary(string:join(lists:reverse([NewNano | Rest]), ".")). 
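%% A hypothetical eunit-style check (illustration only, assuming eunit's
%% assert macros are in scope) of the padding helper, reusing the example
%% from the comment above:
pad_zero_example_test() ->
    ?assertEqual(
        <<"2023-02-28T17:21:51.636781630Z">>,
        pad_zero(<<"2023-02-28T17:21:51.63678163Z">>)
    ).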
+ t_boolean_variants(Config) -> QueryMode = ?config(query_mode, Config), ?assertMatch( @@ -697,10 +741,15 @@ t_boolean_variants(Config) -> SentData = #{ <<"clientid">> => ClientId, <<"topic">> => atom_to_binary(?FUNCTION_NAME), - <<"timestamp">> => erlang:system_time(nanosecond), + <<"timestamp">> => erlang:system_time(millisecond), <<"payload">> => Payload }, - ?assertEqual(ok, send_message(Config, SentData)), + case QueryMode of + sync -> + ?assertMatch({ok, 204, _}, send_message(Config, SentData)); + async -> + ?assertMatch(ok, send_message(Config, SentData)) + end, case QueryMode of async -> ct:sleep(500); sync -> ok @@ -710,7 +759,7 @@ t_boolean_variants(Config) -> bool => atom_to_binary(Translation), int_value => <<"-123">>, uint_value => <<"123">>, - payload => emqx_json:encode(Payload) + payload => emqx_utils_json:encode(Payload) }, assert_persisted_data(ClientId, Expected, PersistedData), ok @@ -723,7 +772,7 @@ t_bad_timestamp(Config) -> InfluxDBType = ?config(influxdb_type, Config), InfluxDBName = ?config(influxdb_name, Config), QueryMode = ?config(query_mode, Config), - EnableBatch = ?config(enable_batch, Config), + BatchSize = ?config(batch_size, Config), InfluxDBConfigString0 = ?config(influxdb_config_string, Config), InfluxDBTypeCfg = case InfluxDBType of @@ -762,7 +811,7 @@ t_bad_timestamp(Config) -> SentData = #{ <<"clientid">> => ClientId, <<"topic">> => atom_to_binary(?FUNCTION_NAME), - <<"timestamp">> => erlang:system_time(nanosecond), + <<"timestamp">> => erlang:system_time(millisecond), <<"payload">> => Payload }, ?check_trace( @@ -774,7 +823,8 @@ t_bad_timestamp(Config) -> fun(Result, Trace) -> ?assertMatch({_, {ok, _}}, Result), {Return, {ok, _}} = Result, - case {QueryMode, EnableBatch} of + IsBatch = BatchSize > 1, + case {QueryMode, IsBatch} of {async, true} -> ?assertEqual(ok, Return), ?assertMatch( @@ -784,15 +834,24 @@ t_bad_timestamp(Config) -> {async, false} -> ?assertEqual(ok, Return), ?assertMatch( - [#{error := [{error, {bad_timestamp, [<<"bad_timestamp">>]}}]}], + [ + #{ + error := [ + {error, {bad_timestamp, <<"bad_timestamp">>}} + ] + } + ], ?of_kind(influxdb_connector_send_query_error, Trace) ); {sync, false} -> ?assertEqual( - {error, [{error, {bad_timestamp, [<<"bad_timestamp">>]}}]}, Return + {error, [ + {error, {bad_timestamp, <<"bad_timestamp">>}} + ]}, + Return ); {sync, true} -> - ?assertEqual({error, points_trans_failed}, Return) + ?assertEqual({error, {unrecoverable_error, points_trans_failed}}, Return) end, ok end @@ -821,7 +880,7 @@ t_create_disconnected(Config) -> end), fun(Trace) -> ?assertMatch( - [#{error := influxdb_client_not_alive}], + [#{error := influxdb_client_not_alive, reason := econnrefused}], ?of_kind(influxdb_connector_start_failed, Trace) ), ok @@ -895,24 +954,48 @@ t_write_failure(Config) -> SentData = #{ <<"clientid">> => ClientId, <<"topic">> => atom_to_binary(?FUNCTION_NAME), - <<"timestamp">> => erlang:system_time(nanosecond), + <<"timestamp">> => erlang:system_time(millisecond), <<"payload">> => Payload }, ?check_trace( emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> - send_message(Config, SentData) - end), - fun(Result, _Trace) -> case QueryMode of sync -> + {_, {ok, _}} = + ?wait_async_action( + ?assertMatch( + {error, {resource_error, #{reason := timeout}}}, + send_message(Config, SentData) + ), + #{?snk_kind := handle_async_reply, action := nack}, + 1_000 + ); + async -> + ?wait_async_action( + ?assertEqual(ok, send_message(Config, SentData)), + #{?snk_kind := 
handle_async_reply}, + 1_000 + ) + end + end), + fun(Trace0) -> + case QueryMode of + sync -> + Trace = ?of_kind(handle_async_reply, Trace0), + ?assertMatch([_ | _], Trace), + [#{result := Result} | _] = Trace, ?assert( - {error, {error, {closed, "The connection was lost."}}} =:= Result orelse - {error, {error, closed}} =:= Result orelse - {error, {error, econnrefused}} =:= Result, + not emqx_bridge_influxdb_connector:is_unrecoverable_error(Result), #{got => Result} ); async -> - ?assertEqual(ok, Result) + Trace = ?of_kind(handle_async_reply, Trace0), + ?assertMatch([#{action := nack} | _], Trace), + [#{result := Result} | _] = Trace, + ?assert( + not emqx_bridge_influxdb_connector:is_unrecoverable_error(Result), + #{got => Result} + ) end, ok end @@ -920,13 +1003,13 @@ t_write_failure(Config) -> ok. t_missing_field(Config) -> - QueryMode = ?config(query_mode, Config), - EnableBatch = ?config(enable_batch, Config), + BatchSize = ?config(batch_size, Config), + IsBatch = BatchSize > 1, {ok, _} = create_bridge( Config, #{ - <<"resource_opts">> => #{<<"batch_size">> => 1}, + <<"resource_opts">> => #{<<"worker_pool_size">> => 1}, <<"write_syntax">> => <<"${clientid} foo=${foo}i">> } ), @@ -936,18 +1019,18 @@ t_missing_field(Config) -> ClientId0 = emqx_guid:to_hexstr(emqx_guid:gen()), ClientId1 = emqx_guid:to_hexstr(emqx_guid:gen()), %% Message with the field that we "forgot" to select in the rule - Msg0 = emqx_message:make(ClientId0, <<"t/topic">>, emqx_json:encode(#{foo => 123})), + Msg0 = emqx_message:make(ClientId0, <<"t/topic">>, emqx_utils_json:encode(#{foo => 123})), %% Message without any fields - Msg1 = emqx_message:make(ClientId1, <<"t/topic">>, emqx_json:encode(#{})), + Msg1 = emqx_message:make(ClientId1, <<"t/topic">>, emqx_utils_json:encode(#{})), ?check_trace( begin emqx:publish(Msg0), emqx:publish(Msg1), + NEvents = 1, {ok, _} = snabbkaffe:block_until( - ?match_n_events(2, #{ - ?snk_kind := influxdb_connector_send_query_error, - mode := QueryMode + ?match_n_events(NEvents, #{ + ?snk_kind := influxdb_connector_send_query_error }), _Timeout1 = 10_000 ), @@ -956,15 +1039,15 @@ t_missing_field(Config) -> fun(Trace) -> PersistedData0 = query_by_clientid(ClientId0, Config), PersistedData1 = query_by_clientid(ClientId1, Config), - case EnableBatch of + case IsBatch of true -> ?assertMatch( - [#{error := points_trans_failed}, #{error := points_trans_failed} | _], + [#{error := points_trans_failed} | _], ?of_kind(influxdb_connector_send_query_error, Trace) ); false -> ?assertMatch( - [#{error := [{error, no_fields}]}, #{error := [{error, no_fields}]} | _], + [#{error := [{error, no_fields}]} | _], ?of_kind(influxdb_connector_send_query_error, Trace) ) end, diff --git a/apps/emqx_bridge_influxdb/test/emqx_bridge_influxdb_connector_SUITE.erl b/apps/emqx_bridge_influxdb/test/emqx_bridge_influxdb_connector_SUITE.erl new file mode 100644 index 000000000..9aec94b65 --- /dev/null +++ b/apps/emqx_bridge_influxdb/test/emqx_bridge_influxdb_connector_SUITE.erl @@ -0,0 +1,232 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_influxdb_connector_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("emqx_connector/include/emqx_connector.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). 
+ +-define(INFLUXDB_RESOURCE_MOD, emqx_bridge_influxdb_connector). + +all() -> + emqx_common_test_helpers:all(?MODULE). + +groups() -> + []. + +init_per_suite(Config) -> + InfluxDBTCPHost = os:getenv("INFLUXDB_APIV2_TCP_HOST", "toxiproxy"), + InfluxDBTCPPort = list_to_integer(os:getenv("INFLUXDB_APIV2_TCP_PORT", "8086")), + InfluxDBTLSHost = os:getenv("INFLUXDB_APIV2_TLS_HOST", "toxiproxy"), + InfluxDBTLSPort = list_to_integer(os:getenv("INFLUXDB_APIV2_TLS_PORT", "8087")), + Servers = [{InfluxDBTCPHost, InfluxDBTCPPort}, {InfluxDBTLSHost, InfluxDBTLSPort}], + case emqx_common_test_helpers:is_all_tcp_servers_available(Servers) of + true -> + ok = emqx_common_test_helpers:start_apps([emqx_conf]), + ok = emqx_connector_test_helpers:start_apps([emqx_resource]), + {ok, _} = application:ensure_all_started(emqx_connector), + [ + {influxdb_tcp_host, InfluxDBTCPHost}, + {influxdb_tcp_port, InfluxDBTCPPort}, + {influxdb_tls_host, InfluxDBTLSHost}, + {influxdb_tls_port, InfluxDBTLSPort} + | Config + ]; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_influxdb); + _ -> + {skip, no_influxdb} + end + end. + +end_per_suite(_Config) -> + ok = emqx_common_test_helpers:stop_apps([emqx_conf]), + ok = emqx_connector_test_helpers:stop_apps([emqx_resource]), + _ = application:stop(emqx_connector). + +init_per_testcase(_, Config) -> + Config. + +end_per_testcase(_, _Config) -> + ok. + +% %%------------------------------------------------------------------------------ +% %% Testcases +% %%------------------------------------------------------------------------------ + +t_lifecycle(Config) -> + Host = ?config(influxdb_tcp_host, Config), + Port = ?config(influxdb_tcp_port, Config), + perform_lifecycle_check( + <<"emqx_bridge_influxdb_connector_SUITE">>, + influxdb_config(Host, Port, false, <<"verify_none">>) + ). + +perform_lifecycle_check(PoolName, InitialConfig) -> + {ok, #{config := CheckedConfig}} = + emqx_resource:check_config(?INFLUXDB_RESOURCE_MOD, InitialConfig), + % We need to add a write_syntax to the config since the connector + % expects this + FullConfig = CheckedConfig#{write_syntax => influxdb_write_syntax()}, + {ok, #{ + state := #{client := #{pool := ReturnedPoolName}} = State, + status := InitialStatus + }} = emqx_resource:create_local( + PoolName, + ?CONNECTOR_RESOURCE_GROUP, + ?INFLUXDB_RESOURCE_MOD, + FullConfig, + #{} + ), + ?assertEqual(InitialStatus, connected), + % Instance should match the state and status of the just started resource + {ok, ?CONNECTOR_RESOURCE_GROUP, #{ + state := State, + status := InitialStatus + }} = + emqx_resource:get_instance(PoolName), + ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), + % % Perform query as further check that the resource is working as expected + ?assertMatch({ok, 204, _}, emqx_resource:query(PoolName, test_query())), + ?assertEqual(ok, emqx_resource:stop(PoolName)), + % Resource will be listed still, but state will be changed and healthcheck will fail + % as the worker no longer exists. + {ok, ?CONNECTOR_RESOURCE_GROUP, #{ + state := State, + status := StoppedStatus + }} = + emqx_resource:get_instance(PoolName), + ?assertEqual(stopped, StoppedStatus), + ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)), + % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself. 
+ ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + % Can call stop/1 again on an already stopped instance + ?assertEqual(ok, emqx_resource:stop(PoolName)), + % Make sure it can be restarted and the healthchecks and queries work properly + ?assertEqual(ok, emqx_resource:restart(PoolName)), + % async restart, need to wait resource + timer:sleep(500), + {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} = + emqx_resource:get_instance(PoolName), + ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), + ?assertMatch({ok, 204, _}, emqx_resource:query(PoolName, test_query())), + % Stop and remove the resource in one go. + ?assertEqual(ok, emqx_resource:remove_local(PoolName)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + % Should not even be able to get the resource data out of ets now unlike just stopping. + ?assertEqual({error, not_found}, emqx_resource:get_instance(PoolName)). + +t_tls_verify_none(Config) -> + PoolName = <<"emqx_bridge_influxdb_connector_SUITE">>, + Host = ?config(influxdb_tls_host, Config), + Port = ?config(influxdb_tls_port, Config), + InitialConfig = influxdb_config(Host, Port, true, <<"verify_none">>), + ValidStatus = perform_tls_opts_check(PoolName, InitialConfig, valid), + ?assertEqual(connected, ValidStatus), + InvalidStatus = perform_tls_opts_check(PoolName, InitialConfig, fail), + ?assertEqual(disconnected, InvalidStatus), + ok. + +t_tls_verify_peer(Config) -> + PoolName = <<"emqx_bridge_influxdb_connector_SUITE">>, + Host = ?config(influxdb_tls_host, Config), + Port = ?config(influxdb_tls_port, Config), + InitialConfig = influxdb_config(Host, Port, true, <<"verify_peer">>), + %% This works without a CA-cert & friends since we are using a mock + ValidStatus = perform_tls_opts_check(PoolName, InitialConfig, valid), + ?assertEqual(connected, ValidStatus), + InvalidStatus = perform_tls_opts_check(PoolName, InitialConfig, fail), + ?assertEqual(disconnected, InvalidStatus), + ok. + +perform_tls_opts_check(PoolName, InitialConfig, VerifyReturn) -> + {ok, #{config := CheckedConfig}} = + emqx_resource:check_config(?INFLUXDB_RESOURCE_MOD, InitialConfig), + % Meck handling of TLS opt handling so that we can inject custom + % verification returns + meck:new(emqx_tls_lib, [passthrough, no_link]), + meck:expect( + emqx_tls_lib, + to_client_opts, + fun(Opts) -> + Verify = {verify_fun, {custom_verify(), {return, VerifyReturn}}}, + [Verify | meck:passthrough([Opts])] + end + ), + try + % We need to add a write_syntax to the config since the connector + % expects this + FullConfig = CheckedConfig#{write_syntax => influxdb_write_syntax()}, + {ok, #{ + config := #{ssl := #{enable := SslEnabled}}, + status := Status + }} = emqx_resource:create_local( + PoolName, + ?CONNECTOR_RESOURCE_GROUP, + ?INFLUXDB_RESOURCE_MOD, + FullConfig, + #{} + ), + ?assert(SslEnabled), + ?assert(meck:validate(emqx_tls_lib)), + % Stop and remove the resource in one go. + ?assertEqual(ok, emqx_resource:remove_local(PoolName)), + Status + after + meck:unload(emqx_tls_lib) + end. 
+ +% %%------------------------------------------------------------------------------ +% %% Helpers +% %%------------------------------------------------------------------------------ + +influxdb_config(Host, Port, SslEnabled, Verify) -> + Server = list_to_binary(io_lib:format("~s:~b", [Host, Port])), + ResourceConfig = #{ + <<"bucket">> => <<"mqtt">>, + <<"org">> => <<"emqx">>, + <<"token">> => <<"abcdefg">>, + <<"server">> => Server, + <<"ssl">> => #{ + <<"enable">> => SslEnabled, + <<"verify">> => Verify + } + }, + #{<<"config">> => ResourceConfig}. + +custom_verify() -> + fun + (_, {bad_cert, unknown_ca} = Event, {return, Return} = UserState) -> + ct:pal("Call to custom verify fun. Event: ~p UserState: ~p", [Event, UserState]), + {Return, UserState}; + (_, Event, UserState) -> + ct:pal("Unexpected call to custom verify fun. Event: ~p UserState: ~p", [ + Event, UserState + ]), + {fail, unexpected_call_to_verify_fun} + end. + +influxdb_write_syntax() -> + [ + #{ + measurement => "${topic}", + tags => [{"clientid", "${clientid}"}], + fields => [{"payload", "${payload}"}], + timestamp => undefined + } + ]. + +test_query() -> + {send_message, #{ + <<"clientid">> => <<"something">>, + <<"payload">> => #{bool => true}, + <<"topic">> => <<"connector_test">>, + <<"timestamp">> => 1678220316257 + }}. diff --git a/apps/emqx_bridge_influxdb/test/emqx_bridge_influxdb_tests.erl b/apps/emqx_bridge_influxdb/test/emqx_bridge_influxdb_tests.erl new file mode 100644 index 000000000..9ad685f77 --- /dev/null +++ b/apps/emqx_bridge_influxdb/test/emqx_bridge_influxdb_tests.erl @@ -0,0 +1,348 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_influxdb_tests). + +-include_lib("eunit/include/eunit.hrl"). 
+ +-define(INVALID_LINES, [ + " ", + " \n", + " \n\n\n ", + "\n", + " \n\n \n \n", + "measurement", + "measurement ", + "measurement,tag", + "measurement field", + "measurement,tag field", + "measurement,tag field ${timestamp}", + "measurement,tag=", + "measurement,tag=tag1", + "measurement,tag =", + "measurement field=", + "measurement field= ", + "measurement field = ", + "measurement, tag = field = ", + "measurement, tag = field = ", + "measurement, tag = tag_val field = field_val", + "measurement, tag = tag_val field = field_val ${timestamp}", + "measurement,= = ${timestamp}", + "measurement,t=a, f=a, ${timestamp}", + "measurement,t=a,t1=b, f=a,f1=b, ${timestamp}", + "measurement,t=a,t1=b, f=a,f1=b,", + "measurement,t=a, t1=b, f=a,f1=b,", + "measurement,t=a,,t1=b, f=a,f1=b,", + "measurement,t=a,,t1=b f=a,,f1=b", + "measurement,t=a,,t1=b f=a,f1=b ${timestamp}", + "measurement, f=a,f1=b", + "measurement, f=a,f1=b ${timestamp}", + "measurement,, f=a,f1=b ${timestamp}", + "measurement,, f=a,f1=b", + "measurement,, f=a,f1=b,, ${timestamp}", + "measurement f=a,f1=b,, ${timestamp}", + "measurement,t=a f=a,f1=b,, ${timestamp}", + "measurement,t=a f=a,f1=b,, ", + "measurement,t=a f=a,f1=b,,", + "measurement, t=a f=a,f1=b", + "measurement,t=a f=a, f1=b", + "measurement,t=a f=a, f1=b ${timestamp}", + "measurement, t=a f=a, f1=b ${timestamp}", + "measurement,t= a f=a,f1=b ${timestamp}", + "measurement,t= a f=a,f1 =b ${timestamp}", + "measurement, t = a f = a,f1 = b ${timestamp}", + "measurement,t=a f=a,f1=b \n ${timestamp}", + "measurement,t=a \n f=a,f1=b \n ${timestamp}", + "measurement,t=a \n f=a,f1=b \n ", + "\n measurement,t=a \n f=a,f1=b \n ${timestamp}", + "\n measurement,t=a \n f=a,f1=b \n", + %% not escaped backslash in a quoted field value is invalid + "measurement,tag=1 field=\"val\\1\"" +]). 
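For readers skimming these fixtures: the grammar being exercised here and in the valid-line fixtures below is InfluxDB's line protocol, `measurement[,tag=value,...] field=value[,...] [timestamp]`. A small annotated sketch, using the canonical example from the InfluxDB docs (illustration only, not part of the test module):

```erlang
%% InfluxDB line protocol shape, annotated:
%%   weather,location=us-midwest temperature=82 1465839830100400200
%%   \_____/\__________________/ \____________/ \_________________/
%%    name        tag set           field set    optional timestamp
```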
+ +-define(VALID_LINE_PARSED_PAIRS, [ + {"m1,tag=tag1 field=field1 ${timestamp1}", #{ + measurement => "m1", + tags => [{"tag", "tag1"}], + fields => [{"field", "field1"}], + timestamp => "${timestamp1}" + }}, + {"m2,tag=tag2 field=field2", #{ + measurement => "m2", + tags => [{"tag", "tag2"}], + fields => [{"field", "field2"}], + timestamp => undefined + }}, + {"m3 field=field3 ${timestamp3}", #{ + measurement => "m3", + tags => [], + fields => [{"field", "field3"}], + timestamp => "${timestamp3}" + }}, + {"m4 field=field4", #{ + measurement => "m4", + tags => [], + fields => [{"field", "field4"}], + timestamp => undefined + }}, + {"m5,tag=tag5,tag_a=tag5a,tag_b=tag5b field=field5,field_a=field5a,field_b=field5b ${timestamp5}", + #{ + measurement => "m5", + tags => [{"tag", "tag5"}, {"tag_a", "tag5a"}, {"tag_b", "tag5b"}], + fields => [{"field", "field5"}, {"field_a", "field5a"}, {"field_b", "field5b"}], + timestamp => "${timestamp5}" + }}, + {"m6,tag=tag6,tag_a=tag6a,tag_b=tag6b field=field6,field_a=field6a,field_b=field6b", #{ + measurement => "m6", + tags => [{"tag", "tag6"}, {"tag_a", "tag6a"}, {"tag_b", "tag6b"}], + fields => [{"field", "field6"}, {"field_a", "field6a"}, {"field_b", "field6b"}], + timestamp => undefined + }}, + {"m7,tag=tag7,tag_a=\"tag7a\",tag_b=tag7b field=\"field7\",field_a=field7a,field_b=\"field7b\"", + #{ + measurement => "m7", + tags => [{"tag", "tag7"}, {"tag_a", "\"tag7a\""}, {"tag_b", "tag7b"}], + fields => [{"field", "field7"}, {"field_a", "field7a"}, {"field_b", "field7b"}], + timestamp => undefined + }}, + {"m8,tag=tag8,tag_a=\"tag8a\",tag_b=tag8b field=\"field8\",field_a=field8a,field_b=\"field8b\" ${timestamp8}", + #{ + measurement => "m8", + tags => [{"tag", "tag8"}, {"tag_a", "\"tag8a\""}, {"tag_b", "tag8b"}], + fields => [{"field", "field8"}, {"field_a", "field8a"}, {"field_b", "field8b"}], + timestamp => "${timestamp8}" + }}, + {"m9,tag=tag9,tag_a=\"tag9a\",tag_b=tag9b field=\"field9\",field_a=field9a,field_b=\"\" ${timestamp9}", + #{ + measurement => "m9", + tags => [{"tag", "tag9"}, {"tag_a", "\"tag9a\""}, {"tag_b", "tag9b"}], + fields => [{"field", "field9"}, {"field_a", "field9a"}, {"field_b", ""}], + timestamp => "${timestamp9}" + }}, + {"m10 field=\"\" ${timestamp10}", #{ + measurement => "m10", + tags => [], + fields => [{"field", ""}], + timestamp => "${timestamp10}" + }} +]). 
+ +-define(VALID_LINE_EXTRA_SPACES_PARSED_PAIRS, [ + {"\n m1,tag=tag1 field=field1 ${timestamp1} \n", #{ + measurement => "m1", + tags => [{"tag", "tag1"}], + fields => [{"field", "field1"}], + timestamp => "${timestamp1}" + }}, + {" m2,tag=tag2 field=field2 ", #{ + measurement => "m2", + tags => [{"tag", "tag2"}], + fields => [{"field", "field2"}], + timestamp => undefined + }}, + {" m3 field=field3 ${timestamp3} ", #{ + measurement => "m3", + tags => [], + fields => [{"field", "field3"}], + timestamp => "${timestamp3}" + }}, + {" \n m4 field=field4\n ", #{ + measurement => "m4", + tags => [], + fields => [{"field", "field4"}], + timestamp => undefined + }}, + {" \n m5,tag=tag5,tag_a=tag5a,tag_b=tag5b field=field5,field_a=field5a,field_b=field5b ${timestamp5} \n", + #{ + measurement => "m5", + tags => [{"tag", "tag5"}, {"tag_a", "tag5a"}, {"tag_b", "tag5b"}], + fields => [{"field", "field5"}, {"field_a", "field5a"}, {"field_b", "field5b"}], + timestamp => "${timestamp5}" + }}, + {" m6,tag=tag6,tag_a=tag6a,tag_b=tag6b field=field6,field_a=field6a,field_b=field6b\n ", #{ + measurement => "m6", + tags => [{"tag", "tag6"}, {"tag_a", "tag6a"}, {"tag_b", "tag6b"}], + fields => [{"field", "field6"}, {"field_a", "field6a"}, {"field_b", "field6b"}], + timestamp => undefined + }} +]). + +-define(VALID_LINE_PARSED_ESCAPED_CHARS_PAIRS, [ + {"m\\ =1\\,,\\,tag\\ \\==\\=tag\\ 1\\, \\,fie\\ ld\\ =\\ field\\,1 ${timestamp1}", #{ + measurement => "m =1,", + tags => [{",tag =", "=tag 1,"}], + fields => [{",fie ld ", " field,1"}], + timestamp => "${timestamp1}" + }}, + {"m2,tag=tag2 field=\"field \\\"2\\\",\n\"", #{ + measurement => "m2", + tags => [{"tag", "tag2"}], + fields => [{"field", "field \"2\",\n"}], + timestamp => undefined + }}, + {"m\\ 3 field=\"field3\" ${payload.timestamp\\ 3}", #{ + measurement => "m 3", + tags => [], + fields => [{"field", "field3"}], + timestamp => "${payload.timestamp 3}" + }}, + {"m4 field=\"\\\"field\\\\4\\\"\"", #{ + measurement => "m4", + tags => [], + fields => [{"field", "\"field\\4\""}], + timestamp => undefined + }}, + { + "m5\\,mA,tag=\\=tag5\\=,\\,tag_a\\,=tag\\ 5a,tag_b=tag5b \\ field\\ =field5," + "field\\ _\\ a=field5a,\\,field_b\\ =\\=\\,\\ field5b ${timestamp5}", + #{ + measurement => "m5,mA", + tags => [{"tag", "=tag5="}, {",tag_a,", "tag 5a"}, {"tag_b", "tag5b"}], + fields => [ + {" field ", "field5"}, {"field _ a", "field5a"}, {",field_b ", "=, field5b"} + ], + timestamp => "${timestamp5}" + } + }, + {"m6,tag=tag6,tag_a=tag6a,tag_b=tag6b field=\"field6\",field_a=\"field6a\",field_b=\"field6b\"", + #{ + measurement => "m6", + tags => [{"tag", "tag6"}, {"tag_a", "tag6a"}, {"tag_b", "tag6b"}], + fields => [{"field", "field6"}, {"field_a", "field6a"}, {"field_b", "field6b"}], + timestamp => undefined + }}, + { + "\\ \\ m7\\ \\ ,tag=\\ tag\\,7\\ ,tag_a=\"tag7a\",tag_b\\,tag1=tag7b field=\"field7\"," + "field_a=field7a,field_b=\"field7b\\\\\n\"", + #{ + measurement => " m7 ", + tags => [{"tag", " tag,7 "}, {"tag_a", "\"tag7a\""}, {"tag_b,tag1", "tag7b"}], + fields => [{"field", "field7"}, {"field_a", "field7a"}, {"field_b", "field7b\\\n"}], + timestamp => undefined + } + }, + { + "m8,tag=tag8,tag_a=\"tag8a\",tag_b=tag8b field=\"field8\",field_a=field8a," + "field_b=\"\\\"field\\\" = 8b\" ${timestamp8}", + #{ + measurement => "m8", + tags => [{"tag", "tag8"}, {"tag_a", "\"tag8a\""}, {"tag_b", "tag8b"}], + fields => [{"field", "field8"}, {"field_a", "field8a"}, {"field_b", "\"field\" = 8b"}], + timestamp => "${timestamp8}" + } + }, + 
{"m\\9,tag=tag9,tag_a=\"tag9a\",tag_b=tag9b field\\=field=\"field9\",field_a=field9a,field_b=\"\" ${timestamp9}", + #{ + measurement => "m\\9", + tags => [{"tag", "tag9"}, {"tag_a", "\"tag9a\""}, {"tag_b", "tag9b"}], + fields => [{"field=field", "field9"}, {"field_a", "field9a"}, {"field_b", ""}], + timestamp => "${timestamp9}" + }}, + {"m\\,10 \"field\\\\\"=\"\" ${timestamp10}", #{ + measurement => "m,10", + tags => [], + %% backslash should not be un-escaped in tag key + fields => [{"\"field\\\\\"", ""}], + timestamp => "${timestamp10}" + }} +]). + +-define(VALID_LINE_PARSED_ESCAPED_CHARS_EXTRA_SPACES_PAIRS, [ + {" \n m\\ =1\\,,\\,tag\\ \\==\\=tag\\ 1\\, \\,fie\\ ld\\ =\\ field\\,1 ${timestamp1} ", #{ + measurement => "m =1,", + tags => [{",tag =", "=tag 1,"}], + fields => [{",fie ld ", " field,1"}], + timestamp => "${timestamp1}" + }}, + {" m2,tag=tag2 field=\"field \\\"2\\\",\n\" ", #{ + measurement => "m2", + tags => [{"tag", "tag2"}], + fields => [{"field", "field \"2\",\n"}], + timestamp => undefined + }}, + {" m\\ 3 field=\"field3\" ${payload.timestamp\\ 3} ", #{ + measurement => "m 3", + tags => [], + fields => [{"field", "field3"}], + timestamp => "${payload.timestamp 3}" + }}, + {" m4 field=\"\\\"field\\\\4\\\"\" ", #{ + measurement => "m4", + tags => [], + fields => [{"field", "\"field\\4\""}], + timestamp => undefined + }}, + { + " m5\\,mA,tag=\\=tag5\\=,\\,tag_a\\,=tag\\ 5a,tag_b=tag5b \\ field\\ =field5," + "field\\ _\\ a=field5a,\\,field_b\\ =\\=\\,\\ field5b ${timestamp5} ", + #{ + measurement => "m5,mA", + tags => [{"tag", "=tag5="}, {",tag_a,", "tag 5a"}, {"tag_b", "tag5b"}], + fields => [ + {" field ", "field5"}, {"field _ a", "field5a"}, {",field_b ", "=, field5b"} + ], + timestamp => "${timestamp5}" + } + }, + {" m6,tag=tag6,tag_a=tag6a,tag_b=tag6b field=\"field6\",field_a=\"field6a\",field_b=\"field6b\" ", + #{ + measurement => "m6", + tags => [{"tag", "tag6"}, {"tag_a", "tag6a"}, {"tag_b", "tag6b"}], + fields => [{"field", "field6"}, {"field_a", "field6a"}, {"field_b", "field6b"}], + timestamp => undefined + }} +]). + +invalid_write_syntax_line_test_() -> + [?_assertThrow(_, to_influx_lines(L)) || L <- ?INVALID_LINES]. + +invalid_write_syntax_multiline_test_() -> + LinesList = [ + join("\n", ?INVALID_LINES), + join("\n\n\n", ?INVALID_LINES), + join("\n\n", lists:reverse(?INVALID_LINES)) + ], + [?_assertThrow(_, to_influx_lines(Lines)) || Lines <- LinesList]. + +valid_write_syntax_test_() -> + test_pairs(?VALID_LINE_PARSED_PAIRS). + +valid_write_syntax_with_extra_spaces_test_() -> + test_pairs(?VALID_LINE_EXTRA_SPACES_PARSED_PAIRS). + +valid_write_syntax_escaped_chars_test_() -> + test_pairs(?VALID_LINE_PARSED_ESCAPED_CHARS_PAIRS). + +valid_write_syntax_escaped_chars_with_extra_spaces_test_() -> + test_pairs(?VALID_LINE_PARSED_ESCAPED_CHARS_EXTRA_SPACES_PAIRS). + +test_pairs(PairsList) -> + {Lines, AllExpected} = lists:unzip(PairsList), + JoinedLines = join("\n", Lines), + JoinedLines1 = join("\n\n\n", Lines), + JoinedLines2 = join("\n\n", lists:reverse(Lines)), + SingleLineTests = + [ + ?_assertEqual([Expected], to_influx_lines(Line)) + || {Line, Expected} <- PairsList + ], + JoinedLinesTests = + [ + ?_assertEqual(AllExpected, to_influx_lines(JoinedLines)), + ?_assertEqual(AllExpected, to_influx_lines(JoinedLines1)), + ?_assertEqual(lists:reverse(AllExpected), to_influx_lines(JoinedLines2)) + ], + SingleLineTests ++ JoinedLinesTests. + +join(Sep, LinesList) -> + lists:flatten(lists:join(Sep, LinesList)). 
+ +to_influx_lines(RawLines) -> + OldLevel = emqx_logger:get_primary_log_level(), + try + %% mute error logs from this call + emqx_logger:set_primary_log_level(none), + emqx_bridge_influxdb:to_influx_lines(RawLines) + after + emqx_logger:set_primary_log_level(OldLevel) + end. diff --git a/apps/emqx_bridge_iotdb/.gitignore b/apps/emqx_bridge_iotdb/.gitignore new file mode 100644 index 000000000..e9bc1c544 --- /dev/null +++ b/apps/emqx_bridge_iotdb/.gitignore @@ -0,0 +1,19 @@ +.rebar3 + _* + .eunit + *.o + *.beam + *.plt + *.swp + *.swo + .erlang.cookie + ebin + log + erl_crash.dump + .rebar + logs + _build + .idea + *.iml + rebar3.crashdump + *~ diff --git a/apps/emqx_bridge_iotdb/BSL.txt b/apps/emqx_bridge_iotdb/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_iotdb/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. 
+
+This License does not grant you any right in any trademark or logo of
+Licensor or its affiliates (provided that you may use a trademark or logo of
+Licensor as expressly required by this License).
+
+TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
+AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
+EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
+TITLE.
+
+MariaDB hereby grants you permission to use this License’s text to license
+your works, and to refer to it using the trademark “Business Source License”,
+as long as you comply with the Covenants of Licensor below.
+
+Covenants of Licensor
+
+In consideration of the right to use this License’s text and the “Business
+Source License” name and trademark, Licensor covenants to MariaDB, and to all
+other recipients of the licensed work to be provided by Licensor:
+
+1. To specify as the Change License the GPL Version 2.0 or any later version,
+   or a license that is compatible with GPL Version 2.0 or a later version,
+   where “compatible” means that software provided under the Change License can
+   be included in a program with software provided under GPL Version 2.0 or a
+   later version. Licensor may specify additional Change Licenses without
+   limitation.
+
+2. To either: (a) specify an additional grant of rights to use that does not
+   impose any additional restriction on the right granted in this License, as
+   the Additional Use Grant; or (b) insert the text “None”.
+
+3. To specify a Change Date.
+
+4. Not to modify this License in any other way.
diff --git a/apps/emqx_bridge_iotdb/README.md b/apps/emqx_bridge_iotdb/README.md
new file mode 100644
index 000000000..48f5d74c2
--- /dev/null
+++ b/apps/emqx_bridge_iotdb/README.md
@@ -0,0 +1,26 @@
+# Apache IoTDB Data Integration Bridge
+
+This application houses the IoTDB data integration bridge for EMQX Enterprise
+ Edition. It provides the means to connect to IoTDB and publish messages to it.
+
+It implements the connection management and interaction without need for a
+ separate connector app, since it's not used by authentication and authorization
+ applications.
+
+# Documentation links
+
+For more information on Apache IoTDB, please see its [official
+ site](https://iotdb.apache.org/).
+
+# Configurations
+
+Please see [our official
+ documentation](https://www.emqx.io/docs/en/v5.0/data-integration/data-bridge-iotdb.html)
+ for more detailed info.
+
+# Contributing
+Please see our [contributing.md](../../CONTRIBUTING.md).
+
+# License
+
+See [BSL](./BSL.txt).
diff --git a/apps/emqx_bridge_iotdb/docker-ct b/apps/emqx_bridge_iotdb/docker-ct
new file mode 100644
index 000000000..8a8973a88
--- /dev/null
+++ b/apps/emqx_bridge_iotdb/docker-ct
@@ -0,0 +1,2 @@
+toxiproxy
+iotdb
diff --git a/apps/emqx_bridge_iotdb/etc/emqx_bridge_iotdb.conf b/apps/emqx_bridge_iotdb/etc/emqx_bridge_iotdb.conf
new file mode 100644
index 000000000..e69de29bb
diff --git a/apps/emqx_bridge_iotdb/include/emqx_bridge_iotdb.hrl b/apps/emqx_bridge_iotdb/include/emqx_bridge_iotdb.hrl
new file mode 100644
index 000000000..5e6bf9ac5
--- /dev/null
+++ b/apps/emqx_bridge_iotdb/include/emqx_bridge_iotdb.hrl
@@ -0,0 +1,11 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+
+-ifndef(EMQX_BRIDGE_IOTDB_HRL).
+-define(EMQX_BRIDGE_IOTDB_HRL, true).
+
+-define(VSN_1_0_X, 'v1.0.x').
+-define(VSN_0_13_X, 'v0.13.x').
+
+-endif.
diff --git a/apps/emqx_bridge_iotdb/rebar.config b/apps/emqx_bridge_iotdb/rebar.config
new file mode 100644
index 000000000..a4afd2877
--- /dev/null
+++ b/apps/emqx_bridge_iotdb/rebar.config
@@ -0,0 +1,14 @@
+%% -*- mode: erlang -*-
+
+{erl_opts, [
+    debug_info
+]}.
+
+{deps, [
+    {emqx, {path, "../../apps/emqx"}},
+    {emqx_connector, {path, "../../apps/emqx_connector"}},
+    {emqx_resource, {path, "../../apps/emqx_resource"}},
+    {emqx_bridge, {path, "../../apps/emqx_bridge"}}
+]}.
+{plugins, [rebar3_path_deps]}.
+{project_plugins, [erlfmt]}.
diff --git a/apps/emqx_bridge_iotdb/src/emqx_bridge_iotdb.app.src b/apps/emqx_bridge_iotdb/src/emqx_bridge_iotdb.app.src
new file mode 100644
index 000000000..9c5108307
--- /dev/null
+++ b/apps/emqx_bridge_iotdb/src/emqx_bridge_iotdb.app.src
@@ -0,0 +1,22 @@
+%% -*- mode: erlang -*-
+{application, emqx_bridge_iotdb, [
+    {description, "EMQX Enterprise Apache IoTDB Bridge"},
+    {vsn, "0.1.0"},
+    {modules, [
+        emqx_bridge_iotdb,
+        emqx_bridge_iotdb_impl
+    ]},
+    {registered, []},
+    {applications, [
+        kernel,
+        stdlib,
+        emqx_connector
+    ]},
+    {env, []},
+    {licenses, ["Business Source License 1.1"]},
+    {maintainers, ["EMQX Team <contact@emqx.io>"]},
+    {links, [
+        {"Homepage", "https://emqx.io/"},
+        {"Github", "https://github.com/emqx/emqx"}
+    ]}
+]}.
diff --git a/apps/emqx_bridge_iotdb/src/emqx_bridge_iotdb.erl b/apps/emqx_bridge_iotdb/src/emqx_bridge_iotdb.erl
new file mode 100644
index 000000000..90e8d18a4
--- /dev/null
+++ b/apps/emqx_bridge_iotdb/src/emqx_bridge_iotdb.erl
@@ -0,0 +1,233 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+-module(emqx_bridge_iotdb).
+
+-include("emqx_bridge_iotdb.hrl").
+-include_lib("typerefl/include/types.hrl").
+-include_lib("hocon/include/hoconsc.hrl").
+-include_lib("emqx_resource/include/emqx_resource.hrl").
+
+-import(hoconsc, [mk/2, enum/1, ref/2]).
+
+%% hocon_schema API
+-export([
+    namespace/0,
+    roots/0,
+    fields/1,
+    desc/1
+]).
+
+%% emqx_ee_bridge "unofficial" API
+-export([conn_bridge_examples/1]).
+
+%%-------------------------------------------------------------------------------------------------
+%% `hocon_schema' API
+%%-------------------------------------------------------------------------------------------------
+
+namespace() -> "bridge_iotdb".
+
+roots() -> [].
+
+fields("config") ->
+    basic_config() ++ request_config();
+fields("post") ->
+    [
+        type_field(),
+        name_field()
+    ] ++ fields("config");
+fields("put") ->
+    fields("config");
+fields("get") ->
+    emqx_bridge_schema:status_fields() ++ fields("post");
+fields("creation_opts") ->
+    lists:filter(
+        fun({K, _V}) ->
+            not lists:member(K, unsupported_opts())
+        end,
+        emqx_resource_schema:fields("creation_opts")
+    );
+fields(auth_basic) ->
+    [
+        {username, mk(binary(), #{required => true, desc => ?DESC("config_auth_basic_username")})},
+        {password,
+            mk(binary(), #{
+                required => true,
+                desc => ?DESC("config_auth_basic_password"),
+                format => <<"password">>,
+                sensitive => true,
+                converter => fun emqx_schema:password_converter/2
+            })}
+    ].
+ +desc("config") -> + ?DESC("desc_config"); +desc("creation_opts") -> + ?DESC(emqx_resource_schema, "creation_opts"); +desc("post") -> + ["Configuration for IoTDB using `POST` method."]; +desc(Name) -> + lists:member(Name, struct_names()) orelse throw({missing_desc, Name}), + ?DESC(Name). + +struct_names() -> + [ + auth_basic + ]. + +basic_config() -> + [ + {enable, + mk( + boolean(), + #{ + desc => ?DESC("config_enable"), + default => true + } + )}, + {authentication, + mk( + hoconsc:union([ref(?MODULE, auth_basic)]), + #{ + default => auth_basic, desc => ?DESC("config_authentication") + } + )}, + {is_aligned, + mk( + boolean(), + #{ + desc => ?DESC("config_is_aligned"), + default => false + } + )}, + {device_id, + mk( + binary(), + #{ + desc => ?DESC("config_device_id") + } + )}, + {iotdb_version, + mk( + hoconsc:enum([?VSN_1_0_X, ?VSN_0_13_X]), + #{ + desc => ?DESC("config_iotdb_version"), + default => ?VSN_1_0_X + } + )} + ] ++ resource_creation_opts() ++ + proplists_without( + [max_retries, base_url, request], + emqx_connector_http:fields(config) + ). + +proplists_without(Keys, List) -> + [El || El = {K, _} <- List, not lists:member(K, Keys)]. + +request_config() -> + [ + {base_url, + mk( + emqx_schema:url(), + #{ + desc => ?DESC("config_base_url") + } + )}, + {max_retries, + mk( + non_neg_integer(), + #{ + default => 2, + desc => ?DESC("config_max_retries") + } + )}, + {request_timeout, + mk( + emqx_schema:duration_ms(), + #{ + default => <<"15s">>, + desc => ?DESC("config_request_timeout") + } + )} + ]. + +resource_creation_opts() -> + [ + {resource_opts, + mk( + ref(?MODULE, "creation_opts"), + #{ + required => false, + default => #{}, + desc => ?DESC(emqx_resource_schema, <<"resource_opts">>) + } + )} + ]. + +unsupported_opts() -> + [ + batch_size, + batch_time + ]. + +%%====================================================================================== + +type_field() -> + {type, + mk( + hoconsc:enum([iotdb]), + #{ + required => true, + desc => ?DESC("desc_type") + } + )}. + +name_field() -> + {name, + mk( + binary(), + #{ + required => true, + desc => ?DESC("desc_name") + } + )}. + +%%====================================================================================== + +conn_bridge_examples(Method) -> + [ + #{ + <<"iotdb">> => + #{ + summary => <<"Apache IoTDB Bridge">>, + value => conn_bridge_example(Method, iotdb) + } + } + ]. + +conn_bridge_example(_Method, Type) -> + #{ + name => <<"My IoTDB Bridge">>, + type => Type, + enable => true, + authentication => #{ + <<"username">> => <<"root">>, + <<"password">> => <<"*****">> + }, + is_aligned => false, + device_id => <<"my_device">>, + base_url => <<"http://iotdb.local:18080/">>, + iotdb_version => ?VSN_1_0_X, + connect_timeout => <<"15s">>, + pool_type => <<"random">>, + pool_size => 8, + enable_pipelining => 100, + ssl => #{enable => false}, + resource_opts => #{ + worker_pool_size => 8, + health_check_interval => ?HEALTHCHECK_INTERVAL_RAW, + auto_restart_interval => ?AUTO_RESTART_INTERVAL_RAW, + query_mode => async, + max_buffer_bytes => ?DEFAULT_BUFFER_BYTES + } + }. diff --git a/apps/emqx_bridge_iotdb/src/emqx_bridge_iotdb_impl.erl b/apps/emqx_bridge_iotdb/src/emqx_bridge_iotdb_impl.erl new file mode 100644 index 000000000..2f8794560 --- /dev/null +++ b/apps/emqx_bridge_iotdb/src/emqx_bridge_iotdb_impl.erl @@ -0,0 +1,382 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. 
+%%-------------------------------------------------------------------- +-module(emqx_bridge_iotdb_impl). + +-include("emqx_bridge_iotdb.hrl"). +-include_lib("emqx/include/logger.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +%% `emqx_resource' API +-export([ + callback_mode/0, + on_start/2, + on_stop/2, + on_get_status/2, + on_query/3, + on_query_async/4 +]). + +-type config() :: + #{ + base_url := #{ + scheme := http | https, + host := iolist(), + port := inet:port_number(), + path := '_' + }, + connect_timeout := pos_integer(), + pool_type := random | hash, + pool_size := pos_integer(), + request := undefined | map(), + is_aligned := boolean(), + iotdb_version := binary(), + device_id := binary() | undefined, + atom() => '_' + }. + +-type state() :: + #{ + base_path := '_', + base_url := #{ + scheme := http | https, + host := iolist(), + port := inet:port_number(), + path := '_' + }, + connect_timeout := pos_integer(), + pool_type := random | hash, + pool_size := pos_integer(), + request := undefined | map(), + is_aligned := boolean(), + iotdb_version := binary(), + device_id := binary() | undefined, + atom() => '_' + }. + +-type manager_id() :: binary(). + +%%------------------------------------------------------------------------------------- +%% `emqx_resource' API +%%------------------------------------------------------------------------------------- +callback_mode() -> async_if_possible. + +-spec on_start(manager_id(), config()) -> {ok, state()} | no_return(). +on_start(InstanceId, Config) -> + %% [FIXME] The configuration passed in here is pre-processed and transformed + %% in emqx_bridge_resource:parse_confs/2. + case emqx_connector_http:on_start(InstanceId, Config) of + {ok, State} -> + ?SLOG(info, #{ + msg => "iotdb_bridge_started", + instance_id => InstanceId, + request => maps:get(request, State, <<>>) + }), + ?tp(iotdb_bridge_started, #{}), + {ok, maps:merge(Config, State)}; + {error, Reason} -> + ?SLOG(error, #{ + msg => "failed_to_start_iotdb_bridge", + instance_id => InstanceId, + base_url => maps:get(request, Config, <<>>), + reason => Reason + }), + throw(failed_to_start_iotdb_bridge) + end. + +-spec on_stop(manager_id(), state()) -> ok | {error, term()}. +on_stop(InstanceId, State) -> + ?SLOG(info, #{ + msg => "stopping_iotdb_bridge", + connector => InstanceId + }), + Res = emqx_connector_http:on_stop(InstanceId, State), + ?tp(iotdb_bridge_stopped, #{instance_id => InstanceId}), + Res. + +-spec on_get_status(manager_id(), state()) -> + {connected, state()} | {disconnected, state(), term()}. +on_get_status(InstanceId, State) -> + emqx_connector_http:on_get_status(InstanceId, State). + +-spec on_query(manager_id(), {send_message, map()}, state()) -> + {ok, pos_integer(), [term()], term()} + | {ok, pos_integer(), [term()]} + | {error, term()}. +on_query(InstanceId, {send_message, Message}, State) -> + ?SLOG(debug, #{ + msg => "iotdb_bridge_on_query_called", + instance_id => InstanceId, + send_message => Message, + state => emqx_utils:redact(State) + }), + IoTDBPayload = make_iotdb_insert_request(Message, State), + handle_response( + emqx_connector_http:on_query( + InstanceId, {send_message, IoTDBPayload}, State + ) + ). + +-spec on_query_async(manager_id(), {send_message, map()}, {function(), [term()]}, state()) -> + {ok, pid()}. 
+on_query_async(InstanceId, {send_message, Message}, ReplyFunAndArgs0, State) -> + ?SLOG(debug, #{ + msg => "iotdb_bridge_on_query_async_called", + instance_id => InstanceId, + send_message => Message, + state => emqx_utils:redact(State) + }), + IoTDBPayload = make_iotdb_insert_request(Message, State), + ReplyFunAndArgs = + { + fun(Result) -> + Response = handle_response(Result), + emqx_resource:apply_reply_fun(ReplyFunAndArgs0, Response) + end, + [] + }, + emqx_connector_http:on_query_async( + InstanceId, {send_message, IoTDBPayload}, ReplyFunAndArgs, State + ). + +%%-------------------------------------------------------------------- +%% Internal Functions +%%-------------------------------------------------------------------- + +preproc_data(DataList) -> + lists:map( + fun( + #{ + measurement := Measurement, + data_type := DataType, + value := Value + } = Data + ) -> + #{ + timestamp => emqx_plugin_libs_rule:preproc_tmpl( + maps:get(<<"timestamp">>, Data, <<"now">>) + ), + measurement => emqx_plugin_libs_rule:preproc_tmpl(Measurement), + data_type => DataType, + value => emqx_plugin_libs_rule:preproc_tmpl(Value) + } + end, + DataList + ). + +proc_data(PreProcessedData, Msg) -> + NowNS = erlang:system_time(nanosecond), + Nows = #{ + now_ms => erlang:convert_time_unit(NowNS, nanosecond, millisecond), + now_us => erlang:convert_time_unit(NowNS, nanosecond, microsecond), + now_ns => NowNS + }, + lists:map( + fun( + #{ + timestamp := TimestampTkn, + measurement := Measurement, + data_type := DataType, + value := ValueTkn + } + ) -> + #{ + timestamp => iot_timestamp( + emqx_plugin_libs_rule:proc_tmpl(TimestampTkn, Msg), Nows + ), + measurement => emqx_plugin_libs_rule:proc_tmpl(Measurement, Msg), + data_type => DataType, + value => proc_value(DataType, ValueTkn, Msg) + } + end, + PreProcessedData + ). + +iot_timestamp(Timestamp, #{now_ms := NowMs}) when + Timestamp =:= <<"now">>; Timestamp =:= <<"now_ms">>; Timestamp =:= <<>> +-> + NowMs; +iot_timestamp(Timestamp, #{now_us := NowUs}) when Timestamp =:= <<"now_us">> -> + NowUs; +iot_timestamp(Timestamp, #{now_ns := NowNs}) when Timestamp =:= <<"now_ns">> -> + NowNs; +iot_timestamp(Timestamp, _) when is_binary(Timestamp) -> + binary_to_integer(Timestamp). + +proc_value(<<"TEXT">>, ValueTkn, Msg) -> + case emqx_plugin_libs_rule:proc_tmpl(ValueTkn, Msg) of + <<"undefined">> -> null; + Val -> Val + end; +proc_value(<<"BOOLEAN">>, ValueTkn, Msg) -> + convert_bool(replace_var(ValueTkn, Msg)); +proc_value(Int, ValueTkn, Msg) when Int =:= <<"INT32">>; Int =:= <<"INT64">> -> + convert_int(replace_var(ValueTkn, Msg)); +proc_value(Int, ValueTkn, Msg) when Int =:= <<"FLOAT">>; Int =:= <<"DOUBLE">> -> + convert_float(replace_var(ValueTkn, Msg)). + +replace_var(Tokens, Data) when is_list(Tokens) -> + [Val] = emqx_plugin_libs_rule:proc_tmpl(Tokens, Data, #{return => rawlist}), + Val; +replace_var(Val, _Data) -> + Val. + +convert_bool(B) when is_boolean(B) -> B; +convert_bool(1) -> true; +convert_bool(0) -> false; +convert_bool(<<"1">>) -> true; +convert_bool(<<"0">>) -> false; +convert_bool(<<"true">>) -> true; +convert_bool(<<"True">>) -> true; +convert_bool(<<"TRUE">>) -> true; +convert_bool(<<"false">>) -> false; +convert_bool(<<"False">>) -> false; +convert_bool(<<"FALSE">>) -> false; +convert_bool(undefined) -> null. 
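Since the `iot_timestamp/2` clauses above are the only place the per-point `timestamp` string is interpreted, a few concrete resolutions may help (a sketch, not part of the module; `Nows` is the map built in `proc_data/2`):

```erlang
%% Assuming Nows = #{now_ms => 1678220316257, now_us => 1678220316257000, now_ns => ...}:
%% iot_timestamp(<<"now">>, Nows)           -> 1678220316257     (milliseconds)
%% iot_timestamp(<<"now_us">>, Nows)        -> 1678220316257000  (microseconds)
%% iot_timestamp(<<"1678220316257">>, Nows) -> 1678220316257     (literal passthrough)
%% iot_timestamp(<<"not_a_number">>, Nows)  -> badarg from binary_to_integer/1
```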
+ +convert_int(Int) when is_integer(Int) -> Int; +convert_int(Float) when is_float(Float) -> floor(Float); +convert_int(Str) when is_binary(Str) -> + try + binary_to_integer(Str) + catch + _:_ -> + convert_int(binary_to_float(Str)) + end; +convert_int(undefined) -> + null. + +convert_float(Float) when is_float(Float) -> Float; +convert_float(Int) when is_integer(Int) -> Int * 10 / 10; +convert_float(Str) when is_binary(Str) -> + try + binary_to_float(Str) + catch + _:_ -> + convert_float(binary_to_integer(Str)) + end; +convert_float(undefined) -> + null. + +make_iotdb_insert_request(Message, State) -> + IsAligned = maps:get(is_aligned, State, false), + DeviceId = device_id(Message, State), + IotDBVsn = maps:get(iotdb_version, State, ?VSN_1_0_X), + Payload = make_list(maps:get(payload, Message)), + PreProcessedData = preproc_data(Payload), + DataList = proc_data(PreProcessedData, Message), + InitAcc = #{timestamps => [], measurements => [], dtypes => [], values => []}, + Rows = replace_dtypes(aggregate_rows(DataList, InitAcc), IotDBVsn), + maps:merge(Rows, #{ + iotdb_field_key(is_aligned, IotDBVsn) => IsAligned, + iotdb_field_key(device_id, IotDBVsn) => DeviceId + }). + +replace_dtypes(Rows, IotDBVsn) -> + {Types, Map} = maps:take(dtypes, Rows), + Map#{iotdb_field_key(data_types, IotDBVsn) => Types}. + +aggregate_rows(DataList, InitAcc) -> + lists:foldr( + fun( + #{ + timestamp := Timestamp, + measurement := Measurement, + data_type := DataType, + value := Data + }, + #{ + timestamps := AccTs, + measurements := AccM, + dtypes := AccDt, + values := AccV + } = Acc + ) -> + Timestamps = [Timestamp | AccTs], + case index_of(Measurement, AccM) of + 0 -> + Acc#{ + timestamps => Timestamps, + values => [pad_value(Data, length(AccTs)) | pad_existing_values(AccV)], + measurements => [Measurement | AccM], + dtypes => [DataType | AccDt] + }; + Index -> + Acc#{ + timestamps => Timestamps, + values => insert_value(Index, Data, AccV), + measurements => AccM, + dtypes => AccDt + } + end + end, + InitAcc, + DataList + ). + +pad_value(Data, N) -> + [Data | lists:duplicate(N, null)]. + +pad_existing_values(Values) -> + [[null | Value] || Value <- Values]. + +index_of(E, List) -> + string:str(List, [E]). + +insert_value(_Index, _Data, []) -> + []; +insert_value(1, Data, [Value | Values]) -> + [[Data | Value] | insert_value(0, Data, Values)]; +insert_value(Index, Data, [Value | Values]) -> + [[null | Value] | insert_value(Index - 1, Data, Values)]. + +iotdb_field_key(is_aligned, ?VSN_1_0_X) -> + <<"is_aligned">>; +iotdb_field_key(is_aligned, ?VSN_0_13_X) -> + <<"isAligned">>; +iotdb_field_key(device_id, ?VSN_1_0_X) -> + <<"device">>; +iotdb_field_key(device_id, ?VSN_0_13_X) -> + <<"deviceId">>; +iotdb_field_key(data_types, ?VSN_1_0_X) -> + <<"data_types">>; +iotdb_field_key(data_types, ?VSN_0_13_X) -> + <<"dataTypes">>. + +make_list(List) when is_list(List) -> List; +make_list(Data) -> [Data]. + +device_id(Message, State) -> + case maps:get(device_id, State, undefined) of + undefined -> + case maps:get(payload, Message) of + #{device_id := DeviceId} -> + DeviceId; + _NotFound -> + Topic = maps:get(topic, Message), + case re:replace(Topic, "/", ".", [global, {return, binary}]) of + <<"root.", _/binary>> = Device -> Device; + Device -> <<"root.", Device/binary>> + end + end; + DeviceId -> + DeviceIdTkn = emqx_plugin_libs_rule:preproc_tmpl(DeviceId), + emqx_plugin_libs_rule:proc_tmpl(DeviceIdTkn, Message) + end. 
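A worked example of the topic fallback in `device_id/2` above, with hypothetical topics (used when neither the bridge config nor the payload carries a device id):

```erlang
%% "/" becomes "." and a "root." prefix is ensured:
%% topic <<"factory/line1/sensor">> -> <<"root.factory.line1.sensor">>
%% topic <<"root/line1/sensor">>    -> <<"root.line1.sensor">>  (already rooted)
```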
+ +handle_response({ok, 200, _Headers, Body} = Resp) -> + eval_response_body(Body, Resp); +handle_response({ok, 200, Body} = Resp) -> + eval_response_body(Body, Resp); +handle_response({ok, Code, _Headers, Body}) -> + {error, #{code => Code, body => Body}}; +handle_response({ok, Code, Body}) -> + {error, #{code => Code, body => Body}}; +handle_response({error, _} = Error) -> + Error. + +eval_response_body(Body, Resp) -> + case emqx_utils_json:decode(Body) of + #{<<"code">> := 200} -> Resp; + Reason -> {error, Reason} + end. diff --git a/apps/emqx_bridge_iotdb/test/emqx_bridge_iotdb_impl_SUITE.erl b/apps/emqx_bridge_iotdb/test/emqx_bridge_iotdb_impl_SUITE.erl new file mode 100644 index 000000000..434587cf0 --- /dev/null +++ b/apps/emqx_bridge_iotdb/test/emqx_bridge_iotdb_impl_SUITE.erl @@ -0,0 +1,229 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_iotdb_impl_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). + +-define(BRIDGE_TYPE_BIN, <<"iotdb">>). +-define(APPS, [emqx_bridge, emqx_resource, emqx_rule_engine, emqx_bridge_iotdb]). + +%%------------------------------------------------------------------------------ +%% CT boilerplate +%%------------------------------------------------------------------------------ + +all() -> + [ + {group, plain} + ]. + +groups() -> + AllTCs = emqx_common_test_helpers:all(?MODULE), + [ + {plain, AllTCs} + ]. + +init_per_suite(Config) -> + emqx_bridge_testlib:init_per_suite(Config, ?APPS). + +end_per_suite(Config) -> + emqx_bridge_testlib:end_per_suite(Config). + +init_per_group(plain = Type, Config0) -> + Host = os:getenv("IOTDB_PLAIN_HOST", "toxiproxy.emqx.net"), + Port = list_to_integer(os:getenv("IOTDB_PLAIN_PORT", "18080")), + ProxyName = "iotdb", + case emqx_common_test_helpers:is_tcp_server_available(Host, Port) of + true -> + Config = emqx_bridge_testlib:init_per_group(Type, ?BRIDGE_TYPE_BIN, Config0), + [ + {bridge_host, Host}, + {bridge_port, Port}, + {proxy_name, ProxyName} + | Config + ]; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_iotdb); + _ -> + {skip, no_iotdb} + end + end; +init_per_group(_Group, Config) -> + Config. + +end_per_group(Group, Config) when + Group =:= plain +-> + emqx_bridge_testlib:end_per_group(Config), + ok; +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(TestCase, Config0) -> + Config = emqx_bridge_testlib:init_per_testcase(TestCase, Config0, fun bridge_config/3), + reset_service(Config), + Config. + +end_per_testcase(TestCase, Config) -> + emqx_bridge_testlib:end_per_testcase(TestCase, Config). 
+
+%%------------------------------------------------------------------------------
+%% Helper fns
+%%------------------------------------------------------------------------------
+
+bridge_config(TestCase, _TestGroup, Config) ->
+    UniqueNum = integer_to_binary(erlang:unique_integer()),
+    Host = ?config(bridge_host, Config),
+    Port = ?config(bridge_port, Config),
+    Name = <<
+        (atom_to_binary(TestCase))/binary, UniqueNum/binary
+    >>,
+    ServerURL = iolist_to_binary([
+        "http://",
+        Host,
+        ":",
+        integer_to_binary(Port)
+    ]),
+    ConfigString =
+        io_lib:format(
+            "bridges.iotdb.~s {\n"
+            "  enable = true\n"
+            "  base_url = \"~s\"\n"
+            "  authentication = {\n"
+            "    username = \"root\"\n"
+            "    password = \"root\"\n"
+            "  }\n"
+            "  pool_size = 1\n"
+            "  resource_opts = {\n"
+            "    auto_restart_interval = 5000\n"
+            "    request_timeout = 30000\n"
+            "    query_mode = \"async\"\n"
+            "    worker_pool_size = 1\n"
+            "  }\n"
+            "}\n",
+            [
+                Name,
+                ServerURL
+            ]
+        ),
+    {Name, ConfigString, emqx_bridge_testlib:parse_and_check(Config, ConfigString, Name)}.
+
+reset_service(Config) ->
+    _BridgeConfig =
+        #{
+            <<"base_url">> := BaseURL,
+            <<"authentication">> := #{
+                <<"username">> := Username,
+                <<"password">> := Password
+            }
+        } =
+        ?config(bridge_config, Config),
+    ct:pal("bridge config: ~p", [_BridgeConfig]),
+    Path = <<BaseURL/binary, "/rest/v2/nonQuery">>,
+    BasicToken = base64:encode(<<Username/binary, ":", Password/binary>>),
+    Headers = [
+        {"Content-type", "application/json"},
+        {"Authorization", binary_to_list(BasicToken)}
+    ],
+    Device = iotdb_device(Config),
+    Body = #{sql => <<"delete from ", Device/binary, ".*">>},
+    {ok, _} = emqx_mgmt_api_test_util:request_api(post, Path, "", Headers, Body, #{}).
+
+make_iotdb_payload(DeviceId) ->
+    make_iotdb_payload(DeviceId, "temp", <<"INT32">>, "36").
+
+make_iotdb_payload(DeviceId, Measurement, Type, Value) ->
+    #{
+        measurement => Measurement,
+        data_type => Type,
+        value => Value,
+        device_id => DeviceId,
+        is_aligned => false
+    }.
+
+make_message_fun(Topic, Payload) ->
+    fun() ->
+        MsgId = erlang:unique_integer([positive]),
+        #{
+            topic => Topic,
+            id => MsgId,
+            payload => Payload,
+            retain => true
+        }
+    end.
+
+iotdb_device(Config) ->
+    MQTTTopic = ?config(mqtt_topic, Config),
+    Device = re:replace(MQTTTopic, "/", ".dev", [global, {return, binary}]),
+    <<"root.", Device/binary>>.
+
+%%------------------------------------------------------------------------------
+%% Testcases
+%%------------------------------------------------------------------------------
+
+t_sync_query_simple(Config) ->
+    DeviceId = iotdb_device(Config),
+    Payload = make_iotdb_payload(DeviceId, "temp", <<"INT32">>, "36"),
+    MakeMessageFun = make_message_fun(DeviceId, Payload),
+    IsSuccessCheck =
+        fun(Result) ->
+            ?assertEqual(ok, element(1, Result))
+        end,
+    emqx_bridge_testlib:t_sync_query(Config, MakeMessageFun, IsSuccessCheck).
+
+t_async_query(Config) ->
+    DeviceId = iotdb_device(Config),
+    Payload = make_iotdb_payload(DeviceId, "temp", <<"INT32">>, "36"),
+    MakeMessageFun = make_message_fun(DeviceId, Payload),
+    IsSuccessCheck =
+        fun(Result) ->
+            ?assertEqual(ok, element(1, Result))
+        end,
+    emqx_bridge_testlib:t_async_query(Config, MakeMessageFun, IsSuccessCheck).
+
+t_sync_query_aggregated(Config) ->
+    DeviceId = iotdb_device(Config),
+    Payload = [
+        make_iotdb_payload(DeviceId, "temp", <<"INT32">>, "36"),
+        (make_iotdb_payload(DeviceId, "temp", <<"INT32">>, "37"))#{timestamp => <<"now_us">>},
+        (make_iotdb_payload(DeviceId, "temp", <<"INT32">>, "38"))#{timestamp => <<"now_ns">>},
+        make_iotdb_payload(DeviceId, "charged", <<"BOOLEAN">>, "1"),
+        make_iotdb_payload(DeviceId, "stoked", <<"BOOLEAN">>, "true"),
+        make_iotdb_payload(DeviceId, "enriched", <<"BOOLEAN">>, <<"TRUE">>),
+        make_iotdb_payload(DeviceId, "drained", <<"BOOLEAN">>, "0"),
+        make_iotdb_payload(DeviceId, "dazzled", <<"BOOLEAN">>, "false"),
+        make_iotdb_payload(DeviceId, "unplugged", <<"BOOLEAN">>, <<"FALSE">>),
+        make_iotdb_payload(DeviceId, "weight", <<"FLOAT">>, "87.3"),
+        make_iotdb_payload(DeviceId, "foo", <<"TEXT">>, <<"bar">>)
+    ],
+    MakeMessageFun = make_message_fun(DeviceId, Payload),
+    IsSuccessCheck =
+        fun(Result) ->
+            ?assertEqual(ok, element(1, Result))
+        end,
+    emqx_bridge_testlib:t_sync_query(Config, MakeMessageFun, IsSuccessCheck).
+
+t_sync_query_fail(Config) ->
+    DeviceId = iotdb_device(Config),
+    Payload = make_iotdb_payload(DeviceId, "temp", <<"INT32">>, "Anton"),
+    MakeMessageFun = make_message_fun(DeviceId, Payload),
+    IsSuccessCheck =
+        fun(Result) ->
+            ?assertEqual(error, element(1, Result))
+        end,
+    emqx_bridge_testlib:t_sync_query(Config, MakeMessageFun, IsSuccessCheck).
+
+t_create_via_http(Config) ->
+    emqx_bridge_testlib:t_create_via_http(Config).
+
+t_start_stop(Config) ->
+    emqx_bridge_testlib:t_start_stop(Config, iotdb_bridge_stopped).
+
+t_on_get_status(Config) ->
+    emqx_bridge_testlib:t_on_get_status(Config).
diff --git a/apps/emqx_bridge_kafka/BSL.txt b/apps/emqx_bridge_kafka/BSL.txt
new file mode 100644
index 000000000..0acc0e696
--- /dev/null
+++ b/apps/emqx_bridge_kafka/BSL.txt
@@ -0,0 +1,94 @@
+Business Source License 1.1
+
+Licensor: Hangzhou EMQ Technologies Co., Ltd.
+Licensed Work: EMQX Enterprise Edition
+ The Licensed Work is (c) 2023
+ Hangzhou EMQ Technologies Co., Ltd.
+Additional Use Grant: Students and educators are granted right to copy,
+ modify, and create derivative work for research
+ or education.
+Change Date: 2027-02-01
+Change License: Apache License, Version 2.0
+
+For information about alternative licensing arrangements for the Software,
+please contact Licensor: https://www.emqx.com/en/contact
+
+Notice
+
+The Business Source License (this document, or the “License”) is not an Open
+Source license. However, the Licensed Work will eventually be made available
+under an Open Source License, as stated in this License.
+
+License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
+“Business Source License” is a trademark of MariaDB Corporation Ab.
+
+-----------------------------------------------------------------------------
+
+Business Source License 1.1
+
+Terms
+
+The Licensor hereby grants you the right to copy, modify, create derivative
+works, redistribute, and make non-production use of the Licensed Work. The
+Licensor may make an Additional Use Grant, above, permitting limited
+production use.
+
+Effective on the Change Date, or the fourth anniversary of the first publicly
+available distribution of a specific version of the Licensed Work under this
+License, whichever comes first, the Licensor hereby grants you rights under
+the terms of the Change License, and the rights granted in the paragraph
+above terminate.
+ +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_kafka/README.md b/apps/emqx_bridge_kafka/README.md new file mode 100644 index 000000000..07cae256b --- /dev/null +++ b/apps/emqx_bridge_kafka/README.md @@ -0,0 +1,27 @@ +# Kafka Data Integration Bridge + +This application houses the Kafka Producer and Consumer data +integration bridges for EMQX Enterprise Edition. It provides the +means to connect to Kafka and publish/consume messages to/from it. + +Currently, our Kafka Producer library (`wolff`) has its own `replayq` +buffering implementation, so this bridge does not require buffer +workers from `emqx_resource`. It implements the connection management +and interaction without need for a separate connector app, since it's +not used by authentication and authorization applications. 
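To make the buffering remark above concrete, here is a minimal producer sketch against wolff's public API (a sketch under assumptions: the wolff 1.7.x API as described in its README, a local broker on port 9092, and illustrative names such as `example_producers`; the exact options are those of the wolff release pinned in rebar.config):

```erlang
%% Minimal wolff producer sketch (assumptions as noted above).
{ok, _} = application:ensure_all_started(wolff),
ClientCfg = #{},
{ok, _ClientPid} =
    wolff:ensure_supervised_client(<<"client-1">>, [{"localhost", 9092}], ClientCfg),
%% replayq_dir enables wolff's own on-disk buffering; this is the reason the
%% bridge does not need buffer workers from emqx_resource.
ProducerCfg = #{name => example_producers, replayq_dir => "/tmp/wolff-replayq"},
{ok, Producers} =
    wolff:ensure_supervised_producers(<<"client-1">>, <<"test-topic">>, ProducerCfg),
Msg = #{key => <<"key">>, value => <<"value">>},
{_Partition, _BaseOffset} = wolff:send_sync(Producers, [Msg], 5000).
```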
+ +# Documentation links + +For more information about Apache Kafka, please see its [official site](https://kafka.apache.org/). + +# Configurations + +Please see [Ingest data into Kafka](https://www.emqx.io/docs/en/v5.0/data-integration/data-bridge-kafka.html) for more detailed info. + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_kafka/docker-ct b/apps/emqx_bridge_kafka/docker-ct new file mode 100644 index 000000000..5288ee246 --- /dev/null +++ b/apps/emqx_bridge_kafka/docker-ct @@ -0,0 +1,2 @@ +toxiproxy +kafka diff --git a/apps/emqx_bridge_kafka/etc/emqx_bridge_kafka.conf b/apps/emqx_bridge_kafka/etc/emqx_bridge_kafka.conf new file mode 100644 index 000000000..e69de29bb diff --git a/apps/emqx_bridge_kafka/rebar.config b/apps/emqx_bridge_kafka/rebar.config new file mode 100644 index 000000000..68a8d3e69 --- /dev/null +++ b/apps/emqx_bridge_kafka/rebar.config @@ -0,0 +1,14 @@ +%% -*- mode: erlang; -*- +{erl_opts, [debug_info]}. +{deps, [ {wolff, {git, "https://github.com/kafka4beam/wolff.git", {tag, "1.7.5"}}} + , {kafka_protocol, {git, "https://github.com/kafka4beam/kafka_protocol.git", {tag, "4.1.2"}}} + , {brod_gssapi, {git, "https://github.com/kafka4beam/brod_gssapi.git", {tag, "v0.1.0"}}} + , {brod, {git, "https://github.com/kafka4beam/brod.git", {tag, "3.16.8"}}} + , {emqx_connector, {path, "../../apps/emqx_connector"}} + , {emqx_resource, {path, "../../apps/emqx_resource"}} + , {emqx_bridge, {path, "../../apps/emqx_bridge"}} + ]}. + +{shell, [ + {apps, [emqx_bridge_kafka]} +]}. diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src new file mode 100644 index 000000000..6c103f73b --- /dev/null +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src @@ -0,0 +1,17 @@ +{application, emqx_bridge_kafka, [ + {description, "EMQX Enterprise Kafka Bridge"}, + {vsn, "0.1.2"}, + {registered, [emqx_bridge_kafka_consumer_sup]}, + {applications, [ + kernel, + stdlib, + telemetry, + wolff, + brod, + brod_gssapi + ]}, + {env, []}, + {modules, []}, + + {links, []} +]}. diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.erl b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.erl new file mode 100644 index 000000000..30f6cd60d --- /dev/null +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.erl @@ -0,0 +1,461 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_kafka). + +-include_lib("emqx_connector/include/emqx_connector.hrl"). +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). + +%% allow atoms like scram_sha_256 and scram_sha_512 +%% i.e. the _256 part does not start with a-z +-elvis([ + {elvis_style, atom_naming_convention, #{ + regex => "^([a-z][a-z0-9]*_?)([a-z0-9]*_?)*$", + enclosed_atoms => ".*" + }} +]). +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-export([ + conn_bridge_examples/1 +]). + +-export([ + namespace/0, + roots/0, + fields/1, + desc/1, + host_opts/0 +]). + +-export([kafka_producer_converter/2]). 
+ +%% ------------------------------------------------------------------------------------------------- +%% api + +conn_bridge_examples(Method) -> + [ + #{ + %% TODO: rename this to `kafka_producer' after alias + %% support is added to hocon; keeping this as just `kafka' + %% for backwards compatibility. + <<"kafka">> => #{ + summary => <<"Kafka Producer Bridge">>, + value => values({Method, producer}) + } + }, + #{ + <<"kafka_consumer">> => #{ + summary => <<"Kafka Consumer Bridge">>, + value => values({Method, consumer}) + } + } + ]. + +values({get, KafkaType}) -> + values({post, KafkaType}); +values({post, KafkaType}) -> + maps:merge(values(common_config), values(KafkaType)); +values({put, KafkaType}) -> + values({post, KafkaType}); +values(common_config) -> + #{ + authentication => #{ + mechanism => <<"plain">>, + username => <<"username">>, + password => <<"******">> + }, + bootstrap_hosts => <<"localhost:9092">>, + connect_timeout => <<"5s">>, + enable => true, + metadata_request_timeout => <<"4s">>, + min_metadata_refresh_interval => <<"3s">>, + socket_opts => #{ + sndbuf => <<"1024KB">>, + recbuf => <<"1024KB">>, + nodelay => true + } + }; +values(producer) -> + #{ + kafka => #{ + topic => <<"kafka-topic">>, + message => #{ + key => <<"${.clientid}">>, + value => <<"${.}">>, + timestamp => <<"${.timestamp}">> + }, + max_batch_bytes => <<"896KB">>, + compression => <<"no_compression">>, + partition_strategy => <<"random">>, + required_acks => <<"all_isr">>, + partition_count_refresh_interval => <<"60s">>, + max_inflight => 10, + buffer => #{ + mode => <<"hybrid">>, + per_partition_limit => <<"2GB">>, + segment_bytes => <<"100MB">>, + memory_overload_protection => true + } + }, + local_topic => <<"mqtt/local/topic">> + }; +values(consumer) -> + #{ + kafka => #{ + max_batch_bytes => <<"896KB">>, + offset_reset_policy => <<"latest">>, + offset_commit_interval_seconds => 5 + }, + key_encoding_mode => <<"none">>, + topic_mapping => [ + #{ + kafka_topic => <<"kafka-topic-1">>, + mqtt_topic => <<"mqtt/topic/1">>, + qos => 1, + payload_template => <<"${.}">> + }, + #{ + kafka_topic => <<"kafka-topic-2">>, + mqtt_topic => <<"mqtt/topic/2">>, + qos => 2, + payload_template => <<"v = ${.value}">> + } + ], + value_encoding_mode => <<"none">> + }. + +%% ------------------------------------------------------------------------------------------------- +%% Hocon Schema Definitions + +host_opts() -> + #{default_port => 9092}. + +namespace() -> "bridge_kafka". + +roots() -> ["config_consumer", "config_producer"]. 
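As a quick way to sanity-check the example values above, one can query this module from a shell; a hypothetical session (output abridged, assuming the application modules are loaded):

```erlang
1> [#{<<"kafka">> := #{value := Producer}} | _] =
       emqx_bridge_kafka:conn_bridge_examples(post).
2> maps:get(bootstrap_hosts, Producer).
<<"localhost:9092">>
3> maps:get(required_acks, maps:get(kafka, Producer)).
<<"all_isr">>
```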
+ +fields("post_" ++ Type) -> + [type_field(), name_field() | fields("config_" ++ Type)]; +fields("put_" ++ Type) -> + fields("config_" ++ Type); +fields("get_" ++ Type) -> + emqx_bridge_schema:status_fields() ++ fields("post_" ++ Type); +fields("config_producer") -> + fields(kafka_producer); +fields("config_consumer") -> + fields(kafka_consumer); +fields(kafka_producer) -> + fields("config") ++ fields(producer_opts); +fields(kafka_consumer) -> + fields("config") ++ fields(consumer_opts); +fields("config") -> + [ + {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, + {bootstrap_hosts, + mk( + binary(), + #{ + required => true, + desc => ?DESC(bootstrap_hosts), + validator => emqx_schema:servers_validator( + host_opts(), _Required = true + ) + } + )}, + {connect_timeout, + mk(emqx_schema:duration_ms(), #{ + default => <<"5s">>, + desc => ?DESC(connect_timeout) + })}, + {min_metadata_refresh_interval, + mk( + emqx_schema:duration_ms(), + #{ + default => <<"3s">>, + desc => ?DESC(min_metadata_refresh_interval) + } + )}, + {metadata_request_timeout, + mk(emqx_schema:duration_ms(), #{ + default => <<"5s">>, + desc => ?DESC(metadata_request_timeout) + })}, + {authentication, + mk(hoconsc:union([none, ref(auth_username_password), ref(auth_gssapi_kerberos)]), #{ + default => none, desc => ?DESC("authentication") + })}, + {socket_opts, mk(ref(socket_opts), #{required => false, desc => ?DESC(socket_opts)})} + ] ++ emqx_connector_schema_lib:ssl_fields(); +fields(auth_username_password) -> + [ + {mechanism, + mk(enum([plain, scram_sha_256, scram_sha_512]), #{ + required => true, desc => ?DESC(auth_sasl_mechanism) + })}, + {username, mk(binary(), #{required => true, desc => ?DESC(auth_sasl_username)})}, + {password, + mk(binary(), #{ + required => true, + sensitive => true, + desc => ?DESC(auth_sasl_password), + converter => fun emqx_schema:password_converter/2 + })} + ]; +fields(auth_gssapi_kerberos) -> + [ + {kerberos_principal, + mk(binary(), #{ + required => true, + desc => ?DESC(auth_kerberos_principal) + })}, + {kerberos_keytab_file, + mk(binary(), #{ + required => true, + desc => ?DESC(auth_kerberos_keytab_file) + })} + ]; +fields(socket_opts) -> + [ + {sndbuf, + mk( + emqx_schema:bytesize(), + #{default => <<"1MB">>, desc => ?DESC(socket_send_buffer)} + )}, + {recbuf, + mk( + emqx_schema:bytesize(), + #{default => <<"1MB">>, desc => ?DESC(socket_receive_buffer)} + )}, + {nodelay, + mk( + boolean(), + #{ + default => true, + importance => ?IMPORTANCE_HIDDEN, + desc => ?DESC(socket_nodelay) + } + )} + ]; +fields(producer_opts) -> + [ + %% Note: there's an implicit convention in `emqx_bridge' that, + %% for egress bridges with this config, the published messages + %% will be forwarded to such bridges. 
+ {local_topic, mk(binary(), #{required => false, desc => ?DESC(mqtt_topic)})}, + {kafka, + mk(ref(producer_kafka_opts), #{ + required => true, + desc => ?DESC(producer_kafka_opts) + })} + ]; +fields(producer_kafka_opts) -> + [ + {topic, mk(string(), #{required => true, desc => ?DESC(kafka_topic)})}, + {message, mk(ref(kafka_message), #{required => false, desc => ?DESC(kafka_message)})}, + {max_batch_bytes, + mk(emqx_schema:bytesize(), #{default => <<"896KB">>, desc => ?DESC(max_batch_bytes)})}, + {compression, + mk(enum([no_compression, snappy, gzip]), #{ + default => no_compression, desc => ?DESC(compression) + })}, + {partition_strategy, + mk( + enum([random, key_dispatch]), + #{default => random, desc => ?DESC(partition_strategy)} + )}, + {required_acks, + mk( + enum([all_isr, leader_only, none]), + #{ + default => all_isr, + desc => ?DESC(required_acks) + } + )}, + {partition_count_refresh_interval, + mk( + emqx_schema:duration_s(), + #{ + default => <<"60s">>, + desc => ?DESC(partition_count_refresh_interval) + } + )}, + {max_inflight, + mk( + pos_integer(), + #{ + default => 10, + desc => ?DESC(max_inflight) + } + )}, + {buffer, + mk(ref(producer_buffer), #{ + required => false, + desc => ?DESC(producer_buffer) + })} + ]; +fields(kafka_message) -> + [ + {key, mk(string(), #{default => <<"${.clientid}">>, desc => ?DESC(kafka_message_key)})}, + {value, mk(string(), #{default => <<"${.}">>, desc => ?DESC(kafka_message_value)})}, + {timestamp, + mk(string(), #{ + default => <<"${.timestamp}">>, desc => ?DESC(kafka_message_timestamp) + })} + ]; +fields(producer_buffer) -> + [ + {mode, + mk( + enum([memory, disk, hybrid]), + #{default => memory, desc => ?DESC(buffer_mode)} + )}, + {per_partition_limit, + mk( + emqx_schema:bytesize(), + #{default => <<"2GB">>, desc => ?DESC(buffer_per_partition_limit)} + )}, + {segment_bytes, + mk( + emqx_schema:bytesize(), + #{default => <<"100MB">>, desc => ?DESC(buffer_segment_bytes)} + )}, + {memory_overload_protection, + mk(boolean(), #{ + default => false, + desc => ?DESC(buffer_memory_overload_protection) + })} + ]; +fields(consumer_opts) -> + [ + {kafka, + mk(ref(consumer_kafka_opts), #{required => false, desc => ?DESC(consumer_kafka_opts)})}, + {topic_mapping, + mk( + hoconsc:array(ref(consumer_topic_mapping)), + #{ + required => true, + desc => ?DESC(consumer_topic_mapping), + validator => fun consumer_topic_mapping_validator/1 + } + )}, + {key_encoding_mode, + mk(enum([none, base64]), #{ + default => none, desc => ?DESC(consumer_key_encoding_mode) + })}, + {value_encoding_mode, + mk(enum([none, base64]), #{ + default => none, desc => ?DESC(consumer_value_encoding_mode) + })} + ]; +fields(consumer_topic_mapping) -> + [ + {kafka_topic, mk(binary(), #{required => true, desc => ?DESC(consumer_kafka_topic)})}, + {mqtt_topic, mk(binary(), #{required => true, desc => ?DESC(consumer_mqtt_topic)})}, + {qos, mk(emqx_schema:qos(), #{default => 0, desc => ?DESC(consumer_mqtt_qos)})}, + {payload_template, + mk( + string(), + #{default => <<"${.}">>, desc => ?DESC(consumer_mqtt_payload)} + )} + ]; +fields(consumer_kafka_opts) -> + [ + {max_batch_bytes, + mk(emqx_schema:bytesize(), #{ + default => "896KB", desc => ?DESC(consumer_max_batch_bytes) + })}, + {max_rejoin_attempts, + mk(non_neg_integer(), #{ + importance => ?IMPORTANCE_HIDDEN, + default => 5, + desc => ?DESC(consumer_max_rejoin_attempts) + })}, + {offset_reset_policy, + mk( + enum([latest, earliest]), + #{default => latest, desc => ?DESC(consumer_offset_reset_policy)} + )}, + 
{offset_commit_interval_seconds, + mk( + pos_integer(), + #{default => 5, desc => ?DESC(consumer_offset_commit_interval_seconds)} + )} + ]. + +desc("config") -> + ?DESC("desc_config"); +desc("get_" ++ Type) when Type =:= "consumer"; Type =:= "producer" -> + ["Configuration for Kafka using `GET` method."]; +desc("put_" ++ Type) when Type =:= "consumer"; Type =:= "producer" -> + ["Configuration for Kafka using `PUT` method."]; +desc("post_" ++ Type) when Type =:= "consumer"; Type =:= "producer" -> + ["Configuration for Kafka using `POST` method."]; +desc(Name) -> + lists:member(Name, struct_names()) orelse throw({missing_desc, Name}), + ?DESC(Name). + +struct_names() -> + [ + auth_gssapi_kerberos, + auth_username_password, + kafka_message, + kafka_producer, + kafka_consumer, + producer_buffer, + producer_kafka_opts, + socket_opts, + producer_opts, + consumer_opts, + consumer_kafka_opts, + consumer_topic_mapping + ]. + +%% ------------------------------------------------------------------------------------------------- +%% internal +type_field() -> + {type, + %% TODO: rename `kafka' to `kafka_producer' after alias + %% support is added to hocon; keeping this as just `kafka' for + %% backwards compatibility. + mk(enum([kafka_consumer, kafka]), #{required => true, desc => ?DESC("desc_type")})}. + +name_field() -> + {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. + +ref(Name) -> + hoconsc:ref(?MODULE, Name). + +kafka_producer_converter(undefined, _HoconOpts) -> + undefined; +kafka_producer_converter( + #{<<"producer">> := OldOpts0, <<"bootstrap_hosts">> := _} = Config0, _HoconOpts +) -> + %% old schema + MQTTOpts = maps:get(<<"mqtt">>, OldOpts0, #{}), + LocalTopic = maps:get(<<"topic">>, MQTTOpts, undefined), + KafkaOpts = maps:get(<<"kafka">>, OldOpts0), + Config = maps:without([<<"producer">>], Config0), + case LocalTopic =:= undefined of + true -> + Config#{<<"kafka">> => KafkaOpts}; + false -> + Config#{<<"kafka">> => KafkaOpts, <<"local_topic">> => LocalTopic} + end; +kafka_producer_converter(Config, _HoconOpts) -> + %% new schema + Config. + +consumer_topic_mapping_validator(_TopicMapping = []) -> + {error, "There must be at least one Kafka-MQTT topic mapping"}; +consumer_topic_mapping_validator(TopicMapping = [_ | _]) -> + NumEntries = length(TopicMapping), + KafkaTopics = [KT || #{<<"kafka_topic">> := KT} <- TopicMapping], + DistinctKafkaTopics = length(lists:usort(KafkaTopics)), + case DistinctKafkaTopics =:= NumEntries of + true -> + ok; + false -> + {error, "Kafka topics must not be repeated in a bridge"} + end. diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_consumer_sup.erl b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_consumer_sup.erl new file mode 100644 index 000000000..638c1def6 --- /dev/null +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_consumer_sup.erl @@ -0,0 +1,79 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_kafka_consumer_sup). + +-behaviour(supervisor). + +%% `supervisor' API +-export([init/1]). + +%% API +-export([ + start_link/0, + child_spec/2, + start_child/2, + ensure_child_deleted/1 +]). + +-type child_id() :: binary(). +-export_type([child_id/0]). 
+
+%%--------------------------------------------------------------------------------------------
+%% API
+%%--------------------------------------------------------------------------------------------
+
+start_link() ->
+    supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+-spec child_spec(child_id(), map()) -> supervisor:child_spec().
+child_spec(Id, GroupSubscriberConfig) ->
+    Mod = brod_group_subscriber_v2,
+    #{
+        id => Id,
+        start => {Mod, start_link, [GroupSubscriberConfig]},
+        restart => permanent,
+        shutdown => 10_000,
+        type => worker,
+        modules => [Mod]
+    }.
+
+-spec start_child(child_id(), map()) -> {ok, pid()} | {error, term()}.
+start_child(Id, GroupSubscriberConfig) ->
+    ChildSpec = child_spec(Id, GroupSubscriberConfig),
+    case supervisor:start_child(?MODULE, ChildSpec) of
+        {ok, Pid} ->
+            {ok, Pid};
+        {ok, Pid, _Info} ->
+            {ok, Pid};
+        {error, already_present} ->
+            supervisor:restart_child(?MODULE, Id);
+        {error, {already_started, Pid}} ->
+            {ok, Pid};
+        {error, Error} ->
+            {error, Error}
+    end.
+
+-spec ensure_child_deleted(child_id()) -> ok.
+ensure_child_deleted(Id) ->
+    case supervisor:terminate_child(?MODULE, Id) of
+        ok ->
+            ok = supervisor:delete_child(?MODULE, Id),
+            ok;
+        {error, not_found} ->
+            ok
+    end.
+
+%%--------------------------------------------------------------------------------------------
+%% `supervisor' API
+%%--------------------------------------------------------------------------------------------
+
+init([]) ->
+    SupFlags = #{
+        strategy => one_for_one,
+        intensity => 100,
+        period => 10
+    },
+    ChildSpecs = [],
+    {ok, {SupFlags, ChildSpecs}}.
diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl.erl b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl.erl
new file mode 100644
index 000000000..22a67c551
--- /dev/null
+++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl.erl
@@ -0,0 +1,41 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+
+%% Kafka connection configuration
+-module(emqx_bridge_kafka_impl).
+
+-export([
+    hosts/1,
+    make_client_id/2,
+    sasl/1
+]).
+
+%% Parse a comma-separated host:port list into a [{Host, Port}] list
+hosts(Hosts) when is_binary(Hosts) ->
+    hosts(binary_to_list(Hosts));
+hosts(Hosts) when is_list(Hosts) ->
+    kpro:parse_endpoints(Hosts).
+
+%% It is better for the client ID to be unique, as that makes
+%% Kafka-side troubleshooting easier.
+make_client_id(KafkaType0, BridgeName0) ->
+    KafkaType = to_bin(KafkaType0),
+    BridgeName = to_bin(BridgeName0),
+    iolist_to_binary([KafkaType, ":", BridgeName, ":", atom_to_list(node())]).
+
+sasl(none) ->
+    undefined;
+sasl(#{mechanism := Mechanism, username := Username, password := Password}) ->
+    {Mechanism, Username, emqx_secret:wrap(Password)};
+sasl(#{
+    kerberos_principal := Principal,
+    kerberos_keytab_file := KeyTabFile
+}) ->
+    {callback, brod_gssapi, {gssapi, KeyTabFile, Principal}}.
+
+to_bin(A) when is_atom(A) ->
+    atom_to_binary(A);
+to_bin(L) when is_list(L) ->
+    list_to_binary(L);
+to_bin(B) when is_binary(B) ->
+    B.
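A hypothetical shell session illustrating these helpers; `kpro:parse_endpoints/1` is provided by the `kafka_protocol` dependency, and the client-ID suffix depends on the local node name (here assumed to be `emqx@127.0.0.1`):

```erlang
1> emqx_bridge_kafka_impl:hosts(<<"kafka-1.emqx.net:9092,kafka-2.emqx.net:9092">>).
[{"kafka-1.emqx.net",9092},{"kafka-2.emqx.net",9092}]
2> emqx_bridge_kafka_impl:make_client_id(kafka_consumer, <<"my_bridge">>).
<<"kafka_consumer:my_bridge:emqx@127.0.0.1">>
```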
diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_consumer.erl b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_consumer.erl
new file mode 100644
index 000000000..f7958af81
--- /dev/null
+++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_consumer.erl
@@ -0,0 +1,548 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+-module(emqx_bridge_kafka_impl_consumer).
+
+-behaviour(emqx_resource).
+
+%% `emqx_resource' API
+-export([
+    callback_mode/0,
+    is_buffer_supported/0,
+    on_start/2,
+    on_stop/2,
+    on_get_status/2
+]).
+
+%% `brod_group_subscriber' API
+-export([
+    init/2,
+    handle_message/2
+]).
+
+-ifdef(TEST).
+-export([consumer_group_id/1]).
+-endif.
+
+-include_lib("emqx/include/logger.hrl").
+-include_lib("snabbkaffe/include/snabbkaffe.hrl").
+%% needed for the #kafka_message record definition
+-include_lib("brod/include/brod.hrl").
+-include_lib("emqx_resource/include/emqx_resource.hrl").
+
+-type config() :: #{
+    authentication := term(),
+    bootstrap_hosts := binary(),
+    bridge_name := atom(),
+    kafka := #{
+        max_batch_bytes := emqx_schema:bytesize(),
+        max_rejoin_attempts := non_neg_integer(),
+        offset_commit_interval_seconds := pos_integer(),
+        offset_reset_policy := offset_reset_policy(),
+        topic := binary()
+    },
+    topic_mapping := nonempty_list(
+        #{
+            kafka_topic := kafka_topic(),
+            mqtt_topic := emqx_types:topic(),
+            qos := emqx_types:qos(),
+            payload_template := string()
+        }
+    ),
+    ssl := _,
+    any() => term()
+}.
+-type subscriber_id() :: emqx_bridge_kafka_consumer_sup:child_id().
+-type kafka_topic() :: brod:topic().
+-type kafka_message() :: #kafka_message{}.
+-type state() :: #{
+    kafka_topics := nonempty_list(kafka_topic()),
+    subscriber_id := subscriber_id(),
+    kafka_client_id := brod:client_id()
+}.
+-type offset_reset_policy() :: latest | earliest.
+-type encoding_mode() :: none | base64.
+-type consumer_init_data() :: #{
+    hookpoint := binary(),
+    key_encoding_mode := encoding_mode(),
+    resource_id := resource_id(),
+    topic_mapping := #{
+        kafka_topic() := #{
+            payload_template := emqx_plugin_libs_rule:tmpl_token(),
+            mqtt_topic => emqx_types:topic(),
+            qos => emqx_types:qos()
+        }
+    },
+    value_encoding_mode := encoding_mode()
+}.
+-type consumer_state() :: #{
+    hookpoint := binary(),
+    kafka_topic := binary(),
+    key_encoding_mode := encoding_mode(),
+    resource_id := resource_id(),
+    topic_mapping := #{
+        kafka_topic() := #{
+            payload_template := emqx_plugin_libs_rule:tmpl_token(),
+            mqtt_topic => emqx_types:topic(),
+            qos => emqx_types:qos()
+        }
+    },
+    value_encoding_mode := encoding_mode()
+}.
+-type subscriber_init_info() :: #{
+    topic => brod:topic(),
+    partition => brod:partition(),
+    group_id => brod:group_id(),
+    commit_fun => brod_group_subscriber_v2:commit_fun()
+}.
+
+-define(CLIENT_DOWN_MESSAGE,
+    "Failed to start Kafka client. Please check the logs for errors and check"
+    " the connection parameters."
+).
+
+%%-------------------------------------------------------------------------------------
+%% `emqx_resource' API
+%%-------------------------------------------------------------------------------------
+
+callback_mode() ->
+    async_if_possible.
+
+%% There are no queries to be made to this bridge, so we report that
+%% buffering is supported in order not to spawn unused resource buffer
+%% workers.
+is_buffer_supported() ->
+    true.
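For orientation, a `config()` term matching the spec above might look like the following sketch (all values are illustrative, mirroring the schema defaults shown earlier):

```erlang
#{
    authentication => none,
    bootstrap_hosts => <<"kafka-1.emqx.net:9092">>,
    bridge_name => my_consumer,
    kafka => #{
        max_batch_bytes => 917504,  %% 896KB
        max_rejoin_attempts => 5,
        offset_commit_interval_seconds => 5,
        offset_reset_policy => latest,
        topic => <<"kafka-topic-1">>
    },
    topic_mapping => [
        #{
            kafka_topic => <<"kafka-topic-1">>,
            mqtt_topic => <<"mqtt/topic/1">>,
            qos => 1,
            payload_template => "${.}"
        }
    ],
    ssl => #{enable => false}
}
```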
+ +-spec on_start(resource_id(), config()) -> {ok, state()}. +on_start(ResourceId, Config) -> + #{ + authentication := Auth, + bootstrap_hosts := BootstrapHosts0, + bridge_name := BridgeName, + hookpoint := _, + kafka := #{ + max_batch_bytes := _, + max_rejoin_attempts := _, + offset_commit_interval_seconds := _, + offset_reset_policy := _ + }, + ssl := SSL, + topic_mapping := _ + } = Config, + BootstrapHosts = emqx_bridge_kafka_impl:hosts(BootstrapHosts0), + KafkaType = kafka_consumer, + %% Note: this is distinct per node. + ClientID = make_client_id(ResourceId, KafkaType, BridgeName), + ClientOpts0 = + case Auth of + none -> []; + Auth -> [{sasl, emqx_bridge_kafka_impl:sasl(Auth)}] + end, + ClientOpts = add_ssl_opts(ClientOpts0, SSL), + case brod:start_client(BootstrapHosts, ClientID, ClientOpts) of + ok -> + ?tp( + kafka_consumer_client_started, + #{client_id => ClientID, resource_id => ResourceId} + ), + ?SLOG(info, #{ + msg => "kafka_consumer_client_started", + resource_id => ResourceId, + kafka_hosts => BootstrapHosts + }); + {error, Reason} -> + ?SLOG(error, #{ + msg => "failed_to_start_kafka_consumer_client", + resource_id => ResourceId, + kafka_hosts => BootstrapHosts, + reason => emqx_utils:redact(Reason) + }), + throw(?CLIENT_DOWN_MESSAGE) + end, + start_consumer(Config, ResourceId, ClientID). + +-spec on_stop(resource_id(), state()) -> ok. +on_stop(_ResourceID, State) -> + #{ + subscriber_id := SubscriberId, + kafka_client_id := ClientID + } = State, + stop_subscriber(SubscriberId), + stop_client(ClientID), + ok. + +-spec on_get_status(resource_id(), state()) -> connected | disconnected. +on_get_status(_ResourceID, State) -> + #{ + subscriber_id := SubscriberId, + kafka_client_id := ClientID, + kafka_topics := KafkaTopics + } = State, + case do_get_status(ClientID, KafkaTopics, SubscriberId) of + {disconnected, Message} -> + {disconnected, State, Message}; + Res -> + Res + end. + +%%------------------------------------------------------------------------------------- +%% `brod_group_subscriber' API +%%------------------------------------------------------------------------------------- + +-spec init(subscriber_init_info(), consumer_init_data()) -> {ok, consumer_state()}. +init(GroupData, State0) -> + ?tp(kafka_consumer_subscriber_init, #{group_data => GroupData, state => State0}), + #{topic := KafkaTopic} = GroupData, + State = State0#{kafka_topic => KafkaTopic}, + {ok, State}. + +-spec handle_message(kafka_message(), consumer_state()) -> {ok, commit, consumer_state()}. +handle_message(Message, State) -> + ?tp_span( + kafka_consumer_handle_message, + #{message => Message, state => State}, + do_handle_message(Message, State) + ). 
+ +do_handle_message(Message, State) -> + #{ + hookpoint := Hookpoint, + kafka_topic := KafkaTopic, + key_encoding_mode := KeyEncodingMode, + resource_id := ResourceId, + topic_mapping := TopicMapping, + value_encoding_mode := ValueEncodingMode + } = State, + #{ + mqtt_topic := MQTTTopic, + qos := MQTTQoS, + payload_template := PayloadTemplate + } = maps:get(KafkaTopic, TopicMapping), + FullMessage = #{ + headers => maps:from_list(Message#kafka_message.headers), + key => encode(Message#kafka_message.key, KeyEncodingMode), + offset => Message#kafka_message.offset, + topic => KafkaTopic, + ts => Message#kafka_message.ts, + ts_type => Message#kafka_message.ts_type, + value => encode(Message#kafka_message.value, ValueEncodingMode) + }, + Payload = render(FullMessage, PayloadTemplate), + MQTTMessage = emqx_message:make(ResourceId, MQTTQoS, MQTTTopic, Payload), + _ = emqx:publish(MQTTMessage), + emqx:run_hook(Hookpoint, [FullMessage]), + emqx_resource_metrics:received_inc(ResourceId), + %% note: just `ack' does not commit the offset to the + %% kafka consumer group. + {ok, commit, State}. + +%%------------------------------------------------------------------------------------- +%% Helper fns +%%------------------------------------------------------------------------------------- + +add_ssl_opts(ClientOpts, #{enable := false}) -> + ClientOpts; +add_ssl_opts(ClientOpts, SSL) -> + [{ssl, emqx_tls_lib:to_client_opts(SSL)} | ClientOpts]. + +-spec make_subscriber_id(atom() | binary()) -> emqx_bridge_kafka_consumer_sup:child_id(). +make_subscriber_id(BridgeName) -> + BridgeNameBin = to_bin(BridgeName), + <<"kafka_subscriber:", BridgeNameBin/binary>>. + +ensure_consumer_supervisor_started() -> + Mod = emqx_bridge_kafka_consumer_sup, + ChildSpec = + #{ + id => Mod, + start => {Mod, start_link, []}, + restart => permanent, + shutdown => infinity, + type => supervisor, + modules => [Mod] + }, + case supervisor:start_child(emqx_bridge_sup, ChildSpec) of + {ok, _Pid} -> + ok; + {error, already_present} -> + ok; + {error, {already_started, _Pid}} -> + ok + end. + +-spec start_consumer(config(), resource_id(), brod:client_id()) -> {ok, state()}. +start_consumer(Config, ResourceId, ClientID) -> + #{ + bootstrap_hosts := BootstrapHosts0, + bridge_name := BridgeName, + hookpoint := Hookpoint, + kafka := #{ + max_batch_bytes := MaxBatchBytes, + max_rejoin_attempts := MaxRejoinAttempts, + offset_commit_interval_seconds := OffsetCommitInterval, + offset_reset_policy := OffsetResetPolicy0 + }, + key_encoding_mode := KeyEncodingMode, + topic_mapping := TopicMapping0, + value_encoding_mode := ValueEncodingMode + } = Config, + ok = ensure_consumer_supervisor_started(), + TopicMapping = convert_topic_mapping(TopicMapping0), + InitialState = #{ + key_encoding_mode => KeyEncodingMode, + hookpoint => Hookpoint, + resource_id => ResourceId, + topic_mapping => TopicMapping, + value_encoding_mode => ValueEncodingMode + }, + %% note: the group id should be the same for all nodes in the + %% cluster, so that the load gets distributed between all + %% consumers and we don't repeat messages in the same cluster. 
+    GroupID = consumer_group_id(BridgeName),
+    %% earliest or latest
+    BeginOffset = OffsetResetPolicy0,
+    OffsetResetPolicy =
+        case OffsetResetPolicy0 of
+            latest -> reset_to_latest;
+            earliest -> reset_to_earliest
+        end,
+    ConsumerConfig = [
+        {begin_offset, BeginOffset},
+        {max_bytes, MaxBatchBytes},
+        {offset_reset_policy, OffsetResetPolicy}
+    ],
+    GroupConfig = [
+        {max_rejoin_attempts, MaxRejoinAttempts},
+        {offset_commit_interval_seconds, OffsetCommitInterval}
+    ],
+    KafkaTopics = maps:keys(TopicMapping),
+    GroupSubscriberConfig =
+        #{
+            client => ClientID,
+            group_id => GroupID,
+            topics => KafkaTopics,
+            cb_module => ?MODULE,
+            init_data => InitialState,
+            message_type => message,
+            consumer_config => ConsumerConfig,
+            group_config => GroupConfig
+        },
+    %% Below, we spawn a single `brod_group_subscriber_v2' worker, with
+    %% no option for a pool of those.  This is because that worker
+    %% spawns one worker for each assigned topic-partition
+    %% automatically, so we should not spawn duplicate workers.
+    SubscriberId = make_subscriber_id(BridgeName),
+    case emqx_bridge_kafka_consumer_sup:start_child(SubscriberId, GroupSubscriberConfig) of
+        {ok, _ConsumerPid} ->
+            ?tp(
+                kafka_consumer_subscriber_started,
+                #{resource_id => ResourceId, subscriber_id => SubscriberId}
+            ),
+            {ok, #{
+                subscriber_id => SubscriberId,
+                kafka_client_id => ClientID,
+                kafka_topics => KafkaTopics
+            }};
+        {error, Reason2} ->
+            ?SLOG(error, #{
+                msg => "failed_to_start_kafka_consumer",
+                resource_id => ResourceId,
+                kafka_hosts => emqx_bridge_kafka_impl:hosts(BootstrapHosts0),
+                reason => emqx_utils:redact(Reason2)
+            }),
+            stop_client(ClientID),
+            throw(failed_to_start_kafka_consumer)
+    end.
+
+-spec stop_subscriber(emqx_bridge_kafka_consumer_sup:child_id()) -> ok.
+stop_subscriber(SubscriberId) ->
+    _ = log_when_error(
+        fun() ->
+            emqx_bridge_kafka_consumer_sup:ensure_child_deleted(SubscriberId)
+        end,
+        #{
+            msg => "failed_to_delete_kafka_subscriber",
+            subscriber_id => SubscriberId
+        }
+    ),
+    ok.
+
+-spec stop_client(brod:client_id()) -> ok.
+stop_client(ClientID) ->
+    _ = log_when_error(
+        fun() ->
+            brod:stop_client(ClientID)
+        end,
+        #{
+            msg => "failed_to_delete_kafka_consumer_client",
+            client_id => ClientID
+        }
+    ),
+    ok.
+
+do_get_status(ClientID, [KafkaTopic | RestTopics], SubscriberId) ->
+    case brod:get_partitions_count(ClientID, KafkaTopic) of
+        {ok, NPartitions} ->
+            case do_get_topic_status(ClientID, KafkaTopic, SubscriberId, NPartitions) of
+                connected -> do_get_status(ClientID, RestTopics, SubscriberId);
+                disconnected -> disconnected
+            end;
+        {error, {client_down, Context}} ->
+            case infer_client_error(Context) of
+                auth_error ->
+                    Message = "Authentication error. " ++ ?CLIENT_DOWN_MESSAGE,
+                    {disconnected, Message};
+                {auth_error, Message0} ->
+                    Message = binary_to_list(Message0) ++ "; " ++ ?CLIENT_DOWN_MESSAGE,
+                    {disconnected, Message};
+                connection_refused ->
+                    Message = "Connection refused. " ++ ?CLIENT_DOWN_MESSAGE,
+                    {disconnected, Message};
+                _ ->
+                    {disconnected, ?CLIENT_DOWN_MESSAGE}
+            end;
+        {error, leader_not_available} ->
+            Message =
+                "Leader connection not available. Please check the Kafka topic used,"
+                " the connection parameters and Kafka cluster health",
+            {disconnected, Message};
+        _ ->
+            disconnected
+    end;
+do_get_status(_ClientID, _KafkaTopics = [], _SubscriberId) ->
+    connected.
+
+-spec do_get_topic_status(brod:client_id(), binary(), subscriber_id(), pos_integer()) ->
+    connected | disconnected.
+do_get_topic_status(ClientID, KafkaTopic, SubscriberId, NPartitions) -> + Results = + lists:map( + fun(N) -> + brod_client:get_leader_connection(ClientID, KafkaTopic, N) + end, + lists:seq(0, NPartitions - 1) + ), + AllLeadersOk = + length(Results) > 0 andalso + lists:all( + fun + ({ok, _}) -> + true; + (_) -> + false + end, + Results + ), + WorkersAlive = are_subscriber_workers_alive(SubscriberId), + case AllLeadersOk andalso WorkersAlive of + true -> + connected; + false -> + disconnected + end. + +are_subscriber_workers_alive(SubscriberId) -> + Children = supervisor:which_children(emqx_bridge_kafka_consumer_sup), + case lists:keyfind(SubscriberId, 1, Children) of + false -> + false; + {_, Pid, _, _} -> + Workers = brod_group_subscriber_v2:get_workers(Pid), + %% we can't enforce the number of partitions on a single + %% node, as the group might be spread across an emqx + %% cluster. + lists:all(fun is_process_alive/1, maps:values(Workers)) + end. + +log_when_error(Fun, Log) -> + try + Fun() + catch + C:E -> + ?SLOG(error, Log#{ + exception => C, + reason => E + }) + end. + +-spec consumer_group_id(atom() | binary()) -> binary(). +consumer_group_id(BridgeName0) -> + BridgeName = to_bin(BridgeName0), + <<"emqx-kafka-consumer-", BridgeName/binary>>. + +-spec is_dry_run(resource_id()) -> boolean(). +is_dry_run(ResourceId) -> + TestIdStart = string:find(ResourceId, ?TEST_ID_PREFIX), + case TestIdStart of + nomatch -> + false; + _ -> + string:equal(TestIdStart, ResourceId) + end. + +-spec make_client_id(resource_id(), kafka_consumer, atom() | binary()) -> atom(). +make_client_id(ResourceId, KafkaType, KafkaName) -> + case is_dry_run(ResourceId) of + false -> + ClientID0 = emqx_bridge_kafka_impl:make_client_id(KafkaType, KafkaName), + binary_to_atom(ClientID0); + true -> + %% It is a dry run and we don't want to leak too many + %% atoms. + probing_brod_consumers + end. + +convert_topic_mapping(TopicMappingList) -> + lists:foldl( + fun(Fields, Acc) -> + #{ + kafka_topic := KafkaTopic, + mqtt_topic := MQTTTopic, + qos := QoS, + payload_template := PayloadTemplate0 + } = Fields, + PayloadTemplate = emqx_plugin_libs_rule:preproc_tmpl(PayloadTemplate0), + Acc#{ + KafkaTopic => #{ + payload_template => PayloadTemplate, + mqtt_topic => MQTTTopic, + qos => QoS + } + } + end, + #{}, + TopicMappingList + ). + +render(FullMessage, PayloadTemplate) -> + Opts = #{ + return => full_binary, + var_trans => fun + (undefined) -> + <<>>; + (X) -> + emqx_plugin_libs_rule:bin(X) + end + }, + emqx_plugin_libs_rule:proc_tmpl(PayloadTemplate, FullMessage, Opts). + +encode(Value, none) -> + Value; +encode(Value, base64) -> + base64:encode(Value). + +to_bin(B) when is_binary(B) -> B; +to_bin(A) when is_atom(A) -> atom_to_binary(A, utf8). + +infer_client_error(Error) -> + case Error of + [{_BrokerEndpoint, {econnrefused, _}} | _] -> + connection_refused; + [{_BrokerEndpoint, {{sasl_auth_error, Message}, _}} | _] when is_binary(Message) -> + {auth_error, Message}; + [{_BrokerEndpoint, {{sasl_auth_error, _}, _}} | _] -> + auth_error; + _ -> + undefined + end. 
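The pure helpers above are easy to sanity-check in isolation; a hypothetical shell session (`consumer_group_id/1` is exported only under `-ifdef(TEST)`, and `encode/2` is internal, so the second expression just shows what it evaluates to for `base64` mode):

```erlang
1> emqx_bridge_kafka_impl_consumer:consumer_group_id(<<"my_consumer">>).
<<"emqx-kafka-consumer-my_consumer">>
2> base64:encode(<<1, 2, 3>>).
<<"AQID">>
```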
diff --git a/lib-ee/emqx_ee_bridge/src/kafka/emqx_bridge_impl_kafka_producer.erl b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_producer.erl similarity index 59% rename from lib-ee/emqx_ee_bridge/src/kafka/emqx_bridge_impl_kafka_producer.erl rename to apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_producer.erl index 6b46de35e..7bee2c70d 100644 --- a/lib-ee/emqx_ee_bridge/src/kafka/emqx_bridge_impl_kafka_producer.erl +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_producer.erl @@ -1,14 +1,18 @@ %%-------------------------------------------------------------------- %% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- --module(emqx_bridge_impl_kafka_producer). +-module(emqx_bridge_kafka_impl_producer). + +-include_lib("emqx_resource/include/emqx_resource.hrl"). %% callbacks of behaviour emqx_resource -export([ + is_buffer_supported/0, callback_mode/0, on_start/2, on_stop/2, on_query/3, + on_query_async/4, on_get_status/2 ]). @@ -19,44 +23,41 @@ -include_lib("emqx/include/logger.hrl"). +%% TODO: rename this to `kafka_producer' after alias support is added +%% to hocon; keeping this as just `kafka' for backwards compatibility. +-define(BRIDGE_TYPE, kafka). + +is_buffer_supported() -> true. + callback_mode() -> async_if_possible. %% @doc Config schema is defined in emqx_ee_bridge_kafka. on_start(InstId, Config) -> #{ - bridge_name := BridgeName, + authentication := Auth, bootstrap_hosts := Hosts0, + bridge_name := BridgeName, connect_timeout := ConnTimeout, + kafka := KafkaConfig = #{message := MessageTemplate, topic := KafkaTopic}, metadata_request_timeout := MetaReqTimeout, min_metadata_refresh_interval := MinMetaRefreshInterval, socket_opts := SocketOpts, - authentication := Auth, ssl := SSL } = Config, - %% TODO: change this to `kafka_producer` after refactoring for kafka_consumer - BridgeType = kafka, - ResourceID = emqx_bridge_resource:resource_id(BridgeType, BridgeName), - _ = maybe_install_wolff_telemetry_handlers(InstId, ResourceID), - %% it's a bug if producer config is not found - %% the caller should not try to start a producer if - %% there is no producer config - ProducerConfigWrapper = get_required(producer, Config, no_kafka_producer_config), - ProducerConfig = get_required(kafka, ProducerConfigWrapper, no_kafka_producer_parameters), - MessageTemplate = get_required(message, ProducerConfig, no_kafka_message_template), - Hosts = hosts(Hosts0), - ClientId = make_client_id(BridgeName), + BridgeType = ?BRIDGE_TYPE, + ResourceId = emqx_bridge_resource:resource_id(BridgeType, BridgeName), + _ = maybe_install_wolff_telemetry_handlers(ResourceId), + Hosts = emqx_bridge_kafka_impl:hosts(Hosts0), + ClientId = emqx_bridge_kafka_impl:make_client_id(BridgeType, BridgeName), ClientConfig = #{ min_metadata_refresh_interval => MinMetaRefreshInterval, connect_timeout => ConnTimeout, client_id => ClientId, request_timeout => MetaReqTimeout, extra_sock_opts => socket_opts(SocketOpts), - sasl => sasl(Auth), + sasl => emqx_bridge_kafka_impl:sasl(Auth), ssl => ssl(SSL) }, - #{ - topic := KafkaTopic - } = ProducerConfig, case wolff:ensure_supervised_client(ClientId, Hosts, ClientConfig) of {ok, _} -> ?SLOG(info, #{ @@ -73,13 +74,24 @@ on_start(InstId, Config) -> }), throw(failed_to_start_kafka_client) end, - WolffProducerConfig = producers_config(BridgeName, ClientId, ProducerConfig), + %% Check if this is a dry run + TestIdStart = string:find(InstId, ?TEST_ID_PREFIX), + IsDryRun = + case 
TestIdStart of
+            nomatch ->
+                false;
+            _ ->
+                string:equal(TestIdStart, InstId)
+        end,
+    WolffProducerConfig = producers_config(BridgeName, ClientId, KafkaConfig, IsDryRun),
     case wolff:ensure_supervised_producers(ClientId, KafkaTopic, WolffProducerConfig) of
         {ok, Producers} ->
             {ok, #{
                 message_template => compile_message_template(MessageTemplate),
                 client_id => ClientId,
-                producers => Producers
+                kafka_topic => KafkaTopic,
+                producers => Producers,
+                resource_id => ResourceId
             }};
         {error, Reason2} ->
             ?SLOG(error, #{
@@ -102,10 +114,13 @@ on_start(InstId, Config) ->
                 client_id => ClientId
             }
         ),
-        throw(failed_to_start_kafka_producer)
+        throw(
+            "Failed to start Kafka client. Please check the logs for errors and check"
+            " the connection parameters."
+        )
     end.
 
-on_stop(InstanceID, #{client_id := ClientID, producers := Producers}) ->
+on_stop(_InstanceID, #{client_id := ClientID, producers := Producers, resource_id := ResourceID}) ->
     _ = with_log_at_error(
         fun() -> wolff:stop_and_delete_supervised_producers(Producers) end,
         #{
@@ -121,36 +136,69 @@ on_stop(InstanceID, #{client_id := ClientID, producers := Producers}) ->
         }
     ),
     with_log_at_error(
-        fun() -> uninstall_telemetry_handlers(InstanceID) end,
+        fun() -> uninstall_telemetry_handlers(ResourceID) end,
         #{
             msg => "failed_to_uninstall_telemetry_handlers",
            client_id => ClientID
        }
    ).
 
+on_query(
+    _InstId,
+    {send_message, Message},
+    #{message_template := Template, producers := Producers}
+) ->
+    KafkaMessage = render_message(Template, Message),
+    %% TODO: this function is not used so far,
+    %% timeout should be configurable
+    %% or the on_query/3 should be on_query/4 instead.
+    try
+        {_Partition, _Offset} = wolff:send_sync(Producers, [KafkaMessage], 5000),
+        ok
+    catch
+        error:{producer_down, _} = Reason ->
+            {error, Reason};
+        error:timeout ->
+            {error, timeout}
+    end.
+
 %% @doc The callback API for rule-engine (or bridge without rules)
 %% The input argument `Message' is an enriched format (as a map())
 %% of the original #message{} record.
 %% The enrichment is done by rule-engine or by the data bridge framework.
 %% E.g. the output of rule-engine process chain
 %% or the direct mapping from an MQTT message.
-on_query(_InstId, {send_message, Message}, #{message_template := Template, producers := Producers}) ->
+on_query_async(
+    _InstId,
+    {send_message, Message},
+    AsyncReplyFn,
+    #{message_template := Template, producers := Producers}
+) ->
     KafkaMessage = render_message(Template, Message),
+    %% * Must be a batch because wolff:send and wolff:send_sync are batch APIs
+    %% * Must be a single-element batch because wolff counts calls, but not
+    %%   batch sizes, for counters and gauges.
+    Batch = [KafkaMessage],
     %% The returned information is discarded here.
     %% If the producer process is down when sending, this function would
     %% raise an error exception which is to be caught by the caller of this callback
-    {_Partition, _Pid} = wolff:send(Producers, [KafkaMessage], {fun ?MODULE:on_kafka_ack/3, [#{}]}),
-    {async_return, ok}.
+    {_Partition, Pid} = wolff:send(Producers, Batch, {fun ?MODULE:on_kafka_ack/3, [AsyncReplyFn]}),
+    %% this Pid is so far never used because the Kafka producer bypasses the buffer worker
+    {ok, Pid}.
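A minimal sketch of how the resource layer might drive these two callbacks; `InstId`, `State`, `ReplyFn`, and `ReplyCtx` are placeholders, and per the comments in this module the production reply function is `emqx_resource_buffer_worker:handle_async_reply/2`:

```erlang
Msg = #{clientid => <<"c1">>, payload => <<"hello">>, timestamp => 1679000000000},

%% Synchronous path: blocks for up to 5 seconds waiting for the Kafka ack.
ok = emqx_bridge_kafka_impl_producer:on_query(InstId, {send_message, Msg}, State),

%% Asynchronous path: on_kafka_ack/3 applies ReplyFn(ReplyCtx, ok) once the
%% partition leader has acknowledged the single-element batch.
{ok, _ProducerPid} = emqx_bridge_kafka_impl_producer:on_query_async(
    InstId, {send_message, Msg}, {ReplyFn, [ReplyCtx]}, State
).
```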
-compile_message_template(#{
-    key := KeyTemplate, value := ValueTemplate, timestamp := TimestampTemplate
-}) ->
+compile_message_template(T) ->
+    KeyTemplate = maps:get(key, T, <<"${.clientid}">>),
+    ValueTemplate = maps:get(value, T, <<"${.}">>),
+    TimestampTemplate = maps:get(timestamp, T, <<"${.timestamp}">>),
     #{
-        key => emqx_plugin_libs_rule:preproc_tmpl(KeyTemplate),
-        value => emqx_plugin_libs_rule:preproc_tmpl(ValueTemplate),
-        timestamp => emqx_plugin_libs_rule:preproc_tmpl(TimestampTemplate)
+        key => preproc_tmpl(KeyTemplate),
+        value => preproc_tmpl(ValueTemplate),
+        timestamp => preproc_tmpl(TimestampTemplate)
     }.
 
+preproc_tmpl(Tmpl) ->
+    emqx_plugin_libs_rule:preproc_tmpl(Tmpl).
+
 render_message(
     #{key := KeyTemplate, value := ValueTemplate, timestamp := TimestampTemplate}, Message
 ) ->
@@ -161,7 +209,14 @@ render_message(
     }.
 
 render(Template, Message) ->
-    emqx_plugin_libs_rule:proc_tmpl(Template, Message).
+    Opts = #{
+        var_trans => fun
+            (undefined) -> <<"">>;
+            (X) -> emqx_plugin_libs_rule:bin(X)
+        end,
+        return => full_binary
+    },
+    emqx_plugin_libs_rule:proc_tmpl(Template, Message, Opts).
 
 render_timestamp(Template, Message) ->
     try
@@ -171,20 +226,45 @@ render_timestamp(Template, Message) ->
         erlang:system_time(millisecond)
     end.
 
-on_kafka_ack(_Partition, _Offset, _Extra) ->
-    %% Do nothing so far.
-    %% Maybe need to bump some counters?
+%% Wolff producer never gives up retrying,
+%% so there can only be 'ok' results.
+on_kafka_ack(_Partition, Offset, {ReplyFn, Args}) when is_integer(Offset) ->
+    %% the ReplyFn is emqx_resource_buffer_worker:handle_async_reply/2
+    apply(ReplyFn, Args ++ [ok]);
+on_kafka_ack(_Partition, buffer_overflow_discarded, _Callback) ->
+    %% wolff should bump the dropped_queue_full counter
+    %% do not apply the callback (which basically bumps a success or failure counter)
     ok.
 
-on_get_status(_InstId, #{client_id := ClientID}) ->
-    case wolff:check_connectivity(ClientID) of
-        ok -> connected;
-        _ -> disconnected
+on_get_status(_InstId, #{client_id := ClientId, kafka_topic := KafkaTopic}) ->
+    case wolff_client_sup:find_client(ClientId) of
+        {ok, Pid} ->
+            do_get_status(Pid, KafkaTopic);
+        {error, _Reason} ->
+            disconnected
     end.
 
-%% Parse comma separated host:port list into a [{Host,Port}] list
-hosts(Hosts) ->
-    emqx_schema:parse_servers(Hosts, emqx_ee_bridge_kafka:host_opts()).
+do_get_status(Client, KafkaTopic) ->
+    %% TODO: add a wolff_producers:check_connectivity
+    case wolff_client:get_leader_connections(Client, KafkaTopic) of
+        {ok, Leaders} ->
+            %% Kafka is considered healthy as long as any of the partition leaders is reachable
+            case
+                lists:any(
+                    fun({_Partition, Pid}) ->
+                        is_pid(Pid) andalso erlang:is_process_alive(Pid)
+                    end,
+                    Leaders
+                )
+            of
+                true ->
+                    connected;
+                false ->
+                    disconnected
+            end;
+        {error, _} ->
+            disconnected
+    end.
 
 %% Extra socket options, such as sndbuf size etc.
 socket_opts(Opts) when is_map(Opts) ->
@@ -213,22 +293,12 @@ adjust_socket_buffer(Bytes, Opts) ->
             [{buffer, max(Bytes1, Bytes)} | Acc1]
         end.
 
-sasl(none) ->
-    undefined;
-sasl(#{mechanism := Mechanism, username := Username, password := Password}) ->
-    {Mechanism, Username, emqx_secret:wrap(Password)};
-sasl(#{
-    kerberos_principal := Principal,
-    kerberos_keytab_file := KeyTabFile
-}) ->
-    {callback, brod_gssapi, {gssapi, KeyTabFile, Principal}}.
-
 ssl(#{enable := true} = SSL) ->
     emqx_tls_lib:to_client_opts(SSL);
 ssl(_) ->
     [].
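To make the template handling concrete, here is a hypothetical rendering with the same options used in `render/2` above; the exact placeholder semantics (`${.value}` resolving against the message map root) are an assumption based on the defaults in the schema module:

```erlang
1> Tokens = emqx_plugin_libs_rule:preproc_tmpl(<<"v = ${.value}">>).
2> Opts = #{
       var_trans => fun(undefined) -> <<"">>; (X) -> emqx_plugin_libs_rule:bin(X) end,
       return => full_binary
   }.
3> emqx_plugin_libs_rule:proc_tmpl(Tokens, #{value => 42}, Opts).
<<"v = 42">>
4> emqx_plugin_libs_rule:proc_tmpl(Tokens, #{}, Opts).  %% missing values render empty
<<"v = ">>
```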
-producers_config(BridgeName, ClientId, Input) ->
+producers_config(BridgeName, ClientId, Input, IsDryRun) ->
     #{
         max_batch_bytes := MaxBatchBytes,
         compression := Compression,
@@ -254,12 +324,11 @@ producers_config(BridgeName, ClientId, Input) ->
             disk -> {false, replayq_dir(ClientId)};
             hybrid -> {true, replayq_dir(ClientId)}
         end,
-    %% TODO: change this once we add kafka source
-    BridgeType = kafka,
+    BridgeType = ?BRIDGE_TYPE,
     ResourceID = emqx_bridge_resource:resource_id(BridgeType, BridgeName),
     #{
-        name => make_producer_name(BridgeName),
-        partitioner => PartitionStrategy,
+        name => make_producer_name(BridgeName, IsDryRun),
+        partitioner => partitioner(PartitionStrategy),
         partition_count_refresh_interval_seconds => PCntRefreshInterval,
         replayq_dir => ReplayqDir,
         replayq_offload_mode => OffloadMode,
@@ -273,23 +342,30 @@ producers_config(BridgeName, ClientId, Input) ->
         telemetry_meta_data => #{bridge_id => ResourceID}
     }.
 
+%% The wolff API is a batch API.
+%% key_dispatch only looks at the first element, hence the name 'first_key_dispatch'.
+partitioner(random) -> random;
+partitioner(key_dispatch) -> first_key_dispatch.
+
 replayq_dir(ClientId) ->
     filename:join([emqx:data_dir(), "kafka", ClientId]).
 
-%% Client ID is better to be unique to make it easier for Kafka side trouble shooting.
-make_client_id(BridgeName) when is_atom(BridgeName) ->
-    make_client_id(atom_to_list(BridgeName));
-make_client_id(BridgeName) ->
-    iolist_to_binary([BridgeName, ":", atom_to_list(node())]).
-
 %% Producer name must be an atom which will be used as a ETS table name for
 %% partition worker lookup.
-make_producer_name(BridgeName) when is_atom(BridgeName) ->
-    make_producer_name(atom_to_list(BridgeName));
-make_producer_name(BridgeName) ->
-    %% Woff needs atom for ets table name registration
-    %% The assumption here is bridge is not often re-created
-    binary_to_atom(iolist_to_binary(["kafka_producer_", BridgeName])).
+make_producer_name(BridgeName, IsDryRun) when is_atom(BridgeName) ->
+    make_producer_name(atom_to_list(BridgeName), IsDryRun);
+make_producer_name(BridgeName, IsDryRun) ->
+    %% Wolff needs an atom for ETS table name registration. The assumption
+    %% here is that bridges with new names are not created often.
+    case IsDryRun of
+        true ->
+            %% It is a dry run and we don't want to leak too many atoms
+            %% so we use the default producer name instead of creating
+            %% a unique name.
+            probing_wolff_producers;
+        false ->
+            binary_to_atom(iolist_to_binary(["kafka_producer_", BridgeName]))
+    end.
 
 with_log_at_error(Fun, Log) ->
     try
@@ -302,35 +378,16 @@ with_log_at_error(Fun, Log) ->
         })
     end.
 
-get_required(Field, Config, Throw) ->
-    Value = maps:get(Field, Config, none),
-    Value =:= none andalso throw(Throw),
-    Value.
-
 %% we *must* match the bridge id in the event metadata with that in
 %% the handler config; otherwise, multiple kafka producer bridges will
 %% install multiple handlers to the same wolff events, multiplying the metrics.
-handle_telemetry_event(
-    [wolff, dropped],
-    #{counter_inc := Val},
-    #{bridge_id := ID},
-    #{bridge_id := ID}
-) when is_integer(Val) ->
-    emqx_resource_metrics:dropped_inc(ID, Val);
 handle_telemetry_event(
     [wolff, dropped_queue_full],
     #{counter_inc := Val},
     #{bridge_id := ID},
     #{bridge_id := ID}
 ) when is_integer(Val) ->
-    %% When wolff emits a `dropped_queue_full' event due to replayq
-    %% overflow, it also emits a `dropped' event (at the time of
-    %% writing, wolff is 1.7.4). Since we already bump `dropped' when
-    %% `dropped.queue_full' occurs, we have to correct it here. This
-    %% correction will have to be dropped if wolff stops also emitting
-    %% `dropped'.
-    emqx_resource_metrics:dropped_queue_full_inc(ID, Val),
-    emqx_resource_metrics:dropped_inc(ID, -Val);
+    emqx_resource_metrics:dropped_queue_full_inc(ID, Val);
 handle_telemetry_event(
     [wolff, queuing],
     #{gauge_set := Val},
@@ -345,13 +402,6 @@ handle_telemetry_event(
     #{bridge_id := ID}
 ) when is_integer(Val) ->
     emqx_resource_metrics:retried_inc(ID, Val);
-handle_telemetry_event(
-    [wolff, failed],
-    #{counter_inc := Val},
-    #{bridge_id := ID},
-    #{bridge_id := ID}
-) when is_integer(Val) ->
-    emqx_resource_metrics:failed_inc(ID, Val);
 handle_telemetry_event(
     [wolff, inflight],
     #{gauge_set := Val},
@@ -359,55 +409,32 @@ handle_telemetry_event(
     #{bridge_id := ID}
 ) when is_integer(Val) ->
     emqx_resource_metrics:inflight_set(ID, PartitionID, Val);
-handle_telemetry_event(
-    [wolff, retried_failed],
-    #{counter_inc := Val},
-    #{bridge_id := ID},
-    #{bridge_id := ID}
-) when is_integer(Val) ->
-    emqx_resource_metrics:retried_failed_inc(ID, Val);
-handle_telemetry_event(
-    [wolff, retried_success],
-    #{counter_inc := Val},
-    #{bridge_id := ID},
-    #{bridge_id := ID}
-) when is_integer(Val) ->
-    emqx_resource_metrics:retried_success_inc(ID, Val);
-handle_telemetry_event(
-    [wolff, success],
-    #{counter_inc := Val},
-    #{bridge_id := ID},
-    #{bridge_id := ID}
-) when is_integer(Val) ->
-    emqx_resource_metrics:success_inc(ID, Val);
 handle_telemetry_event(_EventId, _Metrics, _MetaData, _HandlerConfig) ->
     %% Event that we do not handle
     ok.
 
--spec telemetry_handler_id(emqx_resource:resource_id()) -> binary().
-telemetry_handler_id(InstanceID) ->
-    <<"emqx-bridge-kafka-producer-", InstanceID/binary>>.
+%% Note: don't use the instance/manager ID, as that changes every time
+%% the bridge is recreated, and will lead to multiplication of
+%% metrics.
+-spec telemetry_handler_id(resource_id()) -> binary().
+telemetry_handler_id(ResourceID) ->
+    <<"emqx-bridge-kafka-producer-", ResourceID/binary>>.
 
-uninstall_telemetry_handlers(InstanceID) ->
-    HandlerID = telemetry_handler_id(InstanceID),
+uninstall_telemetry_handlers(ResourceID) ->
+    HandlerID = telemetry_handler_id(ResourceID),
     telemetry:detach(HandlerID).
 
-maybe_install_wolff_telemetry_handlers(InstanceID, ResourceID) ->
+maybe_install_wolff_telemetry_handlers(ResourceID) ->
     %% Attach event handlers for Kafka telemetry events. If a handler with the
     %% handler id already exists, the attach_many function does nothing
     telemetry:attach_many(
         %% unique handler id
-        telemetry_handler_id(InstanceID),
+        telemetry_handler_id(ResourceID),
         [
-            [wolff, dropped],
             [wolff, dropped_queue_full],
             [wolff, queuing],
             [wolff, retried],
-            [wolff, failed],
-            [wolff, inflight],
-            [wolff, retried_failed],
-            [wolff, retried_success],
-            [wolff, success]
+            [wolff, inflight]
         ],
         fun ?MODULE:handle_telemetry_event/4,
         %% we *must* keep track of the same id that is handed down to
diff --git a/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_consumer_SUITE.erl b/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_consumer_SUITE.erl
new file mode 100644
index 000000000..4f98f33cf
--- /dev/null
+++ b/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_consumer_SUITE.erl
@@ -0,0 +1,2026 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+-module(emqx_bridge_kafka_impl_consumer_SUITE).
+
+-compile(nowarn_export_all).
+-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). +-include_lib("brod/include/brod.hrl"). +-include_lib("emqx/include/emqx_mqtt.hrl"). + +-import(emqx_common_test_helpers, [on_exit/1]). + +-define(BRIDGE_TYPE_BIN, <<"kafka_consumer">>). +-define(APPS, [emqx_bridge, emqx_resource, emqx_rule_engine, emqx_bridge_kafka]). + +%%------------------------------------------------------------------------------ +%% CT boilerplate +%%------------------------------------------------------------------------------ + +all() -> + [ + {group, plain}, + {group, ssl}, + {group, sasl_plain}, + {group, sasl_ssl} + ]. + +groups() -> + AllTCs = emqx_common_test_helpers:all(?MODULE), + SASLAuths = [ + sasl_auth_plain, + sasl_auth_scram256, + sasl_auth_scram512, + sasl_auth_kerberos + ], + SASLAuthGroups = [{group, Type} || Type <- SASLAuths], + OnlyOnceTCs = only_once_tests(), + MatrixTCs = AllTCs -- OnlyOnceTCs, + SASLTests = [{Group, MatrixTCs} || Group <- SASLAuths], + [ + {plain, MatrixTCs ++ OnlyOnceTCs}, + {ssl, MatrixTCs}, + {sasl_plain, SASLAuthGroups}, + {sasl_ssl, SASLAuthGroups} + ] ++ SASLTests. + +sasl_only_tests() -> + [t_failed_creation_then_fixed]. + +%% tests that do not need to be run on all groups +only_once_tests() -> + [ + t_begin_offset_earliest, + t_bridge_rule_action_source, + t_cluster_group, + t_node_joins_existing_cluster, + t_cluster_node_down, + t_multiple_topic_mappings + ]. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + emqx_mgmt_api_test_util:end_suite(), + ok = emqx_common_test_helpers:stop_apps([emqx_conf]), + ok = emqx_connector_test_helpers:stop_apps(lists:reverse(?APPS)), + _ = application:stop(emqx_connector), + ok. 
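The group definitions above nest the SASL mechanism groups under the transport groups; a hypothetical shell check of the resulting structure:

```erlang
1> proplists:get_value(sasl_ssl, emqx_bridge_kafka_impl_consumer_SUITE:groups()).
[{group,sasl_auth_plain},
 {group,sasl_auth_scram256},
 {group,sasl_auth_scram512},
 {group,sasl_auth_kerberos}]
```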
+ +init_per_group(plain = Type, Config) -> + KafkaHost = os:getenv("KAFKA_PLAIN_HOST", "toxiproxy.emqx.net"), + KafkaPort = list_to_integer(os:getenv("KAFKA_PLAIN_PORT", "9292")), + DirectKafkaHost = os:getenv("KAFKA_DIRECT_PLAIN_HOST", "kafka-1.emqx.net"), + DirectKafkaPort = list_to_integer(os:getenv("KAFKA_DIRECT_PLAIN_PORT", "9092")), + ProxyName = "kafka_plain", + case emqx_common_test_helpers:is_tcp_server_available(KafkaHost, KafkaPort) of + true -> + Config1 = common_init_per_group(), + [ + {proxy_name, ProxyName}, + {kafka_host, KafkaHost}, + {kafka_port, KafkaPort}, + {direct_kafka_host, DirectKafkaHost}, + {direct_kafka_port, DirectKafkaPort}, + {kafka_type, Type}, + {use_sasl, false}, + {use_tls, false} + | Config1 ++ Config + ]; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_kafka); + _ -> + {skip, no_kafka} + end + end; +init_per_group(sasl_plain = Type, Config) -> + KafkaHost = os:getenv("KAFKA_SASL_PLAIN_HOST", "toxiproxy.emqx.net"), + KafkaPort = list_to_integer(os:getenv("KAFKA_SASL_PLAIN_PORT", "9293")), + DirectKafkaHost = os:getenv("KAFKA_DIRECT_SASL_HOST", "kafka-1.emqx.net"), + DirectKafkaPort = list_to_integer(os:getenv("KAFKA_DIRECT_SASL_PORT", "9093")), + ProxyName = "kafka_sasl_plain", + case emqx_common_test_helpers:is_tcp_server_available(KafkaHost, KafkaPort) of + true -> + Config1 = common_init_per_group(), + [ + {proxy_name, ProxyName}, + {kafka_host, KafkaHost}, + {kafka_port, KafkaPort}, + {direct_kafka_host, DirectKafkaHost}, + {direct_kafka_port, DirectKafkaPort}, + {kafka_type, Type}, + {use_sasl, true}, + {use_tls, false} + | Config1 ++ Config + ]; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_kafka); + _ -> + {skip, no_kafka} + end + end; +init_per_group(ssl = Type, Config) -> + KafkaHost = os:getenv("KAFKA_SSL_HOST", "toxiproxy.emqx.net"), + KafkaPort = list_to_integer(os:getenv("KAFKA_SSL_PORT", "9294")), + DirectKafkaHost = os:getenv("KAFKA_DIRECT_SSL_HOST", "kafka-1.emqx.net"), + DirectKafkaPort = list_to_integer(os:getenv("KAFKA_DIRECT_SSL_PORT", "9094")), + ProxyName = "kafka_ssl", + case emqx_common_test_helpers:is_tcp_server_available(KafkaHost, KafkaPort) of + true -> + Config1 = common_init_per_group(), + [ + {proxy_name, ProxyName}, + {kafka_host, KafkaHost}, + {kafka_port, KafkaPort}, + {direct_kafka_host, DirectKafkaHost}, + {direct_kafka_port, DirectKafkaPort}, + {kafka_type, Type}, + {use_sasl, false}, + {use_tls, true} + | Config1 ++ Config + ]; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_kafka); + _ -> + {skip, no_kafka} + end + end; +init_per_group(sasl_ssl = Type, Config) -> + KafkaHost = os:getenv("KAFKA_SASL_SSL_HOST", "toxiproxy.emqx.net"), + KafkaPort = list_to_integer(os:getenv("KAFKA_SASL_SSL_PORT", "9295")), + DirectKafkaHost = os:getenv("KAFKA_DIRECT_SASL_SSL_HOST", "kafka-1.emqx.net"), + DirectKafkaPort = list_to_integer(os:getenv("KAFKA_DIRECT_SASL_SSL_PORT", "9095")), + ProxyName = "kafka_sasl_ssl", + case emqx_common_test_helpers:is_tcp_server_available(KafkaHost, KafkaPort) of + true -> + Config1 = common_init_per_group(), + [ + {proxy_name, ProxyName}, + {kafka_host, KafkaHost}, + {kafka_port, KafkaPort}, + {direct_kafka_host, DirectKafkaHost}, + {direct_kafka_port, DirectKafkaPort}, + {kafka_type, Type}, + {use_sasl, true}, + {use_tls, true} + | Config1 ++ Config + ]; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_kafka); + _ -> + {skip, no_kafka} + end + end; +init_per_group(sasl_auth_plain, Config) -> + [{sasl_auth_mechanism, plain} | Config]; 
+init_per_group(sasl_auth_scram256, Config) ->
+    [{sasl_auth_mechanism, scram_sha_256} | Config];
+init_per_group(sasl_auth_scram512, Config) ->
+    [{sasl_auth_mechanism, scram_sha_512} | Config];
+init_per_group(sasl_auth_kerberos, Config0) ->
+    %% currently it's tricky to set up kerberos + toxiproxy, probably
+    %% due to hostname issues...
+    UseTLS = ?config(use_tls, Config0),
+    {KafkaHost, KafkaPort} =
+        case UseTLS of
+            true ->
+                {
+                    os:getenv("KAFKA_SASL_SSL_HOST", "kafka-1.emqx.net"),
+                    list_to_integer(os:getenv("KAFKA_SASL_SSL_PORT", "9095"))
+                };
+            false ->
+                {
+                    os:getenv("KAFKA_SASL_PLAIN_HOST", "kafka-1.emqx.net"),
+                    list_to_integer(os:getenv("KAFKA_SASL_PLAIN_PORT", "9093"))
+                }
+        end,
+    Config =
+        lists:map(
+            fun
+                ({kafka_host, _KafkaHost}) ->
+                    {kafka_host, KafkaHost};
+                ({kafka_port, _KafkaPort}) ->
+                    {kafka_port, KafkaPort};
+                (KV) ->
+                    KV
+            end,
+            [{has_proxy, false}, {sasl_auth_mechanism, kerberos} | Config0]
+        ),
+    Config;
+init_per_group(_Group, Config) ->
+    Config.
+
+common_init_per_group() ->
+    ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"),
+    ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")),
+    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
+    application:load(emqx_bridge),
+    ok = emqx_common_test_helpers:start_apps([emqx_conf]),
+    ok = emqx_connector_test_helpers:start_apps(?APPS),
+    {ok, _} = application:ensure_all_started(emqx_connector),
+    emqx_mgmt_api_test_util:init_suite(),
+    UniqueNum = integer_to_binary(erlang:unique_integer()),
+    MQTTTopic = <<"mqtt/topic/", UniqueNum/binary>>,
+    [
+        {proxy_host, ProxyHost},
+        {proxy_port, ProxyPort},
+        {mqtt_topic, MQTTTopic},
+        {mqtt_qos, 0},
+        {mqtt_payload, full_message},
+        {num_partitions, 3}
+    ].
+
+common_end_per_group(Config) ->
+    ProxyHost = ?config(proxy_host, Config),
+    ProxyPort = ?config(proxy_port, Config),
+    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
+    delete_all_bridges(),
+    ok.
+
+end_per_group(Group, Config) when
+    Group =:= plain;
+    Group =:= ssl;
+    Group =:= sasl_plain;
+    Group =:= sasl_ssl
+->
+    common_end_per_group(Config),
+    ok;
+end_per_group(_Group, _Config) ->
+    ok.
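+
+%% NOTE (illustrative): the os:getenv/2 defaults above assume the CI
+%% docker-compose network.  To run the groups against locally running
+%% containers instead, override the same variables, e.g.:
+%%   PROXY_HOST=localhost PROXY_PORT=8474 \
+%%   KAFKA_PLAIN_HOST=localhost KAFKA_PLAIN_PORT=9292 \
+%%   KAFKA_DIRECT_PLAIN_HOST=localhost KAFKA_DIRECT_PLAIN_PORT=9092 ...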
+
+init_per_testcase(TestCase, Config) when
+    TestCase =:= t_failed_creation_then_fixed
+->
+    KafkaType = ?config(kafka_type, Config),
+    AuthMechanism = ?config(sasl_auth_mechanism, Config),
+    IsSASL = lists:member(KafkaType, [sasl_plain, sasl_ssl]),
+    case {IsSASL, AuthMechanism} of
+        {true, kerberos} ->
+            [{skip_does_not_apply, true}];
+        {true, _} ->
+            common_init_per_testcase(TestCase, Config);
+        {false, _} ->
+            [{skip_does_not_apply, true}]
+    end;
+init_per_testcase(TestCase, Config) when
+    TestCase =:= t_receive_after_recovery
+->
+    %% test with one partition only for this case because
+    %% the wait probe may not always be sent to the same partition
+    HasProxy = proplists:get_value(has_proxy, Config, true),
+    case HasProxy of
+        false ->
+            [{skip_does_not_apply, true}];
+        true ->
+            common_init_per_testcase(TestCase, [{num_partitions, 1} | Config])
+    end;
+init_per_testcase(TestCase, Config) when
+    TestCase =:= t_on_get_status
+->
+    HasProxy = proplists:get_value(has_proxy, Config, true),
+    case HasProxy of
+        false ->
+            [{skip_does_not_apply, true}];
+        true ->
+            common_init_per_testcase(TestCase, Config)
+    end;
+init_per_testcase(t_cluster_group = TestCase, Config0) ->
+    Config = emqx_utils:merge_opts(Config0, [{num_partitions, 6}]),
+    common_init_per_testcase(TestCase, Config);
+init_per_testcase(t_multiple_topic_mappings = TestCase, Config0) ->
+    KafkaTopicBase =
+        <<
+            (atom_to_binary(TestCase))/binary,
+            (integer_to_binary(erlang:unique_integer()))/binary
+        >>,
+    MQTTTopicBase =
+        <<"mqtt/", (atom_to_binary(TestCase))/binary,
+            (integer_to_binary(erlang:unique_integer()))/binary, "/">>,
+    TopicMapping =
+        [
+            #{
+                kafka_topic => <<KafkaTopicBase/binary, "-1">>,
+                mqtt_topic => <<MQTTTopicBase/binary, "1">>,
+                qos => 1,
+                payload_template => <<"${.}">>
+            },
+            #{
+                kafka_topic => <<KafkaTopicBase/binary, "-2">>,
+                mqtt_topic => <<MQTTTopicBase/binary, "2">>,
+                qos => 2,
+                payload_template => <<"v = ${.value}">>
+            }
+        ],
+    Config = [{topic_mapping, TopicMapping} | Config0],
+    common_init_per_testcase(TestCase, Config);
+init_per_testcase(TestCase, Config) ->
+    common_init_per_testcase(TestCase, Config).
+
+common_init_per_testcase(TestCase, Config0) ->
+    ct:timetrap(timer:seconds(60)),
+    delete_all_bridges(),
+    KafkaTopic =
+        <<
+            (atom_to_binary(TestCase))/binary,
+            (integer_to_binary(erlang:unique_integer()))/binary
+        >>,
+    KafkaType = ?config(kafka_type, Config0),
+    UniqueNum = integer_to_binary(erlang:unique_integer()),
+    MQTTTopic = proplists:get_value(mqtt_topic, Config0, <<"mqtt/topic/", UniqueNum/binary>>),
+    MQTTQoS = proplists:get_value(mqtt_qos, Config0, 0),
+    DefaultTopicMapping = [
+        #{
+            kafka_topic => KafkaTopic,
+            mqtt_topic => MQTTTopic,
+            qos => MQTTQoS,
+            payload_template => <<"${.}">>
+        }
+    ],
+    TopicMapping = proplists:get_value(topic_mapping, Config0, DefaultTopicMapping),
+    Config = [
+        {kafka_topic, KafkaTopic},
+        {topic_mapping, TopicMapping}
+        | Config0
+    ],
+    {Name, ConfigString, KafkaConfig} = kafka_config(
+        TestCase, KafkaType, Config
+    ),
+    ensure_topics(Config),
+    ProducersConfigs = start_producers(TestCase, Config),
+    ok = snabbkaffe:start_trace(),
+    [
+        {kafka_name, Name},
+        {kafka_config_string, ConfigString},
+        {kafka_config, KafkaConfig},
+        {kafka_producers, ProducersConfigs}
+        | Config
+    ].
+
+end_per_testcase(_Testcase, Config) ->
+    case proplists:get_bool(skip_does_not_apply, Config) of
+        true ->
+            ok;
+        false ->
+            ProxyHost = ?config(proxy_host, Config),
+            ProxyPort = ?config(proxy_port, Config),
+            ProducersConfigs = ?config(kafka_producers, Config),
+            emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
+            delete_all_bridges(),
+            #{clientid := KafkaProducerClientId, producers := ProducersMapping} =
+                ProducersConfigs,
+            lists:foreach(
+                fun(Producers) ->
+                    ok = wolff:stop_and_delete_supervised_producers(Producers)
+                end,
+                maps:values(ProducersMapping)
+            ),
+            ok = wolff:stop_and_delete_supervised_client(KafkaProducerClientId),
+            %% in CI, apparently this needs more time since the
+            %% machines struggle with all the containers running...
+            emqx_common_test_helpers:call_janitor(60_000),
+            ok = snabbkaffe:stop(),
+            ok
+    end.
+
+%%------------------------------------------------------------------------------
+%% Helper fns
+%%------------------------------------------------------------------------------
+
+start_producers(TestCase, Config) ->
+    TopicMapping = ?config(topic_mapping, Config),
+    KafkaClientId =
+        <<"test-client-", (atom_to_binary(TestCase))/binary,
+            (integer_to_binary(erlang:unique_integer()))/binary>>,
+    DirectKafkaHost = ?config(direct_kafka_host, Config),
+    DirectKafkaPort = ?config(direct_kafka_port, Config),
+    UseTLS = ?config(use_tls, Config),
+    UseSASL = ?config(use_sasl, Config),
+    Hosts = emqx_bridge_kafka_impl:hosts(
+        DirectKafkaHost ++ ":" ++ integer_to_list(DirectKafkaPort)
+    ),
+    SSL =
+        case UseTLS of
+            true ->
+                %% hint: when running locally, need to
+                %% `chmod og+rw` those files to be readable.
+                emqx_tls_lib:to_client_opts(
+                    #{
+                        keyfile => shared_secret(client_keyfile),
+                        certfile => shared_secret(client_certfile),
+                        cacertfile => shared_secret(client_cacertfile),
+                        verify => verify_none,
+                        enable => true
+                    }
+                );
+            false ->
+                []
+        end,
+    SASL =
+        case UseSASL of
+            true -> {plain, <<"emqxuser">>, <<"password">>};
+            false -> undefined
+        end,
+    ClientConfig = #{
+        min_metadata_refresh_interval => 5_000,
+        connect_timeout => 5_000,
+        client_id => KafkaClientId,
+        request_timeout => 1_000,
+        sasl => SASL,
+        ssl => SSL
+    },
+    {ok, Clients} = wolff:ensure_supervised_client(KafkaClientId, Hosts, ClientConfig),
+    ProducersData0 =
+        #{
+            clients => Clients,
+            clientid => KafkaClientId,
+            producers => #{}
+        },
+    lists:foldl(
+        fun(#{kafka_topic := KafkaTopic}, #{producers := ProducersMapping0} = Acc) ->
+            Producers = do_start_producer(KafkaClientId, KafkaTopic),
+            ProducersMapping = ProducersMapping0#{KafkaTopic => Producers},
+            Acc#{producers := ProducersMapping}
+        end,
+        ProducersData0,
+        TopicMapping
+    ).
+
+do_start_producer(KafkaClientId, KafkaTopic) ->
+    Name = binary_to_atom(<<KafkaClientId/binary, "_", KafkaTopic/binary>>),
+    ProducerConfig =
+        #{
+            name => Name,
+            partitioner => roundrobin,
+            partition_count_refresh_interval_seconds => 1_000,
+            replayq_max_total_bytes => 10_000,
+            replayq_seg_bytes => 9_000,
+            drop_if_highmem => false,
+            required_acks => leader_only,
+            max_batch_bytes => 900_000,
+            max_send_ahead => 0,
+            compression => no_compression,
+            telemetry_meta_data => #{}
+        },
+    {ok, Producers} = wolff:ensure_supervised_producers(KafkaClientId, KafkaTopic, ProducerConfig),
+    Producers.
+ +ensure_topics(Config) -> + TopicMapping = ?config(topic_mapping, Config), + KafkaHost = ?config(kafka_host, Config), + KafkaPort = ?config(kafka_port, Config), + UseTLS = ?config(use_tls, Config), + UseSASL = ?config(use_sasl, Config), + NumPartitions = proplists:get_value(num_partitions, Config, 3), + Endpoints = [{KafkaHost, KafkaPort}], + TopicConfigs = [ + #{ + name => KafkaTopic, + num_partitions => NumPartitions, + replication_factor => 1, + assignments => [], + configs => [] + } + || #{kafka_topic := KafkaTopic} <- TopicMapping + ], + RequestConfig = #{timeout => 5_000}, + ConnConfig0 = + case UseTLS of + true -> + %% hint: when running locally, need to + %% `chmod og+rw` those files to be readable. + #{ + ssl => emqx_tls_lib:to_client_opts( + #{ + keyfile => shared_secret(client_keyfile), + certfile => shared_secret(client_certfile), + cacertfile => shared_secret(client_cacertfile), + verify => verify_none, + enable => true + } + ) + }; + false -> + #{} + end, + ConnConfig = + case UseSASL of + true -> + ConnConfig0#{sasl => {plain, <<"emqxuser">>, <<"password">>}}; + false -> + ConnConfig0#{sasl => undefined} + end, + case brod:create_topics(Endpoints, TopicConfigs, RequestConfig, ConnConfig) of + ok -> ok; + {error, topic_already_exists} -> ok + end. + +shared_secret_path() -> + os:getenv("CI_SHARED_SECRET_PATH", "/var/lib/secret"). + +shared_secret(client_keyfile) -> + filename:join([shared_secret_path(), "client.key"]); +shared_secret(client_certfile) -> + filename:join([shared_secret_path(), "client.crt"]); +shared_secret(client_cacertfile) -> + filename:join([shared_secret_path(), "ca.crt"]); +shared_secret(rig_keytab) -> + filename:join([shared_secret_path(), "rig.keytab"]). + +publish(Config, Messages) -> + %% pick the first topic if not specified + #{producers := ProducersMapping} = ?config(kafka_producers, Config), + [{KafkaTopic, Producers} | _] = maps:to_list(ProducersMapping), + ct:pal("publishing to ~p:\n ~p", [KafkaTopic, Messages]), + {_Partition, _OffsetReply} = wolff:send_sync(Producers, Messages, 10_000). + +publish(Config, KafkaTopic, Messages) -> + #{producers := ProducersMapping} = ?config(kafka_producers, Config), + #{KafkaTopic := Producers} = ProducersMapping, + ct:pal("publishing to ~p:\n ~p", [KafkaTopic, Messages]), + {_Partition, _OffsetReply} = wolff:send_sync(Producers, Messages, 10_000). 
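+
+%% A minimal, self-contained sketch of the wolff flow wrapped by
+%% start_producers/2 and publish/2,3 above.  Illustrative only: it
+%% assumes a plaintext broker on localhost:9092 and an already-existing
+%% topic named <<"example-topic">>.
+wolff_publish_example() ->
+    ClientId = <<"example_wolff_client">>,
+    Hosts = [{"localhost", 9092}],
+    {ok, _Client} = wolff:ensure_supervised_client(ClientId, Hosts, #{}),
+    {ok, Producers} = wolff:ensure_supervised_producers(
+        ClientId, <<"example-topic">>, #{name => example_producers}
+    ),
+    %% send_sync/3 blocks until the batch is acked and returns the
+    %% partition and offset it landed on.
+    {_Partition, _Offset} =
+        wolff:send_sync(Producers, [#{key => <<"k">>, value => <<"v">>}], 10_000),
+    ok = wolff:stop_and_delete_supervised_producers(Producers),
+    ok = wolff:stop_and_delete_supervised_client(ClientId).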
+ +kafka_config(TestCase, _KafkaType, Config) -> + UniqueNum = integer_to_binary(erlang:unique_integer()), + KafkaHost = ?config(kafka_host, Config), + KafkaPort = ?config(kafka_port, Config), + KafkaTopic = ?config(kafka_topic, Config), + AuthType = proplists:get_value(sasl_auth_mechanism, Config, none), + UseTLS = proplists:get_value(use_tls, Config, false), + Name = << + (atom_to_binary(TestCase))/binary, UniqueNum/binary + >>, + MQTTTopic = proplists:get_value(mqtt_topic, Config, <<"mqtt/topic/", UniqueNum/binary>>), + MQTTQoS = proplists:get_value(mqtt_qos, Config, 0), + DefaultTopicMapping = [ + #{ + kafka_topic => KafkaTopic, + mqtt_topic => MQTTTopic, + qos => MQTTQoS, + payload_template => <<"${.}">> + } + ], + TopicMapping0 = proplists:get_value(topic_mapping, Config, DefaultTopicMapping), + TopicMappingStr = topic_mapping(TopicMapping0), + ConfigString = + io_lib:format( + "bridges.kafka_consumer.~s {\n" + " enable = true\n" + " bootstrap_hosts = \"~p:~b\"\n" + " connect_timeout = 5s\n" + " min_metadata_refresh_interval = 3s\n" + " metadata_request_timeout = 5s\n" + "~s" + " kafka {\n" + " max_batch_bytes = 896KB\n" + " max_rejoin_attempts = 5\n" + " offset_commit_interval_seconds = 3\n" + %% todo: matrix this + " offset_reset_policy = latest\n" + " }\n" + "~s" + " key_encoding_mode = none\n" + " value_encoding_mode = none\n" + " ssl {\n" + " enable = ~p\n" + " verify = verify_none\n" + " server_name_indication = \"auto\"\n" + " }\n" + "}\n", + [ + Name, + KafkaHost, + KafkaPort, + authentication(AuthType), + TopicMappingStr, + UseTLS + ] + ), + {Name, ConfigString, parse_and_check(ConfigString, Name)}. + +topic_mapping(TopicMapping0) -> + Template0 = << + "{kafka_topic = \"{{ kafka_topic }}\"," + " mqtt_topic = \"{{ mqtt_topic }}\"," + " qos = {{ qos }}," + " payload_template = \"{{{ payload_template }}}\" }" + >>, + Template = bbmustache:parse_binary(Template0), + Entries = + lists:map( + fun(Params) -> + bbmustache:compile(Template, Params, [{key_type, atom}]) + end, + TopicMapping0 + ), + iolist_to_binary( + [ + " topic_mapping = [", + lists:join(<<",\n">>, Entries), + "]\n" + ] + ). + +authentication(Type) when + Type =:= scram_sha_256; + Type =:= scram_sha_512; + Type =:= plain +-> + io_lib:format( + " authentication = {\n" + " mechanism = ~p\n" + " username = emqxuser\n" + " password = password\n" + " }\n", + [Type] + ); +authentication(kerberos) -> + %% TODO: how to make this work locally outside docker??? + io_lib:format( + " authentication = {\n" + " kerberos_principal = rig@KDC.EMQX.NET\n" + " kerberos_keytab_file = \"~s\"\n" + " }\n", + [shared_secret(rig_keytab)] + ); +authentication(_) -> + " authentication = none\n". + +parse_and_check(ConfigString, Name) -> + {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), + TypeBin = ?BRIDGE_TYPE_BIN, + hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}), + #{<<"bridges">> := #{TypeBin := #{Name := Config}}} = RawConf, + Config. + +create_bridge(Config) -> + create_bridge(Config, _Overrides = #{}). + +create_bridge(Config, Overrides) -> + Type = ?BRIDGE_TYPE_BIN, + Name = ?config(kafka_name, Config), + KafkaConfig0 = ?config(kafka_config, Config), + KafkaConfig = emqx_utils_maps:deep_merge(KafkaConfig0, Overrides), + emqx_bridge:create(Type, Name, KafkaConfig). + +delete_bridge(Config) -> + Type = ?BRIDGE_TYPE_BIN, + Name = ?config(kafka_name, Config), + emqx_bridge:remove(Type, Name). 
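+
+%% Illustrative sketch of the bbmustache rendering performed by
+%% topic_mapping/1 above (hypothetical input values):
+render_topic_mapping_entry_example() ->
+    Template = bbmustache:parse_binary(
+        <<"{kafka_topic = \"{{ kafka_topic }}\", qos = {{ qos }}}">>
+    ),
+    %% with {key_type, atom}, the template keys are looked up as atom
+    %% map keys; this returns <<"{kafka_topic = \"kt\", qos = 1}">>.
+    bbmustache:compile(Template, #{kafka_topic => <<"kt">>, qos => 1}, [{key_type, atom}]).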
+ +delete_all_bridges() -> + lists:foreach( + fun(#{name := Name, type := Type}) -> + emqx_bridge:remove(Type, Name) + end, + emqx_bridge:list() + ). + +create_bridge_api(Config) -> + create_bridge_api(Config, _Overrides = #{}). + +create_bridge_api(Config, Overrides) -> + TypeBin = ?BRIDGE_TYPE_BIN, + Name = ?config(kafka_name, Config), + KafkaConfig0 = ?config(kafka_config, Config), + KafkaConfig = emqx_utils_maps:deep_merge(KafkaConfig0, Overrides), + Params = KafkaConfig#{<<"type">> => TypeBin, <<"name">> => Name}, + Path = emqx_mgmt_api_test_util:api_path(["bridges"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("creating bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of + {ok, {Status, Headers, Body0}} -> + {ok, {Status, Headers, emqx_utils_json:decode(Body0, [return_maps])}}; + Error -> + Error + end, + ct:pal("bridge create result: ~p", [Res]), + Res. + +update_bridge_api(Config) -> + update_bridge_api(Config, _Overrides = #{}). + +update_bridge_api(Config, Overrides) -> + TypeBin = ?BRIDGE_TYPE_BIN, + Name = ?config(kafka_name, Config), + KafkaConfig0 = ?config(kafka_config, Config), + KafkaConfig = emqx_utils_maps:deep_merge(KafkaConfig0, Overrides), + BridgeId = emqx_bridge_resource:bridge_id(TypeBin, Name), + Params = KafkaConfig#{<<"type">> => TypeBin, <<"name">> => Name}, + Path = emqx_mgmt_api_test_util:api_path(["bridges", BridgeId]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("updating bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(put, Path, "", AuthHeader, Params, Opts) of + {ok, {_Status, _Headers, Body0}} -> {ok, emqx_utils_json:decode(Body0, [return_maps])}; + Error -> Error + end, + ct:pal("bridge update result: ~p", [Res]), + Res. + +probe_bridge_api(Config) -> + TypeBin = ?BRIDGE_TYPE_BIN, + Name = ?config(kafka_name, Config), + KafkaConfig = ?config(kafka_config, Config), + Params = KafkaConfig#{<<"type">> => TypeBin, <<"name">> => Name}, + Path = emqx_mgmt_api_test_util:api_path(["bridges_probe"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("probing bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of + {ok, {{_, 204, _}, _Headers, _Body0} = Res0} -> {ok, Res0}; + Error -> Error + end, + ct:pal("bridge probe result: ~p", [Res]), + Res. + +send_message(Config, Payload) -> + Name = ?config(kafka_name, Config), + Type = ?BRIDGE_TYPE_BIN, + BridgeId = emqx_bridge_resource:bridge_id(Type, Name), + emqx_bridge:send_message(BridgeId, Payload). + +resource_id(Config) -> + Type = ?BRIDGE_TYPE_BIN, + Name = ?config(kafka_name, Config), + emqx_bridge_resource:resource_id(Type, Name). + +instance_id(Config) -> + ResourceId = resource_id(Config), + [{_, InstanceId}] = ets:lookup(emqx_resource_manager, {owner, ResourceId}), + InstanceId. + +wait_for_expected_published_messages(Messages0, Timeout) -> + Messages = maps:from_list([{K, Msg} || Msg = #{key := K} <- Messages0]), + do_wait_for_expected_published_messages(Messages, [], Timeout). 
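+
+%% For reference (an assumption about emqx_bridge_resource naming, not
+%% verified here): bridge_id(Type, Name) yields a binary of the form
+%% <<"kafka_consumer:NAME">>, and resource_id/2 prefixes it, i.e.
+%% <<"bridge:kafka_consumer:NAME">>.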
+
+do_wait_for_expected_published_messages(Messages, Acc, _Timeout) when map_size(Messages) =:= 0 ->
+    lists:reverse(Acc);
+do_wait_for_expected_published_messages(Messages0, Acc0, Timeout) ->
+    receive
+        {publish, Msg0 = #{payload := Payload}} ->
+            case emqx_utils_json:safe_decode(Payload, [return_maps]) of
+                {error, _} ->
+                    ct:pal("unexpected message: ~p; discarding", [Msg0]),
+                    do_wait_for_expected_published_messages(Messages0, Acc0, Timeout);
+                {ok, Decoded = #{<<"key">> := K}} when is_map_key(K, Messages0) ->
+                    Msg = Msg0#{payload := Decoded},
+                    ct:pal("received expected message: ~p", [Msg]),
+                    Acc = [Msg | Acc0],
+                    Messages = maps:remove(K, Messages0),
+                    do_wait_for_expected_published_messages(Messages, Acc, Timeout);
+                {ok, Decoded} ->
+                    ct:pal("unexpected message: ~p; discarding", [Msg0#{payload := Decoded}]),
+                    do_wait_for_expected_published_messages(Messages0, Acc0, Timeout)
+            end
+    after Timeout ->
+        error(
+            {timed_out_waiting_for_published_messages, #{
+                so_far => Acc0,
+                remaining => Messages0,
+                mailbox => process_info(self(), messages)
+            }}
+        )
+    end.
+
+receive_published() ->
+    receive_published(#{}).
+
+receive_published(Opts0) ->
+    Default = #{n => 1, timeout => 10_000},
+    Opts = maps:merge(Default, Opts0),
+    receive_published(Opts, []).
+
+receive_published(#{n := N, timeout := _Timeout}, Acc) when N =< 0 ->
+    lists:reverse(Acc);
+receive_published(#{n := N, timeout := Timeout} = Opts, Acc) ->
+    receive
+        {publish, Msg} ->
+            receive_published(Opts#{n := N - 1}, [Msg | Acc])
+    after Timeout ->
+        error(
+            {timeout, #{
+                msgs_so_far => Acc,
+                mailbox => process_info(self(), messages),
+                expected_remaining => N
+            }}
+        )
+    end.
+
+wait_until_subscribers_are_ready(N, Timeout) ->
+    {ok, _} =
+        snabbkaffe:block_until(
+            ?match_n_events(N, #{?snk_kind := kafka_consumer_subscriber_init}),
+            Timeout
+        ),
+    ok.
+
+%% Kinda hacky, but for as-yet-unknown reasons kafka/brod seem a bit
+%% flaky about when they truly start consuming the messages...
+%% `Period' should be greater than the `sleep_timeout' of the consumer
+%% (default 1 s).
+ping_until_healthy(Config, Period, Timeout) ->
+    #{producers := ProducersMapping} = ?config(kafka_producers, Config),
+    [KafkaTopic | _] = maps:keys(ProducersMapping),
+    ping_until_healthy(Config, KafkaTopic, Period, Timeout).
+
+ping_until_healthy(_Config, _KafkaTopic, _Period, Timeout) when Timeout =< 0 ->
+    ct:fail("kafka subscriber did not stabilize!");
+ping_until_healthy(Config, KafkaTopic, Period, Timeout) ->
+    TimeA = erlang:monotonic_time(millisecond),
+    Payload = emqx_guid:to_hexstr(emqx_guid:gen()),
+    publish(Config, KafkaTopic, [#{key => <<"probing">>, value => Payload}]),
+    Res =
+        ?block_until(
+            #{
+                ?snk_kind := kafka_consumer_handle_message,
+                ?snk_span := {complete, _},
+                message := #kafka_message{value = Payload}
+            },
+            Period
+        ),
+    case Res of
+        timeout ->
+            TimeB = erlang:monotonic_time(millisecond),
+            ConsumedTime = TimeB - TimeA,
+            %% keep probing the same topic on retry
+            ping_until_healthy(Config, KafkaTopic, Period, Timeout - ConsumedTime);
+        {ok, _} ->
+            ResourceId = resource_id(Config),
+            emqx_resource_manager:reset_metrics(ResourceId),
+            ok
+    end.
+
+ensure_connected(Config) ->
+    ?retry(
+        _Interval = 500,
+        _NAttempts = 20,
+        {ok, _} = get_client_connection(Config)
+    ),
+    ok.
+
+consumer_clientid(Config) ->
+    KafkaName = ?config(kafka_name, Config),
+    binary_to_atom(emqx_bridge_kafka_impl:make_client_id(kafka_consumer, KafkaName)).
+ +get_client_connection(Config) -> + KafkaHost = ?config(kafka_host, Config), + KafkaPort = ?config(kafka_port, Config), + ClientID = consumer_clientid(Config), + brod_client:get_connection(ClientID, KafkaHost, KafkaPort). + +get_subscriber_workers() -> + [{_, SubscriberPid, _, _}] = supervisor:which_children(emqx_bridge_kafka_consumer_sup), + brod_group_subscriber_v2:get_workers(SubscriberPid). + +wait_downs(Refs, _Timeout) when map_size(Refs) =:= 0 -> + ok; +wait_downs(Refs0, Timeout) -> + receive + {'DOWN', Ref, process, _Pid, _Reason} when is_map_key(Ref, Refs0) -> + Refs = maps:remove(Ref, Refs0), + wait_downs(Refs, Timeout) + after Timeout -> + ct:fail("processes didn't die; remaining: ~p", [map_size(Refs0)]) + end. + +create_rule_and_action_http(Config) -> + KafkaName = ?config(kafka_name, Config), + MQTTTopic = ?config(mqtt_topic, Config), + BridgeId = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_BIN, KafkaName), + ActionFn = <<(atom_to_binary(?MODULE))/binary, ":action_response">>, + Params = #{ + enable => true, + sql => <<"SELECT * FROM \"$bridges/", BridgeId/binary, "\"">>, + actions => + [ + #{ + <<"function">> => <<"republish">>, + <<"args">> => + #{ + <<"topic">> => <<"republish/", MQTTTopic/binary>>, + <<"payload">> => <<>>, + <<"qos">> => 0, + <<"retain">> => false, + <<"user_properties">> => <<"${headers}">> + } + }, + #{<<"function">> => ActionFn} + ] + }, + Path = emqx_mgmt_api_test_util:api_path(["rules"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + ct:pal("rule action params: ~p", [Params]), + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of + {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; + Error -> Error + end. + +action_response(Selected, Envs, Args) -> + ?tp(action_response, #{ + selected => Selected, + envs => Envs, + args => Args + }), + ok. + +wait_until_group_is_balanced(KafkaTopic, NPartitions, Nodes, Timeout) -> + do_wait_until_group_is_balanced(KafkaTopic, NPartitions, Nodes, Timeout, #{}). + +do_wait_until_group_is_balanced(KafkaTopic, NPartitions, Nodes, Timeout, Acc0) -> + AllPartitionsCovered = map_size(Acc0) =:= NPartitions, + PresentNodes = lists:usort([N || {_Partition, {N, _MemberId}} <- maps:to_list(Acc0)]), + AllNodesCovered = PresentNodes =:= lists:usort(Nodes), + case AllPartitionsCovered andalso AllNodesCovered of + true -> + ct:pal("group balanced: ~p", [Acc0]), + {ok, Acc0}; + false -> + receive + {kafka_assignment, Node, {Pid, MemberId, GenerationId, TopicAssignments}} -> + Event = #{ + node => Node, + pid => Pid, + member_id => MemberId, + generation_id => GenerationId, + topic_assignments => TopicAssignments + }, + Acc = reconstruct_assignments_from_events(KafkaTopic, [Event], Acc0), + do_wait_until_group_is_balanced(KafkaTopic, NPartitions, Nodes, Timeout, Acc) + after Timeout -> + {timeout, Acc0} + end + end. + +reconstruct_assignments_from_events(KafkaTopic, Events) -> + reconstruct_assignments_from_events(KafkaTopic, Events, #{}). + +reconstruct_assignments_from_events(KafkaTopic, Events0, Acc0) -> + %% when running the test multiple times with the same kafka + %% cluster, kafka will send assignments from old test topics that + %% we must discard. 
+ Assignments = [ + {MemberId, Node, P} + || #{ + node := Node, + member_id := MemberId, + topic_assignments := Assignments + } <- Events0, + #brod_received_assignment{topic = T, partition = P} <- Assignments, + T =:= KafkaTopic + ], + ct:pal("assignments for topic ~p:\n ~p", [KafkaTopic, Assignments]), + lists:foldl( + fun({MemberId, Node, Partition}, Acc) -> + Acc#{Partition => {Node, MemberId}} + end, + Acc0, + Assignments + ). + +setup_group_subscriber_spy(Node) -> + TestPid = self(), + ok = erpc:call( + Node, + fun() -> + ok = meck:new(brod_group_subscriber_v2, [ + passthrough, no_link, no_history, non_strict + ]), + ok = meck:expect( + brod_group_subscriber_v2, + assignments_received, + fun(Pid, MemberId, GenerationId, TopicAssignments) -> + ?tp( + kafka_assignment, + #{ + node => node(), + pid => Pid, + member_id => MemberId, + generation_id => GenerationId, + topic_assignments => TopicAssignments + } + ), + TestPid ! + {kafka_assignment, node(), {Pid, MemberId, GenerationId, TopicAssignments}}, + meck:passthrough([Pid, MemberId, GenerationId, TopicAssignments]) + end + ), + ok + end + ). + +wait_for_cluster_rpc(Node) -> + %% need to wait until the config handler is ready after + %% restarting during the cluster join. + ?retry( + _Sleep0 = 100, + _Attempts0 = 50, + true = is_pid(erpc:call(Node, erlang, whereis, [emqx_config_handler])) + ). + +setup_and_start_listeners(Node, NodeOpts) -> + erpc:call( + Node, + fun() -> + lists:foreach( + fun(Type) -> + Port = emqx_common_test_helpers:listener_port(NodeOpts, Type), + ok = emqx_config:put( + [listeners, Type, default, bind], + {{127, 0, 0, 1}, Port} + ), + ok = emqx_config:put_raw( + [listeners, Type, default, bind], + iolist_to_binary([<<"127.0.0.1:">>, integer_to_binary(Port)]) + ), + ok + end, + [tcp, ssl, ws, wss] + ), + ok = emqx_listeners:start(), + ok + end + ). + +cluster(Config) -> + PrivDataDir = ?config(priv_dir, Config), + PeerModule = + case os:getenv("IS_CI") of + false -> + slave; + _ -> + ct_slave + end, + Cluster = emqx_common_test_helpers:emqx_cluster( + [core, core], + [ + {apps, [emqx_conf, emqx_bridge, emqx_rule_engine, emqx_bridge_kafka]}, + {listener_ports, []}, + {peer_mod, PeerModule}, + {priv_data_dir, PrivDataDir}, + {load_schema, true}, + {start_autocluster, true}, + {schema_mod, emqx_ee_conf_schema}, + {env_handler, fun + (emqx) -> + application:set_env(emqx, boot_modules, [broker, router]), + ok; + (emqx_conf) -> + ok; + (_) -> + ok + end} + ] + ), + ct:pal("cluster: ~p", [Cluster]), + Cluster. + +start_async_publisher(Config, KafkaTopic) -> + TId = ets:new(kafka_payloads, [public, ordered_set]), + Loop = fun Go() -> + receive + stop -> ok + after 0 -> + Payload = emqx_guid:to_hexstr(emqx_guid:gen()), + publish(Config, KafkaTopic, [#{key => Payload, value => Payload}]), + ets:insert(TId, {Payload}), + timer:sleep(400), + Go() + end + end, + Pid = spawn_link(Loop), + {TId, Pid}. + +stop_async_publisher(Pid) -> + MRef = monitor(process, Pid), + Pid ! stop, + receive + {'DOWN', MRef, process, Pid, _} -> + ok + after 1_000 -> + ct:fail("publisher didn't die") + end, + ok. 
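+
+%% How the cluster testcases below observe rebalancing (a summary of the
+%% helpers above): setup_group_subscriber_spy/1 mecks
+%% brod_group_subscriber_v2:assignments_received/4 on each node so every
+%% assignment is both traced (?tp) and forwarded to the test process as a
+%% {kafka_assignment, Node, ...} message; wait_until_group_is_balanced/4
+%% then folds those messages via reconstruct_assignments_from_events/3
+%% until every partition is owned and every node holds at least one.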
+
+%%------------------------------------------------------------------------------
+%% Testcases
+%%------------------------------------------------------------------------------
+
+t_start_and_consume_ok(Config) ->
+    MQTTTopic = ?config(mqtt_topic, Config),
+    MQTTQoS = ?config(mqtt_qos, Config),
+    KafkaTopic = ?config(kafka_topic, Config),
+    NPartitions = ?config(num_partitions, Config),
+    ResourceId = resource_id(Config),
+    Payload = emqx_guid:to_hexstr(emqx_guid:gen()),
+    ?check_trace(
+        begin
+            ?assertMatch(
+                {ok, _},
+                create_bridge(Config)
+            ),
+            wait_until_subscribers_are_ready(NPartitions, 40_000),
+            ping_until_healthy(Config, _Period = 1_500, _Timeout = 24_000),
+            {ok, C} = emqtt:start_link(),
+            on_exit(fun() -> emqtt:stop(C) end),
+            {ok, _} = emqtt:connect(C),
+            {ok, _, [0]} = emqtt:subscribe(C, MQTTTopic),
+
+            {Res, {ok, _}} =
+                ?wait_async_action(
+                    publish(Config, [
+                        #{
+                            key => <<"mykey">>,
+                            value => Payload,
+                            headers => [{<<"hkey">>, <<"hvalue">>}]
+                        }
+                    ]),
+                    #{?snk_kind := kafka_consumer_handle_message, ?snk_span := {complete, _}},
+                    20_000
+                ),
+
+            %% Check that the bridge probe API doesn't leak atoms.
+            ProbeRes0 = probe_bridge_api(Config),
+            ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0),
+            AtomsBefore = erlang:system_info(atom_count),
+            %% Probe again; shouldn't have created more atoms.
+            ProbeRes1 = probe_bridge_api(Config),
+            ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1),
+            AtomsAfter = erlang:system_info(atom_count),
+            ?assertEqual(AtomsBefore, AtomsAfter),
+
+            Res
+        end,
+        fun({_Partition, OffsetReply}, Trace) ->
+            ?assertMatch([_, _ | _], ?of_kind(kafka_consumer_handle_message, Trace)),
+            Published = receive_published(),
+            ?assertMatch(
+                [
+                    #{
+                        qos := MQTTQoS,
+                        topic := MQTTTopic,
+                        payload := _
+                    }
+                ],
+                Published
+            ),
+            [#{payload := PayloadBin}] = Published,
+            ?assertMatch(
+                #{
+                    <<"value">> := Payload,
+                    <<"key">> := <<"mykey">>,
+                    <<"topic">> := KafkaTopic,
+                    <<"offset">> := OffsetReply,
+                    <<"headers">> := #{<<"hkey">> := <<"hvalue">>}
+                },
+                emqx_utils_json:decode(PayloadBin, [return_maps]),
+                #{
+                    offset_reply => OffsetReply,
+                    kafka_topic => KafkaTopic,
+                    payload => Payload
+                }
+            ),
+            ?assertEqual(1, emqx_resource_metrics:received_get(ResourceId)),
+            ok
+        end
+    ),
+    ok.
+
+t_multiple_topic_mappings(Config) ->
+    TopicMapping = ?config(topic_mapping, Config),
+    MQTTTopics = [MQTTTopic || #{mqtt_topic := MQTTTopic} <- TopicMapping],
+    KafkaTopics = [KafkaTopic || #{kafka_topic := KafkaTopic} <- TopicMapping],
+    NumMQTTTopics = length(MQTTTopics),
+    NPartitions = ?config(num_partitions, Config),
+    ResourceId = resource_id(Config),
+    Payload = emqx_guid:to_hexstr(emqx_guid:gen()),
+    ?check_trace(
+        begin
+            ?assertMatch(
+                {ok, {{_, 201, _}, _, _}},
+                create_bridge_api(Config)
+            ),
+            wait_until_subscribers_are_ready(NPartitions, 40_000),
+            lists:foreach(
+                fun(KafkaTopic) ->
+                    ping_until_healthy(Config, KafkaTopic, _Period = 1_500, _Timeout = 24_000)
+                end,
+                KafkaTopics
+            ),
+
+            {ok, C} = emqtt:start_link([{proto_ver, v5}]),
+            on_exit(fun() -> emqtt:stop(C) end),
+            {ok, _} = emqtt:connect(C),
+            lists:foreach(
+                fun(MQTTTopic) ->
+                    %% we use the highest QoS so that we can check what
+                    %% the subscription was.
+ QoS2Granted = 2, + {ok, _, [QoS2Granted]} = emqtt:subscribe(C, MQTTTopic, ?QOS_2) + end, + MQTTTopics + ), + + {ok, SRef0} = + snabbkaffe:subscribe( + ?match_event(#{ + ?snk_kind := kafka_consumer_handle_message, ?snk_span := {complete, _} + }), + NumMQTTTopics, + _Timeout0 = 20_000 + ), + lists:foreach( + fun(KafkaTopic) -> + publish(Config, KafkaTopic, [ + #{ + key => <<"mykey">>, + value => Payload, + headers => [{<<"hkey">>, <<"hvalue">>}] + } + ]) + end, + KafkaTopics + ), + {ok, _} = snabbkaffe:receive_events(SRef0), + + %% Check that the bridge probe API doesn't leak atoms. + ProbeRes0 = probe_bridge_api(Config), + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0), + AtomsBefore = erlang:system_info(atom_count), + %% Probe again; shouldn't have created more atoms. + ProbeRes1 = probe_bridge_api(Config), + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1), + AtomsAfter = erlang:system_info(atom_count), + ?assertEqual(AtomsBefore, AtomsAfter), + + ok + end, + fun(Trace) -> + %% two messages processed with begin/end events + ?assertMatch([_, _, _, _ | _], ?of_kind(kafka_consumer_handle_message, Trace)), + Published = receive_published(#{n => NumMQTTTopics}), + lists:foreach( + fun( + #{ + mqtt_topic := MQTTTopic, + qos := MQTTQoS + } + ) -> + [Msg] = [ + Msg + || Msg = #{topic := T} <- Published, + T =:= MQTTTopic + ], + ?assertMatch( + #{ + qos := MQTTQoS, + topic := MQTTTopic, + payload := _ + }, + Msg + ) + end, + TopicMapping + ), + %% check that we observed the different payload templates + %% as configured. + Payloads = + lists:sort([ + case emqx_utils_json:safe_decode(P, [return_maps]) of + {ok, Decoded} -> Decoded; + {error, _} -> P + end + || #{payload := P} <- Published + ]), + ?assertMatch( + [ + #{ + <<"headers">> := #{<<"hkey">> := <<"hvalue">>}, + <<"key">> := <<"mykey">>, + <<"offset">> := Offset, + <<"topic">> := KafkaTopic, + <<"ts">> := TS, + <<"ts_type">> := <<"create">>, + <<"value">> := Payload + }, + <<"v = ", Payload/binary>> + ] when is_integer(Offset) andalso is_integer(TS) andalso is_binary(KafkaTopic), + Payloads + ), + ?assertEqual(2, emqx_resource_metrics:received_get(ResourceId)), + ok + end + ), + ok. + +t_on_get_status(Config) -> + case proplists:get_bool(skip_does_not_apply, Config) of + true -> + ok; + false -> + do_t_on_get_status(Config) + end. + +do_t_on_get_status(Config) -> + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + KafkaName = ?config(kafka_name, Config), + ResourceId = emqx_bridge_resource:resource_id(kafka_consumer, KafkaName), + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + %% Since the connection process is async, we give it some time to + %% stabilize and avoid flakiness. + ct:sleep(1_200), + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)), + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ct:sleep(500), + ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId)) + end), + ok. + +%% ensure that we can create and use the bridge successfully after +%% creating it with bad config. +t_failed_creation_then_fixed(Config) -> + case proplists:get_bool(skip_does_not_apply, Config) of + true -> + ok; + false -> + ?check_trace(do_t_failed_creation_then_fixed(Config), []) + end. 
+ +do_t_failed_creation_then_fixed(Config) -> + ct:timetrap({seconds, 180}), + MQTTTopic = ?config(mqtt_topic, Config), + MQTTQoS = ?config(mqtt_qos, Config), + KafkaTopic = ?config(kafka_topic, Config), + NPartitions = ?config(num_partitions, Config), + {ok, _} = create_bridge(Config, #{ + <<"authentication">> => #{<<"password">> => <<"wrong password">>} + }), + ?retry( + _Interval0 = 200, + _Attempts0 = 10, + begin + ClientConn0 = get_client_connection(Config), + case ClientConn0 of + {error, client_down} -> + ok; + {error, {client_down, _Stacktrace}} -> + ok; + _ -> + error({client_should_be_down, ClientConn0}) + end + end + ), + %% now, update with the correct configuration + ?assertMatch( + {{ok, _}, {ok, _}}, + ?wait_async_action( + update_bridge_api(Config), + #{?snk_kind := kafka_consumer_subscriber_started}, + 60_000 + ) + ), + wait_until_subscribers_are_ready(NPartitions, 120_000), + ResourceId = resource_id(Config), + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)), + ping_until_healthy(Config, _Period = 1_500, _Timeout = 24_000), + + {ok, C} = emqtt:start_link(), + on_exit(fun() -> emqtt:stop(C) end), + {ok, _} = emqtt:connect(C), + {ok, _, [0]} = emqtt:subscribe(C, MQTTTopic), + Payload = emqx_guid:to_hexstr(emqx_guid:gen()), + + {_, {ok, _}} = + ?wait_async_action( + publish(Config, [ + #{ + key => <<"mykey">>, + value => Payload, + headers => [{<<"hkey">>, <<"hvalue">>}] + } + ]), + #{?snk_kind := kafka_consumer_handle_message, ?snk_span := {complete, _}}, + 20_000 + ), + Published = receive_published(), + ?assertMatch( + [ + #{ + qos := MQTTQoS, + topic := MQTTTopic, + payload := _ + } + ], + Published + ), + [#{payload := PayloadBin}] = Published, + ?assertMatch( + #{ + <<"value">> := Payload, + <<"key">> := <<"mykey">>, + <<"topic">> := KafkaTopic, + <<"offset">> := _, + <<"headers">> := #{<<"hkey">> := <<"hvalue">>} + }, + emqx_utils_json:decode(PayloadBin, [return_maps]), + #{ + kafka_topic => KafkaTopic, + payload => Payload + } + ), + ok. + +%% check that we commit the offsets so that restarting an emqx node or +%% recovering from a network partition will make the subscribers +%% consume the messages produced during the down time. +t_receive_after_recovery(Config) -> + case proplists:get_bool(skip_does_not_apply, Config) of + true -> + ok; + false -> + do_t_receive_after_recovery(Config) + end. + +do_t_receive_after_recovery(Config) -> + ct:timetrap(120_000), + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + MQTTTopic = ?config(mqtt_topic, Config), + NPartitions = ?config(num_partitions, Config), + KafkaName = ?config(kafka_name, Config), + KafkaNameA = binary_to_atom(KafkaName), + KafkaClientId = consumer_clientid(Config), + ResourceId = resource_id(Config), + ?check_trace( + begin + {ok, _} = create_bridge( + Config, + #{<<"kafka">> => #{<<"offset_reset_policy">> => <<"earliest">>}} + ), + ping_until_healthy(Config, _Period = 1_500, _Timeout0 = 24_000), + {ok, connected} = emqx_resource_manager:health_check(ResourceId), + %% 0) ensure each partition commits its offset so it can + %% recover later. + Messages0 = [ + #{ + key => <<"commit", (integer_to_binary(N))/binary>>, + value => <<"commit", (integer_to_binary(N))/binary>> + } + || N <- lists:seq(1, NPartitions) + ], + %% we do distinct passes over this producing part so that + %% wolff won't batch everything together. 
+ lists:foreach( + fun(Msg) -> + {_, {ok, _}} = + ?wait_async_action( + publish(Config, [Msg]), + #{ + ?snk_kind := kafka_consumer_handle_message, + ?snk_span := {complete, {ok, commit, _}} + }, + _Timeout1 = 2_000 + ) + end, + Messages0 + ), + ?retry( + _Interval = 500, + _NAttempts = 20, + begin + GroupId = emqx_bridge_kafka_impl_consumer:consumer_group_id(KafkaNameA), + {ok, [#{partitions := Partitions}]} = brod:fetch_committed_offsets( + KafkaClientId, GroupId + ), + NPartitions = length(Partitions) + end + ), + %% we need some time to avoid flakiness due to the + %% subscription happening while the consumers are still + %% publishing messages... + ct:sleep(500), + + %% 1) cut the connection with kafka. + WorkerRefs = maps:from_list([ + {monitor(process, Pid), Pid} + || {_TopicPartition, Pid} <- + maps:to_list(get_subscriber_workers()) + ]), + NumMsgs = 50, + Messages1 = [ + begin + X = emqx_guid:to_hexstr(emqx_guid:gen()), + #{ + key => X, + value => X + } + end + || _ <- lists:seq(1, NumMsgs) + ], + {ok, C} = emqtt:start_link(), + on_exit(fun() -> emqtt:stop(C) end), + {ok, _} = emqtt:connect(C), + {ok, _, [0]} = emqtt:subscribe(C, MQTTTopic), + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + wait_downs(WorkerRefs, _Timeout2 = 1_000), + %% 2) publish messages while the consumer is down. + %% we use `pmap' to avoid wolff sending the whole + %% batch to a single partition. + emqx_utils:pmap(fun(Msg) -> publish(Config, [Msg]) end, Messages1), + ok + end), + %% 3) restore and consume messages + {ok, SRef1} = snabbkaffe:subscribe( + ?match_event(#{ + ?snk_kind := kafka_consumer_handle_message, + ?snk_span := {complete, _} + }), + NumMsgs, + _Timeout3 = 60_000 + ), + {ok, _} = snabbkaffe:receive_events(SRef1), + #{num_msgs => NumMsgs, msgs => lists:sort(Messages1)} + end, + fun(#{num_msgs := NumMsgs, msgs := ExpectedMsgs}, Trace) -> + Received0 = wait_for_expected_published_messages(ExpectedMsgs, _Timeout4 = 2_000), + Received1 = + lists:map( + fun(#{payload := #{<<"key">> := K, <<"value">> := V}}) -> + #{key => K, value => V} + end, + Received0 + ), + Received = lists:sort(Received1), + ?assertEqual(ExpectedMsgs, Received), + ?assert(length(?of_kind(kafka_consumer_handle_message, Trace)) > NumMsgs * 2), + ok + end + ), + ok. 
+ +t_bridge_rule_action_source(Config) -> + MQTTTopic = ?config(mqtt_topic, Config), + KafkaTopic = ?config(kafka_topic, Config), + ResourceId = resource_id(Config), + ?check_trace( + begin + {ok, _} = create_bridge(Config), + ping_until_healthy(Config, _Period = 1_500, _Timeout = 24_000), + + {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config), + on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), + + RepublishTopic = <<"republish/", MQTTTopic/binary>>, + {ok, C} = emqtt:start_link([{proto_ver, v5}]), + on_exit(fun() -> emqtt:stop(C) end), + {ok, _} = emqtt:connect(C), + {ok, _, [0]} = emqtt:subscribe(C, RepublishTopic), + + UniquePayload = emqx_guid:to_hexstr(emqx_guid:gen()), + {_, {ok, _}} = + ?wait_async_action( + publish(Config, [ + #{ + key => UniquePayload, + value => UniquePayload, + headers => [{<<"hkey">>, <<"hvalue">>}] + } + ]), + #{?snk_kind := action_response}, + 5_000 + ), + + #{republish_topic => RepublishTopic, unique_payload => UniquePayload} + end, + fun(Res, _Trace) -> + #{ + republish_topic := RepublishTopic, + unique_payload := UniquePayload + } = Res, + Published = receive_published(), + ?assertMatch( + [ + #{ + topic := RepublishTopic, + properties := #{'User-Property' := [{<<"hkey">>, <<"hvalue">>}]}, + payload := _Payload, + dup := false, + qos := 0, + retain := false + } + ], + Published + ), + [#{payload := RawPayload}] = Published, + ?assertMatch( + #{ + <<"key">> := UniquePayload, + <<"value">> := UniquePayload, + <<"headers">> := #{<<"hkey">> := <<"hvalue">>}, + <<"topic">> := KafkaTopic + }, + emqx_utils_json:decode(RawPayload, [return_maps]) + ), + ?retry( + _Interval = 200, + _NAttempts = 20, + ?assertEqual(1, emqx_resource_metrics:received_get(ResourceId)) + ), + ok + end + ), + ok. + +%% checks that an existing cluster can be configured with a kafka +%% consumer bridge and that the consumers will distribute over the two +%% nodes. +t_cluster_group(Config) -> + ct:timetrap({seconds, 150}), + NPartitions = ?config(num_partitions, Config), + KafkaTopic = ?config(kafka_topic, Config), + KafkaName = ?config(kafka_name, Config), + ResourceId = resource_id(Config), + BridgeId = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_BIN, KafkaName), + Cluster = cluster(Config), + ?check_trace( + begin + Nodes = + [_N1, N2 | _] = [ + emqx_common_test_helpers:start_slave(Name, Opts) + || {Name, Opts} <- Cluster + ], + on_exit(fun() -> + emqx_utils:pmap( + fun(N) -> + ct:pal("stopping ~p", [N]), + ok = emqx_common_test_helpers:stop_slave(N) + end, + Nodes + ) + end), + lists:foreach(fun setup_group_subscriber_spy/1, Nodes), + {ok, SRef0} = snabbkaffe:subscribe( + ?match_event(#{?snk_kind := kafka_consumer_subscriber_started}), + length(Nodes), + 15_000 + ), + wait_for_cluster_rpc(N2), + erpc:call(N2, fun() -> {ok, _} = create_bridge(Config) end), + {ok, _} = snabbkaffe:receive_events(SRef0), + lists:foreach( + fun(N) -> + ?assertMatch( + {ok, _}, + erpc:call(N, emqx_bridge, lookup, [BridgeId]), + #{node => N} + ) + end, + Nodes + ), + + %% give kafka some time to rebalance the group; we need to + %% sleep so that the two nodes have time to distribute the + %% subscribers, rather than just one node containing all + %% of them. 
+ {ok, _} = wait_until_group_is_balanced(KafkaTopic, NPartitions, Nodes, 30_000), + lists:foreach( + fun(N) -> + ?assertEqual( + {ok, connected}, + erpc:call(N, emqx_resource_manager, health_check, [ResourceId]), + #{node => N} + ) + end, + Nodes + ), + + #{nodes => Nodes} + end, + fun(Res, Trace0) -> + #{nodes := Nodes} = Res, + Trace1 = ?of_kind(kafka_assignment, Trace0), + Assignments = reconstruct_assignments_from_events(KafkaTopic, Trace1), + ?assertEqual( + lists:usort(Nodes), + lists:usort([ + N + || {_Partition, {N, _MemberId}} <- + maps:to_list(Assignments) + ]) + ), + ?assertEqual(NPartitions, map_size(Assignments)), + ok + end + ), + ok. + +%% test that the kafka consumer group rebalances correctly if a bridge +%% already exists when a new EMQX node joins the cluster. +t_node_joins_existing_cluster(Config) -> + ct:timetrap({seconds, 150}), + TopicMapping = ?config(topic_mapping, Config), + [MQTTTopic] = [MQTTTopic || #{mqtt_topic := MQTTTopic} <- TopicMapping], + NPartitions = ?config(num_partitions, Config), + KafkaTopic = ?config(kafka_topic, Config), + KafkaName = ?config(kafka_name, Config), + ResourceId = resource_id(Config), + BridgeId = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_BIN, KafkaName), + Cluster = cluster(Config), + ?check_trace( + begin + [{Name1, Opts1}, {Name2, Opts2} | _] = Cluster, + ct:pal("starting ~p", [Name1]), + N1 = emqx_common_test_helpers:start_slave(Name1, Opts1), + on_exit(fun() -> + ct:pal("stopping ~p", [N1]), + ok = emqx_common_test_helpers:stop_slave(N1) + end), + setup_group_subscriber_spy(N1), + {{ok, _}, {ok, _}} = + ?wait_async_action( + erpc:call(N1, fun() -> + {ok, _} = create_bridge( + Config, + #{ + <<"kafka">> => + #{ + <<"offset_reset_policy">> => + <<"earliest">> + } + } + ) + end), + #{?snk_kind := kafka_consumer_subscriber_started}, + 15_000 + ), + ?assertMatch({ok, _}, erpc:call(N1, emqx_bridge, lookup, [BridgeId])), + {ok, _} = wait_until_group_is_balanced(KafkaTopic, NPartitions, [N1], 30_000), + ?assertEqual( + {ok, connected}, + erpc:call(N1, emqx_resource_manager, health_check, [ResourceId]) + ), + + %% Now, we start the second node and have it join the cluster. + setup_and_start_listeners(N1, Opts1), + TCPPort1 = emqx_common_test_helpers:listener_port(Opts1, tcp), + {ok, C1} = emqtt:start_link([{port, TCPPort1}, {proto_ver, v5}]), + on_exit(fun() -> catch emqtt:stop(C1) end), + {ok, _} = emqtt:connect(C1), + {ok, _, [2]} = emqtt:subscribe(C1, MQTTTopic, 2), + + {ok, SRef0} = snabbkaffe:subscribe( + ?match_event(#{?snk_kind := kafka_consumer_subscriber_started}), + 1, + 30_000 + ), + ct:pal("starting ~p", [Name2]), + N2 = emqx_common_test_helpers:start_slave(Name2, Opts2), + on_exit(fun() -> + ct:pal("stopping ~p", [N2]), + ok = emqx_common_test_helpers:stop_slave(N2) + end), + setup_group_subscriber_spy(N2), + Nodes = [N1, N2], + wait_for_cluster_rpc(N2), + + {ok, _} = snabbkaffe:receive_events(SRef0), + ?retry( + _Sleep1 = 100, + _Attempts1 = 50, + ?assertMatch({ok, _}, erpc:call(N2, emqx_bridge, lookup, [BridgeId])) + ), + + %% Give some time for the consumers in both nodes to + %% rebalance. + {ok, _} = wait_until_group_is_balanced(KafkaTopic, NPartitions, Nodes, 30_000), + %% Publish some messages so we can check they came from each node. 
+ ?retry( + _Sleep2 = 100, + _Attempts2 = 50, + true = erpc:call(N2, emqx_router, has_routes, [MQTTTopic]) + ), + {ok, SRef1} = + snabbkaffe:subscribe( + ?match_event(#{ + ?snk_kind := kafka_consumer_handle_message, + ?snk_span := {complete, _} + }), + NPartitions, + 20_000 + ), + lists:foreach( + fun(N) -> + Key = <<"k", (integer_to_binary(N))/binary>>, + Val = <<"v", (integer_to_binary(N))/binary>>, + publish(Config, KafkaTopic, [#{key => Key, value => Val}]) + end, + lists:seq(1, NPartitions) + ), + {ok, _} = snabbkaffe:receive_events(SRef1), + + #{nodes => Nodes} + end, + fun(Res, Trace0) -> + #{nodes := Nodes} = Res, + Trace1 = ?of_kind(kafka_assignment, Trace0), + Assignments = reconstruct_assignments_from_events(KafkaTopic, Trace1), + NodeAssignments = lists:usort([ + N + || {_Partition, {N, _MemberId}} <- + maps:to_list(Assignments) + ]), + ?assertEqual(lists:usort(Nodes), NodeAssignments), + ?assertEqual(NPartitions, map_size(Assignments)), + Published = receive_published(#{n => NPartitions, timeout => 3_000}), + ct:pal("published:\n ~p", [Published]), + PublishingNodesFromTrace = + [ + N + || #{ + ?snk_kind := kafka_consumer_handle_message, + ?snk_span := start, + ?snk_meta := #{node := N} + } <- Trace0 + ], + ?assertEqual(lists:usort(Nodes), lists:usort(PublishingNodesFromTrace)), + ok + end + ), + ok. + +%% Checks that the consumers get rebalanced after an EMQX nodes goes +%% down. +t_cluster_node_down(Config) -> + ct:timetrap({seconds, 150}), + TopicMapping = ?config(topic_mapping, Config), + [MQTTTopic] = [MQTTTopic || #{mqtt_topic := MQTTTopic} <- TopicMapping], + NPartitions = ?config(num_partitions, Config), + KafkaTopic = ?config(kafka_topic, Config), + KafkaName = ?config(kafka_name, Config), + BridgeId = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_BIN, KafkaName), + Cluster = cluster(Config), + ?check_trace( + begin + {_N2, Opts2} = lists:nth(2, Cluster), + Nodes = + [N1, N2 | _] = + lists:map( + fun({Name, Opts}) -> + ct:pal("starting ~p", [Name]), + emqx_common_test_helpers:start_slave(Name, Opts) + end, + Cluster + ), + on_exit(fun() -> + emqx_utils:pmap( + fun(N) -> + ct:pal("stopping ~p", [N]), + ok = emqx_common_test_helpers:stop_slave(N) + end, + Nodes + ) + end), + lists:foreach(fun setup_group_subscriber_spy/1, Nodes), + {ok, SRef0} = snabbkaffe:subscribe( + ?match_event(#{?snk_kind := kafka_consumer_subscriber_started}), + length(Nodes), + 15_000 + ), + wait_for_cluster_rpc(N2), + erpc:call(N2, fun() -> {ok, _} = create_bridge(Config) end), + {ok, _} = snabbkaffe:receive_events(SRef0), + lists:foreach( + fun(N) -> + ?retry( + _Sleep1 = 100, + _Attempts1 = 50, + ?assertMatch( + {ok, _}, + erpc:call(N, emqx_bridge, lookup, [BridgeId]), + #{node => N} + ) + ) + end, + Nodes + ), + {ok, _} = wait_until_group_is_balanced(KafkaTopic, NPartitions, Nodes, 30_000), + + %% Now, we stop one of the nodes and watch the group + %% rebalance. + setup_and_start_listeners(N2, Opts2), + TCPPort = emqx_common_test_helpers:listener_port(Opts2, tcp), + {ok, C} = emqtt:start_link([{port, TCPPort}, {proto_ver, v5}]), + on_exit(fun() -> catch emqtt:stop(C) end), + {ok, _} = emqtt:connect(C), + {ok, _, [2]} = emqtt:subscribe(C, MQTTTopic, 2), + {TId, Pid} = start_async_publisher(Config, KafkaTopic), + + ct:pal("stopping node ~p", [N1]), + ok = emqx_common_test_helpers:stop_slave(N1), + + %% Give some time for the consumers in remaining node to + %% rebalance. 
+            {ok, _} = wait_until_group_is_balanced(KafkaTopic, NPartitions, [N2], 60_000),
+
+            ok = stop_async_publisher(Pid),
+
+            #{nodes => Nodes, payloads_tid => TId}
+        end,
+        fun(Res, Trace0) ->
+            #{nodes := Nodes, payloads_tid := TId} = Res,
+            [_N1, N2 | _] = Nodes,
+            Trace1 = ?of_kind(kafka_assignment, Trace0),
+            Assignments = reconstruct_assignments_from_events(KafkaTopic, Trace1),
+            NodeAssignments = lists:usort([
+                N
+             || {_Partition, {N, _MemberId}} <-
+                    maps:to_list(Assignments)
+            ]),
+            %% The surviving node has all the partitions assigned to
+            %% it.
+            ?assertEqual([N2], NodeAssignments),
+            ?assertEqual(NPartitions, map_size(Assignments)),
+            NumPublished = ets:info(TId, size),
+            %% All published messages are eventually received.
+            Published = receive_published(#{n => NumPublished, timeout => 3_000}),
+            ct:pal("published:\n  ~p", [Published]),
+            ok
+        end
+    ),
+    ok.
+
+t_begin_offset_earliest(Config) ->
+    MQTTTopic = ?config(mqtt_topic, Config),
+    ResourceId = resource_id(Config),
+    Payload = emqx_guid:to_hexstr(emqx_guid:gen()),
+    {ok, C} = emqtt:start_link([{proto_ver, v5}]),
+    on_exit(fun() -> emqtt:stop(C) end),
+    {ok, _} = emqtt:connect(C),
+    {ok, _, [2]} = emqtt:subscribe(C, MQTTTopic, 2),
+
+    ?check_trace(
+        begin
+            %% publish some messages before the bridge is started.
+            NumMessages = 5,
+            lists:foreach(
+                fun(N) ->
+                    publish(Config, [
+                        #{
+                            key => <<"mykey", (integer_to_binary(N))/binary>>,
+                            value => Payload,
+                            headers => [{<<"hkey">>, <<"hvalue">>}]
+                        }
+                    ])
+                end,
+                lists:seq(1, NumMessages)
+            ),
+
+            {ok, _} = create_bridge(Config, #{
+                <<"kafka">> => #{<<"offset_reset_policy">> => <<"earliest">>}
+            }),
+
+            #{num_published => NumMessages}
+        end,
+        fun(Res, _Trace) ->
+            #{num_published := NumMessages} = Res,
+            %% we should receive messages published before starting
+            %% the consumers
+            Published = receive_published(#{n => NumMessages}),
+            Payloads = lists:map(
+                fun(#{payload := P}) -> emqx_utils_json:decode(P, [return_maps]) end,
+                Published
+            ),
+            ?assert(
+                lists:all(
+                    fun(#{<<"value">> := V}) -> V =:= Payload end,
+                    Payloads
+                ),
+                #{payloads => Payloads}
+            ),
+            ?assertEqual(NumMessages, emqx_resource_metrics:received_get(ResourceId)),
+            ok
+        end
+    ),
+    ok.
diff --git a/lib-ee/emqx_ee_bridge/test/emqx_bridge_impl_kafka_producer_SUITE.erl b/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_producer_SUITE.erl
similarity index 60%
rename from lib-ee/emqx_ee_bridge/test/emqx_bridge_impl_kafka_producer_SUITE.erl
rename to apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_producer_SUITE.erl
index da4c1007a..d1a29fffe 100644
--- a/lib-ee/emqx_ee_bridge/test/emqx_bridge_impl_kafka_producer_SUITE.erl
+++ b/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_producer_SUITE.erl
@@ -2,16 +2,17 @@
 %% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
 %%--------------------------------------------------------------------
 
--module(emqx_bridge_impl_kafka_producer_SUITE).
+-module(emqx_bridge_kafka_impl_producer_SUITE).
 
 -compile(nowarn_export_all).
 -compile(export_all).
 
 -include_lib("eunit/include/eunit.hrl").
 -include_lib("common_test/include/ct.hrl").
+-include_lib("snabbkaffe/include/snabbkaffe.hrl").
 -include_lib("brod/include/brod.hrl").
 
--define(PRODUCER, emqx_bridge_impl_kafka).
+-define(PRODUCER, emqx_bridge_kafka_impl_producer).
 
 %%------------------------------------------------------------------------------
 %% Things for REST API tests
@@ -30,23 +31,31 @@
 -include_lib("emqx/include/emqx.hrl").
 -include("emqx_dashboard.hrl").
--define(CONTENT_TYPE, "application/x-www-form-urlencoded"). - -define(HOST, "http://127.0.0.1:18083"). %% -define(API_VERSION, "v5"). -define(BASE_PATH, "/api/v5"). --define(APP_DASHBOARD, emqx_dashboard). --define(APP_MANAGEMENT, emqx_management). +%% TODO: rename this to `kafka_producer' after alias support is added +%% to hocon; keeping this as just `kafka' for backwards compatibility. +-define(BRIDGE_TYPE, "kafka"). + +-define(APPS, [emqx_resource, emqx_bridge, emqx_rule_engine, emqx_bridge_kafka]). %%------------------------------------------------------------------------------ %% CT boilerplate %%------------------------------------------------------------------------------ all() -> - emqx_common_test_helpers:all(?MODULE). + [ + {group, on_query}, + {group, on_query_async} + ]. + +groups() -> + All = emqx_common_test_helpers:all(?MODULE), + [{on_query, All}, {on_query_async, All}]. wait_until_kafka_is_up() -> wait_until_kafka_is_up(0). @@ -64,8 +73,12 @@ wait_until_kafka_is_up(Attempts) -> end. init_per_suite(Config) -> + %% ensure loaded + _ = application:load(emqx_ee_bridge), + _ = emqx_ee_bridge:module_info(), + application:load(emqx_bridge), ok = emqx_common_test_helpers:start_apps([emqx_conf]), - ok = emqx_connector_test_helpers:start_apps([emqx_resource, emqx_bridge, emqx_rule_engine]), + ok = emqx_connector_test_helpers:start_apps(?APPS), {ok, _} = application:ensure_all_started(emqx_connector), emqx_mgmt_api_test_util:init_suite(), wait_until_kafka_is_up(), @@ -85,10 +98,23 @@ init_per_suite(Config) -> end_per_suite(_Config) -> emqx_mgmt_api_test_util:end_suite(), ok = emqx_common_test_helpers:stop_apps([emqx_conf]), - ok = emqx_connector_test_helpers:stop_apps([emqx_bridge, emqx_resource, emqx_rule_engine]), + ok = emqx_connector_test_helpers:stop_apps(lists:reverse(?APPS)), _ = application:stop(emqx_connector), ok. +init_per_group(GroupName, Config) -> + [{query_api, GroupName} | Config]. + +end_per_group(_, _) -> + ok. + +init_per_testcase(_TestCase, Config) -> + Config. + +end_per_testcase(_TestCase, _Config) -> + delete_all_bridges(), + ok. + set_special_configs(emqx_management) -> Listeners = #{http => #{port => 8081}}, Config = #{ @@ -106,20 +132,23 @@ set_special_configs(_) -> %% Test cases for all combinations of SSL, no SSL and authentication types %%------------------------------------------------------------------------------ -t_publish_no_auth(_CtConfig) -> - publish_with_and_without_ssl("none"). +t_publish_no_auth(CtConfig) -> + publish_with_and_without_ssl(CtConfig, "none"). -t_publish_sasl_plain(_CtConfig) -> - publish_with_and_without_ssl(valid_sasl_plain_settings()). +t_publish_no_auth_key_dispatch(CtConfig) -> + publish_with_and_without_ssl(CtConfig, "none", #{"partition_strategy" => "key_dispatch"}). -t_publish_sasl_scram256(_CtConfig) -> - publish_with_and_without_ssl(valid_sasl_scram256_settings()). +t_publish_sasl_plain(CtConfig) -> + publish_with_and_without_ssl(CtConfig, valid_sasl_plain_settings()). -t_publish_sasl_scram512(_CtConfig) -> - publish_with_and_without_ssl(valid_sasl_scram512_settings()). +t_publish_sasl_scram256(CtConfig) -> + publish_with_and_without_ssl(CtConfig, valid_sasl_scram256_settings()). -t_publish_sasl_kerberos(_CtConfig) -> - publish_with_and_without_ssl(valid_sasl_kerberos_settings()). +t_publish_sasl_scram512(CtConfig) -> + publish_with_and_without_ssl(CtConfig, valid_sasl_scram512_settings()). + +t_publish_sasl_kerberos(CtConfig) -> + publish_with_and_without_ssl(CtConfig, valid_sasl_kerberos_settings()). 
 %%------------------------------------------------------------------------------
 %% Test cases for REST api
@@ -206,7 +235,7 @@ kafka_bridge_rest_api_all_auth_methods(UseSSL) ->
     ok.
 
 kafka_bridge_rest_api_helper(Config) ->
-    BridgeType = "kafka",
+    BridgeType = ?BRIDGE_TYPE,
     BridgeName = "my_kafka_bridge",
     BridgeID = emqx_bridge_resource:bridge_id(
         erlang:list_to_binary(BridgeType),
@@ -217,12 +246,14 @@ kafka_bridge_rest_api_helper(Config) ->
         erlang:list_to_binary(BridgeName)
     ),
     UrlEscColon = "%3A",
+    BridgesProbeParts = ["bridges_probe"],
     BridgeIdUrlEnc = BridgeType ++ UrlEscColon ++ BridgeName,
     BridgesParts = ["bridges"],
     BridgesPartsIdDeleteAlsoActions = ["bridges", BridgeIdUrlEnc ++ "?also_delete_dep_actions"],
-    OpUrlFun = fun(OpName) -> ["bridges", BridgeIdUrlEnc, "operation", OpName] end,
-    BridgesPartsOpDisable = OpUrlFun("disable"),
-    BridgesPartsOpEnable = OpUrlFun("enable"),
+    OpUrlFun = fun(OpName) -> ["bridges", BridgeIdUrlEnc, OpName] end,
+    EnableFun = fun(Enable) -> ["bridges", BridgeIdUrlEnc, "enable", Enable] end,
+    BridgesPartsOpDisable = EnableFun("false"),
+    BridgesPartsOpEnable = EnableFun("true"),
     BridgesPartsOpRestart = OpUrlFun("restart"),
     BridgesPartsOpStop = OpUrlFun("stop"),
     %% List bridges
@@ -249,17 +280,18 @@ kafka_bridge_rest_api_helper(Config) ->
     %% Create new Kafka bridge
     KafkaTopic = "test-topic-one-partition",
     CreateBodyTmp = #{
-        <<"type">> => <<"kafka">>,
+        <<"type">> => <<?BRIDGE_TYPE>>,
         <<"name">> => <<"my_kafka_bridge">>,
-        <<"bootstrap_hosts">> => maps:get(<<"bootstrap_hosts">>, Config),
+        <<"bootstrap_hosts">> => iolist_to_binary(maps:get(<<"bootstrap_hosts">>, Config)),
         <<"enable">> => true,
         <<"authentication">> => maps:get(<<"authentication">>, Config),
-        <<"producer">> => #{
-            <<"mqtt">> => #{
-                topic => <<"t/#">>
-            },
-            <<"kafka">> => #{
-                <<"topic">> => erlang:list_to_binary(KafkaTopic)
+        <<"local_topic">> => <<"t/#">>,
+        <<"kafka">> => #{
+            <<"topic">> => iolist_to_binary(KafkaTopic),
+            <<"buffer">> => #{<<"memory_overload_protection">> => <<"false">>},
+            <<"message">> => #{
+                <<"key">> => <<"${clientid}">>,
+                <<"value">> => <<"${.payload}">>
             }
         }
     },
@@ -271,8 +303,15 @@ kafka_bridge_rest_api_helper(Config) ->
     {ok, 201, _Data} = show(http_post(BridgesParts, show(CreateBody))),
     %% Check that the new bridge is in the list of bridges
     true = MyKafkaBridgeExists(),
+    %% Probe should work
+    {ok, 204, _} = http_post(BridgesProbeParts, CreateBody),
+    %% no extra atoms should be created when probing
+    AtomsBefore = erlang:system_info(atom_count),
+    {ok, 204, _} = http_post(BridgesProbeParts, CreateBody),
+    AtomsAfter = erlang:system_info(atom_count),
+    ?assertEqual(AtomsBefore, AtomsAfter),
     %% Create a rule that uses the bridge
-    {ok, 201, _Rule} = http_post(
+    {ok, 201, Rule} = http_post(
         ["rules"],
         #{
             <<"name">> => <<"kafka_bridge_rest_api_helper_rule">>,
@@ -281,17 +320,16 @@ kafka_bridge_rest_api_helper(Config) ->
             <<"sql">> => <<"SELECT * from \"kafka_bridge_topic/#\"">>
         }
     ),
+    #{<<"id">> := RuleId} = emqx_utils_json:decode(Rule, [return_maps]),
     %% counters should be empty before
     ?assertEqual(0, emqx_resource_metrics:matched_get(ResourceId)),
     ?assertEqual(0, emqx_resource_metrics:success_get(ResourceId)),
     ?assertEqual(0, emqx_resource_metrics:dropped_get(ResourceId)),
     ?assertEqual(0, emqx_resource_metrics:failed_get(ResourceId)),
     ?assertEqual(0, emqx_resource_metrics:inflight_get(ResourceId)),
-    ?assertEqual(0, emqx_resource_metrics:batching_get(ResourceId)),
     ?assertEqual(0, emqx_resource_metrics:queuing_get(ResourceId)),
     ?assertEqual(0,
emqx_resource_metrics:dropped_other_get(ResourceId)), ?assertEqual(0, emqx_resource_metrics:dropped_queue_full_get(ResourceId)), - ?assertEqual(0, emqx_resource_metrics:dropped_queue_not_enabled_get(ResourceId)), ?assertEqual(0, emqx_resource_metrics:dropped_resource_not_found_get(ResourceId)), ?assertEqual(0, emqx_resource_metrics:dropped_resource_stopped_get(ResourceId)), ?assertEqual(0, emqx_resource_metrics:retried_get(ResourceId)), @@ -311,30 +349,31 @@ kafka_bridge_rest_api_helper(Config) -> %% Check crucial counters and gauges ?assertEqual(1, emqx_resource_metrics:matched_get(ResourceId)), ?assertEqual(1, emqx_resource_metrics:success_get(ResourceId)), + ?assertEqual(1, emqx_metrics_worker:get(rule_metrics, RuleId, 'actions.success')), + ?assertEqual(0, emqx_metrics_worker:get(rule_metrics, RuleId, 'actions.failed')), ?assertEqual(0, emqx_resource_metrics:dropped_get(ResourceId)), ?assertEqual(0, emqx_resource_metrics:failed_get(ResourceId)), ?assertEqual(0, emqx_resource_metrics:inflight_get(ResourceId)), - ?assertEqual(0, emqx_resource_metrics:batching_get(ResourceId)), ?assertEqual(0, emqx_resource_metrics:queuing_get(ResourceId)), ?assertEqual(0, emqx_resource_metrics:dropped_other_get(ResourceId)), ?assertEqual(0, emqx_resource_metrics:dropped_queue_full_get(ResourceId)), - ?assertEqual(0, emqx_resource_metrics:dropped_queue_not_enabled_get(ResourceId)), ?assertEqual(0, emqx_resource_metrics:dropped_resource_not_found_get(ResourceId)), ?assertEqual(0, emqx_resource_metrics:dropped_resource_stopped_get(ResourceId)), ?assertEqual(0, emqx_resource_metrics:retried_get(ResourceId)), ?assertEqual(0, emqx_resource_metrics:retried_failed_get(ResourceId)), ?assertEqual(0, emqx_resource_metrics:retried_success_get(ResourceId)), %% Perform operations - {ok, 200, _} = show(http_post(show(BridgesPartsOpDisable), #{})), - {ok, 200, _} = show(http_post(show(BridgesPartsOpDisable), #{})), - {ok, 200, _} = show(http_post(show(BridgesPartsOpEnable), #{})), - {ok, 200, _} = show(http_post(show(BridgesPartsOpEnable), #{})), - {ok, 200, _} = show(http_post(show(BridgesPartsOpStop), #{})), - {ok, 200, _} = show(http_post(show(BridgesPartsOpStop), #{})), - {ok, 200, _} = show(http_post(show(BridgesPartsOpRestart), #{})), + {ok, 204, _} = show(http_put(show(BridgesPartsOpDisable), #{})), + {ok, 204, _} = show(http_put(show(BridgesPartsOpDisable), #{})), + {ok, 204, _} = show(http_put(show(BridgesPartsOpEnable), #{})), + {ok, 204, _} = show(http_put(show(BridgesPartsOpEnable), #{})), + {ok, 204, _} = show(http_post(show(BridgesPartsOpStop), #{})), + {ok, 204, _} = show(http_post(show(BridgesPartsOpStop), #{})), + {ok, 204, _} = show(http_post(show(BridgesPartsOpRestart), #{})), %% Cleanup {ok, 204, _} = show(http_delete(BridgesPartsIdDeleteAlsoActions)), false = MyKafkaBridgeExists(), + delete_all_bridges(), ok. %%------------------------------------------------------------------------------ @@ -346,14 +385,15 @@ kafka_bridge_rest_api_helper(Config) -> %% exists and it will. This is especially bad if the %% original crash was due to misconfiguration and we are %% trying to fix it...
-t_failed_creation_then_fix(_Config) -> +t_failed_creation_then_fix(Config) -> HostsString = kafka_hosts_string_sasl(), ValidAuthSettings = valid_sasl_plain_settings(), WrongAuthSettings = ValidAuthSettings#{"password" := "wrong"}, Hash = erlang:phash2([HostsString, ?FUNCTION_NAME]), + Type = ?BRIDGE_TYPE, Name = "kafka_bridge_name_" ++ erlang:integer_to_list(Hash), - ResourceId = emqx_bridge_resource:resource_id("kafka", Name), - BridgeId = emqx_bridge_resource:bridge_id("kafka", Name), + ResourceId = emqx_bridge_resource:resource_id(Type, Name), + BridgeId = emqx_bridge_resource:bridge_id(Type, Name), KafkaTopic = "test-topic-one-partition", WrongConf = config(#{ "authentication" => WrongAuthSettings, @@ -367,21 +407,32 @@ t_failed_creation_then_fix(_Config) -> "kafka_hosts_string" => HostsString, "kafka_topic" => KafkaTopic, "instance_id" => ResourceId, + "producer" => #{ + "kafka" => #{ + "buffer" => #{ + "memory_overload_protection" => false + } + } + }, "ssl" => #{} }), %% creates, but fails to start producers - %% FIXME: change to kafka_producer after config refactoring - ?assertMatch(ok, emqx_bridge_resource:create(kafka, erlang:list_to_atom(Name), WrongConf, #{})), - ?assertThrow(failed_to_start_kafka_producer, ?PRODUCER:on_start(ResourceId, WrongConf)), - %% before throwing, it should cleanup the client process. - ?assertEqual([], supervisor:which_children(wolff_client_sup)), - %% FIXME: change to kafka_producer after config refactoring + {ok, #{config := WrongConfigAtom1}} = emqx_bridge:create( + Type, erlang:list_to_atom(Name), WrongConf + ), + WrongConfigAtom = WrongConfigAtom1#{bridge_name => Name}, + ?assertThrow(Reason when is_list(Reason), ?PRODUCER:on_start(ResourceId, WrongConfigAtom)), + %% before throwing, it should cleanup the client process. we + %% retry because the supervisor might need some time to really + %% remove it from its tree. + ?retry(50, 10, ?assertEqual([], supervisor:which_children(wolff_client_sup))), %% must succeed with correct config - ?assertMatch(ok, emqx_bridge_resource:create(kafka, erlang:list_to_atom(Name), ValidConf, #{})), - {ok, State} = ?PRODUCER:on_start(ResourceId, ValidConf), - %% To make sure we get unique value - timer:sleep(1), - Time = erlang:monotonic_time(), + {ok, #{config := ValidConfigAtom1}} = emqx_bridge:create( + Type, erlang:list_to_atom(Name), ValidConf + ), + ValidConfigAtom = ValidConfigAtom1#{bridge_name => Name}, + {ok, State} = ?PRODUCER:on_start(ResourceId, ValidConfigAtom), + Time = erlang:unique_integer(), BinTime = integer_to_binary(Time), Msg = #{ clientid => BinTime, @@ -390,33 +441,78 @@ t_failed_creation_then_fix(_Config) -> }, {ok, Offset} = resolve_kafka_offset(kafka_hosts(), KafkaTopic, 0), ct:pal("base offset before testing ~p", [Offset]), - ?assertEqual({async_return, ok}, ?PRODUCER:on_query(ResourceId, {send_message, Msg}, State)), + ok = send(Config, ResourceId, Msg, State), {ok, {_, [KafkaMsg]}} = brod:fetch(kafka_hosts(), KafkaTopic, 0, Offset), ?assertMatch(#kafka_message{key = BinTime}, KafkaMsg), %% TODO: refactor those into init/end per testcase ok = ?PRODUCER:on_stop(ResourceId, State), ok = emqx_bridge_resource:remove(BridgeId), + delete_all_bridges(), ok. 
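%% The ?retry(SleepMs, Attempts, Expr) macro used above comes from the EMQX
%% test headers. A minimal hand-rolled equivalent, assuming those argument
%% semantics (a hypothetical helper for illustration only, not part of the
%% suite):
retry_sketch(_SleepMs, Attempts, Fun) when Attempts =< 1 ->
    %% last attempt: let any exception propagate and fail the test
    Fun();
retry_sketch(SleepMs, Attempts, Fun) ->
    try
        Fun()
    catch
        _:_ ->
            timer:sleep(SleepMs),
            retry_sketch(SleepMs, Attempts - 1, Fun)
    end.
%% e.g. retry_sketch(50, 10, fun() ->
%%          ?assertEqual([], supervisor:which_children(wolff_client_sup))
%%      end) re-runs the assertion every 50 ms, up to 10 attempts.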
%%------------------------------------------------------------------------------ %% Helper functions %%------------------------------------------------------------------------------ -publish_with_and_without_ssl(AuthSettings) -> - publish_helper(#{ - auth_settings => AuthSettings, - ssl_settings => #{} - }), - publish_helper(#{ - auth_settings => AuthSettings, - ssl_settings => valid_ssl_settings() - }), +send(Config, ResourceId, Msg, State) when is_list(Config) -> + Ref = make_ref(), + ok = do_send(Ref, Config, ResourceId, Msg, State), + receive + {ack, Ref} -> + ok + after 10000 -> + error(timeout) + end. + +do_send(Ref, Config, ResourceId, Msg, State) when is_list(Config) -> + Caller = self(), + F = fun(ok) -> + Caller ! {ack, Ref}, + ok + end, + case proplists:get_value(query_api, Config) of + on_query -> + ok = ?PRODUCER:on_query(ResourceId, {send_message, Msg}, State), + F(ok); + on_query_async -> + {ok, _} = ?PRODUCER:on_query_async(ResourceId, {send_message, Msg}, {F, []}, State), + ok + end. + +publish_with_and_without_ssl(CtConfig, AuthSettings) -> + publish_with_and_without_ssl(CtConfig, AuthSettings, #{}). + +publish_with_and_without_ssl(CtConfig, AuthSettings, Config) -> + publish_helper( + CtConfig, + #{ + auth_settings => AuthSettings, + ssl_settings => #{} + }, + Config + ), + publish_helper( + CtConfig, + #{ + auth_settings => AuthSettings, + ssl_settings => valid_ssl_settings() + }, + Config + ), ok. -publish_helper(#{ - auth_settings := AuthSettings, - ssl_settings := SSLSettings -}) -> +publish_helper(CtConfig, AuthSettings) -> + publish_helper(CtConfig, AuthSettings, #{}). + +publish_helper( + CtConfig, + #{ + auth_settings := AuthSettings, + ssl_settings := SSLSettings + }, + Conf0 +) -> + delete_all_bridges(), HostsString = case {AuthSettings, SSLSettings} of {"none", Map} when map_size(Map) =:= 0 -> @@ -430,52 +526,77 @@ publish_helper(#{ end, Hash = erlang:phash2([HostsString, AuthSettings, SSLSettings]), Name = "kafka_bridge_name_" ++ erlang:integer_to_list(Hash), - InstId = emqx_bridge_resource:resource_id("kafka", Name), - BridgeId = emqx_bridge_resource:bridge_id("kafka", Name), + Type = ?BRIDGE_TYPE, + InstId = emqx_bridge_resource:resource_id(Type, Name), KafkaTopic = "test-topic-one-partition", - Conf = config(#{ - "authentication" => AuthSettings, - "kafka_hosts_string" => HostsString, - "kafka_topic" => KafkaTopic, - "instance_id" => InstId, - "ssl" => SSLSettings - }), - emqx_bridge_resource:create(kafka, erlang:list_to_atom(Name), Conf, #{}), - %% To make sure we get unique value - timer:sleep(1), - Time = erlang:monotonic_time(), + Conf = config( + #{ + "authentication" => AuthSettings, + "kafka_hosts_string" => HostsString, + "kafka_topic" => KafkaTopic, + "instance_id" => InstId, + "local_topic" => <<"mqtt/local">>, + "ssl" => SSLSettings + }, + Conf0 + ), + {ok, _} = emqx_bridge:create( + <<?BRIDGE_TYPE>>, list_to_binary(Name), Conf + ), + Time = erlang:unique_integer(), BinTime = integer_to_binary(Time), + Partition = 0, Msg = #{ clientid => BinTime, payload => <<"payload">>, timestamp => Time }, - {ok, Offset} = resolve_kafka_offset(kafka_hosts(), KafkaTopic, 0), - ct:pal("base offset before testing ~p", [Offset]), - StartRes = ?PRODUCER:on_start(InstId, Conf), - {ok, State} = StartRes, - OnQueryRes = ?PRODUCER:on_query(InstId, {send_message, Msg}, State), - {async_return, ok} = OnQueryRes, - {ok, {_, [KafkaMsg]}} = brod:fetch(kafka_hosts(), KafkaTopic, 0, Offset), - ?assertMatch(#kafka_message{key = BinTime}, KafkaMsg), - ok = ?PRODUCER:on_stop(InstId, State),
- ok = emqx_bridge_resource:remove(BridgeId), + {ok, Offset0} = resolve_kafka_offset(kafka_hosts(), KafkaTopic, Partition), + ct:pal("base offset before testing ~p", [Offset0]), + {ok, _Group, #{state := State}} = emqx_resource:get_instance(InstId), + ok = send(CtConfig, InstId, Msg, State), + {ok, {_, [KafkaMsg0]}} = brod:fetch(kafka_hosts(), KafkaTopic, Partition, Offset0), + ?assertMatch(#kafka_message{key = BinTime}, KafkaMsg0), + + %% test that it forwards from local mqtt topic as well + {ok, Offset1} = resolve_kafka_offset(kafka_hosts(), KafkaTopic, Partition), + ct:pal("base offset before testing (2) ~p", [Offset1]), + emqx:publish(emqx_message:make(<<"mqtt/local">>, <<"payload">>)), + ct:sleep(2_000), + {ok, {_, [KafkaMsg1]}} = brod:fetch(kafka_hosts(), KafkaTopic, Partition, Offset1), + ?assertMatch(#kafka_message{value = <<"payload">>}, KafkaMsg1), + + delete_all_bridges(), ok. +default_config() -> + #{"partition_strategy" => "random"}. + config(Args) -> + config(Args, #{}). + +config(Args0, More) -> + Args1 = maps:merge(default_config(), Args0), + Args = maps:merge(Args1, More), ConfText = hocon_config(Args), - ct:pal("Running tests with conf:\n~s", [ConfText]), - {ok, Conf} = hocon:binary(ConfText), - #{config := Parsed} = hocon_tconf:check_plain( - emqx_ee_bridge_kafka, - #{<<"config">> => Conf}, - #{atom_key => true} - ), + {ok, Conf} = hocon:binary(ConfText, #{format => map}), + ct:pal("Running tests with conf:\n~p", [Conf]), InstId = maps:get("instance_id", Args), <<"bridge:", BridgeId/binary>> = InstId, - Parsed#{bridge_name => erlang:element(2, emqx_bridge_resource:parse_bridge_id(BridgeId))}. + {Type, Name} = emqx_bridge_resource:parse_bridge_id(BridgeId, #{atom_name => false}), + TypeBin = atom_to_binary(Type), + hocon_tconf:check_plain( + emqx_bridge_schema, + Conf, + #{atom_key => false, required => false} + ), + #{<<"bridges">> := #{TypeBin := #{Name := Parsed}}} = Conf, + Parsed. hocon_config(Args) -> + InstId = maps:get("instance_id", Args), + <<"bridge:", BridgeId/binary>> = InstId, + {_Type, Name} = emqx_bridge_resource:parse_bridge_id(BridgeId, #{atom_name => false}), AuthConf = maps:get("authentication", Args), AuthTemplate = iolist_to_binary(hocon_config_template_authentication(AuthConf)), AuthConfRendered = bbmustache:render(AuthTemplate, AuthConf), @@ -486,6 +607,7 @@ hocon_config(Args) -> iolist_to_binary(hocon_config_template()), Args#{ "authentication" => AuthConfRendered, + "bridge_name" => Name, "ssl" => SSLConfRendered } ), @@ -493,18 +615,34 @@ hocon_config(Args) -> %% erlfmt-ignore hocon_config_template() -> +%% TODO: rename the type to `kafka_producer' after alias support is +%% added to hocon; keeping this as just `kafka' for backwards +%% compatibility. """ -bootstrap_hosts = \"{{ kafka_hosts_string }}\" -enable = true -authentication = {{{ authentication }}} -ssl = {{{ ssl }}} -producer = { - mqtt { - topic = \"t/#\" +bridges.kafka.{{ bridge_name }} { + bootstrap_hosts = \"{{ kafka_hosts_string }}\" + enable = true + authentication = {{{ authentication }}} + ssl = {{{ ssl }}} + local_topic = \"{{ local_topic }}\" + kafka = { + message = { + key = \"${clientid}\" + value = \"${.payload}\" + timestamp = \"${timestamp}\" } - kafka = { - topic = \"{{ kafka_topic }}\" + buffer = { + memory_overload_protection = false } + partition_strategy = {{ partition_strategy }} + topic = \"{{ kafka_topic }}\" + } + metadata_request_timeout = 5s + min_metadata_refresh_interval = 3s + socket_opts { + nodelay = true + } + connect_timeout = 5s } """. 
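%% The hocon_config/1 helper above fills those mustache templates in with
%% bbmustache, the dependency it already calls. A trimmed-down sketch of that
%% rendering step (the sample values are illustrative only):
render_sketch() ->
    Template = <<"bootstrap_hosts = \"{{ kafka_hosts_string }}\"">>,
    %% returns <<"bootstrap_hosts = \"kafka-1.emqx.net:9092\"">>
    bbmustache:render(Template, #{"kafka_hosts_string" => "kafka-1.emqx.net:9092"}).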
@@ -545,22 +683,42 @@ hocon_config_template_ssl(_) -> """. kafka_hosts_string() -> - "kafka-1.emqx.net:9092,". + KafkaHost = os:getenv("KAFKA_PLAIN_HOST", "kafka-1.emqx.net"), + KafkaPort = os:getenv("KAFKA_PLAIN_PORT", "9092"), + KafkaHost ++ ":" ++ KafkaPort ++ ",". kafka_hosts_string_sasl() -> - "kafka-1.emqx.net:9093,". + KafkaHost = os:getenv("KAFKA_SASL_PLAIN_HOST", "kafka-1.emqx.net"), + KafkaPort = os:getenv("KAFKA_SASL_PLAIN_PORT", "9093"), + KafkaHost ++ ":" ++ KafkaPort ++ ",". kafka_hosts_string_ssl() -> - "kafka-1.emqx.net:9094,". + KafkaHost = os:getenv("KAFKA_SSL_HOST", "kafka-1.emqx.net"), + KafkaPort = os:getenv("KAFKA_SSL_PORT", "9094"), + KafkaHost ++ ":" ++ KafkaPort ++ ",". kafka_hosts_string_ssl_sasl() -> - "kafka-1.emqx.net:9095,". + KafkaHost = os:getenv("KAFKA_SASL_SSL_HOST", "kafka-1.emqx.net"), + KafkaPort = os:getenv("KAFKA_SASL_SSL_PORT", "9095"), + KafkaHost ++ ":" ++ KafkaPort ++ ",". + +shared_secret_path() -> + os:getenv("CI_SHARED_SECRET_PATH", "/var/lib/secret"). + +shared_secret(client_keyfile) -> + filename:join([shared_secret_path(), "client.key"]); +shared_secret(client_certfile) -> + filename:join([shared_secret_path(), "client.crt"]); +shared_secret(client_cacertfile) -> + filename:join([shared_secret_path(), "ca.crt"]); +shared_secret(rig_keytab) -> + filename:join([shared_secret_path(), "rig.keytab"]). valid_ssl_settings() -> #{ - "cacertfile" => <<"/var/lib/secret/ca.crt">>, - "certfile" => <<"/var/lib/secret/client.crt">>, - "keyfile" => <<"/var/lib/secret/client.key">>, + "cacertfile" => shared_secret(client_cacertfile), + "certfile" => shared_secret(client_certfile), + "keyfile" => shared_secret(client_keyfile), "enable" => <<"true">> }. @@ -584,7 +742,7 @@ valid_sasl_scram512_settings() -> valid_sasl_kerberos_settings() -> #{ "kerberos_principal" => "rig@KDC.EMQX.NET", - "kerberos_keytab_file" => "/var/lib/secret/rig.keytab" + "kerberos_keytab_file" => shared_secret(rig_keytab) }. kafka_hosts() -> @@ -644,5 +802,19 @@ api_path(Parts) -> ?HOST ++ filename:join([?BASE_PATH | Parts]). json(Data) -> - {ok, Jsx} = emqx_json:safe_decode(Data, [return_maps]), + {ok, Jsx} = emqx_utils_json:safe_decode(Data, [return_maps]), Jsx. + +delete_all_bridges() -> + lists:foreach( + fun(#{name := Name, type := Type}) -> + emqx_bridge:remove(Type, Name) + end, + emqx_bridge:list() + ), + %% at some point during the tests, sometimes `emqx_bridge:list()' + %% returns an empty list, but `emqx:get_config([bridges])' returns + %% a bunch of orphan test bridges... + lists:foreach(fun emqx_resource:remove/1, emqx_resource:list_instances()), + emqx_config:put([bridges], #{}), + ok. diff --git a/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_tests.erl b/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_tests.erl new file mode 100644 index 000000000..b16df854f --- /dev/null +++ b/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_tests.erl @@ -0,0 +1,287 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_kafka_tests). + +-include_lib("eunit/include/eunit.hrl"). 
+ +%%=========================================================================== +%% Test cases +%%=========================================================================== + +kafka_producer_test() -> + Conf1 = parse(kafka_producer_old_hocon(_WithLocalTopic0 = false)), + Conf2 = parse(kafka_producer_old_hocon(_WithLocalTopic1 = true)), + Conf3 = parse(kafka_producer_new_hocon()), + + ?assertMatch( + #{ + <<"bridges">> := + #{ + <<"kafka">> := + #{ + <<"myproducer">> := + #{<<"kafka">> := #{}} + } + } + }, + check(Conf1) + ), + ?assertNotMatch( + #{ + <<"bridges">> := + #{ + <<"kafka">> := + #{ + <<"myproducer">> := + #{<<"local_topic">> := _} + } + } + }, + check(Conf1) + ), + ?assertMatch( + #{ + <<"bridges">> := + #{ + <<"kafka">> := + #{ + <<"myproducer">> := + #{ + <<"kafka">> := #{}, + <<"local_topic">> := <<"mqtt/local">> + } + } + } + }, + check(Conf2) + ), + ?assertMatch( + #{ + <<"bridges">> := + #{ + <<"kafka">> := + #{ + <<"myproducer">> := + #{ + <<"kafka">> := #{}, + <<"local_topic">> := <<"mqtt/local">> + } + } + } + }, + check(Conf3) + ), + + ok. + +kafka_consumer_test() -> + Conf1 = parse(kafka_consumer_hocon()), + ?assertMatch( + #{ + <<"bridges">> := + #{ + <<"kafka_consumer">> := + #{ + <<"my_consumer">> := _ + } + } + }, + check(Conf1) + ), + + %% Bad: can't repeat kafka topics. + BadConf1 = emqx_utils_maps:deep_put( + [<<"bridges">>, <<"kafka_consumer">>, <<"my_consumer">>, <<"topic_mapping">>], + Conf1, + [ + #{ + <<"kafka_topic">> => <<"t1">>, + <<"mqtt_topic">> => <<"mqtt/t1">>, + <<"qos">> => 1, + <<"payload_template">> => <<"${.}">> + }, + #{ + <<"kafka_topic">> => <<"t1">>, + <<"mqtt_topic">> => <<"mqtt/t2">>, + <<"qos">> => 2, + <<"payload_template">> => <<"v = ${.value}">> + } + ] + ), + ?assertThrow( + {_, [ + #{ + path := "bridges.kafka_consumer.my_consumer.topic_mapping", + reason := "Kafka topics must not be repeated in a bridge" + } + ]}, + check(BadConf1) + ), + + %% Bad: there must be at least 1 mapping. + BadConf2 = emqx_utils_maps:deep_put( + [<<"bridges">>, <<"kafka_consumer">>, <<"my_consumer">>, <<"topic_mapping">>], + Conf1, + [] + ), + ?assertThrow( + {_, [ + #{ + path := "bridges.kafka_consumer.my_consumer.topic_mapping", + reason := "There must be at least one Kafka-MQTT topic mapping" + } + ]}, + check(BadConf2) + ), + + ok. + +%%=========================================================================== +%% Helper functions +%%=========================================================================== + +parse(Hocon) -> + {ok, Conf} = hocon:binary(Hocon), + Conf. + +check(Conf) when is_map(Conf) -> + hocon_tconf:check_plain(emqx_bridge_schema, Conf). 
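%% emqx_utils_maps:deep_put(Path, Map, Value), used above to build the bad
%% configs, overwrites the value at a nested key path. A small usage sketch
%% (values are illustrative only):
deep_put_sketch() ->
    Conf = #{<<"a">> => #{<<"b">> => 1}},
    %% returns #{<<"a">> => #{<<"b">> => 2}}
    emqx_utils_maps:deep_put([<<"a">>, <<"b">>], Conf, 2).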
+ +%%=========================================================================== +%% Data section +%%=========================================================================== + +%% erlfmt-ignore +kafka_producer_old_hocon(_WithLocalTopic = true) -> + kafka_producer_old_hocon("mqtt {topic = \"mqtt/local\"}\n"); +kafka_producer_old_hocon(_WithLocalTopic = false) -> + kafka_producer_old_hocon("mqtt {}\n"); +kafka_producer_old_hocon(MQTTConfig) when is_list(MQTTConfig) -> +""" +bridges.kafka { + myproducer { + authentication = \"none\" + bootstrap_hosts = \"toxiproxy:9292\" + connect_timeout = \"5s\" + metadata_request_timeout = \"5s\" + min_metadata_refresh_interval = \"3s\" + producer { + kafka { + buffer { + memory_overload_protection = false + mode = \"memory\" + per_partition_limit = \"2GB\" + segment_bytes = \"100MB\" + } + compression = \"no_compression\" + max_batch_bytes = \"896KB\" + max_inflight = 10 + message { + key = \"${.clientid}\" + timestamp = \"${.timestamp}\" + value = \"${.}\" + } + partition_count_refresh_interval = \"60s\" + partition_strategy = \"random\" + required_acks = \"all_isr\" + topic = \"test-topic-two-partitions\" + } +""" ++ MQTTConfig ++ +""" + } + socket_opts { + nodelay = true + recbuf = \"1024KB\" + sndbuf = \"1024KB\" + } + ssl {enable = false, verify = \"verify_peer\"} + } +} +""". + +kafka_producer_new_hocon() -> + "" + "\n" + "bridges.kafka {\n" + " myproducer {\n" + " authentication = \"none\"\n" + " bootstrap_hosts = \"toxiproxy:9292\"\n" + " connect_timeout = \"5s\"\n" + " metadata_request_timeout = \"5s\"\n" + " min_metadata_refresh_interval = \"3s\"\n" + " kafka {\n" + " buffer {\n" + " memory_overload_protection = false\n" + " mode = \"memory\"\n" + " per_partition_limit = \"2GB\"\n" + " segment_bytes = \"100MB\"\n" + " }\n" + " compression = \"no_compression\"\n" + " max_batch_bytes = \"896KB\"\n" + " max_inflight = 10\n" + " message {\n" + " key = \"${.clientid}\"\n" + " timestamp = \"${.timestamp}\"\n" + " value = \"${.}\"\n" + " }\n" + " partition_count_refresh_interval = \"60s\"\n" + " partition_strategy = \"random\"\n" + " required_acks = \"all_isr\"\n" + " topic = \"test-topic-two-partitions\"\n" + " }\n" + " local_topic = \"mqtt/local\"\n" + " socket_opts {\n" + " nodelay = true\n" + " recbuf = \"1024KB\"\n" + " sndbuf = \"1024KB\"\n" + " }\n" + " ssl {enable = false, verify = \"verify_peer\"}\n" + " }\n" + "}\n" + "". + +%% erlfmt-ignore +kafka_consumer_hocon() -> +""" +bridges.kafka_consumer.my_consumer { + enable = true + bootstrap_hosts = \"kafka-1.emqx.net:9292\" + connect_timeout = 5s + min_metadata_refresh_interval = 3s + metadata_request_timeout = 5s + authentication = { + mechanism = plain + username = emqxuser + password = password + } + kafka { + max_batch_bytes = 896KB + max_rejoin_attempts = 5 + offset_commit_interval_seconds = 3 + offset_reset_policy = latest + } + topic_mapping = [ + { + kafka_topic = \"kafka-topic-1\" + mqtt_topic = \"mqtt/topic/1\" + qos = 1 + payload_template = \"${.}\" + }, + { + kafka_topic = \"kafka-topic-2\" + mqtt_topic = \"mqtt/topic/2\" + qos = 2 + payload_template = \"v = ${.value}\" + } + ] + key_encoding_mode = none + value_encoding_mode = none + ssl { + enable = false + verify = verify_none + server_name_indication = \"auto\" + } +} +""". 
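%% Since the checks above are plain eunit tests, they can also be run
%% standalone from a rebar3 shell:
%%
%%   eunit:test(emqx_bridge_kafka_tests, [verbose]).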
diff --git a/apps/emqx_bridge_matrix/BSL.txt b/apps/emqx_bridge_matrix/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_matrix/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. 
+ +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_matrix/README.md b/apps/emqx_bridge_matrix/README.md new file mode 100644 index 000000000..0d9c4fc4a --- /dev/null +++ b/apps/emqx_bridge_matrix/README.md @@ -0,0 +1,37 @@ +# EMQX MatrixDB Bridge + +[YMatrix](https://www.ymatrix.cn/) is a hyper-converged database product developed by YMatrix on top of the classic open-source databases PostgreSQL / Greenplum. In addition to handling time-series scenarios with ease, it also supports classic scenarios such as online transaction processing (OLTP) and online analytical processing (OLAP). + +The application is used to connect EMQX and MatrixDB. +Users can create a rule and easily ingest IoT data into MatrixDB by leveraging +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes creating, + updating, getting, stopping or restarting, and listing bridges, etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) + for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_matrix/rebar.config b/apps/emqx_bridge_matrix/rebar.config new file mode 100644 index 000000000..87c145f26 --- /dev/null +++ b/apps/emqx_bridge_matrix/rebar.config @@ -0,0 +1,7 @@ +{erl_opts, [debug_info]}. + +{deps, [ + {emqx_connector, {path, "../../apps/emqx_connector"}}, + {emqx_resource, {path, "../../apps/emqx_resource"}}, + {emqx_bridge, {path, "../../apps/emqx_bridge"}} +]}. diff --git a/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.app.src b/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.app.src new file mode 100644 index 000000000..7dfe7eae6 --- /dev/null +++ b/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_matrix, [ + {description, "EMQX Enterprise MatrixDB Bridge"}, + {vsn, "0.1.1"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.erl b/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.erl new file mode 100644 index 000000000..abd98adb6 --- /dev/null +++ b/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.erl @@ -0,0 +1,42 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%-------------------------------------------------------------------- +-module(emqx_bridge_matrix). + +-export([ + conn_bridge_examples/1 +]). + +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +%% ------------------------------------------------------------------------------------------------- +%% api + +conn_bridge_examples(Method) -> + [ + #{ + <<"matrix">> => #{ + summary => <<"Matrix Bridge">>, + value => emqx_bridge_pgsql:values(Method, matrix) + } + } + ]. + +%% ------------------------------------------------------------------------------------------------- +%% Hocon Schema Definitions +namespace() -> "bridge_matrix". + +roots() -> []. + +fields("post") -> + emqx_bridge_pgsql:fields("post", matrix); +fields(Method) -> + emqx_bridge_pgsql:fields(Method). + +desc(_) -> + undefined. diff --git a/apps/emqx_bridge_mongodb/BSL.txt b/apps/emqx_bridge_mongodb/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_mongodb/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. 
+ +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_mongodb/README.md b/apps/emqx_bridge_mongodb/README.md new file mode 100644 index 000000000..088c8467f --- /dev/null +++ b/apps/emqx_bridge_mongodb/README.md @@ -0,0 +1,39 @@ +# EMQX MongoDB Bridge + +[MongoDB](https://github.com/mongodb/mongo) is a source-available cross-platform +document-oriented database. It is a NoSQL database that stores flexible JSON-like +documents for faster iteration and better data organization. +It provides high availability and scaling with its built-in replication and sharding +features, and is used in a variety of industries. + +The application is used to connect EMQX and MongoDB. +Users can create a rule and easily ingest IoT data into MongoDB by leveraging +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + +# Documentation + +- Refer to [Ingest data into MongoDB](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-mongodb.html) + for how to use EMQX dashboard to ingest IoT data into MongoDB. + +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes creating, + updating, getting, stopping or restarting, and listing bridges, etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) + for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).
diff --git a/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb.app.src b/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb.app.src new file mode 100644 index 000000000..008a9e164 --- /dev/null +++ b/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_mongodb, [ + {description, "EMQX Enterprise MongoDB Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_mysql/BSL.txt b/apps/emqx_bridge_mysql/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_mysql/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). 
+ +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_mysql/README.md b/apps/emqx_bridge_mysql/README.md new file mode 100644 index 000000000..d7c9b5647 --- /dev/null +++ b/apps/emqx_bridge_mysql/README.md @@ -0,0 +1,36 @@ +# EMQX MySQL Bridge + +[MySQL](https://github.com/mysql/mysql-server) is a popular open-source relational database +management system. + +The application is used to connect EMQX and MySQL. +Users can create a rule and easily ingest IoT data into MySQL by leveraging +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + +# Documentation + +- Refer to [Ingest data into MySQL](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-mysql.html) + for how to use EMQX dashboard to ingest IoT data into MySQL. + +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes creating, + updating, getting, stopping or restarting, and listing bridges, etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) + for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_mysql/src/emqx_bridge_mysql.app.src b/apps/emqx_bridge_mysql/src/emqx_bridge_mysql.app.src new file mode 100644 index 000000000..2e36587a7 --- /dev/null +++ b/apps/emqx_bridge_mysql/src/emqx_bridge_mysql.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_mysql, [ + {description, "EMQX Enterprise MySQL Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}.
diff --git a/apps/emqx_bridge_opents/BSL.txt b/apps/emqx_bridge_opents/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_opents/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. 
+ +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_opents/README.md b/apps/emqx_bridge_opents/README.md new file mode 100644 index 000000000..a1d6511ee --- /dev/null +++ b/apps/emqx_bridge_opents/README.md @@ -0,0 +1,36 @@ +# EMQX OpenTSDB Bridge + +[OpenTSDB](http://opentsdb.net) is a distributed, scalable Time Series Database (TSDB) written on top of HBase. + +OpenTSDB was written to address a common need: store, index and serve metrics collected from computer systems (network gear, operating systems, applications) at a large scale, and make this data easily accessible and graphable. + +OpenTSDB allows you to collect thousands of metrics from tens of thousands of hosts and applications, at a high rate (every few seconds). + +OpenTSDB will never delete or downsample data and can easily store hundreds of billions of data points. + +The application is used to connect EMQX and OpenTSDB. Users can create a rule and easily ingest IoT data into OpenTSDB by leveraging +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + +# Documentation + +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes creating, + updating, getting, stopping or restarting, and listing bridges, etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_opents/docker-ct b/apps/emqx_bridge_opents/docker-ct new file mode 100644 index 000000000..fc68b978e --- /dev/null +++ b/apps/emqx_bridge_opents/docker-ct @@ -0,0 +1,2 @@ +toxiproxy +opents diff --git a/apps/emqx_bridge_opents/rebar.config b/apps/emqx_bridge_opents/rebar.config new file mode 100644 index 000000000..d7bd4560f --- /dev/null +++ b/apps/emqx_bridge_opents/rebar.config @@ -0,0 +1,8 @@ +{erl_opts, [debug_info]}. + +{deps, [ + {opentsdb, {git, "https://github.com/emqx/opentsdb-client-erl", {tag, "v0.5.1"}}}, + {emqx_connector, {path, "../../apps/emqx_connector"}}, + {emqx_resource, {path, "../../apps/emqx_resource"}}, + {emqx_bridge, {path, "../../apps/emqx_bridge"}} +]}.
diff --git a/apps/emqx_bridge_opents/src/emqx_bridge_opents.app.src b/apps/emqx_bridge_opents/src/emqx_bridge_opents.app.src new file mode 100644 index 000000000..d001446b3 --- /dev/null +++ b/apps/emqx_bridge_opents/src/emqx_bridge_opents.app.src @@ -0,0 +1,15 @@ +{application, emqx_bridge_opents, [ + {description, "EMQX Enterprise OpenTSDB Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + opentsdb + ]}, + {env, []}, + {modules, []}, + + {licenses, ["BSL"]}, + {links, []} +]}. diff --git a/apps/emqx_bridge_opents/src/emqx_bridge_opents.erl b/apps/emqx_bridge_opents/src/emqx_bridge_opents.erl new file mode 100644 index 000000000..2eb6a554f --- /dev/null +++ b/apps/emqx_bridge_opents/src/emqx_bridge_opents.erl @@ -0,0 +1,85 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_opents). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-export([ + conn_bridge_examples/1 +]). + +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +%% ------------------------------------------------------------------------------------------------- +%% api +conn_bridge_examples(Method) -> + [ + #{ + <<"opents">> => #{ + summary => <<"OpenTSDB Bridge">>, + value => values(Method) + } + } + ]. + +values(_Method) -> + #{ + enable => true, + type => opents, + name => <<"foo">>, + server => <<"http://127.0.0.1:4242">>, + pool_size => 8, + resource_opts => #{ + worker_pool_size => 1, + health_check_interval => ?HEALTHCHECK_INTERVAL_RAW, + auto_restart_interval => ?AUTO_RESTART_INTERVAL_RAW, + batch_size => ?DEFAULT_BATCH_SIZE, + batch_time => ?DEFAULT_BATCH_TIME, + query_mode => async, + max_buffer_bytes => ?DEFAULT_BUFFER_BYTES + } + }. + +%% ------------------------------------------------------------------------------------------------- +%% Hocon Schema Definitions +namespace() -> "bridge_opents". + +roots() -> []. + +fields("config") -> + [ + {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})} + ] ++ emqx_resource_schema:fields("resource_opts") ++ + emqx_bridge_opents_connector:fields(config); +fields("post") -> + [type_field(), name_field() | fields("config")]; +fields("put") -> + fields("config"); +fields("get") -> + emqx_bridge_schema:status_fields() ++ fields("post"). + +desc("config") -> + ?DESC("desc_config"); +desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> + ["Configuration for OpenTSDB using `", string:to_upper(Method), "` method."]; +desc(_) -> + undefined. + +%% ------------------------------------------------------------------------------------------------- +%% internal + +type_field() -> + {type, mk(enum([opents]), #{required => true, desc => ?DESC("desc_type")})}. + +name_field() -> + {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. diff --git a/apps/emqx_bridge_opents/src/emqx_bridge_opents_connector.erl b/apps/emqx_bridge_opents/src/emqx_bridge_opents_connector.erl new file mode 100644 index 000000000..0366c9dc2 --- /dev/null +++ b/apps/emqx_bridge_opents/src/emqx_bridge_opents_connector.erl @@ -0,0 +1,184 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. 
+%%-------------------------------------------------------------------- + +-module(emqx_bridge_opents_connector). + +-behaviour(emqx_resource). + +-include_lib("emqx_resource/include/emqx_resource.hrl"). +-include_lib("typerefl/include/types.hrl"). +-include_lib("emqx/include/logger.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). + +-export([roots/0, fields/1]). + +%% `emqx_resource' API +-export([ + callback_mode/0, + is_buffer_supported/0, + on_start/2, + on_stop/2, + on_query/3, + on_batch_query/3, + on_get_status/2 +]). + +-export([connect/1]). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +%%===================================================================== +%% Hocon schema +roots() -> + [{config, #{type => hoconsc:ref(?MODULE, config)}}]. + +fields(config) -> + [ + {server, mk(binary(), #{required => true, desc => ?DESC("server")})}, + {pool_size, fun emqx_connector_schema_lib:pool_size/1}, + {summary, mk(boolean(), #{default => true, desc => ?DESC("summary")})}, + {details, mk(boolean(), #{default => false, desc => ?DESC("details")})}, + {auto_reconnect, fun emqx_connector_schema_lib:auto_reconnect/1} + ]. + +%%======================================================================================== +%% `emqx_resource' API +%%======================================================================================== + +callback_mode() -> always_sync. + +is_buffer_supported() -> false. + +on_start( + InstanceId, + #{ + server := Server, + pool_size := PoolSize, + summary := Summary, + details := Details, + resource_opts := #{batch_size := BatchSize} + } = Config +) -> + ?SLOG(info, #{ + msg => "starting_opents_connector", + connector => InstanceId, + config => emqx_utils:redact(Config) + }), + + Options = [ + {server, to_str(Server)}, + {summary, Summary}, + {details, Details}, + {max_batch_size, BatchSize}, + {pool_size, PoolSize} + ], + + State = #{pool_name => InstanceId, server => Server}, + case opentsdb_connectivity(Server) of + ok -> + case emqx_resource_pool:start(InstanceId, ?MODULE, Options) of + ok -> + {ok, State}; + Error -> + Error + end; + {error, Reason} = Error -> + ?SLOG(error, #{msg => "Initiate resource failed", reason => Reason}), + Error + end. + +on_stop(InstanceId, #{pool_name := PoolName} = _State) -> + ?SLOG(info, #{ + msg => "stopping_opents_connector", + connector => InstanceId + }), + emqx_resource_pool:stop(PoolName). + +on_query(InstanceId, Request, State) -> + on_batch_query(InstanceId, [Request], State). + +on_batch_query( + InstanceId, + BatchReq, + State +) -> + Datas = [format_opentsdb_msg(Msg) || {_Key, Msg} <- BatchReq], + do_query(InstanceId, Datas, State). + +on_get_status(_InstanceId, #{server := Server}) -> + Result = + case opentsdb_connectivity(Server) of + ok -> + connected; + {error, Reason} -> + ?SLOG(error, #{msg => "OpenTSDB lost connection", reason => Reason}), + connecting + end, + Result. 
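%% The connected | connecting atom returned by on_get_status/2 above is what
%% emqx_resource_manager reports for the bridge; the test suite further below
%% asserts on it like this:
%%
%%   ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name),
%%   {ok, connected} = emqx_resource_manager:health_check(ResourceID).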
+ +%%======================================================================================== +%% Helper fns +%%======================================================================================== + +do_query(InstanceId, Query, #{pool_name := PoolName} = State) -> + ?TRACE( + "QUERY", + "opents_connector_received", + #{connector => InstanceId, query => Query, state => State} + ), + Result = ecpool:pick_and_do(PoolName, {opentsdb, put, [Query]}, no_handover), + + case Result of + {error, Reason} -> + ?tp( + opents_connector_query_return, + #{error => Reason} + ), + ?SLOG(error, #{ + msg => "opents_connector_do_query_failed", + connector => InstanceId, + query => Query, + reason => Reason + }), + Result; + _ -> + ?tp( + opents_connector_query_return, + #{result => Result} + ), + Result + end. + +connect(Opts) -> + opentsdb:start_link(Opts). + +to_str(List) when is_list(List) -> + List; +to_str(Bin) when is_binary(Bin) -> + erlang:binary_to_list(Bin). + +opentsdb_connectivity(Server) -> + SvrUrl = + case Server of + <<"http://", _/binary>> -> Server; + <<"https://", _/binary>> -> Server; + _ -> "http://" ++ Server + end, + emqx_plugin_libs_rule:http_connectivity(SvrUrl). + +format_opentsdb_msg(Msg) -> + maps:with( + [ + timestamp, + metric, + tags, + value, + <<"timestamp">>, + <<"metric">>, + <<"tags">>, + <<"value">> + ], + Msg + ). diff --git a/apps/emqx_bridge_opents/test/emqx_bridge_opents_SUITE.erl b/apps/emqx_bridge_opents/test/emqx_bridge_opents_SUITE.erl new file mode 100644 index 000000000..6f444b93e --- /dev/null +++ b/apps/emqx_bridge_opents/test/emqx_bridge_opents_SUITE.erl @@ -0,0 +1,363 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_opents_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +% DB defaults +-define(BATCH_SIZE, 10). + +%%------------------------------------------------------------------------------ +%% CT boilerplate +%%------------------------------------------------------------------------------ + +all() -> + [ + {group, with_batch}, + {group, without_batch} + ]. + +groups() -> + TCs = emqx_common_test_helpers:all(?MODULE), + [ + {with_batch, TCs}, + {without_batch, TCs} + ]. + +init_per_group(with_batch, Config0) -> + Config = [{batch_size, ?BATCH_SIZE} | Config0], + common_init(Config); +init_per_group(without_batch, Config0) -> + Config = [{batch_size, 1} | Config0], + common_init(Config); +init_per_group(_Group, Config) -> + Config. + +end_per_group(Group, Config) when Group =:= with_batch; Group =:= without_batch -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + ok; +end_per_group(_Group, _Config) -> + ok. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + emqx_mgmt_api_test_util:end_suite(), + ok = emqx_common_test_helpers:stop_apps([emqx_bridge, emqx_conf]), + ok. + +init_per_testcase(_Testcase, Config) -> + delete_bridge(Config), + snabbkaffe:start_trace(), + Config. 
+ +end_per_testcase(_Testcase, Config) -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + ok = snabbkaffe:stop(), + delete_bridge(Config), + ok. + +%%------------------------------------------------------------------------------ +%% Helper fns +%%------------------------------------------------------------------------------ + +common_init(ConfigT) -> + Host = os:getenv("OPENTS_HOST", "toxiproxy"), + Port = list_to_integer(os:getenv("OPENTS_PORT", "4242")), + + Config0 = [ + {opents_host, Host}, + {opents_port, Port}, + {proxy_name, "opents"} + | ConfigT + ], + + BridgeType = proplists:get_value(bridge_type, Config0, <<"opents">>), + case emqx_common_test_helpers:is_tcp_server_available(Host, Port) of + true -> + % Setup toxiproxy + ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"), + ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + % Ensure EE bridge module is loaded + _ = application:load(emqx_ee_bridge), + _ = emqx_ee_bridge:module_info(), + ok = emqx_common_test_helpers:start_apps([emqx_conf, emqx_bridge]), + emqx_mgmt_api_test_util:init_suite(), + {Name, OpenTSConf} = opents_config(BridgeType, Config0), + Config = + [ + {opents_config, OpenTSConf}, + {opents_bridge_type, BridgeType}, + {opents_name, Name}, + {proxy_host, ProxyHost}, + {proxy_port, ProxyPort} + | Config0 + ], + Config; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_opents); + _ -> + {skip, no_opents} + end + end. + +opents_config(BridgeType, Config) -> + Port = integer_to_list(?config(opents_port, Config)), + Server = "http://" ++ ?config(opents_host, Config) ++ ":" ++ Port, + Name = atom_to_binary(?MODULE), + BatchSize = ?config(batch_size, Config), + ConfigString = + io_lib:format( + "bridges.~s.~s {\n" + " enable = true\n" + " server = ~p\n" + " resource_opts = {\n" + " request_timeout = 500ms\n" + " batch_size = ~b\n" + " query_mode = sync\n" + " }\n" + "}", + [ + BridgeType, + Name, + Server, + BatchSize + ] + ), + {Name, parse_and_check(ConfigString, BridgeType, Name)}. + +parse_and_check(ConfigString, BridgeType, Name) -> + {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), + hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}), + #{<<"bridges">> := #{BridgeType := #{Name := Config}}} = RawConf, + Config. + +create_bridge(Config) -> + create_bridge(Config, _Overrides = #{}). + +create_bridge(Config, Overrides) -> + BridgeType = ?config(opents_bridge_type, Config), + Name = ?config(opents_name, Config), + Config0 = ?config(opents_config, Config), + Config1 = emqx_utils_maps:deep_merge(Config0, Overrides), + emqx_bridge:create(BridgeType, Name, Config1). + +delete_bridge(Config) -> + BridgeType = ?config(opents_bridge_type, Config), + Name = ?config(opents_name, Config), + emqx_bridge:remove(BridgeType, Name). + +create_bridge_http(Params) -> + Path = emqx_mgmt_api_test_util:api_path(["bridges"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of + {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; + Error -> Error + end. + +send_message(Config, Payload) -> + Name = ?config(opents_name, Config), + BridgeType = ?config(opents_bridge_type, Config), + BridgeID = emqx_bridge_resource:bridge_id(BridgeType, Name), + emqx_bridge:send_message(BridgeID, Payload). 
+ +query_resource(Config, Request) -> + query_resource(Config, Request, 1_000). + +query_resource(Config, Request, Timeout) -> + Name = ?config(opents_name, Config), + BridgeType = ?config(opents_bridge_type, Config), + ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), + emqx_resource:query(ResourceID, Request, #{timeout => Timeout}). + +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ + +t_setup_via_config_and_publish(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + SentData = make_data(), + ?check_trace( + begin + {_, {ok, #{result := Result}}} = + ?wait_async_action( + send_message(Config, SentData), + #{?snk_kind := buffer_worker_flush_ack}, + 2_000 + ), + ?assertMatch( + {ok, 200, #{failed := 0, success := 1}}, Result + ), + ok + end, + fun(Trace0) -> + Trace = ?of_kind(opents_connector_query_return, Trace0), + ?assertMatch([#{result := {ok, 200, #{failed := 0, success := 1}}}], Trace), + ok + end + ), + ok. + +t_setup_via_http_api_and_publish(Config) -> + BridgeType = ?config(opents_bridge_type, Config), + Name = ?config(opents_name, Config), + OpentsConfig0 = ?config(opents_config, Config), + OpentsConfig = OpentsConfig0#{ + <<"name">> => Name, + <<"type">> => BridgeType + }, + ?assertMatch( + {ok, _}, + create_bridge_http(OpentsConfig) + ), + SentData = make_data(), + ?check_trace( + begin + Request = {send_message, SentData}, + Res0 = query_resource(Config, Request, 2_500), + ?assertMatch( + {ok, 200, #{failed := 0, success := 1}}, Res0 + ), + ok + end, + fun(Trace0) -> + Trace = ?of_kind(opents_connector_query_return, Trace0), + ?assertMatch([#{result := {ok, 200, #{failed := 0, success := 1}}}], Trace), + ok + end + ), + ok. + +t_get_status(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + + Name = ?config(opents_name, Config), + BridgeType = ?config(opents_bridge_type, Config), + ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), + + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceID)), + ok. + +t_create_disconnected(Config) -> + BridgeType = proplists:get_value(bridge_type, Config, <<"opents">>), + Config1 = lists:keyreplace(opents_port, 1, Config, {opents_port, 61234}), + {_Name, OpenTSConf} = opents_config(BridgeType, Config1), + + Config2 = lists:keyreplace(opents_config, 1, Config1, {opents_config, OpenTSConf}), + ?assertMatch({ok, _}, create_bridge(Config2)), + + Name = ?config(opents_name, Config), + ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), + ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceID)), + ok. + +t_write_failure(Config) -> + ProxyName = ?config(proxy_name, Config), + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + {ok, _} = create_bridge(Config), + SentData = make_data(), + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + {_, {ok, #{result := Result}}} = + ?wait_async_action( + send_message(Config, SentData), + #{?snk_kind := buffer_worker_flush_ack}, + 2_000 + ), + ?assertMatch({error, _}, Result), + ok + end), + ok. 
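+
+%% Note: with_failure/5 above injects the named failure (down, timeout, ...)
+%% between EMQX and the database through the toxiproxy instance configured via
+%% PROXY_HOST/PROXY_PORT, and lifts it again once the given fun returns.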
+ +t_write_timeout(Config) -> + ProxyName = ?config(proxy_name, Config), + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + {ok, _} = create_bridge( + Config, + #{ + <<"resource_opts">> => #{ + <<"request_timeout">> => 500, + <<"resume_interval">> => 100, + <<"health_check_interval">> => 100 + } + } + ), + SentData = make_data(), + emqx_common_test_helpers:with_failure( + timeout, ProxyName, ProxyHost, ProxyPort, fun() -> + ?assertMatch( + {error, {resource_error, #{reason := timeout}}}, + query_resource(Config, {send_message, SentData}) + ) + end + ), + ok. + +t_missing_data(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + {_, {ok, #{result := Result}}} = + ?wait_async_action( + send_message(Config, #{}), + #{?snk_kind := buffer_worker_flush_ack}, + 2_000 + ), + ?assertMatch( + {error, {400, #{failed := 1, success := 0}}}, + Result + ), + ok. + +t_bad_data(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + Data = maps:without([metric], make_data()), + {_, {ok, #{result := Result}}} = + ?wait_async_action( + send_message(Config, Data), + #{?snk_kind := buffer_worker_flush_ack}, + 2_000 + ), + + ?assertMatch( + {error, {400, #{failed := 1, success := 0}}}, Result + ), + ok. + +make_data() -> + make_data(<<"cpu">>, 12). + +make_data(Metric, Value) -> + #{ + metric => Metric, + tags => #{ + <<"host">> => <<"serverA">> + }, + value => Value + }. diff --git a/apps/emqx_bridge_oracle/BSL.txt b/apps/emqx_bridge_oracle/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_oracle/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. 
+
+All copies of the original and modified Licensed Work, and derivative works
+of the Licensed Work, are subject to this License. This License applies
+separately for each version of the Licensed Work and the Change Date may vary
+for each version of the Licensed Work released by Licensor.
+
+You must conspicuously display this License on each original or modified copy
+of the Licensed Work. If you receive the Licensed Work in original or
+modified form from a third party, the terms and conditions set forth in this
+License apply to your use of that work.
+
+Any use of the Licensed Work in violation of this License will automatically
+terminate your rights under this License for the current and all other
+versions of the Licensed Work.
+
+This License does not grant you any right in any trademark or logo of
+Licensor or its affiliates (provided that you may use a trademark or logo of
+Licensor as expressly required by this License).
+
+TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
+AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
+EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
+TITLE.
+
+MariaDB hereby grants you permission to use this License’s text to license
+your works, and to refer to it using the trademark “Business Source License”,
+as long as you comply with the Covenants of Licensor below.
+
+Covenants of Licensor
+
+In consideration of the right to use this License’s text and the “Business
+Source License” name and trademark, Licensor covenants to MariaDB, and to all
+other recipients of the licensed work to be provided by Licensor:
+
+1. To specify as the Change License the GPL Version 2.0 or any later version,
+   or a license that is compatible with GPL Version 2.0 or a later version,
+   where “compatible” means that software provided under the Change License can
+   be included in a program with software provided under GPL Version 2.0 or a
+   later version. Licensor may specify additional Change Licenses without
+   limitation.
+
+2. To either: (a) specify an additional grant of rights to use that does not
+   impose any additional restriction on the right granted in this License, as
+   the Additional Use Grant; or (b) insert the text “None”.
+
+3. To specify a Change Date.
+
+4. Not to modify this License in any other way.
diff --git a/apps/emqx_bridge_oracle/README.md b/apps/emqx_bridge_oracle/README.md
new file mode 100644
index 000000000..d2974b722
--- /dev/null
+++ b/apps/emqx_bridge_oracle/README.md
@@ -0,0 +1,28 @@
+# EMQX Oracle Database Bridge
+
+This application houses the Oracle Database bridge for EMQX Enterprise Edition.
+It implements the data bridge APIs for interacting with an Oracle Database.
+
+
+# Documentation
+
+- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
+  for an introduction to the EMQX rules engine.
+
+
+# HTTP APIs
+
+- Several APIs are provided for bridge management, including creating, updating,
+  fetching, starting or stopping, and listing bridges.
+
+  Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) for details, and see the configuration sketch at the end of this README.
+
+
+# Contributing
+
+Please see our [contributing.md](../../CONTRIBUTING.md).
+
+
+# License
+
+See [BSL](./BSL.txt).
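+
+
+# Example Configuration
+
+A minimal bridge configuration sketch in HOCON, mirroring the fields exercised
+by the test suite in this application; the server address and credentials below
+are illustrative placeholders:
+
+```
+bridges.oracle.my_oracle_bridge {
+  enable = true
+  server = "127.0.0.1:1521"
+  database = "XE"
+  sid = "XE"
+  username = "system"
+  password = "******"
+  pool_size = 8
+  sql = "insert into t_mqtt_msg(msgid, topic, qos, payload) values (${id}, ${topic}, ${qos}, ${payload})"
+}
+```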
diff --git a/apps/emqx_bridge_oracle/docker-ct b/apps/emqx_bridge_oracle/docker-ct
new file mode 100644
index 000000000..c24dc4bc9
--- /dev/null
+++ b/apps/emqx_bridge_oracle/docker-ct
@@ -0,0 +1,2 @@
+toxiproxy
+oracle
diff --git a/apps/emqx_bridge_oracle/etc/emqx_bridge_oracle.conf b/apps/emqx_bridge_oracle/etc/emqx_bridge_oracle.conf
new file mode 100644
index 000000000..e69de29bb
diff --git a/apps/emqx_bridge_oracle/rebar.config b/apps/emqx_bridge_oracle/rebar.config
new file mode 100644
index 000000000..c238546c4
--- /dev/null
+++ b/apps/emqx_bridge_oracle/rebar.config
@@ -0,0 +1,13 @@
+%% -*- mode: erlang; -*-
+
+{erl_opts, [debug_info]}.
+{deps, [ {emqx_oracle, {path, "../../apps/emqx_oracle"}}
+       , {emqx_connector, {path, "../../apps/emqx_connector"}}
+       , {emqx_resource, {path, "../../apps/emqx_resource"}}
+       , {emqx_bridge, {path, "../../apps/emqx_bridge"}}
+       ]}.
+
+{shell, [
+    % {config, "config/sys.config"},
+    {apps, [emqx_bridge_oracle]}
+]}.
diff --git a/apps/emqx_bridge_oracle/src/emqx_bridge_oracle.app.src b/apps/emqx_bridge_oracle/src/emqx_bridge_oracle.app.src
new file mode 100644
index 000000000..4f81c2110
--- /dev/null
+++ b/apps/emqx_bridge_oracle/src/emqx_bridge_oracle.app.src
@@ -0,0 +1,14 @@
+{application, emqx_bridge_oracle, [
+    {description, "EMQX Enterprise Oracle Database Bridge"},
+    {vsn, "0.1.0"},
+    {registered, []},
+    {applications, [
+        kernel,
+        stdlib,
+        emqx_oracle
+    ]},
+    {env, []},
+    {modules, []},
+
+    {links, []}
+]}.
diff --git a/apps/emqx_bridge_oracle/src/emqx_bridge_oracle.erl b/apps/emqx_bridge_oracle/src/emqx_bridge_oracle.erl
new file mode 100644
index 000000000..8a87f02ba
--- /dev/null
+++ b/apps/emqx_bridge_oracle/src/emqx_bridge_oracle.erl
@@ -0,0 +1,109 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+-module(emqx_bridge_oracle).
+
+-include_lib("typerefl/include/types.hrl").
+-include_lib("hocon/include/hoconsc.hrl").
+-include_lib("emqx_bridge/include/emqx_bridge.hrl").
+-include_lib("emqx_resource/include/emqx_resource.hrl").
+
+-export([
+    conn_bridge_examples/1
+]).
+
+-export([
+    namespace/0,
+    roots/0,
+    fields/1,
+    desc/1
+]).
+
+-define(DEFAULT_SQL, <<
+    "insert into t_mqtt_msg(msgid, topic, qos, payload) "
+    "values (${id}, ${topic}, ${qos}, ${payload})"
+>>).
+
+conn_bridge_examples(Method) ->
+    [
+        #{
+            <<"oracle">> => #{
+                summary => <<"Oracle Database Bridge">>,
+                value => values(Method)
+            }
+        }
+    ].
+
+values(_Method) ->
+    #{
+        enable => true,
+        type => oracle,
+        name => <<"foo">>,
+        server => <<"127.0.0.1:1521">>,
+        pool_size => 8,
+        database => <<"ORCL">>,
+        sid => <<"ORCL">>,
+        username => <<"root">>,
+        password => <<"******">>,
+        sql => ?DEFAULT_SQL,
+        local_topic => <<"local/topic/#">>,
+        resource_opts => #{
+            worker_pool_size => 8,
+            health_check_interval => ?HEALTHCHECK_INTERVAL_RAW,
+            auto_restart_interval => ?AUTO_RESTART_INTERVAL_RAW,
+            batch_size => ?DEFAULT_BATCH_SIZE,
+            batch_time => ?DEFAULT_BATCH_TIME,
+            query_mode => async,
+            max_buffer_bytes => ?DEFAULT_BUFFER_BYTES
+        }
+    }.
+
+%% -------------------------------------------------------------------------------------------------
+%% Hocon Schema Definitions
+
+namespace() -> "bridge_oracle".
+
+roots() -> [].
+
+fields("config") ->
+    [
+        {enable,
+            hoconsc:mk(
+                boolean(),
+                #{desc => ?DESC("config_enable"), default => true}
+            )},
+        {sql,
+            hoconsc:mk(
+                binary(),
+                #{desc => ?DESC("sql_template"), default => ?DEFAULT_SQL, format => <<"sql">>}
+            )},
+        {local_topic,
+            hoconsc:mk(
+                binary(),
+                #{desc => ?DESC("local_topic"), default => undefined}
+            )}
+    ] ++ emqx_resource_schema:fields("resource_opts") ++
+        (emqx_oracle_schema:fields(config) --
+            emqx_connector_schema_lib:prepare_statement_fields());
+fields("post") ->
+    fields("post", oracle);
+fields("put") ->
+    fields("config");
+fields("get") ->
+    emqx_bridge_schema:status_fields() ++ fields("post").
+
+fields("post", Type) ->
+    [type_field(Type), name_field() | fields("config")].
+
+desc("config") ->
+    ?DESC("desc_config");
+desc(_) ->
+    undefined.
+
+%% -------------------------------------------------------------------------------------------------
+
+type_field(Type) ->
+    {type, hoconsc:mk(hoconsc:enum([Type]), #{required => true, desc => ?DESC("desc_type")})}.
+
+name_field() ->
+    {name, hoconsc:mk(binary(), #{required => true, desc => ?DESC("desc_name")})}.
diff --git a/apps/emqx_bridge_oracle/test/emqx_bridge_oracle_SUITE.erl b/apps/emqx_bridge_oracle/test/emqx_bridge_oracle_SUITE.erl
new file mode 100644
index 000000000..b50788277
--- /dev/null
+++ b/apps/emqx_bridge_oracle/test/emqx_bridge_oracle_SUITE.erl
@@ -0,0 +1,514 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+-module(emqx_bridge_oracle_SUITE).
+
+-compile(nowarn_export_all).
+-compile(export_all).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("snabbkaffe/include/snabbkaffe.hrl").
+
+-import(emqx_common_test_helpers, [on_exit/1]).
+
+-define(BRIDGE_TYPE_BIN, <<"oracle">>).
+-define(APPS, [emqx_bridge, emqx_resource, emqx_rule_engine, emqx_oracle, emqx_bridge_oracle]).
+-define(DATABASE, "XE").
+-define(RULE_TOPIC, "mqtt/rule").
+% -define(RULE_TOPIC_BIN, <<?RULE_TOPIC>>).
+
+%%------------------------------------------------------------------------------
+%% CT boilerplate
+%%------------------------------------------------------------------------------
+
+all() ->
+    [
+        {group, plain}
+    ].
+
+groups() ->
+    AllTCs = emqx_common_test_helpers:all(?MODULE),
+    [
+        {plain, AllTCs}
+    ].
+
+only_once_tests() ->
+    [t_create_via_http].
+
+init_per_suite(Config) ->
+    Config.
+
+end_per_suite(_Config) ->
+    emqx_mgmt_api_test_util:end_suite(),
+    ok = emqx_common_test_helpers:stop_apps([emqx_conf]),
+    ok = emqx_connector_test_helpers:stop_apps(lists:reverse(?APPS)),
+    _ = application:stop(emqx_connector),
+    ok.
+
+init_per_group(plain = Type, Config) ->
+    OracleHost = os:getenv("ORACLE_PLAIN_HOST", "toxiproxy.emqx.net"),
+    OraclePort = list_to_integer(os:getenv("ORACLE_PLAIN_PORT", "1521")),
+    ProxyName = "oracle",
+    case emqx_common_test_helpers:is_tcp_server_available(OracleHost, OraclePort) of
+        true ->
+            Config1 = common_init_per_group(),
+            [
+                {proxy_name, ProxyName},
+                {oracle_host, OracleHost},
+                {oracle_port, OraclePort},
+                {connection_type, Type}
+                | Config1 ++ Config
+            ];
+        false ->
+            case os:getenv("IS_CI") of
+                "yes" ->
+                    throw(no_oracle);
+                _ ->
+                    {skip, no_oracle}
+            end
+    end;
+init_per_group(_Group, Config) ->
+    Config.
+
+end_per_group(Group, Config) when
+    Group =:= plain
+->
+    common_end_per_group(Config),
+    ok;
+end_per_group(_Group, _Config) ->
+    ok.
+ +common_init_per_group() -> + ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"), + ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + application:load(emqx_bridge), + ok = emqx_common_test_helpers:start_apps([emqx_conf]), + ok = emqx_connector_test_helpers:start_apps(?APPS), + {ok, _} = application:ensure_all_started(emqx_connector), + emqx_mgmt_api_test_util:init_suite(), + UniqueNum = integer_to_binary(erlang:unique_integer()), + MQTTTopic = <<"mqtt/topic/", UniqueNum/binary>>, + [ + {proxy_host, ProxyHost}, + {proxy_port, ProxyPort}, + {mqtt_topic, MQTTTopic} + ]. + +common_end_per_group(Config) -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + delete_all_bridges(), + ok. + +init_per_testcase(TestCase, Config) -> + common_init_per_testcase(TestCase, Config). + +end_per_testcase(_Testcase, Config) -> + common_end_per_testcase(_Testcase, Config). + +common_init_per_testcase(TestCase, Config0) -> + ct:timetrap(timer:seconds(60)), + delete_all_bridges(), + UniqueNum = integer_to_binary(erlang:unique_integer()), + OracleTopic = + << + (atom_to_binary(TestCase))/binary, + UniqueNum/binary + >>, + ConnectionType = ?config(connection_type, Config0), + Config = [{oracle_topic, OracleTopic} | Config0], + {Name, ConfigString, OracleConfig} = oracle_config( + TestCase, ConnectionType, Config + ), + ok = snabbkaffe:start_trace(), + [ + {oracle_name, Name}, + {oracle_config_string, ConfigString}, + {oracle_config, OracleConfig} + | Config + ]. + +common_end_per_testcase(_Testcase, Config) -> + case proplists:get_bool(skip_does_not_apply, Config) of + true -> + ok; + false -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + delete_all_bridges(), + %% in CI, apparently this needs more time since the + %% machines struggle with all the containers running... + emqx_common_test_helpers:call_janitor(60_000), + ok = snabbkaffe:stop(), + ok + end. + +delete_all_bridges() -> + lists:foreach( + fun(#{name := Name, type := Type}) -> + emqx_bridge:remove(Type, Name) + end, + emqx_bridge:list() + ). + +%%------------------------------------------------------------------------------ +%% Helper fns +%%------------------------------------------------------------------------------ +sql_insert_template_for_bridge() -> + "INSERT INTO mqtt_test(topic, msgid, payload, retain) VALUES (${topic}, ${id}, ${payload}, ${retain})". + +sql_create_table() -> + "CREATE TABLE mqtt_test (topic VARCHAR2(255), msgid VARCHAR2(64), payload NCLOB, retain NUMBER(1))". + +sql_drop_table() -> + "DROP TABLE mqtt_test". + +reset_table(Config) -> + ResourceId = resource_id(Config), + _ = emqx_resource:simple_sync_query(ResourceId, {sql, sql_drop_table()}), + {ok, [{proc_result, 0, _}]} = emqx_resource:simple_sync_query( + ResourceId, {sql, sql_create_table()} + ), + ok. + +drop_table(Config) -> + ResourceId = resource_id(Config), + emqx_resource:simple_sync_query(ResourceId, {query, sql_drop_table()}), + ok. 
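+
+%% As asserted in reset_table/1 above, the Oracle driver reports a successful
+%% DDL statement (here, CREATE TABLE) as {ok, [{proc_result, 0, _}]}.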
+ +oracle_config(TestCase, _ConnectionType, Config) -> + UniqueNum = integer_to_binary(erlang:unique_integer()), + OracleHost = ?config(oracle_host, Config), + OraclePort = ?config(oracle_port, Config), + Name = << + (atom_to_binary(TestCase))/binary, UniqueNum/binary + >>, + ServerURL = iolist_to_binary([ + OracleHost, + ":", + integer_to_binary(OraclePort) + ]), + ConfigString = + io_lib:format( + "bridges.oracle.~s {\n" + " enable = true\n" + " database = \"~s\"\n" + " sid = \"~s\"\n" + " server = \"~s\"\n" + " username = \"system\"\n" + " password = \"oracle\"\n" + " pool_size = 1\n" + " sql = \"~s\"\n" + " resource_opts = {\n" + " auto_restart_interval = 5000\n" + " request_timeout = 30000\n" + " query_mode = \"async\"\n" + " enable_batch = true\n" + " batch_size = 3\n" + " batch_time = \"3s\"\n" + " worker_pool_size = 1\n" + " }\n" + "}\n", + [ + Name, + ?DATABASE, + ?DATABASE, + ServerURL, + sql_insert_template_for_bridge() + ] + ), + {Name, ConfigString, parse_and_check(ConfigString, Name)}. + +parse_and_check(ConfigString, Name) -> + {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), + TypeBin = ?BRIDGE_TYPE_BIN, + hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}), + #{<<"bridges">> := #{TypeBin := #{Name := Config}}} = RawConf, + Config. + +resource_id(Config) -> + Type = ?BRIDGE_TYPE_BIN, + Name = ?config(oracle_name, Config), + emqx_bridge_resource:resource_id(Type, Name). + +create_bridge(Config) -> + create_bridge(Config, _Overrides = #{}). + +create_bridge(Config, Overrides) -> + Type = ?BRIDGE_TYPE_BIN, + Name = ?config(oracle_name, Config), + OracleConfig0 = ?config(oracle_config, Config), + OracleConfig = emqx_utils_maps:deep_merge(OracleConfig0, Overrides), + emqx_bridge:create(Type, Name, OracleConfig). + +create_bridge_api(Config) -> + create_bridge_api(Config, _Overrides = #{}). + +create_bridge_api(Config, Overrides) -> + TypeBin = ?BRIDGE_TYPE_BIN, + Name = ?config(oracle_name, Config), + OracleConfig0 = ?config(oracle_config, Config), + OracleConfig = emqx_utils_maps:deep_merge(OracleConfig0, Overrides), + Params = OracleConfig#{<<"type">> => TypeBin, <<"name">> => Name}, + Path = emqx_mgmt_api_test_util:api_path(["bridges"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("creating bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of + {ok, {Status, Headers, Body0}} -> + {ok, {Status, Headers, emqx_utils_json:decode(Body0, [return_maps])}}; + Error -> + Error + end, + ct:pal("bridge create result: ~p", [Res]), + Res. + +update_bridge_api(Config) -> + update_bridge_api(Config, _Overrides = #{}). 
+ +update_bridge_api(Config, Overrides) -> + TypeBin = ?BRIDGE_TYPE_BIN, + Name = ?config(oracle_name, Config), + OracleConfig0 = ?config(oracle_config, Config), + OracleConfig = emqx_utils_maps:deep_merge(OracleConfig0, Overrides), + BridgeId = emqx_bridge_resource:bridge_id(TypeBin, Name), + Params = OracleConfig#{<<"type">> => TypeBin, <<"name">> => Name}, + Path = emqx_mgmt_api_test_util:api_path(["bridges", BridgeId]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("updating bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(put, Path, "", AuthHeader, Params, Opts) of + {ok, {_Status, _Headers, Body0}} -> {ok, emqx_utils_json:decode(Body0, [return_maps])}; + Error -> Error + end, + ct:pal("bridge update result: ~p", [Res]), + Res. + +probe_bridge_api(Config) -> + probe_bridge_api(Config, _Overrides = #{}). + +probe_bridge_api(Config, _Overrides) -> + TypeBin = ?BRIDGE_TYPE_BIN, + Name = ?config(oracle_name, Config), + OracleConfig = ?config(oracle_config, Config), + Params = OracleConfig#{<<"type">> => TypeBin, <<"name">> => Name}, + Path = emqx_mgmt_api_test_util:api_path(["bridges_probe"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("probing bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of + {ok, {{_, 204, _}, _Headers, _Body0} = Res0} -> {ok, Res0}; + Error -> Error + end, + ct:pal("bridge probe result: ~p", [Res]), + Res. + +create_rule_and_action_http(Config) -> + OracleName = ?config(oracle_name, Config), + BridgeId = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_BIN, OracleName), + Params = #{ + enable => true, + sql => <<"SELECT * FROM \"", ?RULE_TOPIC, "\"">>, + actions => [BridgeId] + }, + Path = emqx_mgmt_api_test_util:api_path(["rules"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + ct:pal("rule action params: ~p", [Params]), + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of + {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; + Error -> Error + end. + +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ + +t_sync_query(Config) -> + ResourceId = resource_id(Config), + ?check_trace( + begin + ?assertMatch({ok, _}, create_bridge_api(Config)), + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + reset_table(Config), + MsgId = erlang:unique_integer(), + Params = #{ + topic => ?config(mqtt_topic, Config), + id => MsgId, + payload => ?config(oracle_name, Config), + retain => true + }, + Message = {send_message, Params}, + ?assertEqual( + {ok, [{affected_rows, 1}]}, emqx_resource:simple_sync_query(ResourceId, Message) + ), + ok + end, + [] + ), + ok. 
+
+t_batch_sync_query(Config) ->
+    ProxyPort = ?config(proxy_port, Config),
+    ProxyHost = ?config(proxy_host, Config),
+    ProxyName = ?config(proxy_name, Config),
+    ResourceId = resource_id(Config),
+    ?check_trace(
+        begin
+            ?assertMatch({ok, _}, create_bridge_api(Config)),
+            ?retry(
+                _Sleep = 1_000,
+                _Attempts = 30,
+                ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
+            ),
+            reset_table(Config),
+            MsgId = erlang:unique_integer(),
+            Params = #{
+                topic => ?config(mqtt_topic, Config),
+                id => MsgId,
+                payload => ?config(oracle_name, Config),
+                retain => false
+            },
+            % Send 3 async messages while the resource is down. When it comes back up,
+            % they will be delivered synchronously. If we sent sync messages directly,
+            % they would be sent async anyway, since callback_mode is async_if_possible.
+            Message = {send_message, Params},
+            emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
+                ct:sleep(1000),
+                emqx_resource:query(ResourceId, Message),
+                emqx_resource:query(ResourceId, Message),
+                emqx_resource:query(ResourceId, Message)
+            end),
+            ?retry(
+                _Sleep = 1_000,
+                _Attempts = 30,
+                ?assertMatch(
+                    {ok, [{result_set, _, _, [[{3}]]}]},
+                    emqx_resource:simple_sync_query(
+                        ResourceId, {query, "SELECT COUNT(*) FROM mqtt_test"}
+                    )
+                )
+            ),
+            ok
+        end,
+        []
+    ),
+    ok.
+
+t_create_via_http(Config) ->
+    ?check_trace(
+        begin
+            ?assertMatch({ok, _}, create_bridge_api(Config)),
+
+            %% lightweight matrix testing some configs
+            ?assertMatch(
+                {ok, _},
+                update_bridge_api(
+                    Config,
+                    #{
+                        <<"resource_opts">> =>
+                            #{<<"batch_size">> => 4}
+                    }
+                )
+            ),
+            ?assertMatch(
+                {ok, _},
+                update_bridge_api(
+                    Config,
+                    #{
+                        <<"resource_opts">> =>
+                            #{<<"batch_time">> => <<"4s">>}
+                    }
+                )
+            ),
+            ok
+        end,
+        []
+    ),
+    ok.
+
+t_start_stop(Config) ->
+    OracleName = ?config(oracle_name, Config),
+    ResourceId = resource_id(Config),
+    ?check_trace(
+        begin
+            ?assertMatch({ok, _}, create_bridge(Config)),
+            %% Since the connection process is async, we give it some time to
+            %% stabilize and avoid flakiness.
+            ?retry(
+                _Sleep = 1_000,
+                _Attempts = 20,
+                ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
+            ),
+
+            %% Check that the bridge probe API doesn't leak atoms.
+            ProbeRes0 = probe_bridge_api(
+                Config,
+                #{<<"resource_opts">> => #{<<"health_check_interval">> => <<"1s">>}}
+            ),
+            ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0),
+            AtomsBefore = erlang:system_info(atom_count),
+            %% Probe again; shouldn't have created more atoms.
+            ProbeRes1 = probe_bridge_api(
+                Config,
+                #{<<"resource_opts">> => #{<<"health_check_interval">> => <<"1s">>}}
+            ),
+
+            ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1),
+            AtomsAfter = erlang:system_info(atom_count),
+            ?assertEqual(AtomsBefore, AtomsAfter),
+
+            %% Now stop the bridge.
+            ?assertMatch(
+                {{ok, _}, {ok, _}},
+                ?wait_async_action(
+                    emqx_bridge:disable_enable(disable, ?BRIDGE_TYPE_BIN, OracleName),
+                    #{?snk_kind := oracle_bridge_stopped},
+                    5_000
+                )
+            ),
+
+            ok
+        end,
+        fun(Trace) ->
+            %% one stop event for each probe, plus one for the real stop
+            ?assertMatch([_, _, _], ?of_kind(oracle_bridge_stopped, Trace)),
+            ok
+        end
+    ),
+    ok.
+
+t_on_get_status(Config) ->
+    ProxyPort = ?config(proxy_port, Config),
+    ProxyHost = ?config(proxy_host, Config),
+    ProxyName = ?config(proxy_name, Config),
+    ResourceId = resource_id(Config),
+    ?assertMatch({ok, _}, create_bridge(Config)),
+    %% Since the connection process is async, we give it some time to
+    %% stabilize and avoid flakiness.
+ ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ct:sleep(500), + ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId)) + end), + %% Check that it recovers itself. + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + ok. diff --git a/apps/emqx_bridge_pgsql/BSL.txt b/apps/emqx_bridge_pgsql/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_pgsql/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). 
+
+TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
+AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
+EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
+TITLE.
+
+MariaDB hereby grants you permission to use this License’s text to license
+your works, and to refer to it using the trademark “Business Source License”,
+as long as you comply with the Covenants of Licensor below.
+
+Covenants of Licensor
+
+In consideration of the right to use this License’s text and the “Business
+Source License” name and trademark, Licensor covenants to MariaDB, and to all
+other recipients of the licensed work to be provided by Licensor:
+
+1. To specify as the Change License the GPL Version 2.0 or any later version,
+   or a license that is compatible with GPL Version 2.0 or a later version,
+   where “compatible” means that software provided under the Change License can
+   be included in a program with software provided under GPL Version 2.0 or a
+   later version. Licensor may specify additional Change Licenses without
+   limitation.
+
+2. To either: (a) specify an additional grant of rights to use that does not
+   impose any additional restriction on the right granted in this License, as
+   the Additional Use Grant; or (b) insert the text “None”.
+
+3. To specify a Change Date.
+
+4. Not to modify this License in any other way.
diff --git a/apps/emqx_bridge_pgsql/README.md b/apps/emqx_bridge_pgsql/README.md
new file mode 100644
index 000000000..fc0bd3c6f
--- /dev/null
+++ b/apps/emqx_bridge_pgsql/README.md
@@ -0,0 +1,38 @@
+# EMQX PostgreSQL Bridge
+
+[PostgreSQL](https://github.com/PostgreSQL/PostgreSQL) is an open-source relational
+database management system (RDBMS) that uses and extends the SQL language.
+It is known for its reliability, data integrity, and advanced features such as
+support for JSON, XML, and other data formats.
+
+This application is used to connect EMQX and PostgreSQL. Users can create a
+rule and easily ingest IoT data into PostgreSQL by leveraging
+[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html).
+
+
+# Documentation
+
+- Refer to [Ingest data into PostgreSQL](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-pgsql.html)
+  for how to use the EMQX dashboard to ingest IoT data into PostgreSQL.
+
+- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
+  for an introduction to the EMQX rules engine.
+
+
+# HTTP APIs
+
+- Several APIs are provided for bridge management, including creating, updating,
+  fetching, starting or stopping, and listing bridges.
+
+  Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges)
+  for more detailed information.
+
+
+# Contributing
+
+Please see our [contributing.md](../../CONTRIBUTING.md).
+
+
+# License
+
+EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).
diff --git a/apps/emqx_bridge_pgsql/docker-ct b/apps/emqx_bridge_pgsql/docker-ct
new file mode 100644
index 000000000..81281026b
--- /dev/null
+++ b/apps/emqx_bridge_pgsql/docker-ct
@@ -0,0 +1,2 @@
+toxiproxy
+pgsql
diff --git a/apps/emqx_bridge_pgsql/rebar.config b/apps/emqx_bridge_pgsql/rebar.config
new file mode 100644
index 000000000..87c145f26
--- /dev/null
+++ b/apps/emqx_bridge_pgsql/rebar.config
@@ -0,0 +1,7 @@
+{erl_opts, [debug_info]}.
+ +{deps, [ + {emqx_connector, {path, "../../apps/emqx_connector"}}, + {emqx_resource, {path, "../../apps/emqx_resource"}}, + {emqx_bridge, {path, "../../apps/emqx_bridge"}} +]}. diff --git a/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.app.src b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.app.src new file mode 100644 index 000000000..a310b46b4 --- /dev/null +++ b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_pgsql, [ + {description, "EMQX Enterprise PostgreSQL Bridge"}, + {vsn, "0.1.1"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl new file mode 100644 index 000000000..4615b6789 --- /dev/null +++ b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl @@ -0,0 +1,111 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_pgsql). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-export([ + conn_bridge_examples/1, + values/2, + fields/2 +]). + +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +-define(DEFAULT_SQL, << + "insert into t_mqtt_msg(msgid, topic, qos, payload, arrived) " + "values (${id}, ${topic}, ${qos}, ${payload}, TO_TIMESTAMP((${timestamp} :: bigint)/1000))" +>>). + +%% ------------------------------------------------------------------------------------------------- +%% api + +conn_bridge_examples(Method) -> + [ + #{ + <<"pgsql">> => #{ + summary => <<"PostgreSQL Bridge">>, + value => values(Method, pgsql) + } + } + ]. + +values(_Method, Type) -> + #{ + enable => true, + type => Type, + name => <<"foo">>, + server => <<"127.0.0.1:5432">>, + database => <<"mqtt">>, + pool_size => 8, + username => <<"root">>, + password => <<"******">>, + sql => ?DEFAULT_SQL, + local_topic => <<"local/topic/#">>, + resource_opts => #{ + worker_pool_size => 8, + health_check_interval => ?HEALTHCHECK_INTERVAL_RAW, + auto_restart_interval => ?AUTO_RESTART_INTERVAL_RAW, + batch_size => ?DEFAULT_BATCH_SIZE, + batch_time => ?DEFAULT_BATCH_TIME, + query_mode => async, + max_buffer_bytes => ?DEFAULT_BUFFER_BYTES + } + }. + +%% ------------------------------------------------------------------------------------------------- +%% Hocon Schema Definitions +namespace() -> "bridge_pgsql". + +roots() -> []. + +fields("config") -> + [ + {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, + {sql, + mk( + binary(), + #{desc => ?DESC("sql_template"), default => ?DEFAULT_SQL, format => <<"sql">>} + )}, + {local_topic, + mk( + binary(), + #{desc => ?DESC("local_topic"), default => undefined} + )} + ] ++ emqx_resource_schema:fields("resource_opts") ++ + (emqx_connector_pgsql:fields(config) -- + emqx_connector_schema_lib:prepare_statement_fields()); +fields("post") -> + fields("post", pgsql); +fields("put") -> + fields("config"); +fields("get") -> + emqx_bridge_schema:status_fields() ++ fields("post"). + +fields("post", Type) -> + [type_field(Type), name_field() | fields("config")]. 
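+
+%% Note on ?DEFAULT_SQL above: the ${...} placeholders (${id}, ${topic},
+%% ${qos}, ${payload}, ${timestamp}) are filled in from the message/rule
+%% context when the template is rendered; ${timestamp} is in milliseconds,
+%% hence the division by 1000 before TO_TIMESTAMP.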
+ +desc("config") -> + ?DESC("desc_config"); +desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> + ["Configuration for PostgreSQL using `", string:to_upper(Method), "` method."]; +desc(_) -> + undefined. + +%% ------------------------------------------------------------------------------------------------- + +type_field(Type) -> + {type, mk(enum([Type]), #{required => true, desc => ?DESC("desc_type")})}. + +name_field() -> + {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. diff --git a/apps/emqx_bridge_pgsql/test/emqx_bridge_pgsql_SUITE.erl b/apps/emqx_bridge_pgsql/test/emqx_bridge_pgsql_SUITE.erl new file mode 100644 index 000000000..9f2011779 --- /dev/null +++ b/apps/emqx_bridge_pgsql/test/emqx_bridge_pgsql_SUITE.erl @@ -0,0 +1,622 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_pgsql_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +% SQL definitions +-define(SQL_BRIDGE, + "INSERT INTO mqtt_test(payload, arrived) " + "VALUES (${payload}, TO_TIMESTAMP((${timestamp} :: bigint)/1000))" +). +-define(SQL_CREATE_TABLE, + "CREATE TABLE IF NOT EXISTS mqtt_test (payload text, arrived timestamp NOT NULL) " +). +-define(SQL_DROP_TABLE, "DROP TABLE mqtt_test"). +-define(SQL_DELETE, "DELETE from mqtt_test"). +-define(SQL_SELECT, "SELECT payload FROM mqtt_test"). + +% DB defaults +-define(PGSQL_DATABASE, "mqtt"). +-define(PGSQL_USERNAME, "root"). +-define(PGSQL_PASSWORD, "public"). +-define(BATCH_SIZE, 10). + +%%------------------------------------------------------------------------------ +%% CT boilerplate +%%------------------------------------------------------------------------------ + +all() -> + [ + {group, tcp}, + {group, tls} + ]. + +groups() -> + TCs = emqx_common_test_helpers:all(?MODULE), + NonBatchCases = [t_write_timeout], + BatchVariantGroups = [ + {group, with_batch}, + {group, without_batch}, + {group, matrix}, + {group, timescale} + ], + QueryModeGroups = [{async, BatchVariantGroups}, {sync, BatchVariantGroups}], + [ + {tcp, QueryModeGroups}, + {tls, QueryModeGroups}, + {async, BatchVariantGroups}, + {sync, BatchVariantGroups}, + {with_batch, TCs -- NonBatchCases}, + {without_batch, TCs}, + {matrix, [t_setup_via_config_and_publish, t_setup_via_http_api_and_publish]}, + {timescale, [t_setup_via_config_and_publish, t_setup_via_http_api_and_publish]} + ]. 
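+
+%% The group definitions above nest: tcp/tls wrap the async/sync query modes,
+%% which in turn wrap the batching variants, so each test case runs once per
+%% transport x query-mode x batching combination.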
+ +init_per_group(tcp, Config) -> + Host = os:getenv("PGSQL_TCP_HOST", "toxiproxy"), + Port = list_to_integer(os:getenv("PGSQL_TCP_PORT", "5432")), + [ + {pgsql_host, Host}, + {pgsql_port, Port}, + {enable_tls, false}, + {proxy_name, "pgsql_tcp"} + | Config + ]; +init_per_group(tls, Config) -> + Host = os:getenv("PGSQL_TLS_HOST", "toxiproxy"), + Port = list_to_integer(os:getenv("PGSQL_TLS_PORT", "5433")), + [ + {pgsql_host, Host}, + {pgsql_port, Port}, + {enable_tls, true}, + {proxy_name, "pgsql_tls"} + | Config + ]; +init_per_group(async, Config) -> + [{query_mode, async} | Config]; +init_per_group(sync, Config) -> + [{query_mode, sync} | Config]; +init_per_group(with_batch, Config0) -> + Config = [{enable_batch, true} | Config0], + common_init(Config); +init_per_group(without_batch, Config0) -> + Config = [{enable_batch, false} | Config0], + common_init(Config); +init_per_group(matrix, Config0) -> + Config = [{bridge_type, <<"matrix">>}, {enable_batch, true} | Config0], + common_init(Config); +init_per_group(timescale, Config0) -> + Config = [{bridge_type, <<"timescale">>}, {enable_batch, true} | Config0], + common_init(Config); +init_per_group(_Group, Config) -> + Config. + +end_per_group(Group, Config) when Group =:= with_batch; Group =:= without_batch -> + connect_and_drop_table(Config), + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + ok; +end_per_group(_Group, _Config) -> + ok. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + emqx_mgmt_api_test_util:end_suite(), + ok = emqx_common_test_helpers:stop_apps([emqx_bridge, emqx_conf]), + ok. + +init_per_testcase(_Testcase, Config) -> + connect_and_clear_table(Config), + delete_bridge(Config), + snabbkaffe:start_trace(), + Config. + +end_per_testcase(_Testcase, Config) -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + connect_and_clear_table(Config), + ok = snabbkaffe:stop(), + delete_bridge(Config), + ok. + +%%------------------------------------------------------------------------------ +%% Helper fns +%%------------------------------------------------------------------------------ + +common_init(Config0) -> + BridgeType = proplists:get_value(bridge_type, Config0, <<"pgsql">>), + Host = ?config(pgsql_host, Config0), + Port = ?config(pgsql_port, Config0), + case emqx_common_test_helpers:is_tcp_server_available(Host, Port) of + true -> + % Setup toxiproxy + ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"), + ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + % Ensure EE bridge module is loaded + _ = application:load(emqx_ee_bridge), + _ = emqx_ee_bridge:module_info(), + ok = emqx_common_test_helpers:start_apps([emqx_conf, emqx_bridge]), + emqx_mgmt_api_test_util:init_suite(), + % Connect to pgsql directly and create the table + connect_and_create_table(Config0), + {Name, PGConf} = pgsql_config(BridgeType, Config0), + Config = + [ + {pgsql_config, PGConf}, + {pgsql_bridge_type, BridgeType}, + {pgsql_name, Name}, + {proxy_host, ProxyHost}, + {proxy_port, ProxyPort} + | Config0 + ], + Config; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_pgsql); + _ -> + {skip, no_pgsql} + end + end. 
+ +pgsql_config(BridgeType, Config) -> + Port = integer_to_list(?config(pgsql_port, Config)), + Server = ?config(pgsql_host, Config) ++ ":" ++ Port, + Name = atom_to_binary(?MODULE), + BatchSize = + case ?config(enable_batch, Config) of + true -> ?BATCH_SIZE; + false -> 1 + end, + QueryMode = ?config(query_mode, Config), + TlsEnabled = ?config(enable_tls, Config), + ConfigString = + io_lib:format( + "bridges.~s.~s {\n" + " enable = true\n" + " server = ~p\n" + " database = ~p\n" + " username = ~p\n" + " password = ~p\n" + " sql = ~p\n" + " resource_opts = {\n" + " request_timeout = 500ms\n" + " batch_size = ~b\n" + " query_mode = ~s\n" + " }\n" + " ssl = {\n" + " enable = ~w\n" + " }\n" + "}", + [ + BridgeType, + Name, + Server, + ?PGSQL_DATABASE, + ?PGSQL_USERNAME, + ?PGSQL_PASSWORD, + ?SQL_BRIDGE, + BatchSize, + QueryMode, + TlsEnabled + ] + ), + {Name, parse_and_check(ConfigString, BridgeType, Name)}. + +parse_and_check(ConfigString, BridgeType, Name) -> + {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), + hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}), + #{<<"bridges">> := #{BridgeType := #{Name := Config}}} = RawConf, + Config. + +create_bridge(Config) -> + create_bridge(Config, _Overrides = #{}). + +create_bridge(Config, Overrides) -> + BridgeType = ?config(pgsql_bridge_type, Config), + Name = ?config(pgsql_name, Config), + PGConfig0 = ?config(pgsql_config, Config), + PGConfig = emqx_utils_maps:deep_merge(PGConfig0, Overrides), + emqx_bridge:create(BridgeType, Name, PGConfig). + +delete_bridge(Config) -> + BridgeType = ?config(pgsql_bridge_type, Config), + Name = ?config(pgsql_name, Config), + emqx_bridge:remove(BridgeType, Name). + +create_bridge_http(Params) -> + Path = emqx_mgmt_api_test_util:api_path(["bridges"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of + {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; + Error -> Error + end. + +send_message(Config, Payload) -> + Name = ?config(pgsql_name, Config), + BridgeType = ?config(pgsql_bridge_type, Config), + BridgeID = emqx_bridge_resource:bridge_id(BridgeType, Name), + emqx_bridge:send_message(BridgeID, Payload). + +query_resource(Config, Request) -> + Name = ?config(pgsql_name, Config), + BridgeType = ?config(pgsql_bridge_type, Config), + ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), + emqx_resource:query(ResourceID, Request, #{timeout => 1_000}). + +query_resource_async(Config, Request) -> + Name = ?config(pgsql_name, Config), + BridgeType = ?config(pgsql_bridge_type, Config), + Ref = alias([reply]), + AsyncReplyFun = fun(Result) -> Ref ! {result, Ref, Result} end, + ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), + Return = emqx_resource:query(ResourceID, Request, #{ + timeout => 500, async_reply_fun => {AsyncReplyFun, []} + }), + {Return, Ref}. + +receive_result(Ref, Timeout) -> + receive + {result, Ref, Result} -> + {ok, Result}; + {Ref, Result} -> + {ok, Result} + after Timeout -> + timeout + end. 
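+
+%% receive_result/2 pairs with query_resource_async/2 above: the process alias
+%% created there is installed as the async reply callback, so the buffer
+%% worker's result eventually arrives as a {result, Ref, Result} message.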
+ +connect_direct_pgsql(Config) -> + Opts = #{ + host => ?config(pgsql_host, Config), + port => ?config(pgsql_port, Config), + username => ?PGSQL_USERNAME, + password => ?PGSQL_PASSWORD, + database => ?PGSQL_DATABASE + }, + + SslOpts = + case ?config(enable_tls, Config) of + true -> + Opts#{ + ssl => true, + ssl_opts => emqx_tls_lib:to_client_opts(#{enable => true}) + }; + false -> + Opts + end, + {ok, Con} = epgsql:connect(SslOpts), + Con. + +% These funs connect and then stop the pgsql connection +connect_and_create_table(Config) -> + Con = connect_direct_pgsql(Config), + {ok, _, _} = epgsql:squery(Con, ?SQL_CREATE_TABLE), + ok = epgsql:close(Con). + +connect_and_drop_table(Config) -> + Con = connect_direct_pgsql(Config), + {ok, _, _} = epgsql:squery(Con, ?SQL_DROP_TABLE), + ok = epgsql:close(Con). + +connect_and_clear_table(Config) -> + Con = connect_direct_pgsql(Config), + {ok, _} = epgsql:squery(Con, ?SQL_DELETE), + ok = epgsql:close(Con). + +connect_and_get_payload(Config) -> + Con = connect_direct_pgsql(Config), + {ok, _, [{Result}]} = epgsql:squery(Con, ?SQL_SELECT), + ok = epgsql:close(Con), + Result. + +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ + +t_setup_via_config_and_publish(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + Val = integer_to_binary(erlang:unique_integer()), + SentData = #{payload => Val, timestamp => 1668602148000}, + ?check_trace( + begin + {_, {ok, _}} = + ?wait_async_action( + send_message(Config, SentData), + #{?snk_kind := pgsql_connector_query_return}, + 10_000 + ), + ?assertMatch( + Val, + connect_and_get_payload(Config) + ), + ok + end, + fun(Trace0) -> + Trace = ?of_kind(pgsql_connector_query_return, Trace0), + case ?config(enable_batch, Config) of + true -> + ?assertMatch([#{result := {_, [{ok, 1}]}}], Trace); + false -> + ?assertMatch([#{result := {ok, 1}}], Trace) + end, + ok + end + ), + ok. + +t_setup_via_http_api_and_publish(Config) -> + BridgeType = ?config(pgsql_bridge_type, Config), + Name = ?config(pgsql_name, Config), + PgsqlConfig0 = ?config(pgsql_config, Config), + QueryMode = ?config(query_mode, Config), + PgsqlConfig = PgsqlConfig0#{ + <<"name">> => Name, + <<"type">> => BridgeType + }, + ?assertMatch( + {ok, _}, + create_bridge_http(PgsqlConfig) + ), + Val = integer_to_binary(erlang:unique_integer()), + SentData = #{payload => Val, timestamp => 1668602148000}, + ?check_trace( + begin + {Res, {ok, _}} = + ?wait_async_action( + send_message(Config, SentData), + #{?snk_kind := pgsql_connector_query_return}, + 10_000 + ), + case QueryMode of + async -> + ok; + sync -> + ?assertEqual({ok, 1}, Res) + end, + ?assertMatch( + Val, + connect_and_get_payload(Config) + ), + ok + end, + fun(Trace0) -> + Trace = ?of_kind(pgsql_connector_query_return, Trace0), + case ?config(enable_batch, Config) of + true -> + ?assertMatch([#{result := {_, [{ok, 1}]}}], Trace); + false -> + ?assertMatch([#{result := {ok, 1}}], Trace) + end, + ok + end + ), + ok. 
+ +t_get_status(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + + Name = ?config(pgsql_name, Config), + BridgeType = ?config(pgsql_bridge_type, Config), + ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), + + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceID)), + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ?assertMatch( + {ok, Status} when Status =:= disconnected orelse Status =:= connecting, + emqx_resource_manager:health_check(ResourceID) + ) + end), + ok. + +t_create_disconnected(Config) -> + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + ?check_trace( + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ?assertMatch({ok, _}, create_bridge(Config)) + end), + fun(Trace) -> + ?assertMatch( + [#{error := {start_pool_failed, _, _}}], + ?of_kind(pgsql_connector_start_failed, Trace) + ), + ok + end + ), + ok. + +t_write_failure(Config) -> + ProxyName = ?config(proxy_name, Config), + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + QueryMode = ?config(query_mode, Config), + {ok, _} = create_bridge(Config), + Val = integer_to_binary(erlang:unique_integer()), + SentData = #{payload => Val, timestamp => 1668602148000}, + ?check_trace( + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + {_, {ok, _}} = + ?wait_async_action( + case QueryMode of + sync -> + ?assertMatch({error, _}, send_message(Config, SentData)); + async -> + send_message(Config, SentData) + end, + #{?snk_kind := buffer_worker_flush_nack}, + 1_000 + ) + end), + fun(Trace0) -> + ct:pal("trace: ~p", [Trace0]), + Trace = ?of_kind(buffer_worker_flush_nack, Trace0), + ?assertMatch([#{result := {error, _}} | _], Trace), + [#{result := {error, Error}} | _] = Trace, + case Error of + {resource_error, _} -> + ok; + {recoverable_error, disconnected} -> + ok; + _ -> + ct:fail("unexpected error: ~p", [Error]) + end + end + ), + ok. 
+ +% This test doesn't work with batch enabled since it is not possible +% to set the timeout directly for batch queries +t_write_timeout(Config) -> + ProxyName = ?config(proxy_name, Config), + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + QueryMode = ?config(query_mode, Config), + {ok, _} = create_bridge( + Config, + #{ + <<"resource_opts">> => #{ + <<"request_timeout">> => 500, + <<"resume_interval">> => 100, + <<"health_check_interval">> => 100 + } + } + ), + Val = integer_to_binary(erlang:unique_integer()), + SentData = #{payload => Val, timestamp => 1668602148000}, + {ok, SRef} = snabbkaffe:subscribe( + ?match_event(#{?snk_kind := call_query_enter}), + 2_000 + ), + Res0 = + emqx_common_test_helpers:with_failure(timeout, ProxyName, ProxyHost, ProxyPort, fun() -> + Res1 = + case QueryMode of + async -> + query_resource_async(Config, {send_message, SentData}); + sync -> + query_resource(Config, {send_message, SentData}) + end, + ?assertMatch({ok, [_]}, snabbkaffe:receive_events(SRef)), + Res1 + end), + case Res0 of + {_, Ref} when is_reference(Ref) -> + case receive_result(Ref, 15_000) of + {ok, Res} -> + ?assertMatch({error, {unrecoverable_error, _}}, Res); + timeout -> + ct:pal("mailbox:\n ~p", [process_info(self(), messages)]), + ct:fail("no response received") + end; + _ -> + ?assertMatch({error, {resource_error, #{reason := timeout}}}, Res0) + end, + ok. + +t_simple_sql_query(Config) -> + EnableBatch = ?config(enable_batch, Config), + QueryMode = ?config(query_mode, Config), + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + Request = {sql, <<"SELECT count(1) AS T">>}, + Result = + case QueryMode of + sync -> + query_resource(Config, Request); + async -> + {_, Ref} = query_resource_async(Config, Request), + {ok, Res} = receive_result(Ref, 2_000), + Res + end, + case EnableBatch of + true -> + ?assertEqual({error, {unrecoverable_error, batch_prepare_not_implemented}}, Result); + false -> + ?assertMatch({ok, _, [{1}]}, Result) + end, + ok. + +t_missing_data(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + {_, {ok, Event}} = + ?wait_async_action( + send_message(Config, #{}), + #{?snk_kind := buffer_worker_flush_ack}, + 2_000 + ), + ?assertMatch( + #{ + result := + {error, + {unrecoverable_error, {error, error, <<"23502">>, not_null_violation, _, _}}} + }, + Event + ), + ok. + +t_bad_sql_parameter(Config) -> + QueryMode = ?config(query_mode, Config), + EnableBatch = ?config(enable_batch, Config), + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + Request = {sql, <<"">>, [bad_parameter]}, + Result = + case QueryMode of + sync -> + query_resource(Config, Request); + async -> + {_, Ref} = query_resource_async(Config, Request), + {ok, Res} = receive_result(Ref, 2_000), + Res + end, + case EnableBatch of + true -> + ?assertEqual({error, {unrecoverable_error, invalid_request}}, Result); + false -> + ?assertMatch( + {error, {unrecoverable_error, _}}, Result + ) + end, + ok. + +t_nasty_sql_string(Config) -> + ?assertMatch({ok, _}, create_bridge(Config)), + Payload = list_to_binary(lists:seq(1, 127)), + Message = #{payload => Payload, timestamp => erlang:system_time(millisecond)}, + {_, {ok, _}} = + ?wait_async_action( + send_message(Config, Message), + #{?snk_kind := pgsql_connector_query_return}, + 1_000 + ), + ?assertEqual(Payload, connect_and_get_payload(Config)). 
diff --git a/apps/emqx_bridge_pulsar/.gitignore b/apps/emqx_bridge_pulsar/.gitignore new file mode 100644 index 000000000..f1c455451 --- /dev/null +++ b/apps/emqx_bridge_pulsar/.gitignore @@ -0,0 +1,19 @@ +.rebar3 +_* +.eunit +*.o +*.beam +*.plt +*.swp +*.swo +.erlang.cookie +ebin +log +erl_crash.dump +.rebar +logs +_build +.idea +*.iml +rebar3.crashdump +*~ diff --git a/apps/emqx_bridge_pulsar/BSL.txt b/apps/emqx_bridge_pulsar/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_pulsar/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. 
LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
+EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
+TITLE.
+
+MariaDB hereby grants you permission to use this License’s text to license
+your works, and to refer to it using the trademark “Business Source License”,
+as long as you comply with the Covenants of Licensor below.
+
+Covenants of Licensor
+
+In consideration of the right to use this License’s text and the “Business
+Source License” name and trademark, Licensor covenants to MariaDB, and to all
+other recipients of the licensed work to be provided by Licensor:
+
+1. To specify as the Change License the GPL Version 2.0 or any later version,
+   or a license that is compatible with GPL Version 2.0 or a later version,
+   where “compatible” means that software provided under the Change License can
+   be included in a program with software provided under GPL Version 2.0 or a
+   later version. Licensor may specify additional Change Licenses without
+   limitation.
+
+2. To either: (a) specify an additional grant of rights to use that does not
+   impose any additional restriction on the right granted in this License, as
+   the Additional Use Grant; or (b) insert the text “None”.
+
+3. To specify a Change Date.
+
+4. Not to modify this License in any other way.
diff --git a/apps/emqx_bridge_pulsar/README.md b/apps/emqx_bridge_pulsar/README.md
new file mode 100644
index 000000000..fbd8bf81d
--- /dev/null
+++ b/apps/emqx_bridge_pulsar/README.md
@@ -0,0 +1,30 @@
+# Pulsar Data Integration Bridge
+
+This application houses the Pulsar Producer data integration bridge
+for EMQX Enterprise Edition. It provides the means to connect to
+Pulsar and publish messages to it.
+
+Currently, our Pulsar Producer library has its own `replayq` buffering
+implementation, so this bridge does not require buffer workers from
+`emqx_resource`. It implements connection management and interaction
+without the need for a separate connector app, since it's not used by
+the authentication and authorization applications.
+
+# Documentation links
+
+For more information on Apache Pulsar, please see its [official
+site](https://pulsar.apache.org/).
+
+# Configurations
+
+Please see [our official
+documentation](https://www.emqx.io/docs/en/v5.0/data-integration/data-bridge-pulsar.html)
+for more detailed info.
+
+# Contributing
+
+Please see our [contributing.md](../../CONTRIBUTING.md).
+
+# License
+
+EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).
diff --git a/apps/emqx_bridge_pulsar/docker-ct b/apps/emqx_bridge_pulsar/docker-ct
new file mode 100644
index 000000000..6324bb4f7
--- /dev/null
+++ b/apps/emqx_bridge_pulsar/docker-ct
@@ -0,0 +1,2 @@
+toxiproxy
+pulsar
diff --git a/apps/emqx_bridge_pulsar/etc/emqx_bridge_pulsar.conf b/apps/emqx_bridge_pulsar/etc/emqx_bridge_pulsar.conf
new file mode 100644
index 000000000..e69de29bb
diff --git a/apps/emqx_bridge_pulsar/include/emqx_bridge_pulsar.hrl b/apps/emqx_bridge_pulsar/include/emqx_bridge_pulsar.hrl
new file mode 100644
index 000000000..5ee87e48f
--- /dev/null
+++ b/apps/emqx_bridge_pulsar/include/emqx_bridge_pulsar.hrl
@@ -0,0 +1,14 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+
+-ifndef(EMQX_BRIDGE_PULSAR_HRL).
+-define(EMQX_BRIDGE_PULSAR_HRL, true).
+ +-define(PULSAR_HOST_OPTIONS, #{ + default_port => 6650, + default_scheme => "pulsar", + supported_schemes => ["pulsar", "pulsar+ssl"] +}). + +-endif. diff --git a/apps/emqx_bridge_pulsar/rebar.config b/apps/emqx_bridge_pulsar/rebar.config new file mode 100644 index 000000000..d5a63f320 --- /dev/null +++ b/apps/emqx_bridge_pulsar/rebar.config @@ -0,0 +1,14 @@ +%% -*- mode: erlang; -*- + +{erl_opts, [debug_info]}. +{deps, [ + {pulsar, {git, "https://github.com/emqx/pulsar-client-erl.git", {tag, "0.8.1"}}}, + {emqx_connector, {path, "../../apps/emqx_connector"}}, + {emqx_resource, {path, "../../apps/emqx_resource"}}, + {emqx_bridge, {path, "../../apps/emqx_bridge"}} +]}. + +{shell, [ + % {config, "config/sys.config"}, + {apps, [emqx_bridge_pulsar]} +]}. diff --git a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.app.src b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.app.src new file mode 100644 index 000000000..b169aa2c4 --- /dev/null +++ b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.app.src @@ -0,0 +1,14 @@ +{application, emqx_bridge_pulsar, [ + {description, "EMQX Pulsar Bridge"}, + {vsn, "0.1.1"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + pulsar + ]}, + {env, []}, + {modules, []}, + + {links, []} +]}. diff --git a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.erl b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.erl new file mode 100644 index 000000000..18faf0e3b --- /dev/null +++ b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.erl @@ -0,0 +1,228 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_pulsar). + +-include("emqx_bridge_pulsar.hrl"). +-include_lib("emqx_connector/include/emqx_connector.hrl"). +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). + +%% hocon_schema API +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). +%% emqx_ee_bridge "unofficial" API +-export([conn_bridge_examples/1]). + +%%------------------------------------------------------------------------------------------------- +%% `hocon_schema' API +%%------------------------------------------------------------------------------------------------- + +namespace() -> + "bridge_pulsar". + +roots() -> + []. 
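+
+%% For orientation, a config that satisfies the `pulsar_producer'
+%% fields declared below might look like this in HOCON (illustrative
+%% values; the test suite builds a fuller example):
+%%
+%%   bridges.pulsar_producer.my_producer {
+%%     enable = true
+%%     servers = "pulsar://localhost:6650"
+%%     pulsar_topic = "mqtt-messages"
+%%     authentication = none
+%%   }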
+ +fields(pulsar_producer) -> + fields(config) ++ fields(producer_opts); +fields(config) -> + [ + {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, + {servers, + mk( + binary(), + #{ + required => true, + desc => ?DESC("servers"), + validator => emqx_schema:servers_validator( + ?PULSAR_HOST_OPTIONS, _Required = true + ) + } + )}, + {authentication, + mk(hoconsc:union([none, ref(auth_basic), ref(auth_token)]), #{ + default => none, desc => ?DESC("authentication") + })} + ] ++ emqx_connector_schema_lib:ssl_fields(); +fields(producer_opts) -> + [ + {batch_size, + mk( + pos_integer(), + #{default => 100, desc => ?DESC("producer_batch_size")} + )}, + {compression, + mk( + hoconsc:enum([no_compression, snappy, zlib]), + #{default => no_compression, desc => ?DESC("producer_compression")} + )}, + {send_buffer, + mk(emqx_schema:bytesize(), #{ + default => <<"1MB">>, desc => ?DESC("producer_send_buffer") + })}, + {sync_timeout, + mk(emqx_schema:duration_ms(), #{ + default => <<"3s">>, desc => ?DESC("producer_sync_timeout") + })}, + {retention_period, + mk( + hoconsc:union([infinity, emqx_schema:duration_ms()]), + #{default => infinity, desc => ?DESC("producer_retention_period")} + )}, + {max_batch_bytes, + mk( + emqx_schema:bytesize(), + #{default => <<"900KB">>, desc => ?DESC("producer_max_batch_bytes")} + )}, + {local_topic, mk(binary(), #{required => false, desc => ?DESC("producer_local_topic")})}, + {pulsar_topic, mk(binary(), #{required => true, desc => ?DESC("producer_pulsar_topic")})}, + {strategy, + mk( + hoconsc:enum([random, roundrobin, key_dispatch]), + #{default => random, desc => ?DESC("producer_strategy")} + )}, + {buffer, mk(ref(producer_buffer), #{required => false, desc => ?DESC("producer_buffer")})}, + {message, + mk(ref(producer_pulsar_message), #{ + required => false, desc => ?DESC("producer_message_opts") + })}, + {resource_opts, + mk( + ref(producer_resource_opts), + #{ + required => false, + desc => ?DESC(emqx_resource_schema, "creation_opts") + } + )} + ]; +fields(producer_buffer) -> + [ + {mode, + mk( + hoconsc:enum([memory, disk, hybrid]), + #{default => memory, desc => ?DESC("buffer_mode")} + )}, + {per_partition_limit, + mk( + emqx_schema:bytesize(), + #{default => <<"2GB">>, desc => ?DESC("buffer_per_partition_limit")} + )}, + {segment_bytes, + mk( + emqx_schema:bytesize(), + #{default => <<"100MB">>, desc => ?DESC("buffer_segment_bytes")} + )}, + {memory_overload_protection, + mk(boolean(), #{ + default => false, + desc => ?DESC("buffer_memory_overload_protection") + })} + ]; +fields(producer_pulsar_message) -> + [ + {key, + mk(string(), #{default => <<"${.clientid}">>, desc => ?DESC("producer_key_template")})}, + {value, mk(string(), #{default => <<"${.}">>, desc => ?DESC("producer_value_template")})} + ]; +fields(producer_resource_opts) -> + SupportedOpts = [ + health_check_interval, + resume_interval, + start_after_created, + start_timeout, + auto_restart_interval + ], + lists:filtermap( + fun + ({health_check_interval = Field, MetaFn}) -> + {true, {Field, override_default(MetaFn, 1_000)}}; + ({Field, _Meta}) -> + lists:member(Field, SupportedOpts) + end, + emqx_resource_schema:fields("creation_opts") + ); +fields(auth_basic) -> + [ + {username, mk(binary(), #{required => true, desc => ?DESC("auth_basic_username")})}, + {password, + mk(binary(), #{ + required => true, + desc => ?DESC("auth_basic_password"), + sensitive => true, + converter => fun emqx_schema:password_converter/2 + })} + ]; +fields(auth_token) -> + [ + {jwt, + mk(binary(), 
#{ + required => true, + desc => ?DESC("auth_token_jwt"), + sensitive => true, + converter => fun emqx_schema:password_converter/2 + })} + ]; +fields("get_" ++ Type) -> + emqx_bridge_schema:status_fields() ++ fields("post_" ++ Type); +fields("put_" ++ Type) -> + fields("config_" ++ Type); +fields("post_" ++ Type) -> + [type_field(), name_field() | fields("config_" ++ Type)]; +fields("config_producer") -> + fields(pulsar_producer). + +desc(pulsar_producer) -> + ?DESC(pulsar_producer_struct); +desc(producer_resource_opts) -> + ?DESC(emqx_resource_schema, "creation_opts"); +desc("get_" ++ Type) when Type =:= "producer" -> + ["Configuration for Pulsar using `GET` method."]; +desc("put_" ++ Type) when Type =:= "producer" -> + ["Configuration for Pulsar using `PUT` method."]; +desc("post_" ++ Type) when Type =:= "producer" -> + ["Configuration for Pulsar using `POST` method."]; +desc(Name) -> + lists:member(Name, struct_names()) orelse throw({missing_desc, Name}), + ?DESC(Name). + +conn_bridge_examples(_Method) -> + [ + #{ + <<"pulsar_producer">> => #{ + summary => <<"Pulsar Producer Bridge">>, + value => #{todo => true} + } + } + ]. + +%%------------------------------------------------------------------------------------------------- +%% Internal fns +%%------------------------------------------------------------------------------------------------- + +mk(Type, Meta) -> hoconsc:mk(Type, Meta). +ref(Name) -> hoconsc:ref(?MODULE, Name). + +type_field() -> + {type, mk(hoconsc:enum([pulsar_producer]), #{required => true, desc => ?DESC("desc_type")})}. + +name_field() -> + {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. + +struct_names() -> + [ + auth_basic, + auth_token, + producer_buffer, + producer_pulsar_message + ]. + +override_default(OriginalFn, NewDefault) -> + fun + (default) -> NewDefault; + (Field) -> OriginalFn(Field) + end. diff --git a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl new file mode 100644 index 000000000..300fe9b2d --- /dev/null +++ b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl @@ -0,0 +1,421 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_pulsar_impl_producer). + +-include("emqx_bridge_pulsar.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). +-include_lib("emqx/include/logger.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +%% `emqx_resource' API +-export([ + callback_mode/0, + is_buffer_supported/0, + on_start/2, + on_stop/2, + on_get_status/2, + on_query/3, + on_query_async/4 +]). + +-type pulsar_client_id() :: atom(). +-type state() :: #{ + pulsar_client_id := pulsar_client_id(), + producers := pulsar_producers:producers(), + sync_timeout := infinity | time:time(), + message_template := message_template() +}. +-type buffer_mode() :: memory | disk | hybrid. +-type compression_mode() :: no_compression | snappy | zlib. +-type partition_strategy() :: random | roundrobin | key_dispatch. +-type message_template_raw() :: #{ + key := binary(), + value := binary() +}. +-type message_template() :: #{ + key := emqx_plugin_libs_rule:tmpl_token(), + value := emqx_plugin_libs_rule:tmpl_token() +}. 
+-type config() :: #{ + authentication := _, + batch_size := pos_integer(), + bridge_name := atom(), + buffer := #{ + mode := buffer_mode(), + per_partition_limit := emqx_schema:byte_size(), + segment_bytes := emqx_schema:byte_size(), + memory_overload_protection := boolean() + }, + compression := compression_mode(), + max_batch_bytes := emqx_schema:bytesize(), + message := message_template_raw(), + pulsar_topic := binary(), + retention_period := infinity | emqx_schema:duration_ms(), + send_buffer := emqx_schema:bytesize(), + servers := binary(), + ssl := _, + strategy := partition_strategy(), + sync_timeout := emqx_schema:duration_ms() +}. + +%%------------------------------------------------------------------------------------- +%% `emqx_resource' API +%%------------------------------------------------------------------------------------- + +callback_mode() -> async_if_possible. + +%% there are no queries to be made to this bridge, so we say that +%% buffer is supported so we don't spawn unused resource buffer +%% workers. +is_buffer_supported() -> true. + +-spec on_start(resource_id(), config()) -> {ok, state()}. +on_start(InstanceId, Config) -> + #{ + authentication := _Auth, + bridge_name := BridgeName, + servers := Servers0, + ssl := SSL + } = Config, + Servers = format_servers(Servers0), + ClientId = make_client_id(InstanceId, BridgeName), + SSLOpts = emqx_tls_lib:to_client_opts(SSL), + ClientOpts = #{ + ssl_opts => SSLOpts, + conn_opts => conn_opts(Config) + }, + case pulsar:ensure_supervised_client(ClientId, Servers, ClientOpts) of + {ok, _Pid} -> + ?tp( + info, + "pulsar_client_started", + #{ + instance_id => InstanceId, + pulsar_hosts => Servers + } + ); + {error, Reason} -> + ?SLOG(error, #{ + msg => "failed_to_start_pulsar_client", + instance_id => InstanceId, + pulsar_hosts => Servers, + reason => Reason + }), + throw(failed_to_start_pulsar_client) + end, + start_producer(Config, InstanceId, ClientId, ClientOpts). + +-spec on_stop(resource_id(), state()) -> ok. +on_stop(_InstanceId, State) -> + #{ + pulsar_client_id := ClientId, + producers := Producers + } = State, + stop_producers(ClientId, Producers), + stop_client(ClientId), + ?tp(pulsar_bridge_stopped, #{instance_id => _InstanceId}), + ok. + +-spec on_get_status(resource_id(), state()) -> connected | disconnected. +on_get_status(_InstanceId, State = #{}) -> + #{ + pulsar_client_id := ClientId, + producers := Producers + } = State, + case pulsar_client_sup:find_client(ClientId) of + {ok, Pid} -> + try pulsar_client:get_status(Pid) of + true -> + get_producer_status(Producers); + false -> + disconnected + catch + error:timeout -> + disconnected; + exit:{noproc, _} -> + disconnected + end; + {error, _} -> + disconnected + end; +on_get_status(_InstanceId, _State) -> + %% If a health check happens just after a concurrent request to + %% create the bridge is not quite finished, `State = undefined'. + connecting. + +-spec on_query(resource_id(), {send_message, map()}, state()) -> + {ok, term()} + | {error, timeout} + | {error, term()}. +on_query(_InstanceId, {send_message, Message}, State) -> + #{ + producers := Producers, + sync_timeout := SyncTimeout, + message_template := MessageTemplate + } = State, + PulsarMessage = render_message(Message, MessageTemplate), + try + pulsar:send_sync(Producers, [PulsarMessage], SyncTimeout) + catch + error:timeout -> + {error, timeout} + end. + +-spec on_query_async( + resource_id(), {send_message, map()}, {ReplyFun :: function(), Args :: list()}, state() +) -> + {ok, pid()}. 
+on_query_async(_InstanceId, {send_message, Message}, AsyncReplyFn, State) -> + ?tp_span( + pulsar_producer_on_query_async, + #{instance_id => _InstanceId, message => Message}, + do_on_query_async(Message, AsyncReplyFn, State) + ). + +do_on_query_async(Message, AsyncReplyFn, State) -> + #{ + producers := Producers, + message_template := MessageTemplate + } = State, + PulsarMessage = render_message(Message, MessageTemplate), + pulsar:send(Producers, [PulsarMessage], #{callback_fn => AsyncReplyFn}). + +%%------------------------------------------------------------------------------------- +%% Internal fns +%%------------------------------------------------------------------------------------- + +-spec to_bin(atom() | string() | binary()) -> binary(). +to_bin(A) when is_atom(A) -> + atom_to_binary(A); +to_bin(L) when is_list(L) -> + list_to_binary(L); +to_bin(B) when is_binary(B) -> + B. + +-spec format_servers(binary()) -> [string()]. +format_servers(Servers0) -> + Servers1 = emqx_schema:parse_servers(Servers0, ?PULSAR_HOST_OPTIONS), + lists:map( + fun(#{scheme := Scheme, hostname := Host, port := Port}) -> + Scheme ++ "://" ++ Host ++ ":" ++ integer_to_list(Port) + end, + Servers1 + ). + +-spec make_client_id(resource_id(), atom() | binary()) -> pulsar_client_id(). +make_client_id(InstanceId, BridgeName) -> + case is_dry_run(InstanceId) of + true -> + pulsar_producer_probe; + false -> + ClientIdBin = iolist_to_binary([ + <<"pulsar_producer:">>, + to_bin(BridgeName), + <<":">>, + to_bin(node()) + ]), + binary_to_atom(ClientIdBin) + end. + +-spec is_dry_run(resource_id()) -> boolean(). +is_dry_run(InstanceId) -> + TestIdStart = string:find(InstanceId, ?TEST_ID_PREFIX), + case TestIdStart of + nomatch -> + false; + _ -> + string:equal(TestIdStart, InstanceId) + end. + +conn_opts(#{authentication := none}) -> + #{}; +conn_opts(#{authentication := #{username := Username, password := Password}}) -> + #{ + auth_data => iolist_to_binary([Username, <<":">>, Password]), + auth_method_name => <<"basic">> + }; +conn_opts(#{authentication := #{jwt := JWT}}) -> + #{ + auth_data => JWT, + auth_method_name => <<"token">> + }. + +-spec replayq_dir(pulsar_client_id()) -> string(). +replayq_dir(ClientId) -> + filename:join([emqx:data_dir(), "pulsar", to_bin(ClientId)]). + +-spec producer_name(pulsar_client_id()) -> atom(). +producer_name(ClientId) -> + ClientIdBin = to_bin(ClientId), + binary_to_atom( + iolist_to_binary([ + <<"producer-">>, + ClientIdBin + ]) + ). + +-spec start_producer(config(), resource_id(), pulsar_client_id(), map()) -> {ok, state()}. 
+start_producer(Config, InstanceId, ClientId, ClientOpts) -> + #{ + conn_opts := ConnOpts, + ssl_opts := SSLOpts + } = ClientOpts, + #{ + batch_size := BatchSize, + buffer := #{ + mode := BufferMode, + per_partition_limit := PerPartitionLimit, + segment_bytes := SegmentBytes, + memory_overload_protection := MemOLP0 + }, + compression := Compression, + max_batch_bytes := MaxBatchBytes, + message := MessageTemplateOpts, + pulsar_topic := PulsarTopic0, + retention_period := RetentionPeriod, + send_buffer := SendBuffer, + strategy := Strategy, + sync_timeout := SyncTimeout + } = Config, + {OffloadMode, ReplayQDir} = + case BufferMode of + memory -> {false, false}; + disk -> {false, replayq_dir(ClientId)}; + hybrid -> {true, replayq_dir(ClientId)} + end, + MemOLP = + case os:type() of + {unix, linux} -> MemOLP0; + _ -> false + end, + ReplayQOpts = #{ + replayq_dir => ReplayQDir, + replayq_offload_mode => OffloadMode, + replayq_max_total_bytes => PerPartitionLimit, + replayq_seg_bytes => SegmentBytes, + drop_if_highmem => MemOLP + }, + ProducerName = producer_name(ClientId), + ?tp(pulsar_producer_capture_name, #{producer_name => ProducerName}), + MessageTemplate = compile_message_template(MessageTemplateOpts), + ProducerOpts0 = + #{ + batch_size => BatchSize, + compression => Compression, + conn_opts => ConnOpts, + max_batch_bytes => MaxBatchBytes, + name => ProducerName, + retention_period => RetentionPeriod, + ssl_opts => SSLOpts, + strategy => partition_strategy(Strategy), + tcp_opts => [{sndbuf, SendBuffer}] + }, + ProducerOpts = maps:merge(ReplayQOpts, ProducerOpts0), + PulsarTopic = binary_to_list(PulsarTopic0), + ?tp(pulsar_producer_about_to_start_producers, #{producer_name => ProducerName}), + try pulsar:ensure_supervised_producers(ClientId, PulsarTopic, ProducerOpts) of + {ok, Producers} -> + State = #{ + pulsar_client_id => ClientId, + producers => Producers, + sync_timeout => SyncTimeout, + message_template => MessageTemplate + }, + ?tp(pulsar_producer_bridge_started, #{}), + {ok, State} + catch + Kind:Error:Stacktrace -> + ?tp( + error, + "failed_to_start_pulsar_producer", + #{ + instance_id => InstanceId, + kind => Kind, + reason => Error, + stacktrace => Stacktrace + } + ), + stop_client(ClientId), + throw(failed_to_start_pulsar_producer) + end. + +-spec stop_client(pulsar_client_id()) -> ok. +stop_client(ClientId) -> + _ = log_when_error( + fun() -> + ok = pulsar:stop_and_delete_supervised_client(ClientId), + ?tp(pulsar_bridge_client_stopped, #{pulsar_client_id => ClientId}), + ok + end, + #{ + msg => "failed_to_delete_pulsar_client", + pulsar_client_id => ClientId + } + ), + ok. + +-spec stop_producers(pulsar_client_id(), pulsar_producers:producers()) -> ok. +stop_producers(ClientId, Producers) -> + _ = log_when_error( + fun() -> + ok = pulsar:stop_and_delete_supervised_producers(Producers), + ?tp(pulsar_bridge_producer_stopped, #{pulsar_client_id => ClientId}), + ok + end, + #{ + msg => "failed_to_delete_pulsar_producer", + pulsar_client_id => ClientId + } + ), + ok. + +log_when_error(Fun, Log) -> + try + Fun() + catch + C:E -> + ?SLOG(error, Log#{ + exception => C, + reason => E + }) + end. + +-spec compile_message_template(message_template_raw()) -> message_template(). +compile_message_template(TemplateOpts) -> + KeyTemplate = maps:get(key, TemplateOpts, <<"${.clientid}">>), + ValueTemplate = maps:get(value, TemplateOpts, <<"${.}">>), + #{ + key => preproc_tmpl(KeyTemplate), + value => preproc_tmpl(ValueTemplate) + }. 
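+
+%% Template rendering below: with the schema defaults (`${.clientid}'
+%% for the key and `${.}' for the value), a rule-engine message map
+%% such as `#{clientid => <<"c1">>, payload => <<"hello">>}' should
+%% render to a key of `<<"c1">>' and a value holding the whole map
+%% serialized as a binary; placeholders that resolve to `undefined'
+%% become `<<"">>' via the `var_trans' fun.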
+
+preproc_tmpl(Template) ->
+    emqx_plugin_libs_rule:preproc_tmpl(Template).
+
+render_message(
+    Message, #{key := KeyTemplate, value := ValueTemplate}
+) ->
+    #{
+        key => render(Message, KeyTemplate),
+        value => render(Message, ValueTemplate)
+    }.
+
+render(Message, Template) ->
+    Opts = #{
+        var_trans => fun
+            (undefined) -> <<"">>;
+            (X) -> emqx_plugin_libs_rule:bin(X)
+        end,
+        return => full_binary
+    },
+    emqx_plugin_libs_rule:proc_tmpl(Template, Message, Opts).
+
+get_producer_status(Producers) ->
+    case pulsar_producers:all_connected(Producers) of
+        true -> connected;
+        false -> connecting
+    end.
+
+partition_strategy(key_dispatch) -> first_key_dispatch;
+partition_strategy(Strategy) -> Strategy.
diff --git a/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE.erl b/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE.erl
new file mode 100644
index 000000000..be38f6625
--- /dev/null
+++ b/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE.erl
@@ -0,0 +1,1019 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+-module(emqx_bridge_pulsar_impl_producer_SUITE).
+
+-compile(nowarn_export_all).
+-compile(export_all).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("snabbkaffe/include/snabbkaffe.hrl").
+
+-import(emqx_common_test_helpers, [on_exit/1]).
+
+-define(BRIDGE_TYPE_BIN, <<"pulsar_producer">>).
+-define(APPS, [emqx_bridge, emqx_resource, emqx_rule_engine, emqx_bridge_pulsar]).
+-define(RULE_TOPIC, "mqtt/rule").
+-define(RULE_TOPIC_BIN, <<?RULE_TOPIC>>).
+
+%%------------------------------------------------------------------------------
+%% CT boilerplate
+%%------------------------------------------------------------------------------
+
+all() ->
+    [
+        {group, plain},
+        {group, tls}
+    ].
+
+groups() ->
+    AllTCs = emqx_common_test_helpers:all(?MODULE),
+    OnlyOnceTCs = only_once_tests(),
+    TCs = AllTCs -- OnlyOnceTCs,
+    [
+        {plain, AllTCs},
+        {tls, TCs}
+    ].
+
+only_once_tests() ->
+    [
+        t_create_via_http,
+        t_start_when_down,
+        t_send_when_down,
+        t_send_when_timeout,
+        t_failure_to_start_producer,
+        t_producer_process_crash
+    ].
+
+init_per_suite(Config) ->
+    Config.
+
+end_per_suite(_Config) ->
+    emqx_mgmt_api_test_util:end_suite(),
+    ok = emqx_common_test_helpers:stop_apps([emqx_conf]),
+    ok = emqx_connector_test_helpers:stop_apps(lists:reverse(?APPS)),
+    _ = application:stop(emqx_connector),
+    ok.
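+
+%% The groups below locate the Pulsar broker and the Toxiproxy control
+%% port via environment variables, with defaults matching the CI docker
+%% compose setup. To aim the plain group at another broker, something
+%% like this should work (illustrative values):
+%%
+%%   $ env PULSAR_PLAIN_HOST=localhost PULSAR_PLAIN_PORT=6650 \
+%%         PROXY_HOST=localhost PROXY_PORT=8474 \
+%%         rebar3 ct --suite emqx_bridge_pulsar_impl_producer_SUITE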
+ +init_per_group(plain = Type, Config) -> + PulsarHost = os:getenv("PULSAR_PLAIN_HOST", "toxiproxy"), + PulsarPort = list_to_integer(os:getenv("PULSAR_PLAIN_PORT", "6652")), + ProxyName = "pulsar_plain", + case emqx_common_test_helpers:is_tcp_server_available(PulsarHost, PulsarPort) of + true -> + Config1 = common_init_per_group(), + [ + {proxy_name, ProxyName}, + {pulsar_host, PulsarHost}, + {pulsar_port, PulsarPort}, + {pulsar_type, Type}, + {use_tls, false} + | Config1 ++ Config + ]; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_pulsar); + _ -> + {skip, no_pulsar} + end + end; +init_per_group(tls = Type, Config) -> + PulsarHost = os:getenv("PULSAR_TLS_HOST", "toxiproxy"), + PulsarPort = list_to_integer(os:getenv("PULSAR_TLS_PORT", "6653")), + ProxyName = "pulsar_tls", + case emqx_common_test_helpers:is_tcp_server_available(PulsarHost, PulsarPort) of + true -> + Config1 = common_init_per_group(), + [ + {proxy_name, ProxyName}, + {pulsar_host, PulsarHost}, + {pulsar_port, PulsarPort}, + {pulsar_type, Type}, + {use_tls, true} + | Config1 ++ Config + ]; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_pulsar); + _ -> + {skip, no_pulsar} + end + end; +init_per_group(_Group, Config) -> + Config. + +end_per_group(Group, Config) when + Group =:= plain; + Group =:= tls +-> + common_end_per_group(Config), + ok; +end_per_group(_Group, _Config) -> + ok. + +common_init_per_group() -> + ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"), + ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + application:load(emqx_bridge), + ok = emqx_common_test_helpers:start_apps([emqx_conf]), + ok = emqx_connector_test_helpers:start_apps(?APPS), + {ok, _} = application:ensure_all_started(emqx_connector), + emqx_mgmt_api_test_util:init_suite(), + UniqueNum = integer_to_binary(erlang:unique_integer()), + MQTTTopic = <<"mqtt/topic/", UniqueNum/binary>>, + [ + {proxy_host, ProxyHost}, + {proxy_port, ProxyPort}, + {mqtt_topic, MQTTTopic} + ]. + +common_end_per_group(Config) -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + delete_all_bridges(), + ok. + +init_per_testcase(TestCase, Config) -> + common_init_per_testcase(TestCase, Config). + +end_per_testcase(_Testcase, Config) -> + case proplists:get_bool(skip_does_not_apply, Config) of + true -> + ok; + false -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + delete_all_bridges(), + stop_consumer(Config), + %% in CI, apparently this needs more time since the + %% machines struggle with all the containers running... + emqx_common_test_helpers:call_janitor(60_000), + ok = snabbkaffe:stop(), + ok + end. + +common_init_per_testcase(TestCase, Config0) -> + ct:timetrap(timer:seconds(60)), + delete_all_bridges(), + UniqueNum = integer_to_binary(erlang:unique_integer()), + PulsarTopic = + << + (atom_to_binary(TestCase))/binary, + UniqueNum/binary + >>, + PulsarType = ?config(pulsar_type, Config0), + Config1 = [{pulsar_topic, PulsarTopic} | Config0], + {Name, ConfigString, PulsarConfig} = pulsar_config( + TestCase, PulsarType, Config1 + ), + ConsumerConfig = start_consumer(TestCase, Config1), + Config = ConsumerConfig ++ Config1, + ok = snabbkaffe:start_trace(), + [ + {pulsar_name, Name}, + {pulsar_config_string, ConfigString}, + {pulsar_config, PulsarConfig} + | Config + ]. 
+ +delete_all_bridges() -> + lists:foreach( + fun(#{name := Name, type := Type}) -> + emqx_bridge:remove(Type, Name) + end, + emqx_bridge:list() + ). + +%%------------------------------------------------------------------------------ +%% Helper fns +%%------------------------------------------------------------------------------ + +pulsar_config(TestCase, _PulsarType, Config) -> + UniqueNum = integer_to_binary(erlang:unique_integer()), + PulsarHost = ?config(pulsar_host, Config), + PulsarPort = ?config(pulsar_port, Config), + PulsarTopic = ?config(pulsar_topic, Config), + AuthType = proplists:get_value(sasl_auth_mechanism, Config, none), + UseTLS = proplists:get_value(use_tls, Config, false), + Name = << + (atom_to_binary(TestCase))/binary, UniqueNum/binary + >>, + MQTTTopic = proplists:get_value(mqtt_topic, Config, <<"mqtt/topic/", UniqueNum/binary>>), + Prefix = + case UseTLS of + true -> <<"pulsar+ssl://">>; + false -> <<"pulsar://">> + end, + ServerURL = iolist_to_binary([ + Prefix, + PulsarHost, + ":", + integer_to_binary(PulsarPort) + ]), + ConfigString = + io_lib:format( + "bridges.pulsar_producer.~s {\n" + " enable = true\n" + " servers = \"~s\"\n" + " sync_timeout = 5s\n" + " compression = no_compression\n" + " send_buffer = 1MB\n" + " retention_period = infinity\n" + " max_batch_bytes = 900KB\n" + " batch_size = 1\n" + " strategy = random\n" + " buffer {\n" + " mode = memory\n" + " per_partition_limit = 10MB\n" + " segment_bytes = 5MB\n" + " memory_overload_protection = true\n" + " }\n" + " message {\n" + " key = \"${.clientid}\"\n" + " value = \"${.}\"\n" + " }\n" + "~s" + " ssl {\n" + " enable = ~p\n" + " verify = verify_none\n" + " server_name_indication = \"auto\"\n" + " }\n" + " pulsar_topic = \"~s\"\n" + " local_topic = \"~s\"\n" + "}\n", + [ + Name, + ServerURL, + authentication(AuthType), + UseTLS, + PulsarTopic, + MQTTTopic + ] + ), + {Name, ConfigString, parse_and_check(ConfigString, Name)}. + +parse_and_check(ConfigString, Name) -> + {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), + TypeBin = ?BRIDGE_TYPE_BIN, + hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}), + #{<<"bridges">> := #{TypeBin := #{Name := Config}}} = RawConf, + Config. + +authentication(_) -> + " authentication = none\n". + +resource_id(Config) -> + Type = ?BRIDGE_TYPE_BIN, + Name = ?config(pulsar_name, Config), + emqx_bridge_resource:resource_id(Type, Name). + +create_bridge(Config) -> + create_bridge(Config, _Overrides = #{}). + +create_bridge(Config, Overrides) -> + Type = ?BRIDGE_TYPE_BIN, + Name = ?config(pulsar_name, Config), + PulsarConfig0 = ?config(pulsar_config, Config), + PulsarConfig = emqx_utils_maps:deep_merge(PulsarConfig0, Overrides), + emqx_bridge:create(Type, Name, PulsarConfig). + +create_bridge_api(Config) -> + create_bridge_api(Config, _Overrides = #{}). 
+ +create_bridge_api(Config, Overrides) -> + TypeBin = ?BRIDGE_TYPE_BIN, + Name = ?config(pulsar_name, Config), + PulsarConfig0 = ?config(pulsar_config, Config), + PulsarConfig = emqx_utils_maps:deep_merge(PulsarConfig0, Overrides), + Params = PulsarConfig#{<<"type">> => TypeBin, <<"name">> => Name}, + Path = emqx_mgmt_api_test_util:api_path(["bridges"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("creating bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of + {ok, {Status, Headers, Body0}} -> + {ok, {Status, Headers, emqx_utils_json:decode(Body0, [return_maps])}}; + Error -> + Error + end, + ct:pal("bridge create result: ~p", [Res]), + Res. + +update_bridge_api(Config) -> + update_bridge_api(Config, _Overrides = #{}). + +update_bridge_api(Config, Overrides) -> + TypeBin = ?BRIDGE_TYPE_BIN, + Name = ?config(pulsar_name, Config), + PulsarConfig0 = ?config(pulsar_config, Config), + PulsarConfig = emqx_utils_maps:deep_merge(PulsarConfig0, Overrides), + BridgeId = emqx_bridge_resource:bridge_id(TypeBin, Name), + Params = PulsarConfig#{<<"type">> => TypeBin, <<"name">> => Name}, + Path = emqx_mgmt_api_test_util:api_path(["bridges", BridgeId]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("updating bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(put, Path, "", AuthHeader, Params, Opts) of + {ok, {_Status, _Headers, Body0}} -> {ok, emqx_utils_json:decode(Body0, [return_maps])}; + Error -> Error + end, + ct:pal("bridge update result: ~p", [Res]), + Res. + +probe_bridge_api(Config) -> + probe_bridge_api(Config, _Overrides = #{}). + +probe_bridge_api(Config, Overrides) -> + TypeBin = ?BRIDGE_TYPE_BIN, + Name = ?config(pulsar_name, Config), + PulsarConfig = ?config(pulsar_config, Config), + Params0 = PulsarConfig#{<<"type">> => TypeBin, <<"name">> => Name}, + Params = maps:merge(Params0, Overrides), + Path = emqx_mgmt_api_test_util:api_path(["bridges_probe"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("probing bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of + {ok, {{_, 204, _}, _Headers, _Body0} = Res0} -> {ok, Res0}; + Error -> Error + end, + ct:pal("bridge probe result: ~p", [Res]), + Res. + +start_consumer(TestCase, Config) -> + PulsarHost = ?config(pulsar_host, Config), + PulsarPort = ?config(pulsar_port, Config), + PulsarTopic = ?config(pulsar_topic, Config), + UseTLS = ?config(use_tls, Config), + %% FIXME: patch pulsar to accept binary urls... 
+    Scheme =
+        case UseTLS of
+            true -> <<"pulsar+ssl://">>;
+            false -> <<"pulsar://">>
+        end,
+    URL =
+        binary_to_list(
+            <<Scheme/binary, (list_to_binary(PulsarHost))/binary, ":",
+                (integer_to_binary(PulsarPort))/binary>>
+        ),
+    ConnOpts = #{},
+    ConsumerClientId = TestCase,
+    CertsPath = emqx_common_test_helpers:deps_path(emqx, "etc/certs"),
+    SSLOpts = #{
+        enable => UseTLS,
+        keyfile => filename:join([CertsPath, "key.pem"]),
+        certfile => filename:join([CertsPath, "cert.pem"]),
+        cacertfile => filename:join([CertsPath, "cacert.pem"])
+    },
+    {ok, _ClientPid} = pulsar:ensure_supervised_client(
+        ConsumerClientId,
+        [URL],
+        #{
+            conn_opts => ConnOpts,
+            ssl_opts => emqx_tls_lib:to_client_opts(SSLOpts)
+        }
+    ),
+    ConsumerOpts = #{
+        cb_init_args => #{send_to => self()},
+        cb_module => pulsar_echo_consumer,
+        sub_type => 'Shared',
+        subscription => atom_to_list(TestCase),
+        max_consumer_num => 1,
+        %% Note! This must not coincide with the client
+        %% id, or else weird bugs will happen, like the
+        %% consumer never starts...
+        name => test_consumer,
+        consumer_id => 1,
+        conn_opts => ConnOpts
+    },
+    {ok, Consumer} = pulsar:ensure_supervised_consumers(
+        ConsumerClientId,
+        PulsarTopic,
+        ConsumerOpts
+    ),
+    %% since connection is async, and there's currently no way to
+    %% specify the subscription initial position as `Earliest', we
+    %% need to wait until the consumer is connected to avoid
+    %% flakiness.
+    ok = wait_until_consumer_connected(Consumer),
+    [
+        {consumer_client_id, ConsumerClientId},
+        {pulsar_consumer, Consumer}
+    ].
+
+stop_consumer(Config) ->
+    ConsumerClientId = ?config(consumer_client_id, Config),
+    Consumer = ?config(pulsar_consumer, Config),
+    ok = pulsar:stop_and_delete_supervised_consumers(Consumer),
+    ok = pulsar:stop_and_delete_supervised_client(ConsumerClientId),
+    ok.
+
+wait_until_consumer_connected(Consumer) ->
+    ?retry(
+        _Sleep = 300,
+        _Attempts0 = 20,
+        true = pulsar_consumers:all_connected(Consumer)
+    ),
+    ok.
+
+wait_until_producer_connected() ->
+    wait_until_connected(pulsar_producers_sup, pulsar_producer).
+
+wait_until_connected(SupMod, Mod) ->
+    Pids = [
+        P
+     || {_Name, SupPid, _Type, _Mods} <- supervisor:which_children(SupMod),
+        P <- element(2, process_info(SupPid, links)),
+        case proc_lib:initial_call(P) of
+            {Mod, init, _} -> true;
+            _ -> false
+        end
+    ],
+    ?retry(
+        _Sleep = 300,
+        _Attempts0 = 20,
+        lists:foreach(fun(P) -> {connected, _} = sys:get_state(P) end, Pids)
+    ),
+    ok.
+
+create_rule_and_action_http(Config) ->
+    PulsarName = ?config(pulsar_name, Config),
+    BridgeId = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_BIN, PulsarName),
+    Params = #{
+        enable => true,
+        sql => <<"SELECT * FROM \"", ?RULE_TOPIC, "\"">>,
+        actions => [BridgeId]
+    },
+    Path = emqx_mgmt_api_test_util:api_path(["rules"]),
+    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
+    ct:pal("rule action params: ~p", [Params]),
+    case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of
+        {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])};
+        Error -> Error
+    end.
+
+receive_consumed(Timeout) ->
+    receive
+        {pulsar_message, #{payloads := Payloads}} ->
+            lists:map(fun try_decode_json/1, Payloads)
+    after Timeout ->
+        ct:pal("mailbox: ~p", [process_info(self(), messages)]),
+        ct:fail("no message consumed")
+    end.
+
+try_decode_json(Payload) ->
+    case emqx_utils_json:safe_decode(Payload, [return_maps]) of
+        {error, _} ->
+            Payload;
+        {ok, JSON} ->
+            JSON
+    end.
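+
+%% Cluster helpers for t_cluster/1: `cluster/1' only *describes* a
+%% two-core-node cluster (picking `slave' for local runs and `ct_slave'
+%% in CI as the peer module), while `start_cluster/1' actually boots
+%% the nodes and registers an `on_exit' callback that stops them again.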
+ +cluster(Config) -> + PrivDataDir = ?config(priv_dir, Config), + PeerModule = + case os:getenv("IS_CI") of + false -> + slave; + _ -> + ct_slave + end, + Cluster = emqx_common_test_helpers:emqx_cluster( + [core, core], + [ + {apps, [emqx_conf, emqx_bridge, emqx_rule_engine, emqx_bridge_pulsar]}, + {listener_ports, []}, + {peer_mod, PeerModule}, + {priv_data_dir, PrivDataDir}, + {load_schema, true}, + {start_autocluster, true}, + {schema_mod, emqx_ee_conf_schema}, + {env_handler, fun + (emqx) -> + application:set_env(emqx, boot_modules, [broker, router]), + ok; + (emqx_conf) -> + ok; + (_) -> + ok + end} + ] + ), + ct:pal("cluster: ~p", [Cluster]), + Cluster. + +start_cluster(Cluster) -> + Nodes = + [ + emqx_common_test_helpers:start_slave(Name, Opts) + || {Name, Opts} <- Cluster + ], + on_exit(fun() -> + emqx_utils:pmap( + fun(N) -> + ct:pal("stopping ~p", [N]), + ok = emqx_common_test_helpers:stop_slave(N) + end, + Nodes + ) + end), + Nodes. + +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ + +t_start_and_produce_ok(Config) -> + MQTTTopic = ?config(mqtt_topic, Config), + ResourceId = resource_id(Config), + ClientId = emqx_guid:to_hexstr(emqx_guid:gen()), + QoS = 0, + Payload = emqx_guid:to_hexstr(emqx_guid:gen()), + ?check_trace( + begin + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config), + on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), + %% Publish using local topic. + Message0 = emqx_message:make(ClientId, QoS, MQTTTopic, Payload), + emqx:publish(Message0), + %% Publish using rule engine. + Message1 = emqx_message:make(ClientId, QoS, ?RULE_TOPIC_BIN, Payload), + emqx:publish(Message1), + + #{rule_id => RuleId} + end, + fun(#{rule_id := RuleId}, _Trace) -> + Data0 = receive_consumed(5_000), + ?assertMatch( + [ + #{ + <<"clientid">> := ClientId, + <<"event">> := <<"message.publish">>, + <<"payload">> := Payload, + <<"topic">> := MQTTTopic + } + ], + Data0 + ), + Data1 = receive_consumed(5_000), + ?assertMatch( + [ + #{ + <<"clientid">> := ClientId, + <<"event">> := <<"message.publish">>, + <<"payload">> := Payload, + <<"topic">> := ?RULE_TOPIC_BIN + } + ], + Data1 + ), + ?retry( + _Sleep = 100, + _Attempts0 = 20, + begin + ?assertMatch( + #{ + counters := #{ + dropped := 0, + failed := 0, + late_reply := 0, + matched := 2, + received := 0, + retried := 0, + success := 2 + } + }, + emqx_resource_manager:get_metrics(ResourceId) + ), + ?assertEqual( + 1, emqx_metrics_worker:get(rule_metrics, RuleId, 'actions.success') + ), + ?assertEqual( + 0, emqx_metrics_worker:get(rule_metrics, RuleId, 'actions.failed') + ), + ok + end + ), + ok + end + ), + ok. + +%% Under normal operations, the bridge will be called async via +%% `simple_async_query'. +t_sync_query(Config) -> + ResourceId = resource_id(Config), + Payload = emqx_guid:to_hexstr(emqx_guid:gen()), + ?check_trace( + begin + ?assertMatch({ok, _}, create_bridge_api(Config)), + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + Message = {send_message, #{payload => Payload}}, + ?assertMatch( + {ok, #{sequence_id := _}}, emqx_resource:simple_sync_query(ResourceId, Message) + ), + ok + end, + [] + ), + ok. 
+ +t_create_via_http(Config) -> + ?check_trace( + begin + ?assertMatch({ok, _}, create_bridge_api(Config)), + + %% lightweight matrix testing some configs + ?assertMatch( + {ok, _}, + update_bridge_api( + Config, + #{ + <<"buffer">> => + #{<<"mode">> => <<"disk">>} + } + ) + ), + ?assertMatch( + {ok, _}, + update_bridge_api( + Config, + #{ + <<"buffer">> => + #{ + <<"mode">> => <<"hybrid">>, + <<"memory_overload_protection">> => true + } + } + ) + ), + ok + end, + [] + ), + ok. + +t_start_stop(Config) -> + PulsarName = ?config(pulsar_name, Config), + ResourceId = resource_id(Config), + ?check_trace( + begin + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + %% Since the connection process is async, we give it some time to + %% stabilize and avoid flakiness. + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + + %% Check that the bridge probe API doesn't leak atoms. + redbug:start( + [ + "emqx_resource_manager:health_check_interval -> return", + "emqx_resource_manager:with_health_check -> return" + ], + [{msgs, 100}, {time, 30_000}] + ), + ProbeRes0 = probe_bridge_api( + Config, + #{<<"resource_opts">> => #{<<"health_check_interval">> => <<"1s">>}} + ), + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0), + AtomsBefore = erlang:system_info(atom_count), + %% Probe again; shouldn't have created more atoms. + ProbeRes1 = probe_bridge_api( + Config, + #{<<"resource_opts">> => #{<<"health_check_interval">> => <<"1s">>}} + ), + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1), + AtomsAfter = erlang:system_info(atom_count), + ?assertEqual(AtomsBefore, AtomsAfter), + + %% Now stop the bridge. + ?assertMatch( + {{ok, _}, {ok, _}}, + ?wait_async_action( + emqx_bridge:disable_enable(disable, ?BRIDGE_TYPE_BIN, PulsarName), + #{?snk_kind := pulsar_bridge_stopped}, + 5_000 + ) + ), + + ok + end, + fun(Trace) -> + %% one for each probe, one for real + ?assertMatch([_, _, _], ?of_kind(pulsar_bridge_producer_stopped, Trace)), + ?assertMatch([_, _, _], ?of_kind(pulsar_bridge_client_stopped, Trace)), + ?assertMatch([_, _, _], ?of_kind(pulsar_bridge_stopped, Trace)), + ok + end + ), + ok. + +t_on_get_status(Config) -> + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + ResourceId = resource_id(Config), + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + %% Since the connection process is async, we give it some time to + %% stabilize and avoid flakiness. + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ct:sleep(500), + ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId)) + end), + %% Check that it recovers itself. + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + ok. + +t_start_when_down(Config) -> + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + ResourceId = resource_id(Config), + ?check_trace( + begin + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + ok + end), + %% Should recover given enough time. 
+ ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + ok + end, + [] + ), + ok. + +t_send_when_down(Config) -> + do_t_send_with_failure(Config, down). + +t_send_when_timeout(Config) -> + do_t_send_with_failure(Config, timeout). + +do_t_send_with_failure(Config, FailureType) -> + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + MQTTTopic = ?config(mqtt_topic, Config), + QoS = 0, + ClientId = emqx_guid:to_hexstr(emqx_guid:gen()), + Payload = emqx_guid:to_hexstr(emqx_guid:gen()), + Message0 = emqx_message:make(ClientId, QoS, MQTTTopic, Payload), + + {{ok, _}, {ok, _}} = + ?wait_async_action( + create_bridge(Config), + #{?snk_kind := pulsar_producer_bridge_started}, + 10_000 + ), + ?check_trace( + begin + emqx_common_test_helpers:with_failure( + FailureType, ProxyName, ProxyHost, ProxyPort, fun() -> + {_, {ok, _}} = + ?wait_async_action( + emqx:publish(Message0), + #{ + ?snk_kind := pulsar_producer_on_query_async, + ?snk_span := {complete, _} + }, + 5_000 + ), + ok + end + ), + ok + end, + fun(_Trace) -> + %% Should recover given enough time. + Data0 = receive_consumed(20_000), + ?assertMatch( + [ + #{ + <<"clientid">> := ClientId, + <<"event">> := <<"message.publish">>, + <<"payload">> := Payload, + <<"topic">> := MQTTTopic + } + ], + Data0 + ), + ok + end + ), + ok. + +%% Check that we correctly terminate the pulsar client when the pulsar +%% producer processes fail to start for whatever reason. +t_failure_to_start_producer(Config) -> + ?check_trace( + begin + ?force_ordering( + #{?snk_kind := name_registered}, + #{?snk_kind := pulsar_producer_about_to_start_producers} + ), + spawn_link(fun() -> + ?tp(will_register_name, #{}), + {ok, #{producer_name := ProducerName}} = ?block_until( + #{?snk_kind := pulsar_producer_capture_name}, 10_000 + ), + true = register(ProducerName, self()), + ?tp(name_registered, #{name => ProducerName}), + %% Just simulating another process so that starting the + %% producers fail. Currently it does a gen_server:call + %% with `infinity' timeout, so this is just to avoid + %% hanging. + receive + {'$gen_call', From, _Request} -> + gen_server:reply(From, {error, im_not, your_producer}) + end, + receive + die -> ok + end + end), + {{ok, _}, {ok, _}} = + ?wait_async_action( + create_bridge(Config), + #{?snk_kind := pulsar_bridge_client_stopped}, + 20_000 + ), + ok + end, + [] + ), + ok. + +%% Check the driver recovers itself if one of the producer processes +%% die for whatever reason. 
+t_producer_process_crash(Config) -> + MQTTTopic = ?config(mqtt_topic, Config), + ResourceId = resource_id(Config), + QoS = 0, + ClientId = emqx_guid:to_hexstr(emqx_guid:gen()), + Payload = emqx_guid:to_hexstr(emqx_guid:gen()), + Message0 = emqx_message:make(ClientId, QoS, MQTTTopic, Payload), + ?check_trace( + begin + {{ok, _}, {ok, _}} = + ?wait_async_action( + create_bridge( + Config, + #{<<"buffer">> => #{<<"mode">> => <<"disk">>}} + ), + #{?snk_kind := pulsar_producer_bridge_started}, + 10_000 + ), + [ProducerPid | _] = [ + Pid + || {_Name, PS, _Type, _Mods} <- supervisor:which_children(pulsar_producers_sup), + Pid <- element(2, process_info(PS, links)), + case proc_lib:initial_call(Pid) of + {pulsar_producer, init, _} -> true; + _ -> false + end + ], + Ref = monitor(process, ProducerPid), + exit(ProducerPid, kill), + receive + {'DOWN', Ref, process, ProducerPid, _Killed} -> + ok + after 1_000 -> ct:fail("pid didn't die") + end, + ?assertEqual({ok, connecting}, emqx_resource_manager:health_check(ResourceId)), + %% Should recover given enough time. + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + {_, {ok, _}} = + ?wait_async_action( + emqx:publish(Message0), + #{?snk_kind := pulsar_producer_on_query_async, ?snk_span := {complete, _}}, + 5_000 + ), + Data0 = receive_consumed(20_000), + ?assertMatch( + [ + #{ + <<"clientid">> := ClientId, + <<"event">> := <<"message.publish">>, + <<"payload">> := Payload, + <<"topic">> := MQTTTopic + } + ], + Data0 + ), + ok + end, + [] + ), + ok. + +t_cluster(Config) -> + MQTTTopic = ?config(mqtt_topic, Config), + ResourceId = resource_id(Config), + Cluster = cluster(Config), + ClientId = emqx_guid:to_hexstr(emqx_guid:gen()), + QoS = 0, + Payload = emqx_guid:to_hexstr(emqx_guid:gen()), + ?check_trace( + begin + Nodes = [N1, N2 | _] = start_cluster(Cluster), + {ok, SRef0} = snabbkaffe:subscribe( + ?match_event(#{?snk_kind := pulsar_producer_bridge_started}), + length(Nodes), + 15_000 + ), + {ok, _} = erpc:call(N1, fun() -> create_bridge(Config) end), + {ok, _} = snabbkaffe:receive_events(SRef0), + lists:foreach( + fun(N) -> + ?retry( + _Sleep = 1_000, + _Attempts0 = 20, + ?assertEqual( + {ok, connected}, + erpc:call(N, emqx_resource_manager, health_check, [ResourceId]), + #{node => N} + ) + ) + end, + Nodes + ), + erpc:multicall(Nodes, fun wait_until_producer_connected/0), + Message0 = emqx_message:make(ClientId, QoS, MQTTTopic, Payload), + erpc:call(N2, emqx, publish, [Message0]), + + lists:foreach( + fun(N) -> + ?assertEqual( + {ok, connected}, + erpc:call(N, emqx_resource_manager, health_check, [ResourceId]), + #{node => N} + ) + end, + Nodes + ), + + ok + end, + fun(_Trace) -> + Data0 = receive_consumed(10_000), + ?assertMatch( + [ + #{ + <<"clientid">> := ClientId, + <<"event">> := <<"message.publish">>, + <<"payload">> := Payload, + <<"topic">> := MQTTTopic + } + ], + Data0 + ), + ok + end + ), + ok. diff --git a/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE_data/pulsar_echo_consumer.erl b/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE_data/pulsar_echo_consumer.erl new file mode 100644 index 000000000..834978851 --- /dev/null +++ b/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE_data/pulsar_echo_consumer.erl @@ -0,0 +1,25 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. 
+%%-------------------------------------------------------------------- +-module(pulsar_echo_consumer). + +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +%% pulsar consumer API +-export([init/2, handle_message/3]). + +init(Topic, Args) -> + ct:pal("consumer init: ~p", [#{topic => Topic, args => Args}]), + SendTo = maps:get(send_to, Args), + ?tp(pulsar_echo_consumer_init, #{topic => Topic}), + {ok, #{topic => Topic, send_to => SendTo}}. + +handle_message(Message, Payloads, State) -> + #{send_to := SendTo, topic := Topic} = State, + ct:pal( + "pulsar consumer received:\n ~p", + [#{message => Message, payloads => Payloads}] + ), + SendTo ! {pulsar_message, #{topic => Topic, message => Message, payloads => Payloads}}, + ?tp(pulsar_echo_consumer_message, #{topic => Topic, message => Message, payloads => Payloads}), + {ok, 'Individual', State}. diff --git a/apps/emqx_bridge_rabbitmq/BSL.txt b/apps/emqx_bridge_rabbitmq/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_rabbitmq/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. 
+ +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_rabbitmq/README.md b/apps/emqx_bridge_rabbitmq/README.md new file mode 100644 index 000000000..420a9e048 --- /dev/null +++ b/apps/emqx_bridge_rabbitmq/README.md @@ -0,0 +1,46 @@ +# EMQX RabbitMQ Bridge + +[RabbitMQ](https://www.rabbitmq.com/) is a powerful, open-source message broker +that facilitates asynchronous communication between different components of an +application. Built on the Advanced Message Queuing Protocol (AMQP), RabbitMQ +enables the reliable transmission of messages by decoupling the sender and +receiver components. This separation allows for increased scalability, +robustness, and flexibility in application architecture. + +RabbitMQ is commonly used for a wide range of purposes, such as distributing +tasks among multiple workers, enabling event-driven architectures, and +implementing publish-subscribe patterns. It is a popular choice for +microservices, distributed systems, and real-time applications, providing an +efficient way to handle varying workloads and ensuring message delivery in +complex environments. + +This application is used to connect EMQX and RabbitMQ. User can create a rule +and easily ingest IoT data into RabbitMQ by leveraging +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + +# Documentation + +- Refer to the [RabbitMQ bridge documentation](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-rabbitmq.html) + for how to use EMQX dashboard to ingest IoT data into RabbitMQ. +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for an introduction to the EMQX rules engine. 
+ + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). + diff --git a/apps/emqx_bridge_rabbitmq/docker-ct b/apps/emqx_bridge_rabbitmq/docker-ct new file mode 100644 index 000000000..5232abf91 --- /dev/null +++ b/apps/emqx_bridge_rabbitmq/docker-ct @@ -0,0 +1 @@ +rabbitmq diff --git a/apps/emqx_bridge_rabbitmq/rebar.config b/apps/emqx_bridge_rabbitmq/rebar.config new file mode 100644 index 000000000..3f1c5d3fc --- /dev/null +++ b/apps/emqx_bridge_rabbitmq/rebar.config @@ -0,0 +1,33 @@ +%% -*- mode: erlang; -*- +{erl_opts, [debug_info]}. +{deps, [ + %% The following two are dependencies of rabbit_common + {thoas, {git, "https://github.com/emqx/thoas.git", {tag, "v1.0.0"}}} + , {credentials_obfuscation, {git, "https://github.com/emqx/credentials-obfuscation.git", {tag, "v3.2.0"}}} + %% The v3.11.13_with_app_src tag, employed in the next two dependencies, + %% represents a fork of the official RabbitMQ v3.11.13 tag. This fork diverges + %% from the official version as it includes app and hrl files + %% generated by make files in subdirectories deps/rabbit_common and + %% deps/amqp_client (app files are also relocated from the ebin to the src + %% directory). This modification ensures compatibility with rebar3, as + %% rabbit_common and amqp_client utilize the erlang.mk build tool. + %% Similar changes are probably needed when upgrading to newer versions + %% of rabbit_common and amqp_client. There are hex packages for rabbit_common and + %% amqp_client, but they are not used here as we don't want to depend on + %% packages that we don't have control over. + , {rabbit_common, {git_subdir, + "https://github.com/emqx/rabbitmq-server.git", + {tag, "v3.11.13-emqx"}, + "deps/rabbit_common"}} + , {amqp_client, {git_subdir, + "https://github.com/emqx/rabbitmq-server.git", + {tag, "v3.11.13-emqx"}, + "deps/amqp_client"}} + , {emqx_connector, {path, "../../apps/emqx_connector"}} + , {emqx_resource, {path, "../../apps/emqx_resource"}} + , {emqx_bridge, {path, "../../apps/emqx_bridge"}} + ]}. + +{shell, [ + {apps, [emqx_bridge_rabbitmq]} +]}. diff --git a/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq.app.src b/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq.app.src new file mode 100644 index 000000000..36f47aaf6 --- /dev/null +++ b/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_rabbitmq, [ + {description, "EMQX Enterprise RabbitMQ Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib, ecql, rabbit_common, amqp_client]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq.erl b/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq.erl new file mode 100644 index 000000000..c4897fa39 --- /dev/null +++ b/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq.erl @@ -0,0 +1,124 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_rabbitmq). 
+ +-include_lib("emqx_bridge/include/emqx_bridge.hrl"). +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-export([ + conn_bridge_examples/1 +]). + +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +%% ------------------------------------------------------------------------------------------------- +%% Callback used by HTTP API +%% ------------------------------------------------------------------------------------------------- + +conn_bridge_examples(Method) -> + [ + #{ + <<"rabbitmq">> => #{ + summary => <<"RabbitMQ Bridge">>, + value => values(Method, "rabbitmq") + } + } + ]. + +values(_Method, Type) -> + #{ + enable => true, + type => Type, + name => <<"foo">>, + server => <<"localhost">>, + port => 5672, + username => <<"guest">>, + password => <<"******">>, + pool_size => 8, + timeout => 5, + virtual_host => <<"/">>, + heartbeat => <<"30s">>, + auto_reconnect => <<"2s">>, + exchange => <<"messages">>, + exchange_type => <<"topic">>, + routing_key => <<"my_routing_key">>, + durable => false, + payload_template => <<"">>, + resource_opts => #{ + worker_pool_size => 8, + health_check_interval => ?HEALTHCHECK_INTERVAL_RAW, + auto_restart_interval => ?AUTO_RESTART_INTERVAL_RAW, + batch_size => ?DEFAULT_BATCH_SIZE, + batch_time => ?DEFAULT_BATCH_TIME, + query_mode => async, + max_buffer_bytes => ?DEFAULT_BUFFER_BYTES + } + }. + +%% ------------------------------------------------------------------------------------------------- +%% Hocon Schema Definitions +%% ------------------------------------------------------------------------------------------------- + +namespace() -> "bridge_rabbitmq". + +roots() -> []. + +fields("config") -> + [ + {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, + {local_topic, + mk( + binary(), + #{desc => ?DESC("local_topic"), default => undefined} + )}, + {resource_opts, + mk( + ref(?MODULE, "creation_opts"), + #{ + required => false, + default => #{}, + desc => ?DESC(emqx_resource_schema, <<"resource_opts">>) + } + )} + ] ++ + emqx_bridge_rabbitmq_connector:fields(config); +fields("creation_opts") -> + emqx_resource_schema:fields("creation_opts"); +fields("post") -> + fields("post", rabbitmq); +fields("put") -> + fields("config"); +fields("get") -> + emqx_bridge_schema:status_fields() ++ fields("post"). + +fields("post", Type) -> + [type_field(Type), name_field() | fields("config")]. + +desc("config") -> + ?DESC("desc_config"); +desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> + ["Configuration for RabbitMQ using `", string:to_upper(Method), "` method."]; +desc("creation_opts" = Name) -> + emqx_resource_schema:desc(Name); +desc(_) -> + undefined. + +%% ------------------------------------------------------------------------------------------------- +%% internal +%% ------------------------------------------------------------------------------------------------- + +type_field(Type) -> + {type, mk(enum([Type]), #{required => true, desc => ?DESC("desc_type")})}. + +name_field() -> + {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. 
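As a quick illustration of what this schema accepts end to end, the following sketch feeds a raw HOCON bridge config through the same validation path the test suite uses further down (`hocon:binary/2` plus `hocon_tconf:check_plain/3`). Field values are taken from `values/2` above; the bridge name `my_rabbit` and the `check_example/0` wrapper are illustrative only:

    %% A sketch, not part of the change: validate an example config
    %% against the bridge schema.
    check_example() ->
        ConfString =
            "bridges.rabbitmq.my_rabbit {\n"
            "  enable = true\n"
            "  server = \"localhost\"\n"
            "  port = 5672\n"
            "  username = \"guest\"\n"
            "  password = \"guest\"\n"
            "  exchange = \"messages\"\n"
            "  routing_key = \"my_routing_key\"\n"
            "}\n",
        {ok, RawConf} = hocon:binary(list_to_binary(ConfString), #{format => map}),
        hocon_tconf:check_plain(
            emqx_bridge_schema,
            RawConf,
            #{required => false, atom_key => false}
        ).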
diff --git a/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq_connector.erl b/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq_connector.erl new file mode 100644 index 000000000..6f833d659 --- /dev/null +++ b/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq_connector.erl @@ -0,0 +1,533 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_rabbitmq_connector). + +-include_lib("emqx_connector/include/emqx_connector.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). +-include_lib("typerefl/include/types.hrl"). +-include_lib("emqx/include/logger.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +%% Needed to create RabbitMQ connection +-include_lib("amqp_client/include/amqp_client.hrl"). + +-behaviour(emqx_resource). +-behaviour(hocon_schema). +-behaviour(ecpool_worker). + +%% hocon_schema callbacks +-export([roots/0, fields/1]). + +%% HTTP API callbacks +-export([values/1]). + +%% emqx_resource callbacks +-export([ + %% Required callbacks + on_start/2, + on_stop/2, + callback_mode/0, + %% Optional callbacks + on_get_status/2, + on_query/3, + is_buffer_supported/0, + on_batch_query/3 +]). + +%% callbacks for ecpool_worker +-export([connect/1]). + +%% Internal callbacks +-export([publish_messages/3]). + +roots() -> + [{config, #{type => hoconsc:ref(?MODULE, config)}}]. + +fields(config) -> + [ + {server, + hoconsc:mk( + typerefl:binary(), + #{ + default => <<"localhost">>, + desc => ?DESC("server") + } + )}, + {port, + hoconsc:mk( + emqx_schema:port_number(), + #{ + default => 5672, + desc => ?DESC("server") + } + )}, + {username, + hoconsc:mk( + typerefl:binary(), + #{ + required => true, + desc => ?DESC("username") + } + )}, + {password, fun emqx_connector_schema_lib:password/1}, + {pool_size, + hoconsc:mk( + typerefl:pos_integer(), + #{ + default => 8, + desc => ?DESC("pool_size") + } + )}, + {timeout, + hoconsc:mk( + emqx_schema:duration_ms(), + #{ + default => <<"5s">>, + desc => ?DESC("timeout") + } + )}, + {wait_for_publish_confirmations, + hoconsc:mk( + boolean(), + #{ + default => true, + desc => ?DESC("wait_for_publish_confirmations") + } + )}, + {publish_confirmation_timeout, + hoconsc:mk( + emqx_schema:duration_ms(), + #{ + default => <<"30s">>, + desc => ?DESC("timeout") + } + )}, + + {virtual_host, + hoconsc:mk( + typerefl:binary(), + #{ + default => <<"/">>, + desc => ?DESC("virtual_host") + } + )}, + {heartbeat, + hoconsc:mk( + emqx_schema:duration_ms(), + #{ + default => <<"30s">>, + desc => ?DESC("heartbeat") + } + )}, + %% Things related to sending messages to RabbitMQ + {exchange, + hoconsc:mk( + typerefl:binary(), + #{ + required => true, + desc => ?DESC("exchange") + } + )}, + {routing_key, + hoconsc:mk( + typerefl:binary(), + #{ + required => true, + desc => ?DESC("routing_key") + } + )}, + {delivery_mode, + hoconsc:mk( + hoconsc:enum([non_persistent, persistent]), + #{ + default => non_persistent, + desc => ?DESC("delivery_mode") + } + )}, + {payload_template, + hoconsc:mk( + binary(), + #{ + default => <<"${.}">>, + desc => ?DESC("payload_template") + } + )} + ]. 
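For reference, once a raw config passes the schema above, duration fields such as `timeout` and `heartbeat` are normalized to integer milliseconds by `emqx_schema:duration_ms()`, so the map handed to `on_start/2` below looks roughly like this (an illustration assembled from the defaults above, not captured output):

    #{
        server => <<"localhost">>,
        port => 5672,
        username => <<"guest">>,
        password => <<"guest">>,
        pool_size => 8,
        timeout => 5000,                        %% <<"5s">> normalized to ms
        wait_for_publish_confirmations => true,
        publish_confirmation_timeout => 30000,  %% <<"30s">> normalized to ms
        virtual_host => <<"/">>,
        heartbeat => 30000,
        exchange => <<"messages">>,
        routing_key => <<"my_routing_key">>,
        delivery_mode => non_persistent,
        payload_template => <<"${.}">>
    }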
+ +values(post) -> + maps:merge(values(put), #{name => <<"connector">>}); +values(get) -> + values(post); +values(put) -> + #{ + server => <<"localhost">>, + port => 5672, + enable => true, + pool_size => 8, + type => rabbitmq, + username => <<"guest">>, + password => <<"******">>, + routing_key => <<"my_routing_key">>, + payload_template => <<"">> + }; +values(_) -> + #{}. + +%% =================================================================== +%% Callbacks defined in emqx_resource +%% =================================================================== + +%% emqx_resource callback + +callback_mode() -> always_sync. + +%% emqx_resource callback + +-spec is_buffer_supported() -> boolean(). +is_buffer_supported() -> + %% We want to make use of EMQX's buffer mechanism + false. + +%% emqx_resource callback called when the resource is started + +-spec on_start(resource_id(), term()) -> {ok, resource_state()} | {error, _}. +on_start( + InstanceID, + #{ + pool_size := PoolSize, + payload_template := PayloadTemplate, + password := Password, + delivery_mode := InitialDeliveryMode + } = InitialConfig +) -> + DeliveryMode = + case InitialDeliveryMode of + non_persistent -> 1; + persistent -> 2 + end, + Config = InitialConfig#{ + password => emqx_secret:wrap(Password), + delivery_mode => DeliveryMode + }, + ?SLOG(info, #{ + msg => "starting_rabbitmq_connector", + connector => InstanceID, + config => emqx_utils:redact(Config) + }), + Options = [ + {config, Config}, + %% The pool_size is read by ecpool and decides the number of workers in + %% the pool + {pool_size, PoolSize}, + {pool, InstanceID} + ], + ProcessedTemplate = emqx_plugin_libs_rule:preproc_tmpl(PayloadTemplate), + State = #{ + poolname => InstanceID, + processed_payload_template => ProcessedTemplate, + config => Config + }, + case emqx_resource_pool:start(InstanceID, ?MODULE, Options) of + ok -> + {ok, State}; + {error, Reason} -> + LogMessage = + #{ + msg => "rabbitmq_connector_start_failed", + error_reason => Reason, + config => emqx_utils:redact(Config) + }, + ?SLOG(info, LogMessage), + {error, Reason} + end. + +%% emqx_resource callback called when the resource is stopped + +-spec on_stop(resource_id(), resource_state()) -> term(). +on_stop( + ResourceID, + #{poolname := PoolName} = _State +) -> + ?SLOG(info, #{ + msg => "stopping RabbitMQ connector", + connector => ResourceID + }), + Workers = [Worker || {_WorkerName, Worker} <- ecpool:workers(PoolName)], + Clients = [ + begin + {ok, Client} = ecpool_worker:client(Worker), + Client + end + || Worker <- Workers + ], + %% We need to stop the pool before stopping the workers as the pool monitors the workers + StopResult = emqx_resource_pool:stop(PoolName), + lists:foreach(fun stop_worker/1, Clients), + StopResult. + +stop_worker({Channel, Connection}) -> + amqp_channel:close(Channel), + amqp_connection:close(Connection). + +%% This is the callback function that is called by ecpool when the pool is +%% started + +-spec connect(term()) -> {ok, {pid(), pid()}, map()} | {error, term()}. 
+connect(Options) ->
+    Config = proplists:get_value(config, Options),
+    try
+        create_rabbitmq_connection_and_channel(Config)
+    catch
+        _:{error, Reason} ->
+            ?SLOG(error, #{
+                msg => "rabbitmq_connector_connection_failed",
+                error_type => error,
+                error_reason => Reason,
+                config => emqx_utils:redact(Config)
+            }),
+            {error, Reason};
+        Type:Reason ->
+            ?SLOG(error, #{
+                msg => "rabbitmq_connector_connection_failed",
+                error_type => Type,
+                error_reason => Reason,
+                config => emqx_utils:redact(Config)
+            }),
+            {error, Reason}
+    end.
+
+create_rabbitmq_connection_and_channel(Config) ->
+    #{
+        server := Host,
+        port := Port,
+        username := Username,
+        password := WrappedPassword,
+        timeout := Timeout,
+        virtual_host := VirtualHost,
+        heartbeat := Heartbeat,
+        wait_for_publish_confirmations := WaitForPublishConfirmations
+    } = Config,
+    Password = emqx_secret:unwrap(WrappedPassword),
+    RabbitMQConnectionOptions =
+        #amqp_params_network{
+            host = erlang:binary_to_list(Host),
+            port = Port,
+            username = Username,
+            password = Password,
+            connection_timeout = Timeout,
+            virtual_host = VirtualHost,
+            heartbeat = Heartbeat
+        },
+    {ok, RabbitMQConnection} =
+        case amqp_connection:start(RabbitMQConnectionOptions) of
+            {ok, Connection} ->
+                {ok, Connection};
+            {error, Reason} ->
+                erlang:error({error, Reason})
+        end,
+    {ok, RabbitMQChannel} =
+        case amqp_connection:open_channel(RabbitMQConnection) of
+            {ok, Channel} ->
+                {ok, Channel};
+            {error, OpenChannelErrorReason} ->
+                erlang:error({error, OpenChannelErrorReason})
+        end,
+    %% We need to enable confirmations if we want to wait for them
+    case WaitForPublishConfirmations of
+        true ->
+            case amqp_channel:call(RabbitMQChannel, #'confirm.select'{}) of
+                #'confirm.select_ok'{} ->
+                    ok;
+                Error ->
+                    ConfirmModeErrorReason =
+                        erlang:iolist_to_binary(
+                            io_lib:format(
+                                "Could not enable RabbitMQ confirmation mode ~p",
+                                [Error]
+                            )
+                        ),
+                    erlang:error({error, ConfirmModeErrorReason})
+            end;
+        false ->
+            ok
+    end,
+    {ok, {RabbitMQConnection, RabbitMQChannel}, #{
+        supervisees => [RabbitMQConnection, RabbitMQChannel]
+    }}.
+
+%% emqx_resource callback called to check the status of the resource
+
+-spec on_get_status(resource_id(), term()) ->
+    {connected, resource_state()} | {disconnected, resource_state(), binary()}.
+on_get_status(
+    _InstId,
+    #{
+        poolname := PoolName
+    } = State
+) ->
+    Workers = [Worker || {_WorkerName, Worker} <- ecpool:workers(PoolName)],
+    Clients = [
+        begin
+            {ok, Client} = ecpool_worker:client(Worker),
+            Client
+        end
+     || Worker <- Workers
+    ],
+    CheckResults = [
+        check_worker(Client)
+     || Client <- Clients
+    ],
+    Connected = length(CheckResults) > 0 andalso lists:all(fun(R) -> R end, CheckResults),
+    case Connected of
+        true ->
+            {connected, State};
+        false ->
+            {disconnected, State, <<"not_connected">>}
+    end;
+on_get_status(
+    _InstId,
+    State
+) ->
+    {disconnected, State, <<"not_connected: no connection pool in state">>}.
+
+check_worker({Channel, Connection}) ->
+    erlang:is_process_alive(Channel) andalso erlang:is_process_alive(Connection).
+
+%% emqx_resource callback that is called when a non-batch query is received
+
+-spec on_query(resource_id(), Request, resource_state()) -> query_result() when
+    Request :: {RequestType, Data},
+    RequestType :: send_message,
+    Data :: map().
+on_query( + ResourceID, + {RequestType, Data}, + #{ + poolname := PoolName, + processed_payload_template := PayloadTemplate, + config := Config + } = State +) -> + ?SLOG(debug, #{ + msg => "RabbitMQ connector received query", + connector => ResourceID, + type => RequestType, + data => Data, + state => emqx_utils:redact(State) + }), + MessageData = format_data(PayloadTemplate, Data), + ecpool:pick_and_do( + PoolName, + {?MODULE, publish_messages, [Config, [MessageData]]}, + no_handover + ). + +%% emqx_resource callback that is called when a batch query is received + +-spec on_batch_query(resource_id(), BatchReq, resource_state()) -> query_result() when + BatchReq :: nonempty_list({'send_message', map()}). +on_batch_query( + ResourceID, + BatchReq, + State +) -> + ?SLOG(debug, #{ + msg => "RabbitMQ connector received batch query", + connector => ResourceID, + data => BatchReq, + state => emqx_utils:redact(State) + }), + %% Currently we only support batch requests with the send_message key + {Keys, MessagesToInsert} = lists:unzip(BatchReq), + ensure_keys_are_of_type_send_message(Keys), + %% Pick out the payload template + #{ + processed_payload_template := PayloadTemplate, + poolname := PoolName, + config := Config + } = State, + %% Create batch payload + FormattedMessages = [ + format_data(PayloadTemplate, Data) + || Data <- MessagesToInsert + ], + %% Publish the messages + ecpool:pick_and_do( + PoolName, + {?MODULE, publish_messages, [Config, FormattedMessages]}, + no_handover + ). + +publish_messages( + {_Connection, Channel}, + #{ + delivery_mode := DeliveryMode, + routing_key := RoutingKey, + exchange := Exchange, + wait_for_publish_confirmations := WaitForPublishConfirmations, + publish_confirmation_timeout := PublishConfirmationTimeout + } = _Config, + Messages +) -> + MessageProperties = #'P_basic'{ + headers = [], + delivery_mode = DeliveryMode + }, + Method = #'basic.publish'{ + exchange = Exchange, + routing_key = RoutingKey + }, + _ = [ + amqp_channel:cast( + Channel, + Method, + #amqp_msg{ + payload = Message, + props = MessageProperties + } + ) + || Message <- Messages + ], + case WaitForPublishConfirmations of + true -> + case amqp_channel:wait_for_confirms(Channel, PublishConfirmationTimeout) of + true -> + ok; + false -> + erlang:error( + {recoverable_error, + <<"RabbitMQ: Got NACK when waiting for message acknowledgment.">>} + ); + timeout -> + erlang:error( + {recoverable_error, + <<"RabbitMQ: Timeout when waiting for message acknowledgment.">>} + ) + end; + false -> + ok + end. + +ensure_keys_are_of_type_send_message(Keys) -> + case lists:all(fun is_send_message_atom/1, Keys) of + true -> + ok; + false -> + erlang:error( + {unrecoverable_error, + <<"Unexpected type for batch message (Expected send_message)">>} + ) + end. + +is_send_message_atom(send_message) -> + true; +is_send_message_atom(_) -> + false. + +format_data([], Msg) -> + emqx_utils_json:encode(Msg); +format_data(Tokens, Msg) -> + emqx_plugin_libs_rule:proc_tmpl(Tokens, Msg). diff --git a/apps/emqx_bridge_rabbitmq/test/emqx_bridge_rabbitmq_SUITE.erl b/apps/emqx_bridge_rabbitmq/test/emqx_bridge_rabbitmq_SUITE.erl new file mode 100644 index 000000000..45a8693e6 --- /dev/null +++ b/apps/emqx_bridge_rabbitmq/test/emqx_bridge_rabbitmq_SUITE.erl @@ -0,0 +1,371 @@ +%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. 
+%%-------------------------------------------------------------------- + +-module(emqx_bridge_rabbitmq_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("emqx_connector/include/emqx_connector.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include_lib("stdlib/include/assert.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). + +%% See comment in +%% lib-ee/emqx_ee_connector/test/ee_connector_rabbitmq_SUITE.erl for how to +%% run this without bringing up the whole CI infrastucture + +rabbit_mq_host() -> + <<"rabbitmq">>. + +rabbit_mq_port() -> + 5672. + +rabbit_mq_exchange() -> + <<"messages">>. + +rabbit_mq_queue() -> + <<"test_queue">>. + +rabbit_mq_routing_key() -> + <<"test_routing_key">>. + +get_channel_connection(Config) -> + proplists:get_value(channel_connection, Config). + +%%------------------------------------------------------------------------------ +%% Common Test Setup, Teardown and Testcase List +%%------------------------------------------------------------------------------ + +init_per_suite(Config) -> + % snabbkaffe:fix_ct_logging(), + case + emqx_common_test_helpers:is_tcp_server_available( + erlang:binary_to_list(rabbit_mq_host()), rabbit_mq_port() + ) + of + true -> + emqx_common_test_helpers:render_and_load_app_config(emqx_conf), + ok = emqx_common_test_helpers:start_apps([emqx_conf, emqx_bridge]), + ok = emqx_connector_test_helpers:start_apps([emqx_resource]), + {ok, _} = application:ensure_all_started(emqx_connector), + {ok, _} = application:ensure_all_started(emqx_ee_connector), + {ok, _} = application:ensure_all_started(emqx_ee_bridge), + {ok, _} = application:ensure_all_started(amqp_client), + emqx_mgmt_api_test_util:init_suite(), + ChannelConnection = setup_rabbit_mq_exchange_and_queue(), + [{channel_connection, ChannelConnection} | Config]; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_rabbitmq); + _ -> + {skip, no_rabbitmq} + end + end. + +setup_rabbit_mq_exchange_and_queue() -> + %% Create an exachange and a queue + {ok, Connection} = + amqp_connection:start(#amqp_params_network{ + host = erlang:binary_to_list(rabbit_mq_host()), + port = rabbit_mq_port() + }), + {ok, Channel} = amqp_connection:open_channel(Connection), + %% Create an exchange + #'exchange.declare_ok'{} = + amqp_channel:call( + Channel, + #'exchange.declare'{ + exchange = rabbit_mq_exchange(), + type = <<"topic">> + } + ), + %% Create a queue + #'queue.declare_ok'{} = + amqp_channel:call( + Channel, + #'queue.declare'{queue = rabbit_mq_queue()} + ), + %% Bind the queue to the exchange + #'queue.bind_ok'{} = + amqp_channel:call( + Channel, + #'queue.bind'{ + queue = rabbit_mq_queue(), + exchange = rabbit_mq_exchange(), + routing_key = rabbit_mq_routing_key() + } + ), + #{ + connection => Connection, + channel => Channel + }. + +end_per_suite(Config) -> + #{ + connection := Connection, + channel := Channel + } = get_channel_connection(Config), + emqx_mgmt_api_test_util:end_suite(), + ok = emqx_common_test_helpers:stop_apps([emqx_conf]), + ok = emqx_connector_test_helpers:stop_apps([emqx_resource]), + _ = application:stop(emqx_connector), + _ = application:stop(emqx_ee_connector), + _ = application:stop(emqx_bridge), + %% Close the channel + ok = amqp_channel:close(Channel), + %% Close the connection + ok = amqp_connection:close(Connection). + +init_per_testcase(_, Config) -> + Config. + +end_per_testcase(_, _Config) -> + ok. + +all() -> + emqx_common_test_helpers:all(?MODULE). 
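With the exchange, queue, and binding in place, the fixture can be sanity-checked independently of the bridge by publishing one message directly over the test channel, using the same `amqp_client` calls the connector itself uses. A sketch; `publish_direct/2` is a hypothetical helper, not part of the suite:

    publish_direct(Channel, Payload) ->
        Method = #'basic.publish'{
            exchange = rabbit_mq_exchange(),
            routing_key = rabbit_mq_routing_key()
        },
        %% Fire-and-forget publish; confirms are not enabled on this channel.
        ok = amqp_channel:cast(Channel, Method, #amqp_msg{payload = Payload}).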
+
+rabbitmq_config(Config) ->
+    BatchSize = maps:get(batch_size, Config, 1),
+    BatchTime = maps:get(batch_time_ms, Config, 0),
+    Name = atom_to_binary(?MODULE),
+    Server = maps:get(server, Config, rabbit_mq_host()),
+    Port = maps:get(port, Config, rabbit_mq_port()),
+    Template = maps:get(payload_template, Config, <<"">>),
+    ConfigString =
+        io_lib:format(
+            "bridges.rabbitmq.~s {\n"
+            "  enable = true\n"
+            "  server = \"~s\"\n"
+            "  port = ~p\n"
+            "  username = \"guest\"\n"
+            "  password = \"guest\"\n"
+            "  routing_key = \"~s\"\n"
+            "  exchange = \"~s\"\n"
+            "  payload_template = \"~s\"\n"
+            "  resource_opts = {\n"
+            "    batch_size = ~b\n"
+            "    batch_time = ~bms\n"
+            "  }\n"
+            "}\n",
+            [
+                Name,
+                Server,
+                Port,
+                rabbit_mq_routing_key(),
+                rabbit_mq_exchange(),
+                Template,
+                BatchSize,
+                BatchTime
+            ]
+        ),
+    ct:pal(ConfigString),
+    parse_and_check(ConfigString, <<"rabbitmq">>, Name).
+
+parse_and_check(ConfigString, BridgeType, Name) ->
+    {ok, RawConf} = hocon:binary(ConfigString, #{format => map}),
+    hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}),
+    #{<<"bridges">> := #{BridgeType := #{Name := RetConfig}}} = RawConf,
+    RetConfig.
+
+make_bridge(Config) ->
+    Type = <<"rabbitmq">>,
+    Name = atom_to_binary(?MODULE),
+    BridgeConfig = rabbitmq_config(Config),
+    {ok, _} = emqx_bridge:create(
+        Type,
+        Name,
+        BridgeConfig
+    ),
+    emqx_bridge_resource:bridge_id(Type, Name).
+
+delete_bridge() ->
+    Type = <<"rabbitmq">>,
+    Name = atom_to_binary(?MODULE),
+    {ok, _} = emqx_bridge:remove(Type, Name),
+    ok.
+
+%%------------------------------------------------------------------------------
+%% Test Cases
+%%------------------------------------------------------------------------------
+
+t_make_delete_bridge(_Config) ->
+    make_bridge(#{}),
+    %% Check that the new bridge is in the list of bridges
+    Bridges = emqx_bridge:list(),
+    Name = atom_to_binary(?MODULE),
+    IsRightName =
+        fun
+            (#{name := BName}) when BName =:= Name ->
+                true;
+            (_) ->
+                false
+        end,
+    ?assert(lists:any(IsRightName, Bridges)),
+    delete_bridge(),
+    BridgesAfterDelete = emqx_bridge:list(),
+    ?assertNot(lists:any(IsRightName, BridgesAfterDelete)),
+    ok.
+
+t_make_delete_bridge_non_existing_server(_Config) ->
+    make_bridge(#{server => <<"non_existing_server">>, port => 3174}),
+    %% Check that the new bridge is in the list of bridges
+    Bridges = emqx_bridge:list(),
+    Name = atom_to_binary(?MODULE),
+    IsRightName =
+        fun
+            (#{name := BName}) when BName =:= Name ->
+                true;
+            (_) ->
+                false
+        end,
+    ?assert(lists:any(IsRightName, Bridges)),
+    delete_bridge(),
+    BridgesAfterDelete = emqx_bridge:list(),
+    ?assertNot(lists:any(IsRightName, BridgesAfterDelete)),
+    ok.
+
+t_send_message_query(Config) ->
+    BridgeID = make_bridge(#{batch_size => 1}),
+    Payload = #{<<"key">> => 42, <<"data">> => <<"RabbitMQ">>, <<"timestamp">> => 10000},
+    %% This uses the default (empty) payload template, so the whole
+    %% message is encoded as JSON
+    emqx_bridge:send_message(BridgeID, Payload),
+    %% Check that the message was delivered to RabbitMQ
+    ?assertEqual(Payload, receive_simple_test_message(Config)),
+    delete_bridge(),
+    ok.
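The template test that follows exercises the payload-template machinery. Outside the suite, the rendering pipeline boils down to two calls that also appear in the connector: `preproc_tmpl/1` to tokenize, then `proc_tmpl/2` to substitute `${...}` placeholders. A minimal sketch (the expected output is an assumption about the default rendering, not captured from a run):

    %% Sketch: how a payload template is rendered for one message.
    Tokens = emqx_plugin_libs_rule:preproc_tmpl(<<"{\"key\": ${key}}">>),
    Rendered = emqx_plugin_libs_rule:proc_tmpl(Tokens, #{<<"key">> => 42}),
    %% Rendered is expected to be the binary <<"{\"key\": 42}">>.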
+ +t_send_message_query_with_template(Config) -> + BridgeID = make_bridge(#{ + batch_size => 1, + payload_template => + << + "{" + " \\\"key\\\": ${key}," + " \\\"data\\\": \\\"${data}\\\"," + " \\\"timestamp\\\": ${timestamp}," + " \\\"secret\\\": 42" + "}" + >> + }), + Payload = #{ + <<"key">> => 7, + <<"data">> => <<"RabbitMQ">>, + <<"timestamp">> => 10000 + }, + emqx_bridge:send_message(BridgeID, Payload), + %% Check that the data got to the database + ExpectedResult = Payload#{ + <<"secret">> => 42 + }, + ?assertEqual(ExpectedResult, receive_simple_test_message(Config)), + delete_bridge(), + ok. + +t_send_simple_batch(Config) -> + BridgeConf = + #{ + batch_size => 100 + }, + BridgeID = make_bridge(BridgeConf), + Payload = #{<<"key">> => 42, <<"data">> => <<"RabbitMQ">>, <<"timestamp">> => 10000}, + emqx_bridge:send_message(BridgeID, Payload), + ?assertEqual(Payload, receive_simple_test_message(Config)), + delete_bridge(), + ok. + +t_send_simple_batch_with_template(Config) -> + BridgeConf = + #{ + batch_size => 100, + payload_template => + << + "{" + " \\\"key\\\": ${key}," + " \\\"data\\\": \\\"${data}\\\"," + " \\\"timestamp\\\": ${timestamp}," + " \\\"secret\\\": 42" + "}" + >> + }, + BridgeID = make_bridge(BridgeConf), + Payload = #{ + <<"key">> => 7, + <<"data">> => <<"RabbitMQ">>, + <<"timestamp">> => 10000 + }, + emqx_bridge:send_message(BridgeID, Payload), + ExpectedResult = Payload#{ + <<"secret">> => 42 + }, + ?assertEqual(ExpectedResult, receive_simple_test_message(Config)), + delete_bridge(), + ok. + +t_heavy_batching(Config) -> + NumberOfMessages = 20000, + BridgeConf = #{ + batch_size => 10173, + batch_time_ms => 50 + }, + BridgeID = make_bridge(BridgeConf), + SendMessage = fun(Key) -> + Payload = #{ + <<"key">> => Key + }, + emqx_bridge:send_message(BridgeID, Payload) + end, + [SendMessage(Key) || Key <- lists:seq(1, NumberOfMessages)], + AllMessages = lists:foldl( + fun(_, Acc) -> + Message = receive_simple_test_message(Config), + #{<<"key">> := Key} = Message, + Acc#{Key => true} + end, + #{}, + lists:seq(1, NumberOfMessages) + ), + ?assertEqual(NumberOfMessages, maps:size(AllMessages)), + delete_bridge(), + ok. + +receive_simple_test_message(Config) -> + #{channel := Channel} = get_channel_connection(Config), + #'basic.consume_ok'{consumer_tag = ConsumerTag} = + amqp_channel:call( + Channel, + #'basic.consume'{ + queue = rabbit_mq_queue() + } + ), + receive + %% This is the first message received + #'basic.consume_ok'{} -> + ok + end, + receive + {#'basic.deliver'{delivery_tag = DeliveryTag}, Content} -> + %% Ack the message + amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = DeliveryTag}), + %% Cancel the consumer + #'basic.cancel_ok'{consumer_tag = ConsumerTag} = + amqp_channel:call(Channel, #'basic.cancel'{consumer_tag = ConsumerTag}), + emqx_utils_json:decode(Content#amqp_msg.payload) + end. + +rabbitmq_config() -> + Config = + #{ + server => rabbit_mq_host(), + port => 5672, + exchange => rabbit_mq_exchange(), + routing_key => rabbit_mq_routing_key() + }, + #{<<"config">> => Config}. + +test_data() -> + #{<<"msg_field">> => <<"Hello">>}. 
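`receive_simple_test_message/1` above consumes one message at a time via `basic.consume`; when a test only needs to drain whatever is already in the queue, polling with `basic.get` is a simpler alternative. A sketch using standard `amqp_client` calls (`drain_queue/2` is a hypothetical helper, not part of the suite):

    drain_queue(Channel, Queue) ->
        drain_queue(Channel, Queue, []).

    drain_queue(Channel, Queue, Acc) ->
        case amqp_channel:call(Channel, #'basic.get'{queue = Queue, no_ack = true}) of
            {#'basic.get_ok'{}, Content} ->
                %% Decode and keep polling until the queue reports empty.
                Decoded = emqx_utils_json:decode(Content#amqp_msg.payload),
                drain_queue(Channel, Queue, [Decoded | Acc]);
            #'basic.get_empty'{} ->
                lists:reverse(Acc)
        end.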
diff --git a/apps/emqx_bridge_rabbitmq/test/emqx_bridge_rabbitmq_connector_SUITE.erl b/apps/emqx_bridge_rabbitmq/test/emqx_bridge_rabbitmq_connector_SUITE.erl new file mode 100644 index 000000000..6b6ad617f --- /dev/null +++ b/apps/emqx_bridge_rabbitmq/test/emqx_bridge_rabbitmq_connector_SUITE.erl @@ -0,0 +1,232 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+
+-module(emqx_bridge_rabbitmq_connector_SUITE).
+
+-compile(nowarn_export_all).
+-compile(export_all).
+
+-include("emqx_connector.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("stdlib/include/assert.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%% This test SUITE requires a running RabbitMQ instance. If you don't want to
+%% bring up the whole CI infrastructure with the `scripts/ct/run.sh` script
+%% you can start a RabbitMQ instance with the following command.
+%% 5672 is the default port for AMQP 0-9-1 and 15672 is the default port for
+%% the HTTP management interface.
+%%
+%% docker run -it --rm --name rabbitmq -p 127.0.0.1:5672:5672 -p 127.0.0.1:15672:15672 rabbitmq:3.11-management
+
+rabbit_mq_host() ->
+    <<"rabbitmq">>.
+
+rabbit_mq_port() ->
+    5672.
+
+rabbit_mq_exchange() ->
+    <<"test_exchange">>.
+
+rabbit_mq_queue() ->
+    <<"test_queue">>.
+
+rabbit_mq_routing_key() ->
+    <<"test_routing_key">>.
+
+all() ->
+    emqx_common_test_helpers:all(?MODULE).
+
+init_per_suite(Config) ->
+    case
+        emqx_common_test_helpers:is_tcp_server_available(
+            erlang:binary_to_list(rabbit_mq_host()), rabbit_mq_port()
+        )
+    of
+        true ->
+            ok = emqx_common_test_helpers:start_apps([emqx_conf]),
+            ok = emqx_connector_test_helpers:start_apps([emqx_resource]),
+            {ok, _} = application:ensure_all_started(emqx_connector),
+            {ok, _} = application:ensure_all_started(emqx_ee_connector),
+            {ok, _} = application:ensure_all_started(amqp_client),
+            ChannelConnection = setup_rabbit_mq_exchange_and_queue(),
+            [{channel_connection, ChannelConnection} | Config];
+        false ->
+            case os:getenv("IS_CI") of
+                "yes" ->
+                    throw(no_rabbitmq);
+                _ ->
+                    {skip, no_rabbitmq}
+            end
+    end.
+
+setup_rabbit_mq_exchange_and_queue() ->
+    %% Create an exchange and a queue
+    {ok, Connection} =
+        amqp_connection:start(#amqp_params_network{
+            host = erlang:binary_to_list(rabbit_mq_host()),
+            port = rabbit_mq_port()
+        }),
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    %% Create an exchange
+    #'exchange.declare_ok'{} =
+        amqp_channel:call(
+            Channel,
+            #'exchange.declare'{
+                exchange = rabbit_mq_exchange(),
+                type = <<"topic">>
+            }
+        ),
+    %% Create a queue
+    #'queue.declare_ok'{} =
+        amqp_channel:call(
+            Channel,
+            #'queue.declare'{queue = rabbit_mq_queue()}
+        ),
+    %% Bind the queue to the exchange
+    #'queue.bind_ok'{} =
+        amqp_channel:call(
+            Channel,
+            #'queue.bind'{
+                queue = rabbit_mq_queue(),
+                exchange = rabbit_mq_exchange(),
+                routing_key = rabbit_mq_routing_key()
+            }
+        ),
+    #{
+        connection => Connection,
+        channel => Channel
+    }.
+
+get_channel_connection(Config) ->
+    proplists:get_value(channel_connection, Config).
+ +end_per_suite(Config) -> + #{ + connection := Connection, + channel := Channel + } = get_channel_connection(Config), + ok = emqx_common_test_helpers:stop_apps([emqx_conf]), + ok = emqx_connector_test_helpers:stop_apps([emqx_resource]), + _ = application:stop(emqx_connector), + %% Close the channel + ok = amqp_channel:close(Channel), + %% Close the connection + ok = amqp_connection:close(Connection). + +% %%------------------------------------------------------------------------------ +% %% Testcases +% %%------------------------------------------------------------------------------ + +t_lifecycle(Config) -> + perform_lifecycle_check( + erlang:atom_to_binary(?MODULE), + rabbitmq_config(), + Config + ). + +perform_lifecycle_check(ResourceID, InitialConfig, TestConfig) -> + #{ + channel := Channel + } = get_channel_connection(TestConfig), + {ok, #{config := CheckedConfig}} = + emqx_resource:check_config(emqx_bridge_rabbitmq_connector, InitialConfig), + {ok, #{ + state := #{poolname := PoolName} = State, + status := InitialStatus + }} = + emqx_resource:create_local( + ResourceID, + ?CONNECTOR_RESOURCE_GROUP, + emqx_bridge_rabbitmq_connector, + CheckedConfig, + #{} + ), + ?assertEqual(InitialStatus, connected), + %% Instance should match the state and status of the just started resource + {ok, ?CONNECTOR_RESOURCE_GROUP, #{ + state := State, + status := InitialStatus + }} = + emqx_resource:get_instance(ResourceID), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceID)), + %% Perform query as further check that the resource is working as expected + perform_query(ResourceID, Channel), + ?assertEqual(ok, emqx_resource:stop(ResourceID)), + %% Resource will be listed still, but state will be changed and healthcheck will fail + %% as the worker no longer exists. + {ok, ?CONNECTOR_RESOURCE_GROUP, #{ + state := State, + status := StoppedStatus + }} = emqx_resource:get_instance(ResourceID), + ?assertEqual(stopped, StoppedStatus), + ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(ResourceID)), + % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself. + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), + % Can call stop/1 again on an already stopped instance + ?assertEqual(ok, emqx_resource:stop(ResourceID)), + % Make sure it can be restarted and the healthchecks and queries work properly + ?assertEqual(ok, emqx_resource:restart(ResourceID)), + % async restart, need to wait resource + timer:sleep(500), + {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} = + emqx_resource:get_instance(ResourceID), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceID)), + %% Check that everything is working again by performing a query + perform_query(ResourceID, Channel), + % Stop and remove the resource in one go. + ?assertEqual(ok, emqx_resource:remove_local(ResourceID)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), + % Should not even be able to get the resource data out of ets now unlike just stopping. + ?assertEqual({error, not_found}, emqx_resource:get_instance(ResourceID)). + +% %%------------------------------------------------------------------------------ +% %% Helpers +% %%------------------------------------------------------------------------------ + +perform_query(PoolName, Channel) -> + %% Send message to queue: + ok = emqx_resource:query(PoolName, {query, test_data()}), + %% Get the message from queue: + ok = receive_simple_test_message(Channel). 
+ +receive_simple_test_message(Channel) -> + #'basic.consume_ok'{consumer_tag = ConsumerTag} = + amqp_channel:call( + Channel, + #'basic.consume'{ + queue = rabbit_mq_queue() + } + ), + receive + %% This is the first message received + #'basic.consume_ok'{} -> + ok + end, + receive + {#'basic.deliver'{delivery_tag = DeliveryTag}, Content} -> + Expected = test_data(), + ?assertEqual(Expected, emqx_utils_json:decode(Content#amqp_msg.payload)), + %% Ack the message + amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = DeliveryTag}), + %% Cancel the consumer + #'basic.cancel_ok'{consumer_tag = ConsumerTag} = + amqp_channel:call(Channel, #'basic.cancel'{consumer_tag = ConsumerTag}), + ok + end. + +rabbitmq_config() -> + Config = + #{ + server => rabbit_mq_host(), + port => 5672, + username => <<"guest">>, + password => <<"guest">>, + exchange => rabbit_mq_exchange(), + routing_key => rabbit_mq_routing_key() + }, + #{<<"config">> => Config}. + +test_data() -> + #{<<"msg_field">> => <<"Hello">>}. diff --git a/apps/emqx_bridge_redis/BSL.txt b/apps/emqx_bridge_redis/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_redis/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. 
If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_redis/README.md b/apps/emqx_bridge_redis/README.md new file mode 100644 index 000000000..73ec41f07 --- /dev/null +++ b/apps/emqx_bridge_redis/README.md @@ -0,0 +1,37 @@ +# EMQX Redis Bridge + +[Redis](https://github.com/redis/redis) is an in-memory data structure store, +used as a distributed, in-memory key–value database, cache and message broker, +with optional durability. + +The application is used to connect EMQX and Redis. +User can create a rule and easily ingest IoT data into Redis by leveraging +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + +# Documentation + +- Refer to [Ingest data into Redis](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-redis.html) + for how to use EMQX dashboard to ingest IoT data into Redis. + +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) + for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). 
diff --git a/apps/emqx_bridge_redis/src/emqx_bridge_redis.app.src b/apps/emqx_bridge_redis/src/emqx_bridge_redis.app.src new file mode 100644 index 000000000..6b57c6cd7 --- /dev/null +++ b/apps/emqx_bridge_redis/src/emqx_bridge_redis.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_redis, [ + {description, "EMQX Enterprise Redis Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_rocketmq/BSL.txt b/apps/emqx_bridge_rocketmq/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_rocketmq/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). 
+ +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_rocketmq/README.md b/apps/emqx_bridge_rocketmq/README.md new file mode 100644 index 000000000..252e6beac --- /dev/null +++ b/apps/emqx_bridge_rocketmq/README.md @@ -0,0 +1,37 @@ +# EMQX RocketMQ Bridge + +[RocketMQ](https://github.com/apache/rocketmq) is a distributed messaging and +streaming platform developed by the Apache Software Foundation. +It provides reliable, scalable, and high-throughput messaging services for modern cloud-native applications + +The application is used to connect EMQX and RocketMQ. +User can create a rule and easily ingest IoT data into RocketMQ by leveraging +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + +# Documentation + +- Refer to [Ingest data into RocketMQ](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-rocketmq.html) + for how to use EMQX dashboard to ingest IoT data into RocketMQ. + +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) + for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_rocketmq/docker-ct b/apps/emqx_bridge_rocketmq/docker-ct new file mode 100644 index 000000000..463a9eb66 --- /dev/null +++ b/apps/emqx_bridge_rocketmq/docker-ct @@ -0,0 +1,2 @@ +toxiproxy +rocketmq diff --git a/apps/emqx_bridge_rocketmq/rebar.config b/apps/emqx_bridge_rocketmq/rebar.config new file mode 100644 index 000000000..1af22f108 --- /dev/null +++ b/apps/emqx_bridge_rocketmq/rebar.config @@ -0,0 +1,8 @@ +{erl_opts, [debug_info]}. 
+
+{deps, [
+    {rocketmq, {git, "https://github.com/emqx/rocketmq-client-erl.git", {tag, "v0.5.1"}}},
+    {emqx_connector, {path, "../../apps/emqx_connector"}},
+    {emqx_resource, {path, "../../apps/emqx_resource"}},
+    {emqx_bridge, {path, "../../apps/emqx_bridge"}}
+]}.
diff --git a/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq.app.src b/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq.app.src
new file mode 100644
index 000000000..51189d174
--- /dev/null
+++ b/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq.app.src
@@ -0,0 +1,9 @@
+{application, emqx_bridge_rocketmq, [
+    {description, "EMQX Enterprise RocketMQ Bridge"},
+    {vsn, "0.1.1"},
+    {registered, []},
+    {applications, [kernel, stdlib, rocketmq]},
+    {env, []},
+    {modules, []},
+    {links, []}
+]}.
diff --git a/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq.erl b/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq.erl
new file mode 100644
index 000000000..a4a942d0e
--- /dev/null
+++ b/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq.erl
@@ -0,0 +1,107 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+-module(emqx_bridge_rocketmq).
+
+-include_lib("typerefl/include/types.hrl").
+-include_lib("hocon/include/hoconsc.hrl").
+-include_lib("emqx_bridge/include/emqx_bridge.hrl").
+-include_lib("emqx_resource/include/emqx_resource.hrl").
+
+-import(hoconsc, [mk/2, enum/1, ref/2]).
+
+-export([
+    conn_bridge_examples/1,
+    values/1
+]).
+
+-export([
+    namespace/0,
+    roots/0,
+    fields/1,
+    desc/1
+]).
+
+-define(DEFAULT_TEMPLATE, <<>>).
+-define(DEFAULT_REQ_TIMEOUT, <<"15s">>).
+
+%% -------------------------------------------------------------------------------------------------
+%% api
+
+conn_bridge_examples(Method) ->
+    [
+        #{
+            <<"rocketmq">> => #{
+                summary => <<"RocketMQ Bridge">>,
+                value => values(Method)
+            }
+        }
+    ].
+
+values(get) ->
+    values(post);
+values(post) ->
+    #{
+        enable => true,
+        type => rocketmq,
+        name => <<"foo">>,
+        servers => <<"127.0.0.1:9876">>,
+        topic => <<"TopicTest">>,
+        template => ?DEFAULT_TEMPLATE,
+        local_topic => <<"local/topic/#">>,
+        resource_opts => #{
+            worker_pool_size => 1,
+            health_check_interval => ?HEALTHCHECK_INTERVAL_RAW,
+            auto_restart_interval => ?AUTO_RESTART_INTERVAL_RAW,
+            batch_size => ?DEFAULT_BATCH_SIZE,
+            batch_time => ?DEFAULT_BATCH_TIME,
+            query_mode => sync,
+            max_buffer_bytes => ?DEFAULT_BUFFER_BYTES
+        }
+    };
+values(put) ->
+    values(post).
+
+%% -------------------------------------------------------------------------------------------------
+%% Hocon Schema Definitions
+namespace() -> "bridge_rocketmq".
+
+roots() -> [].
+
+fields("config") ->
+    [
+        {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})},
+        {template,
+            mk(
+                binary(),
+                #{desc => ?DESC("template"), default => ?DEFAULT_TEMPLATE}
+            )},
+        {local_topic,
+            mk(
+                binary(),
+                #{desc => ?DESC("local_topic"), required => false}
+            )}
+    ] ++ emqx_resource_schema:fields("resource_opts") ++
+        (emqx_bridge_rocketmq_connector:fields(config) --
+            emqx_connector_schema_lib:prepare_statement_fields());
+fields("post") ->
+    [type_field(), name_field() | fields("config")];
+fields("put") ->
+    fields("config");
+fields("get") ->
+    emqx_bridge_schema:status_fields() ++ fields("post").
+
+desc("config") ->
+    ?DESC("desc_config");
+desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" ->
+    ["Configuration for RocketMQ using `", string:to_upper(Method), "` method."];
+desc(_) ->
+    undefined.
+
+%% -------------------------------------------------------------------------------------------------
+
+type_field() ->
+    {type, mk(enum([rocketmq]), #{required => true, desc => ?DESC("desc_type")})}.
+
+name_field() ->
+    {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}.
diff --git a/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq_connector.erl b/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq_connector.erl
new file mode 100644
index 000000000..a3da57147
--- /dev/null
+++ b/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq_connector.erl
@@ -0,0 +1,345 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+
+-module(emqx_bridge_rocketmq_connector).
+
+-behaviour(emqx_resource).
+
+-include_lib("emqx_resource/include/emqx_resource.hrl").
+-include_lib("typerefl/include/types.hrl").
+-include_lib("emqx/include/logger.hrl").
+-include_lib("snabbkaffe/include/snabbkaffe.hrl").
+-include_lib("hocon/include/hoconsc.hrl").
+
+-export([roots/0, fields/1]).
+
+%% `emqx_resource' API
+-export([
+    callback_mode/0,
+    is_buffer_supported/0,
+    on_start/2,
+    on_stop/2,
+    on_query/3,
+    on_batch_query/3,
+    on_get_status/2
+]).
+
+-import(hoconsc, [mk/2, enum/1, ref/2]).
+
+-define(ROCKETMQ_HOST_OPTIONS, #{
+    default_port => 9876
+}).
+
+%%=====================================================================
+%% Hocon schema
+roots() ->
+    [{config, #{type => hoconsc:ref(?MODULE, config)}}].
+
+fields(config) ->
+    [
+        {servers, servers()},
+        {topic,
+            mk(
+                binary(),
+                #{default => <<"TopicTest">>, desc => ?DESC(topic)}
+            )},
+        {access_key,
+            mk(
+                binary(),
+                #{default => <<>>, desc => ?DESC("access_key")}
+            )},
+        {secret_key,
+            mk(
+                binary(),
+                #{default => <<>>, desc => ?DESC("secret_key"), sensitive => true}
+            )},
+        {security_token,
+            mk(binary(), #{default => <<>>, desc => ?DESC(security_token), sensitive => true})},
+        {sync_timeout,
+            mk(
+                emqx_schema:duration(),
+                #{default => <<"3s">>, desc => ?DESC(sync_timeout)}
+            )},
+        {refresh_interval,
+            mk(
+                emqx_schema:duration(),
+                #{default => <<"3s">>, desc => ?DESC(refresh_interval)}
+            )},
+        {send_buffer,
+            mk(
+                emqx_schema:bytesize(),
+                #{default => <<"1024KB">>, desc => ?DESC(send_buffer)}
+            )},
+
+        {pool_size, fun emqx_connector_schema_lib:pool_size/1},
+        {auto_reconnect, fun emqx_connector_schema_lib:auto_reconnect/1}
+    ].
+
+servers() ->
+    Meta = #{desc => ?DESC("servers")},
+    emqx_schema:servers_sc(Meta, ?ROCKETMQ_HOST_OPTIONS).
+
+%%========================================================================================
+%% `emqx_resource' API
+%%========================================================================================
+
+callback_mode() -> always_sync.
+
+is_buffer_supported() -> false.
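+
+%% A minimal sketch of the `servers` parsing that `on_start/2` below relies on,
+%% assuming a hypothetical two-node nameserver list; the result shape is roughly:
+%%
+%%   emqx_schema:parse_servers(
+%%       <<"127.0.0.1:9876,127.0.0.2:9876">>, ?ROCKETMQ_HOST_OPTIONS
+%%   ).
+%%   %% => [#{hostname => "127.0.0.1", port => 9876},
+%%   %%     #{hostname => "127.0.0.2", port => 9876}]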
+
+on_start(
+    InstanceId,
+    #{servers := BinServers, topic := Topic, sync_timeout := SyncTimeout} = Config
+) ->
+    ?SLOG(info, #{
+        msg => "starting_rocketmq_connector",
+        connector => InstanceId,
+        config => redact(Config)
+    }),
+    Servers = lists:map(
+        fun(#{hostname := Host, port := Port}) -> {Host, Port} end,
+        emqx_schema:parse_servers(BinServers, ?ROCKETMQ_HOST_OPTIONS)
+    ),
+    ClientId = client_id(InstanceId),
+
+    TopicTks = emqx_plugin_libs_rule:preproc_tmpl(Topic),
+    #{acl_info := AclInfo} = ProducerOpts = make_producer_opts(Config),
+    ClientCfg = #{acl_info => AclInfo},
+    Templates = parse_template(Config),
+    ProducersMapPID = create_producers_map(ClientId),
+    State = #{
+        client_id => ClientId,
+        topic => Topic,
+        topic_tokens => TopicTks,
+        sync_timeout => SyncTimeout,
+        templates => Templates,
+        producers_map_pid => ProducersMapPID,
+        producers_opts => emqx_secret:wrap(ProducerOpts)
+    },
+
+    case rocketmq:ensure_supervised_client(ClientId, Servers, ClientCfg) of
+        {ok, _Pid} ->
+            {ok, State};
+        {error, Reason0} ->
+            Reason = redact(Reason0),
+            ?tp(
+                rocketmq_connector_start_failed,
+                #{error => Reason}
+            ),
+            {error, Reason}
+    end.
+
+on_stop(InstanceId, #{client_id := ClientId, topic := RawTopic, producers_map_pid := Pid} = _State) ->
+    ?SLOG(info, #{
+        msg => "stopping_rocketmq_connector",
+        connector => InstanceId
+    }),
+
+    Producers = ets:match(ClientId, {{RawTopic, '$1'}, '$2'}),
+    lists:foreach(
+        fun([Topic, Producer]) ->
+            ets:delete(ClientId, {RawTopic, Topic}),
+            _ = rocketmq:stop_and_delete_supervised_producers(Producer)
+        end,
+        Producers
+    ),
+
+    Pid ! ok,
+    ok = rocketmq:stop_and_delete_supervised_client(ClientId).
+
+on_query(InstanceId, Query, State) ->
+    do_query(InstanceId, Query, send_sync, State).
+
+%% We only support batch sends, and all messages in a batch must have the same topic
+on_batch_query(InstanceId, [{send_message, _Msg} | _] = Query, State) ->
+    do_query(InstanceId, Query, batch_send_sync, State);
+on_batch_query(_InstanceId, Query, _State) ->
+    {error, {unrecoverable_error, {invalid_request, Query}}}.
+
+on_get_status(_InstanceId, #{client_id := ClientId}) ->
+    case rocketmq_client_sup:find_client(ClientId) of
+        {ok, Pid} ->
+            status_result(rocketmq_client:get_status(Pid));
+        _ ->
+            connecting
+    end.
+
+status_result(_Status = true) -> connected;
+status_result(_Status) -> connecting.
+
+%%========================================================================================
+%% Helper fns
+%%========================================================================================
+
+do_query(
+    InstanceId,
+    Query,
+    QueryFunc,
+    #{
+        templates := Templates,
+        client_id := ClientId,
+        topic := RawTopic,
+        topic_tokens := TopicTks,
+        producers_opts := ProducerOpts,
+        sync_timeout := RequestTimeout
+    } = State
+) ->
+    ?TRACE(
+        "QUERY",
+        "rocketmq_connector_received",
+        #{connector => InstanceId, query => Query, state => State}
+    ),
+
+    TopicKey = get_topic_key(Query, RawTopic, TopicTks),
+    Data = apply_template(Query, Templates),
+
+    Result = safe_do_produce(
+        InstanceId, QueryFunc, ClientId, TopicKey, Data, ProducerOpts, RequestTimeout
+    ),
+    case Result of
+        {error, Reason} ->
+            ?tp(
+                rocketmq_connector_query_return,
+                #{error => Reason}
+            ),
+            ?SLOG(error, #{
+                msg => "rocketmq_connector_do_query_failed",
+                connector => InstanceId,
+                query => Query,
+                reason => Reason
+            }),
+            Result;
+        _ ->
+            ?tp(
+                rocketmq_connector_query_return,
+                #{result => Result}
+            ),
+            Result
+    end.
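+
+%% A minimal sketch of the topic templating used by `do_query/4` above, with a
+%% hypothetical template and message: tokens are preprocessed once (in
+%% `on_start/2`) and rendered per message, roughly:
+%%
+%%   TopicTks = emqx_plugin_libs_rule:preproc_tmpl(<<"Topic-${clientid}">>),
+%%   emqx_plugin_libs_rule:proc_tmpl(TopicTks, #{clientid => <<"c1">>}).
+%%   %% => <<"Topic-c1">>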
+ +safe_do_produce(InstanceId, QueryFunc, ClientId, TopicKey, Data, ProducerOpts, RequestTimeout) -> + try + Producers = get_producers(ClientId, TopicKey, ProducerOpts), + produce(InstanceId, QueryFunc, Producers, Data, RequestTimeout) + catch + _Type:Reason -> + {error, {unrecoverable_error, redact(Reason)}} + end. + +produce(_InstanceId, QueryFunc, Producers, Data, RequestTimeout) -> + rocketmq:QueryFunc(Producers, Data, RequestTimeout). + +parse_template(Config) -> + Templates = + case maps:get(template, Config, undefined) of + undefined -> #{}; + <<>> -> #{}; + Template -> #{send_message => Template} + end, + + parse_template(maps:to_list(Templates), #{}). + +parse_template([{Key, H} | T], Templates) -> + ParamsTks = emqx_plugin_libs_rule:preproc_tmpl(H), + parse_template( + T, + Templates#{Key => ParamsTks} + ); +parse_template([], Templates) -> + Templates. + +get_topic_key({_, Msg}, RawTopic, TopicTks) -> + {RawTopic, emqx_plugin_libs_rule:proc_tmpl(TopicTks, Msg)}; +get_topic_key([Query | _], RawTopic, TopicTks) -> + get_topic_key(Query, RawTopic, TopicTks). + +apply_template({Key, Msg} = _Req, Templates) -> + case maps:get(Key, Templates, undefined) of + undefined -> + emqx_utils_json:encode(Msg); + Template -> + emqx_plugin_libs_rule:proc_tmpl(Template, Msg) + end; +apply_template([{Key, _} | _] = Reqs, Templates) -> + case maps:get(Key, Templates, undefined) of + undefined -> + [emqx_utils_json:encode(Msg) || {_, Msg} <- Reqs]; + Template -> + [emqx_plugin_libs_rule:proc_tmpl(Template, Msg) || {_, Msg} <- Reqs] + end. + +client_id(ResourceId) -> + erlang:binary_to_atom(ResourceId, utf8). + +redact(Msg) -> + emqx_utils:redact(Msg, fun is_sensitive_key/1). + +is_sensitive_key(secret_key) -> + true; +is_sensitive_key(security_token) -> + true; +is_sensitive_key(_) -> + false. + +make_producer_opts( + #{ + access_key := AccessKey, + secret_key := SecretKey, + security_token := SecurityToken, + send_buffer := SendBuff, + refresh_interval := RefreshInterval + } +) -> + ACLInfo = acl_info(AccessKey, SecretKey, SecurityToken), + #{ + tcp_opts => [{sndbuf, SendBuff}], + ref_topic_route_interval => RefreshInterval, + acl_info => ACLInfo + }. + +acl_info(<<>>, <<>>, <<>>) -> + #{}; +acl_info(AccessKey, SecretKey, <<>>) when is_binary(AccessKey), is_binary(SecretKey) -> + #{ + access_key => AccessKey, + secret_key => SecretKey + }; +acl_info(AccessKey, SecretKey, SecurityToken) when + is_binary(AccessKey), is_binary(SecretKey), is_binary(SecurityToken) +-> + #{ + access_key => AccessKey, + secret_key => SecretKey, + security_token => SecurityToken + }; +acl_info(_, _, _) -> + #{}. + +create_producers_map(ClientId) -> + erlang:spawn(fun() -> + case ets:whereis(ClientId) of + undefined -> + _ = ets:new(ClientId, [public, named_table]), + ok; + _ -> + ok + end, + receive + _Msg -> + ok + end + end). + +get_producers(ClientId, {_, Topic1} = TopicKey, ProducerOpts) -> + case ets:lookup(ClientId, TopicKey) of + [{_, Producers0}] -> + Producers0; + _ -> + ProducerGroup = iolist_to_binary([atom_to_list(ClientId), "_", Topic1]), + {ok, Producers0} = rocketmq:ensure_supervised_producers( + ClientId, ProducerGroup, Topic1, emqx_secret:unwrap(ProducerOpts) + ), + ets:insert(ClientId, {TopicKey, Producers0}), + Producers0 + end. 
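+
+%% A sketch of how the producer cache above behaves, assuming a hypothetical
+%% resource id <<"bridge:rocketmq:foo">> and the default topic:
+%%
+%%   ClientId = client_id(<<"bridge:rocketmq:foo">>),
+%%   %% => 'bridge:rocketmq:foo', which also names the ETS cache table.
+%%   %% The first lookup of {<<"TopicTest">>, <<"TopicTest">>} misses, so a
+%%   %% supervised producer group <<"bridge:rocketmq:foo_TopicTest">> is
+%%   %% started and cached; subsequent queries for the same key hit the table.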
diff --git a/apps/emqx_bridge_rocketmq/test/emqx_bridge_rocketmq_SUITE.erl b/apps/emqx_bridge_rocketmq/test/emqx_bridge_rocketmq_SUITE.erl
new file mode 100644
index 000000000..90047e577
--- /dev/null
+++ b/apps/emqx_bridge_rocketmq/test/emqx_bridge_rocketmq_SUITE.erl
@@ -0,0 +1,273 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+
+-module(emqx_bridge_rocketmq_SUITE).
+
+-compile(nowarn_export_all).
+-compile(export_all).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("snabbkaffe/include/snabbkaffe.hrl").
+
+% Bridge defaults
+-define(TOPIC, "TopicTest").
+-define(BATCH_SIZE, 10).
+-define(PAYLOAD, <<"HELLO">>).
+
+-define(GET_CONFIG(KEY__, CFG__), proplists:get_value(KEY__, CFG__)).
+
+%%------------------------------------------------------------------------------
+%% CT boilerplate
+%%------------------------------------------------------------------------------
+
+all() ->
+    [
+        {group, async},
+        {group, sync}
+    ].
+
+groups() ->
+    TCs = emqx_common_test_helpers:all(?MODULE),
+    BatchingGroups = [{group, with_batch}, {group, without_batch}],
+    [
+        {async, BatchingGroups},
+        {sync, BatchingGroups},
+        {with_batch, TCs},
+        {without_batch, TCs}
+    ].
+
+init_per_group(async, Config) ->
+    [{query_mode, async} | Config];
+init_per_group(sync, Config) ->
+    [{query_mode, sync} | Config];
+init_per_group(with_batch, Config0) ->
+    Config = [{batch_size, ?BATCH_SIZE} | Config0],
+    common_init(Config);
+init_per_group(without_batch, Config0) ->
+    Config = [{batch_size, 1} | Config0],
+    common_init(Config);
+init_per_group(_Group, Config) ->
+    Config.
+
+end_per_group(Group, Config) when Group =:= with_batch; Group =:= without_batch ->
+    ProxyHost = ?config(proxy_host, Config),
+    ProxyPort = ?config(proxy_port, Config),
+    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
+    ok;
+end_per_group(_Group, _Config) ->
+    ok.
+
+init_per_suite(Config) ->
+    Config.
+
+end_per_suite(_Config) ->
+    emqx_mgmt_api_test_util:end_suite(),
+    ok = emqx_common_test_helpers:stop_apps([emqx_bridge, emqx_conf]),
+    ok.
+
+init_per_testcase(_Testcase, Config) ->
+    delete_bridge(Config),
+    Config.
+
+end_per_testcase(_Testcase, Config) ->
+    ProxyHost = ?config(proxy_host, Config),
+    ProxyPort = ?config(proxy_port, Config),
+    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
+    ok = snabbkaffe:stop(),
+    delete_bridge(Config),
+    ok.
+ +%%------------------------------------------------------------------------------ +%% Helper fns +%%------------------------------------------------------------------------------ + +common_init(ConfigT) -> + BridgeType = <<"rocketmq">>, + Host = os:getenv("ROCKETMQ_HOST", "toxiproxy"), + Port = list_to_integer(os:getenv("ROCKETMQ_PORT", "9876")), + + Config0 = [ + {host, Host}, + {port, Port}, + {proxy_name, "rocketmq"} + | ConfigT + ], + + case emqx_common_test_helpers:is_tcp_server_available(Host, Port) of + true -> + % Setup toxiproxy + ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"), + ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + % Ensure EE bridge module is loaded + _ = application:load(emqx_ee_bridge), + _ = emqx_ee_bridge:module_info(), + ok = emqx_common_test_helpers:start_apps([emqx_conf, emqx_bridge]), + emqx_mgmt_api_test_util:init_suite(), + {Name, RocketMQConf} = rocketmq_config(BridgeType, Config0), + Config = + [ + {rocketmq_config, RocketMQConf}, + {rocketmq_bridge_type, BridgeType}, + {rocketmq_name, Name}, + {proxy_host, ProxyHost}, + {proxy_port, ProxyPort} + | Config0 + ], + Config; + false -> + case os:getenv("IS_CI") of + false -> + {skip, no_rocketmq}; + _ -> + throw(no_rocketmq) + end + end. + +rocketmq_config(BridgeType, Config) -> + Port = integer_to_list(?GET_CONFIG(port, Config)), + Server = ?GET_CONFIG(host, Config) ++ ":" ++ Port, + Name = atom_to_binary(?MODULE), + BatchSize = ?config(batch_size, Config), + QueryMode = ?config(query_mode, Config), + ConfigString = + io_lib:format( + "bridges.~s.~s {\n" + " enable = true\n" + " servers = ~p\n" + " topic = ~p\n" + " resource_opts = {\n" + " request_timeout = 1500ms\n" + " batch_size = ~b\n" + " query_mode = ~s\n" + " }\n" + "}", + [ + BridgeType, + Name, + Server, + ?TOPIC, + BatchSize, + QueryMode + ] + ), + {Name, parse_and_check(ConfigString, BridgeType, Name)}. + +parse_and_check(ConfigString, BridgeType, Name) -> + {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), + hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}), + #{<<"bridges">> := #{BridgeType := #{Name := Config}}} = RawConf, + Config. + +create_bridge(Config) -> + BridgeType = ?GET_CONFIG(rocketmq_bridge_type, Config), + Name = ?GET_CONFIG(rocketmq_name, Config), + RocketMQConf = ?GET_CONFIG(rocketmq_config, Config), + emqx_bridge:create(BridgeType, Name, RocketMQConf). + +delete_bridge(Config) -> + BridgeType = ?GET_CONFIG(rocketmq_bridge_type, Config), + Name = ?GET_CONFIG(rocketmq_name, Config), + emqx_bridge:remove(BridgeType, Name). + +create_bridge_http(Params) -> + Path = emqx_mgmt_api_test_util:api_path(["bridges"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of + {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; + Error -> Error + end. + +send_message(Config, Payload) -> + Name = ?GET_CONFIG(rocketmq_name, Config), + BridgeType = ?GET_CONFIG(rocketmq_bridge_type, Config), + BridgeID = emqx_bridge_resource:bridge_id(BridgeType, Name), + emqx_bridge:send_message(BridgeID, Payload). + +query_resource(Config, Request) -> + Name = ?GET_CONFIG(rocketmq_name, Config), + BridgeType = ?GET_CONFIG(rocketmq_bridge_type, Config), + ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), + emqx_resource:query(ResourceID, Request, #{timeout => 500}). 
+ +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ + +t_setup_via_config_and_publish(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + SentData = #{payload => ?PAYLOAD}, + ?check_trace( + begin + ?wait_async_action( + ?assertEqual(ok, send_message(Config, SentData)), + #{?snk_kind := rocketmq_connector_query_return}, + 10_000 + ), + ok + end, + fun(Trace0) -> + Trace = ?of_kind(rocketmq_connector_query_return, Trace0), + ?assertMatch([#{result := ok}], Trace), + ok + end + ), + ok. + +t_setup_via_http_api_and_publish(Config) -> + BridgeType = ?GET_CONFIG(rocketmq_bridge_type, Config), + Name = ?GET_CONFIG(rocketmq_name, Config), + RocketMQConf = ?GET_CONFIG(rocketmq_config, Config), + RocketMQConf2 = RocketMQConf#{ + <<"name">> => Name, + <<"type">> => BridgeType + }, + ?assertMatch( + {ok, _}, + create_bridge_http(RocketMQConf2) + ), + SentData = #{payload => ?PAYLOAD}, + ?check_trace( + begin + ?wait_async_action( + ?assertEqual(ok, send_message(Config, SentData)), + #{?snk_kind := rocketmq_connector_query_return}, + 10_000 + ), + ok + end, + fun(Trace0) -> + Trace = ?of_kind(rocketmq_connector_query_return, Trace0), + ?assertMatch([#{result := ok}], Trace), + ok + end + ), + ok. + +t_get_status(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + + Name = ?GET_CONFIG(rocketmq_name, Config), + BridgeType = ?GET_CONFIG(rocketmq_bridge_type, Config), + ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), + + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceID)), + ok. + +t_simple_query(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + Request = {send_message, #{message => <<"Hello">>}}, + Result = query_resource(Config, Request), + ?assertEqual(ok, Result), + ok. diff --git a/apps/emqx_bridge_sqlserver/BSL.txt b/apps/emqx_bridge_sqlserver/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_sqlserver/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. 
+ +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_sqlserver/README.md b/apps/emqx_bridge_sqlserver/README.md new file mode 100644 index 000000000..ccb1267d8 --- /dev/null +++ b/apps/emqx_bridge_sqlserver/README.md @@ -0,0 +1,36 @@ +# EMQX SQL Server Bridge + +[Microsoft SQL Server](https://www.microsoft.com/en-us/sql-server) is a relational database management system (RDBMS) that is developed and owned by Microsoft. 
+Microsoft SQL Server offers a wide range of features, including support for high availability and disaster recovery,
+integration with other Microsoft products and services, and advanced security and encryption options.
+It also provides tools for data warehousing, business intelligence, and analytics, making it a versatile and powerful database platform.
+
+The application is used to connect EMQX and Microsoft SQL Server.
+Users can create a rule and easily ingest IoT data into Microsoft SQL Server by leveraging
+[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html).
+
+
+# Documentation links
+
+For more information about Microsoft SQL Server, please see the [official site](https://learn.microsoft.com/en-us/sql/sql-server/?view=sql-server-ver16).
+
+# Configurations
+
+Please see [Ingest data into SQL Server](https://www.emqx.io/docs/en/v5.0/data-integration/data-bridge-sqlserver.html) for more detailed information.
+
+# HTTP APIs
+
+- Several APIs are provided for bridge management, covering bridge creation,
+  update, retrieval, start/stop/restart, and listing.
+
+  Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) for more detailed information.
+
+
+# Contributing
+
+Please see our [contributing.md](../../CONTRIBUTING.md).
+
+
+# License
+
+EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).
diff --git a/apps/emqx_bridge_sqlserver/docker-ct b/apps/emqx_bridge_sqlserver/docker-ct
new file mode 100644
index 000000000..6f046e2df
--- /dev/null
+++ b/apps/emqx_bridge_sqlserver/docker-ct
@@ -0,0 +1,2 @@
+toxiproxy
+sqlserver
diff --git a/apps/emqx_bridge_sqlserver/include/emqx_bridge_sqlserver.hrl b/apps/emqx_bridge_sqlserver/include/emqx_bridge_sqlserver.hrl
new file mode 100644
index 000000000..3aa78fdd8
--- /dev/null
+++ b/apps/emqx_bridge_sqlserver/include/emqx_bridge_sqlserver.hrl
@@ -0,0 +1,5 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+
+-define(SQLSERVER_DEFAULT_PORT, 1433).
diff --git a/apps/emqx_bridge_sqlserver/rebar.config b/apps/emqx_bridge_sqlserver/rebar.config
new file mode 100644
index 000000000..5f586f529
--- /dev/null
+++ b/apps/emqx_bridge_sqlserver/rebar.config
@@ -0,0 +1,10 @@
+%% -*- mode: erlang; -*-
+{erl_opts, [debug_info]}.
+{deps, [ {emqx_connector, {path, "../../apps/emqx_connector"}}
+       , {emqx_resource, {path, "../../apps/emqx_resource"}}
+       , {emqx_bridge, {path, "../../apps/emqx_bridge"}}
+       ]}.
+
+{shell, [
+    {apps, [emqx_bridge_sqlserver]}
+]}.
diff --git a/apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver.app.src b/apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver.app.src
new file mode 100644
index 000000000..a0b4e287b
--- /dev/null
+++ b/apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver.app.src
@@ -0,0 +1,9 @@
+{application, emqx_bridge_sqlserver, [
+    {description, "EMQX Enterprise SQL Server Bridge"},
+    {vsn, "0.1.0"},
+    {registered, []},
+    {applications, [kernel, stdlib, odbc]},
+    {env, []},
+    {modules, []},
+    {links, []}
+]}.
diff --git a/apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver.erl b/apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver.erl new file mode 100644 index 000000000..8a97cb2ad --- /dev/null +++ b/apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver.erl @@ -0,0 +1,128 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_sqlserver). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("emqx_bridge/include/emqx_bridge.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-export([ + conn_bridge_examples/1 +]). + +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +-define(DEFAULT_SQL, << + "insert into t_mqtt_msg(msgid, topic, qos, payload) " + "values ( ${id}, ${topic}, ${qos}, ${payload} )" +>>). + +-define(DEFAULT_DRIVER, <<"ms-sql">>). + +conn_bridge_examples(Method) -> + [ + #{ + <<"sqlserver">> => #{ + summary => <<"Microsoft SQL Server Bridge">>, + value => values(Method) + } + } + ]. + +values(get) -> + values(post); +values(post) -> + #{ + enable => true, + type => sqlserver, + name => <<"bar">>, + server => <<"127.0.0.1:1433">>, + database => <<"test">>, + pool_size => 8, + username => <<"sa">>, + password => <<"******">>, + sql => ?DEFAULT_SQL, + driver => ?DEFAULT_DRIVER, + local_topic => <<"local/topic/#">>, + resource_opts => #{ + worker_pool_size => 1, + health_check_interval => ?HEALTHCHECK_INTERVAL_RAW, + auto_restart_interval => ?AUTO_RESTART_INTERVAL_RAW, + batch_size => ?DEFAULT_BATCH_SIZE, + batch_time => ?DEFAULT_BATCH_TIME, + query_mode => async, + max_buffer_bytes => ?DEFAULT_BUFFER_BYTES + } + }; +values(put) -> + values(post). + +%% ------------------------------------------------------------------------------------------------- +%% Hocon Schema Definitions +namespace() -> "bridge_sqlserver". + +roots() -> []. + +fields("config") -> + [ + {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, + {sql, + mk( + binary(), + #{desc => ?DESC("sql_template"), default => ?DEFAULT_SQL, format => <<"sql">>} + )}, + {driver, mk(binary(), #{desc => ?DESC("driver"), default => ?DEFAULT_DRIVER})}, + {local_topic, + mk( + binary(), + #{desc => ?DESC("local_topic"), default => undefined} + )}, + {resource_opts, + mk( + ref(?MODULE, "creation_opts"), + #{ + required => false, + default => #{}, + desc => ?DESC(emqx_resource_schema, <<"resource_opts">>) + } + )} + ] ++ + (emqx_bridge_sqlserver_connector:fields(config) -- + emqx_connector_schema_lib:prepare_statement_fields()); +fields("creation_opts") -> + emqx_resource_schema:fields("creation_opts"); +fields("post") -> + fields("post", sqlserver); +fields("put") -> + fields("config"); +fields("get") -> + emqx_bridge_schema:status_fields() ++ fields("post"). + +fields("post", Type) -> + [type_field(Type), name_field() | fields("config")]. + +desc("config") -> + ?DESC("desc_config"); +desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> + ["Configuration for Microsoft SQL Server using `", string:to_upper(Method), "` method."]; +desc("creation_opts" = Name) -> + emqx_resource_schema:desc(Name); +desc(_) -> + undefined. 
+ +%% ------------------------------------------------------------------------------------------------- + +type_field(Type) -> + {type, mk(enum([Type]), #{required => true, desc => ?DESC("desc_type")})}. + +name_field() -> + {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. diff --git a/apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver_connector.erl b/apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver_connector.erl new file mode 100644 index 000000000..ed8134051 --- /dev/null +++ b/apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver_connector.erl @@ -0,0 +1,493 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_sqlserver_connector). + +-behaviour(emqx_resource). + +-include("emqx_bridge_sqlserver.hrl"). + +-include_lib("kernel/include/file.hrl"). +-include_lib("emqx/include/logger.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). + +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +%%==================================================================== +%% Exports +%%==================================================================== + +%% Hocon config schema exports +-export([ + roots/0, + fields/1 +]). + +%% callbacks for behaviour emqx_resource +-export([ + callback_mode/0, + is_buffer_supported/0, + on_start/2, + on_stop/2, + on_query/3, + on_batch_query/3, + on_get_status/2 +]). + +%% callbacks for ecpool +-export([connect/1]). + +%% Internal exports used to execute code with ecpool worker +-export([do_get_status/1, worker_do_insert/3]). + +-import(emqx_plugin_libs_rule, [str/1]). +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-define(ACTION_SEND_MESSAGE, send_message). + +-define(SYNC_QUERY_MODE, handover). + +-define(SQLSERVER_HOST_OPTIONS, #{ + default_port => ?SQLSERVER_DEFAULT_PORT +}). + +-define(REQUEST_TIMEOUT(RESOURCE_OPTS), + maps:get(request_timeout, RESOURCE_OPTS, ?DEFAULT_REQUEST_TIMEOUT) +). + +-define(BATCH_INSERT_TEMP, batch_insert_temp). + +-define(BATCH_INSERT_PART, batch_insert_part). +-define(BATCH_PARAMS_TOKENS, batch_insert_tks). + +-define(FILE_MODE_755, 33261). +%% 32768 + 8#00400 + 8#00200 + 8#00100 + 8#00040 + 8#00010 + 8#00004 + 8#00001 +%% See also +%% https://www.erlang.org/doc/man/file.html#read_file_info-2 + +%% Copied from odbc reference page +%% https://www.erlang.org/doc/man/odbc.html + +%% as returned by connect/2 +-type connection_reference() :: pid(). +-type time_out() :: milliseconds() | infinity. +-type sql() :: string() | binary(). +-type milliseconds() :: pos_integer(). +%% Tuple of column values e.g. one row of the result set. +%% it's a variable size tuple of column values. +-type row() :: tuple(). +%% Some kind of explanation of what went wrong +-type common_reason() :: connection_closed | extended_error() | term(). +%% extended error type with ODBC +%% and native database error codes, as well as the base reason that would have been +%% returned had extended_errors not been enabled. +-type extended_error() :: {string(), integer(), _Reason :: term()}. +%% Name of column in the result set +-type col_name() :: string(). +%% e.g. a list of the names of the selected columns in the result set. +-type col_names() :: [col_name()]. +%% A list of rows from the result set. +-type rows() :: list(row()). 
+
+%% -type result_tuple() :: {updated, n_rows()} | {selected, col_names(), rows()}.
+-type updated_tuple() :: {updated, n_rows()}.
+-type selected_tuple() :: {selected, col_names(), rows()}.
+%% The number of affected rows for UPDATE,
+%% INSERT, or DELETE queries. For other query types the value
+%% is driver defined, and hence should be ignored.
+-type n_rows() :: integer().
+
+%% These types are not used in this module yet, but we may use them later
+%% -type odbc_data_type() ::
+%%     sql_integer
+%%     | sql_smallint
+%%     | sql_tinyint
+%%     | {sql_decimal, precision(), scale()}
+%%     | {sql_numeric, precision(), scale()}
+%%     | {sql_char, size()}
+%%     | {sql_wchar, size()}
+%%     | {sql_varchar, size()}
+%%     | {sql_wvarchar, size()}
+%%     | {sql_float, precision()}
+%%     | {sql_wlongvarchar, size()}
+%%     | {sql_float, precision()}
+%%     | sql_real
+%%     | sql_double
+%%     | sql_bit
+%%     | atom().
+%% -type precision() :: integer().
+%% -type scale() :: integer().
+%% -type size() :: integer().
+
+-type state() :: #{
+    pool_name := binary(),
+    resource_opts := map(),
+    sql_templates := map()
+}.
+
+%%====================================================================
+%% Configuration and default values
+%%====================================================================
+
+roots() ->
+    [{config, #{type => hoconsc:ref(?MODULE, config)}}].
+
+fields(config) ->
+    [
+        {server, server()}
+        | add_default_username(emqx_connector_schema_lib:relational_db_fields())
+    ].
+
+add_default_username(Fields) ->
+    lists:map(
+        fun
+            ({username, OrigUsernameFn}) ->
+                {username, add_default_fn(OrigUsernameFn, <<"sa">>)};
+            (Field) ->
+                Field
+        end,
+        Fields
+    ).
+
+add_default_fn(OrigFn, Default) ->
+    fun
+        (default) -> Default;
+        (Field) -> OrigFn(Field)
+    end.
+
+server() ->
+    Meta = #{desc => ?DESC("server")},
+    emqx_schema:servers_sc(Meta, ?SQLSERVER_HOST_OPTIONS).
+
+%%====================================================================
+%% Callbacks defined in emqx_resource
+%%====================================================================
+
+callback_mode() -> always_sync.
+
+is_buffer_supported() -> false.
+
+on_start(
+    ResourceId = PoolName,
+    #{
+        server := Server,
+        username := Username,
+        password := Password,
+        driver := Driver,
+        database := Database,
+        pool_size := PoolSize,
+        resource_opts := ResourceOpts
+    } = Config
+) ->
+    ?SLOG(info, #{
+        msg => "starting_sqlserver_connector",
+        connector => ResourceId,
+        config => emqx_utils:redact(Config)
+    }),
+
+    ODBCDir = code:priv_dir(odbc),
+    OdbcserverDir = filename:join(ODBCDir, "bin/odbcserver"),
+    {ok, Info = #file_info{mode = Mode}} = file:read_file_info(OdbcserverDir),
+    case ?FILE_MODE_755 =:= Mode of
+        true ->
+            ok;
+        false ->
+            _ = file:write_file_info(OdbcserverDir, Info#file_info{mode = ?FILE_MODE_755}),
+            ok
+    end,
+
+    Options = [
+        {server, to_bin(Server)},
+        {username, Username},
+        {password, Password},
+        {driver, Driver},
+        {database, Database},
+        {pool_size, PoolSize}
+    ],
+
+    State = #{
+        %% also ResourceId
+        pool_name => PoolName,
+        sql_templates => parse_sql_template(Config),
+        resource_opts => ResourceOpts
+    },
+    case emqx_resource_pool:start(PoolName, ?MODULE, Options) of
+        ok ->
+            {ok, State};
+        {error, Reason} ->
+            ?tp(
+                sqlserver_connector_start_failed,
+                #{error => Reason}
+            ),
+            {error, Reason}
+    end.
+
+on_stop(ResourceId, _State) ->
+    ?SLOG(info, #{
+        msg => "stopping_sqlserver_connector",
+        connector => ResourceId
+    }),
+    emqx_resource_pool:stop(ResourceId).
+ +-spec on_query( + resource_id(), + {?ACTION_SEND_MESSAGE, map()}, + state() +) -> + ok + | {ok, list()} + | {error, {recoverable_error, term()}} + | {error, term()}. +on_query(ResourceId, {?ACTION_SEND_MESSAGE, _Msg} = Query, State) -> + ?TRACE( + "SINGLE_QUERY_SYNC", + "bridge_sqlserver_received", + #{requests => Query, connector => ResourceId, state => State} + ), + do_query(ResourceId, Query, ?SYNC_QUERY_MODE, State). + +-spec on_batch_query( + resource_id(), + [{?ACTION_SEND_MESSAGE, map()}], + state() +) -> + ok + | {ok, list()} + | {error, {recoverable_error, term()}} + | {error, term()}. +on_batch_query(ResourceId, BatchRequests, State) -> + ?TRACE( + "BATCH_QUERY_SYNC", + "bridge_sqlserver_received", + #{requests => BatchRequests, connector => ResourceId, state => State} + ), + do_query(ResourceId, BatchRequests, ?SYNC_QUERY_MODE, State). + +on_get_status(_InstanceId, #{pool_name := PoolName} = _State) -> + Health = emqx_resource_pool:health_check_workers( + PoolName, + {?MODULE, do_get_status, []} + ), + status_result(Health). + +status_result(_Status = true) -> connected; +status_result(_Status = false) -> connecting. +%% TODO: +%% case for disconnected + +%%==================================================================== +%% ecpool callback fns +%%==================================================================== + +-spec connect(Options :: list()) -> {ok, connection_reference()} | {error, term()}. +connect(Options) -> + ConnectStr = lists:concat(conn_str(Options, [])), + Opts = proplists:get_value(options, Options, []), + odbc:connect(ConnectStr, Opts). + +-spec do_get_status(connection_reference()) -> Result :: boolean(). +do_get_status(Conn) -> + case execute(Conn, <<"SELECT 1">>) of + {selected, [[]], [{1}]} -> true; + _ -> false + end. + +%%==================================================================== +%% Internal Helper fns +%%==================================================================== + +%% TODO && FIXME: +%% About the connection string attribute `Encrypt`: +%% The default value is `yes` in odbc version 18.0+ and `no` in previous versions. +%% And encrypted connections always verify the server's certificate. +%% So `Encrypt=YES;TrustServerCertificate=YES` must be set in the connection string +%% when connecting to a server that has a self-signed certificate. +%% See also: +%% 'https://learn.microsoft.com/en-us/sql/connect/odbc/ +%% dsn-connection-string-attribute?source=recommendations&view=sql-server-ver16#encrypt' +conn_str([], Acc) -> + %% we should use this for msodbcsql 18+ + %% lists:join(";", ["Encrypt=YES", "TrustServerCertificate=YES" | Acc]); + lists:join(";", Acc); +conn_str([{driver, Driver} | Opts], Acc) -> + conn_str(Opts, ["Driver=" ++ str(Driver) | Acc]); +conn_str([{server, Server} | Opts], Acc) -> + #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, ?SQLSERVER_HOST_OPTIONS), + conn_str(Opts, ["Server=" ++ str(Host) ++ "," ++ str(Port) | Acc]); +conn_str([{database, Database} | Opts], Acc) -> + conn_str(Opts, ["Database=" ++ str(Database) | Acc]); +conn_str([{username, Username} | Opts], Acc) -> + conn_str(Opts, ["UID=" ++ str(Username) | Acc]); +conn_str([{password, Password} | Opts], Acc) -> + conn_str(Opts, ["PWD=" ++ str(Password) | Acc]); +conn_str([{_, _} | Opts], Acc) -> + conn_str(Opts, Acc). 
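+
+%% A sketch of the resulting connection string, with hypothetical values.
+%% Note that the clauses above prepend to the accumulator, so attributes come
+%% out in reverse order of the input list (ODBC ignores attribute order):
+%%
+%%   lists:concat(conn_str(
+%%       [
+%%           {server, <<"127.0.0.1:1433">>},
+%%           {username, "sa"},
+%%           {password, "******"},
+%%           {driver, "ms-sql"},
+%%           {database, "mqtt"},
+%%           {pool_size, 8}
+%%       ],
+%%       []
+%%   )).
+%%   %% => "Database=mqtt;Driver=ms-sql;PWD=******;UID=sa;Server=127.0.0.1,1433"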
+
+%% Query with single & batch SQL statements
+-spec do_query(
+    resource_id(),
+    Query :: {?ACTION_SEND_MESSAGE, map()} | [{?ACTION_SEND_MESSAGE, map()}],
+    ApplyMode :: handover,
+    state()
+) ->
+    {ok, list()}
+    | {error, {recoverable_error, term()}}
+    | {error, term()}.
+do_query(
+    ResourceId,
+    Query,
+    ApplyMode,
+    #{pool_name := PoolName, sql_templates := Templates} = State
+) ->
+    ?TRACE(
+        "SINGLE_QUERY_SYNC",
+        "sqlserver_connector_received",
+        #{query => Query, connector => ResourceId, state => State}
+    ),
+
+    %% only insert SQL statements are supported, for both single and batch queries
+    Result =
+        case apply_template(Query, Templates) of
+            {?ACTION_SEND_MESSAGE, SQL} ->
+                ecpool:pick_and_do(
+                    PoolName,
+                    {?MODULE, worker_do_insert, [SQL, State]},
+                    ApplyMode
+                );
+            Query ->
+                {error, {unrecoverable_error, invalid_query}};
+            _ ->
+                {error, {unrecoverable_error, failed_to_apply_sql_template}}
+        end,
+    case Result of
+        {error, Reason} ->
+            ?tp(
+                sqlserver_connector_query_return,
+                #{error => Reason}
+            ),
+            ?SLOG(error, #{
+                msg => "sqlserver_connector_do_query_failed",
+                connector => ResourceId,
+                query => Query,
+                reason => Reason
+            }),
+            Result;
+        _ ->
+            ?tp(
+                sqlserver_connector_query_return,
+                #{result => Result}
+            ),
+            Result
+    end.
+
+worker_do_insert(
+    Conn, SQL, #{resource_opts := ResourceOpts, pool_name := ResourceId} = State
+) ->
+    LogMeta = #{connector => ResourceId, state => State},
+    try
+        case execute(Conn, SQL, ?REQUEST_TIMEOUT(ResourceOpts)) of
+            {selected, Rows, _} ->
+                {ok, Rows};
+            {updated, _} ->
+                ok;
+            {error, ErrStr} ->
+                ?SLOG(error, LogMeta#{msg => "invalid_request", reason => ErrStr}),
+                {error, {unrecoverable_error, {invalid_request, ErrStr}}}
+        end
+    catch
+        _Type:Reason ->
+            ?SLOG(error, LogMeta#{msg => "invalid_request", reason => Reason}),
+            {error, {unrecoverable_error, {invalid_request, Reason}}}
+    end.
+
+-spec execute(pid(), sql()) ->
+    updated_tuple()
+    | selected_tuple()
+    | [updated_tuple()]
+    | [selected_tuple()]
+    | {error, common_reason()}.
+execute(Conn, SQL) ->
+    odbc:sql_query(Conn, str(SQL)).
+
+-spec execute(pid(), sql(), time_out()) ->
+    updated_tuple()
+    | selected_tuple()
+    | [updated_tuple()]
+    | [selected_tuple()]
+    | {error, common_reason()}.
+execute(Conn, SQL, Timeout) ->
+    odbc:sql_query(Conn, str(SQL), Timeout).
+
+to_bin(List) when is_list(List) ->
+    unicode:characters_to_binary(List, utf8).
+
+%% for bridge data to sql server
+parse_sql_template(Config) ->
+    RawSQLTemplates =
+        case maps:get(sql, Config, undefined) of
+            undefined -> #{};
+            <<>> -> #{};
+            SQLTemplate -> #{?ACTION_SEND_MESSAGE => SQLTemplate}
+        end,
+
+    BatchInsertTks = #{},
+    parse_sql_template(maps:to_list(RawSQLTemplates), BatchInsertTks).
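+
+%% A rough sketch of the parsed shape produced below for an insert template
+%% (the exact split is delegated to `emqx_plugin_libs_rule:split_insert_sql/1`):
+%%
+%%   #{?BATCH_INSERT_TEMP =>
+%%         #{send_message =>
+%%               #{?BATCH_INSERT_PART => InsertPrefix,
+%%                 ?BATCH_PARAMS_TOKENS => PreprocessedParamTokens}}}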
+
+parse_sql_template([{Key, H} | T], BatchInsertTks) ->
+    case emqx_plugin_libs_rule:detect_sql_type(H) of
+        {ok, select} ->
+            parse_sql_template(T, BatchInsertTks);
+        {ok, insert} ->
+            case emqx_plugin_libs_rule:split_insert_sql(H) of
+                {ok, {InsertSQL, Params}} ->
+                    parse_sql_template(
+                        T,
+                        BatchInsertTks#{
+                            Key =>
+                                #{
+                                    ?BATCH_INSERT_PART => InsertSQL,
+                                    ?BATCH_PARAMS_TOKENS => emqx_plugin_libs_rule:preproc_tmpl(
+                                        Params
+                                    )
+                                }
+                        }
+                    );
+                {error, Reason} ->
+                    ?SLOG(error, #{msg => "split sql failed", sql => H, reason => Reason}),
+                    parse_sql_template(T, BatchInsertTks)
+            end;
+        {error, Reason} ->
+            ?SLOG(error, #{msg => "detect sql type failed", sql => H, reason => Reason}),
+            parse_sql_template(T, BatchInsertTks)
+    end;
+parse_sql_template([], BatchInsertTks) ->
+    #{
+        ?BATCH_INSERT_TEMP => BatchInsertTks
+    }.
+
+%% single insert
+apply_template(
+    {?ACTION_SEND_MESSAGE = _Key, _Msg} = Query, Templates
+) ->
+    %% TODO: fix emqx_plugin_libs_rule:proc_tmpl/2
+    %% it won't add single quotes for string
+    apply_template([Query], Templates);
+%% batch inserts
+apply_template(
+    [{?ACTION_SEND_MESSAGE = Key, _Msg} | _T] = BatchReqs,
+    #{?BATCH_INSERT_TEMP := BatchInsertsTks} = _Templates
+) ->
+    case maps:get(Key, BatchInsertsTks, undefined) of
+        undefined ->
+            BatchReqs;
+        #{?BATCH_INSERT_PART := BatchInserts, ?BATCH_PARAMS_TOKENS := BatchParamsTks} ->
+            SQL = emqx_plugin_libs_rule:proc_batch_sql(BatchReqs, BatchInserts, BatchParamsTks),
+            {Key, SQL}
+    end;
+apply_template(Query, Templates) ->
+    %% TODO: add more detailed information
+    ?SLOG(error, #{msg => "apply sql template failed", query => Query, templates => Templates}),
+    {error, failed_to_apply_sql_template}.
diff --git a/apps/emqx_bridge_sqlserver/test/emqx_bridge_sqlserver_SUITE.erl b/apps/emqx_bridge_sqlserver/test/emqx_bridge_sqlserver_SUITE.erl
new file mode 100644
index 000000000..fcf20da8f
--- /dev/null
+++ b/apps/emqx_bridge_sqlserver/test/emqx_bridge_sqlserver_SUITE.erl
@@ -0,0 +1,689 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+
+-module(emqx_bridge_sqlserver_SUITE).
+
+-compile(nowarn_export_all).
+-compile(export_all).
+
+-include("emqx_bridge_sqlserver/include/emqx_bridge_sqlserver.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("snabbkaffe/include/snabbkaffe.hrl").
+
+% SQL definitions
+-define(SQL_BRIDGE,
+    "insert into t_mqtt_msg(msgid, topic, qos, payload) values ( ${id}, ${topic}, ${qos}, ${payload})"
+).
+-define(SQL_SERVER_DRIVER, "ms-sql").
+
+-define(SQL_CREATE_DATABASE_IF_NOT_EXISTS,
+    " IF NOT EXISTS(SELECT name FROM sys.databases WHERE name = 'mqtt')"
+    " BEGIN"
+    " CREATE DATABASE mqtt;"
+    " END"
+).
+
+-define(SQL_CREATE_TABLE_IN_DB_MQTT,
+    " CREATE TABLE mqtt.dbo.t_mqtt_msg"
+    " (id int PRIMARY KEY IDENTITY(1000000001,1) NOT NULL,"
+    " msgid VARCHAR(64) NULL,"
+    " topic VARCHAR(100) NULL,"
+    " qos tinyint NOT NULL DEFAULT 0,"
+    %% use VARCHAR to get utf8 encoding;
+    %% by default, SQL Server uses utf16 encoding with NVARCHAR()
+    " payload VARCHAR(100) NULL,"
+    " arrived DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP)"
+).
+
+-define(SQL_DROP_DB_MQTT, "DROP DATABASE mqtt").
+-define(SQL_DROP_TABLE, "DROP TABLE mqtt.dbo.t_mqtt_msg").
+-define(SQL_DELETE, "DELETE from mqtt.dbo.t_mqtt_msg").
+-define(SQL_SELECT, "SELECT payload FROM mqtt.dbo.t_mqtt_msg").
+-define(SQL_SELECT_COUNT, "SELECT COUNT(*) FROM mqtt.dbo.t_mqtt_msg").
+% DB defaults
+-define(SQL_SERVER_DATABASE, "mqtt").
+-define(SQL_SERVER_USERNAME, "sa").
+-define(SQL_SERVER_PASSWORD, "mqtt_public1").
+-define(BATCH_SIZE, 10).
+-define(REQUEST_TIMEOUT_MS, 500).
+
+-define(WORKER_POOL_SIZE, 4).
+
+-define(WITH_CON(Process),
+    Con = connect_direct_sqlserver(Config),
+    Process,
+    ok = disconnect(Con)
+).
+
+%% How to run it locally (all commands are run in $PROJ_ROOT dir):
+%% A: run ct on host
+%% 1. Start all deps services
+%% ```bash
+%% sudo docker compose -f .ci/docker-compose-file/docker-compose.yaml \
+%%   -f .ci/docker-compose-file/docker-compose-sqlserver.yaml \
+%%   -f .ci/docker-compose-file/docker-compose-toxiproxy.yaml \
+%%   up --build
+%% ```
+%%
+%% 2. Run use cases with special environment variables
+%% 11433 is the port exposed by toxiproxy.
+%% Local:
+%% ```bash
+%% SQLSERVER_HOST=toxiproxy SQLSERVER_PORT=11433 \
+%%   PROXY_HOST=toxiproxy PROXY_PORT=1433 \
+%%   ./rebar3 as test ct -c -v --readable true --name ct@127.0.0.1 \
+%%   --suite apps/emqx_bridge_sqlserver/test/emqx_bridge_sqlserver_SUITE.erl
+%% ```
+%%
+%% B: run ct in docker container
+%% Run the script:
+%% ```bash
+%% ./scripts/ct/run.sh --ci --app apps/emqx_bridge_sqlserver/ -- \
+%%   --name 'test@127.0.0.1' -c -v --readable true \
+%%   --suite apps/emqx_bridge_sqlserver/test/emqx_bridge_sqlserver_SUITE.erl
+%% ```
+
+%%------------------------------------------------------------------------------
+%% CT boilerplate
+%%------------------------------------------------------------------------------
+
+all() ->
+    [
+        {group, async},
+        {group, sync}
+    ].
+
+groups() ->
+    TCs = emqx_common_test_helpers:all(?MODULE),
+    NonBatchCases = [t_write_timeout],
+    BatchingGroups = [{group, with_batch}, {group, without_batch}],
+    [
+        {async, BatchingGroups},
+        {sync, BatchingGroups},
+        {with_batch, TCs -- NonBatchCases},
+        {without_batch, TCs}
+    ].
+
+init_per_group(async, Config) ->
+    [{query_mode, async} | Config];
+init_per_group(sync, Config) ->
+    [{query_mode, sync} | Config];
+init_per_group(with_batch, Config0) ->
+    Config = [{enable_batch, true} | Config0],
+    common_init(Config);
+init_per_group(without_batch, Config0) ->
+    Config = [{enable_batch, false} | Config0],
+    common_init(Config);
+init_per_group(_Group, Config) ->
+    Config.
+
+end_per_group(Group, Config) when Group =:= with_batch; Group =:= without_batch ->
+    connect_and_drop_table(Config),
+    connect_and_drop_db(Config),
+    ProxyHost = ?config(proxy_host, Config),
+    ProxyPort = ?config(proxy_port, Config),
+    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
+    ok;
+end_per_group(_Group, _Config) ->
+    ok.
+
+init_per_suite(Config) ->
+    Config.
+
+end_per_suite(_Config) ->
+    emqx_mgmt_api_test_util:end_suite(),
+    ok = emqx_common_test_helpers:stop_apps([emqx_bridge, emqx_conf]),
+    ok.
+
+init_per_testcase(_Testcase, Config) ->
+    %% drop database and table
+    %% connect_and_clear_table(Config),
+    %% create a new one
+    %% TODO: create a new database for each test case
+    delete_bridge(Config),
+    snabbkaffe:start_trace(),
+    Config.
+
+end_per_testcase(_Testcase, Config) ->
+    ProxyHost = ?config(proxy_host, Config),
+    ProxyPort = ?config(proxy_port, Config),
+    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
+    connect_and_clear_table(Config),
+    ok = snabbkaffe:stop(),
+    delete_bridge(Config),
+    ok.
+ +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ + +t_setup_via_config_and_publish(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + Val = str(erlang:unique_integer()), + SentData = sent_data(Val), + ?check_trace( + begin + ?wait_async_action( + ?assertEqual(ok, send_message(Config, SentData)), + #{?snk_kind := sqlserver_connector_query_return}, + 10_000 + ), + ?assertMatch( + [{Val}], + connect_and_get_payload(Config) + ), + ok + end, + fun(Trace0) -> + Trace = ?of_kind(sqlserver_connector_query_return, Trace0), + ?assertMatch([#{result := ok}], Trace), + ok + end + ), + ok. + +t_setup_via_http_api_and_publish(Config) -> + BridgeType = ?config(sqlserver_bridge_type, Config), + Name = ?config(sqlserver_name, Config), + SQLServerConfig0 = ?config(sqlserver_config, Config), + SQLServerConfig = SQLServerConfig0#{ + <<"name">> => Name, + <<"type">> => BridgeType + }, + ?assertMatch( + {ok, _}, + create_bridge_http(SQLServerConfig) + ), + Val = str(erlang:unique_integer()), + SentData = sent_data(Val), + ?check_trace( + begin + ?wait_async_action( + ?assertEqual(ok, send_message(Config, SentData)), + #{?snk_kind := sqlserver_connector_query_return}, + 10_000 + ), + ?assertMatch( + [{Val}], + connect_and_get_payload(Config) + ), + ok + end, + fun(Trace0) -> + Trace = ?of_kind(sqlserver_connector_query_return, Trace0), + ?assertMatch([#{result := ok}], Trace), + ok + end + ), + ok. + +t_get_status(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + + health_check_resource_ok(Config), + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + health_check_resource_down(Config) + end), + ok. + +t_create_disconnected(Config) -> + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ?assertMatch({ok, _}, create_bridge(Config)), + health_check_resource_down(Config) + end), + health_check_resource_ok(Config), + ok. + +t_create_with_invalid_password(Config) -> + BridgeType = ?config(sqlserver_bridge_type, Config), + Name = ?config(sqlserver_name, Config), + SQLServerConfig0 = ?config(sqlserver_config, Config), + SQLServerConfig = SQLServerConfig0#{ + <<"name">> => Name, + <<"type">> => BridgeType, + <<"password">> => <<"wrong_password">> + }, + ?check_trace( + begin + ?assertMatch( + {ok, _}, + create_bridge_http(SQLServerConfig) + ) + end, + fun(Trace) -> + ?assertMatch( + [#{error := {start_pool_failed, _, _}}], + ?of_kind(sqlserver_connector_start_failed, Trace) + ), + ok + end + ), + ok. 
+
+t_write_failure(Config) ->
+    ProxyName = ?config(proxy_name, Config),
+    ProxyPort = ?config(proxy_port, Config),
+    ProxyHost = ?config(proxy_host, Config),
+    QueryMode = ?config(query_mode, Config),
+    Val = str(erlang:unique_integer()),
+    SentData = sent_data(Val),
+    {{ok, _}, {ok, _}} =
+        ?wait_async_action(
+            create_bridge(Config),
+            #{?snk_kind := resource_connected_enter},
+            20_000
+        ),
+    emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
+        case QueryMode of
+            sync ->
+                ?assertMatch(
+                    {error, {resource_error, #{reason := timeout}}},
+                    send_message(Config, SentData)
+                );
+            async ->
+                ?assertMatch(
+                    ok, send_message(Config, SentData)
+                )
+        end
+    end),
+    ok.
+
+t_write_timeout(_Config) ->
+    %% the msodbc driver handles all connection exceptions,
+    %% so this case is the same as t_write_failure/1
+    ok.
+
+t_simple_query(Config) ->
+    BatchSize = batch_size(Config),
+    ?assertMatch(
+        {ok, _},
+        create_bridge(Config)
+    ),
+    {Requests, Vals} = gen_batch_req(BatchSize),
+    ?check_trace(
+        begin
+            ?wait_async_action(
+                begin
+                    [?assertEqual(ok, query_resource(Config, Request)) || Request <- Requests]
+                end,
+                #{?snk_kind := sqlserver_connector_query_return},
+                10_000
+            ),
+            %% just assert the data count is correct
+            ?assertMatch(
+                BatchSize,
+                connect_and_get_count(Config)
+            ),
+            %% assert the data order is correct
+            ?assertMatch(
+                Vals,
+                connect_and_get_payload(Config)
+            )
+        end,
+        fun(Trace0) ->
+            Trace = ?of_kind(sqlserver_connector_query_return, Trace0),
+            case BatchSize of
+                1 ->
+                    ?assertMatch([#{result := ok}], Trace);
+                _ ->
+                    [?assertMatch(#{result := ok}, Trace1) || Trace1 <- Trace]
+            end,
+            ok
+        end
+    ),
+    ok.
+
+-define(MISSING_TINYINT_ERROR,
+    "[Microsoft][ODBC Driver 17 for SQL Server][SQL Server]"
+    "Conversion failed when converting the varchar value 'undefined' to data type tinyint. SQLSTATE IS: 22018"
+).
+
+t_missing_data(Config) ->
+    QueryMode = ?config(query_mode, Config),
+    ?assertMatch(
+        {ok, _},
+        create_bridge(Config)
+    ),
+    Result = send_message(Config, #{}),
+    case QueryMode of
+        sync ->
+            ?assertMatch(
+                {error, {unrecoverable_error, {invalid_request, ?MISSING_TINYINT_ERROR}}},
+                Result
+            );
+        async ->
+            ?assertMatch(
+                ok, send_message(Config, #{})
+            )
+    end,
+    ok.
+
+t_bad_parameter(Config) ->
+    QueryMode = ?config(query_mode, Config),
+    ?assertMatch(
+        {ok, _},
+        create_bridge(Config)
+    ),
+    Result = send_message(Config, #{}),
+    case QueryMode of
+        sync ->
+            ?assertMatch(
+                {error, {unrecoverable_error, {invalid_request, ?MISSING_TINYINT_ERROR}}},
+                Result
+            );
+        async ->
+            ?assertMatch(
+                ok, send_message(Config, #{})
+            )
+    end,
+    ok.
+
+%%------------------------------------------------------------------------------
+%% Helper fns
+%%------------------------------------------------------------------------------
+
+common_init(ConfigT) ->
+    Host = os:getenv("SQLSERVER_HOST", "toxiproxy"),
+    Port = list_to_integer(os:getenv("SQLSERVER_PORT", str(?SQLSERVER_DEFAULT_PORT))),
+
+    Config0 = [
+        {sqlserver_host, Host},
+        {sqlserver_port, Port},
+        %% for `proxy_name`, see: $PROJ_ROOT/.ci/docker-compose-file/toxiproxy.json
+        {proxy_name, "sqlserver"},
+        {batch_size, batch_size(ConfigT)}
+        | ConfigT
+    ],
+
+    BridgeType = proplists:get_value(bridge_type, Config0, <<"sqlserver">>),
+    case emqx_common_test_helpers:is_tcp_server_available(Host, Port) of
+        true ->
+            % Setup toxiproxy
+            ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"),
+            ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")),
+            emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
+            % Ensure EE bridge module is loaded
+            _ = application:load(emqx_ee_bridge),
+            _ = emqx_ee_bridge:module_info(),
+            ok = emqx_common_test_helpers:start_apps([emqx_conf, emqx_bridge]),
+            emqx_mgmt_api_test_util:init_suite(),
+            % Connect to sqlserver directly,
+            % drop the old db and table, and then create new ones
+            connect_and_create_db_and_table(Config0),
+            {Name, SQLServerConf} = sqlserver_config(BridgeType, Config0),
+            Config =
+                [
+                    {sqlserver_config, SQLServerConf},
+                    {sqlserver_bridge_type, BridgeType},
+                    {sqlserver_name, Name},
+                    {proxy_host, ProxyHost},
+                    {proxy_port, ProxyPort}
+                    | Config0
+                ],
+            Config;
+        false ->
+            case os:getenv("IS_CI") of
+                "yes" ->
+                    throw(no_sqlserver);
+                _ ->
+                    {skip, no_sqlserver}
+            end
+    end.
+
+sqlserver_config(BridgeType, Config) ->
+    Port = integer_to_list(?config(sqlserver_port, Config)),
+    Server = ?config(sqlserver_host, Config) ++ ":" ++ Port,
+    Name = atom_to_binary(?MODULE),
+    BatchSize = batch_size(Config),
+    QueryMode = ?config(query_mode, Config),
+    ConfigString =
+        io_lib:format(
+            "bridges.~s.~s {\n"
+            "  enable = true\n"
+            "  server = ~p\n"
+            "  database = ~p\n"
+            "  username = ~p\n"
+            "  password = ~p\n"
+            "  sql = ~p\n"
+            "  driver = ~p\n"
+            "  resource_opts = {\n"
+            "    request_timeout = 500ms\n"
+            "    batch_size = ~b\n"
+            "    query_mode = ~s\n"
+            "    worker_pool_size = ~b\n"
+            "  }\n"
+            "}",
+            [
+                BridgeType,
+                Name,
+                Server,
+                ?SQL_SERVER_DATABASE,
+                ?SQL_SERVER_USERNAME,
+                ?SQL_SERVER_PASSWORD,
+                ?SQL_BRIDGE,
+                ?SQL_SERVER_DRIVER,
+                BatchSize,
+                QueryMode,
+                ?WORKER_POOL_SIZE
+            ]
+        ),
+    {Name, parse_and_check(ConfigString, BridgeType, Name)}.
+
+parse_and_check(ConfigString, BridgeType, Name) ->
+    {ok, RawConf} = hocon:binary(ConfigString, #{format => map}),
+    hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}),
+    #{<<"bridges">> := #{BridgeType := #{Name := Config}}} = RawConf,
+    Config.
+
+create_bridge(Config) ->
+    create_bridge(Config, _Overrides = #{}).
+
+create_bridge(Config, Overrides) ->
+    BridgeType = ?config(sqlserver_bridge_type, Config),
+    Name = ?config(sqlserver_name, Config),
+    SSConfig0 = ?config(sqlserver_config, Config),
+    SSConfig = emqx_utils_maps:deep_merge(SSConfig0, Overrides),
+    emqx_bridge:create(BridgeType, Name, SSConfig).
+
+delete_bridge(Config) ->
+    BridgeType = ?config(sqlserver_bridge_type, Config),
+    Name = ?config(sqlserver_name, Config),
+    emqx_bridge:remove(BridgeType, Name).
+ +create_bridge_http(Params) -> + Path = emqx_mgmt_api_test_util:api_path(["bridges"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of + {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; + Error -> Error + end. + +send_message(Config, Payload) -> + Name = ?config(sqlserver_name, Config), + BridgeType = ?config(sqlserver_bridge_type, Config), + BridgeID = emqx_bridge_resource:bridge_id(BridgeType, Name), + emqx_bridge:send_message(BridgeID, Payload). + +query_resource(Config, Request) -> + Name = ?config(sqlserver_name, Config), + BridgeType = ?config(sqlserver_bridge_type, Config), + ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), + emqx_resource:query(ResourceID, Request, #{timeout => 1_000}). + +query_resource_async(Config, Request) -> + Name = ?config(sqlserver_name, Config), + BridgeType = ?config(sqlserver_bridge_type, Config), + Ref = alias([reply]), + AsyncReplyFun = fun(Result) -> Ref ! {result, Ref, Result} end, + ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), + Return = emqx_resource:query(ResourceID, Request, #{ + timeout => 500, async_reply_fun => {AsyncReplyFun, []} + }), + {Return, Ref}. + +resource_id(Config) -> + Name = ?config(sqlserver_name, Config), + BridgeType = ?config(sqlserver_bridge_type, Config), + _ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name). + +health_check_resource_ok(Config) -> + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(resource_id(Config))). + +health_check_resource_down(Config) -> + case emqx_resource_manager:health_check(resource_id(Config)) of + {ok, Status} when Status =:= disconnected orelse Status =:= connecting -> + ok; + {error, timeout} -> + ok; + Other -> + ?assert( + false, lists:flatten(io_lib:format("invalid health check result:~p~n", [Other])) + ) + end. + +receive_result(Ref, Timeout) -> + receive + {result, Ref, Result} -> + {ok, Result}; + {Ref, Result} -> + {ok, Result} + after Timeout -> + timeout + end. + +connect_direct_sqlserver(Config) -> + Opts = [ + {host, ?config(sqlserver_host, Config)}, + {port, ?config(sqlserver_port, Config)}, + {username, ?SQL_SERVER_USERNAME}, + {password, ?SQL_SERVER_PASSWORD}, + {driver, ?SQL_SERVER_DRIVER}, + {pool_size, 8} + ], + {ok, Con} = connect(Opts), + Con. + +connect(Options) -> + ConnectStr = lists:concat(conn_str(Options, [])), + Opts = proplists:get_value(options, Options, []), + odbc:connect(ConnectStr, Opts). + +disconnect(Ref) -> + odbc:disconnect(Ref). + +% These funs connect and then stop the sqlserver connection +connect_and_create_db_and_table(Config) -> + ?WITH_CON(begin + {updated, undefined} = directly_query(Con, ?SQL_CREATE_DATABASE_IF_NOT_EXISTS), + {updated, undefined} = directly_query(Con, ?SQL_CREATE_TABLE_IN_DB_MQTT) + end). + +connect_and_drop_db(Config) -> + ?WITH_CON({updated, undefined} = directly_query(Con, ?SQL_DROP_DB_MQTT)). + +connect_and_drop_table(Config) -> + ?WITH_CON({updated, undefined} = directly_query(Con, ?SQL_DROP_TABLE)). + +connect_and_clear_table(Config) -> + ?WITH_CON({updated, _} = directly_query(Con, ?SQL_DELETE)). + +connect_and_get_payload(Config) -> + ?WITH_CON( + {selected, ["payload"], Rows} = directly_query(Con, ?SQL_SELECT) + ), + Rows. + +connect_and_get_count(Config) -> + ?WITH_CON( + {selected, [[]], [{Count}]} = directly_query(Con, ?SQL_SELECT_COUNT) + ), + Count. + +directly_query(Con, Query) -> + directly_query(Con, Query, ?REQUEST_TIMEOUT_MS). 
+
+directly_query(Con, Query, Timeout) ->
+    odbc:sql_query(Con, Query, Timeout).
+
+%%--------------------------------------------------------------------
+%% helper functions
+%%--------------------------------------------------------------------
+
+batch_size(Config) ->
+    case ?config(enable_batch, Config) of
+        true -> ?BATCH_SIZE;
+        false -> 1
+    end.
+
+conn_str([], Acc) ->
+    %% TODO: for msodbcsql 18+, we need to add "Encrypt=YES;TrustServerCertificate=YES"
+    %% to the connection string, but this hasn't been tested yet:
+    %% lists:join(";", ["Encrypt=YES", "TrustServerCertificate=YES" | Acc]);
+    lists:join(";", Acc);
+conn_str([{driver, Driver} | Opts], Acc) ->
+    conn_str(Opts, ["Driver=" ++ str(Driver) | Acc]);
+conn_str([{host, Host} | Opts], Acc) ->
+    Port = proplists:get_value(port, Opts, str(?SQLSERVER_DEFAULT_PORT)),
+    NOpts = proplists:delete(port, Opts),
+    conn_str(NOpts, ["Server=" ++ str(Host) ++ "," ++ str(Port) | Acc]);
+conn_str([{port, Port} | Opts], Acc) ->
+    Host = proplists:get_value(host, Opts, "localhost"),
+    NOpts = proplists:delete(host, Opts),
+    conn_str(NOpts, ["Server=" ++ str(Host) ++ "," ++ str(Port) | Acc]);
+conn_str([{database, Database} | Opts], Acc) ->
+    conn_str(Opts, ["Database=" ++ str(Database) | Acc]);
+conn_str([{username, Username} | Opts], Acc) ->
+    conn_str(Opts, ["UID=" ++ str(Username) | Acc]);
+conn_str([{password, Password} | Opts], Acc) ->
+    conn_str(Opts, ["PWD=" ++ str(Password) | Acc]);
+conn_str([{_, _} | Opts], Acc) ->
+    conn_str(Opts, Acc).
+
+sent_data(Payload) ->
+    #{
+        payload => to_bin(Payload),
+        id => <<"0005F8F84FFFAFB9F44200000D810002">>,
+        topic => <<"test/topic">>,
+        qos => 0
+    }.
+
+gen_batch_req(Count) when
+    is_integer(Count) andalso Count > 0
+->
+    Vals = [{str(erlang:unique_integer())} || _Seq <- lists:seq(1, Count)],
+    Requests = [{send_message, sent_data(Payload)} || {Payload} <- Vals],
+    {Requests, Vals};
+gen_batch_req(Count) ->
+    ct:pal("Generating batch requests failed with unexpected Count: ~p", [Count]).
+
+str(List) when is_list(List) ->
+    unicode:characters_to_list(List, utf8);
+str(Bin) when is_binary(Bin) ->
+    unicode:characters_to_list(Bin, utf8);
+str(Num) when is_number(Num) ->
+    number_to_list(Num).
+
+number_to_list(Int) when is_integer(Int) ->
+    integer_to_list(Int);
+number_to_list(Float) when is_float(Float) ->
+    float_to_list(Float, [{decimals, 10}, compact]).
+
+to_bin(List) when is_list(List) ->
+    unicode:characters_to_binary(List, utf8);
+to_bin(Bin) when is_binary(Bin) ->
+    Bin.
diff --git a/apps/emqx_bridge_tdengine/BSL.txt b/apps/emqx_bridge_tdengine/BSL.txt
new file mode 100644
index 000000000..0acc0e696
--- /dev/null
+++ b/apps/emqx_bridge_tdengine/BSL.txt
@@ -0,0 +1,94 @@
+Business Source License 1.1
+
+Licensor:             Hangzhou EMQ Technologies Co., Ltd.
+Licensed Work:        EMQX Enterprise Edition
+                      The Licensed Work is (c) 2023
+                      Hangzhou EMQ Technologies Co., Ltd.
+Additional Use Grant: Students and educators are granted right to copy,
+                      modify, and create derivative work for research
+                      or education.
+Change Date:          2027-02-01
+Change License:       Apache License, Version 2.0
+
+For information about alternative licensing arrangements for the Software,
+please contact Licensor: https://www.emqx.com/en/contact
+
+Notice
+
+The Business Source License (this document, or the “License”) is not an Open
+Source license. However, the Licensed Work will eventually be made available
+under an Open Source License, as stated in this License.
+
+License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
+“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. 
diff --git a/apps/emqx_bridge_tdengine/README.md b/apps/emqx_bridge_tdengine/README.md
new file mode 100644
index 000000000..25faf4c14
--- /dev/null
+++ b/apps/emqx_bridge_tdengine/README.md
@@ -0,0 +1,39 @@
+# EMQX TDengine Bridge
+
+[TDengine](https://github.com/taosdata/TDengine) is an open-source, cloud-native
+time series database (TSDB) optimized for the Internet of Things (IoT), Connected Cars,
+and Industrial IoT.
+It enables efficient, real-time ingestion, processing, and monitoring of petabytes
+of data per day, generated by billions of sensors and data collectors.
+
+This application connects EMQX and TDengine.
+Users can create a rule and easily ingest IoT data into TDengine by leveraging
+[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html).
+
+
+# Documentation
+
+- Refer to [Ingest data into TDengine](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-tdengine.html)
+  for how to use the EMQX dashboard to ingest IoT data into TDengine.
+
+- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
+  for an introduction to the EMQX rules engine.
+
+
+# HTTP APIs
+
+- Several APIs are provided for bridge management, including creating, updating,
+  getting, stopping or restarting, and listing bridges.
+
+  Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges)
+  for more detailed information.
+
+
+# Contributing
+
+Please see our [contributing.md](../../CONTRIBUTING.md).
+
+
+# License
+
+EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).
diff --git a/apps/emqx_bridge_tdengine/docker-ct b/apps/emqx_bridge_tdengine/docker-ct
new file mode 100644
index 000000000..c6f0bc826
--- /dev/null
+++ b/apps/emqx_bridge_tdengine/docker-ct
@@ -0,0 +1,2 @@
+toxiproxy
+tdengine
diff --git a/apps/emqx_bridge_tdengine/rebar.config b/apps/emqx_bridge_tdengine/rebar.config
new file mode 100644
index 000000000..72ebca1db
--- /dev/null
+++ b/apps/emqx_bridge_tdengine/rebar.config
@@ -0,0 +1,8 @@
+{erl_opts, [debug_info]}.
+
+{deps, [
+    {tdengine, {git, "https://github.com/emqx/tdengine-client-erl", {tag, "0.1.6"}}},
+    {emqx_connector, {path, "../../apps/emqx_connector"}},
+    {emqx_resource, {path, "../../apps/emqx_resource"}},
+    {emqx_bridge, {path, "../../apps/emqx_bridge"}}
+]}.
diff --git a/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine.app.src b/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine.app.src
new file mode 100644
index 000000000..141973e1e
--- /dev/null
+++ b/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine.app.src
@@ -0,0 +1,9 @@
+{application, emqx_bridge_tdengine, [
+    {description, "EMQX Enterprise TDEngine Bridge"},
+    {vsn, "0.1.1"},
+    {registered, []},
+    {applications, [kernel, stdlib, tdengine]},
+    {env, []},
+    {modules, []},
+    {links, []}
+]}.
diff --git a/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine.erl b/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine.erl
new file mode 100644
index 000000000..abdc26592
--- /dev/null
+++ b/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine.erl
@@ -0,0 +1,106 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+-module(emqx_bridge_tdengine).
+
+-include_lib("typerefl/include/types.hrl").
+-include_lib("hocon/include/hoconsc.hrl").
+-include_lib("emqx_resource/include/emqx_resource.hrl").
+ +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-export([ + conn_bridge_examples/1, + values/1 +]). + +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +-define(DEFAULT_SQL, << + "insert into t_mqtt_msg(ts, msgid, mqtt_topic, qos, payload, arrived) " + "values (${ts}, ${id}, ${topic}, ${qos}, ${payload}, ${timestamp})" +>>). + +%% ------------------------------------------------------------------------------------------------- +%% api + +conn_bridge_examples(Method) -> + [ + #{ + <<"tdengine">> => #{ + summary => <<"TDengine Bridge">>, + value => values(Method) + } + } + ]. + +values(_Method) -> + #{ + enable => true, + type => tdengine, + name => <<"foo">>, + server => <<"127.0.0.1:6041">>, + database => <<"mqtt">>, + pool_size => 8, + username => <<"root">>, + password => <<"******">>, + sql => ?DEFAULT_SQL, + local_topic => <<"local/topic/#">>, + resource_opts => #{ + worker_pool_size => 8, + health_check_interval => ?HEALTHCHECK_INTERVAL_RAW, + auto_restart_interval => ?AUTO_RESTART_INTERVAL_RAW, + batch_size => ?DEFAULT_BATCH_SIZE, + batch_time => ?DEFAULT_BATCH_TIME, + query_mode => sync, + max_buffer_bytes => ?DEFAULT_BUFFER_BYTES + } + }. + +%% ------------------------------------------------------------------------------------------------- +%% Hocon Schema Definitions +namespace() -> "bridge_tdengine". + +roots() -> []. + +fields("config") -> + [ + {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, + {sql, + mk( + binary(), + #{desc => ?DESC("sql_template"), default => ?DEFAULT_SQL, format => <<"sql">>} + )}, + {local_topic, + mk( + binary(), + #{desc => ?DESC("local_topic"), default => undefined} + )} + ] ++ emqx_resource_schema:fields("resource_opts") ++ + emqx_bridge_tdengine_connector:fields(config); +fields("post") -> + [type_field(), name_field() | fields("config")]; +fields("put") -> + fields("config"); +fields("get") -> + emqx_bridge_schema:status_fields() ++ fields("post"). + +desc("config") -> + ?DESC("desc_config"); +desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> + ["Configuration for TDengine using `", string:to_upper(Method), "` method."]; +desc(_) -> + undefined. + +%% ------------------------------------------------------------------------------------------------- + +type_field() -> + {type, mk(enum([tdengine]), #{required => true, desc => ?DESC("desc_type")})}. + +name_field() -> + {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. diff --git a/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine_connector.erl b/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine_connector.erl new file mode 100644 index 000000000..46a70e8b6 --- /dev/null +++ b/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine_connector.erl @@ -0,0 +1,249 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_tdengine_connector). + +-behaviour(emqx_resource). + +-include_lib("emqx_resource/include/emqx_resource.hrl"). +-include_lib("typerefl/include/types.hrl"). +-include_lib("emqx/include/logger.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). + +-export([roots/0, fields/1]). + +%% `emqx_resource' API +-export([ + callback_mode/0, + is_buffer_supported/0, + on_start/2, + on_stop/2, + on_query/3, + on_batch_query/3, + on_get_status/2 +]). 
+
+-export([connect/1, do_get_status/1, execute/3]).
+
+-import(hoconsc, [mk/2, enum/1, ref/2]).
+
+-define(TD_HOST_OPTIONS, #{
+    default_port => 6041
+}).
+
+%%=====================================================================
+%% Hocon schema
+roots() ->
+    [{config, #{type => hoconsc:ref(?MODULE, config)}}].
+
+fields(config) ->
+    [
+        {server, server()}
+        | adjust_fields(emqx_connector_schema_lib:relational_db_fields())
+    ].
+
+adjust_fields(Fields) ->
+    lists:map(
+        fun
+            ({username, OrigUsernameFn}) ->
+                {username, add_default_fn(OrigUsernameFn, <<"root">>)};
+            ({password, OrigPasswordFn}) ->
+                {password, make_required_fn(OrigPasswordFn)};
+            (Field) ->
+                Field
+        end,
+        Fields
+    ).
+
+add_default_fn(OrigFn, Default) ->
+    fun
+        (default) -> Default;
+        (Field) -> OrigFn(Field)
+    end.
+
+make_required_fn(OrigFn) ->
+    fun
+        (required) -> true;
+        (Field) -> OrigFn(Field)
+    end.
+
+server() ->
+    Meta = #{desc => ?DESC("server")},
+    emqx_schema:servers_sc(Meta, ?TD_HOST_OPTIONS).
+
+%%========================================================================================
+%% `emqx_resource' API
+%%========================================================================================
+
+callback_mode() -> always_sync.
+
+is_buffer_supported() -> false.
+
+on_start(
+    InstanceId,
+    #{
+        server := Server,
+        username := Username,
+        password := Password,
+        pool_size := PoolSize
+    } = Config
+) ->
+    ?SLOG(info, #{
+        msg => "starting_tdengine_connector",
+        connector => InstanceId,
+        config => emqx_utils:redact(Config)
+    }),
+
+    #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, ?TD_HOST_OPTIONS),
+    Options = [
+        {host, to_bin(Host)},
+        {port, Port},
+        {username, Username},
+        {password, Password},
+        {pool_size, PoolSize},
+        {pool, binary_to_atom(InstanceId, utf8)}
+    ],
+
+    Prepares = parse_prepare_sql(Config),
+    State = Prepares#{pool_name => InstanceId, query_opts => query_opts(Config)},
+    case emqx_resource_pool:start(InstanceId, ?MODULE, Options) of
+        ok ->
+            {ok, State};
+        Error ->
+            Error
+    end.
+
+on_stop(InstanceId, #{pool_name := PoolName}) ->
+    ?SLOG(info, #{
+        msg => "stopping_tdengine_connector",
+        connector => InstanceId
+    }),
+    emqx_resource_pool:stop(PoolName).
+
+on_query(InstanceId, {query, SQL}, State) ->
+    do_query(InstanceId, SQL, State);
+on_query(InstanceId, Request, State) ->
+    %% Because the `emqx-tdengine` client only supports a single SQL command,
+    %% `on_query` and `on_batch_query` share the same code path: collect all
+    %% data into one SQL command and then call the insert API.
+    on_batch_query(InstanceId, [Request], State).
+
+on_batch_query(
+    InstanceId,
+    BatchReq,
+    #{batch_inserts := Inserts, batch_params_tokens := ParamsTokens} = State
+) ->
+    case hd(BatchReq) of
+        {Key, _} ->
+            case maps:get(Key, Inserts, undefined) of
+                undefined ->
+                    {error, {unrecoverable_error, batch_prepare_not_implemented}};
+                InsertSQL ->
+                    Tokens = maps:get(Key, ParamsTokens),
+                    do_batch_insert(InstanceId, BatchReq, InsertSQL, Tokens, State)
+            end;
+        Request ->
+            LogMeta = #{connector => InstanceId, first_request => Request, state => State},
+            ?SLOG(error, LogMeta#{msg => "invalid request"}),
+            {error, {unrecoverable_error, invalid_request}}
+    end.
+
+on_get_status(_InstanceId, #{pool_name := PoolName}) ->
+    Health = emqx_resource_pool:health_check_workers(PoolName, fun ?MODULE:do_get_status/1),
+    status_result(Health).
+
+do_get_status(Conn) ->
+    case tdengine:insert(Conn, "select server_version()", []) of
+        {ok, _} -> true;
+        _ -> false
+    end.
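+
+%% Note: the `tdengine' client exposes `insert/3' as its generic query
+%% entry point, so the probe above issues "select server_version()"
+%% through it; any `{ok, _}' reply is treated as a healthy worker.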
+ +status_result(_Status = true) -> connected; +status_result(_Status = false) -> connecting. + +%%======================================================================================== +%% Helper fns +%%======================================================================================== + +do_batch_insert(InstanceId, BatchReqs, InsertPart, Tokens, State) -> + SQL = emqx_plugin_libs_rule:proc_batch_sql(BatchReqs, InsertPart, Tokens), + do_query(InstanceId, SQL, State). + +do_query(InstanceId, Query, #{pool_name := PoolName, query_opts := Opts} = State) -> + ?TRACE( + "QUERY", + "tdengine_connector_received", + #{connector => InstanceId, query => Query, state => State} + ), + Result = ecpool:pick_and_do(PoolName, {?MODULE, execute, [Query, Opts]}, no_handover), + + case Result of + {error, Reason} -> + ?tp( + tdengine_connector_query_return, + #{error => Reason} + ), + ?SLOG(error, #{ + msg => "tdengine_connector_do_query_failed", + connector => InstanceId, + query => Query, + reason => Reason + }), + Result; + _ -> + ?tp( + tdengine_connector_query_return, + #{result => Result} + ), + Result + end. + +execute(Conn, Query, Opts) -> + tdengine:insert(Conn, Query, Opts). + +connect(Opts) -> + tdengine:start_link(Opts). + +query_opts(#{database := Database} = _Opts) -> + [{db_name, Database}]. + +parse_prepare_sql(Config) -> + SQL = + case maps:get(sql, Config, undefined) of + undefined -> #{}; + Template -> #{send_message => Template} + end, + + parse_batch_prepare_sql(maps:to_list(SQL), #{}, #{}). + +parse_batch_prepare_sql([{Key, H} | T], BatchInserts, BatchTks) -> + case emqx_plugin_libs_rule:detect_sql_type(H) of + {ok, select} -> + parse_batch_prepare_sql(T, BatchInserts, BatchTks); + {ok, insert} -> + case emqx_plugin_libs_rule:split_insert_sql(H) of + {ok, {InsertSQL, Params}} -> + ParamsTks = emqx_plugin_libs_rule:preproc_tmpl(Params), + parse_batch_prepare_sql( + T, + BatchInserts#{Key => InsertSQL}, + BatchTks#{Key => ParamsTks} + ); + {error, Reason} -> + ?SLOG(error, #{msg => "split sql failed", sql => H, reason => Reason}), + parse_batch_prepare_sql(T, BatchInserts, BatchTks) + end; + {error, Reason} -> + ?SLOG(error, #{msg => "detect sql type failed", sql => H, reason => Reason}), + parse_batch_prepare_sql(T, BatchInserts, BatchTks) + end; +parse_batch_prepare_sql([], BatchInserts, BatchTks) -> + #{ + batch_inserts => BatchInserts, + batch_params_tokens => BatchTks + }. + +to_bin(List) when is_list(List) -> + unicode:characters_to_binary(List, utf8). diff --git a/apps/emqx_bridge_tdengine/test/emqx_bridge_tdengine_SUITE.erl b/apps/emqx_bridge_tdengine/test/emqx_bridge_tdengine_SUITE.erl new file mode 100644 index 000000000..1b8db1aaa --- /dev/null +++ b/apps/emqx_bridge_tdengine/test/emqx_bridge_tdengine_SUITE.erl @@ -0,0 +1,550 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_tdengine_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +% SQL definitions +-define(SQL_BRIDGE, + "insert into mqtt.t_mqtt_msg(ts, payload) values (${timestamp}, ${payload})" +). + +-define(SQL_CREATE_DATABASE, "CREATE DATABASE IF NOT EXISTS mqtt; USE mqtt;"). 
+-define(SQL_CREATE_TABLE, + "CREATE TABLE t_mqtt_msg (\n" + " ts timestamp,\n" + " payload BINARY(1024)\n" + ");" +). +-define(SQL_DROP_TABLE, "DROP TABLE t_mqtt_msg"). +-define(SQL_DELETE, "DELETE from t_mqtt_msg"). +-define(SQL_SELECT, "SELECT payload FROM t_mqtt_msg"). + +% DB defaults +-define(TD_DATABASE, "mqtt"). +-define(TD_USERNAME, "root"). +-define(TD_PASSWORD, "taosdata"). +-define(BATCH_SIZE, 10). +-define(PAYLOAD, <<"HELLO">>). + +-define(WITH_CON(Process), + Con = connect_direct_tdengine(Config), + Process, + ok = tdengine:stop(Con) +). + +%%------------------------------------------------------------------------------ +%% CT boilerplate +%%------------------------------------------------------------------------------ + +all() -> + [ + {group, async}, + {group, sync} + ]. + +groups() -> + TCs = emqx_common_test_helpers:all(?MODULE), + NonBatchCases = [t_write_timeout], + BatchingGroups = [{group, with_batch}, {group, without_batch}], + [ + {async, BatchingGroups}, + {sync, BatchingGroups}, + {with_batch, TCs -- NonBatchCases}, + {without_batch, TCs} + ]. + +init_per_group(async, Config) -> + [{query_mode, async} | Config]; +init_per_group(sync, Config) -> + [{query_mode, sync} | Config]; +init_per_group(with_batch, Config0) -> + Config = [{enable_batch, true} | Config0], + common_init(Config); +init_per_group(without_batch, Config0) -> + Config = [{enable_batch, false} | Config0], + common_init(Config); +init_per_group(_Group, Config) -> + Config. + +end_per_group(Group, Config) when Group =:= with_batch; Group =:= without_batch -> + connect_and_drop_table(Config), + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + ok; +end_per_group(_Group, _Config) -> + ok. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + emqx_mgmt_api_test_util:end_suite(), + ok = emqx_common_test_helpers:stop_apps([emqx_bridge, emqx_conf]), + ok. + +init_per_testcase(_Testcase, Config) -> + connect_and_clear_table(Config), + delete_bridge(Config), + snabbkaffe:start_trace(), + Config. + +end_per_testcase(_Testcase, Config) -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + connect_and_clear_table(Config), + ok = snabbkaffe:stop(), + delete_bridge(Config), + ok. 
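+
+%% Note on the group matrix above: every testcase runs under
+%% {async | sync} x {with_batch | without_batch}; t_write_timeout is
+%% excluded from the with_batch groups because batch queries provide no
+%% way to set a per-query timeout (see t_write_timeout below).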
+ +%%------------------------------------------------------------------------------ +%% Helper fns +%%------------------------------------------------------------------------------ + +common_init(ConfigT) -> + Host = os:getenv("TDENGINE_HOST", "toxiproxy"), + Port = list_to_integer(os:getenv("TDENGINE_PORT", "6041")), + + Config0 = [ + {td_host, Host}, + {td_port, Port}, + {proxy_name, "tdengine_restful"} + | ConfigT + ], + + BridgeType = proplists:get_value(bridge_type, Config0, <<"tdengine">>), + case emqx_common_test_helpers:is_tcp_server_available(Host, Port) of + true -> + % Setup toxiproxy + ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"), + ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + % Ensure EE bridge module is loaded + _ = application:load(emqx_ee_bridge), + _ = emqx_ee_bridge:module_info(), + ok = emqx_common_test_helpers:start_apps([emqx_conf, emqx_bridge]), + emqx_mgmt_api_test_util:init_suite(), + % Connect to tdengine directly and create the table + connect_and_create_table(Config0), + {Name, TDConf} = tdengine_config(BridgeType, Config0), + Config = + [ + {tdengine_config, TDConf}, + {tdengine_bridge_type, BridgeType}, + {tdengine_name, Name}, + {proxy_host, ProxyHost}, + {proxy_port, ProxyPort} + | Config0 + ], + Config; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_tdengine); + _ -> + {skip, no_tdengine} + end + end. + +tdengine_config(BridgeType, Config) -> + Port = integer_to_list(?config(td_port, Config)), + Server = ?config(td_host, Config) ++ ":" ++ Port, + Name = atom_to_binary(?MODULE), + BatchSize = + case ?config(enable_batch, Config) of + true -> ?BATCH_SIZE; + false -> 1 + end, + QueryMode = ?config(query_mode, Config), + ConfigString = + io_lib:format( + "bridges.~s.~s {\n" + " enable = true\n" + " server = ~p\n" + " database = ~p\n" + " username = ~p\n" + " password = ~p\n" + " sql = ~p\n" + " resource_opts = {\n" + " request_timeout = 500ms\n" + " batch_size = ~b\n" + " query_mode = ~s\n" + " }\n" + "}", + [ + BridgeType, + Name, + Server, + ?TD_DATABASE, + ?TD_USERNAME, + ?TD_PASSWORD, + ?SQL_BRIDGE, + BatchSize, + QueryMode + ] + ), + {Name, parse_and_check(ConfigString, BridgeType, Name)}. + +parse_and_check(ConfigString, BridgeType, Name) -> + {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), + hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}), + #{<<"bridges">> := #{BridgeType := #{Name := Config}}} = RawConf, + Config. + +create_bridge(Config) -> + create_bridge(Config, _Overrides = #{}). + +create_bridge(Config, Overrides) -> + BridgeType = ?config(tdengine_bridge_type, Config), + Name = ?config(tdengine_name, Config), + TDConfig0 = ?config(tdengine_config, Config), + TDConfig = emqx_utils_maps:deep_merge(TDConfig0, Overrides), + emqx_bridge:create(BridgeType, Name, TDConfig). + +delete_bridge(Config) -> + BridgeType = ?config(tdengine_bridge_type, Config), + Name = ?config(tdengine_name, Config), + emqx_bridge:remove(BridgeType, Name). + +create_bridge_http(Params) -> + Path = emqx_mgmt_api_test_util:api_path(["bridges"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of + {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; + Error -> Error + end. 
+ +send_message(Config, Payload) -> + Name = ?config(tdengine_name, Config), + BridgeType = ?config(tdengine_bridge_type, Config), + BridgeID = emqx_bridge_resource:bridge_id(BridgeType, Name), + emqx_bridge:send_message(BridgeID, Payload). + +query_resource(Config, Request) -> + Name = ?config(tdengine_name, Config), + BridgeType = ?config(tdengine_bridge_type, Config), + ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), + emqx_resource:query(ResourceID, Request, #{timeout => 1_000}). + +query_resource_async(Config, Request) -> + Name = ?config(tdengine_name, Config), + BridgeType = ?config(tdengine_bridge_type, Config), + Ref = alias([reply]), + AsyncReplyFun = fun(Result) -> Ref ! {result, Ref, Result} end, + ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), + Return = emqx_resource:query(ResourceID, Request, #{ + timeout => 500, async_reply_fun => {AsyncReplyFun, []} + }), + {Return, Ref}. + +receive_result(Ref, Timeout) -> + receive + {result, Ref, Result} -> + {ok, Result}; + {Ref, Result} -> + {ok, Result} + after Timeout -> + timeout + end. + +connect_direct_tdengine(Config) -> + Opts = [ + {host, to_bin(?config(td_host, Config))}, + {port, ?config(td_port, Config)}, + {username, to_bin(?TD_USERNAME)}, + {password, to_bin(?TD_PASSWORD)}, + {pool_size, 8} + ], + + {ok, Con} = tdengine:start_link(Opts), + Con. + +% These funs connect and then stop the tdengine connection +connect_and_create_table(Config) -> + ?WITH_CON(begin + {ok, _} = directly_query(Con, ?SQL_CREATE_DATABASE, []), + {ok, _} = directly_query(Con, ?SQL_CREATE_TABLE) + end). + +connect_and_drop_table(Config) -> + ?WITH_CON({ok, _} = directly_query(Con, ?SQL_DROP_TABLE)). + +connect_and_clear_table(Config) -> + ?WITH_CON({ok, _} = directly_query(Con, ?SQL_DELETE)). + +connect_and_get_payload(Config) -> + ?WITH_CON( + {ok, #{<<"code">> := 0, <<"data">> := [[Result]]}} = directly_query(Con, ?SQL_SELECT) + ), + Result. + +directly_query(Con, Query) -> + directly_query(Con, Query, [{db_name, ?TD_DATABASE}]). + +directly_query(Con, Query, QueryOpts) -> + tdengine:insert(Con, Query, QueryOpts). + +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ + +t_setup_via_config_and_publish(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + SentData = #{payload => ?PAYLOAD, timestamp => 1668602148000}, + ?check_trace( + begin + {_, {ok, #{result := Result}}} = + ?wait_async_action( + send_message(Config, SentData), + #{?snk_kind := buffer_worker_flush_ack}, + 2_000 + ), + ?assertMatch( + {ok, #{<<"code">> := 0, <<"rows">> := 1}}, Result + ), + ?assertMatch( + ?PAYLOAD, + connect_and_get_payload(Config) + ), + ok + end, + fun(Trace0) -> + Trace = ?of_kind(tdengine_connector_query_return, Trace0), + ?assertMatch([#{result := {ok, #{<<"code">> := 0, <<"rows">> := 1}}}], Trace), + ok + end + ), + ok. 
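+
+%% The pattern above (also used in the following cases) relies on
+%% snabbkaffe: `?wait_async_action' blocks until the buffer worker
+%% emits a `buffer_worker_flush_ack' trace point, so assertions run
+%% only after the bridge has actually flushed the message to TDengine.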
+
+t_setup_via_http_api_and_publish(Config) ->
+    BridgeType = ?config(tdengine_bridge_type, Config),
+    Name = ?config(tdengine_name, Config),
+    QueryMode = ?config(query_mode, Config),
+    TDengineConfig0 = ?config(tdengine_config, Config),
+    TDengineConfig = TDengineConfig0#{
+        <<"name">> => Name,
+        <<"type">> => BridgeType
+    },
+    ?assertMatch(
+        {ok, _},
+        create_bridge_http(TDengineConfig)
+    ),
+    SentData = #{payload => ?PAYLOAD, timestamp => 1668602148000},
+    ?check_trace(
+        begin
+            Request = {send_message, SentData},
+            Res0 =
+                case QueryMode of
+                    sync ->
+                        query_resource(Config, Request);
+                    async ->
+                        {_, Ref} = query_resource_async(Config, Request),
+                        {ok, Res} = receive_result(Ref, 2_000),
+                        Res
+                end,
+
+            ?assertMatch(
+                {ok, #{<<"code">> := 0, <<"rows">> := 1}}, Res0
+            ),
+            ?assertMatch(
+                ?PAYLOAD,
+                connect_and_get_payload(Config)
+            ),
+            ok
+        end,
+        fun(Trace0) ->
+            Trace = ?of_kind(tdengine_connector_query_return, Trace0),
+            ?assertMatch([#{result := {ok, #{<<"code">> := 0, <<"rows">> := 1}}}], Trace),
+            ok
+        end
+    ),
+    ok.
+
+t_get_status(Config) ->
+    ?assertMatch(
+        {ok, _},
+        create_bridge(Config)
+    ),
+    ProxyPort = ?config(proxy_port, Config),
+    ProxyHost = ?config(proxy_host, Config),
+    ProxyName = ?config(proxy_name, Config),
+
+    Name = ?config(tdengine_name, Config),
+    BridgeType = ?config(tdengine_bridge_type, Config),
+    ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name),
+
+    ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceID)),
+    emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
+        ?assertMatch(
+            {ok, Status} when Status =:= disconnected orelse Status =:= connecting,
+            emqx_resource_manager:health_check(ResourceID)
+        )
+    end),
+    ok.
+
+t_write_failure(Config) ->
+    ProxyName = ?config(proxy_name, Config),
+    ProxyPort = ?config(proxy_port, Config),
+    ProxyHost = ?config(proxy_host, Config),
+    {ok, _} = create_bridge(Config),
+    SentData = #{payload => ?PAYLOAD, timestamp => 1668602148000},
+    emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
+        {_, {ok, #{result := Result}}} =
+            ?wait_async_action(
+                send_message(Config, SentData),
+                #{?snk_kind := buffer_worker_flush_ack},
+                2_000
+            ),
+        ?assertMatch({error, econnrefused}, Result),
+        ok
+    end),
+    ok.
+
+% This test doesn't work with batch enabled since it is not possible
+% to set the timeout directly for batch queries
+t_write_timeout(Config) ->
+    ProxyName = ?config(proxy_name, Config),
+    ProxyPort = ?config(proxy_port, Config),
+    ProxyHost = ?config(proxy_host, Config),
+    QueryMode = ?config(query_mode, Config),
+    {ok, _} = create_bridge(
+        Config,
+        #{
+            <<"resource_opts">> => #{
+                <<"request_timeout">> => 500,
+                <<"resume_interval">> => 100,
+                <<"health_check_interval">> => 100
+            }
+        }
+    ),
+    SentData = #{payload => ?PAYLOAD, timestamp => 1668602148000},
+    %% FIXME: the TDengine connector hangs indefinitely during
+    %% `call_query' while the connection is unresponsive. Should we add
+    %% a timeout to `APPLY_RESOURCE' in the buffer worker?
+    case QueryMode of
+        sync ->
+            emqx_common_test_helpers:with_failure(
+                timeout, ProxyName, ProxyHost, ProxyPort, fun() ->
+                    ?assertMatch(
+                        {error, {resource_error, #{reason := timeout}}},
+                        query_resource(Config, {send_message, SentData})
+                    )
+                end
+            );
+        async ->
+            ct:comment("tdengine connector hangs the buffer worker forever")
+    end,
+    ok.
+
+t_simple_sql_query(Config) ->
+    EnableBatch = ?config(enable_batch, Config),
+    ?assertMatch(
+        {ok, _},
+        create_bridge(Config)
+    ),
+    Request = {query, <<"SELECT count(1) AS T">>},
+    {_, {ok, #{result := Result}}} =
+        ?wait_async_action(
+            query_resource(Config, Request),
+            #{?snk_kind := buffer_worker_flush_ack},
+            2_000
+        ),
+    case EnableBatch of
+        true ->
+            ?assertEqual({error, {unrecoverable_error, batch_prepare_not_implemented}}, Result);
+        false ->
+            ?assertMatch({ok, #{<<"code">> := 0, <<"data">> := [[1]]}}, Result)
+    end,
+    ok.
+
+t_missing_data(Config) ->
+    ?assertMatch(
+        {ok, _},
+        create_bridge(Config)
+    ),
+    {_, {ok, #{result := Result}}} =
+        ?wait_async_action(
+            send_message(Config, #{}),
+            #{?snk_kind := buffer_worker_flush_ack},
+            2_000
+        ),
+    ?assertMatch(
+        {error, #{
+            <<"code">> := 534,
+            <<"desc">> := _
+        }},
+        Result
+    ),
+    ok.
+
+t_bad_sql_parameter(Config) ->
+    EnableBatch = ?config(enable_batch, Config),
+    ?assertMatch(
+        {ok, _},
+        create_bridge(Config)
+    ),
+    Request = {sql, <<"">>, [bad_parameter]},
+    {_, {ok, #{result := Result}}} =
+        ?wait_async_action(
+            query_resource(Config, Request),
+            #{?snk_kind := buffer_worker_flush_ack},
+            2_000
+        ),
+    case EnableBatch of
+        true ->
+            ?assertEqual({error, {unrecoverable_error, invalid_request}}, Result);
+        false ->
+            ?assertMatch(
+                {error, {unrecoverable_error, _}}, Result
+            )
+    end,
+    ok.
+
+t_nasty_sql_string(Config) ->
+    ?assertMatch(
+        {ok, _},
+        create_bridge(Config)
+    ),
+    % NOTE
+    % Column `payload` has BINARY type, so we would certainly like to test it
+    % with `lists:seq(1, 127)`, but:
+    % 1. There's no way to insert a zero byte in an SQL string; TDengine's
+    %    parser[1] seems to have no escape sequence for it, so a zero byte
+    %    probably confuses the interpreter somewhere down the line.
+    % 2. Bytes > 127 come back as U+FFFDs (i.e. replacement characters) in UTF-8 for
+    %    some reason.
+    %
+    % [1]: https://github.com/taosdata/TDengine/blob/066cb34a/source/libs/parser/src/parUtil.c#L279-L301
+    Payload = list_to_binary(lists:seq(1, 127)),
+    Message = #{payload => Payload, timestamp => erlang:system_time(millisecond)},
+    {_, {ok, #{result := Result}}} =
+        ?wait_async_action(
+            send_message(Config, Message),
+            #{?snk_kind := buffer_worker_flush_ack},
+            2_000
+        ),
+    ?assertMatch(
+        {ok, #{<<"code">> := 0, <<"rows">> := 1}},
+        Result
+    ),
+    ?assertEqual(
+        Payload,
+        connect_and_get_payload(Config)
+    ).
+
+to_bin(List) when is_list(List) ->
+    unicode:characters_to_binary(List, utf8);
+to_bin(Bin) when is_binary(Bin) ->
+    Bin.
diff --git a/apps/emqx_bridge_timescale/BSL.txt b/apps/emqx_bridge_timescale/BSL.txt
new file mode 100644
index 000000000..0acc0e696
--- /dev/null
+++ b/apps/emqx_bridge_timescale/BSL.txt
@@ -0,0 +1,94 @@
+Business Source License 1.1
+
+Licensor:             Hangzhou EMQ Technologies Co., Ltd.
+Licensed Work:        EMQX Enterprise Edition
+                      The Licensed Work is (c) 2023
+                      Hangzhou EMQ Technologies Co., Ltd.
+Additional Use Grant: Students and educators are granted right to copy,
+                      modify, and create derivative work for research
+                      or education.
+Change Date:          2027-02-01
+Change License:       Apache License, Version 2.0
+
+For information about alternative licensing arrangements for the Software,
+please contact Licensor: https://www.emqx.com/en/contact
+
+Notice
+
+The Business Source License (this document, or the “License”) is not an Open
+Source license. However, the Licensed Work will eventually be made available
+under an Open Source License, as stated in this License.
+ +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. 
diff --git a/apps/emqx_bridge_timescale/README.md b/apps/emqx_bridge_timescale/README.md
new file mode 100644
index 000000000..071cb0fa6
--- /dev/null
+++ b/apps/emqx_bridge_timescale/README.md
@@ -0,0 +1,40 @@
+# EMQX TimescaleDB Bridge
+
+[TimescaleDB](https://github.com/timescaleDB/timescaleDB) is an open-source database
+designed to make SQL scalable for time-series data.
+It is engineered up from PostgreSQL and packaged as a PostgreSQL extension,
+providing automatic partitioning across time and space (partitioning key), as well as full SQL support.
+
+This application connects EMQX and TimescaleDB.
+Users can create a rule and easily ingest IoT data into TimescaleDB by leveraging
+[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html).
+
+
+
+# HTTP APIs
+
+- Several APIs are provided for bridge management, including creating, updating,
+  getting, stopping or restarting, and listing bridges.
+
+  Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges)
+  for more detailed information.
+
+
+# Contributing
+
+Please see our [contributing.md](../../CONTRIBUTING.md).
+
+
+# License
+
+EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).
diff --git a/apps/emqx_bridge_timescale/rebar.config b/apps/emqx_bridge_timescale/rebar.config
new file mode 100644
index 000000000..87c145f26
--- /dev/null
+++ b/apps/emqx_bridge_timescale/rebar.config
@@ -0,0 +1,7 @@
+{erl_opts, [debug_info]}.
+
+{deps, [
+    {emqx_connector, {path, "../../apps/emqx_connector"}},
+    {emqx_resource, {path, "../../apps/emqx_resource"}},
+    {emqx_bridge, {path, "../../apps/emqx_bridge"}}
+]}.
diff --git a/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.app.src b/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.app.src
new file mode 100644
index 000000000..f533f3b04
--- /dev/null
+++ b/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.app.src
@@ -0,0 +1,9 @@
+{application, emqx_bridge_timescale, [
+    {description, "EMQX Enterprise TimescaleDB Bridge"},
+    {vsn, "0.1.1"},
+    {registered, []},
+    {applications, [kernel, stdlib]},
+    {env, []},
+    {modules, []},
+    {links, []}
+]}.
diff --git a/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.erl b/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.erl
new file mode 100644
index 000000000..c4dedf07c
--- /dev/null
+++ b/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.erl
@@ -0,0 +1,42 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+-module(emqx_bridge_timescale).
+
+-export([
+    conn_bridge_examples/1
+]).
+
+-export([
+    namespace/0,
+    roots/0,
+    fields/1,
+    desc/1
+]).
+
+%% -------------------------------------------------------------------------------------------------
+%% api
+
+conn_bridge_examples(Method) ->
+    [
+        #{
+            <<"timescale">> => #{
+                summary => <<"Timescale Bridge">>,
+                value => emqx_bridge_pgsql:values(Method, timescale)
+            }
+        }
+    ].
+
+%% -------------------------------------------------------------------------------------------------
+%% Hocon Schema Definitions
+namespace() -> "bridge_timescale".
+
+roots() -> [].
+
+fields("post") ->
+    emqx_bridge_pgsql:fields("post", timescale);
+fields(Method) ->
+    emqx_bridge_pgsql:fields(Method).
+
+desc(_) ->
+    undefined.
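+
+%% Design note: TimescaleDB is packaged as a PostgreSQL extension and is
+%% wire-compatible with PostgreSQL, so this module only supplies its own
+%% connector examples and delegates all schema fields to
+%% emqx_bridge_pgsql above.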
diff --git a/apps/emqx_conf/README.md b/apps/emqx_conf/README.md new file mode 100644 index 000000000..f1efe7987 --- /dev/null +++ b/apps/emqx_conf/README.md @@ -0,0 +1,15 @@ +# Configuration Management + +This application provides configuration management capabilities for EMQX. + +At compile time it reads all configuration schemas and generates the following files: + * `config-en.md`: documentation for all configuration options. + * `schema-en.json`: JSON description of all configuration schema options. + * `emqx.conf.example`: an example of a complete configuration file. + +At runtime, it provides: +- Cluster configuration synchronization capability. + Responsible for synchronizing hot-update configurations from the HTTP API to the entire cluster + and ensuring consistency. + +In addition, this application manages system-level configurations such as `cluster`, `node`, `log`. diff --git a/apps/emqx_conf/etc/emqx_conf.conf b/apps/emqx_conf/etc/emqx_conf.conf index 86147bf25..76e3c0805 100644 --- a/apps/emqx_conf/etc/emqx_conf.conf +++ b/apps/emqx_conf/etc/emqx_conf.conf @@ -1,12 +1,13 @@ ## NOTE: -## Configs in this file might be overridden by: -## 1. Environment variables which start with 'EMQX_' prefix -## 2. File $EMQX_NODE__DATA_DIR/configs/cluster-override.conf -## 3. File $EMQX_NODE__DATA_DIR/configs/local-override.conf +## This config file overrides data/configs/cluster.hocon, +## and is merged with environment variables which start with 'EMQX_' prefix. ## -## The *-override.conf files are overwritten at runtime when changes -## are made from EMQX dashboard UI, management HTTP API, or CLI. -## All configuration details can be found in emqx.conf.example +## Config changes made from EMQX dashboard UI, management HTTP API, or CLI +## are stored in data/configs/cluster.hocon. +## To avoid confusion, please do not store the same configs in both files. +## +## See https://docs.emqx.com/en/enterprise/v5.0/configuration/configuration.html +## Configuration full example can be found in emqx.conf.example node { name = "emqx@127.0.0.1" @@ -14,13 +15,6 @@ node { data_dir = "{{ platform_data_dir }}" } -log { - file_handlers.default { - level = warning - file = "{{ platform_log_dir }}/emqx.log" - } -} - cluster { name = emqxcl discovery_strategy = manual diff --git a/apps/emqx_conf/i18n/emqx_conf_schema.conf b/apps/emqx_conf/i18n/emqx_conf_schema.conf deleted file mode 100644 index 337823233..000000000 --- a/apps/emqx_conf/i18n/emqx_conf_schema.conf +++ /dev/null @@ -1,1537 +0,0 @@ -emqx_conf_schema { - - cluster_name { - desc { - en: """Human-friendly name of the EMQX cluster.""" - zh: """EMQX集群名称。每个集群都有一个唯一的名称。服务发现时会用于做路径的一部分。""" - } - label { - en: "Cluster Name" - zh: "集群名称" - } - } - - process_limit { - desc { - en: """Maximum number of simultaneously existing processes for this Erlang system. -The actual maximum chosen may be much larger than the Number passed. -For more information, see: https://www.erlang.org/doc/man/erl.html - """ - zh: """Erlang系统同时存在的最大进程数。 -实际选择的最大值可能比设置的数字大得多。 -参考: https://www.erlang.org/doc/man/erl.html - """ - } - label { - en: "Erlang Process Limit" - zh: "Erlang 最大进程数" - } - } - - max_ports { - desc { - en: """Maximum number of simultaneously existing ports for this Erlang system. -The actual maximum chosen may be much larger than the Number passed. 
-For more information, see: https://www.erlang.org/doc/man/erl.html - """ - zh: """Erlang系统同时存在的最大端口数。 -实际选择的最大值可能比设置的数字大得多。 -参考: https://www.erlang.org/doc/man/erl.html - """ - } - label { - en: "Erlang Port Limit" - zh: "Erlang 最大端口数" - } - } - - dist_buffer_size { - desc { - en: """Erlang's distribution buffer busy limit in kilobytes.""" - zh: """Erlang分布式缓冲区的繁忙阈值,单位是KB。""" - } - label { - en: "Erlang's dist buffer size(KB)" - zh: "Erlang分布式缓冲区的繁忙阈值(KB)" - } - } - - max_ets_tables { - desc { - en: """Max number of ETS tables""" - zh: """Erlang ETS 表的最大数量""" - } - label { - en: "Max number of ETS tables" - zh: "Erlang 表的最大数量" - } - } - - cluster_discovery_strategy { - desc { - en: """Service discovery method for the cluster nodes.""" - zh: """集群节点发现方式。可选值为: -- manual: 手动加入集群
-- static: 配置静态节点。配置几个固定的节点,新节点通过连接固定节点中的某一个来加入集群。
-- mcast: 使用 UDP 多播的方式发现节点。
-- dns: 使用 DNS A 记录的方式发现节点。
-- etcd: 使用 etcd 发现节点。
-- k8s: 使用 Kubernetes 发现节点。
- """ - } - label { - en: "Cluster Discovery Strategy" - zh: "集群服务发现策略" - } - } - - cluster_autoclean { - desc { - en: """Remove disconnected nodes from the cluster after this interval.""" - zh: """指定多久之后从集群中删除离线节点。""" - } - label { - en: "Cluster Auto Clean" - zh: "自动删除离线节点时间" - } - } - - cluster_autoheal { - desc { - en: """If true, the node will try to heal network partitions automatically.""" - zh: """集群脑裂自动恢复机制开关。""" - } - label { - en: "Cluster Auto Heal" - zh: "节点脑裂自动修复机制" - } - } - - cluster_proto_dist { - desc { - en: """The Erlang distribution protocol for the cluster.""" - zh: """分布式 Erlang 集群协议类型。可选值为: -- inet_tcp: 使用 IPv4
-- inet6_tcp 使用 IPv6
-- inet_tls: 使用 TLS,需要与 node.ssl_dist_optfile 配置一起使用。
- """ - } - label { - en: "Cluster Protocol Distribution" - zh: "集群内部通信协议" - } - } - - cluster_static_seeds { - desc { - en: """List EMQX node names in the static cluster. See node.name.""" - zh: """集群中的EMQX节点名称列表, -指定固定的节点列表,多个节点间使用逗号 , 分隔。 -当 cluster.discovery_strategy 为 static 时,此配置项才有效。 -适合于节点数量较少且固定的集群。 - """ - } - label { - en: "Cluster Static Seeds" - zh: "集群静态节点" - } - } - - cluster_mcast_addr { - desc { - en: """Multicast IPv4 address.""" - zh: """指定多播 IPv4 地址。 -当 cluster.discovery_strategy 为 mcast 时,此配置项才有效。 - """ - } - label { - en: "Cluster Multicast Address" - zh: "多播地址" - } - } - - cluster_mcast_ports { - desc { - en: """List of UDP ports used for service discovery.
-Note: probe messages are broadcast to all the specified ports. - """ - zh: """指定多播端口。如有多个端口使用逗号 , 分隔。 -当 cluster.discovery_strategy 为 mcast 时,此配置项才有效。 - """ - } - label { - en: "Cluster Multicast Ports" - zh: "多播端口" - } - } - - cluster_mcast_iface { - desc { - en: """Local IP address the node discovery service needs to bind to.""" - zh: """指定节点发现服务需要绑定到本地 IP 地址。 -当 cluster.discovery_strategy 为 mcast 时,此配置项才有效。 - """ - } - label { - en: "Cluster Multicast Interface" - zh: "多播绑定地址" - } - } - - cluster_mcast_ttl { - desc { - en: """Time-to-live (TTL) for the outgoing UDP datagrams.""" - zh: """指定多播的 Time-To-Live 值。 -当 cluster.discovery_strategy 为 mcast 时,此配置项才有效。 - """ - } - label { - en: "Cluster Multicast TTL" - zh: "多播TTL" - } - } - - cluster_mcast_loop { - desc { - en: """If true, loop UDP datagrams back to the local socket.""" - zh: """设置多播的报文是否投递到本地回环地址。 -当 cluster.discovery_strategy 为 mcast 时,此配置项才有效。 - """ - } - label { - en: "Cluster Multicast Loop" - zh: "多播回环开关" - } - } - - cluster_mcast_sndbuf { - desc { - en: """Size of the kernel-level buffer for outgoing datagrams.""" - zh: """外发数据报的内核级缓冲区的大小。 -当 cluster.discovery_strategy 为 mcast 时,此配置项才有效。 - """ - } - label { - en: "Cluster Muticast Sendbuf" - zh: "多播发送缓存区" - } - } - - cluster_mcast_recbuf { - desc { - en: """Size of the kernel-level buffer for incoming datagrams.""" - zh: """接收数据报的内核级缓冲区的大小。 -当 cluster.discovery_strategy 为 mcast 时,此配置项才有效。 - """ - } - label { - en: "Cluster Muticast Sendbuf" - zh: "多播接收数据缓冲区" - } - } - - cluster_mcast_buffer { - desc { - en: """Size of the user-level buffer.""" - zh: """用户级缓冲区的大小。 -当 cluster.discovery_strategy 为 mcast 时,此配置项才有效。 - """ - } - label { - en: "Cluster Muticast Buffer" - zh: "多播用户级缓冲区" - } - } - - cluster_dns_name { - desc { - en: """The domain name from which to discover peer EMQX nodes' IP addresses. -Applicable when cluster.discovery_strategy = dns -""" - zh: """指定 DNS A 记录的名字。emqx 会通过访问这个 DNS A 记录来获取 IP 地址列表。 -当cluster.discovery_strategydns 时有效。 -""" - } - label { - en: "Cluster Dns Name" - zh: "DNS名称" - } - } - - cluster_dns_record_type { - desc { - en: """DNS record type. """ - zh: """DNS 记录类型。""" - } - label { - en: "DNS Record Type" - zh: "DNS记录类型" - } - } - - cluster_etcd_server { - desc { - en: """List of endpoint URLs of the etcd cluster""" - zh: """指定 etcd 服务的地址。如有多个服务使用逗号 , 分隔。 -当 cluster.discovery_strategy 为 etcd 时,此配置项才有效。 - """ - } - label { - en: "Cluster Etcd Server" - zh: "Etcd 服务器地址" - } - } - - cluster_etcd_prefix { - desc { - en: """Key prefix used for EMQX service discovery.""" - zh: """指定 etcd 路径的前缀。每个节点在 etcd 中都会创建一个路径: -v2/keys///
-当 cluster.discovery_strategy 为 etcd 时,此配置项才有效。 - """ - } - label { - en: "Cluster Etcd Prefix" - zh: "Etcd 路径前缀" - } - } - - cluster_etcd_node_ttl { - desc { - en: """Expiration time of the etcd key associated with the node. -It is refreshed automatically, as long as the node is alive. - """ - zh: """指定 etcd 中节点信息的过期时间。 -当 cluster.discovery_strategy 为 etcd 时,此配置项才有效。 - """ - } - label { - en: "Cluster Etcd Node TTL" - zh: "Etcd 节点过期时间" - } - } - - cluster_etcd_ssl { - desc { - en: """Options for the TLS connection to the etcd cluster.""" - zh: """当使用 TLS 连接 etcd 时的配置选项。 -当 cluster.discovery_strategy 为 etcd 时,此配置项才有效。 - """ - } - label { - en: "Cluster Etcd SSL Option" - zh: "Etcd SSL 选项" - } - } - - cluster_k8s_apiserver { - desc { - en: """Kubernetes API endpoint URL.""" - zh: """指定 Kubernetes API Server。如有多个 Server 使用逗号 , 分隔。 -当 cluster.discovery_strategy 为 k8s 时,此配置项才有效。 - """ - } - label { - en: "Cluster k8s ApiServer" - zh: "K8s 服务地址" - } - } - - cluster_k8s_service_name { - desc { - en: """EMQX broker service name.""" - zh: """指定 Kubernetes 中 EMQX 的服务名。 -当 cluster.discovery_strategy 为 k8s 时,此配置项才有效。 - """ - } - label { - en: "K8s Service Name" - zh: "K8s 服务别名" - } - } - - cluster_k8s_address_type { - desc { - en: """Address type used for connecting to the discovered nodes. -Setting cluster.k8s.address_type to ip will -make EMQX discover IP addresses of peer nodes from Kubernetes API. -""" - zh: """当使用 k8s 方式集群时,address_type 用来从 Kubernetes 接口的应答里获取什么形式的 Host 列表。 -指定 cluster.k8s.address_type 为 ip,则将从 Kubernetes 接口中获取集群中其他节点 -的IP地址。 -""" - } - label { - en: "K8s Address Type" - zh: "K8s 地址类型" - } - } - - cluster_k8s_namespace { - desc { - en: """Kubernetes namespace.""" - zh: """当使用 k8s 方式并且 cluster.k8s.address_type 指定为 dns 类型时, -可设置 emqx 节点名的命名空间。与 cluster.k8s.suffix 一起使用用以拼接得到节点名列表。 - """ - } - label { - en: "K8s Namespace" - zh: "K8s 命名空间" - } - } - - cluster_k8s_suffix { - desc { - en: """Node name suffix.
-Note: this parameter is only relevant when address_type is dns -or hostname.""" - zh: """当使用 k8s 方式并且 cluster.k8s.address_type 指定为 dns 类型时,可设置 emqx 节点名的后缀。 -与 cluster.k8s.namespace 一起使用用以拼接得到节点名列表。 - """ - } - label { - en: "K8s Suffix" - zh: "K8s 后缀" - } - } - - node_name { - desc { - en: """Unique name of the EMQX node. It must follow %name%@FQDN or -%name%@IPv4 format. - """ - zh: """节点名。格式为 \@\。其中 可以是 IP 地址,也可以是 FQDN。 -详见 http://erlang.org/doc/reference_manual/distributed.html。 - """ - } - label { - en: "Node Name" - zh: "节点名" - } - } - - node_cookie { - desc { - en: """Secret cookie is a random string that should be the same on all nodes in -the given EMQX cluster, but unique per EMQX cluster. It is used to prevent EMQX nodes that -belong to different clusters from accidentally connecting to each other.""" - zh: """分布式 Erlang 集群使用的 cookie 值。集群内所有节点保持一致。""" - } - label { - en: "Node Cookie" - zh: "节点 Cookie" - } - } - - node_data_dir { - desc { - en: """ -Path to the persistent data directory.
-Possible auto-created subdirectories are:
-- `mnesia/`: EMQX's built-in database directory.
-For example, `mnesia/emqx@127.0.0.1`.
-There should be only one such subdirectory.
-Meaning, in case the node is to be renamed (to e.g. `emqx@10.0.1.1`),
-the old dir should be deleted first.
-- `configs`: Generated configs at boot time, and cluster/local override configs.
-- `patches`: Hot-patch beam files are to be placed here.
-- `trace`: Trace log files.
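Laid out on disk, the auto-created subdirectories above would look roughly like this for a node named emqx@127.0.0.1 (an orientation sketch, not a guaranteed listing):

```
data/
├── mnesia/emqx@127.0.0.1/   # built-in database; only one such subdirectory
├── configs/                 # configs generated at boot + cluster/local overrides
├── patches/                 # hot-patch beam files
└── trace/                   # trace log files
```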
- -**NOTE**: One data dir cannot be shared by two or more EMQX nodes. -""" - zh: """ -节点数据存放目录,可能会自动创建的子目录如下:
-- `mnesia/`。EMQX的内置数据库目录。例如,`mnesia/emqx@127.0.0.1`。
-如果节点要被重新命名(例如,`emqx@10.0.1.1`),旧目录应该首先被删除。
-- `configs`。在启动时生成的配置,以及集群/本地覆盖的配置。
-- `patches`: 热补丁文件将被放在这里。
-- `trace`: 日志跟踪文件。
- -**注意**: 一个数据dir不能被两个或更多的EMQX节点同时使用。 - """ - } - label { - en: "Node Data Dir" - zh: "节点数据目录" - } - } - - node_config_files { - desc { - en: """List of configuration files that are read during startup. The order is -significant: later configuration files override the previous ones. - """ - zh: """启动时读取的配置文件列表。后面的配置文件项覆盖前面的文件。""" - } - label { - en: "Config Files" - zh: "配置文件" - } - } - - node_global_gc_interval { - desc { - en: """Periodic garbage collection interval. Set to disabled to have it disabled.""" - zh: """系统调优参数,设置节点运行多久强制进行一次全局垃圾回收。禁用设置为 disabled。""" - } - label { - en: "Global GC Interval" - zh: "全局垃圾回收" - } - } - - node_crash_dump_file { - desc { - en: """Location of the crash dump file.""" - zh: """设置 Erlang crash_dump 文件的存储路径和文件名。""" - } - label { - en: "Crash Dump File" - zh: "节点崩溃时的Dump文件" - } - } - - node_crash_dump_seconds { - desc { - en: """The number of seconds that the broker is allowed to spend writing a crash dump.""" - zh: """保存崩溃文件最大允许时间,如果文件太大,在规定时间内没有保存完成,则会直接结束。""" - } - label { - en: "Crash Dump Seconds" - zh: "保存崩溃文件最长时间" - } - } - - node_crash_dump_bytes { - desc { - en: """The maximum size of a crash dump file in bytes.""" - zh: """限制崩溃文件的大小,当崩溃时节点内存太大, -如果为了保存现场,需要全部存到崩溃文件中,此处限制最多能保存多大的文件。 - """ - } - label { - en: "Crash Dump Bytes" - zh: "崩溃文件最大容量" - } - } - - node_dist_net_ticktime { - desc { - en: """This is the approximate time an EMQX node may be unresponsive until it is considered down and thereby disconnected.""" - zh: """系统调优参数,此配置将覆盖 vm.args 文件里的 -kernel net_ticktime 参数。当一个节点持续无响应多久之后,认为其已经宕机并断开连接。 - """ - } - label { - en: "Dist Net TickTime" - zh: "节点间心跳间隔" - } - } - - node_backtrace_depth { - desc { - en: """Maximum depth of the call stack printed in error messages and -process_info. - """ - zh: """错误信息中打印的最大堆栈层数""" - } - label { - en: "BackTrace Depth" - zh: "最大堆栈层数" - } - } - - node_applications { - desc { - en: """List of Erlang applications that shall be rebooted when the EMQX broker joins the cluster. - """ - zh: """当新 EMQX 加入集群时,应重启的Erlang应用程序的列表。""" - } - label { - en: "Application" - zh: "应用" - } - } - - node_etc_dir { - desc { - en: """etc dir for the node""" - zh: """etc 存放目录""" - } - label { - en: "Etc Dir" - zh: "Etc 目录" - } - } - - db_backend { - desc { - en: """ -Select the backend for the embedded database.
-rlog is the default backend, -which is suitable for very large clusters.
-mnesia is a backend that offers decent performance in small clusters. -""" - zh: """ rlog是默认的数据库,它适用于大规模的集群。 -mnesia是备选数据库,在小集群中提供了很好的性能。 - """ - } - label { - en: "DB Backend" - zh: "内置数据库" - } - } - - db_role { - desc { - en: """ -Select a node role.
-core nodes provide durability of the data, and take care of writes. -It is recommended to place core nodes in different racks or different availability zones.
-replicant nodes are ephemeral worker nodes. Removing them from the cluster -doesn't affect database redundancy.
-It is recommended to have more replicant nodes than core nodes.
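As a concrete (and hedged) illustration of such a topology, a replicant node's emqx.conf could carry something like the following; per the note right below, it only takes effect with the rlog backend, and the core node names here are invented:

```hocon
db {
  backend = rlog
  role = replicant
  core_nodes = ["emqx@core1.local", "emqx@core2.local"]  # illustrative names
}
```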
-Note: this parameter only takes effect when the backend is set -to rlog. -""" - zh: """ -选择节点的角色。
-core 节点提供数据的持久性,并负责写入。建议将核心节点放置在不同的机架或不同的可用区。
-replicant 节点是临时工作节点。 从集群中删除它们,不影响数据库冗余。
-建议复制节点多于核心节点。
-注意:该参数仅在 backend 设置为 rlog 时生效。 - """ - } - label { - en: "DB Role" - zh: "数据库角色" - } - } - - db_core_nodes { - desc { - en: """ -List of core nodes that the replicant will connect to.
-Note: this parameter only takes effect when the backend is set -to rlog and the role is set to replicant.
-This value needs to be defined for manual or static cluster discovery mechanisms.
-If an automatic cluster discovery mechanism is being used (such as etcd), -there is no need to set this value. -""" - zh: """当前节点连接的核心节点列表。
-注意:该参数仅在 backend 设置为 rlog, -并且 role 设置为 replicant 时生效。
-该值需要在手动或静态集群发现机制下设置。
-如果使用了自动集群发现机制(如etcd),则不需要设置该值。 - """ - } - label { - en: "Db Core Node" - zh: "数据库核心节点" - } - } - - db_rpc_module { - desc { - en: """Protocol used for pushing transaction logs to the replicant nodes.""" - zh: """集群间推送事务日志到复制节点使用的协议。""" - } - label { - en: "RPC Module" - zh: "RPC协议" - } - } - - db_tlog_push_mode { - desc { - en: """ -In sync mode the core node waits for an ack from the replicant nodes before sending the next -transaction log entry. -""" - zh: """同步模式下,核心节点等待复制节点的确认信息,然后再发送下一条事务日志。""" - } - label { - en: "Tlog Push Mode" - zh: "Tlog推送模式" - } - } - - db_default_shard_transport { - desc { - en: """Defines the default transport for pushing transaction logs.
-This may be overridden on a per-shard basis in db.shard_transports. -gen_rpc uses the gen_rpc library, -distr uses the Erlang distribution.
""" - zh: """ -定义用于推送事务日志的默认传输。
-这可以在 db.shard_transports 中基于每个分片被覆盖。 -gen_rpc 使用 gen_rpc 库, -distr 使用 Erlang 分布式协议。
- """ - } - label { - en: "Default Shard Transport" - zh: "事务日志传输默认协议" - } - } - - db_shard_transports { - desc { - en: """Allows to tune the transport method used for transaction log replication, on a per-shard basis.
-gen_rpc uses the gen_rpc library, -distr uses the Erlang distribution.
If not specified, -the default is to use the value set in db.default_shard_transport.""" - zh: """允许为每个 shard 下的事务日志复制操作的传输方法进行调优。
-gen_rpc 使用 gen_rpc 库, -distr 使用 Erlang 自带的 rpc 库。
 如果未指定, -默认是使用 db.default_shard_transport 中设置的值。 - """ - } - label { - en: "Shard Transports" - zh: "事务日志传输协议" - } - } - - cluster_call_retry_interval { - desc { - en: """Time interval to retry after a failed call.""" - zh: """当集群间调用出错时,多长时间重试一次。""" - } - label { - en: "Cluster Call Retry Interval" - zh: "重试时间间隔" - } - } - - cluster_call_max_history { - desc { - en: """Retain the maximum number of completed transactions (for queries).""" - zh: """集群间调用最多保留的历史记录数。只用于排错时查看。""" - } - label { - en: "Cluster Call Max History" - zh: "最大历史记录" - } - } - - cluster_call_cleanup_interval { - desc { - en: """Time interval to clear completed but stale transactions. -Ensure that the number of completed transactions is less than the max_history.""" - zh: """清理过期事务的时间间隔""" - } - label { - en: "Clean Up Interval" - zh: "清理间隔" - } - } - - rpc_mode { - desc { - en: """In sync mode the sending side waits for the ack from the receiving side.""" - zh: """在 sync 模式下,发送端等待接收端的 ack信号。""" - } - label { - en: "RPC Mode" - zh: "RPC 模式" - } - } - - rpc_driver { - desc { - en: """Transport protocol used for inter-broker communication.""" - zh: """集群间通信使用的传输协议。""" - } - label { - en: "RPC Driver" - zh: "RPC 驱动" - } - } - - rpc_async_batch_size { - desc { - en: """The maximum number of batch messages sent in asynchronous mode. - Note that this configuration does not work in synchronous mode. - """ - zh: """异步模式下,发送的批量消息的最大数量。""" - } - label { - en: "Async Batch Size" - zh: "异步模式下的批量消息数量" - } - } - - rpc_port_discovery { - desc { - en: """manual: discover ports by tcp_server_port.
-stateless: discover ports in a stateless manner, using the following algorithm. -If node name is emqxN@127.0.0.1, where the N is an integer, -then the listening port will be 5370 + N.""" - zh: """manual: 通过 tcp_server_port 来发现端口。 -
 stateless: 使用无状态的方式来发现端口,使用如下算法。如果节点名称是 -emqxN@127.0.0.1, N 是一个数字,那么监听端口就是 5370 + N。 - """ - } - label { - en: "RPC Port Discovery" - zh: "RPC 端口发现策略" - } - } - - rpc_tcp_server_port { - desc { - en: """Listening port used by RPC local service.
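The stateless rule above is easy to verify by hand; the following Erlang sketch (not EMQX source, and assuming the node name carries a numeric suffix) reproduces it:

```erlang
%% Sketch of the stateless rule: emqxN@Host listens on 5370 + N.
%% rpc_port('emqx1@127.0.0.1') =:= 5371.
rpc_port(Node) ->
    [Name, _Host] = string:split(atom_to_list(Node), "@"),
    {match, [N]} = re:run(Name, "[0-9]+$", [{capture, first, list}]),
    5370 + list_to_integer(N).
```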
-Note that this config only takes effect when rpc.port_discovery is set to manual.""" - zh: """RPC 本地服务使用的 TCP 端口。
-只有当 rpc.port_discovery 设置为 manual 时,此配置才会生效。 - """ - } - label { - en: "RPC TCP Server Port" - zh: "RPC TCP 服务监听端口" - } - } - - rpc_ssl_server_port { - desc { - en: """Listening port used by RPC local service.
-Note that this config only takes effect when rpc.port_discovery is set to manual -and driver is set to ssl.""" - zh: """RPC 本地服务使用的监听SSL端口。
-只有当 rpc.port_discovery 设置为 manual 且 driver 设置为 ssl, -此配置才会生效。 - """ - } - label { - en: "RPC SSL Server Port" - zh: "RPC SSL 服务监听端口" - } - } - - rpc_tcp_client_num { - desc { - en: """Set the maximum number of RPC communication channels initiated by this node to each remote node.""" - zh: """设置本节点与远程节点之间的 RPC 通信通道的最大数量。""" - } - label { - en: "RPC TCP Client Num" - zh: "RPC TCP 客户端数量" - } - } - - rpc_connect_timeout { - desc { - en: """Timeout for establishing an RPC connection.""" - zh: """建立 RPC 连接的超时时间。""" - } - label { - en: "RPC Connect Timeout" - zh: "RPC 连接超时时间" - } - } - - rpc_certfile { - desc { - en: """Path to TLS certificate file used to validate identity of the cluster nodes. -Note that this config only takes effect when rpc.driver is set to ssl. - """ - zh: """TLS 证书文件的路径,用于验证集群节点的身份。 -只有当 rpc.driver 设置为 ssl 时,此配置才会生效。 - """ - } - label { - en: "RPC Certfile" - zh: "RPC 证书文件" - } - } - - rpc_keyfile { - desc { - en: """Path to the private key file for the rpc.certfile.
-Note: contents of this file are secret, so it's necessary to set permissions to 600.""" - zh: """rpc.certfile 的私钥文件的路径。
-注意:此文件内容是私钥,所以需要设置权限为 600。 - """ - } - label { - en: "RPC Keyfile" - zh: "RPC 私钥文件" - } - } - - rpc_cacertfile { - desc { - en: """Path to certification authority TLS certificate file used to validate rpc.certfile.
-Note: certificates of all nodes in the cluster must be signed by the same CA.""" - zh: """验证 rpc.certfile 的 CA 证书文件的路径。
-注意:集群中所有节点的证书必须使用同一个 CA 签发。 - """ - } - label { - en: "RPC Cacertfile" - zh: "RPC CA 证书文件" - } - } - - rpc_send_timeout { - desc { - en: """Timeout for sending the RPC request.""" - zh: """发送 RPC 请求的超时时间。""" - } - label { - en: "RPC Send Timeout" - zh: "RPC 发送超时时间" - } - } - - rpc_authentication_timeout { - desc { - en: """Timeout for the remote node authentication.""" - zh: """远程节点认证的超时时间。""" - } - label { - en: "RPC Authentication Timeout" - zh: "RPC 认证超时时间" - } - } - - rpc_call_receive_timeout { - desc { - en: """Timeout for the reply to a synchronous RPC.""" - zh: """同步 RPC 的回复超时时间。""" - } - label { - en: "RPC Call Receive Timeout" - zh: "RPC 调用接收超时时间" - } - } - - rpc_socket_keepalive_idle { - desc { - en: """How long the connections between the brokers should remain open after the last message is sent.""" - zh: """broker 之间的连接在最后一条消息发送后保持打开的时间。""" - } - label { - en: "RPC Socket Keepalive Idle" - zh: "RPC Socket Keepalive Idle" - } - } - - rpc_socket_keepalive_interval { - desc { - en: """The interval between keepalive messages.""" - zh: """keepalive 消息的间隔。""" - } - label { - en: "RPC Socket Keepalive Interval" - zh: "RPC Socket Keepalive 间隔" - } - } - - rpc_socket_keepalive_count { - desc { - en: """How many times the keepalive probe message can fail to receive a reply -until the RPC connection is considered lost.""" - zh: """keepalive 探测消息发送失败的次数,直到 RPC 连接被认为已经断开。""" - } - label { - en: "RPC Socket Keepalive Count" - zh: "RPC Socket Keepalive 次数" - } - } - - rpc_socket_sndbuf { - desc { - en: """TCP tuning parameters. TCP sending buffer size.""" - zh: """TCP 调节参数。TCP 发送缓冲区大小。""" - } - label { - en: "RPC Socket Sndbuf" - zh: "RPC 套接字发送缓冲区大小" - } - } - - rpc_socket_recbuf { - desc { - en: """TCP tuning parameters. TCP receiving buffer size.""" - zh: """TCP 调节参数。TCP 接收缓冲区大小。""" - } - label { - en: "RPC Socket Recbuf" - zh: "RPC 套接字接收缓冲区大小" - } - } - - rpc_socket_buffer { - desc { - en: """TCP tuning parameters. Socket buffer size in user mode.""" - zh: """TCP 调节参数。用户模式套接字缓冲区大小。""" - } - label { - en: "RPC Socket Buffer" - zh: "RPC 套接字缓冲区大小" - } - } - - rpc_insecure_fallback { - desc { - en: """Enable compatibility with old RPC authentication.""" - zh: """兼容旧的无鉴权模式""" - } - label { - en: "RPC insecure fallback" - zh: "向后兼容旧的无鉴权模式" - } - } - - log_file_handlers { - desc { - en: """File-based log handlers.""" - zh: """输出到文件的日志处理进程列表""" - } - label { - en: "File Handler" - zh: "File Handler" - } - } - - common_handler_enable { - desc { - en: """Enable this log handler.""" - zh: """启用此日志处理进程。""" - } - label { - en: "Enable Log Handler" - zh: "启用日志处理进程" - } - } - - common_handler_level { - desc { - en: """ -The log level for the current log handler. -Defaults to warning. -""" - zh: """ -当前日志处理进程的日志级别。 -默认为 warning 级别。 -""" - } - label { - en: "Log Level" - zh: "日志级别" - } - } - - common_handler_time_offset { - desc { - en: """ -The time offset to be used when formatting the timestamp. -Can be one of: - - system: the time offset used by the local system - - utc: the UTC time offset - - +-[hh]:[mm]: user specified time offset, such as "-02:00" or "+00:00" -Defaults to: system. -""" - zh: """ -日志中的时间戳使用的时间偏移量。 -可选值为: - - system: 本地系统使用的时区偏移量 - - utc: 0 时区的偏移量 - - +-[hh]:[mm]: 自定义偏移量,比如 "-02:00" 或者 "+00:00" -默认值为本地系统的时区偏移量:system。 -""" - } - label { - en: "Time Offset" - zh: "时间偏移量" - } - } - - common_handler_chars_limit { - desc { - en: """ -Set the maximum length of a single log message. If this length is exceeded, the log message will be truncated. 
-NOTE: If the formatter is JSON, truncating a message at this limit can produce incomplete JSON data, which is not recommended. -""" - zh: """ -设置单个日志消息的最大长度。 如果超过此长度,则日志消息将被截断。最小可设置的长度为100。 -注意:如果日志格式为 JSON,限制字符长度可能会导致截断不完整的 JSON 数据。 -""" - } - label { - en: "Single Log Max Length" - zh: "单条日志长度限制" - } - } - - common_handler_formatter { - desc { - en: """Choose log formatter. text for free text, and json for structured logging.""" - zh: """选择日志格式类型。 text 用于纯文本,json 用于结构化日志记录。""" - } - label { - en: "Log Formatter" - zh: "日志格式类型" - } - } - - common_handler_single_line { - desc { - en: """Print logs in a single line if set to true. Otherwise, log messages may span multiple lines.""" - zh: """如果设置为 true,则单行打印日志。 否则,日志消息可能跨越多行。""" - } - label { - en: "Single Line Mode" - zh: "单行模式" - } - } - - common_handler_sync_mode_qlen { - desc { - en: """As long as the number of buffered log events is lower than this value, -all log events are handled asynchronously. This means that the client process sending the log event, -by calling a log function in the Logger API, does not wait for a response from the handler -but continues executing immediately after the event is sent. -It is not affected by the time it takes the handler to print the event to the log device. -If the message queue grows larger than this value, -the handler starts handling log events synchronously instead, -meaning that the client process sending the event must wait for a response. -When the handler reduces the message queue to a level below the sync_mode_qlen threshold, -asynchronous operation is resumed. -""" - zh: """只要缓冲的日志事件的数量低于这个值,所有的日志事件都会被异步处理。 -这意味着,日志落地速度不会影响正常的业务进程,因为它们不需要等待日志处理进程的响应。 -如果消息队列的增长超过了这个值,处理程序开始同步处理日志事件。也就是说,发送事件的客户进程必须等待响应。 -当处理程序将消息队列减少到低于sync_mode_qlen阈值的水平时,异步操作就会恢复。 -默认为100条信息,当等待的日志事件大于100条时,就开始同步处理日志。""" - } - label { - en: "Queue Length before Entering Sync Mode" - zh: "进入同步模式的队列长度" - } - } - - common_handler_drop_mode_qlen { - desc { - en: """When the number of buffered log events is larger than this value, the new log events are dropped. -When drop mode is activated or deactivated, a message is printed in the logs.""" - zh: """当缓冲的日志事件数大于此值时,新的日志事件将被丢弃。起到过载保护的功能。 -为了使过载保护算法正常工作必须要: sync_mode_qlen =< drop_mode_qlen =< flush_qlen 且 drop_mode_qlen > 1 -要禁用某些模式,请执行以下操作。 -- 如果sync_mode_qlen被设置为0,所有的日志事件都被同步处理。也就是说,异步日志被禁用。 -- 如果sync_mode_qlen被设置为与drop_mode_qlen相同的值,同步模式被禁用。也就是说,处理程序总是以异步模式运行,除非调用drop或flushing。 -- 如果drop_mode_qlen被设置为与flush_qlen相同的值,则drop模式被禁用,永远不会发生。 -""" - } - label { - en: "Queue Length before Entering Drop Mode" - zh: "进入丢弃模式的队列长度" - } - } - - common_handler_flush_qlen { - desc { - en: """If the number of buffered log events grows larger than this threshold, a flush (delete) operation takes place. -To flush events, the handler discards the buffered log messages without logging.""" - zh: """如果缓冲日志事件的数量增长大于此阈值,则会发生冲刷(删除)操作。 日志处理进程会丢弃缓冲的日志消息。 -以避免自身由于内存暴涨而影响其它业务进程。日志内容会提醒有多少事件被删除。""" - } - label { - en: "Flush Threshold" - zh: "冲刷阈值" - } - } - - common_handler_supervisor_reports { - desc { - en: """ -Type of supervisor reports that are logged. Defaults to error - - error: only log errors in the Erlang processes. - - progress: log process startup. 
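Stepping back to the three queue thresholds described a few entries above: they only provide overload protection when sync_mode_qlen =< drop_mode_qlen =< flush_qlen holds. A hedged sketch for one handler (100 is the default mentioned in the text; the other two values are illustrative):

```hocon
log.console_handler {
  sync_mode_qlen = 100    # below this, log events are handled asynchronously
  drop_mode_qlen = 3000   # above this, new log events are dropped
  flush_qlen     = 8000   # above this, the buffered events are flushed (discarded)
}
```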
-""" - zh: """ -Supervisor 报告的类型。默认为 error 类型。 - - error:仅记录 Erlang 进程中的错误。 - - progress:除了 error 信息外,还需要记录进程启动的详细信息。 -""" - } - label { - en: "Report Type" - zh: "报告类型" - } - } - - common_handler_max_depth { - desc { - en: """Maximum depth for Erlang term log formatting and Erlang process message queue inspection.""" - zh: """Erlang 内部格式日志格式化和 Erlang 进程消息队列检查的最大深度。""" - } - label { - en: "Max Depth" - zh: "最大深度" - } - } - - log_file_handler_file { - desc { - en: """Name the log file.""" - zh: """日志文件路径及名字。""" - } - label { - en: "Log File Name" - zh: "日志文件名字" - } - } - - log_file_handler_max_size { - desc { - en: """This parameter controls log file rotation. The value `infinity` means the log file will grow indefinitely, otherwise the log file will be rotated once it reaches `max_size` in bytes.""" - zh: """此参数控制日志文件轮换。 `infinity` 意味着日志文件将无限增长,否则日志文件将在达到 `max_size`(以字节为单位)时进行轮换。 -与 rotation count配合使用。如果 counter 为 10,则是10个文件轮换。 -""" - } - label { - en: "Rotation Size" - zh: "日志文件轮换大小" - } - } - - log_rotation_enable { - desc { - en: """Enable log rotation feature.""" - zh: """启用日志轮换功能。启动后生成日志文件后缀会加上对应的索引数字,比如:log/emqx.log.1。 -系统会默认生成*.siz/*.idx用于记录日志位置,请不要手动修改这两个文件。 -""" - } - label { - en: "Rotation Enable" - zh: "日志轮换" - } - } - - log_rotation_count { - desc { - en: """Maximum number of log files.""" - zh: """轮换的最大日志文件数。""" - } - label { - en: "Max Log Files Number" - zh: "最大日志文件数" - } - } - - log_overload_kill_enable { - desc { - en: """Enable log handler overload kill feature.""" - zh: """日志处理进程过载时为保护自己节点其它的业务能正常,强制杀死日志处理进程。""" - } - label { - en: "Log Handler Overload Kill" - zh: "日志处理进程过载保护" - } - } - - log_overload_kill_mem_size { - desc { - en: """Maximum memory size that the log handler process is allowed to use.""" - zh: """日志处理进程允许使用的最大内存。""" - } - label { - en: "Log Handler Max Memory Size" - zh: "日志处理进程允许使用的最大内存" - } - } - - log_overload_kill_qlen { - desc { - en: """Maximum allowed queue length.""" - zh: """允许的最大队列长度。""" - } - label { - en: "Max Queue Length" - zh: "最大队列长度" - } - } - - log_overload_kill_restart_after { - desc { - en: """If the handler is terminated, it restarts automatically after a delay specified in milliseconds. The value `infinity` prevents restarts.""" - zh: """如果处理进程终止,它会在以指定的时间后后自动重新启动。 `infinity` 不自动重启。""" - } - label { - en: "Handler Restart Timer" - zh: "处理进程重启机制" - } - } - - log_burst_limit_enable { - desc { - en: """Enable log burst control feature.""" - zh: """启用日志限流保护机制。""" - } - label { - en: "Enable Burst" - zh: "日志限流保护" - } - } - - log_burst_limit_max_count { - desc { - en: """Maximum number of log events to handle within a `window_time` interval. After the limit is reached, successive events are dropped until the end of the `window_time`.""" - zh: """在 `window_time` 间隔内处理的最大日志事件数。 达到限制后,将丢弃连续事件,直到 `window_time` 结束。""" - } - label { - en: "Events Number" - zh: "日志事件数" - } - } - - log_burst_limit_window_time { - desc { - en: """See max_count.""" - zh: """参考 max_count。""" - } - label { - en: "Window Time" - zh: "Window Time" - } - } - - authorization { - desc { - en: """ -Authorization a.k.a. ACL.
-In EMQX, MQTT client access control is extremely flexible.
-An out-of-the-box set of authorization data sources are supported. -For example,
-'file' source is to support concise and yet generic ACL rules in a file;
-'built_in_database' source can be used to store per-client customizable rule sets, -natively in the EMQX node;
-'http' source to make EMQX call an external HTTP API to make the decision;
-'PostgreSQL' etc. to look up clients or rules from external databases;
-""" - zh: """ 授权(ACL)。EMQX 支持完整的客户端访问控制(ACL)。
""" - } - label { - en: "Authorization" - zh: "授权" - } - } - - desc_cluster { - desc { - en: """EMQX nodes can form a cluster to scale up the total capacity.
- This section holds the configuration that instructs how individual nodes discover each other.""" - zh: """EMQX 节点可以组成一个集群,以提高总容量。
这里指定了节点之间如何连接。""" - } - label { - en: "Cluster" - zh: "集群" - } - } - - desc_cluster_static { - desc { - en: """Service discovery via static nodes. -The new node joins the cluster by connecting to one of the bootstrap nodes.""" - zh: """静态节点服务发现。新节点通过连接一个节点来加入集群。""" - } - label { - en: "Cluster Static" - zh: "静态节点服务发现" - } - } - - desc_cluster_mcast { - desc { - en: """Service discovery via UDP multicast.""" - zh: """UDP 组播服务发现。""" - } - label { - en: "Cluster Multicast" - zh: "UDP 组播服务发现" - } - } - - desc_cluster_dns { - desc { - en: """Service discovery via DNS SRV records.""" - zh: """DNS SRV 记录服务发现。""" - } - label { - en: "Cluster DNS" - zh: "DNS SRV 记录服务发现" - } - } - - desc_cluster_etcd { - desc { - en: """Service discovery using 'etcd' service.""" - zh: """使用 'etcd' 服务的服务发现。""" - } - label { - en: "Cluster Etcd" - zh: "'etcd' 服务的服务发现" - } - } - - desc_cluster_k8s { - desc { - en: """Service discovery via Kubernetes API server.""" - zh: """Kubernetes 服务发现。""" - } - label { - en: "Cluster Kubernetes" - zh: "Kubernetes 服务发现" - } - } - - desc_node { - desc { - en: """Node name, cookie, config & data directories and the Erlang virtual machine (BEAM) boot parameters.""" - zh: """节点名称、Cookie、配置文件、数据目录和 Erlang 虚拟机(BEAM)启动参数。""" - } - label { - en: "Node" - zh: "节点" - } - } - - desc_db { - desc { - en: """Settings for the embedded database.""" - zh: """内置数据库的配置。""" - } - label { - en: "Database" - zh: "数据库" - } - } - - desc_cluster_call { - desc { - en: """Options for the 'cluster call' feature that allows to execute a callback on all nodes in the cluster.""" - zh: """集群调用功能的选项。""" - } - label { - en: "Cluster Call" - zh: "集群调用" - } - } - - desc_rpc { - desc { - en: """EMQX uses a library called gen_rpc for inter-broker communication.
-Most of the time the default config should work, -but in case you need to do performance fine-tuning or experiment a bit, -this is where to look.""" - zh: """EMQX 使用 gen_rpc 库来实现跨节点通信。
-大多数情况下,默认的配置应该可以工作,但如果你需要做一些性能优化或者实验,可以尝试调整这些参数。""" - } - label { - en: "RPC" - zh: "RPC" - } - } - - desc_log { - desc { - en: """EMQX logging supports multiple sinks for the log events. -Each sink is represented by a _log handler_, which can be configured independently.""" - zh: """EMQX 日志记录支持日志事件的多个接收器。 每个接收器由一个_log handler_表示,可以独立配置。""" - } - label { - en: "Log" - zh: "日志" - } - } - - desc_console_handler { - desc { - en: """Log handler that prints log events to the EMQX console.""" - zh: """日志处理进程将日志事件打印到 EMQX 控制台。""" - } - label { - en: "Console Handler" - zh: "Console Handler" - } - } - - desc_log_file_handler { - desc { - en: """Log handler that prints log events to files.""" - zh: """日志处理进程将日志事件打印到文件。""" - } - label { - en: "Files Log Handler" - zh: "文件日志处理进程" - } - } - - desc_log_rotation { - desc { - en: """ -By default, the logs are stored in `./log` directory (for installation from zip file) or in `/var/log/emqx` (for binary installation).
-This section of the configuration controls the number of files kept for each log handler. -""" - zh: """ -默认情况下,日志存储在 `./log` 目录(用于从 zip 文件安装)或 `/var/log/emqx`(用于二进制安装)。
-这部分配置,控制每个日志处理进程保留的文件数量。 -""" - } - label { - en: "Log Rotation" - zh: "日志轮换" - } - } - - desc_log_overload_kill { - desc { - en: """ -Log overload kill features an overload protection that activates when the log handlers use too much memory or have too many buffered log messages.
-When the overload is detected, the log handler is terminated and restarted after a cooldown period. -""" - zh: """ -日志过载终止,具有过载保护功能。当日志处理进程使用过多内存,或者缓存的日志消息过多时该功能被激活。
-检测到过载时,日志处理进程将终止,并在冷却期后重新启动。 -""" - } - label { - en: "Log Overload Kill" - zh: "日志过载保护" - } - } - - desc_log_burst_limit { - desc { - en: """Large bursts of log events produced in a short time can potentially cause problems, such as: - - Log files grow very large - - Log files are rotated too quickly, and useful information gets overwritten - - Overall performance impact on the system - -Log burst limit feature can temporarily disable logging to avoid these issues.""" - zh: """短时间内产生的大量日志事件可能会导致问题,例如: - - 日志文件变得非常大 - - 日志文件轮换过快,有用信息被覆盖 - - 对系统的整体性能影响 - -日志突发限制功能可以暂时禁用日志记录以避免这些问题。""" - } - label { - en: "Log Burst Limit" - zh: "日志突发限制" - } - } - - desc_authorization { - desc { - en: """Settings that control client authorization.""" - zh: """授权相关""" - } - label { - en: "Authorization" - zh: "授权" - } - } -} diff --git a/apps/emqx_conf/src/emqx_cluster_rpc.erl b/apps/emqx_conf/src/emqx_cluster_rpc.erl index d8b195587..c82191bc3 100644 --- a/apps/emqx_conf/src/emqx_cluster_rpc.erl +++ b/apps/emqx_conf/src/emqx_cluster_rpc.erl @@ -204,7 +204,7 @@ do_multicall(M, F, A, RequiredSyncs, Timeout) -> query(TnxId) -> transaction(fun ?MODULE:trans_query/1, [TnxId]). --spec reset() -> reset. +-spec reset() -> ok. reset() -> gen_server:call(?MODULE, reset). -spec status() -> {'atomic', [map()]} | {'aborted', Reason :: term()}. @@ -270,17 +270,22 @@ fast_forward_to_commit(Node, ToTnxId) -> %% @private init([Node, RetryMs]) -> - %% Workaround for https://github.com/emqx/mria/issues/94: - _ = mria_rlog:wait_for_shards([?CLUSTER_RPC_SHARD], 1000), - _ = mria:wait_for_tables([?CLUSTER_MFA, ?CLUSTER_COMMIT]), {ok, _} = mnesia:subscribe({table, ?CLUSTER_MFA, simple}), State = #{node => Node, retry_interval => RetryMs}, + %% The init transaction ID is set in emqx_conf_app after + %% it has fetched the latest config from one of the core nodes TnxId = emqx_app:get_init_tnx_id(), ok = maybe_init_tnx_id(Node, TnxId), + %% Now continue with the normal catch-up process + %% That is: apply the missing transactions after the config + %% was copied until now. {ok, State, {continue, ?CATCH_UP}}. %% @private handle_continue(?CATCH_UP, State) -> + %% emqx app must be started before + %% trying to catch up the rpc commit logs + ok = wait_for_emqx_ready(), {noreply, State, catch_up(State)}. handle_call(reset, _From, State) -> @@ -396,6 +401,7 @@ get_cluster_tnx_id() -> Id -> Id end. +%% The entry point of a config change transaction. init_mfa(Node, MFA) -> mnesia:write_lock_table(?CLUSTER_MFA), LatestId = get_cluster_tnx_id(), @@ -495,15 +501,17 @@ log_and_alarm(IsSuccess, Res, #{kind := ?APPLY_KIND_INITIATE} = Meta) -> %% because nothing is committed case IsSuccess of true -> - ?SLOG(debug, Meta#{msg => "cluster_rpc_apply_result", result => Res}); + ?SLOG(debug, Meta#{msg => "cluster_rpc_apply_result", result => emqx_utils:redact(Res)}); false -> - ?SLOG(warning, Meta#{msg => "cluster_rpc_apply_result", result => Res}) + ?SLOG(warning, Meta#{ + msg => "cluster_rpc_apply_result", result => emqx_utils:redact(Res) + }) end; log_and_alarm(true, Res, Meta) -> - ?SLOG(debug, Meta#{msg => "cluster_rpc_apply_ok", result => Res}), + ?SLOG(debug, Meta#{msg => "cluster_rpc_apply_ok", result => emqx_utils:redact(Res)}), do_alarm(deactivate, Res, Meta); log_and_alarm(false, Res, Meta) -> - ?SLOG(error, Meta#{msg => "cluster_rpc_apply_failed", result => Res}), + ?SLOG(error, Meta#{msg => "cluster_rpc_apply_failed", result => emqx_utils:redact(Res)}), do_alarm(activate, Res, Meta). 
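The log_and_alarm/3 hunks above start piping every logged RPC result through emqx_utils:redact/1, so secrets inside config payloads do not leak into log files. Conceptually the redaction behaves like this simplified sketch (not the actual emqx_utils implementation; the key list is an assumption):

```erlang
%% Sketch: recursively mask values stored under sensitive keys.
redact(Map) when is_map(Map) ->
    maps:map(
        fun(K, V) ->
            case lists:member(K, [password, secret, token]) of
                true -> <<"******">>;
                false -> redact(V)
            end
        end,
        Map
    );
redact(List) when is_list(List) ->
    [redact(E) || E <- List];
redact(Other) ->
    Other.
```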
do_alarm(Fun, Res, #{tnx_id := Id} = Meta) -> @@ -512,7 +520,7 @@ do_alarm(Fun, Res, #{tnx_id := Id} = Meta) -> wait_for_all_nodes_commit(TnxId, Delay, Remain) -> Lagging = lagging_nodes(TnxId), - Stopped = stopped_nodes(), + Stopped = Lagging -- mria:running_nodes(), case Lagging -- Stopped of [] when Stopped =:= [] -> ok; @@ -537,9 +545,10 @@ wait_for_nodes_commit(RequiredSyncs, TnxId, Delay, Remain) -> [] -> ok; Lagging -> - case stopped_nodes() of + Stopped = Lagging -- mria:running_nodes(), + case Stopped of [] -> {peers_lagging, Lagging}; - Stopped -> {stopped_nodes, Stopped} + _ -> {stopped_nodes, Stopped} end end end. @@ -558,9 +567,6 @@ commit_status_trans(Operator, TnxId) -> Result = '$2', mnesia:select(?CLUSTER_COMMIT, [{MatchHead, [Guard], [Result]}]). -stopped_nodes() -> - ekka_cluster:info(stopped_nodes). - get_retry_ms() -> emqx_conf:get([node, cluster_call, retry_interval], timer:minutes(1)). @@ -568,3 +574,37 @@ maybe_init_tnx_id(_Node, TnxId) when TnxId < 0 -> ok; maybe_init_tnx_id(Node, TnxId) -> {atomic, _} = transaction(fun ?MODULE:commit/2, [Node, TnxId]), ok. + +%% @priv Cannot proceed until emqx app is ready. +%% Otherwise the committed transaction catch up may fail. +wait_for_emqx_ready() -> + %% wait 10 seconds for emqx to start + ok = do_wait_for_emqx_ready(10). + +%% Wait for emqx app to be ready, +%% write a log message every 1 second +do_wait_for_emqx_ready(0) -> + timeout; +do_wait_for_emqx_ready(N) -> + %% check interval is 100ms + %% makes the total wait time 1 second + case do_wait_for_emqx_ready2(10) of + ok -> + ok; + timeout -> + ?SLOG(warning, #{msg => "stil_waiting_for_emqx_app_to_be_ready"}), + do_wait_for_emqx_ready(N - 1) + end. + +%% Wait for emqx app to be ready, +%% check interval is 100ms +do_wait_for_emqx_ready2(0) -> + timeout; +do_wait_for_emqx_ready2(N) -> + case emqx:is_running() of + true -> + ok; + false -> + timer:sleep(100), + do_wait_for_emqx_ready2(N - 1) + end. diff --git a/apps/emqx_conf/src/emqx_cluster_rpc_handler.erl b/apps/emqx_conf/src/emqx_cluster_rpc_cleaner.erl similarity index 94% rename from apps/emqx_conf/src/emqx_cluster_rpc_handler.erl rename to apps/emqx_conf/src/emqx_cluster_rpc_cleaner.erl index c3d946a91..fe72cd65b 100644 --- a/apps/emqx_conf/src/emqx_cluster_rpc_handler.erl +++ b/apps/emqx_conf/src/emqx_cluster_rpc_cleaner.erl @@ -13,7 +13,9 @@ %% See the License for the specific language governing permissions and %% limitations under the License. %%-------------------------------------------------------------------- --module(emqx_cluster_rpc_handler). + +%% @doc This module is responsible for cleaning up the cluster RPC MFA. +-module(emqx_cluster_rpc_cleaner). -behaviour(gen_server). @@ -71,7 +73,7 @@ handle_info(Info, State) -> {noreply, State}. terminate(_Reason, #{timer := TRef}) -> - emqx_misc:cancel_timer(TRef). + emqx_utils:cancel_timer(TRef). code_change(_OldVsn, State, _Extra) -> {ok, State}. @@ -80,7 +82,7 @@ code_change(_OldVsn, State, _Extra) -> %% Internal functions %%-------------------------------------------------------------------- ensure_timer(State = #{cleanup_ms := Ms}) -> - State#{timer := emqx_misc:start_timer(Ms, del_stale_mfa)}. + State#{timer := emqx_utils:start_timer(Ms, del_stale_mfa)}. %% @doc Keep the latest completed 100 records for querying and troubleshooting. 
del_stale_mfa(MaxHistory) -> diff --git a/apps/emqx_conf/src/emqx_conf.app.src b/apps/emqx_conf/src/emqx_conf.app.src index 6d7e9ef9c..e6c3d9cd9 100644 --- a/apps/emqx_conf/src/emqx_conf.app.src +++ b/apps/emqx_conf/src/emqx_conf.app.src @@ -1,9 +1,9 @@ {application, emqx_conf, [ {description, "EMQX configuration management"}, - {vsn, "0.1.8"}, + {vsn, "0.1.19"}, {registered, []}, {mod, {emqx_conf_app, []}}, - {applications, [kernel, stdlib]}, + {applications, [kernel, stdlib, emqx_ctl]}, {env, []}, {modules, []} ]}. diff --git a/apps/emqx_conf/src/emqx_conf.erl b/apps/emqx_conf/src/emqx_conf.erl index 6fd9ac009..cc56d5e46 100644 --- a/apps/emqx_conf/src/emqx_conf.erl +++ b/apps/emqx_conf/src/emqx_conf.erl @@ -18,16 +18,25 @@ -compile({no_auto_import, [get/1, get/2]}). -include_lib("emqx/include/logger.hrl"). -include_lib("hocon/include/hoconsc.hrl"). +-include_lib("emqx/include/emqx_schema.hrl"). -export([add_handler/2, remove_handler/1]). -export([get/1, get/2, get_raw/1, get_raw/2, get_all/1]). -export([get_by_node/2, get_by_node/3]). -export([update/3, update/4]). -export([remove/2, remove/3]). +-export([tombstone/2]). -export([reset/2, reset/3]). --export([dump_schema/1, dump_schema/3]). +-export([dump_schema/2]). -export([schema_module/0]). --export([gen_example_conf/4]). +-export([gen_example_conf/2]). + +%% TODO: move to emqx_dashboard when we stop building api schema at build time +-export([ + hotconf_schema_json/0, + bridge_schema_json/0, + hocon_schema_to_spec/2 +]). %% for rpc -export([get_node_and_config/1]). @@ -43,50 +52,50 @@ add_handler(ConfKeyPath, HandlerName) -> remove_handler(ConfKeyPath) -> emqx_config_handler:remove_handler(ConfKeyPath). --spec get(emqx_map_lib:config_key_path()) -> term(). +-spec get(emqx_utils_maps:config_key_path()) -> term(). get(KeyPath) -> emqx:get_config(KeyPath). --spec get(emqx_map_lib:config_key_path(), term()) -> term(). +-spec get(emqx_utils_maps:config_key_path(), term()) -> term(). get(KeyPath, Default) -> emqx:get_config(KeyPath, Default). --spec get_raw(emqx_map_lib:config_key_path(), term()) -> term(). +-spec get_raw(emqx_utils_maps:config_key_path(), term()) -> term(). get_raw(KeyPath, Default) -> emqx_config:get_raw(KeyPath, Default). --spec get_raw(emqx_map_lib:config_key_path()) -> term(). +-spec get_raw(emqx_utils_maps:config_key_path()) -> term(). get_raw(KeyPath) -> emqx_config:get_raw(KeyPath). %% @doc Returns all values in the cluster. --spec get_all(emqx_map_lib:config_key_path()) -> #{node() => term()}. +-spec get_all(emqx_utils_maps:config_key_path()) -> #{node() => term()}. get_all(KeyPath) -> {ResL, []} = emqx_conf_proto_v2:get_all(KeyPath), maps:from_list(ResL). %% @doc Returns the specified node's KeyPath, or exception if not found --spec get_by_node(node(), emqx_map_lib:config_key_path()) -> term(). +-spec get_by_node(node(), emqx_utils_maps:config_key_path()) -> term(). get_by_node(Node, KeyPath) when Node =:= node() -> emqx:get_config(KeyPath); get_by_node(Node, KeyPath) -> emqx_conf_proto_v2:get_config(Node, KeyPath). %% @doc Returns the specified node's KeyPath, or the default value if not found --spec get_by_node(node(), emqx_map_lib:config_key_path(), term()) -> term(). +-spec get_by_node(node(), emqx_utils_maps:config_key_path(), term()) -> term(). get_by_node(Node, KeyPath, Default) when Node =:= node() -> emqx:get_config(KeyPath, Default); get_by_node(Node, KeyPath, Default) -> emqx_conf_proto_v2:get_config(Node, KeyPath, Default). 
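For orientation, callers consume this API roughly as follows (a hypothetical usage sketch; the key paths are examples only):

```erlang
%% In an EMQX remote console: read with a default, then apply a cluster-wide update.
Interval = emqx_conf:get([node, cluster_call, retry_interval], 60000).
{ok, _Result} = emqx_conf:update(
    [log, console_handler, level], warning, #{override_to => cluster}
).
```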
%% @doc Returns the specified node's KeyPath, or config_not_found if key path not found --spec get_node_and_config(emqx_map_lib:config_key_path()) -> term(). +-spec get_node_and_config(emqx_utils_maps:config_key_path()) -> term(). get_node_and_config(KeyPath) -> {node(), emqx:get_config(KeyPath, config_not_found)}. %% @doc Update all value of key path in cluster-override.conf or local-override.conf. -spec update( - emqx_map_lib:config_key_path(), + emqx_utils_maps:config_key_path(), emqx_config:update_request(), emqx_config:update_opts() ) -> @@ -97,7 +106,7 @@ update(KeyPath, UpdateReq, Opts) -> %% @doc Update the specified node's key path in local-override.conf. -spec update( node(), - emqx_map_lib:config_key_path(), + emqx_utils_maps:config_key_path(), emqx_config:update_request(), emqx_config:update_opts() ) -> @@ -107,14 +116,18 @@ update(Node, KeyPath, UpdateReq, Opts0) when Node =:= node() -> update(Node, KeyPath, UpdateReq, Opts) -> emqx_conf_proto_v2:update(Node, KeyPath, UpdateReq, Opts). +%% @doc Mark the specified key path as tombstone +tombstone(KeyPath, Opts) -> + update(KeyPath, ?TOMBSTONE_CONFIG_CHANGE_REQ, Opts). + %% @doc remove all value of key path in cluster-override.conf or local-override.conf. --spec remove(emqx_map_lib:config_key_path(), emqx_config:update_opts()) -> +-spec remove(emqx_utils_maps:config_key_path(), emqx_config:update_opts()) -> {ok, emqx_config:update_result()} | {error, emqx_config:update_error()}. remove(KeyPath, Opts) -> emqx_conf_proto_v2:remove_config(KeyPath, Opts). %% @doc remove the specified node's key path in local-override.conf. --spec remove(node(), emqx_map_lib:config_key_path(), emqx_config:update_opts()) -> +-spec remove(node(), emqx_utils_maps:config_key_path(), emqx_config:update_opts()) -> {ok, emqx_config:update_result()} | {error, emqx_config:update_error()}. remove(Node, KeyPath, Opts) when Node =:= node() -> emqx:remove_config(KeyPath, Opts#{override_to => local}); @@ -122,13 +135,13 @@ remove(Node, KeyPath, Opts) -> emqx_conf_proto_v2:remove_config(Node, KeyPath, Opts). %% @doc reset all value of key path in cluster-override.conf or local-override.conf. --spec reset(emqx_map_lib:config_key_path(), emqx_config:update_opts()) -> +-spec reset(emqx_utils_maps:config_key_path(), emqx_config:update_opts()) -> {ok, emqx_config:update_result()} | {error, emqx_config:update_error()}. reset(KeyPath, Opts) -> emqx_conf_proto_v2:reset(KeyPath, Opts). %% @doc reset the specified node's key path in local-override.conf. --spec reset(node(), emqx_map_lib:config_key_path(), emqx_config:update_opts()) -> +-spec reset(node(), emqx_utils_maps:config_key_path(), emqx_config:update_opts()) -> {ok, emqx_config:update_result()} | {error, emqx_config:update_error()}. reset(Node, KeyPath, Opts) when Node =:= node() -> emqx:reset_config(KeyPath, Opts#{override_to => local}); @@ -136,63 +149,61 @@ reset(Node, KeyPath, Opts) -> emqx_conf_proto_v2:reset(Node, KeyPath, Opts). %% @doc Called from build script. --spec dump_schema(file:name_all()) -> ok. -dump_schema(Dir) -> - I18nFile = emqx_dashboard:i18n_file(), - dump_schema(Dir, emqx_conf_schema, I18nFile). 
- -dump_schema(Dir, SchemaModule, I18nFile) -> +%% TODO: move to a external escript after all refactoring is done +dump_schema(Dir, SchemaModule) -> + _ = application:load(emqx_dashboard), + ok = emqx_dashboard_desc_cache:init(), lists:foreach( fun(Lang) -> - gen_config_md(Dir, I18nFile, SchemaModule, Lang), - gen_api_schema_json(Dir, I18nFile, Lang), - ExampleDir = filename:join(filename:dirname(filename:dirname(I18nFile)), "etc"), - gen_example_conf(ExampleDir, I18nFile, SchemaModule, Lang) + ok = gen_config_md(Dir, SchemaModule, Lang), + ok = gen_schema_json(Dir, SchemaModule, Lang) end, - [en, zh] + ["en", "zh"] ), - gen_schema_json(Dir, I18nFile, SchemaModule). + ok = gen_example_conf(Dir, SchemaModule). %% for scripts/spellcheck. -gen_schema_json(Dir, I18nFile, SchemaModule) -> - SchemaJsonFile = filename:join([Dir, "schema.json"]), +gen_schema_json(Dir, SchemaModule, Lang) -> + SchemaJsonFile = filename:join([Dir, "schema-" ++ Lang ++ ".json"]), io:format(user, "===< Generating: ~s~n", [SchemaJsonFile]), - Opts = #{desc_file => I18nFile, lang => "en"}, + %% EMQX_SCHEMA_FULL_DUMP is quite a hidden API + %% it is used to dump the full schema for EMQX developers and supporters + IncludeImportance = + case os:getenv("EMQX_SCHEMA_FULL_DUMP") =:= "1" of + true -> ?IMPORTANCE_HIDDEN; + false -> ?IMPORTANCE_LOW + end, + io:format(user, "===< Including fields from importance level: ~p~n", [IncludeImportance]), + Opts = #{ + include_importance_up_from => IncludeImportance, + desc_resolver => make_desc_resolver(Lang) + }, JsonMap = hocon_schema_json:gen(SchemaModule, Opts), - IoData = jsx:encode(JsonMap, [space, {indent, 4}]), + IoData = emqx_utils_json:encode(JsonMap, [pretty, force_utf8]), ok = file:write_file(SchemaJsonFile, IoData). -gen_api_schema_json(Dir, I18nFile, Lang) -> - emqx_dashboard:init_i18n(I18nFile, Lang), - gen_api_schema_json_hotconf(Dir, Lang), - gen_api_schema_json_bridge(Dir, Lang), - emqx_dashboard:clear_i18n(). - -gen_api_schema_json_hotconf(Dir, Lang) -> +%% TODO: move this function to emqx_dashboard when we stop generating this JSON at build time. +hotconf_schema_json() -> SchemaInfo = #{title => <<"EMQX Hot Conf API Schema">>, version => <<"0.1.0">>}, - File = schema_filename(Dir, "hot-config-schema-", Lang), - ok = do_gen_api_schema_json(File, emqx_mgmt_api_configs, SchemaInfo). + gen_api_schema_json_iodata(emqx_mgmt_api_configs, SchemaInfo). -gen_api_schema_json_bridge(Dir, Lang) -> +%% TODO: move this function to emqx_dashboard when we stop generating this JSON at build time. +bridge_schema_json() -> SchemaInfo = #{title => <<"EMQX Data Bridge API Schema">>, version => <<"0.1.0">>}, - File = schema_filename(Dir, "bridge-api-", Lang), - ok = do_gen_api_schema_json(File, emqx_bridge_api, SchemaInfo). + gen_api_schema_json_iodata(emqx_bridge_api, SchemaInfo). -schema_filename(Dir, Prefix, Lang) -> - Filename = Prefix ++ atom_to_list(Lang) ++ ".json", - filename:join([Dir, Filename]). - -gen_config_md(Dir, I18nFile, SchemaModule, Lang0) -> - Lang = atom_to_list(Lang0), +%% TODO: remove it and also remove hocon_md.erl and friends. +%% markdown generation from schema is a failure and we are moving to an interactive +%% viewer like swagger UI. +gen_config_md(Dir, SchemaModule, Lang) -> SchemaMdFile = filename:join([Dir, "config-" ++ Lang ++ ".md"]), io:format(user, "===< Generating: ~s~n", [SchemaMdFile]), - ok = gen_doc(SchemaMdFile, SchemaModule, I18nFile, Lang). + ok = gen_doc(SchemaMdFile, SchemaModule, Lang). 
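After this refactor, a single call from the build script presumably produces the per-language Markdown and JSON dumps plus the one example file (a sketch; the output directory is made up):

```erlang
%% Writes config-en.md / config-zh.md, schema-en.json / schema-zh.json,
%% and emqx.conf.example under the given directory.
ok = emqx_conf:dump_schema("_build/docgen", emqx_conf_schema).
```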
-gen_example_conf(Dir, I18nFile, SchemaModule, Lang0) -> - Lang = atom_to_list(Lang0), - SchemaMdFile = filename:join([Dir, "emqx.conf." ++ Lang ++ ".example"]), +gen_example_conf(Dir, SchemaModule) -> + SchemaMdFile = filename:join([Dir, "emqx.conf.example"]), io:format(user, "===< Generating: ~s~n", [SchemaMdFile]), - ok = gen_example(SchemaMdFile, SchemaModule, I18nFile, Lang). + ok = gen_example(SchemaMdFile, SchemaModule). %% @doc return the root schema module. -spec schema_module() -> module(). @@ -206,78 +217,45 @@ schema_module() -> %% Internal functions %%-------------------------------------------------------------------- --spec gen_doc(file:name_all(), module(), file:name_all(), string()) -> ok. -gen_doc(File, SchemaModule, I18nFile, Lang) -> +%% @doc Make a resolver function that can be used to lookup the description by hocon_schema_json dump. +make_desc_resolver(Lang) -> + fun + ({desc, Namespace, Id}) -> + emqx_dashboard_desc_cache:lookup(Lang, Namespace, Id, desc); + (Desc) -> + unicode:characters_to_binary(Desc) + end. + +-spec gen_doc(file:name_all(), module(), string()) -> ok. +gen_doc(File, SchemaModule, Lang) -> Version = emqx_release:version(), Title = "# " ++ emqx_release:description() ++ " Configuration\n\n" ++ "", BodyFile = filename:join([rel, "emqx_conf.template." ++ Lang ++ ".md"]), {ok, Body} = file:read_file(BodyFile), - Opts = #{title => Title, body => Body, desc_file => I18nFile, lang => Lang}, + Resolver = make_desc_resolver(Lang), + Opts = #{title => Title, body => Body, desc_resolver => Resolver}, Doc = hocon_schema_md:gen(SchemaModule, Opts), file:write_file(File, Doc). -gen_example(File, SchemaModule, I18nFile, Lang) -> +gen_example(File, SchemaModule) -> + %% we do not generate description in example files + %% so there is no need for a desc_resolver Opts = #{ title => <<"EMQX Configuration Example">>, body => <<"">>, - desc_file => I18nFile, - lang => Lang + include_importance_up_from => ?IMPORTANCE_MEDIUM }, Example = hocon_schema_example:gen(SchemaModule, Opts), file:write_file(File, Example). -%% Only gen hot_conf schema, not all configuration fields. -do_gen_api_schema_json(File, SchemaMod, SchemaInfo) -> - io:format(user, "===< Generating: ~s~n", [File]), - {ApiSpec0, Components0} = emqx_dashboard_swagger:spec( +gen_api_schema_json_iodata(SchemaMod, SchemaInfo) -> + emqx_dashboard_swagger:gen_api_schema_json_iodata( SchemaMod, - #{schema_converter => fun hocon_schema_to_spec/2} - ), - ApiSpec = lists:foldl( - fun({Path, Spec, _, _}, Acc) -> - NewSpec = maps:fold( - fun(Method, #{responses := Responses}, SubAcc) -> - case Responses of - #{ - <<"200">> := - #{ - <<"content">> := #{ - <<"application/json">> := #{<<"schema">> := Schema} - } - } - } -> - SubAcc#{Method => Schema}; - _ -> - SubAcc - end - end, - #{}, - Spec - ), - Acc#{list_to_atom(Path) => NewSpec} - end, - #{}, - ApiSpec0 - ), - Components = lists:foldl(fun(M, Acc) -> maps:merge(M, Acc) end, #{}, Components0), - IoData = jsx:encode( - #{ - info => SchemaInfo, - paths => ApiSpec, - components => #{schemas => Components} - }, - [space, {indent, 4}] - ), - file:write_file(File, IoData). - --define(INIT_SCHEMA, #{ - fields => #{}, - translations => #{}, - validations => [], - namespace => undefined -}). + SchemaInfo, + fun ?MODULE:hocon_schema_to_spec/2 + ). -define(TO_REF(_N_, _F_), iolist_to_binary([to_bin(_N_), ".", to_bin(_F_)])). 
-define(TO_COMPONENTS_SCHEMA(_M_, _F_), @@ -296,8 +274,6 @@ hocon_schema_to_spec(Type, LocalModule) when ?IS_TYPEREFL(Type) -> hocon_schema_to_spec(?ARRAY(Item), LocalModule) -> {Schema, Refs} = hocon_schema_to_spec(Item, LocalModule), {#{type => array, items => Schema}, Refs}; -hocon_schema_to_spec(?LAZY(Item), LocalModule) -> - hocon_schema_to_spec(Item, LocalModule); hocon_schema_to_spec(?ENUM(Items), _LocalModule) -> {#{type => enum, symbols => Items}, []}; hocon_schema_to_spec(?MAP(Name, Type), LocalModule) -> @@ -316,7 +292,7 @@ hocon_schema_to_spec(?UNION(Types), LocalModule) -> {[Schema | Acc], SubRefs ++ RefsAcc} end, {[], []}, - Types + hoconsc:union_members(Types) ), {#{<<"oneOf">> => OneOf}, Refs}; hocon_schema_to_spec(Atom, _LocalModule) when is_atom(Atom) -> @@ -335,7 +311,7 @@ typename_to_spec("float()", _Mod) -> typename_to_spec("integer()", _Mod) -> #{type => number}; typename_to_spec("non_neg_integer()", _Mod) -> - #{type => number, minimum => 1}; + #{type => number, minimum => 0}; typename_to_spec("number()", _Mod) -> #{type => number}; typename_to_spec("string()", _Mod) -> diff --git a/apps/emqx_conf/src/emqx_conf_app.erl b/apps/emqx_conf/src/emqx_conf_app.erl index f2e4f6f56..70234b525 100644 --- a/apps/emqx_conf/src/emqx_conf_app.erl +++ b/apps/emqx_conf/src/emqx_conf_app.erl @@ -28,7 +28,15 @@ -define(DEFAULT_INIT_TXN_ID, -1). start(_StartType, _StartArgs) -> - init_conf(), + try + ok = init_conf() + catch + C:E:St -> + %% logger is not quite ready. + io:format(standard_error, "Failed to load config~n~p~n~p~n~p~n", [C, E, St]), + init:stop(1) + end, + ok = emqx_config_logger:refresh_config(), emqx_conf_sup:start_link(). stop(_State) -> @@ -48,7 +56,15 @@ get_override_config_file() -> TnxId = emqx_cluster_rpc:get_node_tnx_id(Node), WallClock = erlang:statistics(wall_clock), Conf = emqx_config_handler:get_raw_cluster_override_conf(), - #{wall_clock => WallClock, conf => Conf, tnx_id => TnxId, node => Node} + HasDeprecateFile = emqx_config:has_deprecated_file(), + #{ + wall_clock => WallClock, + conf => Conf, + tnx_id => TnxId, + node => Node, + has_deprecated_file => HasDeprecateFile, + release => emqx_app:get_release() + } end, case mria:ro_transaction(?CLUSTER_RPC_SHARD, Fun) of {atomic, Res} -> {ok, Res}; @@ -72,30 +88,26 @@ sync_data_from_node() -> %% Internal functions %% ------------------------------------------------------------------------------ --ifdef(TEST). init_load() -> - emqx_config:init_load(emqx_conf:schema_module(), #{raw_with_default => false}). - --else. - -init_load() -> - emqx_config:init_load(emqx_conf:schema_module(), #{raw_with_default => true}). --endif. + emqx_config:init_load(emqx_conf:schema_module()). init_conf() -> + %% Workaround for https://github.com/emqx/mria/issues/94: + _ = mria_rlog:wait_for_shards([?CLUSTER_RPC_SHARD], 1000), + _ = mria:wait_for_tables([?CLUSTER_MFA, ?CLUSTER_COMMIT]), {ok, TnxId} = copy_override_conf_from_core_node(), - emqx_app:set_init_tnx_id(TnxId), - init_load(), - emqx_app:set_init_config_load_done(). + _ = emqx_app:set_init_tnx_id(TnxId), + ok = init_load(), + ok = emqx_app:set_init_config_load_done(). cluster_nodes() -> - maps:get(running_nodes, ekka_cluster:info()) -- [node()]. + mria:cluster_nodes(cores) -- [node()]. copy_override_conf_from_core_node() -> case cluster_nodes() of %% The first core nodes is self. 
[] -> - ?SLOG(debug, #{msg => "skip_copy_overide_conf_from_core_node"}), + ?SLOG(debug, #{msg => "skip_copy_override_conf_from_core_node"}), {ok, ?DEFAULT_INIT_TXN_ID}; Nodes -> {Results, Failed} = emqx_conf_proto_v2:get_override_config_file(Nodes), @@ -129,7 +141,7 @@ copy_override_conf_from_core_node() -> %% finish the boot sequence and load the %% config for other nodes to copy it. ?SLOG(info, #{ - msg => "skip_copy_overide_conf_from_core_node", + msg => "skip_copy_override_conf_from_core_node", loading_from_disk => true, nodes => Nodes, failed => Failed, @@ -138,10 +150,10 @@ copy_override_conf_from_core_node() -> {ok, ?DEFAULT_INIT_TXN_ID}; false -> %% retry in some time - Jitter = rand:uniform(2_000), - Timeout = 10_000 + Jitter, + Jitter = rand:uniform(2000), + Timeout = 10000 + Jitter, ?SLOG(info, #{ - msg => "copy_overide_conf_from_core_node_retry", + msg => "copy_cluster_conf_from_core_node_retry", timeout => Timeout, nodes => Nodes, failed => Failed, @@ -153,18 +165,18 @@ copy_override_conf_from_core_node() -> _ -> [{ok, Info} | _] = lists:sort(fun conf_sort/2, Ready), #{node := Node, conf := RawOverrideConf, tnx_id := TnxId} = Info, + HasDeprecatedFile = has_deprecated_file(Info), ?SLOG(debug, #{ - msg => "copy_overide_conf_from_core_node_success", + msg => "copy_cluster_conf_from_core_node_success", node => Node, - cluster_override_conf_file => application:get_env( - emqx, cluster_override_conf_file - ), - local_override_conf_file => application:get_env( - emqx, local_override_conf_file - ), - data_dir => emqx:data_dir() + has_deprecated_file => HasDeprecatedFile, + local_release => emqx_app:get_release(), + remote_release => maps:get(release, Info, "before_v5.0.24|e5.0.3"), + data_dir => emqx:data_dir(), + tnx_id => TnxId }), ok = emqx_config:save_to_override_conf( + HasDeprecatedFile, RawOverrideConf, #{override_to => cluster} ), @@ -207,3 +219,13 @@ sync_data_from_node(Node) -> ?SLOG(emergency, #{node => Node, msg => "sync_data_from_node_failed", reason => Error}), error(Error) end. + +has_deprecated_file(#{conf := Conf} = Info) -> + case maps:find(has_deprecated_file, Info) of + {ok, HasDeprecatedFile} -> + HasDeprecatedFile; + error -> + %% Old versions don't have emqx_config:has_deprecated_file/0 + %% Conf is not empty if deprecated file is found. + Conf =/= #{} + end. 
diff --git a/apps/emqx_conf/src/emqx_conf_cli.erl b/apps/emqx_conf/src/emqx_conf_cli.erl index 5c2fd9e18..8e109a1e6 100644 --- a/apps/emqx_conf/src/emqx_conf_cli.erl +++ b/apps/emqx_conf/src/emqx_conf_cli.erl @@ -33,7 +33,7 @@ admins(["status"]) -> status(); admins(["skip"]) -> status(), - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), lists:foreach(fun emqx_cluster_rpc:skip_failed_commit/1, Nodes), status(); admins(["skip", Node0]) -> @@ -46,13 +46,13 @@ admins(["tnxid", TnxId0]) -> emqx_ctl:print("~p~n", [emqx_cluster_rpc:query(TnxId)]); admins(["fast_forward"]) -> status(), - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), TnxId = emqx_cluster_rpc:latest_tnx_id(), lists:foreach(fun(N) -> emqx_cluster_rpc:fast_forward_to_commit(N, TnxId) end, Nodes), status(); admins(["fast_forward", ToTnxId]) -> status(), - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), TnxId = list_to_integer(ToTnxId), lists:foreach(fun(N) -> emqx_cluster_rpc:fast_forward_to_commit(N, TnxId) end, Nodes), status(); diff --git a/apps/emqx_conf/src/emqx_conf_schema.erl b/apps/emqx_conf/src/emqx_conf_schema.erl index 2a46e95a5..97efa67cc 100644 --- a/apps/emqx_conf/src/emqx_conf_schema.erl +++ b/apps/emqx_conf/src/emqx_conf_schema.erl @@ -38,7 +38,9 @@ cipher/0 ]). --export([namespace/0, roots/0, fields/1, translations/0, translation/1, validations/0, desc/1]). +-export([ + namespace/0, roots/0, fields/1, translations/0, translation/1, validations/0, desc/1, tags/0 +]). -export([conf_get/2, conf_get/3, keys/2, filter/1]). %% Static apps which merge their configs into the merged emqx.conf @@ -60,12 +62,16 @@ emqx_exhook_schema, emqx_psk_schema, emqx_limiter_schema, - emqx_slow_subs_schema + emqx_slow_subs_schema, + emqx_mgmt_api_key_schema ]). %% root config should not have a namespace namespace() -> undefined. +tags() -> + [<<"EMQX">>]. 
+ roots() -> PtKey = ?EMQX_AUTHENTICATION_SCHEMA_MODULE_PT_KEY, case persistent_term:get(PtKey, undefined) of @@ -87,12 +93,18 @@ roots() -> {"log", sc( ?R_REF("log"), - #{translate_to => ["kernel"]} + #{ + translate_to => ["kernel"], + importance => ?IMPORTANCE_HIGH + } )}, {"rpc", sc( ?R_REF("rpc"), - #{translate_to => ["gen_rpc"]} + #{ + translate_to => ["gen_rpc"], + importance => ?IMPORTANCE_LOW + } )} ] ++ emqx_schema:roots(medium) ++ @@ -126,7 +138,7 @@ fields("cluster") -> )}, {"core_nodes", sc( - emqx_schema:comma_separated_atoms(), + node_array(), #{ mapping => "mria.core_nodes", default => [], @@ -139,7 +151,7 @@ fields("cluster") -> emqx_schema:duration(), #{ mapping => "ekka.cluster_autoclean", - default => "5m", + default => <<"5m">>, desc => ?DESC(cluster_autoclean), 'readOnly' => true } @@ -194,7 +206,7 @@ fields(cluster_static) -> [ {"seeds", sc( - hoconsc:array(atom()), + node_array(), #{ default => [], desc => ?DESC(cluster_static_seeds), @@ -208,7 +220,7 @@ fields(cluster_mcast) -> sc( string(), #{ - default => "239.192.0.1", + default => <<"239.192.0.1">>, desc => ?DESC(cluster_mcast_addr), 'readOnly' => true } @@ -226,7 +238,7 @@ fields(cluster_mcast) -> sc( string(), #{ - default => "0.0.0.0", + default => <<"0.0.0.0">>, desc => ?DESC(cluster_mcast_iface), 'readOnly' => true } @@ -253,7 +265,7 @@ fields(cluster_mcast) -> sc( emqx_schema:bytesize(), #{ - default => "16KB", + default => <<"16KB">>, desc => ?DESC(cluster_mcast_sndbuf), 'readOnly' => true } @@ -262,7 +274,7 @@ fields(cluster_mcast) -> sc( emqx_schema:bytesize(), #{ - default => "16KB", + default => <<"16KB">>, desc => ?DESC(cluster_mcast_recbuf), 'readOnly' => true } @@ -271,7 +283,7 @@ fields(cluster_mcast) -> sc( emqx_schema:bytesize(), #{ - default => "32KB", + default => <<"32KB">>, desc => ?DESC(cluster_mcast_buffer), 'readOnly' => true } @@ -283,7 +295,7 @@ fields(cluster_dns) -> sc( string(), #{ - default => "localhost", + default => <<"localhost">>, desc => ?DESC(cluster_dns_name), 'readOnly' => true } @@ -312,7 +324,7 @@ fields(cluster_etcd) -> sc( string(), #{ - default => "emqxcl", + default => <<"emqxcl">>, desc => ?DESC(cluster_etcd_prefix), 'readOnly' => true } @@ -321,16 +333,17 @@ fields(cluster_etcd) -> sc( emqx_schema:duration(), #{ - default => "1m", + default => <<"1m">>, 'readOnly' => true, desc => ?DESC(cluster_etcd_node_ttl) } )}, - {"ssl", + {"ssl_options", sc( ?R_REF(emqx_schema, "ssl_client_opts"), #{ desc => ?DESC(cluster_etcd_ssl), + aliases => [ssl], 'readOnly' => true } )} @@ -341,7 +354,7 @@ fields(cluster_k8s) -> sc( string(), #{ - default => "http://10.110.111.204:8080", + default => <<"http://10.110.111.204:8080">>, desc => ?DESC(cluster_k8s_apiserver), 'readOnly' => true } @@ -350,7 +363,7 @@ fields(cluster_k8s) -> sc( string(), #{ - default => "emqx", + default => <<"emqx">>, desc => ?DESC(cluster_k8s_service_name), 'readOnly' => true } @@ -368,7 +381,7 @@ fields(cluster_k8s) -> sc( string(), #{ - default => "default", + default => <<"default">>, desc => ?DESC(cluster_k8s_namespace), 'readOnly' => true } @@ -377,7 +390,7 @@ fields(cluster_k8s) -> sc( string(), #{ - default => "pod.local", + default => <<"pod.local">>, 'readOnly' => true, desc => ?DESC(cluster_k8s_suffix) } @@ -389,8 +402,9 @@ fields("node") -> sc( string(), #{ - default => "emqx@127.0.0.1", + default => <<"emqx@127.0.0.1">>, 'readOnly' => true, + importance => ?IMPORTANCE_HIGH, desc => ?DESC(node_name) } )}, @@ -402,7 +416,9 @@ fields("node") -> required => true, 'readOnly' => true, sensitive => true, 
- desc => ?DESC(node_cookie) + desc => ?DESC(node_cookie), + importance => ?IMPORTANCE_HIGH, + converter => fun emqx_schema:password_converter/2 } )}, {"process_limit", @@ -412,6 +428,7 @@ fields("node") -> mapping => "vm_args.+P", desc => ?DESC(process_limit), default => 2097152, + importance => ?IMPORTANCE_MEDIUM, 'readOnly' => true } )}, @@ -422,6 +439,7 @@ fields("node") -> mapping => "vm_args.+Q", desc => ?DESC(max_ports), default => 1048576, + importance => ?IMPORTANCE_HIGH, 'readOnly' => true } )}, @@ -432,6 +450,7 @@ fields("node") -> mapping => "vm_args.+zdbbl", desc => ?DESC(dist_buffer_size), default => 8192, + importance => ?IMPORTANCE_LOW, 'readOnly' => true } )}, @@ -442,6 +461,7 @@ fields("node") -> mapping => "vm_args.+e", desc => ?DESC(max_ets_tables), default => 262144, + importance => ?IMPORTANCE_HIDDEN, 'readOnly' => true } )}, @@ -452,17 +472,21 @@ fields("node") -> required => true, 'readOnly' => true, mapping => "emqx.data_dir", + %% for now, it's tricky to use a different data_dir + %% otherwise data paths in cluster config may differ + %% TODO: change configurable data file paths to relative + importance => ?IMPORTANCE_LOW, desc => ?DESC(node_data_dir) } )}, {"config_files", sc( - list(string()), + hoconsc:array(string()), #{ mapping => "emqx.config_files", - default => undefined, - 'readOnly' => true, - desc => ?DESC(node_config_files) + importance => ?IMPORTANCE_HIDDEN, + required => false, + 'readOnly' => true } )}, {"global_gc_interval", @@ -470,8 +494,9 @@ fields("node") -> hoconsc:union([disabled, emqx_schema:duration()]), #{ mapping => "emqx_machine.global_gc_interval", - default => "15m", + default => <<"15m">>, desc => ?DESC(node_global_gc_interval), + importance => ?IMPORTANCE_LOW, 'readOnly' => true } )}, @@ -481,7 +506,8 @@ fields("node") -> #{ mapping => "vm_args.-env ERL_CRASH_DUMP", desc => ?DESC(node_crash_dump_file), - default => "log/erl_crash.dump", + default => crash_dump_file_default(), + importance => ?IMPORTANCE_HIDDEN, 'readOnly' => true } )}, @@ -490,8 +516,9 @@ fields("node") -> emqx_schema:duration_s(), #{ mapping => "vm_args.-env ERL_CRASH_DUMP_SECONDS", - default => "30s", + default => <<"30s">>, desc => ?DESC(node_crash_dump_seconds), + importance => ?IMPORTANCE_HIDDEN, 'readOnly' => true } )}, @@ -500,8 +527,9 @@ fields("node") -> emqx_schema:bytesize(), #{ mapping => "vm_args.-env ERL_CRASH_DUMP_BYTES", - default => "100MB", + default => <<"100MB">>, desc => ?DESC(node_crash_dump_bytes), + importance => ?IMPORTANCE_HIDDEN, 'readOnly' => true } )}, @@ -510,8 +538,9 @@ fields("node") -> emqx_schema:duration_s(), #{ mapping => "vm_args.-kernel net_ticktime", - default => "2m", + default => <<"2m">>, 'readOnly' => true, + importance => ?IMPORTANCE_HIDDEN, desc => ?DESC(node_dist_net_ticktime) } )}, @@ -522,6 +551,7 @@ fields("node") -> mapping => "emqx_machine.backtrace_depth", default => 23, 'readOnly' => true, + importance => ?IMPORTANCE_HIDDEN, desc => ?DESC(node_backtrace_depth) } )}, @@ -532,6 +562,7 @@ fields("node") -> mapping => "emqx_machine.applications", default => [], 'readOnly' => true, + importance => ?IMPORTANCE_HIDDEN, desc => ?DESC(node_applications) } )}, @@ -541,13 +572,17 @@ fields("node") -> #{ desc => ?DESC(node_etc_dir), 'readOnly' => true, + importance => ?IMPORTANCE_HIDDEN, deprecated => {since, "5.0.8"} } )}, {"cluster_call", sc( ?R_REF("cluster_call"), - #{'readOnly' => true} + #{ + 'readOnly' => true, + importance => ?IMPORTANCE_HIDDEN + } )}, {"db_backend", sc( @@ -556,16 +591,19 @@ fields("node") -> mapping 
=> "mria.db_backend", default => rlog, 'readOnly' => true, + importance => ?IMPORTANCE_HIDDEN, desc => ?DESC(db_backend) } )}, - {"db_role", + {"role", sc( hoconsc:enum([core, replicant]), #{ mapping => "mria.node_role", default => core, 'readOnly' => true, + importance => ?IMPORTANCE_HIGH, + aliases => [db_role], desc => ?DESC(db_role) } )}, @@ -576,6 +614,7 @@ fields("node") -> mapping => "mria.rlog_rpc_module", default => gen_rpc, 'readOnly' => true, + importance => ?IMPORTANCE_HIDDEN, desc => ?DESC(db_rpc_module) } )}, @@ -586,6 +625,7 @@ fields("node") -> mapping => "mria.tlog_push_mode", default => async, 'readOnly' => true, + importance => ?IMPORTANCE_HIDDEN, desc => ?DESC(db_tlog_push_mode) } )}, @@ -594,7 +634,7 @@ fields("node") -> hoconsc:enum([gen_rpc, distr]), #{ mapping => "mria.shard_transport", - hidden => true, + importance => ?IMPORTANCE_HIDDEN, default => gen_rpc, desc => ?DESC(db_default_shard_transport) } @@ -604,7 +644,7 @@ fields("node") -> map(shard, hoconsc:enum([gen_rpc, distr])), #{ desc => ?DESC(db_shard_transports), - hidden => true, + importance => ?IMPORTANCE_HIDDEN, mapping => "emqx_machine.custom_shard_transports", default => #{} } @@ -617,7 +657,7 @@ fields("cluster_call") -> emqx_schema:duration(), #{ desc => ?DESC(cluster_call_retry_interval), - default => "1m" + default => <<"1m">> } )}, {"max_history", @@ -633,7 +673,7 @@ fields("cluster_call") -> emqx_schema:duration(), #{ desc => ?DESC(cluster_call_cleanup_interval), - default => "5m" + default => <<"5m">> } )} ]; @@ -647,11 +687,12 @@ fields("rpc") -> desc => ?DESC(rpc_mode) } )}, - {"driver", + {"protocol", sc( hoconsc:enum([tcp, ssl]), #{ mapping => "gen_rpc.driver", + aliases => [driver], default => tcp, desc => ?DESC(rpc_driver) } @@ -705,7 +746,7 @@ fields("rpc") -> emqx_schema:duration(), #{ mapping => "gen_rpc.connect_timeout", - default => "5s", + default => <<"5s">>, desc => ?DESC(rpc_connect_timeout) } )}, @@ -738,7 +779,7 @@ fields("rpc") -> emqx_schema:duration(), #{ mapping => "gen_rpc.send_timeout", - default => "5s", + default => <<"5s">>, desc => ?DESC(rpc_send_timeout) } )}, @@ -747,7 +788,7 @@ fields("rpc") -> emqx_schema:duration(), #{ mapping => "gen_rpc.authentication_timeout", - default => "5s", + default => <<"5s">>, desc => ?DESC(rpc_authentication_timeout) } )}, @@ -756,7 +797,7 @@ fields("rpc") -> emqx_schema:duration(), #{ mapping => "gen_rpc.call_receive_timeout", - default => "15s", + default => <<"15s">>, desc => ?DESC(rpc_call_receive_timeout) } )}, @@ -765,7 +806,7 @@ fields("rpc") -> emqx_schema:duration_s(), #{ mapping => "gen_rpc.socket_keepalive_idle", - default => "15m", + default => <<"15m">>, desc => ?DESC(rpc_socket_keepalive_idle) } )}, @@ -774,7 +815,7 @@ fields("rpc") -> emqx_schema:duration_s(), #{ mapping => "gen_rpc.socket_keepalive_interval", - default => "75s", + default => <<"75s">>, desc => ?DESC(rpc_socket_keepalive_interval) } )}, @@ -792,7 +833,7 @@ fields("rpc") -> emqx_schema:bytesize(), #{ mapping => "gen_rpc.socket_sndbuf", - default => "1MB", + default => <<"1MB">>, desc => ?DESC(rpc_socket_sndbuf) } )}, @@ -801,7 +842,7 @@ fields("rpc") -> emqx_schema:bytesize(), #{ mapping => "gen_rpc.socket_recbuf", - default => "1MB", + default => <<"1MB">>, desc => ?DESC(rpc_socket_recbuf) } )}, @@ -810,7 +851,7 @@ fields("rpc") -> emqx_schema:bytesize(), #{ mapping => "gen_rpc.socket_buffer", - default => "1MB", + default => <<"1MB">>, desc => ?DESC(rpc_socket_buffer) } )}, @@ -826,58 +867,64 @@ fields("rpc") -> ]; fields("log") -> [ - 
{"console_handler", ?R_REF("console_handler")}, - {"file_handlers", + {"console", + sc(?R_REF("console_handler"), #{ + aliases => [console_handler], + importance => ?IMPORTANCE_HIGH + })}, + {"file", sc( - map(name, ?R_REF("log_file_handler")), - #{desc => ?DESC("log_file_handlers")} + ?UNION([ + ?R_REF("log_file_handler"), + ?MAP(handler_name, ?R_REF("log_file_handler")) + ]), + #{ + desc => ?DESC("log_file_handlers"), + converter => fun ensure_file_handlers/2, + default => #{<<"level">> => <<"warning">>}, + aliases => [file_handlers], + importance => ?IMPORTANCE_HIGH + } )} ]; fields("console_handler") -> - log_handler_common_confs(false); + log_handler_common_confs(console); fields("log_file_handler") -> [ - {"file", + {"to", sc( file(), #{ desc => ?DESC("log_file_handler_file"), - validator => fun validate_file_location/1 + default => <<"${EMQX_LOG_DIR}/emqx.log">>, + converter => fun emqx_schema:naive_env_interpolation/1, + validator => fun validate_file_location/1, + aliases => [file], + importance => ?IMPORTANCE_HIGH } )}, - {"rotation", + {"rotation_count", sc( - ?R_REF("log_rotation"), - #{} + range(1, 128), + #{ + aliases => [rotation], + default => 10, + converter => fun convert_rotation/2, + desc => ?DESC("log_rotation_count"), + importance => ?IMPORTANCE_MEDIUM + } )}, - {"max_size", + {"rotation_size", sc( hoconsc:union([infinity, emqx_schema:bytesize()]), #{ - default => "50MB", - desc => ?DESC("log_file_handler_max_size") + default => <<"50MB">>, + desc => ?DESC("log_file_handler_max_size"), + aliases => [max_size], + importance => ?IMPORTANCE_MEDIUM } )} - ] ++ log_handler_common_confs(true); -fields("log_rotation") -> - [ - {"enable", - sc( - boolean(), - #{ - default => true, - desc => ?DESC("log_rotation_enable") - } - )}, - {"count", - sc( - range(1, 2048), - #{ - default => 10, - desc => ?DESC("log_rotation_count") - } - )} - ]; + ] ++ log_handler_common_confs(file); fields("log_overload_kill") -> [ {"enable", @@ -892,7 +939,7 @@ fields("log_overload_kill") -> sc( emqx_schema:bytesize(), #{ - default => "30MB", + default => <<"30MB">>, desc => ?DESC("log_overload_kill_mem_size") } )}, @@ -908,7 +955,7 @@ fields("log_overload_kill") -> sc( hoconsc:union([emqx_schema:duration_ms(), infinity]), #{ - default => "5s", + default => <<"5s">>, desc => ?DESC("log_overload_kill_restart_after") } )} @@ -935,14 +982,14 @@ fields("log_burst_limit") -> sc( emqx_schema:duration(), #{ - default => "1s", + default => <<"1s">>, desc => ?DESC("log_burst_limit_window_time") } )} ]; fields("authorization") -> - emqx_schema:fields("authorization") ++ - emqx_authz_schema:fields("authorization"). + emqx_schema:authz_fields() ++ + emqx_authz_schema:authz_fields(). 
desc("cluster") -> ?DESC("desc_cluster"); @@ -985,15 +1032,16 @@ translation("ekka") -> [{"cluster_discovery", fun tr_cluster_discovery/1}]; translation("kernel") -> [ - {"logger_level", fun tr_logger_level/1}, - {"logger", fun tr_logger/1}, + {"logger_level", fun emqx_config_logger:tr_level/1}, + {"logger", fun emqx_config_logger:tr_handlers/1}, {"error_logger", fun(_) -> silent end} ]; translation("emqx") -> [ {"config_files", fun tr_config_files/1}, {"cluster_override_conf_file", fun tr_cluster_override_conf_file/1}, - {"local_override_conf_file", fun tr_local_override_conf_file/1} + {"local_override_conf_file", fun tr_local_override_conf_file/1}, + {"cluster_hocon_file", fun tr_cluster_hocon_file/1} ]; translation("gen_rpc") -> [{"default_client_driver", fun tr_default_config_driver/1}]; @@ -1031,26 +1079,25 @@ metrics_enabled(disabled) -> []. tr_default_config_driver(Conf) -> conf_get("rpc.driver", Conf). -tr_config_files(Conf) -> - case conf_get("emqx.config_files", Conf) of - [_ | _] = Files -> - Files; - _ -> - case os:getenv("EMQX_ETC_DIR") of - false -> - [filename:join([code:lib_dir(emqx), "etc", "emqx.conf"])]; - Dir -> - [filename:join([Dir, "emqx.conf"])] - end +tr_config_files(_Conf) -> + case os:getenv("EMQX_ETC_DIR") of + false -> + %% testing, or running emqx app as deps + [filename:join([code:lib_dir(emqx), "etc", "emqx.conf"])]; + Dir -> + [filename:join([Dir, "emqx.conf"])] end. tr_cluster_override_conf_file(Conf) -> - tr_override_conf_file(Conf, "cluster-override.conf"). + tr_conf_file(Conf, "cluster-override.conf"). tr_local_override_conf_file(Conf) -> - tr_override_conf_file(Conf, "local-override.conf"). + tr_conf_file(Conf, "local-override.conf"). -tr_override_conf_file(Conf, Filename) -> +tr_cluster_hocon_file(Conf) -> + tr_conf_file(Conf, "cluster.hocon"). + +tr_conf_file(Conf, Filename) -> DataDir = conf_get("node.data_dir", Conf), %% assert, this config is not nullable [_ | _] = DataDir, @@ -1060,106 +1107,34 @@ tr_cluster_discovery(Conf) -> Strategy = conf_get("cluster.discovery_strategy", Conf), {Strategy, filter(cluster_options(Strategy, Conf))}. --spec tr_logger_level(hocon:config()) -> logger:level(). -tr_logger_level(Conf) -> - ConsoleLevel = conf_get("log.console_handler.level", Conf, undefined), - FileLevels = [ - conf_get("level", SubConf) - || {_, SubConf} <- - logger_file_handlers(Conf) - ], - case FileLevels ++ [ConsoleLevel || ConsoleLevel =/= undefined] of - %% warning is the default level we should use - [] -> warning; - Levels -> least_severe_log_level(Levels) - end. - -logger_file_handlers(Conf) -> - Handlers = maps:to_list(conf_get("log.file_handlers", Conf, #{})), - lists:filter( - fun({_Name, Opts}) -> - B = conf_get("enable", Opts), - true = is_boolean(B), - B +log_handler_common_confs(Handler) -> + %% we rarely support dynamic defaults like this + %% for this one, we have build-time default the same as runtime default + %% so it's less tricky + EnableValues = + case Handler of + console -> ["console", "both"]; + file -> ["file", "both", "", false] end, - Handlers - ). 
- -tr_logger(Conf) -> - %% For the default logger that outputs to console - ConsoleHandler = - case conf_get("log.console_handler.enable", Conf) of - true -> - ConsoleConf = conf_get("log.console_handler", Conf), - [ - {handler, console, logger_std_h, #{ - level => conf_get("log.console_handler.level", Conf), - config => (log_handler_conf(ConsoleConf))#{type => standard_io}, - formatter => log_formatter(ConsoleConf), - filters => log_filter(ConsoleConf) - }} - ]; - false -> - [] - end, - %% For the file logger - FileHandlers = - [ - begin - {handler, to_atom(HandlerName), logger_disk_log_h, #{ - level => conf_get("level", SubConf), - config => (log_handler_conf(SubConf))#{ - type => - case conf_get("rotation.enable", SubConf) of - true -> wrap; - _ -> halt - end, - file => conf_get("file", SubConf), - max_no_files => conf_get("rotation.count", SubConf), - max_no_bytes => conf_get("max_size", SubConf) - }, - formatter => log_formatter(SubConf), - filters => log_filter(SubConf), - filesync_repeat_interval => no_repeat - }} - end - || {HandlerName, SubConf} <- logger_file_handlers(Conf) - ], - [{handler, default, undefined}] ++ ConsoleHandler ++ FileHandlers. - -log_handler_common_confs(Enable) -> + EnvValue = os:getenv("EMQX_DEFAULT_LOG_HANDLER"), + Enable = lists:member(EnvValue, EnableValues), [ - {"enable", - sc( - boolean(), - #{ - default => Enable, - desc => ?DESC("common_handler_enable") - } - )}, {"level", sc( log_level(), #{ default => warning, - desc => ?DESC("common_handler_level") + desc => ?DESC("common_handler_level"), + importance => ?IMPORTANCE_HIGH } )}, - {"time_offset", + {"enable", sc( - string(), + boolean(), #{ - default => "system", - desc => ?DESC("common_handler_time_offset"), - validator => fun validate_time_offset/1 - } - )}, - {"chars_limit", - sc( - hoconsc:union([unlimited, range(100, inf)]), - #{ - default => unlimited, - desc => ?DESC("common_handler_chars_limit") + default => Enable, + desc => ?DESC("common_handler_enable"), + importance => ?IMPORTANCE_MEDIUM } )}, {"formatter", @@ -1167,7 +1142,27 @@ log_handler_common_confs(Enable) -> hoconsc:enum([text, json]), #{ default => text, - desc => ?DESC("common_handler_formatter") + desc => ?DESC("common_handler_formatter"), + importance => ?IMPORTANCE_MEDIUM + } + )}, + {"time_offset", + sc( + string(), + #{ + default => <<"system">>, + desc => ?DESC("common_handler_time_offset"), + validator => fun validate_time_offset/1, + importance => ?IMPORTANCE_LOW + } + )}, + {"chars_limit", + sc( + hoconsc:union([unlimited, range(100, inf)]), + #{ + default => unlimited, + desc => ?DESC("common_handler_chars_limit"), + importance => ?IMPORTANCE_HIDDEN } )}, {"single_line", @@ -1175,7 +1170,8 @@ log_handler_common_confs(Enable) -> boolean(), #{ default => true, - desc => ?DESC("common_handler_single_line") + desc => ?DESC("common_handler_single_line"), + importance => ?IMPORTANCE_HIDDEN } )}, {"sync_mode_qlen", @@ -1183,7 +1179,8 @@ log_handler_common_confs(Enable) -> non_neg_integer(), #{ default => 100, - desc => ?DESC("common_handler_sync_mode_qlen") + desc => ?DESC("common_handler_sync_mode_qlen"), + importance => ?IMPORTANCE_HIDDEN } )}, {"drop_mode_qlen", @@ -1191,7 +1188,8 @@ log_handler_common_confs(Enable) -> pos_integer(), #{ default => 3000, - desc => ?DESC("common_handler_drop_mode_qlen") + desc => ?DESC("common_handler_drop_mode_qlen"), + importance => ?IMPORTANCE_HIDDEN } )}, {"flush_qlen", @@ -1199,17 +1197,19 @@ log_handler_common_confs(Enable) -> pos_integer(), #{ default => 8000, - desc => 
?DESC("common_handler_flush_qlen") + desc => ?DESC("common_handler_flush_qlen"), + importance => ?IMPORTANCE_HIDDEN } )}, - {"overload_kill", sc(?R_REF("log_overload_kill"), #{})}, - {"burst_limit", sc(?R_REF("log_burst_limit"), #{})}, + {"overload_kill", sc(?R_REF("log_overload_kill"), #{importance => ?IMPORTANCE_HIDDEN})}, + {"burst_limit", sc(?R_REF("log_burst_limit"), #{importance => ?IMPORTANCE_HIDDEN})}, {"supervisor_reports", sc( hoconsc:enum([error, progress]), #{ default => error, - desc => ?DESC("common_handler_supervisor_reports") + desc => ?DESC("common_handler_supervisor_reports"), + importance => ?IMPORTANCE_HIDDEN } )}, {"max_depth", @@ -1217,90 +1217,26 @@ log_handler_common_confs(Enable) -> hoconsc:union([unlimited, non_neg_integer()]), #{ default => 100, - desc => ?DESC("common_handler_max_depth") + desc => ?DESC("common_handler_max_depth"), + importance => ?IMPORTANCE_HIDDEN } )} ]. -log_handler_conf(Conf) -> - SycModeQlen = conf_get("sync_mode_qlen", Conf), - DropModeQlen = conf_get("drop_mode_qlen", Conf), - FlushQlen = conf_get("flush_qlen", Conf), - Overkill = conf_get("overload_kill", Conf), - BurstLimit = conf_get("burst_limit", Conf), - #{ - sync_mode_qlen => SycModeQlen, - drop_mode_qlen => DropModeQlen, - flush_qlen => FlushQlen, - overload_kill_enable => conf_get("enable", Overkill), - overload_kill_qlen => conf_get("qlen", Overkill), - overload_kill_mem_size => conf_get("mem_size", Overkill), - overload_kill_restart_after => conf_get("restart_after", Overkill), - burst_limit_enable => conf_get("enable", BurstLimit), - burst_limit_max_count => conf_get("max_count", BurstLimit), - burst_limit_window_time => conf_get("window_time", BurstLimit) - }. - -log_formatter(Conf) -> - CharsLimit = - case conf_get("chars_limit", Conf) of - unlimited -> unlimited; - V when V > 0 -> V - end, - TimeOffSet = - case conf_get("time_offset", Conf) of - "system" -> ""; - "utc" -> 0; - OffSetStr -> OffSetStr - end, - SingleLine = conf_get("single_line", Conf), - Depth = conf_get("max_depth", Conf), - do_formatter(conf_get("formatter", Conf), CharsLimit, SingleLine, TimeOffSet, Depth). - -%% helpers -do_formatter(json, CharsLimit, SingleLine, TimeOffSet, Depth) -> - {emqx_logger_jsonfmt, #{ - chars_limit => CharsLimit, - single_line => SingleLine, - time_offset => TimeOffSet, - depth => Depth - }}; -do_formatter(text, CharsLimit, SingleLine, TimeOffSet, Depth) -> - {emqx_logger_textfmt, #{ - template => [time, " [", level, "] ", msg, "\n"], - chars_limit => CharsLimit, - single_line => SingleLine, - time_offset => TimeOffSet, - depth => Depth - }}. - -log_filter(Conf) -> - case conf_get("supervisor_reports", Conf) of - error -> [{drop_progress_reports, {fun logger_filters:progress/2, stop}}]; - progress -> [] +crash_dump_file_default() -> + case os:getenv("EMQX_LOG_DIR") of + false -> + %% testing, or running emqx app as deps + <<"log/erl_crash.dump">>; + Dir -> + unicode:characters_to_binary(filename:join([Dir, "erl_crash.dump"]), utf8) end. -least_severe_log_level(Levels) -> - hd(sort_log_levels(Levels)). - -sort_log_levels(Levels) -> - lists:sort( - fun(A, B) -> - case logger:compare_levels(A, B) of - R when R == lt; R == eq -> true; - gt -> false - end - end, - Levels - ). - %% utils -spec conf_get(string() | [string()], hocon:config()) -> term(). -conf_get(Key, Conf) -> - ensure_list(hocon_maps:get(Key, Conf)). +conf_get(Key, Conf) -> emqx_schema:conf_get(Key, Conf). -conf_get(Key, Conf, Default) -> - ensure_list(hocon_maps:get(Key, Conf, Default)). 
+conf_get(Key, Conf, Default) -> emqx_schema:conf_get(Key, Conf, Default). filter(Opts) -> [{K, V} || {K, V} <- Opts, V =/= undefined]. @@ -1335,7 +1271,7 @@ cluster_options(dns, Conf) -> {type, conf_get("cluster.dns.record_type", Conf)} ]; cluster_options(etcd, Conf) -> - Namespace = "cluster.etcd.ssl", + Namespace = "cluster.etcd.ssl_options", SslOpts = fun(C) -> Options = keys(Namespace, C), lists:map(fun(Key) -> {to_atom(Key), conf_get([Namespace, Key], Conf)} end, Options) @@ -1364,15 +1300,6 @@ to_atom(Str) when is_list(Str) -> to_atom(Bin) when is_binary(Bin) -> binary_to_atom(Bin, utf8). --spec ensure_list(binary() | list(char())) -> list(char()). -ensure_list(V) -> - case is_binary(V) of - true -> - binary_to_list(V); - false -> - V - end. - roots(Module) -> lists:map(fun({_BinName, Root}) -> Root end, hocon_schema:roots(Module)). @@ -1384,7 +1311,10 @@ emqx_schema_high_prio_roots() -> {"authorization", sc( ?R_REF("authorization"), - #{desc => ?DESC(authorization)} + #{ + desc => ?DESC(authorization), + importance => ?IMPORTANCE_HIGH + } )}, lists:keyreplace("authorization", 1, Roots, Authz). @@ -1407,3 +1337,22 @@ validator_string_re(Val, RE, Error) -> catch _:_ -> {error, Error} end. + +node_array() -> + hoconsc:union([emqx_schema:comma_separated_atoms(), hoconsc:array(atom())]). + +ensure_file_handlers(Conf, _Opts) -> + FileFields = lists:flatmap( + fun({F, Schema}) -> + Alias = [atom_to_binary(A) || A <- maps:get(aliases, Schema, [])], + [list_to_binary(F) | Alias] + end, + fields("log_file_handler") + ), + HandlersWithoutName = maps:with(FileFields, Conf), + HandlersWithName = maps:without(FileFields, Conf), + emqx_utils_maps:deep_merge(#{<<"default">> => HandlersWithoutName}, HandlersWithName). + +convert_rotation(undefined, _Opts) -> undefined; +convert_rotation(#{} = Rotation, _Opts) -> maps:get(<<"count">>, Rotation, 10); +convert_rotation(Count, _Opts) when is_integer(Count) -> Count. diff --git a/apps/emqx_conf/src/emqx_conf_sup.erl b/apps/emqx_conf/src/emqx_conf_sup.erl index d4411af4b..6a3d795ae 100644 --- a/apps/emqx_conf/src/emqx_conf_sup.erl +++ b/apps/emqx_conf/src/emqx_conf_sup.erl @@ -36,7 +36,7 @@ init([]) -> ChildSpecs = [ child_spec(emqx_cluster_rpc, []), - child_spec(emqx_cluster_rpc_handler, []) + child_spec(emqx_cluster_rpc_cleaner, []) ], {ok, {SupFlags, ChildSpecs}}. diff --git a/apps/emqx_conf/src/proto/emqx_conf_proto_v1.erl b/apps/emqx_conf/src/proto/emqx_conf_proto_v1.erl index 84687e314..b66307a1b 100644 --- a/apps/emqx_conf/src/proto/emqx_conf_proto_v1.erl +++ b/apps/emqx_conf/src/proto/emqx_conf_proto_v1.erl @@ -38,22 +38,22 @@ -include_lib("emqx/include/bpapi.hrl"). --type update_config_key_path() :: [emqx_map_lib:config_key(), ...]. +-type update_config_key_path() :: [emqx_utils_maps:config_key(), ...]. introduced_in() -> "5.0.0". --spec get_config(node(), emqx_map_lib:config_key_path()) -> +-spec get_config(node(), emqx_utils_maps:config_key_path()) -> term() | emqx_rpc:badrpc(). get_config(Node, KeyPath) -> rpc:call(Node, emqx, get_config, [KeyPath]). --spec get_config(node(), emqx_map_lib:config_key_path(), _Default) -> +-spec get_config(node(), emqx_utils_maps:config_key_path(), _Default) -> term() | emqx_rpc:badrpc(). get_config(Node, KeyPath, Default) -> rpc:call(Node, emqx, get_config, [KeyPath, Default]). --spec get_all(emqx_map_lib:config_key_path()) -> emqx_rpc:multicall_result(). +-spec get_all(emqx_utils_maps:config_key_path()) -> emqx_rpc:multicall_result(). 
get_all(KeyPath) -> rpc:multicall(emqx_conf, get_node_and_config, [KeyPath], 5000). diff --git a/apps/emqx_conf/src/proto/emqx_conf_proto_v2.erl b/apps/emqx_conf/src/proto/emqx_conf_proto_v2.erl index dd8d2fedd..97446ee9f 100644 --- a/apps/emqx_conf/src/proto/emqx_conf_proto_v2.erl +++ b/apps/emqx_conf/src/proto/emqx_conf_proto_v2.erl @@ -44,19 +44,19 @@ introduced_in() -> -spec sync_data_from_node(node()) -> {ok, binary()} | emqx_rpc:badrpc(). sync_data_from_node(Node) -> rpc:call(Node, emqx_conf_app, sync_data_from_node, [], 20000). --type update_config_key_path() :: [emqx_map_lib:config_key(), ...]. +-type update_config_key_path() :: [emqx_utils_maps:config_key(), ...]. --spec get_config(node(), emqx_map_lib:config_key_path()) -> +-spec get_config(node(), emqx_utils_maps:config_key_path()) -> term() | emqx_rpc:badrpc(). get_config(Node, KeyPath) -> rpc:call(Node, emqx, get_config, [KeyPath]). --spec get_config(node(), emqx_map_lib:config_key_path(), _Default) -> +-spec get_config(node(), emqx_utils_maps:config_key_path(), _Default) -> term() | emqx_rpc:badrpc(). get_config(Node, KeyPath, Default) -> rpc:call(Node, emqx, get_config, [KeyPath, Default]). --spec get_all(emqx_map_lib:config_key_path()) -> emqx_rpc:multicall_result(). +-spec get_all(emqx_utils_maps:config_key_path()) -> emqx_rpc:multicall_result(). get_all(KeyPath) -> rpc:multicall(emqx_conf, get_node_and_config, [KeyPath], 5000). diff --git a/apps/emqx_conf/test/emqx_cluster_rpc_SUITE.erl b/apps/emqx_conf/test/emqx_cluster_rpc_SUITE.erl index d4b28e946..8cdfcaeea 100644 --- a/apps/emqx_conf/test/emqx_cluster_rpc_SUITE.erl +++ b/apps/emqx_conf/test/emqx_cluster_rpc_SUITE.erl @@ -43,16 +43,21 @@ groups() -> []. init_per_suite(Config) -> application:load(emqx_conf), ok = ekka:start(), + ok = emqx_common_test_helpers:start_apps([]), ok = mria_rlog:wait_for_shards([?CLUSTER_RPC_SHARD], infinity), ok = emqx_config:put([node, cluster_call, retry_interval], 1000), meck:new(emqx_alarm, [non_strict, passthrough, no_link]), meck:expect(emqx_alarm, activate, 3, ok), meck:expect(emqx_alarm, deactivate, 3, ok), + meck:new(mria, [non_strict, passthrough, no_link]), + meck:expect(mria, running_nodes, 0, [?NODE1, {node(), ?NODE2}, {node(), ?NODE3}]), Config. end_per_suite(_Config) -> + ok = emqx_common_test_helpers:stop_apps([]), ekka:stop(), mria:stop(), + meck:unload(mria), mria_mnesia:delete_schema(), meck:unload(emqx_alarm), ok. @@ -252,13 +257,13 @@ t_fast_forward_commit(_Config) -> ), ok. -t_handler_unexpected_msg(_Config) -> - Handler = emqx_cluster_rpc_handler, - OldPid = erlang:whereis(Handler), - ok = gen_server:cast(Handler, unexpected_cast_msg), - ignore = gen_server:call(Handler, unexpected_cast_msg), - erlang:send(Handler, unexpected_info_msg), - NewPid = erlang:whereis(Handler), +t_cleaner_unexpected_msg(_Config) -> + Cleaner = emqx_cluster_cleaner, + OldPid = erlang:whereis(Cleaner), + ok = gen_server:cast(Cleaner, unexpected_cast_msg), + ignore = gen_server:call(Cleaner, unexpected_cast_msg), + erlang:send(Cleaner, unexpected_info_msg), + NewPid = erlang:whereis(Cleaner), ?assertEqual(OldPid, NewPid), ok. 
@@ -276,8 +281,8 @@ start() -> {ok, Pid1} = emqx_cluster_rpc:start_link(), {ok, Pid2} = emqx_cluster_rpc:start_link({node(), ?NODE2}, ?NODE2, 500), {ok, Pid3} = emqx_cluster_rpc:start_link({node(), ?NODE3}, ?NODE3, 500), - {ok, Pid4} = emqx_cluster_rpc_handler:start_link(100, 500), - true = erlang:register(emqx_cluster_rpc_handler, Pid4), + {ok, Pid4} = emqx_cluster_rpc_cleaner:start_link(100, 500), + true = erlang:register(emqx_cluster_rpc_cleaner, Pid4), {ok, [Pid1, Pid2, Pid3, Pid4]}. stop() -> @@ -293,7 +298,7 @@ stop() -> end || N <- [?NODE1, ?NODE2, ?NODE3] ], - gen_server:stop(emqx_cluster_rpc_handler, normal, 5000). + gen_server:stop(emqx_cluster_rpc_cleaner, normal, 5000). receive_msg(0, _Msg) -> ok; diff --git a/apps/emqx_conf/test/emqx_conf_app_SUITE.erl b/apps/emqx_conf/test/emqx_conf_app_SUITE.erl index dab4c4919..583405158 100644 --- a/apps/emqx_conf/test/emqx_conf_app_SUITE.erl +++ b/apps/emqx_conf/test/emqx_conf_app_SUITE.erl @@ -25,10 +25,9 @@ all() -> emqx_common_test_helpers:all(?MODULE). t_copy_conf_override_on_restarts(_Config) -> - net_kernel:start(['master@127.0.0.1', longnames]), ct:timetrap({seconds, 120}), snabbkaffe:fix_ct_logging(), - Cluster = cluster([core, core, core]), + Cluster = cluster([cluster_spec({core, 1}), cluster_spec({core, 2}), cluster_spec({core, 3})]), %% 1. Start all nodes Nodes = start_cluster(Cluster), @@ -42,7 +41,7 @@ t_copy_conf_override_on_restarts(_Config) -> %% crash and eventually all nodes should be ready. start_cluster_async(Cluster), - timer:sleep(15_000), + timer:sleep(15000), assert_config_load_done(Nodes), @@ -51,23 +50,48 @@ t_copy_conf_override_on_restarts(_Config) -> stop_cluster(Nodes) end. -t_copy_data_dir(_Config) -> +t_copy_new_data_dir(_Config) -> net_kernel:start(['master1@127.0.0.1', longnames]), ct:timetrap({seconds, 120}), snabbkaffe:fix_ct_logging(), - Cluster = cluster([{core, copy1}, {core, copy2}, {core, copy3}]), + Cluster = cluster([cluster_spec({core, 4}), cluster_spec({core, 5}), cluster_spec({core, 6})]), %% 1. Start all nodes [First | Rest] = Nodes = start_cluster(Cluster), try + File = "/configs/cluster.hocon", assert_config_load_done(Nodes), - rpc:call(First, ?MODULE, create_data_dir, []), + rpc:call(First, ?MODULE, create_data_dir, [File]), {[ok, ok, ok], []} = rpc:multicall(Nodes, application, stop, [emqx_conf]), {[ok, ok, ok], []} = rpc:multicall(Nodes, ?MODULE, set_data_dir_env, []), ok = rpc:call(First, application, start, [emqx_conf]), {[ok, ok], []} = rpc:multicall(Rest, application, start, [emqx_conf]), - assert_data_copy_done(Nodes), + assert_data_copy_done(Nodes, File), + stop_cluster(Nodes), + ok + after + stop_cluster(Nodes) + end. + +t_copy_deprecated_data_dir(_Config) -> + net_kernel:start(['master2@127.0.0.1', longnames]), + ct:timetrap({seconds, 120}), + snabbkaffe:fix_ct_logging(), + Cluster = cluster([cluster_spec({core, 7}), cluster_spec({core, 8}), cluster_spec({core, 9})]), + + %% 1. 
Start all nodes + [First | Rest] = Nodes = start_cluster(Cluster), + try + File = "/configs/cluster-override.conf", + assert_config_load_done(Nodes), + rpc:call(First, ?MODULE, create_data_dir, [File]), + {[ok, ok, ok], []} = rpc:multicall(Nodes, application, stop, [emqx_conf]), + {[ok, ok, ok], []} = rpc:multicall(Nodes, ?MODULE, set_data_dir_env, []), + ok = rpc:call(First, application, start, [emqx_conf]), + {[ok, ok], []} = rpc:multicall(Rest, application, start, [emqx_conf]), + + assert_data_copy_done(Nodes, File), stop_cluster(Nodes), ok after @@ -78,7 +102,7 @@ t_copy_data_dir(_Config) -> %% Helper functions %%------------------------------------------------------------------------------ -create_data_dir() -> +create_data_dir(File) -> Node = atom_to_list(node()), ok = filelib:ensure_dir(Node ++ "/certs/"), ok = filelib:ensure_dir(Node ++ "/authz/"), @@ -86,21 +110,32 @@ create_data_dir() -> ok = file:write_file(Node ++ "/certs/fake-cert", list_to_binary(Node)), ok = file:write_file(Node ++ "/authz/fake-authz", list_to_binary(Node)), Telemetry = <<"telemetry.enable = false">>, - ok = file:write_file(Node ++ "/configs/cluster-override.conf", Telemetry). + ok = file:write_file(Node ++ File, Telemetry). set_data_dir_env() -> Node = atom_to_list(node()), %% will create certs and authz dir ok = filelib:ensure_dir(Node ++ "/configs/"), + {ok, [ConfigFile]} = application:get_env(emqx, config_files), + NewConfigFile = ConfigFile ++ "." ++ Node, + {ok, _} = file:copy(ConfigFile, NewConfigFile), + Bin = iolist_to_binary(io_lib:format("node.config_files = [~p]~n", [NewConfigFile])), + ok = file:write_file(NewConfigFile, Bin, [append]), + DataDir = iolist_to_binary(io_lib:format("node.data_dir = ~p~n", [Node])), + ok = file:write_file(NewConfigFile, DataDir, [append]), + application:set_env(emqx, config_files, [NewConfigFile]), application:set_env(emqx, data_dir, Node), + %% We set env both cluster.hocon and cluster-override.conf, but only one will be used + application:set_env(emqx, cluster_hocon_file, Node ++ "/configs/cluster.hocon"), application:set_env(emqx, cluster_override_conf_file, Node ++ "/configs/cluster-override.conf"), ok. -assert_data_copy_done([First0 | Rest]) -> +assert_data_copy_done([First0 | Rest], File) -> First = atom_to_list(First0), {ok, FakeCertFile} = file:read_file(First ++ "/certs/fake-cert"), {ok, FakeAuthzFile} = file:read_file(First ++ "/authz/fake-authz"), - {ok, FakeOverrideFile} = file:read_file(First ++ "/configs/cluster-override.conf"), + {ok, FakeOverrideFile} = file:read_file(First ++ File), + {ok, ExpectFake} = hocon:binary(FakeOverrideFile), lists:foreach( fun(Node0) -> Node = atom_to_list(Node0), @@ -110,8 +145,8 @@ assert_data_copy_done([First0 | Rest]) -> #{node => Node} ), ?assertEqual( - {ok, FakeOverrideFile}, - file:read_file(Node ++ "/configs/cluster-override.conf"), + {ok, ExpectFake}, + hocon:files([Node ++ File]), #{node => Node} ), ?assertEqual( @@ -157,13 +192,15 @@ cluster(Specs) -> {env, Env}, {apps, [emqx_conf]}, {load_schema, false}, - {join_to, false}, + {join_to, true}, {env_handler, fun (emqx) -> application:set_env(emqx, boot_modules, []), - io:format("~p~p~n", [node(), application:get_all_env(emqx)]), ok; (_) -> ok end} ]). + +cluster_spec({Type, Num}) -> + {Type, list_to_atom(atom_to_list(?MODULE) ++ integer_to_list(Num))}. 
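The schema changes above rename several keys (`node.db_role` to `node.role`, `rpc.driver` to `rpc.protocol`, `cluster.etcd.ssl` to `cluster.etcd.ssl_options`, `log.file_handlers` to `log.file`) while keeping the old spellings working through `aliases`. A minimal sketch of how that back-compatibility could be checked (hypothetical snippet; it assumes, as the test module below does, that hocon/hocon_tconf are available and that a bare `node` block satisfies the required roots):

check_role_alias_sketch() ->
    Base = <<"node {name = \"emqx1@127.0.0.1\", cookie = \"emqxsecretcookie\", data_dir = \"data\"}\n">>,
    Old = <<Base/binary, "node.db_role = replicant\n">>,
    New = <<Base/binary, "node.role = replicant\n">>,
    Gen = fun(Bin) ->
        {ok, RichMap} = hocon:binary(Bin, #{format => richmap}),
        hocon_tconf:generate(emqx_conf_schema, RichMap)
    end,
    %% both spellings are expected to land on the same mria mapping
    replicant = proplists:get_value(node_role, proplists:get_value(mria, Gen(Old))),
    replicant = proplists:get_value(node_role, proplists:get_value(mria, Gen(New))),
    ok.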
diff --git a/apps/emqx_conf/test/emqx_conf_schema_tests.erl b/apps/emqx_conf/test/emqx_conf_schema_tests.erl index 3653b9d19..5aa45d9ad 100644 --- a/apps/emqx_conf/test/emqx_conf_schema_tests.erl +++ b/apps/emqx_conf/test/emqx_conf_schema_tests.erl @@ -6,6 +6,401 @@ -include_lib("eunit/include/eunit.hrl"). +%% erlfmt-ignore +-define(BASE_CONF, + """ + node { + name = \"emqx1@127.0.0.1\" + cookie = \"emqxsecretcookie\" + data_dir = \"data\" + } + cluster { + name = emqxcl + discovery_strategy = static + static.seeds = ~p + core_nodes = ~p + } + """). + +array_nodes_test() -> + ExpectNodes = ['emqx1@127.0.0.1', 'emqx2@127.0.0.1'], + lists:foreach( + fun(Nodes) -> + ConfFile = to_bin(?BASE_CONF, [Nodes, Nodes]), + {ok, Conf} = hocon:binary(ConfFile, #{format => richmap}), + ConfList = hocon_tconf:generate(emqx_conf_schema, Conf), + ClusterDiscovery = proplists:get_value( + cluster_discovery, proplists:get_value(ekka, ConfList) + ), + ?assertEqual( + {static, [{seeds, ExpectNodes}]}, + ClusterDiscovery, + Nodes + ), + ?assertEqual( + ExpectNodes, + proplists:get_value(core_nodes, proplists:get_value(mria, ConfList)), + Nodes + ) + end, + [["emqx1@127.0.0.1", "emqx2@127.0.0.1"], "emqx1@127.0.0.1, emqx2@127.0.0.1"] + ), + ok. + +%% erlfmt-ignore +-define(OUTDATED_LOG_CONF, + """ +log.console_handler { + burst_limit { + enable = true + max_count = 10000 + window_time = 1000 + } + chars_limit = unlimited + drop_mode_qlen = 3000 + enable = true + flush_qlen = 8000 + formatter = text + level = warning + max_depth = 100 + overload_kill { + enable = true + mem_size = 31457280 + qlen = 20000 + restart_after = 5000 + } + single_line = true + supervisor_reports = error + sync_mode_qlen = 100 + time_offset = \"+02:00\" +} +log.file_handlers { + default { + burst_limit { + enable = true + max_count = 10000 + window_time = 1000 + } + chars_limit = unlimited + drop_mode_qlen = 3000 + enable = true + file = \"log/my-emqx.log\" + flush_qlen = 8000 + formatter = text + level = debug + max_depth = 100 + max_size = \"1024MB\" + overload_kill { + enable = true + mem_size = 31457280 + qlen = 20000 + restart_after = 5000 + } + rotation {count = 20, enable = true} + single_line = true + supervisor_reports = error + sync_mode_qlen = 100 + time_offset = \"+01:00\" + } +} + """ +). +-define(FORMATTER(TimeOffset), + {emqx_logger_textfmt, #{ + chars_limit => unlimited, + depth => 100, + single_line => true, + template => [time, " [", level, "] ", msg, "\n"], + time_offset => TimeOffset + }} +). + +-define(FILTERS, [{drop_progress_reports, {fun logger_filters:progress/2, stop}}]). +-define(LOG_CONFIG, #{ + burst_limit_enable => true, + burst_limit_max_count => 10000, + burst_limit_window_time => 1000, + drop_mode_qlen => 3000, + flush_qlen => 8000, + overload_kill_enable => true, + overload_kill_mem_size => 31457280, + overload_kill_qlen => 20000, + overload_kill_restart_after => 5000, + sync_mode_qlen => 100 +}). + +outdated_log_test() -> + validate_log(?OUTDATED_LOG_CONF). 
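%% A hedged sketch (hypothetical, not a case in this suite) of the union the
%% new "log.file" field accepts: a single handler object, which the
%% ensure_file_handlers converter wraps under the "default" name, or a map of
%% named handlers. Both spellings below should therefore generate the same
%% kernel logger handlers; ?BASE_CONF and to_bin/2 are this module's helpers.
log_file_union_sketch() ->
    BaseConf = to_bin(?BASE_CONF, ["emqx1@127.0.0.1", "emqx1@127.0.0.1"]),
    Flat = <<"log.file {enable = true, to = \"log/emqx.log\", level = warning}">>,
    Named = <<"log.file.default {enable = true, to = \"log/emqx.log\", level = warning}">>,
    [FlatHandlers, NamedHandlers] =
        lists:map(
            fun(LogConf) ->
                Conf = <<BaseConf/binary, LogConf/binary>>,
                {ok, Map} = hocon:binary(Conf, #{format => richmap}),
                ConfList = hocon_tconf:generate(emqx_conf_schema, Map),
                proplists:get_value(logger, proplists:get_value(kernel, ConfList))
            end,
            [Flat, Named]
        ),
    ?assertEqual(FlatHandlers, NamedHandlers).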
+ +validate_log(Conf) -> + BaseConf = to_bin(?BASE_CONF, ["emqx1@127.0.0.1", "emqx1@127.0.0.1"]), + Conf0 = <<BaseConf/binary, (list_to_binary(Conf))/binary>>, + {ok, ConfMap0} = hocon:binary(Conf0, #{format => richmap}), + ConfList = hocon_tconf:generate(emqx_conf_schema, ConfMap0), + Kernel = proplists:get_value(kernel, ConfList), + + ?assertEqual(silent, proplists:get_value(error_logger, Kernel)), + ?assertEqual(debug, proplists:get_value(logger_level, Kernel)), + Loggers = proplists:get_value(logger, Kernel), + FileHandler = lists:keyfind(logger_disk_log_h, 3, Loggers), + ?assertEqual( + {handler, default, logger_disk_log_h, #{ + config => ?LOG_CONFIG#{ + type => wrap, + file => "log/my-emqx.log", + max_no_bytes => 1073741824, + max_no_files => 20 + }, + filesync_repeat_interval => no_repeat, + filters => ?FILTERS, + formatter => ?FORMATTER("+01:00"), + level => debug + }}, + FileHandler + ), + ConsoleHandler = lists:keyfind(logger_std_h, 3, Loggers), + ?assertEqual( + {handler, console, logger_std_h, #{ + config => ?LOG_CONFIG#{type => standard_io}, + filters => ?FILTERS, + formatter => ?FORMATTER("+02:00"), + level => warning + }}, + ConsoleHandler + ). + +%% erlfmt-ignore +-define(KERNEL_LOG_CONF, """ log.console { enable = true formatter = text level = warning time_offset = \"+02:00\" } log.file { enable = false file = \"log/xx-emqx.log\" formatter = text level = debug rotation_count = 20 rotation_size = \"1024MB\" time_offset = \"+01:00\" } log.file_handlers.default { enable = true file = \"log/my-emqx.log\" } """ +). + +log_test() -> validate_log(?KERNEL_LOG_CONF). + +%% erlfmt-ignore log_rotation_count_limit_test() -> Format = """ log.file { enable = true to = \"log/emqx.log\" formatter = text level = debug rotation = {count = ~w} rotation_size = \"1024MB\" } """, BaseConf = to_bin(?BASE_CONF, ["emqx1@127.0.0.1", "emqx1@127.0.0.1"]), lists:foreach(fun({Conf, Count}) -> Conf0 = <<BaseConf/binary, Conf/binary>>, {ok, ConfMap0} = hocon:binary(Conf0, #{format => richmap}), ConfList = hocon_tconf:generate(emqx_conf_schema, ConfMap0), Kernel = proplists:get_value(kernel, ConfList), Loggers = proplists:get_value(logger, Kernel), ?assertMatch( {handler, default, logger_disk_log_h, #{ config := #{max_no_files := Count} }}, lists:keyfind(logger_disk_log_h, 3, Loggers) ) end, [{to_bin(Format, [1]), 1}, {to_bin(Format, [128]), 128}]), lists:foreach(fun({Conf, Count}) -> Conf0 = <<BaseConf/binary, Conf/binary>>, {ok, ConfMap0} = hocon:binary(Conf0, #{format => richmap}), ?assertThrow({emqx_conf_schema, [#{kind := validation_error, mismatches := #{"handler_name" := #{kind := validation_error, path := "log.file.default.rotation_count", reason := #{expected_type := "1..128"}, value := Count} }}]}, hocon_tconf:generate(emqx_conf_schema, ConfMap0)) end, [{to_bin(Format, [0]), 0}, {to_bin(Format, [129]), 129}]). + +%% erlfmt-ignore +-define(BASE_AUTHN_ARRAY, """ authentication = [ {backend = \"http\" body {password = \"${password}\", username = \"${username}\"} connect_timeout = \"15s\" enable_pipelining = 100 headers {\"content-type\" = \"application/json\"} mechanism = \"password_based\" method = \"~p\" pool_size = 8 request_timeout = \"5s\" ssl {enable = ~p, verify = \"verify_peer\"} url = \"~ts\" } ] """ +). + +-define(ERROR(Reason), + {emqx_conf_schema, [ + #{ + kind := validation_error, + reason := integrity_validation_failure, + result := _, + validation_name := Reason + } + ]} +).
+ +authn_validations_test() -> + BaseConf = to_bin(?BASE_CONF, ["emqx1@127.0.0.1", "emqx1@127.0.0.1"]), + + OKHttps = to_bin(?BASE_AUTHN_ARRAY, [post, true, <<"https://127.0.0.1:8080">>]), + Conf0 = <<BaseConf/binary, OKHttps/binary>>, + {ok, ConfMap0} = hocon:binary(Conf0, #{format => richmap}), + {_, Res0} = hocon_tconf:map_translate(emqx_conf_schema, ConfMap0, #{format => richmap}), + Headers0 = authentication_headers(Res0), + ?assertEqual(<<"application/json">>, maps:get(<<"content-type">>, Headers0)), + %% the accept header is added by the converter + ?assertEqual(<<"application/json">>, maps:get(<<"accept">>, Headers0)), + + OKHttp = to_bin(?BASE_AUTHN_ARRAY, [post, false, <<"http://127.0.0.1:8080">>]), + Conf1 = <<BaseConf/binary, OKHttp/binary>>, + {ok, ConfMap1} = hocon:binary(Conf1, #{format => richmap}), + {_, Res1} = hocon_tconf:map_translate(emqx_conf_schema, ConfMap1, #{format => richmap}), + Headers1 = authentication_headers(Res1), + ?assertEqual(<<"application/json">>, maps:get(<<"content-type">>, Headers1), Headers1), + ?assertEqual(<<"application/json">>, maps:get(<<"accept">>, Headers1), Headers1), + + DisableSSLWithHttps = to_bin(?BASE_AUTHN_ARRAY, [post, false, <<"https://127.0.0.1:8080">>]), + Conf2 = <<BaseConf/binary, DisableSSLWithHttps/binary>>, + {ok, ConfMap2} = hocon:binary(Conf2, #{format => richmap}), + ?assertThrow( + ?ERROR(check_http_ssl_opts), + hocon_tconf:map_translate(emqx_conf_schema, ConfMap2, #{format => richmap}) + ), + + BadHeader = to_bin(?BASE_AUTHN_ARRAY, [get, true, <<"https://127.0.0.1:8080">>]), + Conf3 = <<BaseConf/binary, BadHeader/binary>>, + {ok, ConfMap3} = hocon:binary(Conf3, #{format => richmap}), + {_, Res3} = hocon_tconf:map_translate(emqx_conf_schema, ConfMap3, #{format => richmap}), + Headers3 = authentication_headers(Res3), + %% the content-type header is dropped for the GET method + ?assertEqual(false, maps:is_key(<<"content-type">>, Headers3), Headers3), + ?assertEqual(<<"application/json">>, maps:get(<<"accept">>, Headers3), Headers3), + + BadHeaderWithTuple = binary:replace(BadHeader, [<<"[">>, <<"]">>], <<"">>, [global]), + Conf4 = <<BaseConf/binary, BadHeaderWithTuple/binary>>, + {ok, ConfMap4} = hocon:binary(Conf4, #{format => richmap}), + {_, Res4} = hocon_tconf:map_translate(emqx_conf_schema, ConfMap4, #{}), + Headers4 = authentication_headers(Res4), + ?assertEqual(false, maps:is_key(<<"content-type">>, Headers4), Headers4), + ?assertEqual(<<"application/json">>, maps:get(<<"accept">>, Headers4), Headers4), + ok. + +%% erlfmt-ignore +-define(LISTENERS, """ listeners.ssl.default.bind = 9999 listeners.wss.default.bind = 9998 listeners.wss.default.ssl_options.cacertfile = \"mytest/certs/cacert.pem\" listeners.wss.new.bind = 9997 listeners.wss.new.websocket.mqtt_path = \"/my-mqtt\" """ +).
+ +listeners_test() -> + BaseConf = to_bin(?BASE_CONF, ["emqx1@127.0.0.1", "emqx1@127.0.0.1"]), + + Conf = <<BaseConf/binary, (list_to_binary(?LISTENERS))/binary>>, + {ok, ConfMap0} = hocon:binary(Conf, #{format => richmap}), + {_, ConfMap} = hocon_tconf:map_translate(emqx_conf_schema, ConfMap0, #{format => richmap}), + #{<<"listeners">> := Listeners} = hocon_util:richmap_to_map(ConfMap), + #{ + <<"tcp">> := #{<<"default">> := Tcp}, + <<"ws">> := #{<<"default">> := Ws}, + <<"wss">> := #{<<"default">> := DefaultWss, <<"new">> := NewWss}, + <<"ssl">> := #{<<"default">> := Ssl} + } = Listeners, + DefaultCacertFile = <<"${EMQX_ETC_DIR}/certs/cacert.pem">>, + DefaultCertFile = <<"${EMQX_ETC_DIR}/certs/cert.pem">>, + DefaultKeyFile = <<"${EMQX_ETC_DIR}/certs/key.pem">>, + ?assertMatch( + #{ + <<"bind">> := {{0, 0, 0, 0}, 1883}, + <<"enabled">> := true + }, + Tcp + ), + ?assertMatch( + #{ + <<"bind">> := {{0, 0, 0, 0}, 8083}, + <<"enabled">> := true, + <<"websocket">> := #{<<"mqtt_path">> := "/mqtt"} + }, + Ws + ), + ?assertMatch( + #{ + <<"bind">> := 9999, + <<"ssl_options">> := #{ + <<"cacertfile">> := DefaultCacertFile, + <<"certfile">> := DefaultCertFile, + <<"keyfile">> := DefaultKeyFile + } + }, + Ssl + ), + ?assertMatch( + #{ + <<"bind">> := 9998, + <<"websocket">> := #{<<"mqtt_path">> := "/mqtt"}, + <<"ssl_options">> := + #{ + <<"cacertfile">> := <<"mytest/certs/cacert.pem">>, + <<"certfile">> := DefaultCertFile, + <<"keyfile">> := DefaultKeyFile + } + }, + DefaultWss + ), + ?assertMatch( + #{ + <<"bind">> := 9997, + <<"websocket">> := #{<<"mqtt_path">> := "/my-mqtt"}, + <<"ssl_options">> := + #{ + <<"cacertfile">> := DefaultCacertFile, + <<"certfile">> := DefaultCertFile, + <<"keyfile">> := DefaultKeyFile + } + }, + NewWss + ), + ok. + +authentication_headers(Conf) -> + [#{<<"headers">> := Headers}] = hocon_maps:get("authentication", Conf), + Headers. + doc_gen_test() -> %% the json file too large to encode. { @@ -26,3 +421,6 @@ doc_gen_test() -> ok end }. + +to_bin(Format, Args) -> + iolist_to_binary(io_lib:format(Format, Args)). diff --git a/apps/emqx_connector/README.md b/apps/emqx_connector/README.md index 7ef3a8c4a..6baba29de 100644 --- a/apps/emqx_connector/README.md +++ b/apps/emqx_connector/README.md @@ -14,7 +14,7 @@ An MySQL connector can be used as following: ``` (emqx@127.0.0.1)5> emqx_resource:list_instances_verbose(). [#{config => - #{auto_reconnect => true,cacertfile => [],certfile => [], + #{cacertfile => [],certfile => [], database => "mqtt",keyfile => [],password => "public", pool_size => 1, server => {{127,0,0,1},3306}, diff --git a/apps/emqx_connector/docker-ct b/apps/emqx_connector/docker-ct index 7b9a4c068..5a4056927 100644 --- a/apps/emqx_connector/docker-ct +++ b/apps/emqx_connector/docker-ct @@ -1,4 +1,5 @@ mongo redis +redis_cluster mysql pgsql diff --git a/apps/emqx_connector/i18n/emqx_connector_api.conf b/apps/emqx_connector/i18n/emqx_connector_api.conf deleted file mode 100644 index 2f468fff0..000000000 --- a/apps/emqx_connector/i18n/emqx_connector_api.conf +++ /dev/null @@ -1,86 +0,0 @@ -emqx_connector_api { - - id { - desc { - en: "The connector ID. Must be of format {type}:{name}" - zh: "连接器 ID, 格式必须为 {type}:{name}" - } - label: { - en: "Connector ID" - zh: "连接器 ID" - } - } - - conn_test_post { - desc { - en: """ -Test creating a new connector by given ID<br/>
-The ID must be of format '{type}:{name}' -""" - zh: """ -通过给定的 ID 测试创建一个新的连接器
-ID 的格式必须为“{type}:{name}” -""" - } - label: { - en: "Create Test Connector" - zh: "创建测试连接器" - } - } - - conn_get { - desc { - en: "List all connectors" - zh: "列出所有连接器" - } - label: { - en: "List All Connectors" - zh: "列出所有连接器" - } - } - - conn_post { - desc { - en: "Create a new connector" - zh: "创建一个新的连接器" - } - label: { - en: "Create Connector" - zh: "创建连接器" - } - } - - conn_id_get { - desc { - en: "Get the connector by ID" - zh: "通过 ID 获取连接器" - } - label: { - en: "Get Connector" - zh: "获取连接器" - } - } - - conn_id_put { - desc { - en: "Update an existing connector by ID" - zh: "通过 ID 更新一个连接器" - } - label: { - en: "Update Connector" - zh: "更新连接器" - } - } - - conn_id_delete { - desc { - en: "Delete a connector by ID" - zh: "通过 ID 删除一个连接器" - } - label: { - en: "Delete Connector" - zh: "删除连接器" - } - } - -} diff --git a/apps/emqx_connector/i18n/emqx_connector_http.conf b/apps/emqx_connector/i18n/emqx_connector_http.conf deleted file mode 100644 index 7583a38ed..000000000 --- a/apps/emqx_connector/i18n/emqx_connector_http.conf +++ /dev/null @@ -1,149 +0,0 @@ -emqx_connector_http { - base_url { - desc { - en: """ -The base URL is the URL includes only the scheme, host and port.
-When send an HTTP request, the real URL to be used is the concatenation of the base URL and the -path parameter (passed by the emqx_resource:query/2,3 or provided by the request parameter).
-For example: `http://localhost:9901/` -""" - zh: """ -base URL 只包含host和port。
-发送HTTP请求时,真实的URL是由base URL 和 path parameter连接而成(通过emqx_resource:query/2,3传递,或者通过请求参数提供)。
-示例:`http://localhost:9901/` -""" - } - label: { - en: "Base Url" - zh: "Base Url" - } - } - - connect_timeout { - desc { - en: "The timeout when connecting to the HTTP server." - zh: "连接HTTP服务器的超时时间。" - } - label: { - en: "Connect Timeout" - zh: "连接超时" - } - } - - max_retries { - desc { - en: "Max retry times if error on sending request." - zh: "请求出错时的最大重试次数。" - } - label: { - en: "Max Retries" - zh: "最大重试次数" - } - } - - pool_type { - desc { - en: "The type of the pool. Can be one of `random`, `hash`." - zh: "连接池的类型,可用类型有`random`, `hash`。" - } - label: { - en: "Pool Type" - zh: "连接池类型" - } - } - - pool_size { - desc { - en: "The pool size." - zh: "连接池大小。" - } - label: { - en: "Pool Size" - zh: "连接池大小" - } - } - - enable_pipelining { - desc { - en: "A positive integer. Whether to send HTTP requests continuously, when set to 1, it means that after each HTTP request is sent, you need to wait for the server to return and then continue to send the next request." - zh: "正整数,设置最大可发送的异步 HTTP 请求数量。当设置为 1 时,表示每次发送完成 HTTP 请求后都需要等待服务器返回,再继续发送下一个请求。" - } - label: { - en: "HTTP Pipelineing" - zh: "HTTP 管道" - } - } - - request { - desc { - en: """ -If the request is provided, the caller can send HTTP requests via -emqx_resource:query(ResourceId, {send_message, BridgeId, Message}) -""" - zh: """ -如果提供了请求,调用者可以通过以下方式发送 HTTP 请求 -emqx_resource:query(ResourceId, {send_message, BridgeId, Message}) -""" - } - label: { - en: "Request" - zh: "HTTP 请求" - } - } - - method { - desc { - en: "HTTP method." - zh: "HTTP 请求方法。" - } - label: { - en: "HTTP Method" - zh: "HTTP 请求方法" - } - } - - path { - desc { - en: "URL path." - zh: "HTTP请求路径。" - } - label: { - en: "URL Path" - zh: "HTTP请求路径" - } - } - - body { - desc { - en: "HTTP request body." - zh: "HTTP请求报文主体。" - } - label: { - en: "HTTP Body" - zh: "HTTP请求报文主体" - } - } - - headers { - desc { - en: "List of HTTP headers." - zh: "HTTP 头字段列表。" - } - label: { - en: "HTTP Headers" - zh: "HTTP 头字段列表" - } - } - - request_timeout { - desc { - en: "HTTP request timeout." - zh: "HTTP 请求超时。" - } - label: { - en: "Request Timeout" - zh: "HTTP 请求超时" - } - } - -} diff --git a/apps/emqx_connector/i18n/emqx_connector_mongo.conf b/apps/emqx_connector/i18n/emqx_connector_mongo.conf deleted file mode 100644 index e43f7bc33..000000000 --- a/apps/emqx_connector/i18n/emqx_connector_mongo.conf +++ /dev/null @@ -1,197 +0,0 @@ -emqx_connector_mongo { - - single_mongo_type { - desc { - en: "Standalone instance." - zh: "Standalone模式。" - } - label: { - en: "Standalone instance" - zh: "Standalone模式" - } - } - - rs_mongo_type { - desc { - en: "Replica set." - zh: "Replica set模式。" - } - label: { - en: "Replica set" - zh: "Replica set模式" - } - } - - sharded_mongo_type { - desc { - en: "Sharded cluster." - zh: "Sharded cluster模式。" - } - label: { - en: "Sharded cluster" - zh: "Sharded cluster模式" - } - } - - auth_source { - desc { - en: "Database name associated with the user's credentials." - zh: "与用户证书关联的数据库名称。" - } - label: { - en: "Auth Source" - zh: "认证源" - } - } - - server { - desc { - en: """ -The IPv4 or IPv6 address or the hostname to connect to.
-A host entry has the following form: `Host[:Port]`.
-The MongoDB default port 27017 is used if `[:Port]` is not specified. -""" - zh: """ -将要连接的 IPv4 或 IPv6 地址,或者主机名。
-主机名具有以下形式:`Host[:Port]`。
-如果未指定 `[:Port]`,则使用 MongoDB 默认端口 27017。 -""" - } - label: { - en: "Server Host" - zh: "服务器地址" - } - } - - servers { - desc { - en: """ -A Node list for Cluster to connect to. The nodes should be separated with commas, such as: `Node[,Node].` -For each Node should be: The IPv4 or IPv6 address or the hostname to connect to. -A host entry has the following form: `Host[:Port]`. -The MongoDB default port 27017 is used if `[:Port]` is not specified. -""" - zh: """ -集群将要连接的节点列表。 节点之间用逗号分隔,如:`Node[,Node].` -每个节点的配置为:将要连接的 IPv4 或 IPv6 地址或主机名。 -主机名具有以下形式:`Host[:Port]`。 -如果未指定 `[:Port]`,则使用 MongoDB 默认端口 27017。 -""" - } - label: { - en: "Servers" - zh: "服务器列表" - } - } - - w_mode { - desc { - en: "Write mode." - zh: "写模式。" - } - label: { - en: "Write Mode" - zh: "写模式" - } - } - - r_mode { - desc { - en: "Read mode." - zh: "读模式。" - } - label: { - en: "Read Mode" - zh: "读模式" - } - } - - duration { - desc { - en: "Time interval, such as timeout or TTL." - zh: "时间间隔,例如超时或 TTL。" - } - label: { - en: "Time Interval" - zh: "时间间隔" - } - } - - max_overflow { - desc { - en: "Max Overflow." - zh: "最大溢出。" - } - label: { - en: "Max Overflow" - zh: "最大溢出" - } - } - - replica_set_name { - desc { - en: "Name of the replica set." - zh: "副本集的名称。" - } - label: { - en: "Replica Set Name" - zh: "副本集名称" - } - } - - srv_record { - desc { - en: "Use DNS SRV record." - zh: "使用 DNS SRV 记录。" - } - label: { - en: "Srv Record" - zh: "SRV 记录" - } - } - - desc_single { - desc { - en: """Settings for a single MongoDB instance.""" - zh: """配置 Single 模式""" - } - label: { - en: "Setting Single MongoDB" - zh: "配置 Single 模式" - } - } - - desc_rs { - desc { - en: """Settings for replica set.""" - zh: """配置 Replica Set""" - } - label: { - en: "Setting Replica Set" - zh: "配置 Replica Set" - } - } - - desc_sharded { - desc { - en: """Settings for sharded cluster.""" - zh: """配置 Sharded Cluster""" - } - label: { - en: "Setting Sharded Cluster" - zh: "配置 Sharded Cluster" - } - } - - desc_topology { - desc { - en: """Topology of MongoDB.""" - zh: """配置 Topology""" - } - label: { - en: "Setting Topology" - zh: "配置 Topology" - } - } - -} diff --git a/apps/emqx_connector/i18n/emqx_connector_mqtt.conf b/apps/emqx_connector/i18n/emqx_connector_mqtt.conf deleted file mode 100644 index 5ade54670..000000000 --- a/apps/emqx_connector/i18n/emqx_connector_mqtt.conf +++ /dev/null @@ -1,35 +0,0 @@ -emqx_connector_mqtt { - num_of_bridges { - desc { - en: "The current number of bridges that are using this connector." - zh: "当前使用此连接器的网桥数量。" - } - label: { - en: "Num of Bridges" - zh: "网桥数量" - } - } - - type { - desc { - en: "The Connector Type." - zh: "连接器类型。" - } - label: { - en: "Connector Type" - zh: "连接器类型" - } - } - - name { - desc { - en: "Connector name, used as a human-readable description of the connector." - zh: "连接器名称,人类可读的连接器描述。" - } - label: { - en: "Connector Name" - zh: "连接器名称" - } - } - -} diff --git a/apps/emqx_connector/i18n/emqx_connector_mqtt_schema.conf b/apps/emqx_connector/i18n/emqx_connector_mqtt_schema.conf deleted file mode 100644 index 6f573bb73..000000000 --- a/apps/emqx_connector/i18n/emqx_connector_mqtt_schema.conf +++ /dev/null @@ -1,351 +0,0 @@ -emqx_connector_mqtt_schema { - ingress_desc { - desc { - en: """The ingress config defines how this bridge receive messages from the remote MQTT broker, and then - send them to the local broker.
- Template with variables is allowed in 'remote.qos', 'local.topic', 'local.qos', 'local.retain', 'local.payload'.
- NOTE: if this bridge is used as the input of a rule, and also 'local.topic' is - configured, then messages got from the remote broker will be sent to both the 'local.topic' and - the rule.""" - zh: """入口配置定义了该桥接如何从远程 MQTT Broker 接收消息,然后将消息发送到本地 Broker。
- 以下字段中允许使用带有变量的模板:'remote.qos', 'local.topic', 'local.qos', 'local.retain', 'local.payload'。
- 注意:如果此桥接被用作规则的输入,并且配置了 'local.topic',则从远程代理获取的消息将同时被发送到 'local.topic' 和规则。 - """ - } - label: { - en: "Ingress Configs" - zh: "入方向配置" - } - } - - egress_desc { - desc { - en: """The egress config defines how this bridge forwards messages from the local broker to the remote broker.
-Template with variables is allowed in 'remote.topic', 'local.qos', 'local.retain', 'local.payload'.
-NOTE: if this bridge is used as the action of a rule, and also 'local.topic' -is configured, then both the data got from the rule and the MQTT messages that matches -'local.topic' will be forwarded.""" - zh: """出口配置定义了该桥接如何将消息从本地 Broker 转发到远程 Broker。 -以下字段中允许使用带有变量的模板:'remote.topic', 'local.qos', 'local.retain', 'local.payload'。
-注意:如果此桥接被用作规则的动作,并且配置了 'local.topic',则从规则输出的数据以及匹配到 'local.topic' 的 MQTT 消息都会被转发。 - """ - } - label: { - en: "Egress Configs" - zh: "出方向配置" - } - } - - ingress_remote { - desc { - en: """The configs about subscribing to the remote broker.""" - zh: """订阅远程 Broker 相关的配置。""" - } - label: { - en: "Remote Configs" - zh: "远程配置" - } - } - - ingress_local { - desc { - en: """The configs about sending message to the local broker.""" - zh: """发送消息到本地 Broker 相关的配置。""" - } - label: { - en: "Local Configs" - zh: "本地配置" - } - } - - egress_remote { - desc { - en: """The configs about sending message to the remote broker.""" - zh: """发送消息到远程 Broker 相关的配置。""" - } - label: { - en: "Remote Configs" - zh: "远程配置" - } - } - - egress_local { - desc { - en: """The configs about receiving messages from local broker.""" - zh: """如何从本地 Broker 接收消息相关的配置。""" - } - label: { - en: "Local Configs" - zh: "本地配置" - } - } - - mode { - desc { - en: """ -The mode of the MQTT Bridge.
- -- cluster_shareload: create an MQTT connection on each node in the emqx cluster.
-In 'cluster_shareload' mode, the incoming load from the remote broker is shared by -using shared subscription.
-Note that the 'clientid' is suffixed by the node name, this is to avoid -clientid conflicts between different nodes. And we can only use shared subscription -topic filters for remote.topic of ingress connections. -""" - zh: """ -MQTT 桥的模式。
- -- cluster_shareload:在 emqx 集群的每个节点上创建一个 MQTT 连接。
-在“cluster_shareload”模式下,来自远程代理的传入负载通过共享订阅的方式接收。
-请注意,clientid 以节点名称为后缀,这是为了避免不同节点之间的 clientid 冲突。 -而且对于入口连接的 remote.topic,我们只能使用共享订阅主题过滤器。 -""" - } - label: { - en: "MQTT Bridge Mode" - zh: "MQTT 桥接模式" - } - } - - server { - desc { - en: "The host and port of the remote MQTT broker" - zh: "远程 MQTT Broker的主机和端口。" - } - label: { - en: "Broker IP And Port" - zh: "Broker主机和端口" - } - } - - bridge_mode { - desc { - en: """ -If enable bridge mode. -NOTE: This setting is only for MQTT protocol version older than 5.0, and the remote MQTT -broker MUST support this feature. - """ - zh: """ -是否启用 Bridge Mode。 -注意:此设置只针对 MQTT 协议版本 < 5.0 有效,并且需要远程 MQTT Broker 支持 Bridge Mode。 - """ - } - label { - en: "Bridge Mode" - zh: "Bridge 模式" - } - } - - proto_ver { - desc { - en: "The MQTT protocol version" - zh: "MQTT 协议版本" - } - label: { - en: "Protocol Version" - zh: "协议版本" - } - } - - username { - desc { - en: "The username of the MQTT protocol" - zh: "MQTT 协议的用户名" - } - label: { - en: "Username" - zh: "用户名" - } - } - - password { - desc { - en: "The password of the MQTT protocol" - zh: "MQTT 协议的密码" - } - label: { - en: "Password" - zh: "密码" - } - } - - clean_start { - desc { - en: "The clean-start or the clean-session of the MQTT protocol" - zh: "MQTT 清除会话" - } - label: { - en: "Clean Session" - zh: "清除会话" - } - } - - max_inflight { - desc { - en: "Max inflight (sent, but un-acked) messages of the MQTT protocol" - zh: "MQTT 协议的最大飞行(已发送但未确认)消息" - } - label: { - en: "Max Inflight Message" - zh: "最大飞行消息" - } - } - - ingress_remote_topic { - desc { - en: "Receive messages from which topic of the remote broker" - zh: "从远程broker的哪个topic接收消息" - } - label: { - en: "Remote Topic" - zh: "远程主题" - } - } - - ingress_remote_qos { - desc { - en: "The QoS level to be used when subscribing to the remote broker" - zh: "订阅远程borker时要使用的 QoS 级别" - } - label: { - en: "Remote QoS" - zh: "远程 QoS" - } - } - - ingress_local_topic { - desc { - en: """ -Send messages to which topic of the local broker.
-Template with variables is allowed. -""" - zh: """ -向本地broker的哪个topic发送消息。
-允许使用带有变量的模板。 -""" - } - label: { - en: "Local Topic" - zh: "本地主题" - } - } - - ingress_local_qos { - desc { - en: """ -The QoS of the MQTT message to be sent.
-Template with variables is allowed. -""" - zh: """ -待发送 MQTT 消息的 QoS。
-允许使用带有变量的模板。 -""" - } - label: { - en: "Local QoS" - zh: "本地 QoS" - } - } - - egress_local_topic { - desc { - en: "The local topic to be forwarded to the remote broker" - zh: "要转发到远程broker的本地主题" - } - label: { - en: "Local Topic" - zh: "本地主题" - } - } - - egress_remote_topic { - desc { - en: """ -Forward to which topic of the remote broker.
-Template with variables is allowed. -""" - zh: """ -转发到远程broker的哪个topic。
-允许使用带有变量的模板。 -""" - } - label: { - en: "Remote Topic" - zh: "远程主题" - } - } - - egress_remote_qos { - desc { - en: """ -The QoS of the MQTT message to be sent.
-Template with variables is allowed. -""" - zh: """ -待发送 MQTT 消息的 QoS。
-允许使用带有变量的模板。 -""" - } - label: { - en: "Remote QoS" - zh: "远程 QoS" - } - } - - retain { - desc { - en: """ -The 'retain' flag of the MQTT message to be sent.
-Template with variables is allowed. -""" - zh: """ -要发送的 MQTT 消息的“保留”标志。
-允许使用带有变量的模板。 -""" - } - label: { - en: "Retain Flag" - zh: "保留消息标志" - } - } - - payload { - desc { - en: """ -The payload of the MQTT message to be sent.
-Template with variables is allowed. -""" - zh: """ -要发送的 MQTT 消息的负载。
-允许使用带有变量的模板。
-"""
- }
- label: {
- en: "Payload"
- zh: "消息负载"
- }
- }
-
- server_configs {
- desc {
- en: """Configs related to the server."""
- zh: """服务器相关的配置。"""
- }
- label: {
- en: "Server Configs"
- zh: "服务配置"
- }
- }
-
- clientid_prefix {
- desc {
- en: """Optional prefix to prepend to the clientid used by egress bridges."""
- zh: """可选的前缀,会被加在出口桥接所使用的 clientid 之前。"""
- }
- label: {
- en: "Clientid Prefix"
- zh: "客户端 ID 前缀"
- }
- }
-
-}
diff --git a/apps/emqx_connector/i18n/emqx_connector_mysql.conf b/apps/emqx_connector/i18n/emqx_connector_mysql.conf
deleted file mode 100644
index 499caae12..000000000
--- a/apps/emqx_connector/i18n/emqx_connector_mysql.conf
+++ /dev/null
@@ -1,22 +0,0 @@
-emqx_connector_mysql {
-
- server {
- desc {
- en: """
-The IPv4 or IPv6 address or the hostname to connect to.
-A host entry has the following form: `Host[:Port]`.
-The MySQL default port 3306 is used if `[:Port]` is not specified. -""" - zh: """ -将要连接的 IPv4 或 IPv6 地址,或者主机名。
-主机名具有以下形式:`Host[:Port]`。
-如果未指定 `[:Port]`,则使用 MySQL 默认端口 3306。 -""" - } - label: { - en: "Server Host" - zh: "服务器地址" - } - } - -} diff --git a/apps/emqx_connector/i18n/emqx_connector_pgsql.conf b/apps/emqx_connector/i18n/emqx_connector_pgsql.conf deleted file mode 100644 index 6aa792070..000000000 --- a/apps/emqx_connector/i18n/emqx_connector_pgsql.conf +++ /dev/null @@ -1,22 +0,0 @@ -emqx_connector_pgsql { - - server { - desc { - en: """ -The IPv4 or IPv6 address or the hostname to connect to.
-A host entry has the following form: `Host[:Port]`.
-The PostgreSQL default port 5432 is used if `[:Port]` is not specified. -""" - zh: """ -将要连接的 IPv4 或 IPv6 地址,或者主机名。
-主机名具有以下形式:`Host[:Port]`。
-如果未指定 `[:Port]`,则使用 PostgreSQL 默认端口 5432。 -""" - } - label: { - en: "Server Host" - zh: "服务器地址" - } - } - -} diff --git a/apps/emqx_connector/i18n/emqx_connector_redis.conf b/apps/emqx_connector/i18n/emqx_connector_redis.conf deleted file mode 100644 index 228d0805a..000000000 --- a/apps/emqx_connector/i18n/emqx_connector_redis.conf +++ /dev/null @@ -1,99 +0,0 @@ -emqx_connector_redis { - - single { - desc { - en: "Single mode" - zh: "单机模式。" - } - label: { - en: "Single Mode" - zh: "单机模式" - } - } - - cluster { - desc { - en: "Cluster mode" - zh: "集群模式。" - } - label: { - en: "Cluster Mode" - zh: "集群模式" - } - } - - sentinel { - desc { - en: "Sentinel mode" - zh: "哨兵模式。" - } - label: { - en: "Sentinel Mode" - zh: "哨兵模式" - } - } - - sentinel_desc { - desc { - en: "The cluster name in Redis sentinel mode." - zh: "Redis 哨兵模式下的集群名称。" - } - label: { - en: "Cluster Name" - zh: "集群名称" - } - } - - server { - desc { - en: """ -The IPv4 or IPv6 address or the hostname to connect to.
-A host entry has the following form: `Host[:Port]`.
-The Redis default port 6379 is used if `[:Port]` is not specified. -""" - zh: """ -将要连接的 IPv4 或 IPv6 地址,或者主机名。
-主机名具有以下形式:`Host[:Port]`。
-如果未指定 `[:Port]`,则使用 Redis 默认端口 6379。
-"""
- }
- label: {
- en: "Server Host"
- zh: "服务器地址"
- }
- }
-
- servers {
- desc {
- en: """
-A Node list for Cluster to connect to. The nodes should be separated with commas, such as: `Node[,Node].`
-Each Node should be: the IPv4 or IPv6 address or the hostname to connect to.
-A host entry has the following form: `Host[:Port]`.
-The Redis default port 6379 is used if `[:Port]` is not specified.
-"""
- zh: """
-
-集群将要连接的节点列表。节点之间用逗号分隔,如:`Node[,Node].`
-每个节点的配置为:将要连接的 IPv4 或 IPv6 地址或主机名。
-主机名具有以下形式:`Host[:Port]`。
-如果未指定 `[:Port]`,则使用 Redis 默认端口 6379。
-"""
- }
- label: {
- en: "Servers"
- zh: "服务器列表"
- }
- }
-
- database {
- desc {
- en: "Redis database ID."
- zh: "Redis 数据库 ID。"
- }
- label: {
- en: "Database ID"
- zh: "数据库 ID"
- }
- }
-
-}
diff --git a/apps/emqx_connector/i18n/emqx_connector_schema_lib.conf b/apps/emqx_connector/i18n/emqx_connector_schema_lib.conf
deleted file mode 100644
index f5caf29c4..000000000
--- a/apps/emqx_connector/i18n/emqx_connector_schema_lib.conf
+++ /dev/null
@@ -1,80 +0,0 @@
-emqx_connector_schema_lib {
-
- ssl {
- desc {
- en: "SSL connection settings."
- zh: "SSL 连接设置。"
- }
- label: {
- en: "Enable SSL"
- zh: "启用SSL"
- }
- }
-
- prepare_statement {
- desc {
- en: "Key-value list of SQL prepared statements."
- zh: "SQL 预处理语句列表。"
- }
- label: {
- en: "SQL Prepared Statements List"
- zh: "SQL 预处理语句列表"
- }
- }
-
- database_desc {
- desc {
- en: "Database name."
- zh: "数据库名字。"
- }
- label: {
- en: "Database Name"
- zh: "数据库名字"
- }
- }
-
- pool_size {
- desc {
- en: "Size of the connection pool."
- zh: "连接池大小。"
- }
- label: {
- en: "Pool Size"
- zh: "连接池大小"
- }
- }
-
- username {
- desc {
- en: "EMQX's username in the external database."
- zh: "外部数据库的用户名。"
- }
- label: {
- en: "Username"
- zh: "用户名"
- }
- }
-
- password {
- desc {
- en: "EMQX's password in the external database."
- zh: "外部数据库的密码。"
- }
- label: {
- en: "Password"
- zh: "密码"
- }
- }
-
- auto_reconnect {
- desc {
- en: "Enable automatic reconnect to the database."
- zh: "自动重连数据库。"
- }
- label: {
- en: "Auto Reconnect Database"
- zh: "自动重连数据库"
- }
- }
-
-}
diff --git a/apps/emqx_connector/include/emqx_connector.hrl b/apps/emqx_connector/include/emqx_connector.hrl
index 96b6ba4d6..cdb6ddd92 100644
--- a/apps/emqx_connector/include/emqx_connector.hrl
+++ b/apps/emqx_connector/include/emqx_connector.hrl
@@ -23,6 +23,9 @@
 -define(MONGO_DEFAULT_PORT, 27017).
 -define(REDIS_DEFAULT_PORT, 6379).
 -define(PGSQL_DEFAULT_PORT, 5432).
+-define(CLICKHOUSE_DEFAULT_PORT, 8123).
+
+-define(AUTO_RECONNECT_INTERVAL, 2).
 -define(SERVERS_DESC,
 "A Node list for Cluster to connect to. The nodes should be separated with commas, such as: `Node[,Node].`<br/>
" diff --git a/apps/emqx_connector/rebar.config b/apps/emqx_connector/rebar.config index 98490a91c..03be87356 100644 --- a/apps/emqx_connector/rebar.config +++ b/apps/emqx_connector/rebar.config @@ -7,21 +7,14 @@ {deps, [ {emqx, {path, "../emqx"}}, + {emqx_utils, {path, "../emqx_utils"}}, {emqx_resource, {path, "../emqx_resource"}}, {eldap2, {git, "https://github.com/emqx/eldap2", {tag, "v0.2.2"}}}, - {mysql, {git, "https://github.com/emqx/mysql-otp", {tag, "1.7.1"}}}, - {epgsql, {git, "https://github.com/emqx/epgsql", {tag, "4.7-emqx.2"}}}, - %% NOTE: mind poolboy version when updating mongodb-erlang version - {mongodb, {git, "https://github.com/emqx/mongodb-erlang", {tag, "v3.0.13"}}}, - %% NOTE: mind poolboy version when updating eredis_cluster version - {eredis_cluster, {git, "https://github.com/emqx/eredis_cluster", {tag, "0.7.1"}}}, - %% mongodb-erlang uses a special fork https://github.com/comtihon/poolboy.git - %% (which has overflow_ttl feature added). - %% However, it references `{branch, "master}` (commit 9c06a9a on 2021-04-07). - %% By accident, We have always been using the upstream fork due to - %% eredis_cluster's dependency getting resolved earlier. - %% Here we pin 1.5.2 to avoid surprises in the future. - {poolboy, {git, "https://github.com/emqx/poolboy.git", {tag, "1.5.2"}}} + {mysql, {git, "https://github.com/emqx/mysql-otp", {tag, "1.7.2"}}}, + {epgsql, {git, "https://github.com/emqx/epgsql", {tag, "4.7.0.1"}}}, + {mongodb, {git, "https://github.com/emqx/mongodb-erlang", {tag, "v3.0.19"}}}, + %% NOTE: mind ecpool version when updating eredis_cluster version + {eredis_cluster, {git, "https://github.com/emqx/eredis_cluster", {tag, "0.8.1"}}} ]}. {shell, [ diff --git a/apps/emqx_connector/src/emqx_connector.app.src b/apps/emqx_connector/src/emqx_connector.app.src index 65ef49c6b..db55c7032 100644 --- a/apps/emqx_connector/src/emqx_connector.app.src +++ b/apps/emqx_connector/src/emqx_connector.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_connector, [ {description, "EMQX Data Integration Connectors"}, - {vsn, "0.1.11"}, + {vsn, "0.1.22"}, {registered, []}, {mod, {emqx_connector_app, []}}, {applications, [ diff --git a/apps/emqx_connector/src/emqx_connector_http.erl b/apps/emqx_connector/src/emqx_connector_http.erl index 18a246edb..bb822a60a 100644 --- a/apps/emqx_connector/src/emqx_connector_http.erl +++ b/apps/emqx_connector/src/emqx_connector_http.erl @@ -47,7 +47,7 @@ namespace/0 ]). --export([check_ssl_opts/2, validate_method/1]). +-export([check_ssl_opts/2, validate_method/1, join_paths/2]). -type connect_timeout() :: emqx_schema:duration() | infinity. -type pool_type() :: random | hash. 
@@ -87,7 +87,7 @@ fields(config) -> sc( emqx_schema:duration_ms(), #{ - default => "15s", + default => <<"15s">>, desc => ?DESC("connect_timeout") } )}, @@ -209,7 +209,7 @@ on_start( ?SLOG(info, #{ msg => "starting_http_connector", connector => InstId, - config => Config + config => redact(Config) }), {Transport, TransportOpts} = case Scheme of @@ -219,7 +219,7 @@ on_start( SSLOpts = emqx_tls_lib:to_client_opts(maps:get(ssl, Config)), {tls, SSLOpts} end, - NTransportOpts = emqx_misc:ipv6_probe(TransportOpts), + NTransportOpts = emqx_utils:ipv6_probe(TransportOpts), PoolOpts = [ {host, Host}, {port, Port}, @@ -231,16 +231,16 @@ on_start( {transport_opts, NTransportOpts}, {enable_pipelining, maps:get(enable_pipelining, Config, ?DEFAULT_PIPELINE_SIZE)} ], - PoolName = emqx_plugin_libs_pool:pool_name(InstId), State = #{ - pool_name => PoolName, + pool_name => InstId, + pool_type => PoolType, host => Host, port => Port, connect_timeout => ConnectTimeout, base_path => BasePath, request => preprocess_request(maps:get(request, Config, undefined)) }, - case ehttpc_sup:start_pool(PoolName, PoolOpts) of + case ehttpc_sup:start_pool(InstId, PoolOpts) of {ok, _} -> {ok, State}; {error, {already_started, _}} -> {ok, State}; {error, Reason} -> {error, Reason} @@ -264,9 +264,10 @@ on_query(InstId, {send_message, Msg}, State) -> path := Path, body := Body, headers := Headers, - request_timeout := Timeout, - max_retries := Retry + request_timeout := Timeout } = process_request(Request, Msg), + %% bridge buffer worker has retry, do not let ehttpc retry + Retry = 0, on_query( InstId, {undefined, Method, {Path, Headers, Body}, Timeout, Retry}, @@ -274,33 +275,50 @@ on_query(InstId, {send_message, Msg}, State) -> ) end; on_query(InstId, {Method, Request}, State) -> - on_query(InstId, {undefined, Method, Request, 5000, 2}, State); + %% TODO: Get retry from State + on_query(InstId, {undefined, Method, Request, 5000, _Retry = 2}, State); on_query(InstId, {Method, Request, Timeout}, State) -> - on_query(InstId, {undefined, Method, Request, Timeout, 2}, State); + %% TODO: Get retry from State + on_query(InstId, {undefined, Method, Request, Timeout, _Retry = 2}, State); on_query( InstId, {KeyOrNum, Method, Request, Timeout, Retry}, - #{pool_name := PoolName, base_path := BasePath} = State + #{base_path := BasePath} = State ) -> ?TRACE( "QUERY", "http_connector_received", - #{request => Request, connector => InstId, state => State} + #{ + request => redact(Request), + connector => InstId, + state => redact(State) + } ), NRequest = formalize_request(Method, BasePath, Request), + Worker = resolve_pool_worker(State, KeyOrNum), case ehttpc:request( - case KeyOrNum of - undefined -> PoolName; - _ -> {PoolName, KeyOrNum} - end, + Worker, Method, NRequest, Timeout, Retry ) of - {error, Reason} when Reason =:= econnrefused; Reason =:= timeout -> + {error, Reason} when + Reason =:= econnrefused; + Reason =:= timeout; + Reason =:= {shutdown, normal}; + Reason =:= {shutdown, closed} + -> + ?SLOG(warning, #{ + msg => "http_connector_do_request_failed", + reason => Reason, + connector => InstId + }), + {error, {recoverable_error, Reason}}; + {error, {closed, _Message} = Reason} -> + %% _Message = "The connection was lost." 
 ?SLOG(warning, #{
 msg => "http_connector_do_request_failed",
 reason => Reason,
@@ -310,7 +328,7 @@ on_query(
 {error, Reason} = Result ->
 ?SLOG(error, #{
 msg => "http_connector_do_request_failed",
- request => NRequest,
+ request => redact(NRequest),
 reason => Reason,
 connector => InstId
 }),
@@ -322,15 +340,17 @@ on_query(
 {ok, StatusCode, Headers} ->
 ?SLOG(error, #{
 msg => "http connector do request, received error response",
- request => NRequest,
+ note => "the body will be redacted due to security reasons",
+ request => redact_request(NRequest),
 connector => InstId,
 status_code => StatusCode
 }),
 {error, #{status_code => StatusCode, headers => Headers}};
 {ok, StatusCode, Headers, Body} ->
 ?SLOG(error, #{
- msg => "http connector do request, received error response",
- request => NRequest,
+ msg => "http connector do request, received error response.",
+ note => "the body will be redacted due to security reasons",
+ request => redact_request(NRequest),
 connector => InstId,
 status_code => StatusCode
 }),
@@ -361,37 +381,44 @@ on_query_async(
 InstId,
 {KeyOrNum, Method, Request, Timeout},
 ReplyFunAndArgs,
- #{pool_name := PoolName, base_path := BasePath} = State
+ #{base_path := BasePath} = State
 ) ->
+ Worker = resolve_pool_worker(State, KeyOrNum),
 ?TRACE(
 "QUERY_ASYNC",
 "http_connector_received",
- #{request => Request, connector => InstId, state => State}
+ #{
+ request => redact(Request),
+ connector => InstId,
+ state => redact(State)
+ }
 ),
 NRequest = formalize_request(Method, BasePath, Request),
- Worker =
- case KeyOrNum of
- undefined -> ehttpc_pool:pick_worker(PoolName);
- _ -> ehttpc_pool:pick_worker(PoolName, KeyOrNum)
- end,
 ok = ehttpc:request_async(
 Worker,
 Method,
 NRequest,
 Timeout,
 {fun ?MODULE:reply_delegator/2, [ReplyFunAndArgs]}
- ).
+ ),
+ {ok, Worker}.
+
+resolve_pool_worker(State, undefined) ->
+ resolve_pool_worker(State, self());
+resolve_pool_worker(#{pool_name := PoolName} = State, Key) ->
+ case maps:get(pool_type, State, random) of
+ random ->
+ ehttpc_pool:pick_worker(PoolName);
+ hash ->
+ ehttpc_pool:pick_worker(PoolName, Key)
+ end.

 on_get_status(_InstId, #{pool_name := PoolName, connect_timeout := Timeout} = State) ->
 case do_get_status(PoolName, Timeout) of
- true ->
- connected;
- false ->
- ?SLOG(error, #{
- msg => "http_connector_get_status_failed",
- state => State
- }),
- disconnected
+ ok ->
+ {connected, State};
+ {error, Reason} ->
+ {disconnected, State, Reason}
 end.

 do_get_status(PoolName, Timeout) ->
@@ -400,24 +427,32 @@ do_get_status(PoolName, Timeout) ->
 fun(Worker) ->
 case ehttpc:health_check(Worker, Timeout) of
 ok ->
- true;
- {error, Reason} ->
+ ok;
+ {error, Reason} = Error ->
 ?SLOG(error, #{
- msg => "ehttpc_health_check_failed",
- reason => Reason,
+ msg => "http_connector_get_status_failed",
+ reason => redact(Reason),
 worker => Worker
 }),
- false
+ Error
 end
 end,
- try emqx_misc:pmap(DoPerWorker, Workers, Timeout) of
- [_ | _] = Status ->
- lists:all(fun(St) -> St =:= true end, Status);
- [] ->
- false
+ try emqx_utils:pmap(DoPerWorker, Workers, Timeout) of
+ % we crash in case of an empty result list since we don't know what to do in that case
+ [_ | _] = Results ->
+ case [E || {error, _} = E <- Results] of
+ [] ->
+ ok;
+ Errors ->
+ hd(Errors)
+ end
 catch
 exit:timeout ->
- false
+ ?SLOG(error, #{
+ msg => "http_connector_pmap_failed",
+ reason => timeout
+ }),
+ {error, timeout}
 end.
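For illustration, the reworked health check reduces one result per pool worker to a single status: ok only if every worker passed, otherwise the first error; an empty result list matches no clause and crashes on purpose. A minimal, self-contained sketch of that reduction (plain lists stand in for emqx_utils:pmap/3 results; not part of the change):

%% Illustration only: reduce per-worker health-check results to one status.
reduce_health([_ | _] = Results) ->
    case [E || {error, _} = E <- Results] of
        [] -> ok;
        Errors -> hd(Errors)
    end.
%% reduce_health([ok, ok])               => ok
%% reduce_health([ok, {error, timeout}]) => {error, timeout}
%% reduce_health([])                     => crashes (function_clause)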
%%-------------------------------------------------------------------- @@ -431,15 +466,14 @@ preprocess_request( #{ method := Method, path := Path, - body := Body, headers := Headers } = Req ) -> #{ - method => emqx_plugin_libs_rule:preproc_tmpl(bin(Method)), + method => emqx_plugin_libs_rule:preproc_tmpl(to_bin(Method)), path => emqx_plugin_libs_rule:preproc_tmpl(Path), - body => emqx_plugin_libs_rule:preproc_tmpl(Body), - headers => preproc_headers(Headers), + body => maybe_preproc_tmpl(body, Req), + headers => wrap_auth_header(preproc_headers(Headers)), request_timeout => maps:get(request_timeout, Req, 30000), max_retries => maps:get(max_retries, Req, 2) }. @@ -449,8 +483,8 @@ preproc_headers(Headers) when is_map(Headers) -> fun(K, V, Acc) -> [ { - emqx_plugin_libs_rule:preproc_tmpl(bin(K)), - emqx_plugin_libs_rule:preproc_tmpl(bin(V)) + emqx_plugin_libs_rule:preproc_tmpl(to_bin(K)), + emqx_plugin_libs_rule:preproc_tmpl(to_bin(V)) } | Acc ] @@ -462,13 +496,49 @@ preproc_headers(Headers) when is_list(Headers) -> lists:map( fun({K, V}) -> { - emqx_plugin_libs_rule:preproc_tmpl(bin(K)), - emqx_plugin_libs_rule:preproc_tmpl(bin(V)) + emqx_plugin_libs_rule:preproc_tmpl(to_bin(K)), + emqx_plugin_libs_rule:preproc_tmpl(to_bin(V)) } end, Headers ). +wrap_auth_header(Headers) -> + lists:map(fun maybe_wrap_auth_header/1, Headers). + +maybe_wrap_auth_header({[{str, Key}] = StrKey, Val}) -> + {_, MaybeWrapped} = maybe_wrap_auth_header({Key, Val}), + {StrKey, MaybeWrapped}; +maybe_wrap_auth_header({Key, Val} = Header) when + is_binary(Key), (size(Key) =:= 19 orelse size(Key) =:= 13) +-> + %% We check the size of potential keys in the guard above and consider only + %% those that match the number of characters of either "Authorization" or + %% "Proxy-Authorization". + case try_bin_to_lower(Key) of + <<"authorization">> -> + {Key, emqx_secret:wrap(Val)}; + <<"proxy-authorization">> -> + {Key, emqx_secret:wrap(Val)}; + _Other -> + Header + end; +maybe_wrap_auth_header(Header) -> + Header. + +try_bin_to_lower(Bin) -> + try iolist_to_binary(string:lowercase(Bin)) of + LowercaseBin -> LowercaseBin + catch + _:_ -> Bin + end. + +maybe_preproc_tmpl(Key, Conf) -> + case maps:get(Key, Conf, undefined) of + undefined -> undefined; + Val -> emqx_plugin_libs_rule:preproc_tmpl(Val) + end. + process_request( #{ method := MethodTks, @@ -487,8 +557,8 @@ process_request( request_timeout => ReqTimeout }. -process_request_body([], Msg) -> - emqx_json:encode(Msg); +process_request_body(undefined, Msg) -> + emqx_utils_json:encode(Msg); process_request_body(BodyTks, Msg) -> emqx_plugin_libs_rule:proc_tmpl(BodyTks, Msg). @@ -497,7 +567,7 @@ proc_headers(HeaderTks, Msg) -> fun({K, V}) -> { emqx_plugin_libs_rule:proc_tmpl(K, Msg), - emqx_plugin_libs_rule:proc_tmpl(V, Msg) + emqx_plugin_libs_rule:proc_tmpl(emqx_secret:unwrap(V), Msg) } end, HeaderTks @@ -525,22 +595,138 @@ formalize_request(Method, BasePath, {Path, Headers, _Body}) when -> formalize_request(Method, BasePath, {Path, Headers}); formalize_request(_Method, BasePath, {Path, Headers, Body}) -> - {filename:join(BasePath, Path), Headers, Body}; + {join_paths(BasePath, Path), Headers, Body}; formalize_request(_Method, BasePath, {Path, Headers}) -> - {filename:join(BasePath, Path), Headers}. + {join_paths(BasePath, Path), Headers}. -bin(Bin) when is_binary(Bin) -> +%% By default, we cannot treat HTTP paths as "file" or "resource" paths, +%% because an HTTP server may handle paths like +%% "/a/b/c/", "/a/b/c" and "/a//b/c" differently. 
+%%
+%% So we try to avoid unnecessary path normalization.
+%%
+%% See also: `join_paths_test_/0`
+join_paths(Path1, Path2) ->
+ do_join_paths(lists:reverse(to_list(Path1)), to_list(Path2)).
+
+%% "abc/" + "/cde"
+do_join_paths([$/ | Path1], [$/ | Path2]) ->
+ lists:reverse(Path1) ++ [$/ | Path2];
+%% "abc/" + "cde"
+do_join_paths([$/ | Path1], Path2) ->
+ lists:reverse(Path1) ++ [$/ | Path2];
+%% "abc" + "/cde"
+do_join_paths(Path1, [$/ | Path2]) ->
+ lists:reverse(Path1) ++ [$/ | Path2];
+%% "abc" + "cde"
+do_join_paths(Path1, Path2) ->
+ lists:reverse(Path1) ++ [$/ | Path2].
+
+to_list(List) when is_list(List) -> List;
+to_list(Bin) when is_binary(Bin) -> binary_to_list(Bin).
+
+to_bin(Bin) when is_binary(Bin) ->
 Bin;
-bin(Str) when is_list(Str) ->
+to_bin(Str) when is_list(Str) ->
 list_to_binary(Str);
-bin(Atom) when is_atom(Atom) ->
+to_bin(Atom) when is_atom(Atom) ->
 atom_to_binary(Atom, utf8).

 reply_delegator(ReplyFunAndArgs, Result) ->
 case Result of
- {error, Reason} when Reason =:= econnrefused; Reason =:= timeout ->
+ %% The normal reason happens when the HTTP connection times out before
+ %% the request has been fully processed
+ {error, Reason} when
+ Reason =:= econnrefused;
+ Reason =:= timeout;
+ Reason =:= normal;
+ Reason =:= {shutdown, normal};
+ Reason =:= {shutdown, closed}
+ ->
+ Result1 = {error, {recoverable_error, Reason}},
+ emqx_resource:apply_reply_fun(ReplyFunAndArgs, Result1);
+ {error, {closed, _Message} = Reason} ->
+ %% _Message = "The connection was lost."
 Result1 = {error, {recoverable_error, Reason}},
 emqx_resource:apply_reply_fun(ReplyFunAndArgs, Result1);
 _ ->
 emqx_resource:apply_reply_fun(ReplyFunAndArgs, Result)
 end.
+
+%% The HOCON schema system may generate sensitive keys with this format
+is_sensitive_key([{str, StringKey}]) ->
+ is_sensitive_key(StringKey);
+is_sensitive_key(Atom) when is_atom(Atom) ->
+ is_sensitive_key(erlang:atom_to_binary(Atom));
+is_sensitive_key(Bin) when is_binary(Bin), (size(Bin) =:= 19 orelse size(Bin) =:= 13) ->
+ %% We want to convert this to lowercase since the http header fields
+ %% are case insensitive, which means that a user of the Webhook bridge
+ %% can write this field name in many different ways.
+ case try_bin_to_lower(Bin) of
+ <<"authorization">> -> true;
+ <<"proxy-authorization">> -> true;
+ _ -> false
+ end;
+is_sensitive_key(_) ->
+ false.
+
+%% Function that will do a deep traversal of Data and remove sensitive
+%% information (i.e., passwords)
+redact(Data) ->
+ emqx_utils:redact(Data, fun is_sensitive_key/1).
+
+%% Because the body may contain sensitive data, the redact function does not
+%% scan binary data, and we cannot know the body format or where the
+%% sensitive data will be, the easiest way to keep the data secure is to
+%% redact the whole body.
+redact_request({Path, Headers}) ->
+ {Path, redact(Headers)};
+redact_request({Path, Headers, _Body}) ->
+ {Path, redact(Headers), <<"******">>}.
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+ +redact_test_() -> + TestData1 = [ + {<<"content-type">>, <<"application/json">>}, + {<<"Authorization">>, <<"Basic YWxhZGRpbjpvcGVuc2VzYW1l">>} + ], + + TestData2 = #{ + headers => + [ + {[{str, <<"content-type">>}], [{str, <<"application/json">>}]}, + {[{str, <<"Authorization">>}], [{str, <<"Basic YWxhZGRpbjpvcGVuc2VzYW1l">>}]} + ] + }, + [ + ?_assert(is_sensitive_key(<<"Authorization">>)), + ?_assert(is_sensitive_key(<<"AuthoriZation">>)), + ?_assert(is_sensitive_key('AuthoriZation')), + ?_assert(is_sensitive_key(<<"PrOxy-authoRizaTion">>)), + ?_assert(is_sensitive_key('PrOxy-authoRizaTion')), + ?_assertNot(is_sensitive_key(<<"Something">>)), + ?_assertNot(is_sensitive_key(89)), + ?_assertNotEqual(TestData1, redact(TestData1)), + ?_assertNotEqual(TestData2, redact(TestData2)) + ]. + +join_paths_test_() -> + [ + ?_assertEqual("abc/cde", join_paths("abc", "cde")), + ?_assertEqual("abc/cde", join_paths("abc", "/cde")), + ?_assertEqual("abc/cde", join_paths("abc/", "cde")), + ?_assertEqual("abc/cde", join_paths("abc/", "/cde")), + + ?_assertEqual("/", join_paths("", "")), + ?_assertEqual("/cde", join_paths("", "cde")), + ?_assertEqual("/cde", join_paths("", "/cde")), + ?_assertEqual("/cde", join_paths("/", "cde")), + ?_assertEqual("/cde", join_paths("/", "/cde")), + + ?_assertEqual("//cde/", join_paths("/", "//cde/")), + ?_assertEqual("abc///cde/", join_paths("abc//", "//cde/")) + ]. + +-endif. diff --git a/apps/emqx_connector/src/emqx_connector_jwt_worker.erl b/apps/emqx_connector/src/emqx_connector_jwt_worker.erl index e51b9bbee..b13e74a4d 100644 --- a/apps/emqx_connector/src/emqx_connector_jwt_worker.erl +++ b/apps/emqx_connector/src/emqx_connector_jwt_worker.erl @@ -120,7 +120,7 @@ init(#{private_key := PrivateKeyPEM} = Config) -> handle_continue({make_key, PrivateKeyPEM}, State0) -> ?tp(connector_jwt_worker_make_key, #{state => State0}), - case jose_jwk:from_pem(PrivateKeyPEM) of + try jose_jwk:from_pem(PrivateKeyPEM) of JWK = #jose_jwk{} -> State = State0#{jwk := JWK}, {noreply, State, {continue, create_token}}; @@ -135,6 +135,17 @@ handle_continue({make_key, PrivateKeyPEM}, State0) -> Error = {invalid_private_key, Error0}, ?tp(connector_jwt_worker_startup_error, #{error => Error}), {stop, {shutdown, {error, Error}}, State0} + catch + Kind:Error -> + ?tp( + error, + connector_jwt_worker_startup_error, + #{ + kind => Kind, + error => Error + } + ), + {stop, {shutdown, {error, Error}}, State0} end; handle_continue(create_token, State0) -> State = generate_and_store_jwt(State0), diff --git a/apps/emqx_connector/src/emqx_connector_ldap.erl b/apps/emqx_connector/src/emqx_connector_ldap.erl index 1cb65034d..c3e1db7d3 100644 --- a/apps/emqx_connector/src/emqx_connector_ldap.erl +++ b/apps/emqx_connector/src/emqx_connector_ldap.erl @@ -59,16 +59,25 @@ on_start( bind_password := BindPassword, timeout := Timeout, pool_size := PoolSize, - auto_reconnect := AutoReconn, ssl := SSL } = Config ) -> ?SLOG(info, #{ msg => "starting_ldap_connector", connector => InstId, - config => Config + config => emqx_utils:redact(Config) }), - Servers = emqx_schema:parse_servers(Servers0, ?LDAP_HOST_OPTIONS), + Servers1 = emqx_schema:parse_servers(Servers0, ?LDAP_HOST_OPTIONS), + Servers = + lists:map( + fun + (#{hostname := Host, port := Port0}) -> + {Host, Port0}; + (#{hostname := Host}) -> + Host + end, + Servers1 + ), SslOpts = case maps:get(enable, SSL) of true -> @@ -86,22 +95,21 @@ on_start( {bind_password, BindPassword}, {timeout, Timeout}, {pool_size, PoolSize}, - {auto_reconnect, 
reconn_interval(AutoReconn)} + {auto_reconnect, ?AUTO_RECONNECT_INTERVAL} ], - PoolName = emqx_plugin_libs_pool:pool_name(InstId), - case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Opts ++ SslOpts) of - ok -> {ok, #{poolname => PoolName, auto_reconnect => AutoReconn}}; + case emqx_resource_pool:start(InstId, ?MODULE, Opts ++ SslOpts) of + ok -> {ok, #{pool_name => InstId}}; {error, Reason} -> {error, Reason} end. -on_stop(InstId, #{poolname := PoolName}) -> +on_stop(InstId, #{pool_name := PoolName}) -> ?SLOG(info, #{ msg => "stopping_ldap_connector", connector => InstId }), - emqx_plugin_libs_pool:stop_pool(PoolName). + emqx_resource_pool:stop(PoolName). -on_query(InstId, {search, Base, Filter, Attributes}, #{poolname := PoolName} = State) -> +on_query(InstId, {search, Base, Filter, Attributes}, #{pool_name := PoolName} = State) -> Request = {Base, Filter, Attributes}, ?TRACE( "QUERY", @@ -129,9 +137,6 @@ on_query(InstId, {search, Base, Filter, Attributes}, #{poolname := PoolName} = S on_get_status(_InstId, _State) -> connected. -reconn_interval(true) -> 15; -reconn_interval(false) -> false. - search(Conn, Base, Filter, Attributes) -> eldap2:search(Conn, [ {base, Base}, diff --git a/apps/emqx_connector/src/emqx_connector_mongo.erl b/apps/emqx_connector/src/emqx_connector_mongo.erl index 407fa39a8..dde8652f0 100644 --- a/apps/emqx_connector/src/emqx_connector_mongo.erl +++ b/apps/emqx_connector/src/emqx_connector_mongo.erl @@ -68,7 +68,6 @@ fields(single) -> {mongo_type, #{ type => single, default => single, - required => true, desc => ?DESC("single_mongo_type") }}, {server, server()}, @@ -79,7 +78,6 @@ fields(rs) -> {mongo_type, #{ type => rs, default => rs, - required => true, desc => ?DESC("rs_mongo_type") }}, {servers, servers()}, @@ -92,7 +90,6 @@ fields(sharded) -> {mongo_type, #{ type => sharded, default => sharded, - required => true, desc => ?DESC("sharded_mongo_type") }}, {servers, servers()}, @@ -102,15 +99,22 @@ fields(topology) -> [ {pool_size, fun emqx_connector_schema_lib:pool_size/1}, {max_overflow, fun max_overflow/1}, - {overflow_ttl, fun duration/1}, - {overflow_check_period, fun duration/1}, - {local_threshold_ms, fun duration/1}, - {connect_timeout_ms, fun duration/1}, - {socket_timeout_ms, fun duration/1}, - {server_selection_timeout_ms, fun duration/1}, - {wait_queue_timeout_ms, fun duration/1}, - {heartbeat_frequency_ms, fun duration/1}, - {min_heartbeat_frequency_ms, fun duration/1} + {overflow_ttl, duration("overflow_ttl")}, + {overflow_check_period, duration("overflow_check_period")}, + {local_threshold_ms, duration("local_threshold")}, + {connect_timeout_ms, duration("connect_timeout")}, + {socket_timeout_ms, duration("socket_timeout")}, + {server_selection_timeout_ms, duration("server_selection_timeout")}, + {wait_queue_timeout_ms, duration("wait_queue_timeout")}, + {heartbeat_frequency_ms, + hoconsc:mk( + emqx_schema:duration_ms(), + #{ + default => <<"200s">>, + desc => ?DESC("heartbeat_period") + } + )}, + {min_heartbeat_frequency_ms, duration("min_heartbeat_period")} ]. 
desc(single) -> @@ -158,7 +162,7 @@ on_start( rs -> "starting_mongodb_replica_set_connector"; sharded -> "starting_mongodb_sharded_connector" end, - ?SLOG(info, #{msg => Msg, connector => InstId, config => Config}), + ?SLOG(info, #{msg => Msg, connector => InstId, config => emqx_utils:redact(Config)}), NConfig = #{hosts := Hosts} = maybe_resolve_srv_and_txt_records(Config), SslOpts = case maps:get(enable, SSL) of @@ -178,12 +182,11 @@ on_start( {options, init_topology_options(maps:to_list(Topology), [])}, {worker_options, init_worker_options(maps:to_list(NConfig), SslOpts)} ], - PoolName = emqx_plugin_libs_pool:pool_name(InstId), Collection = maps:get(collection, Config, <<"mqtt">>), - case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Opts) of + case emqx_resource_pool:start(InstId, ?MODULE, Opts) of ok -> {ok, #{ - poolname => PoolName, + pool_name => InstId, type => Type, collection => Collection }}; @@ -191,17 +194,17 @@ on_start( {error, Reason} end. -on_stop(InstId, #{poolname := PoolName}) -> +on_stop(InstId, #{pool_name := PoolName}) -> ?SLOG(info, #{ msg => "stopping_mongodb_connector", connector => InstId }), - emqx_plugin_libs_pool:stop_pool(PoolName). + emqx_resource_pool:stop(PoolName). on_query( InstId, {send_message, Document}, - #{poolname := PoolName, collection := Collection} = State + #{pool_name := PoolName, collection := Collection} = State ) -> Request = {insert, Collection, Document}, ?TRACE( @@ -230,7 +233,7 @@ on_query( on_query( InstId, {Action, Collection, Filter, Projector}, - #{poolname := PoolName} = State + #{pool_name := PoolName} = State ) -> Request = {Action, Collection, Filter, Projector}, ?TRACE( @@ -259,8 +262,7 @@ on_query( {ok, Result} end. --dialyzer({nowarn_function, [on_get_status/2]}). -on_get_status(InstId, #{poolname := PoolName} = _State) -> +on_get_status(InstId, #{pool_name := PoolName}) -> case health_check(PoolName) of true -> ?tp(debug, emqx_connector_mongo_health_check, #{ @@ -277,8 +279,10 @@ on_get_status(InstId, #{poolname := PoolName} = _State) -> end. health_check(PoolName) -> - emqx_plugin_libs_pool:health_check_ecpool_workers( - PoolName, fun ?MODULE:check_worker_health/1, ?HEALTH_CHECK_TIMEOUT + timer:seconds(1) + emqx_resource_pool:health_check_workers( + PoolName, + fun ?MODULE:check_worker_health/1, + ?HEALTH_CHECK_TIMEOUT + timer:seconds(1) ). %% =================================================================== @@ -403,10 +407,12 @@ r_mode(desc) -> ?DESC("r_mode"); r_mode(default) -> master; r_mode(_) -> undefined. -duration(type) -> emqx_schema:duration_ms(); -duration(desc) -> ?DESC("duration"); -duration(required) -> false; -duration(_) -> undefined. +duration(Desc) -> + #{ + type => emqx_schema:duration_ms(), + required => false, + desc => ?DESC(Desc) + }. max_overflow(type) -> non_neg_integer(); max_overflow(desc) -> ?DESC("max_overflow"); @@ -531,4 +537,9 @@ format_hosts(Hosts) -> lists:map(fun format_host/1, Hosts). parse_servers(HoconValue) -> - emqx_schema:parse_servers(HoconValue, ?MONGO_HOST_OPTIONS). + lists:map( + fun(#{hostname := Host, port := Port}) -> + {Host, Port} + end, + emqx_schema:parse_servers(HoconValue, ?MONGO_HOST_OPTIONS) + ). 
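For reference, the lists:map/2 pass above adapts the return shape of emqx_schema:parse_servers/2 (a list of maps, as the function heads show) to the {Host, Port} tuples the MongoDB driver expects. A hedged sketch of the conversion, with a made-up input:

%% Illustration only: flatten parsed server maps into host/port pairs.
to_host_port_pairs(Servers) ->
    [{Host, Port} || #{hostname := Host, port := Port} <- Servers].
%% to_host_port_pairs([#{hostname => "127.0.0.1", port => 27017}])
%% => [{"127.0.0.1", 27017}]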
diff --git a/apps/emqx_connector/src/emqx_connector_mqtt.erl b/apps/emqx_connector/src/emqx_connector_mqtt.erl index 6c73a14c0..5cafd2d50 100644 --- a/apps/emqx_connector/src/emqx_connector_mqtt.erl +++ b/apps/emqx_connector/src/emqx_connector_mqtt.erl @@ -15,6 +15,8 @@ %%-------------------------------------------------------------------- -module(emqx_connector_mqtt). +-include("emqx_connector.hrl"). + -include_lib("typerefl/include/types.hrl"). -include_lib("hocon/include/hoconsc.hrl"). -include_lib("emqx/include/logger.hrl"). @@ -43,6 +45,8 @@ on_get_status/2 ]). +-export([on_async_result/2]). + -behaviour(hocon_schema). -import(hoconsc, [mk/2]). @@ -103,16 +107,15 @@ init([]) -> {ok, {SupFlag, []}}. bridge_spec(Config) -> + {Name, NConfig} = maps:take(name, Config), #{ - id => maps:get(name, Config), - start => {emqx_connector_mqtt_worker, start_link, [Config]}, - restart => permanent, - shutdown => 5000, - type => worker, - modules => [emqx_connector_mqtt_worker] + id => Name, + start => {emqx_connector_mqtt_worker, start_link, [Name, NConfig]}, + restart => temporary, + shutdown => 5000 }. --spec bridges() -> [{node(), map()}]. +-spec bridges() -> [{_Name, _Status}]. bridges() -> [ {Name, emqx_connector_mqtt_worker:status(Name)} @@ -136,24 +139,23 @@ drop_bridge(Name) -> %% When use this bridge as a data source, ?MODULE:on_message_received will be called %% if the bridge received msgs from the remote broker. on_message_received(Msg, HookPoint, ResId) -> - emqx_resource:inc_received(ResId), + emqx_resource_metrics:received_inc(ResId), emqx:run_hook(HookPoint, [Msg]). %% =================================================================== callback_mode() -> async_if_possible. -on_start(InstId, Conf) -> - InstanceId = binary_to_atom(InstId, utf8), +on_start(InstanceId, Conf) -> ?SLOG(info, #{ msg => "starting_mqtt_connector", connector => InstanceId, - config => Conf + config => emqx_utils:redact(Conf) }), BasicConf = basic_config(Conf), BridgeConf = BasicConf#{ name => InstanceId, - clientid => clientid(InstId, Conf), - subscriptions => make_sub_confs(maps:get(ingress, Conf, undefined), Conf, InstId), + clientid => clientid(InstanceId, Conf), + subscriptions => make_sub_confs(maps:get(ingress, Conf, undefined), Conf, InstanceId), forwards => make_forward_confs(maps:get(egress, Conf, undefined)) }, case ?MODULE:create_bridge(BridgeConf) of @@ -187,42 +189,71 @@ on_stop(_InstId, #{name := InstanceId}) -> on_query(_InstId, {send_message, Msg}, #{name := InstanceId}) -> ?TRACE("QUERY", "send_msg_to_remote_node", #{message => Msg, connector => InstanceId}), - emqx_connector_mqtt_worker:send_to_remote(InstanceId, Msg). - -on_query_async( - _InstId, - {send_message, Msg}, - {ReplyFun, Args}, - #{name := InstanceId} -) -> - ?TRACE("QUERY", "async_send_msg_to_remote_node", #{message => Msg, connector => InstanceId}), - emqx_connector_mqtt_worker:send_to_remote_async(InstanceId, Msg, {ReplyFun, Args}). - -on_get_status(_InstId, #{name := InstanceId, bridge_conf := Conf}) -> - AutoReconn = maps:get(auto_reconnect, Conf, true), - case emqx_connector_mqtt_worker:status(InstanceId) of - connected -> connected; - _ when AutoReconn == true -> connecting; - _ when AutoReconn == false -> disconnected + case emqx_connector_mqtt_worker:send_to_remote(InstanceId, Msg) of + ok -> + ok; + {error, Reason} -> + classify_error(Reason) end. 
+on_query_async(_InstId, {send_message, Msg}, CallbackIn, #{name := InstanceId}) -> + ?TRACE("QUERY", "async_send_msg_to_remote_node", #{message => Msg, connector => InstanceId}), + Callback = {fun on_async_result/2, [CallbackIn]}, + case emqx_connector_mqtt_worker:send_to_remote_async(InstanceId, Msg, Callback) of + ok -> + ok; + {ok, Pid} -> + {ok, Pid}; + {error, Reason} -> + classify_error(Reason) + end. + +on_async_result(Callback, ok) -> + apply_callback_function(Callback, ok); +on_async_result(Callback, {ok, _} = Ok) -> + apply_callback_function(Callback, Ok); +on_async_result(Callback, {error, Reason}) -> + apply_callback_function(Callback, classify_error(Reason)). + +apply_callback_function(F, Result) when is_function(F) -> + erlang:apply(F, [Result]); +apply_callback_function({F, A}, Result) when is_function(F), is_list(A) -> + erlang:apply(F, A ++ [Result]); +apply_callback_function({M, F, A}, Result) when is_atom(M), is_atom(F), is_list(A) -> + erlang:apply(M, F, A ++ [Result]). + +on_get_status(_InstId, #{name := InstanceId}) -> + emqx_connector_mqtt_worker:status(InstanceId). + +classify_error(disconnected = Reason) -> + {error, {recoverable_error, Reason}}; +classify_error({disconnected, _RC, _} = Reason) -> + {error, {recoverable_error, Reason}}; +classify_error({shutdown, _} = Reason) -> + {error, {recoverable_error, Reason}}; +classify_error(shutdown = Reason) -> + {error, {recoverable_error, Reason}}; +classify_error(Reason) -> + {error, {unrecoverable_error, Reason}}. + ensure_mqtt_worker_started(InstanceId, BridgeConf) -> - case emqx_connector_mqtt_worker:ensure_started(InstanceId) of - ok -> {ok, #{name => InstanceId, bridge_conf => BridgeConf}}; - {error, Reason} -> {error, Reason} + case emqx_connector_mqtt_worker:connect(InstanceId) of + {ok, Properties} -> + {ok, #{name => InstanceId, config => BridgeConf, props => Properties}}; + {error, Reason} -> + {error, Reason} end. make_sub_confs(EmptyMap, _Conf, _) when map_size(EmptyMap) == 0 -> undefined; make_sub_confs(undefined, _Conf, _) -> undefined; -make_sub_confs(SubRemoteConf, Conf, InstId) -> - ResId = emqx_resource_manager:manager_id_to_resource_id(InstId), +make_sub_confs(SubRemoteConf, Conf, ResourceId) -> case maps:find(hookpoint, Conf) of error -> error({no_hookpoint_provided, Conf}); {ok, HookPoint} -> - MFA = {?MODULE, on_message_received, [HookPoint, ResId]}, + MFA = {?MODULE, on_message_received, [HookPoint, ResourceId]}, SubRemoteConf#{on_message_received => MFA} end. @@ -236,7 +267,6 @@ make_forward_confs(FrowardConf) -> basic_config( #{ server := Server, - reconnect_interval := ReconnIntv, proto_ver := ProtoVer, bridge_mode := BridgeMode, clean_start := CleanStart, @@ -252,7 +282,6 @@ basic_config( %% 30s connect_timeout => 30, auto_reconnect => true, - reconnect_interval => ReconnIntv, proto_ver => ProtoVer, %% Opening bridge_mode will form a non-standard mqtt connection message. %% A load balancing server (such as haproxy) is often set up before the emqx broker server. @@ -260,13 +289,12 @@ basic_config( %% non-standard mqtt connection packets will be filtered out by LB. %% So let's disable bridge_mode. bridge_mode => BridgeMode, - clean_start => CleanStart, keepalive => ms_to_s(KeepAlive), + clean_start => CleanStart, retry_interval => RetryIntv, max_inflight => MaxInflight, ssl => EnableSsl, - ssl_opts => maps:to_list(maps:remove(enable, Ssl)), - if_record_metrics => true + ssl_opts => maps:to_list(maps:remove(enable, Ssl)) }, maybe_put_fields([username, password], Conf, BasicConf). 
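The classify_error/1 clauses above decide whether the resource layer should treat a failure as transient (retried by the buffer workers) or permanent. An illustration of the mapping; the bad_topic reason is a made-up example of the catch-all clause:

%% Illustration only: classify_error/1's outputs.
classify_error_examples() ->
    {error, {recoverable_error, disconnected}} = classify_error(disconnected),
    {error, {recoverable_error, {shutdown, closed}}} = classify_error({shutdown, closed}),
    %% anything unrecognized is marked unrecoverable for this request
    {error, {unrecoverable_error, bad_topic}} = classify_error(bad_topic),
    ok.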
diff --git a/apps/emqx_connector/src/emqx_connector_mysql.erl b/apps/emqx_connector/src/emqx_connector_mysql.erl index 6c0ff7210..b8c1250fe 100644 --- a/apps/emqx_connector/src/emqx_connector_mysql.erl +++ b/apps/emqx_connector/src/emqx_connector_mysql.erl @@ -51,8 +51,7 @@ -type sqls() :: #{atom() => binary()}. -type state() :: #{ - poolname := atom(), - auto_reconnect := boolean(), + pool_name := binary(), prepare_statement := prepares(), params_tokens := params_tokens(), batch_inserts := sqls(), @@ -66,10 +65,21 @@ roots() -> fields(config) -> [{server, server()}] ++ - emqx_connector_schema_lib:relational_db_fields() ++ + add_default_username(emqx_connector_schema_lib:relational_db_fields(), []) ++ emqx_connector_schema_lib:ssl_fields() ++ emqx_connector_schema_lib:prepare_statement_fields(). +add_default_username([{username, OrigUsernameFn} | Tail], Head) -> + Head ++ [{username, add_default_fn(OrigUsernameFn, <<"root">>)} | Tail]; +add_default_username([Field | Tail], Head) -> + add_default_username(Tail, Head ++ [Field]). + +add_default_fn(OrigFn, Default) -> + fun + (default) -> Default; + (Field) -> OrigFn(Field) + end. + server() -> Meta = #{desc => ?DESC("server")}, emqx_schema:servers_sc(Meta, ?MYSQL_HOST_OPTIONS). @@ -83,18 +93,16 @@ on_start( #{ server := Server, database := DB, - username := User, - password := Password, - auto_reconnect := AutoReconn, + username := Username, pool_size := PoolSize, ssl := SSL } = Config ) -> - {Host, Port} = emqx_schema:parse_server(Server, ?MYSQL_HOST_OPTIONS), + #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, ?MYSQL_HOST_OPTIONS), ?SLOG(info, #{ msg => "starting_mysql_connector", connector => InstId, - config => Config + config => emqx_utils:redact(Config) }), SslOpts = case maps:get(enable, SSL) of @@ -103,21 +111,22 @@ on_start( false -> [] end, - Options = [ - {host, Host}, - {port, Port}, - {user, User}, - {password, Password}, - {database, DB}, - {auto_reconnect, reconn_interval(AutoReconn)}, - {pool_size, PoolSize} - ], - PoolName = emqx_plugin_libs_pool:pool_name(InstId), - Prepares = parse_prepare_sql(Config), - State = maps:merge(#{poolname => PoolName, auto_reconnect => AutoReconn}, Prepares), - case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Options ++ SslOpts) of + Options = + maybe_add_password_opt( + maps:get(password, Config, undefined), + [ + {host, Host}, + {port, Port}, + {user, Username}, + {database, DB}, + {auto_reconnect, ?AUTO_RECONNECT_INTERVAL}, + {pool_size, PoolSize} + ] + ), + State = parse_prepare_sql(Config), + case emqx_resource_pool:start(InstId, ?MODULE, Options ++ SslOpts) of ok -> - {ok, init_prepare(State)}; + {ok, init_prepare(State#{pool_name => InstId})}; {error, Reason} -> ?tp( mysql_connector_start_failed, @@ -126,12 +135,17 @@ on_start( {error, Reason} end. -on_stop(InstId, #{poolname := PoolName}) -> +maybe_add_password_opt(undefined, Options) -> + Options; +maybe_add_password_opt(Password, Options) -> + [{password, Password} | Options]. + +on_stop(InstId, #{pool_name := PoolName}) -> ?SLOG(info, #{ msg => "stopping_mysql_connector", connector => InstId }), - emqx_plugin_libs_pool:stop_pool(PoolName). + emqx_resource_pool:stop(PoolName). 
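The add_default_fn/2 wrapper in this file relies on hocon schema field functions being called once per schema key (default, type, desc, ...): it answers only the default key itself and delegates every other key. A hedged sketch with a hypothetical original field function:

%% Illustration only: just the 'default' key is overridden.
add_default_fn_example() ->
    Orig = fun
        (required) -> false;
        (_) -> undefined
    end,
    F = add_default_fn(Orig, <<"root">>),
    <<"root">> = F(default),
    false = F(required),
    ok.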
on_query(InstId, {TypeOrKey, SQLOrKey}, State) -> on_query(InstId, {TypeOrKey, SQLOrKey, [], default_timeout}, State); @@ -140,7 +154,7 @@ on_query(InstId, {TypeOrKey, SQLOrKey, Params}, State) -> on_query( InstId, {TypeOrKey, SQLOrKey, Params, Timeout}, - #{poolname := PoolName, prepare_statement := Prepares} = State + #{pool_name := PoolName, prepare_statement := Prepares} = State ) -> MySqlFunction = mysql_function(TypeOrKey), {SQLOrKey2, Data} = proc_sql_params(TypeOrKey, SQLOrKey, Params, State), @@ -155,10 +169,15 @@ on_query( %% not return result, next loop will try again on_query(InstId, {TypeOrKey, SQLOrKey, Params, Timeout}, State); {error, Reason} -> - LogMeta = #{connector => InstId, sql => SQLOrKey, state => State}, - ?SLOG( + ?tp( error, - LogMeta#{msg => "mysql_connector_do_prepare_failed", reason => Reason} + "mysql_connector_do_prepare_failed", + #{ + connector => InstId, + sql => SQLOrKey, + state => State, + reason => Reason + } ), {error, Reason} end; @@ -175,7 +194,7 @@ on_batch_query( {Key, _} -> case maps:get(Key, Inserts, undefined) of undefined -> - {error, batch_select_not_implemented}; + {error, {unrecoverable_error, batch_select_not_implemented}}; InsertSQL -> Tokens = maps:get(Key, ParamsTokens), on_batch_insert(InstId, BatchReq, InsertSQL, Tokens, State) @@ -183,7 +202,7 @@ on_batch_query( Request -> LogMeta = #{connector => InstId, first_request => Request, state => State}, ?SLOG(error, LogMeta#{msg => "invalid request"}), - {error, invalid_request} + {error, {unrecoverable_error, invalid_request}} end. mysql_function(sql) -> @@ -194,8 +213,8 @@ mysql_function(prepared_query) -> mysql_function(_) -> mysql_function(prepared_query). -on_get_status(_InstId, #{poolname := Pool, auto_reconnect := AutoReconn} = State) -> - case emqx_plugin_libs_pool:health_check_ecpool_workers(Pool, fun ?MODULE:do_get_status/1) of +on_get_status(_InstId, #{pool_name := PoolName} = State) -> + case emqx_resource_pool:health_check_workers(PoolName, fun ?MODULE:do_get_status/1) of true -> case do_check_prepares(State) of ok -> @@ -205,10 +224,10 @@ on_get_status(_InstId, #{poolname := Pool, auto_reconnect := AutoReconn} = State {connected, NState}; {error, _Reason} -> %% do not log error, it is logged in prepare_sql_to_conn - conn_status(AutoReconn) + connecting end; false -> - conn_status(AutoReconn) + connecting end. do_get_status(Conn) -> @@ -216,7 +235,7 @@ do_get_status(Conn) -> do_check_prepares(#{prepare_statement := Prepares}) when is_map(Prepares) -> ok; -do_check_prepares(State = #{poolname := PoolName, prepare_statement := {error, Prepares}}) -> +do_check_prepares(State = #{pool_name := PoolName, prepare_statement := {error, Prepares}}) -> %% retry to prepare case prepare_sql(Prepares, PoolName) of ok -> @@ -227,16 +246,11 @@ do_check_prepares(State = #{poolname := PoolName, prepare_statement := {error, P end. %% =================================================================== -conn_status(_AutoReconn = true) -> connecting; -conn_status(_AutoReconn = false) -> disconnected. - -reconn_interval(true) -> 15; -reconn_interval(false) -> false. connect(Options) -> mysql:start_link(Options). 
-init_prepare(State = #{prepare_statement := Prepares, poolname := PoolName}) -> +init_prepare(State = #{prepare_statement := Prepares, pool_name := PoolName}) -> case maps:size(Prepares) of 0 -> State; @@ -255,7 +269,7 @@ init_prepare(State = #{prepare_statement := Prepares, poolname := PoolName}) -> maybe_prepare_sql(SQLOrKey, Prepares, PoolName) -> case maps:is_key(SQLOrKey, Prepares) of true -> prepare_sql(Prepares, PoolName); - false -> {error, prepared_statement_invalid} + false -> {error, {unrecoverable_error, prepared_statement_invalid}} end. prepare_sql(Prepares, PoolName) when is_map(Prepares) -> @@ -379,31 +393,20 @@ proc_sql_params(TypeOrKey, SQLOrData, Params, #{params_tokens := ParamsTokens}) end. on_batch_insert(InstId, BatchReqs, InsertPart, Tokens, State) -> - JoinFun = fun - ([Msg]) -> - emqx_plugin_libs_rule:proc_sql_param_str(Tokens, Msg); - ([H | T]) -> - lists:foldl( - fun(Msg, Acc) -> - Value = emqx_plugin_libs_rule:proc_sql_param_str(Tokens, Msg), - <> - end, - emqx_plugin_libs_rule:proc_sql_param_str(Tokens, H), - T - ) - end, - {_, Msgs} = lists:unzip(BatchReqs), - JoinPart = JoinFun(Msgs), - SQL = <>, - on_sql_query(InstId, query, SQL, [], default_timeout, State). + ValuesPart = lists:join($,, [ + emqx_placeholder:proc_param_str(Tokens, Msg, fun emqx_placeholder:quote_mysql/1) + || {_, Msg} <- BatchReqs + ]), + Query = [InsertPart, <<" values ">> | ValuesPart], + on_sql_query(InstId, query, Query, no_params, default_timeout, State). on_sql_query( InstId, SQLFunc, SQLOrKey, - Data, + Params, Timeout, - #{poolname := PoolName} = State + #{pool_name := PoolName} = State ) -> LogMeta = #{connector => InstId, sql => SQLOrKey, state => State}, ?TRACE("QUERY", "mysql_connector_received", LogMeta), @@ -412,28 +415,26 @@ on_sql_query( {ok, Conn} -> ?tp( mysql_connector_send_query, - #{sql_func => SQLFunc, sql_or_key => SQLOrKey, data => Data} + #{sql_func => SQLFunc, sql_or_key => SQLOrKey, data => Params} ), - do_sql_query(SQLFunc, Conn, SQLOrKey, Data, Timeout, LogMeta); + do_sql_query(SQLFunc, Conn, SQLOrKey, Params, Timeout, LogMeta); {error, disconnected} -> - ?SLOG( + ?tp( error, - LogMeta#{ - msg => "mysql_connector_do_sql_query_failed", - reason => worker_is_disconnected - } + "mysql_connector_do_sql_query_failed", + LogMeta#{reason => worker_is_disconnected} ), {error, {recoverable_error, disconnected}} end. 
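The rewritten on_batch_insert/5 above renders one quoted "(...)" values tuple per message and joins the fragments with commas into a single multi-row INSERT iolist. A self-contained sketch of the joining step; the fragments stand in for emqx_placeholder:proc_param_str/3 output:

%% Illustration only: the batch INSERT assembly.
batch_insert_iolist_example() ->
    InsertPart = <<"insert into msg(topic)">>,
    Fragments = [<<"('t/1')">>, <<"('t/2')">>],
    Query = [InsertPart, <<" values ">> | lists:join($,, Fragments)],
    <<"insert into msg(topic) values ('t/1'),('t/2')">> = iolist_to_binary(Query),
    ok.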
-do_sql_query(SQLFunc, Conn, SQLOrKey, Data, Timeout, LogMeta) -> - try mysql:SQLFunc(Conn, SQLOrKey, Data, Timeout) of +do_sql_query(SQLFunc, Conn, SQLOrKey, Params, Timeout, LogMeta) -> + try mysql:SQLFunc(Conn, SQLOrKey, Params, no_filtermap_fun, Timeout) of {error, disconnected} -> ?SLOG( error, LogMeta#{msg => "mysql_connector_do_sql_query_failed", reason => disconnected} ), - %% kill the poll worker to trigger reconnection + %% kill the pool worker to trigger reconnection _ = exit(Conn, restart), {error, {recoverable_error, disconnected}}; {error, not_prepared} = Error -> @@ -453,12 +454,12 @@ do_sql_query(SQLFunc, Conn, SQLOrKey, Data, Timeout, LogMeta) -> LogMeta#{msg => "mysql_connector_do_sql_query_failed", reason => Reason} ), {error, {recoverable_error, Reason}}; - {error, Reason} = Result -> + {error, Reason} -> ?SLOG( error, LogMeta#{msg => "mysql_connector_do_sql_query_failed", reason => Reason} ), - Result; + {error, {unrecoverable_error, Reason}}; Result -> ?tp( mysql_connector_query_return, @@ -469,7 +470,7 @@ do_sql_query(SQLFunc, Conn, SQLOrKey, Data, Timeout, LogMeta) -> error:badarg -> ?SLOG( error, - LogMeta#{msg => "mysql_connector_invalid_params", params => Data} + LogMeta#{msg => "mysql_connector_invalid_params", params => Params} ), - {error, {invalid_params, Data}} + {error, {unrecoverable_error, {invalid_params, Params}}} end. diff --git a/apps/emqx_connector/src/emqx_connector_pgsql.erl b/apps/emqx_connector/src/emqx_connector_pgsql.erl index cde8bbe3b..3b2375d04 100644 --- a/apps/emqx_connector/src/emqx_connector_pgsql.erl +++ b/apps/emqx_connector/src/emqx_connector_pgsql.erl @@ -20,6 +20,7 @@ -include_lib("emqx/include/logger.hrl"). -include_lib("hocon/include/hoconsc.hrl"). -include_lib("epgsql/include/epgsql.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). -export([roots/0, fields/1]). @@ -31,6 +32,7 @@ on_start/2, on_stop/2, on_query/3, + on_batch_query/3, on_get_status/2 ]). @@ -38,15 +40,28 @@ -export([ query/3, - prepared_query/3 + prepared_query/3, + execute_batch/3 ]). --export([do_get_status/1]). +%% for ecpool workers usage +-export([do_get_status/1, prepare_sql_to_conn/2]). -define(PGSQL_HOST_OPTIONS, #{ default_port => ?PGSQL_DEFAULT_PORT }). +-type prepares() :: #{atom() => binary()}. +-type params_tokens() :: #{atom() => list()}. + +-type state() :: + #{ + pool_name := binary(), + prepare_sql := prepares(), + params_tokens := params_tokens(), + prepare_statement := epgsql:statement() + }. + %%===================================================================== roots() -> @@ -65,28 +80,31 @@ server() -> %% =================================================================== callback_mode() -> always_sync. +-spec on_start(binary(), hoconsc:config()) -> {ok, state()} | {error, _}. on_start( InstId, #{ server := Server, database := DB, username := User, - password := Password, - auto_reconnect := AutoReconn, pool_size := PoolSize, ssl := SSL } = Config ) -> - {Host, Port} = emqx_schema:parse_server(Server, ?PGSQL_HOST_OPTIONS), + #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, ?PGSQL_HOST_OPTIONS), ?SLOG(info, #{ msg => "starting_postgresql_connector", connector => InstId, - config => Config + config => emqx_utils:redact(Config) }), SslOpts = case maps:get(enable, SSL) of true -> [ + %% note: this is converted to `required' in + %% `conn_opts/2', and there's a boolean guard + %% there; if this is set to `required' here, + %% that'll require changing `conn_opts/2''s guard. 
{ssl, true}, {ssl_opts, emqx_tls_lib:to_client_opts(SSL)} ]; @@ -97,74 +115,187 @@ on_start( {host, Host}, {port, Port}, {username, User}, - {password, emqx_secret:wrap(Password)}, + {password, emqx_secret:wrap(maps:get(password, Config, ""))}, {database, DB}, - {auto_reconnect, reconn_interval(AutoReconn)}, - {pool_size, PoolSize}, - {prepare_statement, maps:to_list(maps:get(prepare_statement, Config, #{}))} + {auto_reconnect, ?AUTO_RECONNECT_INTERVAL}, + {pool_size, PoolSize} ], - PoolName = emqx_plugin_libs_pool:pool_name(InstId), - case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Options ++ SslOpts) of - ok -> {ok, #{poolname => PoolName, auto_reconnect => AutoReconn}}; - {error, Reason} -> {error, Reason} + State = parse_prepare_sql(Config), + case emqx_resource_pool:start(InstId, ?MODULE, Options ++ SslOpts) of + ok -> + {ok, init_prepare(State#{pool_name => InstId, prepare_statement => #{}})}; + {error, Reason} -> + ?tp( + pgsql_connector_start_failed, + #{error => Reason} + ), + {error, Reason} end. -on_stop(InstId, #{poolname := PoolName}) -> +on_stop(InstId, #{pool_name := PoolName}) -> ?SLOG(info, #{ msg => "stopping postgresql connector", connector => InstId }), - emqx_plugin_libs_pool:stop_pool(PoolName). + emqx_resource_pool:stop(PoolName). -on_query(InstId, {Type, NameOrSQL}, #{poolname := _PoolName} = State) -> - on_query(InstId, {Type, NameOrSQL, []}, State); -on_query(InstId, {Type, NameOrSQL, Params}, #{poolname := PoolName} = State) -> +on_query(InstId, {TypeOrKey, NameOrSQL}, State) -> + on_query(InstId, {TypeOrKey, NameOrSQL, []}, State); +on_query( + InstId, + {TypeOrKey, NameOrSQL, Params}, + #{pool_name := PoolName} = State +) -> ?SLOG(debug, #{ msg => "postgresql connector received sql query", connector => InstId, + type => TypeOrKey, sql => NameOrSQL, state => State }), - case Result = ecpool:pick_and_do(PoolName, {?MODULE, Type, [NameOrSQL, Params]}, no_handover) of - {error, Reason} -> + Type = pgsql_query_type(TypeOrKey), + {NameOrSQL2, Data} = proc_sql_params(TypeOrKey, NameOrSQL, Params, State), + Res = on_sql_query(InstId, PoolName, Type, NameOrSQL2, Data), + handle_result(Res). + +pgsql_query_type(sql) -> + query; +pgsql_query_type(query) -> + query; +pgsql_query_type(prepared_query) -> + prepared_query; +%% for bridge +pgsql_query_type(_) -> + pgsql_query_type(prepared_query). + +on_batch_query( + InstId, + BatchReq, + #{pool_name := PoolName, params_tokens := Tokens, prepare_statement := Sts} = State +) -> + case BatchReq of + [{Key, _} = Request | _] -> + BinKey = to_bin(Key), + case maps:get(BinKey, Tokens, undefined) of + undefined -> + Log = #{ + connector => InstId, + first_request => Request, + state => State, + msg => "batch prepare not implemented" + }, + ?SLOG(error, Log), + {error, {unrecoverable_error, batch_prepare_not_implemented}}; + TokenList -> + {_, Datas} = lists:unzip(BatchReq), + Datas2 = [emqx_plugin_libs_rule:proc_sql(TokenList, Data) || Data <- Datas], + St = maps:get(BinKey, Sts), + case on_sql_query(InstId, PoolName, execute_batch, St, Datas2) of + {error, _Error} = Result -> + handle_result(Result); + {_Column, Results} -> + handle_batch_result(Results, 0) + end + end; + _ -> + Log = #{ + connector => InstId, + request => BatchReq, + state => State, + msg => "invalid request" + }, + ?SLOG(error, Log), + {error, {unrecoverable_error, invalid_request}} + end. 
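In on_batch_query/3 above, each batch element is rendered through the stored parameter tokens before the whole list goes to execute_batch. A hedged sketch of that per-row rendering, assuming (as the code does) that emqx_plugin_libs_rule:proc_sql/2 maps a token list plus one message to the positional parameters of the prepared statement:

%% Illustration only: unzip the batch and render one parameter row per message.
render_batch_rows(TokenList, BatchReq) ->
    {_Keys, Datas} = lists:unzip(BatchReq),
    [emqx_plugin_libs_rule:proc_sql(TokenList, Data) || Data <- Datas].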
+ +proc_sql_params(query, SQLOrKey, Params, _State) -> + {SQLOrKey, Params}; +proc_sql_params(prepared_query, SQLOrKey, Params, _State) -> + {SQLOrKey, Params}; +proc_sql_params(TypeOrKey, SQLOrData, Params, #{params_tokens := ParamsTokens}) -> + Key = to_bin(TypeOrKey), + case maps:get(Key, ParamsTokens, undefined) of + undefined -> + {SQLOrData, Params}; + Tokens -> + {Key, emqx_plugin_libs_rule:proc_sql(Tokens, SQLOrData)} + end. + +on_sql_query(InstId, PoolName, Type, NameOrSQL, Data) -> + try ecpool:pick_and_do(PoolName, {?MODULE, Type, [NameOrSQL, Data]}, no_handover) of + {error, Reason} = Result -> + ?tp( + pgsql_connector_query_return, + #{error => Reason} + ), ?SLOG(error, #{ msg => "postgresql connector do sql query failed", connector => InstId, + type => Type, sql => NameOrSQL, reason => Reason - }); - _ -> - ok - end, - Result. + }), + Result; + Result -> + ?tp( + pgsql_connector_query_return, + #{result => Result} + ), + Result + catch + error:function_clause:Stacktrace -> + ?SLOG(error, #{ + msg => "postgresql connector do sql query failed", + connector => InstId, + type => Type, + sql => NameOrSQL, + reason => function_clause, + stacktrace => Stacktrace + }), + {error, {unrecoverable_error, invalid_request}} + end. -on_get_status(_InstId, #{poolname := Pool, auto_reconnect := AutoReconn}) -> - case emqx_plugin_libs_pool:health_check_ecpool_workers(Pool, fun ?MODULE:do_get_status/1) of - true -> connected; - false -> conn_status(AutoReconn) +on_get_status(_InstId, #{pool_name := PoolName} = State) -> + case emqx_resource_pool:health_check_workers(PoolName, fun ?MODULE:do_get_status/1) of + true -> + case do_check_prepares(State) of + ok -> + connected; + {ok, NState} -> + %% return new state with prepared statements + {connected, NState}; + false -> + %% do not log error, it is logged in prepare_sql_to_conn + connecting + end; + false -> + connecting end. do_get_status(Conn) -> ok == element(1, epgsql:squery(Conn, "SELECT count(1) AS T")). -%% =================================================================== -conn_status(_AutoReconn = true) -> connecting; -conn_status(_AutoReconn = false) -> disconnected. +do_check_prepares(#{prepare_sql := Prepares}) when is_map(Prepares) -> + ok; +do_check_prepares(State = #{pool_name := PoolName, prepare_sql := {error, Prepares}}) -> + %% retry to prepare + case prepare_sql(Prepares, PoolName) of + {ok, Sts} -> + %% remove the error + {ok, State#{prepare_sql => Prepares, prepare_statement := Sts}}; + _Error -> + false + end. -reconn_interval(true) -> 15; -reconn_interval(false) -> false. +%% =================================================================== connect(Opts) -> Host = proplists:get_value(host, Opts), Username = proplists:get_value(username, Opts), Password = emqx_secret:unwrap(proplists:get_value(password, Opts)), - PrepareStatement = proplists:get_value(prepare_statement, Opts), case epgsql:connect(Host, Username, Password, conn_opts(Opts)) of - {ok, Conn} -> - case parse(Conn, PrepareStatement) of - ok -> {ok, Conn}; - {error, Reason} -> {error, Reason} - end; + {ok, _Conn} = Ok -> + Ok; {error, Reason} -> {error, Reason} end. @@ -175,15 +306,8 @@ query(Conn, SQL, Params) -> prepared_query(Conn, Name, Params) -> epgsql:prepared_query2(Conn, Name, Params). -parse(_Conn, []) -> - ok; -parse(Conn, [{Name, Query} | More]) -> - case epgsql:parse2(Conn, Name, Query, []) of - {ok, _Statement} -> - parse(Conn, More); - Other -> - Other - end. 
+execute_batch(Conn, Statement, Params) ->
+    epgsql:execute_batch(Conn, Statement, Params).

 conn_opts(Opts) ->
     conn_opts(Opts, []).
@@ -206,3 +330,105 @@ conn_opts([Opt = {ssl_opts, _} | Opts], Acc) ->
     conn_opts(Opts, [Opt | Acc]);
 conn_opts([_Opt | Opts], Acc) ->
     conn_opts(Opts, Acc).
+
+parse_prepare_sql(Config) ->
+    SQL =
+        case maps:get(prepare_statement, Config, undefined) of
+            undefined ->
+                case maps:get(sql, Config, undefined) of
+                    undefined -> #{};
+                    Template -> #{<<"send_message">> => Template}
+                end;
+            Any ->
+                Any
+        end,
+    parse_prepare_sql(maps:to_list(SQL), #{}, #{}).
+
+parse_prepare_sql([{Key, H} | T], Prepares, Tokens) ->
+    {PrepareSQL, ParamsTokens} = emqx_plugin_libs_rule:preproc_sql(H, '$n'),
+    parse_prepare_sql(
+        T, Prepares#{Key => PrepareSQL}, Tokens#{Key => ParamsTokens}
+    );
+parse_prepare_sql([], Prepares, Tokens) ->
+    #{
+        prepare_sql => Prepares,
+        params_tokens => Tokens
+    }.
+
+init_prepare(State = #{prepare_sql := Prepares, pool_name := PoolName}) ->
+    case maps:size(Prepares) of
+        0 ->
+            State;
+        _ ->
+            case prepare_sql(Prepares, PoolName) of
+                {ok, Sts} ->
+                    State#{prepare_statement := Sts};
+                Error ->
+                    LogMeta = #{
+                        msg => <<"PostgreSQL init prepare statement failed">>, error => Error
+                    },
+                    ?SLOG(error, LogMeta),
+                    %% mark the prepare_sql as failed
+                    State#{prepare_sql => {error, Prepares}}
+            end
+    end.
+
+prepare_sql(Prepares, PoolName) when is_map(Prepares) ->
+    prepare_sql(maps:to_list(Prepares), PoolName);
+prepare_sql(Prepares, PoolName) ->
+    case do_prepare_sql(Prepares, PoolName) of
+        {ok, _Sts} = Ok ->
+            %% prepare for reconnect
+            ecpool:add_reconnect_callback(PoolName, {?MODULE, prepare_sql_to_conn, [Prepares]}),
+            Ok;
+        Error ->
+            Error
+    end.
+
+do_prepare_sql(Prepares, PoolName) ->
+    do_prepare_sql(ecpool:workers(PoolName), Prepares, #{}).
+
+do_prepare_sql([{_Name, Worker} | T], Prepares, _LastSts) ->
+    {ok, Conn} = ecpool_worker:client(Worker),
+    case prepare_sql_to_conn(Conn, Prepares) of
+        {ok, Sts} ->
+            do_prepare_sql(T, Prepares, Sts);
+        Error ->
+            Error
+    end;
+do_prepare_sql([], _Prepares, LastSts) ->
+    {ok, LastSts}.
+
+prepare_sql_to_conn(Conn, Prepares) ->
+    prepare_sql_to_conn(Conn, Prepares, #{}).
+
+prepare_sql_to_conn(Conn, [], Statements) when is_pid(Conn) -> {ok, Statements};
+prepare_sql_to_conn(Conn, [{Key, SQL} | PrepareList], Statements) when is_pid(Conn) ->
+    LogMeta = #{msg => "PostgreSQL Prepare Statement", name => Key, prepare_sql => SQL},
+    ?SLOG(info, LogMeta),
+    case epgsql:parse2(Conn, Key, SQL, []) of
+        {ok, Statement} ->
+            prepare_sql_to_conn(Conn, PrepareList, Statements#{Key => Statement});
+        {error, Error} = Other ->
+            ?SLOG(error, LogMeta#{msg => "PostgreSQL parse failed", error => Error}),
+            Other
+    end.
+
+to_bin(Bin) when is_binary(Bin) ->
+    Bin;
+to_bin(Atom) when is_atom(Atom) ->
+    erlang:atom_to_binary(Atom).
+
+handle_result({error, disconnected}) ->
+    {error, {recoverable_error, disconnected}};
+handle_result({error, Error}) ->
+    {error, {unrecoverable_error, Error}};
+handle_result(Res) ->
+    Res.
+
+handle_batch_result([{ok, Count} | Rest], Acc) ->
+    handle_batch_result(Rest, Acc + Count);
+handle_batch_result([{error, Error} | _Rest], _Acc) ->
+    {error, {unrecoverable_error, Error}};
+handle_batch_result([], Acc) ->
+    {ok, Acc}.
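+
+%% Example: handle_batch_result([{ok, 1}, {ok, 2}], 0) accumulates to {ok, 3},
+%% while the first {error, E} in the list short-circuits to
+%% {error, {unrecoverable_error, E}}.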
diff --git a/apps/emqx_connector/src/emqx_connector_redis.erl b/apps/emqx_connector/src/emqx_connector_redis.erl index 350d49f01..32ac77226 100644 --- a/apps/emqx_connector/src/emqx_connector_redis.erl +++ b/apps/emqx_connector/src/emqx_connector_redis.erl @@ -64,7 +64,7 @@ fields(single) -> {redis_type, #{ type => single, default => single, - required => true, + required => false, desc => ?DESC("single") }} ] ++ @@ -76,7 +76,7 @@ fields(cluster) -> {redis_type, #{ type => cluster, default => cluster, - required => true, + required => false, desc => ?DESC("cluster") }} ] ++ @@ -88,7 +88,7 @@ fields(sentinel) -> {redis_type, #{ type => sentinel, default => sentinel, - required => true, + required => false, desc => ?DESC("sentinel") }}, {sentinel, #{ @@ -117,14 +117,13 @@ on_start( #{ redis_type := Type, pool_size := PoolSize, - auto_reconnect := AutoReconn, ssl := SSL } = Config ) -> ?SLOG(info, #{ msg => "starting_redis_connector", connector => InstId, - config => Config + config => emqx_utils:redact(Config) }), ConfKey = case Type of @@ -132,7 +131,13 @@ on_start( _ -> servers end, Servers0 = maps:get(ConfKey, Config), - Servers = [{servers, emqx_schema:parse_servers(Servers0, ?REDIS_HOST_OPTIONS)}], + Servers1 = lists:map( + fun(#{hostname := Host, port := Port}) -> + {Host, Port} + end, + emqx_schema:parse_servers(Servers0, ?REDIS_HOST_OPTIONS) + ), + Servers = [{servers, Servers1}], Database = case Type of cluster -> []; @@ -142,7 +147,7 @@ on_start( [ {pool_size, PoolSize}, {password, maps:get(password, Config, "")}, - {auto_reconnect, reconn_interval(AutoReconn)} + {auto_reconnect, ?AUTO_RECONNECT_INTERVAL} ] ++ Database ++ Servers, Options = case maps:get(enable, SSL) of @@ -154,11 +159,10 @@ on_start( false -> [{ssl, false}] end ++ [{sentinel, maps:get(sentinel, Config, undefined)}], - PoolName = emqx_plugin_libs_pool:pool_name(InstId), - State = #{poolname => PoolName, type => Type, auto_reconnect => AutoReconn}, + State = #{pool_name => InstId, type => Type}, case Type of cluster -> - case eredis_cluster:start_pool(PoolName, Opts ++ [{options, Options}]) of + case eredis_cluster:start_pool(InstId, Opts ++ [{options, Options}]) of {ok, _} -> {ok, State}; {ok, _, _} -> @@ -167,22 +171,20 @@ on_start( {error, Reason} end; _ -> - case - emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Opts ++ [{options, Options}]) - of + case emqx_resource_pool:start(InstId, ?MODULE, Opts ++ [{options, Options}]) of ok -> {ok, State}; {error, Reason} -> {error, Reason} end end. -on_stop(InstId, #{poolname := PoolName, type := Type}) -> +on_stop(InstId, #{pool_name := PoolName, type := Type}) -> ?SLOG(info, #{ msg => "stopping_redis_connector", connector => InstId }), case Type of cluster -> eredis_cluster:stop_pool(PoolName); - _ -> emqx_plugin_libs_pool:stop_pool(PoolName) + _ -> emqx_resource_pool:stop(PoolName) end. on_query(InstId, {cmd, _} = Query, State) -> @@ -190,7 +192,7 @@ on_query(InstId, {cmd, _} = Query, State) -> on_query(InstId, {cmds, _} = Query, State) -> do_query(InstId, Query, State). 
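+%% Both single commands ({cmd, _}) and pipelines ({cmds, _}) are funneled
+%% through do_query/3 below, which picks a pool worker and classifies errors
+%% as recoverable or unrecoverable for the resource layer.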
-do_query(InstId, Query, #{poolname := PoolName, type := Type} = State) -> +do_query(InstId, Query, #{pool_name := PoolName, type := Type} = State) -> ?TRACE( "QUERY", "redis_connector_received", @@ -208,39 +210,37 @@ do_query(InstId, Query, #{poolname := PoolName, type := Type} = State) -> connector => InstId, query => Query, reason => Reason - }); + }), + case is_unrecoverable_error(Reason) of + true -> + {error, {unrecoverable_error, Reason}}; + false -> + Result + end; _ -> - ok - end, - Result. + Result + end. -extract_eredis_cluster_workers(PoolName) -> - lists:flatten([ - gen_server:call(PoolPid, get_all_workers) - || PoolPid <- eredis_cluster_monitor:get_all_pools(PoolName) - ]). +is_unrecoverable_error(Results) when is_list(Results) -> + lists:any(fun is_unrecoverable_error/1, Results); +is_unrecoverable_error({error, <<"ERR unknown command ", _/binary>>}) -> + true; +is_unrecoverable_error({error, invalid_cluster_command}) -> + true; +is_unrecoverable_error(_) -> + false. -eredis_cluster_workers_exist_and_are_connected(Workers) -> - length(Workers) > 0 andalso - lists:all( - fun({_, Pid, _, _}) -> - eredis_cluster_pool_worker:is_connected(Pid) =:= true - end, - Workers - ). - -on_get_status(_InstId, #{type := cluster, poolname := PoolName, auto_reconnect := AutoReconn}) -> +on_get_status(_InstId, #{type := cluster, pool_name := PoolName}) -> case eredis_cluster:pool_exists(PoolName) of true -> - Workers = extract_eredis_cluster_workers(PoolName), - Health = eredis_cluster_workers_exist_and_are_connected(Workers), - status_result(Health, AutoReconn); + Health = eredis_cluster:ping_all(PoolName), + status_result(Health); false -> disconnected end; -on_get_status(_InstId, #{poolname := Pool, auto_reconnect := AutoReconn}) -> - Health = emqx_plugin_libs_pool:health_check_ecpool_workers(Pool, fun ?MODULE:do_get_status/1), - status_result(Health, AutoReconn). +on_get_status(_InstId, #{pool_name := PoolName}) -> + Health = emqx_resource_pool:health_check_workers(PoolName, fun ?MODULE:do_get_status/1), + status_result(Health). do_get_status(Conn) -> case eredis:q(Conn, ["PING"]) of @@ -248,19 +248,17 @@ do_get_status(Conn) -> _ -> false end. -status_result(_Status = true, _AutoReconn) -> connected; -status_result(_Status = false, _AutoReconn = true) -> connecting; -status_result(_Status = false, _AutoReconn = false) -> disconnected. - -reconn_interval(true) -> 15; -reconn_interval(false) -> false. +status_result(_Status = true) -> connected; +status_result(_Status = false) -> connecting. do_cmd(PoolName, cluster, {cmd, Command}) -> eredis_cluster:q(PoolName, Command); do_cmd(Conn, _Type, {cmd, Command}) -> eredis:q(Conn, Command); do_cmd(PoolName, cluster, {cmds, Commands}) -> - wrap_qp_result(eredis_cluster:qp(PoolName, Commands)); + % TODO + % Cluster mode is currently incompatible with batching. + wrap_qp_result([eredis_cluster:q(PoolName, Command) || Command <- Commands]); do_cmd(Conn, _Type, {cmds, Commands}) -> wrap_qp_result(eredis:qp(Conn, Commands)). 
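+
+%% Example: {cmds, [["SET", "k", "v"], ["GET", "k"]]} is pipelined with
+%% eredis:qp/2 on a single node, whereas in cluster mode each command is sent
+%% individually with eredis_cluster:q/2 until batching is supported there.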
@@ -290,7 +288,6 @@ redis_fields() -> {database, #{ type => integer(), default => 0, - required => true, desc => ?DESC("database") }}, {auto_reconnect, fun emqx_connector_schema_lib:auto_reconnect/1} diff --git a/apps/emqx_connector/src/emqx_connector_schema_lib.erl b/apps/emqx_connector/src/emqx_connector_schema_lib.erl index 2364aeeaa..f64208311 100644 --- a/apps/emqx_connector/src/emqx_connector_schema_lib.erl +++ b/apps/emqx_connector/src/emqx_connector_schema_lib.erl @@ -101,9 +101,11 @@ password(desc) -> ?DESC("password"); password(required) -> false; password(format) -> <<"password">>; password(sensitive) -> true; +password(converter) -> fun emqx_schema:password_converter/2; password(_) -> undefined. auto_reconnect(type) -> boolean(); auto_reconnect(desc) -> ?DESC("auto_reconnect"); auto_reconnect(default) -> true; +auto_reconnect(deprecated) -> {since, "v5.0.15"}; auto_reconnect(_) -> undefined. diff --git a/apps/emqx_connector/src/emqx_connector_ssl.erl b/apps/emqx_connector/src/emqx_connector_ssl.erl index 54dc0e022..e07d95d51 100644 --- a/apps/emqx_connector/src/emqx_connector_ssl.erl +++ b/apps/emqx_connector/src/emqx_connector_ssl.erl @@ -74,7 +74,7 @@ new_ssl_config(Config, _NewSSL) -> normalize_key_to_bin(undefined) -> undefined; normalize_key_to_bin(Map) when is_map(Map) -> - emqx_map_lib:binary_key_map(Map). + emqx_utils_maps:binary_key_map(Map). try_map_get(Key, Map, Default) when is_map(Map) -> maps:get(Key, Map, Default); diff --git a/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_mod.erl b/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_mod.erl deleted file mode 100644 index 870f9acfc..000000000 --- a/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_mod.erl +++ /dev/null @@ -1,236 +0,0 @@ -%%-------------------------------------------------------------------- -%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%%-------------------------------------------------------------------- - -%% @doc This module implements EMQX Bridge transport layer on top of MQTT protocol - --module(emqx_connector_mqtt_mod). - --export([ - start/1, - send/2, - send_async/3, - stop/1, - ping/1 -]). - --export([ - ensure_subscribed/3, - ensure_unsubscribed/2 -]). - -%% callbacks for emqtt --export([ - handle_publish/3, - handle_disconnected/2 -]). - --include_lib("emqx/include/logger.hrl"). --include_lib("emqx/include/emqx_mqtt.hrl"). - --define(ACK_REF(ClientPid, PktId), {ClientPid, PktId}). - -%% Messages towards ack collector process --define(REF_IDS(Ref, Ids), {Ref, Ids}). 
- -%%-------------------------------------------------------------------- -%% emqx_bridge_connect callbacks -%%-------------------------------------------------------------------- - -start(Config) -> - Parent = self(), - ServerStr = iolist_to_binary(maps:get(server, Config)), - {Server, Port} = emqx_connector_mqtt_schema:parse_server(ServerStr), - Mountpoint = maps:get(receive_mountpoint, Config, undefined), - Subscriptions = maps:get(subscriptions, Config, undefined), - Vars = emqx_connector_mqtt_msg:make_pub_vars(Mountpoint, Subscriptions), - Handlers = make_hdlr(Parent, Vars, #{server => ServerStr}), - Config1 = Config#{ - msg_handler => Handlers, - host => Server, - port => Port, - force_ping => true, - proto_ver => maps:get(proto_ver, Config, v4) - }, - case emqtt:start_link(process_config(Config1)) of - {ok, Pid} -> - case emqtt:connect(Pid) of - {ok, _} -> - try - ok = sub_remote_topics(Pid, Subscriptions), - {ok, #{client_pid => Pid, subscriptions => Subscriptions}} - catch - throw:Reason -> - ok = stop(#{client_pid => Pid}), - {error, error_reason(Reason, ServerStr)} - end; - {error, Reason} -> - ok = stop(#{client_pid => Pid}), - {error, error_reason(Reason, ServerStr)} - end; - {error, Reason} -> - {error, error_reason(Reason, ServerStr)} - end. - -error_reason(Reason, ServerStr) -> - #{reason => Reason, server => ServerStr}. - -stop(#{client_pid := Pid}) -> - safe_stop(Pid, fun() -> emqtt:stop(Pid) end, 1000), - ok. - -ping(undefined) -> - pang; -ping(#{client_pid := Pid}) -> - emqtt:ping(Pid). - -ensure_subscribed(#{client_pid := Pid, subscriptions := Subs} = Conn, Topic, QoS) when - is_pid(Pid) --> - case emqtt:subscribe(Pid, Topic, QoS) of - {ok, _, _} -> Conn#{subscriptions => [{Topic, QoS} | Subs]}; - Error -> {error, Error} - end; -ensure_subscribed(_Conn, _Topic, _QoS) -> - %% return ok for now - %% next re-connect should should call start with new topic added to config - ok. - -ensure_unsubscribed(#{client_pid := Pid, subscriptions := Subs} = Conn, Topic) when is_pid(Pid) -> - case emqtt:unsubscribe(Pid, Topic) of - {ok, _, _} -> Conn#{subscriptions => lists:keydelete(Topic, 1, Subs)}; - Error -> {error, Error} - end; -ensure_unsubscribed(Conn, _) -> - %% return ok for now - %% next re-connect should should call start with this topic deleted from config - Conn. - -safe_stop(Pid, StopF, Timeout) -> - MRef = monitor(process, Pid), - unlink(Pid), - try - StopF() - catch - _:_ -> - ok - end, - receive - {'DOWN', MRef, _, _, _} -> - ok - after Timeout -> - exit(Pid, kill) - end. - -send(#{client_pid := ClientPid}, Msg) -> - emqtt:publish(ClientPid, Msg). - -send_async(#{client_pid := ClientPid}, Msg, Callback) -> - emqtt:publish_async(ClientPid, Msg, infinity, Callback). - -handle_publish(Msg, undefined, _Opts) -> - ?SLOG(error, #{ - msg => - "cannot_publish_to_local_broker_as" - "_'ingress'_is_not_configured", - message => Msg - }); -handle_publish(#{properties := Props} = Msg0, Vars, Opts) -> - Msg = format_msg_received(Msg0, Opts), - ?SLOG(debug, #{ - msg => "publish_to_local_broker", - message => Msg, - vars => Vars - }), - case Vars of - #{on_message_received := {Mod, Func, Args}} -> - _ = erlang:apply(Mod, Func, [Msg | Args]); - _ -> - ok - end, - maybe_publish_to_local_broker(Msg, Vars, Props). - -handle_disconnected(Reason, Parent) -> - Parent ! {disconnected, self(), Reason}. - -make_hdlr(Parent, Vars, Opts) -> - #{ - publish => {fun ?MODULE:handle_publish/3, [Vars, Opts]}, - disconnected => {fun ?MODULE:handle_disconnected/2, [Parent]} - }. 
- -sub_remote_topics(_ClientPid, undefined) -> - ok; -sub_remote_topics(ClientPid, #{remote := #{topic := FromTopic, qos := QoS}}) -> - case emqtt:subscribe(ClientPid, FromTopic, QoS) of - {ok, _, _} -> ok; - Error -> throw(Error) - end. - -process_config(Config) -> - maps:without([conn_type, address, receive_mountpoint, subscriptions, name], Config). - -maybe_publish_to_local_broker(Msg, Vars, Props) -> - case emqx_map_lib:deep_get([local, topic], Vars, undefined) of - %% local topic is not set, discard it - undefined -> ok; - _ -> emqx_broker:publish(emqx_connector_mqtt_msg:to_broker_msg(Msg, Vars, Props)) - end. - -format_msg_received( - #{ - dup := Dup, - payload := Payload, - properties := Props, - qos := QoS, - retain := Retain, - topic := Topic - }, - #{server := Server} -) -> - #{ - id => emqx_guid:to_hexstr(emqx_guid:gen()), - server => Server, - payload => Payload, - topic => Topic, - qos => QoS, - dup => Dup, - retain => Retain, - pub_props => printable_maps(Props), - message_received_at => erlang:system_time(millisecond) - }. - -printable_maps(undefined) -> - #{}; -printable_maps(Headers) -> - maps:fold( - fun - ('User-Property', V0, AccIn) when is_list(V0) -> - AccIn#{ - 'User-Property' => maps:from_list(V0), - 'User-Property-Pairs' => [ - #{ - key => Key, - value => Value - } - || {Key, Value} <- V0 - ] - }; - (K, V0, AccIn) -> - AccIn#{K => V0} - end, - #{}, - Headers - ). diff --git a/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_msg.erl b/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_msg.erl index bdd516db6..df1114483 100644 --- a/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_msg.erl +++ b/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_msg.erl @@ -71,14 +71,13 @@ to_remote_msg(#message{flags = Flags0} = Msg, Vars) -> to_remote_msg(MapMsg, #{ remote := #{ topic := TopicToken, - payload := PayloadToken, qos := QoSToken, retain := RetainToken - }, + } = Remote, mountpoint := Mountpoint }) when is_map(MapMsg) -> Topic = replace_vars_in_str(TopicToken, MapMsg), - Payload = process_payload(PayloadToken, MapMsg), + Payload = process_payload(Remote, MapMsg), QoS = replace_simple_var(QoSToken, MapMsg), Retain = replace_simple_var(RetainToken, MapMsg), PubProps = maps:get(pub_props, MapMsg, #{}), @@ -86,7 +85,7 @@ to_remote_msg(MapMsg, #{ qos = QoS, retain = Retain, topic = topic(Mountpoint, Topic), - props = emqx_misc:pub_props_to_packet(PubProps), + props = emqx_utils:pub_props_to_packet(PubProps), payload = Payload }; to_remote_msg(#message{topic = Topic} = Msg, #{mountpoint := Mountpoint}) -> @@ -100,30 +99,32 @@ to_broker_msg( #{ local := #{ topic := TopicToken, - payload := PayloadToken, qos := QoSToken, retain := RetainToken - }, + } = Local, mountpoint := Mountpoint }, Props ) -> Topic = replace_vars_in_str(TopicToken, MapMsg), - Payload = process_payload(PayloadToken, MapMsg), + Payload = process_payload(Local, MapMsg), QoS = replace_simple_var(QoSToken, MapMsg), Retain = replace_simple_var(RetainToken, MapMsg), PubProps = maps:get(pub_props, MapMsg, #{}), set_headers( - Props#{properties => emqx_misc:pub_props_to_packet(PubProps)}, + Props#{properties => emqx_utils:pub_props_to_packet(PubProps)}, emqx_message:set_flags( #{dup => Dup, retain => Retain}, emqx_message:make(bridge, QoS, topic(Mountpoint, Topic), Payload) ) ). -process_payload([], Msg) -> - emqx_json:encode(Msg); -process_payload(Tks, Msg) -> +process_payload(From, MapMsg) -> + do_process_payload(maps:get(payload, From, undefined), MapMsg). 
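+%% With no payload template configured, the whole message map is encoded as
+%% JSON; otherwise the configured template tokens are rendered against it.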
+ +do_process_payload(undefined, Msg) -> + emqx_utils_json:encode(Msg); +do_process_payload(Tks, Msg) -> replace_vars_in_str(Tks, Msg). %% Replace a string contains vars to another string in which the placeholders are replace by the diff --git a/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_schema.erl b/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_schema.erl index 5e833aa99..2a40980af 100644 --- a/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_schema.erl +++ b/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_schema.erl @@ -18,6 +18,7 @@ -include_lib("typerefl/include/types.hrl"). -include_lib("hocon/include/hoconsc.hrl"). +-include_lib("emqx/include/logger.hrl"). -behaviour(hocon_schema). @@ -72,12 +73,7 @@ fields("server_configs") -> )}, {server, emqx_schema:servers_sc(#{desc => ?DESC("server")}, ?MQTT_HOST_OPTS)}, {clientid_prefix, mk(binary(), #{required => false, desc => ?DESC("clientid_prefix")})}, - {reconnect_interval, - mk_duration( - "Reconnect interval. Delay for the MQTT bridge to retry establishing the connection " - "in case of transportation failure.", - #{default => "15s"} - )}, + {reconnect_interval, mk(string(), #{deprecated => {since, "v5.0.16"}})}, {proto_ver, mk( hoconsc:enum([v3, v4, v5]), @@ -107,7 +103,8 @@ fields("server_configs") -> #{ format => <<"password">>, sensitive => true, - desc => ?DESC("password") + desc => ?DESC("password"), + converter => fun emqx_schema:password_converter/2 } )}, {clean_start, @@ -118,12 +115,12 @@ fields("server_configs") -> desc => ?DESC("clean_start") } )}, - {keepalive, mk_duration("MQTT Keepalive.", #{default => "300s"})}, + {keepalive, mk_duration("MQTT Keepalive.", #{default => <<"300s">>})}, {retry_interval, mk_duration( "Message retry interval. Delay for the MQTT bridge to retry sending the QoS1/QoS2 " "messages in case of ACK not received.", - #{default => "15s"} + #{default => <<"15s">>} )}, {max_inflight, mk( @@ -145,8 +142,7 @@ fields("ingress") -> mk( ref(?MODULE, "ingress_local"), #{ - desc => ?DESC(emqx_connector_mqtt_schema, "ingress_local"), - is_required => false + desc => ?DESC(emqx_connector_mqtt_schema, "ingress_local") } )} ]; @@ -163,7 +159,7 @@ fields("ingress_remote") -> )}, {qos, mk( - qos(), + emqx_schema:qos(), #{ default => 1, desc => ?DESC("ingress_remote_qos") @@ -297,4 +293,5 @@ qos() -> hoconsc:union([emqx_schema:qos(), binary()]). parse_server(Str) -> - emqx_schema:parse_server(Str, ?MQTT_HOST_OPTS). + #{hostname := Host, port := Port} = emqx_schema:parse_server(Str, ?MQTT_HOST_OPTS), + {Host, Port}. diff --git a/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_worker.erl b/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_worker.erl index ba2162993..880a99313 100644 --- a/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_worker.erl +++ b/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_worker.erl @@ -60,174 +60,262 @@ %% * Local messages are all normalised to QoS-1 when exporting to remote -module(emqx_connector_mqtt_worker). --behaviour(gen_statem). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). -include_lib("emqx/include/logger.hrl"). %% APIs -export([ - start_link/1, + start_link/2, stop/1 ]). -%% gen_statem callbacks --export([ - terminate/3, - code_change/4, - init/1, - callback_mode/0 -]). - -%% state functions --export([ - idle/3, - connected/3 -]). - %% management APIs -export([ - ensure_started/1, - ensure_stopped/1, + connect/1, status/1, ping/1, + info/1, send_to_remote/2, send_to_remote_async/3 ]). --export([get_forwards/1]). - --export([get_subscriptions/1]). 
+-export([handle_publish/3]). +-export([handle_disconnect/1]). -export_type([ config/0, ack_ref/0 ]). --type id() :: atom() | string() | pid(). --type qos() :: emqx_types:qos(). +-type name() :: term(). +% -type qos() :: emqx_types:qos(). -type config() :: map(). -type ack_ref() :: term(). --type topic() :: emqx_types:topic(). +% -type topic() :: emqx_types:topic(). -include_lib("emqx/include/logger.hrl"). -include_lib("emqx/include/emqx_mqtt.hrl"). -%% same as default in-flight limit for emqtt --define(DEFAULT_INFLIGHT_SIZE, 32). --define(DEFAULT_RECONNECT_DELAY_MS, timer:seconds(5)). --define(DEFAULT_SEG_BYTES, (1 bsl 20)). --define(DEFAULT_MAX_TOTAL_SIZE, (1 bsl 31)). +-define(REF(Name), {via, gproc, ?NAME(Name)}). +-define(NAME(Name), {n, l, Name}). %% @doc Start a bridge worker. Supported configs: -%% start_type: 'manual' (default) or 'auto', when manual, bridge will stay -%% at 'idle' state until a manual call to start it. -%% connect_module: The module which implements emqx_bridge_connect behaviour -%% and work as message batch transport layer -%% reconnect_interval: Delay in milli-seconds for the bridge worker to retry -%% in case of transportation failure. -%% max_inflight: Max number of batches allowed to send-ahead before receiving -%% confirmation from remote node/cluster %% mountpoint: The topic mount point for messages sent to remote node/cluster %% `undefined', `<<>>' or `""' to disable %% forwards: Local topics to subscribe. %% %% Find more connection specific configs in the callback modules %% of emqx_bridge_connect behaviour. -start_link(Opts) when is_list(Opts) -> - start_link(maps:from_list(Opts)); -start_link(Opts) -> - case maps:get(name, Opts, undefined) of - undefined -> - gen_statem:start_link(?MODULE, Opts, []); - Name -> - Name1 = name(Name), - gen_statem:start_link({local, Name1}, ?MODULE, Opts#{name => Name1}, []) +-spec start_link(name(), map()) -> + {ok, pid()} | {error, _Reason}. +start_link(Name, BridgeOpts) -> + ?SLOG(debug, #{ + msg => "client_starting", + name => Name, + options => BridgeOpts + }), + Conf = init_config(Name, BridgeOpts), + Options = mk_client_options(Conf, BridgeOpts), + case emqtt:start_link(Options) of + {ok, Pid} -> + true = gproc:reg_other(?NAME(Name), Pid, Conf), + {ok, Pid}; + {error, Reason} = Error -> + ?SLOG(error, #{ + msg => "client_start_failed", + config => emqx_utils:redact(BridgeOpts), + reason => Reason + }), + Error end. -ensure_started(Name) -> - gen_statem:call(name(Name), ensure_started). - -%% @doc Manually stop bridge worker. State idempotency ensured. -ensure_stopped(Name) -> - gen_statem:call(name(Name), ensure_stopped, 5000). - -stop(Pid) -> gen_statem:stop(Pid). - -status(Pid) when is_pid(Pid) -> - gen_statem:call(Pid, status); -status(Name) -> - gen_statem:call(name(Name), status). - -ping(Pid) when is_pid(Pid) -> - gen_statem:call(Pid, ping); -ping(Name) -> - gen_statem:call(name(Name), ping). - -send_to_remote(Pid, Msg) when is_pid(Pid) -> - gen_statem:call(Pid, {send_to_remote, Msg}); -send_to_remote(Name, Msg) -> - gen_statem:call(name(Name), {send_to_remote, Msg}). - -send_to_remote_async(Pid, Msg, Callback) when is_pid(Pid) -> - gen_statem:cast(Pid, {send_to_remote_async, Msg, Callback}); -send_to_remote_async(Name, Msg, Callback) -> - gen_statem:cast(name(Name), {send_to_remote_async, Msg, Callback}). - -%% @doc Return all forwards (local subscriptions). --spec get_forwards(id()) -> [topic()]. -get_forwards(Name) -> gen_statem:call(name(Name), get_forwards, timer:seconds(1000)). 
- -%% @doc Return all subscriptions (subscription over mqtt connection to remote broker). --spec get_subscriptions(id()) -> [{emqx_types:topic(), qos()}]. -get_subscriptions(Name) -> gen_statem:call(name(Name), get_subscriptions). - -callback_mode() -> [state_functions]. - -%% @doc Config should be a map(). -init(#{name := Name} = ConnectOpts) -> - ?SLOG(debug, #{ - msg => "starting_bridge_worker", - name => Name - }), - erlang:process_flag(trap_exit, true), - State = init_state(ConnectOpts), - self() ! idle, - {ok, idle, State#{ - connect_opts => pre_process_opts(ConnectOpts) - }}. - -init_state(Opts) -> - ReconnDelayMs = maps:get(reconnect_interval, Opts, ?DEFAULT_RECONNECT_DELAY_MS), - StartType = maps:get(start_type, Opts, manual), +init_config(Name, Opts) -> Mountpoint = maps:get(forward_mountpoint, Opts, undefined), - MaxInflightSize = maps:get(max_inflight, Opts, ?DEFAULT_INFLIGHT_SIZE), - Name = maps:get(name, Opts, undefined), + Subscriptions = maps:get(subscriptions, Opts, undefined), + Forwards = maps:get(forwards, Opts, undefined), #{ - start_type => StartType, - reconnect_interval => ReconnDelayMs, mountpoint => format_mountpoint(Mountpoint), - max_inflight => MaxInflightSize, - connection => undefined, - name => Name + subscriptions => pre_process_subscriptions(Subscriptions, Name, Opts), + forwards => pre_process_forwards(Forwards) }. -pre_process_opts(#{subscriptions := InConf, forwards := OutConf} = ConnectOpts) -> - ConnectOpts#{ - subscriptions => pre_process_in_out(in, InConf), - forwards => pre_process_in_out(out, OutConf) +mk_client_options(Conf, BridgeOpts) -> + Server = iolist_to_binary(maps:get(server, BridgeOpts)), + HostPort = emqx_connector_mqtt_schema:parse_server(Server), + Mountpoint = maps:get(receive_mountpoint, BridgeOpts, undefined), + Subscriptions = maps:get(subscriptions, Conf), + Vars = emqx_connector_mqtt_msg:make_pub_vars(Mountpoint, Subscriptions), + CleanStart = + case Subscriptions of + #{remote := _} -> + maps:get(clean_start, BridgeOpts); + undefined -> + %% NOTE + %% We are ignoring the user configuration here because there's currently no reliable way + %% to ensure proper session recovery according to the MQTT spec. + true + end, + Opts = maps:without( + [ + address, + auto_reconnect, + conn_type, + mountpoint, + forwards, + receive_mountpoint, + subscriptions + ], + BridgeOpts + ), + Opts#{ + msg_handler => mk_client_event_handler(Vars, #{server => Server}), + hosts => [HostPort], + clean_start => CleanStart, + force_ping => true, + proto_ver => maps:get(proto_ver, BridgeOpts, v4) }. -pre_process_in_out(_, undefined) -> +mk_client_event_handler(Vars, Opts) when Vars /= undefined -> + #{ + publish => {fun ?MODULE:handle_publish/3, [Vars, Opts]}, + disconnected => {fun ?MODULE:handle_disconnect/1, []} + }; +mk_client_event_handler(undefined, _Opts) -> + undefined. + +connect(Name) -> + #{subscriptions := Subscriptions} = get_config(Name), + case emqtt:connect(get_pid(Name)) of + {ok, Properties} -> + case subscribe_remote_topics(Name, Subscriptions) of + ok -> + {ok, Properties}; + {ok, _, _RCs} -> + {ok, Properties}; + {error, Reason} = Error -> + ?SLOG(error, #{ + msg => "client_subscribe_failed", + subscriptions => Subscriptions, + reason => Reason + }), + Error + end; + {error, Reason} = Error -> + ?SLOG(error, #{ + msg => "client_connect_failed", + reason => Reason + }), + Error + end. 
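+
+%% connect/1 resolves the emqtt client pid from the bridge name (via the gproc
+%% registration made in start_link/2), performs the MQTT connect handshake and
+%% then re-establishes the remote subscription, if one is configured.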
+ +subscribe_remote_topics(Ref, #{remote := #{topic := FromTopic, qos := QoS}}) -> + emqtt:subscribe(ref(Ref), FromTopic, QoS); +subscribe_remote_topics(_Ref, undefined) -> + ok. + +stop(Ref) -> + emqtt:stop(ref(Ref)). + +info(Ref) -> + emqtt:info(ref(Ref)). + +status(Ref) -> + try + case proplists:get_value(socket, info(Ref)) of + Socket when Socket /= undefined -> + connected; + undefined -> + connecting + end + catch + exit:{noproc, _} -> + disconnected + end. + +ping(Ref) -> + emqtt:ping(ref(Ref)). + +send_to_remote(Name, MsgIn) -> + trycall(fun() -> do_send(Name, export_msg(Name, MsgIn)) end). + +do_send(Name, {true, Msg}) -> + case emqtt:publish(get_pid(Name), Msg) of + ok -> + ok; + {ok, #{reason_code := RC}} when + RC =:= ?RC_SUCCESS; + RC =:= ?RC_NO_MATCHING_SUBSCRIBERS + -> + ok; + {ok, #{reason_code := RC, reason_code_name := Reason}} -> + ?SLOG(warning, #{ + msg => "remote_publish_failed", + message => Msg, + reason_code => RC, + reason_code_name => Reason + }), + {error, Reason}; + {error, Reason} -> + ?SLOG(info, #{ + msg => "client_failed", + reason => Reason + }), + {error, Reason} + end; +do_send(_Name, false) -> + ok. + +send_to_remote_async(Name, MsgIn, Callback) -> + trycall(fun() -> do_send_async(Name, export_msg(Name, MsgIn), Callback) end). + +do_send_async(Name, {true, Msg}, Callback) -> + Pid = get_pid(Name), + ok = emqtt:publish_async(Pid, Msg, _Timeout = infinity, Callback), + {ok, Pid}; +do_send_async(_Name, false, _Callback) -> + ok. + +ref(Pid) when is_pid(Pid) -> + Pid; +ref(Term) -> + ?REF(Term). + +trycall(Fun) -> + try + Fun() + catch + throw:noproc -> + {error, disconnected}; + exit:{noproc, _} -> + {error, disconnected} + end. + +format_mountpoint(undefined) -> undefined; -pre_process_in_out(in, #{local := LC} = Conf) when is_map(Conf) -> - Conf#{local => pre_process_in_out_common(LC)}; -pre_process_in_out(in, Conf) when is_map(Conf) -> +format_mountpoint(Prefix) -> + binary:replace(iolist_to_binary(Prefix), <<"${node}">>, atom_to_binary(node(), utf8)). + +pre_process_subscriptions(undefined, _, _) -> + undefined; +pre_process_subscriptions( + #{remote := RC, local := LC} = Conf, + BridgeName, + BridgeOpts +) when is_map(Conf) -> + Conf#{ + remote => pre_process_in_remote(RC, BridgeName, BridgeOpts), + local => pre_process_in_out_common(LC) + }; +pre_process_subscriptions(Conf, _, _) when is_map(Conf) -> %% have no 'local' field in the config + undefined. + +pre_process_forwards(undefined) -> undefined; -pre_process_in_out(out, #{remote := RC} = Conf) when is_map(Conf) -> +pre_process_forwards(#{remote := RC} = Conf) when is_map(Conf) -> Conf#{remote => pre_process_in_out_common(RC)}; -pre_process_in_out(out, Conf) when is_map(Conf) -> +pre_process_forwards(Conf) when is_map(Conf) -> %% have no 'remote' field in the config undefined. @@ -247,238 +335,131 @@ pre_process_conf(Key, Conf) -> Conf#{Key => Val} end. -code_change(_Vsn, State, Data, _Extra) -> - {ok, State, Data}. +pre_process_in_remote(#{qos := QoSIn} = Conf, BridgeName, BridgeOpts) -> + QoS = downgrade_ingress_qos(QoSIn), + case QoS of + QoSIn -> + ok; + _ -> + ?SLOG(warning, #{ + msg => "downgraded_unsupported_ingress_qos", + qos_configured => QoSIn, + qos_used => QoS, + name => BridgeName, + options => BridgeOpts + }) + end, + Conf#{qos => QoS}. -terminate(_Reason, _StateName, State) -> - _ = disconnect(State), - maybe_destroy_session(State). +downgrade_ingress_qos(2) -> + 1; +downgrade_ingress_qos(QoS) -> + QoS. 
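+
+%% Example: an ingress rule configured with qos = 2 is subscribed at qos = 1
+%% (with a warning logged), since QoS 2 ingress is not supported by the bridge.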
-maybe_destroy_session(#{connect_opts := ConnectOpts = #{clean_start := false}} = State) -> +get_pid(Name) -> + case gproc:where(?NAME(Name)) of + Pid when is_pid(Pid) -> + Pid; + undefined -> + throw(noproc) + end. + +get_config(Name) -> try - %% Destroy session if clean_start is not set. - %% Ignore any crashes, just refresh the clean_start = true. - _ = do_connect(State#{connect_opts => ConnectOpts#{clean_start => true}}), - _ = disconnect(State), - ok + gproc:lookup_value(?NAME(Name)) catch - _:_ -> + error:badarg -> + throw(noproc) + end. + +export_msg(Name, Msg) -> + case get_config(Name) of + #{forwards := Forwards = #{}, mountpoint := Mountpoint} -> + {true, export_msg(Mountpoint, Forwards, Msg)}; + #{forwards := undefined} -> + ?SLOG(error, #{ + msg => "forwarding_unavailable", + message => Msg, + reason => "egress is not configured" + }), + false + end. + +export_msg(Mountpoint, Forwards, Msg) -> + Vars = emqx_connector_mqtt_msg:make_pub_vars(Mountpoint, Forwards), + emqx_connector_mqtt_msg:to_remote_msg(Msg, Vars). + +%% + +handle_publish(#{properties := Props} = MsgIn, Vars, Opts) -> + Msg = import_msg(MsgIn, Opts), + ?SLOG(debug, #{ + msg => "publish_local", + message => Msg, + vars => Vars + }), + case Vars of + #{on_message_received := {Mod, Func, Args}} -> + _ = erlang:apply(Mod, Func, [Msg | Args]); + _ -> ok - end; -maybe_destroy_session(_State) -> + end, + maybe_publish_local(Msg, Vars, Props). + +handle_disconnect(_Reason) -> ok. -%% ensure_started will be deprecated in the future -idle({call, From}, ensure_started, State) -> - case do_connect(State) of - {ok, State1} -> - {next_state, connected, State1, [{reply, From, ok}, {state_timeout, 0, connected}]}; - {error, Reason, _State} -> - {keep_state_and_data, [{reply, From, {error, Reason}}]} - end; -idle({call, From}, {send_to_remote, _}, _State) -> - {keep_state_and_data, [{reply, From, {error, {recoverable_error, not_connected}}}]}; -%% @doc Standing by for manual start. -idle(info, idle, #{start_type := manual}) -> - keep_state_and_data; -%% @doc Standing by for auto start. -idle(info, idle, #{start_type := auto} = State) -> - connecting(State); -idle(state_timeout, reconnect, State) -> - connecting(State); -idle(Type, Content, State) -> - common(idle, Type, Content, State). - -connecting(#{reconnect_interval := ReconnectDelayMs} = State) -> - case do_connect(State) of - {ok, State1} -> - {next_state, connected, State1, {state_timeout, 0, connected}}; +maybe_publish_local(Msg, Vars, Props) -> + case emqx_utils_maps:deep_get([local, topic], Vars, undefined) of + %% local topic is not set, discard it + undefined -> + ok; _ -> - {keep_state_and_data, {state_timeout, ReconnectDelayMs, reconnect}} + emqx_broker:publish(emqx_connector_mqtt_msg:to_broker_msg(Msg, Vars, Props)) end. 
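+
+%% Ingress messages are republished locally only when a local topic is
+%% configured; otherwise they are handled solely by the on_message_received
+%% hook, if one is installed.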
-connected(state_timeout, connected, State) -> - %% nothing to do - {keep_state, State}; -connected({call, From}, {send_to_remote, Msg}, State) -> - case do_send(State, Msg) of - {ok, NState} -> - {keep_state, NState, [{reply, From, ok}]}; - {error, Reason} -> - {keep_state_and_data, [[reply, From, {error, Reason}]]} - end; -connected(cast, {send_to_remote_async, Msg, Callback}, State) -> - _ = do_send_async(State, Msg, Callback), - {keep_state, State}; -connected( - info, - {disconnected, Conn, Reason}, - #{connection := Connection, name := Name, reconnect_interval := ReconnectDelayMs} = State -) -> - ?tp(info, disconnected, #{name => Name, reason => Reason}), - case Conn =:= maps:get(client_pid, Connection, undefined) of - true -> - {next_state, idle, State#{connection => undefined}, - {state_timeout, ReconnectDelayMs, reconnect}}; - false -> - keep_state_and_data - end; -connected(Type, Content, State) -> - common(connected, Type, Content, State). - -%% Common handlers -common(StateName, {call, From}, status, _State) -> - {keep_state_and_data, [{reply, From, StateName}]}; -common(_StateName, {call, From}, ping, #{connection := Conn} = _State) -> - Reply = emqx_connector_mqtt_mod:ping(Conn), - {keep_state_and_data, [{reply, From, Reply}]}; -common(_StateName, {call, From}, ensure_stopped, #{connection := undefined} = _State) -> - {keep_state_and_data, [{reply, From, ok}]}; -common(_StateName, {call, From}, ensure_stopped, #{connection := Conn} = State) -> - Reply = emqx_connector_mqtt_mod:stop(Conn), - {next_state, idle, State#{connection => undefined}, [{reply, From, Reply}]}; -common(_StateName, {call, From}, get_forwards, #{connect_opts := #{forwards := Forwards}}) -> - {keep_state_and_data, [{reply, From, Forwards}]}; -common(_StateName, {call, From}, get_subscriptions, #{connection := Connection}) -> - {keep_state_and_data, [{reply, From, maps:get(subscriptions, Connection, #{})}]}; -common(_StateName, {call, From}, Req, _State) -> - {keep_state_and_data, [{reply, From, {error, {unsupported_request, Req}}}]}; -common(_StateName, info, {'EXIT', _, _}, State) -> - {keep_state, State}; -common(StateName, Type, Content, #{name := Name} = State) -> - ?SLOG(error, #{ - msg => "bridge_discarded_event", - name => Name, - type => Type, - state_name => StateName, - content => Content - }), - {keep_state, State}. - -do_connect( +import_msg( #{ - connect_opts := ConnectOpts, - name := Name - } = State -) -> - case emqx_connector_mqtt_mod:start(ConnectOpts) of - {ok, Conn} -> - ?tp(info, connected, #{name => Name}), - {ok, State#{connection => Conn}}; - {error, Reason} -> - ConnectOpts1 = obfuscate(ConnectOpts), - ?SLOG(error, #{ - msg => "failed_to_connect", - config => ConnectOpts1, - reason => Reason - }), - {error, Reason, State} - end. 
- -do_send(#{connect_opts := #{forwards := undefined}}, Msg) -> - ?SLOG(error, #{ - msg => - "cannot_forward_messages_to_remote_broker" - "_as_'egress'_is_not_configured", - messages => Msg - }); -do_send( - #{ - connection := Connection, - mountpoint := Mountpoint, - connect_opts := #{forwards := Forwards} - } = State, - Msg -) -> - Vars = emqx_connector_mqtt_msg:make_pub_vars(Mountpoint, Forwards), - ExportMsg = emqx_connector_mqtt_msg:to_remote_msg(Msg, Vars), - ?SLOG(debug, #{ - msg => "publish_to_remote_broker", - message => Msg, - vars => Vars - }), - case emqx_connector_mqtt_mod:send(Connection, ExportMsg) of - ok -> - {ok, State}; - {ok, #{reason_code := RC}} when - RC =:= ?RC_SUCCESS; - RC =:= ?RC_NO_MATCHING_SUBSCRIBERS - -> - {ok, State}; - {ok, #{reason_code := RC, reason_code_name := RCN}} -> - ?SLOG(warning, #{ - msg => "publish_to_remote_node_falied", - message => Msg, - reason_code => RC, - reason_code_name => RCN - }), - {error, RCN}; - {error, Reason} -> - ?SLOG(info, #{ - msg => "mqtt_bridge_produce_failed", - reason => Reason - }), - {error, Reason} - end. - -do_send_async(#{connect_opts := #{forwards := undefined}}, Msg, _Callback) -> - %% TODO: eval callback with undefined error - ?SLOG(error, #{ - msg => - "cannot_forward_messages_to_remote_broker" - "_as_'egress'_is_not_configured", - messages => Msg - }); -do_send_async( - #{ - connection := Connection, - mountpoint := Mountpoint, - connect_opts := #{forwards := Forwards} + dup := Dup, + payload := Payload, + properties := Props, + qos := QoS, + retain := Retain, + topic := Topic }, - Msg, - Callback + #{server := Server} ) -> - Vars = emqx_connector_mqtt_msg:make_pub_vars(Mountpoint, Forwards), - ExportMsg = emqx_connector_mqtt_msg:to_remote_msg(Msg, Vars), - ?SLOG(debug, #{ - msg => "publish_to_remote_broker", - message => Msg, - vars => Vars - }), - emqx_connector_mqtt_mod:send_async(Connection, ExportMsg, Callback). + #{ + id => emqx_guid:to_hexstr(emqx_guid:gen()), + server => Server, + payload => Payload, + topic => Topic, + qos => QoS, + dup => Dup, + retain => Retain, + pub_props => printable_maps(Props), + message_received_at => erlang:system_time(millisecond) + }. -disconnect(#{connection := Conn} = State) when Conn =/= undefined -> - emqx_connector_mqtt_mod:stop(Conn), - State#{connection => undefined}; -disconnect(State) -> - State. - -format_mountpoint(undefined) -> - undefined; -format_mountpoint(Prefix) -> - binary:replace(iolist_to_binary(Prefix), <<"${node}">>, atom_to_binary(node(), utf8)). - -name(Id) -> list_to_atom(str(Id)). - -obfuscate(Map) -> +printable_maps(undefined) -> + #{}; +printable_maps(Headers) -> maps:fold( - fun(K, V, Acc) -> - case is_sensitive(K) of - true -> [{K, '***'} | Acc]; - false -> [{K, V} | Acc] - end + fun + ('User-Property', V0, AccIn) when is_list(V0) -> + AccIn#{ + 'User-Property' => maps:from_list(V0), + 'User-Property-Pairs' => [ + #{ + key => Key, + value => Value + } + || {Key, Value} <- V0 + ] + }; + (K, V0, AccIn) -> + AccIn#{K => V0} end, - [], - Map + #{}, + Headers ). - -is_sensitive(password) -> true; -is_sensitive(ssl_opts) -> true; -is_sensitive(_) -> false. - -str(A) when is_atom(A) -> - atom_to_list(A); -str(B) when is_binary(B) -> - binary_to_list(B); -str(S) when is_list(S) -> - S. 
diff --git a/apps/emqx_connector/test/emqx_connector_http_tests.erl b/apps/emqx_connector/test/emqx_connector_http_tests.erl
new file mode 100644
index 000000000..8d0fa6d2c
--- /dev/null
+++ b/apps/emqx_connector/test/emqx_connector_http_tests.erl
@@ -0,0 +1,90 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%%     http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%--------------------------------------------------------------------
+-module(emqx_connector_http_tests).
+
+-include_lib("eunit/include/eunit.hrl").
+
+-define(MY_SECRET, <<"my_precious">>).
+
+wrap_auth_headers_test_() ->
+    {setup,
+        fun() ->
+            meck:expect(ehttpc_sup, start_pool, 2, {ok, foo}),
+            meck:expect(ehttpc, request, fun(_, _, Req, _, _) -> {ok, 200, Req} end),
+            meck:expect(ehttpc_pool, pick_worker, 1, self()),
+            [ehttpc_sup, ehttpc, ehttpc_pool]
+        end,
+        fun meck:unload/1, fun(_) ->
+            Config = #{
+                base_url => #{
+                    scheme => http,
+                    host => "localhost",
+                    port => 18083,
+                    path => "/status"
+                },
+                connect_timeout => 1000,
+                pool_type => random,
+                pool_size => 1,
+                request => #{
+                    method => get,
+                    path => "/status",
+                    headers => auth_headers()
+                }
+            },
+            {ok, #{request := #{headers := Headers}} = State} = emqx_connector_http:on_start(
+                <<"test">>, Config
+            ),
+            {ok, 200, Req} = emqx_connector_http:on_query(foo, {send_message, #{}}, State),
+            Tests =
+                [
+                    ?_assert(is_wrapped(V))
+                 || H <- Headers, is_tuple({K, V} = H), is_auth_header(untmpl(K))
+                ],
+            [
+                ?_assertEqual(4, length(Tests)),
+                ?_assert(is_unwrapped_headers(element(2, Req)))
+                | Tests
+            ]
+        end}.
+
+auth_headers() ->
+    [
+        {<<"Authorization">>, ?MY_SECRET},
+        {<<"authorization">>, ?MY_SECRET},
+        {<<"Proxy-Authorization">>, ?MY_SECRET},
+        {<<"proxy-authorization">>, ?MY_SECRET},
+        {<<"X-Custom-Header">>, <<"foobar">>}
+    ].
+
+is_auth_header(<<"Authorization">>) -> true;
+is_auth_header(<<"Proxy-Authorization">>) -> true;
+is_auth_header(<<"authorization">>) -> true;
+is_auth_header(<<"proxy-authorization">>) -> true;
+is_auth_header(_Other) -> false.
+
+is_wrapped(Secret) when is_function(Secret) ->
+    untmpl(emqx_secret:unwrap(Secret)) =:= ?MY_SECRET;
+is_wrapped(_Other) ->
+    false.
+
+untmpl([{_, V} | _]) -> V.
+
+is_unwrapped_headers(Headers) ->
+    lists:all(fun is_unwrapped_header/1, Headers).
+
+is_unwrapped_header({_, V}) when is_function(V) -> false;
+is_unwrapped_header({_, [{str, _V}]}) -> throw(unexpected_tmpl_token);
+is_unwrapped_header(_) -> true.
diff --git a/apps/emqx_connector/test/emqx_connector_jwt_worker_SUITE.erl b/apps/emqx_connector/test/emqx_connector_jwt_worker_SUITE.erl
index e8355f746..a079d632f 100644
--- a/apps/emqx_connector/test/emqx_connector_jwt_worker_SUITE.erl
+++ b/apps/emqx_connector/test/emqx_connector_jwt_worker_SUITE.erl
@@ -127,8 +127,12 @@ t_unknown_error(_Config) ->
             1_000
         ),
         fun(Trace) ->
+            %% there seem to be some occasions when empty_key is
+            %% returned instead.
?assertMatch( - [#{error := {invalid_private_key, some_strange_error}}], + [#{error := Error}] when + Error =:= {invalid_private_key, some_strange_error} orelse + Error =:= empty_key, ?of_kind(connector_jwt_worker_startup_error, Trace) ), ok @@ -360,3 +364,23 @@ t_unknown_requests(_Config) -> gen_server:cast(Worker, unknown_cast), ?assertEqual({error, bad_call}, gen_server:call(Worker, unknown_call)), ok. + +t_truncated_private_key(_Config) -> + Config0 = generate_config(), + Config = Config0#{private_key := <<"-----BEGIN PRIVATE KEY-----\nMIIEvQI...">>}, + process_flag(trap_exit, true), + ?check_trace( + ?wait_async_action( + ?assertMatch({ok, _}, emqx_connector_jwt_worker:start_link(Config)), + #{?snk_kind := connector_jwt_worker_startup_error}, + 1_000 + ), + fun(Trace) -> + ?assertMatch( + [#{error := function_clause}], + ?of_kind(connector_jwt_worker_startup_error, Trace) + ), + ok + end + ), + ok. diff --git a/apps/emqx_connector/test/emqx_connector_mongo_SUITE.erl b/apps/emqx_connector/test/emqx_connector_mongo_SUITE.erl index 2be30466c..9067c85de 100644 --- a/apps/emqx_connector/test/emqx_connector_mongo_SUITE.erl +++ b/apps/emqx_connector/test/emqx_connector_mongo_SUITE.erl @@ -64,15 +64,15 @@ t_lifecycle(_Config) -> mongo_config() ). -perform_lifecycle_check(PoolName, InitialConfig) -> +perform_lifecycle_check(ResourceId, InitialConfig) -> {ok, #{config := CheckedConfig}} = emqx_resource:check_config(?MONGO_RESOURCE_MOD, InitialConfig), {ok, #{ - state := #{poolname := ReturnedPoolName} = State, + state := #{pool_name := PoolName} = State, status := InitialStatus }} = emqx_resource:create_local( - PoolName, + ResourceId, ?CONNECTOR_RESOURCE_GROUP, ?MONGO_RESOURCE_MOD, CheckedConfig, @@ -84,39 +84,39 @@ perform_lifecycle_check(PoolName, InitialConfig) -> state := State, status := InitialStatus }} = - emqx_resource:get_instance(PoolName), - ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), + emqx_resource:get_instance(ResourceId), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)), % % Perform query as further check that the resource is working as expected - ?assertMatch({ok, []}, emqx_resource:query(PoolName, test_query_find())), - ?assertMatch({ok, undefined}, emqx_resource:query(PoolName, test_query_find_one())), - ?assertEqual(ok, emqx_resource:stop(PoolName)), + ?assertMatch({ok, []}, emqx_resource:query(ResourceId, test_query_find())), + ?assertMatch({ok, undefined}, emqx_resource:query(ResourceId, test_query_find_one())), + ?assertEqual(ok, emqx_resource:stop(ResourceId)), % Resource will be listed still, but state will be changed and healthcheck will fail % as the worker no longer exists. {ok, ?CONNECTOR_RESOURCE_GROUP, #{ state := State, status := StoppedStatus }} = - emqx_resource:get_instance(PoolName), + emqx_resource:get_instance(ResourceId), ?assertEqual(stopped, StoppedStatus), - ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)), + ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(ResourceId)), % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself. 
- ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), % Can call stop/1 again on an already stopped instance - ?assertEqual(ok, emqx_resource:stop(PoolName)), + ?assertEqual(ok, emqx_resource:stop(ResourceId)), % Make sure it can be restarted and the healthchecks and queries work properly - ?assertEqual(ok, emqx_resource:restart(PoolName)), + ?assertEqual(ok, emqx_resource:restart(ResourceId)), % async restart, need to wait resource timer:sleep(500), {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} = - emqx_resource:get_instance(PoolName), - ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), - ?assertMatch({ok, []}, emqx_resource:query(PoolName, test_query_find())), - ?assertMatch({ok, undefined}, emqx_resource:query(PoolName, test_query_find_one())), + emqx_resource:get_instance(ResourceId), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)), + ?assertMatch({ok, []}, emqx_resource:query(ResourceId, test_query_find())), + ?assertMatch({ok, undefined}, emqx_resource:query(ResourceId, test_query_find_one())), % Stop and remove the resource in one go. - ?assertEqual(ok, emqx_resource:remove_local(PoolName)), - ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + ?assertEqual(ok, emqx_resource:remove_local(ResourceId)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), % Should not even be able to get the resource data out of ets now unlike just stopping. - ?assertEqual({error, not_found}, emqx_resource:get_instance(PoolName)). + ?assertEqual({error, not_found}, emqx_resource:get_instance(ResourceId)). % %%------------------------------------------------------------------------------ % %% Helpers diff --git a/apps/emqx_connector/test/emqx_connector_mqtt_tests.erl b/apps/emqx_connector/test/emqx_connector_mqtt_tests.erl deleted file mode 100644 index 88c8b5218..000000000 --- a/apps/emqx_connector/test/emqx_connector_mqtt_tests.erl +++ /dev/null @@ -1,60 +0,0 @@ -%%-------------------------------------------------------------------- -%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%%-------------------------------------------------------------------- - --module(emqx_connector_mqtt_tests). - --include_lib("eunit/include/eunit.hrl"). --include_lib("emqx/include/emqx_mqtt.hrl"). - -send_and_ack_test() -> - %% delegate from gen_rpc to rpc for unit test - meck:new(emqtt, [passthrough, no_history]), - meck:expect( - emqtt, - start_link, - 1, - fun(_) -> - {ok, spawn_link(fun() -> ok end)} - end - ), - meck:expect(emqtt, connect, 1, {ok, dummy}), - meck:expect( - emqtt, - stop, - 1, - fun(Pid) -> Pid ! stop end - ), - meck:expect( - emqtt, - publish, - 2, - fun(Client, Msg) -> - Client ! 
{publish, Msg}, - %% as packet id - {ok, Msg} - end - ), - try - Max = 1, - Batch = lists:seq(1, Max), - {ok, Conn} = emqx_connector_mqtt_mod:start(#{server => "127.0.0.1:1883"}), - %% return last packet id as batch reference - {ok, _AckRef} = emqx_connector_mqtt_mod:send(Conn, Batch), - - ok = emqx_connector_mqtt_mod:stop(Conn) - after - meck:unload(emqtt) - end. diff --git a/apps/emqx_connector/test/emqx_connector_mqtt_worker_tests.erl b/apps/emqx_connector/test/emqx_connector_mqtt_worker_tests.erl deleted file mode 100644 index 49bff7bbc..000000000 --- a/apps/emqx_connector/test/emqx_connector_mqtt_worker_tests.erl +++ /dev/null @@ -1,101 +0,0 @@ -%%-------------------------------------------------------------------- -%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%%-------------------------------------------------------------------- - --module(emqx_connector_mqtt_worker_tests). - --include_lib("eunit/include/eunit.hrl"). --include_lib("emqx/include/emqx.hrl"). --include_lib("emqx/include/emqx_mqtt.hrl"). - --define(BRIDGE_NAME, test). --define(BRIDGE_REG_NAME, emqx_connector_mqtt_worker_test). --define(WAIT(PATTERN, TIMEOUT), - receive - PATTERN -> - ok - after TIMEOUT -> - error(timeout) - end -). - --export([start/1, send/2, stop/1]). - -start(#{connect_result := Result, test_pid := Pid, test_ref := Ref}) -> - case is_pid(Pid) of - true -> Pid ! {connection_start_attempt, Ref}; - false -> ok - end, - Result. - -send(SendFun, Batch) when is_function(SendFun, 2) -> - SendFun(Batch). - -stop(_Pid) -> ok. - -%% connect first, disconnect, then connect again -disturbance_test() -> - meck:new(emqx_connector_mqtt_mod, [passthrough, no_history]), - meck:expect(emqx_connector_mqtt_mod, start, 1, fun(Conf) -> start(Conf) end), - meck:expect(emqx_connector_mqtt_mod, send, 2, fun(SendFun, Batch) -> send(SendFun, Batch) end), - meck:expect(emqx_connector_mqtt_mod, stop, 1, fun(Pid) -> stop(Pid) end), - try - emqx_metrics:start_link(), - Ref = make_ref(), - TestPid = self(), - Config = make_config(Ref, TestPid, {ok, #{client_pid => TestPid}}), - {ok, Pid} = emqx_connector_mqtt_worker:start_link(Config#{name => bridge_disturbance}), - ?assertEqual(Pid, whereis(bridge_disturbance)), - ?WAIT({connection_start_attempt, Ref}, 1000), - Pid ! {disconnected, TestPid, test}, - ?WAIT({connection_start_attempt, Ref}, 1000), - emqx_metrics:stop(), - ok = emqx_connector_mqtt_worker:stop(Pid) - after - meck:unload(emqx_connector_mqtt_mod) - end. 
- -manual_start_stop_test() -> - meck:new(emqx_connector_mqtt_mod, [passthrough, no_history]), - meck:expect(emqx_connector_mqtt_mod, start, 1, fun(Conf) -> start(Conf) end), - meck:expect(emqx_connector_mqtt_mod, send, 2, fun(SendFun, Batch) -> send(SendFun, Batch) end), - meck:expect(emqx_connector_mqtt_mod, stop, 1, fun(Pid) -> stop(Pid) end), - try - emqx_metrics:start_link(), - Ref = make_ref(), - TestPid = self(), - BridgeName = manual_start_stop, - Config0 = make_config(Ref, TestPid, {ok, #{client_pid => TestPid}}), - Config = Config0#{start_type := manual}, - {ok, Pid} = emqx_connector_mqtt_worker:start_link(Config#{name => BridgeName}), - %% call ensure_started again should yield the same result - ok = emqx_connector_mqtt_worker:ensure_started(BridgeName), - emqx_connector_mqtt_worker:ensure_stopped(BridgeName), - emqx_metrics:stop(), - ok = emqx_connector_mqtt_worker:stop(Pid) - after - meck:unload(emqx_connector_mqtt_mod) - end. - -make_config(Ref, TestPid, Result) -> - #{ - start_type => auto, - subscriptions => undefined, - forwards => undefined, - reconnect_interval => 50, - test_pid => TestPid, - test_ref => Ref, - connect_result => Result - }. diff --git a/apps/emqx_connector/test/emqx_connector_mysql_SUITE.erl b/apps/emqx_connector/test/emqx_connector_mysql_SUITE.erl index dc5826766..a0455c92c 100644 --- a/apps/emqx_connector/test/emqx_connector_mysql_SUITE.erl +++ b/apps/emqx_connector/test/emqx_connector_mysql_SUITE.erl @@ -64,14 +64,14 @@ t_lifecycle(_Config) -> mysql_config() ). -perform_lifecycle_check(PoolName, InitialConfig) -> +perform_lifecycle_check(ResourceId, InitialConfig) -> {ok, #{config := CheckedConfig}} = emqx_resource:check_config(?MYSQL_RESOURCE_MOD, InitialConfig), {ok, #{ - state := #{poolname := ReturnedPoolName} = State, + state := #{pool_name := PoolName} = State, status := InitialStatus }} = emqx_resource:create_local( - PoolName, + ResourceId, ?CONNECTOR_RESOURCE_GROUP, ?MYSQL_RESOURCE_MOD, CheckedConfig, @@ -83,53 +83,53 @@ perform_lifecycle_check(PoolName, InitialConfig) -> state := State, status := InitialStatus }} = - emqx_resource:get_instance(PoolName), - ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), + emqx_resource:get_instance(ResourceId), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)), % % Perform query as further check that the resource is working as expected - ?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, test_query_no_params())), - ?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, test_query_with_params())), + ?assertMatch({ok, _, [[1]]}, emqx_resource:query(ResourceId, test_query_no_params())), + ?assertMatch({ok, _, [[1]]}, emqx_resource:query(ResourceId, test_query_with_params())), ?assertMatch( {ok, _, [[1]]}, emqx_resource:query( - PoolName, + ResourceId, test_query_with_params_and_timeout() ) ), - ?assertEqual(ok, emqx_resource:stop(PoolName)), + ?assertEqual(ok, emqx_resource:stop(ResourceId)), % Resource will be listed still, but state will be changed and healthcheck will fail % as the worker no longer exists. {ok, ?CONNECTOR_RESOURCE_GROUP, #{ state := State, status := StoppedStatus }} = - emqx_resource:get_instance(PoolName), + emqx_resource:get_instance(ResourceId), ?assertEqual(stopped, StoppedStatus), - ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)), + ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(ResourceId)), % Resource healthcheck shortcuts things by checking ets. 
Go deeper by checking pool itself. - ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), % Can call stop/1 again on an already stopped instance - ?assertEqual(ok, emqx_resource:stop(PoolName)), + ?assertEqual(ok, emqx_resource:stop(ResourceId)), % Make sure it can be restarted and the healthchecks and queries work properly - ?assertEqual(ok, emqx_resource:restart(PoolName)), + ?assertEqual(ok, emqx_resource:restart(ResourceId)), % async restart, need to wait resource timer:sleep(500), {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} = - emqx_resource:get_instance(PoolName), - ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), - ?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, test_query_no_params())), - ?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, test_query_with_params())), + emqx_resource:get_instance(ResourceId), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)), + ?assertMatch({ok, _, [[1]]}, emqx_resource:query(ResourceId, test_query_no_params())), + ?assertMatch({ok, _, [[1]]}, emqx_resource:query(ResourceId, test_query_with_params())), ?assertMatch( {ok, _, [[1]]}, emqx_resource:query( - PoolName, + ResourceId, test_query_with_params_and_timeout() ) ), % Stop and remove the resource in one go. - ?assertEqual(ok, emqx_resource:remove_local(PoolName)), - ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + ?assertEqual(ok, emqx_resource:remove_local(ResourceId)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), % Should not even be able to get the resource data out of ets now unlike just stopping. - ?assertEqual({error, not_found}, emqx_resource:get_instance(PoolName)). + ?assertEqual({error, not_found}, emqx_resource:get_instance(ResourceId)). % %%------------------------------------------------------------------------------ % %% Helpers diff --git a/apps/emqx_connector/test/emqx_connector_pgsql_SUITE.erl b/apps/emqx_connector/test/emqx_connector_pgsql_SUITE.erl index 2f77ca38d..a4ac4f932 100644 --- a/apps/emqx_connector/test/emqx_connector_pgsql_SUITE.erl +++ b/apps/emqx_connector/test/emqx_connector_pgsql_SUITE.erl @@ -64,15 +64,15 @@ t_lifecycle(_Config) -> pgsql_config() ). 
-perform_lifecycle_check(PoolName, InitialConfig) -> +perform_lifecycle_check(ResourceId, InitialConfig) -> {ok, #{config := CheckedConfig}} = emqx_resource:check_config(?PGSQL_RESOURCE_MOD, InitialConfig), {ok, #{ - state := #{poolname := ReturnedPoolName} = State, + state := #{pool_name := PoolName} = State, status := InitialStatus }} = emqx_resource:create_local( - PoolName, + ResourceId, ?CONNECTOR_RESOURCE_GROUP, ?PGSQL_RESOURCE_MOD, CheckedConfig, @@ -84,39 +84,39 @@ perform_lifecycle_check(PoolName, InitialConfig) -> state := State, status := InitialStatus }} = - emqx_resource:get_instance(PoolName), - ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), + emqx_resource:get_instance(ResourceId), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)), % % Perform query as further check that the resource is working as expected - ?assertMatch({ok, _, [{1}]}, emqx_resource:query(PoolName, test_query_no_params())), - ?assertMatch({ok, _, [{1}]}, emqx_resource:query(PoolName, test_query_with_params())), - ?assertEqual(ok, emqx_resource:stop(PoolName)), + ?assertMatch({ok, _, [{1}]}, emqx_resource:query(ResourceId, test_query_no_params())), + ?assertMatch({ok, _, [{1}]}, emqx_resource:query(ResourceId, test_query_with_params())), + ?assertEqual(ok, emqx_resource:stop(ResourceId)), % Resource will be listed still, but state will be changed and healthcheck will fail % as the worker no longer exists. {ok, ?CONNECTOR_RESOURCE_GROUP, #{ state := State, status := StoppedStatus }} = - emqx_resource:get_instance(PoolName), + emqx_resource:get_instance(ResourceId), ?assertEqual(stopped, StoppedStatus), - ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)), + ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(ResourceId)), % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself. - ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), % Can call stop/1 again on an already stopped instance - ?assertEqual(ok, emqx_resource:stop(PoolName)), + ?assertEqual(ok, emqx_resource:stop(ResourceId)), % Make sure it can be restarted and the healthchecks and queries work properly - ?assertEqual(ok, emqx_resource:restart(PoolName)), + ?assertEqual(ok, emqx_resource:restart(ResourceId)), % async restart, need to wait resource timer:sleep(500), {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} = - emqx_resource:get_instance(PoolName), - ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), - ?assertMatch({ok, _, [{1}]}, emqx_resource:query(PoolName, test_query_no_params())), - ?assertMatch({ok, _, [{1}]}, emqx_resource:query(PoolName, test_query_with_params())), + emqx_resource:get_instance(ResourceId), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)), + ?assertMatch({ok, _, [{1}]}, emqx_resource:query(ResourceId, test_query_no_params())), + ?assertMatch({ok, _, [{1}]}, emqx_resource:query(ResourceId, test_query_with_params())), % Stop and remove the resource in one go. - ?assertEqual(ok, emqx_resource:remove_local(PoolName)), - ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + ?assertEqual(ok, emqx_resource:remove_local(ResourceId)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), % Should not even be able to get the resource data out of ets now unlike just stopping. 
- ?assertEqual({error, not_found}, emqx_resource:get_instance(PoolName)). + ?assertEqual({error, not_found}, emqx_resource:get_instance(ResourceId)). % %%------------------------------------------------------------------------------ % %% Helpers diff --git a/apps/emqx_connector/test/emqx_connector_redis_SUITE.erl b/apps/emqx_connector/test/emqx_connector_redis_SUITE.erl index fa742da0b..e6df4f711 100644 --- a/apps/emqx_connector/test/emqx_connector_redis_SUITE.erl +++ b/apps/emqx_connector/test/emqx_connector_redis_SUITE.erl @@ -27,6 +27,8 @@ -define(REDIS_SINGLE_PORT, 6379). -define(REDIS_SENTINEL_HOST, "redis-sentinel"). -define(REDIS_SENTINEL_PORT, 26379). +-define(REDIS_CLUSTER_HOST, "redis-cluster-1"). +-define(REDIS_CLUSTER_PORT, 6379). -define(REDIS_RESOURCE_MOD, emqx_connector_redis). all() -> @@ -36,22 +38,16 @@ groups() -> []. init_per_suite(Config) -> - case - emqx_common_test_helpers:is_all_tcp_servers_available( - [ - {?REDIS_SINGLE_HOST, ?REDIS_SINGLE_PORT}, - {?REDIS_SENTINEL_HOST, ?REDIS_SENTINEL_PORT} - ] - ) - of - true -> - ok = emqx_common_test_helpers:start_apps([emqx_conf]), - ok = emqx_connector_test_helpers:start_apps([emqx_resource]), - {ok, _} = application:ensure_all_started(emqx_connector), - Config; - false -> - {skip, no_redis} - end. + Checks = + case os:getenv("IS_CI") of + "yes" -> 10; + _ -> 1 + end, + ok = wait_for_redis(Checks), + ok = emqx_common_test_helpers:start_apps([emqx_conf]), + ok = emqx_connector_test_helpers:start_apps([emqx_resource]), + {ok, _} = application:ensure_all_started(emqx_connector), + Config. end_per_suite(_Config) -> ok = emqx_common_test_helpers:stop_apps([emqx_resource]), @@ -63,9 +59,27 @@ init_per_testcase(_, Config) -> end_per_testcase(_, _Config) -> ok. -% %%------------------------------------------------------------------------------ -% %% Testcases -% %%------------------------------------------------------------------------------ +wait_for_redis(0) -> + throw(timeout); +wait_for_redis(Checks) -> + case + emqx_common_test_helpers:is_all_tcp_servers_available( + [ + {?REDIS_SINGLE_HOST, ?REDIS_SINGLE_PORT}, + {?REDIS_SENTINEL_HOST, ?REDIS_SENTINEL_PORT} + ] + ) + of + true -> + ok; + false -> + timer:sleep(1000), + wait_for_redis(Checks - 1) + end. + +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ t_single_lifecycle(_Config) -> perform_lifecycle_check( @@ -88,14 +102,14 @@ t_sentinel_lifecycle(_Config) -> [<<"PING">>] ). 
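The `wait_for_redis/1` helper introduced above polls until the Redis containers accept TCP connections instead of skipping the suite outright. The same countdown shape works for any TCP dependency in CI; a generic sketch (helper name hypothetical):

```erlang
%% Poll a TCP endpoint once per second until it accepts a connection,
%% throwing 'timeout' when the attempts run out (mirrors wait_for_redis/1).
wait_for_tcp(_Host, _Port, 0) ->
    throw(timeout);
wait_for_tcp(Host, Port, Attempts) ->
    case gen_tcp:connect(Host, Port, [binary, {active, false}], 1000) of
        {ok, Socket} ->
            ok = gen_tcp:close(Socket);
        {error, _} ->
            timer:sleep(1000),
            wait_for_tcp(Host, Port, Attempts - 1)
    end.
```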
-perform_lifecycle_check(PoolName, InitialConfig, RedisCommand) -> +perform_lifecycle_check(ResourceId, InitialConfig, RedisCommand) -> {ok, #{config := CheckedConfig}} = emqx_resource:check_config(?REDIS_RESOURCE_MOD, InitialConfig), {ok, #{ - state := #{poolname := ReturnedPoolName} = State, + state := #{pool_name := PoolName} = State, status := InitialStatus }} = emqx_resource:create_local( - PoolName, + ResourceId, ?CONNECTOR_RESOURCE_GROUP, ?REDIS_RESOURCE_MOD, CheckedConfig, @@ -107,45 +121,49 @@ perform_lifecycle_check(PoolName, InitialConfig, RedisCommand) -> state := State, status := InitialStatus }} = - emqx_resource:get_instance(PoolName), - ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), + emqx_resource:get_instance(ResourceId), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)), % Perform query as further check that the resource is working as expected - ?assertEqual({ok, <<"PONG">>}, emqx_resource:query(PoolName, {cmd, RedisCommand})), + ?assertEqual({ok, <<"PONG">>}, emqx_resource:query(ResourceId, {cmd, RedisCommand})), ?assertEqual( {ok, [{ok, <<"PONG">>}, {ok, <<"PONG">>}]}, - emqx_resource:query(PoolName, {cmds, [RedisCommand, RedisCommand]}) + emqx_resource:query(ResourceId, {cmds, [RedisCommand, RedisCommand]}) ), ?assertMatch( - {error, [{ok, <<"PONG">>}, {error, _}]}, - emqx_resource:query(PoolName, {cmds, [RedisCommand, [<<"INVALID_COMMAND">>]]}) + {error, {unrecoverable_error, [{ok, <<"PONG">>}, {error, _}]}}, + emqx_resource:query( + ResourceId, + {cmds, [RedisCommand, [<<"INVALID_COMMAND">>]]}, + #{timeout => 500} + ) ), - ?assertEqual(ok, emqx_resource:stop(PoolName)), + ?assertEqual(ok, emqx_resource:stop(ResourceId)), % Resource will be listed still, but state will be changed and healthcheck will fail % as the worker no longer exists. {ok, ?CONNECTOR_RESOURCE_GROUP, #{ state := State, status := StoppedStatus }} = - emqx_resource:get_instance(PoolName), + emqx_resource:get_instance(ResourceId), ?assertEqual(stopped, StoppedStatus), - ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)), + ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(ResourceId)), % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself. - ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), % Can call stop/1 again on an already stopped instance - ?assertEqual(ok, emqx_resource:stop(PoolName)), + ?assertEqual(ok, emqx_resource:stop(ResourceId)), % Make sure it can be restarted and the healthchecks and queries work properly - ?assertEqual(ok, emqx_resource:restart(PoolName)), + ?assertEqual(ok, emqx_resource:restart(ResourceId)), % async restart, need to wait resource timer:sleep(500), {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} = - emqx_resource:get_instance(PoolName), - ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), - ?assertEqual({ok, <<"PONG">>}, emqx_resource:query(PoolName, {cmd, RedisCommand})), + emqx_resource:get_instance(ResourceId), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)), + ?assertEqual({ok, <<"PONG">>}, emqx_resource:query(ResourceId, {cmd, RedisCommand})), % Stop and remove the resource in one go. 
- ?assertEqual(ok, emqx_resource:remove_local(PoolName)), - ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + ?assertEqual(ok, emqx_resource:remove_local(ResourceId)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), % Should not even be able to get the resource data out of ets now unlike just stopping. - ?assertEqual({error, not_found}, emqx_resource:get_instance(PoolName)). + ?assertEqual({error, not_found}, emqx_resource:get_instance(ResourceId)). % %%------------------------------------------------------------------------------ % %% Helpers @@ -187,8 +205,8 @@ redis_config_base(Type, ServerKey) -> MaybeSentinel = "", MaybeDatabase = " database = 1\n"; "cluster" -> - Host = ?REDIS_SINGLE_HOST, - Port = ?REDIS_SINGLE_PORT, + Host = ?REDIS_CLUSTER_HOST, + Port = ?REDIS_CLUSTER_PORT, MaybeSentinel = "", MaybeDatabase = "" end, diff --git a/apps/emqx_connector/test/emqx_connector_web_hook_server.erl b/apps/emqx_connector/test/emqx_connector_web_hook_server.erl index b68ebcbba..bdc6e100c 100644 --- a/apps/emqx_connector/test/emqx_connector_web_hook_server.erl +++ b/apps/emqx_connector/test/emqx_connector_web_hook_server.erl @@ -29,7 +29,14 @@ start_link(Port, Path) -> start_link(Port, Path, false). start_link(Port, Path, SSLOpts) -> - supervisor:start_link({local, ?MODULE}, ?MODULE, [Port, Path, SSLOpts]). + case Port of + random -> + PickedPort = pick_port_number(56000), + {ok, Pid} = supervisor:start_link({local, ?MODULE}, ?MODULE, [PickedPort, Path, SSLOpts]), + {ok, {PickedPort, Pid}}; + _ -> + supervisor:start_link({local, ?MODULE}, ?MODULE, [Port, Path, SSLOpts]) + end. stop() -> try @@ -103,3 +110,20 @@ default_handler(Req0, State) -> Req0 ), {ok, Req, State}. + +pick_port_number(Port) -> + case is_port_in_use(Port) of + true -> + pick_port_number(Port + 1); + false -> + Port + end. + +is_port_in_use(Port) -> + case gen_tcp:listen(Port, [{reuseaddr, true}, {active, false}]) of + {ok, ListenSocket} -> + gen_tcp:close(ListenSocket), + false; + {error, eaddrinuse} -> + true + end. diff --git a/apps/emqx_ctl/README.md b/apps/emqx_ctl/README.md new file mode 100644 index 000000000..2638031e6 --- /dev/null +++ b/apps/emqx_ctl/README.md @@ -0,0 +1,41 @@ +# emqx_ctl + +This application accepts dynamic `emqx ctl` command registrations so plugins can add their own commands. +Please note that the 'proxy' command `emqx_ctl` is considered deprecated; going forward, please use `emqx ctl` instead. + +## Add a new command + +To add a new command, the application must implement a callback function to handle the command, and register the command with the `emqx_ctl:register_command/2` API. + +### Register + +To add a new command which can be executed from `emqx ctl`, the application must call the `emqx_ctl:register_command/2` API to register the command. + +For example, to add a new command `myplugin` which is to be executed as `emqx ctl myplugin`, the application must call the `emqx_ctl:register_command/2` API as follows: + +```erlang +emqx_ctl:register_command(myplugin, {myplugin_cli, cmd}). +``` + +### Callback + +The callback function must be exported by the application and must have the following signature: + +```erlang
cmd([Arg1, Arg2, ...]) -> ok. +``` + +It must also implement a special clause to handle the `usage` argument: + +```erlang +cmd([usage]) -> "myplugin [arg1] [arg2] ..."; +``` + +### Utility + +The `emqx_ctl` application provides some utility functions which help to format the output of the command.
+For example `emqx_ctl:print/2` and `emqx_ctl:usage/1`. + +## Reference + +[emqx_management_cli](../emqx_management/src/emqx_mgmt_cli.erl) can be taken as a reference for how to implement a command. diff --git a/apps/emqx_ctl/rebar.config b/apps/emqx_ctl/rebar.config new file mode 100644 index 000000000..2656fd554 --- /dev/null +++ b/apps/emqx_ctl/rebar.config @@ -0,0 +1,2 @@ +{erl_opts, [debug_info]}. +{deps, []}. diff --git a/apps/emqx_ctl/src/emqx_ctl.app.src b/apps/emqx_ctl/src/emqx_ctl.app.src new file mode 100644 index 000000000..9de598a89 --- /dev/null +++ b/apps/emqx_ctl/src/emqx_ctl.app.src @@ -0,0 +1,15 @@ +{application, emqx_ctl, [ + {description, "Backend for emqx_ctl script"}, + {vsn, "0.1.0"}, + {registered, []}, + {mod, {emqx_ctl_app, []}}, + {applications, [ + kernel, + stdlib + ]}, + {env, []}, + {modules, []}, + + {licenses, ["Apache-2.0"]}, + {links, []} +]}. diff --git a/apps/emqx/src/emqx_ctl.erl b/apps/emqx_ctl/src/emqx_ctl.erl similarity index 91% rename from apps/emqx/src/emqx_ctl.erl rename to apps/emqx_ctl/src/emqx_ctl.erl index 53eb5b888..864b53d2a 100644 --- a/apps/emqx/src/emqx_ctl.erl +++ b/apps/emqx_ctl/src/emqx_ctl.erl @@ -18,8 +18,7 @@ -behaviour(gen_server). --include("types.hrl"). --include("logger.hrl"). +-include_lib("kernel/include/logger.hrl"). -export([start_link/0, stop/0]). @@ -70,7 +69,7 @@ -define(SERVER, ?MODULE). -define(CMD_TAB, emqx_command). --spec start_link() -> startlink_ret(). +-spec start_link() -> {ok, pid()}. start_link() -> gen_server:start_link({local, ?SERVER}, ?MODULE, [], []). @@ -103,7 +102,7 @@ cast(Msg) -> gen_server:cast(?SERVER, Msg). run_command([]) -> run_command(help, []); run_command([Cmd | Args]) -> - case emqx_misc:safe_to_existing_atom(Cmd) of + case safe_to_existing_atom(Cmd) of {ok, Cmd1} -> run_command(Cmd1, Args); _ -> @@ -122,7 +121,7 @@ run_command(Cmd, Args) when is_atom(Cmd) -> ok catch _:Reason:Stacktrace -> - ?SLOG(error, #{ + ?LOG_ERROR(#{ msg => "ctl_command_crashed", stacktrace => Stacktrace, reason => Reason @@ -150,7 +149,7 @@ help() -> [] -> print("No commands available.~n"); Cmds -> - print("Usage: ~ts~n", [?MODULE]), + print("Usage: ~ts~n", ["emqx ctl"]), lists:foreach( fun({_, {Mod, Cmd}, _}) -> print("~110..-s~n", [""]), @@ -220,7 +219,7 @@ format_usage(CmdParams, Desc, Width) -> %%-------------------------------------------------------------------- init([]) -> - ok = emqx_tables:new(?CMD_TAB, [protected, ordered_set]), + _ = ets:new(?CMD_TAB, [named_table, protected, ordered_set]), {ok, #state{seq = 0}}. handle_call({register_command, Cmd, MF, Opts}, _From, State = #state{seq = Seq}) -> @@ -229,23 +228,23 @@ handle_call({register_command, Cmd, MF, Opts}, _From, State = #state{seq = Seq}) ets:insert(?CMD_TAB, {{Seq, Cmd}, MF, Opts}), {reply, ok, next_seq(State)}; [[OriginSeq] | _] -> - ?SLOG(warning, #{msg => "CMD_overidden", cmd => Cmd, mf => MF}), + ?LOG_WARNING(#{msg => "CMD_overidden", cmd => Cmd, mf => MF}), true = ets:insert(?CMD_TAB, {{OriginSeq, Cmd}, MF, Opts}), {reply, ok, State} end; handle_call(Req, _From, State) -> - ?SLOG(error, #{msg => "unexpected_call", call => Req}), + ?LOG_ERROR(#{msg => "unexpected_call", call => Req}), {reply, ignored, State}. handle_cast({unregister_command, Cmd}, State) -> ets:match_delete(?CMD_TAB, {{'_', Cmd}, '_', '_'}), noreply(State); handle_cast(Msg, State) -> - ?SLOG(error, #{msg => "unexpected_cast", cast => Msg}), + ?LOG_ERROR(#{msg => "unexpected_cast", cast => Msg}), noreply(State). 
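Putting the README contract and the registry implementation above together, a plugin command module might look like the following sketch; the module itself is hypothetical, while `register_command/2`, `unregister_command/1`, `print/2` and `usage/1` are the real `emqx_ctl` entry points:

```erlang
-module(myplugin_cli).

-export([load/0, unload/0, cmd/1]).

load() ->
    emqx_ctl:register_command(myplugin, {?MODULE, cmd}).

unload() ->
    emqx_ctl:unregister_command(myplugin).

cmd([usage]) ->
    "myplugin status";
cmd(["status"]) ->
    emqx_ctl:print("myplugin is running~n", []);
cmd(_) ->
    %% fall through to usage for unknown arguments
    emqx_ctl:usage([{"myplugin status", "Show myplugin status"}]).
```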
handle_info(Info, State) -> - ?SLOG(error, #{msg => "unexpected_info", info => Info}), + ?LOG_ERROR(#{msg => "unexpected_info", info => Info}), noreply(State). terminate(_Reason, _State) -> @@ -272,3 +271,11 @@ zip_cmd([X | Xs], [Y | Ys]) -> [{X, Y} | zip_cmd(Xs, Ys)]; zip_cmd([X | Xs], []) -> [{X, ""} | zip_cmd(Xs, [])]; zip_cmd([], [Y | Ys]) -> [{"", Y} | zip_cmd([], Ys)]; zip_cmd([], []) -> []. + +safe_to_existing_atom(Str) -> + try + {ok, list_to_existing_atom(Str)} + catch + _:badarg -> + undefined + end. diff --git a/apps/emqx_ctl/src/emqx_ctl_app.erl b/apps/emqx_ctl/src/emqx_ctl_app.erl new file mode 100644 index 000000000..803ba90d3 --- /dev/null +++ b/apps/emqx_ctl/src/emqx_ctl_app.erl @@ -0,0 +1,18 @@ +%%%------------------------------------------------------------------- +%% @doc emqx_ctl public API +%% @end +%%%------------------------------------------------------------------- + +-module(emqx_ctl_app). + +-behaviour(application). + +-export([start/2, stop/1]). + +start(_StartType, _StartArgs) -> + emqx_ctl_sup:start_link(). + +stop(_State) -> + ok. + +%% internal functions diff --git a/apps/emqx_ctl/src/emqx_ctl_sup.erl b/apps/emqx_ctl/src/emqx_ctl_sup.erl new file mode 100644 index 000000000..21086e424 --- /dev/null +++ b/apps/emqx_ctl/src/emqx_ctl_sup.erl @@ -0,0 +1,33 @@ +%%%------------------------------------------------------------------- +%% @doc emqx_ctl top level supervisor. +%% @end +%%%------------------------------------------------------------------- + +-module(emqx_ctl_sup). + +-behaviour(supervisor). + +-export([start_link/0]). + +-export([init/1]). + +-define(SERVER, ?MODULE). + +start_link() -> + supervisor:start_link({local, ?SERVER}, ?MODULE, []). + +init([]) -> + SupFlags = #{ + strategy => one_for_all, + intensity => 0, + period => 1 + }, + ChildSpecs = [ + #{ + id => emqx_ctl, + start => {emqx_ctl, start_link, []}, + type => worker, + restart => permanent + } + ], + {ok, {SupFlags, ChildSpecs}}. diff --git a/apps/emqx/test/emqx_ctl_SUITE.erl b/apps/emqx_ctl/test/emqx_ctl_SUITE.erl similarity index 95% rename from apps/emqx/test/emqx_ctl_SUITE.erl rename to apps/emqx_ctl/test/emqx_ctl_SUITE.erl index 03f7b2148..46d9008e8 100644 --- a/apps/emqx/test/emqx_ctl_SUITE.erl +++ b/apps/emqx_ctl/test/emqx_ctl_SUITE.erl @@ -22,12 +22,10 @@ -include_lib("eunit/include/eunit.hrl"). -include_lib("common_test/include/ct.hrl"). -all() -> emqx_common_test_helpers:all(?MODULE). +all() -> [t_reg_unreg_command, t_run_commands, t_print, t_usage, t_unexpected]. init_per_suite(Config) -> - %% ensure stopped, this suite tests emqx_ctl process independently - application:stop(emqx), - ok = emqx_logger:set_log_level(emergency), + application:stop(emqx_ctl), Config. end_per_suite(_Config) -> diff --git a/apps/emqx_dashboard/README.md b/apps/emqx_dashboard/README.md index 7466b5afe..88c714aca 100644 --- a/apps/emqx_dashboard/README.md +++ b/apps/emqx_dashboard/README.md @@ -1 +1,17 @@ -# TODO: Doc +# EMQX Dashboard + +This application provides access to the EMQX Dashboard as well as the +underlying REST API itself, and provides authorization to protect against +unauthorized access. Furthermore, it wires in middleware that adds CORS headers. +Last but not least, it exposes the `/status` endpoint needed for healthcheck +monitoring. + +## Implementation details + +This implementation is based on `minirest` and relies on `hoconsc` to provide an +OpenAPI spec for `swagger`.
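Concretely, each REST endpoint is a module implementing the `minirest_api` behaviour and describing its routes with `hoconsc` types, much like the `emqx_dashboard_schema_api` module added further down in this diff. A stripped-down sketch (the module, path and operation are invented for illustration):

```erlang
-module(my_hello_api).

-behaviour(minirest_api).

-include_lib("hocon/include/hoconsc.hrl").
-include_lib("typerefl/include/types.hrl").

-export([api_spec/0, paths/0, schema/1]).
-export([hello/2]).

api_spec() ->
    emqx_dashboard_swagger:spec(?MODULE).

paths() ->
    ["/hello"].

schema("/hello") ->
    #{
        'operationId' => hello,
        get => #{
            tags => [<<"dashboard">>],
            responses => #{
                200 => hoconsc:mk(binary(), #{desc => <<"Greeting">>})
            }
        }
    }.

hello(get, _Params) ->
    {200, <<"hello">>}.
```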
+ +Note that, at this point, the EMQX Dashboard itself is an independent frontend project and +is integrated through a static file handler. This application is responsible for +providing an HTTP(S) server that gives access to it and its underlying API calls. +This includes user management and login for the frontend. diff --git a/apps/emqx_dashboard/etc/emqx_dashboard.conf b/apps/emqx_dashboard/etc/emqx_dashboard.conf index 856779500..67e3f61ec 100644 --- a/apps/emqx_dashboard/etc/emqx_dashboard.conf +++ b/apps/emqx_dashboard/etc/emqx_dashboard.conf @@ -2,6 +2,4 @@ dashboard { listeners.http { bind = 18083 } - default_username = "admin" - default_password = "public" } diff --git a/apps/emqx_dashboard/i18n/emqx_dashboard_api_i18n.conf b/apps/emqx_dashboard/i18n/emqx_dashboard_api_i18n.conf deleted file mode 100644 index 602af24c7..000000000 --- a/apps/emqx_dashboard/i18n/emqx_dashboard_api_i18n.conf +++ /dev/null @@ -1,150 +0,0 @@ -emqx_dashboard_api { - - token { - desc { - en: """Dashboard Auth Token""" - zh: """Dashboard 认证 Token""" - } - } - - username { - desc { - en: """Dashboard Username""" - zh: """Dashboard 用户名""" - } - } - - user_description { - desc { - en: """Dashboard User Description""" - zh: """Dashboard 用户描述""" - } - } - - password { - desc { - en: """Dashboard Password""" - zh: """Dashboard 密码""" - } - } - - license { - desc { - en: """EMQX License. opensource or enterprise""" - zh: """EMQX 许可。开源版本 或者企业版""" - } - } - - version { - desc { - en: """EMQX Version""" - zh: """EMQX 版本""" - } - } - - login_api { - desc { - en: """Dashboard Auth. Get Token""" - zh: """Dashboard 认证。获取 Token""" - } - } - - login_success { - desc { - en: """Dashboard Auth. Success""" - zh: """Dashboard 认证。成功""" - } - } - - login_failed401 { - desc { - en: """Login failed. Bad username or password""" - zh: """登录失败。用户名或密码错误""" - } - } - - logout_api { - desc { - en: """Dashboard user logout""" - zh: """Dashboard 用户登出""" - } - } - - list_users_api { - desc { - en: """Dashboard list users""" - zh: """Dashboard 用户列表""" - } - } - - create_user_api { - desc { - en: """Create dashboard user""" - zh: """创建 Dashboard 用户""" - } - } - - create_user_api_success { - desc { - en: """Create dashboard user success""" - zh: """创建 Dashboard 用户成功""" - } - } - - update_user_api { - desc { - en: """Update dashboard user description""" - zh: """更新 Dashboard 用户描述""" - } - } - - update_user_api200 { - desc { - en: """Update dashboard user success""" - zh: """更新 Dashboard 用户成功""" - } - } - - delete_user_api { - desc { - en: """Delete dashboard user""" - zh: """删除 Dashboard 用户""" - } - } - - users_api404 { - desc { - en: """Dashboard user not found""" - zh: """Dashboard 用户不存在""" - } - } - - change_pwd_api { - desc { - en: """Change dashboard user password""" - zh: """更改 Dashboard 用户密码""" - } - } - - old_pwd { - desc { - en: """Old password""" - zh: """旧密码""" - } - } - - new_pwd { - desc { - en: """New password""" - zh: """新密码""" - } - } - - login_failed_response400 { - desc { - en: """Login failed. Bad username or password""" - zh: """登录失败。用户名或密码错误""" - } - } - -} diff --git a/apps/emqx_dashboard/i18n/emqx_dashboard_i18n.conf b/apps/emqx_dashboard/i18n/emqx_dashboard_i18n.conf deleted file mode 100644 index e6758d0de..000000000 --- a/apps/emqx_dashboard/i18n/emqx_dashboard_i18n.conf +++ /dev/null @@ -1,221 +0,0 @@ -emqx_dashboard_schema { - listeners { - desc { - en: """HTTP(s) listeners are identified by their protocol type and are -used to serve dashboard UI and restful HTTP API.
-Listeners must have a unique combination of port number and IP address. -For example, an HTTP listener can listen on all configured IP addresses -on a given port for a machine by specifying the IP address 0.0.0.0. -Alternatively, the HTTP listener can specify a unique IP address for each listener, -but use the same port.""" - zh: """仪表盘监听器设置。""" - } - label { - en: "Listeners" - zh: "监听器" - } - } - sample_interval { - desc { - en: """How often to update metrics displayed in the dashboard. -Note: `sample_interval` should be a divisor of 60.""" - zh: """更新仪表板中显示的指标的时间间隔。必须小于60,且被60的整除。""" - } - } - token_expired_time { - desc { - en: "JWT token expiration time." - zh: "JWT token 过期时间" - } - label { - en: "Token expired time" - zh: "JWT 过期时间" - } - } - num_acceptors { - desc { - en: "Socket acceptor pool size for TCP protocols." - zh: "TCP协议的Socket acceptor池大小" - } - label { - en: "Number of acceptors" - zh: "Acceptor 数量" - } - } - max_connections { - desc { - en: "Maximum number of simultaneous connections." - zh: "同时处理的最大连接数" - } - label { - en: "Maximum connections" - zh: "最大连接数" - } - } - backlog { - desc { - en: "Defines the maximum length that the queue of pending connections can grow to." - zh: "排队等待连接的队列的最大长度" - } - label { - en: "Backlog" - zh: "排队长度" - } - } - send_timeout { - desc { - en: "Send timeout for the socket." - zh: "Socket发送超时时间" - } - label { - en: "Send timeout" - zh: "发送超时时间" - } - } - inet6 { - desc { - en: "Enable IPv6 support, default is false, which means IPv4 only." - zh: "启用IPv6, 如果机器不支持IPv6,请关闭此选项,否则会导致仪表盘无法使用。" - } - label { - en: "IPv6" - zh: "IPv6" - } - } - ipv6_v6only { - desc { - en: "Disable IPv4-to-IPv6 mapping for the listener." - zh: "当开启 inet6 功能的同时禁用 IPv4-to-IPv6 映射。该配置仅在 inet6 功能开启时有效。" - } - label { - en: "IPv6 only" - zh: "IPv6 only" - } - } - desc_dashboard { - desc { - en: "Configuration for EMQX dashboard." - zh: "EMQX仪表板配置" - } - label { - en: "Dashboard" - zh: "仪表板" - } - } - desc_listeners { - desc { - en: "Configuration for the dashboard listener." - zh: "仪表板监听器配置" - } - label { - en: "Listeners" - zh: "监听器" - } - } - desc_http { - desc { - en: "Configuration for the dashboard listener (plaintext)." - zh: "仪表板监听器(HTTP)配置" - } - label { - en: "HTTP" - zh: "HTTP" - } - } - desc_https { - desc { - en: "Configuration for the dashboard listener (TLS)." - zh: "仪表板监听器(HTTPS)配置" - } - label { - en: "HTTPS" - zh: "HTTPS" - } - } - listener_enable { - desc { - en: "Ignore or enable this listener" - zh: "忽略或启用该监听器配置" - } - label { - en: "Enable" - zh: "启用" - } - } - bind { - desc { - en: "Port without IP(18083) or port with specified IP(127.0.0.1:18083)." - zh: "监听的地址与端口,在dashboard更新此配置时,会重启dashboard服务。" - } - label { - en: "Bind" - zh: "绑定端口" - } - } - default_username { - desc { - en: "The default username of the automatically created dashboard user." - zh: "默认的仪表板用户名" - } - label { - en: "Default username" - zh: "默认用户名" - } - } - default_password { - desc { - en: """The initial default password for dashboard 'admin' user. -For safety, it should be changed as soon as possible.""" - zh: """默认的仪表板用户密码 -为了安全,应该尽快修改密码。""" - } - label { - en: "Default password" - zh: "默认密码" - } - } - cors { - desc { - en: """Support Cross-Origin Resource Sharing (CORS). 
-Allows a server to indicate any origins (domain, scheme, or port) other than -its own from which a browser should permit loading resources.""" - zh: """支持跨域资源共享(CORS) -允许服务器指示任何来源(域名、协议或端口),除了本服务器之外的任何浏览器应允许加载资源。""" - } - label { - en: "CORS" - zh: "跨域资源共享" - } - } - i18n_lang { - desc { - en: "Internationalization language support." - zh: "swagger多语言支持" - } - label { - en: "I18n language" - zh: "多语言支持" - } - } - bootstrap_users_file { - desc { - en: "Initialize users file." - zh: "初始化用户文件" - } - label { - en: """Is used to add an administrative user to Dashboard when emqx is first launched, - the format is: - ``` - username1:password1 - username2:password2 - ``` -""" - zh: """用于在首次启动 emqx 时,为 Dashboard 添加管理用户,其格式为: - ``` - username1:password1 - username2:password2 - ``` -""" - } - } -} diff --git a/apps/emqx_dashboard/rebar.config b/apps/emqx_dashboard/rebar.config index 9657d0bbf..440fde465 100644 --- a/apps/emqx_dashboard/rebar.config +++ b/apps/emqx_dashboard/rebar.config @@ -1,6 +1,9 @@ %% -*- mode: erlang -*- -{deps, [{emqx, {path, "../emqx"}}]}. +{deps, [ + {emqx, {path, "../emqx"}}, + {emqx_utils, {path, "../emqx_utils"}} +]}. {edoc_opts, [{preprocess, true}]}. {erl_opts, [ diff --git a/apps/emqx_dashboard/src/emqx_dashboard.app.src b/apps/emqx_dashboard/src/emqx_dashboard.app.src index 2698d5534..bd022f226 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard.app.src +++ b/apps/emqx_dashboard/src/emqx_dashboard.app.src @@ -2,10 +2,10 @@ {application, emqx_dashboard, [ {description, "EMQX Web Dashboard"}, % strict semver, bump manually! - {vsn, "5.0.11"}, + {vsn, "5.0.20"}, {modules, []}, {registered, [emqx_dashboard_sup]}, - {applications, [kernel, stdlib, mnesia, minirest, emqx]}, + {applications, [kernel, stdlib, mnesia, minirest, emqx, emqx_ctl]}, {mod, {emqx_dashboard_app, []}}, {env, []}, {licenses, ["Apache-2.0"]}, diff --git a/apps/emqx_dashboard/src/emqx_dashboard.erl b/apps/emqx_dashboard/src/emqx_dashboard.erl index f15467658..08b7f0142 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard.erl @@ -16,22 +16,13 @@ -module(emqx_dashboard). --define(APP, ?MODULE). - -export([ start_listeners/0, start_listeners/1, stop_listeners/1, stop_listeners/0, - list_listeners/0 -]). - --export([ - init_i18n/2, - init_i18n/0, - get_i18n/0, - i18n_file/0, - clear_i18n/0 + list_listeners/0, + wait_for_listeners/0 ]). %% Authorization @@ -65,8 +56,12 @@ start_listeners(Listeners) -> components => #{ schemas => #{}, 'securitySchemes' => #{ - 'basicAuth' => #{type => http, scheme => basic}, - 'bearerAuth' => #{type => http, scheme => bearer} + 'basicAuth' => #{ + type => http, + scheme => basic, + description => + <<"Authorize with [API Keys](https://www.emqx.io/docs/en/v5.0/admin/api.html#api-keys)">> + } } } }, @@ -86,30 +81,34 @@ start_listeners(Listeners) -> dispatch => Dispatch, middlewares => [?EMQX_MIDDLE, cowboy_router, cowboy_handler] }, - Res = + {OkListeners, ErrListeners} = lists:foldl( - fun({Name, Protocol, Bind, RanchOptions}, Acc) -> - Minirest = BaseMinirest#{protocol => Protocol}, + fun({Name, Protocol, Bind, RanchOptions, ProtoOpts}, {OkAcc, ErrAcc}) -> + Minirest = BaseMinirest#{protocol => Protocol, protocol_options => ProtoOpts}, case minirest:start(Name, RanchOptions, Minirest) of {ok, _} -> ?ULOG("Listener ~ts on ~ts started.~n", [ Name, emqx_listeners:format_bind(Bind) ]), - Acc; + {[Name | OkAcc], ErrAcc}; {error, _Reason} -> %% Don't record the reason because minirest already does(too much logs noise). 
- [Name | Acc] + {OkAcc, [Name | ErrAcc]} end end, - [], + {[], []}, listeners(Listeners) ), - case Res of - [] -> ok; - _ -> {error, Res} + case ErrListeners of + [] -> + optvar:set(emqx_dashboard_listeners_ready, OkListeners), + ok; + _ -> + {error, ErrListeners} end. stop_listeners(Listeners) -> + optvar:unset(emqx_dashboard_listeners_ready), [ begin case minirest:stop(Name) of @@ -121,25 +120,12 @@ stop_listeners(Listeners) -> ?SLOG(warning, #{msg => "stop_listener_failed", name => Name, port => Port}) end end - || {Name, _, Port, _} <- listeners(Listeners) + || {Name, _, Port, _, _} <- listeners(Listeners) ], ok. -get_i18n() -> - application:get_env(emqx_dashboard, i18n). - -init_i18n(File, Lang) -> - Cache = hocon_schema:new_desc_cache(File), - application:set_env(emqx_dashboard, i18n, #{lang => atom_to_binary(Lang), cache => Cache}). - -clear_i18n() -> - case application:get_env(emqx_dashboard, i18n) of - {ok, #{cache := Cache}} -> - hocon_schema:delete_desc_cache(Cache), - application:unset_env(emqx_dashboard, i18n); - undefined -> - ok - end. +wait_for_listeners() -> + optvar:read(emqx_dashboard_listeners_ready). %%-------------------------------------------------------------------- %% internal @@ -160,7 +146,13 @@ listeners(Listeners) -> maps:get(enable, Conf) andalso begin {Conf1, Bind} = ip_port(Conf), - {true, {listener_name(Protocol), Protocol, Bind, ranch_opts(Conf1)}} + {true, { + listener_name(Protocol), + Protocol, + Bind, + ranch_opts(Conf1), + proto_opts(Conf1) + }} end end, maps:to_list(Listeners) @@ -175,11 +167,6 @@ ip_port(error, Opts) -> {Opts#{port => 18083}, 18083}; ip_port({Port, Opts}, _) when is_integer(Port) -> {Opts#{port => Port}, Port}; ip_port({{IP, Port}, Opts}, _) -> {Opts#{port => Port, ip => IP}, {IP, Port}}. -init_i18n() -> - File = i18n_file(), - Lang = emqx_conf:get([dashboard, i18n_lang], en), - init_i18n(File, Lang). - ranch_opts(Options) -> Keys = [ handshake_timeout, @@ -193,7 +180,7 @@ ranch_opts(Options) -> SocketOpts = maps:fold( fun filter_false/3, [], - maps:without([enable, inet6, ipv6_v6only | Keys], Options) + maps:without([enable, inet6, ipv6_v6only, proxy_header | Keys], Options) ), InetOpts = case Options of @@ -206,6 +193,9 @@ ranch_opts(Options) -> end, RanchOpts#{socket_opts => InetOpts ++ SocketOpts}. +proto_opts(Options) -> + maps:with([proxy_header], Options). + filter_false(_K, false, S) -> S; filter_false(K, V, S) -> [{K, V} | S]. @@ -215,28 +205,7 @@ listener_name(Protocol) -> authorize(Req) -> case cowboy_req:parse_header(<<"authorization">>, Req) of {basic, Username, Password} -> - case emqx_dashboard_admin:check(Username, Password) of - ok -> - ok; - {error, <<"username_not_found">>} -> - Path = cowboy_req:path(Req), - case emqx_mgmt_auth:authorize(Path, Username, Password) of - ok -> - ok; - {error, <<"not_allowed">>} -> - return_unauthorized( - ?WRONG_USERNAME_OR_PWD, - <<"Check username/password">> - ); - {error, _} -> - return_unauthorized( - ?WRONG_USERNAME_OR_PWD_OR_API_KEY_OR_API_SECRET, - <<"Check username/password or api_key/api_secret">> - ) - end; - {error, _} -> - return_unauthorized(?WRONG_USERNAME_OR_PWD, <<"Check username/password">>) - end; + api_key_authorize(Req, Username, Password); {bearer, Token} -> case emqx_dashboard_admin:verify_token(Token) of ok -> @@ -261,11 +230,22 @@ return_unauthorized(Code, Message) -> }, #{code => Code, message => Message}}. 
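Worth noting in the listener changes above: `optvar` is used as a readiness gate. Judging by its use here, `optvar:set/2` publishes the started listener names, `optvar:read/1` blocks callers (such as the dispatch regeneration shown later in this diff) until the value is published, and `optvar:unset/1` closes the gate again on shutdown. A condensed sketch of that life cycle:

```erlang
ready_gate_demo() ->
    %% publisher: mark the listeners as ready once they are all started
    optvar:set(emqx_dashboard_listeners_ready, [dashboard_http]),
    %% consumer: blocks until the variable is set, then returns its value
    Names = optvar:read(emqx_dashboard_listeners_ready),
    %% on stop: subsequent reads block until the next set/2
    optvar:unset(emqx_dashboard_listeners_ready),
    Names.
```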
-i18n_file() -> - case application:get_env(emqx_dashboard, i18n_file) of - undefined -> filename:join([code:priv_dir(emqx_dashboard), "i18n.conf"]); - {ok, File} -> File - end. - listeners() -> - emqx_conf:get([dashboard, listeners], []). + emqx_conf:get([dashboard, listeners], #{}). + +api_key_authorize(Req, Key, Secret) -> + Path = cowboy_req:path(Req), + case emqx_mgmt_auth:authorize(Path, Key, Secret) of + ok -> + ok; + {error, <<"not_allowed">>} -> + return_unauthorized( + ?BAD_API_KEY_OR_SECRET, + <<"Not allowed, Check api_key/api_secret">> + ); + {error, _} -> + return_unauthorized( + ?BAD_API_KEY_OR_SECRET, + <<"Check api_key/api_secret">> + ) + end. diff --git a/apps/emqx_dashboard/src/emqx_dashboard_admin.erl b/apps/emqx_dashboard/src/emqx_dashboard_admin.erl index 77c77d5b9..aaa43d621 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_admin.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_admin.erl @@ -51,8 +51,7 @@ -export([ add_default_user/0, - default_username/0, - add_bootstrap_users/0 + default_username/0 ]). -type emqx_admin() :: #?ADMIN{}. @@ -85,21 +84,6 @@ mnesia(boot) -> add_default_user() -> add_default_user(binenv(default_username), binenv(default_password)). --spec add_bootstrap_users() -> ok | {error, _}. -add_bootstrap_users() -> - case emqx:get_config([dashboard, bootstrap_users_file], undefined) of - undefined -> - ok; - File -> - case mnesia:table_info(?ADMIN, size) of - 0 -> - ?SLOG(debug, #{msg => "Add dashboard bootstrap users", file => File}), - add_bootstrap_users(File); - _ -> - ok - end - end. - %%-------------------------------------------------------------------- %% API %%-------------------------------------------------------------------- @@ -108,21 +92,79 @@ add_bootstrap_users() -> add_user(Username, Password, Desc) when is_binary(Username), is_binary(Password) -> - case legal_username(Username) of - true -> - return( - mria:transaction(?DASHBOARD_SHARD, fun add_user_/3, [Username, Password, Desc]) - ); - false -> + case {legal_username(Username), legal_password(Password)} of + {ok, ok} -> do_add_user(Username, Password, Desc); + {{error, Reason}, _} -> {error, Reason}; + {_, {error, Reason}} -> {error, Reason} + end. + +do_add_user(Username, Password, Desc) -> + Res = mria:transaction(?DASHBOARD_SHARD, fun add_user_/3, [Username, Password, Desc]), + return(Res). + +%% 0-9 or A-Z or a-z or $_ +legal_username(<<>>) -> + {error, <<"Username cannot be empty">>}; +legal_username(UserName) -> + case re:run(UserName, "^[_a-zA-Z0-9]*$", [{capture, none}]) of + nomatch -> {error, << "Bad Username." " Only upper and lower case letters, numbers and underscores are supported" - >>} + >>}; + match -> + ok end. -%% 0 - 9 or A -Z or a - z or $_ -legal_username(<<>>) -> false; -legal_username(UserName) -> nomatch /= re:run(UserName, "^[_a-zA-Z0-9]*$"). +-define(LOW_LETTER_CHARS, "abcdefghijklmnopqrstuvwxyz"). +-define(UPPER_LETTER_CHARS, "ABCDEFGHIJKLMNOPQRSTUVWXYZ"). +-define(LETTER, ?LOW_LETTER_CHARS ++ ?UPPER_LETTER_CHARS). +-define(NUMBER, "0123456789"). +-define(SPECIAL_CHARS, "!@#$%^&*()_+-=[]{}\"|;':,./<>?`~ "). +-define(INVALID_PASSWORD_MSG, << + "Bad password. " + "At least two different kind of characters from groups of letters, numbers, and special characters. " + "For example, if password is composed from letters, it must contain at least one number or a special character." +>>). +-define(BAD_PASSWORD_LEN, <<"The range of password length is 8~64">>). 
+ +legal_password(Password) when is_binary(Password) -> + legal_password(binary_to_list(Password)); +legal_password(Password) when is_list(Password) -> + legal_password(Password, erlang:length(Password)). + +legal_password(Password, Len) when Len >= 8 andalso Len =< 64 -> + case is_mixed_password(Password) of + true -> ascii_character_validate(Password); + false -> {error, ?INVALID_PASSWORD_MSG} + end; +legal_password(_Password, _Len) -> + {error, ?BAD_PASSWORD_LEN}. + +%% The password must contain at least two different kind of characters +%% from groups of letters, numbers, and special characters. +is_mixed_password(Password) -> is_mixed_password(Password, [?NUMBER, ?LETTER, ?SPECIAL_CHARS], 0). + +is_mixed_password(_Password, _Chars, 2) -> + true; +is_mixed_password(_Password, [], _Count) -> + false; +is_mixed_password(Password, [Chars | Rest], Count) -> + NewCount = + case contain(Password, Chars) of + true -> Count + 1; + false -> Count + end, + is_mixed_password(Password, Rest, NewCount). + +%% regex-non-ascii-character, such as Chinese, Japanese, Korean, etc. +ascii_character_validate(Password) -> + case re:run(Password, "[^\\x00-\\x7F]+", [unicode, {capture, none}]) of + match -> {error, <<"Only ascii characters are allowed in the password">>}; + nomatch -> ok + end. + +contain(Xs, Spec) -> lists:any(fun(X) -> lists:member(X, Spec) end, Xs). %% black-magic: force overwrite a user force_add_user(Username, Password, Desc) -> @@ -204,7 +246,10 @@ change_password(Username, OldPasswd, NewPasswd) when is_binary(Username) -> end. change_password(Username, Password) when is_binary(Username), is_binary(Password) -> - change_password_hash(Username, hash(Password)). + case legal_password(Password) of + ok -> change_password_hash(Username, hash(Password)); + Error -> Error + end. change_password_hash(Username, PasswordHash) -> ChangePWD = @@ -308,47 +353,45 @@ add_default_user(Username, Password) when ?EMPTY_KEY(Username) orelse ?EMPTY_KEY {ok, empty}; add_default_user(Username, Password) -> case lookup_user(Username) of - [] -> add_user(Username, Password, <<"administrator">>); + [] -> do_add_user(Username, Password, <<"administrator">>); _ -> {ok, default_user_exists} end. -add_bootstrap_users(File) -> - case file:open(File, [read]) of - {ok, Dev} -> - {ok, MP} = re:compile(<<"(\.+):(\.+$)">>, [ungreedy]), - try - load_bootstrap_user(Dev, MP) - catch - Type:Reason -> - {error, {Type, Reason}} - after - file:close(Dev) - end; - {error, Reason} = Error -> - ?SLOG(error, #{ - msg => "failed to open the dashboard bootstrap users file", - file => File, - reason => Reason - }), - Error - end. +-ifdef(TEST). +-include_lib("eunit/include/eunit.hrl"). -load_bootstrap_user(Dev, MP) -> - case file:read_line(Dev) of - {ok, Line} -> - case re:run(Line, MP, [global, {capture, all_but_first, binary}]) of - {match, [[Username, Password]]} -> - case add_user(Username, Password, ?BOOTSTRAP_USER_TAG) of - {ok, _} -> - load_bootstrap_user(Dev, MP); - Error -> - Error - end; - _ -> - load_bootstrap_user(Dev, MP) - end; - eof -> - ok; - Error -> - Error - end. 
+legal_password_test() -> + ?assertEqual({error, ?BAD_PASSWORD_LEN}, legal_password(<<"123">>)), + MaxPassword = iolist_to_binary([lists:duplicate(63, "x"), "1"]), + ?assertEqual(ok, legal_password(MaxPassword)), + TooLongPassword = lists:duplicate(65, "y"), + ?assertEqual({error, ?BAD_PASSWORD_LEN}, legal_password(TooLongPassword)), + + ?assertEqual({error, ?INVALID_PASSWORD_MSG}, legal_password(<<"12345678">>)), + ?assertEqual({error, ?INVALID_PASSWORD_MSG}, legal_password(?LETTER)), + ?assertEqual({error, ?INVALID_PASSWORD_MSG}, legal_password(?NUMBER)), + ?assertEqual({error, ?INVALID_PASSWORD_MSG}, legal_password(?SPECIAL_CHARS)), + ?assertEqual({error, ?INVALID_PASSWORD_MSG}, legal_password(<<"映映映映无天在请"/utf8>>)), + ?assertEqual( + {error, <<"Only ascii characters are allowed in the password">>}, + legal_password(<<"️test_for_non_ascii1中"/utf8>>) + ), + ?assertEqual( + {error, <<"Only ascii characters are allowed in the password">>}, + legal_password(<<"云☁️test_for_unicode"/utf8>>) + ), + + ?assertEqual(ok, legal_password(?LOW_LETTER_CHARS ++ ?NUMBER)), + ?assertEqual(ok, legal_password(?UPPER_LETTER_CHARS ++ ?NUMBER)), + ?assertEqual(ok, legal_password(?LOW_LETTER_CHARS ++ ?SPECIAL_CHARS)), + ?assertEqual(ok, legal_password(?UPPER_LETTER_CHARS ++ ?SPECIAL_CHARS)), + ?assertEqual(ok, legal_password(?SPECIAL_CHARS ++ ?NUMBER)), + + ?assertEqual(ok, legal_password(<<"abckldiekflkdf12">>)), + ?assertEqual(ok, legal_password(<<"abckldiekflkdf w">>)), + ?assertEqual(ok, legal_password(<<"# abckldiekflkdf w">>)), + ?assertEqual(ok, legal_password(<<"# 12344858">>)), + ?assertEqual(ok, legal_password(<<"# %12344858">>)), + ok. + +-endif. diff --git a/apps/emqx_dashboard/src/emqx_dashboard_api.erl b/apps/emqx_dashboard/src/emqx_dashboard_api.erl index 9facac59c..108cde379 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_api.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_api.erl @@ -18,7 +18,6 @@ -behaviour(minirest_api). --include("emqx_dashboard.hrl"). -include_lib("hocon/include/hoconsc.hrl"). -include_lib("emqx/include/logger.hrl"). -include_lib("typerefl/include/types.hrl"). @@ -47,7 +46,7 @@ -define(EMPTY(V), (V == undefined orelse V == <<>>)). --define(WRONG_USERNAME_OR_PWD, 'WRONG_USERNAME_OR_PWD'). +-define(BAD_USERNAME_OR_PWD, 'BAD_USERNAME_OR_PWD'). -define(WRONG_TOKEN_OR_USERNAME, 'WRONG_TOKEN_OR_USERNAME'). -define(USER_NOT_FOUND, 'USER_NOT_FOUND'). -define(ERROR_PWD_NOT_MATCH, 'ERROR_PWD_NOT_MATCH'). @@ -74,7 +73,7 @@ schema("/login") -> post => #{ tags => [<<"dashboard">>], desc => ?DESC(login_api), - summary => <<"Dashboard Auth">>, + summary => <<"Dashboard authentication">>, 'requestBody' => fields([username, password]), responses => #{ 200 => fields([token, version, license]), @@ -164,7 +163,7 @@ schema("/users/:username/change_pwd") -> }. response_schema(401) -> - emqx_dashboard_swagger:error_codes([?WRONG_USERNAME_OR_PWD], ?DESC(login_failed401)); + emqx_dashboard_swagger:error_codes([?BAD_USERNAME_OR_PWD], ?DESC(login_failed401)); response_schema(404) -> emqx_dashboard_swagger:error_codes([?USER_NOT_FOUND], ?DESC(users_api404)). @@ -223,7 +222,7 @@ login(post, #{body := Params}) -> }}; {error, R} -> ?SLOG(info, #{msg => "Dashboard login failed", username => Username, reason => R}), - {401, ?WRONG_USERNAME_OR_PWD, <<"Auth failed">>} + {401, ?BAD_USERNAME_OR_PWD, <<"Auth failed">>} end. logout(_, #{ @@ -325,7 +324,7 @@ is_self_auth_token(Username, Token) -> end. 
change_pwd(post, #{bindings := #{username := Username}, body := Params}) -> - LogMeta = #{msg => "Dashboard change password", username => Username}, + LogMeta = #{msg => "Dashboard change password", username => binary_to_list(Username)}, OldPwd = maps:get(<<"old_pwd">>, Params), NewPwd = maps:get(<<"new_pwd">>, Params), case ?EMPTY(OldPwd) orelse ?EMPTY(NewPwd) of diff --git a/apps/emqx_dashboard/src/emqx_dashboard_app.erl b/apps/emqx_dashboard/src/emqx_dashboard_app.erl index 6956f3fc8..2c3f9b8bc 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_app.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_app.erl @@ -31,13 +31,8 @@ start(_StartType, _StartArgs) -> case emqx_dashboard:start_listeners() of ok -> emqx_dashboard_cli:load(), - case emqx_dashboard_admin:add_bootstrap_users() of - ok -> - {ok, _} = emqx_dashboard_admin:add_default_user(), - {ok, Sup}; - Error -> - Error - end; + {ok, _} = emqx_dashboard_admin:add_default_user(), + {ok, Sup}; {error, Reason} -> {error, Reason} end. diff --git a/apps/emqx_dashboard/src/emqx_dashboard_desc_cache.erl b/apps/emqx_dashboard/src/emqx_dashboard_desc_cache.erl new file mode 100644 index 000000000..b503fed88 --- /dev/null +++ b/apps/emqx_dashboard/src/emqx_dashboard_desc_cache.erl @@ -0,0 +1,110 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +%% @doc This module is used to cache the description of the configuration items. +-module(emqx_dashboard_desc_cache). + +-export([init/0]). + +%% internal exports +-export([load_desc/2, lookup/4, lookup/5]). + +-include_lib("emqx/include/logger.hrl"). + +%% @doc Global ETS table to cache the description of the configuration items. +%% The table is owned by emqx_dashboard_sup, the root supervisor of emqx_dashboard. +%% The cache is initialized with the default language (English) and +%% all the desc.<lang>.hocon files in the www/static directory (extracted from the dashboard package). +init() -> + ok = ensure_app_loaded(emqx_dashboard), + PrivDir = code:priv_dir(emqx_dashboard), + EngDesc = filename:join([PrivDir, "desc.en.hocon"]), + WwwStaticDir = filename:join([PrivDir, "www", "static"]), + OtherLangDesc0 = filelib:wildcard("desc.*.hocon", WwwStaticDir), + OtherLangDesc = lists:map(fun(F) -> filename:join([WwwStaticDir, F]) end, OtherLangDesc0), + Files = [EngDesc | OtherLangDesc], + ok = emqx_utils_ets:new(?MODULE, [public, ordered_set, {read_concurrency, true}]), + ok = lists:foreach(fun(F) -> load_desc(?MODULE, F) end, Files). + +%% @doc Load the description of the configuration items from the file. +%% Load is incremental, so it can be called multiple times. +%% NOTE: no garbage collection is done, because stale entries are harmless.
+load_desc(EtsTab, File) -> + ?SLOG(info, #{msg => "loading desc", file => File}), + {ok, Descs} = hocon:load(File), + ["desc", Lang, "hocon"] = string:tokens(filename:basename(File), "."), + Insert = fun(Namespace, Id, Tag, Text) -> + Key = {bin(Lang), bin(Namespace), bin(Id), bin(Tag)}, + true = ets:insert(EtsTab, {Key, bin(Text)}), + ok + end, + walk_ns(Insert, maps:to_list(Descs)). + +%% @doc Lookup the description of the configuration item from the global cache. +lookup(Lang, Namespace, Id, Tag) -> + lookup(?MODULE, Lang, Namespace, Id, Tag). + +%% @doc Lookup the description of the configuration item from the given cache. +lookup(EtsTab, Lang0, Namespace, Id, Tag) -> + Lang = bin(Lang0), + try ets:lookup(EtsTab, {Lang, bin(Namespace), bin(Id), bin(Tag)}) of + [{_, Desc}] -> + Desc; + [] when Lang =/= <<"en">> -> + %% fallback to English + lookup(EtsTab, <<"en">>, Namespace, Id, Tag); + _ -> + %% undefined but not <<>> + undefined + catch + error:badarg -> + %% schema is not initialized + %% most likely in test cases + undefined + end. + +%% The desc files are of names like: +%% desc.en.hocon or desc.zh.hocon +%% And with content like: +%% namespace.id.desc = "description" +%% namespace.id.label = "label" +walk_ns(_Insert, []) -> + ok; +walk_ns(Insert, [{Namespace, Ids} | Rest]) -> + walk_id(Insert, Namespace, maps:to_list(Ids)), + walk_ns(Insert, Rest). + +walk_id(_Insert, _Namespace, []) -> + ok; +walk_id(Insert, Namespace, [{Id, Tags} | Rest]) -> + walk_tag(Insert, Namespace, Id, maps:to_list(Tags)), + walk_id(Insert, Namespace, Rest). + +walk_tag(_Insert, _Namespace, _Id, []) -> + ok; +walk_tag(Insert, Namespace, Id, [{Tag, Text} | Rest]) -> + ok = Insert(Namespace, Id, Tag, Text), + walk_tag(Insert, Namespace, Id, Rest). + +bin(A) when is_atom(A) -> atom_to_binary(A, utf8); +bin(B) when is_binary(B) -> B; +bin(L) when is_list(L) -> list_to_binary(L). + +ensure_app_loaded(App) -> + case application:load(App) of + ok -> ok; + {error, {already_loaded, _}} -> ok + end. diff --git a/apps/emqx_dashboard/src/emqx_dashboard_listener.erl b/apps/emqx_dashboard/src/emqx_dashboard_listener.erl index 112b3ad58..6a306c288 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_listener.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_listener.erl @@ -15,9 +15,11 @@ %%-------------------------------------------------------------------- -module(emqx_dashboard_listener). --include_lib("emqx/include/logger.hrl"). -behaviour(emqx_config_handler). +-include_lib("emqx/include/logger.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + %% API -export([add_handler/0, remove_handler/0]). -export([pre_config_update/3, post_config_update/5]). @@ -54,12 +56,10 @@ init([]) -> {ok, undefined, {continue, regenerate_dispatch}}. handle_continue(regenerate_dispatch, _State) -> - NewState = regenerate_minirest_dispatch(), - {noreply, NewState, hibernate}. + %% initialize the swagger dispatches + ready = regenerate_minirest_dispatch(), + {noreply, ready, hibernate}. -handle_call(is_ready, _From, retry) -> - NewState = regenerate_minirest_dispatch(), - {reply, NewState, NewState, hibernate}; handle_call(is_ready, _From, State) -> {reply, State, State, hibernate}; handle_call(_Request, _From, State) -> @@ -68,6 +68,9 @@ handle_call(_Request, _From, State) -> handle_cast(_Request, State) -> {noreply, State, hibernate}. 
+handle_info(i18n_lang_changed, _State) -> + NewState = regenerate_minirest_dispatch(), + {noreply, NewState, hibernate}; handle_info({update_listeners, OldListeners, NewListeners}, _State) -> ok = emqx_dashboard:stop_listeners(OldListeners), ok = emqx_dashboard:start_listeners(NewListeners), @@ -83,29 +86,26 @@ terminate(_Reason, _State) -> code_change(_OldVsn, State, _Extra) -> {ok, State}. -%% generate dispatch is very slow. +%% generate dispatch is very slow, takes about 1s. regenerate_minirest_dispatch() -> - try - emqx_dashboard:init_i18n(), - lists:foreach( - fun(Listener) -> - minirest:update_dispatch(element(1, Listener)) - end, - emqx_dashboard:list_listeners() - ), - ready - catch - T:E:S -> - ?SLOG(error, #{ - msg => "regenerate_minirest_dispatch_failed", - reason => E, - type => T, - stacktrace => S - }), - retry - after - emqx_dashboard:clear_i18n() - end. + %% optvar:read waits for the var to be set + Names = emqx_dashboard:wait_for_listeners(), + {Time, ok} = timer:tc(fun() -> do_regenerate_minirest_dispatch(Names) end), + Lang = emqx:get_config([dashboard, i18n_lang]), + ?tp(info, regenerate_minirest_dispatch, #{ + elapsed => erlang:convert_time_unit(Time, microsecond, millisecond), + listeners => Names, + i18n_lang => Lang + }), + ready. + +do_regenerate_minirest_dispatch(Names) -> + lists:foreach( + fun(Name) -> + ok = minirest:update_dispatch(Name) + end, + Names + ). add_handler() -> Roots = emqx_dashboard_schema:roots(), @@ -117,9 +117,15 @@ remove_handler() -> ok = emqx_config_handler:remove_handler(Roots), ok. +pre_config_update(_Path, {change_i18n_lang, NewLang}, RawConf) -> + %% e.g. emqx_conf:update([dashboard], {change_i18n_lang, zh}, #{}). + %% TODO: check if there is such a language (all languages are cached in emqx_dashboard_desc_cache) + Update = #{<<"i18n_lang">> => NewLang}, + NewConf = emqx_utils_maps:deep_merge(RawConf, Update), + {ok, NewConf}; pre_config_update(_Path, UpdateConf0, RawConf) -> UpdateConf = remove_sensitive_data(UpdateConf0), - NewConf = emqx_map_lib:deep_merge(RawConf, UpdateConf), + NewConf = emqx_utils_maps:deep_merge(RawConf, UpdateConf), ensure_ssl_cert(NewConf). -define(SENSITIVE_PASSWORD, <<"******">>). @@ -134,11 +140,13 @@ remove_sensitive_data(Conf0) -> end, case Conf1 of #{<<"listeners">> := #{<<"https">> := #{<<"password">> := ?SENSITIVE_PASSWORD}}} -> - emqx_map_lib:deep_remove([<<"listeners">>, <<"https">>, <<"password">>], Conf1); + emqx_utils_maps:deep_remove([<<"listeners">>, <<"https">>, <<"password">>], Conf1); _ -> Conf1 end. +post_config_update(_, {change_i18n_lang, _}, _NewConf, _OldConf, _AppEnvs) -> + delay_job(i18n_lang_changed); post_config_update(_, _Req, NewConf, OldConf, _AppEnvs) -> OldHttp = get_listener(http, OldConf), OldHttps = get_listener(https, OldConf), @@ -148,11 +156,16 @@ post_config_update(_, _Req, NewConf, OldConf, _AppEnvs) -> {StopHttps, StartHttps} = diff_listeners(https, OldHttps, NewHttps), Stop = maps:merge(StopHttp, StopHttps), Start = maps:merge(StartHttp, StartHttps), - _ = erlang:send_after(500, ?MODULE, {update_listeners, Stop, Start}), + delay_job({update_listeners, Stop, Start}). + +%% in post_config_update, the config is not yet persisted to persistent_term +%% so we need to delegate the listener update to the gen_server a bit later +delay_job(Msg) -> + _ = erlang:send_after(500, ?MODULE, Msg), ok. get_listener(Type, Conf) -> - emqx_map_lib:deep_get([listeners, Type], Conf, undefined). + emqx_utils_maps:deep_get([listeners, Type], Conf, undefined). 
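The `{change_i18n_lang, Lang}` branch above means the dashboard language can now be switched at runtime through the normal config-update path, as the inline comment suggests. From a remote console the flow would look roughly like this sketch (the exact return value depends on `emqx_conf`):

```erlang
%% pre_config_update/3 merges #{<<"i18n_lang">> => Lang} into the raw config;
%% post_config_update/5 then schedules the i18n_lang_changed job, which
%% regenerates the minirest dispatch with the new language.
switch_dashboard_lang(Lang) ->
    emqx_conf:update([dashboard], {change_i18n_lang, Lang}, #{}).
```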
diff_listeners(_, Listener, Listener) -> {#{}, #{}}; diff_listeners(Type, undefined, Start) -> {#{}, #{Type => Start}}; @@ -162,13 +175,14 @@ diff_listeners(Type, Stop, Start) -> {#{Type => Stop}, #{Type => Start}}. -define(DIR, <<"dashboard">>). ensure_ssl_cert(#{<<"listeners">> := #{<<"https">> := #{<<"enable">> := true}}} = Conf) -> - Https = emqx_map_lib:deep_get([<<"listeners">>, <<"https">>], Conf, undefined), - Opts = #{required_keys => [<<"keyfile">>, <<"certfile">>, <<"cacertfile">>]}, + Https = emqx_utils_maps:deep_get([<<"listeners">>, <<"https">>], Conf, undefined), + Opts = #{required_keys => [[<<"keyfile">>], [<<"certfile">>], [<<"cacertfile">>]]}, case emqx_tls_lib:ensure_ssl_files(?DIR, Https, Opts) of {ok, undefined} -> {error, <<"ssl_cert_not_found">>}; {ok, NewHttps} -> - {ok, emqx_map_lib:deep_merge(Conf, #{<<"listeners">> => #{<<"https">> => NewHttps}})}; + {ok, + emqx_utils_maps:deep_merge(Conf, #{<<"listeners">> => #{<<"https">> => NewHttps}})}; {error, Reason} -> ?SLOG(error, Reason#{msg => "bad_ssl_config"}), {error, Reason} diff --git a/apps/emqx_dashboard/src/emqx_dashboard_monitor.erl b/apps/emqx_dashboard/src/emqx_dashboard_monitor.erl index 23ac4f35e..019feff51 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_monitor.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_monitor.erl @@ -126,7 +126,7 @@ current_rate() -> (_Node, Error) -> Error end, - case lists:foldl(Fun, #{}, mria_mnesia:cluster_nodes(running)) of + case lists:foldl(Fun, #{}, mria:cluster_nodes(running)) of {badrpc, Reason} -> {badrpc, Reason}; Rate -> @@ -205,7 +205,7 @@ do_call(Request) -> gen_server:call(?MODULE, Request, 5000). do_sample(all, Time) -> - do_sample(mria_mnesia:cluster_nodes(running), Time, #{}); + do_sample(mria:cluster_nodes(running), Time, #{}); do_sample(Node, Time) when Node == node() -> MS = match_spec(Time), internal_format(ets:select(?TAB, MS)); diff --git a/apps/emqx_dashboard/src/emqx_dashboard_monitor_api.erl b/apps/emqx_dashboard/src/emqx_dashboard_monitor_api.erl index f8b0918be..c0e162b62 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_monitor_api.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_monitor_api.erl @@ -55,7 +55,7 @@ schema("/monitor/nodes/:node") -> parameters => [parameter_node(), parameter_latest()], responses => #{ 200 => hoconsc:mk(hoconsc:array(hoconsc:ref(sampler)), #{}), - 400 => emqx_dashboard_swagger:error_codes(['BAD_RPC'], <<"Bad RPC">>) + 404 => emqx_dashboard_swagger:error_codes(['NOT_FOUND'], <<"Node not found">>) } } }; @@ -79,7 +79,7 @@ schema("/monitor_current/nodes/:node") -> parameters => [parameter_node()], responses => #{ 200 => hoconsc:mk(hoconsc:ref(sampler_current), #{}), - 400 => emqx_dashboard_swagger:error_codes(['BAD_RPC'], <<"Bad RPC">>) + 404 => emqx_dashboard_swagger:error_codes(['NOT_FOUND'], <<"Node not found">>) } } }. 
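The 404 response declared above pairs with `emqx_utils_api:with_node_or_cluster/2` in the next hunk: it resolves the raw node binary from the path binding, runs the supplied fun, and maps an `{error, ...}` result onto the error response. A hypothetical handler written against that contract (the stats helper is invented; the return shapes follow `dashboard_samplers_fun/1` below):

```erlang
%% 'NodeOrCluster' is the resolved node atom, or 'all' for the whole cluster.
uptime(get, #{bindings := Bindings}) ->
    RawNode = maps:get(node, Bindings, <<"all">>),
    emqx_utils_api:with_node_or_cluster(RawNode, fun(NodeOrCluster) ->
        case my_stats:uptime(NodeOrCluster) of
            {badrpc, _} = Error -> {error, Error};
            Uptime -> {ok, Uptime}
        end
    end).
```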
@@ -121,39 +121,29 @@ fields(sampler_current) -> monitor(get, #{query_string := QS, bindings := Bindings}) -> Latest = maps:get(<<"latest">>, QS, infinity), - RawNode = maps:get(node, Bindings, all), - case emqx_misc:safe_to_existing_atom(RawNode, utf8) of - {ok, Node} -> - case emqx_dashboard_monitor:samplers(Node, Latest) of - {badrpc, {Node, Reason}} -> - Message = list_to_binary( - io_lib:format("Bad node ~p, rpc failed ~p", [Node, Reason]) - ), - {400, 'BAD_RPC', Message}; - Samplers -> - {200, Samplers} - end; - _ -> - Message = list_to_binary(io_lib:format("Bad node ~p", [RawNode])), - {400, 'BAD_RPC', Message} + RawNode = maps:get(node, Bindings, <<"all">>), + emqx_utils_api:with_node_or_cluster(RawNode, dashboard_samplers_fun(Latest)). + +dashboard_samplers_fun(Latest) -> + fun(NodeOrCluster) -> + case emqx_dashboard_monitor:samplers(NodeOrCluster, Latest) of + {badrpc, _} = Error -> {error, Error}; + Samplers -> {ok, Samplers} + end end. +monitor_current(get, #{bindings := []}) -> + emqx_utils_api:with_node_or_cluster(erlang:node(), fun emqx_dashboard_monitor:current_rate/1); monitor_current(get, #{bindings := Bindings}) -> - RawNode = maps:get(node, Bindings, all), - case emqx_misc:safe_to_existing_atom(RawNode, utf8) of - {ok, NodeOrCluster} -> - case emqx_dashboard_monitor:current_rate(NodeOrCluster) of - {ok, CurrentRate} -> - {200, CurrentRate}; - {badrpc, {Node, Reason}} -> - Message = list_to_binary( - io_lib:format("Bad node ~p, rpc failed ~p", [Node, Reason]) - ), - {400, 'BAD_RPC', Message} - end; - {error, _} -> - Message = list_to_binary(io_lib:format("Bad node ~p", [RawNode])), - {400, 'BAD_RPC', Message} + RawNode = maps:get(node, Bindings, <<"all">>), + emqx_utils_api:with_node_or_cluster(RawNode, fun current_rate/1). + +current_rate(Node) -> + case emqx_dashboard_monitor:current_rate(Node) of + {badrpc, _} = BadRpc -> + {error, BadRpc}; + {ok, _} = OkResult -> + OkResult end. %% ------------------------------------------------------------------------------------------------- diff --git a/apps/emqx_dashboard/src/emqx_dashboard_schema.erl b/apps/emqx_dashboard/src/emqx_dashboard_schema.erl index 4605d911d..28bfb709a 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_schema.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_schema.erl @@ -40,8 +40,9 @@ fields("dashboard") -> ?HOCON( emqx_schema:duration_s(), #{ - default => "10s", + default => <<"10s">>, desc => ?DESC(sample_interval), + importance => ?IMPORTANCE_HIDDEN, validator => fun validate_sample_interval/1 } )}, @@ -49,14 +50,23 @@ fields("dashboard") -> ?HOCON( emqx_schema:duration(), #{ - default => "60m", + default => <<"60m">>, desc => ?DESC(token_expired_time) } )}, {cors, fun cors/1}, {i18n_lang, fun i18n_lang/1}, {bootstrap_users_file, - ?HOCON(binary(), #{desc => ?DESC(bootstrap_users_file), required => false})} + ?HOCON( + binary(), + #{ + desc => ?DESC(bootstrap_users_file), + required => false, + importance => ?IMPORTANCE_HIDDEN, + default => <<>> + %% deprecated => {since, "5.1.0"} + } + )} ]; fields("listeners") -> [ @@ -87,13 +97,13 @@ fields("https") -> [ enable(false), bind(18084) - | common_listener_fields() ++ - exclude_fields( - ["fail_if_no_peer_cert"], - emqx_schema:server_ssl_opts_schema(#{}, true) - ) + | common_listener_fields() ++ server_ssl_opts() ]. +server_ssl_opts() -> + Opts0 = emqx_schema:server_ssl_opts_schema(#{}, true), + exclude_fields(["fail_if_no_peer_cert"], Opts0). 
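Note: `server_ssl_opts/0` above reuses the generic server-side TLS schema and strips `fail_if_no_peer_cert`, since mutual TLS is not forced for the dashboard. `exclude_fields/2`, whose definition opens the next hunk, operates on the usual `{Name, Schema}` keyword lists, conceptually:

    %% Illustrative shell session; the real helper may differ in details
    %% (e.g. crashing if the named field is absent).
    1> Fields = [{"verify", verify_schema},
    1>           {"fail_if_no_peer_cert", bool_schema},
    1>           {"certfile", file_schema}].
    2> lists:keydelete("fail_if_no_peer_cert", 1, Fields).
    [{"verify",verify_schema},{"certfile",file_schema}]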
+ exclude_fields([], Fields) -> Fields; exclude_fields([FieldName | Rest], Fields) -> @@ -109,7 +119,7 @@ common_listener_fields() -> ?HOCON( integer(), #{ - default => 4, + default => erlang:system_info(schedulers_online), desc => ?DESC(num_acceptors) } )}, @@ -133,7 +143,7 @@ common_listener_fields() -> ?HOCON( emqx_schema:duration(), #{ - default => "5s", + default => <<"10s">>, desc => ?DESC(send_timeout) } )}, @@ -152,6 +162,14 @@ common_listener_fields() -> default => false, desc => ?DESC(ipv6_v6only) } + )}, + {"proxy_header", + ?HOCON( + boolean(), + #{ + desc => ?DESC(proxy_header), + default => false + } )} ]. @@ -190,18 +208,21 @@ desc(_) -> undefined. default_username(type) -> binary(); -default_username(default) -> "admin"; +default_username(default) -> <<"admin">>; default_username(required) -> true; default_username(desc) -> ?DESC(default_username); default_username('readOnly') -> true; +default_username(importance) -> ?IMPORTANCE_HIDDEN; default_username(_) -> undefined. default_password(type) -> binary(); -default_password(default) -> "public"; +default_password(default) -> <<"public">>; default_password(required) -> true; default_password('readOnly') -> true; default_password(sensitive) -> true; +default_password(converter) -> fun emqx_schema:password_converter/2; default_password(desc) -> ?DESC(default_password); +default_password(importance) -> ?IMPORTANCE_HIDDEN; default_password(_) -> undefined. cors(type) -> boolean(); @@ -210,10 +231,13 @@ cors(required) -> false; cors(desc) -> ?DESC(cors); cors(_) -> undefined. +%% TODO: change it to string type +%% It will be up to the dashboard package which languagues to support i18n_lang(type) -> ?ENUM([en, zh]); i18n_lang(default) -> en; i18n_lang('readOnly') -> true; i18n_lang(desc) -> ?DESC(i18n_lang); +i18n_lang(importance) -> ?IMPORTANCE_HIDDEN; i18n_lang(_) -> undefined. validate_sample_interval(Second) -> diff --git a/apps/emqx_dashboard/src/emqx_dashboard_schema_api.erl b/apps/emqx_dashboard/src/emqx_dashboard_schema_api.erl new file mode 100644 index 000000000..e4f2f0c1a --- /dev/null +++ b/apps/emqx_dashboard/src/emqx_dashboard_schema_api.erl @@ -0,0 +1,76 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +%% This module is for dashboard to retrieve the schema hot config and bridges. +-module(emqx_dashboard_schema_api). + +-behaviour(minirest_api). + +-include_lib("hocon/include/hoconsc.hrl"). + +%% minirest API +-export([api_spec/0, paths/0, schema/1]). + +-export([get_schema/2]). + +-define(TAGS, [<<"dashboard">>]). +-define(BAD_REQUEST, 'BAD_REQUEST'). 
+ +%%-------------------------------------------------------------------- +%% minirest API and schema +%%-------------------------------------------------------------------- + +api_spec() -> + emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true}). + +paths() -> + ["/schemas/:name"]. + +%% This is a rather hidden API, so we don't need to add translations for the description. +schema("/schemas/:name") -> + #{ + 'operationId' => get_schema, + get => #{ + parameters => [ + {name, hoconsc:mk(hoconsc:enum([hotconf, bridges]), #{in => path})} + ], + desc => << + "Get the schema JSON of the specified name. " + "NOTE: only intended for EMQX Dashboard." + >>, + tags => ?TAGS, + security => [], + responses => #{ + 200 => hoconsc:mk(binary(), #{desc => <<"The JSON schema of the specified name.">>}) + } + } + }. + +%%-------------------------------------------------------------------- +%% API Handler funcs +%%-------------------------------------------------------------------- + +get_schema(get, #{ + bindings := #{name := Name} +}) -> + {200, gen_schema(Name)}; +get_schema(get, _) -> + {400, ?BAD_REQUEST, <<"unknown">>}. + +gen_schema(hotconf) -> + emqx_conf:hotconf_schema_json(); +gen_schema(bridges) -> + emqx_conf:bridge_schema_json(). diff --git a/apps/emqx_dashboard/src/emqx_dashboard_sup.erl b/apps/emqx_dashboard/src/emqx_dashboard_sup.erl index 896b44859..04d8ed1d5 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_sup.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_sup.erl @@ -28,6 +28,8 @@ start_link() -> supervisor:start_link({local, ?MODULE}, ?MODULE, []). init([]) -> + %% supervisor owns the cache table + ok = emqx_dashboard_desc_cache:init(), {ok, {{one_for_one, 5, 100}, [ ?CHILD(emqx_dashboard_listener, brutal_kill), diff --git a/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl b/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl index 4b7a672bd..0344c84c4 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl @@ -26,7 +26,11 @@ -export([error_codes/1, error_codes/2]). -export([file_schema/1]). --export([filter_check_request/2, filter_check_request_and_translate_body/2]). +-export([ + filter_check_request/2, + filter_check_request_and_translate_body/2, + gen_api_schema_json_iodata/3 +]). -ifdef(TEST). -export([ @@ -72,6 +76,8 @@ ]) ). +-define(SPECIAL_LANG_MSGID, <<"$msgid">>). + -define(MAX_ROW_LIMIT, 1000). -define(DEFAULT_ROW, 100). @@ -84,7 +90,8 @@ -type spec_opts() :: #{ check_schema => boolean() | filter(), translate_body => boolean(), - schema_converter => fun((hocon_schema:schema(), Module :: atom()) -> map()) + schema_converter => fun((hocon_schema:schema(), Module :: atom()) -> map()), + i18n_lang => atom() | string() | binary() }. -type route_path() :: string() | binary(). @@ -139,14 +146,20 @@ fields(limit) -> [{limit, hoconsc:mk(range(1, ?MAX_ROW_LIMIT), Meta)}]; fields(count) -> Desc = << - "Total number of records counted.
" - "Note: this field is 0 when the queryed table is empty, " - "or if the query can not be optimized and requires a full table scan." + "Total number of records matching the query.
" + "Note: this field is present only if the query can be optimized and does " + "not require a full table scan." + >>, + Meta = #{desc => Desc, required => false}, + [{count, hoconsc:mk(non_neg_integer(), Meta)}]; +fields(hasnext) -> + Desc = << + "Flag indicating whether there are more results available on next pages." >>, Meta = #{desc => Desc, required => true}, - [{count, hoconsc:mk(non_neg_integer(), Meta)}]; + [{hasnext, hoconsc:mk(boolean(), Meta)}]; fields(meta) -> - fields(page) ++ fields(limit) ++ fields(count). + fields(page) ++ fields(limit) ++ fields(count) ++ fields(hasnext). -spec schema_with_example(hocon_schema:type(), term()) -> hocon_schema:field_schema_map(). schema_with_example(Type, Example) -> @@ -185,6 +198,50 @@ file_schema(FileName) -> } }. +gen_api_schema_json_iodata(SchemaMod, SchemaInfo, Converter) -> + {ApiSpec0, Components0} = emqx_dashboard_swagger:spec( + SchemaMod, + #{ + schema_converter => Converter, + i18n_lang => ?SPECIAL_LANG_MSGID + } + ), + ApiSpec = lists:foldl( + fun({Path, Spec, _, _}, Acc) -> + NewSpec = maps:fold( + fun(Method, #{responses := Responses}, SubAcc) -> + case Responses of + #{ + <<"200">> := + #{ + <<"content">> := #{ + <<"application/json">> := #{<<"schema">> := Schema} + } + } + } -> + SubAcc#{Method => Schema}; + _ -> + SubAcc + end + end, + #{}, + Spec + ), + Acc#{list_to_atom(Path) => NewSpec} + end, + #{}, + ApiSpec0 + ), + Components = lists:foldl(fun(M, Acc) -> maps:merge(M, Acc) end, #{}, Components0), + emqx_utils_json:encode( + #{ + info => SchemaInfo, + paths => ApiSpec, + components => #{schemas => Components} + }, + [pretty, force_utf8] + ). + %%------------------------------------------------------------------------------ %% Private functions %%------------------------------------------------------------------------------ @@ -229,10 +286,17 @@ parse_spec_ref(Module, Path, Options) -> Schema = try erlang:apply(Module, schema, [Path]) - %% better error message catch - error:Reason -> - throw({error, #{mfa => {Module, schema, [Path]}, reason => Reason}}) + Error:Reason:Stacktrace -> + %% This error is intended to fail the build + %% hence print to standard_error + io:format( + standard_error, + "Failed to generate swagger for path ~p in module ~p~n" + "error:~p~nreason:~p~n~p~n", + [Module, Path, Error, Reason, Stacktrace] + ), + error({failed_to_generate_swagger_spec, Module, Path}) end, {Specs, Refs} = maps:fold( fun(Method, Meta, {Acc, RefsAcc}) -> @@ -327,11 +391,11 @@ check_request_body(#{body := Body}, Spec, _Module, _CheckFun, false) when is_map %% tags, description, summary, security, deprecated meta_to_spec(Meta, Module, Options) -> - {Params, Refs1} = parameters(maps:get(parameters, Meta, []), Module), + {Params, Refs1} = parameters(maps:get(parameters, Meta, []), Module, Options), {RequestBody, Refs2} = request_body(maps:get('requestBody', Meta, []), Module, Options), {Responses, Refs3} = responses(maps:get(responses, Meta, #{}), Module, Options), { - generate_method_desc(to_spec(Meta, Params, RequestBody, Responses)), + generate_method_desc(to_spec(Meta, Params, RequestBody, Responses), Options), lists:usort(Refs1 ++ Refs2 ++ Refs3) }. @@ -342,13 +406,13 @@ to_spec(Meta, Params, RequestBody, Responses) -> Spec = to_spec(Meta, Params, [], Responses), maps:put('requestBody', RequestBody, Spec). 
-generate_method_desc(Spec = #{desc := _Desc}) -> - Spec1 = trans_description(maps:remove(desc, Spec), Spec), + Spec1 = trans_description(maps:remove(desc, Spec), Spec, Options), trans_tags(Spec1); -generate_method_desc(Spec = #{description := _Desc}) -> - Spec1 = trans_description(Spec, Spec), + Spec1 = trans_description(Spec, Spec, Options), trans_tags(Spec1); -generate_method_desc(Spec) -> +generate_method_desc(Spec, _Options) -> trans_tags(Spec). trans_tags(Spec = #{tags := Tags}) -> @@ -356,7 +420,7 @@ trans_tags(Spec) -> Spec. -parameters(Params, Module) -> +parameters(Params, Module, Options) -> {SpecList, AllRefs} = lists:foldl( fun(Param, {Acc, RefsAcc}) -> @@ -382,7 +446,7 @@ parameters(Params, Module) -> Type ), Spec1 = trans_required(Spec0, Required, In), - Spec2 = trans_description(Spec1, Type), + Spec2 = trans_description(Spec1, Type, Options), {[Spec2 | Acc], Refs ++ RefsAcc} end end, @@ -411,68 +475,114 @@ init_prop(Keys, Init, Type) -> fun(Key, Acc) -> case hocon_schema:field_schema(Type, Key) of undefined -> Acc; - Schema -> Acc#{Key => to_bin(Schema)} + Schema -> Acc#{Key => format_prop(Key, Schema)} end end, Init, Keys ). +format_prop(deprecated, Value) when is_boolean(Value) -> Value; +format_prop(deprecated, _) -> true; +format_prop(_, Schema) -> to_bin(Schema). + trans_required(Spec, true, _) -> Spec#{required => true}; trans_required(Spec, _, path) -> Spec#{required => true}; trans_required(Spec, _, _) -> Spec. -trans_desc(Init, Hocon, Func, Name) -> - Spec0 = trans_description(Init, Hocon), +trans_desc(Init, Hocon, Func, Name, Options) -> + Spec0 = trans_description(Init, Hocon, Options), case Func =:= fun hocon_schema_to_spec/2 of true -> Spec0; false -> - Spec1 = trans_label(Spec0, Hocon, Name), + Spec1 = trans_label(Spec0, Hocon, Name, Options), case Spec1 of #{description := _} -> Spec1; _ -> Spec1#{description => <>} end end. -trans_description(Spec, Hocon) -> +trans_description(Spec, Hocon, Options) -> Desc = case desc_struct(Hocon) of undefined -> undefined; - ?DESC(_, _) = Struct -> get_i18n(<<"desc">>, Struct, undefined); - Struct -> to_bin(Struct) + ?DESC(_, _) = Struct -> get_i18n(<<"desc">>, Struct, undefined, Options); + Text -> to_bin(Text) end, case Desc of undefined -> Spec; Desc -> Desc1 = binary:replace(Desc, [<<"\n">>], <<"<br/>
">>, [global]), - Spec#{description => Desc1} + maybe_add_summary_from_label(Spec#{description => Desc1}, Hocon, Options) end. -get_i18n(Key, Struct, Default) -> - {ok, #{cache := Cache, lang := Lang}} = emqx_dashboard:get_i18n(), - Desc = hocon_schema:resolve_schema(Struct, Cache), - emqx_map_lib:deep_get([Key, Lang], Desc, Default). - -trans_label(Spec, Hocon, Default) -> +maybe_add_summary_from_label(Spec, Hocon, Options) -> Label = case desc_struct(Hocon) of - ?DESC(_, _) = Struct -> get_i18n(<<"label">>, Struct, Default); + ?DESC(_, _) = Struct -> get_i18n(<<"label">>, Struct, undefined, Options); + _ -> undefined + end, + case Label of + undefined -> Spec; + _ -> Spec#{summary => Label} + end. + +get_i18n(Tag, ?DESC(Namespace, Id), Default, Options) -> + Lang = get_lang(Options), + case Lang of + ?SPECIAL_LANG_MSGID -> + make_msgid(Namespace, Id, Tag); + _ -> + get_i18n_text(Lang, Namespace, Id, Tag, Default) + end. + +get_i18n_text(Lang, Namespace, Id, Tag, Default) -> + case emqx_dashboard_desc_cache:lookup(Lang, Namespace, Id, Tag) of + undefined -> + Default; + Text -> + Text + end. + +%% Format:$msgid:Namespace.Id.Tag +%% e.g. $msgid:emqx_schema.key.desc +%% $msgid:emqx_schema.key.label +%% if needed, the consumer of this schema JSON can use this msgid to +%% resolve the text in the i18n database. +make_msgid(Namespace, Id, Tag) -> + iolist_to_binary(["$msgid:", to_bin(Namespace), ".", to_bin(Id), ".", Tag]). + +%% So far i18n_lang in options is only used at build time. +%% At runtime, it's still the global config which controls the language. +get_lang(#{i18n_lang := Lang}) -> Lang; +get_lang(_) -> emqx:get_config([dashboard, i18n_lang]). + +trans_label(Spec, Hocon, Default, Options) -> + Label = + case desc_struct(Hocon) of + ?DESC(_, _) = Struct -> get_i18n(<<"label">>, Struct, Default, Options); _ -> Default end, Spec#{label => Label}. desc_struct(Hocon) -> - case hocon_schema:field_schema(Hocon, desc) of - undefined -> - case hocon_schema:field_schema(Hocon, description) of - undefined -> get_ref_desc(Hocon); - Struct1 -> Struct1 - end; - Struct -> - Struct - end. + R = + case hocon_schema:field_schema(Hocon, desc) of + undefined -> + case hocon_schema:field_schema(Hocon, description) of + undefined -> get_ref_desc(Hocon); + Struct1 -> Struct1 + end; + Struct -> + Struct + end, + ensure_bin(R). + +ensure_bin(undefined) -> undefined; +ensure_bin(?DESC(_Namespace, _Id) = Desc) -> Desc; +ensure_bin(Text) -> to_bin(Text). get_ref_desc(?R_REF(Mod, Name)) -> case erlang:function_exported(Mod, desc, 1) of @@ -503,7 +613,7 @@ responses(Responses, Module, Options) -> {Spec, Refs}. 
response(Status, ?DESC(_Mod, _Id) = Schema, {Acc, RefsAcc, Module, Options}) -> - Desc = trans_description(#{}, #{desc => Schema}), + Desc = trans_description(#{}, #{desc => Schema}, Options), {Acc#{integer_to_binary(Status) => Desc}, RefsAcc, Module, Options}; response(Status, Bin, {Acc, RefsAcc, Module, Options}) when is_binary(Bin) -> {Acc#{integer_to_binary(Status) => #{description => Bin}}, RefsAcc, Module, Options}; @@ -532,7 +642,7 @@ response(Status, Schema, {Acc, RefsAcc, Module, Options}) -> Hocon = hocon_schema:field_schema(Schema, type), Examples = hocon_schema:field_schema(Schema, examples), {Spec, Refs} = hocon_schema_to_spec(Hocon, Module), - Init = trans_description(#{}, Schema), + Init = trans_description(#{}, Schema, Options), Content = content(Spec, Examples), { Acc#{integer_to_binary(Status) => Init#{<<"content">> => Content}}, @@ -542,7 +652,7 @@ response(Status, Schema, {Acc, RefsAcc, Module, Options}) -> }; false -> {Props, Refs} = parse_object(Schema, Module, Options), - Init = trans_description(#{}, Schema), + Init = trans_description(#{}, Schema, Options), Content = Init#{<<"content">> => content(Props)}, {Acc#{integer_to_binary(Status) => Content}, Refs ++ RefsAcc, Module, Options} end. @@ -569,7 +679,7 @@ components(Options, [{Module, Field} | Refs], SpecAcc, SubRefsAcc) -> %% parameters in ref only have one value, not array components(Options, [{Module, Field, parameter} | Refs], SpecAcc, SubRefsAcc) -> Props = hocon_schema_fields(Module, Field), - {[Param], SubRefs} = parameters(Props, Module), + {[Param], SubRefs} = parameters(Props, Module, Options), Namespace = namespace(Module), NewSpecAcc = SpecAcc#{?TO_REF(Namespace, Field) => Param}, components(Options, Refs, NewSpecAcc, SubRefs ++ SubRefsAcc). @@ -603,8 +713,6 @@ hocon_schema_to_spec(Type, LocalModule) when ?IS_TYPEREFL(Type) -> hocon_schema_to_spec(?ARRAY(Item), LocalModule) -> {Schema, Refs} = hocon_schema_to_spec(Item, LocalModule), {#{type => array, items => Schema}, Refs}; -hocon_schema_to_spec(?LAZY(Item), LocalModule) -> - hocon_schema_to_spec(Item, LocalModule); hocon_schema_to_spec(?ENUM(Items), _LocalModule) -> {#{type => string, enum => Items}, []}; hocon_schema_to_spec(?MAP(Name, Type), LocalModule) -> @@ -623,7 +731,7 @@ hocon_schema_to_spec(?UNION(Types), LocalModule) -> {[Schema | Acc], SubRefs ++ RefsAcc} end, {[], []}, - Types + hoconsc:union_members(Types) ), {#{<<"oneOf">> => OneOf}, Refs}; hocon_schema_to_spec(Atom, _LocalModule) when is_atom(Atom) -> @@ -705,9 +813,11 @@ typename_to_spec("service_account_json()", _Mod) -> typename_to_spec("#{" ++ _, Mod) -> typename_to_spec("map()", Mod); typename_to_spec("qos()", _Mod) -> - #{type => string, enum => [0, 1, 2]}; + #{type => integer, minimum => 0, maximum => 2, example => 0}; typename_to_spec("{binary(), binary()}", _Mod) -> #{type => object, example => #{}}; +typename_to_spec("{string(), string()}", _Mod) -> + #{type => object, example => #{}}; typename_to_spec("comma_separated_list()", _Mod) -> #{type => string, example => <<"item1,item2">>}; typename_to_spec("comma_separated_binary()", _Mod) -> @@ -723,7 +833,7 @@ typename_to_spec("log_level()", _Mod) -> }; typename_to_spec("rate()", _Mod) -> #{type => string, example => <<"10MB">>}; -typename_to_spec("capacity()", _Mod) -> +typename_to_spec("burst()", _Mod) -> #{type => string, example => <<"100MB">>}; typename_to_spec("burst_rate()", _Mod) -> %% 0/0s = no burst @@ -734,6 +844,10 @@ typename_to_spec("initial()", _Mod) -> #{type => string, example => <<"0MB">>}; 
typename_to_spec("bucket_name()", _Mod) -> #{type => string, example => <<"retainer">>}; +typename_to_spec("json_binary()", _Mod) -> + #{type => string, example => <<"{\"a\": [1,true]}">>}; +typename_to_spec("port_number()", _Mod) -> + range("1..65535"); typename_to_spec(Name, Mod) -> Spec = range(Name), Spec1 = remote_module_type(Spec, Name, Mod), @@ -809,36 +923,8 @@ to_bin(X) -> X. parse_object(PropList = [_ | _], Module, Options) when is_list(PropList) -> - {Props, Required, Refs} = - lists:foldl( - fun({Name, Hocon}, {Acc, RequiredAcc, RefsAcc}) -> - NameBin = to_bin(Name), - case hoconsc:is_schema(Hocon) of - true -> - HoconType = hocon_schema:field_schema(Hocon, type), - Init0 = init_prop([default | ?DEFAULT_FIELDS], #{}, Hocon), - SchemaToSpec = schema_converter(Options), - Init = trans_desc(Init0, Hocon, SchemaToSpec, NameBin), - {Prop, Refs1} = SchemaToSpec(HoconType, Module), - NewRequiredAcc = - case is_required(Hocon) of - true -> [NameBin | RequiredAcc]; - false -> RequiredAcc - end, - { - [{NameBin, maps:merge(Prop, Init)} | Acc], - NewRequiredAcc, - Refs1 ++ RefsAcc - }; - false -> - {SubObject, SubRefs} = parse_object(Hocon, Module, Options), - {[{NameBin, SubObject} | Acc], RequiredAcc, SubRefs ++ RefsAcc} - end - end, - {[], [], []}, - PropList - ), - Object = #{<<"type">> => object, <<"properties">> => lists:reverse(Props)}, + {Props, Required, Refs} = parse_object_loop(PropList, Module, Options), + Object = #{<<"type">> => object, <<"properties">> => Props}, case Required of [] -> {Object, Refs}; _ -> {maps:put(required, Required, Object), Refs} @@ -853,6 +939,54 @@ parse_object(Other, Module, Options) -> }} ). +parse_object_loop(PropList0, Module, Options) -> + PropList = lists:filter( + fun({_, Hocon}) -> + case hoconsc:is_schema(Hocon) andalso is_hidden(Hocon) of + true -> false; + false -> true + end + end, + PropList0 + ), + parse_object_loop(PropList, Module, Options, _Props = [], _Required = [], _Refs = []). + +parse_object_loop([], _Modlue, _Options, Props, Required, Refs) -> + {lists:reverse(Props), lists:usort(Required), Refs}; +parse_object_loop([{Name, Hocon} | Rest], Module, Options, Props, Required, Refs) -> + NameBin = to_bin(Name), + case hoconsc:is_schema(Hocon) of + true -> + HoconType = hocon_schema:field_schema(Hocon, type), + Init0 = init_prop([default | ?DEFAULT_FIELDS], #{}, Hocon), + SchemaToSpec = schema_converter(Options), + Init = trans_desc(Init0, Hocon, SchemaToSpec, NameBin, Options), + {Prop, Refs1} = SchemaToSpec(HoconType, Module), + NewRequiredAcc = + case is_required(Hocon) of + true -> [NameBin | Required]; + false -> Required + end, + parse_object_loop( + Rest, + Module, + Options, + [{NameBin, maps:merge(Prop, Init)} | Props], + NewRequiredAcc, + Refs1 ++ Refs + ); + false -> + %% TODO: there is only a handful of such + %% refactor the schema to unify the two cases + {SubObject, SubRefs} = parse_object(Hocon, Module, Options), + parse_object_loop( + Rest, Module, Options, [{NameBin, SubObject} | Props], Required, SubRefs ++ Refs + ) + end. + +%% return true if the field has 'importance' set to 'hidden' +is_hidden(Hocon) -> + hocon_schema:is_hidden(Hocon, #{include_importance_up_from => ?IMPORTANCE_LOW}). is_required(Hocon) -> hocon_schema:field_schema(Hocon, required) =:= true. @@ -872,4 +1006,4 @@ schema_converter(Options) -> maps:get(schema_converter, Options, fun hocon_schema_to_spec/2). hocon_error_msg(Reason) -> - emqx_misc:readable_error_msg(Reason). + emqx_utils:readable_error_msg(Reason). 
diff --git a/apps/emqx_dashboard/test/emqx_dashboard_SUITE.erl b/apps/emqx_dashboard/test/emqx_dashboard_SUITE.erl index 934d6055d..1f14b02c0 100644 --- a/apps/emqx_dashboard/test/emqx_dashboard_SUITE.erl +++ b/apps/emqx_dashboard/test/emqx_dashboard_SUITE.erl @@ -56,42 +56,26 @@ all() -> emqx_common_test_helpers:all(?MODULE). -end_suite() -> - end_suite([]). - -end_suite(Apps) -> - application:unload(emqx_management), - emqx_common_test_helpers:stop_apps(Apps ++ [emqx_dashboard]). - init_per_suite(Config) -> - emqx_common_test_helpers:start_apps( - [emqx_management, emqx_dashboard], - fun set_special_configs/1 - ), + emqx_mgmt_api_test_util:init_suite([emqx_management]), Config. end_per_suite(_Config) -> - emqx_common_test_helpers:stop_apps([emqx_dashboard, emqx_management]), - mria:stop(). - -set_special_configs(emqx_dashboard) -> - emqx_dashboard_api_test_helpers:set_default_config(), - ok; -set_special_configs(_) -> - ok. + emqx_mgmt_api_test_util:end_suite([emqx_management]). t_overview(_) -> mnesia:clear_table(?ADMIN), - emqx_dashboard_admin:add_user(<<"admin">>, <<"public">>, <<"simple_description">>), + emqx_dashboard_admin:add_user(<<"admin">>, <<"public_www1">>, <<"simple_description">>), + Headers = auth_header_(<<"admin">>, <<"public_www1">>), [ - {ok, _} = request_dashboard(get, api_path([Overview]), auth_header_()) + {ok, _} = request_dashboard(get, api_path([Overview]), Headers) || Overview <- ?OVERVIEWS ]. t_admins_add_delete(_) -> mnesia:clear_table(?ADMIN), Desc = <<"simple description">>, - {ok, _} = emqx_dashboard_admin:add_user(<<"username">>, <<"password">>, Desc), + {ok, _} = emqx_dashboard_admin:add_user(<<"username">>, <<"password_0">>, Desc), {ok, _} = emqx_dashboard_admin:add_user(<<"username1">>, <<"password1">>, Desc), Admins = emqx_dashboard_admin:all_users(), ?assertEqual(2, length(Admins)), @@ -100,8 +84,8 @@ t_admins_add_delete(_) -> ?assertEqual(1, length(Users)), {ok, _} = emqx_dashboard_admin:change_password( <<"username">>, - <<"password">>, - <<"pwd">> + <<"password_0">>, + <<"new_pwd_1234">> ), timer:sleep(10), {ok, _} = emqx_dashboard_admin:remove_user(<<"username">>). @@ -109,20 +93,21 @@ t_admins_add_delete(_) -> t_admin_delete_self_failed(_) -> mnesia:clear_table(?ADMIN), Desc = <<"simple description">>, - _ = emqx_dashboard_admin:add_user(<<"username1">>, <<"password">>, Desc), + _ = emqx_dashboard_admin:add_user(<<"username1">>, <<"password_1">>, Desc), Admins = emqx_dashboard_admin:all_users(), ?assertEqual(1, length(Admins)), - Header = auth_header_(<<"username1">>, <<"password">>), + Header = auth_header_(<<"username1">>, <<"password_1">>), {error, {_, 400, _}} = request_dashboard(delete, api_path(["users", "username1"]), Header), - Token = erlang:iolist_to_binary(["Basic ", base64:encode("username1:password")]), + Token = ["Basic ", base64:encode("username1:password_1")], Header2 = {"Authorization", Token}, - {error, {_, 400, _}} = request_dashboard(delete, api_path(["users", "username1"]), Header2), + {error, {_, 401, _}} = request_dashboard(delete, api_path(["users", "username1"]), Header2), mnesia:clear_table(?ADMIN). 
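Note: the 400 -> 401 change in the last assertion is deliberate: deleting yourself with a valid session is rejected as a bad request (400), but the same request with raw Basic credentials now fails authentication (401) instead. The basic-auth header built there is the standard `base64("user:pass")` form; a minimal equivalent of the helper used across these suites:

    %% Illustrative: build an HTTP basic-auth header for httpc requests.
    auth_header(User, Pass) when is_binary(User), is_binary(Pass) ->
        Token = base64:encode(<<User/binary, ":", Pass/binary>>),
        {"Authorization", "Basic " ++ binary_to_list(Token)}.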
t_rest_api(_Config) -> mnesia:clear_table(?ADMIN), Desc = <<"administrator">>, - emqx_dashboard_admin:add_user(<<"admin">>, <<"public">>, Desc), + Password = <<"public_www1">>, + emqx_dashboard_admin:add_user(<<"admin">>, Password, Desc), {ok, 200, Res0} = http_get(["users"]), ?assertEqual( [ @@ -136,7 +121,7 @@ t_rest_api(_Config) -> {ok, 200, _} = http_put(["users", "admin"], #{<<"description">> => <<"a_new_description">>}), {ok, 200, _} = http_post(["users"], #{ <<"username">> => <<"usera">>, - <<"password">> => <<"passwd">>, + <<"password">> => <<"passwd_01234">>, <<"description">> => Desc }), {ok, 204, _} = http_delete(["users", "usera"]), @@ -144,34 +129,46 @@ t_rest_api(_Config) -> {ok, 204, _} = http_post( ["users", "admin", "change_pwd"], #{ - <<"old_pwd">> => <<"public">>, - <<"new_pwd">> => <<"newpwd">> + <<"old_pwd">> => Password, + <<"new_pwd">> => <<"newpwd_lkdfki1">> } ), mnesia:clear_table(?ADMIN), - emqx_dashboard_admin:add_user(<<"admin">>, <<"public">>, <<"administrator">>), + emqx_dashboard_admin:add_user(<<"admin">>, Password, <<"administrator">>), + ok. + +t_swagger_json(_Config) -> + Url = ?HOST ++ "/api-docs/swagger.json", + %% with auth + Auth = auth_header_(<<"admin">>, <<"public_www1">>), + {ok, 200, Body1} = request_api(get, Url, Auth), + ?assert(emqx_utils_json:is_json(Body1)), + %% without auth + {ok, {{"HTTP/1.1", 200, "OK"}, _Headers, Body2}} = + httpc:request(get, {Url, []}, [], [{body_format, binary}]), + ?assertEqual(Body1, Body2), ok. t_cli(_Config) -> [mria:dirty_delete(?ADMIN, Admin) || Admin <- mnesia:dirty_all_keys(?ADMIN)], - emqx_dashboard_cli:admins(["add", "username", "password"]), + emqx_dashboard_cli:admins(["add", "username", "password_ww2"]), [#?ADMIN{username = <<"username">>, pwdhash = <<Salt:4/binary, Hash/binary>>}] = emqx_dashboard_admin:lookup_user(<<"username">>), - ?assertEqual(Hash, crypto:hash(sha256, <<Salt/binary, <<"password">>/binary>>)), - emqx_dashboard_cli:admins(["passwd", "username", "newpassword"]), + ?assertEqual(Hash, crypto:hash(sha256, <<Salt/binary, <<"password_ww2">>/binary>>)), + emqx_dashboard_cli:admins(["passwd", "username", "new_password"]), [#?ADMIN{username = <<"username">>, pwdhash = <<Salt1:4/binary, Hash1/binary>>}] = emqx_dashboard_admin:lookup_user(<<"username">>), - ?assertEqual(Hash1, crypto:hash(sha256, <<Salt1/binary, <<"newpassword">>/binary>>)), + ?assertEqual(Hash1, crypto:hash(sha256, <<Salt1/binary, <<"new_password">>/binary>>)), emqx_dashboard_cli:admins(["del", "username"]), [] = emqx_dashboard_admin:lookup_user(<<"username">>), - emqx_dashboard_cli:admins(["add", "admin1", "pass1"]), - emqx_dashboard_cli:admins(["add", "admin2", "passw2"]), + emqx_dashboard_cli:admins(["add", "admin1", "pass_lkdfkd1"]), + emqx_dashboard_cli:admins(["add", "admin2", "w_pass_lkdfkd2"]), AdminList = emqx_dashboard_admin:all_users(), ?assertEqual(2, length(AdminList)). t_lookup_by_username_jwt(_Config) -> User = bin(["user-", integer_to_list(random_num())]), - Pwd = bin(integer_to_list(random_num())), + Pwd = bin("t_password" ++ integer_to_list(random_num())), emqx_dashboard_token:sign(User, Pwd), ?assertMatch( [#?ADMIN_JWT{username = User}], @@ -185,7 +182,7 @@ t_lookup_by_username_jwt(_Config) -> t_clean_expired_jwt(_Config) -> User = bin(["user-", integer_to_list(random_num())]), - Pwd = bin(integer_to_list(random_num())), + Pwd = bin("t_password" ++ integer_to_list(random_num())), emqx_dashboard_token:sign(User, Pwd), [#?ADMIN_JWT{username = User, exptime = ExpTime}] = emqx_dashboard_token:lookup_by_username(User), @@ -208,16 +205,16 @@ random_num() -> erlang:system_time(nanosecond). http_get(Parts) -> - request_api(get, api_path(Parts), auth_header_()).
+ request_api(get, api_path(Parts), auth_header_(<<"admin">>, <<"public_www1">>)). http_delete(Parts) -> - request_api(delete, api_path(Parts), auth_header_()). + request_api(delete, api_path(Parts), auth_header_(<<"admin">>, <<"public_www1">>)). http_post(Parts, Body) -> - request_api(post, api_path(Parts), [], auth_header_(), Body). + request_api(post, api_path(Parts), [], auth_header_(<<"admin">>, <<"public_www1">>), Body). http_put(Parts, Body) -> - request_api(put, api_path(Parts), [], auth_header_(), Body). + request_api(put, api_path(Parts), [], auth_header_(<<"admin">>, <<"public_www1">>), Body). request_dashboard(Method, Url, Auth) -> Request = {Url, [Auth]}, @@ -249,5 +246,5 @@ api_path(Parts) -> ?HOST ++ filename:join([?BASE_PATH | Parts]). json(Data) -> - {ok, Jsx} = emqx_json:safe_decode(Data, [return_maps]), + {ok, Jsx} = emqx_utils_json:safe_decode(Data, [return_maps]), Jsx. diff --git a/apps/emqx_dashboard/test/emqx_dashboard_admin_SUITE.erl b/apps/emqx_dashboard/test/emqx_dashboard_admin_SUITE.erl index fefc492cc..c12849ac7 100644 --- a/apps/emqx_dashboard/test/emqx_dashboard_admin_SUITE.erl +++ b/apps/emqx_dashboard/test/emqx_dashboard_admin_SUITE.erl @@ -19,39 +19,25 @@ -compile(export_all). -include("emqx_dashboard.hrl"). --include_lib("emqx/include/http_api.hrl"). -include_lib("eunit/include/eunit.hrl"). all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> - mria:start(), - application:load(emqx_dashboard), - emqx_common_test_helpers:start_apps([emqx_conf, emqx_dashboard], fun set_special_configs/1), + emqx_mgmt_api_test_util:init_suite([emqx_conf]), Config. -set_special_configs(emqx_dashboard) -> - emqx_dashboard_api_test_helpers:set_default_config(), - ok; -set_special_configs(_) -> - ok. - -end_per_suite(Config) -> - end_suite(), - Config. +end_per_suite(_Config) -> + emqx_mgmt_api_test_util:end_suite([emqx_conf]). end_per_testcase(_, _Config) -> All = emqx_dashboard_admin:all_users(), [emqx_dashboard_admin:remove_user(Name) || #{username := Name} <- All]. -end_suite() -> - application:unload(emqx_management), - emqx_common_test_helpers:stop_apps([emqx_dashboard]). - t_check_user(_) -> Username = <<"admin1">>, - Password = <<"public">>, + Password = <<"public_1">>, BadUsername = <<"admin_bad">>, BadPassword = <<"public_bad">>, EmptyUsername = <<>>, @@ -108,7 +94,7 @@ t_lookup_user(_) -> t_all_users(_) -> Username = <<"admin_all">>, - Password = <<"public">>, + Password = <<"public_2">>, {ok, _} = emqx_dashboard_admin:add_user(Username, Password, <<"desc">>), All = emqx_dashboard_admin:all_users(), ?assert(erlang:length(All) >= 1), @@ -153,6 +139,7 @@ t_change_password(_) -> Description = <<"change_description">>, NewPassword = <<"new_password">>, + NewBadPassword = <<"public">>, BadChangeUser = <<"change_user_bad">>, @@ -163,14 +150,17 @@ t_change_password(_) -> {error, <<"password_error">>} = emqx_dashboard_admin:change_password(User, OldPassword, NewPassword), + {error, <<"The range of password length is 8~64">>} = + emqx_dashboard_admin:change_password(User, NewPassword, NewBadPassword), + {error, <<"username_not_found">>} = emqx_dashboard_admin:change_password(BadChangeUser, OldPassword, NewPassword), ok. 
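Note: the new assertion pins the dashboard password policy that motivates all the `public` -> `public_www1`-style renames across these suites: a password must be 8 to 64 characters long. The check itself lives in `emqx_dashboard_admin`; a minimal sketch of the length rule as the test exercises it (the real validator may check more than length):

    check_password_len(Password) when is_binary(Password) ->
        case byte_size(Password) of
            Len when Len >= 8, Len =< 64 -> ok;
            _ -> {error, <<"The range of password length is 8~64">>}
        end.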
t_clean_token(_) -> Username = <<"admin_token">>, - Password = <<"public">>, - NewPassword = <<"public1">>, + Password = <<"public_www1">>, + NewPassword = <<"public_www2">>, {ok, _} = emqx_dashboard_admin:add_user(Username, Password, <<"desc">>), {ok, Token} = emqx_dashboard_admin:sign_token(Username, Password), ok = emqx_dashboard_admin:verify_token(Token), diff --git a/apps/emqx_dashboard/test/emqx_dashboard_api_test_helpers.erl b/apps/emqx_dashboard/test/emqx_dashboard_api_test_helpers.erl index 87a3654ac..25b4065de 100644 --- a/apps/emqx_dashboard/test/emqx_dashboard_api_test_helpers.erl +++ b/apps/emqx_dashboard/test/emqx_dashboard_api_test_helpers.erl @@ -19,6 +19,8 @@ -export([ set_default_config/0, set_default_config/1, + set_default_config/2, + set_default_config/3, request/2, request/3, request/4, @@ -36,17 +38,24 @@ set_default_config() -> set_default_config(<<"admin">>). set_default_config(DefaultUsername) -> + set_default_config(DefaultUsername, false). + +set_default_config(DefaultUsername, HAProxyEnabled) -> + set_default_config(DefaultUsername, HAProxyEnabled, #{}). + +set_default_config(DefaultUsername, HAProxyEnabled, Opts) -> Config = #{ listeners => #{ http => #{ enable => true, - bind => 18083, + bind => maps:get(bind, Opts, 18083), inet6 => false, ipv6_v6only => false, max_connections => 512, num_acceptors => 4, send_timeout => 5000, - backlog => 512 + backlog => 512, + proxy_header => HAProxyEnabled } }, default_username => DefaultUsername, @@ -77,7 +86,7 @@ request(Username, Method, Url, Body) -> -> {Url, [auth_header(Username)]}; _ -> - {Url, [auth_header(Username)], "application/json", jsx:encode(Body)} + {Url, [auth_header(Username)], "application/json", emqx_utils_json:encode(Body)} end, ct:pal("Method: ~p, Request: ~p", [Method, Request]), case httpc:request(Method, Request, [], [{body_format, binary}]) of diff --git a/apps/emqx_dashboard/test/emqx_dashboard_bad_api_SUITE.erl b/apps/emqx_dashboard/test/emqx_dashboard_bad_api_SUITE.erl index b7fbf889e..92327a7db 100644 --- a/apps/emqx_dashboard/test/emqx_dashboard_bad_api_SUITE.erl +++ b/apps/emqx_dashboard/test/emqx_dashboard_bad_api_SUITE.erl @@ -25,43 +25,19 @@ -define(SERVER, "http://127.0.0.1:18083/api/v5"). +-import(emqx_mgmt_api_test_util, [request/2]). + all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> - mria:start(), - application:load(emqx_dashboard), - emqx_common_test_helpers:start_apps([emqx_conf, emqx_dashboard], fun set_special_configs/1), + emqx_mgmt_api_test_util:init_suite([emqx_conf]), Config. -set_special_configs(emqx_dashboard) -> - emqx_dashboard_api_test_helpers:set_default_config(), - ok; -set_special_configs(_) -> - ok. - -end_per_suite(Config) -> - end_suite(), - Config. - -end_suite() -> - application:unload(emqx_management), - emqx_common_test_helpers:stop_apps([emqx_dashboard]). +end_per_suite(_Config) -> + emqx_mgmt_api_test_util:end_suite([emqx_conf]). t_bad_api_path(_) -> Url = ?SERVER ++ "/for/test/some/path/not/exist", - {error, {"HTTP/1.1", 404, "Not Found"}} = request(Url), + {ok, 404, _} = request(get, Url), ok. - -request(Url) -> - Request = {Url, []}, - case httpc:request(get, Request, [], []) of - {error, Reason} -> - {error, Reason}; - {ok, {{"HTTP/1.1", Code, _}, _, Return}} when - Code >= 200 andalso Code =< 299 - -> - {ok, emqx_json:decode(Return, [return_maps])}; - {ok, {Reason, _, _}} -> - {error, Reason} - end. 
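Note: the widened helper above lets suites opt into the PROXY-protocol listener and override listener options. Typical calls, based on the arities just added (`#{bind => ...}` is the only option shown in this hunk; the alternate port is illustrative):

    %% default config, admin user, no PROXY protocol:
    emqx_dashboard_api_test_helpers:set_default_config(),
    %% enable proxy_header for the HAProxy suite below:
    emqx_dashboard_api_test_helpers:set_default_config(<<"admin">>, true),
    %% same, but bind the HTTP listener elsewhere:
    emqx_dashboard_api_test_helpers:set_default_config(<<"admin">>, true, #{bind => 18084}).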
diff --git a/apps/emqx_dashboard/test/emqx_dashboard_error_code_SUITE.erl b/apps/emqx_dashboard/test/emqx_dashboard_error_code_SUITE.erl index 5def3c9dd..588a69065 100644 --- a/apps/emqx_dashboard/test/emqx_dashboard_error_code_SUITE.erl +++ b/apps/emqx_dashboard/test/emqx_dashboard_error_code_SUITE.erl @@ -29,24 +29,11 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> - mria:start(), - application:load(emqx_dashboard), - emqx_common_test_helpers:start_apps([emqx_conf, emqx_dashboard], fun set_special_configs/1), + emqx_mgmt_api_test_util:init_suite([emqx_conf]), Config. -set_special_configs(emqx_dashboard) -> - emqx_dashboard_api_test_helpers:set_default_config(), - ok; -set_special_configs(_) -> - ok. - -end_per_suite(Config) -> - end_suite(), - Config. - -end_suite() -> - application:unload(emqx_management), - emqx_common_test_helpers:stop_apps([emqx_dashboard]). +end_per_suite(_Config) -> + emqx_mgmt_api_test_util:end_suite([emqx_conf]). t_all_code(_) -> HrlDef = ?ERROR_CODES, @@ -70,7 +57,7 @@ t_look_up_code(_) -> t_description_code(_) -> {error, not_found} = emqx_dashboard_error_code:description('_____NOT_EXIST_NAME'), - {ok, <<"Request parameters are not legal">>} = + {ok, <<"Request parameters are invalid">>} = emqx_dashboard_error_code:description('BAD_REQUEST'), ok. @@ -92,7 +79,7 @@ t_api_code(_) -> Url = ?SERVER ++ "/error_codes/BAD_REQUEST", {ok, #{ <<"code">> := <<"BAD_REQUEST">>, - <<"description">> := <<"Request parameters are not legal">> + <<"description">> := <<"Request parameters are invalid">> }} = request(Url), ok. @@ -113,7 +100,7 @@ request(Url) -> {ok, {{"HTTP/1.1", Code, _}, _, Return}} when Code >= 200 andalso Code =< 299 -> - {ok, emqx_json:decode(Return, [return_maps])}; + {ok, emqx_utils_json:decode(Return, [return_maps])}; {ok, {Reason, _, _}} -> {error, Reason} end. diff --git a/apps/emqx_dashboard/test/emqx_dashboard_haproxy_SUITE.erl b/apps/emqx_dashboard/test/emqx_dashboard_haproxy_SUITE.erl new file mode 100644 index 000000000..cb6a5a9fd --- /dev/null +++ b/apps/emqx_dashboard/test/emqx_dashboard_haproxy_SUITE.erl @@ -0,0 +1,93 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_dashboard_haproxy_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include("emqx_dashboard.hrl"). + +all() -> + emqx_common_test_helpers:all(?MODULE). + +init_per_suite(Config) -> + emqx_mgmt_api_test_util:init_suite([emqx_management], fun set_special_configs/1), + Config. + +set_special_configs(emqx_dashboard) -> + emqx_dashboard_api_test_helpers:set_default_config(<<"admin">>, true), + ok; +set_special_configs(_) -> + ok. + +end_per_suite(_Config) -> + emqx_mgmt_api_test_util:end_suite([emqx_management]). 
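Note: the error-code wording change ("not legal" -> "invalid") is asserted both through the module API and the REST endpoint. The direct lookups used by the updated tests:

    %% From the suite above:
    {ok, <<"Request parameters are invalid">>} =
        emqx_dashboard_error_code:description('BAD_REQUEST'),
    {error, not_found} =
        emqx_dashboard_error_code:description('_____NOT_EXIST_NAME').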
+ +t_status(_Config) -> + ProxyInfo = #{ + version => 1, + command => proxy, + transport_family => ipv4, + transport_protocol => stream, + src_address => {127, 0, 0, 1}, + src_port => 444, + dest_address => {192, 168, 0, 1}, + dest_port => 443 + }, + {ok, Socket} = gen_tcp:connect( + "localhost", + 18083, + [binary, {active, false}, {packet, raw}] + ), + ok = gen_tcp:send(Socket, ranch_proxy_header:header(ProxyInfo)), + {ok, Token} = emqx_dashboard_admin:sign_token(<<"admin">>, <<"public">>), + ok = gen_tcp:send( + Socket, + "GET /status HTTP/1.1\r\n" + "Host: localhost\r\n" + "Authorization: Bearer " ++ binary_to_list(Token) ++ + "\r\n" + "\r\n" + ), + {_, 200, _, Rest0} = cow_http:parse_status_line(raw_recv_head(Socket)), + {Headers, Body0} = cow_http:parse_headers(Rest0), + {_, LenBin} = lists:keyfind(<<"content-length">>, 1, Headers), + Len = binary_to_integer(LenBin), + Body = + if + byte_size(Body0) =:= Len -> + Body0; + true -> + {ok, Body1} = gen_tcp:recv(Socket, Len - byte_size(Body0), 5000), + <<Body0/binary, Body1/binary>> + end, + ?assertMatch({match, _}, re:run(Body, "Node .+ is started\nemqx is running")), + ok. + +raw_recv_head(Socket) -> + {ok, Data} = gen_tcp:recv(Socket, 0, 10000), + raw_recv_head(Socket, Data). + +raw_recv_head(Socket, Buffer) -> + case binary:match(Buffer, <<"\r\n\r\n">>) of + nomatch -> + {ok, Data} = gen_tcp:recv(Socket, 0, 10000), + raw_recv_head(Socket, <<Buffer/binary, Data/binary>>); + {_, _} -> + Buffer + end. diff --git a/apps/emqx_dashboard/test/emqx_dashboard_listener_SUITE.erl b/apps/emqx_dashboard/test/emqx_dashboard_listener_SUITE.erl new file mode 100644 index 000000000..1bc463b1f --- /dev/null +++ b/apps/emqx_dashboard/test/emqx_dashboard_listener_SUITE.erl @@ -0,0 +1,52 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- +-module(emqx_dashboard_listener_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +all() -> + emqx_common_test_helpers:all(?MODULE). + +init_per_suite(Config) -> + emqx_common_test_helpers:load_config(emqx_dashboard_schema, <<"dashboard {}">>), + emqx_mgmt_api_test_util:init_suite([emqx_conf]), + ok = change_i18n_lang(en), + Config. + +end_per_suite(_Config) -> + ok = change_i18n_lang(en), + emqx_mgmt_api_test_util:end_suite([emqx_conf]). + +t_change_i18n_lang(_Config) -> + ?check_trace( + begin + ok = change_i18n_lang(zh), + {ok, _} = ?block_until(#{?snk_kind := regenerate_minirest_dispatch}, 10_000), + ok + end, + fun(ok, Trace) -> + ?assertMatch([#{i18n_lang := zh}], ?of_kind(regenerate_minirest_dispatch, Trace)) + end + ), + ok.

+change_i18n_lang(Lang) -> + {ok, _} = emqx_conf:update([dashboard], {change_i18n_lang, Lang}, #{}), + ok.
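Note: `t_change_i18n_lang` demonstrates the trace-point idiom this patch enables by emitting `?tp(info, regenerate_minirest_dispatch, ...)` from the listener process: the config update returns immediately (the dispatch rebuild is a delayed job), so the test blocks on the trace event instead of sleeping. The bare pattern, with an illustrative event name and trigger:

    ?check_trace(
        begin
            ok = trigger_async_work(),  %% illustrative
            {ok, _} = ?block_until(#{?snk_kind := my_async_event}, 10_000),
            ok
        end,
        fun(ok, Trace) ->
            ?assertMatch([_ | _], ?of_kind(my_async_event, Trace))
        end
    ).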
diff --git a/apps/emqx_dashboard/test/emqx_dashboard_monitor_SUITE.erl b/apps/emqx_dashboard/test/emqx_dashboard_monitor_SUITE.erl index 6f4a0e0fd..a24fc2337 100644 --- a/apps/emqx_dashboard/test/emqx_dashboard_monitor_SUITE.erl +++ b/apps/emqx_dashboard/test/emqx_dashboard_monitor_SUITE.erl @@ -19,9 +19,9 @@ -compile(nowarn_export_all). -compile(export_all). +-import(emqx_dashboard_SUITE, [auth_header_/0]). + -include_lib("eunit/include/eunit.hrl"). --include_lib("common_test/include/ct.hrl"). --include_lib("emqx/include/emqx.hrl"). -include("emqx_dashboard.hrl"). -define(SERVER, "http://127.0.0.1:18083"). @@ -31,19 +31,11 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> - mria:start(), - emqx_common_test_helpers:start_apps([emqx_dashboard], fun set_special_configs/1), + emqx_mgmt_api_test_util:init_suite([]), Config. -end_per_suite(Config) -> - emqx_common_test_helpers:stop_apps([emqx_dashboard]), - Config. - -set_special_configs(emqx_dashboard) -> - emqx_dashboard_api_test_helpers:set_default_config(), - ok; -set_special_configs(_) -> - ok. +end_per_suite(_Config) -> + emqx_mgmt_api_test_util:end_suite([]). t_monitor_samplers_all(_Config) -> timer:sleep(?DEFAULT_SAMPLE_INTERVAL * 2 * 1000 + 20), @@ -111,9 +103,9 @@ t_monitor_reset(_) -> ok. t_monitor_api_error(_) -> - {error, {400, #{<<"code">> := <<"BAD_RPC">>}}} = + {error, {404, #{<<"code">> := <<"NOT_FOUND">>}}} = request(["monitor", "nodes", 'emqx@127.0.0.2']), - {error, {400, #{<<"code">> := <<"BAD_RPC">>}}} = + {error, {404, #{<<"code">> := <<"NOT_FOUND">>}}} = request(["monitor_current", "nodes", 'emqx@127.0.0.2']), {error, {400, #{<<"code">> := <<"BAD_REQUEST">>}}} = request(["monitor"], "latest=0"), @@ -145,18 +137,14 @@ do_request_api(Method, Request) -> Code >= 200 andalso Code =< 299 -> ct:pal("Resp ~p ~p~n", [Code, Return]), - {ok, emqx_json:decode(Return, [return_maps])}; + {ok, emqx_utils_json:decode(Return, [return_maps])}; {ok, {{"HTTP/1.1", Code, _}, _, Return}} -> ct:pal("Resp ~p ~p~n", [Code, Return]), - {error, {Code, emqx_json:decode(Return, [return_maps])}}; + {error, {Code, emqx_utils_json:decode(Return, [return_maps])}}; {error, Reason} -> {error, Reason} end. -auth_header_() -> - Basic = binary_to_list(base64:encode(<<"admin:public">>)), - {"Authorization", "Basic " ++ Basic}. - restart_monitor() -> OldMonitor = erlang:whereis(emqx_dashboard_monitor), erlang:exit(OldMonitor, kill), diff --git a/apps/emqx_dashboard/test/emqx_dashboard_schema_api_SUITE.erl b/apps/emqx_dashboard/test/emqx_dashboard_schema_api_SUITE.erl new file mode 100644 index 000000000..e4425aed8 --- /dev/null +++ b/apps/emqx_dashboard/test/emqx_dashboard_schema_api_SUITE.erl @@ -0,0 +1,52 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_dashboard_schema_api_SUITE). 
+ +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("emqx/include/http_api.hrl"). + +-include_lib("eunit/include/eunit.hrl"). + +-define(SERVER, "http://127.0.0.1:18083/api/v5"). + +-import(emqx_mgmt_api_test_util, [request/2]). + +all() -> + emqx_common_test_helpers:all(?MODULE). + +init_per_suite(Config) -> + emqx_mgmt_api_test_util:init_suite([emqx_conf]), + Config. + +end_per_suite(_Config) -> + emqx_mgmt_api_test_util:end_suite([emqx_conf]). + +t_hotconf(_) -> + Url = ?SERVER ++ "/schemas/hotconf", + {ok, 200, Body} = request(get, Url), + %% assert it's a valid json + _ = emqx_utils_json:decode(Body), + ok. + +t_bridges(_) -> + Url = ?SERVER ++ "/schemas/bridges", + {ok, 200, Body} = request(get, Url), + %% assert it's a valid json + _ = emqx_utils_json:decode(Body), + ok. diff --git a/apps/emqx_dashboard/test/emqx_swagger_parameter_SUITE.erl b/apps/emqx_dashboard/test/emqx_swagger_parameter_SUITE.erl index 8a5fe68e7..81b3f4402 100644 --- a/apps/emqx_dashboard/test/emqx_swagger_parameter_SUITE.erl +++ b/apps/emqx_dashboard/test/emqx_swagger_parameter_SUITE.erl @@ -63,25 +63,11 @@ groups() -> ]. init_per_suite(Config) -> - mria:start(), - application:load(emqx_dashboard), - emqx_common_test_helpers:start_apps([emqx_conf, emqx_dashboard], fun set_special_configs/1), - emqx_dashboard:init_i18n(), + emqx_mgmt_api_test_util:init_suite([emqx_conf]), Config. -set_special_configs(emqx_dashboard) -> - emqx_dashboard_api_test_helpers:set_default_config(), - ok; -set_special_configs(_) -> - ok. - -end_per_suite(Config) -> - end_suite(), - Config. - -end_suite() -> - application:unload(emqx_management), - emqx_common_test_helpers:stop_apps([emqx_dashboard]). +end_per_suite(_Config) -> + emqx_mgmt_api_test_util:end_suite([emqx_conf]). t_in_path(_Config) -> Expect = @@ -112,7 +98,7 @@ t_in_query(_Config) -> description => <<"QOS">>, in => query, name => qos, - schema => #{enum => [0, 1, 2], type => string} + schema => #{minimum => 0, maximum => 2, type => integer, example => 0} } ], validate("/test/in/query", Expect), diff --git a/apps/emqx_dashboard/test/emqx_swagger_remote_schema.erl b/apps/emqx_dashboard/test/emqx_swagger_remote_schema.erl index a797d3b43..c2266ad5b 100644 --- a/apps/emqx_dashboard/test/emqx_swagger_remote_schema.erl +++ b/apps/emqx_dashboard/test/emqx_swagger_remote_schema.erl @@ -32,8 +32,8 @@ fields("root") -> )}, {default_username, fun default_username/1}, {default_password, fun default_password/1}, - {sample_interval, mk(emqx_schema:duration_s(), #{default => "10s"})}, - {token_expired_time, mk(emqx_schema:duration(), #{default => "30m"})} + {sample_interval, mk(emqx_schema:duration_s(), #{default => <<"10s">>})}, + {token_expired_time, mk(emqx_schema:duration(), #{default => <<"30m">>})} ]; fields("ref1") -> [ @@ -52,7 +52,7 @@ fields("ref3") -> ]. default_username(type) -> string(); -default_username(default) -> "admin"; +default_username(default) -> <<"admin">>; default_username(required) -> true; default_username(_) -> undefined. diff --git a/apps/emqx_dashboard/test/emqx_swagger_requestBody_SUITE.erl b/apps/emqx_dashboard/test/emqx_swagger_requestBody_SUITE.erl index d17725e80..af4b901b2 100644 --- a/apps/emqx_dashboard/test/emqx_swagger_requestBody_SUITE.erl +++ b/apps/emqx_dashboard/test/emqx_swagger_requestBody_SUITE.erl @@ -33,7 +33,6 @@ init_per_suite(Config) -> mria:start(), application:load(emqx_dashboard), emqx_common_test_helpers:start_apps([emqx_conf, emqx_dashboard], fun set_special_configs/1), - emqx_dashboard:init_i18n(), Config. 
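Note: the schema API suite above exercises the new unauthenticated `/schemas/:name` endpoint end to end (`security => []` in its spec). Outside the test helpers, the same smoke test is a couple of `httpc` calls, assuming the default dashboard port:

    1> inets:start().
    2> {ok, {{_, 200, _}, _, Body}} =
           httpc:request(get,
               {"http://127.0.0.1:18083/api/v5/schemas/hotconf", []},
               [], [{body_format, binary}]).
    3> _ = emqx_utils_json:decode(Body, [return_maps]).  %% valid JSON object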
set_special_configs(emqx_dashboard) -> @@ -61,7 +60,7 @@ t_object(_Config) -> #{ <<"schema">> => #{ - required => [<<"timeout">>, <<"per_page">>], + required => [<<"per_page">>, <<"timeout">>], <<"properties">> => [ {<<"per_page">>, #{ description => <<"good per page desc">>, @@ -94,6 +93,30 @@ t_object(_Config) -> validate("/object", Spec, Refs), ok. +t_deprecated(_Config) -> + ?assertMatch( + [ + #{ + <<"emqx_swagger_requestBody_SUITE.deprecated_ref">> := + #{ + <<"properties">> := + [ + {<<"tag1">>, #{ + deprecated := true + }}, + {<<"tag2">>, #{ + deprecated := true + }}, + {<<"tag3">>, #{ + deprecated := false + }} + ] + } + } + ], + emqx_dashboard_swagger:components([{?MODULE, deprecated_ref}], #{}) + ). + t_nest_object(_Config) -> GoodRef = <<"#/components/schemas/emqx_swagger_requestBody_SUITE.good_ref">>, Spec = #{ @@ -284,8 +307,8 @@ t_nest_ref(_Config) -> t_none_ref(_Config) -> Path = "/ref/none", - ?assertThrow( - {error, #{mfa := {?MODULE, schema, [Path]}}}, + ?assertError( + {failed_to_generate_swagger_spec, ?MODULE, Path}, emqx_dashboard_swagger:parse_spec_ref(?MODULE, Path, #{}) ), ok. @@ -790,7 +813,7 @@ to_schema(Body) -> fields(good_ref) -> [ - {'webhook-host', mk(emqx_schema:ip_port(), #{default => "127.0.0.1:80"})}, + {'webhook-host', mk(emqx_schema:ip_port(), #{default => <<"127.0.0.1:80">>})}, {log_dir, mk(emqx_schema:file(), #{example => "var/log/emqx"})}, {tag, mk(binary(), #{desc => <<"tag">>})} ]; @@ -812,7 +835,13 @@ fields(sub_fields) -> {init_file, fun init_file/1} ], desc => <<"test sub fields">> - }. + }; +fields(deprecated_ref) -> + [ + {tag1, mk(binary(), #{desc => <<"tag1">>, deprecated => {since, "4.3.0"}})}, + {tag2, mk(binary(), #{desc => <<"tag2">>, deprecated => true})}, + {tag3, mk(binary(), #{desc => <<"tag3">>, deprecated => false})} + ]. enable(type) -> boolean(); enable(desc) -> <<"Whether to enable tls psk support">>; diff --git a/apps/emqx_dashboard/test/emqx_swagger_response_SUITE.erl b/apps/emqx_dashboard/test/emqx_swagger_response_SUITE.erl index 346f4ef71..a3d2b4e75 100644 --- a/apps/emqx_dashboard/test/emqx_swagger_response_SUITE.erl +++ b/apps/emqx_dashboard/test/emqx_swagger_response_SUITE.erl @@ -32,25 +32,16 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> - mria:start(), - application:load(emqx_dashboard), - emqx_common_test_helpers:start_apps([emqx_conf, emqx_dashboard], fun set_special_configs/1), - emqx_dashboard:init_i18n(), + emqx_mgmt_api_test_util:init_suite([emqx_conf]), Config. -set_special_configs(emqx_dashboard) -> - emqx_dashboard_api_test_helpers:set_default_config(), - ok; -set_special_configs(_) -> - ok. - end_per_suite(Config) -> end_suite(), Config. end_suite() -> application:unload(emqx_management), - emqx_common_test_helpers:stop_apps([emqx_dashboard]). + emqx_mgmt_api_test_util:end_suite([emqx_conf]). t_simple_binary(_config) -> Path = "/simple/bin", @@ -67,7 +58,7 @@ t_object(_config) -> <<"application/json">> => #{ <<"schema">> => #{ - required => [<<"timeout">>, <<"per_page">>], + required => [<<"per_page">>, <<"timeout">>], <<"properties">> => [ {<<"per_page">>, #{ description => <<"good per page desc">>, @@ -286,11 +277,8 @@ t_bad_ref(_Config) -> t_none_ref(_Config) -> Path = "/ref/none", - ?assertThrow( - {error, #{ - mfa := {?MODULE, schema, ["/ref/none"]}, - reason := function_clause - }}, + ?assertError( + {failed_to_generate_swagger_spec, ?MODULE, Path}, validate(Path, #{}, []) ), ok. 
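Note: `t_deprecated` above closes the loop on the `format_prop/2` change earlier in this patch: HOCON `deprecated` metadata, whether `{since, Vsn}` or a boolean, is normalized to a boolean `deprecated` flag on the OpenAPI property. Declaring such fields follows the fixture's pattern:

    %% Illustrative field list: both of the first two forms render as
    %% `deprecated: true` in the generated spec; `false` stays `false`.
    fields(example) ->
        [
            {old_flag, hoconsc:mk(boolean(), #{deprecated => {since, "5.0.0"}})},
            {older_flag, hoconsc:mk(boolean(), #{deprecated => true})},
            {current_flag, hoconsc:mk(boolean(), #{deprecated => false})}
        ].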
@@ -689,7 +677,7 @@ to_schema(Object) -> fields(good_ref) -> [ - {'webhook-host', mk(emqx_schema:ip_port(), #{default => "127.0.0.1:80"})}, + {'webhook-host', mk(emqx_schema:ip_port(), #{default => <<"127.0.0.1:80">>})}, {log_dir, mk(emqx_schema:file(), #{example => "var/log/emqx"})}, {tag, mk(binary(), #{desc => <<"tag">>})} ]; diff --git a/apps/emqx_eviction_agent/BSL.txt b/apps/emqx_eviction_agent/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_eviction_agent/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. 
LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
+EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
+TITLE.
+
+MariaDB hereby grants you permission to use this License’s text to license
+your works, and to refer to it using the trademark “Business Source License”,
+as long as you comply with the Covenants of Licensor below.
+
+Covenants of Licensor
+
+In consideration of the right to use this License’s text and the “Business
+Source License” name and trademark, Licensor covenants to MariaDB, and to all
+other recipients of the licensed work to be provided by Licensor:
+
+1. To specify as the Change License the GPL Version 2.0 or any later version,
+   or a license that is compatible with GPL Version 2.0 or a later version,
+   where “compatible” means that software provided under the Change License can
+   be included in a program with software provided under GPL Version 2.0 or a
+   later version. Licensor may specify additional Change Licenses without
+   limitation.
+
+2. To either: (a) specify an additional grant of rights to use that does not
+   impose any additional restriction on the right granted in this License, as
+   the Additional Use Grant; or (b) insert the text “None”.
+
+3. To specify a Change Date.
+
+4. Not to modify this License in any other way.
diff --git a/apps/emqx_eviction_agent/README.md b/apps/emqx_eviction_agent/README.md
new file mode 100644
index 000000000..943bd7d12
--- /dev/null
+++ b/apps/emqx_eviction_agent/README.md
@@ -0,0 +1,35 @@
+# EMQX Eviction Agent
+
+`emqx_eviction_agent` is part of the node evacuation/node rebalance feature in EMQX.
+It is a low-level application that encapsulates working with actual MQTT connections.
+
+## Application Responsibilities
+
+The `emqx_eviction_agent` application:
+
+* Blocks incoming connections to the node it is running on.
+* Serves as a facade for connection/session eviction operations.
+* Reports blocking status via HTTP API.
+
+`emqx_eviction_agent` is relatively passive and has no eviction/rebalancing logic of its own. It allows
+`emqx_node_rebalance` to perform eviction/rebalancing operations through a high-level API, without having to deal with
+MQTT connections directly.
+
+## EMQX Integration
+
+`emqx_eviction_agent` interacts with the following EMQX components:
+* `emqx_cm` - to get the list of active MQTT connections;
+* `emqx_hooks` subsystem - to block/unblock incoming connections;
+* `emqx_channel` and the corresponding connection modules - to perform the eviction.
+
+## User-Facing API
+
+The application provides a simple API (CLI and HTTP) for inspecting the current blocking status.
+
+## Documentation
+
+The rebalancing concept is described in the corresponding [EIP](https://github.com/emqx/eip/blob/main/active/0020-node-rebalance.md).
+
+## Contributing
+
+Please see our [contributing.md](../../CONTRIBUTING.md).
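For orientation, here is a minimal sketch of how a controller such as `emqx_node_rebalance` would drive the agent API this application exposes (not part of the diff; `my_rebalance`, the server reference, and the batch size of 100 are arbitrary placeholders):

    %% A minimal usage sketch of the eviction agent API, under the
    %% assumptions named above.
    evacuate_sketch(TargetNode) ->
        %% Claim the agent; new MQTT connections are now rejected with
        %% "Use Another Server" plus the given server reference.
        ok = emqx_eviction_agent:enable(my_rebalance, <<"other.broker:1883">>),
        {enabled, #{connections := _, sessions := _}} = emqx_eviction_agent:status(),
        %% Disconnect up to 100 live connections, then move up to 100 of
        %% the remaining (disconnected) sessions to the target node.
        ok = emqx_eviction_agent:evict_connections(100),
        ok = emqx_eviction_agent:evict_sessions(100, TargetNode),
        %% Release the agent; the node accepts connections again.
        ok = emqx_eviction_agent:disable(my_rebalance).

The same status information is also available over `GET /node_eviction/status` and through the `eviction status` CLI command, both implemented in modules later in this diff.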
diff --git a/apps/emqx_eviction_agent/etc/emqx_eviction_agent.conf b/apps/emqx_eviction_agent/etc/emqx_eviction_agent.conf new file mode 100644 index 000000000..011b7fb0f --- /dev/null +++ b/apps/emqx_eviction_agent/etc/emqx_eviction_agent.conf @@ -0,0 +1,3 @@ +##-------------------------------------------------------------------- +## EMQX Eviction Agent Plugin +##-------------------------------------------------------------------- diff --git a/apps/emqx_eviction_agent/rebar.config b/apps/emqx_eviction_agent/rebar.config new file mode 100644 index 000000000..b055d8f4f --- /dev/null +++ b/apps/emqx_eviction_agent/rebar.config @@ -0,0 +1,2 @@ +{deps, [{emqx, {path, "../../apps/emqx"}}]}. +{project_plugins, [erlfmt]}. diff --git a/apps/emqx_eviction_agent/src/emqx_eviction_agent.app.src b/apps/emqx_eviction_agent/src/emqx_eviction_agent.app.src new file mode 100644 index 000000000..239d9052e --- /dev/null +++ b/apps/emqx_eviction_agent/src/emqx_eviction_agent.app.src @@ -0,0 +1,21 @@ +{application, emqx_eviction_agent, [ + {description, "EMQX Eviction Agent"}, + {vsn, "5.0.0"}, + {registered, [ + emqx_eviction_agent_sup, + emqx_eviction_agent, + emqx_eviction_agent_conn_sup + ]}, + {applications, [ + kernel, + stdlib, + emqx_ctl + ]}, + {mod, {emqx_eviction_agent_app, []}}, + {env, []}, + {modules, []}, + {links, [ + {"Homepage", "https://www.emqx.com/"}, + {"Github", "https://github.com/emqx"} + ]} +]}. diff --git a/apps/emqx_eviction_agent/src/emqx_eviction_agent.appup.src b/apps/emqx_eviction_agent/src/emqx_eviction_agent.appup.src new file mode 100644 index 000000000..c1b84778d --- /dev/null +++ b/apps/emqx_eviction_agent/src/emqx_eviction_agent.appup.src @@ -0,0 +1,3 @@ +%% -*- mode: erlang -*- +%% Unless you know what you are doing, DO NOT edit manually!! +{VSN, [{<<".*">>, []}], [{<<".*">>, []}]}. diff --git a/apps/emqx_eviction_agent/src/emqx_eviction_agent.erl b/apps/emqx_eviction_agent/src/emqx_eviction_agent.erl new file mode 100644 index 000000000..9a29adc69 --- /dev/null +++ b/apps/emqx_eviction_agent/src/emqx_eviction_agent.erl @@ -0,0 +1,348 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_eviction_agent). + +-include_lib("emqx/include/emqx_mqtt.hrl"). +-include_lib("emqx/include/logger.hrl"). +-include_lib("emqx/include/types.hrl"). +-include_lib("emqx/include/emqx_hooks.hrl"). + +-include_lib("stdlib/include/qlc.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +-export([ + start_link/0, + enable/2, + disable/1, + status/0, + connection_count/0, + session_count/0, + session_count/1, + evict_connections/1, + evict_sessions/2, + evict_sessions/3, + evict_session_channel/3 +]). + +-behaviour(gen_server). + +-export([ + init/1, + handle_call/3, + handle_info/2, + handle_cast/2, + code_change/3 +]). + +-export([ + on_connect/2, + on_connack/3 +]). + +-export([ + hook/0, + unhook/0 +]). + +-export_type([server_reference/0]). + +-define(CONN_MODULES, [ + emqx_connection, emqx_ws_connection, emqx_quic_connection, emqx_eviction_agent_channel +]). + +%%-------------------------------------------------------------------- +%% APIs +%%-------------------------------------------------------------------- + +-type server_reference() :: binary() | undefined. +-type status() :: {enabled, conn_stats()} | disabled. 
+-type conn_stats() :: #{ + connections := non_neg_integer(), + sessions := non_neg_integer() +}. +-type kind() :: atom(). + +-spec start_link() -> startlink_ret(). +start_link() -> + gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). + +-spec enable(kind(), server_reference()) -> ok_or_error(eviction_agent_busy). +enable(Kind, ServerReference) -> + gen_server:call(?MODULE, {enable, Kind, ServerReference}). + +-spec disable(kind()) -> ok. +disable(Kind) -> + gen_server:call(?MODULE, {disable, Kind}). + +-spec status() -> status(). +status() -> + case enable_status() of + {enabled, _Kind, _ServerReference} -> + {enabled, stats()}; + disabled -> + disabled + end. + +-spec evict_connections(pos_integer()) -> ok_or_error(disabled). +evict_connections(N) -> + case enable_status() of + {enabled, _Kind, ServerReference} -> + ok = do_evict_connections(N, ServerReference); + disabled -> + {error, disabled} + end. + +-spec evict_sessions(pos_integer(), node() | [node()]) -> ok_or_error(disabled). +evict_sessions(N, Node) when is_atom(Node) -> + evict_sessions(N, [Node]); +evict_sessions(N, Nodes) when is_list(Nodes) andalso length(Nodes) > 0 -> + evict_sessions(N, Nodes, any). + +-spec evict_sessions(pos_integer(), node() | [node()], atom()) -> ok_or_error(disabled). +evict_sessions(N, Node, ConnState) when is_atom(Node) -> + evict_sessions(N, [Node], ConnState); +evict_sessions(N, Nodes, ConnState) when + is_list(Nodes) andalso length(Nodes) > 0 +-> + case enable_status() of + {enabled, _Kind, _ServerReference} -> + ok = do_evict_sessions(N, Nodes, ConnState); + disabled -> + {error, disabled} + end. + +%%-------------------------------------------------------------------- +%% gen_server callbacks +%%-------------------------------------------------------------------- + +init([]) -> + _ = persistent_term:erase(?MODULE), + {ok, #{}}. + +%% enable +handle_call({enable, Kind, ServerReference}, _From, St) -> + Reply = + case enable_status() of + disabled -> + ok = persistent_term:put(?MODULE, {enabled, Kind, ServerReference}); + {enabled, Kind, _ServerReference} -> + ok = persistent_term:put(?MODULE, {enabled, Kind, ServerReference}); + {enabled, _OtherKind, _ServerReference} -> + {error, eviction_agent_busy} + end, + {reply, Reply, St}; +%% disable +handle_call({disable, Kind}, _From, St) -> + Reply = + case enable_status() of + disabled -> + {error, disabled}; + {enabled, Kind, _ServerReference} -> + _ = persistent_term:erase(?MODULE), + ok; + {enabled, _OtherKind, _ServerReference} -> + {error, eviction_agent_busy} + end, + {reply, Reply, St}; +handle_call(Msg, _From, St) -> + ?SLOG(warning, #{msg => "unknown_call", call => Msg, state => St}), + {reply, {error, unknown_call}, St}. + +handle_info(Msg, St) -> + ?SLOG(warning, #{msg => "unknown_msg", info => Msg, state => St}), + {noreply, St}. + +handle_cast(Msg, St) -> + ?SLOG(warning, #{msg => "unknown_cast", cast => Msg, state => St}), + {noreply, St}. + +code_change(_Vsn, State, _Extra) -> + {ok, State}. + +%%-------------------------------------------------------------------- +%% Hook callbacks +%%-------------------------------------------------------------------- + +on_connect(_ConnInfo, _Props) -> + case enable_status() of + {enabled, _Kind, _ServerReference} -> + {stop, {error, ?RC_USE_ANOTHER_SERVER}}; + disabled -> + ignore + end. 
+ +on_connack( + #{proto_name := <<"MQTT">>, proto_ver := ?MQTT_PROTO_V5}, + use_another_server, + Props +) -> + case enable_status() of + {enabled, _Kind, ServerReference} -> + {ok, Props#{'Server-Reference' => ServerReference}}; + disabled -> + {ok, Props} + end; +on_connack(_ClientInfo, _Reason, Props) -> + {ok, Props}. + +%%-------------------------------------------------------------------- +%% Hook funcs +%%-------------------------------------------------------------------- + +hook() -> + ?tp(debug, eviction_agent_hook, #{}), + ok = emqx_hooks:put('client.connack', {?MODULE, on_connack, []}, ?HP_NODE_REBALANCE), + ok = emqx_hooks:put('client.connect', {?MODULE, on_connect, []}, ?HP_NODE_REBALANCE). + +unhook() -> + ?tp(debug, eviction_agent_unhook, #{}), + ok = emqx_hooks:del('client.connect', {?MODULE, on_connect}), + ok = emqx_hooks:del('client.connack', {?MODULE, on_connack}). + +enable_status() -> + persistent_term:get(?MODULE, disabled). + +% connection management +stats() -> + #{ + connections => connection_count(), + sessions => session_count() + }. + +connection_table() -> + emqx_cm:live_connection_table(?CONN_MODULES). + +connection_count() -> + table_count(connection_table()). + +channel_with_session_table(any) -> + qlc:q([ + {ClientId, ConnInfo, ClientInfo} + || {ClientId, _, ConnInfo, ClientInfo} <- + emqx_cm:channel_with_session_table(?CONN_MODULES) + ]); +channel_with_session_table(RequiredConnState) -> + qlc:q([ + {ClientId, ConnInfo, ClientInfo} + || {ClientId, ConnState, ConnInfo, ClientInfo} <- + emqx_cm:channel_with_session_table(?CONN_MODULES), + RequiredConnState =:= ConnState + ]). + +session_count() -> + session_count(any). + +session_count(ConnState) -> + table_count(channel_with_session_table(ConnState)). + +table_count(QH) -> + qlc:fold(fun(_, Acc) -> Acc + 1 end, 0, QH). + +take_connections(N) -> + ChanQH = qlc:q([ChanPid || {_ClientId, ChanPid} <- connection_table()]), + ChanPidCursor = qlc:cursor(ChanQH), + ChanPids = qlc:next_answers(ChanPidCursor, N), + ok = qlc:delete_cursor(ChanPidCursor), + ChanPids. + +take_channel_with_sessions(N, ConnState) -> + ChanPidCursor = qlc:cursor(channel_with_session_table(ConnState)), + Channels = qlc:next_answers(ChanPidCursor, N), + ok = qlc:delete_cursor(ChanPidCursor), + Channels. + +do_evict_connections(N, ServerReference) when N > 0 -> + ChanPids = take_connections(N), + ok = lists:foreach( + fun(ChanPid) -> + disconnect_channel(ChanPid, ServerReference) + end, + ChanPids + ). + +do_evict_sessions(N, Nodes, ConnState) when N > 0 -> + Channels = take_channel_with_sessions(N, ConnState), + ok = lists:foreach( + fun({ClientId, ConnInfo, ClientInfo}) -> + evict_session_channel(Nodes, ClientId, ConnInfo, ClientInfo) + end, + Channels + ). + +evict_session_channel(Nodes, ClientId, ConnInfo, ClientInfo) -> + Node = select_random(Nodes), + ?SLOG( + info, + #{ + msg => "evict_session_channel", + client_id => ClientId, + node => Node, + conn_info => ConnInfo, + client_info => ClientInfo + } + ), + case emqx_eviction_agent_proto_v1:evict_session_channel(Node, ClientId, ConnInfo, ClientInfo) of + {badrpc, Reason} -> + ?SLOG( + error, + #{ + msg => "evict_session_channel_rpc_error", + client_id => ClientId, + node => Node, + reason => Reason + } + ), + {error, Reason}; + {error, Reason} = Error -> + ?SLOG( + error, + #{ + msg => "evict_session_channel_error", + client_id => ClientId, + node => Node, + reason => Reason + } + ), + Error; + Res -> + Res + end. 
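+%% Taken together, on_connect/2 and on_connack/3 above implement the
+%% client-visible half of eviction: new connections are rejected with
+%% reason code ?RC_USE_ANOTHER_SERVER, and MQTT v5 clients additionally
+%% receive the configured 'Server-Reference' property. A minimal sketch
+%% of what a v5 client observes while the agent is enabled (hypothetical
+%% helper; host and port are placeholders, and the error shape matches
+%% the assertions in the test suites below):
+%%
+%%   blocked_connect_sketch() ->
+%%       {ok, C} = emqtt:start_link([{proto_ver, v5}, {port, 1883}]),
+%%       case emqtt:connect(C) of
+%%           {error, {use_another_server, Props}} ->
+%%               %% Redirect target chosen by the operator, if any
+%%               maps:get('Server-Reference', Props, undefined);
+%%           {ok, _ConnAckProps} ->
+%%               connected
+%%       end.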
+ +-spec evict_session_channel( + emqx_types:clientid(), + emqx_types:conninfo(), + emqx_types:clientinfo() +) -> supervisor:startchild_ret(). +evict_session_channel(ClientId, ConnInfo, ClientInfo) -> + ?SLOG(info, #{ + msg => "evict_session_channel", + client_id => ClientId, + conn_info => ConnInfo, + client_info => ClientInfo + }), + Result = emqx_eviction_agent_channel:start_supervised( + #{ + conninfo => ConnInfo, + clientinfo => ClientInfo + } + ), + ?SLOG( + info, + #{ + msg => "evict_session_channel_result", + client_id => ClientId, + result => Result + } + ), + Result. + +disconnect_channel(ChanPid, ServerReference) -> + ChanPid ! + {disconnect, ?RC_USE_ANOTHER_SERVER, use_another_server, #{ + 'Server-Reference' => ServerReference + }}. + +select_random(List) when length(List) > 0 -> + lists:nth(rand:uniform(length(List)), List). diff --git a/apps/emqx_eviction_agent/src/emqx_eviction_agent_api.erl b/apps/emqx_eviction_agent/src/emqx_eviction_agent_api.erl new file mode 100644 index 000000000..d8c1d7645 --- /dev/null +++ b/apps/emqx_eviction_agent/src/emqx_eviction_agent_api.erl @@ -0,0 +1,85 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_eviction_agent_api). + +-behaviour(minirest_api). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("emqx/include/logger.hrl"). + +%% Swagger specs from hocon schema +-export([ + api_spec/0, + paths/0, + schema/1, + namespace/0 +]). + +-export([ + fields/1, + roots/0 +]). + +%% API callbacks +-export([ + '/node_eviction/status'/2 +]). + +-import(hoconsc, [mk/2, ref/1, ref/2]). + +namespace() -> "node_eviction". + +api_spec() -> + emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true}). + +paths() -> + [ + "/node_eviction/status" + ]. + +schema("/node_eviction/status") -> + #{ + 'operationId' => '/node_eviction/status', + get => #{ + tags => [<<"node_eviction">>], + summary => <<"Get node eviction status">>, + description => ?DESC("node_eviction_status_get"), + responses => #{ + 200 => schema_status() + } + } + }. + +'/node_eviction/status'(_Bindings, _Params) -> + case emqx_eviction_agent:status() of + disabled -> + {200, #{status => disabled}}; + {enabled, Stats} -> + {200, #{ + status => enabled, + stats => Stats + }} + end. + +schema_status() -> + mk(hoconsc:union([ref(status_enabled), ref(status_disabled)]), #{}). + +roots() -> []. + +fields(status_enabled) -> + [ + {status, mk(enabled, #{default => enabled})}, + {stats, ref(stats)} + ]; +fields(stats) -> + [ + {connections, mk(integer(), #{})}, + {sessions, mk(integer(), #{})} + ]; +fields(status_disabled) -> + [ + {status, mk(disabled, #{default => disabled})} + ]. diff --git a/apps/emqx_eviction_agent/src/emqx_eviction_agent_app.erl b/apps/emqx_eviction_agent/src/emqx_eviction_agent_app.erl new file mode 100644 index 000000000..90b09884f --- /dev/null +++ b/apps/emqx_eviction_agent/src/emqx_eviction_agent_app.erl @@ -0,0 +1,22 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_eviction_agent_app). + +-behaviour(application). + +-export([ + start/2, + stop/1 +]). 
+
+start(_Type, _Args) ->
+    ok = emqx_eviction_agent:hook(),
+    {ok, Sup} = emqx_eviction_agent_sup:start_link(),
+    ok = emqx_eviction_agent_cli:load(),
+    {ok, Sup}.
+
+stop(_State) ->
+    ok = emqx_eviction_agent:unhook(),
+    ok = emqx_eviction_agent_cli:unload().
diff --git a/apps/emqx_eviction_agent/src/emqx_eviction_agent_channel.erl b/apps/emqx_eviction_agent/src/emqx_eviction_agent_channel.erl
new file mode 100644
index 000000000..a6097f03d
--- /dev/null
+++ b/apps/emqx_eviction_agent/src/emqx_eviction_agent_channel.erl
@@ -0,0 +1,358 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+
+%% MQTT Channel
+-module(emqx_eviction_agent_channel).
+
+-include_lib("emqx/include/emqx.hrl").
+-include_lib("emqx/include/emqx_channel.hrl").
+-include_lib("emqx/include/emqx_mqtt.hrl").
+-include_lib("emqx/include/logger.hrl").
+-include_lib("emqx/include/types.hrl").
+
+-include_lib("snabbkaffe/include/snabbkaffe.hrl").
+
+-export([
+    start_link/1,
+    start_supervised/1,
+    call/2,
+    call/3,
+    cast/2,
+    stop/1
+]).
+
+-export([
+    init/1,
+    handle_call/3,
+    handle_cast/2,
+    handle_info/2,
+    terminate/2,
+    code_change/3
+]).
+
+-type opts() :: #{
+    conninfo := emqx_types:conninfo(),
+    clientinfo := emqx_types:clientinfo()
+}.
+
+%%--------------------------------------------------------------------
+%% API
+%%--------------------------------------------------------------------
+
+-spec start_supervised(opts()) -> supervisor:startchild_ret().
+start_supervised(#{clientinfo := #{clientid := ClientId}} = Opts) ->
+    RandomId = integer_to_binary(erlang:unique_integer([positive])),
+    ClientIdBin = bin_clientid(ClientId),
+    Id = <<ClientIdBin/binary, "-", RandomId/binary>>,
+    ChildSpec = #{
+        id => Id,
+        start => {?MODULE, start_link, [Opts]},
+        restart => temporary,
+        shutdown => 5000,
+        type => worker,
+        modules => [?MODULE]
+    },
+    supervisor:start_child(
+        emqx_eviction_agent_conn_sup,
+        ChildSpec
+    ).
+
+-spec start_link(opts()) -> startlink_ret().
+start_link(Opts) ->
+    gen_server:start_link(?MODULE, [Opts], []).
+
+-spec cast(pid(), term()) -> ok.
+cast(Pid, Req) ->
+    gen_server:cast(Pid, Req).
+
+-spec call(pid(), term()) -> term().
+call(Pid, Req) ->
+    call(Pid, Req, infinity).
+
+-spec call(pid(), term(), timeout()) -> term().
+call(Pid, Req, Timeout) ->
+    gen_server:call(Pid, Req, Timeout).
+
+-spec stop(pid()) -> ok.
+stop(Pid) ->
+    gen_server:stop(Pid).
+
+%%--------------------------------------------------------------------
+%% gen_server API
+%%--------------------------------------------------------------------
+
+init([#{conninfo := OldConnInfo, clientinfo := #{clientid := ClientId} = OldClientInfo}]) ->
+    process_flag(trap_exit, true),
+    ClientInfo = clientinfo(OldClientInfo),
+    ConnInfo = conninfo(OldConnInfo),
+    case open_session(ConnInfo, ClientInfo) of
+        {ok, Channel0} ->
+            case set_expiry_timer(Channel0) of
+                {ok, Channel1} ->
+                    ?SLOG(
+                        info,
+                        #{
+                            msg => "channel_initialized",
+                            clientid => ClientId,
+                            node => node()
+                        }
+                    ),
+                    ok = emqx_cm:mark_channel_disconnected(self()),
+                    {ok, Channel1, hibernate};
+                {error, Reason} ->
+                    {stop, Reason}
+            end;
+        {error, Reason} ->
+            {stop, Reason}
+    end.
+ +handle_call(kick, _From, Channel) -> + {stop, kicked, ok, Channel}; +handle_call(discard, _From, Channel) -> + {stop, discarded, ok, Channel}; +handle_call({takeover, 'begin'}, _From, #{session := Session} = Channel) -> + {reply, Session, Channel#{takeover => true}}; +handle_call( + {takeover, 'end'}, + _From, + #{ + session := Session, + clientinfo := #{clientid := ClientId}, + pendings := Pendings + } = Channel +) -> + ok = emqx_session:takeover(Session), + %% TODO: Should not drain deliver here (side effect) + Delivers = emqx_utils:drain_deliver(), + AllPendings = lists:append(Delivers, Pendings), + ?tp( + debug, + emqx_channel_takeover_end, + #{clientid => ClientId} + ), + {stop, normal, AllPendings, Channel}; +handle_call(list_acl_cache, _From, Channel) -> + {reply, [], Channel}; +handle_call({quota, _Policy}, _From, Channel) -> + {reply, ok, Channel}; +handle_call(Req, _From, Channel) -> + ?SLOG( + error, + #{ + msg => "unexpected_call", + req => Req + } + ), + {reply, ignored, Channel}. + +handle_info(Deliver = {deliver, _Topic, _Msg}, Channel) -> + Delivers = [Deliver | emqx_utils:drain_deliver()], + {noreply, handle_deliver(Delivers, Channel)}; +handle_info(expire_session, Channel) -> + {stop, expired, Channel}; +handle_info(Info, Channel) -> + ?SLOG( + error, + #{ + msg => "unexpected_info", + info => Info + } + ), + {noreply, Channel}. + +handle_cast(Msg, Channel) -> + ?SLOG(error, #{msg => "unexpected_cast", cast => Msg}), + {noreply, Channel}. + +terminate(Reason, #{conninfo := ConnInfo, clientinfo := ClientInfo, session := Session} = Channel) -> + ok = cancel_expiry_timer(Channel), + (Reason =:= expired) andalso emqx_persistent_session:persist(ClientInfo, ConnInfo, Session), + emqx_session:terminate(ClientInfo, Reason, Session). + +code_change(_OldVsn, Channel, _Extra) -> + {ok, Channel}. + +%%-------------------------------------------------------------------- +%% Internal functions +%%-------------------------------------------------------------------- + +handle_deliver( + Delivers, + #{ + takeover := true, + pendings := Pendings, + session := Session, + clientinfo := #{clientid := ClientId} = ClientInfo + } = Channel +) -> + %% NOTE: Order is important here. While the takeover is in + %% progress, the session cannot enqueue messages, since it already + %% passed on the queue to the new connection in the session state. + NPendings = lists:append( + Pendings, + emqx_session:ignore_local(ClientInfo, emqx_channel:maybe_nack(Delivers), ClientId, Session) + ), + Channel#{pendings => NPendings}; +handle_deliver( + Delivers, + #{ + takeover := false, + session := Session, + clientinfo := #{clientid := ClientId} = ClientInfo + } = Channel +) -> + Delivers1 = emqx_channel:maybe_nack(Delivers), + Delivers2 = emqx_session:ignore_local(ClientInfo, Delivers1, ClientId, Session), + NSession = emqx_session:enqueue(ClientInfo, Delivers2, Session), + NChannel = persist(NSession, Channel), + %% We consider queued/dropped messages as delivered since they are now in the session state. + emqx_channel:maybe_mark_as_delivered(Session, Delivers), + NChannel. + +cancel_expiry_timer(#{expiry_timer := TRef}) when is_reference(TRef) -> + _ = erlang:cancel_timer(TRef), + ok; +cancel_expiry_timer(_) -> + ok. 
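+%% The {takeover, 'begin'} / {takeover, 'end'} clauses above are this
+%% channel's side of EMQX's two-phase session takeover; the NOTE in
+%% handle_deliver/2 explains why deliveries are buffered in `pendings`
+%% once phase one has run. A hypothetical caller-side sketch (the real
+%% driver is emqx_cm when a new connection claims the client ID):
+%%
+%%   takeover_sketch(ChanPid) ->
+%%       %% Phase 1: fetch the session state; the channel flags
+%%       %% `takeover => true` and stops enqueueing into the session.
+%%       Session = emqx_eviction_agent_channel:call(ChanPid, {takeover, 'begin'}),
+%%       %% Phase 2: the channel drains outstanding deliveries, stops
+%%       %% normally, and hands back everything buffered so far.
+%%       Pendings = emqx_eviction_agent_channel:call(ChanPid, {takeover, 'end'}),
+%%       {Session, Pendings}.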
+ +set_expiry_timer(#{conninfo := ConnInfo} = Channel) -> + case maps:get(expiry_interval, ConnInfo) of + ?UINT_MAX -> + {ok, Channel}; + I when I > 0 -> + Timer = erlang:send_after(timer:seconds(I), self(), expire_session), + {ok, Channel#{expiry_timer => Timer}}; + _ -> + {error, should_be_expired} + end. + +open_session(ConnInfo, #{clientid := ClientId} = ClientInfo) -> + Channel = channel(ConnInfo, ClientInfo), + case emqx_cm:open_session(_CleanSession = false, ClientInfo, ConnInfo) of + {ok, #{present := false}} -> + ?SLOG( + info, + #{ + msg => "no_session", + clientid => ClientId, + node => node() + } + ), + {error, no_session}; + {ok, #{session := Session, present := true, pendings := Pendings0}} -> + ?SLOG( + info, + #{ + msg => "session_opened", + clientid => ClientId, + node => node() + } + ), + Pendings1 = lists:usort(lists:append(Pendings0, emqx_utils:drain_deliver())), + NSession = emqx_session:enqueue( + ClientInfo, + emqx_session:ignore_local( + ClientInfo, + emqx_channel:maybe_nack(Pendings1), + ClientId, + Session + ), + Session + ), + NChannel = Channel#{session => NSession}, + ok = emqx_cm:insert_channel_info(ClientId, info(NChannel), stats(NChannel)), + ?SLOG( + info, + #{ + msg => "channel_info_updated", + clientid => ClientId, + node => node() + } + ), + {ok, NChannel}; + {error, Reason} = Error -> + ?SLOG( + error, + #{ + msg => "session_open_failed", + clientid => ClientId, + node => node(), + reason => Reason + } + ), + Error + end. + +conninfo(OldConnInfo) -> + DisconnectedAt = maps:get(disconnected_at, OldConnInfo, erlang:system_time(millisecond)), + ConnInfo0 = maps:with( + [ + socktype, + sockname, + peername, + peercert, + clientid, + clean_start, + receive_maximum, + expiry_interval, + connected_at, + disconnected_at, + keepalive + ], + OldConnInfo + ), + ConnInfo0#{ + conn_mod => ?MODULE, + connected => false, + disconnected_at => DisconnectedAt + }. + +clientinfo(OldClientInfo) -> + maps:with( + [ + zone, + protocol, + peerhost, + sockport, + clientid, + username, + is_bridge, + is_superuser, + mountpoint + ], + OldClientInfo + ). + +channel(ConnInfo, ClientInfo) -> + #{ + conninfo => ConnInfo, + clientinfo => ClientInfo, + expiry_timer => undefined, + takeover => false, + resuming => false, + pendings => [] + }. + +persist(Session, #{clientinfo := ClientInfo, conninfo := ConnInfo} = Channel) -> + Session1 = emqx_persistent_session:persist(ClientInfo, ConnInfo, Session), + Channel#{session => Session1}. + +info(Channel) -> + #{ + conninfo => maps:get(conninfo, Channel, undefined), + clientinfo => maps:get(clientinfo, Channel, undefined), + session => emqx_utils:maybe_apply( + fun emqx_session:info/1, + maps:get(session, Channel, undefined) + ), + conn_state => disconnected + }. + +stats(#{session := Session}) -> + lists:append(emqx_session:stats(Session), emqx_pd:get_counters(?CHANNEL_METRICS)). + +bin_clientid(ClientId) when is_binary(ClientId) -> + ClientId; +bin_clientid(ClientId) when is_atom(ClientId) -> + atom_to_binary(ClientId). diff --git a/apps/emqx_eviction_agent/src/emqx_eviction_agent_cli.erl b/apps/emqx_eviction_agent/src/emqx_eviction_agent_cli.erl new file mode 100644 index 000000000..3ae9365e3 --- /dev/null +++ b/apps/emqx_eviction_agent/src/emqx_eviction_agent_cli.erl @@ -0,0 +1,30 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_eviction_agent_cli). 
+ +%% APIs +-export([ + load/0, + unload/0, + cli/1 +]). + +load() -> + emqx_ctl:register_command(eviction, {?MODULE, cli}, []). + +unload() -> + emqx_ctl:unregister_command(eviction). + +cli(["status"]) -> + case emqx_eviction_agent:status() of + disabled -> + emqx_ctl:print("Eviction status: disabled~n"); + {enabled, _Stats} -> + emqx_ctl:print("Eviction status: enabled~n") + end; +cli(_) -> + emqx_ctl:usage( + [{"eviction status", "Get current node eviction status"}] + ). diff --git a/apps/emqx_eviction_agent/src/emqx_eviction_agent_conn_sup.erl b/apps/emqx_eviction_agent/src/emqx_eviction_agent_conn_sup.erl new file mode 100644 index 000000000..195555bd3 --- /dev/null +++ b/apps/emqx_eviction_agent/src/emqx_eviction_agent_conn_sup.erl @@ -0,0 +1,21 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_eviction_agent_conn_sup). + +-behaviour(supervisor). + +-export([start_link/0]). + +-export([init/1]). + +start_link() -> + supervisor:start_link({local, ?MODULE}, ?MODULE, []). + +init([]) -> + {ok, + { + #{strategy => one_for_one, intensity => 10, period => 3600}, + [] + }}. diff --git a/apps/emqx_eviction_agent/src/emqx_eviction_agent_sup.erl b/apps/emqx_eviction_agent/src/emqx_eviction_agent_sup.erl new file mode 100644 index 000000000..8b774ef85 --- /dev/null +++ b/apps/emqx_eviction_agent/src/emqx_eviction_agent_sup.erl @@ -0,0 +1,34 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_eviction_agent_sup). + +-behaviour(supervisor). + +-export([start_link/0]). + +-export([init/1]). + +start_link() -> + supervisor:start_link({local, ?MODULE}, ?MODULE, []). + +init([]) -> + Childs = [ + child_spec(worker, emqx_eviction_agent, []), + child_spec(supervisor, emqx_eviction_agent_conn_sup, []) + ], + {ok, { + #{strategy => one_for_one, intensity => 10, period => 3600}, + Childs + }}. + +child_spec(Type, Mod, Args) -> + #{ + id => Mod, + start => {Mod, start_link, Args}, + restart => permanent, + shutdown => 5000, + type => Type, + modules => [Mod] + }. diff --git a/apps/emqx_eviction_agent/src/proto/emqx_eviction_agent_proto_v1.erl b/apps/emqx_eviction_agent/src/proto/emqx_eviction_agent_proto_v1.erl new file mode 100644 index 000000000..f4c958150 --- /dev/null +++ b/apps/emqx_eviction_agent/src/proto/emqx_eviction_agent_proto_v1.erl @@ -0,0 +1,27 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_eviction_agent_proto_v1). + +-behaviour(emqx_bpapi). + +-export([ + introduced_in/0, + + evict_session_channel/4 +]). + +-include_lib("emqx/include/bpapi.hrl"). + +introduced_in() -> + "5.0.22". + +-spec evict_session_channel( + node(), + emqx_types:clientid(), + emqx_types:conninfo(), + emqx_types:clientinfo() +) -> supervisor:startchild_err() | emqx_rpc:badrpc(). +evict_session_channel(Node, ClientId, ConnInfo, ClientInfo) -> + rpc:call(Node, emqx_eviction_agent, evict_session_channel, [ClientId, ConnInfo, ClientInfo]). 
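With this bpapi stub in place, the whole evacuation path for a single session can be traced through the modules above (a condensed sketch; the node name is a placeholder and the agent must already be enabled on the source node):

    %% On the source node:
    ok = emqx_eviction_agent:evict_sessions(1, 'emqx@node2'),
    %% Internally: do_evict_sessions/3 picks a channel with a session,
    %% emqx_eviction_agent_proto_v1:evict_session_channel/4 RPCs to
    %% emqx_eviction_agent:evict_session_channel/3 on 'emqx@node2', and
    %% emqx_eviction_agent_channel:start_supervised/1 re-creates the
    %% session there as a supervised, disconnected channel process.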
diff --git a/apps/emqx_eviction_agent/test/emqx_eviction_agent_SUITE.erl b/apps/emqx_eviction_agent/test/emqx_eviction_agent_SUITE.erl new file mode 100644 index 000000000..22b694d77 --- /dev/null +++ b/apps/emqx_eviction_agent/test/emqx_eviction_agent_SUITE.erl @@ -0,0 +1,467 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_eviction_agent_SUITE). + +-compile(export_all). +-compile(nowarn_export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("emqx/include/emqx_mqtt.hrl"). +-include_lib("emqx/include/asserts.hrl"). + +-import( + emqx_eviction_agent_test_helpers, + [emqtt_connect/0, emqtt_connect/1, emqtt_connect/2] +). + +-define(assertPrinted(Printed, Code), + ?assertMatch( + {match, _}, + re:run(Code, Printed) + ) +). + +all() -> + emqx_common_test_helpers:all(?MODULE). + +init_per_suite(Config) -> + emqx_common_test_helpers:start_apps([emqx_eviction_agent]), + Config. + +end_per_suite(_Config) -> + emqx_common_test_helpers:stop_apps([emqx_eviction_agent]). + +init_per_testcase(Case, Config) -> + _ = emqx_eviction_agent:disable(test_eviction), + ok = snabbkaffe:start_trace(), + start_slave(Case, Config). + +start_slave(t_explicit_session_takeover, Config) -> + ClusterNodes = emqx_eviction_agent_test_helpers:start_cluster( + [{evacuate_test1, 2883}, {evacuate_test2, 3883}], + [emqx_eviction_agent] + ), + [{evacuate_nodes, ClusterNodes} | Config]; +start_slave(_Case, Config) -> + Config. + +end_per_testcase(TestCase, Config) -> + emqx_eviction_agent:disable(test_eviction), + ok = snabbkaffe:stop(), + stop_slave(TestCase, Config). + +stop_slave(t_explicit_session_takeover, Config) -> + emqx_eviction_agent_test_helpers:stop_cluster( + ?config(evacuate_nodes, Config), + [emqx_eviction_agent] + ); +stop_slave(_Case, _Config) -> + ok. + +%%-------------------------------------------------------------------- +%% Tests +%%-------------------------------------------------------------------- + +t_enable_disable(_Config) -> + erlang:process_flag(trap_exit, true), + + ?assertMatch( + disabled, + emqx_eviction_agent:status() + ), + + {ok, C0} = emqtt_connect(), + ok = emqtt:disconnect(C0), + + ok = emqx_eviction_agent:enable(test_eviction, undefined), + + ?assertMatch( + {error, eviction_agent_busy}, + emqx_eviction_agent:enable(bar, undefined) + ), + + ?assertMatch( + ok, + emqx_eviction_agent:enable(test_eviction, <<"srv">>) + ), + + ?assertMatch( + {enabled, #{}}, + emqx_eviction_agent:status() + ), + + ?assertMatch( + {error, {use_another_server, #{}}}, + emqtt_connect() + ), + + ?assertMatch( + {error, eviction_agent_busy}, + emqx_eviction_agent:disable(bar) + ), + + ?assertMatch( + ok, + emqx_eviction_agent:disable(test_eviction) + ), + + ?assertMatch( + {error, disabled}, + emqx_eviction_agent:disable(test_eviction) + ), + + ?assertMatch( + disabled, + emqx_eviction_agent:status() + ), + + {ok, C1} = emqtt_connect(), + ok = emqtt:disconnect(C1). 
+
+t_evict_connections_status(_Config) ->
+    erlang:process_flag(trap_exit, true),
+
+    {ok, _C} = emqtt_connect(),
+
+    {error, disabled} = emqx_eviction_agent:evict_connections(1),
+
+    ok = emqx_eviction_agent:enable(test_eviction, undefined),
+
+    ?assertMatch(
+        {enabled, #{connections := 1, sessions := _}},
+        emqx_eviction_agent:status()
+    ),
+
+    ok = emqx_eviction_agent:evict_connections(1),
+
+    ct:sleep(100),
+
+    ?assertMatch(
+        {enabled, #{connections := 0, sessions := _}},
+        emqx_eviction_agent:status()
+    ),
+
+    ok = emqx_eviction_agent:disable(test_eviction).
+
+t_explicit_session_takeover(Config) ->
+    _ = erlang:process_flag(trap_exit, true),
+    ok = restart_emqx(),
+
+    [{Node1, Port1}, {Node2, _Port2}] = ?config(evacuate_nodes, Config),
+
+    {ok, C0} = emqtt_connect([
+        {clientid, <<"client_with_session">>},
+        {clean_start, false},
+        {port, Port1}
+    ]),
+    {ok, _, _} = emqtt:subscribe(C0, <<"t1">>),
+
+    ok = rpc:call(Node1, emqx_eviction_agent, enable, [test_eviction, undefined]),
+
+    ?assertEqual(
+        1,
+        rpc:call(Node1, emqx_eviction_agent, connection_count, [])
+    ),
+
+    [ChanPid] = rpc:call(Node1, emqx_cm, lookup_channels, [<<"client_with_session">>]),
+
+    ?assertWaitEvent(
+        begin
+            ok = rpc:call(Node1, emqx_eviction_agent, evict_connections, [1]),
+            receive
+                {'EXIT', C0, {disconnected, ?RC_USE_ANOTHER_SERVER, _}} -> ok
+            after 1000 ->
+                ?assert(false, "Connection not evicted")
+            end
+        end,
+        #{?snk_kind := emqx_cm_connected_client_count_dec, chan_pid := ChanPid},
+        2000
+    ),
+
+    ?assertEqual(
+        0,
+        rpc:call(Node1, emqx_eviction_agent, connection_count, [])
+    ),
+
+    ?assertEqual(
+        1,
+        rpc:call(Node1, emqx_eviction_agent, session_count, [])
+    ),
+
+    %% First, evacuate to the same node
+
+    ?assertWaitEvent(
+        rpc:call(Node1, emqx_eviction_agent, evict_sessions, [1, Node1]),
+        #{?snk_kind := emqx_channel_takeover_end, clientid := <<"client_with_session">>},
+        1000
+    ),
+
+    ok = rpc:call(Node1, emqx_eviction_agent, disable, [test_eviction]),
+
+    {ok, C1} = emqtt_connect([{port, Port1}]),
+    emqtt:publish(C1, <<"t1">>, <<"MessageToEvictedSession1">>),
+    ok = emqtt:disconnect(C1),
+
+    ok = rpc:call(Node1, emqx_eviction_agent, enable, [test_eviction, undefined]),
+
+    %% Evacuate to another node
+
+    ?assertWaitEvent(
+        rpc:call(Node1, emqx_eviction_agent, evict_sessions, [1, Node2]),
+        #{?snk_kind := emqx_channel_takeover_end, clientid := <<"client_with_session">>},
+        1000
+    ),
+
+    ?assertEqual(
+        0,
+        rpc:call(Node1, emqx_eviction_agent, session_count, [])
+    ),
+
+    ?assertEqual(
+        1,
+        rpc:call(Node2, emqx_eviction_agent, session_count, [])
+    ),
+
+    ok = rpc:call(Node1, emqx_eviction_agent, disable, [test_eviction]),
+
+    %% Session is on Node2, but we connect to Node1
+    {ok, C2} = emqtt_connect([{port, Port1}]),
+    emqtt:publish(C2, <<"t1">>, <<"MessageToEvictedSession2">>),
+    ok = emqtt:disconnect(C2),
+
+    ct:sleep(100),
+
+    %% Session is on Node2, but we connect the subscribed client to Node1.
+    %% It should take over the session for the third time and receive
+    %% previously published messages
+    {ok, C3} = emqtt_connect([
+        {clientid, <<"client_with_session">>},
+        {clean_start, false},
+        {port, Port1}
+    ]),
+
+    ok = assert_receive_publish(
+        [
+            #{payload => <<"MessageToEvictedSession1">>, topic => <<"t1">>},
+            #{payload => <<"MessageToEvictedSession2">>, topic => <<"t1">>}
+        ]
+    ),
+    ok = emqtt:disconnect(C3).
+ +t_disable_on_restart(_Config) -> + ok = emqx_eviction_agent:enable(test_eviction, undefined), + + ok = supervisor:terminate_child(emqx_eviction_agent_sup, emqx_eviction_agent), + {ok, _} = supervisor:restart_child(emqx_eviction_agent_sup, emqx_eviction_agent), + + ?assertEqual( + disabled, + emqx_eviction_agent:status() + ). + +t_session_serialization(_Config) -> + _ = erlang:process_flag(trap_exit, true), + ok = restart_emqx(), + + {ok, C0} = emqtt_connect(<<"client_with_session">>, false), + {ok, _, _} = emqtt:subscribe(C0, <<"t1">>), + ok = emqtt:disconnect(C0), + + ok = emqx_eviction_agent:enable(test_eviction, undefined), + + ?assertEqual( + 1, + emqx_eviction_agent:session_count() + ), + + %% Evacuate to the same node + + ?assertWaitEvent( + emqx_eviction_agent:evict_sessions(1, node()), + #{?snk_kind := emqx_channel_takeover_end, clientid := <<"client_with_session">>}, + 1000 + ), + + ok = emqx_eviction_agent:disable(test_eviction), + + ?assertEqual( + 1, + emqx_eviction_agent:session_count() + ), + + ?assertMatch( + #{data := [#{clientid := <<"client_with_session">>}]}, + emqx_mgmt_api:cluster_query( + emqx_channel_info, + #{}, + [], + fun emqx_mgmt_api_clients:qs2ms/2, + fun emqx_mgmt_api_clients:format_channel_info/2 + ) + ), + + mock_print(), + + ?assertPrinted( + "client_with_session", + emqx_mgmt_cli:clients(["list"]) + ), + + ?assertPrinted( + "client_with_session", + emqx_mgmt_cli:clients(["show", "client_with_session"]) + ), + + ?assertWaitEvent( + emqx_cm:kick_session(<<"client_with_session">>), + #{?snk_kind := emqx_cm_clean_down, client_id := <<"client_with_session">>}, + 1000 + ), + + ?assertEqual( + 0, + emqx_eviction_agent:session_count() + ). + +t_will_msg(_Config) -> + erlang:process_flag(trap_exit, true), + + WillMsg = <<"will_msg">>, + WillTopic = <<"will_topic">>, + ClientId = <<"client_with_will">>, + + _ = emqtt_connect([ + {clean_start, false}, + {clientid, ClientId}, + {will_payload, WillMsg}, + {will_topic, WillTopic} + ]), + + {ok, C} = emqtt_connect(), + {ok, _, _} = emqtt:subscribe(C, WillTopic), + + [ChanPid] = emqx_cm:lookup_channels(ClientId), + + ChanPid ! + {disconnect, ?RC_USE_ANOTHER_SERVER, use_another_server, #{ + 'Server-Reference' => <<>> + }}, + + receive + {publish, #{ + payload := WillMsg, + topic := WillTopic + }} -> + ok + after 1000 -> + ct:fail("Will message not received") + end, + + ok = emqtt:disconnect(C). + +t_ws_conn(_Config) -> + erlang:process_flag(trap_exit, true), + + ClientId = <<"ws_client">>, + {ok, C} = emqtt:start_link([ + {proto_ver, v5}, + {clientid, ClientId}, + {port, 8083}, + {ws_path, "/mqtt"} + ]), + {ok, _} = emqtt:ws_connect(C), + + ok = emqx_eviction_agent:enable(test_eviction, undefined), + + ?assertEqual( + 1, + emqx_eviction_agent:connection_count() + ), + + ?assertWaitEvent( + ok = emqx_eviction_agent:evict_connections(1), + #{?snk_kind := emqx_cm_connected_client_count_dec}, + 1000 + ), + + ?assertEqual( + 0, + emqx_eviction_agent:connection_count() + ). + +-ifndef(BUILD_WITHOUT_QUIC). 
+ +t_quic_conn(_Config) -> + erlang:process_flag(trap_exit, true), + + QuicPort = emqx_common_test_helpers:select_free_port(quic), + application:ensure_all_started(quicer), + emqx_common_test_helpers:ensure_quic_listener(?MODULE, QuicPort), + + ClientId = <<"quic_client">>, + {ok, C} = emqtt:start_link([ + {proto_ver, v5}, + {clientid, ClientId}, + {port, QuicPort} + ]), + {ok, _} = emqtt:quic_connect(C), + + ok = emqx_eviction_agent:enable(test_eviction, undefined), + + ?assertEqual( + 1, + emqx_eviction_agent:connection_count() + ), + + ?assertWaitEvent( + ok = emqx_eviction_agent:evict_connections(1), + #{?snk_kind := emqx_cm_connected_client_count_dec}, + 1000 + ), + + ?assertEqual( + 0, + emqx_eviction_agent:connection_count() + ). + +-endif. + +%%-------------------------------------------------------------------- +%% Helpers +%%-------------------------------------------------------------------- + +assert_receive_publish([]) -> + ok; +assert_receive_publish([#{payload := Msg, topic := Topic} | Rest]) -> + receive + {publish, #{ + payload := Msg, + topic := Topic + }} -> + assert_receive_publish(Rest) + after 1000 -> + ?assert(false, "Message `" ++ binary_to_list(Msg) ++ "` is lost") + end. + +connect_and_publish(Topic, Message) -> + {ok, C} = emqtt_connect(), + emqtt:publish(C, Topic, Message), + ok = emqtt:disconnect(C). + +restart_emqx() -> + _ = application:stop(emqx), + _ = application:start(emqx), + _ = application:stop(emqx_eviction_agent), + _ = application:start(emqx_eviction_agent), + ok. + +mock_print() -> + catch meck:unload(emqx_ctl), + meck:new(emqx_ctl, [non_strict, passthrough]), + meck:expect(emqx_ctl, print, fun(Arg) -> emqx_ctl:format(Arg, []) end), + meck:expect(emqx_ctl, print, fun(Msg, Arg) -> emqx_ctl:format(Msg, Arg) end), + meck:expect(emqx_ctl, usage, fun(Usages) -> emqx_ctl:format_usage(Usages) end), + meck:expect(emqx_ctl, usage, fun(Cmd, Descr) -> emqx_ctl:format_usage(Cmd, Descr) end). diff --git a/apps/emqx_eviction_agent/test/emqx_eviction_agent_api_SUITE.erl b/apps/emqx_eviction_agent/test/emqx_eviction_agent_api_SUITE.erl new file mode 100644 index 000000000..3fe15e53a --- /dev/null +++ b/apps/emqx_eviction_agent/test/emqx_eviction_agent_api_SUITE.erl @@ -0,0 +1,69 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_eviction_agent_api_SUITE). + +-compile(export_all). +-compile(nowarn_export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). + +-import( + emqx_mgmt_api_test_util, + [ + request_api/2, + uri/1 + ] +). + +all() -> + emqx_common_test_helpers:all(?MODULE). + +init_per_suite(Config) -> + emqx_mgmt_api_test_util:init_suite([emqx_eviction_agent]), + Config. + +end_per_suite(Config) -> + emqx_mgmt_api_test_util:end_suite([emqx_eviction_agent]), + Config. 
+ +%%-------------------------------------------------------------------- +%% Tests +%%-------------------------------------------------------------------- + +t_status(_Config) -> + ?assertMatch( + {ok, #{<<"status">> := <<"disabled">>}}, + api_get(["node_eviction", "status"]) + ), + + ok = emqx_eviction_agent:enable(apitest, undefined), + + ?assertMatch( + {ok, #{ + <<"status">> := <<"enabled">>, + <<"stats">> := #{} + }}, + api_get(["node_eviction", "status"]) + ), + + ok = emqx_eviction_agent:disable(apitest), + + ?assertMatch( + {ok, #{<<"status">> := <<"disabled">>}}, + api_get(["node_eviction", "status"]) + ). + +%%-------------------------------------------------------------------- +%% Helpers +%%-------------------------------------------------------------------- + +api_get(Path) -> + case request_api(get, uri(Path)) of + {ok, ResponseBody} -> + {ok, jiffy:decode(list_to_binary(ResponseBody), [return_maps])}; + {error, _} = Error -> + Error + end. diff --git a/apps/emqx_eviction_agent/test/emqx_eviction_agent_channel_SUITE.erl b/apps/emqx_eviction_agent/test/emqx_eviction_agent_channel_SUITE.erl new file mode 100644 index 000000000..3b7ef6672 --- /dev/null +++ b/apps/emqx_eviction_agent/test/emqx_eviction_agent_channel_SUITE.erl @@ -0,0 +1,251 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_eviction_agent_channel_SUITE). + +-compile(export_all). +-compile(nowarn_export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("emqx/include/emqx_mqtt.hrl"). + +-define(CLIENT_ID, <<"client_with_session">>). + +-import( + emqx_eviction_agent_test_helpers, + [emqtt_connect/0, emqtt_connect/2] +). + +all() -> + emqx_common_test_helpers:all(?MODULE). + +init_per_suite(Config) -> + emqx_common_test_helpers:start_apps([emqx_conf, emqx_eviction_agent]), + {ok, _} = emqx:update_config([rpc, port_discovery], manual), + Config. + +end_per_suite(_Config) -> + emqx_common_test_helpers:stop_apps([emqx_eviction_agent, emqx_conf]). + +init_per_testcase(t_persistence, Config) -> + emqx_config:put([persistent_session_store, enabled], true), + {ok, _} = emqx_persistent_session_sup:start_link(), + emqx_persistent_session:init_db_backend(), + ?assert(emqx_persistent_session:is_store_enabled()), + Config; +init_per_testcase(_TestCase, Config) -> + Config. + +end_per_testcase(t_persistence, Config) -> + emqx_config:put([persistent_session_store, enabled], false), + emqx_persistent_session:init_db_backend(), + ?assertNot(emqx_persistent_session:is_store_enabled()), + Config; +end_per_testcase(_TestCase, _Config) -> + ok. + +%%-------------------------------------------------------------------- +%% Tests +%%-------------------------------------------------------------------- + +t_start_no_session(_Config) -> + Opts = #{ + clientinfo => #{ + clientid => ?CLIENT_ID, + zone => internal + }, + conninfo => #{ + clientid => ?CLIENT_ID, + receive_maximum => 32, + expiry_interval => 10000 + } + }, + ?assertMatch( + {error, {no_session, _}}, + emqx_eviction_agent_channel:start_supervised(Opts) + ). 
+ +t_start_no_expire(_Config) -> + erlang:process_flag(trap_exit, true), + + _ = emqtt_connect(?CLIENT_ID, false), + + Opts = #{ + clientinfo => #{ + clientid => ?CLIENT_ID, + zone => internal + }, + conninfo => #{ + clientid => ?CLIENT_ID, + receive_maximum => 32, + expiry_interval => 0 + } + }, + ?assertMatch( + {error, {should_be_expired, _}}, + emqx_eviction_agent_channel:start_supervised(Opts) + ). + +t_start_infinite_expire(_Config) -> + erlang:process_flag(trap_exit, true), + + _ = emqtt_connect(?CLIENT_ID, false), + + Opts = #{ + clientinfo => #{ + clientid => ?CLIENT_ID, + zone => internal + }, + conninfo => #{ + clientid => ?CLIENT_ID, + receive_maximum => 32, + expiry_interval => ?UINT_MAX + } + }, + ?assertMatch( + {ok, _}, + emqx_eviction_agent_channel:start_supervised(Opts) + ). + +t_kick(_Config) -> + erlang:process_flag(trap_exit, true), + + _ = emqtt_connect(?CLIENT_ID, false), + Opts = evict_session_opts(?CLIENT_ID), + + {ok, Pid} = emqx_eviction_agent_channel:start_supervised(Opts), + + ?assertEqual( + ok, + emqx_eviction_agent_channel:call(Pid, kick) + ). + +t_discard(_Config) -> + erlang:process_flag(trap_exit, true), + + _ = emqtt_connect(?CLIENT_ID, false), + Opts = evict_session_opts(?CLIENT_ID), + + {ok, Pid} = emqx_eviction_agent_channel:start_supervised(Opts), + + ?assertEqual( + ok, + emqx_eviction_agent_channel:call(Pid, discard) + ). + +t_stop(_Config) -> + erlang:process_flag(trap_exit, true), + + _ = emqtt_connect(?CLIENT_ID, false), + Opts = evict_session_opts(?CLIENT_ID), + + {ok, Pid} = emqx_eviction_agent_channel:start_supervised(Opts), + + ?assertEqual( + ok, + emqx_eviction_agent_channel:stop(Pid) + ). + +t_ignored_calls(_Config) -> + erlang:process_flag(trap_exit, true), + + _ = emqtt_connect(?CLIENT_ID, false), + Opts = evict_session_opts(?CLIENT_ID), + + {ok, Pid} = emqx_eviction_agent_channel:start_supervised(Opts), + + ok = emqx_eviction_agent_channel:cast(Pid, unknown), + Pid ! unknown, + + ?assertEqual( + [], + emqx_eviction_agent_channel:call(Pid, list_acl_cache) + ), + + ?assertEqual( + ok, + emqx_eviction_agent_channel:call(Pid, {quota, quota}) + ), + + ?assertEqual( + ignored, + emqx_eviction_agent_channel:call(Pid, unknown) + ). + +t_expire(_Config) -> + erlang:process_flag(trap_exit, true), + + _ = emqtt_connect(?CLIENT_ID, false), + #{conninfo := ConnInfo} = Opts0 = evict_session_opts(?CLIENT_ID), + Opts1 = Opts0#{conninfo => ConnInfo#{expiry_interval => 1}}, + + {ok, Pid} = emqx_eviction_agent_channel:start_supervised(Opts1), + + ct:sleep(1500), + + ?assertNot(is_process_alive(Pid)). + +t_get_connected_client_count(_Config) -> + erlang:process_flag(trap_exit, true), + + _ = emqtt_connect(?CLIENT_ID, false), + + ?assertEqual( + 1, + emqx_cm:get_connected_client_count() + ), + + Opts = evict_session_opts(?CLIENT_ID), + + {ok, _} = emqx_eviction_agent_channel:start_supervised(Opts), + + ?assertEqual( + 0, + emqx_cm:get_connected_client_count() + ). 
+
+t_persistence(_Config) ->
+    erlang:process_flag(trap_exit, true),
+
+    Topic = <<"t1">>,
+    Message = <<"message_to_persist">>,
+
+    {ok, C0} = emqtt_connect(?CLIENT_ID, false),
+    {ok, _, _} = emqtt:subscribe(C0, Topic, 0),
+
+    Opts = evict_session_opts(?CLIENT_ID),
+    {ok, Pid} = emqx_eviction_agent_channel:start_supervised(Opts),
+
+    {ok, C1} = emqtt_connect(),
+    {ok, _} = emqtt:publish(C1, Topic, Message, 1),
+    ok = emqtt:disconnect(C1),
+
+    %% Kill the channel so that the session is only persisted
+    ok = emqx_eviction_agent_channel:call(Pid, kick),
+
+    %% Should restore the session from persistent storage and receive messages
+    {ok, C2} = emqtt_connect(?CLIENT_ID, false),
+
+    receive
+        {publish, #{
+            payload := Message,
+            topic := Topic
+        }} ->
+            ok
+    after 1000 ->
+        ct:fail("message not received")
+    end,
+
+    ok = emqtt:disconnect(C2).
+
+%%--------------------------------------------------------------------
+%% Helpers
+%%--------------------------------------------------------------------
+
+evict_session_opts(ClientId) ->
+    maps:with(
+        [conninfo, clientinfo],
+        emqx_cm:get_chan_info(ClientId)
+    ).
diff --git a/apps/emqx_eviction_agent/test/emqx_eviction_agent_cli_SUITE.erl b/apps/emqx_eviction_agent/test/emqx_eviction_agent_cli_SUITE.erl
new file mode 100644
index 000000000..4cfb2fff5
--- /dev/null
+++ b/apps/emqx_eviction_agent/test/emqx_eviction_agent_cli_SUITE.erl
@@ -0,0 +1,39 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+
+-module(emqx_eviction_agent_cli_SUITE).
+
+-compile(export_all).
+-compile(nowarn_export_all).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("common_test/include/ct.hrl").
+
+all() ->
+    emqx_common_test_helpers:all(?MODULE).
+
+init_per_suite(Config) ->
+    emqx_common_test_helpers:start_apps([emqx_eviction_agent]),
+    Config.
+
+end_per_suite(Config) ->
+    _ = emqx_eviction_agent:disable(foo),
+    emqx_common_test_helpers:stop_apps([emqx_eviction_agent]),
+    Config.
+
+%%--------------------------------------------------------------------
+%% Tests
+%%--------------------------------------------------------------------
+
+t_status(_Config) ->
+    %% usage
+    ok = emqx_eviction_agent_cli:cli(["foobar"]),
+
+    %% status
+    ok = emqx_eviction_agent_cli:cli(["status"]),
+
+    ok = emqx_eviction_agent:enable(foo, undefined),
+
+    %% status
+    ok = emqx_eviction_agent_cli:cli(["status"]).
diff --git a/apps/emqx_eviction_agent/test/emqx_eviction_agent_test_helpers.erl b/apps/emqx_eviction_agent/test/emqx_eviction_agent_test_helpers.erl
new file mode 100644
index 000000000..3953ec3e2
--- /dev/null
+++ b/apps/emqx_eviction_agent/test/emqx_eviction_agent_test_helpers.erl
@@ -0,0 +1,134 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+
+-module(emqx_eviction_agent_test_helpers).
+
+-export([
+    emqtt_connect/0,
+    emqtt_connect/1,
+    emqtt_connect/2,
+    emqtt_connect_many/2,
+    stop_many/1,
+
+    emqtt_try_connect/1,
+
+    start_cluster/2,
+    start_cluster/3,
+    stop_cluster/2,
+
+    case_specific_node_name/2,
+    case_specific_node_name/3,
+    concat_atoms/1
+]).
+
+emqtt_connect() ->
+    emqtt_connect(<<"client1">>, true).
+
+emqtt_connect(ClientId, CleanStart) ->
+    emqtt_connect([{clientid, ClientId}, {clean_start, CleanStart}]).
+ +emqtt_connect(Opts) -> + {ok, C} = emqtt:start_link( + Opts ++ + [ + {proto_ver, v5}, + {properties, #{'Session-Expiry-Interval' => 600}} + ] + ), + case emqtt:connect(C) of + {ok, _} -> {ok, C}; + {error, _} = Error -> Error + end. + +emqtt_connect_many(Port, Count) -> + lists:map( + fun(N) -> + NBin = integer_to_binary(N), + ClientId = <<"client-", NBin/binary>>, + {ok, C} = emqtt_connect([{clientid, ClientId}, {clean_start, false}, {port, Port}]), + C + end, + lists:seq(1, Count) + ). + +stop_many(Clients) -> + lists:foreach( + fun(C) -> + catch emqtt:disconnect(C) + end, + Clients + ), + ct:sleep(100). + +emqtt_try_connect(Opts) -> + case emqtt_connect(Opts) of + {ok, C} -> + emqtt:disconnect(C), + ok; + {error, _} = Error -> + Error + end. + +start_cluster(NamesWithPorts, Apps) -> + start_cluster(NamesWithPorts, Apps, []). + +start_cluster(NamesWithPorts, Apps, Env) -> + Specs = lists:map( + fun({ShortName, Port}) -> + {core, ShortName, #{listener_ports => [{tcp, Port}]}} + end, + NamesWithPorts + ), + Opts0 = [ + {env, [{emqx, boot_modules, [broker, listeners]}] ++ Env}, + {apps, Apps}, + {conf, + [{[listeners, Proto, default, enabled], false} || Proto <- [ssl, ws, wss]] ++ + [{[rpc, mode], async}]} + ], + Cluster = emqx_common_test_helpers:emqx_cluster( + Specs, + Opts0 + ), + NodesWithPorts = [ + { + emqx_common_test_helpers:start_slave(Name, Opts), + proplists:get_value(Name, NamesWithPorts) + } + || {Name, Opts} <- Cluster + ], + NodesWithPorts. + +stop_cluster(NodesWithPorts, Apps) -> + lists:foreach( + fun({Node, _Port}) -> + lists:foreach( + fun(App) -> + rpc:call(Node, application, stop, [App]) + end, + Apps + ), + %% This sleep is just to make logs cleaner + ct:sleep(100), + _ = rpc:call(Node, emqx_common_test_helpers, stop_apps, []), + emqx_common_test_helpers:stop_slave(Node) + end, + NodesWithPorts + ). + +case_specific_node_name(Module, Case) -> + concat_atoms([Module, '__', Case]). + +case_specific_node_name(Module, Case, Node) -> + concat_atoms([Module, '__', Case, '__', Node]). + +concat_atoms(Atoms) -> + binary_to_atom( + iolist_to_binary( + lists:map( + fun atom_to_binary/1, + Atoms + ) + ) + ). diff --git a/apps/emqx_exhook/i18n/emqx_exhook_api_i18n.conf b/apps/emqx_exhook/i18n/emqx_exhook_api_i18n.conf deleted file mode 100644 index 492c2c4ec..000000000 --- a/apps/emqx_exhook/i18n/emqx_exhook_api_i18n.conf +++ /dev/null @@ -1,177 +0,0 @@ -emqx_exhook_api { - - list_all_servers { - desc { - en: "List all servers" - zh: "查看ExHook 服务器列表" - } - } - - add_server { - desc { - en: "Add a server" - zh: "添加 ExHook 服务器" - } - } - - get_detail { - desc { - en: "Get the detail information of Exhook server" - zh: "查看 Exhook 服务器详细信息" - } - } - - update_server { - desc { - en: "Update the server" - zh: "更新 Exhook 服务器" - } - } - - delete_server { - desc { - en: "Delete the server" - zh: "删除 Exhook 服务器" - } - } - - get_hooks { - desc { - en: "Get the hooks information of server" - zh: "获取 Exhook 服务器的钩子信息" - } - } - - move_api { - desc { - en: """Move the server. 
-NOTE: The position should be \"front | rear | before:{name} | after:{name}""" - zh: """移动 Exhook 服务器顺序。 -注意: 移动的参数只能是:front | rear | before:{name} | after:{name}""" - } - } - - move_position { - desc { - en: "The target position to be moved" - zh: "移动的方向" - } - } - - hook_name { - desc { - en: "The hook's name" - zh: "钩子的名称" - } - } - - server_name { - desc { - en: "The Exhook server name" - zh: "Exhook 服务器的名称" - } - } - - hook_params { - desc { - en: "The parameters used when the hook is registered" - zh: "钩子注册时使用的参数" - } - } - - server_metrics { - desc { - en: "Metrics information of this server in the current node" - zh: "当前节点中该服务器的指标信息" - } - } - - node_metrics { - desc { - en: "Metrics information of this server in all nodes" - zh: "所有节点中该服务器的指标信息" - } - } - - node_status { - desc { - en: "status of this server in all nodes" - zh: "所有节点中该服务器的状态信息" - } - } - - hook_metrics { - desc { - en: "Metrics information of this hook in the current node" - zh: "当前节点中该钩子的指标信息" - } - } - - node_hook_metrics { - desc { - en: "Metrics information of this hook in all nodes" - zh: "所有节点中该钩子的指标信息" - } - } - - node { - desc { - en: "Node name" - zh: "节点名称" - } - } - - metrics { - desc { - en: "Metrics information" - zh: "指标信息" - } - } - - status { - desc { - en: """The status of Exhook server. -connected: connection succeeded -connecting: connection failed, reconnecting -disconnected: failed to connect and didn't reconnect -disabled: this server is disabled -error: failed to view the status of this server -""" - zh: """Exhook 服务器的状态。 -connected: 连接成功 -connecting: 连接失败,重连中 -disconnected: 连接失败,且未设置自动重连 -disabled: 该服务器未开启 -error: 查看该服务器状态时发生错误""" - } - } - - metric_succeed { - desc { - en: "The number of times the hooks execution successful" - zh: "钩子执行成功的次数" - } - } - - metric_failed { - desc { - en: "The number of times the hook execution failed" - zh: "钩子执行失败的次数" - } - } - - metric_rate { - desc { - en: "The call rate of hooks" - zh: "钩子的调用速率" - } - } - - metric_max_rate { - desc { - en: "Maximum call rate of hooks" - zh: "钩子的最大调用速率" - } - } - -} diff --git a/apps/emqx_exhook/i18n/emqx_exhook_i18n.conf b/apps/emqx_exhook/i18n/emqx_exhook_i18n.conf deleted file mode 100644 index 5b34a245a..000000000 --- a/apps/emqx_exhook/i18n/emqx_exhook_i18n.conf +++ /dev/null @@ -1,97 +0,0 @@ -emqx_exhook_schema { - - servers { - desc { - en: "List of exhook servers" - zh: "ExHook 服务器列表" - } - } - - name { - desc { - en: "Name of the exhook server" - zh: "ExHook 服务器名称" - } - } - - enable { - desc { - en: "Enable this Exhook server" - zh: "开启这个 Exhook 服务器" - } - } - - url { - desc { - en: "URL of the gRPC server" - zh: "gRPC 服务器地址" - } - } - - request_timeout { - desc { - en: "The timeout of request gRPC server" - zh: "gRPC 服务器请求超时时间" - } - } - - failed_action { - desc { - en: "The value that is returned when the request to the gRPC server fails for any reason" - zh: "当 gRPC 请求失败后的操作" - } - } - - auto_reconnect { - desc { - en: """Whether to automatically reconnect (initialize) the gRPC server. 
-When gRPC is not available, Exhook tries to request the gRPC service at that interval and reinitialize the list of mounted hooks.""" - zh: """自动重连到 gRPC 服务器的设置。 -当 gRPC 服务器不可用时,Exhook 将会按照这里设置的间隔时间进行重连,并重新初始化注册的钩子""" - } - } - - pool_size { - desc { - en: "The process pool size for gRPC client" - zh: "gRPC 客户端进程池大小" - } - } - - socket_options { - desc { - en: "Connection socket options" - zh: "连接套接字设置" - } - } - - keepalive { - desc { - en: """Enables/disables periodic transmission on a connected socket when no other data is exchanged. -If the other end does not respond, the connection is considered broken and an error message is sent to the controlling process.""" - zh: """当没有其他数据交换时,是否向连接的对端套接字定期的发送探测包。如果另一端没有响应,则认为连接断开,并向控制进程发送错误消息""" - } - } - - nodelay { - desc { - en: """If true, option TCP_NODELAY is turned on for the socket, -which means that also small amounts of data are sent immediately""" - zh: "如果为 true,则为套接字设置 TCP_NODELAY 选项,这意味着会立即发送数据包" - } - } - - recbuf { - desc { - en: "The minimum size of receive buffer to use for the socket" - zh: "套接字的最小接收缓冲区大小" - } - } - - sndbuf { - desc { - en: "The minimum size of send buffer to use for the socket" - zh: "套接字的最小发送缓冲区大小" - } - } -} diff --git a/apps/emqx_exhook/rebar.config b/apps/emqx_exhook/rebar.config index fad539ed1..7abc601b4 100644 --- a/apps/emqx_exhook/rebar.config +++ b/apps/emqx_exhook/rebar.config @@ -5,7 +5,8 @@ ]}. {deps, [ - {emqx, {path, "../emqx"}} + {emqx, {path, "../emqx"}}, + {emqx_utils, {path, "../emqx_utils"}} ]}. {grpc, [ diff --git a/apps/emqx_exhook/src/emqx_exhook.app.src b/apps/emqx_exhook/src/emqx_exhook.app.src index d81819c98..194c91206 100644 --- a/apps/emqx_exhook/src/emqx_exhook.app.src +++ b/apps/emqx_exhook/src/emqx_exhook.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_exhook, [ {description, "EMQX Extension for Hook"}, - {vsn, "5.0.9"}, + {vsn, "5.0.12"}, {modules, []}, {registered, []}, {mod, {emqx_exhook_app, []}}, diff --git a/apps/emqx_exhook/src/emqx_exhook_api.erl b/apps/emqx_exhook/src/emqx_exhook_api.erl index 4d7de2866..9bfae9579 100644 --- a/apps/emqx_exhook/src/emqx_exhook_api.erl +++ b/apps/emqx_exhook/src/emqx_exhook_api.erl @@ -229,9 +229,9 @@ server_conf_schema() -> name => "default", enable => true, url => <<"http://127.0.0.1:8081">>, - request_timeout => "5s", + request_timeout => <<"5s">>, failed_action => deny, - auto_reconnect => "60s", + auto_reconnect => <<"60s">>, pool_size => 8, ssl => SSL } @@ -471,14 +471,14 @@ fill_server_hooks_info([], _Name, _Default, MetricsL) -> -spec call_cluster(fun(([node()]) -> emqx_rpc:erpc_multicall(A))) -> [{node(), A | {error, _Err}}]. call_cluster(Fun) -> - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), Ret = Fun(Nodes), lists:zip(Nodes, lists:map(fun emqx_rpc:unwrap_erpc/1, Ret)). %%-------------------------------------------------------------------- %% Internal Funcs %%-------------------------------------------------------------------- -err_msg(Msg) -> emqx_misc:readable_error_msg(Msg). +err_msg(Msg) -> emqx_utils:readable_error_msg(Msg). 
get_raw_config() -> RawConfig = emqx:get_raw_config([exhook, servers], []), diff --git a/apps/emqx_exhook/src/emqx_exhook_mgr.erl b/apps/emqx_exhook/src/emqx_exhook_mgr.erl index 77937a835..0647c80ea 100644 --- a/apps/emqx_exhook/src/emqx_exhook_mgr.erl +++ b/apps/emqx_exhook/src/emqx_exhook_mgr.erl @@ -507,11 +507,11 @@ sort_name_by_order(Names, Orders) -> lists:sort( fun (A, B) when is_binary(A) -> - emqx_map_lib:deep_get([A, order], Orders) < - emqx_map_lib:deep_get([B, order], Orders); + emqx_utils_maps:deep_get([A, order], Orders) < + emqx_utils_maps:deep_get([B, order], Orders); (#{name := A}, #{name := B}) -> - emqx_map_lib:deep_get([A, order], Orders) < - emqx_map_lib:deep_get([B, order], Orders) + emqx_utils_maps:deep_get([A, order], Orders) < + emqx_utils_maps:deep_get([B, order], Orders) end, Names ). diff --git a/apps/emqx_exhook/src/emqx_exhook_schema.erl b/apps/emqx_exhook/src/emqx_exhook_schema.erl index ce79dddac..f6cc896f3 100644 --- a/apps/emqx_exhook/src/emqx_exhook_schema.erl +++ b/apps/emqx_exhook/src/emqx_exhook_schema.erl @@ -31,7 +31,8 @@ namespace() -> exhook. -roots() -> [exhook]. +roots() -> + [{exhook, ?HOCON(?R_REF(exhook), #{importance => ?IMPORTANCE_LOW})}]. fields(exhook) -> [ @@ -63,7 +64,7 @@ fields(server) -> })}, {request_timeout, ?HOCON(emqx_schema:duration(), #{ - default => "5s", + default => <<"5s">>, desc => ?DESC(request_timeout) })}, {failed_action, failed_action()}, @@ -74,7 +75,7 @@ fields(server) -> })}, {auto_reconnect, ?HOCON(hoconsc:union([false, emqx_schema:duration()]), #{ - default => "60s", + default => <<"60s">>, desc => ?DESC(auto_reconnect) })}, {pool_size, diff --git a/apps/emqx_exhook/test/emqx_exhook_SUITE.erl b/apps/emqx_exhook/test/emqx_exhook_SUITE.erl index aaa8649c0..ff313c8c8 100644 --- a/apps/emqx_exhook/test/emqx_exhook_SUITE.erl +++ b/apps/emqx_exhook/test/emqx_exhook_SUITE.erl @@ -24,13 +24,13 @@ -include_lib("eunit/include/eunit.hrl"). -include_lib("common_test/include/ct.hrl"). -include_lib("emqx/include/emqx_hooks.hrl"). +-include_lib("emqx_conf/include/emqx_conf.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). -define(DEFAULT_CLUSTER_NAME_ATOM, emqxcl). -define(OTHER_CLUSTER_NAME_ATOM, test_emqx_cluster). -define(OTHER_CLUSTER_NAME_STRING, "test_emqx_cluster"). --define(CLUSTER_RPC_SHARD, emqx_cluster_rpc_shard). -define(CONF_DEFAULT, << "\n" @@ -54,6 +54,8 @@ "}\n" >>). +-import(emqx_common_test_helpers, [on_exit/1]). + %%-------------------------------------------------------------------- %% Setups %%-------------------------------------------------------------------- @@ -89,7 +91,7 @@ init_per_testcase(_, Config) -> timer:sleep(200), Config. -end_per_testcase(_, Config) -> +end_per_testcase(_, _Config) -> case erlang:whereis(node()) of undefined -> ok; @@ -97,7 +99,8 @@ end_per_testcase(_, Config) -> erlang:unlink(P), erlang:exit(P, kill) end, - Config. + emqx_common_test_helpers:call_janitor(), + ok. load_cfg(Cfg) -> ok = emqx_common_test_helpers:load_config(emqx_exhook_schema, Cfg). 
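This suite now relies on the janitor pattern (the `on_exit/1` import plus `emqx_common_test_helpers:call_janitor()` in `end_per_testcase/2` above, and the `on_exit(...)` call in the next hunk). A minimal sketch of how a test case might use it; the stub-server helper and test-case name are hypothetical, not part of the diff:

```erlang
%% Sketch of the janitor pattern: on_exit/1 registers a cleanup closure,
%% and call_janitor/0 (invoked from end_per_testcase/2) runs every
%% registered closure once the test case has finished, pass or fail.
t_janitor_example(_Config) ->
    {ok, Pid} = start_stub_server(),            %% hypothetical setup helper
    on_exit(fun() -> exit(Pid, shutdown) end),  %% cleanup runs even on failure
    ?assert(is_process_alive(Pid)).
```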
@@ -298,8 +301,14 @@ t_cluster_name(_) ->
             ok
         end,

-    emqx_common_test_helpers:stop_apps([emqx, emqx_exhook]),
+    stop_apps([emqx, emqx_exhook]),
     emqx_common_test_helpers:start_apps([emqx, emqx_exhook], SetEnvFun),
+    on_exit(fun() ->
+        stop_apps([emqx, emqx_exhook]),
+        load_cfg(?CONF_DEFAULT),
+        emqx_common_test_helpers:start_apps([emqx_exhook]),
+        mria:wait_for_tables([?CLUSTER_MFA, ?CLUSTER_COMMIT])
+    end),

     ?assertEqual(?OTHER_CLUSTER_NAME_STRING, emqx_sys:cluster_name()),

@@ -480,3 +489,7 @@ data_file(Name) ->

 cert_file(Name) ->
     data_file(filename:join(["certs", Name])).
+
+%% FIXME: this creates an inter-test dependency
+stop_apps(Apps) ->
+    emqx_common_test_helpers:stop_apps(Apps, #{erase_all_configs => false}).
diff --git a/apps/emqx_exhook/test/emqx_exhook_api_SUITE.erl b/apps/emqx_exhook/test/emqx_exhook_api_SUITE.erl
index 7be940a53..c03b3f231 100644
--- a/apps/emqx_exhook/test/emqx_exhook_api_SUITE.erl
+++ b/apps/emqx_exhook/test/emqx_exhook_api_SUITE.erl
@@ -310,8 +310,8 @@ t_update(Cfg) ->
     ?assertMatch([], emqx_exhook_mgr:running()).

 decode_json(Data) ->
-    BinJosn = emqx_json:decode(Data, [return_maps]),
-    emqx_map_lib:unsafe_atom_key_map(BinJosn).
+    BinJson = emqx_utils_json:decode(Data, [return_maps]),
+    emqx_utils_maps:unsafe_atom_key_map(BinJson).

 request_api(Method, Url, Auth) ->
     request_api(Method, Url, [], Auth, []).
@@ -332,7 +332,7 @@ request_api(Method, Url, QueryParams, Auth, Body) ->
             "" -> Url;
             _ -> Url ++ "?" ++ QueryParams
         end,
-    do_request_api(Method, {NewUrl, [Auth], "application/json", emqx_json:encode(Body)}).
+    do_request_api(Method, {NewUrl, [Auth], "application/json", emqx_utils_json:encode(Body)}).

 do_request_api(Method, Request) ->
     case httpc:request(Method, Request, [], [{body_format, binary}]) of
@@ -347,13 +347,7 @@ do_request_api(Method, Request) ->
     end.

 auth_header_() ->
-    AppId = <<"admin">>,
-    AppSecret = <<"public">>,
-    auth_header_(binary_to_list(AppId), binary_to_list(AppSecret)).
-
-auth_header_(User, Pass) ->
-    Encoded = base64:encode_to_string(lists:append([User, ":", Pass])),
-    {"Authorization", "Basic " ++ Encoded}.
+    emqx_mgmt_api_test_util:auth_header_().

 api_path(Parts) ->
     ?HOST ++ filename:join([?BASE_PATH, ?API_VERSION] ++ Parts).
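As an aside, a minimal sketch (not part of the diff) of how the cluster helpers introduced earlier in this changeset (`start_cluster/2`, `emqtt_connect_many/2`, `stop_many/1`, `stop_cluster/2`) fit together in a test case; the case name, node names, listener ports, and app list are hypothetical:

```erlang
%% Hypothetical test case wired from the helpers above; node names and
%% listener ports are made up for illustration.
t_cluster_smoke_example(_Config) ->
    NamesWithPorts = [{node1, 20001}, {node2, 20002}],
    %% Boot a two-node core cluster whose TCP listeners bind the given ports.
    NodesWithPorts = start_cluster(NamesWithPorts, [emqx]),
    try
        %% Open ten clean_start=false sessions against the first node ...
        Clients = emqtt_connect_many(20001, 10),
        %% ... assertions about routing/session takeover would go here ...
        stop_many(Clients)
    after
        stop_cluster(NodesWithPorts, [emqx])
    end.
```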
diff --git a/apps/emqx_gateway/.gitignore b/apps/emqx_gateway/.gitignore
index 5bff8a84d..a81bb07da 100644
--- a/apps/emqx_gateway/.gitignore
+++ b/apps/emqx_gateway/.gitignore
@@ -18,8 +18,4 @@ _build
 rebar3.crashdump
 *~
 rebar.lock
-src/exproto/emqx_exproto_pb.erl
-src/exproto/emqx_exproto_v_1_connection_adapter_bhvr.erl
-src/exproto/emqx_exproto_v_1_connection_adapter_client.erl
-src/exproto/emqx_exproto_v_1_connection_handler_bhvr.erl
-src/exproto/emqx_exproto_v_1_connection_handler_client.erl
+
diff --git a/apps/emqx_gateway/Makefile b/apps/emqx_gateway/Makefile
deleted file mode 100644
index b2a54f7dd..000000000
--- a/apps/emqx_gateway/Makefile
+++ /dev/null
@@ -1,28 +0,0 @@
-## shallow clone for speed
-
-REBAR_GIT_CLONE_OPTIONS += --depth 1
-export REBAR_GIT_CLONE_OPTIONS
-
-REBAR = rebar3
-all: compile
-
-compile:
-	$(REBAR) compile
-
-clean: distclean
-
-ct:
-	$(REBAR) as test ct -v
-
-eunit:
-	$(REBAR) as test eunit
-
-xref:
-	$(REBAR) xref
-
-cover:
-	$(REBAR) cover
-
-distclean:
-	@rm -rf _build
-	@rm -f data/app.*.config data/vm.*.args rebar.lock
diff --git a/apps/emqx_gateway/README.md b/apps/emqx_gateway/README.md
index be8f6cb35..ebab3a7a9 100644
--- a/apps/emqx_gateway/README.md
+++ b/apps/emqx_gateway/README.md
@@ -1,332 +1,83 @@
-# emqx_gateway
+# Gateway

-EMQX Gateway
+EMQX Gateway is an application framework that manages all gateways within EMQX.

-## Concept
+It provides a set of standards to define how to implement a certain type of
+protocol access on EMQX. For example:

-    EMQX Gateway Management
-      - Gateway-Registry (or Gateway Type)
-          - *Load
-          - *UnLoad
-          - *List
+- Frame parsing
+- Access authentication
+- Publish and subscribe
+- Configuration & Schema
+- HTTP/CLI management interfaces

-      - Gateway
-          - *Create
-          - *Delete
-          - *Update
-          - *Stop-And-Start
-          - *Hot-Upgrade
-          - *Satrt/Enable
-          - *Stop/Disable
-      - Listener
+The emqx_gateway application depends on `emqx`, `emqx_authn`, `emqx_authz`, `emqx_ctl`,
+which provide the foundation for protocol access.

-## ROADMAP
+More introduction: [Extended Protocol Gateway](https://www.emqx.io/docs/en/v5.0/gateway/gateway.html)

-Gateway v0.1: "Basic Functionals"
-    - Management support
-    - Conn/Frame/Protocol Template
-    - Support Stomp/MQTT-SN/CoAP/LwM2M/ExProto
+## Usage

-Gateway v0.2: "Integration & Friendly Management"
-    - Hooks & Metrics & Statistic
-    - HTTP APIs
-    - Management in the cluster
-    - Integrate with AuthN
-    - Integrate with `emqx_config`
-    - Improve hocon config
-    - Mountpoint & ClientInfo's Metadata
-    - The Concept Review
+This application is just a framework; we provide some standard implementations,
+such as the [Stomp](../emqx_stomp/README.md), [MQTT-SN](../emqx_mqttsn/README.md),
+[CoAP](../emqx_coap/README.md) and [LwM2M](../emqx_lwm2m/README.md) gateways.

-Gateway v0.3: "Fault tolerance and high availability"
-    - A common session modoule for message delivery policy
-    - The restart mechanism for gateway-instance
-    - Consistency of cluster state
-    - Configuration hot update
-
-Gateway v1.0: "Best practices for each type of protocol"
-    - CoAP
-    - Stomp
-    - MQTT-SN
-    - LwM2M
-
-### Compatible with EMQX
-
-> Why we need to compatible
-
-1. Authentication
-2. Hooks/Event system
-3. Messages Mode & Rule Engine
-4. Cluster registration
-5. Metrics & Statistic
-
-> How to do it
-
->
-
-### User Interface
-
-#### Configurations
+These applications are all packaged by default in the EMQX distribution. If you
+need to start a certain gateway, you only need to enable it via the
+Dashboard, HTTP API, or emqx.conf file.
+For instance, enable the Stomp gateway in emqx.conf: ```hocon -gateway { +gateway.stomp { - ## ... some confs for top scope - .. - ## End. + mountpoint = "stomp/" - ## Gateway Instances - - lwm2m[.name] { - - ## variable support - mountpoint: lwm2m/%e/ - - lifetime_min: 1s - lifetime_max: 86400s - #qmode_time_window: 22 - #auto_observe: off - - #update_msg_publish_condition: contains_object_list - - xml_dir: {{ platform_etc_dir }}/lwm2m_xml - - clientinfo_override: { - username: ${register.opts.uname} - password: ${register.opts.passwd} - clientid: ${epn} - } - - #authenticator: allow_anonymous - authenticator: [ - { - type: auth-http - method: post - //?? how to generate clientinfo ?? - params: $client.credential - } - ] - - translator: { - downlink: "dn/#" - uplink: { - notify: "up/notify" - response: "up/resp" - register: "up/resp" - update: "up/reps" - } - } - - %% ?? listener.$type.name ?? - listener.udp[.name] { - listen_on: 0.0.0.0:5683 - max_connections: 1024000 - max_conn_rate: 1000 - ## ?? udp keepalive in socket level ??? - #keepalive: - ## ?? udp proxy-protocol in socket level ??? - #proxy_protocol: on - #proxy_timeout: 30s - recbuf: 2KB - sndbuf: 2KB - buffer: 2KB - tune_buffer: off - #access: allow all - read_packets: 20 - } - - listener.dtls[.name] { - listen_on: 0.0.0.0:5684 - ... - } - } - - ## The CoAP Gateway - coap[.name] { - - #enable_stats: on - - authenticator: [ - ... - ] - - listener.udp[.name] { - ... - } - - listener.dtls[.name] { - ... - } -} - - ## The Stomp Gateway - stomp[.name] { - - allow_anonymous: true - - default_user.login: guest - default_user.passcode: guest - - frame.max_headers: 10 - frame.max_header_length: 1024 - frame.max_body_length: 8192 - - listener.tcp[.name] { - ... - } - - listener.ssl[.name] { - ... - } - } - - exproto[.name] { - - proto_name: DL-648 - - authenticators: [...] - - adapter: { - type: grpc - options: { - listen_on: 9100 - } - } - - handler: { - type: grpc - options: { - url: - } - } - - listener.tcp[.name] { - ... - } - } - - ## ============================ Enterpise gateways - - ## The JT/T 808 Gateway - jtt808[.name] { - - idle_timeout: 30s - enable_stats: on - max_packet_size: 8192 - - clientinfo_override: { - clientid: $phone - username: xxx - password: xxx - } - - authenticator: [ - { - type: auth-http - method: post - params: $clientinfo.credential - } - ] - - translator: { - subscribe: [jt808/%c/dn] - publish: [jt808/%c/up] - } - - listener.tcp[.name] { - ... - } - - listener.ssl[.name] { - ... - } - } - - gbt32960[.name] { - - frame.max_length: 8192 - retx_interval: 8s - retx_max_times: 3 - message_queue_len: 10 - - authenticators: [...] - - translator: { - ## upstream - login: gbt32960/${vin}/upstream/vlogin - logout: gbt32960/${vin}/upstream/vlogout - informing: gbt32960/${vin}/upstream/info - reinforming: gbt32960/${vin}/upstream/reinfo - ## downstream - downstream: gbt32960/${vin}/dnstream - response: gbt32960/${vin}/upstream/response - } - - listener.tcp[.name] { - ... - } - - listener.ssl[.name] { - ... - } - } - - privtcp[.name] { - - max_packet_size: 65535 - idle_timeout: 15s - - enable_stats: on - - force_gc_policy: 1000|1MB - force_shutdown_policy: 8000|800MB - - translator: { - up_topic: tcp/%c/up - dn_topic: tcp/%c/dn - } - - listener.tcp[.name]: { - ... 
-    }
+  listeners.tcp.default {
+    bind = 61613
+    acceptors = 16
+    max_connections = 1024000
+    max_conn_rate = 1000
+  }
 }
 ```

-#### CLI
+## How to develop your Gateway application

-##### Gateway
+There are three ways to develop a Gateway application to accept clients of your
+private protocol.

-```bash
-## List all started gateway and gateway-instance
-emqx_ctl gateway list
-emqx_ctl gateway lookup
-emqx_ctl gateway stop
-emqx_ctl gateway start
+### Raw Erlang Application

-emqx_ctl gateway-registry re-searching
-emqx_ctl gateway-registry list
+This approach is the same as in EMQX 4.x. You need to implement an Erlang application,
+which is packaged into EMQX as a plugin or as a source-code dependency.
+In this approach, you do not need to respect any specifications of emqx_gateway,
+and you can freely implement the features you need.

-emqx_ctl gateway-clients list
-emqx_ctl gateway-clients show
-emqx_ctl gateway-clients kick
-## Banned ??
-emqx_ctl gateway-banned
+### Respect the emqx_gateway framework

-## Metrics
-emqx_ctl gateway-metrics []
-```
+Similar to the first approach, you still need to implement an application using Erlang
+and package it into EMQX.
+The only difference is that you need to follow the standard behaviors (callbacks)
+provided by emqx_gateway.

-#### Management by HTTP-API/Dashboard/
+This is the approach we recommend. In this approach, your implementation can be managed
+by the emqx_gateway framework, even though it may require you to understand more of its details.

-#### How to integrate a protocol to your platform
+### Use ExProto Gateway (Non-Erlang developers)

-### Develop your protocol gateway
+If you want to implement your gateway using another programming language, such as
+Java, Python, or Go, this is the approach for you.

-There are 3 way to create your protocol gateway for EMQX 5.0:
+You need to implement a gRPC service in that language to parse
+your device protocol and integrate it with EMQX.

-1. Use Erlang to create a new emqx plugin to handle all of protocol packets (same as v5.0 before)

-2. Based on the emqx-gateway-impl-bhvr and emqx-gateway
+## Contributing

-3. Use the gRPC Gateway
+Please see our [contributing.md](../../CONTRIBUTING.md).
+ +## License + +See [LICENSE](../../APL.txt) diff --git a/apps/emqx_gateway/i18n/emqx_coap_api_i18n.conf b/apps/emqx_gateway/i18n/emqx_coap_api_i18n.conf deleted file mode 100644 index 77ca40c00..000000000 --- a/apps/emqx_gateway/i18n/emqx_coap_api_i18n.conf +++ /dev/null @@ -1,58 +0,0 @@ -emqx_coap_api { - - send_coap_request { - desc { - en: """Send a CoAP request message to the client""" - zh: """发送 CoAP 消息到指定客户端""" - } - } - - token { - desc { - en: """Message token, can be empty""" - zh: """消息 Token, 可以为空""" - } - } - - method { - desc { - en: """Request method type""" - zh: """请求 Method 类型""" - } - } - - timeout { - desc { - en: """Timespan for response""" - zh: """请求超时时间""" - } - } - - content_type { - desc { - en: """Payload type""" - zh: """Payload 类型""" - } - } - - payload { - desc { - en: """The content of the payload""" - zh: """Payload 内容""" - } - } - - message_id { - desc { - en: """Message ID""" - zh: """消息 ID""" - } - } - - response_code { - desc { - en: """Response code""" - zh: """应答码""" - } - } -} diff --git a/apps/emqx_gateway/i18n/emqx_gateway_api_authn_i18n.conf b/apps/emqx_gateway/i18n/emqx_gateway_api_authn_i18n.conf deleted file mode 100644 index a9ae33f0c..000000000 --- a/apps/emqx_gateway/i18n/emqx_gateway_api_authn_i18n.conf +++ /dev/null @@ -1,99 +0,0 @@ -emqx_gateway_api_authn { - - get_authn { - desc { - en: """Gets the configuration of the specified gateway authenticator.
-Returns 404 when gateway or authentication is not enabled.""" - zh: """获取指定网关认证器的配置 -当网关或认证未启用时,返回 404。""" - } - } - - update_authn { - desc { - en: """Update the configuration of the specified gateway authenticator, or disable the authenticator.""" - zh: """更新指定网关认证器的配置,或停用认证器。""" - } - } - - add_authn { - desc { - en: """Enables the authenticator for client authentication for the specified gateway.
-When the authenticator is not configured or turned off, all client connections are assumed to be allowed.
-Note: Only one authenticator is allowed to be enabled at a time in the gateway, rather than allowing multiple authenticators to be configured to form an authentication chain as in MQTT.""" - zh: """为指定网关开启认证器实现客户端认证的功能。
-当未配置认证器或关闭认证器时,则认为允许所有客户端的连接。
-注:在网关中仅支持添加一个认证器,而不是像 MQTT 一样允许配置多个认证器构成认证链。""" - } - } - - delete_authn { - desc { - en: """Delete the authenticator of the specified gateway.""" - zh: """删除指定网关的认证器。""" - } - } - - list_users { - desc { - en: """Get the users for the authenticator (only supported by built_in_database).""" - zh: """获取用户列表(仅支持 built_in_database 类型的认证器)""" - } - } - - add_user { - desc { - en: """Add user for the authenticator (only supports built_in_database).""" - zh: """添加用户(仅支持 built_in_database 类型的认证器)""" - } - } - - get_user { - desc { - en: """Get user info from the gateway authenticator (only supports built_in_database)""" - zh: """获取用户信息(仅支持 built_in_database 类型的认证器)""" - } - } - - update_user { - desc { - en: """Update the user info for the gateway authenticator (only supports built_in_database)""" - zh: """更新用户信息(仅支持 built_in_database 类型的认证器)""" - } - } - - delete_user { - desc { - en: """Delete the user for the gateway authenticator (only supports built_in_database)""" - zh: """删除用户(仅支持 built_in_database 类型的认证器)""" - } - } - - import_users { - desc { - en: """Import users into the gateway authenticator (only supports built_in_database)""" - zh: """导入用户(仅支持 built_in_database 类型的认证器)""" - } - } - - user_id { - desc { - en: """User ID""" - zh: """用户 ID""" - } - } - - like_user_id { - desc { - en: """Fuzzy search using user ID (username or clientid), only supports search by substring.""" - zh: """使用用户 ID (username 或 clientid)模糊搜索,仅支持按子串的方式进行搜索。""" - } - } - - is_superuser { - desc { - en: """Is superuser""" - zh: """是否是超级用户""" - } - } -} diff --git a/apps/emqx_gateway/i18n/emqx_gateway_api_clients_i18n.conf b/apps/emqx_gateway/i18n/emqx_gateway_api_clients_i18n.conf deleted file mode 100644 index 1e6f575c3..000000000 --- a/apps/emqx_gateway/i18n/emqx_gateway_api_clients_i18n.conf +++ /dev/null @@ -1,478 +0,0 @@ -emqx_gateway_api_clients { - - list_clients { - desc { - en: """Get the gateway client list""" - zh: """获取指定网关的客户端列表""" - } - } - - get_client { - desc { - en: """Get the gateway client information""" - zh: """获取客户端信息""" - } - } - - kick_client { - desc { - en: """Kick out the gateway client""" - zh: """踢出指定客户端""" - } - } - - list_subscriptions { - desc { - en: """Get the gateway client subscriptions""" - zh: """获取某客户端的主题订阅列表""" - } - } - - add_subscription { - desc { - en: """Create a subscription membership""" - zh: """为某客户端新增订阅关系""" - } - } - - delete_subscription { - desc { - en: """Delete a subscriptions membership""" - zh: """为某客户端删除某订阅关系""" - } - } - - param_node { - desc { - en: """Match the client's node name""" - zh: """匹配客户端的节点名称""" - } - } - - param_clientid { - desc { - en: """Match the client's ID""" - zh: """匹配客户端 ID""" - } - } - - param_username { - desc { - en: """Match the client's Username""" - zh: """匹配客户端 Username""" - } - } - - param_ip_address { - desc { - en: """Match the client's ip address""" - zh: """匹配客户端 IP 地址""" - } - } - - param_conn_state { - desc { - en: """Match the client's connection state""" - zh: """匹配客户端连接状态""" - } - } - - param_proto_ver { - desc { - en: """Match the client's protocol version""" - zh: """匹配客户端协议版本""" - } - } - - param_clean_start { - desc { - en: """Match the client's clean start flag""" - zh: """匹配客户端 `clean_start` 标记""" - } - } - - param_like_clientid { - desc { - en: """Use sub-string to match client's ID""" - zh: """子串匹配客户端 ID""" - } - } - - param_like_username { - desc { - en: """Use sub-string to match client's username""" - zh: """子串匹配 客户端 Username""" - } - } - - param_gte_created_at { - desc { - en: """Match the session 
created datetime greater than a certain value""" - zh: """匹配会话创建时间大于等于指定值的客户端""" - } - } - - param_lte_created_at { - desc { - en: """Match the session created datetime less than a certain value""" - zh: """匹配会话创建时间小于等于指定值的客户端""" - } - } - - param_gte_connected_at{ - desc { - en: """Match the client socket connected datetime greater than a certain value""" - zh: """匹配连接创建时间大于等于指定值的客户端""" - } - } - - param_lte_connected_at { - desc { - en: """Match the client socket connected datatime less than a certain value""" - zh: """匹配连接创建时间小于等于指定值的客户端""" - } - } - - param_endpoint_name { - desc { - en: """Match the lwm2m client's endpoint name""" - zh: """匹配 LwM2M 客户端 Endpoint Name""" - } - } - - param_like_endpoint_name { - desc { - en: """Use sub-string to match lwm2m client's endpoint name""" - zh: """子串匹配 LwM2M 客户端 Endpoint Name""" - } - } - - param_gte_lifetime { - desc { - en: """Match the lwm2m client registered lifetime greater than a certain value""" - zh: """匹配心跳时间大于等于指定值的 LwM2M 客户端""" - } - } - - param_lte_lifetime { - desc { - en: """Match the lwm2m client registered lifetime less than a certain value""" - zh: """匹配心跳时间小于等于指定值的 LwM2M 客户端""" - } - } - - clientid { - desc { - en: """Client ID""" - zh: """客户端 ID""" - } - } - - topic { - desc { - en: """Topic Filter/Name""" - zh: """主题过滤器或主题名称""" - } - } - - endpoint_name { - desc { - en: """The LwM2M client endpoint name""" - zh: """LwM2M 客户端 Endpoint Name""" - } - } - - lifetime { - desc { - en: """LwM2M Life time""" - zh: """LwM2M 客户端心跳周期""" - } - } - - qos { - desc { - en: """QoS level, enum: 0, 1, 2""" - zh: """QoS 等级,枚举:0,1,2""" - } - } - - nl { - desc { - en: """No Local option, enum: 0, 1""" - zh: """No Local 选项,枚举:0,1""" - } - } - - rap { - desc { - en: """Retain as Published option, enum: 0, 1""" - zh: """Retain as Published 选项,枚举:0,1""" - } - } - - rh { - desc { - en: """Retain Handling option, enum: 0, 1, 2""" - zh: """Retain Handling 选项,枚举:0,1,2""" - } - } - - sub_props { - desc { - en: """Subscription properties""" - zh: """订阅属性""" - } - } - - subid { - desc { - en: """Only stomp protocol, a unique identity for the subscription. 
range: 1-65535.""" - zh: """订阅ID,仅用于 Stomp 网关。用于创建订阅关系时指定订阅 ID。取值范围 1-65535。""" - } - } - - node { - desc { - en: """Name of the node to which the client is connected""" - zh: """客户端连接到的节点名称""" - } - } - - username { - desc { - en: """Username of client when connecting""" - zh: """客户端连接的用户名""" - } - } - - mountpoint { - desc { - en: """Topic mountpoint""" - zh: """主题固定前缀""" - } - } - - proto_name { - desc { - en: """Client protocol name""" - zh: """客户端使用的协议名称""" - } - } - - proto_ver { - desc { - en: """Protocol version used by the client""" - zh: """客户端使用的协议版本""" - } - } - - ip_address { - desc { - en: """Client's IP address""" - zh: """客户端 IP 地址""" - } - } - - port { - desc { - en: """Client's port""" - zh: """客户端端口""" - } - } - - is_bridge { - desc { - en: """Indicates whether the client is connected via bridge""" - zh: """标识客户端是否通过 is_bridge 标志连接""" - } - } - - connected_at { - desc { - en: """Client connection time""" - zh: """客户端连接时间""" - } - } - - disconnected_at { - desc { - en: """Client offline time, This field is only valid and returned when connected is false""" - zh: """客户端连接断开时间""" - } - } - - connected { - desc { - en: """Whether the client is connected""" - zh: """标识客户端是否已连接到网关""" - } - } - - keepalive { - desc { - en: """Keepalive time, with the unit of second""" - zh: """Keepalive 时间,单位:秒""" - } - } - - clean_start { - desc { - en: """Indicate whether the client is using a brand new session""" - zh: """标识客户端是否以 clean_start 的标志连接到网关""" - } - } - - expiry_interval { - desc { - en: """Session expiration interval, with the unit of second""" - zh: """会话超期时间,单位:秒""" - } - } - - created_at { - desc { - en: """Session creation time""" - zh: """会话创建时间""" - } - } - - subscriptions_cnt { - desc { - en: """Number of subscriptions established by this client""" - zh: """客户端已订阅主题数""" - } - } - - subscriptions_max { - desc { - en: """Maximum number of subscriptions allowed by this client""" - zh: """客户端允许订阅的最大主题数""" - } - } - - inflight_cnt { - desc { - en: """Current length of inflight""" - zh: """客户端当前飞行窗口大小""" - } - } - - inflight_max { - desc { - en: """Maximum length of inflight""" - zh: """客户端允许的飞行窗口最大值""" - } - } - - mqueue_len { - desc { - en: """Current length of message queue""" - zh: """客户端当前消息队列长度""" - } - } - - mqueue_max { - desc { - en: """Maximum length of message queue""" - zh: """客户端允许的最大消息队列长度""" - } - } - - mqueue_dropped { - desc { - en: """Number of messages dropped by the message queue due to exceeding the length""" - zh: """由于消息队列过程,客户端消息队列丢弃消息条数""" - } - } - - awaiting_rel_cnt { - desc { - en: """Number of awaiting acknowledge packet""" - zh: """客户端当前等待 PUBREL 确认的 PUBREC 消息的条数""" - } - } - - awaiting_rel_max { - desc { - en: """Maximum allowed number of awaiting PUBREC packet""" - zh: """客户端允许的最大 PUBREC 等待队列长度""" - } - } - - recv_oct { - desc { - en: """Number of bytes received""" - zh: """已接收的字节数""" - } - } - - recv_cnt { - desc { - en: """Number of socket packets received""" - zh: """已接收 Socket 报文次数""" - } - } - - recv_pkt { - desc { - en: """Number of protocol packets received""" - zh: """已接收应用层协议控制报文数""" - } - } - - recv_msg { - desc { - en: """Number of message packets received""" - zh: """已接收上行的消息条数""" - } - } - - send_oct { - desc { - en: """Number of bytes sent""" - zh: """已发送字节数""" - } - } - - send_cnt { - desc { - en: """Number of socket packets sent""" - zh: """已发送 Socket 报文次数""" - } - } - - send_pkt { - desc { - en: """Number of protocol packets sent""" - zh: """已发送应用层协议控制报文数""" - } - } - - send_msg { - desc { - en: """Number of message packets 
sent""" - zh: """已发送下行消息数条数""" - } - } - - mailbox_len { - desc { - en: """Process mailbox size""" - zh: """进程邮箱大小""" - } - } - - heap_size { - desc { - en: """Process heap size with the unit of byte""" - zh: """进程堆内存大小,单位:字节""" - } - } - - reductions { - desc { - en: """Erlang reduction""" - zh: """进程已消耗 Reduction 数""" - } - } -} diff --git a/apps/emqx_gateway/i18n/emqx_gateway_api_i18n.conf b/apps/emqx_gateway/i18n/emqx_gateway_api_i18n.conf deleted file mode 100644 index 197e6a5ed..000000000 --- a/apps/emqx_gateway/i18n/emqx_gateway_api_i18n.conf +++ /dev/null @@ -1,168 +0,0 @@ -emqx_gateway_api { - - list_gateway { - desc { - en: """This API returns an overview info for the specified or all gateways. -including current running status, number of connections, listener status, etc.""" - zh: """该接口会返回指定或所有网关的概览状态, -包括当前状态、连接数、监听器状态等。""" - } - } - - enable_gateway { - desc { - en: """Enable a gateway by confs.""" - zh: """使用配置启动某一网关。""" - } - } - - get_gateway { - desc { - en: """Get the gateway configurations""" - zh: """获取网关配置详情""" - } - } - - delete_gateway { - desc { - en: """Unload the specified gateway""" - zh: """停用指定网关""" - } - } - - update_gateway { - desc { - en: """Update the gateway basic configurations and running status.
-Note: The Authentication and Listener configurations should be updated by other special APIs. """ - zh: """更新指定网关的基础配置、和启用的状态。
-注:认证、和监听器的配置更新需参考对应的 API 接口。""" - } - } - - gateway_name { - desc { - en: """Gateway Name""" - zh: """网关名称""" - } - } - - gateway_name_in_qs { - desc { - en: """Gateway Name.
-It's enum with `stomp`, `mqttsn`, `coap`, `lwm2m`, `exproto` -""" - zh: """网关名称.
-可取值为 `stomp`、`mqttsn`、`coap`、`lwm2m`、`exproto` -""" - } - } - - gateway_enable_in_path { - desc { - en: """Whether or not gateway is enabled""" - - zh: """是否开启此网关""" - } - } - - gateway_status { - desc { - en: """Gateway status""" - zh: """网关启用状态""" - } - } - - gateway_status_in_qs { - desc { - en: """Filter gateways by status.
-It is enum with `running`, `stopped`, `unloaded`""" - zh: """通过网关状态筛选
-可选值为 `running`、`stopped`、`unloaded`""" - } - } - - gateway_created_at { - desc { - en: """The Gateway created datetime""" - zh: """网关创建时间""" - } - } - - gateway_started_at { - desc { - en: """The Gateway started datetime""" - zh: """网关启用时间""" - } - } - - gateway_stopped_at { - desc { - en: """The Gateway stopped datetime""" - zh: """网关停用时间""" - } - } - - gateway_max_connections { - desc { - en: """The Gateway allowed maximum connections/clients""" - zh: """最大连接数""" - } - } - - gateway_current_connections { - desc { - en: """The Gateway current connected connections/clients""" - zh: """当前连接数""" - } - } - - gateway_listeners { - desc { - en: """The Gateway listeners overview""" - zh: """网关监听器列表""" - } - } - - gateway_listener_id { - desc { - en: """Listener ID""" - zh: """监听器 ID""" - } - } - - gateway_listener_name { - desc { - en: """Listener Name""" - zh: """监听器名称""" - } - } - - gateway_listener_running { - desc { - en: """Listener Running status""" - zh: """监听器运行状态""" - } - } - - gateway_listener_type { - desc { - en: """Listener Type""" - zh: """监听器类型""" - } - } - - gateway_node_status { - desc { - en: """The status of the gateway on each node in the cluster""" - zh: """网关在集群中每个节点上的状态""" - } - } - - node { - desc { - en: """Node Name""" - zh: """节点名称""" - } - } - -} diff --git a/apps/emqx_gateway/i18n/emqx_gateway_api_listeners_i18n.conf b/apps/emqx_gateway/i18n/emqx_gateway_api_listeners_i18n.conf deleted file mode 100644 index dc14a7e01..000000000 --- a/apps/emqx_gateway/i18n/emqx_gateway_api_listeners_i18n.conf +++ /dev/null @@ -1,146 +0,0 @@ -emqx_gateway_api_listeners { - - list_listeners { - desc { - en: """Gets a list of gateway listeners. This interface returns all the configs of the listener (including the authenticator on that listener), as well as the status of that listener running in the cluster.""" - zh: """获取网关监听器列表。该接口会返回监听器所有的配置(包括该监听器上的认证器),同时也会返回该监听器在集群中运行的状态。""" - } - } - - add_listener { - desc { - en: """Create the gateway listener.
-Note: For listener types not supported by a gateway, this API returns `400: BAD_REQUEST`.""" - zh: """为指定网关添加监听器。
-注:对于某网关不支持的监听器类型,该接口会返回 `400: BAD_REQUEST`。""" - } - } - - get_listener { - desc { - en: """Get the gateway listener configs""" - zh: """获取指定网关监听器的配置。""" - } - } - - delete_listener { - desc { - en: """Delete the gateway listener. All connected clients under the deleted listener will be disconnected.""" - zh: """删除指定监听器。被删除的监听器下所有已连接的客户端都会离线。""" - } - } - - update_listener { - desc { - en: """Update the gateway listener. The listener being updated performs a restart and all clients connected to that listener will be disconnected.""" - zh: """更新某网关监听器的配置。被更新的监听器会执行重启,所有已连接到该监听器上的客户端都会被断开。""" - } - } - - get_listener_authn { - desc { - en: """Get the listener's authenticator configs.""" - zh: """获取监听器的认证器配置。""" - } - } - - add_listener_authn { - desc { - en: """Enable authenticator for specified listener for client authentication.
-When authenticator is enabled for a listener, all clients connecting to that listener will use that authenticator for authentication.""" - zh: """为指定监听器开启认证器以实现客户端认证的能力。
-当某一监听器开启认证后,所有连接到该监听器的客户端会使用该认证器进行认证。""" - } - } - - update_listener_authn { - desc { - en: """Update authenticator configs for the listener, or disable/enable it.""" - zh: """更新指定监听器的认证器配置,或停用/启用该认证器。""" - } - } - - delete_listener_authn { - desc { - en: """Remove authenticator for the listener.""" - zh: """移除指定监听器的认证器。""" - } - } - - list_users { - desc { - en: """Get the users for the authenticator (only supported by built_in_database)""" - zh: """获取用户列表(仅支持 built_in_database 类型的认证器)""" - } - } - - add_user { - desc { - en: """Add user for the authenticator (only supports built_in_database)""" - zh: """添加用户(仅支持 built_in_database 类型的认证器)""" - } - } - - get_user { - desc { - en: """Get user info from the gateway authenticator (only supports built_in_database)""" - zh: """获取用户信息(仅支持 built_in_database 类型的认证器)""" - } - } - - update_user { - desc { - en: """Update the user info for the gateway authenticator (only supports built_in_database)""" - zh: """更新用户信息(仅支持 built_in_database 类型的认证器)""" - } - } - - delete_user { - desc { - en: """Delete the user for the gateway authenticator (only supports built_in_database)""" - zh: """删除用户(仅支持 built_in_database 类型的认证器)""" - } - } - - import_users { - desc { - en: """Import users into the gateway authenticator (only supports built_in_database)""" - zh: """导入用户(仅支持 built_in_database 类型的认证器)""" - } - } - - listener_id { - desc { - en: """Listener ID""" - zh: """监听器 ID""" - } - } - - listener_status { - desc { - en: """listener status """ - zh: """监听器状态""" - } - } - - listener_node_status { - desc { - en: """listener status of each node in the cluster""" - zh: """监听器在集群中每个节点上的状态""" - } - } - - node { - desc { - en: """Node Name""" - zh: """节点名称""" - } - } - - current_connections { - desc { - en: """Current Connections""" - zh: """当前连接数""" - } - } -} diff --git a/apps/emqx_gateway/i18n/emqx_gateway_schema_i18n.conf b/apps/emqx_gateway/i18n/emqx_gateway_schema_i18n.conf deleted file mode 100644 index a05fec5c4..000000000 --- a/apps/emqx_gateway/i18n/emqx_gateway_schema_i18n.conf +++ /dev/null @@ -1,628 +0,0 @@ -emqx_gateway_schema { - - stomp { - desc { - en: """The Stomp Gateway configuration. -This gateway supports v1.2/1.1/1.0""" - zh: """Stomp 网关配置。当前实现支持 v1.2/1.1/1.0 协议版本""" - } - } - - stom_frame_max_headers { - desc { - en: """The maximum number of Header""" - zh: """允许的 Header 最大数量""" - } - } - - stomp_frame_max_headers_length { - desc { - en: """The maximum string length of the Header Value""" - zh: """允许的 Header 字符串的最大长度""" - } - } - - stom_frame_max_body_length { - desc { - en: """Maximum number of bytes of Body allowed per Stomp packet""" - zh: """允许的 Stomp 报文 Body 的最大字节数""" - } - } - - mqttsn { - desc { - en: """The MQTT-SN Gateway configuration. -This gateway only supports the v1.2 protocol""" - zh: """MQTT-SN 网关配置。当前实现仅支持 v1.2 版本""" - } - } - - mqttsn_gateway_id { - desc { - en: """MQTT-SN Gateway ID. -When the broadcast option is enabled, the gateway will broadcast ADVERTISE message with this value""" - zh: """MQTT-SN 网关 ID。 -当 broadcast 打开时,MQTT-SN 网关会使用该 ID 来广播 ADVERTISE 消息""" - } - } - - mqttsn_broadcast { - desc { - en: """Whether to periodically broadcast ADVERTISE messages""" - zh: """是否周期性广播 ADVERTISE 消息 """ - } - } - - mqttsn_enable_qos3 { - desc { - en: """Allows connectionless clients to publish messages with a Qos of -1. -This feature is defined for very simple client implementations which do not support any other features except this one. There is no connection setup nor tear down, no registration nor subscription. 
The client just sends its 'PUBLISH' messages to a GW""" - zh: """是否允许无连接的客户端发送 QoS 等于 -1 的消息。 -该功能主要用于支持轻量的 MQTT-SN 客户端实现,它不会向网关建立连接,注册主题,也不会发起订阅;它只使用 QoS 为 -1 来发布消息""" - } - } - - mqttsn_subs_resume { - desc { - en: """Whether to initiate all subscribed topic name registration messages to the client after the Session has been taken over by a new channel""" - zh: """在会话被重用后,网关是否主动向客户端注册对已订阅主题名称""" - } - } - - mqttsn_predefined { - desc { - en: """The pre-defined topic IDs and topic names. -A 'pre-defined' topic ID is a topic ID whose mapping to a topic name is known in advance by both the client's application and the gateway""" - zh: """预定义主题列表。 -预定义的主题列表,是一组 主题 ID 和 主题名称 的映射关系。使用预先定义的主题列表,可以减少 MQTT-SN 客户端和网关对于固定主题的注册请求""" - } - } - - mqttsn_predefined_id { - desc { - en: """Topic ID. Range: 1-65535""" - zh: """主题 ID。范围:1-65535 """ - } - } - - mqttsn_predefined_topic { - desc { - en: """Topic Name""" - zh: """主题名称。注:不支持通配符""" - } - } - - coap { - desc { - en: """The CoAP Gateway configuration. -This gateway is implemented based on RFC-7252 and https://core-wg.github.io/coap-pubsub/draft-ietf-core-pubsub.html""" - zh: """CoAP 网关配置。 -该网关的实现基于 RFC-7252 和 https://core-wg.github.io/coap-pubsub/draft-ietf-core-pubsub.html""" - } - } - - coap_heartbeat { - desc { - en: """The gateway server required minimum heartbeat interval. -When connection mode is enabled, this parameter is used to set the minimum heartbeat interval for the connection to be alive""" - zh: """CoAP 网关要求客户端的最小心跳间隔时间。 -当 connection_required 开启后,该参数用于检查客户端连接是否存活""" - } - } - - coap_connection_required { - desc { - en: """Enable or disable connection mode. -Connection mode is a feature of non-standard protocols. When connection mode is enabled, it is necessary to maintain the creation, authentication and alive of connection resources""" - zh: """是否开启连接模式。 -连接模式是非标准协议的功能。它维护 CoAP 客户端上线、认证、和连接状态的保持""" - } - } - - coap_notify_type { - desc { - en: """The Notification Message will be delivered to the CoAP client if a new message received on an observed topic. -The type of delivered coap message can be set to: - - non: Non-confirmable; - - con: Confirmable; - - qos: Mapping from QoS type of received message, QoS0 -> non, QoS1,2 -> con -""" - zh: """投递给 CoAP 客户端的通知消息类型。当客户端 Observe 一个资源(或订阅某个主题)时,网关会向客户端推送新产生的消息。其消息类型可设置为: - - non: 不需要客户端返回确认消息; - - con: 需要客户端返回一个确认消息; - - qos: 取决于消息的 QoS 等级; QoS 0 会以 `non` 类型下发,QoS 1/2 会以 `con` 类型下发 -""" - } - } - - coap_subscribe_qos { - desc { - en: """The Default QoS Level indicator for subscribe request. -This option specifies the QoS level for the CoAP Client when establishing a subscription membership, if the subscribe request is not carried `qos` option. The indicator can be set to: - - qos0, qos1, qos2: Fixed default QoS level - - coap: Dynamic QoS level by the message type of subscribe request - * qos0: If the subscribe request is non-confirmable - * qos1: If the subscribe request is confirmable -""" - zh: """客户端订阅请求的默认 QoS 等级。 -当 CoAP 客户端发起订阅请求时,如果未携带 `qos` 参数则会使用该默认值。默认值可设置为: - - qos0、 qos1、qos2: 设置为固定的 QoS 等级 - - coap: 依据订阅操作的 CoAP 报文类型来动态决定 - * 当订阅请求为 `non-confirmable` 类型时,取值为 qos0 - * 当订阅请求为 `confirmable` 类型时,取值为 qos1 -""" - } - } - - coap_publish_qos { - desc { - en: """The Default QoS Level indicator for publish request. -This option specifies the QoS level for the CoAP Client when publishing a message to EMQX PUB/SUB system, if the publish request is not carried `qos` option. 
The indicator can be set to: - - qos0, qos1, qos2: Fixed default QoS level - - coap: Dynamic QoS level by the message type of publish request - * qos0: If the publish request is non-confirmable - * qos1: If the publish request is confirmable""" - - zh: """客户端发布请求的默认 QoS 等级。 -当 CoAP 客户端发起发布请求时,如果未携带 `qos` 参数则会使用该默认值。默认值可设置为: - - qos0、qos1、qos2: 设置为固定的 QoS 等级 - - coap: 依据发布操作的 CoAP 报文类型来动态决定 - * 当发布请求为 `non-confirmable` 类型时,取值为 qos0 - * 当发布请求为 `confirmable` 类型时,取值为 qos1 - """ - } - } - - lwm2m { - desc { - en: """The LwM2M Gateway configuration. This gateway only supports the v1.0.1 protocol.""" - zh: """LwM2M 网关配置。仅支持 v1.0.1 协议。""" - } - } - - lwm2m_xml_dir { - desc { - en: """The Directory for LwM2M Resource definition.""" - zh: """LwM2M Resource 定义的 XML 文件目录路径。""" - } - } - - lwm2m_lifetime_min { - desc { - en: """Minimum value of lifetime allowed to be set by the LwM2M client.""" - zh: """允许 LwM2M 客户端允许设置的心跳最小值。""" - } - } - - lwm2m_lifetime_max { - desc { - en: """Maximum value of lifetime allowed to be set by the LwM2M client.""" - zh: """允许 LwM2M 客户端允许设置的心跳最大值。""" - } - } - - lwm2m_qmode_time_window { - desc { - en: """The value of the time window during which the network link is considered valid by the LwM2M Gateway in QMode mode. -For example, after receiving an update message from a client, any messages within this time window are sent directly to the LwM2M client, and all messages beyond this time window are temporarily stored in memory.""" - - zh: """在QMode模式下,LwM2M网关认为网络链接有效的时间窗口的值。 -例如,在收到客户端的更新信息后,在这个时间窗口内的任何信息都会直接发送到LwM2M客户端,而超过这个时间窗口的所有信息都会暂时储存在内存中。""" - } - } - - lwm2m_auto_observe { - desc { - en: """Automatically observe the object list of REGISTER packet.""" - zh: """自动 Observe REGISTER 数据包的 Object 列表。""" - } - } - - lwm2m_update_msg_publish_condition { - desc { - en: """Policy for publishing UPDATE event message. - - always: send update events as long as the UPDATE request is received. - - contains_object_list: send update events only if the UPDATE request carries any Object List -""" - zh: """发布UPDATE事件消息的策略。 - - always: 只要收到 UPDATE 请求,就发送更新事件。 - - contains_object_list: 仅当 UPDATE 请求携带 Object 列表时才发送更新事件。 -""" - } - } - - lwm2m_translators { - desc { - en: """Topic configuration for LwM2M's gateway publishing and subscription.""" - zh: """LwM2M 网关订阅/发布消息的主题映射配置。""" - } - } - - lwm2m_translators_command { - desc { - en: """The topic for receiving downstream commands. -For each new LwM2M client that succeeds in going online, the gateway creates a subscription relationship to receive downstream commands and send it to the LwM2M client""" - - zh: """下行命令主题。 -对于每个成功上线的新 LwM2M 客户端,网关会创建一个订阅关系来接收下行消息并将其发送给客户端。""" - } - } - - lwm2m_translators_response { - desc { - en: """The topic for gateway to publish the acknowledge events from LwM2M client""" - zh: """用于网关发布来自 LwM2M 客户端的确认事件的主题。""" - } - } - - lwm2m_translators_notify { - desc { - en: """The topic for gateway to publish the notify events from LwM2M client. 
-After succeed observe a resource of LwM2M client, Gateway will send the notify events via this topic, if the client reports any resource changes""" - - zh: """用于发布来自 LwM2M 客户端的通知事件的主题。 -在成功 Observe 到 LwM2M 客户端的资源后,如果客户端报告任何资源状态的变化,网关将通过该主题发送通知事件。""" - } - } - - lwm2m_translators_register { - desc { - en: """The topic for gateway to publish the register events from LwM2M client.""" - zh: """用于发布来自 LwM2M 客户端的注册事件的主题。""" - } - } - - lwm2m_translators_update { - desc { - en: """The topic for gateway to publish the update events from LwM2M client""" - zh: """用于发布来自LwM2M客户端的更新事件的主题。""" - } - } - - translator { - desc { - en: """MQTT topic that corresponds to a particular type of event.""" - zh: """配置某网关客户端对于发布消息或订阅的主题和 QoS 等级。""" - } - } - - translator_topic { - desc { - en: """Topic Name""" - zh: """主题名称""" - } - } - - translator_qos { - desc { - en: """QoS Level""" - zh: """QoS 等级""" - } - } - - exproto { - desc { - en: """The Extension Protocol configuration""" - zh: """ExProto 网关""" - } - } - - exproto_server { - desc { - en: """Configurations for starting the ConnectionAdapter service""" - zh: """配置 ExProto 网关需要启动的 ConnectionAdapter 服务。 -该服务用于提供客户端的认证、发布、订阅和数据下行等功能。""" - } - } - - exproto_grpc_server_bind { - desc { - en: """Listening address and port for the gRPC server.""" - zh: """服务监听地址和端口。""" - } - } - - exproto_grpc_server_ssl { - desc { - en: """SSL configuration for the gRPC server.""" - zh: """服务 SSL 配置。""" - } - } - - exproto_handler { - desc { - en: """Configurations for request to ConnectionHandler service""" - zh: """配置 ExProto 网关需要请求的 ConnectionHandler 服务地址。 -该服务用于给 ExProto 提供客户端的 Socket 事件处理、字节解码、订阅消息接收等功能。""" - } - } - - exproto_grpc_handler_address { - desc { - en: """gRPC server address.""" - zh: """对端 gRPC 服务器地址。""" - } - } - - exproto_grpc_handler_ssl { - desc { - en: """SSL configuration for the gRPC client.""" - zh: """gRPC 客户端的 SSL 配置。""" - } - } - - gateway_common_enable { - desc { - en: """Whether to enable this gateway""" - zh: """是否启用该网关""" - } - } - - gateway_common_enable_stats { - desc { - en: """Whether to enable client process statistic""" - zh: """是否开启客户端统计""" - } - } - - gateway_common_idle_timeout { - desc { - en: """The idle time of the client connection process. It has two purposes: - 1. A newly created client process that does not receive any client requests after that time will be closed directly. - 2. A running client process that does not receive any client requests after this time will go into hibernation to save resources.""" - zh: """客户端连接过程的空闲时间。该配置用于: - 1. 一个新创建的客户端进程如果在该时间间隔内没有收到任何客户端请求,将被直接关闭。 - 2. 一个正在运行的客户进程如果在这段时间后没有收到任何客户请求,将进入休眠状态以节省资源。""" - } - } - - gateway_common_mountpoint { - desc { - en: """ """ - zh: """ """ - } - } - - gateway_common_clientinfo_override { - desc { - en: """ClientInfo override.""" - zh: """ClientInfo 重写。""" - } - } - - gateway_common_clientinfo_override_username { - desc { - en: """Template for overriding username.""" - zh: """username 重写模板""" - } - } - gateway_common_clientinfo_override_password { - desc { - en: """Template for overriding password.""" - zh: """password 重写模板""" - } - } - gateway_common_clientinfo_override_clientid { - desc { - en: """Template for overriding clientid.""" - zh: """clientid 重写模板""" - } - } - - gateway_common_authentication { - desc { - en: """Default authentication configs for all the gateway listeners. 
For per-listener overrides see authentication\n in listener configs""" - zh: """网关的认证器配置,对该网关下所以的监听器生效。如果每个监听器需要配置不同的认证器,需要配置监听器下的 authentication 字段。""" - } - } - - tcp_udp_listeners { - desc { - en: """Settings for the listeners.""" - zh: """监听器配置。""" - } - } - - tcp_listeners { - desc { - en: """Settings for the TCP listeners.""" - zh: """配置 TCP 类型的监听器。""" - } - } - - udp_listeners { - desc { - en: """Settings for the UDP listeners.""" - zh: """配置 UDP 类型的监听器。""" - } - } - - tcp_listener { - desc { - en: """ """ - zh: """ """ - } - } - - tcp_listener_acceptors { - desc { - en: """Size of the acceptor pool.""" - zh: """Acceptor 进程池大小。""" - } - } - - tcp_listener_tcp_opts{ - desc { - en: """Setting the TCP socket options.""" - zh: """TCP Socket 配置。""" - } - } - - tcp_listener_proxy_protocol { - desc { - en: """Enable the Proxy Protocol V1/2 if the EMQX cluster is deployed behind HAProxy or Nginx. -See: https://www.haproxy.com/blog/haproxy/proxy-protocol/""" - zh: """是否开启 Proxy Protocol V1/2。当 EMQX 集群部署在 HAProxy 或 Nginx 后需要获取客户端真实 IP 时常用到该选项。参考:https://www.haproxy.com/blog/haproxy/proxy-protocol/""" - } - } - - tcp_listener_proxy_protocol_timeout { - desc { - en: """Timeout for proxy protocol. -EMQX will close the TCP connection if proxy protocol packet is not received within the timeout.""" - zh: """接收 Proxy Protocol 报文头的超时时间。如果在超时内没有收到 Proxy Protocol 包,EMQX 将关闭 TCP 连接。""" - } - } - - ssl_listener { - desc { - en: """ """ - zh: """ """ - } - } - - ssl_listener_options { - desc { - en: """SSL Socket options.""" - zh: """SSL Socket 配置。""" - } - } - - udp_listener { - desc { - en: """ """ - zh: """ """ - } - } - - udp_listener_udp_opts { - desc { - en: """Settings for the UDP sockets.""" - zh: """UDP Socket 配置。""" - } - } - - udp_listener_active_n { - desc { - en: """Specify the {active, N} option for the socket. -See: https://erlang.org/doc/man/inet.html#setopts-2""" - zh: """为 Socket 指定 {active, N} 选项。 -参见:https://erlang.org/doc/man/inet.html#setopts-2""" - } - } - - udp_listener_recbuf { - desc { - en: """Size of the kernel-space receive buffer for the socket.""" - zh: """Socket 在内核空间接收缓冲区的大小。""" - } - } - - udp_listener_sndbuf { - desc { - en: """Size of the kernel-space send buffer for the socket.""" - zh: """Socket 在内核空间发送缓冲区的大小。""" - } - } - - udp_listener_buffer { - desc { - en: """Size of the user-space buffer for the socket.""" - zh: """Socket 在用户空间的缓冲区大小。""" - } - } - - udp_listener_reuseaddr { - desc { - en: """Allow local reuse of port numbers.""" - zh: """允许重用本地处于 TIME_WAIT 的端口号。""" - } - } - - dtls_listener { - desc { - en: """ """ - zh: """ """ - } - } - - dtls_listener_acceptors { - desc { - en: """Size of the acceptor pool.""" - zh: """Acceptor 进程池大小。""" - } - } - - dtls_listener_dtls_opts { - desc { - en: """DTLS socket options""" - zh: """DTLS Socket 配置""" - } - - } - - gateway_common_listener_enable { - desc { - en: """Enable the listener.""" - zh: """是否启用该监听器。""" - } - } - - gateway_common_listener_bind { - desc { - en: """The IP address and port that the listener will bind.""" - zh: """监听器绑定的 IP 地址或端口。""" - } - } - - gateway_common_listener_max_connections { - desc { - en: """Maximum number of concurrent connections.""" - zh: """监听器支持的最大连接数。""" - } - } - - gateway_common_listener_max_conn_rate { - desc { - en: """Maximum connections per second.""" - zh: """监听器支持的最大连接速率。""" - } - } - - gateway_common_listener_enable_authn { - desc { - en: """Set true (default) to enable client authentication on this listener. 
-When set to false clients will be allowed to connect without authentication.""" - zh: """配置 true (默认值)启用客户端进行身份认证。 -配置 false 时,将不对客户端做任何认证。""" - } - } - - gateway_common_listener_mountpoint { - desc { - en: """When publishing or subscribing, prefix all topics with a mountpoint string. -The prefixed string will be removed from the topic name when the message is delivered to the subscriber. The mountpoint is a way that users can use to implement isolation of message routing between different listeners. -For example if a client A subscribes to `t` with `listeners.tcp.\.mountpoint` set to `some_tenant`, then the client actually subscribes to the topic `some_tenant/t`. Similarly, if another client B (connected to the same listener as the client A) sends a message to topic `t`, the message is routed to all the clients subscribed `some_tenant/t`, so client A will receive the message, with topic name `t`. Set to `\"\"` to disable the feature. -Variables in mountpoint string: - - ${clientid}: clientid - - ${username}: username -""" - zh: """发布或订阅时,在所有主题前增加前缀字符串。 -当消息投递给订阅者时,前缀字符串将从主题名称中删除。挂载点是用户可以用来实现不同监听器之间的消息路由隔离的一种方式。 -例如,如果客户端 A 在 `listeners.tcp.\.mountpoint` 设置为 `some_tenant` 的情况下订阅 `t`,则客户端实际上订阅了 `some_tenant/t` 主题。 类似地,如果另一个客户端 B(连接到与客户端 A 相同的侦听器)向主题 `t` 发送消息,则该消息被路由到所有订阅了 `some_tenant/t` 的客户端,因此客户端 A 将收到该消息,带有 主题名称`t`。 设置为 `\"\"` 以禁用该功能。 -挂载点字符串中可用的变量: - - ${clientid}:clientid - - ${username}:用户名 -""" - } - } - - gateway_common_listener_access_rules { - desc { - en: """The access control rules for this listener. -See: https://github.com/emqtt/esockd#allowdeny""" - zh: """配置监听器的访问控制规则。 -见:https://github.com/emqtt/esockd#allowdeny""" - } - } -} diff --git a/apps/emqx_gateway/i18n/emqx_lwm2m_api_i18n.conf b/apps/emqx_gateway/i18n/emqx_lwm2m_api_i18n.conf deleted file mode 100644 index 9cd7e27c0..000000000 --- a/apps/emqx_gateway/i18n/emqx_lwm2m_api_i18n.conf +++ /dev/null @@ -1,58 +0,0 @@ -emqx_lwm2m_api { - - lookup_resource { - desc { - en: """Look up a resource""" - zh: """查看指定资源状态""" - } - } - - observe_resource { - desc { - en: """Observe or Cancel observe a resource""" - zh: """Observe/Un-Observe 指定资源""" - } - } - - read_resource { - desc { - en: """Send a read command to a resource""" - zh: """发送读指令到某资源""" - } - } - - write_resource { - desc { - en: """Send a write command to a resource""" - zh: """发送写指令到某资源""" - } - } - - operations { - desc { - en: """Resource Operations""" - zh: """资源可用操作列表""" - } - } - - dataType { - desc { - en: """Data Type""" - zh: """数据类型""" - } - } - - path { - desc { - en: """Resource Path""" - zh: """资源路径""" - } - } - - name { - desc { - en: """Resource Name""" - zh: """资源名称""" - } - } -} diff --git a/apps/emqx_gateway/include/emqx_gateway.hrl b/apps/emqx_gateway/include/emqx_gateway.hrl index 3466ecd98..c880aca26 100644 --- a/apps/emqx_gateway/include/emqx_gateway.hrl +++ b/apps/emqx_gateway/include/emqx_gateway.hrl @@ -37,4 +37,11 @@ config => emqx_config:config() }. +-type gateway_def() :: + #{ + name := gateway_name(), + callback_module := module(), + config_schema_module := module() + }. + -endif. diff --git a/apps/emqx_gateway/rebar.config b/apps/emqx_gateway/rebar.config index 272783758..2340a2dd8 100644 --- a/apps/emqx_gateway/rebar.config +++ b/apps/emqx_gateway/rebar.config @@ -1,38 +1,6 @@ %% -*- mode: erlang -*- - {erl_opts, [debug_info]}. {deps, [ - {emqx, {path, "../emqx"}} + {emqx, {path, "../emqx"}}, + {emqx_utils, {path, "../emqx_utils"}} ]}. 
- -{plugins, [ - {grpc_plugin, {git, "https://github.com/HJianBo/grpc_plugin", {tag, "v0.10.2"}}} -]}. - -{grpc, [ - {protos, ["src/exproto/protos"]}, - {out_dir, "src/exproto/"}, - {gpb_opts, [ - {module_name_prefix, "emqx_"}, - {module_name_suffix, "_pb"} - ]} -]}. - -{provider_hooks, [ - {pre, [ - {compile, {grpc, gen}}, - {clean, {grpc, clean}} - ]} -]}. - -{xref_ignores, [emqx_exproto_pb]}. - -{cover_excl_mods, [ - emqx_exproto_pb, - emqx_exproto_v_1_connection_adapter_client, - emqx_exproto_v_1_connection_adapter_bhvr, - emqx_exproto_v_1_connection_handler_client, - emqx_exproto_v_1_connection_handler_bhvr -]}. - -{project_plugins, [erlfmt]}. diff --git a/apps/emqx_gateway/src/bhvrs/emqx_gateway_conn.erl b/apps/emqx_gateway/src/bhvrs/emqx_gateway_conn.erl index 7f37061ac..4145a92a7 100644 --- a/apps/emqx_gateway/src/bhvrs/emqx_gateway_conn.erl +++ b/apps/emqx_gateway/src/bhvrs/emqx_gateway_conn.erl @@ -173,7 +173,7 @@ stats(#state{ end, ConnStats = emqx_pd:get_counters(?CONN_STATS), ChanStats = ChannMod:stats(Channel), - ProcStats = emqx_misc:proc_stats(), + ProcStats = emqx_utils:proc_stats(), lists:append([SockStats, ConnStats, ChanStats, ProcStats]). call(Pid, Req) -> @@ -297,7 +297,7 @@ init_state(WrappedSock, Peername, Options, FrameMod, ChannMod) -> StatsTimer = emqx_gateway_utils:stats_timer(Options), IdleTimeout = emqx_gateway_utils:idle_timeout(Options), OomPolicy = emqx_gateway_utils:oom_policy(Options), - IdleTimer = emqx_misc:start_timer(IdleTimeout, idle_timeout), + IdleTimer = emqx_utils:start_timer(IdleTimeout, idle_timeout), #state{ socket = WrappedSock, peername = Peername, @@ -327,7 +327,7 @@ run_loop( } ) -> emqx_logger:set_metadata_peername(esockd:format(Peername)), - _ = emqx_misc:tune_heap_size(OomPolicy), + _ = emqx_utils:tune_heap_size(OomPolicy), case activate_socket(State) of {ok, NState} -> hibernate(Parent, NState); @@ -383,14 +383,14 @@ wakeup_from_hib(Parent, State) -> %% Ensure/cancel stats timer ensure_stats_timer(Timeout, State = #state{stats_timer = undefined}) -> - State#state{stats_timer = emqx_misc:start_timer(Timeout, emit_stats)}; + State#state{stats_timer = emqx_utils:start_timer(Timeout, emit_stats)}; ensure_stats_timer(_Timeout, State) -> State. cancel_stats_timer(State = #state{stats_timer = TRef}) when is_reference(TRef) -> - ok = emqx_misc:cancel_timer(TRef), + ok = emqx_utils:cancel_timer(TRef), State#state{stats_timer = undefined}; cancel_stats_timer(State) -> State. @@ -471,7 +471,7 @@ handle_msg( State = #state{idle_timer = IdleTimer} ) -> IdleTimer /= undefined andalso - emqx_misc:cancel_timer(IdleTimer), + emqx_utils:cancel_timer(IdleTimer), NState = State#state{idle_timer = undefined}, handle_incoming(Packet, NState); handle_msg({outgoing, Data}, State) -> @@ -501,7 +501,7 @@ handle_msg( Deliver = {deliver, _Topic, _Msg}, State = #state{active_n = ActiveN} ) -> - Delivers = [Deliver | emqx_misc:drain_deliver(ActiveN)], + Delivers = [Deliver | emqx_utils:drain_deliver(ActiveN)], with_channel(handle_deliver, [Delivers], State); %% Something sent %% TODO: Who will deliver this message? @@ -904,7 +904,7 @@ handle_info(Info, State) -> %% msg => "reach_rate_limit", %% pause => Time %% }), -%% TRef = emqx_misc:start_timer(Time, limit_timeout), +%% TRef = emqx_utils:start_timer(Time, limit_timeout), %% State#state{ %% sockstate = blocked, %% limiter = Limiter1, @@ -928,7 +928,7 @@ run_gc(Stats, State = #state{gc_state = GcSt}) -> end. 
check_oom(State = #state{oom_policy = OomPolicy}) -> - case ?ENABLED(OomPolicy) andalso emqx_misc:check_oom(OomPolicy) of + case ?ENABLED(OomPolicy) andalso emqx_utils:check_oom(OomPolicy) of {shutdown, Reason} -> %% triggers terminate/2 callback immediately erlang:exit({shutdown, Reason}); diff --git a/apps/emqx_gateway/src/coap/README.md b/apps/emqx_gateway/src/coap/README.md deleted file mode 100644 index 045db529d..000000000 --- a/apps/emqx_gateway/src/coap/README.md +++ /dev/null @@ -1,443 +0,0 @@
-# Table of Contents
-
-1. [EMQX 5.0 CoAP Gateway](#org61e5bb8)
-    1. [Features](#orgeddbc94)
-        1. [PubSub Handler](#orgfc7be2d)
-        2. [MQTT Handler](#org55be508)
-        3. [Heartbeat](#org3d1a32e)
-        4. [Query String](#org9a6b996)
-    2. [Implementation](#org9985dfe)
-        1. [Request/Response flow](#orge94210c)
-    3. [Example](#ref_example)
-
-# EMQX 5.0 CoAP Gateway
-
-emqx-coap is a CoAP gateway for EMQX. It translates CoAP messages into MQTT messages and makes it possible to communicate between CoAP clients and MQTT clients.
-
-## Features
-
-- Partially implements [Publish-Subscribe Broker for the Constrained Application Protocol (CoAP)](https://datatracker.ietf.org/doc/html/draft-ietf-core-coap-pubsub-09) - we call this the ps handler; it includes the following functions:
-    - Publish
-    - Subscribe
-    - UnSubscribe
-- Long connection and authorization verification, called the MQTT handler
-
-### PubSub Handler
-
-1. Publish
-
-    Method: POST\
-    URI Schema: ps/{+topic}{?q\*}\
-    q\*: [Shared Options](#orgc50043b)\
-    Response:
-
-    - 2.04 "Changed" when success
-    - 4.00 "Bad Request" when error
-    - 4.01 "Unauthorized" when the auth info in the URI query is wrong
-
-2. Subscribe
-
-    Method: GET
-    Options:
-
-    - Observe = 0
-
-    URI Schema: ps/{+topic}{?q\*}\
-    q\*: see [Shared Options](#orgc50043b)\
-    Response:
-
-    - 2.05 "Content" when success
-    - 4.00 "Bad Request" when error
-    - 4.01 "Unauthorized" when the auth info in the URI query is wrong
-
-```
- Client1    Client2                                          Broker
-    |          | Subscribe                                      |
-    |          | ----- GET /ps/topic1 Observe:0 Token:XX ---->  |
-    |          |                                                |
-    |          | <---------- 2.05 Content Observe:10----------  |
-    |          |                                                |
-    |          |                                                |
-    |          | Publish                                        |
-    | ---------|----------- PUT /ps/topic1 "1033.3" -------->   |
-    |          | Notify                                         |
-    |          | <---------- 2.05 Content Observe:11 ---------  |
-    |          |                                                |
-```
-
-3. UnSubscribe
-
-    Method: GET
-    Options:
-
-    - Observe = 1
-
-    URI Schema: ps/{+topic}{?q\*}\
-    q\*: see [Shared Options](#orgc50043b)\
-    Response:
-
-    - 2.07 "No Content" when success
-    - 4.00 "Bad Request" when error
-    - 4.01 "Unauthorized" when the auth info in the URI query is wrong
-
-### MQTT Handler
-
-Establishing a connection is optional. If the CoAP client needs to use connection-based operations, it must first establish a connection; the connectionless mode and the connected mode cannot be mixed.
-In connection mode, the Publish/Subscribe/UnSubscribe requests sent by the client must carry a Token and a ClientId in the query string.
-If the Token or ClientId is wrong or missing, EMQX will reset the request.
-The communication token is the data carried in the response payload after the client successfully establishes a connection.
-After obtaining the token, the client's subsequent requests must attach "token=Token" to the query string.
-The ClientId is required when there is a connection; it is a unique identifier defined by the client.
-The server manages the client through the ClientId. If the ClientId is wrong, EMQX will reset the request.
-1. Create a Connection
-
-    Method: POST
-    URI Schema: mqtt/connection{?q\*}
-    q\*:
-
-    - clientid := client uid
-    - username
-    - password
-
-    Response:
-
-    - 2.01 "Created" when success
-    - 4.00 "Bad Request" when error
-    - 4.01 "Unauthorized" when the username or password is wrong
-
-    Payload: the Token, if successful
-
-2. Close a Connection
-
-    Method: DELETE
-    URI Schema: mqtt/connection{?q\*}
-    q\*:
-
-    - clientid := client uid
-    - token
-
-    Response:
-
-    - 2.02 "Deleted" when success
-    - 4.00 "Bad Request" when error
-    - 4.01 "Unauthorized" when the clientid or token is wrong
-
-### Heartbeat
-
-The CoAP client can maintain the "connection" with the server through heartbeats, regardless of whether it is authenticated or not, so that the server does not release the related resources.
-Method: PUT
-URI Schema: mqtt/connection{?q\*}
-q\*:
-
-- clientid if authenticated
-- token if authenticated
-
-Response:
-
-- 2.04 "Changed" when success
-- 4.00 "Bad Request" when error
-- 4.01 "Unauthorized" when the clientid or token is wrong
-
-### Query String
-
-The CoAP gateway uses some options in the query string to control the conversion between MQTT and CoAP.
-
-1. Shared Options
-
-    - clientid
-    - token
-
-2. Connect Options
-
-    - username
-    - password
-
-3. Publish
-
-    | Option | Type                    | Default                         |
-    |--------|-------------------------|---------------------------------|
-    | retain | boolean                 | false                           |
-    | qos    | MQTT QoS                | see *MQTT QoS <=> CoAP non/con* |
-    | expiry | Message Expiry Interval | 0 (never expires)               |
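    For instance, a retained publish with a 300-second message expiry could be expressed by appending these options to the query string. This is only an illustrative sketch: the topic, clientid, and payload are made up, and it assumes a connection token `X` obtained as in the Example section below:

    ```
    coap-client -m post -e "27.5" "coap://127.0.0.1/ps/sensors/temp?clientid=123&token=X&retain=true&qos=1&expiry=300"
    ```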
-4. Subscribe
    | Option | Type                           | Default                         |
    |--------|--------------------------------|---------------------------------|
    | qos    | MQTT QoS                       | see *MQTT QoS <=> CoAP non/con* |
    | nl     | MQTT Subscribe No Local        | 0                               |
    | rh     | MQTT Subscribe Retain Handling | 0                               |
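    As an illustration, a subscription asking the broker for QoS 1 delivery might look like the following (hypothetical topic; it assumes a connection token `X`, mirroring the coap-client examples in the Example section below):

    ```
    coap-client -m get -s 60 -O 6,0x00 -o - -T "obstoken" "coap://127.0.0.1/ps/sensors/temp?clientid=123&token=X&qos=1"
    ```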
-5. MQTT QoS <=> CoAP non/con
-
-    1. notif_type
-
-        Controls the type of notify messages when the observed object has changed. Can be:
-
-        - non
-        - con
-        - qos
-
-        With the value qos: MQTT QoS 0 -> non, QoS 1/QoS 2 -> con.
-
-    2. subscribe_qos
-
-        Controls the QoS of subscriptions. Can be:
-
-        - qos0
-        - qos1
-        - qos2
-        - coap
-
-        With the value coap: CoAP non -> qos0, con -> qos1.
-
-    3. publish_qos
-
-        Like subscribe_qos, but controls the QoS of the published MQTT message.
-
-## Implementation
-
-### Request/Response flow
-
-![img](./doc/flow.png)
-
-1. Authorization check
-
-    Checks whether the clientid and token in the query string match the current connection.
-
-2. Session
-
-    Manages the "Transport Manager", the "Observe Resources Manager", and the next message ID.
-
-3. Transport Manager
-
-    Manages "Transport" create/close/dispatch.
-
-4. Observe Resources Manager
-
-    Manages observed topics and tokens.
-
-5. Transport
-
-    ![img](./doc/transport.png)
-
-    1. Shared State
-
-        ![img](./doc/shared_state.png)
-
-6. Handler
-
-    1. pubsub
        | Method | Observe | Action                       |
        |--------|---------|------------------------------|
        | GET    | 0       | subscribe and reply result   |
        | GET    | 1       | unsubscribe and reply result |
        | POST   | X       | publish and reply result     |
    2. mqtt
        | Method | Action                          |
        |--------|---------------------------------|
        | PUT    | reply result                    |
        | POST   | return create connection action |
        | DELETE | return close connection action  |
-## Example
-
-1. Create Connection
-```
-coap-client -m post -e "" "coap://127.0.0.1/mqtt/connection?clientid=123&username=admin&password=public"
-```
-The server will return token **X** in the payload.
-
-2. Update Connection
-```
-coap-client -m put -e "" "coap://127.0.0.1/mqtt/connection?clientid=123&token=X"
-```
-
-3. Publish
-```
-coap-client -m post -e "Hello" "coap://127.0.0.1/ps/coap/test?clientid=123&username=admin&password=public"
-```
-If you want to publish with authentication, you must first establish a connection and then post the publish request on the same socket, so the libcoap client cannot simulate publishing with a token:
-
-```
-coap-client -m post -e "Hello" "coap://127.0.0.1/ps/coap/test?clientid=123&token=X"
-```
-
-4. Subscribe
-```
-coap-client -m get -s 60 -O 6,0x00 -o - -T "obstoken" "coap://127.0.0.1/ps/coap/test?clientid=123&username=admin&password=public"
-```
-**Or**
-
-```
-coap-client -m get -s 60 -O 6,0x00 -o - -T "obstoken" "coap://127.0.0.1/ps/coap/test?clientid=123&token=X"
-```
-5. Close Connection
-```
-coap-client -m delete -e "" "coap://127.0.0.1/mqtt/connection?clientid=123&token=X"
-```
 diff --git a/apps/emqx_gateway/src/emqx_gateway.app.src b/apps/emqx_gateway/src/emqx_gateway.app.src index 53403a67a..2ffca464d 100644 --- a/apps/emqx_gateway/src/emqx_gateway.app.src +++ b/apps/emqx_gateway/src/emqx_gateway.app.src @@ -1,10 +1,10 @@ %% -*- mode: erlang -*- {application, emqx_gateway, [ {description, "The Gateway management application"}, - {vsn, "0.1.10"}, + {vsn, "0.1.16"}, {registered, []}, {mod, {emqx_gateway_app, []}}, - {applications, [kernel, stdlib, grpc, emqx, emqx_authn]}, + {applications, [kernel, stdlib, emqx, emqx_authn, emqx_ctl]}, {env, []}, {modules, []}, {licenses, ["Apache 2.0"]}, diff --git a/apps/emqx_gateway/src/emqx_gateway_api.erl b/apps/emqx_gateway/src/emqx_gateway_api.erl index 1c43340e2..bc44daca8 100644 --- a/apps/emqx_gateway/src/emqx_gateway_api.erl +++ b/apps/emqx_gateway/src/emqx_gateway_api.erl @@ -180,7 +180,7 @@ schema("/gateways") -> #{ tags => ?TAGS, desc => ?DESC(list_gateway), - summary => <<"List All Gateways">>, + summary => <<"List all gateways">>, parameters => params_gateway_status_in_qs(), responses => #{ @@ -201,7 +201,7 @@ schema("/gateways/:name") -> #{ tags => ?TAGS, desc => ?DESC(get_gateway), - summary => <<"Get the Gateway">>, + summary => <<"Get gateway">>, parameters => params_gateway_name_in_path(), responses => #{ @@ -395,7 +395,7 @@ fields(Gw) when Gw == exproto -> [{name, mk(Gw, #{desc => ?DESC(gateway_name)})}] ++ - convert_listener_struct(emqx_gateway_schema:fields(Gw)); + convert_listener_struct(emqx_gateway_schema:gateway_schema(Gw)); fields(Gw) when Gw == update_stomp; Gw == update_mqttsn; @@ -405,7 +405,7 @@ fields(Gw) when -> "update_" ++ GwStr = atom_to_list(Gw), Gw1 = list_to_existing_atom(GwStr), - remove_listener_and_authn(emqx_gateway_schema:fields(Gw1)); + remove_listener_and_authn(emqx_gateway_schema:gateway_schema(Gw1)); fields(Listener) when Listener == tcp_listener; Listener == ssl_listener; @@ -608,7 +608,7 @@ examples_gateway_confs() -> #{ stomp_gateway => #{ - summary => <<"A simple STOMP gateway configs">>, + summary => <<"A simple STOMP gateway config">>, value => #{ enable => true, @@ -636,7 +636,7 @@ examples_gateway_confs() -> }, mqttsn_gateway => #{ - summary => <<"A simple MQTT-SN gateway configs">>, + summary => <<"A simple MQTT-SN gateway config">>, value => #{ enable => true, @@ -672,7 +672,7 @@ examples_gateway_confs() -> }, coap_gateway => #{ - summary => <<"A simple
CoAP gateway configs">>, + summary => <<"A simple CoAP gateway config">>, value => #{ enable => true, @@ -699,7 +699,7 @@ examples_gateway_confs() -> }, lwm2m_gateway => #{ - summary => <<"A simple LwM2M gateway configs">>, + summary => <<"A simple LwM2M gateway config">>, value => #{ enable => true, @@ -735,7 +735,7 @@ examples_gateway_confs() -> }, exproto_gateway => #{ - summary => <<"A simple ExProto gateway configs">>, + summary => <<"A simple ExProto gateway config">>, value => #{ enable => true, @@ -765,7 +765,7 @@ examples_update_gateway_confs() -> #{ stomp_gateway => #{ - summary => <<"A simple STOMP gateway configs">>, + summary => <<"A simple STOMP gateway config">>, value => #{ enable => true, @@ -782,7 +782,7 @@ examples_update_gateway_confs() -> }, mqttsn_gateway => #{ - summary => <<"A simple MQTT-SN gateway configs">>, + summary => <<"A simple MQTT-SN gateway config">>, value => #{ enable => true, @@ -803,7 +803,7 @@ examples_update_gateway_confs() -> }, coap_gateway => #{ - summary => <<"A simple CoAP gateway configs">>, + summary => <<"A simple CoAP gateway config">>, value => #{ enable => true, @@ -819,7 +819,7 @@ examples_update_gateway_confs() -> }, lwm2m_gateway => #{ - summary => <<"A simple LwM2M gateway configs">>, + summary => <<"A simple LwM2M gateway config">>, value => #{ enable => true, @@ -844,7 +844,7 @@ examples_update_gateway_confs() -> }, exproto_gateway => #{ - summary => <<"A simple ExProto gateway configs">>, + summary => <<"A simple ExProto gateway config">>, value => #{ enable => true, diff --git a/apps/emqx_gateway/src/emqx_gateway_api_authn.erl b/apps/emqx_gateway/src/emqx_gateway_api_authn.erl index f52b26cd2..41b1b11d5 100644 --- a/apps/emqx_gateway/src/emqx_gateway_api_authn.erl +++ b/apps/emqx_gateway/src/emqx_gateway_api_authn.erl @@ -185,13 +185,13 @@ schema("/gateways/:name/authentication") -> #{ tags => ?TAGS, desc => ?DESC(get_authn), - summary => <<"Get Authenticator Configuration">>, + summary => <<"Get authenticator configuration">>, parameters => params_gateway_name_in_path(), responses => ?STANDARD_RESP( #{ 200 => schema_authn(), - 204 => <<"Authenticator doesn't initiated">> + 204 => <<"Authenticator not initialized">> } ) }, @@ -199,7 +199,7 @@ schema("/gateways/:name/authentication") -> #{ tags => ?TAGS, desc => ?DESC(update_authn), - summary => <<"Update Authenticator Configuration">>, + summary => <<"Update authenticator configuration">>, parameters => params_gateway_name_in_path(), 'requestBody' => schema_authn(), responses => @@ -209,7 +209,7 @@ schema("/gateways/:name/authentication") -> #{ tags => ?TAGS, desc => ?DESC(add_authn), - summary => <<"Create an Authenticator for a Gateway">>, + summary => <<"Create authenticator for gateway">>, parameters => params_gateway_name_in_path(), 'requestBody' => schema_authn(), responses => @@ -219,7 +219,7 @@ schema("/gateways/:name/authentication") -> #{ tags => ?TAGS, desc => ?DESC(delete_authn), - summary => <<"Delete the Gateway Authenticator">>, + summary => <<"Delete gateway authenticator">>, parameters => params_gateway_name_in_path(), responses => ?STANDARD_RESP(#{204 => <<"Deleted">>}) @@ -232,7 +232,7 @@ schema("/gateways/:name/authentication/users") -> #{ tags => ?TAGS, desc => ?DESC(list_users), - summary => <<"List users for a Gateway Authenticator">>, + summary => <<"List users for gateway authenticator">>, parameters => params_gateway_name_in_path() ++ params_paging_in_qs() ++ params_fuzzy_in_qs(), @@ -250,7 +250,7 @@ schema("/gateways/:name/authentication/users") -> #{ 
tags => ?TAGS, desc => ?DESC(add_user), - summary => <<"Add User for a Gateway Authenticator">>, + summary => <<"Add user for gateway authenticator">>, parameters => params_gateway_name_in_path(), 'requestBody' => emqx_dashboard_swagger:schema_with_examples( ref(emqx_authn_api, request_user_create), @@ -274,7 +274,7 @@ schema("/gateways/:name/authentication/users/:uid") -> #{ tags => ?TAGS, desc => ?DESC(get_user), - summary => <<"Get User Info for a Gateway Authenticator">>, + summary => <<"Get user info for gateway authenticator">>, parameters => params_gateway_name_in_path() ++ params_userid_in_path(), responses => @@ -291,7 +291,7 @@ schema("/gateways/:name/authentication/users/:uid") -> #{ tags => ?TAGS, desc => ?DESC(update_user), - summary => <<"Update User Info for a Gateway Authenticator">>, + summary => <<"Update user info for gateway authenticator">>, parameters => params_gateway_name_in_path() ++ params_userid_in_path(), 'requestBody' => emqx_dashboard_swagger:schema_with_examples( @@ -312,7 +312,7 @@ schema("/gateways/:name/authentication/users/:uid") -> #{ tags => ?TAGS, desc => ?DESC(delete_user), - summary => <<"Delete User for a Gateway Authenticator">>, + summary => <<"Delete user for gateway authenticator">>, parameters => params_gateway_name_in_path() ++ params_userid_in_path(), responses => diff --git a/apps/emqx_gateway/src/emqx_gateway_api_authn_user_import.erl b/apps/emqx_gateway/src/emqx_gateway_api_authn_user_import.erl index 705fccf90..68f392923 100644 --- a/apps/emqx_gateway/src/emqx_gateway_api_authn_user_import.erl +++ b/apps/emqx_gateway/src/emqx_gateway_api_authn_user_import.erl @@ -126,7 +126,7 @@ schema("/gateways/:name/authentication/import_users") -> #{ tags => ?TAGS, desc => ?DESC(emqx_gateway_api_authn, import_users), - summary => <<"Import Users">>, + summary => <<"Import users">>, parameters => params_gateway_name_in_path(), 'requestBody' => emqx_dashboard_swagger:file_schema(filename), responses => @@ -140,7 +140,7 @@ schema("/gateways/:name/listeners/:id/authentication/import_users") -> #{ tags => ?TAGS, desc => ?DESC(emqx_gateway_api_listeners, import_users), - summary => <<"Import Users">>, + summary => <<"Import users">>, parameters => params_gateway_name_in_path() ++ params_listener_id_in_path(), 'requestBody' => emqx_dashboard_swagger:file_schema(filename), diff --git a/apps/emqx_gateway/src/emqx_gateway_api_clients.erl b/apps/emqx_gateway/src/emqx_gateway_api_clients.erl index ef1c4c386..cd387e3bb 100644 --- a/apps/emqx_gateway/src/emqx_gateway_api_clients.erl +++ b/apps/emqx_gateway/src/emqx_gateway_api_clients.erl @@ -19,7 +19,6 @@ -include("emqx_gateway_http.hrl"). -include_lib("typerefl/include/types.hrl"). -include_lib("hocon/include/hoconsc.hrl"). --include_lib("emqx/include/emqx_placeholder.hrl"). -include_lib("emqx/include/logger.hrl"). -behaviour(minirest_api). 
@@ -116,7 +115,7 @@ clients(get, #{ fun ?MODULE:format_channel_info/2 ); Node0 -> - case emqx_misc:safe_to_existing_atom(Node0) of + case emqx_utils:safe_to_existing_atom(Node0) of {ok, Node1} -> QStringWithoutNode = maps:without([<<"node">>], QString), emqx_mgmt_api:node_query( @@ -461,10 +460,15 @@ schema("/gateways/:name/clients") -> #{ tags => ?TAGS, desc => ?DESC(list_clients), - summary => <<"List Gateway's Clients">>, + summary => <<"List gateway's clients">>, parameters => params_client_query(), responses => - ?STANDARD_RESP(#{200 => schema_client_list()}) + ?STANDARD_RESP(#{ + 200 => [ + {data, schema_client_list()}, + {meta, mk(hoconsc:ref(emqx_dashboard_swagger, meta), #{})} + ] + }) } }; schema("/gateways/:name/clients/:clientid") -> @@ -474,7 +478,7 @@ schema("/gateways/:name/clients/:clientid") -> #{ tags => ?TAGS, desc => ?DESC(get_client), - summary => <<"Get Client Info">>, + summary => <<"Get client info">>, parameters => params_client_insta(), responses => ?STANDARD_RESP(#{200 => schema_client()}) @@ -483,7 +487,7 @@ schema("/gateways/:name/clients/:clientid") -> #{ tags => ?TAGS, desc => ?DESC(kick_client), - summary => <<"Kick out Client">>, + summary => <<"Kick out client">>, parameters => params_client_insta(), responses => ?STANDARD_RESP(#{204 => <<"Kicked">>}) @@ -496,7 +500,7 @@ schema("/gateways/:name/clients/:clientid/subscriptions") -> #{ tags => ?TAGS, desc => ?DESC(list_subscriptions), - summary => <<"List Client's Subscription">>, + summary => <<"List client's subscription">>, parameters => params_client_insta(), responses => ?STANDARD_RESP( @@ -512,7 +516,7 @@ schema("/gateways/:name/clients/:clientid/subscriptions") -> #{ tags => ?TAGS, desc => ?DESC(add_subscription), - summary => <<"Add Subscription for Client">>, + summary => <<"Add subscription for client">>, parameters => params_client_insta(), 'requestBody' => emqx_dashboard_swagger:schema_with_examples( ref(subscription), @@ -536,7 +540,7 @@ schema("/gateways/:name/clients/:clientid/subscriptions/:topic") -> #{ tags => ?TAGS, desc => ?DESC(delete_subscription), - summary => <<"Delete Client's Subscription">>, + summary => <<"Delete client's subscription">>, parameters => params_topic_name_in_path() ++ params_client_insta(), responses => ?STANDARD_RESP(#{204 => <<"Unsubscribed">>}) @@ -1016,12 +1020,12 @@ examples_client_list() -> #{ general_client_list => #{ - summary => <<"General Client List">>, + summary => <<"General client list">>, value => [example_general_client()] }, lwm2m_client_list => #{ - summary => <<"LwM2M Client List">>, + summary => <<"LwM2M client list">>, value => [example_lwm2m_client()] } }. @@ -1030,12 +1034,12 @@ examples_client() -> #{ general_client => #{ - summary => <<"General Client Info">>, + summary => <<"General client info">>, value => example_general_client() }, lwm2m_client => #{ - summary => <<"LwM2M Client Info">>, + summary => <<"LwM2M client info">>, value => example_lwm2m_client() } }. @@ -1044,12 +1048,12 @@ examples_subscription_list() -> #{ general_subscription_list => #{ - summary => <<"A General Subscription List">>, + summary => <<"A general subscription list">>, value => [example_general_subscription()] }, stomp_subscription_list => #{ - summary => <<"The Stomp Subscription List">>, + summary => <<"The STOMP subscription list">>, value => [example_stomp_subscription] } }. 
@@ -1058,12 +1062,12 @@ examples_subscription() -> #{ general_subscription => #{ - summary => <<"A General Subscription">>, + summary => <<"A general subscription">>, value => example_general_subscription() }, stomp_subscription => #{ - summary => <<"A Stomp Subscription">>, + summary => <<"A STOMP subscription">>, value => example_stomp_subscription() } }. diff --git a/apps/emqx_gateway/src/emqx_gateway_api_listeners.erl b/apps/emqx_gateway/src/emqx_gateway_api_listeners.erl index 1b4f2e0ac..d90bf3689 100644 --- a/apps/emqx_gateway/src/emqx_gateway_api_listeners.erl +++ b/apps/emqx_gateway/src/emqx_gateway_api_listeners.erl @@ -96,7 +96,7 @@ listeners(post, #{bindings := #{name := Name0}, body := LConf}) -> LName = binary_to_atom(maps:get(<<"name">>, LConf)), Path = [listeners, Type, LName], - case emqx_map_lib:deep_get(Path, RunningConf, undefined) of + case emqx_utils_maps:deep_get(Path, RunningConf, undefined) of undefined -> ListenerId = emqx_gateway_utils:listener_id( GwName, Type, LName @@ -283,7 +283,7 @@ get_cluster_listeners_info(GwName) -> ). listeners_cluster_status(Listeners) -> - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), case emqx_gateway_api_listeners_proto_v1:listeners_cluster_status(Nodes, Listeners) of {Results, []} -> Results; @@ -362,7 +362,7 @@ schema("/gateways/:name/listeners") -> #{ tags => ?TAGS, desc => ?DESC(list_listeners), - summary => <<"List All Listeners">>, + summary => <<"List all listeners">>, parameters => params_gateway_name_in_path(), responses => ?STANDARD_RESP( @@ -378,7 +378,7 @@ schema("/gateways/:name/listeners") -> #{ tags => ?TAGS, desc => ?DESC(add_listener), - summary => <<"Add a Listener">>, + summary => <<"Add listener">>, parameters => params_gateway_name_in_path(), %% XXX: How to distinguish the different listener supported by %% different types of gateways? 
@@ -404,7 +404,7 @@ schema("/gateways/:name/listeners/:id") -> #{ tags => ?TAGS, desc => ?DESC(get_listener), - summary => <<"Get the Listener Configs">>, + summary => <<"Get listener config">>, parameters => params_gateway_name_in_path() ++ params_listener_id_in_path(), responses => @@ -421,7 +421,7 @@ schema("/gateways/:name/listeners/:id") -> #{ tags => ?TAGS, desc => ?DESC(delete_listener), - summary => <<"Delete the Listener">>, + summary => <<"Delete listener">>, parameters => params_gateway_name_in_path() ++ params_listener_id_in_path(), responses => @@ -431,7 +431,7 @@ schema("/gateways/:name/listeners/:id") -> #{ tags => ?TAGS, desc => ?DESC(update_listener), - summary => <<"Update the Listener Configs">>, + summary => <<"Update listener config">>, parameters => params_gateway_name_in_path() ++ params_listener_id_in_path(), 'requestBody' => emqx_dashboard_swagger:schema_with_examples( @@ -456,7 +456,7 @@ schema("/gateways/:name/listeners/:id/authentication") -> #{ tags => ?TAGS, desc => ?DESC(get_listener_authn), - summary => <<"Get the Listener's Authenticator">>, + summary => <<"Get the listener's authenticator">>, parameters => params_gateway_name_in_path() ++ params_listener_id_in_path(), responses => @@ -471,7 +471,7 @@ schema("/gateways/:name/listeners/:id/authentication") -> #{ tags => ?TAGS, desc => ?DESC(add_listener_authn), - summary => <<"Create an Authenticator for a Listener">>, + summary => <<"Create authenticator for listener">>, parameters => params_gateway_name_in_path() ++ params_listener_id_in_path(), 'requestBody' => schema_authn(), @@ -482,7 +482,7 @@ schema("/gateways/:name/listeners/:id/authentication") -> #{ tags => ?TAGS, desc => ?DESC(update_listener_authn), - summary => <<"Update the Listener Authenticator configs">>, + summary => <<"Update config of authenticator for listener">>, parameters => params_gateway_name_in_path() ++ params_listener_id_in_path(), 'requestBody' => schema_authn(), @@ -493,7 +493,7 @@ schema("/gateways/:name/listeners/:id/authentication") -> #{ tags => ?TAGS, desc => ?DESC(delete_listener_authn), - summary => <<"Delete the Listener's Authenticator">>, + summary => <<"Delete the listener's authenticator">>, parameters => params_gateway_name_in_path() ++ params_listener_id_in_path(), responses => @@ -507,7 +507,7 @@ schema("/gateways/:name/listeners/:id/authentication/users") -> #{ tags => ?TAGS, desc => ?DESC(list_users), - summary => <<"List Authenticator's Users">>, + summary => <<"List authenticator's users">>, parameters => params_gateway_name_in_path() ++ params_listener_id_in_path() ++ params_paging_in_qs(), @@ -525,7 +525,7 @@ schema("/gateways/:name/listeners/:id/authentication/users") -> #{ tags => ?TAGS, desc => ?DESC(add_user), - summary => <<"Add User for an Authenticator">>, + summary => <<"Add user for an authenticator">>, parameters => params_gateway_name_in_path() ++ params_listener_id_in_path(), 'requestBody' => emqx_dashboard_swagger:schema_with_examples( @@ -550,7 +550,7 @@ schema("/gateways/:name/listeners/:id/authentication/users/:uid") -> #{ tags => ?TAGS, desc => ?DESC(get_user), - summary => <<"Get User Info">>, + summary => <<"Get user info">>, parameters => params_gateway_name_in_path() ++ params_listener_id_in_path() ++ params_userid_in_path(), @@ -568,7 +568,7 @@ schema("/gateways/:name/listeners/:id/authentication/users/:uid") -> #{ tags => ?TAGS, desc => ?DESC(update_user), - summary => <<"Update User Info">>, + summary => <<"Update user info">>, parameters => params_gateway_name_in_path() ++ 
params_listener_id_in_path() ++ params_userid_in_path(), @@ -590,7 +590,7 @@ schema("/gateways/:name/listeners/:id/authentication/users/:uid") -> #{ tags => ?TAGS, desc => ?DESC(delete_user), - summary => <<"Delete User">>, + summary => <<"Delete user">>, parameters => params_gateway_name_in_path() ++ params_listener_id_in_path() ++ params_userid_in_path(), @@ -712,7 +712,7 @@ examples_listener() -> #{ tcp_listener => #{ - summary => <<"A simple tcp listener example">>, + summary => <<"A simple TCP listener example">>, value => #{ name => <<"tcp-def">>, @@ -738,7 +738,7 @@ examples_listener() -> }, ssl_listener => #{ - summary => <<"A simple ssl listener example">>, + summary => <<"A simple SSL listener example">>, value => #{ name => <<"ssl-def">>, @@ -771,7 +771,7 @@ examples_listener() -> }, udp_listener => #{ - summary => <<"A simple udp listener example">>, + summary => <<"A simple UDP listener example">>, value => #{ name => <<"udp-def">>, @@ -789,7 +789,7 @@ examples_listener() -> }, dtls_listener => #{ - summary => <<"A simple dtls listener example">>, + summary => <<"A simple DTLS listener example">>, value => #{ name => <<"dtls-def">>, @@ -817,7 +817,7 @@ examples_listener() -> }, dtls_listener_with_psk_ciphers => #{ - summary => <<"A dtls listener with PSK example">>, + summary => <<"A DTLS listener with PSK example">>, value => #{ name => <<"dtls-psk">>, @@ -845,7 +845,7 @@ examples_listener() -> }, lisetner_with_authn => #{ - summary => <<"A tcp listener with authentication example">>, + summary => <<"A TCP listener with authentication example">>, value => #{ name => <<"tcp-with-authn">>, diff --git a/apps/emqx_gateway/src/emqx_gateway_app.erl b/apps/emqx_gateway/src/emqx_gateway_app.erl index cb5a16fde..f0406bcaa 100644 --- a/apps/emqx_gateway/src/emqx_gateway_app.erl +++ b/apps/emqx_gateway/src/emqx_gateway_app.erl @@ -41,35 +41,38 @@ stop(_State) -> %% Internal funcs load_default_gateway_applications() -> - Apps = gateway_type_searching(), - lists:foreach(fun reg/1, Apps). + lists:foreach( + fun(Def) -> + load_gateway_application(Def) + end, + emqx_gateway_utils:find_gateway_definitions() + ). -gateway_type_searching() -> - %% FIXME: Hardcoded apps - [ - emqx_stomp_impl, - emqx_sn_impl, - emqx_exproto_impl, - emqx_coap_impl, - emqx_lwm2m_impl - ]. - -reg(Mod) -> - try - Mod:reg(), - ?SLOG(debug, #{ - msg => "register_gateway_succeed", - callback_module => Mod - }) - catch - Class:Reason:Stk -> +load_gateway_application( + #{ + name := Name, + callback_module := CbMod, + config_schema_module := SchemaMod + } +) -> + RegistryOptions = [{cbkmod, CbMod}, {schema, SchemaMod}], + case emqx_gateway_registry:reg(Name, RegistryOptions) of + ok -> + ?SLOG(debug, #{ + msg => "register_gateway_succeed", + callback_module => CbMod + }); + {error, already_registered} -> ?SLOG(error, #{ - msg => "failed_to_register_gateway", - callback_module => Mod, - reason => {Class, Reason}, - stacktrace => Stk + msg => "gateway_already_registered", + name => Name, + callback_module => CbMod }) - end. + end; +load_gateway_application(_) -> + ?SLOG(error, #{ + msg => "invalid_gateway_definition" + }). load_gateway_by_default() -> load_gateway_by_default(confs()).
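The rewritten loader above no longer hardcodes the gateway modules; it registers whatever `emqx_gateway_utils:find_gateway_definitions/0` (added further below in this diff) discovers by scanning loaded applications for a `gateway` module attribute. As a rough, illustrative sketch (module and gateway names here are invented, not taken from this changeset), a gateway implementation could advertise itself like this:

```erlang
%% Hypothetical gateway implementation module; only the shape of the
%% `gateway' attribute matters. find_gateway_definitions/0 collects any
%% attribute value that is a map with these three keys, and
%% load_gateway_application/1 then registers it with the gateway registry.
-module(emqx_mygw_impl).

-gateway(#{
    name => mygw,
    callback_module => emqx_mygw_impl,
    config_schema_module => emqx_mygw_schema
}).
```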
diff --git a/apps/emqx_gateway/src/emqx_gateway_cli.erl b/apps/emqx_gateway/src/emqx_gateway_cli.erl index df808f295..fb4261065 100644 --- a/apps/emqx_gateway/src/emqx_gateway_cli.erl +++ b/apps/emqx_gateway/src/emqx_gateway_cli.erl @@ -74,7 +74,7 @@ gateway(["load", Name, Conf]) -> case emqx_gateway_conf:load_gateway( bin(Name), - emqx_json:decode(Conf, [return_maps]) + emqx_utils_json:decode(Conf, [return_maps]) ) of {ok, _} -> diff --git a/apps/emqx_gateway/src/emqx_gateway_cm.erl b/apps/emqx_gateway/src/emqx_gateway_cm.erl index 5cba1464a..837600811 100644 --- a/apps/emqx_gateway/src/emqx_gateway_cm.erl +++ b/apps/emqx_gateway/src/emqx_gateway_cm.erl @@ -214,7 +214,7 @@ get_chan_info(GwName, ClientId, ChanPid) -> -spec lookup_by_clientid(gateway_name(), emqx_types:clientid()) -> [pid()]. lookup_by_clientid(GwName, ClientId) -> - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), case emqx_gateway_cm_proto_v1:lookup_by_clientid( Nodes, GwName, ClientId @@ -587,24 +587,24 @@ request_stepdown(Action, ConnMod, Pid) -> catch % emqx_ws_connection: call _:noproc -> - ok = ?tp(debug, "session_already_gone", #{pid => Pid, action => Action}), + ok = ?tp(debug, "session_already_gone", #{stale_pid => Pid, action => Action}), {error, noproc}; % emqx_connection: gen_server:call _:{noproc, _} -> - ok = ?tp(debug, "session_already_gone", #{pid => Pid, action => Action}), + ok = ?tp(debug, "session_already_gone", #{stale_pid => Pid, action => Action}), {error, noproc}; _:Reason = {shutdown, _} -> - ok = ?tp(debug, "session_already_shutdown", #{pid => Pid, action => Action}), + ok = ?tp(debug, "session_already_shutdown", #{stale_pid => Pid, action => Action}), {error, Reason}; _:Reason = {{shutdown, _}, _} -> - ok = ?tp(debug, "session_already_shutdown", #{pid => Pid, action => Action}), + ok = ?tp(debug, "session_already_shutdown", #{stale_pid => Pid, action => Action}), {error, Reason}; _:{timeout, {gen_server, call, _}} -> ?tp( warning, "session_stepdown_request_timeout", #{ - pid => Pid, + stale_pid => Pid, action => Action, stale_channel => stale_channel_info(Pid) } @@ -616,7 +616,7 @@ request_stepdown(Action, ConnMod, Pid) -> error, "session_stepdown_request_exception", #{ - pid => Pid, + stale_pid => Pid, action => Action, reason => Error, stacktrace => St, @@ -766,9 +766,9 @@ init(Options) -> TabOpts = [public, {write_concurrency, true}], {ChanTab, ConnTab, InfoTab} = cmtabs(GwName), - ok = emqx_tables:new(ChanTab, [bag, {read_concurrency, true} | TabOpts]), - ok = emqx_tables:new(ConnTab, [bag | TabOpts]), - ok = emqx_tables:new(InfoTab, [set, compressed | TabOpts]), + ok = emqx_utils_ets:new(ChanTab, [bag, {read_concurrency, true} | TabOpts]), + ok = emqx_utils_ets:new(ConnTab, [bag | TabOpts]), + ok = emqx_utils_ets:new(InfoTab, [ordered_set, compressed | TabOpts]), %% Start link cm-registry process %% XXX: Should I hang it under a higher level supervisor? 
@@ -802,7 +802,7 @@ handle_info( {'DOWN', _MRef, process, Pid, _Reason}, State = #state{gwname = GwName, chan_pmon = PMon} ) -> - ChanPids = [Pid | emqx_misc:drain_down(?DEFAULT_BATCH_SIZE)], + ChanPids = [Pid | emqx_utils:drain_down(?DEFAULT_BATCH_SIZE)], {Items, PMon1} = emqx_pmon:erase_all(ChanPids, PMon), CmTabs = cmtabs(GwName), diff --git a/apps/emqx_gateway/src/emqx_gateway_conf.erl b/apps/emqx_gateway/src/emqx_gateway_conf.erl index 07a4c1e2c..da86d6a58 100644 --- a/apps/emqx_gateway/src/emqx_gateway_conf.erl +++ b/apps/emqx_gateway/src/emqx_gateway_conf.erl @@ -106,7 +106,7 @@ unconvert_listeners(Ls) when is_list(Ls) -> {[Type, Name], Lis1} = maps_key_take([<<"type">>, <<"name">>], Lis), _ = vaildate_listener_name(Name), NLis1 = maps:without([<<"id">>, <<"running">>], Lis1), - emqx_map_lib:deep_merge(Acc, #{Type => #{Name => NLis1}}) + emqx_utils_maps:deep_merge(Acc, #{Type => #{Name => NLis1}}) end, #{}, Ls @@ -160,8 +160,8 @@ gateway(GwName0) -> RawConf = emqx_config:fill_defaults( emqx_config:get_root_raw(Path) ), - Confs = emqx_map_lib:jsonable_map( - emqx_map_lib:deep_get(Path, RawConf) + Confs = emqx_utils_maps:jsonable_map( + emqx_utils_maps:deep_get(Path, RawConf) ), LsConf = maps:get(<<"listeners">>, Confs, #{}), Confs#{<<"listeners">> => convert_listeners(GwName, LsConf)}. @@ -198,8 +198,8 @@ listeners(GwName0) -> RawConf = emqx_config:fill_defaults( emqx_config:get_root_raw([<<"gateway">>]) ), - Listeners = emqx_map_lib:jsonable_map( - emqx_map_lib:deep_get( + Listeners = emqx_utils_maps:jsonable_map( + emqx_utils_maps:deep_get( [<<"gateway">>, GwName, <<"listeners">>], RawConf ) ), @@ -213,12 +213,12 @@ listener(ListenerId) -> ), try Path = [<<"gateway">>, GwName, <<"listeners">>, Type, LName], - LConf = emqx_map_lib:deep_get(Path, RootConf), + LConf = emqx_utils_maps:deep_get(Path, RootConf), Running = emqx_gateway_utils:is_running( binary_to_existing_atom(ListenerId), LConf ), {ok, - emqx_map_lib:jsonable_map( + emqx_utils_maps:jsonable_map( LConf#{ id => ListenerId, type => Type, @@ -305,8 +305,8 @@ ret_ok_err({ok, _}) -> ok; ret_ok_err(Err) -> Err. ret_gw(GwName, {ok, #{raw_config := GwConf}}) -> - GwConf1 = emqx_map_lib:deep_get([bin(GwName)], GwConf), - LsConf = emqx_map_lib:deep_get( + GwConf1 = emqx_utils_maps:deep_get([bin(GwName)], GwConf), + LsConf = emqx_utils_maps:deep_get( [bin(GwName), <<"listeners">>], GwConf, #{} @@ -331,7 +331,7 @@ ret_gw(_GwName, Err) -> Err. ret_authn(GwName, {ok, #{raw_config := GwConf}}) -> - Authn = emqx_map_lib:deep_get( + Authn = emqx_utils_maps:deep_get( [bin(GwName), <<"authentication">>], GwConf ), @@ -340,7 +340,7 @@ ret_authn(_GwName, Err) -> Err. ret_authn(GwName, {LType, LName}, {ok, #{raw_config := GwConf}}) -> - Authn = emqx_map_lib:deep_get( + Authn = emqx_utils_maps:deep_get( [ bin(GwName), <<"listeners">>, @@ -355,7 +355,7 @@ ret_authn(_, _, Err) -> Err. 
ret_listener_or_err(GwName, {LType, LName}, {ok, #{raw_config := GwConf}}) -> - LConf = emqx_map_lib:deep_get( + LConf = emqx_utils_maps:deep_get( [bin(GwName), <<"listeners">>, bin(LType), bin(LName)], GwConf ), @@ -377,7 +377,7 @@ pre_config_update(_, {load_gateway, GwName, Conf}, RawConf) -> case maps:get(GwName, RawConf, undefined) of undefined -> NConf = tune_gw_certs(fun convert_certs/2, GwName, Conf), - {ok, emqx_map_lib:deep_put([GwName], RawConf, NConf)}; + {ok, emqx_utils_maps:deep_put([GwName], RawConf, NConf)}; _ -> badres_gateway(already_exist, GwName) end; @@ -389,7 +389,7 @@ pre_config_update(_, {update_gateway, GwName, Conf}, RawConf) -> Conf1 = maps:without([<<"listeners">>, ?AUTHN_BIN], Conf), NConf = tune_gw_certs(fun convert_certs/2, GwName, Conf1), NConf1 = maps:merge(GwRawConf, NConf), - {ok, emqx_map_lib:deep_put([GwName], RawConf, NConf1)} + {ok, emqx_utils_maps:deep_put([GwName], RawConf, NConf1)} end; pre_config_update(_, {unload_gateway, GwName}, RawConf) -> _ = tune_gw_certs( @@ -400,7 +400,7 @@ pre_config_update(_, {unload_gateway, GwName}, RawConf) -> {ok, maps:remove(GwName, RawConf)}; pre_config_update(_, {add_listener, GwName, {LType, LName}, Conf}, RawConf) -> case - emqx_map_lib:deep_get( + emqx_utils_maps:deep_get( [GwName, <<"listeners">>, LType, LName], RawConf, undefined ) of @@ -408,7 +408,7 @@ pre_config_update(_, {add_listener, GwName, {LType, LName}, Conf}, RawConf) -> NConf = convert_certs(certs_dir(GwName), Conf), NListener = #{LType => #{LName => NConf}}, {ok, - emqx_map_lib:deep_merge( + emqx_utils_maps:deep_merge( RawConf, #{GwName => #{<<"listeners">> => NListener}} )}; @@ -417,7 +417,7 @@ pre_config_update(_, {add_listener, GwName, {LType, LName}, Conf}, RawConf) -> end; pre_config_update(_, {update_listener, GwName, {LType, LName}, Conf}, RawConf) -> case - emqx_map_lib:deep_get( + emqx_utils_maps:deep_get( [GwName, <<"listeners">>, LType, LName], RawConf, undefined ) of @@ -425,7 +425,7 @@ pre_config_update(_, {update_listener, GwName, {LType, LName}, Conf}, RawConf) - badres_listener(not_found, GwName, LType, LName); OldConf -> NConf = convert_certs(certs_dir(GwName), Conf, OldConf), - NRawConf = emqx_map_lib:deep_put( + NRawConf = emqx_utils_maps:deep_put( [GwName, <<"listeners">>, LType, LName], RawConf, NConf @@ -434,31 +434,33 @@ pre_config_update(_, {update_listener, GwName, {LType, LName}, Conf}, RawConf) - end; pre_config_update(_, {remove_listener, GwName, {LType, LName}}, RawConf) -> Path = [GwName, <<"listeners">>, LType, LName], - case emqx_map_lib:deep_get(Path, RawConf, undefined) of + case emqx_utils_maps:deep_get(Path, RawConf, undefined) of undefined -> {ok, RawConf}; OldConf -> clear_certs(certs_dir(GwName), OldConf), - {ok, emqx_map_lib:deep_remove(Path, RawConf)} + {ok, emqx_utils_maps:deep_remove(Path, RawConf)} end; pre_config_update(_, {add_authn, GwName, Conf}, RawConf) -> case - emqx_map_lib:deep_get( + emqx_utils_maps:deep_get( [GwName, ?AUTHN_BIN], RawConf, undefined ) of undefined -> + CertsDir = authn_certs_dir(GwName, Conf), + Conf1 = emqx_authentication_config:convert_certs(CertsDir, Conf), {ok, - emqx_map_lib:deep_merge( + emqx_utils_maps:deep_merge( RawConf, - #{GwName => #{?AUTHN_BIN => Conf}} + #{GwName => #{?AUTHN_BIN => Conf1}} )}; _ -> badres_authn(already_exist, GwName) end; pre_config_update(_, {add_authn, GwName, {LType, LName}, Conf}, RawConf) -> case - emqx_map_lib:deep_get( + emqx_utils_maps:deep_get( [GwName, <<"listeners">>, LType, LName], RawConf, undefined @@ -469,7 +471,9 @@ 
pre_config_update(_, {add_authn, GwName, {LType, LName}, Conf}, RawConf) -> Listener -> case maps:get(?AUTHN_BIN, Listener, undefined) of undefined -> - NListener = maps:put(?AUTHN_BIN, Conf, Listener), + CertsDir = authn_certs_dir(GwName, LType, LName, Conf), + Conf1 = emqx_authentication_config:convert_certs(CertsDir, Conf), + NListener = maps:put(?AUTHN_BIN, Conf1, Listener), NGateway = #{ GwName => #{ @@ -477,25 +481,27 @@ pre_config_update(_, {add_authn, GwName, {LType, LName}, Conf}, RawConf) -> #{LType => #{LName => NListener}} } }, - {ok, emqx_map_lib:deep_merge(RawConf, NGateway)}; + {ok, emqx_utils_maps:deep_merge(RawConf, NGateway)}; _ -> badres_listener_authn(already_exist, GwName, LType, LName) end end; pre_config_update(_, {update_authn, GwName, Conf}, RawConf) -> case - emqx_map_lib:deep_get( + emqx_utils_maps:deep_get( [GwName, ?AUTHN_BIN], RawConf, undefined ) of undefined -> badres_authn(not_found, GwName); - _Authn -> - {ok, emqx_map_lib:deep_put([GwName, ?AUTHN_BIN], RawConf, Conf)} + OldAuthnConf -> + CertsDir = authn_certs_dir(GwName, Conf), + Conf1 = emqx_authentication_config:convert_certs(CertsDir, Conf, OldAuthnConf), + {ok, emqx_utils_maps:deep_put([GwName, ?AUTHN_BIN], RawConf, Conf1)} end; pre_config_update(_, {update_authn, GwName, {LType, LName}, Conf}, RawConf) -> case - emqx_map_lib:deep_get( + emqx_utils_maps:deep_get( [GwName, <<"listeners">>, LType, LName], RawConf, undefined @@ -507,14 +513,20 @@ pre_config_update(_, {update_authn, GwName, {LType, LName}, Conf}, RawConf) -> case maps:get(?AUTHN_BIN, Listener, undefined) of undefined -> badres_listener_authn(not_found, GwName, LType, LName); - _Auth -> + OldAuthnConf -> + CertsDir = authn_certs_dir(GwName, LType, LName, OldAuthnConf), + Conf1 = emqx_authentication_config:convert_certs( + CertsDir, + Conf, + OldAuthnConf + ), NListener = maps:put( ?AUTHN_BIN, - Conf, + Conf1, Listener ), {ok, - emqx_map_lib:deep_put( + emqx_utils_maps:deep_put( [GwName, <<"listeners">>, LType, LName], RawConf, NListener @@ -522,13 +534,37 @@ pre_config_update(_, {update_authn, GwName, {LType, LName}, Conf}, RawConf) -> end end; pre_config_update(_, {remove_authn, GwName}, RawConf) -> + case + emqx_utils_maps:deep_get( + [GwName, ?AUTHN_BIN], RawConf, undefined + ) + of + undefined -> + ok; + OldAuthnConf -> + CertsDir = authn_certs_dir(GwName, OldAuthnConf), + emqx_authentication_config:clear_certs(CertsDir, OldAuthnConf) + end, {ok, - emqx_map_lib:deep_remove( + emqx_utils_maps:deep_remove( [GwName, ?AUTHN_BIN], RawConf )}; pre_config_update(_, {remove_authn, GwName, {LType, LName}}, RawConf) -> Path = [GwName, <<"listeners">>, LType, LName, ?AUTHN_BIN], - {ok, emqx_map_lib:deep_remove(Path, RawConf)}; + case + emqx_utils_maps:deep_get( + Path, + RawConf, + undefined + ) + of + undefined -> + ok; + OldAuthnConf -> + CertsDir = authn_certs_dir(GwName, LType, LName, OldAuthnConf), + emqx_authentication_config:clear_certs(CertsDir, OldAuthnConf) + end, + {ok, emqx_utils_maps:deep_remove(Path, RawConf)}; pre_config_update(_, UnknownReq, _RawConf) -> logger:error("Unknown configuration update request: ~0p", [UnknownReq]), {error, badreq}. @@ -678,6 +714,18 @@ apply_to_gateway_basic_confs(_Fun, _GwName, Conf) -> certs_dir(GwName) when is_binary(GwName) -> GwName. +authn_certs_dir(GwName, ListenerType, ListenerName, AuthnConf) -> + ChainName = emqx_gateway_utils:listener_chain(GwName, ListenerType, ListenerName), + emqx_authentication_config:certs_dir(ChainName, AuthnConf). 
+ +authn_certs_dir(GwName, AuthnConf) when is_binary(GwName) -> + authn_certs_dir(binary_to_existing_atom(GwName), AuthnConf); +authn_certs_dir(GwName, AuthnConf) -> + emqx_authentication_config:certs_dir( + emqx_gateway_utils:global_chain(GwName), + AuthnConf + ). + convert_certs(SubDir, Conf) -> convert_certs(<<"dtls_options">>, SubDir, convert_certs(<<"ssl_options">>, SubDir, Conf)). diff --git a/apps/emqx_gateway/src/emqx_gateway_http.erl b/apps/emqx_gateway/src/emqx_gateway_http.erl index d80e3433f..7aaaee9cb 100644 --- a/apps/emqx_gateway/src/emqx_gateway_http.erl +++ b/apps/emqx_gateway/src/emqx_gateway_http.erl @@ -148,7 +148,7 @@ gateway_status(GwName) -> end. cluster_gateway_status(GwName) -> - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), case emqx_gateway_http_proto_v1:get_cluster_status(Nodes, GwName) of {Results, []} -> Results; @@ -240,7 +240,7 @@ authn(GwName) -> ChainName = emqx_gateway_utils:global_chain(GwName), wrap_chain_name( ChainName, - emqx_map_lib:jsonable_map(emqx:get_raw_config(Path)) + emqx_utils_maps:jsonable_map(emqx:get_raw_config(Path)) ). -spec authn(gateway_name(), binary()) -> map(). @@ -250,7 +250,7 @@ authn(GwName, ListenerId) -> ChainName = emqx_gateway_utils:listener_chain(GwName, Type, Name), wrap_chain_name( ChainName, - emqx_map_lib:jsonable_map(emqx:get_raw_config(Path)) + emqx_utils_maps:jsonable_map(emqx:get_raw_config(Path)) ). wrap_chain_name(ChainName, Conf) -> @@ -404,7 +404,7 @@ return_http_error(Code, Msg) -> -spec reason2msg({atom(), map()} | any()) -> error | string(). reason2msg({badconf, #{key := Key, value := Value, reason := Reason}}) -> NValue = - case emqx_json:safe_encode(Value) of + case emqx_utils_json:safe_encode(Value) of {ok, Str} -> Str; {error, _} -> emqx_gateway_utils:stringfy(Value) end, @@ -495,7 +495,7 @@ reason2msg( reason2msg( {#{roots := [{gateway, _}]}, [_ | _]} = Error ) -> - Bin = emqx_misc:readable_error_msg(Error), + Bin = emqx_utils:readable_error_msg(Error), <<"Invalid configurations: ", Bin/binary>>; reason2msg(_) -> error. diff --git a/apps/emqx_gateway/src/emqx_gateway_metrics.erl b/apps/emqx_gateway/src/emqx_gateway_metrics.erl index e94510387..0aa2ff210 100644 --- a/apps/emqx_gateway/src/emqx_gateway_metrics.erl +++ b/apps/emqx_gateway/src/emqx_gateway_metrics.erl @@ -89,7 +89,7 @@ tabname(GwName) -> init([GwName]) -> TabOpts = [public, {write_concurrency, true}], - ok = emqx_tables:new(tabname(GwName), [set | TabOpts]), + ok = emqx_utils_ets:new(tabname(GwName), [set | TabOpts]), {ok, #state{}}. handle_call(_Request, _From, State) -> diff --git a/apps/emqx_gateway/src/emqx_gateway_schema.erl b/apps/emqx_gateway/src/emqx_gateway_schema.erl index e89280f14..8c80fc1fa 100644 --- a/apps/emqx_gateway/src/emqx_gateway_schema.erl +++ b/apps/emqx_gateway/src/emqx_gateway_schema.erl @@ -49,415 +49,62 @@ ]). -elvis([{elvis_style, dont_repeat_yourself, disable}]). --export([namespace/0, roots/0, fields/1, desc/1]). +-export([namespace/0, roots/0, fields/1, desc/1, tags/0]). -export([proxy_protocol_opts/0]). +-export([mountpoint/0, mountpoint/1, gateway_common_options/0, gateway_schema/1]). + namespace() -> gateway. -roots() -> [gateway]. +tags() -> + [<<"Gateway">>]. + +roots() -> + [{gateway, sc(ref(?MODULE, gateway), #{importance => ?IMPORTANCE_HIDDEN})}]. 
fields(gateway) -> - [ - {stomp, - sc( - ref(stomp), - #{ - required => {false, recursively}, - desc => ?DESC(stomp) - } - )}, - {mqttsn, - sc( - ref(mqttsn), - #{ - required => {false, recursively}, - desc => ?DESC(mqttsn) - } - )}, - {coap, - sc( - ref(coap), - #{ - required => {false, recursively}, - desc => ?DESC(coap) - } - )}, - {lwm2m, - sc( - ref(lwm2m), - #{ - required => {false, recursively}, - desc => ?DESC(lwm2m) - } - )}, - {exproto, - sc( - ref(exproto), - #{ - required => {false, recursively}, - desc => ?DESC(exproto) - } - )} - ]; -fields(stomp) -> - [ - {frame, sc(ref(stomp_frame))}, - {mountpoint, mountpoint()}, - {listeners, sc(ref(tcp_listeners), #{desc => ?DESC(tcp_listeners)})} - ] ++ gateway_common_options(); -fields(stomp_frame) -> - [ - {max_headers, - sc( - non_neg_integer(), - #{ - default => 10, - desc => ?DESC(stom_frame_max_headers) - } - )}, - {max_headers_length, - sc( - non_neg_integer(), - #{ - default => 1024, - desc => ?DESC(stomp_frame_max_headers_length) - } - )}, - {max_body_length, - sc( - integer(), - #{ - default => 65536, - desc => ?DESC(stom_frame_max_body_length) - } - )} - ]; -fields(mqttsn) -> - [ - {gateway_id, - sc( - integer(), - #{ - default => 1, - required => true, - desc => ?DESC(mqttsn_gateway_id) - } - )}, - {broadcast, - sc( - boolean(), - #{ - default => false, - desc => ?DESC(mqttsn_broadcast) - } - )}, - %% TODO: rename - {enable_qos3, - sc( - boolean(), - #{ - default => true, - desc => ?DESC(mqttsn_enable_qos3) - } - )}, - {subs_resume, - sc( - boolean(), - #{ - default => false, - desc => ?DESC(mqttsn_subs_resume) - } - )}, - {predefined, - sc( - hoconsc:array(ref(mqttsn_predefined)), - #{ - default => [], - required => {false, recursively}, - desc => ?DESC(mqttsn_predefined) - } - )}, - {mountpoint, mountpoint()}, - {listeners, sc(ref(udp_listeners), #{desc => ?DESC(udp_listeners)})} - ] ++ gateway_common_options(); -fields(mqttsn_predefined) -> - [ - {id, - sc(integer(), #{ - required => true, - desc => ?DESC(mqttsn_predefined_id) - })}, - - {topic, - sc(binary(), #{ - required => true, - desc => ?DESC(mqttsn_predefined_topic) - })} - ]; -fields(coap) -> - [ - {heartbeat, - sc( - duration(), - #{ - default => <<"30s">>, - desc => ?DESC(coap_heartbeat) - } - )}, - {connection_required, - sc( - boolean(), - #{ - default => false, - desc => ?DESC(coap_connection_required) - } - )}, - {notify_type, - sc( - hoconsc:enum([non, con, qos]), - #{ - default => qos, - desc => ?DESC(coap_notify_type) - } - )}, - {subscribe_qos, - sc( - hoconsc:enum([qos0, qos1, qos2, coap]), - #{ - default => coap, - desc => ?DESC(coap_subscribe_qos) - } - )}, - {publish_qos, - sc( - hoconsc:enum([qos0, qos1, qos2, coap]), - #{ - default => coap, - desc => ?DESC(coap_publish_qos) - } - )}, - {mountpoint, mountpoint()}, - {listeners, - sc( - ref(udp_listeners), - #{desc => ?DESC(udp_listeners)} - )} - ] ++ gateway_common_options(); -fields(lwm2m) -> - [ - {xml_dir, - sc( - binary(), - #{ - %% since this is not packaged with emqx, nor - %% present in the packages, we must let the user - %% specify it rather than creating a dynamic - %% default (especially difficult to handle when - %% generating docs). 
- example => <<"/etc/emqx/lwm2m_xml">>, - required => true, - desc => ?DESC(lwm2m_xml_dir) - } - )}, - {lifetime_min, - sc( - duration(), - #{ - default => "15s", - desc => ?DESC(lwm2m_lifetime_min) - } - )}, - {lifetime_max, - sc( - duration(), - #{ - default => "86400s", - desc => ?DESC(lwm2m_lifetime_max) - } - )}, - {qmode_time_window, - sc( - duration_s(), - #{ - default => "22s", - desc => ?DESC(lwm2m_qmode_time_window) - } - )}, - %% TODO: Support config resource path - {auto_observe, - sc( - boolean(), - #{ - default => false, - desc => ?DESC(lwm2m_auto_observe) - } - )}, - %% FIXME: not working now - {update_msg_publish_condition, - sc( - hoconsc:enum([always, contains_object_list]), - #{ - default => contains_object_list, - desc => ?DESC(lwm2m_update_msg_publish_condition) - } - )}, - {translators, - sc( - ref(lwm2m_translators), - #{ - required => true, - desc => ?DESC(lwm2m_translators) - } - )}, - {mountpoint, mountpoint("lwm2m/${endpoint_name}/")}, - {listeners, sc(ref(udp_listeners), #{desc => ?DESC(udp_listeners)})} - ] ++ gateway_common_options(); -fields(exproto) -> - [ - {server, - sc( - ref(exproto_grpc_server), - #{ - required => true, - desc => ?DESC(exproto_server) - } - )}, - {handler, - sc( - ref(exproto_grpc_handler), - #{ - required => true, - desc => ?DESC(exproto_handler) - } - )}, - {mountpoint, mountpoint()}, - {listeners, sc(ref(tcp_udp_listeners), #{desc => ?DESC(tcp_udp_listeners)})} - ] ++ gateway_common_options(); -fields(exproto_grpc_server) -> - [ - {bind, - sc( - hoconsc:union([ip_port(), integer()]), - #{ - required => true, - desc => ?DESC(exproto_grpc_server_bind) - } - )}, - {ssl_options, - sc( - ref(ssl_server_opts), - #{ - required => {false, recursively}, - desc => ?DESC(exproto_grpc_server_ssl) - } - )} - ]; -fields(exproto_grpc_handler) -> - [ - {address, sc(binary(), #{required => true, desc => ?DESC(exproto_grpc_handler_address)})}, - {ssl_options, - sc( - ref(emqx_schema, "ssl_client_opts"), - #{ - required => {false, recursively}, - desc => ?DESC(exproto_grpc_handler_ssl) - } - )} - ]; -fields(ssl_server_opts) -> - emqx_schema:server_ssl_opts_schema( - #{ - depth => 10, - reuse_sessions => true, - versions => tls_all_available - }, - true + lists:map( + fun(#{name := Name, config_schema_module := Mod}) -> + {Name, + sc( + ref(Mod, Name), + #{ + required => {false, recursively}, + desc => ?DESC(Name) + } + )} + end, + emqx_gateway_utils:find_gateway_definitions() ); fields(clientinfo_override) -> [ {username, sc(binary(), #{desc => ?DESC(gateway_common_clientinfo_override_username)})}, - {password, sc(binary(), #{desc => ?DESC(gateway_common_clientinfo_override_password)})}, + {password, + sc(binary(), #{ + desc => ?DESC(gateway_common_clientinfo_override_password), + sensitive => true, + format => <<"password">>, + converter => fun emqx_schema:password_converter/2 + })}, {clientid, sc(binary(), #{desc => ?DESC(gateway_common_clientinfo_override_clientid)})} ]; -fields(lwm2m_translators) -> - [ - {command, - sc( - ref(translator), - #{ - desc => ?DESC(lwm2m_translators_command), - required => true - } - )}, - {response, - sc( - ref(translator), - #{ - desc => ?DESC(lwm2m_translators_response), - required => true - } - )}, - {notify, - sc( - ref(translator), - #{ - desc => ?DESC(lwm2m_translators_notify), - required => true - } - )}, - {register, - sc( - ref(translator), - #{ - desc => ?DESC(lwm2m_translators_register), - required => true - } - )}, - {update, - sc( - ref(translator), - #{ - desc => ?DESC(lwm2m_translators_update), - 
required => true - } - )} - ]; -fields(translator) -> - [ - {topic, - sc( - binary(), - #{ - required => true, - desc => ?DESC(translator_topic) - } - )}, - {qos, - sc( - emqx_schema:qos(), - #{ - default => 0, - desc => ?DESC(translator_qos) - } - )} - ]; fields(udp_listeners) -> [ - {udp, sc(map(name, ref(udp_listener)), #{desc => ?DESC(udp_listener)})}, - {dtls, sc(map(name, ref(dtls_listener)), #{desc => ?DESC(dtls_listener)})} + {udp, sc(map(name, ref(udp_listener)), #{desc => ?DESC(listener_name_to_settings_map)})}, + {dtls, sc(map(name, ref(dtls_listener)), #{desc => ?DESC(listener_name_to_settings_map)})} ]; fields(tcp_listeners) -> [ - {tcp, sc(map(name, ref(tcp_listener)), #{desc => ?DESC(tcp_listener)})}, - {ssl, sc(map(name, ref(ssl_listener)), #{desc => ?DESC(ssl_listener)})} + {tcp, sc(map(name, ref(tcp_listener)), #{desc => ?DESC(listener_name_to_settings_map)})}, + {ssl, sc(map(name, ref(ssl_listener)), #{desc => ?DESC(listener_name_to_settings_map)})} ]; fields(tcp_udp_listeners) -> [ - {tcp, sc(map(name, ref(tcp_listener)), #{desc => ?DESC(tcp_listener)})}, - {ssl, sc(map(name, ref(ssl_listener)), #{desc => ?DESC(ssl_listener)})}, - {udp, sc(map(name, ref(udp_listener)), #{desc => ?DESC(udp_listener)})}, - {dtls, sc(map(name, ref(dtls_listener)), #{desc => ?DESC(dtls_listener)})} + {tcp, sc(map(name, ref(tcp_listener)), #{desc => ?DESC(listener_name_to_settings_map)})}, + {ssl, sc(map(name, ref(ssl_listener)), #{desc => ?DESC(listener_name_to_settings_map)})}, + {udp, sc(map(name, ref(udp_listener)), #{desc => ?DESC(listener_name_to_settings_map)})}, + {dtls, sc(map(name, ref(dtls_listener)), #{desc => ?DESC(listener_name_to_settings_map)})} ]; fields(tcp_listener) -> %% some special configs for tcp listener @@ -513,55 +160,26 @@ fields(dtls_opts) -> desc(gateway) -> "EMQX Gateway configuration root."; -desc(stomp) -> - "The STOMP protocol gateway provides EMQX with the ability to access STOMP\n" - "(Simple (or Streaming) Text Orientated Messaging Protocol) protocol."; -desc(stomp_frame) -> - "Size limits for the STOMP frames."; -desc(mqttsn) -> - "The MQTT-SN (MQTT for Sensor Networks) protocol gateway."; -desc(mqttsn_predefined) -> - "The pre-defined topic name corresponding to the pre-defined topic\n" - "ID of N.\n\n" - "Note: the pre-defined topic ID of 0 is reserved."; -desc(coap) -> - "The CoAP protocol gateway provides EMQX with the access capability of the CoAP protocol.\n" - "It allows publishing, subscribing, and receiving messages to EMQX in accordance\n" - "with a certain defined CoAP message format."; -desc(lwm2m) -> - "The LwM2M protocol gateway."; -desc(exproto) -> - "Settings for EMQX extension protocol (exproto)."; -desc(exproto_grpc_server) -> - "Settings for the exproto gRPC server."; -desc(exproto_grpc_handler) -> - "Settings for the exproto gRPC connection handler."; -desc(ssl_server_opts) -> - "SSL configuration for the server."; desc(clientinfo_override) -> "ClientInfo override."; -desc(lwm2m_translators) -> - "MQTT topics that correspond to LwM2M events."; -desc(translator) -> - "MQTT topic that corresponds to a particular type of event."; desc(udp_listeners) -> "Settings for the UDP listeners."; desc(tcp_listeners) -> "Settings for the TCP listeners."; desc(tcp_udp_listeners) -> - "Settings for the listeners."; + "Settings for TCP and UDP listeners."; desc(tcp_listener) -> - "Settings for the TCP listener."; + "Settings for TCP listener."; desc(ssl_listener) -> - "Settings for the SSL listener."; + "Settings for SSL listener."; desc(udp_listener) 
-> - "Settings for the UDP listener."; + "Settings for UDP listener."; desc(dtls_listener) -> - "Settings for the DTLS listener."; + "Settings for DTLS listener."; desc(udp_opts) -> - "Settings for the UDP sockets."; + "Settings for UDP sockets."; desc(dtls_opts) -> - "Settings for the DTLS protocol."; + "Settings for DTLS protocol."; desc(_) -> undefined. @@ -571,6 +189,8 @@ authentication_schema() -> #{ required => {false, recursively}, desc => ?DESC(gateway_common_authentication), + %% we do not expose this to the user for now + importance => ?IMPORTANCE_HIDDEN, examples => emqx_authn_api:authenticator_examples() } ). @@ -615,8 +235,8 @@ mountpoint(Default) -> sc( binary(), #{ - default => Default, - desc => ?DESC(gateway_common_mountpoint) + default => iolist_to_binary(Default), + desc => ?DESC(gateway_mountpoint) } ). @@ -665,7 +285,7 @@ common_listener_opts() -> binary(), #{ default => undefined, - desc => ?DESC(gateway_common_listener_mountpoint) + desc => ?DESC(gateway_mountpoint) } )}, {access_rules, @@ -698,14 +318,24 @@ proxy_protocol_opts() -> sc( duration(), #{ - default => "15s", + default => <<"15s">>, desc => ?DESC(tcp_listener_proxy_protocol_timeout) } )} ]. -sc(Type) -> - sc(Type, #{}). +%%-------------------------------------------------------------------- +%% dynamic schemas + +%% FIXME: don't hardcode the gateway names +gateway_schema(stomp) -> emqx_stomp_schema:fields(stomp); +gateway_schema(mqttsn) -> emqx_mqttsn_schema:fields(mqttsn); +gateway_schema(coap) -> emqx_coap_schema:fields(coap); +gateway_schema(lwm2m) -> emqx_lwm2m_schema:fields(lwm2m); +gateway_schema(exproto) -> emqx_exproto_schema:fields(exproto). + +%%-------------------------------------------------------------------- +%% helpers sc(Type, Meta) -> hoconsc:mk(Type, Meta). diff --git a/apps/emqx_gateway/src/emqx_gateway_utils.erl b/apps/emqx_gateway/src/emqx_gateway_utils.erl index cee5baaa8..ced9eff48 100644 --- a/apps/emqx_gateway/src/emqx_gateway_utils.erl +++ b/apps/emqx_gateway/src/emqx_gateway_utils.erl @@ -46,7 +46,8 @@ global_chain/1, listener_chain/3, make_deprecated_paths/1, - make_compatible_schema/2 + make_compatible_schema/2, + find_gateway_definitions/0 ]). -export([stringfy/1]). @@ -77,7 +78,7 @@ -define(DEFAULT_GC_OPTS, #{count => 1000, bytes => 1024 * 1024}). -define(DEFAULT_OOM_POLICY, #{ max_heap_size => 4194304, - max_message_queue_len => 32000 + max_mailbox_size => 32000 }). -elvis([{elvis_style, god_modules, disable}]). @@ -222,7 +223,7 @@ merge_default(Udp, Options) -> case lists:keytake(Key, 1, Options) of {value, {Key, TcpOpts}, Options1} -> [ - {Key, emqx_misc:merge_opts(Default, TcpOpts)} + {Key, emqx_utils:merge_opts(Default, TcpOpts)} | Options1 ]; false -> @@ -481,7 +482,7 @@ frame_options(Options) -> -spec init_gc_state(map()) -> emqx_gc:gc_state() | undefined. init_gc_state(Options) -> - emqx_misc:maybe_apply(fun emqx_gc:init/1, force_gc_policy(Options)). + emqx_utils:maybe_apply(fun emqx_gc:init/1, force_gc_policy(Options)). -spec force_gc_policy(map()) -> emqx_gc:opts() | undefined. force_gc_policy(Options) -> @@ -562,3 +563,82 @@ make_compatible_schema2(Path, SchemaFun) -> end, Schema ). + +-spec find_gateway_definitions() -> list(gateway_def()). +find_gateway_definitions() -> + lists:flatten( + lists:map( + fun(App) -> + gateways(find_attrs(App, gateway)) + end, + ignore_lib_apps(application:loaded_applications()) + ) + ). 
+
+gateways([]) ->
+    [];
+gateways([
+    {_App, _Mod,
+        Definition =
+            #{
+                name := Name,
+                callback_module := CbMod,
+                config_schema_module := SchemaMod
+            }}
+    | More
+]) when is_atom(Name), is_atom(CbMod), is_atom(SchemaMod) ->
+    [Definition | gateways(More)].
+
+find_attrs(App, Def) ->
+    [
+        {App, Mod, Attr}
+     || {ok, Modules} <- [application:get_key(App, modules)],
+        Mod <- Modules,
+        {Name, Attrs} <- module_attributes(Mod),
+        Name =:= Def,
+        Attr <- Attrs
+    ].
+
+module_attributes(Module) ->
+    try
+        apply(Module, module_info, [attributes])
+    catch
+        error:undef -> []
+    end.
+
+ignore_lib_apps(Apps) ->
+    LibApps = [
+        kernel,
+        stdlib,
+        sasl,
+        appmon,
+        eldap,
+        erts,
+        syntax_tools,
+        ssl,
+        crypto,
+        mnesia,
+        os_mon,
+        inets,
+        goldrush,
+        gproc,
+        runtime_tools,
+        snmp,
+        otp_mibs,
+        public_key,
+        asn1,
+        ssh,
+        hipe,
+        common_test,
+        observer,
+        webtool,
+        xmerl,
+        tools,
+        test_server,
+        compiler,
+        debugger,
+        eunit,
+        et,
+        wx
+    ],
+    [AppName || {AppName, _, _} <- Apps, not lists:member(AppName, LibApps)].
diff --git a/apps/emqx_gateway/src/lwm2m/.gitignore b/apps/emqx_gateway/src/lwm2m/.gitignore
deleted file mode 100644
index be6914be3..000000000
--- a/apps/emqx_gateway/src/lwm2m/.gitignore
+++ /dev/null
@@ -1,25 +0,0 @@
-deps/
-ebin/
-_rel/
-.erlang.mk/
-*.d
-*.o
-*.exe
-data/
-*.iml
-.idea/
-logs/
-*.beam
-emqx_coap.d
-erlang.mk
-integration_test/emqx-rel/
-integration_test/build_wakaama/
-integration_test/case*.txt
-integration_test/paho/
-integration_test/wakaama/
-_build/
-rebar.lock
-rebar3.crashdump
-*.conf.rendered
-.rebar3/
-*.swp
diff --git a/apps/emqx_gateway/src/lwm2m/README.md b/apps/emqx_gateway/src/lwm2m/README.md
deleted file mode 100644
index bf7626c6f..000000000
--- a/apps/emqx_gateway/src/lwm2m/README.md
+++ /dev/null
@@ -1,357 +0,0 @@
-
-# LwM2M Gateway
-
-[The LwM2M Specifications](http://www.openmobilealliance.org/release/LightweightM2M) is a Lightweight Machine to Machine protocol.
-
-With `emqx_lwm2m`, user is able to send LwM2M commands(READ/WRITE/EXECUTE/...) and get LwM2M response in MQTT way. `emqx_lwm2m` transforms data between MQTT and LwM2M protocol.
-
-emqx_lwm2m needs object definitions to parse data from lwm2m devices. Object definitions are declared by organizations in XML format, you could find those XMLs from [LwM2MRegistry](http://www.openmobilealliance.org/wp/OMNA/LwM2M/LwM2MRegistry.html), download and put them into the directory specified by `lwm2m.xml_dir`. If no associated object definition is found, response from device will be discarded and report an error message in log.
-
-## Load emqx_lwm2m
-
-```
-./bin/emqx_ctl plugins load emqx_lwm2m
-```
-
-## Test emqx-lwm2m using *wakaama*
-
-[wakaama](https://github.com/eclipse/wakaama) is an easy-to-use lwm2m client command line tool.
-
-Start *lwm2mclient* using an endpoint name `ep1`:
-```
-./lwm2mclient -n ep1 -h 127.0.0.1 -p 5683 -4
-```
-
-To send an LwM2M DISCOVER command to *lwm2mclient*, publish an MQTT message to topic `lwm2m//dn` (where `` is the endpoint name of the client), with following payload:
-
-```json
-{
-    "reqID": "2",
-    "msgType": "discover",
-    "data": {
-        "path": "/3/0"
-    }
-}
-```
-
-The MQTT message will be translated to an LwM2M DISCOVER command and sent to the *lwm2mclient*.
Then the response of *lwm2mclient* will be in turn translated to an MQTT message, with topic `lwm2m//up/resp`, with following payload: - -```json -{ - "reqID": "2", - "msgType": "discover", - "data": { - "code":"2.05", - "codeMsg": "content", - "content": [ - ";dim=8", - "", - "", - "", - "" - ] - } -} -``` - -## LwM2M <--> MQTT Mapping - -### Register/Update (LwM2M Client Registration Interface) - -- **LwM2M Register and Update message will be converted to following MQTT message:** - - - **Method:** PUBLISH - - **Topic:** `lwm2m/{?EndpointName}/up/resp` (configurable) - - **Payload**: - - MsgType **register** and **update**: - ```json - { - "msgType": {?MsgType}, - "data": { - "ep": {?EndpointName}, - "lt": {?LifeTime}, - "sms": {?MSISDN}, - "lwm2m": {?Lwm2mVersion}, - "b": {?Binding}, - "alternatePath": {?AlternatePath}, - "objectList": {?ObjectList} - } - } - ``` - - {?EndpointName}: String, the endpoint name of the LwM2M client - - {?MsgType}: String, could be: - - "register": LwM2M Register - - "update": LwM2M Update - - "data" contains the query options and the object-list of the register message - - The *update* message is only published if the object-list changed. - -### Downlink Command and Uplink Response (LwM2M Device Management & Service Enablement Interface) - -- **To send a downlink command to device, publish following MQTT message:** - - **Method:** PUBLISH - - **Topic:** `lwm2m/{?EndpointName}/dn` - - **Request Payload**: - ```json - { - "reqID": {?ReqID}, - "msgType": {?MsgType}, - "data": {?Data} - } - ``` - - {?ReqID}: Integer, request-id, used for matching the response to the request - - {?MsgType}: String, can be one of the following: - - "read": LwM2M Read - - "discover": LwM2M Discover - - "write": LwM2M Write - - "write-attr": LwM2M Write Attributes - - "execute": LwM2M Execute - - "create": LwM2M Create - - "delete": LwM2M Delete - - {?Data}: JSON Object, its value depends on the {?MsgType}: - - **If {?MsgType} = "read" or "discover"**: - ```json - { - "path": {?ResourcePath} - } - ``` - - {?ResourcePath}: String, LwM2M full resource path. e.g. "3/0", "/3/0/0", "/3/0/6/0" - - **If {?MsgType} = "write" (single write)**: - ```json - { - "path": {?ResourcePath}, - "type": {?ValueType}, - "value": {?Value} - } - ``` - - {?ValueType}: String, can be: "Time", "String", "Integer", "Float", "Boolean", "Opaque", "Objlnk" - - {?Value}: Value of the resource, depends on "type". - - **If {?MsgType} = "write" (batch write)**: - ```json - { - "basePath": {?BasePath}, - "content": [ - { - "path": {?ResourcePath}, - "type": {?ValueType}, - "value": {?Value} - } - ] - } - ``` - - The full path is concatenation of "basePath" and "path". - - **If {?MsgType} = "write-attr"**: - ```json - { - "path": {?ResourcePath}, - "pmin": {?PeriodMin}, - "pmax": {?PeriodMax}, - "gt": {?GreaterThan}, - "lt": {?LessThan}, - "st": {?Step} - } - ``` - - {?PeriodMin}: Number, LwM2M Notification Class Attribute - Minimum Period. - - {?PeriodMax}: Number, LwM2M Notification Class Attribute - Maximum Period. - - {?GreaterThan}: Number, LwM2M Notification Class Attribute - Greater Than. - - {?LessThan}: Number, LwM2M Notification Class Attribute - Less Than. - - {?Step}: Number, LwM2M Notification Class Attribute - Step. - - - **If {?MsgType} = "execute"**: - ```json - { - "path": {?ResourcePath}, - "args": {?Arguments} - } - ``` - - {?Arguments}: String, LwM2M Execute Arguments. 
- - - **If {?MsgType} = "create"**: - ```json - { - "basePath": "/{?ObjectID}", - "content": [ - { - "path": {?ResourcePath}, - "type": {?ValueType}, - "value": {?Value} - } - ] - } - ``` - - {?ObjectID}: Integer, LwM2M Object ID - - - **If {?MsgType} = "delete"**: - ```json - { - "path": "{?ObjectID}/{?ObjectInstanceID}" - } - ``` - - {?ObjectInstanceID}: Integer, LwM2M Object Instance ID - -- **The response of LwM2M will be converted to following MQTT message:** - - **Method:** PUBLISH - - **Topic:** `"lwm2m/{?EndpointName}/up/resp"` - - **Response Payload:** - - ```json - { - "reqID": {?ReqID}, - "imei": {?IMEI}, - "imsi": {?IMSI}, - "msgType": {?MsgType}, - "data": {?Data} - } - ``` - - - {?MsgType}: String, can be: - - "read": LwM2M Read - - "discover": LwM2M Discover - - "write": LwM2M Write - - "write-attr": LwM2M Write Attributes - - "execute": LwM2M Execute - - "create": LwM2M Create - - "delete": LwM2M Delete - - **"ack"**: [CoAP Empty ACK](https://tools.ietf.org/html/rfc7252#section-5.2.2) - - {?Data}: JSON Object, its value depends on {?MsgType}: - - **If {?MsgType} = "write", "write-attr", "execute", "create", "delete", or "read"(when response without content)**: - ```json - { - "code": {?StatusCode}, - "codeMsg": {?CodeMsg}, - "reqPath": {?RequestPath} - } - ``` - - {?StatusCode}: String, LwM2M status code, e.g. "2.01", "4.00", etc. - - {?CodeMsg}: String, LwM2M response message, e.g. "content", "bad_request" - - {?RequestPath}: String, the requested "path" or "basePath" - - - **If {?MsgType} = "discover"**: - ```json - { - "code": {?StatusCode}, - "codeMsg": {?CodeMsg}, - "reqPath": {?RequestPath}, - "content": [ - {?Link}, - ... - ] - } - ``` - - {?Link}: String(LwM2M link format) e.g. `""`, `"<3/0/1>;dim=8"` - - - **If {?MsgType} = "read"(when response with content)**: - ```json - { - "code": {?StatusCode}, - "codeMsg": {?CodeMsg}, - "content": {?Content} - } - ``` - - {?Content} - ```json - [ - { - "path": {?ResourcePath}, - "value": {?Value} - } - ] - ``` - - - **If {?MsgType} = "ack", "data" does not exists** - -### Observe (Information Reporting Interface - Observe/Cancel-Observe) - -- **To observe/cancel-observe LwM2M client, send following MQTT PUBLISH:** - - **Method:** PUBLISH - - **Topic:** `lwm2m/{?EndpointName}/dn` - - **Request Payload**: - ```json - { - "reqID": {?ReqID}, - "msgType": {?MsgType}, - "data": { - "path": {?ResourcePath} - } - } - ``` - - {?ResourcePath}: String, the LwM2M resource to be observed/cancel-observed. 
- - {?MsgType}: String, can be: - - "observe": LwM2M Observe - - "cancel-observe": LwM2M Cancel Observe - - {?ReqID}: Integer, request-id, is the {?ReqID} in the request - -- **Responses will be converted to following MQTT message:** - - **Method:** PUBLISH - - **Topic:** `lwm2m/{?EndpointName}/up/resp` - - **Response Payload**: - ```json - { - "reqID": {?ReqID}, - "msgType": {?MsgType}, - "data": { - "code": {?StatusCode}, - "codeMsg": {?CodeMsg}, - "reqPath": {?RequestPath}, - "content": [ - { - "path": {?ResourcePath}, - "value": {?Value} - } - ] - } - } - ``` - - {?MsgType}: String, can be: - - "observe": LwM2M Observe - - "cancel-observe": LwM2M Cancel Observe - - **"ack"**: [CoAP Empty ACK](https://tools.ietf.org/html/rfc7252#section-5.2.2) - -### Notification (Information Reporting Interface - Notify) - -- **The notifications from LwM2M clients will be converted to MQTT PUBLISH:** - - **Method:** PUBLISH - - **Topic:** `lwm2m/{?EndpiontName}/up/notify` - - **Notification Payload**: - ```json - { - "reqID": {?ReqID}, - "msgType": {?MsgType}, - "seqNum": {?ObserveSeqNum}, - "data": { - "code": {?StatusCode}, - "codeMsg": {?CodeMsg}, - "reqPath": {?RequestPath}, - "content": [ - { - "path": {?ResourcePath}, - "value": {?Value} - } - ] - } - } - ``` - - {?MsgType}: String, must be "notify" - - {?ObserveSeqNum}: Number, value of "Observe" option in CoAP message - - "content": same to the "content" field contains in the response of "read" command - -## Feature limitations - -- emqx_lwm2m implements LwM2M gateway to EMQX, not a full-featured and independent LwM2M server. -- emqx_lwm2m does not include LwM2M bootstrap server. -- emqx_lwm2m supports UDP binding, no SMS binding yet. -- Firmware object is not fully supported now since mqtt to coap block-wise transfer is not available. -- Object Versioning is not supported now. - -## DTLS - -emqx-lwm2m support DTLS to secure UDP data. - -Please config lwm2m.certfile and lwm2m.keyfile in emqx_lwm2m.conf. If certfile or keyfile are invalid, DTLS will be turned off and you could read a error message in the log. - -## License - -Apache License Version 2.0 - -## Author - -EMQX-Men Team. diff --git a/apps/emqx_gateway/src/mqttsn/README.md b/apps/emqx_gateway/src/mqttsn/README.md deleted file mode 100644 index 67938b748..000000000 --- a/apps/emqx_gateway/src/mqttsn/README.md +++ /dev/null @@ -1,110 +0,0 @@ -# MQTT-SN Gateway - -EMQX MQTT-SN Gateway. - -## Configure Plugin - - -File: etc/emqx_sn.conf - -``` -## The UDP port which emq-sn is listening on. -## -## Value: IP:Port | Port -## -## Examples: 1884, 127.0.0.1:1884, ::1:1884 -mqtt.sn.port = 1884 - -## The duration(seconds) that emq-sn broadcast ADVERTISE message through. -## -## Value: Second -mqtt.sn.advertise_duration = 900 - -## The MQTT-SN Gateway id in ADVERTISE message. -## -## Value: Number -mqtt.sn.gateway_id = 1 - -## To control whether write statistics data into ETS table for dashboard to read. -## -## Value: on | off -mqtt.sn.enable_stats = off - -## To control whether accept and process the received publish message with qos=-1. -## -## Value: on | off -mqtt.sn.enable_qos3 = off - -## The pre-defined topic name corresponding to the pre-defined topic id of N. -## Note that the pre-defined topic id of 0 is reserved. -mqtt.sn.predefined.topic.0 = reserved -mqtt.sn.predefined.topic.1 = /predefined/topic/name/hello -mqtt.sn.predefined.topic.2 = /predefined/topic/name/nice - -## Default username for MQTT-SN. This parameter is optional. 
If specified, -## emq-sn will connect EMQ core with this username. It is useful if any auth -## plug-in is enabled. -## -## Value: String -mqtt.sn.username = mqtt_sn_user - -## This parameter is optional. Pair with username above. -## -## Value: String -mqtt.sn.password = abc -``` - -- mqtt.sn.port - * The UDP port which emqx-sn is listening on. -- mqtt.sn.advertise_duration - * The duration(seconds) that emqx-sn broadcast ADVERTISE message through. -- mqtt.sn.gateway_id - * Gateway id in ADVERTISE message. -- mqtt.sn.enable_stats - * To control whether write statistics data into ETS table for dashboard to read. -- mqtt.sn.enable_qos3 - * To control whether accept and process the received publish message with qos=-1. -- mqtt.sn.predefined.topic.N - * The pre-defined topic name corresponding to the pre-defined topic id of N. Note that the pre-defined topic id of 0 is reserved. -- mqtt.sn.username - * This parameter is optional. If specified, emqx-sn will connect EMQX core with this username. It is useful if any auth plug-in is enabled. -- mqtt.sn.password - * This parameter is optional. Pair with username above. - -## Load Plugin - -``` -./bin/emqx_ctl plugins load emqx_sn -``` - -## Client - -### NOTE -- Topic ID is per-client, and will be cleared if client disconnected with broker or keepalive failure is detected in broker. -- Please register your topics again each time connected with broker. -- If your udp socket(mqtt-sn client) has successfully connected to broker, don't try to send another CONNECT on this socket again, which will lead to confusing behaviour. If you want to start from beging, please do as following: - + destroy your present socket and create a new socket to connect again - + or send DISCONNECT on the same socket and connect again. - -### Library - -- https://github.com/eclipse/paho.mqtt-sn.embedded-c/ -- https://github.com/ty4tw/MQTT-SN -- https://github.com/njh/mqtt-sn-tools -- https://github.com/arobenko/mqtt-sn - -### sleeping device - -PINGREQ must have a ClientId which is identical to the one in CONNECT message. Without ClientId, emqx-sn will ignore such PINGREQ. - -### pre-defined topics - -The mapping of a pre-defined topic id and topic name should be known inadvance by both client's application and gateway. We define this mapping info in emqx_sn.conf file, and which shall be kept equivalent in all client's side. - -## License - -Apache License Version 2.0 - -## Author - -EMQX Team. diff --git a/apps/emqx_gateway/src/stomp/README.md b/apps/emqx_gateway/src/stomp/README.md deleted file mode 100644 index d96999aec..000000000 --- a/apps/emqx_gateway/src/stomp/README.md +++ /dev/null @@ -1,73 +0,0 @@ - -# emqx-stomp - - -The plugin adds STOMP 1.0/1.1/1.2 protocol supports to the EMQX broker. - -The STOMP clients could PubSub to the MQTT clients. - -## Configuration - -etc/emqx_stomp.conf - -``` -## The Port that stomp listener will bind. -## -## Value: Port -stomp.listener = 61613 - -## The acceptor pool for stomp listener. -## -## Value: Number -stomp.listener.acceptors = 4 - -## Maximum number of concurrent stomp connections. -## -## Value: Number -stomp.listener.max_connections = 512 - -## Default login user -## -## Value: String -stomp.default_user.login = guest - -## Default login password -## -## Value: String -stomp.default_user.passcode = guest - -## Allow anonymous authentication. -## -## Value: true | false -stomp.allow_anonymous = true - -## Maximum numbers of frame headers. 
-## -## Value: Number -stomp.frame.max_headers = 10 - -## Maximum length of frame header. -## -## Value: Number -stomp.frame.max_header_length = 1024 - -## Maximum body length of frame. -## -## Value: Number -stomp.frame.max_body_length = 8192 -``` - -## Load the Plugin - -``` -./bin/emqx_ctl plugins load emqx_stomp -``` - -## License - -Apache License Version 2.0 - -## Author - -EMQX Team. - diff --git a/apps/emqx_gateway/test/emqx_gateway_SUITE.erl b/apps/emqx_gateway/test/emqx_gateway_SUITE.erl index f611988a0..9f8c7911c 100644 --- a/apps/emqx_gateway/test/emqx_gateway_SUITE.erl +++ b/apps/emqx_gateway/test/emqx_gateway_SUITE.erl @@ -33,6 +33,7 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Conf) -> emqx_config:erase(gateway), + emqx_gateway_test_utils:load_all_gateway_apps(), emqx_common_test_helpers:load_config(emqx_gateway_schema, ?CONF_DEFAULT), emqx_common_test_helpers:start_apps([emqx_authn, emqx_gateway]), Conf. @@ -67,11 +68,11 @@ end_per_testcase(_TestCase, _Config) -> t_registered_gateway(_) -> [ - {coap, #{cbkmod := emqx_coap_impl}}, - {exproto, #{cbkmod := emqx_exproto_impl}}, - {lwm2m, #{cbkmod := emqx_lwm2m_impl}}, - {mqttsn, #{cbkmod := emqx_sn_impl}}, - {stomp, #{cbkmod := emqx_stomp_impl}} + {coap, #{cbkmod := emqx_gateway_coap}}, + {exproto, #{cbkmod := emqx_gateway_exproto}}, + {lwm2m, #{cbkmod := emqx_gateway_lwm2m}}, + {mqttsn, #{cbkmod := emqx_gateway_mqttsn}}, + {stomp, #{cbkmod := emqx_gateway_stomp}} ] = emqx_gateway:registered_gateway(). t_load_unload_list_lookup(_) -> @@ -187,7 +188,14 @@ read_lwm2m_conf(DataDir) -> Conf. setup_fake_usage_data(Lwm2mDataDir) -> - XmlDir = emqx_common_test_helpers:deps_path(emqx_gateway, "src/lwm2m/lwm2m_xml"), + XmlDir = filename:join( + [ + emqx_common_test_helpers:proj_root(), + "apps", + "emqx_gateway_lwm2m", + "lwm2m_xml" + ] + ), Lwm2mConf = read_lwm2m_conf(Lwm2mDataDir), ok = emqx_common_test_helpers:load_config(emqx_gateway_schema, Lwm2mConf), emqx_config:put([gateway, lwm2m, xml_dir], XmlDir), diff --git a/apps/emqx_gateway/test/emqx_gateway_api_SUITE.erl b/apps/emqx_gateway/test/emqx_gateway_api_SUITE.erl index 7aac45d61..fb648062a 100644 --- a/apps/emqx_gateway/test/emqx_gateway_api_SUITE.erl +++ b/apps/emqx_gateway/test/emqx_gateway_api_SUITE.erl @@ -46,6 +46,7 @@ all() -> emqx_common_test_helpers:all(?MODULE). 
init_per_suite(Conf) -> application:load(emqx), + emqx_gateway_test_utils:load_all_gateway_apps(), emqx_config:delete_override_conf_files(), emqx_config:erase(gateway), emqx_common_test_helpers:load_config(emqx_gateway_schema, ?CONF_DEFAULT), @@ -163,7 +164,7 @@ t_gateway_stomp(_) -> {204, _} = request(put, "/gateways/stomp", GwConf), {200, ConfResp} = request(get, "/gateways/stomp"), assert_confs(GwConf, ConfResp), - GwConf2 = emqx_map_lib:deep_merge(GwConf, #{frame => #{max_headers => 10}}), + GwConf2 = emqx_utils_maps:deep_merge(GwConf, #{frame => #{max_headers => 10}}), {204, _} = request(put, "/gateways/stomp", maps:without([name, listeners], GwConf2)), {200, ConfResp2} = request(get, "/gateways/stomp"), assert_confs(GwConf2, ConfResp2), @@ -185,7 +186,7 @@ t_gateway_mqttsn(_) -> {204, _} = request(put, "/gateways/mqttsn", GwConf), {200, ConfResp} = request(get, "/gateways/mqttsn"), assert_confs(GwConf, ConfResp), - GwConf2 = emqx_map_lib:deep_merge(GwConf, #{predefined => []}), + GwConf2 = emqx_utils_maps:deep_merge(GwConf, #{predefined => []}), {204, _} = request(put, "/gateways/mqttsn", maps:without([name, listeners], GwConf2)), {200, ConfResp2} = request(get, "/gateways/mqttsn"), assert_confs(GwConf2, ConfResp2), @@ -205,7 +206,7 @@ t_gateway_coap(_) -> {204, _} = request(put, "/gateways/coap", GwConf), {200, ConfResp} = request(get, "/gateways/coap"), assert_confs(GwConf, ConfResp), - GwConf2 = emqx_map_lib:deep_merge(GwConf, #{heartbeat => <<"10s">>}), + GwConf2 = emqx_utils_maps:deep_merge(GwConf, #{heartbeat => <<"10s">>}), {204, _} = request(put, "/gateways/coap", maps:without([name, listeners], GwConf2)), {200, ConfResp2} = request(get, "/gateways/coap"), assert_confs(GwConf2, ConfResp2), @@ -214,9 +215,17 @@ t_gateway_coap(_) -> t_gateway_lwm2m(_) -> {200, Gw} = request(get, "/gateways/lwm2m"), assert_gw_unloaded(Gw), + XmlDir = filename:join( + [ + emqx_common_test_helpers:proj_root(), + "apps", + "emqx_gateway_lwm2m", + "lwm2m_xml" + ] + ), GwConf = #{ name => <<"lwm2m">>, - xml_dir => <<"../../lib/emqx_gateway/src/lwm2m/lwm2m_xml">>, + xml_dir => list_to_binary(XmlDir), lifetime_min => <<"1s">>, lifetime_max => <<"1000s">>, qmode_time_window => <<"30s">>, @@ -235,7 +244,7 @@ t_gateway_lwm2m(_) -> {204, _} = request(put, "/gateways/lwm2m", GwConf), {200, ConfResp} = request(get, "/gateways/lwm2m"), assert_confs(GwConf, ConfResp), - GwConf2 = emqx_map_lib:deep_merge(GwConf, #{qmode_time_window => <<"10s">>}), + GwConf2 = emqx_utils_maps:deep_merge(GwConf, #{qmode_time_window => <<"10s">>}), {204, _} = request(put, "/gateways/lwm2m", maps:without([name, listeners], GwConf2)), {200, ConfResp2} = request(get, "/gateways/lwm2m"), assert_confs(GwConf2, ConfResp2), @@ -255,7 +264,7 @@ t_gateway_exproto(_) -> {204, _} = request(put, "/gateways/exproto", GwConf), {200, ConfResp} = request(get, "/gateways/exproto"), assert_confs(GwConf, ConfResp), - GwConf2 = emqx_map_lib:deep_merge(GwConf, #{server => #{bind => <<"9200">>}}), + GwConf2 = emqx_utils_maps:deep_merge(GwConf, #{server => #{bind => <<"9200">>}}), {204, _} = request(put, "/gateways/exproto", maps:without([name, listeners], GwConf2)), {200, ConfResp2} = request(get, "/gateways/exproto"), assert_confs(GwConf2, ConfResp2), @@ -284,7 +293,7 @@ t_gateway_exproto_with_ssl(_) -> {204, _} = request(put, "/gateways/exproto", GwConf), {200, ConfResp} = request(get, "/gateways/exproto"), assert_confs(GwConf, ConfResp), - GwConf2 = emqx_map_lib:deep_merge(GwConf, #{ + GwConf2 = emqx_utils_maps:deep_merge(GwConf, #{ server => #{ 
bind => <<"9200">>, ssl_options => SslCliOpts diff --git a/apps/emqx_gateway/test/emqx_gateway_auth_ct.erl b/apps/emqx_gateway/test/emqx_gateway_auth_ct.erl index d75bf80eb..0ed66a38d 100644 --- a/apps/emqx_gateway/test/emqx_gateway_auth_ct.erl +++ b/apps/emqx_gateway/test/emqx_gateway_auth_ct.erl @@ -153,7 +153,7 @@ on_start_auth(authn_http) -> Handler = fun(Req0, State) -> ct:pal("Authn Req:~p~nState:~p~n", [Req0, State]), Headers = #{<<"content-type">> => <<"application/json">>}, - Response = jiffy:encode(#{result => allow, is_superuser => false}), + Response = emqx_utils_json:encode(#{result => allow, is_superuser => false}), case cowboy_req:match_qs([username, password], Req0) of #{ username := <<"admin">>, diff --git a/apps/emqx_gateway/test/emqx_gateway_authn_SUITE.erl b/apps/emqx_gateway/test/emqx_gateway_authn_SUITE.erl index 208262f22..149e6acd6 100644 --- a/apps/emqx_gateway/test/emqx_gateway_authn_SUITE.erl +++ b/apps/emqx_gateway/test/emqx_gateway_authn_SUITE.erl @@ -66,6 +66,7 @@ end_per_group(AuthName, Conf) -> Conf. init_per_suite(Config) -> + emqx_gateway_test_utils:load_all_gateway_apps(), emqx_config:erase(gateway), init_gateway_conf(), emqx_mgmt_api_test_util:init_suite([emqx_conf, emqx_authn, emqx_gateway]), @@ -77,7 +78,7 @@ init_per_suite(Config) -> end_per_suite(Config) -> emqx_gateway_auth_ct:stop(), emqx_config:erase(gateway), - emqx_mgmt_api_test_util:end_suite([cowboy, emqx_authn, emqx_gateway]), + emqx_mgmt_api_test_util:end_suite([cowboy, emqx_conf, emqx_authn, emqx_gateway]), Config. init_per_testcase(_Case, Config) -> @@ -265,7 +266,7 @@ t_case_exproto(_) -> Mod:send(Sock, ConnBin), {ok, Recv} = Mod:recv(Sock, 5000), - C = ?FUNCTOR(Bin, emqx_json:decode(Bin, [return_maps])), + C = ?FUNCTOR(Bin, emqx_utils_json:decode(Bin, [return_maps])), ?assertEqual(C(Expect), C(Recv)) end ) @@ -281,7 +282,7 @@ t_case_exproto(_) -> disable_authn(GwName, Type, Name) -> RawCfg = emqx_conf:get_raw([gateway, GwName], #{}), - ListenerCfg = emqx_map_lib:deep_get( + ListenerCfg = emqx_utils_maps:deep_get( [<<"listeners">>, atom_to_binary(Type), atom_to_binary(Name)], RawCfg ), {ok, _} = emqx_gateway_conf:update_listener(GwName, {Type, Name}, ListenerCfg#{ diff --git a/apps/emqx_gateway/test/emqx_gateway_authz_SUITE.erl b/apps/emqx_gateway/test/emqx_gateway_authz_SUITE.erl index 2e44415aa..c62e840df 100644 --- a/apps/emqx_gateway/test/emqx_gateway_authz_SUITE.erl +++ b/apps/emqx_gateway/test/emqx_gateway_authz_SUITE.erl @@ -66,6 +66,7 @@ end_per_group(AuthName, Conf) -> init_per_suite(Config) -> emqx_config:erase(gateway), + emqx_gateway_test_utils:load_all_gateway_apps(), init_gateway_conf(), meck:new(emqx_authz_file, [non_strict, passthrough, no_history, no_link]), meck:expect(emqx_authz_file, create, fun(S) -> S end), @@ -164,7 +165,7 @@ t_case_lwm2m(_) -> Test("lwm2m", fun(SubTopic, Msg) -> ?assertEqual(true, lists:member(SubTopic, test_mqtt_broker:get_subscrbied_topics())), Payload = emqx_message:payload(Msg), - Cmd = emqx_json:decode(Payload, [return_maps]), + Cmd = emqx_utils_json:decode(Payload, [return_maps]), ?assertMatch(#{<<"msgType">> := <<"register">>, <<"data">> := _}, Cmd) end), @@ -225,7 +226,7 @@ t_case_sn_subscribe(_) -> ) end, Sub(<<"/subscribe">>, fun(Data) -> - {ok, Msg, _, _} = emqx_sn_frame:parse(Data, undefined), + {ok, Msg, _, _} = emqx_mqttsn_frame:parse(Data, undefined), ?assertMatch({mqtt_sn_message, _, {_, 3, 0, Payload}}, Msg) end), Sub(<<"/badsubscribe">>, fun(Data) -> @@ -349,7 +350,7 @@ t_case_exproto_publish(_) -> Mod:send(Sock, ConnBin), {ok, 
Recv} = Mod:recv(Sock, 5000), - C = ?FUNCTOR(Bin, emqx_json:decode(Bin, [return_maps])), + C = ?FUNCTOR(Bin, emqx_utils_json:decode(Bin, [return_maps])), ?assertEqual(C(SvrMod:frame_connack(0)), C(Recv)), Send = fun() -> @@ -386,7 +387,7 @@ t_case_exproto_subscribe(_) -> Mod:send(Sock, ConnBin), {ok, Recv} = Mod:recv(Sock, WaitTime), - C = ?FUNCTOR(Bin, emqx_json:decode(Bin, [return_maps])), + C = ?FUNCTOR(Bin, emqx_utils_json:decode(Bin, [return_maps])), ?assertEqual(C(SvrMod:frame_connack(0)), C(Recv)), SubBin = SvrMod:frame_subscribe(Topic, 0), diff --git a/apps/emqx_gateway/test/emqx_gateway_cli_SUITE.erl b/apps/emqx_gateway/test/emqx_gateway_cli_SUITE.erl index c66785e00..641528eda 100644 --- a/apps/emqx_gateway/test/emqx_gateway_cli_SUITE.erl +++ b/apps/emqx_gateway/test/emqx_gateway_cli_SUITE.erl @@ -62,6 +62,7 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Conf) -> emqx_config:erase(gateway), + emqx_gateway_test_utils:load_all_gateway_apps(), emqx_common_test_helpers:load_config(emqx_gateway_schema, ?CONF_DEFAULT), emqx_mgmt_api_test_util:init_suite([emqx_conf, emqx_authn, emqx_gateway]), Conf. @@ -116,11 +117,11 @@ t_gateway_registry_usage(_) -> t_gateway_registry_list(_) -> emqx_gateway_cli:'gateway-registry'(["list"]), ?assertEqual( - "Registered Name: coap, Callback Module: emqx_coap_impl\n" - "Registered Name: exproto, Callback Module: emqx_exproto_impl\n" - "Registered Name: lwm2m, Callback Module: emqx_lwm2m_impl\n" - "Registered Name: mqttsn, Callback Module: emqx_sn_impl\n" - "Registered Name: stomp, Callback Module: emqx_stomp_impl\n", + "Registered Name: coap, Callback Module: emqx_gateway_coap\n" + "Registered Name: exproto, Callback Module: emqx_gateway_exproto\n" + "Registered Name: lwm2m, Callback Module: emqx_gateway_lwm2m\n" + "Registered Name: mqttsn, Callback Module: emqx_gateway_mqttsn\n" + "Registered Name: stomp, Callback Module: emqx_gateway_stomp\n", acc_print() ). diff --git a/apps/emqx_gateway/test/emqx_gateway_cm_SUITE.erl b/apps/emqx_gateway/test/emqx_gateway_cm_SUITE.erl index c5e8d9a92..8b0dacd75 100644 --- a/apps/emqx_gateway/test/emqx_gateway_cm_SUITE.erl +++ b/apps/emqx_gateway/test/emqx_gateway_cm_SUITE.erl @@ -34,6 +34,7 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Conf) -> emqx_config:erase(gateway), + emqx_gateway_test_utils:load_all_gateway_apps(), emqx_common_test_helpers:load_config(emqx_gateway_schema, ?CONF_DEFAULT), emqx_common_test_helpers:start_apps([]), diff --git a/apps/emqx_gateway/test/emqx_gateway_cm_registry_SUITE.erl b/apps/emqx_gateway/test/emqx_gateway_cm_registry_SUITE.erl index 77f4058e7..35e32d3da 100644 --- a/apps/emqx_gateway/test/emqx_gateway_cm_registry_SUITE.erl +++ b/apps/emqx_gateway/test/emqx_gateway_cm_registry_SUITE.erl @@ -34,6 +34,7 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Conf) -> emqx_config:erase(gateway), + emqx_gateway_test_utils:load_all_gateway_apps(), emqx_common_test_helpers:load_config(emqx_gateway_schema, ?CONF_DEFAULT), emqx_common_test_helpers:start_apps([]), Conf. diff --git a/apps/emqx_gateway/test/emqx_gateway_conf_SUITE.erl b/apps/emqx_gateway/test/emqx_gateway_conf_SUITE.erl index 6f6c2c45a..ce709efc3 100644 --- a/apps/emqx_gateway/test/emqx_gateway_conf_SUITE.erl +++ b/apps/emqx_gateway/test/emqx_gateway_conf_SUITE.erl @@ -37,6 +37,7 @@ all() -> emqx_common_test_helpers:all(?MODULE). 
init_per_suite(Conf) -> + emqx_gateway_test_utils:load_all_gateway_apps(), emqx_common_test_helpers:load_config(emqx_gateway_schema, <<"gateway {}">>), emqx_common_test_helpers:start_apps([emqx_conf, emqx_authn, emqx_gateway]), Conf. @@ -273,7 +274,7 @@ t_load_unload_gateway(_) -> ?assertException( error, - {config_not_found, [gateway, stomp]}, + {config_not_found, [<<"gateway">>, stomp]}, emqx:get_raw_config([gateway, stomp]) ), ok. @@ -306,7 +307,7 @@ t_load_remove_authn(_) -> ?assertException( error, - {config_not_found, [gateway, stomp, authentication]}, + {config_not_found, [<<"gateway">>, stomp, authentication]}, emqx:get_raw_config([gateway, stomp, authentication]) ), ok. @@ -351,7 +352,7 @@ t_load_remove_listeners(_) -> ?assertException( error, - {config_not_found, [gateway, stomp, listeners, tcp, default]}, + {config_not_found, [<<"gateway">>, stomp, listeners, tcp, default]}, emqx:get_raw_config([gateway, stomp, listeners, tcp, default]) ), ok. @@ -400,7 +401,7 @@ t_load_remove_listener_authn(_) -> Path = [gateway, stomp, listeners, tcp, default, authentication], ?assertException( error, - {config_not_found, Path}, + {config_not_found, [<<"gateway">>, stomp, listeners, tcp, default, authentication]}, emqx:get_raw_config(Path) ), ok. @@ -412,7 +413,7 @@ t_load_gateway_with_certs_content(_) -> ), {ok, _} = emqx_gateway_conf:load_gateway(<<"stomp">>, StompConf), assert_confs(StompConf, emqx:get_raw_config([gateway, stomp])), - SslConf = emqx_map_lib:deep_get( + SslConf = emqx_utils_maps:deep_get( [<<"listeners">>, <<"ssl">>, <<"default">>, <<"ssl_options">>], emqx:get_raw_config([gateway, stomp]) ), @@ -420,7 +421,7 @@ t_load_gateway_with_certs_content(_) -> assert_ssl_confs_files_deleted(SslConf), ?assertException( error, - {config_not_found, [gateway, stomp]}, + {config_not_found, [<<"gateway">>, stomp]}, emqx:get_raw_config([gateway, stomp]) ), ok. @@ -435,7 +436,7 @@ t_load_gateway_with_certs_content(_) -> % ), % {ok, _} = emqx_gateway_conf:load_gateway(<<"stomp">>, StompConf), % assert_confs(StompConf, emqx:get_raw_config([gateway, stomp])), -% SslConf = emqx_map_lib:deep_get( +% SslConf = emqx_utils_maps:deep_get( % [<<"listeners">>, <<"ssl">>, <<"default">>, <<"ssl_options">>], % emqx:get_raw_config([gateway, stomp]) % ), @@ -470,7 +471,7 @@ t_add_listener_with_certs_content(_) -> emqx:get_raw_config([gateway, stomp]) ), - SslConf = emqx_map_lib:deep_get( + SslConf = emqx_utils_maps:deep_get( [<<"listeners">>, <<"ssl">>, <<"default">>, <<"ssl_options">>], emqx:get_raw_config([gateway, stomp]) ), @@ -488,7 +489,7 @@ t_add_listener_with_certs_content(_) -> ?assertException( error, - {config_not_found, [gateway, stomp, listeners, ssl, default]}, + {config_not_found, [<<"gateway">>, stomp, listeners, ssl, default]}, emqx:get_raw_config([gateway, stomp, listeners, ssl, default]) ), ok. diff --git a/apps/emqx_gateway/test/emqx_gateway_ctx_SUITE.erl b/apps/emqx_gateway/test/emqx_gateway_ctx_SUITE.erl index 0aa3172f1..35ce5fb31 100644 --- a/apps/emqx_gateway/test/emqx_gateway_ctx_SUITE.erl +++ b/apps/emqx_gateway/test/emqx_gateway_ctx_SUITE.erl @@ -28,6 +28,7 @@ all() -> emqx_common_test_helpers:all(?MODULE). 
init_per_suite(Conf) -> + emqx_gateway_test_utils:load_all_gateway_apps(), ok = meck:new(emqx_access_control, [passthrough, no_history, no_link]), ok = meck:expect( emqx_access_control, diff --git a/apps/emqx_gateway/test/emqx_gateway_metrics_SUITE.erl b/apps/emqx_gateway/test/emqx_gateway_metrics_SUITE.erl index 211315e6c..b82e049d3 100644 --- a/apps/emqx_gateway/test/emqx_gateway_metrics_SUITE.erl +++ b/apps/emqx_gateway/test/emqx_gateway_metrics_SUITE.erl @@ -33,6 +33,7 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Conf) -> emqx_config:erase(gateway), + emqx_gateway_test_utils:load_all_gateway_apps(), emqx_common_test_helpers:load_config(emqx_gateway_schema, ?CONF_DEFAULT), emqx_common_test_helpers:start_apps([]), Conf. diff --git a/apps/emqx_gateway/test/emqx_gateway_registry_SUITE.erl b/apps/emqx_gateway/test/emqx_gateway_registry_SUITE.erl index cc5f7bf37..a51621688 100644 --- a/apps/emqx_gateway/test/emqx_gateway_registry_SUITE.erl +++ b/apps/emqx_gateway/test/emqx_gateway_registry_SUITE.erl @@ -37,6 +37,7 @@ all() -> emqx_common_test_helpers:all(?MODULE). %%-------------------------------------------------------------------- init_per_suite(Cfg) -> + emqx_gateway_test_utils:load_all_gateway_apps(), ok = emqx_common_test_helpers:load_config(emqx_gateway_schema, ?CONF_DEFAULT), emqx_common_test_helpers:start_apps([emqx_authn, emqx_gateway]), Cfg. diff --git a/apps/emqx_gateway/test/emqx_gateway_test_utils.erl b/apps/emqx_gateway/test/emqx_gateway_test_utils.erl index a6791a36b..bb378ef10 100644 --- a/apps/emqx_gateway/test/emqx_gateway_test_utils.erl +++ b/apps/emqx_gateway/test/emqx_gateway_test_utils.erl @@ -101,13 +101,17 @@ assert_fields_exist(Ks, Map) -> end, Ks ). +load_all_gateway_apps() -> + application:load(emqx_gateway_stomp), + application:load(emqx_gateway_mqttsn), + application:load(emqx_gateway_coap), + application:load(emqx_gateway_lwm2m), + application:load(emqx_gateway_exproto). %%-------------------------------------------------------------------- %% http -define(http_api_host, "http://127.0.0.1:18083/api/v5"). --define(default_user, "admin"). --define(default_pass, "public"). request(delete = Mth, Path) -> do_request(Mth, req(Path, [])); @@ -155,8 +159,8 @@ do_request(Mth, Req) -> <<>> -> #{}; _ -> - emqx_map_lib:unsafe_atom_key_map( - emqx_json:decode(Resp, [return_maps]) + emqx_utils_maps:unsafe_atom_key_map( + emqx_utils_json:decode(Resp, [return_maps]) ) end, {Code, NResp}; @@ -168,7 +172,7 @@ req(Path, Qs) -> {url(Path, Qs), auth([])}. req(Path, Qs, Body) -> - {url(Path, Qs), auth([]), "application/json", emqx_json:encode(Body)}. + {url(Path, Qs), auth([]), "application/json", emqx_utils_json:encode(Body)}. url(Path, []) -> lists:concat([?http_api_host, Path]); @@ -176,5 +180,4 @@ url(Path, Qs) -> lists:concat([?http_api_host, Path, "?", binary_to_list(cow_qs:qs(Qs))]). auth(Headers) -> - Token = base64:encode(?default_user ++ ":" ++ ?default_pass), - [{"Authorization", "Basic " ++ binary_to_list(Token)}] ++ Headers. + [emqx_mgmt_api_test_util:auth_header_() | Headers]. 
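Since discovery walks `application:loaded_applications()`, the split-out gateway apps are invisible to a test node until they are loaded, which is what `load_all_gateway_apps/0` above does. A hypothetical shell session showing the intended effect; the return shapes are assumptions based on the `-gateway` attributes declared elsewhere in this diff:

```erlang
%% Hypothetical shell session, for illustration.
1> emqx_gateway_test_utils:load_all_gateway_apps().
ok  %% the value of the last application:load/1 call
2> emqx_gateway_utils:find_gateway_definitions().
[#{name => coap,
   callback_module => emqx_gateway_coap,
   config_schema_module => emqx_coap_schema},
 ...]
```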
diff --git a/apps/emqx_gateway_coap/.gitignore b/apps/emqx_gateway_coap/.gitignore
new file mode 100644
index 000000000..f1c455451
--- /dev/null
+++ b/apps/emqx_gateway_coap/.gitignore
@@ -0,0 +1,19 @@
+.rebar3
+_*
+.eunit
+*.o
+*.beam
+*.plt
+*.swp
+*.swo
+.erlang.cookie
+ebin
+log
+erl_crash.dump
+.rebar
+logs
+_build
+.idea
+*.iml
+rebar3.crashdump
+*~
diff --git a/apps/emqx_gateway_coap/README.md b/apps/emqx_gateway_coap/README.md
new file mode 100644
index 000000000..405366e89
--- /dev/null
+++ b/apps/emqx_gateway_coap/README.md
@@ -0,0 +1,31 @@
+# emqx_coap
+
+The CoAP gateway implements publishing, subscribing, and receiving messages as
+specified by [Publish-Subscribe Broker for the CoAP](https://datatracker.ietf.org/doc/html/draft-ietf-core-coap-pubsub-09).
+
+## Quick Start
+
+In EMQX 5.0, CoAP gateways can be configured and enabled through the Dashboard.
+
+It can also be enabled via the HTTP API or emqx.conf, e.g., in emqx.conf:
+
+```properties
+gateway.coap {
+
+  mountpoint = "coap/"
+
+  connection_required = false
+
+  listeners.udp.default {
+    bind = "5683"
+    max_connections = 1024000
+    max_conn_rate = 1000
+  }
+}
+```
+
+> Note:
+> Configuring the gateway via emqx.conf requires changes on a per-node basis,
+> but configuring it via the Dashboard or the HTTP API will take effect across the cluster.
+
+More documentation: [CoAP Gateway](https://www.emqx.io/docs/en/v5.0/gateway/coap.html)
diff --git a/apps/emqx_gateway/src/coap/doc/flow.png b/apps/emqx_gateway_coap/doc/flow.png
similarity index 100%
rename from apps/emqx_gateway/src/coap/doc/flow.png
rename to apps/emqx_gateway_coap/doc/flow.png
diff --git a/apps/emqx_gateway/src/coap/doc/shared_state.png b/apps/emqx_gateway_coap/doc/shared_state.png
similarity index 100%
rename from apps/emqx_gateway/src/coap/doc/shared_state.png
rename to apps/emqx_gateway_coap/doc/shared_state.png
diff --git a/apps/emqx_gateway/src/coap/doc/transport.png b/apps/emqx_gateway_coap/doc/transport.png
similarity index 100%
rename from apps/emqx_gateway/src/coap/doc/transport.png
rename to apps/emqx_gateway_coap/doc/transport.png
diff --git a/apps/emqx_gateway/src/coap/include/emqx_coap.hrl b/apps/emqx_gateway_coap/include/emqx_coap.hrl
similarity index 100%
rename from apps/emqx_gateway/src/coap/include/emqx_coap.hrl
rename to apps/emqx_gateway_coap/include/emqx_coap.hrl
diff --git a/apps/emqx_gateway_coap/rebar.config b/apps/emqx_gateway_coap/rebar.config
new file mode 100644
index 000000000..3b070a72a
--- /dev/null
+++ b/apps/emqx_gateway_coap/rebar.config
@@ -0,0 +1,6 @@
+{erl_opts, [debug_info]}.
+{deps, [
+    {emqx, {path, "../emqx"}},
+    {emqx_utils, {path, "../emqx_utils"}},
+    {emqx_gateway, {path, "../emqx_gateway"}}
+]}.
diff --git a/apps/emqx_gateway/src/coap/emqx_coap_api.erl b/apps/emqx_gateway_coap/src/emqx_coap_api.erl
similarity index 98%
rename from apps/emqx_gateway/src/coap/emqx_coap_api.erl
rename to apps/emqx_gateway_coap/src/emqx_coap_api.erl
index 0f4c7a053..b4fce5473 100644
--- a/apps/emqx_gateway/src/coap/emqx_coap_api.erl
+++ b/apps/emqx_gateway_coap/src/emqx_coap_api.erl
@@ -18,10 +18,10 @@
 
 -behaviour(minirest_api).
 
+-include("emqx_coap.hrl").
 -include_lib("hocon/include/hoconsc.hrl").
 -include_lib("typerefl/include/types.hrl").
 -include_lib("emqx/include/logger.hrl").
--include("src/coap/include/emqx_coap.hrl").
 
 %% API
 -export([api_spec/0, paths/0, schema/1, namespace/0]).
@@ -34,9 +34,12 @@
 -import(hoconsc, [mk/2, enum/1]).
 -import(emqx_dashboard_swagger, [error_codes/2]).
 
+-elvis([{elvis_style, atom_naming_convention, disable}]).
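The new README above points at the HTTP API as the cluster-wide way to enable the gateway; the API suites in this diff exercise that path as `PUT /gateways/coap`. A sketch of the call shape using the suite's own `request/3` helper — the field values here are illustrative rather than the suite's exact fixture, and listeners are omitted for brevity:

```erlang
%% Sketch based on emqx_gateway_api_SUITE; request/3 is the test helper
%% shown earlier in this diff, not a public API. Field names follow
%% emqx_coap_schema:fields(coap); the values are schema defaults.
GwConf = #{
    name => <<"coap">>,
    heartbeat => <<"30s">>,
    connection_required => false
},
{204, _} = request(put, "/gateways/coap", GwConf),
{200, ConfResp} = request(get, "/gateways/coap").
```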
+ %%-------------------------------------------------------------------- %% API %%-------------------------------------------------------------------- + namespace() -> "gateway_coap". api_spec() -> diff --git a/apps/emqx_gateway/src/coap/emqx_coap_channel.erl b/apps/emqx_gateway_coap/src/emqx_coap_channel.erl similarity index 99% rename from apps/emqx_gateway/src/coap/emqx_coap_channel.erl rename to apps/emqx_gateway_coap/src/emqx_coap_channel.erl index d6b8594b1..b90fd630d 100644 --- a/apps/emqx_gateway/src/coap/emqx_coap_channel.erl +++ b/apps/emqx_gateway_coap/src/emqx_coap_channel.erl @@ -45,8 +45,8 @@ -export_type([channel/0]). +-include("emqx_coap.hrl"). -include_lib("emqx/include/logger.hrl"). --include("src/coap/include/emqx_coap.hrl"). -include_lib("emqx/include/emqx_authentication.hrl"). -define(AUTHN, ?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME_ATOM). @@ -111,7 +111,7 @@ info(conn_state, #channel{conn_state = ConnState}) -> info(clientinfo, #channel{clientinfo = ClientInfo}) -> ClientInfo; info(session, #channel{session = Session}) -> - emqx_misc:maybe_apply(fun emqx_coap_session:info/1, Session); + emqx_utils:maybe_apply(fun emqx_coap_session:info/1, Session); info(clientid, #channel{clientinfo = #{clientid := ClientId}}) -> ClientId; info(ctx, #channel{ctx = Ctx}) -> @@ -366,7 +366,7 @@ ensure_timer(Name, Time, Msg, #channel{timers = Timers} = Channel) -> end. make_timer(Name, Time, Msg, Channel = #channel{timers = Timers}) -> - TRef = emqx_misc:start_timer(Time, Msg), + TRef = emqx_utils:start_timer(Time, Msg), Channel#channel{timers = Timers#{Name => TRef}}. ensure_keepalive_timer(Channel) -> @@ -710,7 +710,7 @@ process_connection( ) -> Queries = emqx_coap_message:get_option(uri_query, Req), case - emqx_misc:pipeline( + emqx_utils:pipeline( [ fun enrich_conninfo/2, fun run_conn_hooks/2, diff --git a/apps/emqx_gateway/src/coap/emqx_coap_frame.erl b/apps/emqx_gateway_coap/src/emqx_coap_frame.erl similarity index 99% rename from apps/emqx_gateway/src/coap/emqx_coap_frame.erl rename to apps/emqx_gateway_coap/src/emqx_coap_frame.erl index 4d2479d75..535d07a94 100644 --- a/apps/emqx_gateway/src/coap/emqx_coap_frame.erl +++ b/apps/emqx_gateway_coap/src/emqx_coap_frame.erl @@ -29,7 +29,7 @@ is_message/1 ]). --include("src/coap/include/emqx_coap.hrl"). +-include("emqx_coap.hrl"). -include_lib("emqx/include/types.hrl"). -define(VERSION, 1). @@ -55,6 +55,8 @@ -define(OPTION_PROXY_SCHEME, 39). -define(OPTION_SIZE1, 60). +-elvis([{elvis_style, no_if_expression, disable}]). + %%-------------------------------------------------------------------- %% API %%-------------------------------------------------------------------- diff --git a/apps/emqx_gateway/src/coap/emqx_coap_medium.erl b/apps/emqx_gateway_coap/src/emqx_coap_medium.erl similarity index 98% rename from apps/emqx_gateway/src/coap/emqx_coap_medium.erl rename to apps/emqx_gateway_coap/src/emqx_coap_medium.erl index 8f5028f25..b6bd8e764 100644 --- a/apps/emqx_gateway/src/coap/emqx_coap_medium.erl +++ b/apps/emqx_gateway_coap/src/emqx_coap_medium.erl @@ -20,7 +20,7 @@ -module(emqx_coap_medium). --include("src/coap/include/emqx_coap.hrl"). +-include("emqx_coap.hrl"). 
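One pattern behind many of these renames: once a gateway lives in its own OTP application, its header file moves into that app's `include/` directory, so the umbrella-relative include path collapses to a plain one. In miniature (both forms taken from this diff):

```erlang
%% Before the split: header addressed relative to the umbrella emqx_gateway app.
-include("src/coap/include/emqx_coap.hrl").

%% After the split: resolved against apps/emqx_gateway_coap/include/.
-include("emqx_coap.hrl").
```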
%% API -export([ diff --git a/apps/emqx_gateway/src/coap/emqx_coap_message.erl b/apps/emqx_gateway_coap/src/emqx_coap_message.erl similarity index 99% rename from apps/emqx_gateway/src/coap/emqx_coap_message.erl rename to apps/emqx_gateway_coap/src/emqx_coap_message.erl index 99c9e0840..ee17231a7 100644 --- a/apps/emqx_gateway/src/coap/emqx_coap_message.erl +++ b/apps/emqx_gateway_coap/src/emqx_coap_message.erl @@ -43,7 +43,7 @@ set_payload_block/3, set_payload_block/4 ]). --include("src/coap/include/emqx_coap.hrl"). +-include("emqx_coap.hrl"). request(Type, Method) -> request(Type, Method, <<>>, []). diff --git a/apps/emqx_gateway/src/coap/handler/emqx_coap_mqtt_handler.erl b/apps/emqx_gateway_coap/src/emqx_coap_mqtt_handler.erl similarity index 96% rename from apps/emqx_gateway/src/coap/handler/emqx_coap_mqtt_handler.erl rename to apps/emqx_gateway_coap/src/emqx_coap_mqtt_handler.erl index 59825a745..4bcf71b1a 100644 --- a/apps/emqx_gateway/src/coap/handler/emqx_coap_mqtt_handler.erl +++ b/apps/emqx_gateway_coap/src/emqx_coap_mqtt_handler.erl @@ -16,7 +16,7 @@ -module(emqx_coap_mqtt_handler). --include("src/coap/include/emqx_coap.hrl"). +-include("emqx_coap.hrl"). -export([handle_request/4]). -import(emqx_coap_message, [response/2, response/3]). diff --git a/apps/emqx_gateway/src/coap/emqx_coap_observe_res.erl b/apps/emqx_gateway_coap/src/emqx_coap_observe_res.erl similarity index 100% rename from apps/emqx_gateway/src/coap/emqx_coap_observe_res.erl rename to apps/emqx_gateway_coap/src/emqx_coap_observe_res.erl diff --git a/apps/emqx_gateway/src/coap/handler/emqx_coap_pubsub_handler.erl b/apps/emqx_gateway_coap/src/emqx_coap_pubsub_handler.erl similarity index 99% rename from apps/emqx_gateway/src/coap/handler/emqx_coap_pubsub_handler.erl rename to apps/emqx_gateway_coap/src/emqx_coap_pubsub_handler.erl index 5e14ba9e4..da1f5e0ef 100644 --- a/apps/emqx_gateway/src/coap/handler/emqx_coap_pubsub_handler.erl +++ b/apps/emqx_gateway_coap/src/emqx_coap_pubsub_handler.erl @@ -18,7 +18,7 @@ -module(emqx_coap_pubsub_handler). -include_lib("emqx/include/emqx_mqtt.hrl"). --include("src/coap/include/emqx_coap.hrl"). +-include("emqx_coap.hrl"). -export([handle_request/4]). diff --git a/apps/emqx_gateway_coap/src/emqx_coap_schema.erl b/apps/emqx_gateway_coap/src/emqx_coap_schema.erl new file mode 100644 index 000000000..b7ce88451 --- /dev/null +++ b/apps/emqx_gateway_coap/src/emqx_coap_schema.erl @@ -0,0 +1,95 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_coap_schema). + +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("typerefl/include/types.hrl"). + +-type duration() :: non_neg_integer(). + +-typerefl_from_string({duration/0, emqx_schema, to_duration}). + +-reflect_type([duration/0]). + +%% config schema provides +-export([fields/1, desc/1]). 
+ +fields(coap) -> + [ + {heartbeat, + sc( + duration(), + #{ + default => <<"30s">>, + desc => ?DESC(coap_heartbeat) + } + )}, + {connection_required, + sc( + boolean(), + #{ + default => false, + desc => ?DESC(coap_connection_required) + } + )}, + {notify_type, + sc( + hoconsc:enum([non, con, qos]), + #{ + default => qos, + desc => ?DESC(coap_notify_type) + } + )}, + {subscribe_qos, + sc( + hoconsc:enum([qos0, qos1, qos2, coap]), + #{ + default => coap, + desc => ?DESC(coap_subscribe_qos) + } + )}, + {publish_qos, + sc( + hoconsc:enum([qos0, qos1, qos2, coap]), + #{ + default => coap, + desc => ?DESC(coap_publish_qos) + } + )}, + {mountpoint, emqx_gateway_schema:mountpoint()}, + {listeners, + sc( + ref(emqx_gateway_schema, udp_listeners), + #{desc => ?DESC(udp_listeners)} + )} + ] ++ emqx_gateway_schema:gateway_common_options(). + +desc(coap) -> + "The CoAP protocol gateway provides EMQX with the access capability of the CoAP protocol.\n" + "It allows publishing, subscribing, and receiving messages to EMQX in accordance\n" + "with a certain defined CoAP message format."; +desc(_) -> + undefined. + +%%-------------------------------------------------------------------- +%% helpers + +sc(Type, Meta) -> + hoconsc:mk(Type, Meta). + +ref(Mod, Field) -> + hoconsc:ref(Mod, Field). diff --git a/apps/emqx_gateway/src/coap/emqx_coap_session.erl b/apps/emqx_gateway_coap/src/emqx_coap_session.erl similarity index 99% rename from apps/emqx_gateway/src/coap/emqx_coap_session.erl rename to apps/emqx_gateway_coap/src/emqx_coap_session.erl index 253f34d4d..5ae169675 100644 --- a/apps/emqx_gateway/src/coap/emqx_coap_session.erl +++ b/apps/emqx_gateway_coap/src/emqx_coap_session.erl @@ -15,10 +15,10 @@ %%-------------------------------------------------------------------- -module(emqx_coap_session). +-include("emqx_coap.hrl"). -include_lib("emqx/include/emqx.hrl"). -include_lib("emqx/include/emqx_mqtt.hrl"). -include_lib("emqx/include/logger.hrl"). --include("src/coap/include/emqx_coap.hrl"). %% API -export([ @@ -81,7 +81,7 @@ %%%------------------------------------------------------------------- -spec new() -> session(). new() -> - _ = emqx_misc:rand_seed(), + _ = emqx_utils:rand_seed(), #session{ transport_manager = emqx_coap_tm:new(), observe_manager = emqx_coap_observe_res:new_manager(), diff --git a/apps/emqx_gateway/src/coap/emqx_coap_tm.erl b/apps/emqx_gateway_coap/src/emqx_coap_tm.erl similarity index 97% rename from apps/emqx_gateway/src/coap/emqx_coap_tm.erl rename to apps/emqx_gateway_coap/src/emqx_coap_tm.erl index 1a0004f8c..68a7ae237 100644 --- a/apps/emqx_gateway/src/coap/emqx_coap_tm.erl +++ b/apps/emqx_gateway_coap/src/emqx_coap_tm.erl @@ -29,8 +29,8 @@ -export_type([manager/0, event_result/1]). +-include("emqx_coap.hrl"). -include_lib("emqx/include/logger.hrl"). --include("src/coap/include/emqx_coap.hrl"). -type direction() :: in | out. @@ -80,6 +80,8 @@ -import(emqx_coap_medium, [empty/0, iter/4, reset/1, proto_out/2]). +-elvis([{elvis_style, no_if_expression, disable}]). + %%-------------------------------------------------------------------- %% API %%-------------------------------------------------------------------- @@ -270,12 +272,12 @@ cancel_state_timer(#state_machine{timers = Timers} = Machine) -> undefined -> Machine; Ref -> - _ = emqx_misc:cancel_timer(Ref), + _ = emqx_utils:cancel_timer(Ref), Machine#state_machine{timers = maps:remove(state_timer, Timers)} end. 
 process_timer(SeqId, {Type, Interval, Msg}, Timers) ->
-    Ref = emqx_misc:start_timer(Interval, {state_machine, {SeqId, Type, Msg}}),
+    Ref = emqx_utils:start_timer(Interval, {state_machine, {SeqId, Type, Msg}}),
     Timers#{Type => Ref}.
 
 -spec delete_machine(manager_key(), manager()) -> manager().
@@ -291,7 +293,7 @@ delete_machine(Id, Manager) ->
         } ->
             lists:foreach(
                 fun({_, Ref}) ->
-                    emqx_misc:cancel_timer(Ref)
+                    emqx_utils:cancel_timer(Ref)
                 end,
                 maps:to_list(Timers)
             ),
@@ -401,9 +403,9 @@ alloc_message_id(MsgId, TM) ->
 
 next_message_id(MsgId) ->
     Next = MsgId + 1,
-    if
-        Next >= ?MAX_MESSAGE_ID ->
-            1;
+    case Next >= ?MAX_MESSAGE_ID of
         true ->
+            1;
+        false ->
             Next
     end.
diff --git a/apps/emqx_gateway/src/coap/emqx_coap_transport.erl b/apps/emqx_gateway_coap/src/emqx_coap_transport.erl
similarity index 96%
rename from apps/emqx_gateway/src/coap/emqx_coap_transport.erl
rename to apps/emqx_gateway_coap/src/emqx_coap_transport.erl
index 1e6c5238a..daea13ba8 100644
--- a/apps/emqx_gateway/src/coap/emqx_coap_transport.erl
+++ b/apps/emqx_gateway_coap/src/emqx_coap_transport.erl
@@ -16,8 +16,8 @@
 
 -module(emqx_coap_transport).
 
+-include("emqx_coap.hrl").
 -include_lib("emqx/include/logger.hrl").
--include("src/coap/include/emqx_coap.hrl").
 
 -define(ACK_TIMEOUT, 2000).
 -define(ACK_RANDOM_FACTOR, 1000).
@@ -60,6 +60,12 @@
     reply/2
 ]).
 
+-elvis([{elvis_style, atom_naming_convention, disable}]).
+-elvis([{elvis_style, no_if_expression, disable}]).
+
+%%--------------------------------------------------------------------
+%% APIs
+
 -spec new() -> transport().
 new() ->
     new(undefined).
@@ -113,7 +119,7 @@ idle(out, #coap_message{type = non} = Msg, _) ->
         timeouts => [{stop_timeout, ?NON_LIFETIME}]
     });
 idle(out, Msg, Transport) ->
-    _ = emqx_misc:rand_seed(),
+    _ = emqx_utils:rand_seed(),
     Timeout = ?ACK_TIMEOUT + rand:uniform(?ACK_RANDOM_FACTOR),
     out(Msg, #{
         next => wait_ack,
diff --git a/apps/emqx_gateway_coap/src/emqx_gateway_coap.app.src b/apps/emqx_gateway_coap/src/emqx_gateway_coap.app.src
new file mode 100644
index 000000000..decd13bef
--- /dev/null
+++ b/apps/emqx_gateway_coap/src/emqx_gateway_coap.app.src
@@ -0,0 +1,10 @@
+{application, emqx_gateway_coap, [
+    {description, "CoAP Gateway"},
+    {vsn, "0.1.0"},
+    {registered, []},
+    {applications, [kernel, stdlib, emqx, emqx_gateway]},
+    {env, []},
+    {modules, []},
+    {licenses, ["Apache 2.0"]},
+    {links, []}
+]}.
diff --git a/apps/emqx_gateway/src/coap/emqx_coap_impl.erl b/apps/emqx_gateway_coap/src/emqx_gateway_coap.erl
similarity index 86%
rename from apps/emqx_gateway/src/coap/emqx_coap_impl.erl
rename to apps/emqx_gateway_coap/src/emqx_gateway_coap.erl
index bebcef237..6c495fbdb 100644
--- a/apps/emqx_gateway/src/coap/emqx_coap_impl.erl
+++ b/apps/emqx_gateway_coap/src/emqx_gateway_coap.erl
@@ -14,13 +14,29 @@
 %% limitations under the License.
 %%--------------------------------------------------------------------
 
--module(emqx_coap_impl).
-
--behaviour(emqx_gateway_impl).
+%% @doc The CoAP Gateway implementation
+-module(emqx_gateway_coap).
 
 -include_lib("emqx/include/logger.hrl").
 -include_lib("emqx_gateway/include/emqx_gateway.hrl").
 
+%% define a gateway named coap
+-gateway(#{
+    name => coap,
+    callback_module => ?MODULE,
+    config_schema_module => emqx_coap_schema
+}).
+
+%% callback_module must implement the emqx_gateway_impl behaviour
+-behaviour(emqx_gateway_impl).
+
+%% callback for emqx_gateway_impl
+-export([
+    on_gateway_load/2,
+    on_gateway_update/3,
+    on_gateway_unload/2
+]).
+
 -import(
     emqx_gateway_utils,
     [
-%% APIs --export([ - reg/0, - unreg/0 -]). - --export([ - on_gateway_load/2, - on_gateway_update/3, - on_gateway_unload/2 -]). - %%-------------------------------------------------------------------- -%% APIs -%%-------------------------------------------------------------------- - -reg() -> - RegistryOptions = [{cbkmod, ?MODULE}], - emqx_gateway_registry:reg(coap, RegistryOptions). - -unreg() -> - emqx_gateway_registry:unreg(coap). - -%%-------------------------------------------------------------------- -%% emqx_gateway_registry callbacks +%% emqx_gateway_impl callbacks %%-------------------------------------------------------------------- on_gateway_load( diff --git a/apps/emqx_gateway/test/emqx_coap_SUITE.erl b/apps/emqx_gateway_coap/test/emqx_coap_SUITE.erl similarity index 99% rename from apps/emqx_gateway/test/emqx_coap_SUITE.erl rename to apps/emqx_gateway_coap/test/emqx_coap_SUITE.erl index db99c3df1..9b6f7ce1f 100644 --- a/apps/emqx_gateway/test/emqx_coap_SUITE.erl +++ b/apps/emqx_gateway_coap/test/emqx_coap_SUITE.erl @@ -56,6 +56,7 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> + application:load(emqx_gateway_coap), ok = emqx_common_test_helpers:load_config(emqx_gateway_schema, ?CONF_DEFAULT), emqx_mgmt_api_test_util:init_suite([emqx_authn, emqx_gateway]), ok = meck:new(emqx_access_control, [passthrough, no_history, no_link]), diff --git a/apps/emqx_gateway/test/emqx_coap_api_SUITE.erl b/apps/emqx_gateway_coap/test/emqx_coap_api_SUITE.erl similarity index 98% rename from apps/emqx_gateway/test/emqx_coap_api_SUITE.erl rename to apps/emqx_gateway_coap/test/emqx_coap_api_SUITE.erl index 6c1354bc0..cec09a016 100644 --- a/apps/emqx_gateway/test/emqx_coap_api_SUITE.erl +++ b/apps/emqx_gateway_coap/test/emqx_coap_api_SUITE.erl @@ -19,7 +19,7 @@ -compile(export_all). -compile(nowarn_export_all). --include("src/coap/include/emqx_coap.hrl"). +-include("emqx_coap.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("common_test/include/ct.hrl"). @@ -56,6 +56,7 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> + application:load(emqx_gateway_coap), ok = emqx_common_test_helpers:load_config(emqx_gateway_schema, ?CONF_DEFAULT), emqx_mgmt_api_test_util:init_suite([emqx_authn, emqx_gateway]), Config. 
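With the explicit `reg/0`/`unreg/0` pair removed here, registration is fully declarative: the `-gateway(#{...})` attribute replaces the old `emqx_gateway_registry:reg(coap, [{cbkmod, ?MODULE}])` call, and the callback module only needs to implement `emqx_gateway_impl`. A minimal skeleton under the new layout — the callback names and arities come from the exports in this diff, while the argument names and bodies are assumptions:

```erlang
%% Hypothetical skeleton of a gateway callback module.
-module(my_gateway_demo).
-behaviour(emqx_gateway_impl).

-export([on_gateway_load/2, on_gateway_update/3, on_gateway_unload/2]).

%% Start listeners etc. for a configured gateway instance.
on_gateway_load(_Gateway, _Ctx) ->
    erlang:error(not_implemented).

%% React to a configuration update of a running instance.
on_gateway_update(_NewConfig, _Gateway, _GwState) ->
    erlang:error(not_implemented).

%% Tear the instance down.
on_gateway_unload(_Gateway, _GwState) ->
    erlang:error(not_implemented).
```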
@@ -91,7 +92,7 @@ t_send_request_api(_) -> Req ), #{<<"token">> := RToken, <<"payload">> := RPayload} = - emqx_json:decode(Response, [return_maps]), + emqx_utils_json:decode(Response, [return_maps]), ?assertEqual(Token, RToken), ?assertEqual(Payload, RPayload) end, diff --git a/apps/emqx_gateway_exproto/.gitignore b/apps/emqx_gateway_exproto/.gitignore new file mode 100644 index 000000000..922b0f989 --- /dev/null +++ b/apps/emqx_gateway_exproto/.gitignore @@ -0,0 +1,24 @@ +.rebar3 +_* +.eunit +*.o +*.beam +*.plt +*.swp +*.swo +.erlang.cookie +ebin +log +erl_crash.dump +.rebar +logs +_build +.idea +*.iml +rebar3.crashdump +*~ +src/emqx_exproto_pb.erl +src/emqx_exproto_v_1_connection_adapter_bhvr.erl +src/emqx_exproto_v_1_connection_adapter_client.erl +src/emqx_exproto_v_1_connection_handler_bhvr.erl +src/emqx_exproto_v_1_connection_handler_client.erl diff --git a/apps/emqx_gateway/src/exproto/README.md b/apps/emqx_gateway_exproto/README.md similarity index 100% rename from apps/emqx_gateway/src/exproto/README.md rename to apps/emqx_gateway_exproto/README.md diff --git a/apps/emqx_gateway/src/exproto/include/emqx_exproto.hrl b/apps/emqx_gateway_exproto/include/emqx_exproto.hrl similarity index 100% rename from apps/emqx_gateway/src/exproto/include/emqx_exproto.hrl rename to apps/emqx_gateway_exproto/include/emqx_exproto.hrl diff --git a/apps/emqx_gateway/src/exproto/protos/exproto.proto b/apps/emqx_gateway_exproto/priv/protos/exproto.proto similarity index 100% rename from apps/emqx_gateway/src/exproto/protos/exproto.proto rename to apps/emqx_gateway_exproto/priv/protos/exproto.proto diff --git a/apps/emqx_gateway_exproto/rebar.config b/apps/emqx_gateway_exproto/rebar.config new file mode 100644 index 000000000..473fa9b67 --- /dev/null +++ b/apps/emqx_gateway_exproto/rebar.config @@ -0,0 +1,36 @@ +{erl_opts, [debug_info]}. +{deps, [ + {emqx, {path, "../emqx"}}, + {emqx_utils, {path, "../emqx_utils"}}, + {emqx_gateway, {path, "../emqx_gateway"}} +]}. + +{plugins, [ + {grpc_plugin, {git, "https://github.com/HJianBo/grpc_plugin", {tag, "v0.10.2"}}} +]}. + +{grpc, [ + {protos, ["priv/protos"]}, + {out_dir, "src"}, + {gpb_opts, [ + {module_name_prefix, "emqx_"}, + {module_name_suffix, "_pb"} + ]} +]}. + +{provider_hooks, [ + {pre, [ + {compile, {grpc, gen}}, + {clean, {grpc, clean}} + ]} +]}. + +{xref_ignores, [emqx_exproto_pb]}. + +{cover_excl_mods, [ + emqx_exproto_pb, + emqx_exproto_v_1_connection_adapter_client, + emqx_exproto_v_1_connection_adapter_bhvr, + emqx_exproto_v_1_connection_handler_client, + emqx_exproto_v_1_connection_handler_bhvr +]}. diff --git a/apps/emqx_gateway/src/exproto/emqx_exproto_channel.erl b/apps/emqx_gateway_exproto/src/emqx_exproto_channel.erl similarity index 99% rename from apps/emqx_gateway/src/exproto/emqx_exproto_channel.erl rename to apps/emqx_gateway_exproto/src/emqx_exproto_channel.erl index 301154df0..3b2c8d73b 100644 --- a/apps/emqx_gateway/src/exproto/emqx_exproto_channel.erl +++ b/apps/emqx_gateway_exproto/src/emqx_exproto_channel.erl @@ -15,7 +15,8 @@ %%-------------------------------------------------------------------- -module(emqx_exproto_channel). --include("src/exproto/include/emqx_exproto.hrl"). + +-include("emqx_exproto.hrl"). -include_lib("emqx/include/emqx.hrl"). -include_lib("emqx/include/emqx_mqtt.hrl"). -include_lib("emqx/include/types.hrl"). 
@@ -680,14 +681,14 @@ ensure_timer(Name, Channel = #channel{timers = Timers}) -> ensure_timer(Name, Time, Channel = #channel{timers = Timers}) -> Msg = maps:get(Name, ?TIMER_TABLE), - TRef = emqx_misc:start_timer(Time, Msg), + TRef = emqx_utils:start_timer(Time, Msg), Channel#channel{timers = Timers#{Name => TRef}}. reset_timer(Name, Channel) -> ensure_timer(Name, remove_timer_ref(Name, Channel)). cancel_timer(Name, Channel = #channel{timers = Timers}) -> - emqx_misc:cancel_timer(maps:get(Name, Timers, undefined)), + emqx_utils:cancel_timer(maps:get(Name, Timers, undefined)), remove_timer_ref(Name, Channel). remove_timer_ref(Name, Channel = #channel{timers = Timers}) -> @@ -791,4 +792,4 @@ proto_name_to_protocol(ProtoName) when is_binary(ProtoName) -> binary_to_atom(ProtoName). anonymous_clientid() -> - iolist_to_binary(["exproto-", emqx_misc:gen_id()]). + iolist_to_binary(["exproto-", emqx_utils:gen_id()]). diff --git a/apps/emqx_gateway/src/exproto/emqx_exproto_frame.erl b/apps/emqx_gateway_exproto/src/emqx_exproto_frame.erl similarity index 100% rename from apps/emqx_gateway/src/exproto/emqx_exproto_frame.erl rename to apps/emqx_gateway_exproto/src/emqx_exproto_frame.erl diff --git a/apps/emqx_gateway/src/exproto/emqx_exproto_gcli.erl b/apps/emqx_gateway_exproto/src/emqx_exproto_gcli.erl similarity index 99% rename from apps/emqx_gateway/src/exproto/emqx_exproto_gcli.erl rename to apps/emqx_gateway_exproto/src/emqx_exproto_gcli.erl index af15ef9d3..34883cdce 100644 --- a/apps/emqx_gateway/src/exproto/emqx_exproto_gcli.erl +++ b/apps/emqx_gateway_exproto/src/emqx_exproto_gcli.erl @@ -50,7 +50,7 @@ start_link(Pool, Id) -> gen_server:start_link( - {local, emqx_misc:proc_name(?MODULE, Id)}, + {local, emqx_utils:proc_name(?MODULE, Id)}, ?MODULE, [Pool, Id], [] diff --git a/apps/emqx_gateway/src/exproto/emqx_exproto_gsvr.erl b/apps/emqx_gateway_exproto/src/emqx_exproto_gsvr.erl similarity index 99% rename from apps/emqx_gateway/src/exproto/emqx_exproto_gsvr.erl rename to apps/emqx_gateway_exproto/src/emqx_exproto_gsvr.erl index 13bd49e55..5bbe7bf37 100644 --- a/apps/emqx_gateway/src/exproto/emqx_exproto_gsvr.erl +++ b/apps/emqx_gateway_exproto/src/emqx_exproto_gsvr.erl @@ -19,7 +19,7 @@ % -behaviour(emqx_exproto_v_1_connection_adapter_bhvr). --include("src/exproto/include/emqx_exproto.hrl"). +-include("emqx_exproto.hrl"). -include_lib("emqx/include/logger.hrl"). -define(IS_QOS(X), (X =:= 0 orelse X =:= 1 orelse X =:= 2)). diff --git a/apps/emqx_gateway_exproto/src/emqx_exproto_schema.erl b/apps/emqx_gateway_exproto/src/emqx_exproto_schema.erl new file mode 100644 index 000000000..eb44c030b --- /dev/null +++ b/apps/emqx_gateway_exproto/src/emqx_exproto_schema.erl @@ -0,0 +1,117 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_exproto_schema). 
+
+-include_lib("hocon/include/hoconsc.hrl").
+-include_lib("typerefl/include/types.hrl").
+
+-type ip_port() :: tuple() | integer().
+
+-typerefl_from_string({ip_port/0, emqx_schema, to_ip_port}).
+
+-reflect_type([
+    ip_port/0
+]).
+
+%% config schema provides
+-export([fields/1, desc/1]).
+
+fields(exproto) ->
+    [
+        {server,
+            sc(
+                ref(exproto_grpc_server),
+                #{
+                    required => true,
+                    desc => ?DESC(exproto_server)
+                }
+            )},
+        {handler,
+            sc(
+                ref(exproto_grpc_handler),
+                #{
+                    required => true,
+                    desc => ?DESC(exproto_handler)
+                }
+            )},
+        {mountpoint, emqx_gateway_schema:mountpoint()},
+        {listeners,
+            sc(ref(emqx_gateway_schema, tcp_udp_listeners), #{desc => ?DESC(tcp_udp_listeners)})}
+    ] ++ emqx_gateway_schema:gateway_common_options();
+fields(exproto_grpc_server) ->
+    [
+        {bind,
+            sc(
+                hoconsc:union([ip_port(), integer()]),
+                #{
+                    required => true,
+                    desc => ?DESC(exproto_grpc_server_bind)
+                }
+            )},
+        {ssl_options,
+            sc(
+                ref(ssl_server_opts),
+                #{
+                    required => {false, recursively},
+                    desc => ?DESC(exproto_grpc_server_ssl)
+                }
+            )}
+    ];
+fields(exproto_grpc_handler) ->
+    [
+        {address, sc(binary(), #{required => true, desc => ?DESC(exproto_grpc_handler_address)})},
+        {ssl_options,
+            sc(
+                ref(emqx_schema, "ssl_client_opts"),
+                #{
+                    required => {false, recursively},
+                    desc => ?DESC(exproto_grpc_handler_ssl)
+                }
+            )}
+    ];
+fields(ssl_server_opts) ->
+    emqx_schema:server_ssl_opts_schema(
+        #{
+            depth => 10,
+            reuse_sessions => true,
+            versions => tls_all_available
+        },
+        true
+    ).
+
+desc(exproto) ->
+    "Settings for EMQX extension protocol (exproto).";
+desc(exproto_grpc_server) ->
+    "Settings for the exproto gRPC server.";
+desc(exproto_grpc_handler) ->
+    "Settings for the exproto gRPC connection handler.";
+desc(ssl_server_opts) ->
+    "SSL configuration for the server.";
+desc(_) ->
+    undefined.
+
+%%--------------------------------------------------------------------
+%% helpers
+
+sc(Type, Meta) ->
+    hoconsc:mk(Type, Meta).
+
+ref(StructName) ->
+    ref(?MODULE, StructName).
+
+ref(Mod, Field) ->
+    hoconsc:ref(Mod, Field).
diff --git a/apps/emqx_gateway_exproto/src/emqx_gateway_exproto.app.src b/apps/emqx_gateway_exproto/src/emqx_gateway_exproto.app.src
new file mode 100644
index 000000000..09cf58338
--- /dev/null
+++ b/apps/emqx_gateway_exproto/src/emqx_gateway_exproto.app.src
@@ -0,0 +1,10 @@
+{application, emqx_gateway_exproto, [
+    {description, "ExProto Gateway"},
+    {vsn, "0.1.0"},
+    {registered, []},
+    {applications, [kernel, stdlib, grpc, emqx, emqx_gateway]},
+    {env, []},
+    {modules, []},
+    {licenses, ["Apache 2.0"]},
+    {links, []}
+]}.
diff --git a/apps/emqx_gateway/src/exproto/emqx_exproto_impl.erl b/apps/emqx_gateway_exproto/src/emqx_gateway_exproto.erl
similarity index 91%
rename from apps/emqx_gateway/src/exproto/emqx_exproto_impl.erl
rename to apps/emqx_gateway_exproto/src/emqx_gateway_exproto.erl
index 0c25e5e08..ff105b931 100644
--- a/apps/emqx_gateway/src/exproto/emqx_exproto_impl.erl
+++ b/apps/emqx_gateway_exproto/src/emqx_gateway_exproto.erl
@@ -14,12 +14,28 @@
 %% limitations under the License.
 %%--------------------------------------------------------------------

-%% @doc The ExProto Gateway Implement interface
--module(emqx_exproto_impl).
-
--behaviour(emqx_gateway_impl).
+%% @doc The ExProto Gateway implementation
+-module(emqx_gateway_exproto).

 -include_lib("emqx/include/logger.hrl").
+-include_lib("emqx_gateway/include/emqx_gateway.hrl").
+
+%% define a gateway named exproto
+-gateway(#{
+    name => exproto,
+    callback_module => ?MODULE,
+    config_schema_module => emqx_exproto_schema
+}).
+
+%% callback_module must implement the emqx_gateway_impl behaviour
+-behaviour(emqx_gateway_impl).
+
+%% callbacks for emqx_gateway_impl
+-export([
+    on_gateway_load/2,
+    on_gateway_update/3,
+    on_gateway_unload/2
+]).

 -import(
     emqx_gateway_utils,
@@ -30,31 +46,8 @@
     ]
 ).

-%% APIs
--export([
-    reg/0,
-    unreg/0
-]).
-
--export([
-    on_gateway_load/2,
-    on_gateway_update/3,
-    on_gateway_unload/2
-]).
-
 %%--------------------------------------------------------------------
-%% APIs
-%%--------------------------------------------------------------------
-
-reg() ->
-    RegistryOptions = [{cbkmod, ?MODULE}],
-    emqx_gateway_registry:reg(exproto, RegistryOptions).
-
-unreg() ->
-    emqx_gateway_registry:unreg(exproto).
-
-%%--------------------------------------------------------------------
-%% emqx_gateway_registry callbacks
+%% emqx_gateway_impl callbacks
 %%--------------------------------------------------------------------

 on_gateway_load(
@@ -156,7 +149,7 @@ start_grpc_server(GwName, Options = #{bind := ListenOn}) ->
         }
     },
     SvrOptions =
-        case emqx_map_lib:deep_get([ssl, enable], Options, false) of
+        case emqx_utils_maps:deep_get([ssl, enable], Options, false) of
             false ->
                 [];
             true ->
@@ -208,7 +201,7 @@ start_grpc_client_channel(GwName, Options = #{address := Address}) ->
             }}
         )
     end,
-    case emqx_map_lib:deep_get([ssl, enable], Options, false) of
+    case emqx_utils_maps:deep_get([ssl, enable], Options, false) of
         false ->
             SvrAddr = compose_http_uri(http, Host, Port),
             grpc_client_sup:create_channel_pool(GwName, SvrAddr, #{});
diff --git a/apps/emqx_gateway/test/emqx_exproto_SUITE.erl b/apps/emqx_gateway_exproto/test/emqx_exproto_SUITE.erl
similarity index 99%
rename from apps/emqx_gateway/test/emqx_exproto_SUITE.erl
rename to apps/emqx_gateway_exproto/test/emqx_exproto_SUITE.erl
index b476a40cb..264f6af95 100644
--- a/apps/emqx_gateway/test/emqx_exproto_SUITE.erl
+++ b/apps/emqx_gateway_exproto/test/emqx_exproto_SUITE.erl
@@ -76,6 +76,7 @@ metrics() -> [tcp, ssl, udp, dtls].

 init_per_group(GrpName, Cfg) ->
+    application:load(emqx_gateway_exproto),
     put(grpname, GrpName),
     Svrs = emqx_exproto_echo_svr:start(),
     emqx_common_test_helpers:start_apps([emqx_authn, emqx_gateway], fun set_special_cfg/1),
diff --git a/apps/emqx_gateway/test/emqx_exproto_echo_svr.erl b/apps/emqx_gateway_exproto/test/emqx_exproto_echo_svr.erl
similarity index 93%
rename from apps/emqx_gateway/test/emqx_exproto_echo_svr.erl
rename to apps/emqx_gateway_exproto/test/emqx_exproto_echo_svr.erl
index b2e3ad4a7..e04990f5f 100644
--- a/apps/emqx_gateway/test/emqx_exproto_echo_svr.erl
+++ b/apps/emqx_gateway_exproto/test/emqx_exproto_echo_svr.erl
@@ -148,7 +148,7 @@ on_received_bytes(Stream, _Md) ->
         fun(Reqs) ->
             lists:foreach(
                 fun(#{conn := Conn, bytes := Bytes}) ->
-                    #{<<"type">> := Type} = Params = emqx_json:decode(Bytes, [return_maps]),
+                    #{<<"type">> := Type} = Params = emqx_utils_json:decode(Bytes, [return_maps]),
                     _ = handle_in(Conn, Type, Params)
                 end,
                 Reqs
@@ -284,16 +284,16 @@ handle_out(Conn, ?TYPE_DISCONNECT) ->

 %% Frame

 frame_connect(ClientInfo, Password) ->
-    emqx_json:encode(#{
+    emqx_utils_json:encode(#{
         type => ?TYPE_CONNECT,
         clientinfo => ClientInfo,
         password => Password
     }).

 frame_connack(Code) ->
-    emqx_json:encode(#{type => ?TYPE_CONNACK, code => Code}).
+    emqx_utils_json:encode(#{type => ?TYPE_CONNACK, code => Code}).
 frame_publish(Topic, Qos, Payload) ->
-    emqx_json:encode(#{
+    emqx_utils_json:encode(#{
         type => ?TYPE_PUBLISH,
         topic => Topic,
         qos => Qos,
@@ -301,19 +301,19 @@ frame_publish(Topic, Qos, Payload) ->
     }).

 frame_puback(Code) ->
-    emqx_json:encode(#{type => ?TYPE_PUBACK, code => Code}).
+    emqx_utils_json:encode(#{type => ?TYPE_PUBACK, code => Code}).

 frame_subscribe(Topic, Qos) ->
-    emqx_json:encode(#{type => ?TYPE_SUBSCRIBE, topic => Topic, qos => Qos}).
+    emqx_utils_json:encode(#{type => ?TYPE_SUBSCRIBE, topic => Topic, qos => Qos}).

 frame_suback(Code) ->
-    emqx_json:encode(#{type => ?TYPE_SUBACK, code => Code}).
+    emqx_utils_json:encode(#{type => ?TYPE_SUBACK, code => Code}).

 frame_unsubscribe(Topic) ->
-    emqx_json:encode(#{type => ?TYPE_UNSUBSCRIBE, topic => Topic}).
+    emqx_utils_json:encode(#{type => ?TYPE_UNSUBSCRIBE, topic => Topic}).

 frame_unsuback(Code) ->
-    emqx_json:encode(#{type => ?TYPE_UNSUBACK, code => Code}).
+    emqx_utils_json:encode(#{type => ?TYPE_UNSUBACK, code => Code}).

 frame_disconnect() ->
-    emqx_json:encode(#{type => ?TYPE_DISCONNECT}).
+    emqx_utils_json:encode(#{type => ?TYPE_DISCONNECT}).
diff --git a/apps/emqx_gateway_lwm2m/.gitignore b/apps/emqx_gateway_lwm2m/.gitignore
new file mode 100644
index 000000000..f1c455451
--- /dev/null
+++ b/apps/emqx_gateway_lwm2m/.gitignore
@@ -0,0 +1,19 @@
+.rebar3
+_*
+.eunit
+*.o
+*.beam
+*.plt
+*.swp
+*.swo
+.erlang.cookie
+ebin
+log
+erl_crash.dump
+.rebar
+logs
+_build
+.idea
+*.iml
+rebar3.crashdump
+*~
diff --git a/apps/emqx_gateway_lwm2m/README.md b/apps/emqx_gateway_lwm2m/README.md
new file mode 100644
index 000000000..678d74dcf
--- /dev/null
+++ b/apps/emqx_gateway_lwm2m/README.md
@@ -0,0 +1,61 @@
+# emqx_lwm2m
+
+[LwM2M (Lightweight Machine-to-Machine)](https://lwm2m.openmobilealliance.org/)
+is a protocol designed for IoT devices and machine-to-machine communication.
+It is a lightweight protocol that supports devices with limited processing power and memory.
+
+The **LwM2M Gateway** in EMQX can accept LwM2M clients and translate their events
+and messages into MQTT Publish messages.
+
+The current implementation has the following limitations:
+- Based on UDP/DTLS transport.
+- Only supports v1.0.2. v1.1.x and v1.2.x are not supported yet.
+- LwM2M Bootstrap services are not included.
+
+## Quick Start
+
+In EMQX 5.0, LwM2M gateways can be configured and enabled through the Dashboard.
+
+It can also be enabled via the HTTP API or emqx.conf, e.g., in emqx.conf:
+
+```properties
+gateway.lwm2m {
+  xml_dir = "etc/lwm2m_xml/"
+  auto_observe = true
+  enable_stats = true
+  idle_timeout = "30s"
+  lifetime_max = "86400s"
+  lifetime_min = "1s"
+  mountpoint = "lwm2m/${endpoint_name}/"
+  qmode_time_window = "22s"
+  update_msg_publish_condition = "contains_object_list"
+  translators {
+    command {qos = 0, topic = "dn/#"}
+    notify {qos = 0, topic = "up/notify"}
+    register {qos = 0, topic = "up/resp"}
+    response {qos = 0, topic = "up/resp"}
+    update {qos = 0, topic = "up/update"}
+  }
+  listeners {
+    udp {
+      default {
+        bind = "5783"
+        max_conn_rate = 1000
+        max_connections = 1024000
+      }
+    }
+  }
+}
+```
+
+> Note:
+> Configuring the gateway via emqx.conf requires changes on a per-node basis,
+> but configuring it via Dashboard or the HTTP API will take effect across the cluster.
+
+## Object definitions
+
+emqx_lwm2m needs object definitions to parse data from LwM2M devices. Object definitions are declared by organizations in XML format. You can find these XMLs at [LwM2MRegistry](http://www.openmobilealliance.org/wp/OMNA/LwM2M/LwM2MRegistry.html); download them and put them into the directory specified by `lwm2m.xml_dir`. If no associated object definition is found, the response from the device will be discarded and an error message reported in the log.
+
+More documentation: [LwM2M Gateway](https://www.emqx.io/docs/en/v5.0/gateway/lwm2m.html)
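Once the HOCON block in this README is loaded, each field lands in EMQX's runtime configuration tree under the `[gateway, lwm2m]` root; `is_auto_observe/0` in `emqx_lwm2m_session`, further down in this diff, reads `auto_observe` in exactly this way. A hypothetical console probe, assuming the README values above (the return values are illustrative, not authoritative):

```erlang
%% In an attached Erlang shell on a node running the gateway:
1> emqx:get_config([gateway, lwm2m, auto_observe]).
true
2> emqx:get_config([gateway, lwm2m, mountpoint]).
<<"lwm2m/${endpoint_name}/">>
%% The ${endpoint_name} placeholder is presumably expanded per client
%% session, so each device publishes under its own topic prefix.
```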
diff --git a/apps/emqx_gateway/src/lwm2m/include/emqx_lwm2m.hrl b/apps/emqx_gateway_lwm2m/include/emqx_lwm2m.hrl
similarity index 51%
rename from apps/emqx_gateway/src/lwm2m/include/emqx_lwm2m.hrl
rename to apps/emqx_gateway_lwm2m/include/emqx_lwm2m.hrl
index 1f02a1637..e1a6ec0d6 100644
--- a/apps/emqx_gateway/src/lwm2m/include/emqx_lwm2m.hrl
+++ b/apps/emqx_gateway_lwm2m/include/emqx_lwm2m.hrl
@@ -36,8 +36,39 @@
 -define(ERR_BAD_REQUEST, <<"Bad Request">>).

 -define(REG_PREFIX, <<"rd">>).
+%%--------------------------------------------------------------------
+%% Data formats for transferring resource information, defined in
+%% OMA-TS-LightweightM2M-V1_0_1-20170704-A
+
+%% 0: Plain text. 0 is the numeric value used in the CoAP Content-Format option.
+%% The plain text format is used for "Read" and "Write" operations on singular
+%% Resources, e.g. /3/0/0
+%%
+%% This data format has a Media Type of "text/plain".
 -define(LWM2M_FORMAT_PLAIN_TEXT, 0).
+
+%% 40: Link format. 40 is the numeric value used in the CoAP Content-Format option.
+%%
 -define(LWM2M_FORMAT_LINK, 40).
+
+%% 42: Opaque. 42 is the numeric value used in the CoAP Content-Format option.
+%% The opaque format is used for "Read" and "Write" operations on singular
+%% Resources where the value of the Resource is an opaque binary value,
+%% e.g. firmware images or opaque values from the top layer.
+%%
+%% This data format has a Media Type of "application/octet-stream".
 -define(LWM2M_FORMAT_OPAQUE, 42).
+
+%% 11542: TLV. 11542 is the numeric value used in the CoAP Content-Format option.
+%% For "Read" and "Write" operations, the binary TLV format is used to represent
+%% an array of values or a single value using a compact binary representation.
+%%
+%% This data format has a Media Type of "application/vnd.oma.lwm2m+tlv".
 -define(LWM2M_FORMAT_TLV, 11542).
--define(LWMWM_FORMAT_JSON, 11543).
+
+%% 11543: JSON. 11543 is the numeric value used in the CoAP Content-Format option.
+%% The client may support the JSON format for "Read" and "Write" operations to
+%% represent multiple resources or single resource values.
+%%
+%% This data format has a Media Type of "application/vnd.oma.lwm2m+json".
+-define(LWM2M_FORMAT_OMA_JSON, 11543).
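To make the Content-Format macros above concrete: `content_to_mqtt/3` in `emqx_lwm2m_cmd` (further down in this diff) performs this dispatch keyed on the equivalent media-type strings and hands the payload to a decoder in `emqx_lwm2m_message`. A condensed sketch of the same mapping keyed on the numeric macros instead — `decode/3` is a hypothetical wrapper; the `emqx_lwm2m_message` functions are the real ones:

```erlang
%% Sketch only: dispatch a device payload by CoAP Content-Format.
decode(?LWM2M_FORMAT_PLAIN_TEXT, Path, Payload) ->
    %% "text/plain": one scalar value for a singular resource such as /3/0/0
    emqx_lwm2m_message:text_to_json(Path, Payload);
decode(?LWM2M_FORMAT_OPAQUE, Path, Payload) ->
    %% "application/octet-stream": raw bytes, base64-encoded downstream
    emqx_lwm2m_message:opaque_to_json(Path, Payload);
decode(?LWM2M_FORMAT_TLV, Path, Payload) ->
    %% "application/vnd.oma.lwm2m+tlv": compact binary array of values
    emqx_lwm2m_message:tlv_to_json(Path, Payload);
decode(?LWM2M_FORMAT_OMA_JSON, _Path, Payload) ->
    %% "application/vnd.oma.lwm2m+json": the base name is carried in the payload
    emqx_lwm2m_message:translate_json(Payload).
```

`?LWM2M_FORMAT_LINK` (Content-Format 40) travels in the other direction: `emqx_lwm2m_cmd` sets it as the `accept` option of a "discover" request rather than decoding it here.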
diff --git a/apps/emqx_gateway/src/lwm2m/lwm2m_xml/LWM2M_Access_Control-v1_0_1.xml b/apps/emqx_gateway_lwm2m/lwm2m_xml/LWM2M_Access_Control-v1_0_1.xml similarity index 100% rename from apps/emqx_gateway/src/lwm2m/lwm2m_xml/LWM2M_Access_Control-v1_0_1.xml rename to apps/emqx_gateway_lwm2m/lwm2m_xml/LWM2M_Access_Control-v1_0_1.xml diff --git a/apps/emqx_gateway/src/lwm2m/lwm2m_xml/LWM2M_Connectivity_Statistics-v1_0_1.xml b/apps/emqx_gateway_lwm2m/lwm2m_xml/LWM2M_Connectivity_Statistics-v1_0_1.xml similarity index 100% rename from apps/emqx_gateway/src/lwm2m/lwm2m_xml/LWM2M_Connectivity_Statistics-v1_0_1.xml rename to apps/emqx_gateway_lwm2m/lwm2m_xml/LWM2M_Connectivity_Statistics-v1_0_1.xml diff --git a/apps/emqx_gateway/src/lwm2m/lwm2m_xml/LWM2M_Device-v1_0_1.xml b/apps/emqx_gateway_lwm2m/lwm2m_xml/LWM2M_Device-v1_0_1.xml similarity index 100% rename from apps/emqx_gateway/src/lwm2m/lwm2m_xml/LWM2M_Device-v1_0_1.xml rename to apps/emqx_gateway_lwm2m/lwm2m_xml/LWM2M_Device-v1_0_1.xml diff --git a/apps/emqx_gateway/src/lwm2m/lwm2m_xml/LWM2M_Firmware_Update-v1_0_1.xml b/apps/emqx_gateway_lwm2m/lwm2m_xml/LWM2M_Firmware_Update-v1_0_1.xml similarity index 100% rename from apps/emqx_gateway/src/lwm2m/lwm2m_xml/LWM2M_Firmware_Update-v1_0_1.xml rename to apps/emqx_gateway_lwm2m/lwm2m_xml/LWM2M_Firmware_Update-v1_0_1.xml diff --git a/apps/emqx_gateway/src/lwm2m/lwm2m_xml/LWM2M_Location-v1_0.xml b/apps/emqx_gateway_lwm2m/lwm2m_xml/LWM2M_Location-v1_0.xml similarity index 100% rename from apps/emqx_gateway/src/lwm2m/lwm2m_xml/LWM2M_Location-v1_0.xml rename to apps/emqx_gateway_lwm2m/lwm2m_xml/LWM2M_Location-v1_0.xml diff --git a/apps/emqx_gateway/src/lwm2m/lwm2m_xml/LWM2M_Security-v1_0.xml b/apps/emqx_gateway_lwm2m/lwm2m_xml/LWM2M_Security-v1_0.xml similarity index 100% rename from apps/emqx_gateway/src/lwm2m/lwm2m_xml/LWM2M_Security-v1_0.xml rename to apps/emqx_gateway_lwm2m/lwm2m_xml/LWM2M_Security-v1_0.xml diff --git a/apps/emqx_gateway/src/lwm2m/lwm2m_xml/LWM2M_Server-v1_0.xml b/apps/emqx_gateway_lwm2m/lwm2m_xml/LWM2M_Server-v1_0.xml similarity index 100% rename from apps/emqx_gateway/src/lwm2m/lwm2m_xml/LWM2M_Server-v1_0.xml rename to apps/emqx_gateway_lwm2m/lwm2m_xml/LWM2M_Server-v1_0.xml diff --git a/apps/emqx_gateway_lwm2m/rebar.config b/apps/emqx_gateway_lwm2m/rebar.config new file mode 100644 index 000000000..c8675c3ba --- /dev/null +++ b/apps/emqx_gateway_lwm2m/rebar.config @@ -0,0 +1,4 @@ +{erl_opts, [debug_info]}. +{deps, [ {emqx, {path, "../../apps/emqx"}}, + {emqx_gateway, {path, "../../apps/emqx_gateway"}} + ]}. diff --git a/apps/emqx_gateway_lwm2m/src/emqx_gateway_lwm2m.app.src b/apps/emqx_gateway_lwm2m/src/emqx_gateway_lwm2m.app.src new file mode 100644 index 000000000..83a707395 --- /dev/null +++ b/apps/emqx_gateway_lwm2m/src/emqx_gateway_lwm2m.app.src @@ -0,0 +1,10 @@ +{application, emqx_gateway_lwm2m, [ + {description, "LwM2M Gateway"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib, emqx, emqx_gateway, emqx_gateway_coap]}, + {env, []}, + {modules, []}, + {licenses, ["Apache 2.0"]}, + {links, []} +]}. diff --git a/apps/emqx_gateway/src/lwm2m/emqx_lwm2m_impl.erl b/apps/emqx_gateway_lwm2m/src/emqx_gateway_lwm2m.erl similarity index 87% rename from apps/emqx_gateway/src/lwm2m/emqx_lwm2m_impl.erl rename to apps/emqx_gateway_lwm2m/src/emqx_gateway_lwm2m.erl index fa4537315..1c8f67863 100644 --- a/apps/emqx_gateway/src/lwm2m/emqx_lwm2m_impl.erl +++ b/apps/emqx_gateway_lwm2m/src/emqx_gateway_lwm2m.erl @@ -14,35 +14,37 @@ %% limitations under the License. 
 %%--------------------------------------------------------------------

-%% @doc The LwM2M Gateway Implement interface
--module(emqx_lwm2m_impl).
-
--behaviour(emqx_gateway_impl).
+%% @doc The LwM2M Gateway implementation
+-module(emqx_gateway_lwm2m).

 -include_lib("emqx/include/logger.hrl").
+-include_lib("emqx_gateway/include/emqx_gateway.hrl").

-%% APIs
--export([
-    reg/0,
-    unreg/0
-]).
+%% define a gateway named lwm2m
+-gateway(#{
+    name => lwm2m,
+    callback_module => ?MODULE,
+    config_schema_module => emqx_lwm2m_schema
+}).

+%% callback_module must implement the emqx_gateway_impl behaviour
+-behaviour(emqx_gateway_impl).
+
+%% callbacks for emqx_gateway_impl
 -export([
     on_gateway_load/2,
     on_gateway_update/3,
     on_gateway_unload/2
 ]).

-%%--------------------------------------------------------------------
-%% APIs
-%%--------------------------------------------------------------------
-
-reg() ->
-    RegistryOptions = [{cbkmod, ?MODULE}],
-    emqx_gateway_registry:reg(lwm2m, RegistryOptions).
-
-unreg() ->
-    emqx_gateway_registry:unreg(lwm2m).
+-import(
+    emqx_gateway_utils,
+    [
+        normalize_config/1,
+        start_listeners/4,
+        stop_listeners/2
+    ]
+).

 %%--------------------------------------------------------------------
 %% emqx_gateway_registry callbacks
diff --git a/apps/emqx_gateway/src/lwm2m/emqx_lwm2m_api.erl b/apps/emqx_gateway_lwm2m/src/emqx_lwm2m_api.erl
similarity index 98%
rename from apps/emqx_gateway/src/lwm2m/emqx_lwm2m_api.erl
rename to apps/emqx_gateway_lwm2m/src/emqx_lwm2m_api.erl
index 2cd53d6eb..ca32d03db 100644
--- a/apps/emqx_gateway/src/lwm2m/emqx_lwm2m_api.erl
+++ b/apps/emqx_gateway_lwm2m/src/emqx_lwm2m_api.erl
@@ -32,6 +32,8 @@
 -import(hoconsc, [mk/2, ref/1, ref/2]).
 -import(emqx_dashboard_swagger, [error_codes/2]).

+-elvis([{elvis_style, atom_naming_convention, disable}]).
+
 namespace() -> "lwm2m".

 api_spec() ->
@@ -225,7 +227,7 @@ to_operations(Obj, ObjDefinition) ->
     }.

 path_list(Path) ->
-    case binary:split(binary_util:trim(Path, $/), [<<$/>>], [global]) of
+    case binary:split(emqx_utils_binary:trim(Path, $/), [<<$/>>], [global]) of
         [ObjId, ObjInsId, ResId, ResInstId] -> [ObjId, ObjInsId, ResId, ResInstId];
         [ObjId, ObjInsId, ResId] -> [ObjId, ObjInsId, ResId];
         [ObjId, ObjInsId] -> [ObjId, ObjInsId];
diff --git a/apps/emqx_gateway/src/lwm2m/emqx_lwm2m_channel.erl b/apps/emqx_gateway_lwm2m/src/emqx_lwm2m_channel.erl
similarity index 98%
rename from apps/emqx_gateway/src/lwm2m/emqx_lwm2m_channel.erl
rename to apps/emqx_gateway_lwm2m/src/emqx_lwm2m_channel.erl
index 16d0f9630..77652744a 100644
--- a/apps/emqx_gateway/src/lwm2m/emqx_lwm2m_channel.erl
+++ b/apps/emqx_gateway_lwm2m/src/emqx_lwm2m_channel.erl
@@ -16,9 +16,9 @@

 -module(emqx_lwm2m_channel).

+-include("emqx_lwm2m.hrl").
 -include_lib("emqx/include/logger.hrl").
--include("src/coap/include/emqx_coap.hrl").
--include("src/lwm2m/include/emqx_lwm2m.hrl").
+-include_lib("emqx_gateway_coap/include/emqx_coap.hrl").
%% API -export([ @@ -105,7 +105,7 @@ info(conn_state, #channel{conn_state = ConnState}) -> info(clientinfo, #channel{clientinfo = ClientInfo}) -> ClientInfo; info(session, #channel{session = Session}) -> - emqx_misc:maybe_apply(fun emqx_lwm2m_session:info/1, Session); + emqx_utils:maybe_apply(fun emqx_lwm2m_session:info/1, Session); info(clientid, #channel{clientinfo = #{clientid := ClientId}}) -> ClientId; info(ctx, #channel{ctx = Ctx}) -> @@ -286,7 +286,7 @@ handle_call(discard, _From, Channel) -> % pendings = Pendings}) -> % ok = emqx_session:takeover(Session), % %% TODO: Should not drain deliver here (side effect) -% Delivers = emqx_misc:drain_deliver(), +% Delivers = emqx_utils:drain_deliver(), % AllPendings = lists:append(Delivers, Pendings), % shutdown_and_reply(takenover, AllPendings, Channel); @@ -390,7 +390,7 @@ set_peercert_infos(Peercert, ClientInfo) -> ClientInfo#{dn => DN, cn => CN}. make_timer(Name, Time, Msg, Channel = #channel{timers = Timers}) -> - TRef = emqx_misc:start_timer(Time, Msg), + TRef = emqx_utils:start_timer(Time, Msg), Channel#channel{timers = Timers#{Name => TRef}}. update_life_timer(#channel{session = Session, timers = Timers} = Channel) -> @@ -413,7 +413,7 @@ do_takeover(_DesireId, Msg, Channel) -> do_connect(Req, Result, Channel, Iter) -> case - emqx_misc:pipeline( + emqx_utils:pipeline( [ fun check_lwm2m_version/2, fun enrich_conninfo/2, @@ -464,14 +464,14 @@ check_lwm2m_version( _ -> false end, - if - IsValid -> + case IsValid of + true -> NConnInfo = ConnInfo#{ connected_at => erlang:system_time(millisecond), proto_ver => Ver }, {ok, Channel#channel{conninfo = NConnInfo}}; - true -> + _ -> ?SLOG(error, #{ msg => "reject_REGISTRE_request", reason => {unsupported_version, Ver} diff --git a/apps/emqx_gateway/src/lwm2m/emqx_lwm2m_cmd.erl b/apps/emqx_gateway_lwm2m/src/emqx_lwm2m_cmd.erl similarity index 97% rename from apps/emqx_gateway/src/lwm2m/emqx_lwm2m_cmd.erl rename to apps/emqx_gateway_lwm2m/src/emqx_lwm2m_cmd.erl index d0b362dda..8e4286343 100644 --- a/apps/emqx_gateway/src/lwm2m/emqx_lwm2m_cmd.erl +++ b/apps/emqx_gateway_lwm2m/src/emqx_lwm2m_cmd.erl @@ -16,9 +16,9 @@ -module(emqx_lwm2m_cmd). +-include("emqx_lwm2m.hrl"). -include_lib("emqx/include/logger.hrl"). --include("src/coap/include/emqx_coap.hrl"). --include("src/lwm2m/include/emqx_lwm2m.hrl"). +-include_lib("emqx_gateway_coap/include/emqx_coap.hrl"). -export([ mqtt_to_coap/2, @@ -138,7 +138,7 @@ mqtt_to_coap(AlternatePath, InputCmd = #{<<"msgType">> := <<"discover">>, <<"dat [ {uri_path, FullPathList}, {uri_query, QueryList}, - {'accept', ?LWM2M_FORMAT_LINK} + {accept, ?LWM2M_FORMAT_LINK} ] ), InputCmd @@ -241,6 +241,7 @@ empty_ack_to_mqtt(Ref) -> coap_failure_to_mqtt(Ref, MsgType) -> make_base_response(maps:put(<<"msgType">>, MsgType, Ref)). +%% TODO: application/link-format content_to_mqtt(CoapPayload, <<"text/plain">>, Ref) -> emqx_lwm2m_message:text_to_json(extract_path(Ref), CoapPayload); content_to_mqtt(CoapPayload, <<"application/octet-stream">>, Ref) -> @@ -291,9 +292,9 @@ make_response(Code, Ref = #{}) -> BaseRsp = make_base_response(Ref), make_data_response(BaseRsp, Code). -make_response(Code, Ref = #{}, _Format, Result) -> +make_response(Code, Ref = #{}, Format, Result) -> BaseRsp = make_base_response(Ref), - make_data_response(BaseRsp, Code, _Format, Result). + make_data_response(BaseRsp, Code, Format, Result). 
%% The base response format is what included in the request: %% @@ -334,7 +335,7 @@ remove_tmp_fields(Ref) -> -spec path_list(Path :: binary()) -> {[PathWord :: binary()], [Query :: binary()]}. path_list(Path) -> - case binary:split(binary_util:trim(Path, $/), [<<$/>>], [global]) of + case binary:split(emqx_utils_binary:trim(Path, $/), [<<$/>>], [global]) of [ObjId, ObjInsId, ResId, LastPart] -> {ResInstId, QueryList} = query_list(LastPart), {[ObjId, ObjInsId, ResId, ResInstId], QueryList}; @@ -388,7 +389,7 @@ observe_seq(Options) -> add_alternate_path_prefix(<<"/">>, PathList) -> PathList; add_alternate_path_prefix(AlternatePath, PathList) -> - [binary_util:trim(AlternatePath, $/) | PathList]. + [emqx_utils_binary:trim(AlternatePath, $/) | PathList]. extract_path(Ref = #{}) -> drop_query( diff --git a/apps/emqx_gateway/src/lwm2m/emqx_lwm2m_message.erl b/apps/emqx_gateway_lwm2m/src/emqx_lwm2m_message.erl similarity index 95% rename from apps/emqx_gateway/src/lwm2m/emqx_lwm2m_message.erl rename to apps/emqx_gateway_lwm2m/src/emqx_lwm2m_message.erl index f09a8ea3d..8b9ba2491 100644 --- a/apps/emqx_gateway/src/lwm2m/emqx_lwm2m_message.erl +++ b/apps/emqx_gateway_lwm2m/src/emqx_lwm2m_message.erl @@ -24,7 +24,7 @@ translate_json/1 ]). --include("src/lwm2m/include/emqx_lwm2m.hrl"). +-include("emqx_lwm2m.hrl"). tlv_to_json(BaseName, TlvData) -> DecodedTlv = emqx_lwm2m_tlv:parse(TlvData), @@ -97,7 +97,7 @@ tlv_single_resource(BaseName, Id, Value, ObjDefinition) -> [#{path => BaseName, value => Val}]. basename(OldBaseName, _ObjectId, ObjectInstanceId, ResourceId, 3) -> - case binary:split(binary_util:trim(OldBaseName, $/), [<<$/>>], [global]) of + case binary:split(emqx_utils_binary:trim(OldBaseName, $/), [<<$/>>], [global]) of [ObjId, ObjInsId, ResId] -> <<$/, ObjId/binary, $/, ObjInsId/binary, $/, ResId/binary>>; [ObjId, ObjInsId] -> @@ -113,13 +113,13 @@ basename(OldBaseName, _ObjectId, ObjectInstanceId, ResourceId, 3) -> >> end; basename(OldBaseName, _ObjectId, ObjectInstanceId, _ResourceId, 2) -> - case binary:split(binary_util:trim(OldBaseName, $/), [<<$/>>], [global]) of + case binary:split(emqx_utils_binary:trim(OldBaseName, $/), [<<$/>>], [global]) of [ObjId, ObjInsId, _ResId] -> <<$/, ObjId/binary, $/, ObjInsId/binary>>; [ObjId, ObjInsId] -> <<$/, ObjId/binary, $/, ObjInsId/binary>>; [ObjId] -> <<$/, ObjId/binary, $/, (integer_to_binary(ObjectInstanceId))/binary>> end. % basename(OldBaseName, _ObjectId, _ObjectInstanceId, _ResourceId, 1) -> -% case binary:split(binary_util:trim(OldBaseName, $/), [<<$/>>], [global]) of +% case binary:split(emqx_utils_binary:trim(OldBaseName, $/), [<<$/>>], [global]) of % [ObjId, _ObjInsId, _ResId] -> <<$/, ObjId/binary>>; % [ObjId, _ObjInsId] -> <<$/, ObjId/binary>>; % [ObjId] -> <<$/, ObjId/binary>> @@ -129,7 +129,7 @@ make_path(RelativePath, Id) -> <>. object_id(BaseName) -> - case binary:split(binary_util:trim(BaseName, $/), [<<$/>>], [global]) of + case binary:split(emqx_utils_binary:trim(BaseName, $/), [<<$/>>], [global]) of [ObjId] -> binary_to_integer(ObjId); [ObjId, _] -> binary_to_integer(ObjId); [ObjId, _, _] -> binary_to_integer(ObjId); @@ -137,7 +137,7 @@ object_id(BaseName) -> end. 
object_resource_id(BaseName) -> - case binary:split(binary_util:trim(BaseName, $/), [<<$/>>], [global]) of + case binary:split(emqx_utils_binary:trim(BaseName, $/), [<<$/>>], [global]) of [_ObjIdBin1] -> error({invalid_basename, BaseName}); [_ObjIdBin2, _] -> @@ -152,7 +152,7 @@ object_resource_id(BaseName) -> value(Value, ResourceId, ObjDefinition) -> case emqx_lwm2m_xml_object:get_resource_type(ResourceId, ObjDefinition) of "String" -> - % keep binary type since it is same as a string for jsx + % keep binary type since it is same as a string for emqx_utils_json Value; "Integer" -> Size = byte_size(Value) * 8, @@ -351,7 +351,7 @@ opaque_to_json(BaseName, Binary) -> [#{path => BaseName, value => base64:encode(Binary)}]. translate_json(JSONBin) -> - JSONTerm = emqx_json:decode(JSONBin, [return_maps]), + JSONTerm = emqx_utils_json:decode(JSONBin, [return_maps]), BaseName = maps:get(<<"bn">>, JSONTerm, <<>>), ElementList = maps:get(<<"e">>, JSONTerm, []), translate_element(BaseName, ElementList, []). @@ -371,8 +371,8 @@ translate_element(BaseName, [Element | ElementList], Acc) -> translate_element(BaseName, ElementList, NewAcc). full_path(BaseName, RelativePath) -> - Prefix = binary_util:rtrim(BaseName, $/), - Path = binary_util:ltrim(RelativePath, $/), + Prefix = emqx_utils_binary:rtrim(BaseName, $/), + Path = emqx_utils_binary:ltrim(RelativePath, $/), <>. get_element_value(#{<<"t">> := Value}) -> Value; @@ -412,9 +412,11 @@ byte_size_of_signed(UInt) -> byte_size_of_signed(UInt, N) -> BitSize = (8 * N - 1), Max = (1 bsl BitSize), - if - UInt =< Max -> N; - UInt > Max -> byte_size_of_signed(UInt, N + 1) + case UInt =< Max of + true -> + N; + false -> + byte_size_of_signed(UInt, N + 1) end. binary_to_number(NumStr) -> diff --git a/apps/emqx_gateway_lwm2m/src/emqx_lwm2m_schema.erl b/apps/emqx_gateway_lwm2m/src/emqx_lwm2m_schema.erl new file mode 100644 index 000000000..b674c3260 --- /dev/null +++ b/apps/emqx_gateway_lwm2m/src/emqx_lwm2m_schema.erl @@ -0,0 +1,184 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_lwm2m_schema). + +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("typerefl/include/types.hrl"). + +-type duration() :: non_neg_integer(). +-type duration_s() :: non_neg_integer(). + +-typerefl_from_string({duration/0, emqx_schema, to_duration}). +-typerefl_from_string({duration_s/0, emqx_schema, to_duration_s}). + +-reflect_type([duration/0, duration_s/0]). + +%% config schema provides +-export([fields/1, desc/1]). + +fields(lwm2m) -> + [ + {xml_dir, + sc( + binary(), + #{ + %% since this is not packaged with emqx, nor + %% present in the packages, we must let the user + %% specify it rather than creating a dynamic + %% default (especially difficult to handle when + %% generating docs). 
+ example => <<"/etc/emqx/lwm2m_xml">>, + required => true, + desc => ?DESC(lwm2m_xml_dir) + } + )}, + {lifetime_min, + sc( + duration(), + #{ + default => <<"15s">>, + desc => ?DESC(lwm2m_lifetime_min) + } + )}, + {lifetime_max, + sc( + duration(), + #{ + default => <<"86400s">>, + desc => ?DESC(lwm2m_lifetime_max) + } + )}, + {qmode_time_window, + sc( + duration_s(), + #{ + default => <<"22s">>, + desc => ?DESC(lwm2m_qmode_time_window) + } + )}, + %% TODO: Support config resource path + {auto_observe, + sc( + boolean(), + #{ + default => false, + desc => ?DESC(lwm2m_auto_observe) + } + )}, + %% FIXME: not working now + {update_msg_publish_condition, + sc( + hoconsc:enum([always, contains_object_list]), + #{ + default => contains_object_list, + desc => ?DESC(lwm2m_update_msg_publish_condition) + } + )}, + {translators, + sc( + ref(lwm2m_translators), + #{ + required => true, + desc => ?DESC(lwm2m_translators) + } + )}, + {mountpoint, emqx_gateway_schema:mountpoint("lwm2m/${endpoint_name}/")}, + {listeners, sc(ref(emqx_gateway_schema, udp_listeners), #{desc => ?DESC(udp_listeners)})} + ] ++ emqx_gateway_schema:gateway_common_options(); +fields(lwm2m_translators) -> + [ + {command, + sc( + ref(translator), + #{ + desc => ?DESC(lwm2m_translators_command), + required => true + } + )}, + {response, + sc( + ref(translator), + #{ + desc => ?DESC(lwm2m_translators_response), + required => true + } + )}, + {notify, + sc( + ref(translator), + #{ + desc => ?DESC(lwm2m_translators_notify), + required => true + } + )}, + {register, + sc( + ref(translator), + #{ + desc => ?DESC(lwm2m_translators_register), + required => true + } + )}, + {update, + sc( + ref(translator), + #{ + desc => ?DESC(lwm2m_translators_update), + required => true + } + )} + ]; +fields(translator) -> + [ + {topic, + sc( + binary(), + #{ + required => true, + desc => ?DESC(translator_topic) + } + )}, + {qos, + sc( + emqx_schema:qos(), + #{ + default => 0, + desc => ?DESC(translator_qos) + } + )} + ]. + +desc(lwm2m) -> + "The LwM2M protocol gateway."; +desc(lwm2m_translators) -> + "MQTT topics that correspond to LwM2M events."; +desc(translator) -> + "MQTT topic that corresponds to a particular type of event."; +desc(_) -> + undefined. + +%%-------------------------------------------------------------------- +%% helpers + +sc(Type, Meta) -> + hoconsc:mk(Type, Meta). + +ref(StructName) -> + ref(?MODULE, StructName). + +ref(Mod, Field) -> + hoconsc:ref(Mod, Field). diff --git a/apps/emqx_gateway/src/lwm2m/emqx_lwm2m_session.erl b/apps/emqx_gateway_lwm2m/src/emqx_lwm2m_session.erl similarity index 95% rename from apps/emqx_gateway/src/lwm2m/emqx_lwm2m_session.erl rename to apps/emqx_gateway_lwm2m/src/emqx_lwm2m_session.erl index 19cd5c25d..e267692a6 100644 --- a/apps/emqx_gateway/src/lwm2m/emqx_lwm2m_session.erl +++ b/apps/emqx_gateway_lwm2m/src/emqx_lwm2m_session.erl @@ -15,11 +15,12 @@ %%-------------------------------------------------------------------- -module(emqx_lwm2m_session). +-include("emqx_lwm2m.hrl"). -include_lib("emqx/include/logger.hrl"). -include_lib("emqx/include/emqx.hrl"). -include_lib("emqx/include/emqx_mqtt.hrl"). --include("src/coap/include/emqx_coap.hrl"). --include("src/lwm2m/include/emqx_lwm2m.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). +-include_lib("emqx_gateway_coap/include/emqx_coap.hrl"). 
%% API -export([ @@ -378,18 +379,18 @@ is_alternate_path(LinkAttrs) -> true; [AttrKey, _] when AttrKey =/= <<>> -> false; - _BadAttr -> - throw({bad_attr, _BadAttr}) + BadAttr -> + throw({bad_attr, BadAttr}) end end, LinkAttrs ). -trim(Str) -> binary_util:trim(Str, $\s). +trim(Str) -> emqx_utils_binary:trim(Str, $\s). delink(Str) -> - Ltrim = binary_util:ltrim(Str, $<), - binary_util:rtrim(Ltrim, $>). + Ltrim = emqx_utils_binary:ltrim(Str, $<), + emqx_utils_binary:rtrim(Ltrim, $>). get_lifetime(#{<<"lt">> := LT}) -> case LT of @@ -513,12 +514,20 @@ observe_object_list(AlternatePath, ObjectList, Session) -> true -> Acc; false -> - try - emqx_lwm2m_xml_object_db:find_objectid(binary_to_integer(ObjId)), - observe_object(AlternatePath, ObjectPath, Acc) - catch - error:no_xml_definition -> - Acc + ObjId1 = binary_to_integer(ObjId), + case emqx_lwm2m_xml_object_db:find_objectid(ObjId1) of + {error, no_xml_definition} -> + ?tp( + warning, + ignore_observer_resource, + #{ + reason => no_xml_definition, + object_id => ObjId1 + } + ), + Acc; + _ -> + observe_object(AlternatePath, ObjectPath, Acc) end end end, @@ -538,15 +547,20 @@ deliver_auto_observe_to_coap(AlternatePath, TermData, Session) -> path => AlternatePath, data => TermData }), - {Req, Ctx} = emqx_lwm2m_cmd:mqtt_to_coap(AlternatePath, TermData), + {Req0, Ctx} = emqx_lwm2m_cmd:mqtt_to_coap(AlternatePath, TermData), + Req = alloc_token(Req0), maybe_do_deliver_to_coap(Ctx, Req, 0, false, Session). is_auto_observe() -> emqx:get_config([gateway, lwm2m, auto_observe]). +alloc_token(Req = #coap_message{}) -> + Req#coap_message{token = crypto:strong_rand_bytes(4)}. + %%-------------------------------------------------------------------- %% Response %%-------------------------------------------------------------------- + handle_coap_response( {Ctx = #{<<"msgType">> := EventType}, #coap_message{ method = CoapMsgMethod, @@ -665,10 +679,10 @@ send_to_coap(#session{queue = Queue} = Session) -> case queue:out(Queue) of {{value, {Timestamp, Ctx, Req}}, Q2} -> Now = ?NOW, - if - Timestamp =:= 0 orelse Timestamp > Now -> - send_to_coap(Ctx, Req, Session#session{queue = Q2}); + case Timestamp =:= 0 orelse Timestamp > Now of true -> + send_to_coap(Ctx, Req, Session#session{queue = Q2}); + false -> send_to_coap(Session#session{queue = Q2}) end; {empty, _} -> @@ -723,7 +737,7 @@ proto_publish( Epn, Qos, MountedTopic, - emqx_json:encode(Payload), + emqx_utils_json:encode(Payload), #{}, Headers ), @@ -772,7 +786,7 @@ deliver_to_coap(AlternatePath, JsonData, MQTT, CacheMode, WithContext, Session) is_binary(JsonData) -> try - TermData = emqx_json:decode(JsonData, [return_maps]), + TermData = emqx_utils_json:decode(JsonData, [return_maps]), deliver_to_coap(AlternatePath, TermData, MQTT, CacheMode, WithContext, Session) catch ExClass:Error:ST -> diff --git a/apps/emqx_gateway/src/lwm2m/emqx_lwm2m_tlv.erl b/apps/emqx_gateway_lwm2m/src/emqx_lwm2m_tlv.erl similarity index 90% rename from apps/emqx_gateway/src/lwm2m/emqx_lwm2m_tlv.erl rename to apps/emqx_gateway_lwm2m/src/emqx_lwm2m_tlv.erl index 782bbec5e..314666638 100644 --- a/apps/emqx_gateway/src/lwm2m/emqx_lwm2m_tlv.erl +++ b/apps/emqx_gateway_lwm2m/src/emqx_lwm2m_tlv.erl @@ -25,7 +25,7 @@ -export([binary_to_hex_string/1]). -endif. --include("src/lwm2m/include/emqx_lwm2m.hrl"). +-include("emqx_lwm2m.hrl"). -define(TLV_TYPE_OBJECT_INSTANCE, 0). -define(TLV_TYPE_RESOURCE_INSTANCE, 1). @@ -37,13 +37,18 @@ -define(TLV_LEGNTH_16_BIT, 2). -define(TLV_LEGNTH_24_BIT, 3). 
-%----------------------------------------------------------------------------------------------------------------------------------------
-% [#{tlv_object_instance := Id11, value := Value11}, #{tlv_object_instance := Id12, value := Value12}, ...]
+-elvis([{elvis_style, no_if_expression, disable}]).
+
+%%--------------------------------------------------------------------
+% [#{tlv_object_instance := Id11, value := Value11},
+%  #{tlv_object_instance := Id12, value := Value12}, ...]
 % where Value11 and Value12 is a list:
-% [#{tlv_resource_with_value => Id21, value => Value21}, #{tlv_multiple_resource => Id22, value = Value22}, ...]
+% [#{tlv_resource_with_value => Id21, value => Value21},
+%  #{tlv_multiple_resource => Id22, value => Value22}, ...]
 % where Value21 is a binary
 %       Value22 is a list:
-% [#{tlv_resource_instance => Id31, value => Value31}, #{tlv_resource_instance => Id32, value => Value32}, ...]
+% [#{tlv_resource_instance => Id31, value => Value31},
+%  #{tlv_resource_instance => Id32, value => Value32}, ...]
 % where Value31 and Value32 is a binary
 %
 % correspond to three levels:
 % 1) Object Instance Level
 % 2) Resource Level
 % 3) Resource Instance Level
 %
-% NOTE: TLV does not has object level, only has object instance level. It implies TLV can not represent multiple objects
-%----------------------------------------------------------------------------------------------------------------------------------------
+% NOTE: TLV does not have an object level, only an object instance level.
+% It implies TLV cannot represent multiple objects.
+%%--------------------------------------------------------------------

 parse(Data) ->
     parse_loop(Data, []).
diff --git a/apps/emqx_gateway/src/lwm2m/emqx_lwm2m_xml_object.erl b/apps/emqx_gateway_lwm2m/src/emqx_lwm2m_xml_object.erl
similarity index 98%
rename from apps/emqx_gateway/src/lwm2m/emqx_lwm2m_xml_object.erl
rename to apps/emqx_gateway_lwm2m/src/emqx_lwm2m_xml_object.erl
index a4dc44f2c..3525f72aa 100644
--- a/apps/emqx_gateway/src/lwm2m/emqx_lwm2m_xml_object.erl
+++ b/apps/emqx_gateway_lwm2m/src/emqx_lwm2m_xml_object.erl
@@ -16,7 +16,7 @@

 -module(emqx_lwm2m_xml_object).

--include("src/lwm2m/include/emqx_lwm2m.hrl").
+-include("emqx_lwm2m.hrl").
 -include_lib("xmerl/include/xmerl.hrl").

 -export([
diff --git a/apps/emqx_gateway/src/lwm2m/emqx_lwm2m_xml_object_db.erl b/apps/emqx_gateway_lwm2m/src/emqx_lwm2m_xml_object_db.erl
similarity index 91%
rename from apps/emqx_gateway/src/lwm2m/emqx_lwm2m_xml_object_db.erl
rename to apps/emqx_gateway_lwm2m/src/emqx_lwm2m_xml_object_db.erl
index 19335768f..2908a65e0 100644
--- a/apps/emqx_gateway/src/lwm2m/emqx_lwm2m_xml_object_db.erl
+++ b/apps/emqx_gateway_lwm2m/src/emqx_lwm2m_xml_object_db.erl
@@ -16,7 +16,7 @@

 -module(emqx_lwm2m_xml_object_db).

--include("src/lwm2m/include/emqx_lwm2m.hrl").
+-include("emqx_lwm2m.hrl").
 -include_lib("xmerl/include/xmerl.hrl").
 -include_lib("emqx/include/logger.hrl").

@@ -45,6 +45,8 @@

 -record(state, {}).

+-elvis([{elvis_style, atom_naming_convention, disable}]).
+
 %% ------------------------------------------------------------------
 %% API Function Definitions
 %% ------------------------------------------------------------------

@@ -57,6 +59,7 @@
 start_link(XmlDir) ->
     gen_server:start_link({local, ?MODULE}, ?MODULE, [XmlDir], []).

+-spec find_objectid(integer()) -> {error, no_xml_definition} | xmerl:xmlElement().
find_objectid(ObjectId) -> ObjectIdInt = case is_list(ObjectId) of @@ -65,9 +68,10 @@ find_objectid(ObjectId) -> end, case ets:lookup(?LWM2M_OBJECT_DEF_TAB, ObjectIdInt) of [] -> {error, no_xml_definition}; - [{ObjectId, Xml}] -> Xml + [{_ObjectId, Xml}] -> Xml end. +-spec find_name(string()) -> {error, no_xml_definition} | xmerl:xmlElement(). find_name(Name) -> NameBinary = case is_list(Name) of @@ -77,10 +81,11 @@ find_name(Name) -> case ets:lookup(?LWM2M_OBJECT_NAME_TO_ID_TAB, NameBinary) of [] -> {error, no_xml_definition}; - [{NameBinary, ObjectId}] -> + [{_NameBinary, ObjectId}] -> find_objectid(ObjectId) end. +-spec stop() -> ok. stop() -> gen_server:stop(?MODULE). @@ -121,10 +126,10 @@ code_change(_OldVsn, State, _Extra) -> load(BaseDir) -> Wild = filename:join(BaseDir, "*.xml"), Wild2 = - if - is_binary(Wild) -> - erlang:binary_to_list(Wild); + case is_binary(Wild) of true -> + erlang:binary_to_list(Wild); + false -> Wild end, case filelib:wildcard(Wild2) of diff --git a/apps/emqx_gateway/test/emqx_lwm2m_SUITE.erl b/apps/emqx_gateway_lwm2m/test/emqx_lwm2m_SUITE.erl similarity index 93% rename from apps/emqx_gateway/test/emqx_lwm2m_SUITE.erl rename to apps/emqx_gateway_lwm2m/test/emqx_lwm2m_SUITE.erl index f91bbf16e..9f388b07c 100644 --- a/apps/emqx_gateway/test/emqx_lwm2m_SUITE.erl +++ b/apps/emqx_gateway_lwm2m/test/emqx_lwm2m_SUITE.erl @@ -31,33 +31,11 @@ -define(LOGT(Format, Args), ct:pal("TEST_SUITE: " ++ Format, Args)). --include("src/lwm2m/include/emqx_lwm2m.hrl"). --include("src/coap/include/emqx_coap.hrl"). +-include("emqx_lwm2m.hrl"). +-include_lib("emqx_gateway_coap/include/emqx_coap.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("common_test/include/ct.hrl"). - --define(CONF_DEFAULT, << - "\n" - "gateway.lwm2m {\n" - " xml_dir = \"../../lib/emqx_gateway/src/lwm2m/lwm2m_xml\"\n" - " lifetime_min = 1s\n" - " lifetime_max = 86400s\n" - " qmode_time_window = 22\n" - " auto_observe = false\n" - " mountpoint = \"lwm2m/${username}\"\n" - " update_msg_publish_condition = contains_object_list\n" - " translators {\n" - " command = {topic = \"/dn/#\", qos = 0}\n" - " response = {topic = \"/up/resp\", qos = 0}\n" - " notify = {topic = \"/up/notify\", qos = 0}\n" - " register = {topic = \"/up/resp\", qos = 0}\n" - " update = {topic = \"/up/resp\", qos = 0}\n" - " }\n" - " listeners.udp.default {\n" - " bind = 5783\n" - " }\n" - "}\n" ->>). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). -record(coap_content, {content_format, payload = <<>>}). @@ -99,7 +77,8 @@ groups() -> %% case06_register_wrong_lifetime, %% now, will ignore wrong lifetime case07_register_alternate_path_01, case07_register_alternate_path_02, - case08_reregister + case08_reregister, + case09_auto_observe ]}, {test_grp_1_read, [RepeatOpt], [ case10_read, @@ -155,6 +134,7 @@ groups() -> init_per_suite(Config) -> %% load application first for minirest api searching application:load(emqx_gateway), + application:load(emqx_gateway_lwm2m), emqx_mgmt_api_test_util:init_suite([emqx_conf, emqx_authn]), Config. @@ -164,8 +144,15 @@ end_per_suite(Config) -> emqx_mgmt_api_test_util:end_suite([emqx_conf, emqx_authn]), Config. 
-init_per_testcase(_AllTestCase, Config) -> - ok = emqx_common_test_helpers:load_config(emqx_gateway_schema, ?CONF_DEFAULT), +init_per_testcase(TestCase, Config) -> + GatewayConfig = + case TestCase of + case09_auto_observe -> + default_config(#{auto_observe => true}); + _ -> + default_config() + end, + ok = emqx_common_test_helpers:load_config(emqx_gateway_schema, GatewayConfig), {ok, _} = application:ensure_all_started(emqx_gateway), {ok, ClientUdpSock} = gen_udp:open(0, [binary, {active, false}]), @@ -187,7 +174,46 @@ end_per_testcase(_AllTestCase, Config) -> ok = application:stop(emqx_gateway). default_config() -> - ?CONF_DEFAULT. + default_config(#{}). + +default_config(Overrides) -> + XmlDir = filename:join( + [ + emqx_common_test_helpers:proj_root(), + "apps", + "emqx_gateway_lwm2m", + "lwm2m_xml" + ] + ), + iolist_to_binary( + io_lib:format( + "\n" + "gateway.lwm2m {\n" + " xml_dir = \"~s\"\n" + " lifetime_min = 1s\n" + " lifetime_max = 86400s\n" + " qmode_time_window = 22\n" + " auto_observe = ~w\n" + " mountpoint = \"lwm2m/${username}\"\n" + " update_msg_publish_condition = contains_object_list\n" + " translators {\n" + " command = {topic = \"/dn/#\", qos = 0}\n" + " response = {topic = \"/up/resp\", qos = 0}\n" + " notify = {topic = \"/up/notify\", qos = 0}\n" + " register = {topic = \"/up/resp\", qos = 0}\n" + " update = {topic = \"/up/resp\", qos = 0}\n" + " }\n" + " listeners.udp.default {\n" + " bind = ~w\n" + " }\n" + "}\n", + [ + XmlDir, + maps:get(auto_observe, Overrides, false), + maps:get(bind, Overrides, ?PORT) + ] + ) + ). default_port() -> ?PORT. @@ -376,7 +402,7 @@ case01_register_report(Config) -> timer:sleep(50), true = lists:member(SubTopic, test_mqtt_broker:get_subscrbied_topics()), - ReadResult = emqx_json:encode( + ReadResult = emqx_utils_json:encode( #{ <<"msgType">> => <<"register">>, <<"data">> => #{ @@ -452,7 +478,7 @@ case02_update_deregister(Config) -> ?LOGT("Options got: ~p", [Opts]), Location = maps:get(location_path, Opts), - Register = emqx_json:encode( + Register = emqx_utils_json:encode( #{ <<"msgType">> => <<"register">>, <<"data">> => #{ @@ -495,7 +521,7 @@ case02_update_deregister(Config) -> } = test_recv_coap_response(UdpSock), {ok, changed} = Method2, MsgId2 = RspId2, - Update = emqx_json:encode( + Update = emqx_utils_json:encode( #{ <<"msgType">> => <<"update">>, <<"data">> => #{ @@ -728,7 +754,7 @@ case08_reregister(Config) -> timer:sleep(50), true = lists:member(SubTopic, test_mqtt_broker:get_subscrbied_topics()), - ReadResult = emqx_json:encode( + ReadResult = emqx_utils_json:encode( #{ <<"msgType">> => <<"register">>, <<"data">> => #{ @@ -762,6 +788,52 @@ case08_reregister(Config) -> %% verify the lwm2m client is still online ?assertEqual(ReadResult, test_recv_mqtt_response(ReportTopic)). +case09_auto_observe(Config) -> + UdpSock = ?config(sock, Config), + Epn = "urn:oma:lwm2m:oma:3", + MsgId1 = 15, + RespTopic = list_to_binary("lwm2m/" ++ Epn ++ "/up/resp"), + emqtt:subscribe(?config(emqx_c, Config), RespTopic, qos0), + timer:sleep(200), + + ok = snabbkaffe:start_trace(), + + %% step 1, device register ... 
+    test_send_coap_request(
+        UdpSock,
+        post,
+        sprintf("coap://127.0.0.1:~b/rd?ep=~ts&lt=345&lwm2m=1", [?PORT, Epn]),
+        #coap_content{
+            content_format = <<"text/plain">>,
+            payload = <<
+                "</lwm2m>;rt=\"oma.lwm2m\";ct=11543,"
+                "</3/0>,</4/0>,</5/0>,</6/0>"
+            >>
+        },
+        [],
+        MsgId1
+    ),
+    #coap_message{method = Method1} = test_recv_coap_response(UdpSock),
+    ?assertEqual({ok, created}, Method1),
+
+    #coap_message{
+        method = Method2,
+        token = Token2,
+        options = Options2
+    } = test_recv_coap_request(UdpSock),
+    ?assertEqual(get, Method2),
+    ?assertNotEqual(<<>>, Token2),
+    ?assertMatch(
+        #{
+            observe := 0,
+            uri_path := [<<"lwm2m">>, <<"3">>, <<"0">>]
+        },
+        Options2
+    ),
+
+    {ok, _} = ?block_until(#{?snk_kind := ignore_observer_resource}, 1000),
+    ok.
+
 case10_read(Config) ->
     UdpSock = ?config(sock, Config),
     Epn = "urn:oma:lwm2m:oma:3",
@@ -799,7 +871,7 @@ case10_read(Config) ->
             <<"path">> => <<"/3/0/0">>
         }
     },
-    CommandJson = emqx_json:encode(Command),
+    CommandJson = emqx_utils_json:encode(Command),
     ?LOGT("CommandJson=~p", [CommandJson]),
     test_mqtt_broker:publish(CommandTopic, CommandJson, 0),
     timer:sleep(50),
@@ -830,7 +902,7 @@
     ),
     timer:sleep(100),

-    ReadResult = emqx_json:encode(
+    ReadResult = emqx_utils_json:encode(
         #{
             <<"requestID">> => CmdId,
             <<"cacheID">> => CmdId,
@@ -885,7 +957,7 @@ case10_read_bad_request(Config) ->
             <<"path">> => <<"/3333/0/0">>
         }
     },
-    CommandJson = emqx_json:encode(Command),
+    CommandJson = emqx_utils_json:encode(Command),
     ?LOGT("CommandJson=~p", [CommandJson]),
     test_mqtt_broker:publish(CommandTopic, CommandJson, 0),
     timer:sleep(50),
@@ -907,7 +979,7 @@
     ),
     timer:sleep(100),

-    ReadResult = emqx_json:encode(#{
+    ReadResult = emqx_utils_json:encode(#{
        <<"requestID">> => CmdId,
        <<"cacheID">> => CmdId,
        <<"msgType">> => <<"read">>,
@@ -943,7 +1015,7 @@ case10_read_separate_ack(Config) ->
             <<"path">> => <<"/3/0/0">>
         }
     },
-    CommandJson = emqx_json:encode(Command),
+    CommandJson = emqx_utils_json:encode(Command),
     ?LOGT("CommandJson=~p", [CommandJson]),
     test_mqtt_broker:publish(CommandTopic, CommandJson, 0),
     timer:sleep(50),
@@ -960,7 +1032,7 @@
     ?assertEqual(<<>>, Payload2),
     test_send_empty_ack(UdpSock, "127.0.0.1", ?PORT, Request2),

-    ReadResultACK = emqx_json:encode(
+    ReadResultACK = emqx_utils_json:encode(
         #{
             <<"requestID">> => CmdId,
             <<"cacheID">> => CmdId,
@@ -985,7 +1057,7 @@
     ),
     timer:sleep(100),

-    ReadResult = emqx_json:encode(
+    ReadResult = emqx_utils_json:encode(
         #{
             <<"requestID">> => CmdId,
             <<"cacheID">> => CmdId,
@@ -1028,7 +1100,7 @@ case11_read_object_tlv(Config) ->
             <<"path">> => <<"/3/0">>
         }
     },
-    CommandJson = emqx_json:encode(Command),
+    CommandJson = emqx_utils_json:encode(Command),
     ?LOGT("CommandJson=~p", [CommandJson]),
     test_mqtt_broker:publish(CommandTopic, CommandJson, 0),
     timer:sleep(50),
@@ -1060,7 +1132,7 @@
     ),
     timer:sleep(100),

-    ReadResult = emqx_json:encode(
+    ReadResult = emqx_utils_json:encode(
         #{
             <<"requestID">> => CmdId,
             <<"cacheID">> => CmdId,
@@ -1113,7 +1185,7 @@ case11_read_object_json(Config) ->
             <<"path">> => <<"/3/0">>
         }
     },
-    CommandJson = emqx_json:encode(Command),
+    CommandJson = emqx_utils_json:encode(Command),
     ?LOGT("CommandJson=~p", [CommandJson]),
     test_mqtt_broker:publish(CommandTopic, CommandJson, 0),
     timer:sleep(50),
@@ -1143,7 +1215,7 @@
     ),
     timer:sleep(100),

-    ReadResult = emqx_json:encode(
+    ReadResult = emqx_utils_json:encode(
         #{
             <<"requestID">> => CmdId,
<<"cacheID">> => CmdId, @@ -1195,7 +1267,7 @@ case12_read_resource_opaque(Config) -> <<"path">> => <<"/3/0/8">> } }, - CommandJson = emqx_json:encode(Command), + CommandJson = emqx_utils_json:encode(Command), ?LOGT("CommandJson=~p", [CommandJson]), test_mqtt_broker:publish(CommandTopic, CommandJson, 0), timer:sleep(50), @@ -1221,7 +1293,7 @@ case12_read_resource_opaque(Config) -> ), timer:sleep(100), - ReadResult = emqx_json:encode( + ReadResult = emqx_utils_json:encode( #{ <<"requestID">> => CmdId, <<"cacheID">> => CmdId, @@ -1263,7 +1335,7 @@ case13_read_no_xml(Config) -> <<"msgType">> => <<"read">>, <<"data">> => #{<<"path">> => <<"/9723/0/0">>} }, - CommandJson = emqx_json:encode(Command), + CommandJson = emqx_utils_json:encode(Command), ?LOGT("CommandJson=~p", [CommandJson]), test_mqtt_broker:publish(CommandTopic, CommandJson, 0), timer:sleep(50), @@ -1288,7 +1360,7 @@ case13_read_no_xml(Config) -> ), timer:sleep(100), - ReadResult = emqx_json:encode( + ReadResult = emqx_utils_json:encode( #{ <<"requestID">> => CmdId, <<"cacheID">> => CmdId, @@ -1327,7 +1399,7 @@ case20_single_write(Config) -> <<"value">> => <<"12345">> } }, - CommandJson = emqx_json:encode(Command), + CommandJson = emqx_utils_json:encode(Command), test_mqtt_broker:publish(CommandTopic, CommandJson, 0), timer:sleep(50), Request2 = test_recv_coap_request(UdpSock), @@ -1354,7 +1426,7 @@ case20_single_write(Config) -> ), timer:sleep(100), - ReadResult = emqx_json:encode( + ReadResult = emqx_utils_json:encode( #{ <<"requestID">> => CmdId, <<"cacheID">> => CmdId, @@ -1398,7 +1470,7 @@ case20_write(Config) -> ] } }, - CommandJson = emqx_json:encode(Command), + CommandJson = emqx_utils_json:encode(Command), test_mqtt_broker:publish(CommandTopic, CommandJson, 0), timer:sleep(50), Request2 = test_recv_coap_request(UdpSock), @@ -1425,7 +1497,7 @@ case20_write(Config) -> ), timer:sleep(100), - WriteResult = emqx_json:encode( + WriteResult = emqx_utils_json:encode( #{ <<"requestID">> => CmdId, <<"cacheID">> => CmdId, @@ -1475,7 +1547,7 @@ case21_write_object(Config) -> ] } }, - CommandJson = emqx_json:encode(Command), + CommandJson = emqx_utils_json:encode(Command), test_mqtt_broker:publish(CommandTopic, CommandJson, 0), timer:sleep(50), Request2 = test_recv_coap_request(UdpSock), @@ -1502,7 +1574,7 @@ case21_write_object(Config) -> ), timer:sleep(100), - ReadResult = emqx_json:encode( + ReadResult = emqx_utils_json:encode( #{ <<"requestID">> => CmdId, <<"cacheID">> => CmdId, @@ -1546,7 +1618,7 @@ case22_write_error(Config) -> ] } }, - CommandJson = emqx_json:encode(Command), + CommandJson = emqx_utils_json:encode(Command), test_mqtt_broker:publish(CommandTopic, CommandJson, 0), timer:sleep(50), Request2 = test_recv_coap_request(UdpSock), @@ -1567,7 +1639,7 @@ case22_write_error(Config) -> ), timer:sleep(100), - ReadResult = emqx_json:encode( + ReadResult = emqx_utils_json:encode( #{ <<"requestID">> => CmdId, <<"cacheID">> => CmdId, @@ -1605,7 +1677,7 @@ case_create_basic(Config) -> <<"basePath">> => <<"/5">> } }, - CommandJson = emqx_json:encode(Command), + CommandJson = emqx_utils_json:encode(Command), test_mqtt_broker:publish(CommandTopic, CommandJson, 0), timer:sleep(50), Request2 = test_recv_coap_request(UdpSock), @@ -1631,7 +1703,7 @@ case_create_basic(Config) -> ), timer:sleep(100), - ReadResult = emqx_json:encode( + ReadResult = emqx_utils_json:encode( #{ <<"requestID">> => CmdId, <<"cacheID">> => CmdId, @@ -1666,7 +1738,7 @@ case_delete_basic(Config) -> <<"msgType">> => <<"delete">>, <<"data">> => #{<<"path">> => 
<<"/5/0">>} }, - CommandJson = emqx_json:encode(Command), + CommandJson = emqx_utils_json:encode(Command), test_mqtt_broker:publish(CommandTopic, CommandJson, 0), timer:sleep(50), Request2 = test_recv_coap_request(UdpSock), @@ -1692,7 +1764,7 @@ case_delete_basic(Config) -> ), timer:sleep(100), - ReadResult = emqx_json:encode( + ReadResult = emqx_utils_json:encode( #{ <<"requestID">> => CmdId, <<"cacheID">> => CmdId, @@ -1732,7 +1804,7 @@ case30_execute(Config) -> <<"args">> => <<"2,7">> } }, - CommandJson = emqx_json:encode(Command), + CommandJson = emqx_utils_json:encode(Command), test_mqtt_broker:publish(CommandTopic, CommandJson, 0), timer:sleep(50), Request2 = test_recv_coap_request(UdpSock), @@ -1758,7 +1830,7 @@ case30_execute(Config) -> ), timer:sleep(100), - ReadResult = emqx_json:encode( + ReadResult = emqx_utils_json:encode( #{ <<"requestID">> => CmdId, <<"cacheID">> => CmdId, @@ -1796,7 +1868,7 @@ case31_execute_error(Config) -> <<"args">> => <<"2,7">> } }, - CommandJson = emqx_json:encode(Command), + CommandJson = emqx_utils_json:encode(Command), test_mqtt_broker:publish(CommandTopic, CommandJson, 0), timer:sleep(50), Request2 = test_recv_coap_request(UdpSock), @@ -1822,7 +1894,7 @@ case31_execute_error(Config) -> ), timer:sleep(100), - ReadResult = emqx_json:encode( + ReadResult = emqx_utils_json:encode( #{ <<"requestID">> => CmdId, <<"cacheID">> => CmdId, @@ -1859,7 +1931,7 @@ case40_discover(Config) -> <<"path">> => <<"/3/0/7">> } }, - CommandJson = emqx_json:encode(Command), + CommandJson = emqx_utils_json:encode(Command), test_mqtt_broker:publish(CommandTopic, CommandJson, 0), timer:sleep(50), Request2 = test_recv_coap_request(UdpSock), @@ -1889,7 +1961,7 @@ case40_discover(Config) -> ), timer:sleep(100), - ReadResult = emqx_json:encode( + ReadResult = emqx_utils_json:encode( #{ <<"requestID">> => CmdId, <<"cacheID">> => CmdId, @@ -1934,7 +2006,7 @@ case50_write_attribute(Config) -> <<"lt">> => <<"5">> } }, - CommandJson = emqx_json:encode(Command), + CommandJson = emqx_utils_json:encode(Command), test_mqtt_broker:publish(CommandTopic, CommandJson, 0), timer:sleep(100), Request2 = test_recv_coap_request(UdpSock), @@ -1970,7 +2042,7 @@ case50_write_attribute(Config) -> ), timer:sleep(100), - ReadResult = emqx_json:encode( + ReadResult = emqx_utils_json:encode( #{ <<"requestID">> => CmdId, <<"cacheID">> => CmdId, @@ -2007,7 +2079,7 @@ case60_observe(Config) -> <<"msgType">> => <<"observe">>, <<"data">> => #{<<"path">> => <<"/3/0/10">>} }, - CommandJson = emqx_json:encode(Command), + CommandJson = emqx_utils_json:encode(Command), test_mqtt_broker:publish(CommandTopic, CommandJson, 0), timer:sleep(50), Request2 = test_recv_coap_request(UdpSock), @@ -2034,7 +2106,7 @@ case60_observe(Config) -> ), timer:sleep(100), - ReadResult = emqx_json:encode( + ReadResult = emqx_utils_json:encode( #{ <<"requestID">> => CmdId, <<"cacheID">> => CmdId, @@ -2069,7 +2141,7 @@ case60_observe(Config) -> timer:sleep(100), #coap_message{} = test_recv_coap_response(UdpSock), - ReadResult2 = emqx_json:encode( + ReadResult2 = emqx_utils_json:encode( #{ <<"requestID">> => CmdId, <<"cacheID">> => CmdId, @@ -2101,7 +2173,7 @@ case60_observe(Config) -> <<"path">> => <<"/3/0/10">> } }, - CommandJson3 = emqx_json:encode(Command3), + CommandJson3 = emqx_utils_json:encode(Command3), test_mqtt_broker:publish(CommandTopic, CommandJson3, 0), timer:sleep(50), Request3 = test_recv_coap_request(UdpSock), @@ -2128,7 +2200,7 @@ case60_observe(Config) -> ), timer:sleep(100), - ReadResult3 = emqx_json:encode( + 
ReadResult3 = emqx_utils_json:encode( #{ <<"requestID">> => CmdId3, <<"cacheID">> => CmdId3, @@ -2170,7 +2242,7 @@ case60_observe(Config) -> %% MsgId1), %% #coap_message{method = Method1} = test_recv_coap_response(UdpSock), %% ?assertEqual({ok,created}, Method1), -%% ReadResult = emqx_json:encode( +%% ReadResult = emqx_utils_json:encode( %% #{<<"msgType">> => <<"register">>, %% <<"data">> => #{ %% <<"alternatePath">> => <<"/">>, @@ -2196,7 +2268,7 @@ case60_observe(Config) -> %% <<"path">> => <<"/19/0/0">> %% } %% }, -%% CommandJson = emqx_json:encode(Command), +%% CommandJson = emqx_utils_json:encode(Command), %% test_mqtt_broker:publish(CommandTopic, CommandJson, 0), %% timer:sleep(50), %% Request2 = test_recv_coap_request(UdpSock), @@ -2253,7 +2325,7 @@ case60_observe(Config) -> %% <<"value">> => base64:encode(<<12345:32>>) %% }}, %% -%% CommandJson = emqx_json:encode(Command), +%% CommandJson = emqx_utils_json:encode(Command), %% test_mqtt_broker:publish(CommandTopic, CommandJson, 0), %% timer:sleep(50), %% Request2 = test_recv_coap_request(UdpSock), @@ -2270,7 +2342,7 @@ case60_observe(Config) -> %% {ok, changed}, #coap_content{}, Request2, true), %% timer:sleep(100), %% -%% ReadResult = emqx_json:encode( +%% ReadResult = emqx_utils_json:encode( %% #{<<"requestID">> => CmdId, %% <<"cacheID">> => CmdId, %% <<"data">> => #{ @@ -2430,7 +2502,7 @@ send_read_command_1(CmdId, _UdpSock) -> <<"msgType">> => <<"read">>, <<"data">> => #{<<"path">> => <<"/3/0/0">>} }, - CommandJson = emqx_json:encode(Command), + CommandJson = emqx_utils_json:encode(Command), test_mqtt_broker:publish(CommandTopic, CommandJson, 0), timer:sleep(50). @@ -2456,7 +2528,7 @@ verify_read_response_1(CmdId, UdpSock) -> true ), - ReadResult = emqx_json:encode( + ReadResult = emqx_utils_json:encode( #{ <<"requestID">> => CmdId, <<"cacheID">> => CmdId, diff --git a/apps/emqx_gateway/test/emqx_lwm2m_api_SUITE.erl b/apps/emqx_gateway_lwm2m/test/emqx_lwm2m_api_SUITE.erl similarity index 91% rename from apps/emqx_gateway/test/emqx_lwm2m_api_SUITE.erl rename to apps/emqx_gateway_lwm2m/test/emqx_lwm2m_api_SUITE.erl index c40d1af55..6fa46ebbc 100644 --- a/apps/emqx_gateway/test/emqx_lwm2m_api_SUITE.erl +++ b/apps/emqx_gateway_lwm2m/test/emqx_lwm2m_api_SUITE.erl @@ -23,34 +23,11 @@ -define(LOGT(Format, Args), ct:pal("TEST_SUITE: " ++ Format, Args)). --include("src/lwm2m/include/emqx_lwm2m.hrl"). --include("src/coap/include/emqx_coap.hrl"). +-include("emqx_lwm2m.hrl"). +-include("emqx_gateway_coap/include/emqx_coap.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("common_test/include/ct.hrl"). --define(CONF_DEFAULT, << - "\n" - "gateway.lwm2m {\n" - " xml_dir = \"../../lib/emqx_gateway/src/lwm2m/lwm2m_xml\"\n" - " lifetime_min = 100s\n" - " lifetime_max = 86400s\n" - " qmode_time_window = 200\n" - " auto_observe = false\n" - " mountpoint = \"lwm2m/${username}\"\n" - " update_msg_publish_condition = contains_object_list\n" - " translators {\n" - " command = {topic = \"/dn/#\", qos = 0}\n" - " response = {topic = \"/up/resp\", qos = 0}\n" - " notify = {topic = \"/up/notify\", qos = 0}\n" - " register = {topic = \"/up/resp\", qos = 0}\n" - " update = {topic = \"/up/resp\", qos = 0}\n" - " }\n" - " listeners.udp.default {\n" - " bind = 5783\n" - " }\n" - "}\n" ->>). - -define(assertExists(Map, Key), ?assertNotEqual(maps:get(Key, Map, undefined), undefined) ). @@ -81,8 +58,10 @@ all() -> emqx_common_test_helpers:all(?MODULE). 
init_per_suite(Config) -> - ok = emqx_common_test_helpers:load_config(emqx_gateway_schema, ?CONF_DEFAULT), application:load(emqx_gateway), + application:load(emqx_gateway_lwm2m), + DefaultConfig = emqx_lwm2m_SUITE:default_config(), + ok = emqx_common_test_helpers:load_config(emqx_gateway_schema, DefaultConfig), emqx_mgmt_api_test_util:init_suite([emqx_conf, emqx_authn]), Config. @@ -93,7 +72,8 @@ end_per_suite(Config) -> Config. init_per_testcase(_AllTestCase, Config) -> - ok = emqx_common_test_helpers:load_config(emqx_gateway_schema, ?CONF_DEFAULT), + DefaultConfig = emqx_lwm2m_SUITE:default_config(), + ok = emqx_common_test_helpers:load_config(emqx_gateway_schema, DefaultConfig), {ok, _} = application:ensure_all_started(emqx_gateway), {ok, ClientUdpSock} = gen_udp:open(0, [binary, {active, false}]), @@ -151,7 +131,7 @@ t_lookup_read(Config) -> <<"path">> => <<"/3/0/0">> } }, - CommandJson = emqx_json:encode(Command), + CommandJson = emqx_utils_json:encode(Command), ?LOGT("CommandJson=~p", [CommandJson]), test_mqtt_broker:publish(CommandTopic, CommandJson, 0), @@ -198,7 +178,7 @@ t_lookup_discover(Config) -> <<"path">> => <<"/3/0/7">> } }, - CommandJson = emqx_json:encode(Command), + CommandJson = emqx_utils_json:encode(Command), test_mqtt_broker:publish(CommandTopic, CommandJson, 0), timer:sleep(200), @@ -370,10 +350,10 @@ no_received_request(ClientId, Path, Action) -> <<"codeMsg">> => <<"reply_not_received">>, <<"path">> => Path }, - ?assertEqual(NotReceived, emqx_json:decode(Response, [return_maps])). + ?assertEqual(NotReceived, emqx_utils_json:decode(Response, [return_maps])). normal_received_request(ClientId, Path, Action) -> Response = call_lookup_api(ClientId, Path, Action), - RCont = emqx_json:decode(Response, [return_maps]), + RCont = emqx_utils_json:decode(Response, [return_maps]), ?assertEqual(list_to_binary(ClientId), maps:get(<<"clientid">>, RCont, undefined)), ?assertEqual(Path, maps:get(<<"path">>, RCont, undefined)), ?assertEqual(Action, maps:get(<<"action">>, RCont, undefined)), diff --git a/apps/emqx_gateway/test/emqx_tlv_SUITE.erl b/apps/emqx_gateway_lwm2m/test/emqx_tlv_SUITE.erl similarity index 99% rename from apps/emqx_gateway/test/emqx_tlv_SUITE.erl rename to apps/emqx_gateway_lwm2m/test/emqx_tlv_SUITE.erl index 5dcef7e72..c413469ea 100644 --- a/apps/emqx_gateway/test/emqx_tlv_SUITE.erl +++ b/apps/emqx_gateway_lwm2m/test/emqx_tlv_SUITE.erl @@ -21,8 +21,8 @@ -define(LOGT(Format, Args), logger:debug("TEST_SUITE: " ++ Format, Args)). --include("src/lwm2m/include/emqx_lwm2m.hrl"). --include("src/coap/include/emqx_coap.hrl"). +-include("emqx_lwm2m.hrl"). +-include("emqx_gateway_coap/include/emqx_coap.hrl"). -include_lib("eunit/include/eunit.hrl"). %%-------------------------------------------------------------------- diff --git a/apps/emqx_gateway_mqttsn/.gitignore b/apps/emqx_gateway_mqttsn/.gitignore new file mode 100644 index 000000000..f1c455451 --- /dev/null +++ b/apps/emqx_gateway_mqttsn/.gitignore @@ -0,0 +1,19 @@ +.rebar3 +_* +.eunit +*.o +*.beam +*.plt +*.swp +*.swo +.erlang.cookie +ebin +log +erl_crash.dump +.rebar +logs +_build +.idea +*.iml +rebar3.crashdump +*~ diff --git a/apps/emqx_gateway_mqttsn/README.md b/apps/emqx_gateway_mqttsn/README.md new file mode 100644 index 000000000..dd72a86a5 --- /dev/null +++ b/apps/emqx_gateway_mqttsn/README.md @@ -0,0 +1,34 @@ +# emqx_mqttsn + +The MQTT-SN gateway is based on the +[MQTT-SN v1.2](https://www.oasis-open.org/committees/download.php/66091/MQTT-SN_spec_v1.2.pdf). 
+ +## Quick Start + +In EMQX 5.0, the MQTT-SN gateway can be configured and enabled through the Dashboard. + +It can also be enabled via the HTTP API or emqx.conf, e.g., in emqx.conf: + +```properties +gateway.mqttsn { + + mountpoint = "mqtt/sn" + + gateway_id = 1 + + broadcast = true + + enable_qos3 = true + + listeners.udp.default { + bind = 1884 + max_connections = 10240000 + max_conn_rate = 1000 + } +} +``` + +> Note: +> Configuring the gateway via emqx.conf requires changes on a per-node basis, +> but configuring it via Dashboard or the HTTP API will take effect across the cluster. + +More documentation: [MQTT-SN Gateway](https://www.emqx.io/docs/en/v5.0/gateway/mqttsn.html) diff --git a/apps/emqx_gateway/src/mqttsn/include/emqx_sn.hrl b/apps/emqx_gateway_mqttsn/include/emqx_mqttsn.hrl similarity index 100% rename from apps/emqx_gateway/src/mqttsn/include/emqx_sn.hrl rename to apps/emqx_gateway_mqttsn/include/emqx_mqttsn.hrl diff --git a/apps/emqx_gateway_mqttsn/rebar.config b/apps/emqx_gateway_mqttsn/rebar.config new file mode 100644 index 000000000..c8675c3ba --- /dev/null +++ b/apps/emqx_gateway_mqttsn/rebar.config @@ -0,0 +1,4 @@ +{erl_opts, [debug_info]}. +{deps, [ {emqx, {path, "../../apps/emqx"}}, + {emqx_gateway, {path, "../../apps/emqx_gateway"}} + ]}. diff --git a/apps/emqx_gateway_mqttsn/src/emqx_gateway_mqttsn.app.src b/apps/emqx_gateway_mqttsn/src/emqx_gateway_mqttsn.app.src new file mode 100644 index 000000000..76f0f45b5 --- /dev/null +++ b/apps/emqx_gateway_mqttsn/src/emqx_gateway_mqttsn.app.src @@ -0,0 +1,10 @@ +{application, emqx_gateway_mqttsn, [ + {description, "MQTT-SN Gateway"}, + {vsn, "0.1.1"}, + {registered, []}, + {applications, [kernel, stdlib, emqx, emqx_gateway]}, + {env, []}, + {modules, []}, + {licenses, ["Apache 2.0"]}, + {links, []} +]}. diff --git a/apps/emqx_gateway/src/mqttsn/emqx_sn_impl.erl b/apps/emqx_gateway_mqttsn/src/emqx_gateway_mqttsn.erl similarity index 76% rename from apps/emqx_gateway/src/mqttsn/emqx_sn_impl.erl rename to apps/emqx_gateway_mqttsn/src/emqx_gateway_mqttsn.erl index db730aee1..167ee465c 100644 --- a/apps/emqx_gateway/src/mqttsn/emqx_sn_impl.erl +++ b/apps/emqx_gateway_mqttsn/src/emqx_gateway_mqttsn.erl @@ -1,5 +1,5 @@ %%-------------------------------------------------------------------- -%% Copyright (c) 2021-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% Copyright (c) 2021 EMQ Technologies Co., Ltd. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -14,13 +14,28 @@ %% limitations under the License. %%-------------------------------------------------------------------- -%% @doc The MQTT-SN Gateway Implement interface --module(emqx_sn_impl). - --behaviour(emqx_gateway_impl). +%% @doc The MQTT-SN Gateway implementation interface +-module(emqx_gateway_mqttsn). -include_lib("emqx/include/logger.hrl"). +%% define a gateway named mqttsn +-gateway(#{ + name => mqttsn, + callback_module => ?MODULE, + config_schema_module => emqx_mqttsn_schema +}). + +%% callback_module must implement the emqx_gateway_impl behaviour +-behaviour(emqx_gateway_impl). + +%% callback for emqx_gateway_impl +-export([ + on_gateway_load/2, + on_gateway_update/3, + on_gateway_unload/2 +]). + -import( emqx_gateway_utils, [ @@ -30,31 +45,8 @@ ] ). -%% APIs --export([ - reg/0, - unreg/0 -]). - --export([ - on_gateway_load/2, - on_gateway_update/3, - on_gateway_unload/2 -]).
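The hunks above capture the core of this refactor: the gateway no longer registers itself imperatively through `reg/0` and `unreg/0` (their deleted bodies follow below); it now declares itself with a `-gateway(#{...})` module attribute naming its callback and config-schema modules. A minimal sketch of how such an attribute can be discovered at runtime; the helper name and error shape are assumptions, not the actual `emqx_gateway` loader code:

```erlang
%% Sketch: pick up a `-gateway(#{...})' declaration from a module's
%% attribute list. Attribute values come back wrapped in a list.
find_gateway_definition(Module) ->
    Attrs = Module:module_info(attributes),
    case proplists:get_value(gateway, Attrs) of
        [Def = #{name := _, callback_module := _}] ->
            {ok, Def};
        undefined ->
            {error, no_gateway_attribute}
    end.
```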
- %%-------------------------------------------------------------------- -%% APIs -%%-------------------------------------------------------------------- - -reg() -> - RegistryOptions = [{cbkmod, ?MODULE}], - emqx_gateway_registry:reg(mqttsn, RegistryOptions). - -unreg() -> - emqx_gateway_registry:unreg(mqttsn). - -%%-------------------------------------------------------------------- -%% emqx_gateway_registry callbacks +%% emqx_gateway_impl callbacks %%-------------------------------------------------------------------- on_gateway_load( @@ -64,8 +56,8 @@ on_gateway_load( }, Ctx ) -> - %% We Also need to start `emqx_sn_broadcast` & - %% `emqx_sn_registry` process + %% We Also need to start `emqx_mqttsn_broadcast` & + %% `emqx_mqttsn_registry` process case maps:get(broadcast, Config, false) of false -> ok; @@ -73,23 +65,23 @@ on_gateway_load( %% FIXME: Port = 1884, SnGwId = maps:get(gateway_id, Config, undefined), - _ = emqx_sn_broadcast:start_link(SnGwId, Port), + _ = emqx_mqttsn_broadcast:start_link(SnGwId, Port), ok end, PredefTopics = maps:get(predefined, Config, []), - {ok, RegistrySvr} = emqx_sn_registry:start_link(GwName, PredefTopics), + {ok, RegistrySvr} = emqx_mqttsn_registry:start_link(GwName, PredefTopics), NConfig = maps:without( [broadcast, predefined], - Config#{registry => emqx_sn_registry:lookup_name(RegistrySvr)} + Config#{registry => emqx_mqttsn_registry:lookup_name(RegistrySvr)} ), Listeners = emqx_gateway_utils:normalize_config(NConfig), ModCfg = #{ - frame_mod => emqx_sn_frame, - chann_mod => emqx_sn_channel + frame_mod => emqx_mqttsn_frame, + chann_mod => emqx_mqttsn_channel }, case diff --git a/apps/emqx_gateway/src/mqttsn/emqx_sn_broadcast.erl b/apps/emqx_gateway_mqttsn/src/emqx_mqttsn_broadcast.erl similarity index 89% rename from apps/emqx_gateway/src/mqttsn/emqx_sn_broadcast.erl rename to apps/emqx_gateway_mqttsn/src/emqx_mqttsn_broadcast.erl index 5fc08ad7f..be0122e0e 100644 --- a/apps/emqx_gateway/src/mqttsn/emqx_sn_broadcast.erl +++ b/apps/emqx_gateway_mqttsn/src/emqx_mqttsn_broadcast.erl @@ -14,17 +14,11 @@ %% limitations under the License. %%-------------------------------------------------------------------- --module(emqx_sn_broadcast). +-module(emqx_mqttsn_broadcast). -behaviour(gen_server). --ifdef(TEST). -%% make rebar3 ct happy when testing with --suite path/to/module_SUITE.erl --include_lib("emqx_gateway/src/mqttsn/include/emqx_sn.hrl"). --else. -%% make mix happy --include("src/mqttsn/include/emqx_sn.hrl"). --endif. +-include("emqx_mqttsn.hrl"). -include_lib("emqx/include/logger.hrl"). 
-export([ @@ -65,7 +59,7 @@ stop() -> init([GwId, Port]) -> %% FIXME: - Duration = application:get_env(emqx_sn, advertise_duration, ?DEFAULT_DURATION), + Duration = application:get_env(emqx_mqttsn, advertise_duration, ?DEFAULT_DURATION), {ok, Sock} = gen_udp:open(0, [binary, {broadcast, true}]), {ok, ensure_advertise(#state{ @@ -121,7 +115,7 @@ send_advertise(#state{ addrs = Addrs, duration = Duration }) -> - Data = emqx_sn_frame:serialize_pkt(?SN_ADVERTISE_MSG(GwId, Duration), #{}), + Data = emqx_mqttsn_frame:serialize_pkt(?SN_ADVERTISE_MSG(GwId, Duration), #{}), lists:foreach( fun(Addr) -> ?SLOG(debug, #{ diff --git a/apps/emqx_gateway/src/mqttsn/emqx_sn_channel.erl b/apps/emqx_gateway_mqttsn/src/emqx_mqttsn_channel.erl similarity index 97% rename from apps/emqx_gateway/src/mqttsn/emqx_sn_channel.erl rename to apps/emqx_gateway_mqttsn/src/emqx_mqttsn_channel.erl index 29dce90ee..ae1da5dac 100644 --- a/apps/emqx_gateway/src/mqttsn/emqx_sn_channel.erl +++ b/apps/emqx_gateway_mqttsn/src/emqx_mqttsn_channel.erl @@ -14,11 +14,11 @@ %% limitations under the License. %%-------------------------------------------------------------------- --module(emqx_sn_channel). +-module(emqx_mqttsn_channel). -behaviour(emqx_gateway_channel). --include("src/mqttsn/include/emqx_sn.hrl"). +-include("emqx_mqttsn.hrl"). -include_lib("emqx/include/emqx.hrl"). -include_lib("emqx/include/types.hrl"). -include_lib("emqx/include/emqx_mqtt.hrl"). @@ -51,7 +51,7 @@ %% Context ctx :: emqx_gateway_ctx:context(), %% Registry - registry :: emqx_sn_registry:registry(), + registry :: emqx_mqttsn_registry:registry(), %% Gateway Id gateway_id :: integer(), %% Enable QoS3 @@ -218,7 +218,7 @@ info(conn_state, #channel{conn_state = ConnState}) -> info(clientinfo, #channel{clientinfo = ClientInfo}) -> ClientInfo; info(session, #channel{session = Session}) -> - emqx_misc:maybe_apply(fun emqx_session:info/1, Session); + emqx_utils:maybe_apply(fun emqx_session:info/1, Session); info(will_msg, #channel{will_msg = WillMsg}) -> WillMsg; info(clientid, #channel{clientinfo = #{clientid := ClientId}}) -> @@ -282,7 +282,7 @@ enrich_clientinfo( feedvar(Override, Packet, ConnInfo, ClientInfo0), ClientInfo0 ), - {ok, NPacket, NClientInfo} = emqx_misc:pipeline( + {ok, NPacket, NClientInfo} = emqx_utils:pipeline( [ fun maybe_assign_clientid/2, %% FIXME: CALL After authentication successfully @@ -389,7 +389,12 @@ process_connect( clientinfo = ClientInfo } ) -> - SessFun = fun(_, _) -> emqx_session:init(#{max_inflight => 1}) end, + SessFun = fun(ClientInfoT, _) -> + Conf = emqx_cm:get_session_confs( + ClientInfoT, #{receive_maximum => 1, expiry_interval => 0} + ), + emqx_session:init(Conf) + end, case emqx_gateway_ctx:open_session( Ctx, @@ -409,7 +414,7 @@ process_connect( Channel#channel{session = Session} ); {ok, #{session := Session, present := true, pendings := Pendings}} -> - Pendings1 = lists:usort(lists:append(Pendings, emqx_misc:drain_deliver())), + Pendings1 = lists:usort(lists:append(Pendings, emqx_utils:drain_deliver())), NChannel = Channel#channel{ session = Session, resuming = true, @@ -473,7 +478,7 @@ handle_in( true -> <>; false -> - emqx_sn_registry:lookup_topic( + emqx_mqttsn_registry:lookup_topic( Registry, ?NEG_QOS_CLIENT_ID, TopicId @@ -590,7 +595,7 @@ handle_in( Channel = #channel{conn_state = idle} ) -> case - emqx_misc:pipeline( + emqx_utils:pipeline( [ fun enrich_conninfo/2, fun run_conn_hooks/2, @@ -619,7 +624,7 @@ handle_in( clientinfo = #{clientid := ClientId} } ) -> - case emqx_sn_registry:register_topic(Registry, 
ClientId, TopicName) of + case emqx_mqttsn_registry:register_topic(Registry, ClientId, TopicName) of TopicId when is_integer(TopicId) -> ?SLOG(debug, #{ msg => "registered_topic_name", @@ -713,7 +718,7 @@ handle_in(PubPkt = ?SN_PUBLISH_MSG(_Flags, TopicId0, MsgId, _Data), Channel) -> Id end, case - emqx_misc:pipeline( + emqx_utils:pipeline( [ fun check_qos3_enable/2, fun preproc_pub_pkt/2, @@ -773,7 +778,7 @@ handle_in( {ok, Channel} end; ?SN_RC_INVALID_TOPIC_ID -> - case emqx_sn_registry:lookup_topic(Registry, ClientId, TopicId) of + case emqx_mqttsn_registry:lookup_topic(Registry, ClientId, TopicId) of undefined -> {ok, Channel}; TopicName -> @@ -872,7 +877,7 @@ handle_in( end; handle_in(SubPkt = ?SN_SUBSCRIBE_MSG(_, MsgId, _), Channel) -> case - emqx_misc:pipeline( + emqx_utils:pipeline( [ fun preproc_subs_type/2, fun check_subscribe_authz/2, @@ -906,7 +911,7 @@ handle_in( Channel ) -> case - emqx_misc:pipeline( + emqx_utils:pipeline( [ fun preproc_unsub_type/2, fun run_client_unsub_hook/2, @@ -1088,7 +1093,7 @@ convert_topic_id_to_name( clientinfo = #{clientid := ClientId} } ) -> - case emqx_sn_registry:lookup_topic(Registry, ClientId, TopicId) of + case emqx_mqttsn_registry:lookup_topic(Registry, ClientId, TopicId) of undefined -> {error, ?SN_RC_INVALID_TOPIC_ID}; TopicName -> @@ -1197,7 +1202,7 @@ preproc_subs_type( %% If the gateway is able accept the subscription, %% it assigns a topic id to the received topic name %% and returns it within a SUBACK message - case emqx_sn_registry:register_topic(Registry, ClientId, TopicName) of + case emqx_mqttsn_registry:register_topic(Registry, ClientId, TopicName) of {error, too_large} -> {error, ?SN_RC2_EXCEED_LIMITATION}; {error, wildcard_topic} -> @@ -1223,7 +1228,7 @@ preproc_subs_type( } ) -> case - emqx_sn_registry:lookup_topic( + emqx_mqttsn_registry:lookup_topic( Registry, ClientId, TopicId @@ -1339,7 +1344,7 @@ preproc_unsub_type( } ) -> case - emqx_sn_registry:lookup_topic( + emqx_mqttsn_registry:lookup_topic( Registry, ClientId, TopicId @@ -1760,7 +1765,7 @@ message_to_packet( ?QOS_0 -> 0; _ -> MsgId end, - case emqx_sn_registry:lookup_topic_id(Registry, ClientId, Topic) of + case emqx_mqttsn_registry:lookup_topic_id(Registry, ClientId, Topic) of {predef, PredefTopicId} -> Flags = #mqtt_sn_flags{qos = QoS, topic_id_type = ?SN_PREDEFINED_TOPIC}, ?SN_PUBLISH_MSG(Flags, PredefTopicId, NMsgId, Payload); @@ -1818,7 +1823,7 @@ handle_call( ) -> ok = emqx_session:takeover(Session), %% TODO: Should not drain deliver here (side effect) - Delivers = emqx_misc:drain_deliver(), + Delivers = emqx_utils:drain_deliver(), AllPendings = lists:append(Delivers, Pendings), shutdown_and_reply(takenover, AllPendings, Channel); %handle_call(list_authz_cache, _From, Channel) -> @@ -1927,9 +1932,9 @@ ensure_registered_topic_name( Channel = #channel{registry = Registry} ) -> ClientId = clientid(Channel), - case emqx_sn_registry:lookup_topic_id(Registry, ClientId, TopicName) of + case emqx_mqttsn_registry:lookup_topic_id(Registry, ClientId, TopicName) of undefined -> - case emqx_sn_registry:register_topic(Registry, ClientId, TopicName) of + case emqx_mqttsn_registry:register_topic(Registry, ClientId, TopicName) of {error, Reason} -> {error, Reason}; TopicId -> {ok, TopicId} end; @@ -2040,15 +2045,15 @@ handle_deliver( ignore_local(Delivers, Subscriber, Session, Ctx) -> Subs = emqx_session:info(subscriptions, Session), - lists:dropwhile( + lists:filter( fun({deliver, Topic, #message{from = Publisher}}) -> case maps:find(Topic, Subs) of {ok, #{nl := 1}} when 
Subscriber =:= Publisher -> ok = metrics_inc(Ctx, 'delivery.dropped'), ok = metrics_inc(Ctx, 'delivery.dropped.no_local'), - true; + false; _ -> - false + true end end, Delivers @@ -2242,7 +2247,7 @@ ensure_register_timer(Channel) -> ensure_register_timer(RetryTimes, Channel = #channel{timers = Timers}) -> Msg = maps:get(register_timer, ?TIMER_TABLE), - TRef = emqx_misc:start_timer(?REGISTER_TIMEOUT, {Msg, RetryTimes}), + TRef = emqx_utils:start_timer(?REGISTER_TIMEOUT, {Msg, RetryTimes}), Channel#channel{timers = Timers#{register_timer => TRef}}. cancel_timer(Name, Channel = #channel{timers = Timers}) -> @@ -2250,7 +2255,7 @@ cancel_timer(Name, Channel = #channel{timers = Timers}) -> undefined -> Channel; TRef -> - emqx_misc:cancel_timer(TRef), + emqx_utils:cancel_timer(TRef), Channel#channel{timers = maps:without([Name], Timers)} end. @@ -2265,7 +2270,7 @@ ensure_timer(Name, Channel = #channel{timers = Timers}) -> ensure_timer(Name, Time, Channel = #channel{timers = Timers}) -> Msg = maps:get(Name, ?TIMER_TABLE), - TRef = emqx_misc:start_timer(Time, Msg), + TRef = emqx_utils:start_timer(Time, Msg), Channel#channel{timers = Timers#{Name => TRef}}. reset_timer(Name, Channel) -> diff --git a/apps/emqx_gateway/src/mqttsn/emqx_sn_frame.erl b/apps/emqx_gateway_mqttsn/src/emqx_mqttsn_frame.erl similarity index 96% rename from apps/emqx_gateway/src/mqttsn/emqx_sn_frame.erl rename to apps/emqx_gateway_mqttsn/src/emqx_mqttsn_frame.erl index 39bd9e889..3be2f1dc2 100644 --- a/apps/emqx_gateway/src/mqttsn/emqx_sn_frame.erl +++ b/apps/emqx_gateway_mqttsn/src/emqx_mqttsn_frame.erl @@ -16,11 +16,11 @@ %%-------------------------------------------------------------------- %% @doc The frame parser for MQTT-SN protocol --module(emqx_sn_frame). +-module(emqx_mqttsn_frame). -behaviour(emqx_gateway_frame). --include("src/mqttsn/include/emqx_sn.hrl"). +-include("emqx_mqttsn.hrl"). -export([ initial_parse_state/1, @@ -58,10 +58,10 @@ serialize_opts() -> %% Parse MQTT-SN Message %%-------------------------------------------------------------------- -parse(<<16#01:?byte, Len:?short, Type:?byte, Var/binary>>, _State) -> - {ok, parse(Type, Len - 4, Var), <<>>, _State}; -parse(<<Len:?byte, Type:?byte, Var/binary>>, _State) -> - {ok, parse(Type, Len - 2, Var), <<>>, _State}. +parse(<<16#01:?byte, Len:?short, Type:?byte, Var/binary>>, State) -> + {ok, parse(Type, Len - 4, Var), <<>>, State}; +parse(<<Len:?byte, Type:?byte, Var/binary>>, State) -> + {ok, parse(Type, Len - 2, Var), <<>>, State}. parse(Type, Len, Var) when Len =:= size(Var) -> #mqtt_sn_message{type = Type, variable = parse_var(Type, Var)}; @@ -160,9 +160,11 @@ parse_topic(2#11, Topic) -> Topic. serialize_pkt(#mqtt_sn_message{type = Type, variable = Var}, Opts) -> VarBin = serialize(Type, Var, Opts), VarLen = size(VarBin), - if - VarLen < 254 -> <<(VarLen + 2), Type, VarBin/binary>>; - true -> <<16#01, (VarLen + 4):?short, Type, VarBin/binary>> + case VarLen < 254 of + true -> + <<(VarLen + 2), Type, VarBin/binary>>; + false -> + <<16#01, (VarLen + 4):?short, Type, VarBin/binary>> end. serialize(?SN_ADVERTISE, {GwId, Duration}, _Opts) -> @@ -438,7 +440,7 @@ format(?SN_DISCONNECT_MSG(Duration)) -> format(#mqtt_sn_message{type = Type, variable = Var}) -> io_lib:format( "mqtt_sn_message(type=~s, Var=~w)", - [emqx_sn_frame:message_type(Type), Var] + [emqx_mqttsn_frame:message_type(Type), Var] ).
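One behavioural fix in the channel diff above is easy to miss: `ignore_local/4` switched from `lists:dropwhile/2` to `lists:filter/2` with the predicate inverted. `dropwhile/2` only strips the leading run of matching elements, so a self-published `no_local` delivery that followed a non-matching one used to survive the check. A shell transcript illustrating the difference:

```erlang
%% dropwhile/2 stops at the first element that fails the predicate:
1> lists:dropwhile(fun(X) -> X =:= self_pub end, [self_pub, other, self_pub]).
[other,self_pub]
%% filter/2 tests every element, which is what the no_local check needs:
2> lists:filter(fun(X) -> X =/= self_pub end, [self_pub, other, self_pub]).
[other]
```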
is_message(#mqtt_sn_message{type = Type}) when diff --git a/apps/emqx_gateway/src/mqttsn/emqx_sn_registry.erl b/apps/emqx_gateway_mqttsn/src/emqx_mqttsn_registry.erl similarity index 90% rename from apps/emqx_gateway/src/mqttsn/emqx_sn_registry.erl rename to apps/emqx_gateway_mqttsn/src/emqx_mqttsn_registry.erl index 689aab8ce..9db355a9b 100644 --- a/apps/emqx_gateway/src/mqttsn/emqx_sn_registry.erl +++ b/apps/emqx_gateway_mqttsn/src/emqx_mqttsn_registry.erl @@ -15,13 +15,11 @@ %%-------------------------------------------------------------------- %% @doc The MQTT-SN Topic Registry -%% -%% XXX: --module(emqx_sn_registry). +-module(emqx_mqttsn_registry). -behaviour(gen_server). --include("src/mqttsn/include/emqx_sn.hrl"). +-include("emqx_mqttsn.hrl"). -include_lib("emqx/include/logger.hrl"). -export([start_link/2]). @@ -53,11 +51,11 @@ -export([lookup_name/1]). --define(SN_SHARD, emqx_sn_shard). +-define(SN_SHARD, emqx_mqttsn_shard). -record(state, {tabname, max_predef_topic_id = 0}). --record(emqx_sn_registry, {key, value}). +-record(emqx_mqttsn_registry, {key, value}). -type registry() :: {Tab :: atom(), RegistryPid :: pid()}. @@ -126,7 +124,7 @@ lookup_name(Pid) -> %%----------------------------------------------------------------------------- name(InstaId) -> - list_to_atom(lists:concat([emqx_sn_, InstaId, '_registry'])). + list_to_atom(lists:concat([emqx_mqttsn_, InstaId, '_registry'])). init([InstaId, PredefTopics]) -> %% {predef, TopicId} -> TopicName @@ -136,8 +134,8 @@ init([InstaId, PredefTopics]) -> Tab = name(InstaId), ok = mria:create_table(Tab, [ {storage, ram_copies}, - {record_name, emqx_sn_registry}, - {attributes, record_info(fields, emqx_sn_registry)}, + {record_name, emqx_mqttsn_registry}, + {attributes, record_info(fields, emqx_mqttsn_registry)}, {storage_properties, [{ets, [{read_concurrency, true}]}]}, {rlog_shard, ?SN_SHARD} ]), @@ -145,17 +143,17 @@ init([InstaId, PredefTopics]) -> MaxPredefId = lists:foldl( fun(#{id := TopicId, topic := TopicName0}, AccId) -> TopicName = iolist_to_binary(TopicName0), - mria:dirty_write(Tab, #emqx_sn_registry{ + mria:dirty_write(Tab, #emqx_mqttsn_registry{ key = {predef, TopicId}, value = TopicName }), - mria:dirty_write(Tab, #emqx_sn_registry{ + mria:dirty_write(Tab, #emqx_mqttsn_registry{ key = {predef, TopicName}, value = TopicId }), - if - TopicId > AccId -> TopicId; - true -> AccId + case TopicId > AccId of + true -> TopicId; + false -> AccId end end, 0, @@ -193,7 +191,7 @@ handle_call( handle_call({unregister, ClientId}, _From, State = #state{tabname = Tab}) -> Registry = mnesia:dirty_match_object( Tab, - {emqx_sn_registry, {ClientId, '_'}, '_'} + {emqx_mqttsn_registry, {ClientId, '_'}, '_'} ), lists:foreach( fun(R) -> @@ -234,7 +232,7 @@ code_change(_OldVsn, State, _Extra) -> do_register(Tab, ClientId, TopicId, TopicName) -> mnesia:write( Tab, - #emqx_sn_registry{ + #emqx_mqttsn_registry{ key = {ClientId, next_topic_id}, value = TopicId + 1 }, @@ -242,7 +240,7 @@ do_register(Tab, ClientId, TopicId, TopicName) -> ), mnesia:write( Tab, - #emqx_sn_registry{ + #emqx_mqttsn_registry{ key = {ClientId, TopicName}, value = TopicId }, @@ -250,7 +248,7 @@ do_register(Tab, ClientId, TopicId, TopicName) -> ), mnesia:write( Tab, - #emqx_sn_registry{ + #emqx_mqttsn_registry{ key = {ClientId, TopicId}, value = TopicName }, @@ -261,6 +259,6 @@ do_register(Tab, ClientId, TopicId, TopicName) -> next_topic_id(Tab, PredefId, ClientId) -> case mnesia:dirty_read(Tab, {ClientId, next_topic_id}) of - [#emqx_sn_registry{value = Id}] -> Id; + 
[#emqx_mqttsn_registry{value = Id}] -> Id; [] -> PredefId + 1 end. diff --git a/apps/emqx_gateway_mqttsn/src/emqx_mqttsn_schema.erl b/apps/emqx_gateway_mqttsn/src/emqx_mqttsn_schema.erl new file mode 100644 index 000000000..cb33cbe95 --- /dev/null +++ b/apps/emqx_gateway_mqttsn/src/emqx_mqttsn_schema.erl @@ -0,0 +1,107 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_mqttsn_schema). + +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("typerefl/include/types.hrl"). + +%% config schema provides +-export([fields/1, desc/1]). + +fields(mqttsn) -> + [ + {gateway_id, + sc( + integer(), + #{ + default => 1, + required => true, + desc => ?DESC(mqttsn_gateway_id) + } + )}, + {broadcast, + sc( + boolean(), + #{ + default => false, + desc => ?DESC(mqttsn_broadcast) + } + )}, + %% TODO: rename + {enable_qos3, + sc( + boolean(), + #{ + default => true, + desc => ?DESC(mqttsn_enable_qos3) + } + )}, + {subs_resume, + sc( + boolean(), + #{ + default => false, + desc => ?DESC(mqttsn_subs_resume) + } + )}, + {predefined, + sc( + hoconsc:array(ref(mqttsn_predefined)), + #{ + default => [], + required => {false, recursively}, + desc => ?DESC(mqttsn_predefined) + } + )}, + {mountpoint, emqx_gateway_schema:mountpoint()}, + {listeners, sc(ref(emqx_gateway_schema, udp_listeners), #{desc => ?DESC(udp_listeners)})} + ] ++ emqx_gateway_schema:gateway_common_options(); +fields(mqttsn_predefined) -> + [ + {id, + sc(integer(), #{ + required => true, + desc => ?DESC(mqttsn_predefined_id) + })}, + + {topic, + sc(binary(), #{ + required => true, + desc => ?DESC(mqttsn_predefined_topic) + })} + ]. + +desc(mqttsn) -> + "The MQTT-SN (MQTT for Sensor Networks) protocol gateway."; +desc(mqttsn_predefined) -> + "The pre-defined topic name corresponding to the pre-defined topic\n" + "ID of N.\n\n" + "Note: the pre-defined topic ID of 0 is reserved."; +desc(_) -> + undefined. + +%%-------------------------------------------------------------------- +%% internal functions + +sc(Type, Meta) -> + hoconsc:mk(Type, Meta). + +ref(StructName) -> + ref(?MODULE, StructName). + +ref(Mod, Field) -> + hoconsc:ref(Mod, Field). diff --git a/apps/emqx_gateway/test/broadcast_test.py b/apps/emqx_gateway_mqttsn/test/broadcast_test.py similarity index 100% rename from apps/emqx_gateway/test/broadcast_test.py rename to apps/emqx_gateway_mqttsn/test/broadcast_test.py diff --git a/apps/emqx_gateway/test/emqx_sn_frame_SUITE.erl b/apps/emqx_gateway_mqttsn/test/emqx_sn_frame_SUITE.erl similarity index 97% rename from apps/emqx_gateway/test/emqx_sn_frame_SUITE.erl rename to apps/emqx_gateway_mqttsn/test/emqx_sn_frame_SUITE.erl index aa3fed707..86cc0cf7e 100644 --- a/apps/emqx_gateway/test/emqx_sn_frame_SUITE.erl +++ b/apps/emqx_gateway_mqttsn/test/emqx_sn_frame_SUITE.erl @@ -19,7 +19,7 @@ -compile(export_all). 
-compile(nowarn_export_all). --include("src/mqttsn/include/emqx_sn.hrl"). +-include("emqx_mqttsn.hrl"). -include_lib("eunit/include/eunit.hrl"). %%-------------------------------------------------------------------- @@ -30,11 +30,11 @@ all() -> emqx_common_test_helpers:all(?MODULE). parse(D) -> - {ok, P, _Rest, _State} = emqx_sn_frame:parse(D, #{}), + {ok, P, _Rest, _State} = emqx_mqttsn_frame:parse(D, #{}), P. serialize_pkt(P) -> - emqx_sn_frame:serialize_pkt(P, #{}). + emqx_mqttsn_frame:serialize_pkt(P, #{}). %%-------------------------------------------------------------------- %% Test cases diff --git a/apps/emqx_gateway/test/emqx_sn_protocol_SUITE.erl b/apps/emqx_gateway_mqttsn/test/emqx_sn_protocol_SUITE.erl similarity index 99% rename from apps/emqx_gateway/test/emqx_sn_protocol_SUITE.erl rename to apps/emqx_gateway_mqttsn/test/emqx_sn_protocol_SUITE.erl index adc1e7382..04b1b5fb2 100644 --- a/apps/emqx_gateway/test/emqx_sn_protocol_SUITE.erl +++ b/apps/emqx_gateway_mqttsn/test/emqx_sn_protocol_SUITE.erl @@ -27,7 +27,7 @@ ] ). --include("src/mqttsn/include/emqx_sn.hrl"). +-include("emqx_mqttsn.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("common_test/include/ct.hrl"). @@ -97,6 +97,7 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> + application:load(emqx_gateway_mqttsn), ok = emqx_common_test_helpers:load_config(emqx_gateway_schema, ?CONF_DEFAULT), emqx_mgmt_api_test_util:init_suite([emqx_conf, emqx_authn, emqx_gateway]), Config. @@ -270,7 +271,7 @@ t_subscribe_case03(_) -> %% In this case We use predefined topic name to register and subscribe, %% and expect to receive the corresponding predefined topic id but not a new %% generated topic id from broker. We design this case to illustrate -%% emqx_sn_gateway's compatibility of dealing with predefined and normal +%% MQTT-SN Gateway's compatibility of dealing with predefined and normal %% topics. %% %% Once we give more restrictions to different topic id type, this case diff --git a/apps/emqx_gateway/test/emqx_sn_registry_SUITE.erl b/apps/emqx_gateway_mqttsn/test/emqx_sn_registry_SUITE.erl similarity index 98% rename from apps/emqx_gateway/test/emqx_sn_registry_SUITE.erl rename to apps/emqx_gateway_mqttsn/test/emqx_sn_registry_SUITE.erl index 739255e71..4d89a802d 100644 --- a/apps/emqx_gateway/test/emqx_sn_registry_SUITE.erl +++ b/apps/emqx_gateway_mqttsn/test/emqx_sn_registry_SUITE.erl @@ -21,7 +21,7 @@ -include_lib("eunit/include/eunit.hrl"). --define(REGISTRY, emqx_sn_registry). +-define(REGISTRY, emqx_mqttsn_registry). -define(MAX_PREDEF_ID, 2). 
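The `?PREDEF_TOPICS` fixture defined immediately below maps onto the `predefined` array from `emqx_mqttsn_schema` shown earlier. As a rough emqx.conf sketch (only the first entry is visible in this diff; the second is illustrative, mirroring `MAX_PREDEF_ID = 2`):

```properties
gateway.mqttsn {
  predefined = [
    # matches the suite's id => 1 entry
    { id = 1, topic = "/predefined/topic/name/hello" },
    # hypothetical second entry
    { id = 2, topic = "/predefined/topic/name/example" }
  ]
}
```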
-define(PREDEF_TOPICS, [ #{id => 1, topic => <<"/predefined/topic/name/hello">>}, @@ -66,7 +66,7 @@ t_register(Config) -> ?assertEqual(<<"Topic2">>, ?REGISTRY:lookup_topic(Reg, <<"ClientId">>, ?MAX_PREDEF_ID + 2)), ?assertEqual(?MAX_PREDEF_ID + 1, ?REGISTRY:lookup_topic_id(Reg, <<"ClientId">>, <<"Topic1">>)), ?assertEqual(?MAX_PREDEF_ID + 2, ?REGISTRY:lookup_topic_id(Reg, <<"ClientId">>, <<"Topic2">>)), - emqx_sn_registry:unregister_topic(Reg, <<"ClientId">>), + emqx_mqttsn_registry:unregister_topic(Reg, <<"ClientId">>), ?assertEqual(undefined, ?REGISTRY:lookup_topic(Reg, <<"ClientId">>, ?MAX_PREDEF_ID + 1)), ?assertEqual(undefined, ?REGISTRY:lookup_topic(Reg, <<"ClientId">>, ?MAX_PREDEF_ID + 2)), ?assertEqual(undefined, ?REGISTRY:lookup_topic_id(Reg, <<"ClientId">>, <<"Topic1">>)), diff --git a/apps/emqx_gateway/test/intergration_test/Makefile b/apps/emqx_gateway_mqttsn/test/intergration_test/Makefile similarity index 100% rename from apps/emqx_gateway/test/intergration_test/Makefile rename to apps/emqx_gateway_mqttsn/test/intergration_test/Makefile diff --git a/apps/emqx_gateway/test/intergration_test/README.md b/apps/emqx_gateway_mqttsn/test/intergration_test/README.md similarity index 100% rename from apps/emqx_gateway/test/intergration_test/README.md rename to apps/emqx_gateway_mqttsn/test/intergration_test/README.md diff --git a/apps/emqx_gateway/test/intergration_test/add_emqx_sn_to_project.py b/apps/emqx_gateway_mqttsn/test/intergration_test/add_emqx_sn_to_project.py similarity index 100% rename from apps/emqx_gateway/test/intergration_test/add_emqx_sn_to_project.py rename to apps/emqx_gateway_mqttsn/test/intergration_test/add_emqx_sn_to_project.py diff --git a/apps/emqx_gateway/test/intergration_test/client/case1_qos0pub.c b/apps/emqx_gateway_mqttsn/test/intergration_test/client/case1_qos0pub.c similarity index 100% rename from apps/emqx_gateway/test/intergration_test/client/case1_qos0pub.c rename to apps/emqx_gateway_mqttsn/test/intergration_test/client/case1_qos0pub.c diff --git a/apps/emqx_gateway/test/intergration_test/client/case1_qos0sub.c b/apps/emqx_gateway_mqttsn/test/intergration_test/client/case1_qos0sub.c similarity index 100% rename from apps/emqx_gateway/test/intergration_test/client/case1_qos0sub.c rename to apps/emqx_gateway_mqttsn/test/intergration_test/client/case1_qos0sub.c diff --git a/apps/emqx_gateway/test/intergration_test/client/case2_qos0pub.c b/apps/emqx_gateway_mqttsn/test/intergration_test/client/case2_qos0pub.c similarity index 100% rename from apps/emqx_gateway/test/intergration_test/client/case2_qos0pub.c rename to apps/emqx_gateway_mqttsn/test/intergration_test/client/case2_qos0pub.c diff --git a/apps/emqx_gateway/test/intergration_test/client/case2_qos0sub.c b/apps/emqx_gateway_mqttsn/test/intergration_test/client/case2_qos0sub.c similarity index 100% rename from apps/emqx_gateway/test/intergration_test/client/case2_qos0sub.c rename to apps/emqx_gateway_mqttsn/test/intergration_test/client/case2_qos0sub.c diff --git a/apps/emqx_gateway/test/intergration_test/client/case3_qos0pub.c b/apps/emqx_gateway_mqttsn/test/intergration_test/client/case3_qos0pub.c similarity index 100% rename from apps/emqx_gateway/test/intergration_test/client/case3_qos0pub.c rename to apps/emqx_gateway_mqttsn/test/intergration_test/client/case3_qos0pub.c diff --git a/apps/emqx_gateway/test/intergration_test/client/case3_qos0sub.c b/apps/emqx_gateway_mqttsn/test/intergration_test/client/case3_qos0sub.c similarity index 100% rename from 
apps/emqx_gateway/test/intergration_test/client/case3_qos0sub.c rename to apps/emqx_gateway_mqttsn/test/intergration_test/client/case3_qos0sub.c diff --git a/apps/emqx_gateway/test/intergration_test/client/case4_qos3pub.c b/apps/emqx_gateway_mqttsn/test/intergration_test/client/case4_qos3pub.c similarity index 100% rename from apps/emqx_gateway/test/intergration_test/client/case4_qos3pub.c rename to apps/emqx_gateway_mqttsn/test/intergration_test/client/case4_qos3pub.c diff --git a/apps/emqx_gateway/test/intergration_test/client/case4_qos3sub.c b/apps/emqx_gateway_mqttsn/test/intergration_test/client/case4_qos3sub.c similarity index 100% rename from apps/emqx_gateway/test/intergration_test/client/case4_qos3sub.c rename to apps/emqx_gateway_mqttsn/test/intergration_test/client/case4_qos3sub.c diff --git a/apps/emqx_gateway/test/intergration_test/client/case5_qos3pub.c b/apps/emqx_gateway_mqttsn/test/intergration_test/client/case5_qos3pub.c similarity index 100% rename from apps/emqx_gateway/test/intergration_test/client/case5_qos3pub.c rename to apps/emqx_gateway_mqttsn/test/intergration_test/client/case5_qos3pub.c diff --git a/apps/emqx_gateway/test/intergration_test/client/case5_qos3sub.c b/apps/emqx_gateway_mqttsn/test/intergration_test/client/case5_qos3sub.c similarity index 100% rename from apps/emqx_gateway/test/intergration_test/client/case5_qos3sub.c rename to apps/emqx_gateway_mqttsn/test/intergration_test/client/case5_qos3sub.c diff --git a/apps/emqx_gateway/test/intergration_test/client/case6_sleep.c b/apps/emqx_gateway_mqttsn/test/intergration_test/client/case6_sleep.c similarity index 100% rename from apps/emqx_gateway/test/intergration_test/client/case6_sleep.c rename to apps/emqx_gateway_mqttsn/test/intergration_test/client/case6_sleep.c diff --git a/apps/emqx_gateway/test/intergration_test/client/case7_double_connect.c b/apps/emqx_gateway_mqttsn/test/intergration_test/client/case7_double_connect.c similarity index 100% rename from apps/emqx_gateway/test/intergration_test/client/case7_double_connect.c rename to apps/emqx_gateway_mqttsn/test/intergration_test/client/case7_double_connect.c diff --git a/apps/emqx_gateway/test/intergration_test/client/int_test_result.c b/apps/emqx_gateway_mqttsn/test/intergration_test/client/int_test_result.c similarity index 100% rename from apps/emqx_gateway/test/intergration_test/client/int_test_result.c rename to apps/emqx_gateway_mqttsn/test/intergration_test/client/int_test_result.c diff --git a/apps/emqx_gateway/test/intergration_test/client/int_test_result.h b/apps/emqx_gateway_mqttsn/test/intergration_test/client/int_test_result.h similarity index 100% rename from apps/emqx_gateway/test/intergration_test/client/int_test_result.h rename to apps/emqx_gateway_mqttsn/test/intergration_test/client/int_test_result.h diff --git a/apps/emqx_gateway/test/intergration_test/disable_qos3.py b/apps/emqx_gateway_mqttsn/test/intergration_test/disable_qos3.py similarity index 100% rename from apps/emqx_gateway/test/intergration_test/disable_qos3.py rename to apps/emqx_gateway_mqttsn/test/intergration_test/disable_qos3.py diff --git a/apps/emqx_gateway/test/intergration_test/enable_qos3.py b/apps/emqx_gateway_mqttsn/test/intergration_test/enable_qos3.py similarity index 100% rename from apps/emqx_gateway/test/intergration_test/enable_qos3.py rename to apps/emqx_gateway_mqttsn/test/intergration_test/enable_qos3.py diff --git a/apps/emqx_gateway/test/props/emqx_sn_proper_types.erl b/apps/emqx_gateway_mqttsn/test/props/emqx_sn_proper_types.erl similarity 
index 99% rename from apps/emqx_gateway/test/props/emqx_sn_proper_types.erl rename to apps/emqx_gateway_mqttsn/test/props/emqx_sn_proper_types.erl index 2869a8958..70b13ef8f 100644 --- a/apps/emqx_gateway/test/props/emqx_sn_proper_types.erl +++ b/apps/emqx_gateway_mqttsn/test/props/emqx_sn_proper_types.erl @@ -16,7 +16,7 @@ -module(emqx_sn_proper_types). --include("src/mqttsn/include/emqx_sn.hrl"). +-include("emqx_mqttsn.hrl"). -include_lib("proper/include/proper.hrl"). -compile({no_auto_import, [register/1]}). diff --git a/apps/emqx_gateway/test/props/prop_emqx_sn_frame.erl b/apps/emqx_gateway_mqttsn/test/props/prop_emqx_sn_frame.erl similarity index 94% rename from apps/emqx_gateway/test/props/prop_emqx_sn_frame.erl rename to apps/emqx_gateway_mqttsn/test/props/prop_emqx_sn_frame.erl index f2dfbb8e9..0abe2485c 100644 --- a/apps/emqx_gateway/test/props/prop_emqx_sn_frame.erl +++ b/apps/emqx_gateway_mqttsn/test/props/prop_emqx_sn_frame.erl @@ -16,7 +16,7 @@ -module(prop_emqx_sn_frame). --include("src/mqttsn/include/emqx_sn.hrl"). +-include("emqx_mqttsn.hrl"). -include_lib("proper/include/proper.hrl"). -compile({no_auto_import, [register/1]}). @@ -32,11 +32,11 @@ ). parse(D) -> - {ok, P, _Rest, _State} = emqx_sn_frame:parse(D, #{}), + {ok, P, _Rest, _State} = emqx_mqttsn_frame:parse(D, #{}), P. serialize(P) -> - emqx_sn_frame:serialize_pkt(P, #{}). + emqx_mqttsn_frame:serialize_pkt(P, #{}). %%-------------------------------------------------------------------- %% Properties diff --git a/apps/emqx_gateway_stomp/.gitignore b/apps/emqx_gateway_stomp/.gitignore new file mode 100644 index 000000000..f1c455451 --- /dev/null +++ b/apps/emqx_gateway_stomp/.gitignore @@ -0,0 +1,19 @@ +.rebar3 +_* +.eunit +*.o +*.beam +*.plt +*.swp +*.swo +.erlang.cookie +ebin +log +erl_crash.dump +.rebar +logs +_build +.idea +*.iml +rebar3.crashdump +*~ diff --git a/apps/emqx_gateway_stomp/README.md b/apps/emqx_gateway_stomp/README.md new file mode 100644 index 000000000..0c41ff520 --- /dev/null +++ b/apps/emqx_gateway_stomp/README.md @@ -0,0 +1,31 @@ +# emqx_stomp + +The Stomp Gateway is based on the +[Stomp v1.2](https://stomp.github.io/stomp-specification-1.2.html) and is +compatible with the Stomp v1.0 and v1.1 specifications. + +## Quick Start + +In EMQX 5.0, the Stomp gateway can be configured and enabled through the Dashboard. + +It can also be enabled via the HTTP API or emqx.conf, e.g., in emqx.conf: + +```properties +gateway.stomp { + + mountpoint = "stomp/" + + listeners.tcp.default { + bind = 61613 + acceptors = 16 + max_connections = 1024000 + max_conn_rate = 1000 + } +} +``` + +> Note: +> Configuring the gateway via emqx.conf requires changes on a per-node basis, +> but configuring it via Dashboard or the HTTP API will take effect across the cluster. + +More documentation: [Stomp Gateway](https://www.emqx.io/docs/en/v5.0/gateway/stomp.html) diff --git a/apps/emqx_gateway/src/stomp/include/emqx_stomp.hrl b/apps/emqx_gateway_stomp/include/emqx_stomp.hrl similarity index 100% rename from apps/emqx_gateway/src/stomp/include/emqx_stomp.hrl rename to apps/emqx_gateway_stomp/include/emqx_stomp.hrl diff --git a/apps/emqx_gateway_stomp/rebar.config b/apps/emqx_gateway_stomp/rebar.config new file mode 100644 index 000000000..cfeb0a195 --- /dev/null +++ b/apps/emqx_gateway_stomp/rebar.config @@ -0,0 +1,6 @@ +{erl_opts, [debug_info]}. +{deps, [ + {emqx, {path, "../../apps/emqx"}}, + {emqx_utils, {path, "../emqx_utils"}}, + {emqx_gateway, {path, "../../apps/emqx_gateway"}} +]}.
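For orientation, a minimal session against the gateway's default listener (port 61613, per the README above) would look roughly like this on the wire under STOMP 1.2. The frames follow the specification's format; the `host`, `login`, and `passcode` values are placeholders, and `^@` stands for the NUL byte that terminates every frame:

```
CONNECT
accept-version:1.2
host:emqx
login:guest
passcode:guest

^@
CONNECTED
version:1.2

^@
```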
diff --git a/apps/emqx_gateway_stomp/src/emqx_gateway_stomp.app.src b/apps/emqx_gateway_stomp/src/emqx_gateway_stomp.app.src new file mode 100644 index 000000000..38da1e18b --- /dev/null +++ b/apps/emqx_gateway_stomp/src/emqx_gateway_stomp.app.src @@ -0,0 +1,10 @@ +{application, emqx_gateway_stomp, [ + {description, "Stomp Gateway"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib, emqx, emqx_gateway]}, + {env, []}, + {modules, []}, + {licenses, ["Apache 2.0"]}, + {links, []} +]}. diff --git a/apps/emqx_gateway/src/stomp/emqx_stomp_impl.erl b/apps/emqx_gateway_stomp/src/emqx_gateway_stomp.erl similarity index 83% rename from apps/emqx_gateway/src/stomp/emqx_stomp_impl.erl rename to apps/emqx_gateway_stomp/src/emqx_gateway_stomp.erl index c2907c262..b8c2f0166 100644 --- a/apps/emqx_gateway/src/stomp/emqx_stomp_impl.erl +++ b/apps/emqx_gateway_stomp/src/emqx_gateway_stomp.erl @@ -1,5 +1,5 @@ %%-------------------------------------------------------------------- -%% Copyright (c) 2017-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% Copyright (c) 2021 EMQ Technologies Co., Ltd. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -14,13 +14,29 @@ %% limitations under the License. %%-------------------------------------------------------------------- --module(emqx_stomp_impl). - --behaviour(emqx_gateway_impl). +%% @doc The Stomp Gateway implementation +-module(emqx_gateway_stomp). -include_lib("emqx/include/logger.hrl"). -include_lib("emqx_gateway/include/emqx_gateway.hrl"). +%% define a gateway named stomp +-gateway(#{ + name => stomp, + callback_module => ?MODULE, + config_schema_module => emqx_stomp_schema +}). + +%% callback_module must implement the emqx_gateway_impl behaviour +-behaviour(emqx_gateway_impl). + +%% callback for emqx_gateway_impl +-export([ + on_gateway_load/2, + on_gateway_update/3, + on_gateway_unload/2 +]). + -import( emqx_gateway_utils, [ @@ -30,33 +46,8 @@ ] ). -%% APIs --export([ - reg/0, - unreg/0 -]). - --export([ - on_gateway_load/2, - on_gateway_update/3, - on_gateway_unload/2 -]). - %%-------------------------------------------------------------------- -%% APIs -%%-------------------------------------------------------------------- - --spec reg() -> ok | {error, any()}. -reg() -> - RegistryOptions = [{cbkmod, ?MODULE}], - emqx_gateway_registry:reg(stomp, RegistryOptions). - --spec unreg() -> ok | {error, any()}. -unreg() -> - emqx_gateway_registry:unreg(stomp). - -%%-------------------------------------------------------------------- -%% emqx_gateway_registry callbacks +%% emqx_gateway_impl callbacks %%-------------------------------------------------------------------- on_gateway_load( diff --git a/apps/emqx_gateway/src/stomp/emqx_stomp_channel.erl b/apps/emqx_gateway_stomp/src/emqx_stomp_channel.erl similarity index 99% rename from apps/emqx_gateway/src/stomp/emqx_stomp_channel.erl rename to apps/emqx_gateway_stomp/src/emqx_stomp_channel.erl index b95bb827c..316432dea 100644 --- a/apps/emqx_gateway/src/stomp/emqx_stomp_channel.erl +++ b/apps/emqx_gateway_stomp/src/emqx_stomp_channel.erl @@ -18,7 +18,7 @@ -behaviour(emqx_gateway_channel). --include("src/stomp/include/emqx_stomp.hrl"). +-include("emqx_stomp.hrl"). -include_lib("emqx/include/emqx.hrl"). -include_lib("emqx/include/logger.hrl").
@@ -252,7 +252,7 @@ enrich_clientinfo( feedvar(Override, Packet, ConnInfo, ClientInfo0), ClientInfo0 ), - {ok, NPacket, NClientInfo} = emqx_misc:pipeline( + {ok, NPacket, NClientInfo} = emqx_utils:pipeline( [ fun maybe_assign_clientid/2, fun parse_heartbeat/2, @@ -416,7 +416,7 @@ handle_in( {error, unexpected_connect, Channel}; handle_in(Packet = ?PACKET(?CMD_CONNECT), Channel) -> case - emqx_misc:pipeline( + emqx_utils:pipeline( [ fun enrich_conninfo/2, fun negotiate_version/2, @@ -474,7 +474,7 @@ handle_in( Topic = header(<<"destination">>, Headers), Ack = header(<<"ack">>, Headers, <<"auto">>), case - emqx_misc:pipeline( + emqx_utils:pipeline( [ fun parse_topic_filter/2, fun check_subscribed_status/2, @@ -796,7 +796,7 @@ handle_call( reply({error, no_subid}, Channel); SubId -> case - emqx_misc:pipeline( + emqx_utils:pipeline( [ fun parse_topic_filter/2, fun check_subscribed_status/2 @@ -869,7 +869,7 @@ handle_call(discard, _From, Channel) -> % pendings = Pendings}) -> % ok = emqx_session:takeover(Session), % %% TODO: Should not drain deliver here (side effect) -% Delivers = emqx_misc:drain_deliver(), +% Delivers = emqx_utils:drain_deliver(), % AllPendings = lists:append(Delivers, Pendings), % shutdown_and_reply(takenover, AllPendings, Channel); @@ -1289,7 +1289,7 @@ ensure_timer(Name, Channel = #channel{timers = Timers}) -> ensure_timer(Name, Time, Channel = #channel{timers = Timers}) -> Msg = maps:get(Name, ?TIMER_TABLE), - TRef = emqx_misc:start_timer(Time, Msg), + TRef = emqx_utils:start_timer(Time, Msg), Channel#channel{timers = Timers#{Name => TRef}}. reset_timer(Name, Channel) -> diff --git a/apps/emqx_gateway/src/stomp/emqx_stomp_frame.erl b/apps/emqx_gateway_stomp/src/emqx_stomp_frame.erl similarity index 99% rename from apps/emqx_gateway/src/stomp/emqx_stomp_frame.erl rename to apps/emqx_gateway_stomp/src/emqx_stomp_frame.erl index 47e045412..4913d6b2a 100644 --- a/apps/emqx_gateway/src/stomp/emqx_stomp_frame.erl +++ b/apps/emqx_gateway_stomp/src/emqx_stomp_frame.erl @@ -70,7 +70,7 @@ -behaviour(emqx_gateway_frame). --include("src/stomp/include/emqx_stomp.hrl"). +-include("emqx_stomp.hrl"). -export([ initial_parse_state/1, diff --git a/apps/emqx_gateway/src/stomp/emqx_stomp_heartbeat.erl b/apps/emqx_gateway_stomp/src/emqx_stomp_heartbeat.erl similarity index 97% rename from apps/emqx_gateway/src/stomp/emqx_stomp_heartbeat.erl rename to apps/emqx_gateway_stomp/src/emqx_stomp_heartbeat.erl index 88720c513..2e4239bdc 100644 --- a/apps/emqx_gateway/src/stomp/emqx_stomp_heartbeat.erl +++ b/apps/emqx_gateway_stomp/src/emqx_stomp_heartbeat.erl @@ -17,7 +17,7 @@ %% @doc Stomp heartbeat. -module(emqx_stomp_heartbeat). --include("src/stomp/include/emqx_stomp.hrl"). +-include("emqx_stomp.hrl"). -export([ init/1, @@ -36,6 +36,8 @@ outgoing => #heartbeater{} }. +-elvis([{elvis_style, no_if_expression, disable}]). + %%-------------------------------------------------------------------- %% APIs %%-------------------------------------------------------------------- diff --git a/apps/emqx_gateway_stomp/src/emqx_stomp_schema.erl b/apps/emqx_gateway_stomp/src/emqx_stomp_schema.erl new file mode 100644 index 000000000..b1c6a92e2 --- /dev/null +++ b/apps/emqx_gateway_stomp/src/emqx_stomp_schema.erl @@ -0,0 +1,80 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. 
+%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_stomp_schema). + +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("typerefl/include/types.hrl"). + +%% config schema provides +-export([fields/1, desc/1]). + +fields(stomp) -> + [ + {frame, sc(ref(stomp_frame))}, + {mountpoint, emqx_gateway_schema:mountpoint()}, + {listeners, sc(ref(emqx_gateway_schema, tcp_listeners), #{desc => ?DESC(tcp_listeners)})} + ] ++ emqx_gateway_schema:gateway_common_options(); +fields(stomp_frame) -> + [ + {max_headers, + sc( + non_neg_integer(), + #{ + default => 10, + desc => ?DESC(stom_frame_max_headers) + } + )}, + {max_headers_length, + sc( + non_neg_integer(), + #{ + default => 1024, + desc => ?DESC(stomp_frame_max_headers_length) + } + )}, + {max_body_length, + sc( + integer(), + #{ + default => 65536, + desc => ?DESC(stom_frame_max_body_length) + } + )} + ]. + +desc(stomp) -> + "The STOMP protocol gateway provides EMQX with the ability to access STOMP\n" + "(Simple (or Streaming) Text Orientated Messaging Protocol) protocol."; +desc(stomp_frame) -> + "Size limits for the STOMP frames."; +desc(_) -> + undefined. + +%%-------------------------------------------------------------------- +%% internal functions + +sc(Type) -> + sc(Type, #{}). + +sc(Type, Meta) -> + hoconsc:mk(Type, Meta). + +ref(StructName) -> + ref(?MODULE, StructName). + +ref(Mod, Field) -> + hoconsc:ref(Mod, Field). diff --git a/apps/emqx_gateway/test/emqx_stomp_SUITE.erl b/apps/emqx_gateway_stomp/test/emqx_stomp_SUITE.erl similarity index 99% rename from apps/emqx_gateway/test/emqx_stomp_SUITE.erl rename to apps/emqx_gateway_stomp/test/emqx_stomp_SUITE.erl index 2cf245ce2..4323cf32f 100644 --- a/apps/emqx_gateway/test/emqx_stomp_SUITE.erl +++ b/apps/emqx_gateway_stomp/test/emqx_stomp_SUITE.erl @@ -17,7 +17,7 @@ -module(emqx_stomp_SUITE). -include_lib("eunit/include/eunit.hrl"). --include("src/stomp/include/emqx_stomp.hrl"). +-include("emqx_stomp.hrl"). -compile(export_all). -compile(nowarn_export_all). @@ -53,6 +53,7 @@ all() -> emqx_common_test_helpers:all(?MODULE). %%-------------------------------------------------------------------- init_per_suite(Cfg) -> + application:load(emqx_gateway_stomp), ok = emqx_common_test_helpers:load_config(emqx_gateway_schema, ?CONF_DEFAULT), emqx_mgmt_api_test_util:init_suite([emqx_authn, emqx_gateway]), Cfg. diff --git a/apps/emqx_gateway/test/emqx_stomp_heartbeat_SUITE.erl b/apps/emqx_gateway_stomp/test/emqx_stomp_heartbeat_SUITE.erl similarity index 100% rename from apps/emqx_gateway/test/emqx_stomp_heartbeat_SUITE.erl rename to apps/emqx_gateway_stomp/test/emqx_stomp_heartbeat_SUITE.erl diff --git a/apps/emqx_machine/README.md b/apps/emqx_machine/README.md new file mode 100644 index 000000000..a8221ba73 --- /dev/null +++ b/apps/emqx_machine/README.md @@ -0,0 +1,48 @@ +# EMQX machine + +This application manages other OTP applications in EMQX and serves as the entry point when BEAM VM starts up. +It prepares the node before starting mnesia/mria, as well as EMQX business logic. 
+By registering `ekka` callbacks, it keeps track of the business applications that store data in Mnesia and need to be restarted when the node joins a cluster. +It also kicks off autoclustering (EMQX cluster discovery) on the core nodes. + +The `emqx_machine` application doesn't do much on its own, but it facilitates the environment for running other EMQX applications. + +# Features + +## Global GC + +`emqx_global_gc` is a gen_server that forces garbage collection of all Erlang processes running in the BEAM VM. +This is meant to save RAM. + +## Restricted shell + +The `emqx_restricted_shell` module prevents users from accidentally issuing Erlang shell commands that can stop the remote node. + +## Signal handler + +`emqx_machine_signal_handler` handles POSIX signals sent to the BEAM VM process. +It helps to shut down the EMQX broker gracefully when it receives a `SIGTERM` signal. + +## Cover + +`emqx_cover` is a helper module that helps to collect coverage reports during testing. + +# Limitation + +Currently `emqx_machine` boots the business apps before starting autocluster, so a fresh node joining the cluster actually starts the business applications twice: first in singleton mode, and then in clustered mode. + +# Documentation links + +Configuration: [node.global_gc_interval](https://www.emqx.io/docs/en/v5.0/configuration/configuration-manual.html#node-and-cookie) + +# Configurations + +The following application environment variables are used: + +- `emqx_machine.global_gc_interval`: interval at which the global GC runs +- `emqx_machine.custom_shard_transports`: a map for fine-tuning the transport (`rpc` or `gen_rpc`) used to send Mria transactions from the core node to the replicants +- `emqx_machine.backtrace_depth`: sets the maximum depth of Erlang stacktraces in crash reports + + +# Contributing +Please see our [contributing.md](../../CONTRIBUTING.md). diff --git a/apps/emqx_machine/rebar.config b/apps/emqx_machine/rebar.config index 9f17b7657..dee2902a5 100644 --- a/apps/emqx_machine/rebar.config +++ b/apps/emqx_machine/rebar.config @@ -1,5 +1,8 @@ %% -*- mode: erlang -*- -{deps, [{emqx, {path, "../emqx"}}]}. +{deps, [ + {emqx, {path, "../emqx"}}, + {emqx_utils, {path, "../emqx_utils"}} +]}. {project_plugins, [erlfmt]}. diff --git a/apps/emqx_machine/src/emqx_cover.erl b/apps/emqx_machine/src/emqx_cover.erl new file mode 100644 index 000000000..c6f610746 --- /dev/null +++ b/apps/emqx_machine/src/emqx_cover.erl @@ -0,0 +1,214 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +%% @doc This module is NOT used in production. +%% It is used to collect coverage data when running blackbox tests. -module(emqx_cover). + +-include_lib("covertool/include/covertool.hrl"). + +-ifdef(EMQX_ENTERPRISE). +-define(OUTPUT_APPNAME, 'EMQX Enterprise'). +-else.
+-define(OUTPUT_APPNAME, 'EMQX'). +-endif. + +-export([ + start/0, + start/1, + abort/0, + export_and_stop/1, + lookup_source/1 +]). + +%% This is an ETS table that keeps a mapping of module name (atom) to +%% .erl file path (relative to the project root). +%% We need this ETS table because the source file information +%% is missing from the .beam metadata since we are using the 'deterministic' +%% compile flag. +-define(SRC, emqx_cover_module_src). + +%% @doc Start cover. +%% All emqx_ modules will be cover-compiled; this may cause +%% some excessive RAM consumption and result in warning logs. +start() -> + start(#{}). + +%% @doc Start cover. +%% All emqx_ modules will be cover-compiled; this may cause +%% some excessive RAM consumption and result in warning logs. +%% Supported options: +%% - project_root: the directory to search for .erl source code +%% - debug_secret_file: only applicable to EMQX Enterprise +start(Opts) -> + ok = abort(), + DefaultDir = os_env("EMQX_PROJECT_ROOT"), + ProjRoot = maps:get(project_root, Opts, DefaultDir), + case ProjRoot =:= "" of + true -> + io:format("Project source code root dir is not provided.~n"), + io:format( + "You may either start EMQX node with environment variable EMQX_PROJECT_ROOT set~n" + ), + io:format("Or provide #{project_root => \"/path/to/emqx/\"} as emqx_cover:start arg~n"), + exit(project_root_is_not_set); + false -> + ok + end, + %% spawn an ETS table owner + %% this implementation is kept dead-simple + %% because there is no concurrency requirement + Parent = self(), + {Pid, Ref} = + erlang:spawn_monitor( + fun() -> + true = register(?SRC, self()), + _ = ets:new(?SRC, [named_table, public]), + _ = Parent ! {started, self()}, + receive + stop -> + ok + end + end + ), + receive + {started, Pid} -> + ok; + {'DOWN', Ref, process, Pid, Reason} -> + throw({failed_to_start, Reason}) + after 1000 -> + throw({failed_to_start, timeout}) + end, + Modules = modules(Opts), + case cover:start() of + {ok, _Pid} -> + ok; + {error, {already_started, _Pid}} -> + ok; + Other -> + throw(Other) + end, + ok = cover_compile(Modules), + io:format("cover-compiled ~p modules~n", [length(Modules)]), + ok = put_project_root(ProjRoot), + ok = do_build_source_mapping(ProjRoot, Modules), + CachedModulesCount = ets:info(?SRC, size), + io:format("source-cached ~p modules~n", [CachedModulesCount]), + ok. + +%% @doc Abort cover data collection without exporting. +abort() -> + _ = cover:stop(), + case whereis(?SRC) of + undefined -> + ok; + Pid -> + Ref = monitor(process, Pid), + exit(Pid, kill), + receive + {'DOWN', Ref, process, Pid, _} -> + ok + end + end, + ok. + +%% @doc Export the coverage report in XML format. +%% e.g. `emqx_cover:export_and_stop("/tmp/cover.xml").' +export_and_stop(Path) when is_list(Path) -> + ProjectRoot = get_project_root(), + Config = #config{ + appname = ?OUTPUT_APPNAME, + sources = [ProjectRoot], + output = Path, + lookup_source = fun ?MODULE:lookup_source/1 + }, + covertool:generate_report(Config, cover:modules()). + +get_project_root() -> + [{_, Dir}] = ets:lookup(?SRC, {root, ?OUTPUT_APPNAME}), + Dir. + +put_project_root(Dir) -> + _ = ets:insert(?SRC, {{root, ?OUTPUT_APPNAME}, Dir}), + ok.
+ +do_build_source_mapping(Dir, Modules0) -> + Modules = sets:from_list(Modules0, [{version, 2}]), + All = filelib:wildcard("**/*.erl", Dir), + lists:foreach( + fun(Path) -> + ModuleNameStr = filename:basename(Path, ".erl"), + Module = list_to_atom(ModuleNameStr), + case sets:is_element(Module, Modules) of + true -> + ets:insert(?SRC, {Module, Path}); + false -> + ok + end + end, + All + ), + ok. + +lookup_source(Module) -> + case ets:lookup(?SRC, Module) of + [{_, Path}] -> + Path; + [] -> + false + end. + +modules(_Opts) -> + %% TODO better filter based on Opts, + %% e.g. we may want to see coverage info for ehttpc + Filter = fun is_emqx_module/1, + find_modules(Filter). + +cover_compile(Modules) -> + Results = cover:compile_beam(Modules), + Errors = lists:filter( + fun + ({ok, _}) -> false; + (_) -> true + end, + Results + ), + case Errors of + [] -> + ok; + _ -> + io:format("failed_to_cover_compile:~n~p~n", [Errors]), + throw(failed_to_cover_compile) + end. + +find_modules(Filter) -> + All = code:all_loaded(), + F = fun({M, _BeamPath}) -> Filter(M) andalso {true, M} end, + lists:filtermap(F, All). + +is_emqx_module(?MODULE) -> + %% do not cover-compile self + false; +is_emqx_module(Module) -> + case erlang:atom_to_binary(Module, utf8) of + <<"emqx", _/binary>> -> + true; + _ -> + false + end. + +os_env(Name) -> + os:getenv(Name, ""). diff --git a/apps/emqx_machine/src/emqx_global_gc.erl b/apps/emqx_machine/src/emqx_global_gc.erl index f17ab2d16..121855e68 100644 --- a/apps/emqx_machine/src/emqx_global_gc.erl +++ b/apps/emqx_machine/src/emqx_global_gc.erl @@ -86,7 +86,7 @@ ensure_timer(State) -> disabled -> State; Interval when is_integer(Interval) -> - TRef = emqx_misc:start_timer(Interval, run), + TRef = emqx_utils:start_timer(Interval, run), State#{timer := TRef} end. diff --git a/apps/emqx_machine/src/emqx_machine.app.src b/apps/emqx_machine/src/emqx_machine.app.src index fdfd2b28f..a44d2b36e 100644 --- a/apps/emqx_machine/src/emqx_machine.app.src +++ b/apps/emqx_machine/src/emqx_machine.app.src @@ -3,10 +3,10 @@ {id, "emqx_machine"}, {description, "The EMQX Machine"}, % strict semver, bump manually! - {vsn, "0.1.2"}, + {vsn, "0.2.3"}, {modules, []}, {registered, []}, - {applications, [kernel, stdlib]}, + {applications, [kernel, stdlib, emqx_ctl]}, {mod, {emqx_machine_app, []}}, {env, []}, {licenses, ["Apache-2.0"]}, diff --git a/apps/emqx_machine/src/emqx_machine.erl b/apps/emqx_machine/src/emqx_machine.erl index ec0aff55b..aa8f03ae5 100644 --- a/apps/emqx_machine/src/emqx_machine.erl +++ b/apps/emqx_machine/src/emqx_machine.erl @@ -19,6 +19,7 @@ -export([ start/0, graceful_shutdown/0, + brutal_shutdown/0, is_ready/0, node_status/0, @@ -29,6 +30,7 @@ %% @doc EMQX boot entrypoint. start() -> + emqx_mgmt_cli:load(), case os:type() of {win32, nt} -> ok; @@ -41,11 +43,15 @@ start() -> start_sysmon(), configure_shard_transports(), ekka:start(), - ok = print_otp_version_warning(). + ok. graceful_shutdown() -> emqx_machine_terminator:graceful_wait(). +%% only used when the node failed to boot +brutal_shutdown() -> + init:stop(). + set_backtrace_depth() -> {ok, Depth} = application:get_env(emqx_machine, backtrace_depth), _ = erlang:system_flag(backtrace_depth, Depth), @@ -55,17 +61,6 @@ set_backtrace_depth() -> is_ready() -> emqx_machine_terminator:is_running(). --if(?OTP_RELEASE > 22). -print_otp_version_warning() -> ok. --else. -print_otp_version_warning() -> - ?ULOG( - "WARNING: Running on Erlang/OTP version ~p. Recommended: 23~n", - [?OTP_RELEASE] - ). -% OTP_RELEASE > 22 --endif.
- start_sysmon() -> _ = application:load(system_monitor), application:set_env(system_monitor, node_status_fun, {?MODULE, node_status}), @@ -82,7 +77,7 @@ start_sysmon() -> end. node_status() -> - emqx_json:encode(#{ + emqx_utils_json:encode(#{ backend => mria_rlog:backend(), role => mria_rlog:role() }). diff --git a/apps/emqx_machine/src/emqx_machine_boot.erl b/apps/emqx_machine/src/emqx_machine_boot.erl index 4b3e5ea7d..e3f84079b 100644 --- a/apps/emqx_machine/src/emqx_machine_boot.erl +++ b/apps/emqx_machine/src/emqx_machine_boot.erl @@ -21,6 +21,7 @@ -export([stop_apps/0, ensure_apps_started/0]). -export([sorted_reboot_apps/0]). -export([start_autocluster/0]). +-export([stop_port_apps/0]). -dialyzer({no_match, [basic_reboot_apps/0]}). @@ -61,6 +62,20 @@ stop_apps() -> _ = emqx_alarm_handler:unload(), lists:foreach(fun stop_one_app/1, lists:reverse(sorted_reboot_apps())). +%% These port apps are terminated after the main apps. +%% They don't need to be stopped on reboot. +stop_port_apps() -> + Loaded = application:loaded_applications(), + lists:foreach( + fun(App) -> + case lists:keymember(App, 1, Loaded) of + true -> stop_one_app(App); + false -> ok + end + end, + [os_mon, jq] + ). + stop_one_app(App) -> ?SLOG(debug, #{msg => "stopping_app", app => App}), try @@ -134,8 +149,14 @@ basic_reboot_apps() -> emqx_plugins ], case emqx_release:edition() of - ce -> CE; - ee -> CE ++ [] + ce -> + CE; + ee -> + CE ++ + [ + emqx_eviction_agent, + emqx_node_rebalance + ] end. sorted_reboot_apps() -> diff --git a/apps/emqx_machine/src/emqx_machine_terminator.erl b/apps/emqx_machine/src/emqx_machine_terminator.erl index 314b8c705..77c53a64d 100644 --- a/apps/emqx_machine/src/emqx_machine_terminator.erl +++ b/apps/emqx_machine/src/emqx_machine_terminator.erl @@ -41,7 +41,7 @@ -define(DO_IT, graceful_shutdown). %% @doc This API is called to shutdown the Erlang VM by RPC call from remote shell node. -%% The shutown of apps is delegated to a to a process instead of doing it in the RPC spawned +%% The shutdown of apps is delegated to a process instead of doing it in the RPC spawned %% process which has a remote group leader. start_link() -> {ok, _} = gen_server:start_link({local, ?TERMINATOR}, ?MODULE, [], []). @@ -87,6 +87,8 @@ handle_cast(_Cast, State) -> handle_call(?DO_IT, _From, State) -> try + %% stop port apps before stopping other apps. + emqx_machine_boot:stop_port_apps(), emqx_machine_boot:stop_apps() catch C:E:St -> diff --git a/apps/emqx_machine/src/user_default.erl b/apps/emqx_machine/src/user_default.erl index ce5397c26..3e561b929 100644 --- a/apps/emqx_machine/src/user_default.erl +++ b/apps/emqx_machine/src/user_default.erl @@ -26,6 +26,14 @@ %% API -export([lock/0, unlock/0]). +-export([t/1, t2/1, t/2, t2/2, t/3, t2/3]). lock() -> emqx_restricted_shell:lock(). unlock() -> emqx_restricted_shell:unlock(). + +t(M) -> recon_trace:calls({M, '_', return_trace}, 300). +t2(M) -> recon_trace:calls({M, '_', return_trace}, 300, [{args, arity}]). +t(M, F) -> recon_trace:calls({M, F, return_trace}, 300). +t2(M, F) -> recon_trace:calls({M, F, return_trace}, 300, [{args, arity}]). +t(M, F, A) -> recon_trace:calls({M, F, A}, 300). +t2(M, F, A) -> recon_trace:calls({M, F, A}, 300, [{args, arity}]).
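The `t`/`t2` helpers added to `user_default` above are thin wrappers around `recon_trace:calls/2,3`, capped at 300 trace events. A usage sketch from the EMQX remote console (the traced module and function are illustrative):

```erlang
%% Attach to the node first, e.g. with ./bin/emqx remote_console.
t(emqx_broker).            %% trace all calls to a module, printing return values
t2(emqx_broker, publish).  %% trace one function, printing only argument arity
recon_trace:clear().       %% stop tracing when done (standard recon API)
```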
diff --git a/apps/emqx_machine/test/emqx_machine_SUITE.erl b/apps/emqx_machine/test/emqx_machine_SUITE.erl index 691cda677..02d03d983 100644 --- a/apps/emqx_machine/test/emqx_machine_SUITE.erl +++ b/apps/emqx_machine/test/emqx_machine_SUITE.erl @@ -103,3 +103,13 @@ t_custom_shard_transports(_Config) -> emqx_machine:start(), ?assertEqual(distr, mria_config:shard_transport(Shard)), ok. + +t_node_status(_Config) -> + JSON = emqx_machine:node_status(), + ?assertMatch( + #{ + <<"backend">> := _, + <<"role">> := <<"core">> + }, + jsx:decode(JSON) + ). diff --git a/apps/emqx_management/README.md b/apps/emqx_management/README.md index fa37d0f0f..aa5d0c606 100644 --- a/apps/emqx_management/README.md +++ b/apps/emqx_management/README.md @@ -1,12 +1,42 @@ -# emqx-management +# EMQX Management -EMQX Management API +EMQX Management offers various interfaces for administrators to interact with +the system, either by a remote console attached to a running node, a CLI (i.e. +`./emqx ctl`), or through its rich CRUD-style REST API (mostly used by EMQX's +dashboard). The system enables administrators to modify both cluster and +individual node configurations, and provides the ability to view and reset +different statistics and metrics. -## How to Design RESTful API? +## Functionality -http://restful-api-design.readthedocs.io/en/latest/scope.html +Among other things, it allows managing -default application see: -header: -authorization: Basic YWRtaW46cHVibGlj +* Alarms +* API Keys +* Banned clients, users or hosts +* Clients (and sessions) including their topic subscriptions +* Configurations +* Plugins +* Fixed subscriptions +* Topics + +Moreover, it lets you + +* modify hot and non-hot updatable configuration values, +* publish messages, as well as bulk messages, +* create trace files, +* and last but not least monitor system status. + +## Implementation Notes + +API endpoints are implemented using the `minirest` framework in combination with +HOCON schema and OpenAPI 3.0 specifications. + +## TODO/FIXME + +In its current state, there are some reverse dependencies from other applications +that call directly into `emqx_mgmt`. + +Also, and somewhat related, its bpapi proto modules call directly into +other applications. diff --git a/apps/emqx_management/i18n/emqx_mgmt_api_alarms_i18n.conf b/apps/emqx_management/i18n/emqx_mgmt_api_alarms_i18n.conf deleted file mode 100644 index 0ab09520e..000000000 --- a/apps/emqx_management/i18n/emqx_mgmt_api_alarms_i18n.conf +++ /dev/null @@ -1,84 +0,0 @@ -emqx_mgmt_api_alarms { - - list_alarms_api { - desc { - en: """List currently activated alarms or historical alarms, determined by query parameters.""" - zh: """列出当前激活的告警或历史告警,由查询参数决定。""" - } - } - - delete_alarms_api { - desc { - en: """Remove all historical alarms.""" - zh: """删除所有历史告警。""" - } - } - - delete_alarms_api_response204 { - desc { - en: """Historical alarms have been cleared successfully.""" - zh: """历史告警已成功清除。""" - } - } - - get_alarms_qs_activated { - desc { - en: """It is used to specify the alarm type of the query. -When true, it returns the currently activated alarm, -and when it is false, it returns the historical alarm.
-The default is false.""" - zh: """用于指定查询的告警类型, -为 true 时返回当前激活的告警,为 false 时返回历史告警,默认为 false。""" - } - } - - node { - desc { - en: """The name of the node that triggered this alarm.""" - zh: """触发此告警的节点名称。""" - } - } - - name { - desc { - en: """Alarm name, used to distinguish different alarms.""" - zh: """告警名称,用于区分不同的告警。""" - } - } - - message { - desc { - en: """Alarm message, which describes the alarm content in a human-readable format.""" - zh: """告警消息,以人类可读的方式描述告警内容。""" - } - } - - details { - desc { - en: """Alarm details, provides more alarm information, mainly for program processing.""" - zh: """告警详情,提供了更多的告警信息,主要提供给程序处理。""" - } - } - - duration { - desc { - en: """Indicates how long the alarm has lasted, in milliseconds.""" - zh: """表明告警已经持续了多久,单位:毫秒。""" - } - } - - activate_at { - desc { - en: """Alarm start time, using rfc3339 standard time format.""" - zh: """告警开始时间,使用 rfc3339 标准时间格式。""" - } - } - - deactivate_at { - desc { - en: """Alarm end time, using rfc3339 standard time format.""" - zh: """告警结束时间,使用 rfc3339 标准时间格式。""" - } - } - -} diff --git a/apps/emqx_management/i18n/emqx_mgmt_api_banned_i18n.conf b/apps/emqx_management/i18n/emqx_mgmt_api_banned_i18n.conf deleted file mode 100644 index 3045cb293..000000000 --- a/apps/emqx_management/i18n/emqx_mgmt_api_banned_i18n.conf +++ /dev/null @@ -1,98 +0,0 @@ -emqx_mgmt_api_banned { - - list_banned_api { - desc { - en: """List all currently banned client IDs, usernames and IP addresses.""" - zh: """列出目前所有被封禁的客户端 ID、用户名和 IP 地址。""" - } - } - - create_banned_api { - desc { - en: """Add a client ID, username or IP address to the blacklist.""" - zh: """添加一个客户端 ID、用户名或者 IP 地址到黑名单。""" - } - } - - create_banned_api_response400 { - desc { - en: """Bad request, possibly due to wrong parameters or the existence of a banned object.""" - zh: """错误的请求,可能是参数错误或封禁对象已存在等原因。""" - } - } - - delete_banned_api { - desc { - en: """Remove a client ID, username or IP address from the blacklist.""" - zh: """将一个客户端 ID、用户名或者 IP 地址从黑名单中删除。""" - } - } - - delete_banned_api_response404 { - desc { - en: """The banned object was not found in the blacklist.""" - zh: """未在黑名单中找到该封禁对象。""" - } - } - - as { - desc { - en: """Ban method, which can be client ID, username or IP address.""" - zh: """封禁方式,可以通过客户端 ID、用户名或者 IP 地址等方式进行封禁。""" - } - label { - en: """Ban Method""" - zh: """封禁方式""" - } - } - who { - desc { - en: """Ban object, specific client ID, username or IP address.""" - zh: """封禁对象,具体的客户端 ID、用户名或者 IP 地址。""" - } - label { - en: """Ban Object""" - zh: """封禁对象""" - } - } - by { - desc { - en: """Initiator of the ban.""" - zh: """封禁的发起者。""" - } - label { - en: """Ban Initiator""" - zh: """封禁发起者""" - } - } - reason { - desc { - en: """Ban reason, record the reason why the current object was banned.""" - zh: """封禁原因,记录当前对象被封禁的原因。""" - } - label { - en: """Ban Reason""" - zh: """封禁原因""" - } - } - at { - desc { - en: """The start time of the ban, the format is rfc3339, the default is the time when the operation was initiated.""" - zh: """封禁的起始时间,格式为 rfc3339,默认为发起操作的时间。""" - } - label { - en: """Ban Time""" - zh: """封禁时间""" - } - } - until { - desc { - en: """The end time of the ban, the format is rfc3339, the default is the time when the operation was initiated + 5 minutes.""" - zh: """封禁的结束时间,式为 rfc3339,默认为发起操作的时间 + 5 分钟。""" - } - label { - en: """Ban End Time""" - zh: """封禁结束时间""" - } - } -} diff --git a/apps/emqx_management/i18n/emqx_mgmt_api_publish_i18n.conf b/apps/emqx_management/i18n/emqx_mgmt_api_publish_i18n.conf deleted file mode 100644 index 
d845bff4b..000000000 --- a/apps/emqx_management/i18n/emqx_mgmt_api_publish_i18n.conf +++ /dev/null @@ -1,172 +0,0 @@ - -emqx_mgmt_api_publish { - publish_api { - desc { - en: """ -Publish one message.
-Possible HTTP status response codes are:
-200: The message is delivered to at least one subscriber;
-202: No matched subscribers;
-400: Message is invalid. for example bad topic name, or QoS is out of range;
-503: Failed to deliver the message to subscriber(s);
-""" - zh: """ -发布一个消息。
-可能的 HTTP 状态码如下:
-200: 消息被成功发送到至少一个订阅。
-202: 没有匹配到任何订阅。
-400: 消息编码错误,如非法主题,或 QoS 超出范围等。
-503: 服务重启等过程中导致转发失败。

-""" - } - } - publish_bulk_api { - desc { - en: """ -Publish a batch of messages.
-Possible HTTP response status code are:
-200: All messages are delivered to at least one subscriber;
-202: At least one message was not delivered to any subscriber;
-400: At least one message is invalid. For example bad topic name, or QoS is out of range;
-503: Failed to deliver at least one of the messages;
- -In case there is at lest one invalid message in the batch, the HTTP response body -is the same as for /publish API.
-Otherwise the HTTP response body is an array of JSON objects indicating the publish -result of each individual message in the batch. -""" - zh: """ -批量发布一组消息。
-可能的 HTTP 状态码如下:
-200: 所有的消息都被成功发送到至少一个订阅。
-202: 至少有一个消息没有匹配到任何订阅。
-400: 至少有一个消息编码错误,如非法主题,或 QoS 超出范围等。
-503: 至少有一个小因为服务重启的原因导致转发失败。
- -请求的 Body 或者 Body 中包含的某个消息无法通过 API 规范的类型检查时,HTTP 响应的消息与发布单个消息的 API - /publish 是一样的。 -如果所有的消息都是合法的,那么 HTTP 返回的内容是一个 JSON 数组,每个元素代表了该消息转发的状态。 - -""" - } - } - - topic_name { - desc { - en: "Topic Name" - zh: "主题名称" - } - } - qos { - desc { - en: "MQTT message QoS" - zh: "MQTT 消息的 QoS" - } - } - clientid { - desc { - en: "Each message can be published as if it is done on behalf of an MQTT client whos ID can be specified in this field." - zh: "每个消息都可以带上一个 MQTT 客户端 ID,用于模拟 MQTT 客户端的发布行为。" - } - } - payload { - desc { - en: "The MQTT message payload." - zh: "MQTT 消息体。" - } - } - retain { - desc { - en: "A boolean field to indicate if this message should be retained." - zh: "布尔型字段,用于表示该消息是否保留消息。" - } - } - payload_encoding { - desc { - en: "MQTT Payload Encoding, base64 or plain. When set to base64, the message is decoded before it is published." - zh: "MQTT 消息体的编码方式,可以是 base64plain。当设置为 base64 时,消息在发布前会先被解码。" - } - } - message_id { - desc { - en: "A globally unique message ID for correlation/tracing." - zh: "全局唯一的一个消息 ID,方便用于关联和追踪。" - } - } - reason_code { - desc { - en: """ -The MQTT reason code, as the same ones used in PUBACK packet.
-Currently supported codes are:
- -16(0x10): No matching subscribers;
-131(0x81): Error happened when dispatching the message. e.g. during EMQX restart;
-144(0x90): Topic name invalid;
-151(0x97): Publish rate limited, or message size exceeded limit. The global size limit can be configured with mqtt.max_packet_size
-NOTE: The message size is estimated with the received topic and payload size, meaning the actual size of serialized bytes (when sent to MQTT subscriber) -might be slightly over the limit. -""" - zh: """ -MQTT 消息发布的错误码,这些错误码也是 MQTT 规范中 PUBACK 消息可能携带的错误码。
-当前支持如下错误码:
- -16(0x10):没能匹配到任何订阅;
-131(0x81):消息转发时发生错误,例如 EMQX 服务重启;
-144(0x90):主题名称非法;
-151(0x97):受到了速率限制,或者消息尺寸过大。全局消息大小限制可以通过配置项 mqtt.max_packet_size 来进行修改。
-注意:消息尺寸的是通过主题和消息体的字节数进行估算的。具体发布时所占用的字节数可能会稍大于这个估算的值。 -""" - } - } - error_message { - desc { - en: "Describes the failure reason in detail." - zh: "失败的详细原因。" - } - } - message_properties { - desc { - en: "The Properties of the PUBLISH message." - zh: "PUBLISH 消息里的 Property 字段。" - } - } - msg_payload_format_indicator { - desc { - en: """0 (0x00) Byte Indicates that the Payload is unspecified bytes, which is equivalent to not sending a Payload Format Indicator. - -1 (0x01) Byte Indicates that the Payload is UTF-8 Encoded Character Data. The UTF-8 data in the Payload MUST be well-formed UTF-8 as defined by the Unicode specification and restated in RFC 3629. -""" - zh: "载荷格式指示标识符,0 表示载荷是未指定格式的数据,相当于没有发送载荷格式指示;1 表示载荷是 UTF-8 编码的字符数据,载荷中的 UTF-8 数据必须是按照 Unicode 的规范和 RFC 3629 的标准要求进行编码的。" - } - } - msg_message_expiry_interval { - desc { - en: "Identifier of the Message Expiry Interval. If the Message Expiry Interval has passed and the Server has not managed to start onward delivery to a matching subscriber, then it MUST delete the copy of the message for that subscriber." - zh: "消息过期间隔标识符,以秒为单位。当消失已经过期时,如果服务端还没有开始向匹配的订阅者投递该消息,则服务端会删除该订阅者的消息副本。如果不设置,则消息永远不会过期" - } - } - msg_response_topic { - desc { - en: "Identifier of the Response Topic.The Response Topic MUST be a UTF-8 Encoded, It MUST NOT contain wildcard characters." - zh: "响应主题标识符, UTF-8 编码的字符串,用作响应消息的主题名。响应主题不能包含通配符,也不能包含多个主题,否则将造成协议错误。当存在响应主题时,消息将被视作请求报文。服务端在收到应用消息时必须将响应主题原封不动的发送给所有的订阅者。" - } - } - msg_correlation_data { - desc { - en: "Identifier of the Correlation Data. The Server MUST send the Correlation Data unaltered to all subscribers receiving the Application Message." - zh: "对比数据标识符,服务端在收到应用消息时必须原封不动的把对比数据发送给所有的订阅者。对比数据只对请求消息(Request Message)的发送端和响应消息(Response Message)的接收端有意义。" - } - } - msg_user_properties { - desc { - en: "The User-Property key-value pairs. Note: in case there are duplicated keys, only the last one will be used." - zh: "指定 MQTT 消息的 User Property 键值对。注意,如果出现重复的键,只有最后一个会保留。" - } - } - msg_content_type { - desc { - en: "The Content Type MUST be a UTF-8 Encoded String." - zh: "内容类型标识符,以 UTF-8 格式编码的字符串,用来描述应用消息的内容,服务端必须把收到的应用消息中的内容类型原封不动的发送给所有的订阅者。" - } - } -} diff --git a/apps/emqx_management/i18n/emqx_mgmt_api_status_i18n.conf b/apps/emqx_management/i18n/emqx_mgmt_api_status_i18n.conf deleted file mode 100644 index fae17b35d..000000000 --- a/apps/emqx_management/i18n/emqx_mgmt_api_status_i18n.conf +++ /dev/null @@ -1,44 +0,0 @@ -emqx_mgmt_api_status { - get_status_api { - desc { - en: "Serves as a health check for the node. Returns a plain text response" - " describing the status of the node. This endpoint requires no" - " authentication.\n" - "\n" - "Returns status code 200 if the EMQX application is up and running, " - "503 otherwise." - "\n" - "This API was introduced in v5.0.10." - "\n" - "The GET `/status` endpoint (without the `/api/...` prefix) is also an alias" - " to this endpoint and works in the same way. This alias has been available since" - " v5.0.0." 
- zh: "作为节点的健康检查。 返回一个纯文本的响应,描述节点的状态。\n" - "\n" - "如果 EMQX 应用程序已经启动并运行,返回状态代码 200,否则返回 503。\n" - "\n" - "这个API是在v5.0.10中引入的。" - "\n" - "GET `/status`端点(没有`/api/...`前缀)也是这个端点的一个别名,工作方式相同。" - " 这个别名从v5.0.0开始就有了。" - } - } - - get_status_response200 { - desc { - en: "Node emqx@127.0.0.1 is started\n" - "emqx is running" - zh: "Node emqx@127.0.0.1 is started\n" - "emqx is running" - } - } - - get_status_response503 { - desc { - en: "Node emqx@127.0.0.1 is stopped\n" - "emqx is not_running" - zh: "Node emqx@127.0.0.1 is stopped\n" - "emqx is not_running" - } - } -} diff --git a/apps/emqx_management/include/emqx_mgmt.hrl b/apps/emqx_management/include/emqx_mgmt.hrl index b68a9a634..7f6b5a675 100644 --- a/apps/emqx_management/include/emqx_mgmt.hrl +++ b/apps/emqx_management/include/emqx_mgmt.hrl @@ -16,4 +16,4 @@ -define(MANAGEMENT_SHARD, emqx_management_shard). --define(MAX_ROW_LIMIT, 100). +-define(DEFAULT_ROW_LIMIT, 100). diff --git a/apps/emqx_management/rebar.config b/apps/emqx_management/rebar.config index 73cbf471f..b2f5a40af 100644 --- a/apps/emqx_management/rebar.config +++ b/apps/emqx_management/rebar.config @@ -1,6 +1,9 @@ %% -*- mode: erlang -*- -{deps, [{emqx, {path, "../emqx"}}]}. +{deps, [ + {emqx, {path, "../emqx"}}, + {emqx_utils, {path, "../emqx_utils"}} +]}. {edoc_opts, [{preprocess, true}]}. {erl_opts, [ diff --git a/apps/emqx_management/src/emqx_management.app.src b/apps/emqx_management/src/emqx_management.app.src index 5df8fe4df..34f3dd1fe 100644 --- a/apps/emqx_management/src/emqx_management.app.src +++ b/apps/emqx_management/src/emqx_management.app.src @@ -2,10 +2,10 @@ {application, emqx_management, [ {description, "EMQX Management API and CLI"}, % strict semver, bump manually! - {vsn, "5.0.10"}, + {vsn, "5.0.21"}, {modules, []}, {registered, [emqx_management_sup]}, - {applications, [kernel, stdlib, emqx_plugins, minirest, emqx]}, + {applications, [kernel, stdlib, emqx_plugins, minirest, emqx, emqx_ctl]}, {mod, {emqx_mgmt_app, []}}, {env, []}, {licenses, ["Apache-2.0"]}, diff --git a/apps/emqx_management/src/emqx_mgmt.erl b/apps/emqx_management/src/emqx_mgmt.erl index 6b38e8ca0..9553730ec 100644 --- a/apps/emqx_management/src/emqx_mgmt.erl +++ b/apps/emqx_management/src/emqx_mgmt.erl @@ -21,8 +21,6 @@ -elvis([{elvis_style, god_modules, disable}]). -include_lib("stdlib/include/qlc.hrl"). --include_lib("emqx/include/emqx.hrl"). --include_lib("emqx/include/emqx_mqtt.hrl"). %% Nodes and Brokers API -export([ @@ -71,8 +69,6 @@ list_subscriptions/1, list_subscriptions_via_topic/2, list_subscriptions_via_topic/3, - lookup_subscriptions/1, - lookup_subscriptions/2, do_list_subscriptions/0 ]). @@ -104,9 +100,10 @@ ]). %% Common Table API --export([max_row_limit/0]). - --define(APP, emqx_management). +-export([ + default_row_limit/0, + vm_stats/0 +]). -elvis([{elvis_style, god_modules, disable}]). @@ -115,8 +112,8 @@ %%-------------------------------------------------------------------- list_nodes() -> - Running = mria_mnesia:cluster_nodes(running), - Stopped = mria_mnesia:cluster_nodes(stopped), + Running = emqx:cluster_nodes(running), + Stopped = emqx:cluster_nodes(stopped), DownNodes = lists:map(fun stopped_node_info/1, Stopped), [{Node, Info} || #{node := Node} = Info <- node_info(Running)] ++ DownNodes. 
@@ -126,7 +123,7 @@ lookup_node(Node) -> node_info() -> {UsedRatio, Total} = get_sys_memory(), - Info = maps:from_list([{K, list_to_binary(V)} || {K, V} <- emqx_vm:loads()]), + Info = maps:from_list(emqx_vm:loads()), BrokerInfo = emqx_sys:info(), Info#{ node => node(), @@ -144,13 +141,32 @@ node_info() -> uptime => proplists:get_value(uptime, BrokerInfo), version => iolist_to_binary(proplists:get_value(version, BrokerInfo)), edition => emqx_release:edition_longstr(), - role => mria_rlog:role() + role => mria_rlog:role(), + log_path => log_path(), + sys_path => iolist_to_binary(code:root_dir()) }. +log_path() -> + RootDir = code:root_dir(), + Configs = logger:get_handler_config(), + case get_log_path(Configs) of + undefined -> + <<"log.file.enable is false, not logging to file.">>; + Path -> + iolist_to_binary(filename:join(RootDir, Path)) + end. + +get_log_path([#{config := #{file := Path}} | _LoggerConfigs]) -> + filename:dirname(Path); +get_log_path([_LoggerConfig | LoggerConfigs]) -> + get_log_path(LoggerConfigs); +get_log_path([]) -> + undefined. + get_sys_memory() -> case os:type() of {unix, linux} -> - load_ctl:get_sys_memory(); + emqx_mgmt_cache:get_sys_memory(); _ -> {0, 0} end. @@ -159,14 +175,31 @@ node_info(Nodes) -> emqx_rpc:unwrap_erpc(emqx_management_proto_v3:node_info(Nodes)). stopped_node_info(Node) -> - #{name => Node, node_status => 'stopped'}. + {Node, #{node => Node, node_status => 'stopped'}}. + +vm_stats() -> + Idle = + case cpu_sup:util([detailed]) of + %% Not supported on Windows + {_, 0, 0, _} -> 0; + {_Num, _Use, IdleList, _} -> proplists:get_value(idle, IdleList, 0) + end, + RunQueue = erlang:statistics(run_queue), + {MemUsedRatio, MemTotal} = get_sys_memory(), + [ + {run_queue, RunQueue}, + {cpu_idle, Idle}, + {cpu_use, 100 - Idle}, + {total_memory, MemTotal}, + {used_memory, erlang:round(MemTotal * MemUsedRatio)} + ]. %%-------------------------------------------------------------------- %% Brokers %%-------------------------------------------------------------------- list_brokers() -> - Running = mria_mnesia:running_nodes(), + Running = emqx:running_nodes(), [{Node, Broker} || #{node := Node} = Broker <- broker_info(Running)]. lookup_broker(Node) -> @@ -174,8 +207,13 @@ lookup_broker(Node) -> Broker. broker_info() -> - Info = maps:from_list([{K, iolist_to_binary(V)} || {K, V} <- emqx_sys:info()]), - Info#{node => node(), otp_release => otp_rel(), node_status => 'Running'}. + Info = lists:foldl(fun convert_broker_info/2, #{}, emqx_sys:info()), + Info#{node => node(), otp_release => otp_rel(), node_status => 'running'}. + +convert_broker_info({uptime, Uptime}, M) -> + M#{uptime => emqx_datetime:human_readable_duration_string(Uptime)}; +convert_broker_info({K, V}, M) -> + M#{K => iolist_to_binary(V)}. broker_info(Nodes) -> emqx_rpc:unwrap_erpc(emqx_management_proto_v3:broker_info(Nodes)). %%-------------------------------------------------------------------- %% Metrics %%-------------------------------------------------------------------- get_metrics() -> - nodes_info_count([get_metrics(Node) || Node <- mria_mnesia:running_nodes()]). + nodes_info_count([get_metrics(Node) || Node <- emqx:running_nodes()]). get_metrics(Node) -> unwrap_rpc(emqx_proto_v1:get_metrics(Node)).
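The new `vm_stats/0` in the hunk above returns a flat proplist that callers can render directly. An illustrative result shape (all numbers invented; `used_memory` is `round(MemTotal * MemUsedRatio)` as computed in the code):

```erlang
[{run_queue, 1},
 {cpu_idle, 92},
 {cpu_use, 8},
 {total_memory, 8589934592},
 {used_memory, 2147483648}]
```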
@@ -200,13 +238,20 @@ get_stats() -> 'subscriptions.shared.count', 'subscriptions.shared.max' ], - CountStats = nodes_info_count([ - begin - Stats = get_stats(Node), - delete_keys(Stats, GlobalStatsKeys) - end - || Node <- mria_mnesia:running_nodes() - ]), + CountStats = nodes_info_count( + lists:foldl( + fun(Node, Acc) -> + case get_stats(Node) of + {error, _} -> + Acc; + Stats -> + [delete_keys(Stats, GlobalStatsKeys) | Acc] + end + end, + [], + emqx:running_nodes() + ) + ), GlobalStats = maps:with(GlobalStatsKeys, maps:from_list(get_stats(node()))), maps:merge(CountStats, GlobalStats). @@ -237,15 +282,15 @@ nodes_info_count(PropList) -> lookup_client({clientid, ClientId}, FormatFun) -> lists:append([ lookup_client(Node, {clientid, ClientId}, FormatFun) - || Node <- mria_mnesia:running_nodes() + || Node <- emqx:running_nodes() ]); lookup_client({username, Username}, FormatFun) -> lists:append([ lookup_client(Node, {username, Username}, FormatFun) - || Node <- mria_mnesia:running_nodes() + || Node <- emqx:running_nodes() ]). -lookup_client(Node, Key, {M, F}) -> +lookup_client(Node, Key, FormatFun) -> case unwrap_rpc(emqx_cm_proto_v1:lookup_client(Node, Key)) of {error, Err} -> {error, Err}; @@ -253,18 +298,23 @@ lookup_client(Node, Key, {M, F}) -> lists:map( fun({Chan, Info0, Stats}) -> Info = Info0#{node => Node}, - M:F({Chan, Info, Stats}) + maybe_format(FormatFun, {Chan, Info, Stats}) end, L ) end. -kickout_client({ClientID, FormatFun}) -> - case lookup_client({clientid, ClientID}, FormatFun) of +maybe_format(undefined, A) -> + A; +maybe_format({M, F}, A) -> + M:F(A). + +kickout_client(ClientId) -> + case lookup_client({clientid, ClientId}, undefined) of [] -> {error, not_found}; _ -> - Results = [kickout_client(Node, ClientID) || Node <- mria_mnesia:running_nodes()], + Results = [kickout_client(Node, ClientId) || Node <- emqx:running_nodes()], check_results(Results) end. @@ -275,35 +325,40 @@ list_authz_cache(ClientId) -> call_client(ClientId, list_authz_cache). list_client_subscriptions(ClientId) -> - Results = [client_subscriptions(Node, ClientId) || Node <- mria_mnesia:running_nodes()], - Filter = - fun - ({error, _}) -> - false; - ({_Node, List}) -> - erlang:is_list(List) andalso 0 < erlang:length(List) - end, - case lists:filter(Filter, Results) of - [] -> []; - [Result | _] -> Result + case lookup_client({clientid, ClientId}, undefined) of + [] -> + {error, not_found}; + _ -> + Results = [client_subscriptions(Node, ClientId) || Node <- emqx:running_nodes()], + Filter = + fun + ({error, _}) -> + false; + ({_Node, List}) -> + erlang:is_list(List) andalso 0 < erlang:length(List) + end, + case lists:filter(Filter, Results) of + [] -> []; + [Result | _] -> Result + end end. client_subscriptions(Node, ClientId) -> {Node, unwrap_rpc(emqx_broker_proto_v1:list_client_subscriptions(Node, ClientId))}. clean_authz_cache(ClientId) -> - Results = [clean_authz_cache(Node, ClientId) || Node <- mria_mnesia:running_nodes()], + Results = [clean_authz_cache(Node, ClientId) || Node <- emqx:running_nodes()], check_results(Results). clean_authz_cache(Node, ClientId) -> unwrap_rpc(emqx_proto_v1:clean_authz_cache(Node, ClientId)). clean_authz_cache_all() -> - Results = [{Node, clean_authz_cache_all(Node)} || Node <- mria_mnesia:running_nodes()], + Results = [{Node, clean_authz_cache_all(Node)} || Node <- emqx:running_nodes()], wrap_results(Results). 
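With the `maybe_format/2` change above, `lookup_client/2` now accepts `undefined` as the format function and returns raw `{Chan, Info, Stats}` tuples. A sketch (the client ID is illustrative):

```erlang
%% Raw channel tuples, no formatting applied:
Raw = emqx_mgmt:lookup_client({clientid, <<"c1">>}, undefined),
%% Formatted for the HTTP API via the usual {Module, Function} pair:
Pretty = emqx_mgmt:lookup_client(
    {clientid, <<"c1">>},
    {emqx_mgmt_api_clients, format_channel_info}
).
```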
clean_pem_cache_all() -> - Results = [{Node, clean_pem_cache_all(Node)} || Node <- mria_mnesia:running_nodes()], + Results = [{Node, clean_pem_cache_all(Node)} || Node <- emqx:running_nodes()], wrap_results(Results). wrap_results(Results) -> @@ -331,7 +386,7 @@ set_keepalive(_ClientId, _Interval) -> %% @private call_client(ClientId, Req) -> - Results = [call_client(Node, ClientId, Req) || Node <- mria_mnesia:running_nodes()], + Results = [call_client(Node, ClientId, Req) || Node <- emqx:running_nodes()], Expected = lists:filter( fun ({error, _}) -> false; @@ -368,17 +423,11 @@ call_client(Node, ClientId, Req) -> %%-------------------------------------------------------------------- %% Subscriptions %%-------------------------------------------------------------------- --spec do_list_subscriptions() -> [map()]. +-spec do_list_subscriptions() -> no_return(). do_list_subscriptions() -> - case check_row_limit([mqtt_subproperty]) of - false -> - throw(max_row_limit); - ok -> - [ - #{topic => Topic, clientid => ClientId, options => Options} - || {{Topic, ClientId}, Options} <- ets:tab2list(mqtt_subproperty) - ] - end. + %% [FIXME] Add a function to `emqx_broker` that returns the list of subscriptions + %% and either redirect from here or call it via bpapi directly (EMQX-8993). + throw(not_implemented). list_subscriptions(Node) -> unwrap_rpc(emqx_management_proto_v3:list_subscriptions(Node)). @@ -386,7 +435,7 @@ list_subscriptions(Node) -> list_subscriptions_via_topic(Topic, FormatFun) -> lists:append([ list_subscriptions_via_topic(Node, Topic, FormatFun) - || Node <- mria_mnesia:running_nodes() + || Node <- emqx:running_nodes() ]). list_subscriptions_via_topic(Node, Topic, _FormatFun = {M, F}) -> @@ -395,18 +444,12 @@ list_subscriptions_via_topic(Node, Topic, _FormatFun = {M, F}) -> Result -> M:F(Result) end. -lookup_subscriptions(ClientId) -> - lists:append([lookup_subscriptions(Node, ClientId) || Node <- mria_mnesia:running_nodes()]). - -lookup_subscriptions(Node, ClientId) -> - unwrap_rpc(emqx_broker_proto_v1:list_client_subscriptions(Node, ClientId)). - %%-------------------------------------------------------------------- %% PubSub %%-------------------------------------------------------------------- subscribe(ClientId, TopicTables) -> - subscribe(mria_mnesia:running_nodes(), ClientId, TopicTables). + subscribe(emqx:running_nodes(), ClientId, TopicTables). subscribe([Node | Nodes], ClientId, TopicTables) -> case unwrap_rpc(emqx_management_proto_v3:subscribe(Node, ClientId, TopicTables)) of @@ -431,7 +474,7 @@ publish(Msg) -> -spec unsubscribe(emqx_types:clientid(), emqx_types:topic()) -> {unsubscribe, _} | {error, channel_not_found}. unsubscribe(ClientId, Topic) -> - unsubscribe(mria_mnesia:running_nodes(), ClientId, Topic). + unsubscribe(emqx:running_nodes(), ClientId, Topic). -spec unsubscribe([node()], emqx_types:clientid(), emqx_types:topic()) -> {unsubscribe, _} | {error, channel_not_found}. @@ -454,7 +497,7 @@ do_unsubscribe(ClientId, Topic) -> -spec unsubscribe_batch(emqx_types:clientid(), [emqx_types:topic()]) -> {unsubscribe, _} | {error, channel_not_found}. unsubscribe_batch(ClientId, Topics) -> - unsubscribe_batch(mria_mnesia:running_nodes(), ClientId, Topics). + unsubscribe_batch(emqx:running_nodes(), ClientId, Topics). -spec unsubscribe_batch([node()], emqx_types:clientid(), [emqx_types:topic()]) -> {unsubscribe_batch, _} | {error, channel_not_found}.
@@ -479,7 +522,7 @@ do_unsubscribe_batch(ClientId, Topics) -> %%-------------------------------------------------------------------- get_alarms(Type) -> - [{Node, get_alarms(Node, Type)} || Node <- mria_mnesia:running_nodes()]. + [{Node, get_alarms(Node, Type)} || Node <- emqx:running_nodes()]. get_alarms(Node, Type) -> add_duration_field(unwrap_rpc(emqx_proto_v1:get_alarms(Node, Type))). @@ -488,7 +531,7 @@ deactivate(Node, Name) -> unwrap_rpc(emqx_proto_v1:deactivate_alarm(Node, Name)). delete_all_deactivated_alarms() -> - [delete_all_deactivated_alarms(Node) || Node <- mria_mnesia:running_nodes()]. + [delete_all_deactivated_alarms(Node) || Node <- emqx:running_nodes()]. delete_all_deactivated_alarms(Node) -> unwrap_rpc(emqx_proto_v1:delete_all_deactivated_alarms(Node)). @@ -536,24 +579,11 @@ unwrap_rpc(Res) -> otp_rel() -> iolist_to_binary([emqx_vm:get_otp_version(), "/", erlang:system_info(version)]). -check_row_limit(Tables) -> - check_row_limit(Tables, max_row_limit()). - -check_row_limit([], _Limit) -> - ok; -check_row_limit([Tab | Tables], Limit) -> - case table_size(Tab) > Limit of - true -> false; - false -> check_row_limit(Tables, Limit) - end. - check_results(Results) -> case lists:any(fun(Item) -> Item =:= ok end, Results) of true -> ok; false -> unwrap_rpc(lists:last(Results)) end. -max_row_limit() -> - ?MAX_ROW_LIMIT. - -table_size(Tab) -> ets:info(Tab, size). +default_row_limit() -> + ?DEFAULT_ROW_LIMIT. diff --git a/apps/emqx_management/src/emqx_mgmt_api.erl b/apps/emqx_management/src/emqx_mgmt_api.erl index 893007ebf..8365b983c 100644 --- a/apps/emqx_management/src/emqx_mgmt_api.erl +++ b/apps/emqx_management/src/emqx_mgmt_api.erl @@ -20,12 +20,10 @@ -elvis([{elvis_style, dont_repeat_yourself, #{min_complexity => 100}}]). --define(FRESH_SELECT, fresh_select). -define(LONG_QUERY_TIMEOUT, 50000). -export([ - paginate/3, - paginate/4 + paginate/3 ]). %% first_next query APIs @@ -35,6 +33,10 @@ b2i/1 ]). +-ifdef(TEST). +-export([paginate_test_format/1]). +-endif. + -export_type([ match_spec_and_filter/0 ]). @@ -59,14 +61,14 @@ -export([do_query/2, apply_total_query/1]). -paginate(Tables, Params, {Module, FormatFun}) -> - Qh = query_handle(Tables), - Count = count(Tables), - do_paginate(Qh, Count, Params, {Module, FormatFun}). - -paginate(Tables, MatchSpec, Params, {Module, FormatFun}) -> - Qh = query_handle(Tables, MatchSpec), - Count = count(Tables, MatchSpec), +-spec paginate(atom(), map(), {atom(), atom()}) -> + #{ + meta => #{page => pos_integer(), limit => pos_integer(), count => pos_integer()}, + data => list(term()) + }. +paginate(Table, Params, {Module, FormatFun}) -> + Qh = query_handle(Table), + Count = count(Table), do_paginate(Qh, Count, Params, {Module, FormatFun}). do_paginate(Qh, Count, Params, {Module, FormatFun}) -> @@ -87,57 +89,17 @@ do_paginate(Qh, Count, Params, {Module, FormatFun}) -> data => [erlang:apply(Module, FormatFun, [Row]) || Row <- Rows] }. -query_handle(Table) when is_atom(Table) -> - qlc:q([R || R <- ets:table(Table)]); -query_handle({Table, Opts}) when is_atom(Table) -> - qlc:q([R || R <- ets:table(Table, Opts)]); -query_handle([Table]) when is_atom(Table) -> - qlc:q([R || R <- ets:table(Table)]); -query_handle([{Table, Opts}]) when is_atom(Table) -> - qlc:q([R || R <- ets:table(Table, Opts)]); -query_handle(Tables) -> - % - qlc:append([query_handle(T) || T <- Tables]). +query_handle(Table) -> + qlc:q([R || R <- ets:table(Table)]). 
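With `paginate/4` removed, callers pass a single ETS table plus page parameters. A usage sketch (the table and format function are illustrative; any ETS table and `{Module, FormatFun}` pair works):

```erlang
#{meta := #{page := 1, limit := 20, count := _Total}, data := _Rows} =
    emqx_mgmt_api:paginate(
        emqx_banned,
        #{<<"page">> => 1, <<"limit">> => 20},
        {emqx_mgmt_api_banned, format}
    ).
```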
-query_handle(Table, MatchSpec) when is_atom(Table) -> - Options = {traverse, {select, MatchSpec}}, - qlc:q([R || R <- ets:table(Table, Options)]); -query_handle([Table], MatchSpec) when is_atom(Table) -> - Options = {traverse, {select, MatchSpec}}, - qlc:q([R || R <- ets:table(Table, Options)]); -query_handle(Tables, MatchSpec) -> - Options = {traverse, {select, MatchSpec}}, - qlc:append([qlc:q([E || E <- ets:table(T, Options)]) || T <- Tables]). +count(Table) -> + ets:info(Table, size). -count(Table) when is_atom(Table) -> - ets:info(Table, size); -count({Table, _}) when is_atom(Table) -> - ets:info(Table, size); -count([Table]) when is_atom(Table) -> - ets:info(Table, size); -count([{Table, _}]) when is_atom(Table) -> - ets:info(Table, size); -count(Tables) -> - lists:sum([count(T) || T <- Tables]). - -count(Table, MatchSpec) when is_atom(Table) -> - [{MatchPattern, Where, _Re}] = MatchSpec, - NMatchSpec = [{MatchPattern, Where, [true]}], - ets:select_count(Table, NMatchSpec); -count([Table], MatchSpec) when is_atom(Table) -> - count(Table, MatchSpec); -count(Tables, MatchSpec) -> - lists:sum([count(T, MatchSpec) || T <- Tables]). - -page(Params) when is_map(Params) -> - maps:get(<<"page">>, Params, 1); page(Params) -> - proplists:get_value(<<"page">>, Params, <<"1">>). + maps:get(<<"page">>, Params, 1). limit(Params) when is_map(Params) -> - maps:get(<<"limit">>, Params, emqx_mgmt:max_row_limit()); -limit(Params) -> - proplists:get_value(<<"limit">>, Params, emqx_mgmt:max_row_limit()). + maps:get(<<"limit">>, Params, emqx_mgmt:default_row_limit()). %%-------------------------------------------------------------------- %% Node Query @@ -174,13 +136,12 @@ do_node_query( case do_query(Node, QueryState) of {error, {badrpc, R}} -> {error, Node, {badrpc, R}}; - {Rows, NQueryState = #{continuation := ?FRESH_SELECT}} -> - {_, NResultAcc} = accumulate_query_rows(Node, Rows, NQueryState, ResultAcc), - NResultAcc; - {Rows, NQueryState} -> + {Rows, NQueryState = #{complete := Complete}} -> case accumulate_query_rows(Node, Rows, NQueryState, ResultAcc) of {enough, NResultAcc} -> - NResultAcc; + finalize_query(NResultAcc, NQueryState); + {_, NResultAcc} when Complete -> + finalize_query(NResultAcc, NQueryState); {more, NResultAcc} -> do_node_query(Node, NQueryState, NResultAcc) end @@ -202,7 +163,7 @@ cluster_query(Tab, QString, QSchema, MsFun, FmtFun) -> {error, page_limit_invalid}; Meta -> {_CodCnt, NQString} = parse_qstring(QString, QSchema), - Nodes = mria_mnesia:running_nodes(), + Nodes = emqx:running_nodes(), ResultAcc = init_query_result(), QueryState = init_query_state(Tab, NQString, MsFun, Meta), NResultAcc = do_cluster_query( @@ -212,8 +173,6 @@ cluster_query(Tab, QString, QSchema, MsFun, FmtFun) -> end. 
%% @private -do_cluster_query([], _QueryState, ResultAcc) -> - ResultAcc; do_cluster_query( [Node | Tail] = Nodes, QueryState, @@ -222,31 +181,29 @@ do_cluster_query( case do_query(Node, QueryState) of {error, {badrpc, R}} -> {error, Node, {badrpc, R}}; - {Rows, NQueryState} -> + {Rows, NQueryState = #{complete := Complete}} -> case accumulate_query_rows(Node, Rows, NQueryState, ResultAcc) of {enough, NResultAcc} -> - maybe_collect_total_from_tail_nodes(Tail, NQueryState, NResultAcc); + FQueryState = maybe_collect_total_from_tail_nodes(Tail, NQueryState), + FComplete = Complete andalso Tail =:= [], + finalize_query(NResultAcc, mark_complete(FQueryState, FComplete)); + {more, NResultAcc} when not Complete -> + do_cluster_query(Nodes, NQueryState, NResultAcc); + {more, NResultAcc} when Tail =/= [] -> + do_cluster_query(Tail, reset_query_state(NQueryState), NResultAcc); {more, NResultAcc} -> - NextNodes = - case NQueryState of - #{continuation := ?FRESH_SELECT} -> Tail; - _ -> Nodes - end, - do_cluster_query(NextNodes, NQueryState, NResultAcc) + finalize_query(NResultAcc, NQueryState) end end. -maybe_collect_total_from_tail_nodes([], _QueryState, ResultAcc) -> - ResultAcc; -maybe_collect_total_from_tail_nodes(Nodes, QueryState, ResultAcc) -> - case counting_total_fun(QueryState) of - false -> - ResultAcc; - _Fun -> - collect_total_from_tail_nodes(Nodes, QueryState, ResultAcc) - end. +maybe_collect_total_from_tail_nodes([], QueryState) -> + QueryState; +maybe_collect_total_from_tail_nodes(Nodes, QueryState = #{total := _}) -> + collect_total_from_tail_nodes(Nodes, QueryState); +maybe_collect_total_from_tail_nodes(_Nodes, QueryState) -> + QueryState. -collect_total_from_tail_nodes(Nodes, QueryState, ResultAcc = #{total := TotalAcc}) -> +collect_total_from_tail_nodes(Nodes, QueryState = #{total := TotalAcc}) -> %% XXX: badfun risk? if the FuzzyFun is an anonumous func in local node case rpc:multicall(Nodes, ?MODULE, apply_total_query, [QueryState], ?LONG_QUERY_TIMEOUT) of {_, [Node | _]} -> @@ -257,7 +214,8 @@ collect_total_from_tail_nodes(Nodes, QueryState, ResultAcc = #{total := TotalAcc [{Node, {badrpc, Reason}} | _] -> {error, Node, {badrpc, Reason}}; [] -> - ResultAcc#{total => ResL ++ TotalAcc} + NTotalAcc = maps:merge(TotalAcc, maps:from_list(ResL)), + QueryState#{total := NTotalAcc} end end. @@ -266,13 +224,14 @@ collect_total_from_tail_nodes(Nodes, QueryState, ResultAcc = #{total := TotalAcc %%-------------------------------------------------------------------- %% QueryState :: -%% #{continuation := ets:continuation(), +%% #{continuation => ets:continuation(), %% page := pos_integer(), %% limit := pos_integer(), -%% total := [{node(), non_neg_integer()}], +%% total => #{node() => non_neg_integer()}, %% table := atom(), -%% qs := {Qs, Fuzzy} %% parsed query params -%% msfun := query_to_match_spec_fun() +%% qs := {Qs, Fuzzy}, %% parsed query params +%% msfun := query_to_match_spec_fun(), +%% complete := boolean() %% } init_query_state(Tab, QString, MsFun, _Meta = #{page := Page, limit := Limit}) -> #{match_spec := Ms, fuzzy_fun := FuzzyFun} = erlang:apply(MsFun, [Tab, QString]), @@ -285,17 +244,31 @@ init_query_state(Tab, QString, MsFun, _Meta = #{page := Page, limit := Limit}) - true = is_list(Args), {type, external} = erlang:fun_info(NamedFun, type) end, - #{ + QueryState = #{ page => Page, limit => Limit, table => Tab, qs => QString, msfun => MsFun, - mactch_spec => Ms, + match_spec => Ms, fuzzy_fun => FuzzyFun, - total => [], - continuation => ?FRESH_SELECT - }. 
+ complete => false + }, + case counting_total_fun(QueryState) of + false -> + QueryState; + Fun when is_function(Fun) -> + QueryState#{total => #{}} + end. + +reset_query_state(QueryState) -> + maps:remove(continuation, mark_complete(QueryState, false)). + +mark_complete(QueryState) -> + mark_complete(QueryState, true). + +mark_complete(QueryState, Complete) -> + QueryState#{complete => Complete}. %% @private This function is exempt from BPAPI do_query(Node, QueryState) when Node =:= node() -> @@ -318,47 +291,50 @@ do_select( Node, QueryState0 = #{ table := Tab, - mactch_spec := Ms, - fuzzy_fun := FuzzyFun, - continuation := Continuation, - limit := Limit + match_spec := Ms, + limit := Limit, + complete := false } ) -> QueryState = maybe_apply_total_query(Node, QueryState0), Result = - case Continuation of - ?FRESH_SELECT -> + case maps:get(continuation, QueryState, undefined) of + undefined -> ets:select(Tab, Ms, Limit); - _ -> + Continuation -> %% XXX: Repair is necessary because we pass Continuation back %% and forth through the nodes in the `do_cluster_query` ets:select(ets:repair_continuation(Continuation, Ms)) end, case Result of - '$end_of_table' -> - {[], QueryState#{continuation => ?FRESH_SELECT}}; + {Rows, '$end_of_table'} -> + NRows = maybe_apply_fuzzy_filter(Rows, QueryState), + {NRows, mark_complete(QueryState)}; {Rows, NContinuation} -> - NRows = - case FuzzyFun of - undefined -> - Rows; - {FilterFun, Args0} when is_function(FilterFun), is_list(Args0) -> - lists:filter( - fun(E) -> erlang:apply(FilterFun, [E | Args0]) end, - Rows - ) - end, - {NRows, QueryState#{continuation => NContinuation}} + NRows = maybe_apply_fuzzy_filter(Rows, QueryState), + {NRows, QueryState#{continuation => NContinuation}}; + '$end_of_table' -> + {[], mark_complete(QueryState)} end. -maybe_apply_total_query(Node, QueryState = #{total := TotalAcc}) -> - case proplists:get_value(Node, TotalAcc, undefined) of - undefined -> - Total = apply_total_query(QueryState), - QueryState#{total := [{Node, Total} | TotalAcc]}; - _ -> - QueryState - end. +maybe_apply_fuzzy_filter(Rows, #{fuzzy_fun := undefined}) -> + Rows; +maybe_apply_fuzzy_filter(Rows, #{fuzzy_fun := {FilterFun, Args}}) -> + lists:filter( + fun(E) -> erlang:apply(FilterFun, [E | Args]) end, + Rows + ). + +maybe_apply_total_query(Node, QueryState = #{total := Acc}) -> + case Acc of + #{Node := _} -> + QueryState; + #{} -> + NodeTotal = apply_total_query(QueryState), + QueryState#{total := Acc#{Node => NodeTotal}} + end; +maybe_apply_total_query(_Node, QueryState = #{}) -> + QueryState. apply_total_query(QueryState = #{table := Tab}) -> case counting_total_fun(QueryState) of @@ -369,9 +345,7 @@ apply_total_query(QueryState = #{table := Tab}) -> Fun(Tab) end. -counting_total_fun(_QueryState = #{qs := {[], []}}) -> - fun(Tab) -> ets:info(Tab, size) end; -counting_total_fun(_QueryState = #{mactch_spec := Ms, fuzzy_fun := undefined}) -> +counting_total_fun(_QueryState = #{match_spec := Ms, fuzzy_fun := undefined}) -> %% XXX: Calculating the total number of data that match a certain %% condition under a large table is very expensive because the %% entire ETS table needs to be scanned. @@ -385,20 +359,23 @@ counting_total_fun(_QueryState = #{mactch_spec := Ms, fuzzy_fun := undefined}) - counting_total_fun(_QueryState = #{fuzzy_fun := FuzzyFun}) when FuzzyFun =/= undefined -> %% XXX: Calculating the total number for a fuzzy searching is very very expensive %% so it is not supported now - false. 
+ false; +counting_total_fun(_QueryState = #{qs := {[], []}}) -> + fun(Tab) -> ets:info(Tab, size) end. %% ResultAcc :: #{count := integer(), %% cursor := integer(), %% rows := [{node(), Rows :: list()}], -%% total := [{node() => integer()}] +%% overflow := boolean(), +%% hasnext => boolean() %% } init_query_result() -> - #{cursor => 0, count => 0, rows => [], total => []}. + #{cursor => 0, count => 0, rows => [], overflow => false}. accumulate_query_rows( Node, Rows, - _QueryState = #{page := Page, limit := Limit, total := TotalAcc}, + _QueryState = #{page := Page, limit := Limit}, ResultAcc = #{cursor := Cursor, count := Count, rows := RowsAcc} ) -> PageStart = (Page - 1) * Limit + 1, @@ -406,24 +383,35 @@ accumulate_query_rows( Len = length(Rows), case Cursor + Len of NCursor when NCursor < PageStart -> - {more, ResultAcc#{cursor => NCursor, total => TotalAcc}}; + {more, ResultAcc#{cursor => NCursor}}; NCursor when NCursor < PageEnd -> + SubRows = lists:nthtail(max(0, PageStart - Cursor - 1), Rows), {more, ResultAcc#{ cursor => NCursor, - count => Count + length(Rows), - total => TotalAcc, - rows => [{Node, Rows} | RowsAcc] + count => Count + length(SubRows), + rows => [{Node, SubRows} | RowsAcc] }}; NCursor when NCursor >= PageEnd -> SubRows = lists:sublist(Rows, Limit - Count), {enough, ResultAcc#{ cursor => NCursor, count => Count + length(SubRows), - total => TotalAcc, - rows => [{Node, SubRows} | RowsAcc] + rows => [{Node, SubRows} | RowsAcc], + % there are more rows than can fit in the page + overflow => (Limit - Count) < Len }} end. +finalize_query(Result = #{overflow := Overflow}, QueryState = #{complete := Complete}) -> + HasNext = Overflow orelse not Complete, + maybe_accumulate_totals(Result#{hasnext => HasNext}, QueryState). + +maybe_accumulate_totals(Result, #{total := TotalAcc}) -> + QueryTotal = maps:fold(fun(_Node, T, N) -> N + T end, 0, TotalAcc), + Result#{total => QueryTotal}; +maybe_accumulate_totals(Result, _QueryState) -> + Result. + %%-------------------------------------------------------------------- %% Internal Functions %%-------------------------------------------------------------------- @@ -520,16 +508,22 @@ is_fuzzy_key(<<"match_", _/binary>>) -> is_fuzzy_key(_) -> false. -format_query_result(_FmtFun, _Meta, Error = {error, _Node, _Reason}) -> +format_query_result(_FmtFun, _MetaIn, Error = {error, _Node, _Reason}) -> Error; format_query_result( - FmtFun, Meta, _ResultAcc = #{total := TotalAcc, rows := RowsAcc} + FmtFun, MetaIn, ResultAcc = #{hasnext := HasNext, rows := RowsAcc} ) -> - Total = lists:foldr(fun({_Node, T}, N) -> N + T end, 0, TotalAcc), + Meta = + case ResultAcc of + #{total := QueryTotal} -> + %% The `count` is used in the HTTP API to indicate the total number of + %% rows that can be read + MetaIn#{hasnext => HasNext, count => QueryTotal}; + #{} -> + MetaIn#{hasnext => HasNext} + end, #{ - %% The `count` is used in HTTP API to indicate the total number of - %% queries that can be read - meta => Meta#{count => Total}, + meta => Meta, data => lists:flatten( lists:foldl( fun({Node, Rows}, Acc) -> @@ -552,7 +546,7 @@ parse_pager_params(Params) -> Limit = b2i(limit(Params)), case Page > 0 andalso Limit > 0 of true -> - #{page => Page, limit => Limit, count => 0}; + #{page => Page, limit => Limit}; false -> false end.
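The page-window arithmetic in `accumulate_query_rows/4` above is easiest to check with concrete numbers (all invented): for page 2 with limit 10, the window covers rows 11..20, and a batch that straddles the window start has its leading rows dropped with `lists:nthtail/2`:

```erlang
Page = 2, Limit = 10,
PageStart = (Page - 1) * Limit + 1,  %% 11
Cursor = 8,                          %% 8 rows seen so far
Rows = [r9, r10, r11, r12, r13],     %% next batch from some node
%% Drop the part of the batch that falls before the page window:
Kept = lists:nthtail(max(0, PageStart - Cursor - 1), Rows).
%% Kept =:= [r11, r12, r13]
```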
@@ -572,7 +566,7 @@ to_type(V, TargetType) -> to_type_(V, atom) -> to_atom(V); to_type_(V, integer) -> to_integer(V); to_type_(V, timestamp) -> to_timestamp(V); -to_type_(V, ip) -> aton(V); +to_type_(V, ip) -> to_ip(V); to_type_(V, ip_port) -> to_ip_port(V); to_type_(V, _) -> V. @@ -591,14 +585,16 @@ to_timestamp(I) when is_integer(I) -> to_timestamp(B) when is_binary(B) -> binary_to_integer(B). -aton(B) when is_binary(B) -> - list_to_tuple([binary_to_integer(T) || T <- re:split(B, "[.]")]). +to_ip(IP0) when is_binary(IP0) -> + ensure_ok(inet:parse_address(binary_to_list(IP0))). to_ip_port(IPAddress) -> - [IP0, Port0] = string:tokens(binary_to_list(IPAddress), ":"), - {ok, IP} = inet:parse_address(IP0), - Port = list_to_integer(Port0), - {IP, Port}. + ensure_ok(emqx_schema:to_ip_port(IPAddress)). + +ensure_ok({ok, V}) -> + V; +ensure_ok({error, _R} = E) -> + throw(E). b2i(Bin) when is_binary(Bin) -> binary_to_integer(Bin); @@ -612,40 +608,115 @@ b2i(Any) -> -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). -params2qs_test() -> +params2qs_test_() -> QSchema = [ {<<"str">>, binary}, {<<"int">>, integer}, + {<<"binatom">>, atom}, {<<"atom">>, atom}, {<<"ts">>, timestamp}, {<<"gte_range">>, integer}, {<<"lte_range">>, integer}, {<<"like_fuzzy">>, binary}, - {<<"match_topic">>, binary} + {<<"match_topic">>, binary}, + {<<"ip">>, ip}, + {<<"ip_port">>, ip_port} ], QString = [ {<<"str">>, <<"abc">>}, {<<"int">>, <<"123">>}, - {<<"atom">>, <<"connected">>}, + {<<"binatom">>, <<"connected">>}, + {<<"atom">>, ok}, {<<"ts">>, <<"156000">>}, {<<"gte_range">>, <<"1">>}, {<<"lte_range">>, <<"5">>}, {<<"like_fuzzy">>, <<"user">>}, - {<<"match_topic">>, <<"t/#">>} + {<<"match_topic">>, <<"t/#">>}, + {<<"ip">>, <<"127.0.0.1">>}, + {<<"ip_port">>, <<"127.0.0.1:8888">>} ], ExpectedQs = [ {str, '=:=', <<"abc">>}, {int, '=:=', 123}, - {atom, '=:=', connected}, + {binatom, '=:=', connected}, + {atom, '=:=', ok}, {ts, '=:=', 156000}, - {range, '>=', 1, '=<', 5} + {range, '>=', 1, '=<', 5}, + {ip, '=:=', {127, 0, 0, 1}}, + {ip_port, '=:=', {{127, 0, 0, 1}, 8888}} ], FuzzyNQString = [ {fuzzy, like, <<"user">>}, {topic, match, <<"t/#">>} ], - ?assertEqual({7, {ExpectedQs, FuzzyNQString}}, parse_qstring(QString, QSchema)), - {0, {[], []}} = parse_qstring([{not_a_predefined_params, val}], QSchema). + [ + ?_assertEqual({10, {ExpectedQs, FuzzyNQString}}, parse_qstring(QString, QSchema)), + ?_assertEqual({0, {[], []}}, parse_qstring([{not_a_predefined_params, val}], QSchema)), + ?_assertEqual( + {1, {[{ip, '=:=', {0, 0, 0, 0, 0, 0, 0, 1}}], []}}, + parse_qstring([{<<"ip">>, <<"::1">>}], QSchema) + ), + ?_assertEqual( + {1, {[{ip_port, '=:=', {{0, 0, 0, 0, 0, 0, 0, 1}, 8888}}], []}}, + parse_qstring([{<<"ip_port">>, <<"::1:8888">>}], QSchema) + ), + ?_assertThrow( + {bad_value_type, {<<"ip">>, ip, <<"helloworld">>}}, + parse_qstring([{<<"ip">>, <<"helloworld">>}], QSchema) + ), + ?_assertThrow( + {bad_value_type, {<<"ip_port">>, ip_port, <<"127.0.0.1">>}}, + parse_qstring([{<<"ip_port">>, <<"127.0.0.1">>}], QSchema) + ), + ?_assertThrow( + {bad_value_type, {<<"ip_port">>, ip_port, <<"helloworld:abcd">>}}, + parse_qstring([{<<"ip_port">>, <<"helloworld:abcd">>}], QSchema) + ) + ]. +paginate_test_format(Row) -> + Row. 
+ +paginate_test_() -> + _ = ets:new(?MODULE, [named_table]), + Size = 1000, + MyLimit = 10, + ets:insert(?MODULE, [{I, foo} || I <- lists:seq(1, Size)]), + DefaultLimit = emqx_mgmt:default_row_limit(), + NoParamsResult = paginate(?MODULE, #{}, {?MODULE, paginate_test_format}), + PaginateResults = [ + paginate( + ?MODULE, #{<<"page">> => I, <<"limit">> => MyLimit}, {?MODULE, paginate_test_format} + ) + || I <- lists:seq(1, floor(Size / MyLimit)) + ], + [ + ?_assertMatch( + #{meta := #{count := Size, page := 1, limit := DefaultLimit}}, NoParamsResult + ), + ?_assertEqual(DefaultLimit, length(maps:get(data, NoParamsResult))), + ?_assertEqual( + #{data => [], meta => #{count => Size, limit => DefaultLimit, page => 100}}, + paginate(?MODULE, #{<<"page">> => <<"100">>}, {?MODULE, paginate_test_format}) + ) + ] ++ assert_paginate_results(PaginateResults, Size, MyLimit). + +assert_paginate_results(Results, Size, Limit) -> + AllData = lists:flatten([Data || #{data := Data} <- Results]), + [ + begin + Result = lists:nth(I, Results), + [ + ?_assertMatch(#{meta := #{count := Size, limit := Limit, page := I}}, Result), + ?_assertEqual(Limit, length(maps:get(data, Result))) + ] + end + || I <- lists:seq(1, floor(Size / Limit)) + ] ++ + [ + ?_assertEqual(floor(Size / Limit), length(Results)), + ?_assertEqual(Size, length(AllData)), + ?_assertEqual(Size, sets:size(sets:from_list(AllData))) + ]. -endif. diff --git a/apps/emqx_management/src/emqx_mgmt_api_app.erl b/apps/emqx_management/src/emqx_mgmt_api_api_keys.erl similarity index 99% rename from apps/emqx_management/src/emqx_mgmt_api_app.erl rename to apps/emqx_management/src/emqx_mgmt_api_api_keys.erl index d317bea70..c39b11273 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_app.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_api_keys.erl @@ -13,7 +13,7 @@ %% See the License for the specific language governing permissions and %% limitations under the License. %%-------------------------------------------------------------------- --module(emqx_mgmt_api_app). +-module(emqx_mgmt_api_api_keys). -behaviour(minirest_api). diff --git a/apps/emqx_management/src/emqx_mgmt_api_banned.erl b/apps/emqx_management/src/emqx_mgmt_api_banned.erl index 0a5cc3afe..508cf7d07 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_banned.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_banned.erl @@ -165,7 +165,7 @@ banned(post, #{body := Body}) -> {ok, Banned} -> {200, format(Banned)}; {error, {already_exist, Old}} -> - OldBannedFormat = emqx_json:encode(format(Old)), + OldBannedFormat = emqx_utils_json:encode(format(Old)), {400, 'ALREADY_EXISTS', OldBannedFormat} end end. diff --git a/apps/emqx_management/src/emqx_mgmt_api_clients.erl b/apps/emqx_management/src/emqx_mgmt_api_clients.erl index bf025bfc7..681c851bf 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_clients.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_clients.erl @@ -65,7 +65,6 @@ {<<"ip_address">>, ip}, {<<"conn_state">>, atom}, {<<"clean_start">>, atom}, - {<<"proto_name">>, binary}, {<<"proto_ver">>, integer}, {<<"like_clientid">>, binary}, {<<"like_username">>, binary}, @@ -77,9 +76,10 @@ -define(FORMAT_FUN, {?MODULE, format_channel_info}). --define(CLIENT_ID_NOT_FOUND, - <<"{\"code\": \"RESOURCE_NOT_FOUND\", \"reason\": \"Client id not found\"}">> -). +-define(CLIENTID_NOT_FOUND, #{ + code => 'CLIENTID_NOT_FOUND', + message => <<"Client ID not found">> +}). api_spec() -> emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true, translate_body => true}). 
@@ -145,14 +145,6 @@ schema("/clients") -> required => false, description => <<"Whether the client uses a new session">> })}, - {proto_name, - hoconsc:mk(hoconsc:enum(['MQTT', 'CoAP', 'LwM2M', 'MQTT-SN']), #{ - in => query, - required => false, - description => - <<"Client protocol name, ", - "the possible values are MQTT,CoAP,LwM2M,MQTT-SN">> - })}, {proto_ver, hoconsc:mk(binary(), #{ in => query, @@ -228,7 +220,7 @@ schema("/clients/:clientid") -> responses => #{ 200 => hoconsc:mk(hoconsc:ref(?MODULE, client), #{}), 404 => emqx_dashboard_swagger:error_codes( - ['CLIENTID_NOT_FOUND'], <<"Client id not found">> + ['CLIENTID_NOT_FOUND'], <<"Client ID not found">> ) } }, @@ -241,7 +233,7 @@ schema("/clients/:clientid") -> responses => #{ 204 => <<"Kick out client successfully">>, 404 => emqx_dashboard_swagger:error_codes( - ['CLIENTID_NOT_FOUND'], <<"Client id not found">> + ['CLIENTID_NOT_FOUND'], <<"Client ID not found">> ) } } @@ -256,7 +248,7 @@ schema("/clients/:clientid/authorization/cache") -> responses => #{ 200 => hoconsc:mk(hoconsc:ref(?MODULE, authz_cache), #{}), 404 => emqx_dashboard_swagger:error_codes( - ['CLIENTID_NOT_FOUND'], <<"Client id not found">> + ['CLIENTID_NOT_FOUND'], <<"Client ID not found">> ) } }, @@ -265,9 +257,9 @@ schema("/clients/:clientid/authorization/cache") -> tags => ?TAGS, parameters => [{clientid, hoconsc:mk(binary(), #{in => path})}], responses => #{ - 204 => <<"Kick out client successfully">>, + 204 => <<"Clean client authz cache successfully">>, 404 => emqx_dashboard_swagger:error_codes( - ['CLIENTID_NOT_FOUND'], <<"Client id not found">> + ['CLIENTID_NOT_FOUND'], <<"Client ID not found">> ) } } @@ -284,7 +276,7 @@ schema("/clients/:clientid/subscriptions") -> hoconsc:array(hoconsc:ref(emqx_mgmt_api_subscriptions, subscription)), #{} ), 404 => emqx_dashboard_swagger:error_codes( - ['CLIENTID_NOT_FOUND'], <<"Client id not found">> + ['CLIENTID_NOT_FOUND'], <<"Client ID not found">> ) } } @@ -300,7 +292,7 @@ schema("/clients/:clientid/subscribe") -> responses => #{ 200 => hoconsc:ref(emqx_mgmt_api_subscriptions, subscription), 404 => emqx_dashboard_swagger:error_codes( - ['CLIENTID_NOT_FOUND'], <<"Client id not found">> + ['CLIENTID_NOT_FOUND'], <<"Client ID not found">> ) } } @@ -316,7 +308,7 @@ schema("/clients/:clientid/subscribe/bulk") -> responses => #{ 200 => hoconsc:array(hoconsc:ref(emqx_mgmt_api_subscriptions, subscription)), 404 => emqx_dashboard_swagger:error_codes( - ['CLIENTID_NOT_FOUND'], <<"Client id not found">> + ['CLIENTID_NOT_FOUND'], <<"Client ID not found">> ) } } @@ -332,7 +324,7 @@ schema("/clients/:clientid/unsubscribe") -> responses => #{ 204 => <<"Unsubscribe OK">>, 404 => emqx_dashboard_swagger:error_codes( - ['CLIENTID_NOT_FOUND'], <<"Client id not found">> + ['CLIENTID_NOT_FOUND'], <<"Client ID not found">> ) } } @@ -348,7 +340,7 @@ schema("/clients/:clientid/unsubscribe/bulk") -> responses => #{ 204 => <<"Unsubscribe OK">>, 404 => emqx_dashboard_swagger:error_codes( - ['CLIENTID_NOT_FOUND'], <<"Client id not found">> + ['CLIENTID_NOT_FOUND'], <<"Client ID not found">> ) } } @@ -364,7 +356,7 @@ schema("/clients/:clientid/keepalive") -> responses => #{ 200 => hoconsc:mk(hoconsc:ref(?MODULE, client), #{}), 404 => emqx_dashboard_swagger:error_codes( - ['CLIENTID_NOT_FOUND'], <<"Client id not found">> + ['CLIENTID_NOT_FOUND'], <<"Client ID not found">> ) } } @@ -558,8 +550,8 @@ fields(keepalive) -> ]; fields(subscribe) -> [ - {topic, hoconsc:mk(binary(), #{desc => <<"Topic">>})}, - {qos, hoconsc:mk(emqx_schema:qos(), #{desc => 
<<"QoS">>})}, + {topic, hoconsc:mk(binary(), #{required => true, desc => <<"Topic">>})}, + {qos, hoconsc:mk(emqx_schema:qos(), #{default => 0, desc => <<"QoS">>})}, {nl, hoconsc:mk(integer(), #{default => 0, desc => <<"No Local">>})}, {rap, hoconsc:mk(integer(), #{default => 0, desc => <<"Retain as Published">>})}, {rh, hoconsc:mk(integer(), #{default => 0, desc => <<"Retain Handling">>})} @@ -606,6 +598,8 @@ unsubscribe_batch(post, #{bindings := #{clientid := ClientID}, body := TopicInfo subscriptions(get, #{bindings := #{clientid := ClientID}}) -> case emqx_mgmt:list_client_subscriptions(ClientID) of + {error, not_found} -> + {404, ?CLIENTID_NOT_FOUND}; [] -> {200, []}; {Node, Subs} -> @@ -630,7 +624,7 @@ set_keepalive(put, #{bindings := #{clientid := ClientID}, body := Body}) -> {ok, Interval} -> case emqx_mgmt:set_keepalive(emqx_mgmt_util:urldecode(ClientID), Interval) of ok -> lookup(#{clientid => ClientID}); - {error, not_found} -> {404, ?CLIENT_ID_NOT_FOUND}; + {error, not_found} -> {404, ?CLIENTID_NOT_FOUND}; {error, Reason} -> {400, #{code => 'PARAMS_ERROR', message => Reason}} end end. @@ -650,7 +644,7 @@ list_clients(QString) -> fun ?MODULE:format_channel_info/2 ); Node0 -> - case emqx_misc:safe_to_existing_atom(Node0) of + case emqx_utils:safe_to_existing_atom(Node0) of {ok, Node1} -> QStringWithoutNode = maps:without([<<"node">>], QString), emqx_mgmt_api:node_query( @@ -678,15 +672,15 @@ list_clients(QString) -> lookup(#{clientid := ClientID}) -> case emqx_mgmt:lookup_client({clientid, ClientID}, ?FORMAT_FUN) of [] -> - {404, ?CLIENT_ID_NOT_FOUND}; + {404, ?CLIENTID_NOT_FOUND}; ClientInfo -> {200, hd(ClientInfo)} end. kickout(#{clientid := ClientID}) -> - case emqx_mgmt:kickout_client({ClientID, ?FORMAT_FUN}) of + case emqx_mgmt:kickout_client(ClientID) of {error, not_found} -> - {404, ?CLIENT_ID_NOT_FOUND}; + {404, ?CLIENTID_NOT_FOUND}; _ -> {204} end. @@ -694,7 +688,7 @@ kickout(#{clientid := ClientID}) -> get_authz_cache(#{clientid := ClientID}) -> case emqx_mgmt:list_authz_cache(ClientID) of {error, not_found} -> - {404, ?CLIENT_ID_NOT_FOUND}; + {404, ?CLIENTID_NOT_FOUND}; {error, Reason} -> Message = list_to_binary(io_lib:format("~p", [Reason])), {500, #{code => <<"UNKNOW_ERROR">>, message => Message}}; @@ -708,7 +702,7 @@ clean_authz_cache(#{clientid := ClientID}) -> ok -> {204}; {error, not_found} -> - {404, ?CLIENT_ID_NOT_FOUND}; + {404, ?CLIENTID_NOT_FOUND}; {error, Reason} -> Message = list_to_binary(io_lib:format("~p", [Reason])), {500, #{code => <<"UNKNOW_ERROR">>, message => Message}} @@ -718,7 +712,7 @@ subscribe(#{clientid := ClientID, topic := Topic} = Sub) -> Opts = maps:with([qos, nl, rap, rh], Sub), case do_subscribe(ClientID, Topic, Opts) of {error, channel_not_found} -> - {404, ?CLIENT_ID_NOT_FOUND}; + {404, ?CLIENTID_NOT_FOUND}; {error, Reason} -> Message = list_to_binary(io_lib:format("~p", [Reason])), {500, #{code => <<"UNKNOW_ERROR">>, message => Message}}; @@ -727,21 +721,24 @@ subscribe(#{clientid := ClientID, topic := Topic} = Sub) -> end. subscribe_batch(#{clientid := ClientID, topics := Topics}) -> - case lookup(#{clientid => ClientID}) of - {200, _} -> + %% We use emqx_channel instead of emqx_channel_info (used by the emqx_mgmt:lookup_client/2), + %% as the emqx_channel_info table will only be populated after the hook `client.connected` + %% has returned. So if one want to subscribe topics in this hook, it will fail. 
+ case ets:lookup(emqx_channel, ClientID) of + [] -> + {404, ?CLIENTID_NOT_FOUND}; + _ -> ArgList = [ [ClientID, Topic, maps:with([qos, nl, rap, rh], Sub)] || #{topic := Topic} = Sub <- Topics ], - {200, emqx_mgmt_util:batch_operation(?MODULE, do_subscribe, ArgList)}; - {404, ?CLIENT_ID_NOT_FOUND} -> - {404, ?CLIENT_ID_NOT_FOUND} + {200, emqx_mgmt_util:batch_operation(?MODULE, do_subscribe, ArgList)} end. unsubscribe(#{clientid := ClientID, topic := Topic}) -> case do_unsubscribe(ClientID, Topic) of {error, channel_not_found} -> - {404, ?CLIENT_ID_NOT_FOUND}; + {404, ?CLIENTID_NOT_FOUND}; {unsubscribe, [{Topic, #{}}]} -> {204} end. @@ -751,8 +748,8 @@ unsubscribe_batch(#{clientid := ClientID, topics := Topics}) -> {200, _} -> _ = emqx_mgmt:unsubscribe_batch(ClientID, Topics), {204}; - {404, ?CLIENT_ID_NOT_FOUND} -> - {404, ?CLIENT_ID_NOT_FOUND} + {404, NotFound} -> + {404, NotFound} end. %%-------------------------------------------------------------------- @@ -830,8 +827,6 @@ ms(ip_address, X) -> #{conninfo => #{peername => {X, '_'}}}; ms(clean_start, X) -> #{conninfo => #{clean_start => X}}; -ms(proto_name, X) -> - #{conninfo => #{proto_name => X}}; ms(proto_ver, X) -> #{conninfo => #{proto_ver => X}}; ms(connected_at, X) -> @@ -865,8 +860,8 @@ format_channel_info(ChannInfo = {_, _ClientInfo, _ClientStats}) -> format_channel_info(WhichNode, {_, ClientInfo0, ClientStats}) -> Node = maps:get(node, ClientInfo0, WhichNode), - ClientInfo1 = emqx_map_lib:deep_remove([conninfo, clientid], ClientInfo0), - ClientInfo2 = emqx_map_lib:deep_remove([conninfo, username], ClientInfo1), + ClientInfo1 = emqx_utils_maps:deep_remove([conninfo, clientid], ClientInfo0), + ClientInfo2 = emqx_utils_maps:deep_remove([conninfo, username], ClientInfo1), StatsMap = maps:without( [memory, next_pkt_id, total_heap_size], maps:from_list(ClientStats) @@ -879,7 +874,8 @@ format_channel_info(WhichNode, {_, ClientInfo0, ClientStats}) -> ClientInfoMap2 = maps:put(node, Node, ClientInfoMap1), ClientInfoMap3 = maps:put(ip_address, IpAddress, ClientInfoMap2), ClientInfoMap4 = maps:put(port, Port, ClientInfoMap3), - ClientInfoMap = maps:put(connected, Connected, ClientInfoMap4), + ClientInfoMap5 = convert_expiry_interval_unit(ClientInfoMap4), + ClientInfoMap = maps:put(connected, Connected, ClientInfoMap5), RemoveList = [ @@ -949,6 +945,9 @@ peername_dispart({Addr, Port}) -> %% PortBinary = integer_to_binary(Port), {AddrBinary, Port}. +convert_expiry_interval_unit(ClientInfoMap = #{expiry_interval := Interval}) -> + ClientInfoMap#{expiry_interval := Interval div 1000}. + format_authz_cache({{PubSub, Topic}, {AuthzResult, Timestamp}}) -> #{ access => PubSub, @@ -959,4 +958,4 @@ format_authz_cache({{PubSub, Topic}, {AuthzResult, Timestamp}}) -> to_topic_info(Data) -> M = maps:with([<<"topic">>, <<"qos">>, <<"nl">>, <<"rap">>, <<"rh">>], Data), - emqx_map_lib:safe_atom_key_map(M). + emqx_utils_maps:safe_atom_key_map(M). diff --git a/apps/emqx_management/src/emqx_mgmt_api_cluster.erl b/apps/emqx_management/src/emqx_mgmt_api_cluster.erl index 37e94200e..e74b6c362 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_cluster.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_cluster.erl @@ -101,7 +101,7 @@ cluster_info(get, _) -> ClusterName = application:get_env(ekka, cluster_name, emqxcl), Info = #{ name => ClusterName, - nodes => mria_mnesia:running_nodes(), + nodes => emqx:running_nodes(), self => node() }, {200, Info}. 
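Reviewer note on the subscribe_batch change above: the comment about the two ETS tables is the crux. A hypothetical illustration of the timing difference that comment describes (table names as in this patch; ClientId is a placeholder bound elsewhere):

    %% while the `client.connected` hook for ClientId is still running:
    true = ets:member(emqx_channel, ClientId),
    %% the info table is only populated after the hook returns:
    false = ets:member(emqx_channel_info, ClientId).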
diff --git a/apps/emqx_management/src/emqx_mgmt_api_configs.erl b/apps/emqx_management/src/emqx_mgmt_api_configs.erl index eec5793d0..c8b21449e 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_configs.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_configs.erl @@ -28,43 +28,25 @@ config_reset/3, configs/3, get_full_config/0, - global_zone_configs/3 + global_zone_configs/3, + limiter/3 ]). --export([gen_schema/1]). - -define(PREFIX, "/configs/"). -define(PREFIX_RESET, "/configs_reset/"). -define(ERR_MSG(MSG), list_to_binary(io_lib:format("~p", [MSG]))). -define(OPTS, #{rawconf_with_defaults => true, override_to => cluster}). -define(TAGS, ["Configs"]). --define(EXCLUDES, - [ - <<"exhook">>, - <<"gateway">>, - <<"plugins">>, - <<"bridges">>, - <<"rule_engine">>, - <<"authorization">>, - <<"authentication">>, - <<"rpc">>, - <<"db">>, - <<"connectors">>, - <<"slow_subs">>, - <<"psk_authentication">>, - <<"topic_metrics">>, - <<"rewrite">>, - <<"auto_subscribe">>, - <<"retainer">>, - <<"statsd">>, - <<"delayed">>, - <<"event_message">>, - <<"prometheus">>, - <<"telemetry">>, - <<"listeners">> - ] ++ global_zone_roots() -). +-define(ROOT_KEYS, [ + <<"dashboard">>, + <<"alarm">>, + <<"sys_topics">>, + <<"sysmon">>, + <<"log">>, + <<"persistent_session_store">>, + <<"zones">> +]). api_spec() -> emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true}). @@ -75,7 +57,8 @@ paths() -> [ "/configs", "/configs_reset/:rootname", - "/configs/global_zone" + "/configs/global_zone", + "/configs/limiter" ] ++ lists:map(fun({Name, _Type}) -> ?PREFIX ++ binary_to_list(Name) end, config_list()). @@ -165,6 +148,28 @@ schema("/configs/global_zone") -> } } }; +schema("/configs/limiter") -> + #{ + 'operationId' => limiter, + get => #{ + tags => ?TAGS, + description => <<"Get the node-level limiter configs">>, + responses => #{ + 200 => hoconsc:mk(hoconsc:ref(emqx_limiter_schema, limiter)), + 404 => emqx_dashboard_swagger:error_codes(['NOT_FOUND'], <<"config not found">>) + } + }, + put => #{ + tags => ?TAGS, + description => <<"Update the node-level limiter configs">>, + 'requestBody' => hoconsc:mk(hoconsc:ref(emqx_limiter_schema, limiter)), + responses => #{ + 200 => hoconsc:mk(hoconsc:ref(emqx_limiter_schema, limiter)), + 400 => emqx_dashboard_swagger:error_codes(['UPDATE_FAILED']), + 403 => emqx_dashboard_swagger:error_codes(['UPDATE_FAILED']) + } + } + }; schema(Path) -> {RootKey, {_Root, Schema}} = find_schema(Path), #{ @@ -212,43 +217,41 @@ fields(Field) -> %%%============================================================================================== %% HTTP API Callbacks config(get, _Params, Req) -> + [Path] = conf_path(Req), + {200, get_raw_config(Path)}; +config(put, #{body := NewConf}, Req) -> Path = conf_path(Req), - {ok, Conf} = emqx_map_lib:deep_find(Path, get_full_config()), - {200, Conf}; -config(put, #{body := Body}, Req) -> - Path = conf_path(Req), - case emqx_conf:update(Path, Body, ?OPTS) of + case emqx_conf:update(Path, NewConf, ?OPTS) of {ok, #{raw_config := RawConf}} -> {200, RawConf}; - {error, {permission_denied, Reason}} -> - {403, #{code => 'UPDATE_FAILED', message => Reason}}; {error, Reason} -> {400, #{code => 'UPDATE_FAILED', message => ?ERR_MSG(Reason)}} end. 
global_zone_configs(get, _Params, _Req) -> - Paths = global_zone_roots(), - Zones = lists:foldl( - fun(Path, Acc) -> maps:merge(Acc, get_config_with_default(Path)) end, - #{}, - Paths - ), - {200, Zones}; + {200, get_zones()}; global_zone_configs(put, #{body := Body}, _Req) -> + PrevZones = get_zones(), Res = maps:fold( fun(Path, Value, Acc) -> - case emqx_conf:update([Path], Value, ?OPTS) of - {ok, #{raw_config := RawConf}} -> - Acc#{Path => RawConf}; - {error, Reason} -> - ?SLOG(error, #{ - msg => "update global zone failed", - reason => Reason, - path => Path, - value => Value - }), - Acc + PrevValue = maps:get(Path, PrevZones), + case Value =/= PrevValue of + true -> + case emqx_conf:update([Path], Value, ?OPTS) of + {ok, #{raw_config := RawConf}} -> + Acc#{Path => RawConf}; + {error, Reason} -> + ?SLOG(error, #{ + msg => "update global zone failed", + reason => Reason, + path => Path, + value => Value + }), + Acc + end; + false -> + Acc#{Path => Value} end end, #{}, @@ -265,8 +268,6 @@ config_reset(post, _Params, Req) -> case emqx_conf:reset(Path, ?OPTS) of {ok, _} -> {200}; - {error, {permission_denied, Reason}} -> - {403, #{code => 'REST_FAILED', message => Reason}}; {error, no_default_value} -> {400, #{code => 'NO_DEFAULT_VALUE', message => <<"No Default Value.">>}}; {error, Reason} -> @@ -277,7 +278,7 @@ configs(get, Params, _Req) -> QS = maps:get(query_string, Params, #{}), Node = maps:get(<<"node">>, QS, node()), case - lists:member(Node, mria_mnesia:running_nodes()) andalso + lists:member(Node, emqx:running_nodes()) andalso emqx_management_proto_v2:get_full_config(Node) of false -> @@ -290,19 +291,52 @@ configs(get, Params, _Req) -> {200, Res} end. +limiter(get, _Params, _Req) -> + {200, format_limiter_config(get_raw_config(limiter))}; +limiter(put, #{body := NewConf}, _Req) -> + case emqx_conf:update([limiter], NewConf, ?OPTS) of + {ok, #{raw_config := RawConf}} -> + {200, format_limiter_config(RawConf)}; + {error, {permission_denied, Reason}} -> + {403, #{code => 'UPDATE_FAILED', message => Reason}}; + {error, Reason} -> + {400, #{code => 'UPDATE_FAILED', message => ?ERR_MSG(Reason)}} + end. + +format_limiter_config(RawConf) -> + Shorts = lists:map(fun erlang:atom_to_binary/1, emqx_limiter_schema:short_paths()), + maps:with(Shorts, RawConf). + conf_path_reset(Req) -> <<"/api/v5", ?PREFIX_RESET, Path/binary>> = cowboy_req:path(Req), string:lexemes(Path, "/ "). get_full_config() -> emqx_config:fill_defaults( - maps:without( - ?EXCLUDES, + maps:with( + ?ROOT_KEYS, emqx:get_raw_config([]) ), #{obfuscate_sensitive_values => true} ). +get_raw_config(Path) -> + #{Path := Conf} = + emqx_config:fill_defaults( + #{Path => emqx:get_raw_config([Path])}, + #{obfuscate_sensitive_values => true} + ), + Conf. + +get_zones() -> + lists:foldl( + fun(Path, Acc) -> + maps:merge(Acc, get_config_with_default(Path)) + end, + #{}, + global_zone_roots() + ). + get_config_with_default(Path) -> emqx_config:fill_defaults(#{Path => emqx:get_raw_config([Path])}). @@ -315,43 +349,14 @@ conf_path_from_querystr(Req) -> config_list() -> Mod = emqx_conf:schema_module(), Roots = hocon_schema:roots(Mod), - lists:foldl(fun(Key, Acc) -> lists:keydelete(Key, 1, Acc) end, Roots, ?EXCLUDES). + lists:foldl(fun(Key, Acc) -> [lists:keyfind(Key, 1, Roots) | Acc] end, [], ?ROOT_KEYS). conf_path(Req) -> <<"/api/v5", ?PREFIX, Path/binary>> = cowboy_req:path(Req), string:lexemes(Path, "/ "). 
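Reviewer note on format_limiter_config/1 above: it narrows the raw limiter config to the short-path keys before returning it to the caller. A minimal sketch of the maps:with/2 filtering, under the assumption that max_conn_rate is one of the short paths (the real list comes from emqx_limiter_schema:short_paths/0):

    Shorts = [<<"max_conn_rate">>],  %% assumed; actually atom_to_binary over short_paths()
    Raw = #{<<"max_conn_rate">> => <<"1000/s">>, <<"client">> => #{}},
    #{<<"max_conn_rate">> := <<"1000/s">>} = maps:with(Shorts, Raw).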
-%% TODO: generate from hocon schema -gen_schema(Conf) when is_boolean(Conf) -> - with_default_value(#{type => boolean}, Conf); -gen_schema(Conf) when is_binary(Conf); is_atom(Conf) -> - with_default_value(#{type => string}, Conf); -gen_schema(Conf) when is_number(Conf) -> - with_default_value(#{type => number}, Conf); -gen_schema(Conf) when is_list(Conf) -> - case io_lib:printable_unicode_list(Conf) of - true -> - gen_schema(unicode:characters_to_binary(Conf)); - false -> - #{type => array, items => gen_schema(hd(Conf))} - end; -gen_schema(Conf) when is_map(Conf) -> - #{ - type => object, - properties => - maps:map(fun(_K, V) -> gen_schema(V) end, Conf) - }; -gen_schema(_Conf) -> - %% the conf is not of JSON supported type, it may have been converted - %% by the hocon schema - #{type => string}. - -with_default_value(Type, Value) -> - Type#{example => emqx_map_lib:binary_string(Value)}. - global_zone_roots() -> - lists:map(fun({K, _}) -> K end, global_zone_schema()). + lists:map(fun({K, _}) -> list_to_binary(K) end, global_zone_schema()). global_zone_schema() -> - Roots = hocon_schema:roots(emqx_zone_schema), - lists:map(fun({RootKey, {_Root, Schema}}) -> {RootKey, Schema} end, Roots). + emqx_zone_schema:zone_without_hidden(). diff --git a/apps/emqx_management/src/emqx_mgmt_api_key_schema.erl b/apps/emqx_management/src/emqx_mgmt_api_key_schema.erl new file mode 100644 index 000000000..556e4308f --- /dev/null +++ b/apps/emqx_management/src/emqx_mgmt_api_key_schema.erl @@ -0,0 +1,44 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- +-module(emqx_mgmt_api_key_schema). + +-include_lib("hocon/include/hoconsc.hrl"). + +-export([ + roots/0, + fields/1, + namespace/0, + desc/1 +]). + +namespace() -> api_key. +roots() -> ["api_key"]. + +fields("api_key") -> + [ + {bootstrap_file, + ?HOCON( + binary(), + #{ + desc => ?DESC(bootstrap_file), + required => false, + default => <<>> + } + )} + ]. + +desc("api_key") -> + ?DESC(api_key). 
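Reviewer note: the new emqx_mgmt_api_key_schema module exposes the bootstrap file location under the api_key config root. A sketch of how the value is read back (this is the lookup used later in this PR by emqx_mgmt_auth; the path itself is hypothetical):

    %% e.g. with api_key.bootstrap_file = "/etc/emqx/api_keys.bootstrap" configured:
    <<"/etc/emqx/api_keys.bootstrap">> = emqx:get_config([api_key, bootstrap_file], <<>>).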
diff --git a/apps/emqx_management/src/emqx_mgmt_api_listeners.erl b/apps/emqx_management/src/emqx_mgmt_api_listeners.erl index 7bf68ee4d..152ccc599 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_listeners.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_listeners.erl @@ -293,12 +293,14 @@ listeners_type() -> listeners_info(Opts) -> Listeners = hocon_schema:fields(emqx_schema, "listeners"), lists:map( - fun({Type, #{type := ?MAP(_Name, ?R_REF(Mod, Field))}}) -> - Fields0 = hocon_schema:fields(Mod, Field), + fun({ListenerType, Schema}) -> + Type = emqx_schema:get_tombstone_map_value_type(Schema), + ?R_REF(Mod, StructName) = Type, + Fields0 = hocon_schema:fields(Mod, StructName), Fields1 = lists:keydelete("authentication", 1, Fields0), Fields3 = required_bind(Fields1, Opts), - Ref = listeners_ref(Type, Opts), - TypeAtom = list_to_existing_atom(Type), + Ref = listeners_ref(ListenerType, Opts), + TypeAtom = list_to_existing_atom(ListenerType), #{ ref => ?R_REF(Ref), schema => [ @@ -390,7 +392,7 @@ crud_listeners_by_id(put, #{bindings := #{id := Id}, body := Body0}) -> undefined -> {404, #{code => 'BAD_LISTENER_ID', message => ?LISTENER_NOT_FOUND}}; PrevConf -> - MergeConfT = emqx_map_lib:deep_merge(PrevConf, Conf), + MergeConfT = emqx_utils_maps:deep_merge(PrevConf, Conf), MergeConf = emqx_listeners:ensure_override_limiter_conf(MergeConfT, Conf), case update(Path, MergeConf) of {ok, #{raw_config := _RawConf}} -> @@ -483,7 +485,7 @@ err_msg_str(Reason) -> io_lib:format("~p", [Reason]). list_listeners() -> - [list_listeners(Node) || Node <- mria_mnesia:running_nodes()]. + [list_listeners(Node) || Node <- emqx:running_nodes()]. list_listeners(Node) -> wrap_rpc(emqx_management_proto_v2:list_listeners(Node)). @@ -642,7 +644,7 @@ create(Path, Conf) -> wrap(emqx_conf:update(Path, {create, Conf}, ?OPTS(cluster))). ensure_remove(Path) -> - wrap(emqx_conf:remove(Path, ?OPTS(cluster))). + wrap(emqx_conf:tombstone(Path, ?OPTS(cluster))). wrap({error, {post_config_update, emqx_listeners, Reason}}) -> {error, Reason}; wrap({error, {pre_config_update, emqx_listeners, Reason}}) -> {error, Reason}; diff --git a/apps/emqx_management/src/emqx_mgmt_api_metrics.erl b/apps/emqx_management/src/emqx_mgmt_api_metrics.erl index 72b616fae..0fcc45d8e 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_metrics.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_metrics.erl @@ -59,7 +59,7 @@ metrics(get, #{query_string := Qs}) -> maps:from_list( emqx_mgmt:get_metrics(Node) ++ [{node, Node}] ) - || Node <- mria_mnesia:running_nodes() + || Node <- emqx:running_nodes() ], {200, Data} end. diff --git a/apps/emqx_management/src/emqx_mgmt_api_nodes.erl b/apps/emqx_management/src/emqx_mgmt_api_nodes.erl index 64ef3c1ef..ecf465f43 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_nodes.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_nodes.erl @@ -17,7 +17,6 @@ -behaviour(minirest_api). --include_lib("emqx/include/emqx.hrl"). -include_lib("typerefl/include/types.hrl"). -import(hoconsc, [mk/2, ref/1, ref/2, enum/1, array/1]). @@ -25,8 +24,6 @@ -define(NODE_METRICS_MODULE, emqx_mgmt_api_metrics). -define(NODE_STATS_MODULE, emqx_mgmt_api_stats). --define(SOURCE_ERROR, 'SOURCE_ERROR'). 
- %% Swagger specs from hocon schema -export([ api_spec/0, @@ -88,7 +85,7 @@ schema("/nodes/:node") -> ref(node_info), #{desc => <<"Get node info successfully">>} ), - 400 => node_error() + 404 => not_found() } } }; @@ -106,7 +103,7 @@ schema("/nodes/:node/metrics") -> ref(?NODE_METRICS_MODULE, node_metrics), #{desc => <<"Get node metrics successfully">>} ), - 400 => node_error() + 404 => not_found() } } }; @@ -124,7 +121,7 @@ schema("/nodes/:node/stats") -> ref(?NODE_STATS_MODULE, node_stats_data), #{desc => <<"Get node stats successfully">>} ), - 400 => node_error() + 404 => not_found() } } }. @@ -136,7 +133,7 @@ fields(node_name) -> [ {node, mk( - atom(), + binary(), #{ in => path, description => <<"Node name">>, @@ -159,18 +156,18 @@ fields(node_info) -> )}, {load1, mk( - string(), - #{desc => <<"CPU average load in 1 minute">>, example => "2.66"} + float(), + #{desc => <<"CPU average load in 1 minute">>, example => 2.66} )}, {load5, mk( - string(), - #{desc => <<"CPU average load in 5 minute">>, example => "2.66"} + float(), + #{desc => <<"CPU average load in 5 minutes">>, example => 2.66} )}, {load15, mk( - string(), - #{desc => <<"CPU average load in 15 minute">>, example => "2.66"} + float(), + #{desc => <<"CPU average load in 15 minutes">>, example => 2.66} )}, {max_fds, mk( @@ -250,74 +247,46 @@ nodes(get, _Params) -> list_nodes(#{}). node(get, #{bindings := #{node := NodeName}}) -> - get_node(NodeName). + emqx_utils_api:with_node(NodeName, to_ok_result_fun(fun get_node/1)). node_metrics(get, #{bindings := #{node := NodeName}}) -> - get_metrics(NodeName). + emqx_utils_api:with_node(NodeName, to_ok_result_fun(fun emqx_mgmt:get_metrics/1)). node_stats(get, #{bindings := #{node := NodeName}}) -> - get_stats(NodeName). + emqx_utils_api:with_node(NodeName, to_ok_result_fun(fun emqx_mgmt:get_stats/1)). %%-------------------------------------------------------------------- %% api apply list_nodes(#{}) -> - NodesInfo = [format(Node, NodeInfo) || {Node, NodeInfo} <- emqx_mgmt:list_nodes()], + NodesInfo = [format(NodeInfo) || {_Node, NodeInfo} <- emqx_mgmt:list_nodes()], {200, NodesInfo}. get_node(Node) -> - case emqx_mgmt:lookup_node(Node) of - {error, _} -> - {400, #{code => 'SOURCE_ERROR', message => <<"rpc_failed">>}}; - NodeInfo -> - {200, format(Node, NodeInfo)} - end. - -get_metrics(Node) -> - case emqx_mgmt:get_metrics(Node) of - {error, _} -> - {400, #{code => 'SOURCE_ERROR', message => <<"rpc_failed">>}}; - Metrics -> - {200, Metrics} - end. - -get_stats(Node) -> - case emqx_mgmt:get_stats(Node) of - {error, _} -> - {400, #{code => 'SOURCE_ERROR', message => <<"rpc_failed">>}}; - Stats -> - {200, Stats} - end. + format(emqx_mgmt:lookup_node(Node)). %%-------------------------------------------------------------------- %% internal function -format(_Node, Info = #{memory_total := Total, memory_used := Used}) -> - RootDir = list_to_binary(code:root_dir()), - LogPath = - case log_path() of - undefined -> - <<"log.file_handler.default.enable is false,only log to console">>; - Path -> - filename:join(RootDir, Path) - end, +format(Info = #{memory_total := Total, memory_used := Used}) -> Info#{ memory_total := emqx_mgmt_util:kmg(Total), - memory_used := emqx_mgmt_util:kmg(Used), - sys_path => RootDir, - log_path => LogPath - }. + memory_used := emqx_mgmt_util:kmg(Used) + }; +format(Info) when is_map(Info) -> + Info. -log_path() -> - Configs = logger:get_handler_config(), - get_log_path(Configs).
+to_ok_result({error, _} = Error) -> + Error; +to_ok_result({ok, _} = Ok) -> + Ok; +to_ok_result(Result) -> + {ok, Result}. -get_log_path([#{config := #{file := Path}} | _LoggerConfigs]) -> - filename:dirname(Path); -get_log_path([_LoggerConfig | LoggerConfigs]) -> - get_log_path(LoggerConfigs); -get_log_path([]) -> - undefined. +to_ok_result_fun(Fun) when is_function(Fun) -> + fun(Arg) -> + to_ok_result(Fun(Arg)) + end. -node_error() -> - emqx_dashboard_swagger:error_codes([?SOURCE_ERROR], <<"Node error">>). +not_found() -> + emqx_dashboard_swagger:error_codes(['NOT_FOUND'], <<"Node not found">>). diff --git a/apps/emqx_management/src/emqx_mgmt_api_plugins.erl b/apps/emqx_management/src/emqx_mgmt_api_plugins.erl index 28deac98d..92814d112 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_plugins.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_plugins.erl @@ -17,7 +17,6 @@ -behaviour(minirest_api). --include_lib("kernel/include/file.hrl"). -include_lib("typerefl/include/types.hrl"). -include_lib("emqx/include/logger.hrl"). %%-include_lib("emqx_plugins/include/emqx_plugins.hrl"). @@ -49,6 +48,9 @@ -define(NAME_RE, "^[A-Za-z]+[A-Za-z0-9-_.]*$"). -define(TAGS, [<<"Plugins">>]). +%% Plugin NameVsn must follow the pattern <app_name>-<vsn>, +%% app_name must be snake_case (no '-' allowed). +-define(VSN_WILDCARD, "-*.tar.gz"). namespace() -> "plugins". @@ -69,10 +71,10 @@ schema("/plugins") -> #{ 'operationId' => list_plugins, get => #{ + summary => <<"List all installed plugins">>, description => - "List all install plugins.
" "Plugins are launched in top-down order.
" - "Using `POST /plugins/{name}/move` to change the boot order.", + "Use `POST /plugins/{name}/move` to change the boot order.", tags => ?TAGS, responses => #{ 200 => hoconsc:array(hoconsc:ref(plugin)) @@ -83,8 +85,9 @@ schema("/plugins/install") -> #{ 'operationId' => upload_install, post => #{ + summary => <<"Install a new plugin">>, description => - "Install a plugin(plugin-vsn.tar.gz)." + "Upload a plugin tarball (plugin-vsn.tar.gz)." "Follow [emqx-plugin-template](https://github.com/emqx/emqx-plugin-template) " "to develop plugin.", tags => ?TAGS, @@ -113,7 +116,8 @@ schema("/plugins/:name") -> #{ 'operationId' => plugin, get => #{ - description => "Describe a plugin according `release.json` and `README.md`.", + summary => <<"Get a plugin description">>, + description => "Describs plugin according to its `release.json` and `README.md`.", tags => ?TAGS, parameters => [hoconsc:ref(name)], responses => #{ @@ -122,7 +126,8 @@ schema("/plugins/:name") -> } }, delete => #{ - description => "Uninstall a plugin package.", + summary => <<"Delete a plugin">>, + description => "Uninstalls a previously uploaded plugin package.", tags => ?TAGS, parameters => [hoconsc:ref(name)], responses => #{ @@ -135,6 +140,7 @@ schema("/plugins/:name/:action") -> #{ 'operationId' => update_plugin, put => #{ + summary => <<"Trigger action on an installed plugin">>, description => "start/stop a installed plugin.
" "- **start**: start the plugin.
" @@ -154,6 +160,7 @@ schema("/plugins/:name/move") -> #{ 'operationId' => update_boot_order, post => #{ + summary => <<"Move plugin within plugin hiearchy">>, description => "Setting the boot order of plugins.", tags => ?TAGS, parameters => [hoconsc:ref(name)], @@ -324,14 +331,13 @@ get_plugins() -> upload_install(post, #{body := #{<<"plugin">> := Plugin}}) when is_map(Plugin) -> [{FileName, Bin}] = maps:to_list(maps:without([type], Plugin)), %% File bin is too large, we use rpc:multicall instead of cluster_rpc:multicall - %% TODO what happens when a new node join in? - %% emqx_plugins_monitor should copy plugins from other core node when boot-up. - case emqx_plugins:describe(string:trim(FileName, trailing, ".tar.gz")) of + NameVsn = string:trim(FileName, trailing, ".tar.gz"), + case emqx_plugins:describe(NameVsn) of {error, #{error := "bad_info_file", return := {enoent, _}}} -> case emqx_plugins:parse_name_vsn(FileName) of {ok, AppName, _Vsn} -> AppDir = filename:join(emqx_plugins:install_dir(), AppName), - case filelib:wildcard(AppDir ++ "*.tar.gz") of + case filelib:wildcard(AppDir ++ ?VSN_WILDCARD) of [] -> do_install_package(FileName, Bin); OtherVsn -> @@ -346,6 +352,7 @@ upload_install(post, #{body := #{<<"plugin">> := Plugin}}) when is_map(Plugin) - }} end; {error, Reason} -> + emqx_plugins:delete_package(NameVsn), {400, #{ code => 'BAD_PLUGIN_INFO', message => iolist_to_binary([Reason, ":", FileName]) @@ -367,9 +374,24 @@ upload_install(post, #{}) -> do_install_package(FileName, Bin) -> %% TODO: handle bad nodes {[_ | _] = Res, []} = emqx_mgmt_api_plugins_proto_v1:install_package(FileName, Bin), - %% TODO: handle non-OKs - [] = lists:filter(fun(R) -> R =/= ok end, Res), - {200}. + case lists:filter(fun(R) -> R =/= ok end, Res) of + [] -> + {200}; + Filtered -> + %% crash if we have unexpected errors or results + [] = lists:filter( + fun + ({error, {failed, _}}) -> true; + ({error, _}) -> false + end, + Filtered + ), + {error, #{error := Reason}} = hd(Filtered), + {400, #{ + code => 'BAD_PLUGIN_INFO', + message => iolist_to_binary([Reason, ":", FileName]) + }} + end. plugin(get, #{bindings := #{name := Name}}) -> {Plugins, _} = emqx_mgmt_api_plugins_proto_v1:describe_package(Name), @@ -406,9 +428,18 @@ update_boot_order(post, #{bindings := #{name := Name}, body := Body}) -> %% For RPC upload_install/2 install_package(FileName, Bin) -> File = filename:join(emqx_plugins:install_dir(), FileName), + ok = filelib:ensure_dir(File), ok = file:write_file(File, Bin), PackageName = string:trim(FileName, trailing, ".tar.gz"), - emqx_plugins:ensure_installed(PackageName). + case emqx_plugins:ensure_installed(PackageName) of + {error, #{return := not_found}} = NotFound -> + NotFound; + {error, _Reason} = Error -> + _ = file:delete(File), + Error; + Result -> + Result + end. 
%% For RPC plugin get describe_package(Name) -> @@ -432,8 +463,8 @@ delete_package(Name) -> %% for RPC plugin update ensure_action(Name, start) -> - _ = emqx_plugins:ensure_enabled(Name), _ = emqx_plugins:ensure_started(Name), + _ = emqx_plugins:ensure_enabled(Name), ok; ensure_action(Name, stop) -> _ = emqx_plugins:ensure_stopped(Name), diff --git a/apps/emqx_management/src/emqx_mgmt_api_publish.erl b/apps/emqx_management/src/emqx_mgmt_api_publish.erl index 672de661b..ba486ab89 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_publish.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_publish.erl @@ -50,6 +50,7 @@ schema("/publish") -> #{ 'operationId' => publish, post => #{ + summary => <<"Publish a message">>, description => ?DESC(publish_api), tags => [<<"Publish">>], 'requestBody' => hoconsc:mk(hoconsc:ref(?MODULE, publish_message)), @@ -65,6 +66,7 @@ schema("/publish/bulk") -> #{ 'operationId' => publish_batch, post => #{ + summary => <<"Publish a batch of messages">>, description => ?DESC(publish_bulk_api), tags => [<<"Publish">>], 'requestBody' => hoconsc:mk(hoconsc:array(hoconsc:ref(?MODULE, publish_message)), #{}), @@ -104,9 +106,7 @@ fields(message) -> })}, {clientid, hoconsc:mk(binary(), #{ - desc => ?DESC(clientid), - required => false, - example => <<"api_example_client">> + deprecated => {since, "v5.0.14"} })}, {payload, hoconsc:mk(binary(), #{ @@ -254,7 +254,6 @@ is_ok_deliver({_NodeOrShare, _MatchedTopic, {error, _}}) -> false. %% %%%%%% Below error codes are not implemented so far %%%% %% %% If HTTP request passes HTTP authentication, it is considered trusted. -%% In the future, we may choose to check ACL for the provided MQTT Client ID %% 135 Not authorized 401 %% %% %%%%%% Below error codes are not applicable %%%%%%% @@ -326,7 +325,6 @@ make_message(Map) -> Encoding = maps:get(<<"payload_encoding">>, Map, plain), case decode_payload(Encoding, maps:get(<<"payload">>, Map)) of {ok, Payload} -> - From = maps:get(<<"clientid">>, Map, http_api), QoS = maps:get(<<"qos">>, Map, 0), Topic = maps:get(<<"topic">>, Map), Retain = maps:get(<<"retain">>, Map, false), @@ -346,7 +344,9 @@ make_message(Map) -> error:_Reason -> throw(invalid_topic_name) end, - Message = emqx_message:make(From, QoS, Topic, Payload, #{retain => Retain}, Headers), + Message = emqx_message:make( + http_api, QoS, Topic, Payload, #{retain => Retain}, Headers + ), Size = emqx_message:estimate_size(Message), (Size > size_limit()) andalso throw(packet_too_large), {ok, Message}; diff --git a/apps/emqx_management/src/emqx_mgmt_api_stats.erl b/apps/emqx_management/src/emqx_mgmt_api_stats.erl index 19bb3e737..5f4bbce65 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_stats.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_stats.erl @@ -127,9 +127,21 @@ list(get, #{query_string := Qs}) -> true -> {200, emqx_mgmt:get_stats()}; _ -> - Data = [ - maps:from_list(emqx_mgmt:get_stats(Node) ++ [{node, Node}]) - || Node <- mria_mnesia:running_nodes() - ], + Data = lists:foldl( + fun(Node, Acc) -> + case emqx_mgmt:get_stats(Node) of + {error, _Err} -> + Acc; + Stats when is_list(Stats) -> + Data = maps:from_list([{node, Node} | Stats]), + [Data | Acc] + end + end, + [], + emqx:running_nodes() + ), {200, Data} end. 
+ +%%%============================================================================================== +%% Internal diff --git a/apps/emqx_management/src/emqx_mgmt_api_status.erl b/apps/emqx_management/src/emqx_mgmt_api_status.erl index 7d5c18e59..c0ee42e2b 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_status.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_status.erl @@ -45,6 +45,17 @@ schema("/status") -> #{ 'operationId' => get_status, get => #{ + parameters => [ + {format, + hoconsc:mk( + string(), + #{ + in => query, + default => <<"text">>, + desc => ?DESC(get_status_api_format) + } + )} + ], description => ?DESC(get_status_api), tags => ?TAGS, security => [], @@ -70,7 +81,16 @@ path() -> "/status". init(Req0, State) -> - {Code, Headers, Body} = running_status(), + Format = + try + QS = cowboy_req:parse_qs(Req0), + {_, F} = lists:keyfind(<<"format">>, 1, QS), + F + catch + _:_ -> + <<"text">> + end, + {Code, Headers, Body} = running_status(Format), Req = cowboy_req:reply(Code, Headers, Body, Req0), {ok, Req, State}. @@ -78,29 +98,52 @@ %% API Handler funcs %%-------------------------------------------------------------------- -get_status(get, _Params) -> - running_status(). +get_status(get, Params) -> + Format = maps:get(<<"format">>, maps:get(query_string, Params, #{}), <<"text">>), + running_status(iolist_to_binary(Format)). -running_status() -> +running_status(Format) -> case emqx_dashboard_listener:is_ready(timer:seconds(20)) of true -> - BrokerStatus = broker_status(), AppStatus = application_status(), - Body = io_lib:format("Node ~ts is ~ts~nemqx is ~ts", [node(), BrokerStatus, AppStatus]), + Body = do_get_status(AppStatus, Format), StatusCode = case AppStatus of running -> 200; not_running -> 503 end, + ContentType = + case Format of + <<"json">> -> <<"application/json">>; + _ -> <<"text/plain">> + end, Headers = #{ - <<"content-type">> => <<"text/plain">>, + <<"content-type">> => ContentType, <<"retry-after">> => <<"15">> }, - {StatusCode, Headers, list_to_binary(Body)}; + {StatusCode, Headers, iolist_to_binary(Body)}; false -> {503, #{<<"retry-after">> => <<"15">>}, <<>>} end. +do_get_status(AppStatus, <<"json">>) -> + BrokerStatus = broker_status(), + emqx_utils_json:encode(#{ + node_name => atom_to_binary(node(), utf8), + rel_vsn => vsn(), + broker_status => atom_to_binary(BrokerStatus), + app_status => atom_to_binary(AppStatus) + }); +do_get_status(AppStatus, _) -> + BrokerStatus = broker_status(), + io_lib:format("Node ~ts is ~ts~nemqx is ~ts", [node(), BrokerStatus, AppStatus]). + +vsn() -> + iolist_to_binary([ + emqx_release:edition_vsn_prefix(), + emqx_release:version() + ]). + broker_status() -> case emqx:is_running() of true -> diff --git a/apps/emqx_management/src/emqx_mgmt_api_subscriptions.erl b/apps/emqx_management/src/emqx_mgmt_api_subscriptions.erl index b3380f4d1..1b69835f9 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_subscriptions.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_subscriptions.erl @@ -149,7 +149,7 @@ subscriptions(get, #{query_string := QString}) -> fun ?MODULE:format/2 ); Node0 -> - case emqx_misc:safe_to_existing_atom(Node0) of + case emqx_utils:safe_to_existing_atom(Node0) of {ok, Node1} -> emqx_mgmt_api:node_query( Node1, @@ -173,11 +173,11 @@ subscriptions(get, #{query_string := QString}) -> {200, Result} end.
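Reviewer note on the /status changes above: with format=json the body is built by do_get_status/2 from exactly the four fields shown. A sketch of a request/response pair, with assumed values (node name, version, and the default dashboard port are illustrative):

    GET http://127.0.0.1:18083/api/v5/status?format=json
    {"node_name":"emqx@127.0.0.1","rel_vsn":"v5.0.24","broker_status":"running","app_status":"running"}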
-format(WhichNode, {{_Subscriber, Topic}, Options}) -> +format(WhichNode, {{Topic, _Subscriber}, Options}) -> maps:merge( #{ topic => get_topic(Topic, Options), - clientid => maps:get(subid, Options), + clientid => maps:get(subid, Options, null), node => WhichNode }, maps:with([qos, nl, rap, rh], Options) @@ -205,14 +205,14 @@ gen_match_spec([], MtchHead) -> gen_match_spec([{Key, '=:=', Value} | More], MtchHead) -> gen_match_spec(More, update_ms(Key, Value, MtchHead)). -update_ms(clientid, X, {{Pid, Topic}, Opts}) -> - {{Pid, Topic}, Opts#{subid => X}}; -update_ms(topic, X, {{Pid, _Topic}, Opts}) -> - {{Pid, X}, Opts}; -update_ms(share_group, X, {{Pid, Topic}, Opts}) -> - {{Pid, Topic}, Opts#{share => X}}; -update_ms(qos, X, {{Pid, Topic}, Opts}) -> - {{Pid, Topic}, Opts#{qos => X}}. +update_ms(clientid, X, {{Topic, Pid}, Opts}) -> + {{Topic, Pid}, Opts#{subid => X}}; +update_ms(topic, X, {{_Topic, Pid}, Opts}) -> + {{X, Pid}, Opts}; +update_ms(share_group, X, {{Topic, Pid}, Opts}) -> + {{Topic, Pid}, Opts#{share => X}}; +update_ms(qos, X, {{Topic, Pid}, Opts}) -> + {{Topic, Pid}, Opts#{qos => X}}. fuzzy_filter_fun([]) -> undefined; @@ -221,5 +221,5 @@ fuzzy_filter_fun(Fuzzy) -> run_fuzzy_filter(_, []) -> true; -run_fuzzy_filter(E = {{_, Topic}, _}, [{topic, match, TopicFilter} | Fuzzy]) -> +run_fuzzy_filter(E = {{Topic, _}, _}, [{topic, match, TopicFilter} | Fuzzy]) -> emqx_topic:match(Topic, TopicFilter) andalso run_fuzzy_filter(E, Fuzzy). diff --git a/apps/emqx_management/src/emqx_mgmt_api_topics.erl b/apps/emqx_management/src/emqx_mgmt_api_topics.erl index a64badd3a..4100269e5 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_topics.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_topics.erl @@ -75,7 +75,7 @@ schema("/topics/:topic") -> tags => ?TAGS, parameters => [topic_param(path)], responses => #{ - 200 => hoconsc:mk(hoconsc:ref(topic), #{}), + 200 => hoconsc:mk(hoconsc:array(hoconsc:ref(topic)), #{}), 404 => emqx_dashboard_swagger:error_codes(['TOPIC_NOT_FOUND'], <<"Topic not found">>) } @@ -130,8 +130,9 @@ lookup(#{topic := Topic}) -> case emqx_router:lookup_routes(Topic) of [] -> {404, #{code => ?TOPIC_NOT_FOUND, message => <<"Topic not found">>}}; - [Route] -> - {200, format(Route)} + Routes when is_list(Routes) -> + Formatted = [format(Route) || Route <- Routes], + {200, Formatted} end. %%%============================================================================================== diff --git a/apps/emqx_management/src/emqx_mgmt_api_trace.erl b/apps/emqx_management/src/emqx_mgmt_api_trace.erl index d90aea9ef..25cc2734f 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_trace.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_trace.erl @@ -20,6 +20,7 @@ -include_lib("kernel/include/file.hrl"). -include_lib("typerefl/include/types.hrl"). -include_lib("emqx/include/logger.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). -export([ api_spec/0, @@ -46,9 +47,11 @@ get_trace_size/0 ]). +-define(MAX_SINT32, 2147483647). + -define(TO_BIN(_B_), iolist_to_binary(_B_)). -define(NOT_FOUND(N), {404, #{code => 'NOT_FOUND', message => ?TO_BIN([N, " NOT FOUND"])}}). --define(BAD_REQUEST(C, M), {400, #{code => C, message => ?TO_BIN(M)}}). +-define(SERVICE_UNAVAILABLE(C, M), {503, #{code => C, message => ?TO_BIN(M)}}). -define(TAGS, [<<"Trace">>]). namespace() -> "trace". 
@@ -91,7 +94,8 @@ schema("/trace") -> 409 => emqx_dashboard_swagger:error_codes( [ 'ALREADY_EXISTS', - 'DUPLICATE_CONDITION' + 'DUPLICATE_CONDITION', + 'BAD_TYPE' ], <<"trace already exists">> ) @@ -147,8 +151,9 @@ schema("/trace/:name/download") -> #{schema => #{type => "string", format => "binary"}} } }, - 400 => emqx_dashboard_swagger:error_codes(['NODE_ERROR'], <<"Node Not Found">>), - 404 => emqx_dashboard_swagger:error_codes(['NOT_FOUND'], <<"Trace Name Not Found">>) + 404 => emqx_dashboard_swagger:error_codes( + ['NOT_FOUND', 'NODE_ERROR'], <<"Trace Name or Node Not Found">> + ) } } }; @@ -183,8 +188,15 @@ schema("/trace/:name/log") -> {items, hoconsc:mk(binary(), #{example => "TEXT-LOG-ITEMS"})}, {meta, fields(bytes) ++ fields(position)} ], - 400 => emqx_dashboard_swagger:error_codes(['NODE_ERROR'], <<"Trace Log Failed">>), - 404 => emqx_dashboard_swagger:error_codes(['NOT_FOUND'], <<"Trace Name Not Found">>) + 400 => emqx_dashboard_swagger:error_codes( + ['BAD_REQUEST'], <<"Bad input parameter">> + ), + 404 => emqx_dashboard_swagger:error_codes( + ['NOT_FOUND', 'NODE_ERROR'], <<"Trace Name or Node Not Found">> + ), + 503 => emqx_dashboard_swagger:error_codes( + ['SERVICE_UNAVAILABLE'], <<"Requested chunk size too big">> + ) } } }. @@ -254,6 +266,19 @@ fields(trace) -> example => running } )}, + {payload_encode, + hoconsc:mk(hoconsc:enum([hex, text, hidden]), #{ + desc => + "" + "Determine the format of the payload in the trace file.
\n" + "`text`: Text-based protocol or plain text protocol.\n" + " It is recommended when payload is JSON encoded.
\n" + "`hex`: Binary hexadecimal encode." + "It is recommended when payload is a custom binary protocol.
\n" + "`hidden`: payload is obfuscated as `******`" + "", + default => text + })}, {start_at, hoconsc:mk( emqx_datetime:epoch_second(), @@ -312,12 +337,16 @@ fields(bytes) -> [ {bytes, hoconsc:mk( - integer(), + %% This seems to be the minimum max value we may encounter + %% across different OS + range(0, ?MAX_SINT32), #{ - desc => "Maximum number of bytes to store in request", + desc => "Maximum number of bytes to send in response", in => query, required => false, - default => 1000 + default => 1000, + minimum => 0, + maximum => ?MAX_SINT32 } )} ]; @@ -361,7 +390,7 @@ trace(get, _Params) -> fun(#{start_at := A}, #{start_at := B}) -> A > B end, emqx_trace:format(List0) ), - Nodes = mria_mnesia:running_nodes(), + Nodes = emqx:running_nodes(), TraceSize = wrap_rpc(emqx_mgmt_trace_proto_v2:get_trace_size(Nodes)), AllFileSize = lists:foldl(fun(F, Acc) -> maps:merge(Acc, F) end, #{}, TraceSize), Now = erlang:system_time(second), @@ -406,6 +435,11 @@ trace(post, #{body := Param}) -> code => 'DUPLICATE_CONDITION', message => ?TO_BIN([Name, " Duplication Condition"]) }}; + {error, {bad_type, _}} -> + {409, #{ + code => 'BAD_TYPE', + message => <<"Rolling upgrade in progress, create failed">> + }}; {error, Reason} -> {400, #{ code => 'INVALID_PARAMS', @@ -430,7 +464,7 @@ format_trace(Trace0) -> LogSize = lists:foldl( fun(Node, Acc) -> Acc#{Node => 0} end, #{}, - mria_mnesia:running_nodes() + emqx:running_nodes() ), Trace2 = maps:without([enable, filter], Trace1), Trace2#{ @@ -461,16 +495,31 @@ download_trace_log(get, #{bindings := #{name := Name}, query_string := Query}) - case parse_node(Query, undefined) of {ok, Node} -> TraceFiles = collect_trace_file(Node, TraceLog), - ZipDir = emqx_trace:zip_dir(), + %% We generate a session ID so that we name files + %% with unique names. Then we won't cause + %% overwrites for concurrent requests. + SessionId = emqx_utils:gen_id(), + ZipDir = filename:join([emqx_trace:zip_dir(), SessionId]), + ok = file:make_dir(ZipDir), + %% Write files to ZipDir and create an in-memory zip file Zips = group_trace_file(ZipDir, TraceLog, TraceFiles), - FileName = binary_to_list(Name) ++ ".zip", - ZipFileName = filename:join([ZipDir, FileName]), - {ok, ZipFile} = zip:zip(ZipFileName, Zips, [{cwd, ZipDir}]), - %% emqx_trace:delete_files_after_send(ZipFileName, Zips), - %% TODO use file replace file_binary.(delete file after send is not ready now). - {ok, Binary} = file:read_file(ZipFile), - ZipName = filename:basename(ZipFile), - _ = file:delete(ZipFile), + ZipName = binary_to_list(Name) ++ ".zip", + Binary = + try + {ok, {ZipName, Bin}} = zip:zip(ZipName, Zips, [memory, {cwd, ZipDir}]), + Bin + after + %% emqx_trace:delete_files_after_send(ZipFileName, Zips), + %% TODO use file replace file_binary.(delete file after send is not ready now). + ok = file:del_dir_r(ZipDir) + end, + ?tp(trace_api_download_trace_log, #{ + files => Zips, + name => Name, + session_id => SessionId, + zip_dir => ZipDir, + zip_name => ZipName + }), Headers = #{ <<"content-type">> => <<"application/x-zip">>, <<"content-disposition">> => iolist_to_binary( @@ -479,7 +528,7 @@ download_trace_log(get, #{bindings := #{name := Name}, query_string := Query}) - }, {200, Headers, {file_binary, ZipName, Binary}}; {error, not_found} -> - ?BAD_REQUEST('NODE_ERROR', <<"Node not found">>) + ?NOT_FOUND(<<"Node">>) end; {error, not_found} -> ?NOT_FOUND(Name) @@ -511,13 +560,13 @@ group_trace_file(ZipDir, TraceLog, TraceFiles) -> ). 
collect_trace_file(undefined, TraceLog) -> - Nodes = mria_mnesia:running_nodes(), + Nodes = emqx:running_nodes(), wrap_rpc(emqx_mgmt_trace_proto_v2:trace_file(Nodes, TraceLog)); collect_trace_file(Node, TraceLog) -> wrap_rpc(emqx_mgmt_trace_proto_v2:trace_file([Node], TraceLog)). collect_trace_file_detail(TraceLog) -> - Nodes = mria_mnesia:running_nodes(), + Nodes = emqx:running_nodes(), wrap_rpc(emqx_mgmt_trace_proto_v2:trace_file_detail(Nodes, TraceLog)). wrap_rpc({GoodRes, BadNodes}) -> @@ -563,11 +612,19 @@ stream_log_file(get, #{bindings := #{name := Name}, query_string := Query}) -> {200, #{meta => Meta, items => <<"">>}}; {error, not_found} -> ?NOT_FOUND(Name); + {error, enomem} -> + ?SLOG(warning, #{ + code => not_enough_mem, + msg => "Requested chunk size too big", + bytes => Bytes, + name => Name + }), + ?SERVICE_UNAVAILABLE('SERVICE_UNAVAILABLE', <<"Requested chunk size too big">>); {badrpc, nodedown} -> - ?BAD_REQUEST('NODE_ERROR', <<"Node not found">>) + ?NOT_FOUND(<<"Node">>) end; {error, not_found} -> - ?BAD_REQUEST('NODE_ERROR', <<"Node not found">>) + ?NOT_FOUND(<<"Node">>) end. -spec get_trace_size() -> #{{node(), file:name_all()} => non_neg_integer()}. @@ -639,7 +696,7 @@ parse_node(Query, Default) -> {ok, Default}; {ok, NodeBin} -> Node = binary_to_existing_atom(NodeBin), - true = lists:member(Node, mria_mnesia:running_nodes()), + true = lists:member(Node, emqx:running_nodes()), {ok, Node} end catch diff --git a/apps/emqx_management/src/emqx_mgmt_app.erl b/apps/emqx_management/src/emqx_mgmt_app.erl index 164ac1b36..b4cf9091a 100644 --- a/apps/emqx_management/src/emqx_mgmt_app.erl +++ b/apps/emqx_management/src/emqx_mgmt_app.erl @@ -28,10 +28,13 @@ -include("emqx_mgmt.hrl"). start(_Type, _Args) -> - {ok, Sup} = emqx_mgmt_sup:start_link(), ok = mria_rlog:wait_for_shards([?MANAGEMENT_SHARD], infinity), - emqx_mgmt_cli:load(), - {ok, Sup}. + case emqx_mgmt_auth:init_bootstrap_file() of + ok -> + emqx_mgmt_sup:start_link(); + {error, Reason} -> + {error, Reason} + end. stop(_State) -> ok. diff --git a/apps/emqx_management/src/emqx_mgmt_auth.erl b/apps/emqx_management/src/emqx_mgmt_auth.erl index 3d97e53bc..12a7a6641 100644 --- a/apps/emqx_management/src/emqx_mgmt_auth.erl +++ b/apps/emqx_management/src/emqx_mgmt_auth.erl @@ -15,6 +15,7 @@ %%-------------------------------------------------------------------- -module(emqx_mgmt_auth). -include_lib("emqx/include/emqx.hrl"). +-include_lib("emqx/include/logger.hrl"). %% API -export([mnesia/1]). @@ -25,7 +26,8 @@ read/1, update/4, delete/1, - list/0 + list/0, + init_bootstrap_file/0 ]). -export([authorize/3]). @@ -34,9 +36,14 @@ -export([ do_update/4, do_delete/1, - do_create_app/3 + do_create_app/3, + do_force_create_app/3 ]). +-ifdef(TEST). +-export([create/5]). +-endif. + -define(APP, emqx_app). -record(?APP, { @@ -45,7 +52,7 @@ api_secret_hash = <<>> :: binary() | '_', enable = true :: boolean() | '_', desc = <<>> :: binary() | '_', - expired_at = 0 :: integer() | undefined | '_', + expired_at = 0 :: integer() | undefined | infinity | '_', created_at = 0 :: integer() | '_' }). @@ -58,9 +65,19 @@ mnesia(boot) -> {attributes, record_info(fields, ?APP)} ]). +-spec init_bootstrap_file() -> ok | {error, _}. +init_bootstrap_file() -> + File = bootstrap_file(), + ?SLOG(debug, #{msg => "init_bootstrap_api_keys_from_file", file => File}), + init_bootstrap_file(File). 
+ create(Name, Enable, ExpiredAt, Desc) -> - case mnesia:table_info(?APP, size) < 30 of - true -> create_app(Name, Enable, ExpiredAt, Desc); + ApiSecret = generate_api_secret(), + create(Name, ApiSecret, Enable, ExpiredAt, Desc). + +create(Name, ApiSecret, Enable, ExpiredAt, Desc) -> + case mnesia:table_info(?APP, size) < 100 of + true -> create_app(Name, ApiSecret, Enable, ExpiredAt, Desc); false -> {error, "Maximum ApiKey"} end. @@ -148,8 +165,7 @@ to_map(#?APP{name = N, api_key = K, enable = E, expired_at = ET, created_at = CT is_expired(undefined) -> false; is_expired(ExpiredTime) -> ExpiredTime < erlang:system_time(second). -create_app(Name, Enable, ExpiredAt, Desc) -> - ApiSecret = generate_api_secret(), +create_app(Name, ApiSecret, Enable, ExpiredAt, Desc) -> App = #?APP{ name = Name, @@ -158,17 +174,21 @@ create_app(Name, Enable, ExpiredAt, Desc) -> desc = Desc, created_at = erlang:system_time(second), api_secret_hash = emqx_dashboard_admin:hash(ApiSecret), - api_key = list_to_binary(emqx_misc:gen_id(16)) + api_key = list_to_binary(emqx_utils:gen_id(16)) }, case create_app(App) of - {error, api_key_already_existed} -> create_app(Name, Enable, ExpiredAt, Desc); - {ok, Res} -> {ok, Res#{api_secret => ApiSecret}}; - Error -> Error + {ok, Res} -> + {ok, Res#{api_secret => ApiSecret}}; + Error -> + Error end. create_app(App = #?APP{api_key = ApiKey, name = Name}) -> trans(fun ?MODULE:do_create_app/3, [App, ApiKey, Name]). +force_create_app(NamePrefix, App = #?APP{api_key = ApiKey}) -> + trans(fun ?MODULE:do_force_create_app/3, [App, ApiKey, NamePrefix]). + do_create_app(App, ApiKey, Name) -> case mnesia:read(?APP, Name) of [_] -> @@ -183,6 +203,22 @@ do_create_app(App, ApiKey, Name) -> end end. +do_force_create_app(App, ApiKey, NamePrefix) -> + case mnesia:match_object(?APP, #?APP{api_key = ApiKey, _ = '_'}, read) of + [] -> + NewName = generate_unique_name(NamePrefix), + ok = mnesia:write(App#?APP{name = NewName}); + [#?APP{name = Name}] -> + ok = mnesia:write(App#?APP{name = Name}) + end. + +generate_unique_name(NamePrefix) -> + New = list_to_binary(NamePrefix ++ emqx_utils:gen_id(16)), + case mnesia:read(?APP, New) of + [] -> New; + _ -> generate_unique_name(NamePrefix) + end. + trans(Fun, Args) -> case mria:transaction(?COMMON_SHARD, Fun, Args) of {atomic, Res} -> {ok, Res}; @@ -192,3 +228,84 @@ trans(Fun, Args) -> generate_api_secret() -> Random = crypto:strong_rand_bytes(32), emqx_base62:encode(Random). + +bootstrap_file() -> + case emqx:get_config([api_key, bootstrap_file], <<>>) of + %% Kept for backward compatibility; remove after 5.1.0 + <<>> -> + emqx:get_config([dashboard, bootstrap_users_file], <<>>); + File -> + File + end. + +init_bootstrap_file(<<>>) -> + ok; +init_bootstrap_file(File) -> + case file:open(File, [read, binary]) of + {ok, Dev} -> + {ok, MP} = re:compile(<<"(\.+):(\.+$)">>, [ungreedy]), + init_bootstrap_file(File, Dev, MP); + {error, Reason0} -> + Reason = emqx_utils:explain_posix(Reason0), + ?SLOG( + error, + #{ + msg => "failed_to_open_the_bootstrap_file", + file => File, + reason => Reason + } + ), + {error, Reason} + end. + +init_bootstrap_file(File, Dev, MP) -> + try + add_bootstrap_file(File, Dev, MP, 1) + catch + throw:Error -> {error, Error}; + Type:Reason:Stacktrace -> {error, {Type, Reason, Stacktrace}} + after + file:close(Dev) + end. + +-define(BOOTSTRAP_TAG, <<"Bootstrapped From File">>).
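Reviewer note on the bootstrap parser above: after Erlang string escaping, the compiled pattern is effectively (.+):(.+$), and [ungreedy] makes the first group lazy, so the first colon splits the API key from the secret and the secret itself may contain colons. A sketch with a hypothetical key and secret:

    {ok, MP} = re:compile(<<"(.+):(.+$)">>, [ungreedy]),
    {match, [[<<"my_api_key">>, <<"secret:with:colons">>]]} =
        re:run(<<"my_api_key:secret:with:colons">>, MP, [global, {capture, all_but_first, binary}]).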
+ +add_bootstrap_file(File, Dev, MP, Line) -> + case file:read_line(Dev) of + {ok, Bin} -> + case re:run(Bin, MP, [global, {capture, all_but_first, binary}]) of + {match, [[AppKey, ApiSecret]]} -> + App = + #?APP{ + enable = true, + expired_at = infinity, + desc = ?BOOTSTRAP_TAG, + created_at = erlang:system_time(second), + api_secret_hash = emqx_dashboard_admin:hash(ApiSecret), + api_key = AppKey + }, + case force_create_app("from_bootstrap_file_", App) of + {ok, ok} -> + add_bootstrap_file(File, Dev, MP, Line + 1); + {error, Reason} -> + throw(#{file => File, line => Line, content => Bin, reason => Reason}) + end; + _ -> + Reason = "invalid_format", + ?SLOG( + error, + #{ + msg => "failed_to_load_bootstrap_file", + file => File, + line => Line, + content => Bin, + reason => Reason + } + ), + throw(#{file => File, line => Line, content => Bin, reason => Reason}) + end; + eof -> + ok; + {error, Reason} -> + throw(#{file => File, line => Line, reason => Reason}) + end. diff --git a/apps/emqx_management/src/emqx_mgmt_cache.erl b/apps/emqx_management/src/emqx_mgmt_cache.erl new file mode 100644 index 000000000..e7f9ac0b1 --- /dev/null +++ b/apps/emqx_management/src/emqx_mgmt_cache.erl @@ -0,0 +1,108 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- +-module(emqx_mgmt_cache). + +-behaviour(gen_server). + +-define(SYS_MEMORY_KEY, sys_memory). +-define(EXPIRED_MS, 3000). +%% refresh 100ms before expiry so the cached value is renewed early +-define(REFRESH_MS, ?EXPIRED_MS - 100). +-define(DEFAULT_BAD_MEMORY, {0, 0}). + +-export([start_link/0, get_sys_memory/0]). +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]). + +get_sys_memory() -> + case get_memory_from_cache() of + {ok, CacheMem} -> + erlang:send(?MODULE, refresh_sys_memory), + CacheMem; + stale -> + get_sys_memory_sync() + end. + +get_sys_memory_sync() -> + try + gen_server:call(?MODULE, get_sys_memory, ?EXPIRED_MS) + catch + exit:{timeout, _} -> + ?DEFAULT_BAD_MEMORY + end. + +start_link() -> + gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). + +init([]) -> + _ = ets:new(?MODULE, [set, named_table, public, {keypos, 1}]), + {ok, #{latest_refresh => 0}}. + +handle_call(get_sys_memory, _From, State) -> + {Mem, NewState} = refresh_sys_memory(State), + {reply, Mem, NewState}; +handle_call(_Request, _From, State) -> + {reply, ok, State}. + +handle_cast(_Request, State) -> + {noreply, State}. + +handle_info(refresh_sys_memory, State) -> + {_, NewState} = refresh_sys_memory(State), + {noreply, NewState}; +handle_info(_Info, State) -> + {noreply, State}. + +terminate(_Reason, _State) -> + ok. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}.
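Reviewer note on emqx_mgmt_cache above: reads are served straight from ETS while the entry is younger than EXPIRED_MS, and every hit nudges the server, which refreshes at most once per REFRESH_MS window; the synchronous gen_server call is only the cold or stale path. A usage sketch (the value shape is whatever load_ctl:get_sys_memory/0 returns; {0, 0} is the fallback when even the sync call times out):

    Mem = emqx_mgmt_cache:get_sys_memory(),  %% fast path: ETS hit plus an async refresh hint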
+ +%%%=================================================================== +%%% Internal functions +%%%=================================================================== + +refresh_sys_memory(State = #{latest_refresh := LatestRefresh}) -> + Now = now_millisecond(), + case Now - LatestRefresh >= ?REFRESH_MS of + true -> + do_refresh_sys_memory(Now, State); + false -> + case get_memory_from_cache() of + stale -> do_refresh_sys_memory(Now, State); + {ok, Mem} -> {Mem, State} + end + end. + +do_refresh_sys_memory(RefreshAt, State) -> + NewMem = load_ctl:get_sys_memory(), + NewExpiredAt = now_millisecond() + ?EXPIRED_MS, + ets:insert(?MODULE, {?SYS_MEMORY_KEY, {NewMem, NewExpiredAt}}), + {NewMem, State#{latest_refresh => RefreshAt}}. + +get_memory_from_cache() -> + case ets:lookup(?MODULE, ?SYS_MEMORY_KEY) of + [] -> + stale; + [{_, {Mem, ExpiredAt}}] -> + case now_millisecond() < ExpiredAt of + true -> {ok, Mem}; + false -> stale + end + end. + +now_millisecond() -> + erlang:system_time(millisecond). diff --git a/apps/emqx_management/src/emqx_mgmt_cli.erl b/apps/emqx_management/src/emqx_mgmt_cli.erl index 8d19716ce..448940904 100644 --- a/apps/emqx_management/src/emqx_mgmt_cli.erl +++ b/apps/emqx_management/src/emqx_mgmt_cli.erl @@ -213,7 +213,7 @@ subscriptions(["show", ClientId]) -> [] -> emqx_ctl:print("Not Found.~n"); [{_, Pid}] -> - case ets:match_object(emqx_suboption, {{Pid, '_'}, '_'}) of + case ets:match_object(emqx_suboption, {{'_', Pid}, '_'}) of [] -> emqx_ctl:print("Not Found.~n"); Suboption -> [print({emqx_suboption, Sub}) || Sub <- Suboption] end @@ -315,7 +315,7 @@ vm([]) -> vm(["all"]) -> [vm([Name]) || Name <- ["load", "memory", "process", "io", "ports"]]; vm(["load"]) -> - [emqx_ctl:print("cpu/~-20s: ~ts~n", [L, V]) || {L, V} <- emqx_vm:loads()]; + [emqx_ctl:print("cpu/~-20s: ~w~n", [L, V]) || {L, V} <- emqx_vm:loads()]; vm(["memory"]) -> [emqx_ctl:print("memory/~-17s: ~w~n", [Cat, Val]) || {Cat, Val} <- erlang:memory()]; vm(["process"]) -> @@ -356,7 +356,7 @@ mnesia(_) -> %% @doc Logger Command log(["set-level", Level]) -> - case emqx_misc:safe_to_existing_atom(Level) of + case emqx_utils:safe_to_existing_atom(Level) of {ok, Level1} -> case emqx_logger:set_log_level(Level1) of ok -> emqx_ctl:print("~ts~n", [Level]); @@ -369,7 +369,7 @@ log(["primary-level"]) -> Level = emqx_logger:get_primary_log_level(), emqx_ctl:print("~ts~n", [Level]); log(["primary-level", Level]) -> - case emqx_misc:safe_to_existing_atom(Level) of + case emqx_utils:safe_to_existing_atom(Level) of {ok, Level1} -> _ = emqx_logger:set_primary_log_level(Level1), ok; @@ -392,7 +392,7 @@ log(["handlers", "list"]) -> ], ok; log(["handlers", "start", HandlerId]) -> - case emqx_misc:safe_to_existing_atom(HandlerId) of + case emqx_utils:safe_to_existing_atom(HandlerId) of {ok, HandlerId1} -> case emqx_logger:start_log_handler(HandlerId1) of ok -> @@ -406,7 +406,7 @@ log(["handlers", "start", HandlerId]) -> emqx_ctl:print("[error] invalid handler:~ts~n", [HandlerId]) end; log(["handlers", "stop", HandlerId]) -> - case emqx_misc:safe_to_existing_atom(HandlerId) of + case emqx_utils:safe_to_existing_atom(HandlerId) of {ok, HandlerId1} -> case emqx_logger:stop_log_handler(HandlerId1) of ok -> @@ -420,9 +420,9 @@ log(["handlers", "stop", HandlerId]) -> emqx_ctl:print("[error] invalid handler:~ts~n", [HandlerId]) end; log(["handlers", "set-level", HandlerId, Level]) -> - case emqx_misc:safe_to_existing_atom(HandlerId) of + case emqx_utils:safe_to_existing_atom(HandlerId) of {ok, HandlerId1} -> - case 
emqx_misc:safe_to_existing_atom(Level) of + case emqx_utils:safe_to_existing_atom(Level) of {ok, Level1} -> case emqx_logger:set_log_handler_level(HandlerId1, Level1) of ok -> @@ -615,20 +615,25 @@ listeners([]) -> {error, _} -> []; MC -> [{max_conns, MC}] end, + ShutdownCount = + case emqx_listeners:shutdown_count(ID, Bind) of + {error, _} -> []; + SC -> [{shutdown_count, SC}] + end, Info = [ {listen_on, {string, emqx_listeners:format_bind(Bind)}}, {acceptors, Acceptors}, {proxy_protocol, ProxyProtocol}, {running, Running} - ] ++ CurrentConns ++ MaxConn, + ] ++ CurrentConns ++ MaxConn ++ ShutdownCount, emqx_ctl:print("~ts~n", [ID]), lists:foreach(fun indent_print/1, Info) end, emqx_listeners:list() ); listeners(["stop", ListenerId]) -> - case emqx_misc:safe_to_existing_atom(ListenerId) of + case emqx_utils:safe_to_existing_atom(ListenerId) of {ok, ListenerId1} -> case emqx_listeners:stop_listener(ListenerId1) of ok -> @@ -640,7 +645,7 @@ listeners(["stop", ListenerId]) -> emqx_ctl:print("Invalid listener: ~0p~n", [ListenerId]) end; listeners(["start", ListenerId]) -> - case emqx_misc:safe_to_existing_atom(ListenerId) of + case emqx_utils:safe_to_existing_atom(ListenerId) of {ok, ListenerId1} -> case emqx_listeners:start_listener(ListenerId1) of ok -> @@ -652,7 +657,7 @@ listeners(["start", ListenerId]) -> emqx_ctl:print("Invalid listener: ~0p~n", [ListenerId]) end; listeners(["restart", ListenerId]) -> - case emqx_misc:safe_to_existing_atom(ListenerId) of + case emqx_utils:safe_to_existing_atom(ListenerId) of {ok, ListenerId1} -> case emqx_listeners:restart_listener(ListenerId1) of ok -> @@ -829,7 +834,7 @@ print({emqx_topic, #route{topic = Topic, dest = {_, Node}}}) -> emqx_ctl:print("~ts -> ~ts~n", [Topic, Node]); print({emqx_topic, #route{topic = Topic, dest = Node}}) -> emqx_ctl:print("~ts -> ~ts~n", [Topic, Node]); -print({emqx_suboption, {{Pid, Topic}, Options}}) when is_pid(Pid) -> +print({emqx_suboption, {{Topic, Pid}, Options}}) when is_pid(Pid) -> SubId = maps:get(subid, Options), QoS = maps:get(qos, Options, 0), NL = maps:get(nl, Options, 0), diff --git a/apps/emqx_management/src/emqx_mgmt_sup.erl b/apps/emqx_management/src/emqx_mgmt_sup.erl index 329532fa1..713ff87dc 100644 --- a/apps/emqx_management/src/emqx_mgmt_sup.erl +++ b/apps/emqx_management/src/emqx_mgmt_sup.erl @@ -26,4 +26,21 @@ start_link() -> supervisor:start_link({local, ?MODULE}, ?MODULE, []). init([]) -> - {ok, {{one_for_one, 1, 5}, []}}. + Workers = + case os:type() of + {unix, linux} -> + [child_spec(emqx_mgmt_cache, 5000, worker)]; + _ -> + [] + end, + {ok, {{one_for_one, 1, 5}, Workers}}. + +child_spec(Mod, Shutdown, Type) -> + #{ + id => Mod, + start => {Mod, start_link, []}, + restart => permanent, + shutdown => Shutdown, + type => Type, + modules => [Mod] + }. diff --git a/apps/emqx_management/src/emqx_mgmt_util.erl b/apps/emqx_management/src/emqx_mgmt_util.erl index c0d9e6036..b81b39b07 100644 --- a/apps/emqx_management/src/emqx_mgmt_util.erl +++ b/apps/emqx_management/src/emqx_mgmt_util.erl @@ -302,7 +302,7 @@ page_params() -> name => limit, in => query, description => <<"Page size">>, - schema => #{type => integer, default => emqx_mgmt:max_row_limit()} + schema => #{type => integer, default => emqx_mgmt:default_row_limit()} } ]. 
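Both `emqx_suboption` changes in this file track a key-order change in the subscription-options table: entries are now keyed as `{{Topic, SubPid}, SubOpts}` rather than `{{SubPid, Topic}, SubOpts}`, so a per-connection lookup pins the pid in the second element of the key:

    %% old shape (would no longer match anything):
    %% ets:match_object(emqx_suboption, {{Pid, '_'}, '_'})
    %% new shape, as used in subscriptions(["show", ClientId]) above:
    ets:match_object(emqx_suboption, {{'_', Pid}, '_'})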
diff --git a/apps/emqx_management/test/emqx_mgmt_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_SUITE.erl new file mode 100644 index 000000000..3eb37060e --- /dev/null +++ b/apps/emqx_management/test/emqx_mgmt_SUITE.erl @@ -0,0 +1,387 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- +-module(emqx_mgmt_SUITE). + +-compile(export_all). +-compile(nowarn_export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). + +-export([ident/1]). + +-define(FORMATFUN, {?MODULE, ident}). + +all() -> + emqx_common_test_helpers:all(?MODULE). + +init_per_suite(Config) -> + emqx_mgmt_api_test_util:init_suite([emqx_conf, emqx_management]), + Config. + +end_per_suite(_) -> + emqx_mgmt_api_test_util:end_suite([emqx_management, emqx_conf]). + +init_per_testcase(TestCase, Config) -> + meck:expect(emqx, running_nodes, 0, [node()]), + emqx_common_test_helpers:init_per_testcase(?MODULE, TestCase, Config). + +end_per_testcase(TestCase, Config) -> + meck:unload(emqx), + emqx_common_test_helpers:end_per_testcase(?MODULE, TestCase, Config). + +t_list_nodes(init, Config) -> + meck:expect( + emqx, + cluster_nodes, + fun + (running) -> [node()]; + (stopped) -> ['stopped@node'] + end + ), + Config; +t_list_nodes('end', _Config) -> + ok. + +t_list_nodes(_) -> + NodeInfos = emqx_mgmt:list_nodes(), + Node = node(), + ?assertMatch( + [ + {Node, #{node := Node, node_status := 'running'}}, + {'stopped@node', #{node := 'stopped@node', node_status := 'stopped'}} + ], + NodeInfos + ). + +t_lookup_node(init, Config) -> + meck:new(os, [passthrough, unstick, no_link]), + OsType = os:type(), + meck:expect(os, type, 0, {win32, winME}), + [{os_type, OsType} | Config]; +t_lookup_node('end', Config) -> + %% We need to restore the original behavior so that rebar3 doesn't crash. If + %% we'd `meck:unload(os)` or not set `no_link` then `ct` crashes calling + %% `os` with "The code server called the unloaded module `os'". + OsType = ?config(os_type, Config), + meck:expect(os, type, 0, OsType), + ok. + +t_lookup_node(_) -> + Node = node(), + ?assertMatch( + #{node := Node, node_status := 'running', memory_total := 0}, + emqx_mgmt:lookup_node(node()) + ), + ?assertMatch( + {error, _}, + emqx_mgmt:lookup_node('fake@nohost') + ), + ok. + +t_list_brokers(_) -> + Node = node(), + ?assertMatch( + [{Node, #{node := Node, node_status := running, uptime := _}}], + emqx_mgmt:list_brokers() + ). + +t_lookup_broker(_) -> + Node = node(), + ?assertMatch( + #{node := Node, node_status := running, uptime := _}, + emqx_mgmt:lookup_broker(Node) + ). + +t_get_metrics(_) -> + Metrics = emqx_mgmt:get_metrics(), + ?assert(maps:size(Metrics) > 0), + ?assertMatch( + Metrics, maps:from_list(emqx_mgmt:get_metrics(node())) + ). 
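This suite leans on the `emqx_common_test_helpers` per-testcase dispatch (an inference from the `init_per_testcase/2` and `end_per_testcase/2` wiring above): when a testcase exports matching arity-2 clauses, the helper calls `TestCase(init, Config)` before and `TestCase('end', Config)` after the arity-1 test body, keeping setup next to the test it serves. Shape of one case, with a hypothetical name:

    t_example(init, Config) -> setup_clients(Config);
    t_example('end', Config) -> disconnect_clients(Config).

    t_example(_Config) ->
        %% assertions go here; runs between the two clauses above
        ok.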
+ +t_lookup_client(init, Config) -> + setup_clients(Config); +t_lookup_client('end', Config) -> + disconnect_clients(Config). + +t_lookup_client(_Config) -> + [{Chan, Info, Stats}] = emqx_mgmt:lookup_client({clientid, <<"client1">>}, ?FORMATFUN), + ?assertEqual( + [{Chan, Info, Stats}], + emqx_mgmt:lookup_client({username, <<"user1">>}, ?FORMATFUN) + ), + ?assertEqual([], emqx_mgmt:lookup_client({clientid, <<"notfound">>}, ?FORMATFUN)), + meck:expect(emqx, running_nodes, 0, [node(), 'fake@nonode']), + ?assertMatch( + [_ | {error, nodedown}], emqx_mgmt:lookup_client({clientid, <<"client1">>}, ?FORMATFUN) + ). + +t_kickout_client(init, Config) -> + process_flag(trap_exit, true), + setup_clients(Config); +t_kickout_client('end', _Config) -> + ok. + +t_kickout_client(Config) -> + [C | _] = ?config(clients, Config), + ok = emqx_mgmt:kickout_client(<<"client1">>), + receive + {'EXIT', C, Reason} -> + ?assertEqual({shutdown, tcp_closed}, Reason); + Foo -> + error({unexpected, Foo}) + after 1000 -> + error(timeout) + end, + ?assertEqual({error, not_found}, emqx_mgmt:kickout_client(<<"notfound">>)). + +t_list_authz_cache(init, Config) -> + setup_clients(Config); +t_list_authz_cache('end', Config) -> + disconnect_clients(Config). + +t_list_authz_cache(_) -> + ?assertNotMatch({error, _}, emqx_mgmt:list_authz_cache(<<"client1">>)), + ?assertMatch({error, not_found}, emqx_mgmt:list_authz_cache(<<"notfound">>)). + +t_list_client_subscriptions(init, Config) -> + setup_clients(Config); +t_list_client_subscriptions('end', Config) -> + disconnect_clients(Config). + +t_list_client_subscriptions(Config) -> + [Client | _] = ?config(clients, Config), + ?assertEqual([], emqx_mgmt:list_client_subscriptions(<<"client1">>)), + emqtt:subscribe(Client, <<"t/#">>), + ?assertMatch({_, [{<<"t/#">>, _Opts}]}, emqx_mgmt:list_client_subscriptions(<<"client1">>)), + ?assertEqual({error, not_found}, emqx_mgmt:list_client_subscriptions(<<"notfound">>)). + +t_clean_cache(init, Config) -> + setup_clients(Config); +t_clean_cache('end', Config) -> + disconnect_clients(Config). + +t_clean_cache(_Config) -> + ?assertNotMatch( + {error, _}, + emqx_mgmt:clean_authz_cache(<<"client1">>) + ), + ?assertNotMatch( + {error, _}, + emqx_mgmt:clean_authz_cache_all() + ), + ?assertNotMatch( + {error, _}, + emqx_mgmt:clean_pem_cache_all() + ), + meck:expect(emqx, running_nodes, 0, [node(), 'fake@nonode']), + ?assertMatch( + {error, [{'fake@nonode', {error, _}}]}, + emqx_mgmt:clean_authz_cache_all() + ), + ?assertMatch( + {error, [{'fake@nonode', {error, _}}]}, + emqx_mgmt:clean_pem_cache_all() + ). + +t_set_client_props(init, Config) -> + setup_clients(Config); +t_set_client_props('end', Config) -> + disconnect_clients(Config). + +t_set_client_props(_Config) -> + ?assertEqual( + % [FIXME] not implemented at this point? + ignored, + emqx_mgmt:set_ratelimit_policy(<<"client1">>, foo) + ), + ?assertEqual( + {error, not_found}, + emqx_mgmt:set_ratelimit_policy(<<"notfound">>, foo) + ), + ?assertEqual( + % [FIXME] not implemented at this point? + ignored, + emqx_mgmt:set_quota_policy(<<"client1">>, foo) + ), + ?assertEqual( + {error, not_found}, + emqx_mgmt:set_quota_policy(<<"notfound">>, foo) + ), + ?assertEqual( + ok, + emqx_mgmt:set_keepalive(<<"client1">>, 3600) + ), + ?assertMatch( + {error, _}, + emqx_mgmt:set_keepalive(<<"client1">>, true) + ), + ?assertEqual( + {error, not_found}, + emqx_mgmt:set_keepalive(<<"notfound">>, 3600) + ), + ok. 
+ +t_list_subscriptions_via_topic(init, Config) -> + setup_clients(Config); +t_list_subscriptions_via_topic('end', Config) -> + disconnect_clients(Config). + +t_list_subscriptions_via_topic(Config) -> + [Client | _] = ?config(clients, Config), + ?assertEqual([], emqx_mgmt:list_subscriptions_via_topic(<<"t/#">>, ?FORMATFUN)), + emqtt:subscribe(Client, <<"t/#">>), + ?assertMatch( + [{{<<"t/#">>, _SubPid}, _Opts}], + emqx_mgmt:list_subscriptions_via_topic(<<"t/#">>, ?FORMATFUN) + ). + +t_pubsub_api(init, Config) -> + setup_clients(Config); +t_pubsub_api('end', Config) -> + disconnect_clients(Config). + +-define(TT(Topic), {Topic, #{qos => 0}}). + +t_pubsub_api(Config) -> + [Client | _] = ?config(clients, Config), + ?assertEqual([], emqx_mgmt:list_subscriptions_via_topic(<<"t/#">>, ?FORMATFUN)), + ?assertMatch( + {subscribe, _, _}, + emqx_mgmt:subscribe(<<"client1">>, [?TT(<<"t/#">>), ?TT(<<"t1/#">>), ?TT(<<"t2/#">>)]) + ), + timer:sleep(100), + ?assertMatch( + [{{<<"t/#">>, _SubPid}, _Opts}], + emqx_mgmt:list_subscriptions_via_topic(<<"t/#">>, ?FORMATFUN) + ), + Message = emqx_message:make(?MODULE, 0, <<"t/foo">>, <<"helloworld">>, #{}, #{}), + emqx_mgmt:publish(Message), + Recv = + receive + {publish, #{client_pid := Client, payload := <<"helloworld">>}} -> + ok + after 100 -> + timeout + end, + ?assertEqual(ok, Recv), + ?assertEqual({error, channel_not_found}, emqx_mgmt:subscribe(<<"notfound">>, [?TT(<<"t/#">>)])), + ?assertNotMatch({error, _}, emqx_mgmt:unsubscribe(<<"client1">>, <<"t/#">>)), + ?assertEqual({error, channel_not_found}, emqx_mgmt:unsubscribe(<<"notfound">>, <<"t/#">>)), + Node = node(), + ?assertMatch( + {Node, [{<<"t1/#">>, _}, {<<"t2/#">>, _}]}, + emqx_mgmt:list_client_subscriptions(<<"client1">>) + ), + ?assertMatch( + {unsubscribe, [{<<"t1/#">>, _}, {<<"t2/#">>, _}]}, + emqx_mgmt:unsubscribe_batch(<<"client1">>, [<<"t1/#">>, <<"t2/#">>]) + ), + timer:sleep(100), + ?assertMatch([], emqx_mgmt:list_client_subscriptions(<<"client1">>)), + ?assertEqual( + {error, channel_not_found}, + emqx_mgmt:unsubscribe_batch(<<"notfound">>, [<<"t1/#">>, <<"t2/#">>]) + ). + +t_alarms(init, Config) -> + [ + emqx_mgmt:deactivate(Node, Name) + || {Node, ActiveAlarms} <- emqx_mgmt:get_alarms(activated), #{name := Name} <- ActiveAlarms + ], + emqx_mgmt:delete_all_deactivated_alarms(), + Config; +t_alarms('end', Config) -> + Config. + +t_alarms(_) -> + Node = node(), + ?assertEqual( + [{node(), []}], + emqx_mgmt:get_alarms(all) + ), + emqx_alarm:activate(foo), + ?assertMatch( + [{Node, [#{name := foo, activated := true, duration := _}]}], + emqx_mgmt:get_alarms(all) + ), + emqx_alarm:activate(bar), + ?assertMatch( + [{Node, [#{name := foo, activated := true}, #{name := bar, activated := true}]}], + sort_alarms(emqx_mgmt:get_alarms(all)) + ), + ?assertEqual( + ok, + emqx_mgmt:deactivate(node(), bar) + ), + ?assertMatch( + [{Node, [#{name := foo, activated := true}, #{name := bar, activated := false}]}], + sort_alarms(emqx_mgmt:get_alarms(all)) + ), + ?assertMatch( + [{Node, [#{name := foo, activated := true}]}], + emqx_mgmt:get_alarms(activated) + ), + ?assertMatch( + [{Node, [#{name := bar, activated := false}]}], + emqx_mgmt:get_alarms(deactivated) + ), + ?assertEqual( + [ok], + emqx_mgmt:delete_all_deactivated_alarms() + ), + ?assertMatch( + [{Node, [#{name := foo, activated := true}]}], + emqx_mgmt:get_alarms(all) + ), + ?assertEqual( + {error, not_found}, + emqx_mgmt:deactivate(node(), bar) + ). 
+ +t_banned(_) -> + Banned = #{ + who => {clientid, <<"TestClient">>}, + by => <<"banned suite">>, + reason => <<"test">>, + at => erlang:system_time(second), + until => erlang:system_time(second) + 1 + }, + ?assertMatch( + {ok, _}, + emqx_mgmt:create_banned(Banned) + ), + ?assertEqual( + ok, + emqx_mgmt:delete_banned({clientid, <<"TestClient">>}) + ). + +%%% helpers +ident(Arg) -> + Arg. + +sort_alarms([{Node, Alarms}]) -> + [{Node, lists:sort(fun(#{activate_at := A}, #{activate_at := B}) -> A < B end, Alarms)}]. + +setup_clients(Config) -> + {ok, C} = emqtt:start_link([{clientid, <<"client1">>}, {username, <<"user1">>}]), + {ok, _} = emqtt:connect(C), + [{clients, [C]} | Config]. + +disconnect_clients(Config) -> + Clients = ?config(clients, Config), + lists:foreach(fun emqtt:disconnect/1, Clients). diff --git a/apps/emqx_management/test/emqx_mgmt_api_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_SUITE.erl index a14305d8b..a53ffc9c4 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_SUITE.erl @@ -67,7 +67,7 @@ t_cluster_query(_Config) -> %% assert: AllPage = Page1 + Page2 + Page3 + Page4 %% !!!Note: this equation requires that the queried tables must be ordered_set - {200, ClientsPage2} = query_clients(Node1, #{<<"page">> => 2, <<"limit">> => 5}), + {200, ClientsPage2} = query_clients(Node1, #{<<"page">> => <<"2">>, <<"limit">> => 5}), {200, ClientsPage3} = query_clients(Node2, #{<<"page">> => 3, <<"limit">> => 5}), {200, ClientsPage4} = query_clients(Node1, #{<<"page">> => 4, <<"limit">> => 5}), GetClientIds = fun(L) -> lists:map(fun(#{clientid := Id}) -> Id end, L) end, @@ -79,6 +79,78 @@ t_cluster_query(_Config) -> ) ), + %% Scroll past count + {200, ClientsPage10} = query_clients(Node1, #{<<"page">> => <<"10">>, <<"limit">> => 5}), + ?assertEqual( + #{data => [], meta => #{page => 10, limit => 5, count => 20, hasnext => false}}, + ClientsPage10 + ), + + %% Node queries + {200, ClientsNode2} = query_clients(Node1, #{<<"node">> => Node2}), + ?assertEqual({200, ClientsNode2}, query_clients(Node2, #{<<"node">> => Node2})), + ?assertMatch( + #{page := 1, limit := 100, count := 10}, + maps:get(meta, ClientsNode2) + ), + ?assertMatch(10, length(maps:get(data, ClientsNode2))), + + {200, ClientsNode2Page1} = query_clients(Node2, #{<<"node">> => Node2, <<"limit">> => 5}), + {200, ClientsNode2Page2} = query_clients(Node1, #{ + <<"node">> => Node2, <<"page">> => <<"2">>, <<"limit">> => 5 + }), + {200, ClientsNode2Page3} = query_clients(Node2, #{ + <<"node">> => Node2, <<"page">> => 3, <<"limit">> => 5 + }), + {200, ClientsNode2Page4} = query_clients(Node1, #{ + <<"node">> => Node2, <<"page">> => 4, <<"limit">> => 5 + }), + ?assertEqual( + GetClientIds(maps:get(data, ClientsNode2)), + GetClientIds( + lists:append([ + maps:get(data, Page) + || Page <- [ + ClientsNode2Page1, + ClientsNode2Page2, + ClientsNode2Page3, + ClientsNode2Page4 + ] + ]) + ) + ), + + %% Scroll past count + {200, ClientsNode2Page10} = query_clients(Node1, #{ + <<"node">> => Node2, <<"page">> => <<"10">>, <<"limit">> => 5 + }), + ?assertEqual( + #{data => [], meta => #{page => 10, limit => 5, count => 10, hasnext => false}}, + ClientsNode2Page10 + ), + + %% Query with bad params + ?assertEqual( + {400, #{ + code => <<"INVALID_PARAMETER">>, + message => <<"page_limit_invalid">> + }}, + query_clients(Node1, #{<<"page">> => -1}) + ), + ?assertEqual( + {400, #{ + code => <<"INVALID_PARAMETER">>, + message => <<"page_limit_invalid">> + }}, + query_clients(Node1, 
#{<<"node">> => Node1, <<"page">> => -1}) + ), + + %% Query bad node + ?assertMatch( + {500, #{code := <<"NODE_DOWN">>}}, + query_clients(Node1, #{<<"node">> => 'nonode@nohost'}) + ), + %% exact match can return non-zero total {200, ClientsNode1} = query_clients(Node2, #{<<"username">> => <<"corenode1@127.0.0.1">>}), ?assertMatch( @@ -87,12 +159,11 @@ t_cluster_query(_Config) -> ), %% fuzzy searching can't return total - {200, ClientsNode2} = query_clients(Node2, #{<<"like_username">> => <<"corenode2">>}), - ?assertMatch( - #{count := 0}, - maps:get(meta, ClientsNode2) - ), - ?assertMatch(10, length(maps:get(data, ClientsNode2))), + {200, ClientsFuzzyNode2} = query_clients(Node2, #{<<"like_username">> => <<"corenode2">>}), + MetaNode2 = maps:get(meta, ClientsFuzzyNode2), + ?assertNotMatch(#{count := _}, MetaNode2), + ?assertMatch(#{hasnext := false}, MetaNode2), + ?assertMatch(10, length(maps:get(data, ClientsFuzzyNode2))), _ = lists:foreach(fun(C) -> emqtt:disconnect(C) end, ClientLs1), _ = lists:foreach(fun(C) -> emqtt:disconnect(C) end, ClientLs2) @@ -102,6 +173,23 @@ t_cluster_query(_Config) -> end, ok. +t_bad_rpc(_) -> + emqx_mgmt_api_test_util:init_suite(), + process_flag(trap_exit, true), + ClientLs1 = [start_emqtt_client(node(), I, 1883) || I <- lists:seq(1, 10)], + Path = emqx_mgmt_api_test_util:api_path(["clients?limit=2&page=2"]), + try + meck:expect(emqx, running_nodes, 0, ['fake@nohost']), + {error, {_, 500, _}} = emqx_mgmt_api_test_util:request_api(get, Path), + %% good cop, bad cop + meck:expect(emqx, running_nodes, 0, [node(), 'fake@nohost']), + {error, {_, 500, _}} = emqx_mgmt_api_test_util:request_api(get, Path) + after + _ = lists:foreach(fun(C) -> emqtt:disconnect(C) end, ClientLs1), + meck:unload(emqx), + emqx_mgmt_api_test_util:end_suite() + end. + %%-------------------------------------------------------------------- %% helpers %%-------------------------------------------------------------------- diff --git a/apps/emqx_management/test/emqx_mgmt_api_alarms_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_alarms_SUITE.erl index adff41214..dc7be7671 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_alarms_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_alarms_SUITE.erl @@ -40,6 +40,9 @@ t_alarms_api(_) -> get_alarms(1, true), get_alarms(1, false). +t_alarm_cpu(_) -> + ok. + t_delete_alarms_api(_) -> Path = emqx_mgmt_api_test_util:api_path(["alarms"]), {ok, _} = emqx_mgmt_api_test_util:request_api(delete, Path), @@ -53,11 +56,11 @@ get_alarms(AssertCount, Activated) -> Qs = "activated=" ++ Activated, Headers = emqx_mgmt_api_test_util:auth_header_(), {ok, Response} = emqx_mgmt_api_test_util:request_api(get, Path, Qs, Headers), - Data = emqx_json:decode(Response, [return_maps]), + Data = emqx_utils_json:decode(Response, [return_maps]), Meta = maps:get(<<"meta">>, Data), Page = maps:get(<<"page">>, Meta), Limit = maps:get(<<"limit">>, Meta), Count = maps:get(<<"count">>, Meta), ?assertEqual(Page, 1), - ?assertEqual(Limit, emqx_mgmt:max_row_limit()), + ?assertEqual(Limit, emqx_mgmt:default_row_limit()), ?assert(Count >= AssertCount). 
diff --git a/apps/emqx_management/test/emqx_mgmt_api_app_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_api_keys_SUITE.erl similarity index 68% rename from apps/emqx_management/test/emqx_mgmt_api_app_SUITE.erl rename to apps/emqx_management/test/emqx_mgmt_api_api_keys_SUITE.erl index a3aaf8f58..1a396d795 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_app_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_api_keys_SUITE.erl @@ -13,7 +13,7 @@ %% See the License for the specific language governing permissions and %% limitations under the License. %%-------------------------------------------------------------------- --module(emqx_mgmt_api_app_SUITE). +-module(emqx_mgmt_api_api_keys_SUITE). -compile(export_all). -compile(nowarn_export_all). @@ -25,15 +25,62 @@ suite() -> [{timetrap, {minutes, 1}}]. groups() -> [ {parallel, [parallel], [t_create, t_update, t_delete, t_authorize, t_create_unexpired_app]}, - {sequence, [], [t_create_failed]} + {sequence, [], [t_bootstrap_file, t_create_failed]} ]. init_per_suite(Config) -> - emqx_mgmt_api_test_util:init_suite(), + emqx_mgmt_api_test_util:init_suite([emqx_conf]), Config. end_per_suite(_) -> - emqx_mgmt_api_test_util:end_suite(). + emqx_mgmt_api_test_util:end_suite([emqx_conf]). + +t_bootstrap_file(_) -> + TestPath = <<"/api/v5/status">>, + Bin = <<"test-1:secret-1\ntest-2:secret-2">>, + File = "./bootstrap_api_keys.txt", + ok = file:write_file(File, Bin), + emqx:update_config([api_key, bootstrap_file], File), + ok = emqx_mgmt_auth:init_bootstrap_file(), + ?assertEqual(ok, emqx_mgmt_auth:authorize(TestPath, <<"test-1">>, <<"secret-1">>)), + ?assertEqual(ok, emqx_mgmt_auth:authorize(TestPath, <<"test-2">>, <<"secret-2">>)), + ?assertMatch({error, _}, emqx_mgmt_auth:authorize(TestPath, <<"test-2">>, <<"secret-1">>)), + + %% relaunch to check if the table is changed. 
+ Bin1 = <<"test-1:new-secret-1\ntest-2:new-secret-2">>, + ok = file:write_file(File, Bin1), + ok = emqx_mgmt_auth:init_bootstrap_file(), + ?assertMatch({error, _}, emqx_mgmt_auth:authorize(TestPath, <<"test-1">>, <<"secret-1">>)), + ?assertMatch({error, _}, emqx_mgmt_auth:authorize(TestPath, <<"test-2">>, <<"secret-2">>)), + ?assertEqual(ok, emqx_mgmt_auth:authorize(TestPath, <<"test-1">>, <<"new-secret-1">>)), + ?assertEqual(ok, emqx_mgmt_auth:authorize(TestPath, <<"test-2">>, <<"new-secret-2">>)), + + %% Compatibility + Bin2 = <<"test-3:new-secret-3\ntest-4:new-secret-4">>, + ok = file:write_file(File, Bin2), + emqx:update_config([api_key, bootstrap_file], <<>>), + emqx:update_config([dashboard, bootstrap_users_file], File), + ok = emqx_mgmt_auth:init_bootstrap_file(), + ?assertMatch(ok, emqx_mgmt_auth:authorize(TestPath, <<"test-1">>, <<"new-secret-1">>)), + ?assertMatch(ok, emqx_mgmt_auth:authorize(TestPath, <<"test-2">>, <<"new-secret-2">>)), + ?assertEqual(ok, emqx_mgmt_auth:authorize(TestPath, <<"test-3">>, <<"new-secret-3">>)), + ?assertEqual(ok, emqx_mgmt_auth:authorize(TestPath, <<"test-4">>, <<"new-secret-4">>)), + + %% not found + NotFoundFile = "./bootstrap_apps_not_exist.txt", + emqx:update_config([api_key, bootstrap_file], NotFoundFile), + ?assertMatch({error, "No such file or directory"}, emqx_mgmt_auth:init_bootstrap_file()), + + %% bad format + BadBin = <<"test-1:secret-11\ntest-2 secret-12">>, + ok = file:write_file(File, BadBin), + emqx:update_config([api_key, bootstrap_file], File), + ?assertMatch({error, #{reason := "invalid_format"}}, emqx_mgmt_auth:init_bootstrap_file()), + ?assertEqual(ok, emqx_mgmt_auth:authorize(TestPath, <<"test-1">>, <<"secret-11">>)), + ?assertMatch({error, _}, emqx_mgmt_auth:authorize(TestPath, <<"test-2">>, <<"secret-12">>)), + emqx:update_config([api_key, bootstrap_file], <<>>), + emqx:update_config([dashboard, bootstrap_users_file], <<>>), + ok. t_create(_Config) -> Name = <<"EMQX-API-KEY-1">>, @@ -69,7 +116,7 @@ t_create_failed(_Config) -> ?assertEqual(BadRequest, create_app(LongName)), {ok, List} = list_app(), - CreateNum = 30 - erlang:length(List), + CreateNum = 100 - erlang:length(List), Names = lists:map( fun(Seq) -> <<"EMQX-API-FAILED-KEY-", (integer_to_binary(Seq))/binary>> @@ -178,21 +225,23 @@ t_create_unexpired_app(_Config) -> ok. list_app() -> + AuthHeader = emqx_dashboard_SUITE:auth_header_(), Path = emqx_mgmt_api_test_util:api_path(["api_key"]), - case emqx_mgmt_api_test_util:request_api(get, Path) of - {ok, Apps} -> {ok, emqx_json:decode(Apps, [return_maps])}; + case emqx_mgmt_api_test_util:request_api(get, Path, AuthHeader) of + {ok, Apps} -> {ok, emqx_utils_json:decode(Apps, [return_maps])}; Error -> Error end. read_app(Name) -> + AuthHeader = emqx_dashboard_SUITE:auth_header_(), Path = emqx_mgmt_api_test_util:api_path(["api_key", Name]), - case emqx_mgmt_api_test_util:request_api(get, Path) of - {ok, Res} -> {ok, emqx_json:decode(Res, [return_maps])}; + case emqx_mgmt_api_test_util:request_api(get, Path, AuthHeader) of + {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; Error -> Error end. 
create_app(Name) -> - AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + AuthHeader = emqx_dashboard_SUITE:auth_header_(), Path = emqx_mgmt_api_test_util:api_path(["api_key"]), ExpiredAt = to_rfc3339(erlang:system_time(second) + 1000), App = #{ @@ -202,28 +251,29 @@ create_app(Name) -> enable => true }, case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, App) of - {ok, Res} -> {ok, emqx_json:decode(Res, [return_maps])}; + {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; Error -> Error end. create_unexpired_app(Name, Params) -> - AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + AuthHeader = emqx_dashboard_SUITE:auth_header_(), Path = emqx_mgmt_api_test_util:api_path(["api_key"]), App = maps:merge(#{name => Name, desc => <<"Note"/utf8>>, enable => true}, Params), case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, App) of - {ok, Res} -> {ok, emqx_json:decode(Res, [return_maps])}; + {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; Error -> Error end. delete_app(Name) -> + AuthHeader = emqx_dashboard_SUITE:auth_header_(), DeletePath = emqx_mgmt_api_test_util:api_path(["api_key", Name]), - emqx_mgmt_api_test_util:request_api(delete, DeletePath). + emqx_mgmt_api_test_util:request_api(delete, DeletePath, AuthHeader). update_app(Name, Change) -> - AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + AuthHeader = emqx_dashboard_SUITE:auth_header_(), UpdatePath = emqx_mgmt_api_test_util:api_path(["api_key", Name]), case emqx_mgmt_api_test_util:request_api(put, UpdatePath, "", AuthHeader, Change) of - {ok, Update} -> {ok, emqx_json:decode(Update, [return_maps])}; + {ok, Update} -> {ok, emqx_utils_json:decode(Update, [return_maps])}; Error -> Error end. diff --git a/apps/emqx_management/test/emqx_mgmt_api_banned_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_banned_SUITE.erl index c765f00bc..9f1b560f7 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_banned_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_banned_SUITE.erl @@ -160,7 +160,7 @@ t_delete(_Config) -> list_banned() -> Path = emqx_mgmt_api_test_util:api_path(["banned"]), case emqx_mgmt_api_test_util:request_api(get, Path) of - {ok, Apps} -> {ok, emqx_json:decode(Apps, [return_maps])}; + {ok, Apps} -> {ok, emqx_utils_json:decode(Apps, [return_maps])}; Error -> Error end. @@ -168,7 +168,7 @@ create_banned(Banned) -> AuthHeader = emqx_mgmt_api_test_util:auth_header_(), Path = emqx_mgmt_api_test_util:api_path(["banned"]), case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Banned) of - {ok, Res} -> {ok, emqx_json:decode(Res, [return_maps])}; + {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; Error -> Error end. 
diff --git a/apps/emqx_management/test/emqx_mgmt_api_clients_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_clients_SUITE.erl index 8dd1dabfc..6d7733b22 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_clients_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_clients_SUITE.erl @@ -44,7 +44,10 @@ t_clients(_) -> AuthHeader = emqx_mgmt_api_test_util:auth_header_(), {ok, C1} = emqtt:start_link(#{ - username => Username1, clientid => ClientId1, proto_ver => v5 + username => Username1, + clientid => ClientId1, + proto_ver => v5, + properties => #{'Session-Expiry-Interval' => 120} }), {ok, _} = emqtt:connect(C1), {ok, C2} = emqtt:start_link(#{username => Username2, clientid => ClientId2}), @@ -55,26 +58,34 @@ t_clients(_) -> %% get /clients ClientsPath = emqx_mgmt_api_test_util:api_path(["clients"]), {ok, Clients} = emqx_mgmt_api_test_util:request_api(get, ClientsPath), - ClientsResponse = emqx_json:decode(Clients, [return_maps]), + ClientsResponse = emqx_utils_json:decode(Clients, [return_maps]), ClientsMeta = maps:get(<<"meta">>, ClientsResponse), ClientsPage = maps:get(<<"page">>, ClientsMeta), ClientsLimit = maps:get(<<"limit">>, ClientsMeta), ClientsCount = maps:get(<<"count">>, ClientsMeta), ?assertEqual(ClientsPage, 1), - ?assertEqual(ClientsLimit, emqx_mgmt:max_row_limit()), + ?assertEqual(ClientsLimit, emqx_mgmt:default_row_limit()), ?assertEqual(ClientsCount, 2), %% get /clients/:clientid Client1Path = emqx_mgmt_api_test_util:api_path(["clients", binary_to_list(ClientId1)]), {ok, Client1} = emqx_mgmt_api_test_util:request_api(get, Client1Path), - Client1Response = emqx_json:decode(Client1, [return_maps]), + Client1Response = emqx_utils_json:decode(Client1, [return_maps]), ?assertEqual(Username1, maps:get(<<"username">>, Client1Response)), ?assertEqual(ClientId1, maps:get(<<"clientid">>, Client1Response)), + ?assertEqual(120, maps:get(<<"expiry_interval">>, Client1Response)), %% delete /clients/:clientid kickout Client2Path = emqx_mgmt_api_test_util:api_path(["clients", binary_to_list(ClientId2)]), {ok, _} = emqx_mgmt_api_test_util:request_api(delete, Client2Path), - timer:sleep(300), + Kick = + receive + {'EXIT', C2, _} -> + ok + after 300 -> + timeout + end, + ?assertEqual(ok, Kick), AfterKickoutResponse2 = emqx_mgmt_api_test_util:request_api(get, Client2Path), ?assertEqual({error, {"HTTP/1.1", 404, "Not Found"}}, AfterKickoutResponse2), @@ -103,7 +114,7 @@ t_clients(_) -> SubscribeBody ), timer:sleep(100), - [{AfterSubTopic, #{qos := AfterSubQos}}] = emqx_mgmt:lookup_subscriptions(ClientId1), + {_, [{AfterSubTopic, #{qos := AfterSubQos}}]} = emqx_mgmt:list_client_subscriptions(ClientId1), ?assertEqual(AfterSubTopic, Topic), ?assertEqual(AfterSubQos, Qos), @@ -119,7 +130,7 @@ t_clients(_) -> "", AuthHeader ), - [SubscriptionsData] = emqx_json:decode(SubscriptionsRes, [return_maps]), + [SubscriptionsData] = emqx_utils_json:decode(SubscriptionsRes, [return_maps]), ?assertMatch( #{ <<"clientid">> := ClientId1, @@ -148,7 +159,7 @@ t_clients(_) -> UnSubscribeBody ), timer:sleep(100), - ?assertEqual([], emqx_mgmt:lookup_subscriptions(Client1)), + ?assertEqual([], emqx_mgmt:list_client_subscriptions(ClientId1)), %% testcase cleanup, kickout client1 {ok, _} = emqx_mgmt_api_test_util:request_api(delete, Client1Path), @@ -199,7 +210,7 @@ t_query_clients_with_time(_) -> GteParamRfc3339 ++ GteParamStamp ], DecodedResults = [ - emqx_json:decode(Response, [return_maps]) + emqx_utils_json:decode(Response, [return_maps]) || {ok, Response} <- RequestResults ], {LteResponseDecodeds, 
GteResponseDecodeds} = lists:split(4, DecodedResults), @@ -236,13 +247,56 @@ t_keepalive(_Config) -> {ok, C1} = emqtt:start_link(#{username => Username, clientid => ClientId}), {ok, _} = emqtt:connect(C1), {ok, NewClient} = emqx_mgmt_api_test_util:request_api(put, Path, <<"">>, AuthHeader, Body), - #{<<"keepalive">> := 11} = emqx_json:decode(NewClient, [return_maps]), + #{<<"keepalive">> := 11} = emqx_utils_json:decode(NewClient, [return_maps]), [Pid] = emqx_cm:lookup_channels(list_to_binary(ClientId)), #{conninfo := #{keepalive := Keepalive}} = emqx_connection:info(Pid), ?assertEqual(11, Keepalive), emqtt:disconnect(C1), ok. +t_client_id_not_found(_Config) -> + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Http = {"HTTP/1.1", 404, "Not Found"}, + Body = "{\"code\":\"CLIENTID_NOT_FOUND\",\"message\":\"Client ID not found\"}", + + PathFun = fun(Suffix) -> + emqx_mgmt_api_test_util:api_path(["clients", "no_existed_clientid"] ++ Suffix) + end, + ReqFun = fun(Method, Path) -> + emqx_mgmt_api_test_util:request_api( + Method, Path, "", AuthHeader, [], #{return_all => true} + ) + end, + + PostFun = fun(Method, Path, Data) -> + emqx_mgmt_api_test_util:request_api( + Method, Path, "", AuthHeader, Data, #{return_all => true} + ) + end, + + %% Client lookup + ?assertMatch({error, {Http, _, Body}}, ReqFun(get, PathFun([]))), + %% Client kickout + ?assertMatch({error, {Http, _, Body}}, ReqFun(delete, PathFun([]))), + %% Client Subscription list + ?assertMatch({error, {Http, _, Body}}, ReqFun(get, PathFun(["subscriptions"]))), + %% AuthZ Cache lookup + ?assertMatch({error, {Http, _, Body}}, ReqFun(get, PathFun(["authorization", "cache"]))), + %% AuthZ Cache clean + ?assertMatch({error, {Http, _, Body}}, ReqFun(delete, PathFun(["authorization", "cache"]))), + %% Client Subscribe + SubBody = #{topic => <<"testtopic">>, qos => 1, nl => 1, rh => 1}, + ?assertMatch({error, {Http, _, Body}}, PostFun(post, PathFun(["subscribe"]), SubBody)), + ?assertMatch( + {error, {Http, _, Body}}, PostFun(post, PathFun(["subscribe", "bulk"]), [SubBody]) + ), + %% Client Unsubscribe + UnsubBody = #{topic => <<"testtopic">>}, + ?assertMatch({error, {Http, _, Body}}, PostFun(post, PathFun(["unsubscribe"]), UnsubBody)), + ?assertMatch( + {error, {Http, _, Body}}, PostFun(post, PathFun(["unsubscribe", "bulk"]), [UnsubBody]) + ). + time_string_to_epoch_millisecond(DateTime) -> time_string_to_epoch(DateTime, millisecond). diff --git a/apps/emqx_management/test/emqx_mgmt_api_configs_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_configs_SUITE.erl index d26f4480b..34b8ccd8f 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_configs_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_configs_SUITE.erl @@ -55,15 +55,20 @@ t_update(_Config) -> %% update ok {ok, SysMon} = get_config(<<"sysmon">>), #{<<"vm">> := #{<<"busy_port">> := BusyPort}} = SysMon, - NewSysMon = emqx_map_lib:deep_put([<<"vm">>, <<"busy_port">>], SysMon, not BusyPort), + NewSysMon = #{<<"vm">> => #{<<"busy_port">> => not BusyPort}}, {ok, #{}} = update_config(<<"sysmon">>, NewSysMon), {ok, SysMon1} = get_config(<<"sysmon">>), #{<<"vm">> := #{<<"busy_port">> := BusyPort1}} = SysMon1, ?assertEqual(BusyPort, not BusyPort1), assert_busy_port(BusyPort1), + %% Make sure the override config is updated, and remove the default value. 
+ ?assertMatch( + #{<<"vm">> := #{<<"busy_port">> := BusyPort1}}, + maps:get(<<"sysmon">>, emqx_config:read_override_conf(#{override_to => cluster})) + ), %% update failed - ErrorSysMon = emqx_map_lib:deep_put([<<"vm">>, <<"busy_port">>], SysMon, "123"), + ErrorSysMon = emqx_utils_maps:deep_put([<<"vm">>, <<"busy_port">>], SysMon, "123"), ?assertMatch( {error, {"HTTP/1.1", 400, _}}, update_config(<<"sysmon">>, ErrorSysMon) @@ -78,7 +83,7 @@ t_update(_Config) -> assert_busy_port(true), %% reset no_default_value config - NewSysMon1 = emqx_map_lib:deep_put([<<"vm">>, <<"busy_port">>], SysMon, false), + NewSysMon1 = emqx_utils_maps:deep_put([<<"vm">>, <<"busy_port">>], SysMon, false), {ok, #{}} = update_config(<<"sysmon">>, NewSysMon1), ?assertMatch({error, {"HTTP/1.1", 400, _}}, reset_config(<<"sysmon">>, "")), {ok, SysMon4} = get_config(<<"sysmon">>), @@ -94,57 +99,80 @@ t_log(_Config) -> {ok, Log} = get_config("log"), File = "log/emqx-test.log", %% update handler - Log1 = emqx_map_lib:deep_put([<<"file_handlers">>, <<"default">>, <<"enable">>], Log, true), - Log2 = emqx_map_lib:deep_put([<<"file_handlers">>, <<"default">>, <<"file">>], Log1, File), + Log1 = emqx_utils_maps:deep_put([<<"file">>, <<"default">>, <<"enable">>], Log, true), + Log2 = emqx_utils_maps:deep_put([<<"file">>, <<"default">>, <<"to">>], Log1, File), {ok, #{}} = update_config(<<"log">>, Log2), {ok, Log3} = logger:get_handler_config(default), ?assertMatch(#{config := #{file := File}}, Log3), - ErrLog1 = emqx_map_lib:deep_put([<<"file_handlers">>, <<"default">>, <<"enable">>], Log, 1), + ErrLog1 = emqx_utils_maps:deep_put([<<"file">>, <<"default">>, <<"enable">>], Log, 1), ?assertMatch({error, {"HTTP/1.1", 400, _}}, update_config(<<"log">>, ErrLog1)), - ErrLog2 = emqx_map_lib:deep_put([<<"file_handlers">>, <<"default">>, <<"enabfe">>], Log, true), + ErrLog2 = emqx_utils_maps:deep_put( + [<<"file">>, <<"default">>, <<"enabfe">>], Log, true + ), ?assertMatch({error, {"HTTP/1.1", 400, _}}, update_config(<<"log">>, ErrLog2)), %% add new handler File1 = "log/emqx-test1.log", - Handler = emqx_map_lib:deep_get([<<"file_handlers">>, <<"default">>], Log2), - NewLog1 = emqx_map_lib:deep_put([<<"file_handlers">>, <<"new">>], Log2, Handler), - NewLog2 = emqx_map_lib:deep_put([<<"file_handlers">>, <<"new">>, <<"file">>], NewLog1, File1), + Handler = emqx_utils_maps:deep_get([<<"file">>, <<"default">>], Log2), + NewLog1 = emqx_utils_maps:deep_put([<<"file">>, <<"new">>], Log2, Handler), + NewLog2 = emqx_utils_maps:deep_put( + [<<"file">>, <<"new">>, <<"to">>], NewLog1, File1 + ), {ok, #{}} = update_config(<<"log">>, NewLog2), {ok, Log4} = logger:get_handler_config(new), ?assertMatch(#{config := #{file := File1}}, Log4), %% disable new handler - Disable = emqx_map_lib:deep_put([<<"file_handlers">>, <<"new">>, <<"enable">>], NewLog2, false), + Disable = emqx_utils_maps:deep_put( + [<<"file">>, <<"new">>, <<"enable">>], NewLog2, false + ), {ok, #{}} = update_config(<<"log">>, Disable), ?assertEqual({error, {not_found, new}}, logger:get_handler_config(new)), ok. 
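The path rewrites in the t_log hunk above mirror a rename in the log schema: what the old test addressed as `file_handlers.<name>.file` is now `file.<name>.to` (with `enable` kept as-is). Under that assumption, the HOCON this test effectively round-trips looks roughly like:

    log.file.default {
      enable = true
      to = "log/emqx-test.log"
    }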
t_global_zone(_Config) -> {ok, Zones} = get_global_zone(), - ZonesKeys = lists:map(fun({K, _}) -> K end, hocon_schema:roots(emqx_zone_schema)), + ZonesKeys = lists:map( + fun({K, _}) -> list_to_binary(K) end, emqx_zone_schema:zone_without_hidden() + ), ?assertEqual(lists:usort(ZonesKeys), lists:usort(maps:keys(Zones))), ?assertEqual( emqx_config:get_zone_conf(no_default, [mqtt, max_qos_allowed]), - emqx_map_lib:deep_get([<<"mqtt">>, <<"max_qos_allowed">>], Zones) + emqx_utils_maps:deep_get([<<"mqtt">>, <<"max_qos_allowed">>], Zones) ), - NewZones = emqx_map_lib:deep_put([<<"mqtt">>, <<"max_qos_allowed">>], Zones, 1), + NewZones = emqx_utils_maps:deep_put([<<"mqtt">>, <<"max_qos_allowed">>], Zones, 1), {ok, #{}} = update_global_zone(NewZones), ?assertEqual(1, emqx_config:get_zone_conf(no_default, [mqtt, max_qos_allowed])), + %% Make sure the override config is updated, and remove the default value. + ?assertMatch(#{<<"max_qos_allowed">> := 1}, read_conf(<<"mqtt">>)), - BadZones = emqx_map_lib:deep_put([<<"mqtt">>, <<"max_qos_allowed">>], Zones, 3), + BadZones = emqx_utils_maps:deep_put([<<"mqtt">>, <<"max_qos_allowed">>], Zones, 3), ?assertMatch({error, {"HTTP/1.1", 400, _}}, update_global_zone(BadZones)), %% Remove max_qos_allowed from raw config, but we still get default value(2). Mqtt0 = emqx_conf:get_raw([<<"mqtt">>]), - ?assertEqual(1, emqx_map_lib:deep_get([<<"max_qos_allowed">>], Mqtt0)), + ?assertEqual(1, emqx_utils_maps:deep_get([<<"max_qos_allowed">>], Mqtt0)), Mqtt1 = maps:remove(<<"max_qos_allowed">>, Mqtt0), ok = emqx_config:put_raw([<<"mqtt">>], Mqtt1), Mqtt2 = emqx_conf:get_raw([<<"mqtt">>]), ?assertNot(maps:is_key(<<"max_qos_allowed">>, Mqtt2), Mqtt2), {ok, #{<<"mqtt">> := Mqtt3}} = get_global_zone(), %% the default value is 2 - ?assertEqual(2, emqx_map_lib:deep_get([<<"max_qos_allowed">>], Mqtt3)), + ?assertEqual(2, emqx_utils_maps:deep_get([<<"max_qos_allowed">>], Mqtt3)), ok = emqx_config:put_raw([<<"mqtt">>], Mqtt0), + + DefaultZones = emqx_utils_maps:deep_put([<<"mqtt">>, <<"max_qos_allowed">>], Zones, 2), + {ok, #{}} = update_global_zone(DefaultZones), + #{<<"mqtt">> := Mqtt} = emqx_config:fill_defaults(emqx_schema, #{<<"mqtt">> => #{}}, #{}), + Default = maps:map( + fun + (_, V) when is_boolean(V) -> V; + (_, V) when is_atom(V) -> atom_to_binary(V); + (_, V) -> V + end, + Mqtt + ), + ?assertEqual(Default, read_conf(<<"mqtt">>)), ok. 
get_global_zone() -> @@ -169,7 +197,7 @@ t_dashboard(_Config) -> Https1 = #{enable => true, bind => 18084}, ?assertMatch( {error, {"HTTP/1.1", 400, _}}, - update_config("dashboard", Dashboard#{<<"https">> => Https1}) + update_config("dashboard", Dashboard#{<<"listeners">> => Listeners#{<<"https">> => Https1}}) ), Https2 = #{ @@ -179,40 +207,46 @@ t_dashboard(_Config) -> cacertfile => "etc/certs/badcacert.pem", certfile => "etc/certs/badcert.pem" }, - Dashboard2 = Dashboard#{listeners => Listeners#{https => Https2}}, + Dashboard2 = Dashboard#{<<"listeners">> => Listeners#{<<"https">> => Https2}}, ?assertMatch( {error, {"HTTP/1.1", 400, _}}, update_config("dashboard", Dashboard2) ), - Keyfile = emqx_common_test_helpers:app_path(emqx, filename:join(["etc", "certs", "key.pem"])), - Certfile = emqx_common_test_helpers:app_path(emqx, filename:join(["etc", "certs", "cert.pem"])), - Cacertfile = emqx_common_test_helpers:app_path( + KeyFile = emqx_common_test_helpers:app_path(emqx, filename:join(["etc", "certs", "key.pem"])), + CertFile = emqx_common_test_helpers:app_path(emqx, filename:join(["etc", "certs", "cert.pem"])), + CacertFile = emqx_common_test_helpers:app_path( emqx, filename:join(["etc", "certs", "cacert.pem"]) ), Https3 = #{ - enable => true, - bind => 18084, - keyfile => Keyfile, - cacertfile => Cacertfile, - certfile => Certfile + <<"enable">> => true, + <<"bind">> => 18084, + <<"keyfile">> => list_to_binary(KeyFile), + <<"cacertfile">> => list_to_binary(CacertFile), + <<"certfile">> => list_to_binary(CertFile) }, - Dashboard3 = Dashboard#{listeners => Listeners#{https => Https3}}, + Dashboard3 = Dashboard#{<<"listeners">> => Listeners#{<<"https">> => Https3}}, ?assertMatch({ok, _}, update_config("dashboard", Dashboard3)), - Dashboard4 = Dashboard#{listeners => Listeners#{https => #{enable => false}}}, + Dashboard4 = Dashboard#{<<"listeners">> => Listeners#{<<"https">> => #{<<"enable">> => false}}}, ?assertMatch({ok, _}, update_config("dashboard", Dashboard4)), + {ok, Dashboard41} = get_config("dashboard"), + ?assertEqual( + Https3#{<<"enable">> => false}, + read_conf([<<"dashboard">>, <<"listeners">>, <<"https">>]), + Dashboard41 + ), ?assertMatch({ok, _}, update_config("dashboard", Dashboard)), {ok, Dashboard1} = get_config("dashboard"), ?assertNotEqual(Dashboard, Dashboard1), - timer:sleep(1000), + timer:sleep(1500), ok. t_configs_node({'init', Config}) -> Node = node(), - meck:expect(mria_mnesia, running_nodes, fun() -> [Node, bad_node, other_node] end), + meck:expect(emqx, running_nodes, fun() -> [Node, bad_node, other_node] end), meck:expect( emqx_management_proto_v2, get_full_config, @@ -224,7 +258,7 @@ t_configs_node({'init', Config}) -> ), Config; t_configs_node({'end', _}) -> - meck:unload([mria_mnesia, emqx_management_proto_v2]); + meck:unload([emqx, emqx_management_proto_v2]); t_configs_node(_) -> Node = atom_to_list(node()), @@ -235,7 +269,7 @@ t_configs_node(_) -> ?assertEqual(error, ExpType), ?assertMatch({{_, 404, _}, _, _}, ExpRes), {_, _, Body} = ExpRes, - ?assertMatch(#{<<"code">> := <<"NOT_FOUND">>}, emqx_json:decode(Body, [return_maps])), + ?assertMatch(#{<<"code">> := <<"NOT_FOUND">>}, emqx_utils_json:decode(Body, [return_maps])), ?assertMatch({error, {_, 500, _}}, get_configs("bad_node")). 
@@ -245,7 +279,7 @@ get_config(Name) ->
     Path = emqx_mgmt_api_test_util:api_path(["configs", Name]),
     case emqx_mgmt_api_test_util:request_api(get, Path) of
         {ok, Res} ->
-            {ok, emqx_json:decode(Res, [return_maps])};
+            {ok, emqx_utils_json:decode(Res, [return_maps])};
         Error ->
             Error
     end.
@@ -264,8 +298,8 @@ get_configs(Node, Opts) ->
         end,
     URI = emqx_mgmt_api_test_util:api_path(Path),
     case emqx_mgmt_api_test_util:request_api(get, URI, [], [], [], Opts) of
-        {ok, {_, _, Res}} -> {ok, emqx_json:decode(Res, [return_maps])};
-        {ok, Res} -> {ok, emqx_json:decode(Res, [return_maps])};
+        {ok, {_, _, Res}} -> {ok, emqx_utils_json:decode(Res, [return_maps])};
+        {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])};
         Error -> Error
     end.
@@ -273,7 +307,7 @@ update_config(Name, Change) ->
     AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
     UpdatePath = emqx_mgmt_api_test_util:api_path(["configs", Name]),
     case emqx_mgmt_api_test_util:request_api(put, UpdatePath, "", AuthHeader, Change) of
-        {ok, Update} -> {ok, emqx_json:decode(Update, [return_maps])};
+        {ok, Update} -> {ok, emqx_utils_json:decode(Update, [return_maps])};
         Error -> Error
     end.
@@ -288,3 +322,11 @@ reset_config(Name, Key) ->
         {ok, []} -> ok;
         Error -> Error
     end.
+
+read_conf(RootKeys) when is_list(RootKeys) ->
+    case emqx_config:read_override_conf(#{override_to => cluster}) of
+        undefined -> undefined;
+        Conf -> emqx_utils_maps:deep_get(RootKeys, Conf, undefined)
+    end;
+read_conf(RootKey) ->
+    read_conf([RootKey]).
diff --git a/apps/emqx_management/test/emqx_mgmt_api_listeners_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_listeners_SUITE.erl
index e5c47ac4d..977c81c2b 100644
--- a/apps/emqx_management/test/emqx_mgmt_api_listeners_SUITE.erl
+++ b/apps/emqx_management/test/emqx_mgmt_api_listeners_SUITE.erl
@@ -20,19 +20,93 @@
 -include_lib("eunit/include/eunit.hrl").

+-define(PORT(Base), (Base + ?LINE)).
+-define(PORT, ?PORT(20000)).
+
 all() ->
-    emqx_common_test_helpers:all(?MODULE).
+    [
+        {group, with_defaults_in_file},
+        {group, without_defaults_in_file}
+    ].
+
+groups() ->
+    AllTests = emqx_common_test_helpers:all(?MODULE),
+    [
+        {with_defaults_in_file, AllTests},
+        {without_defaults_in_file, AllTests}
+    ].

 init_per_suite(Config) ->
-    emqx_mgmt_api_test_util:init_suite([emqx_conf]),
     Config.

-end_per_suite(_) ->
-    emqx_conf:remove([listeners, tcp, new], #{override_to => cluster}),
-    emqx_conf:remove([listeners, tcp, new1], #{override_to => local}),
+end_per_suite(_Config) ->
+    ok.
+
+init_per_group(without_defaults_in_file, Config) ->
+    emqx_mgmt_api_test_util:init_suite([emqx_conf]),
+    Config;
+init_per_group(with_defaults_in_file, Config) ->
+    %% we have to materialize the config file with default values for this test group
+    %% because we want to test the deletion of a non-existing listener;
+    %% without a config file, such a deletion would end up deleting
+    %% the default listener.
+    Name = atom_to_list(?MODULE) ++ "-default-listeners",
+    TmpConfFullPath = inject_tmp_config_content(Name, default_listeners_hocon_text()),
+    emqx_mgmt_api_test_util:init_suite([emqx_conf]),
+    [{injected_conf_file, TmpConfFullPath} | Config].
+
+end_per_group(Group, Config) ->
+    emqx_conf:tombstone([listeners, tcp, new], #{override_to => cluster}),
+    emqx_conf:tombstone([listeners, tcp, new1], #{override_to => local}),
+    case Group =:= with_defaults_in_file of
+        true ->
+            {_, File} = lists:keyfind(injected_conf_file, 1, Config),
+            ok = file:delete(File);
+        false ->
+            ok
+    end,
     emqx_mgmt_api_test_util:end_suite([emqx_conf]).
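Note the cleanup switch from `emqx_conf:remove/2` to `emqx_conf:tombstone/2`: reading the group comment above, with defaults materialized in a config file a plain remove would let a built-in listener resurface from that file, whereas a tombstone records the deletion itself, so it survives subsequent config loads:

    %% deletion is remembered even though a default exists in the file
    emqx_conf:tombstone([listeners, tcp, new], #{override_to => cluster})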
-t_list_listeners(_) -> +init_per_testcase(Case, Config) -> + try + ?MODULE:Case({init, Config}) + catch + error:function_clause -> + Config + end. + +end_per_testcase(Case, Config) -> + try + ?MODULE:Case({'end', Config}) + catch + error:function_clause -> + ok + end. + +t_max_connection_default({init, Config}) -> + emqx_mgmt_api_test_util:end_suite([emqx_conf]), + Port = integer_to_binary(?PORT), + Bin = <<"listeners.tcp.max_connection_test {bind = \"0.0.0.0:", Port/binary, "\"}">>, + TmpConfName = atom_to_list(?FUNCTION_NAME) ++ ".conf", + TmpConfFullPath = inject_tmp_config_content(TmpConfName, Bin), + emqx_mgmt_api_test_util:init_suite([emqx_conf]), + [{tmp_config_file, TmpConfFullPath} | Config]; +t_max_connection_default({'end', Config}) -> + ok = file:delete(proplists:get_value(tmp_config_file, Config)); +t_max_connection_default(Config) when is_list(Config) -> + #{<<"listeners">> := Listeners} = emqx_mgmt_api_listeners:do_list_listeners(), + Target = lists:filter( + fun(#{<<"id">> := Id}) -> Id =:= 'tcp:max_connection_test' end, + Listeners + ), + DefaultMaxConn = emqx_listeners:default_max_conn(), + ?assertMatch([#{<<"max_connections">> := DefaultMaxConn}], Target), + NewPath = emqx_mgmt_api_test_util:api_path(["listeners", "tcp:max_connection_test"]), + ?assertMatch(#{<<"max_connections">> := DefaultMaxConn}, request(get, NewPath, [], [])), + emqx_conf:tombstone([listeners, tcp, max_connection_test], #{override_to => cluster}), + ok. + +t_list_listeners(Config) when is_list(Config) -> Path = emqx_mgmt_api_test_util:api_path(["listeners"]), Res = request(get, Path, [], []), #{<<"listeners">> := Expect} = emqx_mgmt_api_listeners:do_list_listeners(), @@ -40,7 +114,7 @@ t_list_listeners(_) -> %% POST /listeners ListenerId = <<"tcp:default">>, - NewListenerId = <<"tcp:new">>, + NewListenerId = <<"tcp:new11">>, OriginPath = emqx_mgmt_api_test_util:api_path(["listeners", ListenerId]), NewPath = emqx_mgmt_api_test_util:api_path(["listeners", NewListenerId]), @@ -52,14 +126,17 @@ t_list_listeners(_) -> ?assertMatch({error, {"HTTP/1.1", 404, _}}, request(get, NewPath, [], [])), OriginListener2 = maps:remove(<<"id">>, OriginListener), + Port = integer_to_binary(?PORT), NewConf = OriginListener2#{ - <<"name">> => <<"new">>, - <<"bind">> => <<"0.0.0.0:2883">> + <<"name">> => <<"new11">>, + <<"bind">> => <<"0.0.0.0:", Port/binary>>, + <<"max_connections">> := <<"infinity">> }, Create = request(post, Path, [], NewConf), ?assertEqual(lists:sort(maps:keys(OriginListener)), lists:sort(maps:keys(Create))), Get1 = request(get, NewPath, [], []), ?assertMatch(Create, Get1), + ?assertMatch(#{<<"max_connections">> := <<"infinity">>}, Create), ?assert(is_running(NewListenerId)), %% delete @@ -68,39 +145,39 @@ t_list_listeners(_) -> ?assertMatch({error, {"HTTP/1.1", 404, _}}, request(get, NewPath, [], [])), ok. -t_tcp_crud_listeners_by_id(_) -> +t_tcp_crud_listeners_by_id(Config) when is_list(Config) -> ListenerId = <<"tcp:default">>, NewListenerId = <<"tcp:new">>, MinListenerId = <<"tcp:min">>, BadId = <<"tcp:bad">>, Type = <<"tcp">>, - crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type). + crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type, 31000). -t_ssl_crud_listeners_by_id(_) -> +t_ssl_crud_listeners_by_id(Config) when is_list(Config) -> ListenerId = <<"ssl:default">>, NewListenerId = <<"ssl:new">>, MinListenerId = <<"ssl:min">>, BadId = <<"ssl:bad">>, Type = <<"ssl">>, - crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type). 
+    crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type, 32000).

-t_ws_crud_listeners_by_id(_) ->
+t_ws_crud_listeners_by_id(Config) when is_list(Config) ->
     ListenerId = <<"ws:default">>,
     NewListenerId = <<"ws:new">>,
     MinListenerId = <<"ws:min">>,
     BadId = <<"ws:bad">>,
     Type = <<"ws">>,
-    crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type).
+    crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type, 33000).

-t_wss_crud_listeners_by_id(_) ->
+t_wss_crud_listeners_by_id(Config) when is_list(Config) ->
     ListenerId = <<"wss:default">>,
     NewListenerId = <<"wss:new">>,
     MinListenerId = <<"wss:min">>,
     BadId = <<"wss:bad">>,
     Type = <<"wss">>,
-    crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type).
+    crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type, 34000).

-t_api_listeners_list_not_ready(_Config) ->
+t_api_listeners_list_not_ready(Config) when is_list(Config) ->
     net_kernel:start(['listeners@127.0.0.1', longnames]),
     ct:timetrap({seconds, 120}),
     snabbkaffe:fix_ct_logging(),
@@ -119,8 +196,8 @@ t_api_listeners_list_not_ready(_Config) ->
     L3 = get_tcp_listeners(Node2),

     Comment = #{
-        node1 => rpc:call(Node1, mria_mnesia, running_nodes, []),
-        node2 => rpc:call(Node2, mria_mnesia, running_nodes, [])
+        node1 => rpc:call(Node1, emqx, running_nodes, []),
+        node2 => rpc:call(Node2, emqx, running_nodes, [])
     },

     ?assert(length(L1) > length(L2), Comment),
@@ -130,6 +207,61 @@ t_api_listeners_list_not_ready(_Config) ->
         emqx_common_test_helpers:stop_slave(Node2)
     end.

+t_clear_certs(Config) when is_list(Config) ->
+    ListenerId = <<"ssl:default">>,
+    NewListenerId = <<"ssl:clear">>,
+
+    OriginPath = emqx_mgmt_api_test_util:api_path(["listeners", ListenerId]),
+    NewPath = emqx_mgmt_api_test_util:api_path(["listeners", NewListenerId]),
+    ConfTempT = request(get, OriginPath, [], []),
+    Port = integer_to_binary(?PORT),
+    ConfTemp = ConfTempT#{
+        <<"id">> => NewListenerId,
+        <<"bind">> => <<"0.0.0.0:", Port/binary>>
+    },
+
+    %% create, make sure the cert files are created
+    NewConf = emqx_utils_maps:deep_put(
+        [<<"ssl_options">>, <<"certfile">>], ConfTemp, cert_file("certfile")
+    ),
+    NewConf2 = emqx_utils_maps:deep_put(
+        [<<"ssl_options">>, <<"keyfile">>], NewConf, cert_file("keyfile")
+    ),
+
+    _ = request(post, NewPath, [], NewConf2),
+    ListResult1 = list_pem_dir("ssl", "clear"),
+    ?assertMatch({ok, [_, _]}, ListResult1),
+
+    %% update
+    UpdateConf = emqx_utils_maps:deep_put(
+        [<<"ssl_options">>, <<"keyfile">>], NewConf2, cert_file("keyfile2")
+    ),
+    _ = request(put, NewPath, [], UpdateConf),
+    ListResult2 = list_pem_dir("ssl", "clear"),
+
+    %% make sure the old cert file is deleted
+    ?assertMatch({ok, [_, _]}, ListResult2),
+
+    {ok, ResultList1} = ListResult1,
+    {ok, ResultList2} = ListResult2,
+
+    FindKeyFile = fun(List) ->
+        case lists:search(fun(E) -> lists:prefix("key", E) end, List) of
+            {value, Value} ->
+                Value;
+            _ ->
+                ?assert(false, "Can't find keyfile")
+        end
+    end,
+
+    %% check the keyfile has changed
+    ?assertNotEqual(FindKeyFile(ResultList1), FindKeyFile(ResultList2)),
+
+    %% remove, check all cert files are deleted
+    _ = delete(NewPath),
+    ?assertMatch({error, not_dir}, list_pem_dir("ssl", "clear")),
+    ok.
+
 get_tcp_listeners(Node) ->
     Query = #{query_string => #{<<"type">> => tcp}},
     {200, L} = rpc:call(Node, emqx_mgmt_api_listeners, list_listeners, [get, Query]),
@@ -162,7 +294,7 @@ cluster(Specs) ->
         end}
     ]).
-crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type) -> +crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type, PortBase) -> OriginPath = emqx_mgmt_api_test_util:api_path(["listeners", ListenerId]), NewPath = emqx_mgmt_api_test_util:api_path(["listeners", NewListenerId]), OriginListener = request(get, OriginPath, [], []), @@ -170,15 +302,17 @@ crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type) -> %% create with full options ?assertEqual({error, not_found}, is_running(NewListenerId)), ?assertMatch({error, {"HTTP/1.1", 404, _}}, request(get, NewPath, [], [])), + Port1 = integer_to_binary(?PORT(PortBase)), + Port2 = integer_to_binary(?PORT(PortBase)), NewConf = OriginListener#{ <<"id">> => NewListenerId, - <<"bind">> => <<"0.0.0.0:2883">> + <<"bind">> => <<"0.0.0.0:", Port1/binary>> }, Create = request(post, NewPath, [], NewConf), ?assertEqual(lists:sort(maps:keys(OriginListener)), lists:sort(maps:keys(Create))), Get1 = request(get, NewPath, [], []), ?assertMatch(Create, Get1), - ?assert(is_running(NewListenerId)), + ?assertEqual({true, NewListenerId}, {is_running(NewListenerId), NewListenerId}), %% create with required options MinPath = emqx_mgmt_api_test_util:api_path(["listeners", MinListenerId]), @@ -196,7 +330,7 @@ crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type) -> } -> #{ <<"id">> => MinListenerId, - <<"bind">> => <<"0.0.0.0:3883">>, + <<"bind">> => <<"0.0.0.0:", Port2/binary>>, <<"type">> => Type, <<"ssl_options">> => #{ <<"cacertfile">> => CaCertFile, @@ -207,7 +341,7 @@ crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type) -> _ -> #{ <<"id">> => MinListenerId, - <<"bind">> => <<"0.0.0.0:3883">>, + <<"bind">> => <<"0.0.0.0:", Port2/binary>>, <<"type">> => Type } end, @@ -221,7 +355,7 @@ crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type) -> BadPath = emqx_mgmt_api_test_util:api_path(["listeners", BadId]), BadConf = OriginListener#{ <<"id">> => BadId, - <<"bind">> => <<"0.0.0.0:2883">> + <<"bind">> => <<"0.0.0.0:", Port1/binary>> }, ?assertMatch({error, {"HTTP/1.1", 400, _}}, request(post, BadPath, [], BadConf)), @@ -257,12 +391,12 @@ crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type) -> ?assertEqual([], delete(NewPath)), ok. -t_delete_nonexistent_listener(_) -> +t_delete_nonexistent_listener(Config) when is_list(Config) -> NonExist = emqx_mgmt_api_test_util:api_path(["listeners", "tcp:nonexistent"]), ?assertEqual([], delete(NonExist)), ok. -t_action_listeners(_) -> +t_action_listeners(Config) when is_list(Config) -> ID = "tcp:default", action_listener(ID, "stop", false), action_listener(ID, "start", true), @@ -279,7 +413,7 @@ action_listener(ID, Action, Running) -> request(Method, Url, QueryParams, Body) -> AuthHeader = emqx_mgmt_api_test_util:auth_header_(), case emqx_mgmt_api_test_util:request_api(Method, Url, QueryParams, AuthHeader, Body) of - {ok, Res} -> emqx_json:decode(Res, [return_maps]); + {ok, Res} -> emqx_utils_json:decode(Res, [return_maps]); Error -> Error end. @@ -293,3 +427,39 @@ listener_stats(Listener, ExpectedStats) -> is_running(Id) -> emqx_listeners:is_running(binary_to_atom(Id)). + +list_pem_dir(Type, Name) -> + ListenerDir = emqx_listeners:certs_dir(Type, Name), + Dir = filename:join([emqx:mutable_certs_dir(), ListenerDir]), + case filelib:is_dir(Dir) of + true -> + file:list_dir(Dir); + _ -> + {error, not_dir} + end. 
+
+data_file(Name) ->
+    Dir = code:lib_dir(emqx, test),
+    {ok, Bin} = file:read_file(filename:join([Dir, "data", Name])),
+    Bin.
+
+cert_file(Name) ->
+    data_file(filename:join(["certs", Name])).
+
+default_listeners_hocon_text() ->
+    Sc = #{roots => emqx_schema:fields("listeners")},
+    Listeners = hocon_tconf:make_serializable(Sc, #{}, #{}),
+    Config = #{<<"listeners">> => Listeners},
+    hocon_pp:do(Config, #{}).
+
+%% inject an 'include' at the end of emqx.conf.all
+%% the 'include' can be left behind after the test:
+%% once the included file has been deleted, it is a no-op
+inject_tmp_config_content(TmpFile, Content) ->
+    Etc = filename:join(["etc", "emqx.conf.all"]),
+    Inc = filename:join(["etc", TmpFile]),
+    ConfFile = emqx_common_test_helpers:app_path(emqx_conf, Etc),
+    TmpFileFullPath = emqx_common_test_helpers:app_path(emqx_conf, Inc),
+    ok = file:write_file(TmpFileFullPath, Content),
+    ok = file:write_file(ConfFile, ["\ninclude \"", TmpFileFullPath, "\"\n"], [append]),
+    TmpFileFullPath.
diff --git a/apps/emqx_management/test/emqx_mgmt_api_metrics_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_metrics_SUITE.erl
index 93cb69f0a..7ecfe9817 100644
--- a/apps/emqx_management/test/emqx_mgmt_api_metrics_SUITE.erl
+++ b/apps/emqx_management/test/emqx_mgmt_api_metrics_SUITE.erl
@@ -32,13 +32,13 @@ end_per_suite(_) ->
 t_metrics_api(_) ->
     {ok, MetricsResponse} = request_helper("metrics?aggregate=true"),
-    MetricsFromAPI = emqx_json:decode(MetricsResponse, [return_maps]),
+    MetricsFromAPI = emqx_utils_json:decode(MetricsResponse, [return_maps]),
     AggregateMetrics = emqx_mgmt:get_metrics(),
     match_helper(AggregateMetrics, MetricsFromAPI).

 t_single_node_metrics_api(_) ->
     {ok, MetricsResponse} = request_helper("metrics"),
-    [MetricsFromAPI] = emqx_json:decode(MetricsResponse, [return_maps]),
+    [MetricsFromAPI] = emqx_utils_json:decode(MetricsResponse, [return_maps]),
     LocalNodeMetrics = maps:from_list(
         emqx_mgmt:get_metrics(node()) ++ [{node, to_bin(node())}]
     ),
diff --git a/apps/emqx_management/test/emqx_mgmt_api_nodes_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_nodes_SUITE.erl
index 2bbdf938d..b356bf905 100644
--- a/apps/emqx_management/test/emqx_mgmt_api_nodes_SUITE.erl
+++ b/apps/emqx_management/test/emqx_mgmt_api_nodes_SUITE.erl
@@ -24,18 +24,18 @@ all() ->
     emqx_common_test_helpers:all(?MODULE).

 init_per_suite(Config) ->
-    emqx_mgmt_api_test_util:init_suite([emqx_conf]),
+    emqx_mgmt_api_test_util:init_suite([emqx_conf, emqx_management]),
     Config.

 end_per_suite(_) ->
-    emqx_mgmt_api_test_util:end_suite([emqx_conf]).
+    emqx_mgmt_api_test_util:end_suite([emqx_management, emqx_conf]).
init_per_testcase(t_log_path, Config) -> emqx_config_logger:add_handler(), Log = emqx_conf:get_raw([log], #{}), File = "log/emqx-test.log", - Log1 = emqx_map_lib:deep_put([<<"file_handlers">>, <<"default">>, <<"enable">>], Log, true), - Log2 = emqx_map_lib:deep_put([<<"file_handlers">>, <<"default">>, <<"file">>], Log1, File), + Log1 = emqx_utils_maps:deep_put([<<"file">>, <<"default">>, <<"enable">>], Log, true), + Log2 = emqx_utils_maps:deep_put([<<"file">>, <<"default">>, <<"to">>], Log1, File), {ok, #{}} = emqx_conf:update([log], Log2, #{rawconf_with_defaults => true}), Config; init_per_testcase(_, Config) -> @@ -43,7 +43,7 @@ init_per_testcase(_, Config) -> end_per_testcase(t_log_path, Config) -> Log = emqx_conf:get_raw([log], #{}), - Log1 = emqx_map_lib:deep_put([<<"file_handlers">>, <<"default">>, <<"enable">>], Log, false), + Log1 = emqx_utils_maps:deep_put([<<"file">>, <<"default">>, <<"enable">>], Log, false), {ok, #{}} = emqx_conf:update([log], Log1, #{rawconf_with_defaults => true}), emqx_config_logger:remove_handler(), Config; @@ -53,7 +53,7 @@ end_per_testcase(_, Config) -> t_nodes_api(_) -> NodesPath = emqx_mgmt_api_test_util:api_path(["nodes"]), {ok, Nodes} = emqx_mgmt_api_test_util:request_api(get, NodesPath), - NodesResponse = emqx_json:decode(Nodes, [return_maps]), + NodesResponse = emqx_utils_json:decode(Nodes, [return_maps]), LocalNodeInfo = hd(NodesResponse), Node = binary_to_atom(maps:get(<<"node">>, LocalNodeInfo), utf8), ?assertEqual(Node, node()), @@ -63,19 +63,19 @@ t_nodes_api(_) -> NodePath = emqx_mgmt_api_test_util:api_path(["nodes", atom_to_list(node())]), {ok, NodeInfo} = emqx_mgmt_api_test_util:request_api(get, NodePath), NodeNameResponse = - binary_to_atom(maps:get(<<"node">>, emqx_json:decode(NodeInfo, [return_maps])), utf8), + binary_to_atom(maps:get(<<"node">>, emqx_utils_json:decode(NodeInfo, [return_maps])), utf8), ?assertEqual(node(), NodeNameResponse), BadNodePath = emqx_mgmt_api_test_util:api_path(["nodes", "badnode"]), ?assertMatch( - {error, {_, 400, _}}, + {error, {_, 404, _}}, emqx_mgmt_api_test_util:request_api(get, BadNodePath) ). t_log_path(_) -> NodePath = emqx_mgmt_api_test_util:api_path(["nodes", atom_to_list(node())]), {ok, NodeInfo} = emqx_mgmt_api_test_util:request_api(get, NodePath), - #{<<"log_path">> := Path} = emqx_json:decode(NodeInfo, [return_maps]), + #{<<"log_path">> := Path} = emqx_utils_json:decode(NodeInfo, [return_maps]), ?assertEqual( <<"log">>, filename:basename(Path) @@ -85,7 +85,7 @@ t_node_stats_api(_) -> StatsPath = emqx_mgmt_api_test_util:api_path(["nodes", atom_to_binary(node(), utf8), "stats"]), SystemStats = emqx_mgmt:get_stats(), {ok, StatsResponse} = emqx_mgmt_api_test_util:request_api(get, StatsPath), - Stats = emqx_json:decode(StatsResponse, [return_maps]), + Stats = emqx_utils_json:decode(StatsResponse, [return_maps]), Fun = fun(Key) -> ?assertEqual(maps:get(Key, SystemStats), maps:get(atom_to_binary(Key, utf8), Stats)) @@ -94,7 +94,7 @@ t_node_stats_api(_) -> BadNodePath = emqx_mgmt_api_test_util:api_path(["nodes", "badnode", "stats"]), ?assertMatch( - {error, {_, 400, _}}, + {error, {_, 404, _}}, emqx_mgmt_api_test_util:request_api(get, BadNodePath) ). 
@@ -103,7 +103,7 @@ t_node_metrics_api(_) -> emqx_mgmt_api_test_util:api_path(["nodes", atom_to_binary(node(), utf8), "metrics"]), SystemMetrics = emqx_mgmt:get_metrics(), {ok, MetricsResponse} = emqx_mgmt_api_test_util:request_api(get, MetricsPath), - Metrics = emqx_json:decode(MetricsResponse, [return_maps]), + Metrics = emqx_utils_json:decode(MetricsResponse, [return_maps]), Fun = fun(Key) -> ?assertEqual(maps:get(Key, SystemMetrics), maps:get(atom_to_binary(Key, utf8), Metrics)) @@ -112,7 +112,7 @@ t_node_metrics_api(_) -> BadNodePath = emqx_mgmt_api_test_util:api_path(["nodes", "badnode", "metrics"]), ?assertMatch( - {error, {_, 400, _}}, + {error, {_, 404, _}}, emqx_mgmt_api_test_util:request_api(get, BadNodePath) ). @@ -152,7 +152,7 @@ cluster(Specs) -> Env = [{emqx, boot_modules, []}], emqx_common_test_helpers:emqx_cluster(Specs, [ {env, Env}, - {apps, [emqx_conf]}, + {apps, [emqx_conf, emqx_management]}, {load_schema, false}, {join_to, true}, {env_handler, fun diff --git a/apps/emqx_management/test/emqx_mgmt_api_plugins_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_plugins_SUITE.erl index fd8d8b02e..62fed8211 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_plugins_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_plugins_SUITE.erl @@ -20,7 +20,8 @@ -include_lib("eunit/include/eunit.hrl"). --define(EMQX_PLUGIN_TEMPLATE_VSN, "5.0-rc.1"). +-define(EMQX_PLUGIN_TEMPLATE_NAME, "emqx_plugin_template"). +-define(EMQX_PLUGIN_TEMPLATE_VSN, "5.0.0"). -define(PACKAGE_SUFFIX, ".tar.gz"). all() -> @@ -30,12 +31,11 @@ init_per_suite(Config) -> WorkDir = proplists:get_value(data_dir, Config), ok = filelib:ensure_dir(WorkDir), DemoShDir1 = string:replace(WorkDir, "emqx_mgmt_api_plugins", "emqx_plugins"), - DemoShDir = string:replace(DemoShDir1, "emqx_management", "emqx_plugins"), + DemoShDir = lists:flatten(string:replace(DemoShDir1, "emqx_management", "emqx_plugins")), OrigInstallDir = emqx_plugins:get_config(install_dir, undefined), ok = filelib:ensure_dir(DemoShDir), emqx_mgmt_api_test_util:init_suite([emqx_conf, emqx_plugins]), emqx_plugins:put_config(install_dir, DemoShDir), - [{demo_sh_dir, DemoShDir}, {orig_install_dir, OrigInstallDir} | Config]. end_per_suite(Config) -> @@ -48,18 +48,20 @@ end_per_suite(Config) -> emqx_mgmt_api_test_util:end_suite([emqx_plugins, emqx_conf]), ok. 
-todo_t_plugins(Config) ->
+t_plugins(Config) ->
     DemoShDir = proplists:get_value(demo_sh_dir, Config),
-    PackagePath = build_demo_plugin_package(DemoShDir),
+    PackagePath = get_demo_plugin_package(DemoShDir),
     ct:pal("package_location:~p install dir:~p", [PackagePath, emqx_plugins:install_dir()]),
     NameVsn = filename:basename(PackagePath, ?PACKAGE_SUFFIX),
+    ok = emqx_plugins:ensure_uninstalled(NameVsn),
     ok = emqx_plugins:delete_package(NameVsn),
     ok = install_plugin(PackagePath),
     {ok, StopRes} = describe_plugins(NameVsn),
+    Node = atom_to_binary(node()),
     ?assertMatch(
         #{
             <<"running_status">> := [
-                #{<<"node">> := <<"test@127.0.0.1">>, <<"status">> := <<"stopped">>}
+                #{<<"node">> := Node, <<"status">> := <<"stopped">>}
             ]
         },
         StopRes
@@ -70,7 +72,7 @@ todo_t_plugins(Config) ->
     ?assertMatch(
         #{
             <<"running_status">> := [
-                #{<<"node">> := <<"test@127.0.0.1">>, <<"status">> := <<"running">>}
+                #{<<"node">> := Node, <<"status">> := <<"running">>}
             ]
         },
         StartRes
@@ -80,7 +82,7 @@ todo_t_plugins(Config) ->
     ?assertMatch(
         #{
             <<"running_status">> := [
-                #{<<"node">> := <<"test@127.0.0.1">>, <<"status">> := <<"stopped">>}
+                #{<<"node">> := Node, <<"status">> := <<"stopped">>}
             ]
         },
         StopRes2
@@ -88,17 +90,60 @@ todo_t_plugins(Config) ->
     {ok, []} = uninstall_plugin(NameVsn),
     ok.

+t_install_plugin_matching_existing_name(Config) ->
+    DemoShDir = proplists:get_value(demo_sh_dir, Config),
+    PackagePath = get_demo_plugin_package(DemoShDir),
+    NameVsn = filename:basename(PackagePath, ?PACKAGE_SUFFIX),
+    ok = emqx_plugins:ensure_uninstalled(NameVsn),
+    ok = emqx_plugins:delete_package(NameVsn),
+    NameVsn1 = ?EMQX_PLUGIN_TEMPLATE_NAME ++ "_a" ++ "-" ++ ?EMQX_PLUGIN_TEMPLATE_VSN,
+    PackagePath1 = create_renamed_package(PackagePath, NameVsn1),
+    NameVsn1 = filename:basename(PackagePath1, ?PACKAGE_SUFFIX),
+    ok = emqx_plugins:ensure_uninstalled(NameVsn1),
+    ok = emqx_plugins:delete_package(NameVsn1),
+    %% First install plugin "emqx_plugin_template_a", then
+    %% "emqx_plugin_template", whose name matches the beginning
+    %% of the previously installed plugin's name
+    ok = install_plugin(PackagePath1),
+    ok = install_plugin(PackagePath),
+    {ok, _} = describe_plugins(NameVsn),
+    {ok, _} = describe_plugins(NameVsn1),
+    {ok, _} = uninstall_plugin(NameVsn),
+    {ok, _} = uninstall_plugin(NameVsn1).
+
+t_bad_plugin(Config) ->
+    DemoShDir = proplists:get_value(demo_sh_dir, Config),
+    PackagePathOrig = get_demo_plugin_package(DemoShDir),
+    PackagePath = filename:join([
+        filename:dirname(PackagePathOrig),
+        "bad_plugin-1.0.0.tar.gz"
+    ]),
+    ct:pal("package_location:~p orig:~p", [PackagePath, PackagePathOrig]),
+    %% rename plugin tarball
+    file:copy(PackagePathOrig, PackagePath),
+    file:delete(PackagePathOrig),
+    {ok, {{"HTTP/1.1", 400, "Bad Request"}, _, _}} = install_plugin(PackagePath),
+    ?assertEqual(
+        {error, enoent},
+        file:delete(
+            filename:join([
+                emqx_plugins:install_dir(),
+                filename:basename(PackagePath)
+            ])
+        )
+    ).
+
 list_plugins() ->
     Path = emqx_mgmt_api_test_util:api_path(["plugins"]),
     case emqx_mgmt_api_test_util:request_api(get, Path) of
-        {ok, Apps} -> {ok, emqx_json:decode(Apps, [return_maps])};
+        {ok, Apps} -> {ok, emqx_utils_json:decode(Apps, [return_maps])};
         Error -> Error
     end.

 describe_plugins(Name) ->
     Path = emqx_mgmt_api_test_util:api_path(["plugins", Name]),
     case emqx_mgmt_api_test_util:request_api(get, Path) of
-        {ok, Res} -> {ok, emqx_json:decode(Res, [return_maps])};
+        {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])};
         Error -> Error
     end.
@@ -127,7 +172,7 @@ update_boot_order(Name, MoveBody) -> Auth = emqx_mgmt_api_test_util:auth_header_(), Path = emqx_mgmt_api_test_util:api_path(["plugins", Name, "move"]), case emqx_mgmt_api_test_util:request_api(post, Path, "", Auth, MoveBody) of - {ok, Res} -> {ok, emqx_json:decode(Res, [return_maps])}; + {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; Error -> Error end. @@ -135,11 +180,33 @@ uninstall_plugin(Name) -> DeletePath = emqx_mgmt_api_test_util:api_path(["plugins", Name]), emqx_mgmt_api_test_util:request_api(delete, DeletePath). -build_demo_plugin_package(Dir) -> - #{package := Pkg} = emqx_plugins_SUITE:build_demo_plugin_package(), - FileName = "emqx_plugin_template-" ++ ?EMQX_PLUGIN_TEMPLATE_VSN ++ ?PACKAGE_SUFFIX, +get_demo_plugin_package(Dir) -> + #{package := Pkg} = emqx_plugins_SUITE:get_demo_plugin_package(), + FileName = ?EMQX_PLUGIN_TEMPLATE_NAME ++ "-" ++ ?EMQX_PLUGIN_TEMPLATE_VSN ++ ?PACKAGE_SUFFIX, PluginPath = "./" ++ FileName, Pkg = filename:join([Dir, FileName]), _ = os:cmd("cp " ++ Pkg ++ " " ++ PluginPath), true = filelib:is_regular(PluginPath), PluginPath. + +create_renamed_package(PackagePath, NewNameVsn) -> + {ok, Content} = erl_tar:extract(PackagePath, [compressed, memory]), + {ok, NewName, _Vsn} = emqx_plugins:parse_name_vsn(NewNameVsn), + NewNameB = atom_to_binary(NewName, utf8), + Content1 = lists:map( + fun({F, B}) -> + [_ | PathPart] = filename:split(F), + B1 = update_release_json(PathPart, B, NewNameB), + {filename:join([NewNameVsn | PathPart]), B1} + end, + Content + ), + NewPackagePath = filename:join(filename:dirname(PackagePath), NewNameVsn ++ ?PACKAGE_SUFFIX), + ok = erl_tar:create(NewPackagePath, Content1, [compressed]), + NewPackagePath. + +update_release_json(["release.json"], FileContent, NewName) -> + ContentMap = emqx_utils_json:decode(FileContent, [return_maps]), + emqx_utils_json:encode(ContentMap#{<<"name">> => NewName}); +update_release_json(_FileName, FileContent, _NewName) -> + FileContent. diff --git a/apps/emqx_management/test/emqx_mgmt_api_publish_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_publish_SUITE.erl index 44b8069f5..303a73b41 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_publish_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_publish_SUITE.erl @@ -352,4 +352,4 @@ receive_assert(Topic, Qos, Payload) -> end. decode_json(In) -> - emqx_json:decode(In, [return_maps]). + emqx_utils_json:decode(In, [return_maps]). diff --git a/apps/emqx_management/test/emqx_mgmt_api_stats_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_stats_SUITE.erl index 7236ac9e4..2550bdbe2 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_stats_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_stats_SUITE.erl @@ -24,16 +24,18 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> + meck:expect(emqx, running_nodes, 0, [node(), 'fake@node']), emqx_mgmt_api_test_util:init_suite(), Config. end_per_suite(_) -> + meck:unload(emqx), emqx_mgmt_api_test_util:end_suite(). 
t_stats_api(_) ->
     S = emqx_mgmt_api_test_util:api_path(["stats?aggregate=false"]),
     {ok, S1} = emqx_mgmt_api_test_util:request_api(get, S),
-    [Stats1] = emqx_json:decode(S1, [return_maps]),
+    [Stats1] = emqx_utils_json:decode(S1, [return_maps]),
     SystemStats1 = emqx_mgmt:get_stats(),
     Fun1 = fun(Key) ->
@@ -43,7 +45,7 @@ t_stats_api(_) ->
     StatsPath = emqx_mgmt_api_test_util:api_path(["stats?aggregate=true"]),
     SystemStats = emqx_mgmt:get_stats(),
     {ok, StatsResponse} = emqx_mgmt_api_test_util:request_api(get, StatsPath),
-    Stats = emqx_json:decode(StatsResponse, [return_maps]),
+    Stats = emqx_utils_json:decode(StatsResponse, [return_maps]),
     ?assertEqual(erlang:length(maps:keys(SystemStats)), erlang:length(maps:keys(Stats))),
     Fun = fun(Key) ->
diff --git a/apps/emqx_management/test/emqx_mgmt_api_status_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_status_SUITE.erl
index f0200c410..e8e0b4ac9 100644
--- a/apps/emqx_management/test/emqx_mgmt_api_status_SUITE.erl
+++ b/apps/emqx_management/test/emqx_mgmt_api_status_SUITE.erl
@@ -38,7 +38,10 @@ all() ->
 get_status_tests() ->
     [
         t_status_ok,
-        t_status_not_ok
+        t_status_not_ok,
+        t_status_text_format,
+        t_status_json_format,
+        t_status_bad_format_qs
     ].

 groups() ->
@@ -87,8 +90,10 @@ do_request(Opts) ->
         headers := Headers,
         body := Body0
     } = Opts,
+    QS = maps:get(qs, Opts, ""),
     URL = ?HOST ++ filename:join(Path0),
-    {ok, #{host := Host, port := Port, path := Path}} = emqx_http_lib:uri_parse(URL),
+    {ok, #{host := Host, port := Port, path := Path1}} = emqx_http_lib:uri_parse(URL),
+    Path = Path1 ++ QS,
     %% we must not use `httpc' here, because it keeps retrying when it
     %% receives a 503 with `retry-after' header, and there's no option
     %% to stop that behavior...
@@ -165,3 +170,73 @@ t_status_not_ok(Config) ->
         Headers
     ),
     ok.
+
+t_status_text_format(Config) ->
+    Path = ?config(get_status_path, Config),
+    #{
+        body := Resp,
+        status_code := StatusCode
+    } = do_request(#{
+        method => get,
+        path => Path,
+        qs => "?format=text",
+        headers => [],
+        body => no_body
+    }),
+    ?assertEqual(200, StatusCode),
+    ?assertMatch(
+        {match, _},
+        re:run(Resp, <<"emqx is running$">>)
+    ),
+    ok.
+
+t_status_json_format(Config) ->
+    Path = ?config(get_status_path, Config),
+    #{
+        body := Resp,
+        status_code := StatusCode
+    } = do_request(#{
+        method => get,
+        path => Path,
+        qs => "?format=json",
+        headers => [],
+        body => no_body
+    }),
+    ?assertEqual(200, StatusCode),
+    ?assertMatch(
+        #{<<"app_status">> := <<"running">>},
+        emqx_utils_json:decode(Resp)
+    ),
+    ok.
+
+t_status_bad_format_qs(Config) ->
+    lists:foreach(
+        fun(QS) ->
+            test_status_bad_format_qs(QS, Config)
+        end,
+        [
+            "?a=b",
+            "?format=",
+            "?format=x"
+        ]
+    ).
+
+%% when the query string is invalid, fall back to text format
+test_status_bad_format_qs(QS, Config) ->
+    Path = ?config(get_status_path, Config),
+    #{
+        body := Resp,
+        status_code := StatusCode
+    } = do_request(#{
+        method => get,
+        path => Path,
+        qs => QS,
+        headers => [],
+        body => no_body
+    }),
+    ?assertEqual(200, StatusCode),
+    ?assertMatch(
+        {match, _},
+        re:run(Resp, <<"emqx is running$">>)
+    ),
+    ok.
diff --git a/apps/emqx_management/test/emqx_mgmt_api_subscription_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_subscription_SUITE.erl
index 965ed0997..a23d70f2f 100644
--- a/apps/emqx_management/test/emqx_mgmt_api_subscription_SUITE.erl
+++ b/apps/emqx_management/test/emqx_mgmt_api_subscription_SUITE.erl
@@ -19,6 +19,7 @@
 -compile(nowarn_export_all).

 -include_lib("eunit/include/eunit.hrl").
+-include_lib("common_test/include/ct.hrl").
-define(CLIENTID, <<"api_clientid">>). -define(USERNAME, <<"api_username">>). @@ -44,9 +45,8 @@ init_per_suite(Config) -> end_per_suite(_) -> emqx_mgmt_api_test_util:end_suite(). -t_subscription_api(_) -> - {ok, Client} = emqtt:start_link(#{username => ?USERNAME, clientid => ?CLIENTID, proto_ver => v5}), - {ok, _} = emqtt:connect(Client), +t_subscription_api(Config) -> + Client = proplists:get_value(client, Config), {ok, _, _} = emqtt:subscribe( Client, [ {?TOPIC1, [{rh, ?TOPIC1RH}, {rap, ?TOPIC1RAP}, {nl, ?TOPIC1NL}, {qos, ?TOPIC1QOS}]} @@ -55,10 +55,10 @@ t_subscription_api(_) -> {ok, _, _} = emqtt:subscribe(Client, ?TOPIC2), Path = emqx_mgmt_api_test_util:api_path(["subscriptions"]), {ok, Response} = emqx_mgmt_api_test_util:request_api(get, Path), - Data = emqx_json:decode(Response, [return_maps]), + Data = emqx_utils_json:decode(Response, [return_maps]), Meta = maps:get(<<"meta">>, Data), ?assertEqual(1, maps:get(<<"page">>, Meta)), - ?assertEqual(emqx_mgmt:max_row_limit(), maps:get(<<"limit">>, Meta)), + ?assertEqual(emqx_mgmt:default_row_limit(), maps:get(<<"limit">>, Meta)), ?assertEqual(2, maps:get(<<"count">>, Meta)), Subscriptions = maps:get(<<"data">>, Data), ?assertEqual(length(Subscriptions), 2), @@ -84,40 +84,90 @@ t_subscription_api(_) -> ?assertEqual(maps:get(<<"topic">>, Subscriptions2), ?TOPIC2), ?assertEqual(maps:get(<<"clientid">>, Subscriptions2), ?CLIENTID), - QS = uri_string:compose_query([ + QS = [ {"clientid", ?CLIENTID}, {"topic", ?TOPIC2_TOPIC_ONLY}, {"node", atom_to_list(node())}, {"qos", "0"}, {"share_group", "test_group"}, {"match_topic", "t/#"} - ]), + ], Headers = emqx_mgmt_api_test_util:auth_header_(), - {ok, ResponseTopic2} = emqx_mgmt_api_test_util:request_api(get, Path, QS, Headers), - DataTopic2 = emqx_json:decode(ResponseTopic2, [return_maps]), - Meta2 = maps:get(<<"meta">>, DataTopic2), + DataTopic2 = #{<<"meta">> := Meta2} = request_json(get, QS, Headers), ?assertEqual(1, maps:get(<<"page">>, Meta2)), - ?assertEqual(emqx_mgmt:max_row_limit(), maps:get(<<"limit">>, Meta2)), + ?assertEqual(emqx_mgmt:default_row_limit(), maps:get(<<"limit">>, Meta2)), ?assertEqual(1, maps:get(<<"count">>, Meta2)), SubscriptionsList2 = maps:get(<<"data">>, DataTopic2), - ?assertEqual(length(SubscriptionsList2), 1), + ?assertEqual(length(SubscriptionsList2), 1). 
- MatchQs = uri_string:compose_query([ +t_subscription_fuzzy_search(Config) -> + Client = proplists:get_value(client, Config), + Topics = [ + <<"t/foo">>, + <<"t/foo/bar">>, + <<"t/foo/baz">>, + <<"topic/foo/bar">>, + <<"topic/foo/baz">> + ], + _ = [{ok, _, _} = emqtt:subscribe(Client, T) || T <- Topics], + + Headers = emqx_mgmt_api_test_util:auth_header_(), + MatchQs = [ {"clientid", ?CLIENTID}, {"node", atom_to_list(node())}, - {"qos", "0"}, {"match_topic", "t/#"} - ]), + ], - {ok, MatchRes} = emqx_mgmt_api_test_util:request_api(get, Path, MatchQs, Headers), - MatchData = emqx_json:decode(MatchRes, [return_maps]), - MatchMeta = maps:get(<<"meta">>, MatchData), - ?assertEqual(1, maps:get(<<"page">>, MatchMeta)), - ?assertEqual(emqx_mgmt:max_row_limit(), maps:get(<<"limit">>, MatchMeta)), - %% count equals 0 in fuzzy searching - ?assertEqual(0, maps:get(<<"count">>, MatchMeta)), - MatchSubs = maps:get(<<"data">>, MatchData), - ?assertEqual(1, length(MatchSubs)), + MatchData1 = #{<<"meta">> := MatchMeta1} = request_json(get, MatchQs, Headers), + ?assertEqual(1, maps:get(<<"page">>, MatchMeta1)), + ?assertEqual(emqx_mgmt:default_row_limit(), maps:get(<<"limit">>, MatchMeta1)), + %% count is undefined in fuzzy searching + ?assertNot(maps:is_key(<<"count">>, MatchMeta1)), + ?assertMatch(3, length(maps:get(<<"data">>, MatchData1))), + ?assertEqual(false, maps:get(<<"hasnext">>, MatchMeta1)), + LimitMatchQuery = [ + {"clientid", ?CLIENTID}, + {"match_topic", "+/+/+"}, + {"limit", "3"} + ], + + MatchData2 = #{<<"meta">> := MatchMeta2} = request_json(get, LimitMatchQuery, Headers), + ?assertEqual(#{<<"page">> => 1, <<"limit">> => 3, <<"hasnext">> => true}, MatchMeta2), + ?assertEqual(3, length(maps:get(<<"data">>, MatchData2))), + + MatchData2P2 = + #{<<"meta">> := MatchMeta2P2} = + request_json(get, [{"page", "2"} | LimitMatchQuery], Headers), + ?assertEqual(#{<<"page">> => 2, <<"limit">> => 3, <<"hasnext">> => false}, MatchMeta2P2), + ?assertEqual(1, length(maps:get(<<"data">>, MatchData2P2))). + +%% checks that we can list when there are subscriptions made by +%% `emqx:subscribe'. +t_list_with_internal_subscription(_Config) -> + emqx:subscribe(<<"some/topic">>), + QS = [], + Headers = emqx_mgmt_api_test_util:auth_header_(), + ?assertMatch( + #{<<"data">> := [#{<<"clientid">> := null}]}, + request_json(get, QS, Headers) + ), + ok. + +request_json(Method, Query, Headers) when is_list(Query) -> + Qs = uri_string:compose_query(Query), + {ok, MatchRes} = emqx_mgmt_api_test_util:request_api(Method, path(), Qs, Headers), + emqx_utils_json:decode(MatchRes, [return_maps]). + +path() -> + emqx_mgmt_api_test_util:api_path(["subscriptions"]). + +init_per_testcase(_TC, Config) -> + {ok, Client} = emqtt:start_link(#{username => ?USERNAME, clientid => ?CLIENTID, proto_ver => v5}), + {ok, _} = emqtt:connect(Client), + [{client, Client} | Config]. + +end_per_testcase(_TC, Config) -> + Client = proplists:get_value(client, Config), emqtt:disconnect(Client). diff --git a/apps/emqx_management/test/emqx_mgmt_api_test_util.erl b/apps/emqx_management/test/emqx_mgmt_api_test_util.erl index 5bb0ba818..985b95d5b 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_test_util.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_test_util.erl @@ -24,14 +24,22 @@ init_suite() -> init_suite([]). init_suite(Apps) -> + init_suite(Apps, fun set_special_configs/1, #{}). + +init_suite(Apps, SetConfigs) when is_function(SetConfigs) -> + init_suite(Apps, SetConfigs, #{}). 
+
+init_suite(Apps, SetConfigs, Opts) ->
     mria:start(),
     application:load(emqx_management),
-    emqx_common_test_helpers:start_apps(Apps ++ [emqx_dashboard], fun set_special_configs/1).
+    emqx_common_test_helpers:start_apps(Apps ++ [emqx_dashboard], SetConfigs, Opts),
+    emqx_common_test_http:create_default_app().

 end_suite() ->
     end_suite([]).

 end_suite(Apps) ->
+    emqx_common_test_http:delete_default_app(),
     application:unload(emqx_management),
     emqx_common_test_helpers:stop_apps(Apps ++ [emqx_dashboard]),
     emqx_config:delete_override_conf_files(),
@@ -43,8 +51,24 @@ set_special_configs(emqx_dashboard) ->
 set_special_configs(_App) ->
     ok.

+%% 'request' behaves exactly like 'request_api'; it exists only for
+%% compatibility with 'emqx_dashboard_api_test_helpers:request'
+request(Method, Url) ->
+    request(Method, Url, []).
+
+request(Method, Url, Body) ->
+    request_api_with_body(Method, Url, Body).
+
+uri(Parts) ->
+    emqx_dashboard_api_test_helpers:uri(Parts).
+
+%% with compatible_mode the return value matches that of
+%% 'emqx_dashboard_api_test_helpers:request'
+request_api_with_body(Method, Url, Body) ->
+    Opts = #{compatible_mode => true, httpc_req_opts => [{body_format, binary}]},
+    request_api(Method, Url, [], auth_header_(), Body, Opts).
+
 request_api(Method, Url) ->
-    request_api(Method, Url, [], [], [], #{}).
+    request_api(Method, Url, auth_header_()).

 request_api(Method, Url, AuthOrHeaders) ->
     request_api(Method, Url, [], AuthOrHeaders, [], #{}).
@@ -84,16 +108,21 @@ request_api(Method, Url, QueryParams, AuthOrHeaders, Body, Opts) when
     end,
     do_request_api(
         Method,
-        {NewUrl, build_http_header(AuthOrHeaders), "application/json", emqx_json:encode(Body)},
+        {NewUrl, build_http_header(AuthOrHeaders), "application/json",
+            emqx_utils_json:encode(Body)},
         Opts
     ).

 do_request_api(Method, Request, Opts) ->
     ReturnAll = maps:get(return_all, Opts, false),
-    ct:pal("Method: ~p, Request: ~p", [Method, Request]),
-    case httpc:request(Method, Request, [], []) of
+    CompatibleMode = maps:get(compatible_mode, Opts, false),
+    HttpcReqOpts = maps:get(httpc_req_opts, Opts, []),
+    ct:pal("Method: ~p, Request: ~p, Opts: ~p", [Method, Request, Opts]),
+    case httpc:request(Method, Request, [], HttpcReqOpts) of
         {error, socket_closed_remotely} ->
             {error, socket_closed_remotely};
+        {ok, {{_, Code, _}, _Headers, Body}} when CompatibleMode ->
+            {ok, Code, Body};
         {ok, {{"HTTP/1.1", Code, _} = Reason, Headers, Body}} when
             Code >= 200 andalso Code =< 299 andalso ReturnAll
         ->
@@ -109,10 +138,7 @@ do_request_api(Method, Request, Opts) ->
     end.

 auth_header_() ->
-    Username = <<"admin">>,
-    Password = <<"public">>,
-    {ok, Token} = emqx_dashboard_admin:sign_token(Username, Password),
-    {"Authorization", "Bearer " ++ binary_to_list(Token)}.
+    emqx_common_test_http:default_auth_header().

 build_http_header(X) when is_list(X) ->
     X;
@@ -131,7 +157,7 @@ api_path_without_base_path(Parts) ->
 %%
 %% Usage with RequestData:
 %%   Payload = [{upload_type, <<"user_picture">>}],
-%%   PayloadContent = jsx:encode(Payload),
+%%   PayloadContent = emqx_utils_json:encode(Payload),
 %%   RequestData = [
 %%       {<<"payload">>, PayloadContent}
 %%   ]
diff --git a/apps/emqx_management/test/emqx_mgmt_api_topics_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_topics_SUITE.erl
index dcea88d59..659ae0d44 100644
--- a/apps/emqx_management/test/emqx_mgmt_api_topics_SUITE.erl
+++ b/apps/emqx_management/test/emqx_mgmt_api_topics_SUITE.erl
@@ -19,18 +19,25 @@
 -compile(nowarn_export_all).

 -include_lib("eunit/include/eunit.hrl").
+-include_lib("common_test/include/ct.hrl"). + +-define(ROUTE_TAB, emqx_route). all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> emqx_mgmt_api_test_util:init_suite(), - Config. + Slave = emqx_common_test_helpers:start_slave(some_node, []), + [{slave, Slave} | Config]. -end_per_suite(_) -> +end_per_suite(Config) -> + Slave = ?config(slave, Config), + emqx_common_test_helpers:stop_slave(Slave), + mria:clear_table(?ROUTE_TAB), emqx_mgmt_api_test_util:end_suite(). -t_nodes_api(_) -> +t_nodes_api(Config) -> Node = atom_to_binary(node(), utf8), Topic = <<"test_topic">>, {ok, Client} = emqtt:start_link(#{ @@ -42,10 +49,10 @@ t_nodes_api(_) -> %% list all Path = emqx_mgmt_api_test_util:api_path(["topics"]), {ok, Response} = emqx_mgmt_api_test_util:request_api(get, Path), - RoutesData = emqx_json:decode(Response, [return_maps]), + RoutesData = emqx_utils_json:decode(Response, [return_maps]), Meta = maps:get(<<"meta">>, RoutesData), ?assertEqual(1, maps:get(<<"page">>, Meta)), - ?assertEqual(emqx_mgmt:max_row_limit(), maps:get(<<"limit">>, Meta)), + ?assertEqual(emqx_mgmt:default_row_limit(), maps:get(<<"limit">>, Meta)), ?assertEqual(1, maps:get(<<"count">>, Meta)), Data = maps:get(<<"data">>, RoutesData), Route = erlang:hd(Data), @@ -61,7 +68,7 @@ t_nodes_api(_) -> ]), Headers = emqx_mgmt_api_test_util:auth_header_(), {ok, MatchResponse} = emqx_mgmt_api_test_util:request_api(get, Path, QS, Headers), - MatchData = emqx_json:decode(MatchResponse, [return_maps]), + MatchData = emqx_utils_json:decode(MatchResponse, [return_maps]), ?assertMatch( #{<<"count">> := 1, <<"page">> := 1, <<"limit">> := 100}, maps:get(<<"meta">>, MatchData) @@ -72,8 +79,17 @@ t_nodes_api(_) -> ), %% get topics/:topic + %% We add another route here to ensure that the response handles + %% multiple routes for a single topic + Slave = ?config(slave, Config), + ok = emqx_router:add_route(Topic, Slave), RoutePath = emqx_mgmt_api_test_util:api_path(["topics", Topic]), {ok, RouteResponse} = emqx_mgmt_api_test_util:request_api(get, RoutePath), - RouteData = emqx_json:decode(RouteResponse, [return_maps]), - ?assertEqual(Topic, maps:get(<<"topic">>, RouteData)), - ?assertEqual(Node, maps:get(<<"node">>, RouteData)). + ok = emqx_router:delete_route(Topic, Slave), + + [ + #{<<"topic">> := Topic, <<"node">> := Node1}, + #{<<"topic">> := Topic, <<"node">> := Node2} + ] = emqx_utils_json:decode(RouteResponse, [return_maps]), + + ?assertEqual(lists:usort([Node, atom_to_binary(Slave)]), lists:usort([Node1, Node2])). diff --git a/apps/emqx_management/test/emqx_mgmt_api_trace_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_trace_SUITE.erl index 8e8c5b06f..0102eb56c 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_trace_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_trace_SUITE.erl @@ -19,17 +19,11 @@ -compile(export_all). -compile(nowarn_export_all). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). --include_lib("emqx/include/emqx.hrl"). -include_lib("kernel/include/file.hrl"). -include_lib("stdlib/include/zip.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). --define(HOST, "http://127.0.0.1:18083/"). --define(API_VERSION, "v5"). --define(BASE_PATH, "api"). 
- %%-------------------------------------------------------------------- %% Setups %%-------------------------------------------------------------------- @@ -47,14 +41,14 @@ end_per_suite(_) -> t_http_test(_Config) -> emqx_trace:clear(), load(), - Header = auth_header_(), %% list - {ok, Empty} = request_api(get, api_path("trace"), Header), + {ok, Empty} = request_api(get, api_path("trace")), ?assertEqual([], json(Empty)), %% create ErrorTrace = #{}, - {error, {"HTTP/1.1", 400, "Bad Request"}, Body} = - request_api(post, api_path("trace"), Header, ErrorTrace), + Opts = #{return_all => true}, + {error, {{"HTTP/1.1", 400, "Bad Request"}, _, Body}} = + emqx_mgmt_api_test_util:request_api(post, api_path("trace"), [], [], ErrorTrace, Opts), ?assertMatch(#{<<"code">> := <<"BAD_REQUEST">>}, json(Body)), Name = <<"test-name">>, @@ -64,15 +58,15 @@ t_http_test(_Config) -> {<<"topic">>, <<"/x/y/z">>} ], - {ok, Create} = request_api(post, api_path("trace"), Header, Trace), + {ok, Create} = request_api(post, api_path("trace"), Trace), ?assertMatch(#{<<"name">> := Name}, json(Create)), - {ok, List} = request_api(get, api_path("trace"), Header), + {ok, List} = request_api(get, api_path("trace")), [Data] = json(List), ?assertEqual(Name, maps:get(<<"name">>, Data)), %% update - {ok, Update} = request_api(put, api_path("trace/test-name/stop"), Header, #{}), + {ok, Update} = request_api(put, api_path("trace/test-name/stop"), #{}), ?assertEqual( #{ <<"enable">> => false, @@ -82,10 +76,10 @@ t_http_test(_Config) -> ), ?assertMatch( - {error, {"HTTP/1.1", 404, _}, _}, - request_api(put, api_path("trace/test-name-not-found/stop"), Header, #{}) + {error, {"HTTP/1.1", 404, _}}, + request_api(put, api_path("trace/test-name-not-found/stop"), #{}) ), - {ok, List1} = request_api(get, api_path("trace"), Header), + {ok, List1} = request_api(get, api_path("trace")), [Data1] = json(List1), Node = atom_to_binary(node()), ?assertMatch( @@ -102,11 +96,11 @@ t_http_test(_Config) -> ), %% delete - {ok, Delete} = request_api(delete, api_path("trace/test-name"), Header), + {ok, Delete} = request_api(delete, api_path("trace/test-name")), ?assertEqual(<<>>, Delete), - {error, {"HTTP/1.1", 404, "Not Found"}, DeleteNotFound} = - request_api(delete, api_path("trace/test-name"), Header), + {error, {{"HTTP/1.1", 404, "Not Found"}, _, DeleteNotFound}} = + emqx_mgmt_api_test_util:request_api(delete, api_path("trace/test-name"), [], [], [], Opts), ?assertEqual( #{ <<"code">> => <<"NOT_FOUND">>, @@ -115,14 +109,14 @@ t_http_test(_Config) -> json(DeleteNotFound) ), - {ok, List2} = request_api(get, api_path("trace"), Header), + {ok, List2} = request_api(get, api_path("trace")), ?assertEqual([], json(List2)), %% clear - {ok, Create1} = request_api(post, api_path("trace"), Header, Trace), + {ok, Create1} = request_api(post, api_path("trace"), Trace), ?assertMatch(#{<<"name">> := Name}, json(Create1)), - {ok, Clear} = request_api(delete, api_path("trace"), Header), + {ok, Clear} = request_api(delete, api_path("trace")), ?assertEqual(<<>>, Clear), unload(), @@ -130,27 +124,26 @@ t_http_test(_Config) -> t_create_failed(_Config) -> load(), - Header = auth_header_(), Trace = [{<<"type">>, <<"topic">>}, {<<"topic">>, <<"/x/y/z">>}], BadName1 = {<<"name">>, <<"test/bad">>}, ?assertMatch( - {error, {"HTTP/1.1", 400, _}, _}, - request_api(post, api_path("trace"), Header, [BadName1 | Trace]) + {error, {"HTTP/1.1", 400, _}}, + request_api(post, api_path("trace"), [BadName1 | Trace]) ), BadName2 = {<<"name">>, list_to_binary(lists:duplicate(257, "t"))}, 
?assertMatch( - {error, {"HTTP/1.1", 400, _}, _}, - request_api(post, api_path("trace"), Header, [BadName2 | Trace]) + {error, {"HTTP/1.1", 400, _}}, + request_api(post, api_path("trace"), [BadName2 | Trace]) ), %% already_exist GoodName = {<<"name">>, <<"test-name-0">>}, - {ok, Create} = request_api(post, api_path("trace"), Header, [GoodName | Trace]), + {ok, Create} = request_api(post, api_path("trace"), [GoodName | Trace]), ?assertMatch(#{<<"name">> := <<"test-name-0">>}, json(Create)), ?assertMatch( - {error, {"HTTP/1.1", 409, _}, _}, - request_api(post, api_path("trace"), Header, [GoodName | Trace]) + {error, {"HTTP/1.1", 409, _}}, + request_api(post, api_path("trace"), [GoodName | Trace]) ), %% MAX Limited @@ -168,17 +161,25 @@ t_create_failed(_Config) -> ), GoodName1 = {<<"name">>, <<"test-name-1">>}, ?assertMatch( - {error, {"HTTP/1.1", 400, _}, _}, - request_api(post, api_path("trace"), Header, [GoodName1 | Trace]) + {error, {"HTTP/1.1", 400, _}}, + request_api(post, api_path("trace"), [GoodName1 | Trace]) ), %% clear - ?assertMatch({ok, _}, request_api(delete, api_path("trace"), Header, [])), - {ok, Create} = request_api(post, api_path("trace"), Header, [GoodName | Trace]), + ?assertMatch({ok, _}, request_api(delete, api_path("trace"), [])), + {ok, Create1} = request_api(post, api_path("trace"), [GoodName | Trace]), + ?assertMatch(#{<<"name">> := <<"test-name-0">>}, json(Create1)), %% new name but same trace GoodName2 = {<<"name">>, <<"test-name-1">>}, ?assertMatch( - {error, {"HTTP/1.1", 409, _}, _}, - request_api(post, api_path("trace"), Header, [GoodName2 | Trace]) + {error, {"HTTP/1.1", 409, _}}, + request_api(post, api_path("trace"), [GoodName2 | Trace]) + ), + %% new name but bad payload-encode + GoodName3 = {<<"name">>, <<"test-name-2">>}, + PayloadEncode = {<<"payload_encode">>, <<"bad">>}, + ?assertMatch( + {error, {"HTTP/1.1", 400, _}}, + request_api(post, api_path("trace"), [GoodName3, PayloadEncode | Trace]) ), unload(), @@ -200,14 +201,13 @@ t_log_file(_Config) -> || _ <- lists:seq(1, 5) ], ok = emqx_trace_handler_SUITE:filesync(Name, clientid), - Header = auth_header_(), ?assertMatch( - {error, {"HTTP/1.1", 404, "Not Found"}, _}, - request_api(get, api_path("trace/test_client_not_found/log_detail"), Header) + {error, {"HTTP/1.1", 404, "Not Found"}}, + request_api(get, api_path("trace/test_client_not_found/log_detail")) ), - {ok, Detail} = request_api(get, api_path("trace/test_client_id/log_detail"), Header), + {ok, Detail} = request_api(get, api_path("trace/test_client_id/log_detail")), ?assertMatch([#{<<"mtime">> := _, <<"size">> := _, <<"node">> := _}], json(Detail)), - {ok, Binary} = request_api(get, api_path("trace/test_client_id/download"), Header), + {ok, Binary} = request_api(get, api_path("trace/test_client_id/download")), {ok, [ Comment, #zip_file{ @@ -219,7 +219,7 @@ t_log_file(_Config) -> ZipNamePrefix = lists:flatten(io_lib:format("~s-trace_~s", [node(), Name])), ?assertNotEqual(nomatch, re:run(ZipName, [ZipNamePrefix])), Path = api_path("trace/test_client_id/download?node=" ++ atom_to_list(node())), - {ok, Binary2} = request_api(get, Path, Header), + {ok, Binary2} = request_api(get, Path), ?assertMatch( {ok, [ Comment, @@ -230,25 +230,22 @@ t_log_file(_Config) -> ]}, zip:table(Binary2) ), - {error, {_, 400, _}, _} = + {error, {_, 404, _}} = request_api( get, - api_path("trace/test_client_id/download?node=unknonwn_node"), - Header + api_path("trace/test_client_id/download?node=unknown_node") ), - {error, {_, 400, _}, _} = + {error, {_, 404, _}} = 
request_api( get, % known atom but unknown node - api_path("trace/test_client_id/download?node=undefined"), - Header + api_path("trace/test_client_id/download?node=undefined") ), ?assertMatch( - {error, {"HTTP/1.1", 404, "Not Found"}, _}, + {error, {"HTTP/1.1", 404, "Not Found"}}, request_api( get, - api_path("trace/test_client_not_found/download?node=" ++ atom_to_list(node())), - Header + api_path("trace/test_client_not_found/download?node=" ++ atom_to_list(node())) ) ), ok = emqtt:disconnect(Client), @@ -295,74 +292,99 @@ t_stream_log(_Config) -> ct:pal("FileName: ~p", [File]), {ok, FileBin} = file:read_file(File), ct:pal("FileBin: ~p ~s", [byte_size(FileBin), FileBin]), - Header = auth_header_(), - {ok, Binary} = request_api(get, api_path("trace/test_stream_log/log?bytes=10"), Header), + {ok, Binary} = request_api(get, api_path("trace/test_stream_log/log?bytes=10")), #{<<"meta">> := Meta, <<"items">> := Bin} = json(Binary), ?assertEqual(10, byte_size(Bin)), ?assertEqual(#{<<"position">> => 10, <<"bytes">> => 10}, Meta), Path = api_path("trace/test_stream_log/log?position=20&bytes=10"), - {ok, Binary1} = request_api(get, Path, Header), + {ok, Binary1} = request_api(get, Path), #{<<"meta">> := Meta1, <<"items">> := Bin1} = json(Binary1), ?assertEqual(#{<<"position">> => 30, <<"bytes">> => 10}, Meta1), ?assertEqual(10, byte_size(Bin1)), - {error, {_, 400, _}, _} = + ct:pal("~p vs ~p", [Bin, Bin1]), + %% in theory they could be the same but we know they shouldn't + ?assertNotEqual(Bin, Bin1), + BadReqPath = api_path("trace/test_stream_log/log?&bytes=1000000000000"), + {error, {_, 400, _}} = request_api(get, BadReqPath), + meck:new(file, [passthrough, unstick]), + meck:expect(file, read, 2, {error, enomem}), + {error, {_, 503, _}} = request_api(get, Path), + meck:unload(file), + {error, {_, 404, _}} = request_api( get, - api_path("trace/test_stream_log/log?node=unknonwn_node"), - Header + api_path("trace/test_stream_log/log?node=unknown_node") ), - {error, {_, 400, _}, _} = + {error, {_, 404, _}} = request_api( get, % known atom but not a node - api_path("trace/test_stream_log/log?node=undefined"), - Header + api_path("trace/test_stream_log/log?node=undefined") ), - {error, {_, 404, _}, _} = + {error, {_, 404, _}} = request_api( get, - api_path("trace/test_stream_log_not_found/log"), - Header + api_path("trace/test_stream_log_not_found/log") ), unload(), ok. 
+t_trace_files_are_deleted_after_download(_Config) ->
+    ClientId = <<"client-test-delete-after-download">>,
+    Now = erlang:system_time(second),
+    Name = <<"test_client_id">>,
+    load(),
+    create_trace(Name, ClientId, Now),
+    {ok, Client} = emqtt:start_link([{clean_start, true}, {clientid, ClientId}]),
+    {ok, _} = emqtt:connect(Client),
+    [
+        begin
+            _ = emqtt:ping(Client)
+        end
+     || _ <- lists:seq(1, 5)
+    ],
+    ok = emqtt:disconnect(Client),
+    ok = emqx_trace_handler_SUITE:filesync(Name, clientid),
+
+    %% Check that files have been removed after download and that zip
+    %% directories use unique session ids
+    ?check_trace(
+        begin
+            %% Download two zip files
+            Path = api_path(["trace/", binary_to_list(Name), "/download"]),
+            {ok, Binary1} = request_api(get, Path),
+            {ok, Binary2} = request_api(get, Path),
+            ?assertMatch({ok, _}, zip:table(Binary1)),
+            ?assertMatch({ok, _}, zip:table(Binary2))
+        end,
+        fun(Trace) ->
+            [
+                #{session_id := SessionId1, zip_dir := ZipDir1},
+                #{session_id := SessionId2, zip_dir := ZipDir2}
+            ] = ?of_kind(trace_api_download_trace_log, Trace),
+            ?assertEqual({error, enoent}, file:list_dir(ZipDir1)),
+            ?assertEqual({error, enoent}, file:list_dir(ZipDir2)),
+            ?assertNotEqual(SessionId1, SessionId2),
+            ?assertNotEqual(ZipDir1, ZipDir2)
+        end
+    ),
+    ok.
+
 to_rfc3339(Second) ->
     list_to_binary(calendar:system_time_to_rfc3339(Second)).

-auth_header_() ->
-    auth_header_("admin", "public").
+request_api(Method, Url) ->
+    request_api(Method, Url, []).

-auth_header_(User, Pass) ->
-    Encoded = base64:encode_to_string(lists:append([User, ":", Pass])),
-    {"Authorization", "Basic " ++ Encoded}.
-
-request_api(Method, Url, Auth) -> do_request_api(Method, {Url, [Auth]}).
-
-request_api(Method, Url, Auth, Body) ->
-    Request = {Url, [Auth], "application/json", emqx_json:encode(Body)},
-    do_request_api(Method, Request).
-
-do_request_api(Method, Request) ->
-    ct:pal("Method: ~p, Request: ~p", [Method, Request]),
-    case httpc:request(Method, Request, [], [{body_format, binary}]) of
-        {error, socket_closed_remotely} ->
-            {error, socket_closed_remotely};
-        {error, {shutdown, server_closed}} ->
-            {error, server_closed};
-        {ok, {{"HTTP/1.1", Code, _}, _Headers, Return}} when
-            Code =:= 200 orelse Code =:= 201 orelse Code =:= 204
-        ->
-            {ok, Return};
-        {ok, {Reason, _Header, Body}} ->
-            {error, Reason, Body}
-    end.
+request_api(Method, Url, Body) ->
+    Opts = #{httpc_req_opts => [{body_format, binary}]},
+    emqx_mgmt_api_test_util:request_api(Method, Url, [], [], Body, Opts).

 api_path(Path) ->
-    ?HOST ++ filename:join([?BASE_PATH, ?API_VERSION, Path]).
+    emqx_mgmt_api_test_util:api_path([Path]).

 json(Data) ->
-    {ok, Jsx} = emqx_json:safe_decode(Data, [return_maps]),
+    {ok, Jsx} = emqx_utils_json:safe_decode(Data, [return_maps]),
     Jsx.

 load() ->
diff --git a/apps/emqx_modules/README.md b/apps/emqx_modules/README.md
new file mode 100644
index 000000000..dfa349514
--- /dev/null
+++ b/apps/emqx_modules/README.md
@@ -0,0 +1,53 @@
+# EMQX Modules
+
+This application provides several minor functional modules that are not part of the
+MQTT protocol standard: "Delayed Publish", "Topic Rewrite", "Topic Metrics" and "Telemetry".
+
+
+## Delayed Publish
+
+After enabling this module, a message published to a topic prefixed with
+`$delayed/{Interval}/{Topic}` is held back for `{Interval}` seconds before
+being published to `{Topic}`.
+
+For more details, see [Delayed Publish](https://www.emqx.io/docs/en/v5.0/mqtt/mqtt-delayed-publish.html).
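+
+For illustration, publishing a delayed message with the `emqtt` Erlang client might
+look like the following sketch (the broker address, topic and payload are examples):
+
+```erlang
+%% Assumes a broker reachable on localhost:1883.
+{ok, Client} = emqtt:start_link(#{host => "localhost", port => 1883}),
+{ok, _} = emqtt:connect(Client),
+%% Deliver <<"hello">> to topic <<"x/y">> after a 10-second delay.
+{ok, _} = emqtt:publish(Client, <<"$delayed/10/x/y">>, <<"hello">>, 1),
+ok = emqtt:disconnect(Client).
+```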
+
+See [Enabling/Disabling Delayed Publish via HTTP API](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/MQTT/paths/~1mqtt~1delayed/put).
+
+
+## Topic Rewrite
+
+Topic Rewrite lets users configure rules that change the topic strings clients use
+when subscribing or publishing.
+
+This feature is useful when translating between different topic structures.
+For example, a device that has already shipped and cannot be upgraded may still use a
+legacy topic scheme; if the topic format has since changed, rewrite rules can map the
+old topics onto the new format and hide the difference. An illustrative configuration
+sketch is included at the end of this README.
+
+For more details, see [Topic Rewrite](https://www.emqx.io/docs/en/v5.0/mqtt/mqtt-topic-rewrite.html).
+
+See [List all rewrite rules](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/MQTT/paths/~1mqtt~1topic_rewrite/get)
+and [Create or Update rewrite rules](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/MQTT/paths/~1mqtt~1topic_rewrite/put).
+
+
+## Topic Metrics
+
+Topic Metrics lets users monitor selected topics, counting the number of messages,
+the QoS distribution, and the message rate for each monitored topic.
+
+For more details, see [Topic Metrics](https://www.emqx.io/docs/en/v5.0/dashboard/diagnose.html#topic-metrics).
+
+See the HTTP API docs: [List all monitored topics](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/MQTT/paths/~1mqtt~1topic_metrics/get),
+[Create topic metrics](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/MQTT/paths/~1mqtt~1topic_metrics/post)
+and [Get the monitored result](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/MQTT/paths/~1mqtt~1topic_metrics~1%7Btopic%7D/get).
+
+
+## Telemetry
+
+Telemetry collects non-sensitive information about the EMQX cluster.
+
+For more details, see [Telemetry](https://www.emqx.io/docs/en/v5.0/telemetry/telemetry.html#telemetry).
+
+See the HTTP API docs: [Enable/Disable telemetry](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/Telemetry/paths/~1telemetry~1status/put),
+[Get the enabled status](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/Telemetry/paths/~1telemetry~1status/get)
+and [Get the collected telemetry data](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/Telemetry/paths/~1telemetry~1data/get).
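+
+## Appendix: Topic Rewrite configuration sketch
+
+The following sketch shows the shape of a single rewrite rule, using the rule fields
+(`action`, `source_topic`, `re`, `dest_topic`) described by this application's schema;
+the topics and regular expression are examples:
+
+```
+rewrite = [
+  {
+    action       = publish
+    source_topic = "x/#"
+    re           = "^x/y/(.+)$"
+    dest_topic   = "z/y/$1"
+  }
+]
+```
+
+With this rule, a message published to `x/y/1` is rewritten to `z/y/1`; a topic that
+matches `x/#` but not the regular expression is left unchanged.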
diff --git a/apps/emqx_modules/i18n/emqx_delayed_api_i18n.conf b/apps/emqx_modules/i18n/emqx_delayed_api_i18n.conf deleted file mode 100644 index d16d61ccf..000000000 --- a/apps/emqx_modules/i18n/emqx_delayed_api_i18n.conf +++ /dev/null @@ -1,164 +0,0 @@ -emqx_delayed_api { - - view_status_api { - desc { - en: "Get delayed status" - zh: "查看慢订阅状态" - } - } - - update_api { - desc { - en: "Enable or disable delayed, set max delayed messages" - zh: "开启或者关闭功能,或者设置延迟消息数量上限" - } - } - - update_success { - desc { - en: "Enable or disable delayed successfully" - zh: "开启或者关闭功能操作成功" - } - } - - illegality_limit { - desc { - en: "Max limit illegality" - zh: "数量上限不合法" - } - } - - get_message_api { - desc { - en: "View delayed message" - zh: "查看延迟消息" - } - } - - node { - desc { - en: "The node where message from" - zh: "消息的来源节点" - } - } - - msgid { - desc { - en: "Delayed Message ID" - zh: "延迟消息 ID" - } - } - - bad_msgid_format { - desc { - en: "Bad Message ID format" - zh: "消息 ID 格式错误" - } - } - - msgid_not_found { - desc { - en: "Message ID not found" - zh: "未找到对应消息" - } - } - - delete_api { - desc { - en: "Delete delayed message" - zh: "删除延迟消息" - } - } - - list_api { - desc { - en: "List delayed messages" - zh: "查看延迟消息列表" - } - } - - view_page { - desc { - en: "View page" - zh: "查看的页数" - } - } - - view_limit { - desc { - en: "Page limit" - zh: "每页数量" - } - } - - count { - desc { - en: "Count of delayed messages" - zh: "延迟消息总数" - } - } - - publish_at { - desc { - en: "Clinet publish message time, in RFC 3339 format" - zh: "客户端发送时间, RFC 3339 格式" - } - } - - delayed_interval { - desc { - en: "Delayed interval(second)" - zh: "延迟时间(秒)" - } - } - - delayed_remaining { - desc { - en: "Delayed remaining(second)" - zh: "剩余时间(秒)" - } - } - - expected_at { - desc { - en: "Expect publish time, in RFC 3339 format" - zh: "期望的发送时间, RFC 3339 格式" - } - } - - topic { - desc { - en: "Topic" - zh: "主题" - } - } - - qos { - desc { - en: "QoS" - zh: "QoS" - } - } - - from_clientid { - desc { - en: "From ClientID" - zh: "消息的 ClientID" - } - } - - from_username { - desc { - en: "From Username" - zh: "消息的 Username" - } - } - - payload { - desc { - en: "Payload, base64 encoded. Payload will be set to 'PAYLOAD_TO_LARGE' if its length is larger than 2048 bytes" - zh: "消息内容, base64 格式。如果消息的大小超过 2048 字节,则消息内容会被设置为 'PAYLOAD_TO_LARGE'" - } - } - -} diff --git a/apps/emqx_modules/i18n/emqx_modules_schema_i18n.conf b/apps/emqx_modules/i18n/emqx_modules_schema_i18n.conf deleted file mode 100644 index 248f85341..000000000 --- a/apps/emqx_modules/i18n/emqx_modules_schema_i18n.conf +++ /dev/null @@ -1,90 +0,0 @@ -emqx_modules_schema { - - rewrite { - desc { - en: """The topic rewriting function of EMQX supports rewriting topic A to topic B when the client subscribes to topics, publishes messages, and cancels subscriptions according to user-configured rules. -Each rewrite rule consists of three parts: subject filter, regular expression, and target expression. -Under the premise that the subject rewriting function is enabled, when EMQX receives a subject-based MQTT message such as a `PUBLISH` message, -it will use the subject of the message to sequentially match the subject filter part of the rule in the configuration file. If the match is successful, -the regular expression is used to extract the information in the subject, and then replaced with the target expression to form a new subject. -Variables in the format of `$N` can be used in the target expression to match the elements extracted from the regular expression. 
-The value of `$N` is the Nth element extracted from the regular expression. For example, `$1` is the regular expression. The first element extracted by the expression. -It should be noted that EMQX uses reverse order to read the rewrite rules in the configuration file. -When a topic can match the topic filter of multiple topic rewrite rules at the same time, EMQX will only use the first rule it matches. Rewrite. -If the regular expression in this rule does not match the subject of the MQTT message, the rewriting will fail, and no other rules will be attempted for rewriting. -Therefore, users need to carefully design MQTT message topics and topic rewriting rules when using them.""" - zh: """EMQX 的主题重写功能支持根据用户配置的规则在客户端订阅主题、发布消息、取消订阅的时候将 A 主题重写为 B 主题。 -重写规则分为 Pub 规则和 Sub 规则,Pub 规则匹配 PUSHLISH 报文携带的主题,Sub 规则匹配 SUBSCRIBE、UNSUBSCRIBE 报文携带的主题。 -每条重写规则都由主题过滤器、正则表达式、目标表达式三部分组成。 -在主题重写功能开启的前提下,EMQX 在收到诸如 PUBLISH 报文等带有主题的 MQTT 报文时,将使用报文中的主题去依次匹配配置文件中规则的主题过滤器部分,一旦成功匹配,则使用正则表达式提取主题中的信息,然后替换至目标表达式以构成新的主题。 -目标表达式中可以使用 `$N` 这种格式的变量匹配正则表达中提取出来的元素,`$N` 的值为正则表达式中提取出来的第 N 个元素,比如 `$1` 即为正则表达式提取的第一个元素。 -需要注意的是,EMQX 使用倒序读取配置文件中的重写规则,当一条主题可以同时匹配多条主题重写规则的主题过滤器时,EMQX 仅会使用它匹配到的第一条规则进行重写,如果该条规则中的正则表达式与 MQTT 报文主题不匹配,则重写失败,不会再尝试使用其他的规则进行重写。 -因此用户在使用时需要谨慎的设计 MQTT 报文主题以及主题重写规则。""" - } - label { - en: """Topic Rewrite""" - zh: """主题重写""" - } - } - - tr_source_topic { - desc { - en: """Source topic, specified by the client.""" - zh: """源主题,客户端业务指定的主题""" - } - label { - en: """Source Topic""" - zh: """源主题""" - } - } - - tr_action { - desc { - en: """Topic rewriting takes effect on the type of operation: - - `subscribe`: Rewrite topic when client do subscribe. - - `publish`: Rewrite topic when client do publish. - - `all`: Both""" - - zh: """主题重写在哪种操作上生效: - - `subscribe`:订阅时重写主题; - - `publish`:发布时重写主题; - -`all`:全部重写主题""" - } - label { - en: """Action""" - zh: """Action""" - } - } - - tr_re { - desc { - en: """Regular expressions""" - zh: """正则表达式""" - } - } - - tr_dest_topic { - desc { - en: """Destination topic.""" - zh: """目标主题。""" - } - label { - en: """Destination Topic""" - zh: """目标主题""" - } - } - - enable { - desc { - en: "Enable this feature" - zh: "是否开启该功能" - } - } - - max_delayed_messages { - desc { - en: "Maximum number of delayed messages (0 is no limit)." 
- zh: "延迟消息的数量上限(0 代表无限)" - } - } -} diff --git a/apps/emqx_modules/i18n/emqx_rewrite_api_i18n.conf b/apps/emqx_modules/i18n/emqx_rewrite_api_i18n.conf deleted file mode 100644 index 91f15cb8c..000000000 --- a/apps/emqx_modules/i18n/emqx_rewrite_api_i18n.conf +++ /dev/null @@ -1,25 +0,0 @@ -emqx_rewrite_api { - - list_topic_rewrite_api { - desc { - en: """List all rewrite rules""" - zh: """列出全部主题重写规则""" - } - } - - update_topic_rewrite_api { - desc { - en: """Update all rewrite rules""" - zh: """更新全部主题重写规则""" - } - } - - - update_topic_rewrite_api_response413 { - desc { - en: """Rules count exceed max limit""" - zh: """超出主题重写规则数量上限""" - } - } - -} diff --git a/apps/emqx_modules/i18n/emqx_telemetry_api_i18n.conf b/apps/emqx_modules/i18n/emqx_telemetry_api_i18n.conf deleted file mode 100644 index a8f562065..000000000 --- a/apps/emqx_modules/i18n/emqx_telemetry_api_i18n.conf +++ /dev/null @@ -1,121 +0,0 @@ -emqx_telemetry_api { - - get_telemetry_status_api { - desc { - en: """Get telemetry status""" - zh: """获取遥测状态""" - } - } - - update_telemetry_status_api { - desc { - en: """Enable or disable telemetry""" - zh: """更新遥测状态""" - } - } - - get_telemetry_data_api { - desc { - en: """Get telemetry data""" - zh: """获取遥测数据""" - } - } - - enable { - desc { - en: """Enable telemetry""" - zh: """启用遥测""" - } - } - - emqx_version { - desc { - en: """Get emqx version""" - zh: """获取 emqx 版本""" - } - } - - license { - desc { - en: """Get license information""" - zh: """获取 license 信息""" - } - } - - os_name { - desc { - en: """Get OS name""" - zh: """获取操作系统名称""" - } - } - - os_version { - desc { - en: """Get OS version""" - zh: """获取操作系统版本""" - } - } - - otp_version { - desc { - en: """Get Erlang OTP version""" - zh: """获取 OTP 版本""" - } - } - - up_time { - desc { - en: """Get uptime""" - zh: """获取运行时间""" - } - } - - uuid { - desc { - en: """Get UUID""" - zh: """获取 UUID""" - } - } - - nodes_uuid { - desc { - en: """Get nodes UUID""" - zh: """获取节点 UUID""" - } - } - - active_plugins { - desc { - en: """Get active plugins""" - zh: """获取活跃插件""" - } - } - - active_modules { - desc { - en: """Get active modules""" - zh: """获取活跃模块""" - } - } - - num_clients { - desc { - en: """Get number of clients""" - zh: """获取客户端数量""" - } - } - - messages_received { - desc { - en: """Get number of messages received""" - zh: """获取接收到的消息数量""" - } - } - - messages_sent { - desc { - en: """Get number of messages sent""" - zh: """获取发送的消息数量""" - } - } -} diff --git a/apps/emqx_modules/i18n/emqx_topic_metrics_api_i18n.conf b/apps/emqx_modules/i18n/emqx_topic_metrics_api_i18n.conf deleted file mode 100644 index 3725ccd13..000000000 --- a/apps/emqx_modules/i18n/emqx_topic_metrics_api_i18n.conf +++ /dev/null @@ -1,238 +0,0 @@ -emqx_topic_metrics_api { - get_topic_metrics_api { - desc { - en: """List Topic metrics""" - zh: """获取主题监控数据""" - } - } - - reset_topic_metrics_api{ - desc { - en: """Reset telemetry status""" - zh: """重置主题监控状态""" - } - } - - post_topic_metrics_api { - desc { - en: """Create Topic metrics""" - zh: """创建主题监控数据""" - } - } - - gat_topic_metrics_data_api { - desc { - en: """Get Topic metrics""" - zh: """获取主题监控数据""" - } - } - - delete_topic_metrics_data_api { - desc { - en: """Delete Topic metrics""" - zh: """删除主题监控数据""" - } - } - - topic_metrics_api_response409 { - desc { - en: """Conflict. Topic metrics exceeded max limit 512""" - zh: """冲突。主题监控数据超过最大限制512""" - } - } - - topic_metrics_api_response400 { - desc { - en: """Bad Request. 
Already exists or bad topic name""" - zh: """错误请求。已存在或错误的主题名称""" - } - } - - topic_metrics_api_response404 { - desc { - en: """Not Found. Topic metrics not found""" - zh: """未找到。主题监控数据未找到""" - } - } - - reset_topic_desc { - en: """Topic Name. If this parameter is not present, all created topic metrics will be reset.""" - zh: """主题名称。如果此参数不存在,则所有创建的主题监控数据都将重置。""" - } - - topic { - desc { - en: """Topic""" - zh: """主题""" - } - } - - topic_in_body { - desc { - en: """Raw topic string""" - zh: """主题字符串""" - } - } - - topic_in_path { - desc { - en: """Topic string. Notice: Topic string in URL path must be encoded""" - zh: """主题字符串。注意:主题字符串在url路径中必须编码""" - } - } - - action { - desc { - en: """Action. Only supports reset""" - zh: """操作,仅支持 reset""" - } - } - - create_time { - desc { - en: """Create time""" - zh: """创建时间。标准 rfc3339 时间格式,例如:2018-01-01T12:00:00Z""" - } - } - - reset_time { - desc { - en: """Reset time. In rfc3339. Nullable if never reset""" - zh: """重置时间。标准 rfc3339 时间格式,例如:2018-01-01T12:00:00Z。如果从未重置则为空""" - } - } - - metrics { - desc { - en: """Metrics""" - zh: """监控数据""" - } - } - - message_dropped_count { - desc { - en: """Dropped messages count""" - zh: """丢弃消息数量""" - } - } - - message_dropped_rate { - desc { - en: """Dropped messages rate""" - zh: """丢弃消息速率""" - } - } - - message_in_count { - desc { - en: """In messages count""" - zh: """接收消息数量""" - } - } - - message_in_rate { - desc { - en: """In messages rate""" - zh: """接收消息速率""" - } - } - - message_out_count { - desc { - en: """Out messages count""" - zh: """发送消息数量""" - } - } - - message_out_rate { - desc { - en: """Out messages rate""" - zh: """发送消息速率""" - } - } - - message_qos0_in_count { - desc { - en: """QoS0 in messages count""" - zh: """QoS0 接收消息数量""" - } - } - - message_qos0_in_rate { - desc { - en: """QoS0 in messages rate""" - zh: """QoS0 接收消息速率""" - } - } - - message_qos0_out_count { - desc { - en: """QoS0 out messages count""" - zh: """QoS0 发送消息数量""" - } - } - - message_qos0_out_rate { - desc { - en: """QoS0 out messages rate""" - zh: """QoS0 发送消息速率""" - } - } - - message_qos1_in_count { - desc { - en: """QoS1 in messages count""" - zh: """QoS1 接收消息数量""" - } - } - - message_qos1_in_rate { - desc { - en: """QoS1 in messages rate""" - zh: """QoS1 接收消息速率""" - } - } - - message_qos1_out_count { - desc { - en: """QoS1 out messages count""" - zh: """QoS1 发送消息数量""" - } - } - - message_qos1_out_rate { - desc { - en: """QoS1 out messages rate""" - zh: """QoS1 发送消息速率""" - } - } - - message_qos2_in_count { - desc { - en: """QoS2 in messages count""" - zh: """QoS2 接收消息数量""" - } - } - - message_qos2_in_rate { - desc { - en: """QoS2 in messages rate""" - zh: """QoS2 接收消息速率""" - } - } - - message_qos2_out_count { - desc { - en: """QoS2 out messages count""" - zh: """QoS2 发送消息数量""" - } - } - - message_qos2_out_rate { - desc { - en: """QoS2 out messages rate""" - zh: """QoS2 发送消息速率""" - } - } - -} diff --git a/apps/emqx_modules/rebar.config b/apps/emqx_modules/rebar.config index 9688d5043..ff542aed7 100644 --- a/apps/emqx_modules/rebar.config +++ b/apps/emqx_modules/rebar.config @@ -2,6 +2,7 @@ {deps, [ {emqx, {path, "../emqx"}}, + {emqx_utils, {path, "../emqx_utils"}}, {emqx_conf, {path, "../emqx_conf"}} ]}. {project_plugins, [erlfmt]}.
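As an aside on the topic-rewrite description in the deleted i18n entry above: a concrete rule makes the `$N` substitution easier to follow. A minimal sketch, assuming the rule keys mirror the `tr_*` schema fields (`action`, `source_topic`, `re`, `dest_topic`) and using `emqx_rewrite:update/1` as exercised by `emqx_rewrite_SUITE` later in this diff; the topics and regex themselves are hypothetical:

```erlang
%% Hypothetical rule: publishes to x/y/... are rewritten to z/y/...,
%% where $1 is the first capture group of the regular expression.
Rule = #{
    <<"action">> => <<"publish">>,
    <<"source_topic">> => <<"x/#">>,
    <<"re">> => <<"^x/y/(.+)$">>,
    <<"dest_topic">> => <<"z/y/$1">>
},
ok = emqx_rewrite:update([Rule]),

%% The capture that $1 refers to, in plain re-module terms:
{match, [<<"z">>]} =
    re:run(<<"x/y/z">>, <<"^x/y/(.+)$">>, [{capture, all_but_first, binary}]).
```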
diff --git a/apps/emqx_modules/src/emqx_delayed.erl b/apps/emqx_modules/src/emqx_delayed.erl index 91cdbedd3..85313c181 100644 --- a/apps/emqx_modules/src/emqx_delayed.erl +++ b/apps/emqx_modules/src/emqx_delayed.erl @@ -328,7 +328,7 @@ handle_info(Info, State) -> terminate(_Reason, #{stats_timer := StatsTimer} = State) -> emqx_conf:remove_handler([delayed]), - emqx_misc:cancel_timer(StatsTimer), + emqx_utils:cancel_timer(StatsTimer), do_load_or_unload(false, State). code_change(_Vsn, State, _Extra) -> @@ -370,14 +370,14 @@ ensure_publish_timer({Ts, _Id}, State = #{publish_timer := undefined}) -> ensure_publish_timer({Ts, _Id}, State = #{publish_timer := TRef, publish_at := PubAt}) when Ts < PubAt -> - ok = emqx_misc:cancel_timer(TRef), + ok = emqx_utils:cancel_timer(TRef), ensure_publish_timer(Ts, ?NOW, State); ensure_publish_timer(_Key, State) -> State. ensure_publish_timer(Ts, Now, State) -> Interval = max(1, Ts - Now), - TRef = emqx_misc:start_timer(Interval, do_publish), + TRef = emqx_utils:start_timer(Interval, do_publish), State#{publish_timer := TRef, publish_at := Now + Interval}. do_publish(Key, Now) -> @@ -418,7 +418,7 @@ do_load_or_unload(true, State) -> State; do_load_or_unload(false, #{publish_timer := PubTimer} = State) -> emqx_hooks:del('message.publish', {?MODULE, on_message_publish}), - emqx_misc:cancel_timer(PubTimer), + emqx_utils:cancel_timer(PubTimer), ets:delete_all_objects(?TAB), State#{publish_timer := undefined, publish_at := 0}; do_load_or_unload(_, State) -> diff --git a/apps/emqx_modules/src/emqx_delayed_api.erl b/apps/emqx_modules/src/emqx_delayed_api.erl index d4e7e5b90..766d23d6b 100644 --- a/apps/emqx_modules/src/emqx_delayed_api.erl +++ b/apps/emqx_modules/src/emqx_delayed_api.erl @@ -52,7 +52,7 @@ -define(INVALID_NODE, 'INVALID_NODE'). api_spec() -> - emqx_dashboard_swagger:spec(?MODULE). + emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true}). paths() -> [ @@ -202,9 +202,9 @@ delayed_message(get, #{bindings := #{node := NodeBin, msgid := HexId}}) -> {200, Message#{payload => base64:encode(Payload)}} end; {error, not_found} -> - {404, generate_http_code_map(not_found, Id)}; + {404, generate_http_code_map(not_found, HexId)}; {badrpc, _} -> - {400, generate_http_code_map(invalid_node, Id)} + {400, generate_http_code_map(invalid_node, NodeBin)} end end ); @@ -271,19 +271,19 @@ generate_http_code_map(id_schema_error, Id) -> #{ code => ?MESSAGE_ID_SCHEMA_ERROR, message => - iolist_to_binary(io_lib:format("Message ID ~p schema error", [Id])) + iolist_to_binary(io_lib:format("Message ID ~s schema error", [Id])) }; generate_http_code_map(not_found, Id) -> #{ code => ?MESSAGE_ID_NOT_FOUND, message => - iolist_to_binary(io_lib:format("Message ID ~p not found", [Id])) + iolist_to_binary(io_lib:format("Message ID ~s not found", [Id])) }; generate_http_code_map(invalid_node, Node) -> #{ code => ?INVALID_NODE, message => - iolist_to_binary(io_lib:format("The node name ~p is invalid", [Node])) + iolist_to_binary(io_lib:format("The node name ~s is invalid", [Node])) }. 
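A note on the `~p` to `~s` changes in `generate_http_code_map/2` above: the message IDs and node names are binaries, and `~p` pretty-prints Erlang terms, so the old HTTP error messages carried `<<"...">>` wrappers. A minimal shell illustration (the ID value is made up):

```erlang
1> lists:flatten(io_lib:format("Message ID ~p not found", [<<"abc">>])).
"Message ID <<\"abc\">> not found"
2> lists:flatten(io_lib:format("Message ID ~s not found", [<<"abc">>])).
"Message ID abc not found"
```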
make_maybe(X, Error, Fun) -> diff --git a/apps/emqx_modules/src/emqx_modules.app.src b/apps/emqx_modules/src/emqx_modules.app.src index 20f8a76fc..b984cf658 100644 --- a/apps/emqx_modules/src/emqx_modules.app.src +++ b/apps/emqx_modules/src/emqx_modules.app.src @@ -1,9 +1,9 @@ %% -*- mode: erlang -*- {application, emqx_modules, [ {description, "EMQX Modules"}, - {vsn, "5.0.9"}, + {vsn, "5.0.14"}, {modules, []}, - {applications, [kernel, stdlib, emqx]}, + {applications, [kernel, stdlib, emqx, emqx_ctl]}, {mod, {emqx_modules_app, []}}, {registered, [emqx_modules_sup]}, {env, []} diff --git a/apps/emqx_modules/src/emqx_modules_schema.erl b/apps/emqx_modules/src/emqx_modules_schema.erl index ddb8f37a3..9057333d5 100644 --- a/apps/emqx_modules/src/emqx_modules_schema.erl +++ b/apps/emqx_modules/src/emqx_modules_schema.erl @@ -34,8 +34,16 @@ roots() -> [ "delayed", "telemetry", - array("rewrite", #{desc => "List of topic rewrite rules."}), - array("topic_metrics", #{desc => "List of topics whose metrics are reported."}) + array("rewrite", #{ + desc => "List of topic rewrite rules.", + importance => ?IMPORTANCE_HIDDEN, + default => [] + }), + array("topic_metrics", #{ + desc => "List of topics whose metrics are reported.", + importance => ?IMPORTANCE_HIDDEN, + default => [] + }) ]. fields("telemetry") -> diff --git a/apps/emqx_modules/src/emqx_telemetry.erl b/apps/emqx_modules/src/emqx_telemetry.erl index 16fef8d34..3b27302df 100644 --- a/apps/emqx_modules/src/emqx_telemetry.erl +++ b/apps/emqx_modules/src/emqx_telemetry.erl @@ -161,7 +161,7 @@ handle_call(enable, _From, State) -> FirstReportTimeoutMS = timer:seconds(10), {reply, ok, ensure_report_timer(FirstReportTimeoutMS, State)}; handle_call(disable, _From, State = #state{timer = Timer}) -> - emqx_misc:cancel_timer(Timer), + emqx_utils:cancel_timer(Timer), {reply, ok, State#state{timer = undefined}}; handle_call(get_node_uuid, _From, State = #state{node_uuid = UUID}) -> {reply, {ok, UUID}, State}; @@ -205,7 +205,7 @@ ensure_report_timer(State = #state{report_interval = ReportInterval}) -> ensure_report_timer(ReportInterval, State). ensure_report_timer(ReportInterval, State) -> - State#state{timer = emqx_misc:start_timer(ReportInterval, time_to_report_telemetry_data)}. + State#state{timer = emqx_utils:start_timer(ReportInterval, time_to_report_telemetry_data)}. os_info() -> case erlang:system_info(os_type) of @@ -266,7 +266,7 @@ uptime() -> element(1, erlang:statistics(wall_clock)). 
nodes_uuid() -> - Nodes = lists:delete(node(), mria_mnesia:running_nodes()), + Nodes = lists:delete(node(), mria:running_nodes()), lists:foldl( fun(Node, Acc) -> case emqx_telemetry_proto_v1:get_node_uuid(Node) of @@ -356,7 +356,7 @@ get_telemetry(State0 = #state{node_uuid = NodeUUID, cluster_uuid = ClusterUUID}) report_telemetry(State0 = #state{url = URL}) -> {State, Data} = get_telemetry(State0), - case emqx_json:safe_encode(Data) of + case emqx_utils_json:safe_encode(Data) of {ok, Bin} -> httpc_request(post, URL, [], Bin), ?tp(debug, telemetry_data_reported, #{}); diff --git a/apps/emqx_modules/src/emqx_telemetry_api.erl b/apps/emqx_modules/src/emqx_telemetry_api.erl index 798c3ad17..b7209d146 100644 --- a/apps/emqx_modules/src/emqx_telemetry_api.erl +++ b/apps/emqx_modules/src/emqx_telemetry_api.erl @@ -243,7 +243,7 @@ status(put, #{body := Body}) -> data(get, _Request) -> case emqx_modules_conf:is_telemetry_enabled() of true -> - {200, emqx_json:encode(get_telemetry_data())}; + {200, emqx_utils_json:encode(get_telemetry_data())}; false -> {404, #{ code => ?NOT_FOUND, diff --git a/apps/emqx_modules/src/emqx_topic_metrics.erl b/apps/emqx_modules/src/emqx_topic_metrics.erl index dfc6b07ab..efe309b9e 100644 --- a/apps/emqx_modules/src/emqx_topic_metrics.erl +++ b/apps/emqx_modules/src/emqx_topic_metrics.erl @@ -179,7 +179,12 @@ deregister_all() -> gen_server:call(?MODULE, {deregister, all}). is_registered(Topic) -> - ets:member(?TAB, Topic). + try + ets:member(?TAB, Topic) + catch + error:badarg -> + false + end. all_registered_topics() -> [Topic || {Topic, _} <- ets:tab2list(?TAB)]. @@ -201,7 +206,7 @@ reset() -> init([Opts]) -> erlang:process_flag(trap_exit, true), - ok = emqx_tables:new(?TAB, [{read_concurrency, true}]), + ok = emqx_utils_ets:new(?TAB, [{read_concurrency, true}]), erlang:send_after(timer:seconds(?TICKING_INTERVAL), self(), ticking), Fun = fun(#{topic := Topic}, CurrentSpeeds) -> diff --git a/apps/emqx_modules/src/emqx_topic_metrics_api.erl b/apps/emqx_modules/src/emqx_topic_metrics_api.erl index ef3c2be69..50b586228 100644 --- a/apps/emqx_modules/src/emqx_topic_metrics_api.erl +++ b/apps/emqx_modules/src/emqx_topic_metrics_api.erl @@ -321,7 +321,7 @@ operate_topic_metrics(delete, #{bindings := #{topic := Topic}}) -> %%-------------------------------------------------------------------- cluster_accumulation_metrics() -> - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), case emqx_topic_metrics_proto_v1:metrics(Nodes) of {SuccResList, []} -> {ok, accumulate_nodes_metrics(SuccResList)}; @@ -330,7 +330,7 @@ cluster_accumulation_metrics() -> end. cluster_accumulation_metrics(Topic) -> - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), case emqx_topic_metrics_proto_v1:metrics(Nodes, Topic) of {SuccResList, []} -> case @@ -422,12 +422,12 @@ do_accumulation_metrics(MetricsIn, {MetricsAcc, _}) -> ). reset() -> - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), _ = emqx_topic_metrics_proto_v1:reset(Nodes), ok. reset(Topic) -> - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), case emqx_topic_metrics_proto_v1:reset(Nodes, Topic) of {SuccResList, []} -> case diff --git a/apps/emqx_modules/test/emqx_delayed_SUITE.erl b/apps/emqx_modules/test/emqx_delayed_SUITE.erl index ab35e37dc..8c271f0c1 100644 --- a/apps/emqx_modules/test/emqx_delayed_SUITE.erl +++ b/apps/emqx_modules/test/emqx_delayed_SUITE.erl @@ -40,9 +40,7 @@ all() -> emqx_common_test_helpers:all(?MODULE). 
init_per_suite(Config) -> - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF), emqx_common_test_helpers:start_apps([emqx_conf, emqx_modules]), Config. @@ -169,10 +167,10 @@ t_cluster(_) -> emqx_delayed_proto_v1:get_delayed_message(node(), Id) ), - ?assertEqual( - emqx_delayed:get_delayed_message(Id), - emqx_delayed_proto_v1:get_delayed_message(node(), Id) - ), + %% The 'local' and the 'fake-remote' values should be the same, + %% however there is a race condition, so we just assert that they are both 'ok' tuples + ?assertMatch({ok, _}, emqx_delayed:get_delayed_message(Id)), + ?assertMatch({ok, _}, emqx_delayed_proto_v1:get_delayed_message(node(), Id)), ok = emqx_delayed_proto_v1:delete_delayed_message(node(), Id), @@ -229,6 +227,14 @@ t_banned_delayed(_) -> }), snabbkaffe:start_trace(), + {ok, SubRef} = + snabbkaffe:subscribe( + ?match_event(#{?snk_kind := ignore_delayed_message_publish}), + _NEvents = 2, + _Timeout = 10000, + 0 + ), + lists:foreach( fun(ClientId) -> Msg = emqx_message:make(ClientId, <<"$delayed/1/bc">>, <<"payload">>), @@ -237,8 +243,7 @@ [ClientId1, ClientId1, ClientId1, ClientId2, ClientId2] ), - timer:sleep(2000), - Trace = snabbkaffe:collect_trace(), + {ok, Trace} = snabbkaffe:receive_events(SubRef), snabbkaffe:stop(), emqx_banned:delete(Who), mnesia:clear_table(emqx_delayed), diff --git a/apps/emqx_modules/test/emqx_delayed_api_SUITE.erl b/apps/emqx_modules/test/emqx_delayed_api_SUITE.erl index 96cdf7840..b5995a47a 100644 --- a/apps/emqx_modules/test/emqx_delayed_api_SUITE.erl +++ b/apps/emqx_modules/test/emqx_delayed_api_SUITE.erl @@ -26,37 +26,28 @@ <<"max_delayed_messages">> => <<"0">> }). --import(emqx_dashboard_api_test_helpers, [request/2, request/3, uri/1]). +-import(emqx_mgmt_api_test_util, [request/2, request/3, uri/1]). all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF, #{ - raw_with_default => true - }), - - ok = emqx_common_test_helpers:start_apps( - [emqx_conf, emqx_modules, emqx_dashboard], - fun set_special_configs/1 + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF), + ok = emqx_mgmt_api_test_util:init_suite( + [emqx_conf, emqx_modules] ), emqx_delayed:load(), Config. end_per_suite(Config) -> ok = emqx_delayed:unload(), - emqx_common_test_helpers:stop_apps([emqx_conf, emqx_dashboard, emqx_modules]), + emqx_mgmt_api_test_util:end_suite([emqx_conf, emqx_modules]), Config. init_per_testcase(_, Config) -> {ok, _} = emqx_cluster_rpc:start_link(), Config. -set_special_configs(emqx_dashboard) -> - emqx_dashboard_api_test_helpers:set_default_config(); -set_special_configs(_App) -> - ok. - %%------------------------------------------------------------------------------ %% Test Cases %%------------------------------------------------------------------------------ @@ -235,8 +226,8 @@ t_large_payload(_) -> %%-------------------------------------------------------------------- decode_json(Data) -> - BinJson = emqx_json:decode(Data, [return_maps]), - emqx_map_lib:unsafe_atom_key_map(BinJson). + BinJson = emqx_utils_json:decode(Data, [return_maps]), + emqx_utils_maps:unsafe_atom_key_map(BinJson). clear_all_record() -> ets:delete_all_objects(emqx_delayed).
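The `t_banned_delayed` change above replaces a fixed `timer:sleep(2000)` plus `snabbkaffe:collect_trace()` with an explicit subscription, so the test blocks exactly until the expected number of trace events arrives (or a timeout fires) instead of relying on a guessed delay. The general shape of the pattern, as a sketch (the event kind, counts, and the trigger helper are placeholders):

```erlang
%% Requires -include_lib("snabbkaffe/include/snabbkaffe.hrl").
{ok, SubRef} = snabbkaffe:subscribe(
    ?match_event(#{?snk_kind := my_event}),   %% events to wait for
    _NEvents = 2,                             %% how many of them
    _Timeout = 10000,                         %% give up after 10s
    0
),
trigger_code_under_test(),                    %% hypothetical helper
%% Blocks until 2 matching events were observed, then returns them.
{ok, Events} = snabbkaffe:receive_events(SubRef).
```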
diff --git a/apps/emqx_modules/test/emqx_modules_conf_SUITE.erl b/apps/emqx_modules/test/emqx_modules_conf_SUITE.erl index 666af9ef0..14e477bf9 100644 --- a/apps/emqx_modules/test/emqx_modules_conf_SUITE.erl +++ b/apps/emqx_modules/test/emqx_modules_conf_SUITE.erl @@ -29,9 +29,7 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Conf) -> - emqx_common_test_helpers:load_config(emqx_modules_schema, <<"gateway {}">>, #{ - raw_with_default => true - }), + emqx_common_test_helpers:load_config(emqx_modules_schema, <<"gateway {}">>), emqx_common_test_helpers:start_apps([emqx_conf, emqx_modules]), Conf. diff --git a/apps/emqx_modules/test/emqx_rewrite_SUITE.erl b/apps/emqx_modules/test/emqx_rewrite_SUITE.erl index 1847f876e..aa2c7cad7 100644 --- a/apps/emqx_modules/test/emqx_rewrite_SUITE.erl +++ b/apps/emqx_modules/test/emqx_rewrite_SUITE.erl @@ -73,9 +73,7 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> emqx_common_test_helpers:boot_modules(all), - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, #{}, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, #{}), emqx_common_test_helpers:start_apps([emqx_conf, emqx_modules]), Config. @@ -160,17 +158,13 @@ t_rewrite_re_error(_Config) -> ok. t_list(_Config) -> - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE), Expect = maps:get(<<"rewrite">>, ?REWRITE), ?assertEqual(Expect, emqx_rewrite:list()), ok. t_update(_Config) -> - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE), Init = emqx_rewrite:list(), Rules = [ #{ @@ -186,9 +180,7 @@ t_update(_Config) -> ok. t_update_disable(_Config) -> - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE), ?assertEqual(ok, emqx_rewrite:update([])), timer:sleep(150), @@ -203,9 +195,7 @@ t_update_disable(_Config) -> ok. t_update_re_failed(_Config) -> - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE), Re = <<"*^test/*">>, Rules = [ #{ @@ -261,9 +251,7 @@ receive_publish(Timeout) -> end. init() -> - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE), ok = emqx_rewrite:enable(), {ok, C} = emqtt:start_link([{clientid, <<"c1">>}, {username, <<"u1">>}]), {ok, _} = emqtt:connect(C), diff --git a/apps/emqx_modules/test/emqx_rewrite_api_SUITE.erl b/apps/emqx_modules/test/emqx_rewrite_api_SUITE.erl index 90e90d788..528102d9e 100644 --- a/apps/emqx_modules/test/emqx_rewrite_api_SUITE.erl +++ b/apps/emqx_modules/test/emqx_rewrite_api_SUITE.erl @@ -18,7 +18,7 @@ -compile(nowarn_export_all). -compile(export_all). --import(emqx_dashboard_api_test_helpers, [request/3, uri/1]). +-import(emqx_mgmt_api_test_util, [request/3, uri/1]). -include_lib("eunit/include/eunit.hrl"). -include_lib("common_test/include/ct.hrl"). @@ -33,24 +33,15 @@ init_per_testcase(_, Config) -> Config. 
init_per_suite(Config) -> - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF, #{ - raw_with_default => true - }), - - ok = emqx_common_test_helpers:start_apps( - [emqx_conf, emqx_modules, emqx_dashboard], - fun set_special_configs/1 + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF), + ok = emqx_mgmt_api_test_util:init_suite( + [emqx_conf, emqx_modules] ), Config. end_per_suite(_Config) -> - emqx_common_test_helpers:stop_apps([emqx_conf, emqx_dashboard, emqx_modules]), - ok. - -set_special_configs(emqx_dashboard) -> - emqx_dashboard_api_test_helpers:set_default_config(); -set_special_configs(_App) -> + emqx_mgmt_api_test_util:end_suite([emqx_conf, emqx_modules]), ok. %%------------------------------------------------------------------------------ @@ -81,7 +72,7 @@ t_mqtt_topic_rewrite(_) -> ?assertEqual( Rules, - jsx:decode(Result) + emqx_utils_json:decode(Result, [return_maps]) ). t_mqtt_topic_rewrite_limit(_) -> diff --git a/apps/emqx_modules/test/emqx_telemetry_SUITE.erl b/apps/emqx_modules/test/emqx_telemetry_SUITE.erl index 37c1115aa..86ea65620 100644 --- a/apps/emqx_modules/test/emqx_telemetry_SUITE.erl +++ b/apps/emqx_modules/test/emqx_telemetry_SUITE.erl @@ -42,9 +42,8 @@ init_per_suite(Config) -> emqx_common_test_helpers:deps_path(emqx_authz, "etc/acl.conf") end ), - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF), + emqx_gateway_test_utils:load_all_gateway_apps(), emqx_common_test_helpers:start_apps( [emqx_conf, emqx_authn, emqx_authz, emqx_modules], fun set_special_configs/1 @@ -153,9 +152,7 @@ init_per_testcase(t_exhook_info, Config) -> {ok, _} = emqx_exhook_demo_svr:start(), {ok, Sock} = gen_tcp:connect("localhost", 9000, [], 3000), _ = gen_tcp:close(Sock), - ok = emqx_common_test_helpers:load_config(emqx_exhook_schema, ExhookConf, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_exhook_schema, ExhookConf), {ok, _} = application:ensure_all_started(emqx_exhook), Config; init_per_testcase(t_cluster_uuid, Config) -> @@ -176,9 +173,7 @@ init_per_testcase(t_uuid_restored_from_file, Config) -> %% clear the UUIDs in the DB {atomic, ok} = mria:clear_table(emqx_telemetry), emqx_common_test_helpers:stop_apps([emqx_conf, emqx_authn, emqx_authz, emqx_modules]), - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF), emqx_common_test_helpers:start_apps( [emqx_conf, emqx_authn, emqx_authz, emqx_modules], fun set_special_configs/1 @@ -331,9 +326,7 @@ t_uuid_saved_to_file(_Config) -> %% clear the UUIDs in the DB {atomic, ok} = mria:clear_table(emqx_telemetry), emqx_common_test_helpers:stop_apps([emqx_conf, emqx_authn, emqx_authz, emqx_modules]), - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF), emqx_common_test_helpers:start_apps( [emqx_conf, emqx_authn, emqx_authz, emqx_modules], fun set_special_configs/1 @@ -511,7 +504,7 @@ t_send_after_enable(_) -> ), receive {request, post, _URL, _Headers, Body} -> - {ok, Decoded} = emqx_json:safe_decode(Body, [return_maps]), + {ok, Decoded} = emqx_utils_json:safe_decode(Body, [return_maps]), ?assertMatch( #{ <<"uuid">> := _, @@ -656,8 +649,10 @@ 
mock_advanced_mqtt_features() -> lists:foreach( fun(N) -> - Num = integer_to_binary(N), - Message = emqx_message:make(<<"$delayed/", Num/binary, "/delayed">>, <<"payload">>), + DelaySec = integer_to_binary(N + 10), + Message = emqx_message:make( + <<"$delayed/", DelaySec/binary, "/delayed">>, <<"payload">> + ), {stop, _} = emqx_delayed:on_message_publish(Message) end, lists:seq(1, 4) @@ -823,15 +818,11 @@ start_slave(Name) -> (emqx) -> application:set_env(emqx, boot_modules, []), ekka:join(TestNode), - emqx_common_test_helpers:load_config( - emqx_modules_schema, ?BASE_CONF, #{raw_with_default => true} - ), + emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF), ok; (_App) -> - emqx_common_test_helpers:load_config( - emqx_modules_schema, ?BASE_CONF, #{raw_with_default => true} - ), + emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF), ok end, Opts = #{ @@ -856,9 +847,10 @@ stop_slave(Node) -> %emqx_cluster_rpc:fast_forward_to_commit(Node, 100), rpc:call(Node, ?MODULE, leave_cluster, []), ok = slave:stop(Node), - ?assertEqual([node()], mria_mnesia:running_nodes()), + ?assertEqual([node()], mria:running_nodes()), ?assertEqual([], nodes()), - ok. + _ = application:stop(mria), + ok = application:start(mria). leave_cluster() -> try mnesia_hook:module_info() of diff --git a/apps/emqx_modules/test/emqx_telemetry_api_SUITE.erl b/apps/emqx_modules/test/emqx_telemetry_api_SUITE.erl index 288a155d9..c375810b5 100644 --- a/apps/emqx_modules/test/emqx_telemetry_api_SUITE.erl +++ b/apps/emqx_modules/test/emqx_telemetry_api_SUITE.erl @@ -18,7 +18,7 @@ -compile(nowarn_export_all). -compile(export_all). --import(emqx_dashboard_api_test_helpers, [request/2, request/3, uri/1]). +-import(emqx_mgmt_api_test_util, [request/2, request/3, uri/1]). -include_lib("eunit/include/eunit.hrl"). -include_lib("common_test/include/ct.hrl"). @@ -29,12 +29,9 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF, #{ - raw_with_default => true - }), - - ok = emqx_common_test_helpers:start_apps( - [emqx_conf, emqx_authn, emqx_authz, emqx_modules, emqx_dashboard], + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF), + ok = emqx_mgmt_api_test_util:init_suite( + [emqx_conf, emqx_authn, emqx_authz, emqx_modules], fun set_special_configs/1 ), @@ -49,8 +46,8 @@ end_per_suite(_Config) -> <<"sources">> => [] } ), - emqx_common_test_helpers:stop_apps([ - emqx_dashboard, emqx_conf, emqx_authn, emqx_authz, emqx_modules + emqx_mgmt_api_test_util:end_suite([ + emqx_conf, emqx_authn, emqx_authz, emqx_modules ]), ok. @@ -113,7 +110,7 @@ t_status(_) -> ?assertEqual( #{<<"enable">> => false}, - jsx:decode(Result0) + emqx_utils_json:decode(Result0) ), ?assertMatch( @@ -139,7 +136,7 @@ t_status(_) -> ?assertEqual( #{<<"enable">> => true}, - jsx:decode(Result1) + emqx_utils_json:decode(Result1) ), ?assertMatch( @@ -180,7 +177,7 @@ t_data(_) -> <<"uuid">> := _, <<"vm_specs">> := _ }, - jsx:decode(Result) + emqx_utils_json:decode(Result) ), {ok, 200, _} = diff --git a/apps/emqx_modules/test/emqx_topic_metrics_SUITE.erl b/apps/emqx_modules/test/emqx_topic_metrics_SUITE.erl index 10ce5d0df..a147f41cd 100644 --- a/apps/emqx_modules/test/emqx_topic_metrics_SUITE.erl +++ b/apps/emqx_modules/test/emqx_topic_metrics_SUITE.erl @@ -28,9 +28,7 @@ all() -> emqx_common_test_helpers:all(?MODULE). 
init_per_suite(Config) -> emqx_common_test_helpers:boot_modules(all), - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?TOPIC, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?TOPIC), emqx_common_test_helpers:start_apps([emqx_conf, emqx_modules]), Config. @@ -42,6 +40,9 @@ init_per_testcase(_Case, Config) -> emqx_topic_metrics:deregister_all(), Config. +end_per_testcase(t_metrics_not_started, _Config) -> + _ = supervisor:restart_child(emqx_modules_sup, emqx_topic_metrics), + ok; end_per_testcase(_Case, _Config) -> emqx_topic_metrics:deregister_all(), emqx_config:put([topic_metrics], []), @@ -181,3 +182,10 @@ t_unknown_messages(_) -> OldPid, whereis(emqx_topic_metrics) ). + +t_metrics_not_started(_Config) -> + _ = emqx_topic_metrics:register(<<"a/b/c">>), + ?assert(emqx_topic_metrics:is_registered(<<"a/b/c">>)), + ok = supervisor:terminate_child(emqx_modules_sup, emqx_topic_metrics), + ?assertNot(emqx_topic_metrics:is_registered(<<"a/b/c">>)), + {ok, _} = supervisor:restart_child(emqx_modules_sup, emqx_topic_metrics). diff --git a/apps/emqx_modules/test/emqx_topic_metrics_api_SUITE.erl b/apps/emqx_modules/test/emqx_topic_metrics_api_SUITE.erl index 8c23d042c..2e668b6cf 100644 --- a/apps/emqx_modules/test/emqx_topic_metrics_api_SUITE.erl +++ b/apps/emqx_modules/test/emqx_topic_metrics_api_SUITE.erl @@ -18,7 +18,7 @@ -compile(nowarn_export_all). -compile(export_all). --import(emqx_dashboard_api_test_helpers, [request/3, uri/1]). +-import(emqx_mgmt_api_test_util, [request/2, request/3, uri/1]). -include_lib("eunit/include/eunit.hrl"). -include_lib("common_test/include/ct.hrl"). @@ -40,13 +40,9 @@ init_per_testcase(_, Config) -> Config. init_per_suite(Config) -> - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF, #{ - raw_with_default => true - }), - - ok = emqx_common_test_helpers:start_apps( - [emqx_conf, emqx_modules, emqx_dashboard], - fun set_special_configs/1 + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF), + ok = emqx_mgmt_api_test_util:init_suite( + [emqx_conf, emqx_modules] ), %% When many tests run in an obscure order, it may occur that @@ -59,15 +55,10 @@ init_per_suite(Config) -> Config. end_per_suite(_Config) -> - emqx_common_test_helpers:stop_apps([emqx_conf, emqx_dashboard, emqx_modules]), + emqx_mgmt_api_test_util:end_suite([emqx_conf, emqx_modules]), application:stop(gen_rpc), ok. -set_special_configs(emqx_dashboard) -> - emqx_dashboard_api_test_helpers:set_default_config(); -set_special_configs(_App) -> - ok. 
- %%------------------------------------------------------------------------------ %% Tests %%------------------------------------------------------------------------------ @@ -80,7 +71,7 @@ t_mqtt_topic_metrics_collection(_) -> ?assertEqual( [], - jsx:decode(Result0) + emqx_utils_json:decode(Result0) ), {ok, 200, _} = request( @@ -101,7 +92,7 @@ t_mqtt_topic_metrics_collection(_) -> <<"metrics">> := #{} } ], - jsx:decode(Result1) + emqx_utils_json:decode(Result1) ), ?assertMatch( @@ -156,7 +147,7 @@ t_mqtt_topic_metrics(_) -> uri(["mqtt", "topic_metrics"]) ), - ?assertMatch([_], jsx:decode(Result0)), + ?assertMatch([_], emqx_utils_json:decode(Result0)), {ok, 200, Result1} = request( get, @@ -168,7 +159,7 @@ t_mqtt_topic_metrics(_) -> <<"topic">> := <<"topic/1/2">>, <<"metrics">> := #{} }, - jsx:decode(Result1) + emqx_utils_json:decode(Result1) ), ?assertMatch( @@ -294,7 +285,7 @@ t_node_aggregation(_) -> <<"topic">> := <<"topic/1/2">>, <<"metrics">> := #{<<"messages.dropped.count">> := 3} }, - jsx:decode(Result) + emqx_utils_json:decode(Result) ), meck:unload(emqx_topic_metrics_proto_v1). @@ -315,6 +306,3 @@ t_badrpc(_) -> %%------------------------------------------------------------------------------ %% Helpers %%------------------------------------------------------------------------------ - -request(Method, Url) -> - request(Method, Url, []). diff --git a/apps/emqx_node_rebalance/BSL.txt b/apps/emqx_node_rebalance/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_node_rebalance/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. 
+ +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_node_rebalance/README.md b/apps/emqx_node_rebalance/README.md new file mode 100644 index 000000000..8a384fb5d --- /dev/null +++ b/apps/emqx_node_rebalance/README.md @@ -0,0 +1,40 @@ +# EMQX Node Rebalance + +`emqx_node_rebalance` is a part of the node evacuation/node rebalance feature in EMQX. +It implements high-level scenarios for node evacuation and rebalancing. + +## Application Responsibilities + +The `emqx_node_rebalance` application's core concept is a _rebalance coordinator_. +A _rebalance coordinator_ is an entity that implements the rebalancing logic and orchestrates the rebalancing process. +In particular, it: + +* Enables/Disables Eviction Agent on nodes. +* Sends connection/session eviction commands to Eviction Agents according to the evacuation logic. + +We have two implementations of the _rebalance coordinator_: +* `emqx_node_rebalance` - a coordinator that implements node rebalancing; +* `emqx_node_rebalance_evacuation` - a coordinator that implements node evacuation.
+ +## EMQX Integration + +`emqx_node_rebalance` is a high-level application that is loosely coupled with the rest of the system. +It uses Eviction Agent to perform the required operations. + +## User Facing API + +The application provides an API (CLI and HTTP) to perform the following operations: +* Start/Stop rebalancing across a set of nodes or the whole cluster; +* Start/Stop evacuation of a node; +* Get the current rebalancing status of the local node; +* Get the current rebalancing status of the whole cluster. + +Also, an HTTP endpoint is provided for liveness probes. + +## Documentation + +The rebalancing concept is described in the corresponding [EIP](https://github.com/emqx/eip/blob/main/active/0020-node-rebalance.md). + +## Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). diff --git a/apps/emqx_node_rebalance/etc/emqx_node_rebalance.conf b/apps/emqx_node_rebalance/etc/emqx_node_rebalance.conf new file mode 100644 index 000000000..8ace22435 --- /dev/null +++ b/apps/emqx_node_rebalance/etc/emqx_node_rebalance.conf @@ -0,0 +1,3 @@ +##-------------------------------------------------------------------- +## EMQX Node Rebalance Plugin +##-------------------------------------------------------------------- diff --git a/apps/emqx_node_rebalance/include/emqx_node_rebalance.hrl b/apps/emqx_node_rebalance/include/emqx_node_rebalance.hrl new file mode 100644 index 000000000..7d7bc439e --- /dev/null +++ b/apps/emqx_node_rebalance/include/emqx_node_rebalance.hrl @@ -0,0 +1,21 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-define(DEFAULT_CONN_EVICT_RATE, 500). +-define(DEFAULT_SESS_EVICT_RATE, 500). + +%% sec +-define(DEFAULT_WAIT_HEALTH_CHECK, 60). +%% sec +-define(DEFAULT_WAIT_TAKEOVER, 60). + +-define(DEFAULT_ABS_CONN_THRESHOLD, 1000). +-define(DEFAULT_ABS_SESS_THRESHOLD, 1000). + +-define(DEFAULT_REL_CONN_THRESHOLD, 1.1). +-define(DEFAULT_REL_SESS_THRESHOLD, 1.1). + +-define(EVICT_INTERVAL, 1000). + +-define(EVACUATION_FILENAME, <<".evacuation">>). diff --git a/apps/emqx_node_rebalance/rebar.config b/apps/emqx_node_rebalance/rebar.config new file mode 100644 index 000000000..b055d8f4f --- /dev/null +++ b/apps/emqx_node_rebalance/rebar.config @@ -0,0 +1,2 @@ +{deps, [{emqx, {path, "../../apps/emqx"}}]}. +{project_plugins, [erlfmt]}. diff --git a/apps/emqx_node_rebalance/src/emqx_node_rebalance.app.src b/apps/emqx_node_rebalance/src/emqx_node_rebalance.app.src new file mode 100644 index 000000000..381001b87 --- /dev/null +++ b/apps/emqx_node_rebalance/src/emqx_node_rebalance.app.src @@ -0,0 +1,21 @@ +{application, emqx_node_rebalance, [ + {description, "EMQX Node Rebalance"}, + {vsn, "5.0.0"}, + {registered, [ + emqx_node_rebalance_sup, + emqx_node_rebalance, + emqx_node_rebalance_agent, + emqx_node_rebalance_evacuation + ]}, + {applications, [ + kernel, + stdlib + ]}, + {mod, {emqx_node_rebalance_app, []}}, + {env, []}, + {modules, []}, + {links, [ + {"Homepage", "https://www.emqx.com/"}, + {"Github", "https://github.com/emqx"} + ]} +]}. diff --git a/apps/emqx_node_rebalance/src/emqx_node_rebalance.appup.src b/apps/emqx_node_rebalance/src/emqx_node_rebalance.appup.src new file mode 100644 index 000000000..c1b84778d --- /dev/null +++ b/apps/emqx_node_rebalance/src/emqx_node_rebalance.appup.src @@ -0,0 +1,3 @@ +%% -*- mode: erlang -*- +%% Unless you know what you are doing, DO NOT edit manually!!
+{VSN, [{<<".*">>, []}], [{<<".*">>, []}]}. diff --git a/apps/emqx_node_rebalance/src/emqx_node_rebalance.erl b/apps/emqx_node_rebalance/src/emqx_node_rebalance.erl new file mode 100644 index 000000000..1f2adc565 --- /dev/null +++ b/apps/emqx_node_rebalance/src/emqx_node_rebalance.erl @@ -0,0 +1,438 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_node_rebalance). + +-include("emqx_node_rebalance.hrl"). + +-include_lib("emqx/include/logger.hrl"). +-include_lib("emqx/include/types.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +-export([ + start/1, + status/0, + status/1, + stop/0 +]). + +-export([start_link/0]). + +-behaviour(gen_statem). + +-export([ + init/1, + callback_mode/0, + handle_event/4, + code_change/4 +]). + +-export([ + is_node_available/0, + available_nodes/1, + connection_count/0, + session_count/0, + disconnected_session_count/0 +]). + +-export_type([ + start_opts/0, + start_error/0 +]). + +%%-------------------------------------------------------------------- +%% APIs +%%-------------------------------------------------------------------- + +-type start_opts() :: #{ + conn_evict_rate => pos_integer(), + sess_evict_rate => pos_integer(), + wait_health_check => pos_integer(), + wait_takeover => pos_integer(), + abs_conn_threshold => pos_integer(), + rel_conn_threshold => number(), + abs_sess_threshold => pos_integer(), + rel_sess_threshold => number(), + nodes => [node()] +}. +-type start_error() :: already_started | [{node(), term()}]. + +-spec start(start_opts()) -> ok_or_error(start_error()). +start(StartOpts) -> + Opts = maps:merge(default_opts(), StartOpts), + gen_statem:call(?MODULE, {start, Opts}). + +-spec stop() -> ok_or_error(not_started). +stop() -> + gen_statem:call(?MODULE, stop). + +-spec status() -> disabled | {enabled, map()}. +status() -> + gen_statem:call(?MODULE, status). + +-spec status(pid()) -> disabled | {enabled, map()}. +status(Pid) -> + gen_statem:call(Pid, status). + +-spec start_link() -> startlink_ret(). +start_link() -> + gen_statem:start_link({local, ?MODULE}, ?MODULE, [], []). + +-spec available_nodes(list(node())) -> list(node()). +available_nodes(Nodes) when is_list(Nodes) -> + {Available, _} = emqx_node_rebalance_proto_v1:available_nodes(Nodes), + lists:filter(fun is_atom/1, Available). + +%%-------------------------------------------------------------------- +%% gen_statem callbacks +%%-------------------------------------------------------------------- + +callback_mode() -> handle_event_function. + +%% states: disabled, wait_health_check, evicting_conns, wait_takeover, evicting_sessions + +init([]) -> + ?tp(debug, emqx_node_rebalance_started, #{}), + {ok, disabled, #{}}. 
+ +%% start +handle_event( + {call, From}, + {start, #{wait_health_check := WaitHealthCheck} = Opts}, + disabled, + #{} = Data +) -> + case enable_rebalance(Data#{opts => Opts}) of + {ok, NewData} -> + ?SLOG(warning, #{msg => "node_rebalance_enabled", opts => Opts}), + {next_state, wait_health_check, NewData, [ + {state_timeout, seconds(WaitHealthCheck), evict_conns}, + {reply, From, ok} + ]}; + {error, Reason} -> + ?SLOG(warning, #{ + msg => "node_rebalance_enable_failed", + reason => Reason + }), + {keep_state_and_data, [{reply, From, {error, Reason}}]} + end; +handle_event({call, From}, {start, _Opts}, _State, #{}) -> + {keep_state_and_data, [{reply, From, {error, already_started}}]}; +%% stop +handle_event({call, From}, stop, disabled, #{}) -> + {keep_state_and_data, [{reply, From, {error, not_started}}]}; +handle_event({call, From}, stop, _State, Data) -> + ok = disable_rebalance(Data), + ?SLOG(warning, #{msg => "node_rebalance_stopped"}), + {next_state, disabled, deinit(Data), [{reply, From, ok}]}; +%% status +handle_event({call, From}, status, disabled, #{}) -> + {keep_state_and_data, [{reply, From, disabled}]}; +handle_event({call, From}, status, State, Data) -> + Stats = get_stats(State, Data), + {keep_state_and_data, [ + {reply, From, + {enabled, Stats#{ + state => State, + coordinator_node => node() + }}} + ]}; +%% conn eviction +handle_event( + state_timeout, + evict_conns, + wait_health_check, + Data +) -> + ?SLOG(warning, #{msg => "node_rebalance_wait_health_check_over"}), + {next_state, evicting_conns, Data, [{state_timeout, 0, evict_conns}]}; +handle_event( + state_timeout, + evict_conns, + evicting_conns, + #{ + opts := #{ + wait_takeover := WaitTakeover, + evict_interval := EvictInterval + } + } = Data +) -> + case evict_conns(Data) of + ok -> + ?SLOG(warning, #{msg => "node_rebalance_evict_conns_over"}), + {next_state, wait_takeover, Data, [ + {state_timeout, seconds(WaitTakeover), evict_sessions} + ]}; + {continue, NewData} -> + {keep_state, NewData, [{state_timeout, EvictInterval, evict_conns}]} + end; +handle_event( + state_timeout, + evict_sessions, + wait_takeover, + Data +) -> + ?SLOG(warning, #{msg => "node_rebalance_wait_takeover_over"}), + {next_state, evicting_sessions, Data, [{state_timeout, 0, evict_sessions}]}; +handle_event( + state_timeout, + evict_sessions, + evicting_sessions, + #{opts := #{evict_interval := EvictInterval}} = Data +) -> + case evict_sessions(Data) of + ok -> + ?tp(debug, emqx_node_rebalance_evict_sess_over, #{}), + ?SLOG(warning, #{msg => "node_rebalance_evict_sessions_over"}), + ok = disable_rebalance(Data), + ?SLOG(warning, #{msg => "node_rebalance_finished_successfully"}), + {next_state, disabled, deinit(Data)}; + {continue, NewData} -> + {keep_state, NewData, [{state_timeout, EvictInterval, evict_sessions}]} + end; +handle_event({call, From}, Msg, _State, _Data) -> + ?SLOG(warning, #{msg => "node_rebalance_unknown_call", call => Msg}), + {keep_state_and_data, [{reply, From, ignored}]}; +handle_event(info, Msg, _State, _Data) -> + ?SLOG(warning, #{msg => "node_rebalance_unknown_info", info => Msg}), + keep_state_and_data; +handle_event(cast, Msg, _State, _Data) -> + ?SLOG(warning, #{msg => "node_rebalance_unknown_cast", cast => Msg}), + keep_state_and_data. + +code_change(_Vsn, State, Data, _Extra) -> + {ok, State, Data}. 
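Read together, the `handle_event/4` clauses above implement the state flow sketched in the comment near `init/1`; roughly:

```
disabled --(start call)--> wait_health_check
wait_health_check --(state_timeout: wait_health_check s)--> evicting_conns
evicting_conns --(conn thresholds met)--> wait_takeover
wait_takeover --(state_timeout: wait_takeover s)--> evicting_sessions
evicting_sessions --(sess thresholds met)--> disabled
```

A `stop` call from any active state disables the rebalance agents and returns the coordinator to `disabled` immediately.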
+ +%%-------------------------------------------------------------------- +%% internal funs +%%-------------------------------------------------------------------- + +enable_rebalance(#{opts := Opts} = Data) -> + Nodes = maps:get(nodes, Opts), + ConnCounts = multicall(Nodes, connection_counts, []), + SessCounts = multicall(Nodes, session_counts, []), + {_, Counts} = lists:unzip(ConnCounts), + Avg = avg(Counts), + {DonorCounts, RecipientCounts} = lists:partition( + fun({_Node, Count}) -> + Count >= Avg + end, + ConnCounts + ), + ?SLOG(warning, #{ + msg => "node_rebalance_enabling", + conn_counts => ConnCounts, + donor_counts => DonorCounts, + recipient_counts => RecipientCounts + }), + {DonorNodes, _} = lists:unzip(DonorCounts), + {RecipientNodes, _} = lists:unzip(RecipientCounts), + case need_rebalance(DonorNodes, RecipientNodes, ConnCounts, SessCounts, Opts) of + false -> + {error, nothing_to_balance}; + true -> + _ = multicall(DonorNodes, enable_rebalance_agent, [self()]), + {ok, Data#{ + donors => DonorNodes, + recipients => RecipientNodes, + initial_conn_counts => maps:from_list(ConnCounts), + initial_sess_counts => maps:from_list(SessCounts) + }} + end. + +disable_rebalance(#{donors := DonorNodes}) -> + _ = multicall(DonorNodes, disable_rebalance_agent, [self()]), + ok. + +evict_conns(#{donors := DonorNodes, recipients := RecipientNodes, opts := Opts} = Data) -> + DonorNodeCounts = multicall(DonorNodes, connection_counts, []), + {_, DonorCounts} = lists:unzip(DonorNodeCounts), + RecipientNodeCounts = multicall(RecipientNodes, connection_counts, []), + {_, RecipientCounts} = lists:unzip(RecipientNodeCounts), + + DonorAvg = avg(DonorCounts), + RecipientAvg = avg(RecipientCounts), + Thresholds = thresholds(conn, Opts), + NewData = Data#{ + donor_conn_avg => DonorAvg, + recipient_conn_avg => RecipientAvg, + donor_conn_counts => maps:from_list(DonorNodeCounts), + recipient_conn_counts => maps:from_list(RecipientNodeCounts) + }, + case within_thresholds(DonorAvg, RecipientAvg, Thresholds) of + true -> + ok; + false -> + ConnEvictRate = maps:get(conn_evict_rate, Opts), + NodesToEvict = nodes_to_evict(RecipientAvg, DonorNodeCounts), + ?SLOG(warning, #{ + msg => "node_rebalance_evict_conns", + nodes => NodesToEvict, + counts => ConnEvictRate + }), + _ = multicall(NodesToEvict, evict_connections, [ConnEvictRate]), + {continue, NewData} + end. + +evict_sessions(#{donors := DonorNodes, recipients := RecipientNodes, opts := Opts} = Data) -> + DonorNodeCounts = multicall(DonorNodes, disconnected_session_counts, []), + {_, DonorCounts} = lists:unzip(DonorNodeCounts), + RecipientNodeCounts = multicall(RecipientNodes, disconnected_session_counts, []), + {_, RecipientCounts} = lists:unzip(RecipientNodeCounts), + + DonorAvg = avg(DonorCounts), + RecipientAvg = avg(RecipientCounts), + Thresholds = thresholds(sess, Opts), + NewData = Data#{ + donor_sess_avg => DonorAvg, + recipient_sess_avg => RecipientAvg, + donor_sess_counts => maps:from_list(DonorNodeCounts), + recipient_sess_counts => maps:from_list(RecipientNodeCounts) + }, + case within_thresholds(DonorAvg, RecipientAvg, Thresholds) of + true -> + ok; + false -> + SessEvictRate = maps:get(sess_evict_rate, Opts), + NodesToEvict = nodes_to_evict(RecipientAvg, DonorNodeCounts), + ?SLOG(warning, #{ + msg => "node_rebalance_evict_sessions", + nodes => NodesToEvict, + counts => SessEvictRate + }), + _ = multicall( + NodesToEvict, + evict_sessions, + [SessEvictRate, RecipientNodes, disconnected] + ), + {continue, NewData} + end. 
+ +need_rebalance([] = _DonorNodes, _RecipientNodes, _ConnCounts, _SessCounts, _Opts) -> + false; +need_rebalance(_DonorNodes, [] = _RecipientNodes, _ConnCounts, _SessCounts, _Opts) -> + false; +need_rebalance(DonorNodes, RecipientNodes, ConnCounts, SessCounts, Opts) -> + DonorConnAvg = avg_for_nodes(DonorNodes, ConnCounts), + RecipientConnAvg = avg_for_nodes(RecipientNodes, ConnCounts), + DonorSessAvg = avg_for_nodes(DonorNodes, SessCounts), + RecipientSessAvg = avg_for_nodes(RecipientNodes, SessCounts), + Result = + (not within_thresholds(DonorConnAvg, RecipientConnAvg, thresholds(conn, Opts))) orelse + (not within_thresholds(DonorSessAvg, RecipientSessAvg, thresholds(sess, Opts))), + ?tp( + debug, + emqx_node_rebalance_need_rebalance, + #{ + donors => DonorNodes, + recipients => RecipientNodes, + conn_counts => ConnCounts, + sess_counts => SessCounts, + opts => Opts, + result => Result + } + ), + Result. + +avg_for_nodes(Nodes, Counts) -> + avg(maps:values(maps:with(Nodes, maps:from_list(Counts)))). + +within_thresholds(Value, GoalValue, {AbsThres, RelThres}) -> + (Value =< GoalValue + AbsThres) orelse (Value =< GoalValue * RelThres). + +thresholds(conn, #{abs_conn_threshold := Abs, rel_conn_threshold := Rel}) -> + {Abs, Rel}; +thresholds(sess, #{abs_sess_threshold := Abs, rel_sess_threshold := Rel}) -> + {Abs, Rel}. + +nodes_to_evict(Goal, NodeCounts) -> + {Nodes, _} = lists:unzip( + lists:filter( + fun({_Node, Count}) -> + Count > Goal + end, + NodeCounts + ) + ), + Nodes. + +get_stats(disabled, _Data) -> #{}; +get_stats(_State, Data) -> Data. + +avg(List) when length(List) >= 1 -> + lists:sum(List) / length(List). + +multicall(Nodes, F, A) -> + case apply(emqx_node_rebalance_proto_v1, F, [Nodes | A]) of + {Results, []} -> + case lists:partition(fun is_ok/1, lists:zip(Nodes, Results)) of + {OkResults, []} -> + [{Node, ok_result(Result)} || {Node, Result} <- OkResults]; + {_, BadResults} -> + error({bad_nodes, BadResults}) + end; + {_, [_BadNode | _] = BadNodes} -> + error({bad_nodes, BadNodes}) + end. + +is_ok({_Node, {ok, _}}) -> true; +is_ok({_Node, ok}) -> true; +is_ok(_) -> false. + +ok_result({ok, Result}) -> Result; +ok_result(ok) -> ok. + +connection_count() -> + {ok, emqx_eviction_agent:connection_count()}. + +session_count() -> + {ok, emqx_eviction_agent:session_count()}. + +disconnected_session_count() -> + {ok, emqx_eviction_agent:session_count(disconnected)}. + +default_opts() -> + #{ + conn_evict_rate => ?DEFAULT_CONN_EVICT_RATE, + abs_conn_threshold => ?DEFAULT_ABS_CONN_THRESHOLD, + rel_conn_threshold => ?DEFAULT_REL_CONN_THRESHOLD, + + sess_evict_rate => ?DEFAULT_SESS_EVICT_RATE, + abs_sess_threshold => ?DEFAULT_ABS_SESS_THRESHOLD, + rel_sess_threshold => ?DEFAULT_REL_SESS_THRESHOLD, + + wait_health_check => ?DEFAULT_WAIT_HEALTH_CHECK, + wait_takeover => ?DEFAULT_WAIT_TAKEOVER, + + evict_interval => ?EVICT_INTERVAL, + + nodes => all_nodes() + }. + +deinit(Data) -> + Keys = [ + recipient_conn_avg, + recipient_sess_avg, + donor_conn_avg, + donor_sess_avg, + recipient_conn_counts, + recipient_sess_counts, + donor_conn_counts, + donor_sess_counts, + initial_conn_counts, + initial_sess_counts, + opts + ], + maps:without(Keys, Data). + +is_node_available() -> + true = is_pid(whereis(emqx_node_rebalance_agent)), + disabled = emqx_eviction_agent:status(), + node(). + +all_nodes() -> + mria_mnesia:running_nodes(). + +seconds(Sec) -> + round(timer:seconds(Sec)). 
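Two behaviors of this module are worth a worked example. `within_thresholds/3` declares the donor average balanced when it is within either the absolute or the relative threshold of the recipient average, and `start/1` merges the caller's options over `default_opts()`. A minimal sketch, with made-up node names and counts:

```erlang
%% With the defaults (abs 1000, rel 1.1), a donor average of 1500
%% connections against a recipient average of 1000 already counts as
%% balanced: 1500 =< 1000 + 1000 holds, so no eviction is triggered.
true = (1500 =< 1000 + 1000) orelse (1500 =< 1000 * 1.1),

%% Tighter thresholds force an actual rebalance; any key left out
%% falls back to default_opts(), including nodes => all_nodes().
ok = emqx_node_rebalance:start(#{
    abs_conn_threshold => 50,
    rel_conn_threshold => 1.02,
    conn_evict_rate => 100,
    nodes => ['emqx@10.0.0.1', 'emqx@10.0.0.2']
}).
```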
diff --git a/apps/emqx_node_rebalance/src/emqx_node_rebalance_agent.erl b/apps/emqx_node_rebalance/src/emqx_node_rebalance_agent.erl new file mode 100644 index 000000000..47708d00e --- /dev/null +++ b/apps/emqx_node_rebalance/src/emqx_node_rebalance_agent.erl @@ -0,0 +1,131 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_node_rebalance_agent). + +-include_lib("emqx/include/emqx_mqtt.hrl"). +-include_lib("emqx/include/logger.hrl"). +-include_lib("emqx/include/types.hrl"). + +-include_lib("stdlib/include/qlc.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +-export([ + start_link/0, + enable/1, + disable/1, + status/0 +]). + +-export([ + init/1, + handle_call/3, + handle_info/2, + handle_cast/2, + code_change/3 +]). + +-define(ENABLE_KIND, emqx_node_rebalance). + +%%-------------------------------------------------------------------- +%% APIs +%%-------------------------------------------------------------------- + +-type status() :: {enabled, pid()} | disabled. + +-spec start_link() -> startlink_ret(). +start_link() -> + gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). + +-spec enable(pid()) -> ok_or_error(already_enabled | eviction_agent_busy). +enable(CoordinatorPid) -> + gen_server:call(?MODULE, {enable, CoordinatorPid}). + +-spec disable(pid()) -> ok_or_error(already_disabled | invalid_coordinator). +disable(CoordinatorPid) -> + gen_server:call(?MODULE, {disable, CoordinatorPid}). + +-spec status() -> status(). +status() -> + gen_server:call(?MODULE, status). + +%%-------------------------------------------------------------------- +%% gen_server callbacks +%%-------------------------------------------------------------------- + +init([]) -> + {ok, #{}}. + +handle_call({enable, CoordinatorPid}, _From, St) -> + case St of + #{coordinator_pid := _Pid} -> + {reply, {error, already_enabled}, St}; + _ -> + true = link(CoordinatorPid), + EvictionAgentPid = whereis(emqx_eviction_agent), + true = link(EvictionAgentPid), + case emqx_eviction_agent:enable(?ENABLE_KIND, undefined) of + ok -> + {reply, ok, #{ + coordinator_pid => CoordinatorPid, + eviction_agent_pid => EvictionAgentPid + }}; + {error, eviction_agent_busy} -> + true = unlink(EvictionAgentPid), + true = unlink(CoordinatorPid), + {reply, {error, eviction_agent_busy}, St} + end + end; +handle_call({disable, CoordinatorPid}, _From, St) -> + case St of + #{ + coordinator_pid := CoordinatorPid, + eviction_agent_pid := EvictionAgentPid + } -> + _ = emqx_eviction_agent:disable(?ENABLE_KIND), + true = unlink(EvictionAgentPid), + true = unlink(CoordinatorPid), + NewSt = maps:without( + [coordinator_pid, eviction_agent_pid], + St + ), + {reply, ok, NewSt}; + #{coordinator_pid := _CoordinatorPid} -> + {reply, {error, invalid_coordinator}, St}; + #{} -> + {reply, {error, already_disabled}, St} + end; +handle_call(status, _From, St) -> + case St of + #{coordinator_pid := Pid} -> + {reply, {enabled, Pid}, St}; + _ -> + {reply, disabled, St} + end; +handle_call(Msg, _From, St) -> + ?SLOG(warning, #{ + msg => "unknown_call", + call => Msg, + state => St + }), + {reply, ignored, St}. + +handle_info(Msg, St) -> + ?SLOG(warning, #{ + msg => "unknown_info", + info => Msg, + state => St + }), + {noreply, St}. + +handle_cast(Msg, St) -> + ?SLOG(warning, #{ + msg => "unknown_cast", + cast => Msg, + state => St + }), + {noreply, St}. 
+ +code_change(_Vsn, State, _Extra) -> + {ok, State}. diff --git a/apps/emqx_node_rebalance/src/emqx_node_rebalance_api.erl b/apps/emqx_node_rebalance/src/emqx_node_rebalance_api.erl new file mode 100644 index 000000000..1f6328a63 --- /dev/null +++ b/apps/emqx_node_rebalance/src/emqx_node_rebalance_api.erl @@ -0,0 +1,733 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_node_rebalance_api). + +-behaviour(minirest_api). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("emqx/include/logger.hrl"). +-include_lib("emqx_utils/include/emqx_utils_api.hrl"). + +%% Swagger specs from hocon schema +-export([ + api_spec/0, + paths/0, + schema/1, + namespace/0 +]). + +-export([ + fields/1, + roots/0 +]). + +%% API callbacks +-export([ + '/load_rebalance/status'/2, + '/load_rebalance/global_status'/2, + '/load_rebalance/availability_check'/2, + '/load_rebalance/:node/start'/2, + '/load_rebalance/:node/stop'/2, + '/load_rebalance/:node/evacuation/start'/2, + '/load_rebalance/:node/evacuation/stop'/2 +]). + +%% Schema examples +-export([ + rebalance_example/0, + rebalance_evacuation_example/0, + translate/2 +]). + +-import(hoconsc, [mk/2, ref/1, ref/2]). +-import(emqx_dashboard_swagger, [error_codes/2]). + +-define(BAD_REQUEST, 'BAD_REQUEST'). +-define(NODE_EVACUATING, 'NODE_EVACUATING'). +-define(RPC_ERROR, 'RPC_ERROR'). +-define(NOT_FOUND, 'NOT_FOUND'). + +%%-------------------------------------------------------------------- +%% API Spec +%%-------------------------------------------------------------------- + +namespace() -> "load_rebalance". + +api_spec() -> + emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true}). + +paths() -> + [ + "/load_rebalance/status", + "/load_rebalance/global_status", + "/load_rebalance/availability_check", + "/load_rebalance/:node/start", + "/load_rebalance/:node/stop", + "/load_rebalance/:node/evacuation/start", + "/load_rebalance/:node/evacuation/stop" + ]. 
+ +schema("/load_rebalance/status") -> + #{ + 'operationId' => '/load_rebalance/status', + get => #{ + tags => [<<"load_rebalance">>], + summary => <<"Get rebalance status">>, + description => ?DESC("load_rebalance_status"), + responses => #{ + 200 => local_status_response_schema() + } + } + }; +schema("/load_rebalance/global_status") -> + #{ + 'operationId' => '/load_rebalance/global_status', + get => #{ + tags => [<<"load_rebalance">>], + summary => <<"Get global rebalance status">>, + description => ?DESC("load_rebalance_global_status"), + responses => #{ + 200 => response_schema() + } + } + }; +schema("/load_rebalance/availability_check") -> + #{ + 'operationId' => '/load_rebalance/availability_check', + get => #{ + tags => [<<"load_rebalance">>], + summary => <<"Node rebalance availability check">>, + description => ?DESC("load_rebalance_availability_check"), + responses => #{ + 200 => response_schema(), + 503 => error_codes([?NODE_EVACUATING], <<"Node Evacuating">>) + } + } + }; +schema("/load_rebalance/:node/start") -> + #{ + 'operationId' => '/load_rebalance/:node/start', + post => #{ + tags => [<<"load_rebalance">>], + summary => <<"Start rebalancing with the node as coordinator">>, + description => ?DESC("load_rebalance_start"), + parameters => [param_node()], + 'requestBody' => + emqx_dashboard_swagger:schema_with_examples( + ref(rebalance_start), + rebalance_example() + ), + responses => #{ + 200 => response_schema(), + 400 => error_codes([?BAD_REQUEST], <<"Bad Request">>), + 404 => error_codes([?NOT_FOUND], <<"Not Found">>) + } + } + }; +schema("/load_rebalance/:node/stop") -> + #{ + 'operationId' => '/load_rebalance/:node/stop', + post => #{ + tags => [<<"load_rebalance">>], + summary => <<"Stop rebalancing coordinated by the node">>, + description => ?DESC("load_rebalance_stop"), + parameters => [param_node()], + responses => #{ + 200 => response_schema(), + 400 => error_codes([?BAD_REQUEST], <<"Bad Request">>), + 404 => error_codes([?NOT_FOUND], <<"Not Found">>) + } + } + }; +schema("/load_rebalance/:node/evacuation/start") -> + #{ + 'operationId' => '/load_rebalance/:node/evacuation/start', + post => #{ + tags => [<<"load_rebalance">>], + summary => <<"Start evacuation on a node">>, + description => ?DESC("load_rebalance_evacuation_start"), + parameters => [param_node()], + 'requestBody' => + emqx_dashboard_swagger:schema_with_examples( + ref(rebalance_evacuation_start), + rebalance_evacuation_example() + ), + responses => #{ + 200 => response_schema(), + 400 => error_codes([?BAD_REQUEST], <<"Bad Request">>), + 404 => error_codes([?NOT_FOUND], <<"Not Found">>) + } + } + }; +schema("/load_rebalance/:node/evacuation/stop") -> + #{ + 'operationId' => '/load_rebalance/:node/evacuation/stop', + post => #{ + tags => [<<"load_rebalance">>], + summary => <<"Stop evacuation on a node">>, + description => ?DESC("load_rebalance_evacuation_stop"), + parameters => [param_node()], + responses => #{ + 200 => response_schema(), + 400 => error_codes([?BAD_REQUEST], <<"Bad Request">>), + 404 => error_codes([?NOT_FOUND], <<"Not Found">>) + } + } + }. + +%%-------------------------------------------------------------------- +%% Handlers +%%-------------------------------------------------------------------- + +'/load_rebalance/status'(get, #{}) -> + case emqx_node_rebalance_status:local_status() of + disabled -> + {200, #{status => disabled}}; + {rebalance, Stats} -> + {200, format_status(rebalance, Stats)}; + {evacuation, Stats} -> + {200, format_status(evacuation, Stats)} + end. 
+ +'/load_rebalance/global_status'(get, #{}) -> + #{ + evacuations := Evacuations, + rebalances := Rebalances + } = emqx_node_rebalance_status:global_status(), + {200, #{ + evacuations => format_as_map_list(Evacuations), + rebalances => format_as_map_list(Rebalances) + }}. + +'/load_rebalance/availability_check'(get, #{}) -> + case emqx_eviction_agent:status() of + disabled -> + {200, #{}}; + {enabled, _Stats} -> + error_response(503, ?NODE_EVACUATING, <<"Node Evacuating">>) + end. + +'/load_rebalance/:node/start'(post, #{bindings := #{node := NodeBin}, body := Params0}) -> + emqx_utils_api:with_node(NodeBin, fun(Node) -> + Params1 = translate(rebalance_start, Params0), + with_nodes_at_key(nodes, Params1, fun(Params2) -> + wrap_rpc( + Node, emqx_node_rebalance_api_proto_v1:node_rebalance_start(Node, Params2) + ) + end) + end). + +'/load_rebalance/:node/stop'(post, #{bindings := #{node := NodeBin}}) -> + emqx_utils_api:with_node(NodeBin, fun(Node) -> + wrap_rpc( + Node, emqx_node_rebalance_api_proto_v1:node_rebalance_stop(Node) + ) + end). + +'/load_rebalance/:node/evacuation/start'(post, #{ + bindings := #{node := NodeBin}, body := Params0 +}) -> + emqx_utils_api:with_node(NodeBin, fun(Node) -> + Params1 = translate(rebalance_evacuation_start, Params0), + with_nodes_at_key(migrate_to, Params1, fun(Params2) -> + wrap_rpc( + Node, + emqx_node_rebalance_api_proto_v1:node_rebalance_evacuation_start( + Node, Params2 + ) + ) + end) + end). + +'/load_rebalance/:node/evacuation/stop'(post, #{bindings := #{node := NodeBin}}) -> + emqx_utils_api:with_node(NodeBin, fun(Node) -> + wrap_rpc( + Node, emqx_node_rebalance_api_proto_v1:node_rebalance_evacuation_stop(Node) + ) + end). + +%%-------------------------------------------------------------------- +%% Helpers +%%-------------------------------------------------------------------- + +wrap_rpc(Node, RPCResult) -> + case RPCResult of + ok -> + {200, #{}}; + {error, Reason} -> + error_response( + 400, ?BAD_REQUEST, io_lib:format("error on node ~p: ~p", [Node, Reason]) + ); + {badrpc, Reason} -> + error_response( + 503, ?RPC_ERROR, io_lib:format("RPC error on node ~p: ~p", [Node, Reason]) + ) + end. + +format_status(Process, Stats) -> + Stats#{process => Process, status => enabled}. + +validate_nodes(Key, Params) when is_map_key(Key, Params) -> + BinNodes = maps:get(Key, Params), + {ValidNodes, InvalidNodes} = lists:foldl( + fun(BinNode, {Nodes, UnknownNodes}) -> + case parse_node(BinNode) of + {ok, Node} -> {[Node | Nodes], UnknownNodes}; + {error, _} -> {Nodes, [BinNode | UnknownNodes]} + end + end, + {[], []}, + BinNodes + ), + case InvalidNodes of + [] -> + case emqx_node_rebalance_evacuation:available_nodes(ValidNodes) of + ValidNodes -> {ok, Params#{Key => ValidNodes}}; + OtherNodes -> {error, {unavailable, ValidNodes -- OtherNodes}} + end; + _ -> + {error, {invalid, InvalidNodes}} + end; +validate_nodes(_Key, Params) -> + {ok, Params}. + +with_nodes_at_key(Key, Params, Fun) -> + Res = validate_nodes(Key, Params), + case Res of + {ok, Params1} -> + Fun(Params1); + {error, {unavailable, Nodes}} -> + error_response(400, ?NOT_FOUND, io_lib:format("Nodes unavailable: ~p", [Nodes])); + {error, {invalid, Nodes}} -> + error_response(400, ?BAD_REQUEST, io_lib:format("Invalid nodes: ~p", [Nodes])) + end. + +parse_node(Bin) when is_binary(Bin) -> + try + {ok, binary_to_existing_atom(Bin)} + catch + error:badarg -> + {error, {unknown, Bin}} + end. + +format_as_map_list(List) -> + lists:map( + fun({Node, Info}) -> + Info#{node => Node} + end, + List + ). 
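[Editor's note] Note the deliberate use of binary_to_existing_atom in parse_node/1 above: node names supplied over the API can only resolve to atoms the VM already knows, so arbitrary input cannot grow the atom table, and unknown names surface as validation errors instead. Roughly (values illustrative):

    %% parse_node(<<"no_such_node@nowhere">>) -> {error, {unknown, <<"no_such_node@nowhere">>}}
    %% parse_node(atom_to_binary(node()))     -> {ok, node()}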
+ +error_response(HttpCode, Code, Message) -> + {HttpCode, ?ERROR_MSG(Code, Message)}. + +without(Keys, Props) -> + lists:filter( + fun({Key, _}) -> + not lists:member(Key, Keys) + end, + Props + ). + +%%------------------------------------------------------------------------------ +%% Schema +%%------------------------------------------------------------------------------ + +translate(Ref, Conf) -> + Options = #{atom_key => true}, + #{Ref := TranslatedConf} = hocon_tconf:check_plain( + ?MODULE, #{atom_to_binary(Ref) => Conf}, Options, [Ref] + ), + TranslatedConf. + +param_node() -> + { + node, + mk(binary(), #{ + in => path, + desc => ?DESC(param_node), + required => true + }) + }. + +fields(rebalance_start) -> + [ + {"wait_health_check", + mk( + emqx_schema:duration_s(), + #{ + desc => ?DESC(wait_health_check), + required => false + } + )}, + {"conn_evict_rate", + mk( + pos_integer(), + #{ + desc => ?DESC(conn_evict_rate), + required => false + } + )}, + {"sess_evict_rate", + mk( + pos_integer(), + #{ + desc => ?DESC(sess_evict_rate), + required => false + } + )}, + {"abs_conn_threshold", + mk( + pos_integer(), + #{ + desc => ?DESC(abs_conn_threshold), + required => false + } + )}, + {"rel_conn_threshold", + mk( + number(), + #{ + desc => ?DESC(rel_conn_threshold), + required => false, + validator => [fun(Value) -> Value > 1.0 end] + } + )}, + {"abs_sess_threshold", + mk( + pos_integer(), + #{ + desc => ?DESC(abs_sess_threshold), + required => false + } + )}, + {"rel_sess_threshold", + mk( + number(), + #{ + desc => ?DESC(rel_sess_threshold), + required => false, + validator => [fun(Value) -> Value > 1.0 end] + } + )}, + {"wait_takeover", + mk( + emqx_schema:duration_s(), + #{ + desc => ?DESC(wait_takeover), + required => false + } + )}, + {"nodes", + mk( + list(binary()), + #{ + desc => ?DESC(rebalance_nodes), + required => false, + validator => [fun(Values) -> length(Values) > 0 end] + } + )} + ]; +fields(rebalance_evacuation_start) -> + [ + {"conn_evict_rate", + mk( + pos_integer(), + #{ + desc => ?DESC(conn_evict_rate), + required => false + } + )}, + {"sess_evict_rate", + mk( + pos_integer(), + #{ + desc => ?DESC(sess_evict_rate), + required => false + } + )}, + {"redirect_to", + mk( + binary(), + #{ + desc => ?DESC(redirect_to), + required => false + } + )}, + {"wait_takeover", + mk( + pos_integer(), + #{ + desc => ?DESC(wait_takeover), + required => false + } + )}, + {"migrate_to", + mk( + nonempty_list(binary()), + #{ + desc => ?DESC(migrate_to), + required => false + } + )} + ]; +fields(local_status_disabled) -> + [ + {"status", + mk( + disabled, + #{ + desc => ?DESC(local_status_enabled), + required => true + } + )} + ]; +fields(local_status_enabled) -> + [ + {"status", + mk( + enabled, + #{ + desc => ?DESC(local_status_enabled), + required => true + } + )}, + {"process", + mk( + hoconsc:union([rebalance, evacuation]), + #{ + desc => ?DESC(local_status_process), + required => true + } + )}, + {"state", + mk( + atom(), + #{ + desc => ?DESC(local_status_state), + required => true + } + )}, + {"coordinator_node", + mk( + binary(), + #{ + desc => ?DESC(local_status_coordinator_node), + required => false + } + )}, + {"connection_eviction_rate", + mk( + pos_integer(), + #{ + desc => ?DESC(local_status_connection_eviction_rate), + required => false + } + )}, + {"session_eviction_rate", + mk( + pos_integer(), + #{ + desc => ?DESC(local_status_session_eviction_rate), + required => false + } + )}, + {"connection_goal", + mk( + non_neg_integer(), + #{ + desc => 
?DESC(local_status_connection_goal), + required => false + } + )}, + {"session_goal", + mk( + non_neg_integer(), + #{ + desc => ?DESC(local_status_session_goal), + required => false + } + )}, + {"disconnected_session_goal", + mk( + non_neg_integer(), + #{ + desc => ?DESC(local_status_disconnected_session_goal), + required => false + } + )}, + {"session_recipients", + mk( + list(binary()), + #{ + desc => ?DESC(local_status_session_recipients), + required => false + } + )}, + {"recipients", + mk( + list(binary()), + #{ + desc => ?DESC(local_status_recipients), + required => false + } + )}, + {"stats", + mk( + ref(status_stats), + #{ + desc => ?DESC(local_status_stats), + required => false + } + )} + ]; +fields(status_stats) -> + [ + {"initial_connected", + mk( + non_neg_integer(), + #{ + desc => ?DESC(status_stats_initial_connected), + required => true + } + )}, + {"current_connected", + mk( + non_neg_integer(), + #{ + desc => ?DESC(status_stats_current_connected), + required => true + } + )}, + {"initial_sessions", + mk( + non_neg_integer(), + #{ + desc => ?DESC(status_stats_initial_sessions), + required => true + } + )}, + {"current_sessions", + mk( + non_neg_integer(), + #{ + desc => ?DESC(status_stats_current_sessions), + required => true + } + )}, + {"current_disconnected_sessions", + mk( + non_neg_integer(), + #{ + desc => ?DESC(status_stats_current_disconnected_sessions), + required => false + } + )} + ]; +fields(global_coordinator_status) -> + without( + ["status", "process", "session_goal", "session_recipients", "stats"], + fields(local_status_enabled) + ) ++ + [ + {"donors", + mk( + list(binary()), + #{ + desc => ?DESC(coordinator_status_donors), + required => false + } + )}, + {"donor_conn_avg", + mk( + non_neg_integer(), + #{ + desc => ?DESC(coordinator_status_donor_conn_avg), + required => false + } + )}, + {"donor_sess_avg", + mk( + non_neg_integer(), + #{ + desc => ?DESC(coordinator_status_donor_sess_avg), + required => false + } + )}, + {"node", + mk( + binary(), + #{ + desc => ?DESC(coordinator_status_node), + required => true + } + )} + ]; +fields(global_evacuation_status) -> + without(["status", "process"], fields(local_status_enabled)) ++ + [ + {"node", + mk( + binary(), + #{ + desc => ?DESC(evacuation_status_node), + required => true + } + )} + ]; +fields(global_status) -> + [ + {"evacuations", + mk( + hoconsc:array(ref(global_evacuation_status)), + #{ + desc => ?DESC(global_status_evacuations), + required => false + } + )}, + {"rebalances", + mk( + hoconsc:array(ref(global_coordinator_status)), + #{ + desc => ?DESC(global_status_rebalances), + required => false + } + )} + ]. + +rebalance_example() -> + #{ + wait_health_check => 10, + conn_evict_rate => 10, + sess_evict_rate => 20, + abs_conn_threshold => 10, + rel_conn_threshold => 1.5, + abs_sess_threshold => 10, + rel_sess_threshold => 1.5, + wait_takeover => 10, + nodes => [<<"othernode@127.0.0.1">>] + }. + +rebalance_evacuation_example() -> + #{ + conn_evict_rate => 100, + sess_evict_rate => 100, + redirect_to => <<"othernode:1883">>, + wait_takeover => 10, + migrate_to => [<<"othernode@127.0.0.1">>] + }. + +local_status_response_schema() -> + hoconsc:union([ref(local_status_disabled), ref(local_status_enabled)]). + +response_schema() -> + mk( + map(), + #{ + desc => ?DESC(empty_response) + } + ). + +roots() -> []. 
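[Editor's note] A quick illustration of translate/2 above: handlers receive the raw JSON-decoded body with binary keys, and hocon_tconf:check_plain/4 both validates it against the referenced schema and, with atom_key => true, rewrites it to atom keys. An illustrative round trip (values made up):

    %% Illustrative round trip through the rebalance_start schema:
    #{conn_evict_rate := 10} = emqx_node_rebalance_api:translate(
        rebalance_start,
        #{<<"conn_evict_rate">> => 10}
    ).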
diff --git a/apps/emqx_node_rebalance/src/emqx_node_rebalance_app.erl b/apps/emqx_node_rebalance/src/emqx_node_rebalance_app.erl new file mode 100644 index 000000000..3cd59e0f4 --- /dev/null +++ b/apps/emqx_node_rebalance/src/emqx_node_rebalance_app.erl @@ -0,0 +1,22 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_node_rebalance_app). + +-behaviour(application). + +-emqx_plugin(?MODULE). + +-export([ + start/2, + stop/1 +]). + +start(_Type, _Args) -> + {ok, Sup} = emqx_node_rebalance_sup:start_link(), + ok = emqx_node_rebalance_cli:load(), + {ok, Sup}. + +stop(_State) -> + emqx_node_rebalance_cli:unload(). diff --git a/apps/emqx_node_rebalance/src/emqx_node_rebalance_cli.erl b/apps/emqx_node_rebalance/src/emqx_node_rebalance_cli.erl new file mode 100644 index 000000000..3bafb9ffe --- /dev/null +++ b/apps/emqx_node_rebalance/src/emqx_node_rebalance_cli.erl @@ -0,0 +1,305 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_node_rebalance_cli). + +%% APIs +-export([ + load/0, + unload/0, + cli/1 +]). + +load() -> + emqx_ctl:register_command(rebalance, {?MODULE, cli}, []). + +unload() -> + emqx_ctl:unregister_command(rebalance). + +cli(["start" | StartArgs]) -> + case start_args(StartArgs) of + {evacuation, Opts} -> + case emqx_node_rebalance_evacuation:status() of + disabled -> + ok = emqx_node_rebalance_evacuation:start(Opts), + emqx_ctl:print("Rebalance(evacuation) started~n"), + true; + {enabled, _} -> + emqx_ctl:print("Rebalance is already enabled~n"), + false + end; + {rebalance, Opts} -> + case emqx_node_rebalance:start(Opts) of + ok -> + emqx_ctl:print("Rebalance started~n"), + true; + {error, Reason} -> + emqx_ctl:print("Rebalance start error: ~p~n", [Reason]), + false + end; + {error, Error} -> + emqx_ctl:print("Rebalance start error: ~s~n", [Error]), + false + end; +cli(["node-status", NodeStr]) -> + case emqx_utils:safe_to_existing_atom(NodeStr, utf8) of + {ok, Node} -> + node_status(emqx_node_rebalance_status:local_status(Node)); + {error, _} -> + emqx_ctl:print("Node status error: invalid node~n"), + false + end; +cli(["node-status"]) -> + node_status(emqx_node_rebalance_status:local_status()); +cli(["status"]) -> + #{ + evacuations := Evacuations, + rebalances := Rebalances + } = emqx_node_rebalance_status:global_status(), + lists:foreach( + fun({Node, Status}) -> + emqx_ctl:print( + "--------------------------------------------------------------------~n" + ), + emqx_ctl:print( + "Node ~p: evacuation~n~s", + [Node, emqx_node_rebalance_status:format_local_status(Status)] + ) + end, + Evacuations + ), + lists:foreach( + fun({Node, Status}) -> + emqx_ctl:print( + "--------------------------------------------------------------------~n" + ), + emqx_ctl:print( + "Node ~p: rebalance coordinator~n~s", + [Node, emqx_node_rebalance_status:format_coordinator_status(Status)] + ) + end, + Rebalances + ); +cli(["stop"]) -> + case emqx_node_rebalance_evacuation:status() of + {enabled, _} -> + ok = emqx_node_rebalance_evacuation:stop(), + emqx_ctl:print("Rebalance(evacuation) stopped~n"), + true; + disabled -> + case emqx_node_rebalance:status() of + {enabled, _} -> + ok = emqx_node_rebalance:stop(), + 
emqx_ctl:print("Rebalance stopped~n"), + true; + disabled -> + emqx_ctl:print("Rebalance is already disabled~n"), + false + end + end; +cli(_) -> + emqx_ctl:usage( + [ + { + "rebalance start --evacuation \\\n" + " [--redirect-to \"Host1:Port1 Host2:Port2 ...\"] \\\n" + " [--conn-evict-rate CountPerSec] \\\n" + " [--migrate-to \"node1@host1 node2@host2 ...\"] \\\n" + " [--wait-takeover Secs] \\\n" + " [--sess-evict-rate CountPerSec]", + "Start current node evacuation with optional server redirect to the specified servers" + }, + + { + "rebalance start \\\n" + " [--nodes \"node1@host1 node2@host2\"] \\\n" + " [--wait-health-check Secs] \\\n" + " [--conn-evict-rate ConnPerSec] \\\n" + " [--abs-conn-threshold Count] \\\n" + " [--rel-conn-threshold Fraction] \\\n" + " [--conn-evict-rate ConnPerSec] \\\n" + " [--wait-takeover Secs] \\\n" + " [--sess-evict-rate CountPerSec] \\\n" + " [--abs-sess-threshold Count] \\\n" + " [--rel-sess-threshold Fraction]", + "Start rebalance on the specified nodes using the current node as the coordinator" + }, + + {"rebalance node-status", "Get current node rebalance status"}, + + {"rebalance node-status \"node1@host1\"", "Get remote node rebalance status"}, + + {"rebalance status", + "Get statuses of all current rebalance/evacuation processes across the cluster"}, + + {"rebalance stop", "Stop node rebalance"} + ] + ). + +node_status(NodeStatus) -> + case NodeStatus of + {Process, Status} when Process =:= evacuation orelse Process =:= rebalance -> + emqx_ctl:print( + "Rebalance type: ~p~n~s~n", + [Process, emqx_node_rebalance_status:format_local_status(Status)] + ); + disabled -> + emqx_ctl:print("Rebalance disabled~n"); + Other -> + emqx_ctl:print("Error detecting rebalance status: ~p~n", [Other]) + end. + +start_args(Args) -> + case collect_args(Args, #{}) of + {ok, #{"--evacuation" := true} = Collected} -> + case validate_evacuation(maps:to_list(Collected), #{}) of + {ok, Validated} -> + {evacuation, Validated}; + {error, _} = Error -> + Error + end; + {ok, #{} = Collected} -> + case validate_rebalance(maps:to_list(Collected), #{}) of + {ok, Validated} -> + {rebalance, Validated}; + {error, _} = Error -> + Error + end; + {error, _} = Error -> + Error + end. 
+ +collect_args([], Map) -> + {ok, Map}; +%% evacuation +collect_args(["--evacuation" | Args], Map) -> + collect_args(Args, Map#{"--evacuation" => true}); +collect_args(["--redirect-to", ServerReference | Args], Map) -> + collect_args(Args, Map#{"--redirect-to" => ServerReference}); +collect_args(["--migrate-to", MigrateTo | Args], Map) -> + collect_args(Args, Map#{"--migrate-to" => MigrateTo}); +%% rebalance +collect_args(["--nodes", Nodes | Args], Map) -> + collect_args(Args, Map#{"--nodes" => Nodes}); +collect_args(["--wait-health-check", WaitHealthCheck | Args], Map) -> + collect_args(Args, Map#{"--wait-health-check" => WaitHealthCheck}); +collect_args(["--abs-conn-threshold", AbsConnThres | Args], Map) -> + collect_args(Args, Map#{"--abs-conn-threshold" => AbsConnThres}); +collect_args(["--rel-conn-threshold", RelConnThres | Args], Map) -> + collect_args(Args, Map#{"--rel-conn-threshold" => RelConnThres}); +collect_args(["--abs-sess-threshold", AbsSessThres | Args], Map) -> + collect_args(Args, Map#{"--abs-sess-threshold" => AbsSessThres}); +collect_args(["--rel-sess-threshold", RelSessThres | Args], Map) -> + collect_args(Args, Map#{"--rel-sess-threshold" => RelSessThres}); +%% common +collect_args(["--conn-evict-rate", ConnEvictRate | Args], Map) -> + collect_args(Args, Map#{"--conn-evict-rate" => ConnEvictRate}); +collect_args(["--wait-takeover", WaitTakeover | Args], Map) -> + collect_args(Args, Map#{"--wait-takeover" => WaitTakeover}); +collect_args(["--sess-evict-rate", SessEvictRate | Args], Map) -> + collect_args(Args, Map#{"--sess-evict-rate" => SessEvictRate}); +%% fallback +collect_args(Args, _Map) -> + {error, io_lib:format("unknown arguments: ~p", [Args])}. + +validate_evacuation([], Map) -> + {ok, Map}; +validate_evacuation([{"--evacuation", _} | Rest], Map) -> + validate_evacuation(Rest, Map); +validate_evacuation([{"--redirect-to", ServerReference} | Rest], Map) -> + validate_evacuation(Rest, Map#{server_reference => list_to_binary(ServerReference)}); +validate_evacuation([{"--conn-evict-rate", _} | _] = Opts, Map) -> + validate_pos_int(conn_evict_rate, Opts, Map, fun validate_evacuation/2); +validate_evacuation([{"--sess-evict-rate", _} | _] = Opts, Map) -> + validate_pos_int(sess_evict_rate, Opts, Map, fun validate_evacuation/2); +validate_evacuation([{"--wait-takeover", _} | _] = Opts, Map) -> + validate_pos_int(wait_takeover, Opts, Map, fun validate_evacuation/2); +validate_evacuation([{"--migrate-to", MigrateTo} | Rest], Map) -> + case strings_to_atoms(string:tokens(MigrateTo, ", ")) of + {_, Invalid} when Invalid =/= [] -> + {error, io_lib:format("invalid --migrate-to, invalid nodes: ~p", [Invalid])}; + {Nodes, []} -> + case emqx_node_rebalance_evacuation:available_nodes(Nodes) of + [] -> + {error, "invalid --migrate-to, no nodes"}; + Nodes -> + validate_evacuation(Rest, Map#{migrate_to => Nodes}); + OtherNodes -> + {error, + io_lib:format( + "invalid --migrate-to, unavailable nodes: ~p", + [Nodes -- OtherNodes] + )} + end + end; +validate_evacuation(Rest, _Map) -> + {error, io_lib:format("unknown evacuation arguments: ~p", [Rest])}. 
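[Editor's note] Putting collect_args/2 and validate_evacuation/2 together: flags are first folded into a string-keyed map, then checked and converted one option at a time, with the "--evacuation" marker itself dropped. For example (illustrative values):

    %% start_args(["--evacuation", "--conn-evict-rate", "100", "--wait-takeover", "60"])
    %% -> {evacuation, #{conn_evict_rate => 100, wait_takeover => 60}}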
+ +validate_rebalance([], Map) -> + {ok, Map}; +validate_rebalance([{"--wait-health-check", _} | _] = Opts, Map) -> + validate_pos_int(wait_health_check, Opts, Map, fun validate_rebalance/2); +validate_rebalance([{"--conn-evict-rate", _} | _] = Opts, Map) -> + validate_pos_int(conn_evict_rate, Opts, Map, fun validate_rebalance/2); +validate_rebalance([{"--sess-evict-rate", _} | _] = Opts, Map) -> + validate_pos_int(sess_evict_rate, Opts, Map, fun validate_rebalance/2); +validate_rebalance([{"--abs-conn-threshold", _} | _] = Opts, Map) -> + validate_pos_int(abs_conn_threshold, Opts, Map, fun validate_rebalance/2); +validate_rebalance([{"--rel-conn-threshold", _} | _] = Opts, Map) -> + validate_fraction(rel_conn_threshold, Opts, Map, fun validate_rebalance/2); +validate_rebalance([{"--abs-sess-threshold", _} | _] = Opts, Map) -> + validate_pos_int(abs_sess_threshold, Opts, Map, fun validate_rebalance/2); +validate_rebalance([{"--rel-sess-threshold", _} | _] = Opts, Map) -> + validate_fraction(rel_sess_threshold, Opts, Map, fun validate_rebalance/2); +validate_rebalance([{"--wait-takeover", _} | _] = Opts, Map) -> + validate_pos_int(wait_takeover, Opts, Map, fun validate_rebalance/2); +validate_rebalance([{"--nodes", NodeStr} | Rest], Map) -> + case strings_to_atoms(string:tokens(NodeStr, ", ")) of + {_, Invalid} when Invalid =/= [] -> + {error, io_lib:format("invalid --nodes, invalid nodes: ~p", [Invalid])}; + {Nodes, []} -> + case emqx_node_rebalance:available_nodes(Nodes) of + [] -> + {error, "invalid --nodes, no nodes"}; + Nodes -> + validate_rebalance(Rest, Map#{nodes => Nodes}); + OtherNodes -> + {error, + io_lib:format( + "invalid --nodes, unavailable nodes: ~p", + [Nodes -- OtherNodes] + )} + end + end; +validate_rebalance(Rest, _Map) -> + {error, io_lib:format("unknown rebalance arguments: ~p", [Rest])}. + +validate_fraction(Name, [{OptionName, Value} | Rest], Map, Next) -> + case string:to_float(Value) of + {Num, ""} when Num > 1.0 -> + Next(Rest, Map#{Name => Num}); + _ -> + {error, "invalid " ++ OptionName ++ " value"} + end. + +validate_pos_int(Name, [{OptionName, Value} | Rest], Map, Next) -> + case string:to_integer(Value) of + {Int, ""} when Int > 0 -> + Next(Rest, Map#{Name => Int}); + _ -> + {error, "invalid " ++ OptionName ++ " value"} + end. + +strings_to_atoms(Strings) -> + strings_to_atoms(Strings, [], []). + +strings_to_atoms([], Atoms, Invalid) -> + {lists:reverse(Atoms), lists:reverse(Invalid)}; +strings_to_atoms([Str | Rest], Atoms, Invalid) -> + case emqx_utils:safe_to_existing_atom(Str, utf8) of + {ok, Atom} -> + strings_to_atoms(Rest, [Atom | Atoms], Invalid); + {error, _} -> + strings_to_atoms(Rest, Atoms, [Str | Invalid]) + end. diff --git a/apps/emqx_node_rebalance/src/emqx_node_rebalance_evacuation.erl b/apps/emqx_node_rebalance/src/emqx_node_rebalance_evacuation.erl new file mode 100644 index 000000000..4de362ca9 --- /dev/null +++ b/apps/emqx_node_rebalance/src/emqx_node_rebalance_evacuation.erl @@ -0,0 +1,308 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_node_rebalance_evacuation). + +-include("emqx_node_rebalance.hrl"). + +-include_lib("emqx/include/logger.hrl"). +-include_lib("emqx/include/types.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +-export([ + start/1, + status/0, + stop/0 +]). + +-export([start_link/0]). + +-behaviour(gen_statem). 
+ +-export([ + init/1, + callback_mode/0, + handle_event/4, + code_change/4 +]). + +-export([ + is_node_available/0, + available_nodes/1 +]). + +-export_type([ + start_opts/0, + start_error/0 +]). + +-ifdef(TEST). +-export([migrate_to/1]). +-endif. + +%%-------------------------------------------------------------------- +%% APIs +%%-------------------------------------------------------------------- + +-define(EVICT_INTERVAL_NO_NODES, 30000). + +-type migrate_to() :: [node()] | undefined. + +-type start_opts() :: #{ + server_reference => emqx_eviction_agent:server_reference(), + conn_evict_rate => pos_integer(), + sess_evict_rate => pos_integer(), + wait_takeover => pos_integer(), + migrate_to => migrate_to() +}. +-type start_error() :: already_started | eviction_agent_busy. +-type stats() :: #{ + initial_conns := non_neg_integer(), + initial_sessions := non_neg_integer(), + current_conns := non_neg_integer(), + current_sessions := non_neg_integer(), + conn_evict_rate := pos_integer(), + sess_evict_rate := pos_integer(), + server_reference := emqx_eviction_agent:server_reference(), + migrate_to := migrate_to() +}. +-type status() :: {enabled, stats()} | disabled. + +-spec start(start_opts()) -> ok_or_error(start_error()). +start(StartOpts) -> + Opts = maps:merge(default_opts(), StartOpts), + gen_statem:call(?MODULE, {start, Opts}). + +-spec stop() -> ok_or_error(not_started). +stop() -> + gen_statem:call(?MODULE, stop). + +-spec status() -> status(). +status() -> + gen_statem:call(?MODULE, status). + +-spec start_link() -> startlink_ret(). +start_link() -> + gen_statem:start_link({local, ?MODULE}, ?MODULE, [], []). + +-spec available_nodes(list(node())) -> list(node()). +available_nodes(Nodes) when is_list(Nodes) -> + {Available, _} = emqx_node_rebalance_evacuation_proto_v1:available_nodes(Nodes), + lists:filter(fun is_atom/1, Available). + +%%-------------------------------------------------------------------- +%% gen_statem callbacks +%%-------------------------------------------------------------------- + +callback_mode() -> handle_event_function. + +%% states: disabled, evicting_conns, waiting_takeover, evicting_sessions, prohibiting + +init([]) -> + case emqx_node_rebalance_evacuation_persist:read(default_opts()) of + {ok, #{server_reference := ServerReference} = Opts} -> + ?SLOG(warning, #{msg => "restoring_evacuation_state", opts => Opts}), + case emqx_eviction_agent:enable(?MODULE, ServerReference) of + ok -> + Data = init_data(#{}, Opts), + ok = warn_enabled(), + {ok, evicting_conns, Data, [{state_timeout, 0, evict_conns}]}; + {error, eviction_agent_busy} -> + emqx_node_rebalance_evacuation_persist:clear(), + {ok, disabled, #{}} + end; + none -> + {ok, disabled, #{}} + end. 
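[Editor's note] The states comment above lists the gen_statem states; on a successful evacuation the progression is strictly one-way, `stop` returns to disabled from any state, and init/1 re-enters evicting_conns directly when persisted options survive a restart:

    %% disabled -> evicting_conns -> waiting_takeover -> evicting_sessions -> prohibiting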
+ +%% start +handle_event( + {call, From}, + {start, #{server_reference := ServerReference} = Opts}, + disabled, + #{} = Data +) -> + case emqx_eviction_agent:enable(?MODULE, ServerReference) of + ok -> + NewData = init_data(Data, Opts), + ok = emqx_node_rebalance_evacuation_persist:save(Opts), + ?SLOG(warning, #{ + msg => "node_evacuation_started", + opts => Opts + }), + {next_state, evicting_conns, NewData, [ + {state_timeout, 0, evict_conns}, + {reply, From, ok} + ]}; + {error, eviction_agent_busy} -> + {keep_state_and_data, [{reply, From, {error, eviction_agent_busy}}]} + end; +handle_event({call, From}, {start, _Opts}, _State, #{}) -> + {keep_state_and_data, [{reply, From, {error, already_started}}]}; +%% stop +handle_event({call, From}, stop, disabled, #{}) -> + {keep_state_and_data, [{reply, From, {error, not_started}}]}; +handle_event({call, From}, stop, _State, Data) -> + ok = emqx_node_rebalance_evacuation_persist:clear(), + _ = emqx_eviction_agent:disable(?MODULE), + ?SLOG(warning, #{msg => "node_evacuation_stopped"}), + {next_state, disabled, deinit(Data), [{reply, From, ok}]}; +%% status +handle_event({call, From}, status, disabled, #{}) -> + {keep_state_and_data, [{reply, From, disabled}]}; +handle_event({call, From}, status, State, #{migrate_to := MigrateTo} = Data) -> + Stats = maps:with( + [ + initial_conns, + current_conns, + initial_sessions, + current_sessions, + server_reference, + conn_evict_rate, + sess_evict_rate + ], + Data + ), + {keep_state_and_data, [ + {reply, From, {enabled, Stats#{state => State, migrate_to => migrate_to(MigrateTo)}}} + ]}; +%% conn eviction +handle_event( + state_timeout, + evict_conns, + evicting_conns, + #{ + conn_evict_rate := ConnEvictRate, + wait_takeover := WaitTakeover + } = Data +) -> + case emqx_eviction_agent:status() of + {enabled, #{connections := Conns}} when Conns > 0 -> + ok = emqx_eviction_agent:evict_connections(ConnEvictRate), + ?tp(debug, node_evacuation_evict_conn, #{conn_evict_rate => ConnEvictRate}), + ?SLOG( + warning, + #{ + msg => "node_evacuation_evict_conns", + count => Conns, + conn_evict_rate => ConnEvictRate + } + ), + NewData = Data#{current_conns => Conns}, + {keep_state, NewData, [{state_timeout, ?EVICT_INTERVAL, evict_conns}]}; + {enabled, #{connections := 0}} -> + NewData = Data#{current_conns => 0}, + ?SLOG(warning, #{msg => "node_evacuation_evict_conns_done"}), + {next_state, waiting_takeover, NewData, [ + {state_timeout, timer:seconds(WaitTakeover), evict_sessions} + ]} + end; +handle_event( + state_timeout, + evict_sessions, + waiting_takeover, + Data +) -> + ?SLOG(warning, #{msg => "node_evacuation_waiting_takeover_done"}), + {next_state, evicting_sessions, Data, [{state_timeout, 0, evict_sessions}]}; +%% session eviction +handle_event( + state_timeout, + evict_sessions, + evicting_sessions, + #{ + sess_evict_rate := SessEvictRate, + migrate_to := MigrateTo, + current_sessions := CurrSessCount + } = Data +) -> + case emqx_eviction_agent:status() of + {enabled, #{sessions := SessCount}} when SessCount > 0 -> + case migrate_to(MigrateTo) of + [] -> + ?SLOG(warning, #{ + msg => "no_nodes_to_evacuate_sessions", session_count => CurrSessCount + }), + {keep_state_and_data, [ + {state_timeout, ?EVICT_INTERVAL_NO_NODES, evict_sessions} + ]}; + Nodes -> + ok = emqx_eviction_agent:evict_sessions(SessEvictRate, Nodes), + ?SLOG( + warning, + #{ + msg => "node_evacuation_evict_sessions", + session_count => SessCount, + session_evict_rate => SessEvictRate, + target_nodes => Nodes + } + ), + NewData = 
Data#{current_sessions => SessCount}, + {keep_state, NewData, [{state_timeout, ?EVICT_INTERVAL, evict_sessions}]} + end; + {enabled, #{sessions := 0}} -> + ?tp(debug, node_evacuation_evict_sess_over, #{}), + ?SLOG(warning, #{msg => "node_evacuation_evict_sessions_over"}), + NewData = Data#{current_sessions => 0}, + {next_state, prohibiting, NewData} + end; +handle_event({call, From}, Msg, State, Data) -> + ?SLOG(warning, #{msg => "unknown_call", call => Msg, state => State, data => Data}), + {keep_state_and_data, [{reply, From, ignored}]}; +handle_event(info, Msg, State, Data) -> + ?SLOG(warning, #{msg => "unknown_info", info => Msg, state => State, data => Data}), + keep_state_and_data; +handle_event(cast, Msg, State, Data) -> + ?SLOG(warning, #{msg => "unknown_cast", cast => Msg, state => State, data => Data}), + keep_state_and_data. + +code_change(_Vsn, State, Data, _Extra) -> + {ok, State, Data}. + +%%-------------------------------------------------------------------- +%% internal funs +%%-------------------------------------------------------------------- + +default_opts() -> + #{ + server_reference => undefined, + conn_evict_rate => ?DEFAULT_CONN_EVICT_RATE, + sess_evict_rate => ?DEFAULT_SESS_EVICT_RATE, + wait_takeover => ?DEFAULT_WAIT_TAKEOVER, + migrate_to => undefined + }. + +init_data(Data0, Opts) -> + Data1 = maps:merge(Data0, Opts), + {enabled, #{connections := ConnCount, sessions := SessCount}} = emqx_eviction_agent:status(), + Data1#{ + initial_conns => ConnCount, + current_conns => ConnCount, + initial_sessions => SessCount, + current_sessions => SessCount + }. + +deinit(Data) -> + Keys = + [initial_conns, current_conns, initial_sessions, current_sessions] ++ + maps:keys(default_opts()), + maps:without(Keys, Data). + +warn_enabled() -> + ?SLOG(warning, #{msg => "node_evacuation_enabled"}), + io:format( + standard_error, "Node evacuation is enabled. The node will not receive connections.~n", [] + ). + +migrate_to(undefined) -> + migrate_to(all_nodes()); +migrate_to(Nodes) when is_list(Nodes) -> + available_nodes(Nodes). + +is_node_available() -> + disabled = emqx_eviction_agent:status(), + node(). + +all_nodes() -> + mria_mnesia:running_nodes() -- [node()]. diff --git a/apps/emqx_node_rebalance/src/emqx_node_rebalance_evacuation_persist.erl b/apps/emqx_node_rebalance/src/emqx_node_rebalance_evacuation_persist.erl new file mode 100644 index 000000000..6b145c699 --- /dev/null +++ b/apps/emqx_node_rebalance/src/emqx_node_rebalance_evacuation_persist.erl @@ -0,0 +1,120 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_node_rebalance_evacuation_persist). + +-export([ + save/1, + clear/0, + read/1 +]). + +-ifdef(TEST). +-export([evacuation_filepath/0]). +-endif. + +-include("emqx_node_rebalance.hrl"). +-include_lib("emqx/include/types.hrl"). + +%%-------------------------------------------------------------------- +%% APIs +%%-------------------------------------------------------------------- + +%% do not persist `migrate_to`: +%% * after restart there is nothing to migrate +%% * this value may be invalid after node was offline +-type persisted_start_opts() :: #{ + server_reference => emqx_eviction_agent:server_reference(), + conn_evict_rate => pos_integer(), + sess_evict_rate => pos_integer(), + wait_takeover => pos_integer() +}. 
+-type start_opts() :: #{ + server_reference => emqx_eviction_agent:server_reference(), + conn_evict_rate => pos_integer(), + sess_evict_rate => pos_integer(), + wait_takeover => pos_integer(), + migrate_to => emqx_node_rebalance_evacuation:migrate_to() +}. + +-spec save(persisted_start_opts()) -> ok_or_error(term()). +save( + #{ + server_reference := ServerReference, + conn_evict_rate := ConnEvictRate, + sess_evict_rate := SessEvictRate, + wait_takeover := WaitTakeover + } = Data +) when + (is_binary(ServerReference) orelse ServerReference =:= undefined) andalso + is_integer(ConnEvictRate) andalso ConnEvictRate > 0 andalso + is_integer(SessEvictRate) andalso SessEvictRate > 0 andalso + is_integer(WaitTakeover) andalso WaitTakeover >= 0 +-> + Filepath = evacuation_filepath(), + case filelib:ensure_dir(Filepath) of + ok -> + JsonData = emqx_utils_json:encode( + prepare_for_encode(maps:with(persist_keys(), Data)), + [pretty] + ), + file:write_file(Filepath, JsonData); + {error, _} = Error -> + Error + end. + +-spec clear() -> ok. +clear() -> + file:delete(evacuation_filepath()). + +-spec read(start_opts()) -> {ok, start_opts()} | none. +read(DefaultOpts) -> + case file:read_file(evacuation_filepath()) of + {ok, Data} -> + case emqx_utils_json:safe_decode(Data, [return_maps]) of + {ok, Map} when is_map(Map) -> + {ok, map_to_opts(DefaultOpts, Map)}; + _NotAMap -> + {ok, DefaultOpts} + end; + {error, _} -> + none + end. + +%%-------------------------------------------------------------------- +%% Internal funcs +%%-------------------------------------------------------------------- + +persist_keys() -> + [ + server_reference, + conn_evict_rate, + sess_evict_rate, + wait_takeover + ]. + +prepare_for_encode(#{server_reference := undefined} = Data) -> + Data#{server_reference => null}; +prepare_for_encode(Data) -> + Data. + +format_after_decode(#{server_reference := null} = Data) -> + Data#{server_reference => undefined}; +format_after_decode(Data) -> + Data. + +map_to_opts(DefaultOpts, Map) -> + format_after_decode( + map_to_opts( + maps:to_list(DefaultOpts), Map, #{} + ) + ). + +map_to_opts([], _Map, Opts) -> + Opts; +map_to_opts([{Key, DefaultVal} | Rest], Map, Opts) -> + map_to_opts(Rest, Map, Opts#{Key => maps:get(atom_to_binary(Key), Map, DefaultVal)}). + +evacuation_filepath() -> + filename:join([emqx:data_dir(), ?EVACUATION_FILENAME]). diff --git a/apps/emqx_node_rebalance/src/emqx_node_rebalance_status.erl b/apps/emqx_node_rebalance/src/emqx_node_rebalance_status.erl new file mode 100644 index 000000000..1d45d64e8 --- /dev/null +++ b/apps/emqx_node_rebalance/src/emqx_node_rebalance_status.erl @@ -0,0 +1,238 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_node_rebalance_status). + +-export([ + local_status/0, + local_status/1, + global_status/0, + format_local_status/1, + format_coordinator_status/1 +]). + +%% For RPC +-export([ + evacuation_status/0, + rebalance_status/0 +]). + +%%-------------------------------------------------------------------- +%% APIs +%%-------------------------------------------------------------------- + +-spec local_status() -> disabled | {evacuation, map()} | {rebalance, map()}. 
+local_status() ->
+    case emqx_node_rebalance_evacuation:status() of
+        {enabled, Status} ->
+            {evacuation, evacuation(Status)};
+        disabled ->
+            case emqx_node_rebalance_agent:status() of
+                {enabled, CoordinatorPid} ->
+                    case emqx_node_rebalance:status(CoordinatorPid) of
+                        {enabled, Status} ->
+                            local_rebalance(Status, node());
+                        disabled ->
+                            disabled
+                    end;
+                disabled ->
+                    disabled
+            end
+    end.
+
+-spec local_status(node()) -> disabled | {evacuation, map()} | {rebalance, map()}.
+local_status(Node) ->
+    emqx_node_rebalance_status_proto_v1:local_status(Node).
+
+-spec format_local_status(map()) -> iodata().
+format_local_status(Status) ->
+    format_status(Status, local_status_field_format_order()).
+
+-spec global_status() -> #{rebalances := [{node(), map()}], evacuations := [{node(), map()}]}.
+global_status() ->
+    Nodes = mria_mnesia:running_nodes(),
+    {RebalanceResults, _} = emqx_node_rebalance_status_proto_v1:rebalance_status(Nodes),
+    Rebalances = [
+        {Node, coordinator_rebalance(Status)}
+     || {Node, {enabled, Status}} <- RebalanceResults
+    ],
+    {EvacuationResults, _} = emqx_node_rebalance_status_proto_v1:evacuation_status(Nodes),
+    Evacuations = [{Node, evacuation(Status)} || {Node, {enabled, Status}} <- EvacuationResults],
+    #{rebalances => Rebalances, evacuations => Evacuations}.
+
+-spec format_coordinator_status(map()) -> iodata().
+format_coordinator_status(Status) ->
+    format_status(Status, coordinator_status_field_format_order()).
+
+%%--------------------------------------------------------------------
+%% Internal functions
+%%--------------------------------------------------------------------
+
+evacuation(Status) ->
+    #{
+        state => maps:get(state, Status),
+        connection_eviction_rate => maps:get(conn_evict_rate, Status),
+        session_eviction_rate => maps:get(sess_evict_rate, Status),
+        connection_goal => 0,
+        session_goal => 0,
+        session_recipients => maps:get(migrate_to, Status),
+        stats => #{
+            initial_connected => maps:get(initial_conns, Status),
+            current_connected => maps:get(current_conns, Status),
+            initial_sessions => maps:get(initial_sessions, Status),
+            current_sessions => maps:get(current_sessions, Status)
+        }
+    }.
+
+local_rebalance(#{donors := Donors} = Stats, Node) ->
+    case lists:member(Node, Donors) of
+        true -> {rebalance, donor_rebalance(Stats, Node)};
+        false -> disabled
+    end.
+
+donor_rebalance(Status, Node) ->
+    Opts = maps:get(opts, Status),
+    InitialConnCounts = maps:get(initial_conn_counts, Status),
+    InitialSessCounts = maps:get(initial_sess_counts, Status),
+
+    CurrentStats = #{
+        initial_connected => maps:get(Node, InitialConnCounts),
+        initial_sessions => maps:get(Node, InitialSessCounts),
+        current_connected => emqx_eviction_agent:connection_count(),
+        current_sessions => emqx_eviction_agent:session_count(),
+        current_disconnected_sessions => emqx_eviction_agent:session_count(
+            disconnected
+        )
+    },
+    maps:from_list(
+        [
+            {state, maps:get(state, Status)},
+            {coordinator_node, maps:get(coordinator_node, Status)},
+            {connection_eviction_rate, maps:get(conn_evict_rate, Opts)},
+            {session_eviction_rate, maps:get(sess_evict_rate, Opts)},
+            {recipients, maps:get(recipients, Status)},
+            {stats, CurrentStats}
+        ] ++
+            [
+                {connection_goal, maps:get(recipient_conn_avg, Status)}
+             || maps:is_key(recipient_conn_avg, Status)
+            ] ++
+            [
+                {disconnected_session_goal, maps:get(recipient_sess_avg, Status)}
+             || maps:is_key(recipient_sess_avg, Status)
+            ]
+    ).
+ +coordinator_rebalance(Status) -> + Opts = maps:get(opts, Status), + maps:from_list( + [ + {state, maps:get(state, Status)}, + {coordinator_node, maps:get(coordinator_node, Status)}, + {connection_eviction_rate, maps:get(conn_evict_rate, Opts)}, + {session_eviction_rate, maps:get(sess_evict_rate, Opts)}, + {recipients, maps:get(recipients, Status)}, + {donors, maps:get(donors, Status)} + ] ++ + [ + {connection_goal, maps:get(recipient_conn_avg, Status)} + || maps:is_key(recipient_conn_avg, Status) + ] ++ + [ + {disconnected_session_goal, maps:get(recipient_sess_avg, Status)} + || maps:is_key(recipient_sess_avg, Status) + ] ++ + [ + {donor_conn_avg, maps:get(donor_conn_avg, Status)} + || maps:is_key(donor_conn_avg, Status) + ] ++ + [ + {donor_sess_avg, maps:get(donor_sess_avg, Status)} + || maps:is_key(donor_sess_avg, Status) + ] + ). + +local_status_field_format_order() -> + [ + state, + coordinator_node, + connection_eviction_rate, + session_eviction_rate, + connection_goal, + session_goal, + disconnected_session_goal, + session_recipients, + recipients, + stats + ]. + +coordinator_status_field_format_order() -> + [ + state, + coordinator_node, + donors, + recipients, + connection_eviction_rate, + session_eviction_rate, + connection_goal, + disconnected_session_goal, + donor_conn_avg, + donor_sess_avg + ]. + +format_status(Status, FieldOrder) -> + Fields = lists:flatmap( + fun(FieldName) -> + maps:to_list(maps:with([FieldName], Status)) + end, + FieldOrder + ), + lists:map( + fun format_local_status_field/1, + Fields + ). + +format_local_status_field({state, State}) -> + io_lib:format("Rebalance state: ~p~n", [State]); +format_local_status_field({coordinator_node, Node}) -> + io_lib:format("Coordinator node: ~p~n", [Node]); +format_local_status_field({connection_eviction_rate, ConnEvictRate}) -> + io_lib:format("Connection eviction rate: ~p connections/second~n", [ConnEvictRate]); +format_local_status_field({session_eviction_rate, SessEvictRate}) -> + io_lib:format("Session eviction rate: ~p sessions/second~n", [SessEvictRate]); +format_local_status_field({connection_goal, ConnGoal}) -> + io_lib:format("Connection goal: ~p~n", [ConnGoal]); +format_local_status_field({session_goal, SessGoal}) -> + io_lib:format("Session goal: ~p~n", [SessGoal]); +format_local_status_field({disconnected_session_goal, DisconnSessGoal}) -> + io_lib:format("Disconnected session goal: ~p~n", [DisconnSessGoal]); +format_local_status_field({session_recipients, SessionRecipients}) -> + io_lib:format("Session recipient nodes: ~p~n", [SessionRecipients]); +format_local_status_field({recipients, Recipients}) -> + io_lib:format("Recipient nodes: ~p~n", [Recipients]); +format_local_status_field({donors, Donors}) -> + io_lib:format("Donor nodes: ~p~n", [Donors]); +format_local_status_field({donor_conn_avg, DonorConnAvg}) -> + io_lib:format("Current average donor node connection count: ~p~n", [DonorConnAvg]); +format_local_status_field({donor_sess_avg, DonorSessAvg}) -> + io_lib:format("Current average donor node disconnected session count: ~p~n", [DonorSessAvg]); +format_local_status_field({stats, Stats}) -> + format_local_stats(Stats). + +format_local_stats(Stats) -> + [ + "Channel statistics:\n" + | lists:map( + fun({Name, Value}) -> + io_lib:format(" ~p: ~p~n", [Name, Value]) + end, + maps:to_list(Stats) + ) + ]. + +evacuation_status() -> + {node(), emqx_node_rebalance_evacuation:status()}. + +rebalance_status() -> + {node(), emqx_node_rebalance:status()}. 
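[Editor's note] For reference, format_local_status/1 renders one labelled line per field present in the status map, in the order given by local_status_field_format_order/0. For an evacuation it might print something like this (values illustrative):

    Rebalance state: evicting_conns
    Connection eviction rate: 500 connections/second
    Session eviction rate: 500 sessions/second
    Connection goal: 0
    Session goal: 0
    Session recipient nodes: ['emqx2@127.0.0.1']
    Channel statistics:
      current_connected: 376
      current_sessions: 500
      initial_connected: 500
      initial_sessions: 500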
diff --git a/apps/emqx_node_rebalance/src/emqx_node_rebalance_sup.erl b/apps/emqx_node_rebalance/src/emqx_node_rebalance_sup.erl new file mode 100644 index 000000000..cfaccc4c2 --- /dev/null +++ b/apps/emqx_node_rebalance/src/emqx_node_rebalance_sup.erl @@ -0,0 +1,35 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_node_rebalance_sup). + +-behaviour(supervisor). + +-export([start_link/0]). + +-export([init/1]). + +start_link() -> + supervisor:start_link({local, ?MODULE}, ?MODULE, []). + +init([]) -> + Childs = [ + child_spec(emqx_node_rebalance_evacuation, []), + child_spec(emqx_node_rebalance_agent, []), + child_spec(emqx_node_rebalance, []) + ], + {ok, { + #{strategy => one_for_one, intensity => 10, period => 3600}, + Childs + }}. + +child_spec(Mod, Args) -> + #{ + id => Mod, + start => {Mod, start_link, Args}, + restart => permanent, + shutdown => 5000, + type => worker, + modules => [Mod] + }. diff --git a/apps/emqx_node_rebalance/src/proto/emqx_node_rebalance_api_proto_v1.erl b/apps/emqx_node_rebalance/src/proto/emqx_node_rebalance_api_proto_v1.erl new file mode 100644 index 000000000..131973932 --- /dev/null +++ b/apps/emqx_node_rebalance/src/proto/emqx_node_rebalance_api_proto_v1.erl @@ -0,0 +1,43 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_node_rebalance_api_proto_v1). + +-behaviour(emqx_bpapi). + +-export([ + introduced_in/0, + + node_rebalance_evacuation_start/2, + node_rebalance_evacuation_stop/1, + + node_rebalance_start/2, + node_rebalance_stop/1 +]). + +-include_lib("emqx/include/bpapi.hrl"). +-include_lib("emqx/include/types.hrl"). + +introduced_in() -> + "5.0.22". + +-spec node_rebalance_evacuation_start(node(), emqx_node_rebalance_evacuation:start_opts()) -> + emqx_rpc:badrpc() | ok_or_error(emqx_node_rebalance_evacuation:start_error()). +node_rebalance_evacuation_start(Node, #{} = Opts) -> + rpc:call(Node, emqx_node_rebalance_evacuation, start, [Opts]). + +-spec node_rebalance_evacuation_stop(node()) -> + emqx_rpc:badrpc() | ok_or_error(not_started). +node_rebalance_evacuation_stop(Node) -> + rpc:call(Node, emqx_node_rebalance_evacuation, stop, []). + +-spec node_rebalance_start(node(), emqx_node_rebalance:start_opts()) -> + emqx_rpc:badrpc() | ok_or_error(emqx_node_rebalance:start_error()). +node_rebalance_start(Node, Opts) -> + rpc:call(Node, emqx_node_rebalance, start, [Opts]). + +-spec node_rebalance_stop(node()) -> + emqx_rpc:badrpc() | ok_or_error(not_started). +node_rebalance_stop(Node) -> + rpc:call(Node, emqx_node_rebalance, stop, []). diff --git a/apps/emqx_node_rebalance/src/proto/emqx_node_rebalance_evacuation_proto_v1.erl b/apps/emqx_node_rebalance/src/proto/emqx_node_rebalance_evacuation_proto_v1.erl new file mode 100644 index 000000000..f5a6e1077 --- /dev/null +++ b/apps/emqx_node_rebalance/src/proto/emqx_node_rebalance_evacuation_proto_v1.erl @@ -0,0 +1,22 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_node_rebalance_evacuation_proto_v1). + +-behaviour(emqx_bpapi). 
+ +-export([ + introduced_in/0, + + available_nodes/1 +]). + +-include_lib("emqx/include/bpapi.hrl"). + +introduced_in() -> + "5.0.22". + +-spec available_nodes([node()]) -> emqx_rpc:multicall_result(node()). +available_nodes(Nodes) -> + rpc:multicall(Nodes, emqx_node_rebalance_evacuation, is_node_available, []). diff --git a/apps/emqx_node_rebalance/src/proto/emqx_node_rebalance_proto_v1.erl b/apps/emqx_node_rebalance/src/proto/emqx_node_rebalance_proto_v1.erl new file mode 100644 index 000000000..98625d4fd --- /dev/null +++ b/apps/emqx_node_rebalance/src/proto/emqx_node_rebalance_proto_v1.erl @@ -0,0 +1,62 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_node_rebalance_proto_v1). + +-behaviour(emqx_bpapi). + +-export([ + introduced_in/0, + + available_nodes/1, + evict_connections/2, + evict_sessions/4, + connection_counts/1, + session_counts/1, + enable_rebalance_agent/2, + disable_rebalance_agent/2, + disconnected_session_counts/1 +]). + +-include_lib("emqx/include/bpapi.hrl"). +-include_lib("emqx/include/types.hrl"). + +introduced_in() -> + "5.0.22". + +-spec available_nodes([node()]) -> emqx_rpc:multicall_result(node()). +available_nodes(Nodes) -> + rpc:multicall(Nodes, emqx_node_rebalance, is_node_available, []). + +-spec evict_connections([node()], non_neg_integer()) -> + emqx_rpc:multicall_result(ok_or_error(disabled)). +evict_connections(Nodes, Count) -> + rpc:multicall(Nodes, emqx_eviction_agent, evict_connections, [Count]). + +-spec evict_sessions([node()], non_neg_integer(), [node()], emqx_channel:conn_state()) -> + emqx_rpc:multicall_result(ok_or_error(disabled)). +evict_sessions(Nodes, Count, RecipientNodes, ConnState) -> + rpc:multicall(Nodes, emqx_eviction_agent, evict_sessions, [Count, RecipientNodes, ConnState]). + +-spec connection_counts([node()]) -> emqx_rpc:multicall_result({ok, non_neg_integer()}). +connection_counts(Nodes) -> + rpc:multicall(Nodes, emqx_node_rebalance, connection_count, []). + +-spec session_counts([node()]) -> emqx_rpc:multicall_result({ok, non_neg_integer()}). +session_counts(Nodes) -> + rpc:multicall(Nodes, emqx_node_rebalance, session_count, []). + +-spec enable_rebalance_agent([node()], pid()) -> + emqx_rpc:multicall_result(ok_or_error(already_enabled | eviction_agent_busy)). +enable_rebalance_agent(Nodes, OwnerPid) -> + rpc:multicall(Nodes, emqx_node_rebalance_agent, enable, [OwnerPid]). + +-spec disable_rebalance_agent([node()], pid()) -> + emqx_rpc:multicall_result(ok_or_error(already_disabled | invalid_coordinator)). +disable_rebalance_agent(Nodes, OwnerPid) -> + rpc:multicall(Nodes, emqx_node_rebalance_agent, disable, [OwnerPid]). + +-spec disconnected_session_counts([node()]) -> emqx_rpc:multicall_result({ok, non_neg_integer()}). +disconnected_session_counts(Nodes) -> + rpc:multicall(Nodes, emqx_node_rebalance, disconnected_session_count, []). diff --git a/apps/emqx_node_rebalance/src/proto/emqx_node_rebalance_status_proto_v1.erl b/apps/emqx_node_rebalance/src/proto/emqx_node_rebalance_status_proto_v1.erl new file mode 100644 index 000000000..e3e4a423c --- /dev/null +++ b/apps/emqx_node_rebalance/src/proto/emqx_node_rebalance_status_proto_v1.erl @@ -0,0 +1,36 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. 
+%%-------------------------------------------------------------------- + +-module(emqx_node_rebalance_status_proto_v1). + +-behaviour(emqx_bpapi). + +-export([ + introduced_in/0, + + local_status/1, + rebalance_status/1, + evacuation_status/1 +]). + +-include_lib("emqx/include/bpapi.hrl"). +-include_lib("emqx/include/types.hrl"). + +introduced_in() -> + "5.0.22". + +-spec local_status(node()) -> + emqx_rpc:badrpc() | disabled | {evacuation, map()} | {rebalance, map()}. +local_status(Node) -> + rpc:call(Node, emqx_node_rebalance_status, local_status, []). + +-spec rebalance_status([node()]) -> + emqx_rpc:multicall_result({node(), map()}). +rebalance_status(Nodes) -> + rpc:multicall(Nodes, emqx_node_rebalance_status, rebalance_status, []). + +-spec evacuation_status([node()]) -> + emqx_rpc:multicall_result({node(), map()}). +evacuation_status(Nodes) -> + rpc:multicall(Nodes, emqx_node_rebalance_status, evacuation_status, []). diff --git a/apps/emqx_node_rebalance/test/emqx_node_rebalance_SUITE.erl b/apps/emqx_node_rebalance/test/emqx_node_rebalance_SUITE.erl new file mode 100644 index 000000000..a818145a2 --- /dev/null +++ b/apps/emqx_node_rebalance/test/emqx_node_rebalance_SUITE.erl @@ -0,0 +1,229 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_node_rebalance_SUITE). + +-compile(export_all). +-compile(nowarn_export_all). + +-include_lib("emqx/include/emqx.hrl"). +-include_lib("emqx/include/emqx_mqtt.hrl"). +-include_lib("emqx/include/asserts.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +-import( + emqx_eviction_agent_test_helpers, + [emqtt_connect_many/1, emqtt_connect_many/2, stop_many/1, case_specific_node_name/3] +). + +-define(START_APPS, [emqx_eviction_agent, emqx_node_rebalance]). + +all() -> + emqx_common_test_helpers:all(?MODULE). + +init_per_suite(Config) -> + ok = emqx_common_test_helpers:start_apps([]), + Config. + +end_per_suite(_Config) -> + ok = emqx_common_test_helpers:stop_apps([]), + ok. + +init_per_testcase(Case, Config) -> + ClusterNodes = emqx_eviction_agent_test_helpers:start_cluster( + [ + {case_specific_node_name(?MODULE, Case, '_donor'), 2883}, + {case_specific_node_name(?MODULE, Case, '_recipient'), 3883} + ], + ?START_APPS + ), + ok = snabbkaffe:start_trace(), + [{cluster_nodes, ClusterNodes} | Config]. + +end_per_testcase(_Case, Config) -> + ok = snabbkaffe:stop(), + ok = emqx_eviction_agent_test_helpers:stop_cluster( + ?config(cluster_nodes, Config), + ?START_APPS + ). 
+ +%%-------------------------------------------------------------------- +%% Tests +%%-------------------------------------------------------------------- + +t_rebalance(Config) -> + process_flag(trap_exit, true), + + [{DonorNode, DonorPort}, {RecipientNode, _RecipientPort}] = ?config(cluster_nodes, Config), + + Nodes = [DonorNode, RecipientNode], + + Conns = emqtt_connect_many(DonorPort, 500), + + Opts = #{ + conn_evict_rate => 10, + sess_evict_rate => 10, + evict_interval => 10, + abs_conn_threshold => 50, + abs_sess_threshold => 50, + rel_conn_threshold => 1.0, + rel_sess_threshold => 1.0, + wait_health_check => 0.01, + wait_takeover => 0.01, + nodes => Nodes + }, + + ?assertWaitEvent( + ok = rpc:call(DonorNode, emqx_node_rebalance, start, [Opts]), + #{?snk_kind := emqx_node_rebalance_evict_sess_over}, + 10000 + ), + + DonorConnCount = rpc:call(DonorNode, emqx_eviction_agent, connection_count, []), + DonorSessCount = rpc:call(DonorNode, emqx_eviction_agent, session_count, []), + DonorDSessCount = rpc:call(DonorNode, emqx_eviction_agent, session_count, [disconnected]), + + RecipientConnCount = rpc:call(RecipientNode, emqx_eviction_agent, connection_count, []), + RecipientSessCount = rpc:call(RecipientNode, emqx_eviction_agent, session_count, []), + RecipientDSessCount = rpc:call(RecipientNode, emqx_eviction_agent, session_count, [disconnected]), + + ct:pal( + "Donor: conn=~p, sess=~p, dsess=~p", + [DonorConnCount, DonorSessCount, DonorDSessCount] + ), + ct:pal( + "Recipient: conn=~p, sess=~p, dsess=~p", + [RecipientConnCount, RecipientSessCount, RecipientDSessCount] + ), + + ?assert(DonorConnCount - 50 =< RecipientConnCount), + ?assert(DonorDSessCount - 50 =< RecipientDSessCount), + + ok = stop_many(Conns). + +t_rebalance_node_crash(Config) -> + process_flag(trap_exit, true), + + [{DonorNode, DonorPort}, {RecipientNode, _RecipientPort}] = ?config(cluster_nodes, Config), + + Nodes = [DonorNode, RecipientNode], + + Conns = emqtt_connect_many(DonorPort, 500), + + Opts = #{ + conn_evict_rate => 10, + sess_evict_rate => 10, + evict_interval => 10, + abs_conn_threshold => 50, + abs_sess_threshold => 50, + rel_conn_threshold => 1.0, + rel_sess_threshold => 1.0, + wait_health_check => 0.01, + wait_takeover => 0.01, + nodes => Nodes + }, + + ?assertWaitEvent( + begin + ok = rpc:call(DonorNode, emqx_node_rebalance, start, [Opts]), + emqx_common_test_helpers:stop_slave(RecipientNode) + end, + #{?snk_kind := emqx_node_rebalance_started}, + 1000 + ), + + ?assertEqual( + disabled, + rpc:call(DonorNode, emqx_node_rebalance, status, []) + ), + + ok = stop_many(Conns). + +t_no_need_to_rebalance(Config) -> + process_flag(trap_exit, true), + + [{DonorNode, DonorPort}, {RecipientNode, _RecipientPort}] = ?config(cluster_nodes, Config), + + Nodes = [DonorNode, RecipientNode], + + Opts = #{ + conn_evict_rate => 10, + sess_evict_rate => 10, + evict_interval => 10, + abs_conn_threshold => 50, + abs_sess_threshold => 50, + rel_conn_threshold => 1.0, + rel_sess_threshold => 1.0, + wait_health_check => 0.01, + wait_takeover => 0.01, + nodes => Nodes + }, + + ?assertEqual( + {error, nothing_to_balance}, + rpc:call(DonorNode, emqx_node_rebalance, start, [Opts]) + ), + + Conns = emqtt_connect_many(DonorPort, 50), + + ?assertEqual( + {error, nothing_to_balance}, + rpc:call(DonorNode, emqx_node_rebalance, start, [Opts]) + ), + + ok = stop_many(Conns). 
+
+t_unknown_messages(Config) ->
+    process_flag(trap_exit, true),
+    [{DonorNode, DonorPort}, {RecipientNode, _RecipientPort}] = ?config(cluster_nodes, Config),
+
+    Nodes = [DonorNode, RecipientNode],
+
+    Conns = emqtt_connect_many(DonorPort, 500),
+
+    Opts = #{
+        wait_health_check => 100,
+        abs_conn_threshold => 50,
+        nodes => Nodes
+    },
+
+    Pid = rpc:call(DonorNode, erlang, whereis, [emqx_node_rebalance]),
+
+    Pid ! unknown,
+    ok = gen_server:cast(Pid, unknown),
+    ?assertEqual(
+        ignored,
+        gen_server:call(Pid, unknown)
+    ),
+
+    ok = rpc:call(DonorNode, emqx_node_rebalance, start, [Opts]),
+
+    Pid ! unknown,
+    ok = gen_server:cast(Pid, unknown),
+    ?assertEqual(
+        ignored,
+        gen_server:call(Pid, unknown)
+    ),
+
+    ok = stop_many(Conns).
+
+t_available_nodes(Config) ->
+    [{DonorNode, _DonorPort}, {RecipientNode, _RecipientPort}] = ?config(cluster_nodes, Config),
+
+    %% Start the eviction agent on RecipientNode so that it is "occupied"
+    %% and not available for rebalance
+    ok = rpc:call(RecipientNode, emqx_eviction_agent, enable, [test_rebalance, undefined]),
+
+    %% Only DonorNode should be available for rebalance, since RecipientNode is "occupied"
+    ?assertEqual(
+        [DonorNode],
+        rpc:call(
+            DonorNode,
+            emqx_node_rebalance,
+            available_nodes,
+            [[DonorNode, RecipientNode]]
+        )
+    ).
diff --git a/apps/emqx_node_rebalance/test/emqx_node_rebalance_agent_SUITE.erl b/apps/emqx_node_rebalance/test/emqx_node_rebalance_agent_SUITE.erl
new file mode 100644
index 000000000..8b21f9433
--- /dev/null
+++ b/apps/emqx_node_rebalance/test/emqx_node_rebalance_agent_SUITE.erl
@@ -0,0 +1,214 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+
+-module(emqx_node_rebalance_agent_SUITE).
+
+-compile(export_all).
+-compile(nowarn_export_all).
+
+-include_lib("emqx/include/emqx.hrl").
+-include_lib("emqx/include/emqx_mqtt.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("snabbkaffe/include/snabbkaffe.hrl").
+
+-import(
+    emqx_eviction_agent_test_helpers,
+    [case_specific_node_name/2]
+).
+
+all() ->
+    [
+        {group, local},
+        {group, cluster}
+    ].
+
+groups() ->
+    [
+        {local, [], [
+            t_enable_disable,
+            t_enable_agent_busy,
+            t_unknown_messages
+        ]},
+        {cluster, [], [
+            t_rebalance_agent_coordinator_fail,
+            t_rebalance_agent_fail
+        ]}
+    ].
+
+init_per_suite(Config) ->
+    ok = emqx_common_test_helpers:start_apps([emqx_eviction_agent, emqx_node_rebalance]),
+    Config.
+
+end_per_suite(_Config) ->
+    ok = emqx_common_test_helpers:stop_apps([emqx_eviction_agent, emqx_node_rebalance]),
+    ok.
+
+init_per_group(local, Config) ->
+    [{cluster, false} | Config];
+init_per_group(cluster, Config) ->
+    [{cluster, true} | Config].
+
+end_per_group(_Group, _Config) ->
+    ok.
+
+init_per_testcase(Case, Config) ->
+    case ?config(cluster, Config) of
+        true ->
+            ClusterNodes = emqx_eviction_agent_test_helpers:start_cluster(
+                [{case_specific_node_name(?MODULE, Case), 2883}],
+                [emqx_eviction_agent, emqx_node_rebalance]
+            ),
+            [{cluster_nodes, ClusterNodes} | Config];
+        false ->
+            Config
+    end.
+
+end_per_testcase(_Case, Config) ->
+    case ?config(cluster, Config) of
+        true ->
+            emqx_eviction_agent_test_helpers:stop_cluster(
+                ?config(cluster_nodes, Config),
+                [emqx_eviction_agent, emqx_node_rebalance]
+            );
+        false ->
+            ok
+    end.
+
+%%--------------------------------------------------------------------
+%% Tests
+%%--------------------------------------------------------------------
+
+%% Local tests
+
+t_enable_disable(_Config) ->
+    ?assertEqual(
+        disabled,
+        emqx_node_rebalance_agent:status()
+    ),
+
+    ?assertEqual(
+        ok,
+        emqx_node_rebalance_agent:enable(self())
+    ),
+
+    ?assertEqual(
+        {error, already_enabled},
+        emqx_node_rebalance_agent:enable(self())
+    ),
+
+    ?assertEqual(
+        {enabled, self()},
+        emqx_node_rebalance_agent:status()
+    ),
+
+    ?assertEqual(
+        {error, invalid_coordinator},
+        emqx_node_rebalance_agent:disable(spawn_link(fun() -> ok end))
+    ),
+
+    ?assertEqual(
+        ok,
+        emqx_node_rebalance_agent:disable(self())
+    ),
+
+    ?assertEqual(
+        {error, already_disabled},
+        emqx_node_rebalance_agent:disable(self())
+    ),
+
+    ?assertEqual(
+        disabled,
+        emqx_node_rebalance_agent:status()
+    ).
+
+t_enable_agent_busy(_Config) ->
+    ok = emqx_eviction_agent:enable(rebalance_test, undefined),
+
+    ?assertEqual(
+        {error, eviction_agent_busy},
+        emqx_node_rebalance_agent:enable(self())
+    ),
+
+    ok = emqx_eviction_agent:disable(rebalance_test).
+
+t_unknown_messages(_Config) ->
+    Pid = whereis(emqx_node_rebalance_agent),
+
+    ok = gen_server:cast(Pid, unknown),
+
+    Pid ! unknown,
+
+    ignored = gen_server:call(Pid, unknown).
+
+%% Cluster tests
+
+% The following tests verify that emqx_node_rebalance_agent correctly links
+% the coordinator process with the emqx_eviction_agent processes.
+
+t_rebalance_agent_coordinator_fail(Config) ->
+    process_flag(trap_exit, true),
+
+    [{Node, _}] = ?config(cluster_nodes, Config),
+
+    CoordinatorPid = spawn_link(
+        fun() ->
+            receive
+                done -> ok
+            end
+        end
+    ),
+
+    ?assertEqual(
+        disabled,
+        rpc:call(Node, emqx_eviction_agent, status, [])
+    ),
+
+    ?assertEqual(
+        ok,
+        rpc:call(Node, emqx_node_rebalance_agent, enable, [CoordinatorPid])
+    ),
+
+    ?assertMatch(
+        {enabled, _},
+        rpc:call(Node, emqx_eviction_agent, status, [])
+    ),
+
+    EvictionAgentPid = rpc:call(Node, erlang, whereis, [emqx_eviction_agent]),
+    true = link(EvictionAgentPid),
+
+    true = exit(CoordinatorPid, kill),
+
+    receive
+        {'EXIT', EvictionAgentPid, _} -> true
+    after 1000 ->
+        ct:fail("emqx_eviction_agent did not exit")
+    end.
+
+t_rebalance_agent_fail(Config) ->
+    process_flag(trap_exit, true),
+
+    [{Node, _}] = ?config(cluster_nodes, Config),
+
+    CoordinatorPid = spawn_link(
+        fun() ->
+            receive
+                done -> ok
+            end
+        end
+    ),
+
+    ?assertEqual(
+        ok,
+        rpc:call(Node, emqx_node_rebalance_agent, enable, [CoordinatorPid])
+    ),
+
+    EvictionAgentPid = rpc:call(Node, erlang, whereis, [emqx_eviction_agent]),
+    true = exit(EvictionAgentPid, kill),
+
+    receive
+        {'EXIT', CoordinatorPid, _} -> true
+    after 1000 ->
+        ct:fail("Coordinator process did not exit")
+    end.
diff --git a/apps/emqx_node_rebalance/test/emqx_node_rebalance_api_SUITE.erl b/apps/emqx_node_rebalance/test/emqx_node_rebalance_api_SUITE.erl
new file mode 100644
index 000000000..d8202a33e
--- /dev/null
+++ b/apps/emqx_node_rebalance/test/emqx_node_rebalance_api_SUITE.erl
@@ -0,0 +1,444 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+
+-module(emqx_node_rebalance_api_SUITE).
+
+-compile(export_all).
+-compile(nowarn_export_all).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("common_test/include/ct.hrl").
+ +-import( + emqx_mgmt_api_test_util, + [ + request/2, + request/3, + uri/1 + ] +). + +-import( + emqx_eviction_agent_test_helpers, + [emqtt_connect_many/2, stop_many/1, case_specific_node_name/3] +). + +-define(START_APPS, [emqx_eviction_agent, emqx_node_rebalance]). + +all() -> + emqx_common_test_helpers:all(?MODULE). + +init_per_suite(Config) -> + ok = emqx_common_test_helpers:start_apps(?START_APPS), + Config. + +end_per_suite(_Config) -> + ok = emqx_common_test_helpers:stop_apps(?START_APPS), + ok. + +init_per_testcase(Case, Config) -> + [{DonorNode, _} | _] = + ClusterNodes = emqx_eviction_agent_test_helpers:start_cluster( + [ + {case_specific_node_name(?MODULE, Case, '_donor'), 2883}, + {case_specific_node_name(?MODULE, Case, '_recipient'), 3883} + ], + ?START_APPS, + [{emqx, data_dir, case_specific_data_dir(Case, Config)}] + ), + + ok = rpc:call(DonorNode, emqx_mgmt_api_test_util, init_suite, []), + ok = take_auth_header_from(DonorNode), + + [{cluster_nodes, ClusterNodes} | Config]. +end_per_testcase(_Case, Config) -> + _ = emqx_eviction_agent_test_helpers:stop_cluster( + ?config(cluster_nodes, Config), + ?START_APPS + ). + +%%-------------------------------------------------------------------- +%% Tests +%%-------------------------------------------------------------------- + +t_start_evacuation_validation(Config) -> + [{DonorNode, _}, {RecipientNode, _}] = ?config(cluster_nodes, Config), + BadOpts = [ + #{conn_evict_rate => <<"conn">>}, + #{sess_evict_rate => <<"sess">>}, + #{redirect_to => 123}, + #{wait_takeover => <<"wait">>}, + #{migrate_to => []}, + #{migrate_to => <<"migrate_to">>}, + #{migrate_to => [<<"bad_node">>]}, + #{migrate_to => [<<"bad_node">>, atom_to_binary(DonorNode)]}, + #{unknown => <<"Value">>} + ], + lists:foreach( + fun(Opts) -> + ?assertMatch( + {ok, 400, #{}}, + api_post( + ["load_rebalance", atom_to_list(DonorNode), "evacuation", "start"], + Opts + ) + ) + end, + BadOpts + ), + ?assertMatch( + {ok, 404, #{}}, + api_post( + ["load_rebalance", "bad@node", "evacuation", "start"], + #{} + ) + ), + + ?assertMatch( + {ok, 200, #{}}, + api_post( + ["load_rebalance", atom_to_list(DonorNode), "evacuation", "start"], + #{ + conn_evict_rate => 10, + sess_evict_rate => 10, + wait_takeover => 10, + redirect_to => <<"srv">>, + migrate_to => [atom_to_binary(RecipientNode)] + } + ) + ), + + DonorNodeBin = atom_to_binary(DonorNode), + ?assertMatch( + {ok, 200, #{<<"evacuations">> := [#{<<"node">> := DonorNodeBin}]}}, + api_get(["load_rebalance", "global_status"]) + ). 
+ +t_start_rebalance_validation(Config) -> + process_flag(trap_exit, true), + + [{DonorNode, DonorPort}, {RecipientNode, _}] = ?config(cluster_nodes, Config), + + BadOpts = [ + #{conn_evict_rate => <<"conn">>}, + #{sess_evict_rate => <<"sess">>}, + #{abs_conn_threshold => <<"act">>}, + #{rel_conn_threshold => <<"rct">>}, + #{abs_sess_threshold => <<"act">>}, + #{rel_sess_threshold => <<"rct">>}, + #{wait_takeover => <<"wait">>}, + #{wait_health_check => <<"wait">>}, + #{nodes => <<"nodes">>}, + #{nodes => []}, + #{nodes => [<<"bad_node">>]}, + #{nodes => [<<"bad_node">>, atom_to_binary(DonorNode)]}, + #{unknown => <<"Value">>} + ], + lists:foreach( + fun(Opts) -> + ?assertMatch( + {ok, 400, #{}}, + api_post( + ["load_rebalance", atom_to_list(DonorNode), "start"], + Opts + ) + ) + end, + BadOpts + ), + ?assertMatch( + {ok, 404, #{}}, + api_post( + ["load_rebalance", "bad@node", "start"], + #{} + ) + ), + + Conns = emqtt_connect_many(DonorPort, 50), + + ?assertMatch( + {ok, 200, #{}}, + api_post( + ["load_rebalance", atom_to_list(DonorNode), "start"], + #{ + conn_evict_rate => 10, + sess_evict_rate => 10, + wait_takeover => 10, + wait_health_check => 10, + abs_conn_threshold => 10, + rel_conn_threshold => 1.001, + abs_sess_threshold => 10, + rel_sess_threshold => 1.001, + nodes => [ + atom_to_binary(DonorNode), + atom_to_binary(RecipientNode) + ] + } + ) + ), + + DonorNodeBin = atom_to_binary(DonorNode), + ?assertMatch( + {ok, 200, #{<<"rebalances">> := [#{<<"node">> := DonorNodeBin}]}}, + api_get(["load_rebalance", "global_status"]) + ), + + ok = stop_many(Conns). + +t_start_stop_evacuation(Config) -> + [{DonorNode, _}, {RecipientNode, _}] = ?config(cluster_nodes, Config), + + StartOpts = maps:merge( + emqx_node_rebalance_api:rebalance_evacuation_example(), + #{migrate_to => [atom_to_binary(RecipientNode)]} + ), + + ?assertMatch( + {ok, 200, #{}}, + api_post( + ["load_rebalance", atom_to_list(DonorNode), "evacuation", "start"], + StartOpts + ) + ), + + StatusResponse = api_get(["load_rebalance", "status"]), + + ?assertMatch( + {ok, 200, _}, + StatusResponse + ), + + {ok, 200, Status} = StatusResponse, + + ?assertMatch( + #{ + process := evacuation, + connection_eviction_rate := 100, + session_eviction_rate := 100, + connection_goal := 0, + session_goal := 0, + stats := #{ + initial_connected := _, + current_connected := _, + initial_sessions := _, + current_sessions := _ + } + }, + emqx_node_rebalance_api:translate(local_status_enabled, Status) + ), + + DonorNodeBin = atom_to_binary(DonorNode), + + GlobalStatusResponse = api_get(["load_rebalance", "global_status"]), + + ?assertMatch( + {ok, 200, _}, + GlobalStatusResponse + ), + + {ok, 200, GlobalStatus} = GlobalStatusResponse, + + ?assertMatch( + #{ + rebalances := [], + evacuations := [ + #{ + node := DonorNodeBin, + connection_eviction_rate := 100, + session_eviction_rate := 100, + connection_goal := 0, + session_goal := 0, + stats := #{ + initial_connected := _, + current_connected := _, + initial_sessions := _, + current_sessions := _ + } + } + ] + }, + emqx_node_rebalance_api:translate(global_status, GlobalStatus) + ), + + ?assertMatch( + {ok, 200, #{}}, + api_post( + ["load_rebalance", atom_to_list(DonorNode), "evacuation", "stop"], + #{} + ) + ), + + ?assertMatch( + {ok, 200, #{<<"status">> := <<"disabled">>}}, + api_get(["load_rebalance", "status"]) + ), + + ?assertMatch( + {ok, 200, #{<<"evacuations">> := [], <<"rebalances">> := []}}, + api_get(["load_rebalance", "global_status"]) + ). 
+ +t_start_stop_rebalance(Config) -> + process_flag(trap_exit, true), + + [{DonorNode, DonorPort}, {RecipientNode, _}] = ?config(cluster_nodes, Config), + + ?assertMatch( + {ok, 200, #{<<"status">> := <<"disabled">>}}, + api_get(["load_rebalance", "status"]) + ), + + Conns = emqtt_connect_many(DonorPort, 100), + + StartOpts = maps:without( + [nodes], + emqx_node_rebalance_api:rebalance_example() + ), + + ?assertMatch( + {ok, 200, #{}}, + api_post( + ["load_rebalance", atom_to_list(DonorNode), "start"], + StartOpts + ) + ), + + StatusResponse = api_get(["load_rebalance", "status"]), + + ?assertMatch( + {ok, 200, _}, + StatusResponse + ), + + {ok, 200, Status} = StatusResponse, + + ?assertMatch( + #{process := rebalance, connection_eviction_rate := 10, session_eviction_rate := 20}, + emqx_node_rebalance_api:translate(local_status_enabled, Status) + ), + + DonorNodeBin = atom_to_binary(DonorNode), + RecipientNodeBin = atom_to_binary(RecipientNode), + + GlobalStatusResponse = api_get(["load_rebalance", "global_status"]), + + ?assertMatch( + {ok, 200, _}, + GlobalStatusResponse + ), + + {ok, 200, GlobalStatus} = GlobalStatusResponse, + + ?assertMatch( + {ok, 200, #{ + <<"evacuations">> := [], + <<"rebalances">> := + [ + #{ + <<"state">> := _, + <<"node">> := DonorNodeBin, + <<"coordinator_node">> := _, + <<"connection_eviction_rate">> := 10, + <<"session_eviction_rate">> := 20, + <<"donors">> := [DonorNodeBin], + <<"recipients">> := [RecipientNodeBin] + } + ] + }}, + GlobalStatusResponse + ), + + ?assertMatch( + #{ + evacuations := [], + rebalances := [ + #{ + state := _, + node := DonorNodeBin, + coordinator_node := _, + connection_eviction_rate := 10, + session_eviction_rate := 20, + donors := [DonorNodeBin], + recipients := [RecipientNodeBin] + } + ] + }, + emqx_node_rebalance_api:translate(global_status, GlobalStatus) + ), + + ?assertMatch( + {ok, 200, #{}}, + api_post( + ["load_rebalance", atom_to_list(DonorNode), "stop"], + #{} + ) + ), + + ?assertMatch( + {ok, 200, #{<<"status">> := <<"disabled">>}}, + api_get(["load_rebalance", "status"]) + ), + + ?assertMatch( + {ok, 200, #{<<"evacuations">> := [], <<"rebalances">> := []}}, + api_get(["load_rebalance", "global_status"]) + ), + + ok = stop_many(Conns). + +t_availability_check(Config) -> + [{DonorNode, _} | _] = ?config(cluster_nodes, Config), + ?assertMatch( + {ok, 200, #{}}, + api_get(["load_rebalance", "availability_check"]) + ), + + ok = rpc:call(DonorNode, emqx_node_rebalance_evacuation, start, [#{}]), + + ?assertMatch( + {ok, 503, _}, + api_get(["load_rebalance", "availability_check"]) + ), + + ok = rpc:call(DonorNode, emqx_node_rebalance_evacuation, stop, []), + + ?assertMatch( + {ok, 200, #{}}, + api_get(["load_rebalance", "availability_check"]) + ). + +%%-------------------------------------------------------------------- +%% Helpers +%%-------------------------------------------------------------------- + +api_get(Path) -> + case request(get, uri(Path)) of + {ok, Code, ResponseBody} -> + {ok, Code, jiffy:decode(ResponseBody, [return_maps])}; + {error, _} = Error -> + Error + end. + +api_post(Path, Data) -> + case request(post, uri(Path), Data) of + {ok, Code, ResponseBody} -> + {ok, Code, jiffy:decode(ResponseBody, [return_maps])}; + {error, _} = Error -> + Error + end. + +take_auth_header_from(Node) -> + meck:new(emqx_common_test_http, [passthrough]), + meck:expect( + emqx_common_test_http, + default_auth_header, + fun() -> rpc:call(Node, emqx_common_test_http, default_auth_header, []) end + ), + ok. 
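+
+%% REST endpoints exercised by this suite (paths as passed to uri/1 above):
+%%   POST /load_rebalance/{node}/start              - start rebalance
+%%   POST /load_rebalance/{node}/stop               - stop rebalance
+%%   POST /load_rebalance/{node}/evacuation/start   - start evacuation
+%%   POST /load_rebalance/{node}/evacuation/stop    - stop evacuation
+%%   GET  /load_rebalance/status                    - local node status
+%%   GET  /load_rebalance/global_status             - cluster-wide status
+%%   GET  /load_rebalance/availability_check        - 200 when open, 503 while evacuating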
+ +case_specific_data_dir(Case, Config) -> + case ?config(priv_dir, Config) of + undefined -> undefined; + PrivDir -> filename:join(PrivDir, atom_to_list(Case)) + end. diff --git a/apps/emqx_node_rebalance/test/emqx_node_rebalance_cli_SUITE.erl b/apps/emqx_node_rebalance/test/emqx_node_rebalance_cli_SUITE.erl new file mode 100644 index 000000000..54ecad026 --- /dev/null +++ b/apps/emqx_node_rebalance/test/emqx_node_rebalance_cli_SUITE.erl @@ -0,0 +1,291 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%%-------------------------------------------------------------------- + +-module(emqx_node_rebalance_cli_SUITE). + +-compile(export_all). +-compile(nowarn_export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). + +-import( + emqx_eviction_agent_test_helpers, + [emqtt_connect_many/2, stop_many/1, case_specific_node_name/3] +). + +-define(START_APPS, [emqx_eviction_agent, emqx_node_rebalance]). + +all() -> + emqx_common_test_helpers:all(?MODULE). + +init_per_suite(Config) -> + emqx_common_test_helpers:start_apps(?START_APPS), + Config. + +end_per_suite(Config) -> + emqx_common_test_helpers:stop_apps(lists:reverse(?START_APPS)), + Config. + +init_per_testcase(Case = t_rebalance, Config) -> + _ = emqx_node_rebalance_evacuation:stop(), + ClusterNodes = emqx_eviction_agent_test_helpers:start_cluster( + [ + {case_specific_node_name(?MODULE, Case, '_donor'), 2883}, + {case_specific_node_name(?MODULE, Case, '_recipient'), 3883} + ], + ?START_APPS + ), + [{cluster_nodes, ClusterNodes} | Config]; +init_per_testcase(_Case, Config) -> + _ = emqx_node_rebalance_evacuation:stop(), + _ = emqx_node_rebalance:stop(), + Config. + +end_per_testcase(t_rebalance, Config) -> + _ = emqx_node_rebalance_evacuation:stop(), + _ = emqx_node_rebalance:stop(), + _ = emqx_eviction_agent_test_helpers:stop_cluster( + ?config(cluster_nodes, Config), + ?START_APPS + ); +end_per_testcase(_Case, _Config) -> + _ = emqx_node_rebalance_evacuation:stop(), + _ = emqx_node_rebalance:stop(). 
+ +%%-------------------------------------------------------------------- +%% Tests +%%-------------------------------------------------------------------- + +t_evacuation(_Config) -> + %% usage + ok = emqx_node_rebalance_cli:cli(["foobar"]), + + %% status + ok = emqx_node_rebalance_cli:cli(["status"]), + ok = emqx_node_rebalance_cli:cli(["node-status"]), + ok = emqx_node_rebalance_cli:cli(["node-status", atom_to_list(node())]), + + %% start with invalid args + ?assertNot( + emqx_node_rebalance_cli:cli(["start", "--evacuation", "--foo-bar"]) + ), + + ?assertNot( + emqx_node_rebalance_cli:cli(["start", "--evacuation", "--conn-evict-rate", "foobar"]) + ), + + ?assertNot( + emqx_node_rebalance_cli:cli(["start", "--evacuation", "--sess-evict-rate", "foobar"]) + ), + + ?assertNot( + emqx_node_rebalance_cli:cli(["start", "--evacuation", "--wait-takeover", "foobar"]) + ), + + ?assertNot( + emqx_node_rebalance_cli:cli([ + "start", + "--evacuation", + "--migrate-to", + "nonexistent@node" + ]) + ), + ?assertNot( + emqx_node_rebalance_cli:cli([ + "start", + "--evacuation", + "--migrate-to", + "" + ]) + ), + ?assertNot( + emqx_node_rebalance_cli:cli([ + "start", + "--evacuation", + "--unknown-arg" + ]) + ), + ?assert( + emqx_node_rebalance_cli:cli([ + "start", + "--evacuation", + "--conn-evict-rate", + "10", + "--sess-evict-rate", + "10", + "--wait-takeover", + "10", + "--migrate-to", + atom_to_list(node()), + "--redirect-to", + "srv" + ]) + ), + + %% status + ok = emqx_node_rebalance_cli:cli(["status"]), + ok = emqx_node_rebalance_cli:cli(["node-status"]), + ok = emqx_node_rebalance_cli:cli(["node-status", atom_to_list(node())]), + + ?assertMatch( + {enabled, #{}}, + emqx_node_rebalance_evacuation:status() + ), + + %% already enabled + ?assertNot( + emqx_node_rebalance_cli:cli([ + "start", + "--evacuation", + "--conn-evict-rate", + "10", + "--redirect-to", + "srv" + ]) + ), + + %% stop + true = emqx_node_rebalance_cli:cli(["stop"]), + + false = emqx_node_rebalance_cli:cli(["stop"]), + + ?assertEqual( + disabled, + emqx_node_rebalance_evacuation:status() + ). 
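+
+%% For reference, the argument lists passed to emqx_node_rebalance_cli:cli/1
+%% above mirror console invocations of the form (assuming the command is
+%% registered with emqx_ctl under the name "rebalance"; the node name below
+%% is a placeholder):
+%%   $ emqx ctl rebalance start --evacuation --conn-evict-rate 10 \
+%%         --migrate-to 'recipient@host' --redirect-to srv
+%%   $ emqx ctl rebalance stop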
+ +t_rebalance(Config) -> + process_flag(trap_exit, true), + + [{DonorNode, DonorPort}, {RecipientNode, _}] = ?config(cluster_nodes, Config), + + %% start with invalid args + ?assertNot( + emqx_node_rebalance_cli(DonorNode, ["start", "--foo-bar"]) + ), + + ?assertNot( + emqx_node_rebalance_cli(DonorNode, ["start", "--conn-evict-rate", "foobar"]) + ), + + ?assertNot( + emqx_node_rebalance_cli(DonorNode, ["start", "--abs-conn-threshold", "foobar"]) + ), + + ?assertNot( + emqx_node_rebalance_cli(DonorNode, ["start", "--rel-conn-threshold", "foobar"]) + ), + + ?assertNot( + emqx_node_rebalance_cli(DonorNode, ["start", "--sess-evict-rate", "foobar"]) + ), + + ?assertNot( + emqx_node_rebalance_cli(DonorNode, ["start", "--abs-sess-threshold", "foobar"]) + ), + + ?assertNot( + emqx_node_rebalance_cli(DonorNode, ["start", "--rel-sess-threshold", "foobar"]) + ), + + ?assertNot( + emqx_node_rebalance_cli(DonorNode, ["start", "--wait-takeover", "foobar"]) + ), + + ?assertNot( + emqx_node_rebalance_cli(DonorNode, ["start", "--wait-health-check", "foobar"]) + ), + + ?assertNot( + emqx_node_rebalance_cli(DonorNode, [ + "start", + "--nodes", + "nonexistent@node" + ]) + ), + ?assertNot( + emqx_node_rebalance_cli(DonorNode, [ + "start", + "--nodes", + "" + ]) + ), + ?assertNot( + emqx_node_rebalance_cli(DonorNode, [ + "start", + "--nodes", + atom_to_list(RecipientNode) + ]) + ), + ?assertNot( + emqx_node_rebalance_cli(DonorNode, [ + "start", + "--unknown-arg" + ]) + ), + + Conns = emqtt_connect_many(DonorPort, 20), + + ?assert( + emqx_node_rebalance_cli(DonorNode, [ + "start", + "--conn-evict-rate", + "10", + "--abs-conn-threshold", + "10", + "--rel-conn-threshold", + "1.1", + "--sess-evict-rate", + "10", + "--abs-sess-threshold", + "10", + "--rel-sess-threshold", + "1.1", + "--wait-takeover", + "10", + "--nodes", + atom_to_list(DonorNode) ++ "," ++ + atom_to_list(RecipientNode) + ]) + ), + + %% status + ok = emqx_node_rebalance_cli(DonorNode, ["status"]), + ok = emqx_node_rebalance_cli(DonorNode, ["node-status"]), + ok = emqx_node_rebalance_cli(DonorNode, ["node-status", atom_to_list(DonorNode)]), + + ?assertMatch( + {enabled, #{}}, + rpc:call(DonorNode, emqx_node_rebalance, status, []) + ), + + %% already enabled + ?assertNot( + emqx_node_rebalance_cli(DonorNode, ["start"]) + ), + + %% stop + true = emqx_node_rebalance_cli(DonorNode, ["stop"]), + + false = emqx_node_rebalance_cli(DonorNode, ["stop"]), + + ?assertEqual( + disabled, + rpc:call(DonorNode, emqx_node_rebalance, status, []) + ), + + ok = stop_many(Conns). + +%%-------------------------------------------------------------------- +%% Helpers +%%-------------------------------------------------------------------- + +emqx_node_rebalance_cli(Node, Args) -> + case rpc:call(Node, emqx_node_rebalance_cli, cli, [Args]) of + {badrpc, Reason} -> + error(Reason); + Result -> + Result + end. diff --git a/apps/emqx_node_rebalance/test/emqx_node_rebalance_evacuation_SUITE.erl b/apps/emqx_node_rebalance/test/emqx_node_rebalance_evacuation_SUITE.erl new file mode 100644 index 000000000..5d774ba7c --- /dev/null +++ b/apps/emqx_node_rebalance/test/emqx_node_rebalance_evacuation_SUITE.erl @@ -0,0 +1,270 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_node_rebalance_evacuation_SUITE). + +-compile(export_all). +-compile(nowarn_export_all). 
+ +-include_lib("emqx/include/emqx_mqtt.hrl"). +-include_lib("emqx/include/asserts.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +-import( + emqx_eviction_agent_test_helpers, + [emqtt_connect/1, emqtt_try_connect/1, case_specific_node_name/3] +). + +all() -> [{group, one_node}, {group, two_node}]. + +groups() -> + [ + {one_node, [], one_node_cases()}, + {two_node, [], two_node_cases()} + ]. + +two_node_cases() -> + [ + t_conn_evicted, + t_migrate_to, + t_session_evicted + ]. + +one_node_cases() -> + emqx_common_test_helpers:all(?MODULE) -- two_node_cases(). + +init_per_suite(Config) -> + ok = emqx_common_test_helpers:start_apps([]), + Config. + +end_per_suite(_Config) -> + ok = emqx_common_test_helpers:stop_apps([]), + ok. + +init_per_group(one_node, Config) -> + [{cluster_type, one_node} | Config]; +init_per_group(two_node, Config) -> + [{cluster_type, two_node} | Config]. + +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(Case, Config) -> + NodesWithPorts = + case ?config(cluster_type, Config) of + one_node -> + [{case_specific_node_name(?MODULE, Case, '_evacuated'), 2883}]; + two_node -> + [ + {case_specific_node_name(?MODULE, Case, '_evacuated'), 2883}, + {case_specific_node_name(?MODULE, Case, '_recipient'), 3883} + ] + end, + ClusterNodes = emqx_eviction_agent_test_helpers:start_cluster( + NodesWithPorts, + [emqx_eviction_agent, emqx_node_rebalance], + [{emqx, data_dir, case_specific_data_dir(Case, Config)}] + ), + ok = snabbkaffe:start_trace(), + [{cluster_nodes, ClusterNodes} | Config]. + +end_per_testcase(_Case, Config) -> + ok = snabbkaffe:stop(), + ok = emqx_eviction_agent_test_helpers:stop_cluster( + ?config(cluster_nodes, Config), + [emqx_eviction_agent, emqx_node_rebalance] + ). + +%%-------------------------------------------------------------------- +%% Tests +%%-------------------------------------------------------------------- + +%% One node tests + +t_agent_busy(Config) -> + [{DonorNode, _DonorPort}] = ?config(cluster_nodes, Config), + ok = rpc:call(DonorNode, emqx_eviction_agent, enable, [other_rebalance, undefined]), + + ?assertEqual( + {error, eviction_agent_busy}, + rpc:call(DonorNode, emqx_node_rebalance_evacuation, start, [opts(Config)]) + ). + +t_already_started(Config) -> + [{DonorNode, _DonorPort}] = ?config(cluster_nodes, Config), + ok = rpc:call(DonorNode, emqx_node_rebalance_evacuation, start, [opts(Config)]), + + ?assertEqual( + {error, already_started}, + rpc:call(DonorNode, emqx_node_rebalance_evacuation, start, [opts(Config)]) + ). + +t_not_started(Config) -> + [{DonorNode, _DonorPort}] = ?config(cluster_nodes, Config), + + ?assertEqual( + {error, not_started}, + rpc:call(DonorNode, emqx_node_rebalance_evacuation, stop, []) + ). + +t_start(Config) -> + process_flag(trap_exit, true), + + [{DonorNode, DonorPort}] = ?config(cluster_nodes, Config), + + ok = rpc:call(DonorNode, emqx_node_rebalance_evacuation, start, [opts(Config)]), + ?assertMatch( + {error, {use_another_server, #{}}}, + emqtt_try_connect([{port, DonorPort}]) + ). 
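+
+%% t_persistence below relies on the evacuation options being persisted to
+%% disk (see emqx_node_rebalance_evacuation_persist): the evacuation child is
+%% terminated and restarted under emqx_node_rebalance_sup, and eviction is
+%% expected to resume with the same options.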
+ +t_persistence(Config) -> + process_flag(trap_exit, true), + + [{DonorNode, DonorPort}] = ?config(cluster_nodes, Config), + + ok = rpc:call(DonorNode, emqx_node_rebalance_evacuation, start, [opts(Config)]), + + ?assertMatch( + {error, {use_another_server, #{}}}, + emqtt_try_connect([{port, DonorPort}]) + ), + + ok = rpc:call(DonorNode, supervisor, terminate_child, [ + emqx_node_rebalance_sup, emqx_node_rebalance_evacuation + ]), + {ok, _} = rpc:call(DonorNode, supervisor, restart_child, [ + emqx_node_rebalance_sup, emqx_node_rebalance_evacuation + ]), + + ?assertMatch( + {error, {use_another_server, #{}}}, + emqtt_try_connect([{port, DonorPort}]) + ), + ?assertMatch( + {enabled, #{conn_evict_rate := 10}}, + rpc:call(DonorNode, emqx_node_rebalance_evacuation, status, []) + ). + +t_unknown_messages(Config) -> + process_flag(trap_exit, true), + + [{DonorNode, _DonorPort}] = ?config(cluster_nodes, Config), + + ok = rpc:call(DonorNode, emqx_node_rebalance_evacuation, start, [opts(Config)]), + + Pid = rpc:call(DonorNode, erlang, whereis, [emqx_node_rebalance_evacuation]), + + Pid ! unknown, + + ok = gen_server:cast(Pid, unknown), + + ?assertEqual( + ignored, + gen_server:call(Pid, unknown) + ). + +%% Two node tests + +t_conn_evicted(Config) -> + process_flag(trap_exit, true), + + [{DonorNode, DonorPort}, _] = ?config(cluster_nodes, Config), + + {ok, C} = emqtt_connect([{clientid, <<"evacuated">>}, {port, DonorPort}]), + + ?assertWaitEvent( + ok = rpc:call(DonorNode, emqx_node_rebalance_evacuation, start, [opts(Config)]), + #{?snk_kind := node_evacuation_evict_conn}, + 1000 + ), + + ?assertMatch( + {error, {use_another_server, #{}}}, + emqtt_try_connect([{clientid, <<"connecting">>}, {port, DonorPort}]) + ), + + receive + {'EXIT', C, {disconnected, 156, _}} -> ok + after 1000 -> + ct:fail("Connection not evicted") + end. + +t_migrate_to(Config) -> + [{DonorNode, _DonorPort}, {RecipientNode, _RecipientPort}] = ?config(cluster_nodes, Config), + + ?assertEqual( + [RecipientNode], + rpc:call(DonorNode, emqx_node_rebalance_evacuation, migrate_to, [undefined]) + ), + + ?assertEqual( + [], + rpc:call(DonorNode, emqx_node_rebalance_evacuation, migrate_to, [['unknown@node']]) + ), + + ok = rpc:call(RecipientNode, emqx_eviction_agent, enable, [test_rebalance, undefined]), + + ?assertEqual( + [], + rpc:call(DonorNode, emqx_node_rebalance_evacuation, migrate_to, [undefined]) + ). + +t_session_evicted(Config) -> + process_flag(trap_exit, true), + + [{DonorNode, DonorPort}, {RecipientNode, _RecipientPort}] = ?config(cluster_nodes, Config), + + {ok, C} = emqtt_connect([ + {port, DonorPort}, {clientid, <<"client_with_sess">>}, {clean_start, false} + ]), + + ?assertWaitEvent( + ok = rpc:call(DonorNode, emqx_node_rebalance_evacuation, start, [opts(Config)]), + #{?snk_kind := node_evacuation_evict_sess_over}, + 5000 + ), + + receive + {'EXIT', C, {disconnected, ?RC_USE_ANOTHER_SERVER, _}} -> ok + after 1000 -> + ct:fail("Connection not evicted") + end, + + [ChannelPid] = rpc:call(DonorNode, emqx_cm_registry, lookup_channels, [<<"client_with_sess">>]), + + ?assertEqual( + RecipientNode, + node(ChannelPid) + ). + +%%-------------------------------------------------------------------- +%% Helpers +%%-------------------------------------------------------------------- + +opts(Config) -> + #{ + server_reference => <<"srv">>, + conn_evict_rate => 10, + sess_evict_rate => 10, + wait_takeover => 1, + migrate_to => migrate_to(Config) + }. 
+ +migrate_to(Config) -> + case ?config(cluster_type, Config) of + one_node -> + []; + two_node -> + [_, {RecipientNode, _RecipientPort}] = ?config(cluster_nodes, Config), + [RecipientNode] + end. + +case_specific_data_dir(Case, Config) -> + case ?config(priv_dir, Config) of + undefined -> undefined; + PrivDir -> filename:join(PrivDir, atom_to_list(Case)) + end. diff --git a/apps/emqx_node_rebalance/test/emqx_node_rebalance_evacuation_persist_SUITE.erl b/apps/emqx_node_rebalance/test/emqx_node_rebalance_evacuation_persist_SUITE.erl new file mode 100644 index 000000000..450280cb8 --- /dev/null +++ b/apps/emqx_node_rebalance/test/emqx_node_rebalance_evacuation_persist_SUITE.erl @@ -0,0 +1,108 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_node_rebalance_evacuation_persist_SUITE). + +-compile(export_all). +-compile(nowarn_export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). + +all() -> + emqx_common_test_helpers:all(?MODULE). + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + ok. + +init_per_testcase(_Case, Config) -> + _ = emqx_node_rebalance_evacuation_persist:clear(), + Config. + +end_per_testcase(_Case, _Config) -> + _ = emqx_node_rebalance_evacuation_persist:clear(). + +%%-------------------------------------------------------------------- +%% Tests +%%-------------------------------------------------------------------- + +t_save_read(_Config) -> + DefaultOpts = #{ + server_reference => <<"default_ref">>, + conn_evict_rate => 2001, + sess_evict_rate => 2002, + wait_takeover => 2003 + }, + + Opts0 = #{ + server_reference => <<"ref">>, + conn_evict_rate => 1001, + sess_evict_rate => 1002, + wait_takeover => 1003 + }, + ok = emqx_node_rebalance_evacuation_persist:save(Opts0), + + {ok, ReadOpts0} = emqx_node_rebalance_evacuation_persist:read(DefaultOpts), + ?assertEqual(Opts0, ReadOpts0), + + Opts1 = Opts0#{server_reference => undefined}, + ok = emqx_node_rebalance_evacuation_persist:save(Opts1), + + {ok, ReadOpts1} = emqx_node_rebalance_evacuation_persist:read(DefaultOpts), + ?assertEqual(Opts1, ReadOpts1). + +t_read_default(_Config) -> + ok = write_evacuation_file(<<"{}">>), + + DefaultOpts = #{ + server_reference => <<"ref">>, + conn_evict_rate => 1001, + sess_evict_rate => 1002, + wait_takeover => 1003 + }, + + {ok, ReadOpts} = emqx_node_rebalance_evacuation_persist:read(DefaultOpts), + ?assertEqual(DefaultOpts, ReadOpts). + +t_read_bad_data(_Config) -> + ok = write_evacuation_file(<<"{bad json">>), + + DefaultOpts = #{ + server_reference => <<"ref">>, + conn_evict_rate => 1001, + sess_evict_rate => 1002, + wait_takeover => 1003 + }, + + {ok, ReadOpts} = emqx_node_rebalance_evacuation_persist:read(DefaultOpts), + ?assertEqual(DefaultOpts, ReadOpts). + +t_clear(_Config) -> + ok = write_evacuation_file(<<"{}">>), + + ?assertMatch( + {ok, _}, + emqx_node_rebalance_evacuation_persist:read(#{}) + ), + + ok = emqx_node_rebalance_evacuation_persist:clear(), + + ?assertEqual( + none, + emqx_node_rebalance_evacuation_persist:read(#{}) + ). 
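+
+%% The cases above assume the evacuation file holds the JSON-encoded options,
+%% e.g. roughly (a sketch of the expected shape, not a verbatim fixture):
+%%   {"server_reference":"srv","conn_evict_rate":10,
+%%    "sess_evict_rate":10,"wait_takeover":1}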
+ +%%-------------------------------------------------------------------- +%% Helpers +%%-------------------------------------------------------------------- + +write_evacuation_file(Json) -> + ok = filelib:ensure_dir(emqx_node_rebalance_evacuation_persist:evacuation_filepath()), + ok = file:write_file( + emqx_node_rebalance_evacuation_persist:evacuation_filepath(), + Json + ). diff --git a/apps/emqx_oracle/BSL.txt b/apps/emqx_oracle/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_oracle/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. 
LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
+EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
+TITLE.
+
+MariaDB hereby grants you permission to use this License’s text to license
+your works, and to refer to it using the trademark “Business Source License”,
+as long as you comply with the Covenants of Licensor below.
+
+Covenants of Licensor
+
+In consideration of the right to use this License’s text and the “Business
+Source License” name and trademark, Licensor covenants to MariaDB, and to all
+other recipients of the licensed work to be provided by Licensor:
+
+1. To specify as the Change License the GPL Version 2.0 or any later version,
+   or a license that is compatible with GPL Version 2.0 or a later version,
+   where “compatible” means that software provided under the Change License can
+   be included in a program with software provided under GPL Version 2.0 or a
+   later version. Licensor may specify additional Change Licenses without
+   limitation.
+
+2. To either: (a) specify an additional grant of rights to use that does not
+   impose any additional restriction on the right granted in this License, as
+   the Additional Use Grant; or (b) insert the text “None”.
+
+3. To specify a Change Date.
+
+4. Not to modify this License in any other way.
diff --git a/apps/emqx_oracle/README.md b/apps/emqx_oracle/README.md
new file mode 100644
index 000000000..873d52259
--- /dev/null
+++ b/apps/emqx_oracle/README.md
@@ -0,0 +1,14 @@
+# Oracle Database Connector
+
+This application houses the Oracle Database connector for EMQX Enterprise Edition.
+It provides an API for connecting to Oracle Database.
+
+So far it is only used to insert messages as a data bridge.
+
+## Contributing
+
+Please see our [contributing.md](../../CONTRIBUTING.md).
+
+## License
+
+See [BSL](./BSL.txt).
diff --git a/apps/emqx_oracle/rebar.config b/apps/emqx_oracle/rebar.config
new file mode 100644
index 000000000..14461ba34
--- /dev/null
+++ b/apps/emqx_oracle/rebar.config
@@ -0,0 +1,7 @@
+%% -*- mode: erlang; -*-
+
+{erl_opts, [debug_info]}.
+{deps, [ {jamdb_oracle, {git, "https://github.com/emqx/jamdb_oracle", {tag, "0.4.9.4"}}}
+       , {emqx_connector, {path, "../../apps/emqx_connector"}}
+       , {emqx_resource, {path, "../../apps/emqx_resource"}}
+       ]}.
diff --git a/apps/emqx_oracle/src/emqx_oracle.app.src b/apps/emqx_oracle/src/emqx_oracle.app.src
new file mode 100644
index 000000000..fa48e8479
--- /dev/null
+++ b/apps/emqx_oracle/src/emqx_oracle.app.src
@@ -0,0 +1,14 @@
+{application, emqx_oracle, [
+    {description, "EMQX Enterprise Oracle Database Connector"},
+    {vsn, "0.1.0"},
+    {registered, []},
+    {applications, [
+        kernel,
+        stdlib,
+        jamdb_oracle
+    ]},
+    {env, []},
+    {modules, []},
+
+    {links, []}
+]}.
diff --git a/apps/emqx_oracle/src/emqx_oracle.erl b/apps/emqx_oracle/src/emqx_oracle.erl
new file mode 100644
index 000000000..a0d7169f3
--- /dev/null
+++ b/apps/emqx_oracle/src/emqx_oracle.erl
@@ -0,0 +1,367 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+
+-module(emqx_oracle).
+
+-behaviour(emqx_resource).
+
+-include_lib("emqx/include/logger.hrl").
+-include_lib("snabbkaffe/include/snabbkaffe.hrl").
+
+-define(ORACLE_DEFAULT_PORT, 1521).
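+
+%% This connector drives Oracle Database through the jamdb_oracle driver
+%% (declared in rebar.config) and pools connections with ecpool; both
+%% applications are started explicitly in on_start/2 below.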
+
+%%====================================================================
+%% Exports
+%%====================================================================
+
+%% callbacks for behaviour emqx_resource
+-export([
+    callback_mode/0,
+    is_buffer_supported/0,
+    on_start/2,
+    on_stop/2,
+    on_query/3,
+    on_batch_query/3,
+    on_get_status/2
+]).
+
+%% callbacks for ecpool
+-export([connect/1, prepare_sql_to_conn/2]).
+
+%% Internal exports used to execute code with ecpool worker
+-export([
+    query/3,
+    execute_batch/3,
+    do_get_status/1
+]).
+
+-export([
+    oracle_host_options/0
+]).
+
+-define(ACTION_SEND_MESSAGE, send_message).
+
+-define(SYNC_QUERY_MODE, no_handover).
+
+-define(ORACLE_HOST_OPTIONS, #{
+    default_port => ?ORACLE_DEFAULT_PORT
+}).
+
+-define(MAX_CURSORS, 10).
+-define(DEFAULT_POOL_SIZE, 8).
+-define(OPT_TIMEOUT, 30000).
+
+-type prepares() :: #{atom() => binary()}.
+-type params_tokens() :: #{atom() => list()}.
+
+-type state() ::
+    #{
+        pool_name := binary(),
+        prepare_sql := prepares(),
+        params_tokens := params_tokens(),
+        prepare_statement := prepares()
+    }.
+
+% As ecpool does not monitor the worker's PID when doing a handover_async, a
+% request can be lost if the worker crashes. Thus, it's better to force
+% requests to be sync for now.
+callback_mode() -> always_sync.
+
+is_buffer_supported() -> false.
+
+-spec on_start(binary(), hoconsc:config()) -> {ok, state()} | {error, _}.
+on_start(
+    InstId,
+    #{
+        server := Server,
+        database := DB,
+        sid := Sid,
+        username := User
+    } = Config
+) ->
+    ?SLOG(info, #{
+        msg => "starting_oracle_connector",
+        connector => InstId,
+        config => emqx_utils:redact(Config)
+    }),
+    ?tp(oracle_bridge_started, #{instance_id => InstId, config => Config}),
+    {ok, _} = application:ensure_all_started(ecpool),
+    {ok, _} = application:ensure_all_started(jamdb_oracle),
+    jamdb_oracle_conn:set_max_cursors_number(?MAX_CURSORS),
+
+    #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, oracle_host_options()),
+    ServiceName = maps:get(<<"service_name">>, Config, Sid),
+    Options = [
+        {host, Host},
+        {port, Port},
+        {user, emqx_plugin_libs_rule:str(User)},
+        {password, emqx_secret:wrap(maps:get(password, Config, ""))},
+        {sid, emqx_plugin_libs_rule:str(Sid)},
+        {service_name, emqx_plugin_libs_rule:str(ServiceName)},
+        {database, DB},
+        {pool_size, maps:get(<<"pool_size">>, Config, ?DEFAULT_POOL_SIZE)},
+        {timeout, ?OPT_TIMEOUT},
+        {app_name, "EMQX Data To Oracle Database Action"}
+    ],
+    PoolName = InstId,
+    Prepares = parse_prepare_sql(Config),
+    InitState = #{pool_name => PoolName, prepare_statement => #{}},
+    State = maps:merge(InitState, Prepares),
+    case emqx_resource_pool:start(InstId, ?MODULE, Options) of
+        ok ->
+            {ok, init_prepare(State)};
+        {error, Reason} ->
+            ?tp(
+                oracle_connector_start_failed,
+                #{error => Reason}
+            ),
+            {error, Reason}
+    end.
+
+on_stop(InstId, #{pool_name := PoolName}) ->
+    ?SLOG(info, #{
+        msg => "stopping_oracle_connector",
+        connector => InstId
+    }),
+    ?tp(oracle_bridge_stopped, #{instance_id => InstId}),
+    emqx_resource_pool:stop(PoolName).
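+
+%% Query entry points. A request is either an ad-hoc statement
+%% ({query, SQL, Params}) or a templated action ({send_message, Data});
+%% proc_sql_params/4 below resolves the latter against the statements
+%% pre-processed in parse_prepare_sql/1.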
+ +on_query(InstId, {TypeOrKey, NameOrSQL}, #{pool_name := _PoolName} = State) -> + on_query(InstId, {TypeOrKey, NameOrSQL, []}, State); +on_query( + InstId, + {TypeOrKey, NameOrSQL, Params}, + #{pool_name := PoolName} = State +) -> + ?SLOG(debug, #{ + msg => "oracle database connector received sql query", + connector => InstId, + type => TypeOrKey, + sql => NameOrSQL, + state => State + }), + Type = query, + {NameOrSQL2, Data} = proc_sql_params(TypeOrKey, NameOrSQL, Params, State), + Res = on_sql_query(InstId, PoolName, Type, ?SYNC_QUERY_MODE, NameOrSQL2, Data), + handle_result(Res). + +on_batch_query( + InstId, + BatchReq, + #{pool_name := PoolName, params_tokens := Tokens, prepare_statement := Sts} = State +) -> + case BatchReq of + [{Key, _} = Request | _] -> + BinKey = to_bin(Key), + case maps:get(BinKey, Tokens, undefined) of + undefined -> + Log = #{ + connector => InstId, + first_request => Request, + state => State, + msg => "batch prepare not implemented" + }, + ?SLOG(error, Log), + {error, {unrecoverable_error, batch_prepare_not_implemented}}; + TokenList -> + {_, Datas} = lists:unzip(BatchReq), + Datas2 = [emqx_plugin_libs_rule:proc_sql(TokenList, Data) || Data <- Datas], + St = maps:get(BinKey, Sts), + case + on_sql_query(InstId, PoolName, execute_batch, ?SYNC_QUERY_MODE, St, Datas2) + of + {ok, Results} -> + handle_batch_result(Results, 0); + Result -> + Result + end + end; + _ -> + Log = #{ + connector => InstId, + request => BatchReq, + state => State, + msg => "invalid request" + }, + ?SLOG(error, Log), + {error, {unrecoverable_error, invalid_request}} + end. + +proc_sql_params(query, SQLOrKey, Params, _State) -> + {SQLOrKey, Params}; +proc_sql_params(TypeOrKey, SQLOrData, Params, #{ + params_tokens := ParamsTokens, prepare_sql := PrepareSql +}) -> + Key = to_bin(TypeOrKey), + case maps:get(Key, ParamsTokens, undefined) of + undefined -> + {SQLOrData, Params}; + Tokens -> + case maps:get(Key, PrepareSql, undefined) of + undefined -> + {SQLOrData, Params}; + Sql -> + {Sql, emqx_plugin_libs_rule:proc_sql(Tokens, SQLOrData)} + end + end. + +on_sql_query(InstId, PoolName, Type, ApplyMode, NameOrSQL, Data) -> + case ecpool:pick_and_do(PoolName, {?MODULE, Type, [NameOrSQL, Data]}, ApplyMode) of + {error, Reason} = Result -> + ?tp( + oracle_connector_query_return, + #{error => Reason} + ), + ?SLOG(error, #{ + msg => "oracle database connector do sql query failed", + connector => InstId, + type => Type, + sql => NameOrSQL, + reason => Reason + }), + Result; + Result -> + ?tp( + oracle_connector_query_return, + #{result => Result} + ), + Result + end. + +on_get_status(_InstId, #{pool_name := Pool} = State) -> + case emqx_resource_pool:health_check_workers(Pool, fun ?MODULE:do_get_status/1) of + true -> + case do_check_prepares(State) of + ok -> + connected; + {ok, NState} -> + %% return new state with prepared statements + {connected, NState} + end; + false -> + disconnected + end. + +do_get_status(Conn) -> + ok == element(1, jamdb_oracle:sql_query(Conn, "select 1 from dual")). + +do_check_prepares(#{prepare_sql := Prepares}) when is_map(Prepares) -> + ok; +do_check_prepares(State = #{pool_name := PoolName, prepare_sql := {error, Prepares}}) -> + {ok, Sts} = prepare_sql(Prepares, PoolName), + {ok, State#{prepare_sql => Prepares, prepare_statement := Sts}}. + +%% =================================================================== + +oracle_host_options() -> + ?ORACLE_HOST_OPTIONS. 
+ +connect(Opts) -> + Password = emqx_secret:unwrap(proplists:get_value(password, Opts)), + NewOpts = lists:keyreplace(password, 1, Opts, {password, Password}), + jamdb_oracle:start_link(NewOpts). + +sql_query_to_str(SqlQuery) -> + emqx_plugin_libs_rule:str(SqlQuery). + +sql_params_to_str(Params) when is_list(Params) -> + lists:map( + fun + (false) -> "0"; + (true) -> "1"; + (Value) -> emqx_plugin_libs_rule:str(Value) + end, + Params + ). + +query(Conn, SQL, Params) -> + Ret = jamdb_oracle:sql_query(Conn, {sql_query_to_str(SQL), sql_params_to_str(Params)}), + ?tp(oracle_query, #{conn => Conn, sql => SQL, params => Params, result => Ret}), + handle_result(Ret). + +execute_batch(Conn, SQL, ParamsList) -> + ParamsListStr = lists:map(fun sql_params_to_str/1, ParamsList), + Ret = jamdb_oracle:sql_query(Conn, {batch, sql_query_to_str(SQL), ParamsListStr}), + ?tp(oracle_batch_query, #{conn => Conn, sql => SQL, params => ParamsList, result => Ret}), + handle_result(Ret). + +parse_prepare_sql(Config) -> + SQL = + case maps:get(prepare_statement, Config, undefined) of + undefined -> + case maps:get(sql, Config, undefined) of + undefined -> #{}; + Template -> #{<<"send_message">> => Template} + end; + Any -> + Any + end, + parse_prepare_sql(maps:to_list(SQL), #{}, #{}). + +parse_prepare_sql([{Key, H} | T], Prepares, Tokens) -> + {PrepareSQL, ParamsTokens} = emqx_plugin_libs_rule:preproc_sql(H, ':n'), + parse_prepare_sql( + T, Prepares#{Key => PrepareSQL}, Tokens#{Key => ParamsTokens} + ); +parse_prepare_sql([], Prepares, Tokens) -> + #{ + prepare_sql => Prepares, + params_tokens => Tokens + }. + +init_prepare(State = #{prepare_sql := Prepares, pool_name := PoolName}) -> + {ok, Sts} = prepare_sql(Prepares, PoolName), + State#{prepare_statement := Sts}. + +prepare_sql(Prepares, PoolName) when is_map(Prepares) -> + prepare_sql(maps:to_list(Prepares), PoolName); +prepare_sql(Prepares, PoolName) -> + Data = do_prepare_sql(Prepares, PoolName), + {ok, _Sts} = Data, + ecpool:add_reconnect_callback(PoolName, {?MODULE, prepare_sql_to_conn, [Prepares]}), + Data. + +do_prepare_sql(Prepares, PoolName) -> + do_prepare_sql(ecpool:workers(PoolName), Prepares, PoolName, #{}). + +do_prepare_sql([{_Name, Worker} | T], Prepares, PoolName, _LastSts) -> + {ok, Conn} = ecpool_worker:client(Worker), + {ok, Sts} = prepare_sql_to_conn(Conn, Prepares), + do_prepare_sql(T, Prepares, PoolName, Sts); +do_prepare_sql([], _Prepares, _PoolName, LastSts) -> + {ok, LastSts}. + +prepare_sql_to_conn(Conn, Prepares) -> + prepare_sql_to_conn(Conn, Prepares, #{}). + +prepare_sql_to_conn(Conn, [], Statements) when is_pid(Conn) -> {ok, Statements}; +prepare_sql_to_conn(Conn, [{Key, SQL} | PrepareList], Statements) when is_pid(Conn) -> + LogMeta = #{msg => "Oracle Database Prepare Statement", name => Key, prepare_sql => SQL}, + ?SLOG(info, LogMeta), + prepare_sql_to_conn(Conn, PrepareList, Statements#{Key => SQL}). + +to_bin(Bin) when is_binary(Bin) -> + Bin; +to_bin(Atom) when is_atom(Atom) -> + erlang:atom_to_binary(Atom). + +handle_result({error, disconnected}) -> + {error, {recoverable_error, disconnected}}; +handle_result({error, Error}) -> + {error, {unrecoverable_error, Error}}; +handle_result({error, socket, closed} = Error) -> + {error, {recoverable_error, Error}}; +handle_result({error, Type, Reason}) -> + {error, {unrecoverable_error, {Type, Reason}}}; +handle_result(Res) -> + Res. 
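+
+%% Batch results from the driver are folded into a single row count. As a
+%% worked example of the clauses below:
+%%   handle_batch_result([{affected_rows, 2}, {proc_result, 0, []}], 0)
+%% returns {ok, 2}, while any {proc_result, RetCode, Reason} with a non-zero
+%% RetCode aborts with {error, {unrecoverable_error, {RetCode, Reason}}}.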
+ +handle_batch_result([{affected_rows, RowCount} | Rest], Acc) -> + handle_batch_result(Rest, Acc + RowCount); +handle_batch_result([{proc_result, RetCode, _Rows} | Rest], Acc) when RetCode =:= 0 -> + handle_batch_result(Rest, Acc); +handle_batch_result([{proc_result, RetCode, Reason} | _Rest], _Acc) -> + {error, {unrecoverable_error, {RetCode, Reason}}}; +handle_batch_result([], Acc) -> + {ok, Acc}. diff --git a/apps/emqx_oracle/src/emqx_oracle_schema.erl b/apps/emqx_oracle/src/emqx_oracle_schema.erl new file mode 100644 index 000000000..cfa74054a --- /dev/null +++ b/apps/emqx_oracle/src/emqx_oracle_schema.erl @@ -0,0 +1,33 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_oracle_schema). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). + +-define(REF_MODULE, emqx_oracle). + +%% Hocon config schema exports +-export([ + roots/0, + fields/1 +]). + +roots() -> + [{config, #{type => hoconsc:ref(?REF_MODULE, config)}}]. + +fields(config) -> + [{server, server()}, {sid, fun sid/1}] ++ + emqx_connector_schema_lib:relational_db_fields() ++ + emqx_connector_schema_lib:prepare_statement_fields(). + +server() -> + Meta = #{desc => ?DESC(?REF_MODULE, "server")}, + emqx_schema:servers_sc(Meta, (?REF_MODULE):oracle_host_options()). + +sid(type) -> binary(); +sid(desc) -> ?DESC(?REF_MODULE, "sid"); +sid(required) -> true; +sid(_) -> undefined. diff --git a/apps/emqx_plugin_libs/rebar.config b/apps/emqx_plugin_libs/rebar.config index 9f17b7657..dee2902a5 100644 --- a/apps/emqx_plugin_libs/rebar.config +++ b/apps/emqx_plugin_libs/rebar.config @@ -1,5 +1,8 @@ %% -*- mode: erlang -*- -{deps, [{emqx, {path, "../emqx"}}]}. +{deps, [ + {emqx, {path, "../emqx"}}, + {emqx_utils, {path, "../emqx_utils"}} +]}. {project_plugins, [erlfmt]}. diff --git a/apps/emqx_plugin_libs/src/emqx_placeholder.erl b/apps/emqx_plugin_libs/src/emqx_placeholder.erl index 3b15a389d..dcd666f5b 100644 --- a/apps/emqx_plugin_libs/src/emqx_placeholder.erl +++ b/apps/emqx_plugin_libs/src/emqx_placeholder.erl @@ -30,6 +30,7 @@ proc_sql/2, proc_sql_param_str/2, proc_cql_param_str/2, + proc_param_str/3, preproc_tmpl_deep/1, preproc_tmpl_deep/2, proc_tmpl_deep/2, @@ -39,6 +40,14 @@ sql_data/1 ]). +-export([ + quote_sql/1, + quote_cql/1, + quote_mysql/1 +]). + +-include_lib("emqx/include/emqx_placeholder.hrl"). + -define(EX_PLACE_HOLDER, "(\\$\\{[a-zA-Z0-9\\._]+\\})"). -define(EX_PLACE_HOLDER_DOUBLE_QUOTE, "(\\$\\{[a-zA-Z0-9\\._]+\\}|\"\\$\\{[a-zA-Z0-9\\._]+\\}\")"). @@ -60,7 +69,7 @@ -type preproc_sql_opts() :: #{ placeholders => list(binary()), - replace_with => '?' | '$n', + replace_with => '?' | '$n' | ':n', strip_double_quote => boolean() }. @@ -81,6 +90,8 @@ | {tmpl, tmpl_token()} | {value, term()}. +-dialyzer({no_improper_lists, [quote_mysql/1, escape_mysql/4, escape_prepend/4]}). + %%------------------------------------------------------------------------------ %% APIs %%------------------------------------------------------------------------------ @@ -138,7 +149,7 @@ proc_cmd(Tokens, Data, Opts) -> preproc_sql(Sql) -> preproc_sql(Sql, '?'). --spec preproc_sql(binary(), '?' | '$n' | preproc_sql_opts()) -> +-spec preproc_sql(binary(), '?' | '$n' | ':n' | preproc_sql_opts()) -> {prepare_statement_key(), tmpl_token()}. 
preproc_sql(Sql, ReplaceWith) when is_atom(ReplaceWith) ->
    preproc_sql(Sql, #{replace_with => ReplaceWith});
@@ -160,12 +171,22 @@ proc_sql(Tokens, Data) ->

-spec proc_sql_param_str(tmpl_token(), map()) -> binary().
proc_sql_param_str(Tokens, Data) ->
+    % NOTE
+    % This is a bit misleading: currently, escaping logic in `quote_sql/1` likely
+    % won't work with pgsql since it does not support C-style escapes by default.
+    % https://www.postgresql.org/docs/14/sql-syntax-lexical.html#SQL-SYNTAX-CONSTANTS
    proc_param_str(Tokens, Data, fun quote_sql/1).

-spec proc_cql_param_str(tmpl_token(), map()) -> binary().
proc_cql_param_str(Tokens, Data) ->
    proc_param_str(Tokens, Data, fun quote_cql/1).

+-spec proc_param_str(tmpl_token(), map(), fun((_Value) -> iodata())) -> binary().
+proc_param_str(Tokens, Data, Quote) ->
+    iolist_to_binary(
+        proc_tmpl(Tokens, Data, #{return => rawlist, var_trans => Quote})
+    ).
+
-spec preproc_tmpl_deep(term()) -> deep_template().
preproc_tmpl_deep(Data) ->
    preproc_tmpl_deep(Data, #{process_keys => true}).
@@ -219,23 +240,34 @@ sql_data(Bin) when is_binary(Bin) -> Bin;
sql_data(Num) when is_number(Num) -> Num;
sql_data(Bool) when is_boolean(Bool) -> Bool;
sql_data(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8);
-sql_data(Map) when is_map(Map) -> emqx_json:encode(Map).
+sql_data(Map) when is_map(Map) -> emqx_utils_json:encode(Map).

-spec bin(term()) -> binary().
bin(Val) -> emqx_plugin_libs_rule:bin(Val).

+-spec quote_sql(_Value) -> iolist().
+quote_sql(Str) ->
+    quote_escape(Str, fun escape_sql/1).
+
+-spec quote_cql(_Value) -> iolist().
+quote_cql(Str) ->
+    quote_escape(Str, fun escape_cql/1).
+
+-spec quote_mysql(_Value) -> iolist().
+quote_mysql(Str) when is_binary(Str) ->
+    try
+        escape_mysql(Str)
+    catch
+        throw:invalid_utf8 ->
+            [<<"0x">> | binary:encode_hex(Str)]
+    end;
+quote_mysql(Str) ->
+    quote_escape(Str, fun escape_mysql/1).
+
%%------------------------------------------------------------------------------
%% Internal functions
%%------------------------------------------------------------------------------

-proc_param_str(Tokens, Data, Quote) ->
-    iolist_to_binary(
-        proc_tmpl(Tokens, Data, #{return => rawlist, var_trans => Quote})
-    ).
-
-%% backward compatibility for hot upgrading from =< e4.2.1
-get_phld_var(Fun, Data) when is_function(Fun) ->
-    Fun(Data);
get_phld_var(Phld, Data) ->
    emqx_rule_maps:nested_get(Phld, Data).
@@ -284,13 +316,17 @@ preproc_tmpl_deep_map_key(Key, _) ->
replace_with(Tmpl, RE, '?') ->
    re:replace(Tmpl, RE, "?", [{return, binary}, global]);
replace_with(Tmpl, RE, '$n') ->
+    replace_with(Tmpl, RE, <<"$">>);
+replace_with(Tmpl, RE, ':n') ->
+    replace_with(Tmpl, RE, <<":">>);
+replace_with(Tmpl, RE, String) when is_binary(String) ->
    Parts = re:split(Tmpl, RE, [{return, binary}, trim, group]),
    {Res, _} =
        lists:foldl(
            fun
                ([Tkn, _Phld], {Acc, Seq}) ->
                    Seq1 = erlang:integer_to_binary(Seq),
-                    {<<Acc/binary, Tkn/binary, "$", Seq1/binary>>, Seq + 1};
+                    {<<Acc/binary, Tkn/binary, String/binary, Seq1/binary>>, Seq + 1};
                ([Tkn], {Acc, Seq}) ->
                    {<<Acc/binary, Tkn/binary>>, Seq}
            end,
@@ -299,8 +335,12 @@ replace_with(Tmpl, RE, '$n') ->
    ),
    Res.

+parse_nested(<<".", R/binary>>) ->
+    %% ignore the root .
+    parse_nested(R);
parse_nested(Attr) ->
    case string:split(Attr, <<".">>, all) of
+        [<<>>] -> {var, ?PH_VAR_THIS};
        [Attr] -> {var, Attr};
        Nested -> {path, [{key, P} || P <- Nested]}
    end.
@@ -310,21 +350,56 @@ unwrap(<<"\"${", Val/binary>>, _StripDoubleQuote = true) ->
unwrap(<<"${", Val/binary>>, _StripDoubleQuote) ->
    binary:part(Val, {0, byte_size(Val) - 1}).

-quote_sql(Str) ->
-    quote(Str, <<"\\\\'">>).
-
-quote_cql(Str) ->
-    quote(Str, <<"''">>).
- -quote(Str, ReplaceWith) when - is_list(Str); - is_binary(Str); - is_atom(Str); - is_map(Str) --> - [$', escape_apo(bin(Str), ReplaceWith), $']; -quote(Val, _) -> +-spec quote_escape(_Value, fun((binary()) -> iodata())) -> iodata(). +quote_escape(Str, EscapeFun) when is_binary(Str) -> + EscapeFun(Str); +quote_escape(Str, EscapeFun) when is_list(Str) -> + case unicode:characters_to_binary(Str) of + Bin when is_binary(Bin) -> + EscapeFun(Bin); + Otherwise -> + error(Otherwise) + end; +quote_escape(Str, EscapeFun) when is_atom(Str) orelse is_map(Str) -> + EscapeFun(bin(Str)); +quote_escape(Val, _EscapeFun) -> bin(Val). -escape_apo(Str, ReplaceWith) -> - re:replace(Str, <<"'">>, ReplaceWith, [{return, binary}, global]). +-spec escape_sql(binary()) -> iolist(). +escape_sql(S) -> + ES = binary:replace(S, [<<"\\">>, <<"'">>], <<"\\">>, [global, {insert_replaced, 1}]), + [$', ES, $']. + +-spec escape_cql(binary()) -> iolist(). +escape_cql(S) -> + ES = binary:replace(S, <<"'">>, <<"'">>, [global, {insert_replaced, 1}]), + [$', ES, $']. + +-spec escape_mysql(binary()) -> iolist(). +escape_mysql(S0) -> + % https://dev.mysql.com/doc/refman/8.0/en/string-literals.html + [$', escape_mysql(S0, 0, 0, S0), $']. + +%% NOTE +%% This thing looks more complicated than needed because it's optimized for as few +%% intermediate memory (re)allocations as possible. +escape_mysql(<<$', Rest/binary>>, I, Run, Src) -> + escape_prepend(I, Run, Src, [<<"\\'">> | escape_mysql(Rest, I + Run + 1, 0, Src)]); +escape_mysql(<<$\\, Rest/binary>>, I, Run, Src) -> + escape_prepend(I, Run, Src, [<<"\\\\">> | escape_mysql(Rest, I + Run + 1, 0, Src)]); +escape_mysql(<<0, Rest/binary>>, I, Run, Src) -> + escape_prepend(I, Run, Src, [<<"\\0">> | escape_mysql(Rest, I + Run + 1, 0, Src)]); +escape_mysql(<<_/utf8, Rest/binary>> = S, I, Run, Src) -> + CWidth = byte_size(S) - byte_size(Rest), + escape_mysql(Rest, I, Run + CWidth, Src); +escape_mysql(<<>>, 0, _, Src) -> + Src; +escape_mysql(<<>>, I, Run, Src) -> + binary:part(Src, I, Run); +escape_mysql(_, _I, _Run, _Src) -> + throw(invalid_utf8). + +escape_prepend(_RunI, 0, _Src, Tail) -> + Tail; +escape_prepend(I, Run, Src, Tail) -> + [binary:part(Src, I, Run) | Tail]. diff --git a/apps/emqx_plugin_libs/src/emqx_plugin_libs.app.src b/apps/emqx_plugin_libs/src/emqx_plugin_libs.app.src index bcdcfe420..bfd7e68fa 100644 --- a/apps/emqx_plugin_libs/src/emqx_plugin_libs.app.src +++ b/apps/emqx_plugin_libs/src/emqx_plugin_libs.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_plugin_libs, [ {description, "EMQX Plugin utility libs"}, - {vsn, "4.3.4"}, + {vsn, "4.3.10"}, {modules, []}, {applications, [kernel, stdlib]}, {env, []} diff --git a/apps/emqx_plugin_libs/src/emqx_plugin_libs_rule.erl b/apps/emqx_plugin_libs/src/emqx_plugin_libs_rule.erl index 57bdd16e5..9a4c01a2b 100644 --- a/apps/emqx_plugin_libs/src/emqx_plugin_libs_rule.erl +++ b/apps/emqx_plugin_libs/src/emqx_plugin_libs_rule.erl @@ -31,7 +31,8 @@ proc_sql_param_str/2, proc_cql_param_str/2, split_insert_sql/1, - detect_sql_type/1 + detect_sql_type/1, + proc_batch_sql/3 ]). %% type converting @@ -63,13 +64,10 @@ can_topic_match_oneof/2 ]). +-export_type([tmpl_token/0]). + -compile({no_auto_import, [float/1]}). --define(EX_PLACE_HOLDER, "(\\$\\{[a-zA-Z0-9\\._]+\\})"). - -%% Space and CRLF --define(EX_WITHE_CHARS, "\\s"). - -type uri_string() :: iodata(). -type tmpl_token() :: list({var, binary()} | {str, binary()}). 
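To make the new escaping rules added to emqx_placeholder above concrete, here is an illustrative shell session; it is a sketch rather than part of the change, with expected values derived from the escape_sql/escape_cql/escape_mysql clauses and from the t_preproc_mysql1 test further down:

```erlang
%% Sketch (not part of the diff): expected quoting results.
1> iolist_to_binary(emqx_placeholder:quote_sql(<<"it's">>)).
<<"'it\\'s'">>
2> iolist_to_binary(emqx_placeholder:quote_cql(<<"it's">>)).
<<"'it''s'">>
3> %% MySQL additionally escapes backslashes and NUL bytes:
3> iolist_to_binary(emqx_placeholder:quote_mysql(<<"a\\b", 0>>)).
<<"'a\\\\b\\0'">>
4> %% Binaries that are not valid UTF-8 fall back to a hex literal:
4> iolist_to_binary(emqx_placeholder:quote_mysql(<<"non-utf8", 16#DCC900:24>>)).
<<"0x6E6F6E2D75746638DCC900">>
```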
@@ -107,9 +105,8 @@ proc_cmd(Tokens, Data, Opts) -> preproc_sql(Sql) -> emqx_placeholder:preproc_sql(Sql). --spec preproc_sql(Sql :: binary(), ReplaceWith :: '?' | '$n') -> +-spec preproc_sql(Sql :: binary(), ReplaceWith :: '?' | '$n' | ':n') -> {prepare_statement_key(), tmpl_token()}. - preproc_sql(Sql, ReplaceWith) -> emqx_placeholder:preproc_sql(Sql, ReplaceWith). @@ -162,6 +159,20 @@ detect_sql_type(SQL) -> {error, invalid_sql} end. +-spec proc_batch_sql( + BatchReqs :: list({atom(), map()}), + InsertPart :: binary(), + Tokens :: tmpl_token() +) -> InsertSQL :: binary(). +proc_batch_sql(BatchReqs, InsertPart, Tokens) -> + ValuesPart = erlang:iolist_to_binary( + lists:join($,, [ + proc_sql_param_str(Tokens, Msg) + || {_, Msg} <- BatchReqs + ]) + ), + <<InsertPart/binary, " values ", ValuesPart/binary>>. + unsafe_atom_key(Key) when is_atom(Key) -> Key; unsafe_atom_key(Key) when is_binary(Key) -> @@ -213,7 +224,7 @@ tcp_connectivity(Host, Port) -> ) -> ok | {error, Reason :: term()}. tcp_connectivity(Host, Port, Timeout) -> - case gen_tcp:connect(Host, Port, emqx_misc:ipv6_probe([]), Timeout) of + case gen_tcp:connect(Host, Port, emqx_utils:ipv6_probe([]), Timeout) of {ok, Sock} -> gen_tcp:close(Sock), ok; @@ -224,11 +235,11 @@ tcp_connectivity(Host, Port, Timeout) -> str(Bin) when is_binary(Bin) -> binary_to_list(Bin); str(Num) when is_number(Num) -> number_to_list(Num); str(Atom) when is_atom(Atom) -> atom_to_list(Atom); -str(Map) when is_map(Map) -> binary_to_list(emqx_json:encode(Map)); +str(Map) when is_map(Map) -> binary_to_list(emqx_utils_json:encode(Map)); str(List) when is_list(List) -> case io_lib:printable_list(List) of true -> List; - false -> binary_to_list(emqx_json:encode(List)) + false -> binary_to_list(emqx_utils_json:encode(List)) end; str(Data) -> error({invalid_str, Data}). @@ -246,11 +257,11 @@ utf8_str(Str) -> bin(Bin) when is_binary(Bin) -> Bin; bin(Num) when is_number(Num) -> number_to_binary(Num); bin(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8); -bin(Map) when is_map(Map) -> emqx_json:encode(Map); +bin(Map) when is_map(Map) -> emqx_utils_json:encode(Map); bin(List) when is_list(List) -> case io_lib:printable_list(List) of true -> list_to_binary(List); - false -> emqx_json:encode(List) + false -> emqx_utils_json:encode(List) end; bin(Data) -> error({invalid_bin, Data}). @@ -300,7 +311,7 @@ float2str(Float, Precision) when is_float(Float) and is_integer(Precision) -> float_to_binary(Float, [{decimals, Precision}, compact]). map(Bin) when is_binary(Bin) -> - case emqx_json:decode(Bin, [return_maps]) of + case emqx_utils_json:decode(Bin, [return_maps]) of Map = #{} -> Map; _ -> error({invalid_map, Bin}) end; diff --git a/apps/emqx_plugin_libs/test/emqx_placeholder_SUITE.erl b/apps/emqx_plugin_libs/test/emqx_placeholder_SUITE.erl index 6baaaefc6..fc431e80c 100644 --- a/apps/emqx_plugin_libs/test/emqx_placeholder_SUITE.erl +++ b/apps/emqx_plugin_libs/test/emqx_placeholder_SUITE.erl @@ -105,19 +105,27 @@ t_preproc_sql3(_) -> emqx_placeholder:proc_sql_param_str(ParamsTokens, Selected) ).
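As a hedged illustration of the new proc_batch_sql/3 above: assuming a made-up table t and two batched messages, the helper renders one quoted value tuple per message and splices them after the insert statement. This is a sketch, not code from the diff:

```erlang
%% Sketch: assembling a multi-row INSERT with proc_batch_sql/3
%% (table and field names are illustrative).
Tokens = emqx_placeholder:preproc_tmpl(<<"(${id}, ${name})">>),
BatchReqs = [
    {send_message, #{id => 1, name => <<"a">>}},
    {send_message, #{id => 2, name => <<"b">>}}
],
<<"insert into t(id, name) values (1, 'a'),(2, 'b')">> =
    emqx_plugin_libs_rule:proc_batch_sql(BatchReqs, <<"insert into t(id, name)">>, Tokens).
```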
-t_preproc_sql4(_) -> +t_preproc_mysql1(_) -> %% with apostrophes %% https://github.com/emqx/emqx/issues/4135 Selected = #{ a => <<"1''2">>, b => 1, c => 1.0, - d => #{d1 => <<"someone's phone">>} + d => #{d1 => <<"someone's phone">>}, + e => <<$\\, 0, "💩"/utf8>>, + f => <<"non-utf8", 16#DCC900:24>>, + g => "utf8's cool 🐸" }, - ParamsTokens = emqx_placeholder:preproc_tmpl(<<"a:${a},b:${b},c:${c},d:${d}">>), + ParamsTokens = emqx_placeholder:preproc_tmpl( + <<"a:${a},b:${b},c:${c},d:${d},e:${e},f:${f},g:${g}">> + ), ?assertEqual( - <<"a:'1\\'\\'2',b:1,c:1.0,d:'{\"d1\":\"someone\\'s phone\"}'">>, - emqx_placeholder:proc_sql_param_str(ParamsTokens, Selected) + << + "a:'1\\'\\'2',b:1,c:1.0,d:'{\"d1\":\"someone\\'s phone\"}'," + "e:'\\\\\\0💩',f:0x6E6F6E2D75746638DCC900,g:'utf8\\'s cool 🐸'"/utf8 + >>, + emqx_placeholder:proc_param_str(ParamsTokens, Selected, fun emqx_placeholder:quote_mysql/1) ). t_preproc_sql5(_) -> diff --git a/apps/emqx_plugins/README.md b/apps/emqx_plugins/README.md new file mode 100644 index 000000000..9c8faccd1 --- /dev/null +++ b/apps/emqx_plugins/README.md @@ -0,0 +1,12 @@ +# Plugins Management + +This application allows users to upload and install custom, Erlang-based plugins. + +For a more detailed introduction, see [Plugins](https://www.emqx.io/docs/en/v5.0/extensions/plugins.html#develop-emqx-plugins). + +See the HTTP API docs to learn how to [Install/Uninstall a Plugin](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/Plugins). + +## Plugin Template + +We provide a [plugin template](https://github.com/emqx/emqx-plugin-template) that +you can use to learn how to write and package custom plugins. diff --git a/apps/emqx_plugins/i18n/emqx_plugins_schema.conf b/apps/emqx_plugins/i18n/emqx_plugins_schema.conf deleted file mode 100644 index 454d36f6f..000000000 --- a/apps/emqx_plugins/i18n/emqx_plugins_schema.conf +++ /dev/null @@ -1,93 +0,0 @@ -emqx_plugins_schema { - plugins { - desc { - en: """ -Manage EMQX plugins.
-Plugins can be pre-built as a part of EMQX package, -or installed as a standalone package in a location specified by -install_dir config key
-The standalone-installed plugins are referred to as 'external' plugins. -""" - zh: """管理EMQX插件。
-插件可以是EMQX安装包中的一部分,也可以是一个独立的安装包。
-独立安装的插件称为“外部插件”。 - """ - } - label { - en: "Plugins" - zh: "插件" - } - } - state { - desc { - en: "A per-plugin config to describe the desired state of the plugin." - zh: "描述插件的状态" - } - label { - en: "State" - zh: "插件状态" - } - } - name_vsn { - desc { - en: """The {name}-{version} of the plugin.
-It should match the plugin application name-version as the for the plugin release package name
-For example: my_plugin-0.1.0. -""" - zh: """插件的名称{name}-{version}。
-它应该与插件的发布包名称一致,如my_plugin-0.1.0。""" - } - label { - en: "Name-Version" - zh: "名称-版本" - } - } - enable { - desc { - en: "Set to 'true' to enable this plugin" - zh: "设置为“true”以启用此插件" - } - label { - en: "Enable" - zh: "启用" - } - } - states { - desc { - en: """An array of plugins in the desired states.
-The plugins are started in the defined order""" - zh: """一组插件的状态。插件将按照定义的顺序启动""" - } - label { - en: "States" - zh: "插件启动顺序及状态" - } - } - install_dir { - desc { - en: """ -The installation directory for the external plugins. -The plugin beam files and configuration files should reside in -the subdirectory named as emqx_foo_bar-0.1.0. -
-NOTE: For security reasons, this directory should **NOT** be writable -by anyone except emqx (or any user which runs EMQX). -""" - zh: "插件安装包的目录,出于安全考虑,该目录应该值允许 emqx,或用于运行 EMQX 服务的用户拥有写入权限。" - } - label { - en: "Install Directory" - zh: "安装目录" - } - } - check_interval { - desc { - en: """Check interval: check if the status of the plugins in the cluster is consistent,
-if the results of 3 consecutive checks are not consistent, then alarm. -""" - zh: """检查间隔:检查集群中插件的状态是否一致,
-如果连续3次检查结果不一致,则报警。 -""" - } - } -} diff --git a/apps/emqx_plugins/src/emqx_plugins.app.src b/apps/emqx_plugins/src/emqx_plugins.app.src index 1635bb516..d5c16ea59 100644 --- a/apps/emqx_plugins/src/emqx_plugins.app.src +++ b/apps/emqx_plugins/src/emqx_plugins.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_plugins, [ {description, "EMQX Plugin Management"}, - {vsn, "0.1.0"}, + {vsn, "0.1.4"}, {modules, []}, {mod, {emqx_plugins_app, []}}, {applications, [kernel, stdlib, emqx]}, diff --git a/apps/emqx_plugins/src/emqx_plugins.erl b/apps/emqx_plugins/src/emqx_plugins.erl index 613148a24..04faa44e9 100644 --- a/apps/emqx_plugins/src/emqx_plugins.erl +++ b/apps/emqx_plugins/src/emqx_plugins.erl @@ -16,8 +16,13 @@ -module(emqx_plugins). --include_lib("emqx/include/emqx.hrl"). -include_lib("emqx/include/logger.hrl"). +-include_lib("emqx/include/logger.hrl"). +-include("emqx_plugins.hrl"). + +-ifdef(TEST). +-include_lib("eunit/include/eunit.hrl"). +-endif. -export([ ensure_installed/1, @@ -42,7 +47,8 @@ -export([ get_config/2, - put_config/2 + put_config/2, + get_tar/1 ]). %% internal @@ -56,10 +62,6 @@ -compile(nowarn_export_all). -endif. --include_lib("emqx/include/emqx.hrl"). --include_lib("emqx/include/logger.hrl"). --include("emqx_plugins.hrl"). - %% "my_plugin-0.1.0" -type name_vsn() :: binary() | string(). %% the parse result of the JSON info file @@ -87,14 +89,15 @@ ensure_installed(NameVsn) -> do_ensure_installed(NameVsn) -> TarGz = pkg_file(NameVsn), - case erl_tar:extract(TarGz, [{cwd, install_dir()}, compressed]) of - ok -> + case erl_tar:extract(TarGz, [compressed, memory]) of + {ok, TarContent} -> + ok = write_tar_file_content(install_dir(), TarContent), case read_plugin(NameVsn, #{}) of {ok, _} -> ok; {error, Reason} -> ?SLOG(warning, Reason#{msg => "failed_to_read_after_install"}), - _ = ensure_uninstalled(NameVsn), + ok = delete_tar_file_content(install_dir(), TarContent), {error, Reason} end; {error, {_, enoent}} -> @@ -111,8 +114,95 @@ do_ensure_installed(NameVsn) -> }} end. -%% @doc Ensure files and directories for the given plugin are delete. -%% If a plugin is running, or enabled, error is returned. +-spec get_tar(name_vsn()) -> {ok, binary()} | {error, any}. +get_tar(NameVsn) -> + TarGz = pkg_file(NameVsn), + case file:read_file(TarGz) of + {ok, Content} -> + {ok, Content}; + {error, _} -> + case maybe_create_tar(NameVsn, TarGz, install_dir()) of + ok -> + file:read_file(TarGz); + Err -> + Err + end + end. + +maybe_create_tar(NameVsn, TarGzName, InstallDir) when is_binary(InstallDir) -> + maybe_create_tar(NameVsn, TarGzName, binary_to_list(InstallDir)); +maybe_create_tar(NameVsn, TarGzName, InstallDir) -> + case filelib:wildcard(filename:join(dir(NameVsn), "**")) of + [_ | _] = PluginFiles -> + InstallDir1 = string:trim(InstallDir, trailing, "/") ++ "/", + PluginFiles1 = [{string:prefix(F, InstallDir1), F} || F <- PluginFiles], + erl_tar:create(TarGzName, PluginFiles1, [compressed]); + _ -> + {error, plugin_not_found} + end. + +write_tar_file_content(BaseDir, TarContent) -> + lists:foreach( + fun({Name, Bin}) -> + Filename = filename:join(BaseDir, Name), + ok = filelib:ensure_dir(Filename), + ok = file:write_file(Filename, Bin) + end, + TarContent + ). 
+ +delete_tar_file_content(BaseDir, TarContent) -> + lists:foreach( + fun({Name, _}) -> + Filename = filename:join(BaseDir, Name), + case filelib:is_file(Filename) of + true -> + TopDirOrFile = top_dir(BaseDir, Filename), + ok = file:del_dir_r(TopDirOrFile); + false -> + %% probably already deleted + ok + end + end, + TarContent + ). + +top_dir(BaseDir0, DirOrFile) -> + BaseDir = normalize_dir(BaseDir0), + case filename:dirname(DirOrFile) of + RockBottom when RockBottom =:= "/" orelse RockBottom =:= "." -> + throw({out_of_bounds, DirOrFile}); + BaseDir -> + DirOrFile; + Parent -> + top_dir(BaseDir, Parent) + end. + +normalize_dir(Dir) -> + %% Get rid of possible trailing slash + filename:join([Dir, ""]). + +-ifdef(TEST). +normalize_dir_test_() -> + [ + ?_assertEqual("foo", normalize_dir("foo")), + ?_assertEqual("foo", normalize_dir("foo/")), + ?_assertEqual("/foo", normalize_dir("/foo")), + ?_assertEqual("/foo", normalize_dir("/foo/")) + ]. + +top_dir_test_() -> + [ + ?_assertEqual("base/foo", top_dir("base", filename:join(["base", "foo", "bar"]))), + ?_assertEqual("/base/foo", top_dir("/base", filename:join(["/", "base", "foo", "bar"]))), + ?_assertEqual("/base/foo", top_dir("/base/", filename:join(["/", "base", "foo", "bar"]))), + ?_assertThrow({out_of_bounds, _}, top_dir("/base", filename:join(["/", "base"]))), + ?_assertThrow({out_of_bounds, _}, top_dir("/base", filename:join(["/", "foo", "bar"]))) + ]. +-endif. + +%% @doc Ensure files and directories for the given plugin are being deleted. +%% If a plugin is running, or enabled, an error is returned. -spec ensure_uninstalled(name_vsn()) -> ok | {error, any()}. ensure_uninstalled(NameVsn) -> case read_plugin(NameVsn, #{}) of @@ -331,6 +421,7 @@ do_ensure_started(NameVsn) -> tryit( "start_plugins", fun() -> + ok = ensure_exists_and_installed(NameVsn), Plugin = do_read_plugin(NameVsn), ok = load_code_start_apps(NameVsn, Plugin) end @@ -384,6 +475,53 @@ do_read_plugin({file, InfoFile}, Options) -> do_read_plugin(NameVsn, Options) -> do_read_plugin({file, info_file(NameVsn)}, Options). +ensure_exists_and_installed(NameVsn) -> + case filelib:is_dir(dir(NameVsn)) of + true -> + ok; + false -> + %% Do we have the package, but it's not extracted yet? + case get_tar(NameVsn) of + {ok, TarContent} -> + ok = file:write_file(pkg_file(NameVsn), TarContent), + ok = do_ensure_installed(NameVsn); + _ -> + %% If not, try to get it from the cluster. + do_get_from_cluster(NameVsn) + end + end. + +do_get_from_cluster(NameVsn) -> + Nodes = [N || N <- mria:running_nodes(), N /= node()], + case get_from_any_node(Nodes, NameVsn, []) of + {ok, TarContent} -> + ok = file:write_file(pkg_file(NameVsn), TarContent), + ok = do_ensure_installed(NameVsn); + {error, NodeErrors} when Nodes =/= [] -> + ?SLOG(error, #{ + msg => "failed_to_copy_plugin_from_other_nodes", + name_vsn => NameVsn, + node_errors => NodeErrors + }), + {error, plugin_not_found}; + {error, _} -> + ?SLOG(error, #{ + msg => "no_nodes_to_copy_plugin_from", + name_vsn => NameVsn + }), + {error, plugin_not_found} + end. + +get_from_any_node([], _NameVsn, Errors) -> + {error, Errors}; +get_from_any_node([Node | T], NameVsn, Errors) -> + case emqx_plugins_proto_v1:get_tar(Node, NameVsn, infinity) of + {ok, _} = Res -> + Res; + Err -> + get_from_any_node(T, NameVsn, [{Node, Err} | Errors]) + end. 
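For orientation, the tar content flowing through the install and cleanup paths above is the {FileName, Binary} listing that erl_tar produces in memory mode. A minimal sketch, assuming the module's local helpers and an illustrative plugin name:

```erlang
%% Sketch (not part of the diff): in-memory extraction yields the pairs
%% consumed by write_tar_file_content/2 and delete_tar_file_content/2.
{ok, TarContent} = erl_tar:extract("plugins/my_plugin-0.1.0.tar.gz", [compressed, memory]),
ok = write_tar_file_content("plugins", TarContent),
%% on a failed install, the same listing drives the cleanup:
ok = delete_tar_file_content("plugins", TarContent).
```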
+ plugins_readme(NameVsn, #{fill_readme := true}, Info) -> case file:read_file(readme_file(NameVsn)) of {ok, Bin} -> Info#{readme => Bin}; diff --git a/apps/emqx_plugins/src/emqx_plugins_schema.erl b/apps/emqx_plugins/src/emqx_plugins_schema.erl index 8b3cca8fd..b86f6b6c1 100644 --- a/apps/emqx_plugins/src/emqx_plugins_schema.erl +++ b/apps/emqx_plugins/src/emqx_plugins_schema.erl @@ -29,7 +29,7 @@ namespace() -> "plugin". -roots() -> [?CONF_ROOT]. +roots() -> [{?CONF_ROOT, ?HOCON(?R_REF(?CONF_ROOT), #{importance => ?IMPORTANCE_LOW})}]. fields(?CONF_ROOT) -> #{ @@ -73,16 +73,19 @@ states(type) -> ?ARRAY(?R_REF(state)); states(required) -> false; states(default) -> []; states(desc) -> ?DESC(states); +states(importance) -> ?IMPORTANCE_HIGH; states(_) -> undefined. install_dir(type) -> string(); install_dir(required) -> false; -%% runner's root dir -install_dir(default) -> "plugins"; -install_dir(T) when T =/= desc -> undefined; -install_dir(desc) -> ?DESC(install_dir). +%% runner's root dir todo move to data dir in 5.1 +install_dir(default) -> <<"plugins">>; +install_dir(desc) -> ?DESC(install_dir); +install_dir(importance) -> ?IMPORTANCE_LOW; +install_dir(_) -> undefined. check_interval(type) -> emqx_schema:duration(); -check_interval(default) -> "5s"; -check_interval(T) when T =/= desc -> undefined; -check_interval(desc) -> ?DESC(check_interval). +check_interval(default) -> <<"5s">>; +check_interval(desc) -> ?DESC(check_interval); +check_interval(deprecated) -> {since, "5.0.24"}; +check_interval(_) -> undefined. diff --git a/apps/emqx_plugins/src/emqx_plugins_sup.erl b/apps/emqx_plugins/src/emqx_plugins_sup.erl index 31427aaf6..f22daa9b8 100644 --- a/apps/emqx_plugins/src/emqx_plugins_sup.erl +++ b/apps/emqx_plugins/src/emqx_plugins_sup.erl @@ -26,18 +26,6 @@ start_link() -> supervisor:start_link({local, ?MODULE}, ?MODULE, []). init([]) -> - %% TODO: Add monitor plugins change. - Monitor = emqx_plugins_monitor, - _Children = [ - #{ - id => Monitor, - start => {Monitor, start_link, []}, - restart => permanent, - shutdown => brutal_kill, - type => worker, - modules => [Monitor] - } - ], SupFlags = #{ strategy => one_for_one, diff --git a/apps/emqx_plugins/src/proto/emqx_plugins_proto_v1.erl b/apps/emqx_plugins/src/proto/emqx_plugins_proto_v1.erl new file mode 100644 index 000000000..e1cd42c7b --- /dev/null +++ b/apps/emqx_plugins/src/proto/emqx_plugins_proto_v1.erl @@ -0,0 +1,35 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_plugins_proto_v1). + +-behaviour(emqx_bpapi). + +-export([ + introduced_in/0, + get_tar/3 +]). + +-include_lib("emqx/include/bpapi.hrl"). + +-type name_vsn() :: binary() | string(). + +introduced_in() -> + "5.0.21". + +-spec get_tar(node(), name_vsn(), timeout()) -> {ok, binary()} | {error, any}. 
+get_tar(Node, NameVsn, Timeout) -> + rpc:call(Node, emqx_plugins, get_tar, [NameVsn], Timeout). diff --git a/apps/emqx_plugins/test/emqx_plugins_SUITE.erl b/apps/emqx_plugins/test/emqx_plugins_SUITE.erl index 3823d940d..14d6d06fc 100644 --- a/apps/emqx_plugins/test/emqx_plugins_SUITE.erl +++ b/apps/emqx_plugins/test/emqx_plugins_SUITE.erl @@ -19,17 +19,50 @@ -compile(export_all). -compile(nowarn_export_all). --include_lib("emqx/include/emqx.hrl"). -include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-define(EMQX_PLUGIN_TEMPLATE_RELEASE_NAME, "emqx_plugin_template"). +-define(EMQX_PLUGIN_TEMPLATE_URL, + "https://github.com/emqx/emqx-plugin-template/releases/download/" +). -define(EMQX_PLUGIN_TEMPLATE_VSN, "5.0.0"). +-define(EMQX_PLUGIN_TEMPLATE_TAG, "5.0.0"). +-define(EMQX_ELIXIR_PLUGIN_TEMPLATE_RELEASE_NAME, "elixir_plugin_template"). +-define(EMQX_ELIXIR_PLUGIN_TEMPLATE_URL, + "https://github.com/emqx/emqx-elixir-plugin/releases/download/" +). -define(EMQX_ELIXIR_PLUGIN_TEMPLATE_VSN, "0.1.0"). +-define(EMQX_ELIXIR_PLUGIN_TEMPLATE_TAG, "0.1.0-2"). -define(PACKAGE_SUFFIX, ".tar.gz"). -all() -> emqx_common_test_helpers:all(?MODULE). +all() -> + [ + {group, copy_plugin}, + {group, create_tar_copy_plugin}, + emqx_common_test_helpers:all(?MODULE) + ]. + +groups() -> + [ + {copy_plugin, [sequence], [ + group_t_copy_plugin_to_a_new_node, + group_t_copy_plugin_to_a_new_node_single_node + ]}, + {create_tar_copy_plugin, [sequence], [group_t_copy_plugin_to_a_new_node]} + ]. + +init_per_group(copy_plugin, Config) -> + Config; +init_per_group(create_tar_copy_plugin, Config) -> + [{remove_tar, true} | Config]. + +end_per_group(_Group, _Config) -> + ok. init_per_suite(Config) -> WorkDir = proplists:get_value(data_dir, Config), + filelib:ensure_path(WorkDir), OrigInstallDir = emqx_plugins:get_config(install_dir, undefined), emqx_common_test_helpers:start_apps([emqx_conf]), emqx_plugins:put_config(install_dir, WorkDir), @@ -60,63 +93,44 @@ end_per_testcase(TestCase, Config) -> emqx_plugins:put_configured([]), ?MODULE:TestCase({'end', Config}). -build_demo_plugin_package() -> - build_demo_plugin_package( - #{ - target_path => "_build/default/emqx_plugrel", - release_name => "emqx_plugin_template", - git_url => "https://github.com/emqx/emqx-plugin-template.git", - vsn => ?EMQX_PLUGIN_TEMPLATE_VSN, - workdir => "demo_src", - shdir => emqx_plugins:install_dir() - } - ). +get_demo_plugin_package() -> + get_demo_plugin_package(emqx_plugins:install_dir()). -build_demo_plugin_package( +get_demo_plugin_package( #{ - target_path := TargetPath, release_name := ReleaseName, git_url := GitUrl, vsn := PluginVsn, - workdir := DemoWorkDir, + tag := ReleaseTag, shdir := WorkDir } = Opts ) -> - BuildSh = filename:join([WorkDir, "build-demo-plugin.sh"]), - Cmd = string:join( - [ - BuildSh, - PluginVsn, - TargetPath, - ReleaseName, - GitUrl, - DemoWorkDir - ], - " " - ), - case emqx_run_sh:do(Cmd, [{cd, WorkDir}]) of - {ok, _} -> - Pkg = filename:join([ - WorkDir, - ReleaseName ++ "-" ++ - PluginVsn ++ - ?PACKAGE_SUFFIX - ]), - case filelib:is_regular(Pkg) of - true -> Opts#{package => Pkg}; - false -> error(#{reason => unexpected_build_result, not_found => Pkg}) - end; - {error, {Rc, Output}} -> - io:format(user, "failed_to_build_demo_plugin, Exit = ~p, Output:~n~ts\n", [Rc, Output]), - error(failed_to_build_demo_plugin) - end. 
+ TargetName = lists:flatten([ReleaseName, "-", PluginVsn, ?PACKAGE_SUFFIX]), + FileURI = lists:flatten(lists:join("/", [GitUrl, ReleaseTag, TargetName])), + {ok, {_, _, PluginBin}} = httpc:request(FileURI), + Pkg = filename:join([ + WorkDir, + TargetName + ]), + ok = file:write_file(Pkg, PluginBin), + Opts#{package => Pkg}; +get_demo_plugin_package(Dir) -> + get_demo_plugin_package( + #{ + release_name => ?EMQX_PLUGIN_TEMPLATE_RELEASE_NAME, + git_url => ?EMQX_PLUGIN_TEMPLATE_URL, + vsn => ?EMQX_PLUGIN_TEMPLATE_VSN, + tag => ?EMQX_PLUGIN_TEMPLATE_TAG, + shdir => Dir + } + ). bin(A) when is_atom(A) -> atom_to_binary(A, utf8); bin(L) when is_list(L) -> unicode:characters_to_binary(L, utf8); bin(B) when is_binary(B) -> B. t_demo_install_start_stop_uninstall({init, Config}) -> - Opts = #{package := Package} = build_demo_plugin_package(), + Opts = #{package := Package} = get_demo_plugin_package(), NameVsn = filename:basename(Package, ?PACKAGE_SUFFIX), [ {name_vsn, NameVsn}, @@ -186,7 +200,7 @@ write_info_file(Config, NameVsn, Content) -> ok = file:write_file(InfoFile, Content). t_position({init, Config}) -> - #{package := Package} = build_demo_plugin_package(), + #{package := Package} = get_demo_plugin_package(), NameVsn = filename:basename(Package, ?PACKAGE_SUFFIX), [{name_vsn, NameVsn} | Config]; t_position({'end', _Config}) -> @@ -225,7 +239,7 @@ t_position(Config) -> ok. t_start_restart_and_stop({init, Config}) -> - #{package := Package} = build_demo_plugin_package(), + #{package := Package} = get_demo_plugin_package(), NameVsn = filename:basename(Package, ?PACKAGE_SUFFIX), [{name_vsn, NameVsn} | Config]; t_start_restart_and_stop({'end', _Config}) -> @@ -275,7 +289,7 @@ t_start_restart_and_stop(Config) -> ok. t_enable_disable({init, Config}) -> - #{package := Package} = build_demo_plugin_package(), + #{package := Package} = get_demo_plugin_package(), NameVsn = filename:basename(Package, ?PACKAGE_SUFFIX), [{name_vsn, NameVsn} | Config]; t_enable_disable({'end', Config}) -> @@ -337,27 +351,60 @@ t_bad_tar_gz(Config) -> %% idempotent ok = emqx_plugins:delete_package("fake-vsn"). -%% create a corrupted .tar.gz +%% create with incomplete info file %% failed install attempts should not leave behind extracted dir t_bad_tar_gz2({init, Config}) -> - Config; -t_bad_tar_gz2({'end', _Config}) -> - ok; -t_bad_tar_gz2(Config) -> WorkDir = proplists:get_value(data_dir, Config), NameVsn = "foo-0.2", - %% this an invalid info file content + %% this is an invalid info file content (description missing) BadInfo = "name=foo, rel_vsn=\"0.2\", rel_apps=[foo]", ok = write_info_file(Config, NameVsn, BadInfo), TarGz = filename:join([WorkDir, NameVsn ++ ".tar.gz"]), ok = make_tar(WorkDir, NameVsn), + [{tar_gz, TarGz}, {name_vsn, NameVsn} | Config]; +t_bad_tar_gz2({'end', Config}) -> + NameVsn = ?config(name_vsn, Config), + ok = emqx_plugins:delete_package(NameVsn), + ok; +t_bad_tar_gz2(Config) -> + TarGz = ?config(tar_gz, Config), + NameVsn = ?config(name_vsn, Config), ?assert(filelib:is_regular(TarGz)), - %% failed to install, it also cleans up the bad .tar.gz file + %% failed to install, it also cleans up the bad content of .tar.gz file ?assertMatch({error, _}, emqx_plugins:ensure_installed(NameVsn)), + ?assertEqual({error, enoent}, file:read_file_info(emqx_plugins:dir(NameVsn))), + %% but the tar.gz file is still around + ?assert(filelib:is_regular(TarGz)), + ok.
+ +%% test that we even clean up content that doesn't match the expected name-vsn +%% pattern +t_tar_vsn_content_mismatch({init, Config}) -> + WorkDir = proplists:get_value(data_dir, Config), + NameVsn = "bad_tar-0.2", + %% this is an invalid info file content + BadInfo = "name=foo, rel_vsn=\"0.2\", rel_apps=[\"foo-0.2\"], description=\"lorem ipsum\"", + ok = write_info_file(Config, "foo-0.2", BadInfo), + TarGz = filename:join([WorkDir, "bad_tar-0.2.tar.gz"]), + ok = make_tar(WorkDir, "foo-0.2", NameVsn), + file:delete(filename:join([WorkDir, "foo-0.2", "release.json"])), + [{tar_gz, TarGz}, {name_vsn, NameVsn} | Config]; +t_tar_vsn_content_mismatch({'end', Config}) -> + NameVsn = ?config(name_vsn, Config), + ok = emqx_plugins:delete_package(NameVsn), + ok; +t_tar_vsn_content_mismatch(Config) -> + TarGz = ?config(tar_gz, Config), + NameVsn = ?config(name_vsn, Config), + ?assert(filelib:is_regular(TarGz)), + %% failed to install, it also cleans up content of the bad .tar.gz file even + %% if in another directory + ?assertMatch({error, _}, emqx_plugins:ensure_installed(NameVsn)), + ?assertEqual({error, enoent}, file:read_file_info(emqx_plugins:dir(NameVsn))), + ?assertEqual({error, enoent}, file:read_file_info(emqx_plugins:dir("foo-0.2"))), %% the tar.gz file is still around ?assert(filelib:is_regular(TarGz)), - ?assertEqual({error, enoent}, file:read_file_info(emqx_plugins:dir(NameVsn))), - ok = emqx_plugins:delete_package(NameVsn). + ok. t_bad_info_json({init, Config}) -> Config; @@ -388,14 +435,13 @@ t_bad_info_json(Config) -> t_elixir_plugin({init, Config}) -> Opts0 = #{ - target_path => "_build/prod/plugrelex/elixir_plugin_template", - release_name => "elixir_plugin_template", - git_url => "https://github.com/emqx/emqx-elixir-plugin.git", + release_name => ?EMQX_ELIXIR_PLUGIN_TEMPLATE_RELEASE_NAME, + git_url => ?EMQX_ELIXIR_PLUGIN_TEMPLATE_URL, vsn => ?EMQX_ELIXIR_PLUGIN_TEMPLATE_VSN, - workdir => "demo_src_elixir", + tag => ?EMQX_ELIXIR_PLUGIN_TEMPLATE_TAG, shdir => emqx_plugins:install_dir() }, - Opts = #{package := Package} = build_demo_plugin_package(Opts0), + Opts = #{package := Package} = get_demo_plugin_package(Opts0), NameVsn = filename:basename(Package, ?PACKAGE_SUFFIX), [ {name_vsn, NameVsn}, @@ -458,12 +504,187 @@ t_elixir_plugin(Config) -> ?assertEqual([], emqx_plugins:list()), ok.
+group_t_copy_plugin_to_a_new_node({init, Config}) -> + WorkDir = proplists:get_value(data_dir, Config), + FromInstallDir = filename:join(WorkDir, atom_to_list(plugins_copy_from)), + file:del_dir_r(FromInstallDir), + ok = filelib:ensure_path(FromInstallDir), + ToInstallDir = filename:join(WorkDir, atom_to_list(plugins_copy_to)), + file:del_dir_r(ToInstallDir), + ok = filelib:ensure_path(ToInstallDir), + #{package := Package, release_name := PluginName} = get_demo_plugin_package(FromInstallDir), + [{CopyFrom, CopyFromOpts}, {CopyTo, CopyToOpts}] = + emqx_common_test_helpers:emqx_cluster( + [ + {core, plugins_copy_from}, + {core, plugins_copy_to} + ], + #{ + apps => [emqx_conf, emqx_plugins], + env => [ + {emqx, init_config_load_done, false}, + {emqx, boot_modules, []} + ], + load_schema => false + } + ), + CopyFromNode = emqx_common_test_helpers:start_slave( + CopyFrom, maps:remove(join_to, CopyFromOpts) + ), + ok = rpc:call(CopyFromNode, emqx_plugins, put_config, [install_dir, FromInstallDir]), + CopyToNode = emqx_common_test_helpers:start_slave(CopyTo, maps:remove(join_to, CopyToOpts)), + ok = rpc:call(CopyToNode, emqx_plugins, put_config, [install_dir, ToInstallDir]), + NameVsn = filename:basename(Package, ?PACKAGE_SUFFIX), + ok = rpc:call(CopyFromNode, emqx_plugins, ensure_installed, [NameVsn]), + ok = rpc:call(CopyFromNode, emqx_plugins, ensure_started, [NameVsn]), + ok = rpc:call(CopyFromNode, emqx_plugins, ensure_enabled, [NameVsn]), + case proplists:get_bool(remove_tar, Config) of + true -> + %% Test the case when a plugin is installed, but its original tar file is removed + %% and must be re-created + ok = file:delete(filename:join(FromInstallDir, NameVsn ++ ?PACKAGE_SUFFIX)); + false -> + ok + end, + [ + {from_install_dir, FromInstallDir}, + {to_install_dir, ToInstallDir}, + {copy_from_node, CopyFromNode}, + {copy_to_node, CopyToNode}, + {name_vsn, NameVsn}, + {plugin_name, PluginName} + | Config + ]; +group_t_copy_plugin_to_a_new_node({'end', Config}) -> + CopyFromNode = proplists:get_value(copy_from_node, Config), + CopyToNode = proplists:get_value(copy_to_node, Config), + ok = rpc:call(CopyFromNode, emqx_config, delete_override_conf_files, []), + ok = rpc:call(CopyToNode, emqx_config, delete_override_conf_files, []), + rpc:call(CopyToNode, ekka, leave, []), + rpc:call(CopyFromNode, ekka, leave, []), + ok = emqx_common_test_helpers:stop_slave(CopyToNode), + ok = emqx_common_test_helpers:stop_slave(CopyFromNode), + ok = file:del_dir_r(proplists:get_value(to_install_dir, Config)), + ok = file:del_dir_r(proplists:get_value(from_install_dir, Config)); +group_t_copy_plugin_to_a_new_node(Config) -> + CopyFromNode = proplists:get_value(copy_from_node, Config), + CopyToNode = proplists:get_value(copy_to_node, Config), + CopyToDir = proplists:get_value(to_install_dir, Config), + CopyFromPluginsState = rpc:call(CopyFromNode, emqx_plugins, get_config, [[states], []]), + NameVsn = proplists:get_value(name_vsn, Config), + PluginName = proplists:get_value(plugin_name, Config), + PluginApp = list_to_atom(PluginName), + ?assertMatch([#{enable := true, name_vsn := NameVsn}], CopyFromPluginsState), + ?assert( + proplists:is_defined( + PluginApp, + rpc:call(CopyFromNode, application, which_applications, []) + ) + ), + ?assertEqual([], filelib:wildcard(filename:join(CopyToDir, "**"))), + %% Check that a new node doesn't have this plugin before it joins the cluster + ?assertEqual([], rpc:call(CopyToNode, emqx_conf, get, [[plugins, states], []])), + ?assertMatch({error, _}, rpc:call(CopyToNode, 
emqx_plugins, describe, [NameVsn])), + ?assertNot( + proplists:is_defined( + PluginApp, + rpc:call(CopyToNode, application, which_applications, []) + ) + ), + ok = rpc:call(CopyToNode, ekka, join, [CopyFromNode]), + %% Mimic cluster-override conf copying + ok = rpc:call(CopyToNode, emqx_plugins, put_config, [[states], CopyFromPluginsState]), + %% Plugin copying is triggered upon app restart on a new node. + %% This is similar to emqx_conf, which copies cluster-override conf upon start, + %% see: emqx_conf_app:init_conf/0 + ok = rpc:call(CopyToNode, application, stop, [emqx_plugins]), + {ok, _} = rpc:call(CopyToNode, application, ensure_all_started, [emqx_plugins]), + ?assertMatch( + {ok, #{running_status := running, config_status := enabled}}, + rpc:call(CopyToNode, emqx_plugins, describe, [NameVsn]) + ). + +%% checks that we can start a cluster with a lone node. +group_t_copy_plugin_to_a_new_node_single_node({init, Config}) -> + PrivDataDir = ?config(priv_dir, Config), + ToInstallDir = filename:join(PrivDataDir, "plugins_copy_to"), + file:del_dir_r(ToInstallDir), + ok = filelib:ensure_path(ToInstallDir), + #{package := Package, release_name := PluginName} = get_demo_plugin_package(ToInstallDir), + NameVsn = filename:basename(Package, ?PACKAGE_SUFFIX), + [{CopyTo, CopyToOpts}] = + emqx_common_test_helpers:emqx_cluster( + [ + {core, plugins_copy_to} + ], + #{ + apps => [emqx_conf, emqx_plugins], + env => [ + {emqx, init_config_load_done, false}, + {emqx, boot_modules, []} + ], + env_handler => fun + (emqx_plugins) -> + ok = emqx_plugins:put_config(install_dir, ToInstallDir), + %% this is to simulate an user setting the state + %% via environment variables before starting the node + ok = emqx_plugins:put_config( + states, + [#{name_vsn => NameVsn, enable => true}] + ), + ok; + (_) -> + ok + end, + priv_data_dir => PrivDataDir, + schema_mod => emqx_conf_schema, + peer_mod => slave, + load_schema => true + } + ), + [ + {to_install_dir, ToInstallDir}, + {copy_to_node_name, CopyTo}, + {copy_to_opts, CopyToOpts}, + {name_vsn, NameVsn}, + {plugin_name, PluginName} + | Config + ]; +group_t_copy_plugin_to_a_new_node_single_node({'end', Config}) -> + CopyToNode = proplists:get_value(copy_to_node, Config), + ok = emqx_common_test_helpers:stop_slave(CopyToNode), + ok = file:del_dir_r(proplists:get_value(to_install_dir, Config)), + ok; +group_t_copy_plugin_to_a_new_node_single_node(Config) -> + CopyTo = ?config(copy_to_node_name, Config), + CopyToOpts = ?config(copy_to_opts, Config), + ToInstallDir = ?config(to_install_dir, Config), + NameVsn = proplists:get_value(name_vsn, Config), + %% Start the node for the first time. The plugin should start + %% successfully even if it's not extracted yet. Simply starting + %% the node would crash if not working properly. + CopyToNode = emqx_common_test_helpers:start_slave(CopyTo, CopyToOpts), + ct:pal("~p config:\n ~p", [ + CopyToNode, erpc:call(CopyToNode, emqx_plugins, get_config, [[], #{}]) + ]), + ct:pal("~p install_dir:\n ~p", [ + CopyToNode, erpc:call(CopyToNode, file, list_dir, [ToInstallDir]) + ]), + ?assertMatch( + {ok, #{running_status := running, config_status := enabled}}, + rpc:call(CopyToNode, emqx_plugins, describe, [NameVsn]) + ), + ok. + make_tar(Cwd, NameWithVsn) -> + make_tar(Cwd, NameWithVsn, NameWithVsn). 
+ +make_tar(Cwd, NameWithVsn, TarfileVsn) -> {ok, OriginalCwd} = file:get_cwd(), ok = file:set_cwd(Cwd), try Files = filelib:wildcard(NameWithVsn ++ "/**"), - TarFile = NameWithVsn ++ ".tar.gz", + TarFile = TarfileVsn ++ ".tar.gz", ok = erl_tar:create(TarFile, Files, [compressed]) after file:set_cwd(OriginalCwd) diff --git a/apps/emqx_plugins/test/emqx_plugins_SUITE_data/build-demo-plugin.sh b/apps/emqx_plugins/test/emqx_plugins_SUITE_data/build-demo-plugin.sh deleted file mode 100755 index 15255524e..000000000 --- a/apps/emqx_plugins/test/emqx_plugins_SUITE_data/build-demo-plugin.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -vsn="${1}" -target_path="${2}" -release_name="${3}" -git_url="${4}" -workdir="${5}" - -target_name="${release_name}-${vsn}.tar.gz" -target="$workdir/${target_path}/${target_name}" -if [ -f "${target}" ]; then - cp "$target" ./ - exit 0 -fi - -# cleanup -rm -rf "${workdir}" - -git clone "${git_url}" -b "${vsn}" "${workdir}" -make -C "$workdir" rel - -cp "$target" ./ diff --git a/apps/emqx_prometheus/README.md b/apps/emqx_prometheus/README.md index ddff9774c..c008adc81 100644 --- a/apps/emqx_prometheus/README.md +++ b/apps/emqx_prometheus/README.md @@ -1,279 +1,16 @@ -# emqx-prometheus +# EMQX Prometheus Agent -EMQX Prometheus Agent - -## push emqx stats/metrics to prometheus PushGateway - -``` -prometheus.push.gateway.server = http://127.0.0.1:9091 - -prometheus.interval = 15000 -``` - -## pull emqx stats/metrics - -``` -Method: GET -Path: api/v4/emqx_prometheus?type=prometheus -params: type: [prometheus| json] - -prometheus data - -# TYPE erlang_vm_ets_limit gauge -erlang_vm_ets_limit 256000 -# TYPE erlang_vm_logical_processors gauge -erlang_vm_logical_processors 4 -# TYPE erlang_vm_logical_processors_available gauge -erlang_vm_logical_processors_available NaN -# TYPE erlang_vm_logical_processors_online gauge -erlang_vm_logical_processors_online 4 -# TYPE erlang_vm_port_count gauge -erlang_vm_port_count 17 -# TYPE erlang_vm_port_limit gauge -erlang_vm_port_limit 1048576 +This application integrates EMQX with Prometheus. It provides +an HTTP API for collecting metrics of the current node, +and it can also be configured with a Push Gateway URL for pushing these metrics. -json data +For a more detailed introduction, see [Integrate with Prometheus](https://www.emqx.io/docs/en/v5.0/observability/prometheus.html#integrate-with-prometheus). -{ - "stats": {key:value}, - "metrics": {key:value}, - "packets": {key:value}, - "messages": {key:value}, - "delivery": {key:value}, - "client": {key:value}, - "session": {key:value} -} - -``` +See the HTTP API docs to learn how to +[Update Prometheus config](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/Monitor/paths/~1prometheus/put) +and [Get all metrics data](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/Monitor/paths/~1prometheus~1stats/get).
-## Before EMQX v4.0.0 -The prometheus data simple is: - - -```bash -# TYPE erlang_vm_ets_limit gauge -erlang_vm_ets_limit 2097152 -# TYPE erlang_vm_logical_processors gauge -erlang_vm_logical_processors 2 -# TYPE erlang_vm_logical_processors_available gauge -erlang_vm_logical_processors_available 2 -# TYPE erlang_vm_logical_processors_online gauge -erlang_vm_logical_processors_online 2 -# TYPE erlang_vm_port_count gauge -erlang_vm_port_count 19 -# TYPE erlang_vm_port_limit gauge -erlang_vm_port_limit 1048576 -# TYPE erlang_vm_process_count gauge -erlang_vm_process_count 460 -# TYPE erlang_vm_process_limit gauge -erlang_vm_process_limit 2097152 -# TYPE erlang_vm_schedulers gauge -erlang_vm_schedulers 2 -# TYPE erlang_vm_schedulers_online gauge -erlang_vm_schedulers_online 2 -# TYPE erlang_vm_smp_support untyped -erlang_vm_smp_support 1 -# TYPE erlang_vm_threads untyped -erlang_vm_threads 1 -# TYPE erlang_vm_thread_pool_size gauge -erlang_vm_thread_pool_size 32 -# TYPE erlang_vm_time_correction untyped -erlang_vm_time_correction 1 -# TYPE erlang_vm_statistics_context_switches counter -erlang_vm_statistics_context_switches 39850 -# TYPE erlang_vm_statistics_garbage_collection_number_of_gcs counter -erlang_vm_statistics_garbage_collection_number_of_gcs 17116 -# TYPE erlang_vm_statistics_garbage_collection_words_reclaimed counter -erlang_vm_statistics_garbage_collection_words_reclaimed 55711819 -# TYPE erlang_vm_statistics_garbage_collection_bytes_reclaimed counter -erlang_vm_statistics_garbage_collection_bytes_reclaimed 445694552 -# TYPE erlang_vm_statistics_bytes_received_total counter -erlang_vm_statistics_bytes_received_total 400746 -# TYPE erlang_vm_statistics_bytes_output_total counter -erlang_vm_statistics_bytes_output_total 337197 -# TYPE erlang_vm_statistics_reductions_total counter -erlang_vm_statistics_reductions_total 21157980 -# TYPE erlang_vm_statistics_run_queues_length_total gauge -erlang_vm_statistics_run_queues_length_total 0 -# TYPE erlang_vm_statistics_runtime_milliseconds counter -erlang_vm_statistics_runtime_milliseconds 6559 -# TYPE erlang_vm_statistics_wallclock_time_milliseconds counter -erlang_vm_statistics_wallclock_time_milliseconds 261243 -# TYPE erlang_vm_memory_atom_bytes_total gauge -erlang_vm_memory_atom_bytes_total{usage="used"} 1814822 -erlang_vm_memory_atom_bytes_total{usage="free"} 22459 -# TYPE erlang_vm_memory_bytes_total gauge -erlang_vm_memory_bytes_total{kind="system"} 109820104 -erlang_vm_memory_bytes_total{kind="processes"} 44983656 -# TYPE erlang_vm_dets_tables gauge -erlang_vm_dets_tables 1 -# TYPE erlang_vm_ets_tables gauge -erlang_vm_ets_tables 139 -# TYPE erlang_vm_memory_processes_bytes_total gauge -erlang_vm_memory_processes_bytes_total{usage="used"} 44983656 -erlang_vm_memory_processes_bytes_total{usage="free"} 0 -# TYPE erlang_vm_memory_system_bytes_total gauge -erlang_vm_memory_system_bytes_total{usage="atom"} 1837281 -erlang_vm_memory_system_bytes_total{usage="binary"} 595872 -erlang_vm_memory_system_bytes_total{usage="code"} 40790577 -erlang_vm_memory_system_bytes_total{usage="ets"} 37426896 -erlang_vm_memory_system_bytes_total{usage="other"} 29169478 -# TYPE erlang_mnesia_held_locks gauge -erlang_mnesia_held_locks 0 -# TYPE erlang_mnesia_lock_queue gauge -erlang_mnesia_lock_queue 0 -# TYPE erlang_mnesia_transaction_participants gauge -erlang_mnesia_transaction_participants 0 -# TYPE erlang_mnesia_transaction_coordinators gauge -erlang_mnesia_transaction_coordinators 0 -# TYPE erlang_mnesia_failed_transactions counter 
-erlang_mnesia_failed_transactions 2 -# TYPE erlang_mnesia_committed_transactions counter -erlang_mnesia_committed_transactions 239 -# TYPE erlang_mnesia_logged_transactions counter -erlang_mnesia_logged_transactions 60 -# TYPE erlang_mnesia_restarted_transactions counter -erlang_mnesia_restarted_transactions 0 -# TYPE emqx_packets_auth_received counter -emqx_packets_auth_received 0 -# TYPE emqx_packets_auth_sent counter -emqx_packets_auth_sent 0 -# TYPE emqx_packets_received counter -emqx_packets_received 0 -# TYPE emqx_packets_sent counter -emqx_packets_sent 0 -# TYPE emqx_packets_connect counter -emqx_packets_connect 0 -# TYPE emqx_packets_connack_sent counter -emqx_packets_connack_sent 0 -# TYPE emqx_packets_connack_error counter -emqx_packets_connack_error 0 -# TYPE emqx_packets_connack_auth_error counter -emqx_packets_connack_auth_error 0 -# TYPE emqx_packets_disconnect_received counter -emqx_packets_disconnect_received 0 -# TYPE emqx_packets_disconnect_sent counter -emqx_packets_disconnect_sent 0 -# TYPE emqx_packets_subscribe counter -emqx_packets_subscribe 0 -# TYPE emqx_packets_subscribe_error counter -emqx_packets_subscribe_error 0 -# TYPE emqx_packets_subscribe_auth_error counter -emqx_packets_subscribe_auth_error 0 -# TYPE emqx_packets_suback counter -emqx_packets_suback 0 -# TYPE emqx_packets_unsubscribe counter -emqx_packets_unsubscribe 0 -# TYPE emqx_packets_unsubscribe_error counter -emqx_packets_unsubscribe_error 0 -# TYPE emqx_packets_unsuback counter -emqx_packets_unsuback 0 -# TYPE emqx_packets_publish_received counter -emqx_packets_publish_received 0 -# TYPE emqx_packets_publish_sent counter -emqx_packets_publish_sent 0 -# TYPE emqx_packets_publish_auth_error counter -emqx_packets_publish_auth_error 0 -# TYPE emqx_packets_publish_error counter -emqx_packets_publish_error 0 -# TYPE emqx_packets_puback_received counter -emqx_packets_puback_received 0 -# TYPE emqx_packets_puback_sent counter -emqx_packets_puback_sent 0 -# TYPE emqx_packets_puback_missed counter -emqx_packets_puback_missed 0 -# TYPE emqx_packets_pubrec_received counter -emqx_packets_pubrec_received 0 -# TYPE emqx_packets_pubrec_sent counter -emqx_packets_pubrec_sent 0 -# TYPE emqx_packets_pubrec_missed counter -emqx_packets_pubrec_missed 0 -# TYPE emqx_packets_pubrel_received counter -emqx_packets_pubrel_received 0 -# TYPE emqx_packets_pubrel_sent counter -emqx_packets_pubrel_sent 0 -# TYPE emqx_packets_pubrel_missed counter -emqx_packets_pubrel_missed 0 -# TYPE emqx_packets_pubcomp_received counter -emqx_packets_pubcomp_received 0 -# TYPE emqx_packets_pubcomp_sent counter -emqx_packets_pubcomp_sent 0 -# TYPE emqx_packets_pubcomp_missed counter -emqx_packets_pubcomp_missed 0 -# TYPE emqx_packets_pingreq counter -emqx_packets_pingreq 0 -# TYPE emqx_packets_pingresp counter -emqx_packets_pingresp 0 -# TYPE emqx_bytes_received counter -emqx_bytes_received 0 -# TYPE emqx_bytes_sent counter -emqx_bytes_sent 0 -# TYPE emqx_connections_count gauge -emqx_connections_count 0 -# TYPE emqx_connections_max gauge -emqx_connections_max 0 -# TYPE emqx_retained_count gauge -emqx_retained_count 3 -# TYPE emqx_retained_max gauge -emqx_retained_max 3 -# TYPE emqx_sessions_count gauge -emqx_sessions_count 0 -# TYPE emqx_sessions_max gauge -emqx_sessions_max 0 -# TYPE emqx_subscriptions_count gauge -emqx_subscriptions_count 0 -# TYPE emqx_subscriptions_max gauge -emqx_subscriptions_max 0 -# TYPE emqx_topics_count gauge -emqx_topics_count 0 -# TYPE emqx_topics_max gauge -emqx_topics_max 0 -# TYPE emqx_vm_cpu_use gauge 
-emqx_vm_cpu_use 100.0 -# TYPE emqx_vm_cpu_idle gauge -emqx_vm_cpu_idle 0.0 -# TYPE emqx_vm_run_queue gauge -emqx_vm_run_queue 1 -# TYPE emqx_vm_process_messages_in_queues gauge -emqx_vm_process_messages_in_queues 0 -# TYPE emqx_messages_received counter -emqx_messages_received 0 -# TYPE emqx_messages_sent counter -emqx_messages_sent 0 -# TYPE emqx_messages_dropped counter -emqx_messages_dropped 0 -# TYPE emqx_messages_retained counter -emqx_messages_retained 3 -# TYPE emqx_messages_qos0_received counter -emqx_messages_qos0_received 0 -# TYPE emqx_messages_qos0_sent counter -emqx_messages_qos0_sent 0 -# TYPE emqx_messages_qos1_received counter -emqx_messages_qos1_received 0 -# TYPE emqx_messages_qos1_sent counter -emqx_messages_qos1_sent 0 -# TYPE emqx_messages_qos2_received counter -emqx_messages_qos2_received 0 -# TYPE emqx_messages_qos2_expired counter -emqx_messages_qos2_expired 0 -# TYPE emqx_messages_qos2_sent counter -emqx_messages_qos2_sent 0 -# TYPE emqx_messages_qos2_dropped counter -emqx_messages_qos2_dropped 0 -# TYPE emqx_messages_forward counter -emqx_messages_forward 0 -``` - - -License -------- - -Apache License Version 2.0 - -Author ------- - -EMQX Team. - +Correspondingly, we have also provided a [Grafana template](https://grafana.com/grafana/dashboards/17446-emqx/) +for visualizing these metrics. diff --git a/apps/emqx_prometheus/TODO b/apps/emqx_prometheus/TODO deleted file mode 100644 index a868fba7e..000000000 --- a/apps/emqx_prometheus/TODO +++ /dev/null @@ -1,2 +0,0 @@ -1. Add more VM Metrics -2. Add more emqx Metrics diff --git a/apps/emqx_prometheus/i18n/emqx_prometheus_schema_i18n.conf b/apps/emqx_prometheus/i18n/emqx_prometheus_schema_i18n.conf deleted file mode 100644 index 7f251ff4b..000000000 --- a/apps/emqx_prometheus/i18n/emqx_prometheus_schema_i18n.conf +++ /dev/null @@ -1,70 +0,0 @@ -emqx_prometheus_schema { - - prometheus { - desc { - en: """Settings for reporting metrics to Prometheus""" - zh: """Prometheus 监控数据推送""" - } - label { - en: """Prometheus""" - zh: """Prometheus""" - } - } - - push_gateway_server { - desc { - en: """URL of Prometheus server""" - zh: """Prometheus 服务器地址""" - } - } - - interval { - desc { - en: """Data reporting interval""" - zh: """数据推送间隔""" - } - } - enable { - desc { - en: """Turn Prometheus data pushing on or off""" - zh: """开启或关闭 Prometheus 数据推送""" - } - } - vm_dist_collector { - desc { - en: """Enable or disable VM distribution collector, collects information about the sockets and processes involved in the Erlang distribution mechanism.""" - zh: """开启或关闭 VM 分布采集器,收集 Erlang 分布机制中涉及的套接字和进程的信息。""" - } - } - mnesia_collector { - desc { - en: """Enable or disable Mnesia collector, collects Mnesia metrics mainly using mnesia:system_info/1 .""" - zh: """开启或关闭 Mnesia 采集器, 使用 mnesia:system_info/1 收集 Mnesia 相关指标""" - } - } - vm_statistics_collector { - desc { - en: """Enable or disable VM statistics collector, collects Erlang VM metrics using erlang:statistics/1 .""" - zh: """开启或关闭 VM 统计采集器, 使用 erlang:statistics/1 收集 Erlang VM 相关指标""" - } - } - - vm_system_info_collector { - desc { - en: """Enable or disable VM system info collector, collects Erlang VM metrics using erlang:system_info/1 .""" - zh: """开启或关闭 VM 系统信息采集器, 使用 erlang:system_info/1 收集 Erlang VM 相关指标""" - } - } - vm_memory_collector { - desc { - en: """Enable or disable VM memory collector, collects information about memory dynamically allocated by the Erlang emulator using erlang:memory/0 , also provides basic (D)ETS statistics .""" - zh: """开启或关闭 VM 内存采集器, 使用 
erlang:memory/0 收集 Erlang 虚拟机动态分配的内存信息,同时提供基本的 (D)ETS 统计信息""" - } - } - vm_msacc_collector { - desc { - en: """Enable or disable VM msacc collector, collects microstate accounting metrics using erlang:statistics(microstate_accounting) .""" - zh: """开启或关闭 VM msacc 采集器, 使用 erlang:statistics(microstate_accounting) 收集微状态计数指标""" - } - } -} diff --git a/apps/emqx_prometheus/rebar.config b/apps/emqx_prometheus/rebar.config index 88b3d27a2..7b9a6cc48 100644 --- a/apps/emqx_prometheus/rebar.config +++ b/apps/emqx_prometheus/rebar.config @@ -2,6 +2,7 @@ {deps, [ {emqx, {path, "../emqx"}}, + {emqx_utils, {path, "../emqx_utils"}}, {prometheus, {git, "https://github.com/deadtrickster/prometheus.erl", {tag, "v4.8.1"}}} ]}. diff --git a/apps/emqx_prometheus/src/emqx_prometheus.app.src b/apps/emqx_prometheus/src/emqx_prometheus.app.src index d95c89c3b..f94b22d81 100644 --- a/apps/emqx_prometheus/src/emqx_prometheus.app.src +++ b/apps/emqx_prometheus/src/emqx_prometheus.app.src @@ -2,10 +2,10 @@ {application, emqx_prometheus, [ {description, "Prometheus for EMQX"}, % strict semver, bump manually! - {vsn, "5.0.3"}, + {vsn, "5.0.10"}, {modules, []}, {registered, [emqx_prometheus_sup]}, - {applications, [kernel, stdlib, prometheus, emqx]}, + {applications, [kernel, stdlib, prometheus, emqx, emqx_management]}, {mod, {emqx_prometheus_app, []}}, {env, []}, {licenses, ["Apache-2.0"]}, diff --git a/apps/emqx_prometheus/src/emqx_prometheus.erl b/apps/emqx_prometheus/src/emqx_prometheus.erl index 5424c4e24..d999f294e 100644 --- a/apps/emqx_prometheus/src/emqx_prometheus.erl +++ b/apps/emqx_prometheus/src/emqx_prometheus.erl @@ -98,8 +98,13 @@ handle_cast(_Msg, State) -> {noreply, State}. handle_info({timeout, Timer, ?TIMER_MSG}, State = #{timer := Timer}) -> - #{interval := Interval, push_gateway_server := Server} = opts(), - PushRes = push_to_push_gateway(Server), + #{ + interval := Interval, + headers := Headers, + job_name := JobName, + push_gateway_server := Server + } = opts(), + PushRes = push_to_push_gateway(Server, Headers, JobName), NewTimer = ensure_timer(Interval), NewState = maps:update_with(PushRes, fun(C) -> C + 1 end, 1, State#{timer => NewTimer}), %% Data is too big, hibernate for saving memory and stop system monitor warning. @@ -107,18 +112,27 @@ handle_info({timeout, Timer, ?TIMER_MSG}, State = #{timer := Timer}) -> handle_info(_Msg, State) -> {noreply, State}. -push_to_push_gateway(Uri) -> +push_to_push_gateway(Uri, Headers, JobName) when is_list(Headers) -> [Name, Ip] = string:tokens(atom_to_list(node()), "@"), - Url = lists:concat([Uri, "/metrics/job/", Name, "/instance/", Name, "~", Ip]), + JobName1 = emqx_placeholder:preproc_tmpl(JobName), + JobName2 = binary_to_list( + emqx_placeholder:proc_tmpl( + JobName1, + #{<<"name">> => Name, <<"host">> => Ip} + ) + ), + + Url = lists:concat([Uri, "/metrics/job/", JobName2]), Data = prometheus_text_format:format(), - case httpc:request(post, {Url, [], "text/plain", Data}, ?HTTP_OPTIONS, []) of - {ok, {{"HTTP/1.1", 200, "OK"}, _Headers, _Body}} -> + case httpc:request(post, {Url, Headers, "text/plain", Data}, ?HTTP_OPTIONS, []) of + {ok, {{"HTTP/1.1", 200, _}, _RespHeaders, _RespBody}} -> ok; Error -> ?SLOG(error, #{ msg => "post_to_push_gateway_failed", error => Error, - url => Url + url => Url, + headers => Headers }), failed end. @@ -130,7 +144,7 @@ terminate(_Reason, _State) -> ok. ensure_timer(Interval) -> - emqx_misc:start_timer(Interval, ?TIMER_MSG). + emqx_utils:start_timer(Interval, ?TIMER_MSG). 
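The job_name templating above can be sanity-checked in isolation. With a node test@127.0.0.1 and the non-default template used by the test suite below, the expansion and resulting push URL look like this (a sketch, not part of the diff):

```erlang
%% Sketch: expanding the job_name template as push_to_push_gateway/3 does.
1> Tokens = emqx_placeholder:preproc_tmpl(<<"${name}~${host}">>).
2> emqx_placeholder:proc_tmpl(Tokens, #{<<"name">> => "test", <<"host">> => "127.0.0.1"}).
<<"test~127.0.0.1">>
3> %% hence the push URL: http://127.0.0.1:9091/metrics/job/test~127.0.0.1
```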
%%-------------------------------------------------------------------- %% prometheus callbacks @@ -205,6 +219,10 @@ emqx_collect(emqx_connections_count, Stats) -> gauge_metric(?C('connections.count', Stats)); emqx_collect(emqx_connections_max, Stats) -> gauge_metric(?C('connections.max', Stats)); +emqx_collect(emqx_live_connections_count, Stats) -> + gauge_metric(?C('live_connections.count', Stats)); +emqx_collect(emqx_live_connections_max, Stats) -> + gauge_metric(?C('live_connections.max', Stats)); %% sessions emqx_collect(emqx_sessions_count, Stats) -> gauge_metric(?C('sessions.count', Stats)); @@ -446,6 +464,8 @@ emqx_stats() -> [ emqx_connections_count, emqx_connections_max, + emqx_live_connections_count, + emqx_live_connections_max, emqx_sessions_count, emqx_sessions_max, emqx_topics_count, @@ -570,20 +590,7 @@ emqx_vm() -> ]. emqx_vm_data() -> - Idle = - case cpu_sup:util([detailed]) of - %% Not support for Windows - {_, 0, 0, _} -> 0; - {_Num, _Use, IdleList, _} -> ?C(idle, IdleList) - end, - RunQueue = erlang:statistics(run_queue), - [ - {run_queue, RunQueue}, - %% XXX: Plan removed at v5.0 - {process_total_messages, 0}, - {cpu_idle, Idle}, - {cpu_use, 100 - Idle} - ] ++ emqx_vm:mem_info(). + emqx_mgmt:vm_stats(). emqx_cluster() -> [ @@ -592,7 +599,8 @@ emqx_cluster() -> ]. emqx_cluster_data() -> - #{running_nodes := Running, stopped_nodes := Stopped} = mria_mnesia:cluster_info(), + Running = emqx:cluster_nodes(running), + Stopped = emqx:cluster_nodes(stopped), [ {nodes_running, length(Running)}, {nodes_stopped, length(Stopped)} diff --git a/apps/emqx_prometheus/src/emqx_prometheus_api.erl b/apps/emqx_prometheus/src/emqx_prometheus_api.erl index 7466a1fd1..d3bfc0224 100644 --- a/apps/emqx_prometheus/src/emqx_prometheus_api.erl +++ b/apps/emqx_prometheus/src/emqx_prometheus_api.erl @@ -121,12 +121,8 @@ prometheus_config_example() -> enable => true, interval => "15s", push_gateway_server => <<"http://127.0.0.1:9091">>, - vm_dist_collector => enabled, - mnesia_collector => enabled, - vm_statistics_collector => enabled, - vm_system_info_collector => enabled, - vm_memory_collector => enabled, - vm_msacc_collector => enabled + headers => #{'header-name' => 'header-value'}, + job_name => <<"${name}/instance/${name}~${host}">> }. prometheus_data_schema() -> diff --git a/apps/emqx_prometheus/src/emqx_prometheus_schema.erl b/apps/emqx_prometheus/src/emqx_prometheus_schema.erl index 688c9be58..f8005f06b 100644 --- a/apps/emqx_prometheus/src/emqx_prometheus_schema.erl +++ b/apps/emqx_prometheus/src/emqx_prometheus_schema.erl @@ -25,7 +25,9 @@ roots/0, fields/1, desc/1, - translation/1 + translation/1, + convert_headers/1, + validate_push_gateway_server/1 ]). namespace() -> "prometheus". 
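The schema hunk that follows adds the headers and job_name fields. Combined with the existing fields, a resulting HOCON config could look like this sketch (values borrowed from the test config further down; illustrative, not a recommendation):

```
prometheus {
  enable = true
  push_gateway_server = "http://127.0.0.1:9091"
  interval = "15s"
  headers = { Authorization = "some-authz-tokens" }
  job_name = "${name}/instance/${name}~${host}"
}
```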
@@ -38,8 +40,9 @@ fields("prometheus") -> ?HOCON( string(), #{ - default => "http://127.0.0.1:9091", + default => <<"http://127.0.0.1:9091">>, required => true, + validator => fun ?MODULE:validate_push_gateway_server/1, desc => ?DESC(push_gateway_server) } )}, @@ -47,11 +50,31 @@ fields("prometheus") -> ?HOCON( emqx_schema:duration_ms(), #{ - default => "15s", + default => <<"15s">>, required => true, desc => ?DESC(interval) } )}, + {headers, + ?HOCON( + list({string(), string()}), + #{ + default => #{}, + required => false, + converter => fun ?MODULE:convert_headers/1, + desc => ?DESC(headers) + } + )}, + {job_name, + ?HOCON( + binary(), + #{ + default => <<"${name}/instance/${name}~${host}">>, + required => true, + desc => ?DESC(job_name) + } + )}, + {enable, ?HOCON( boolean(), @@ -67,7 +90,7 @@ fields("prometheus") -> #{ default => enabled, required => true, - hidden => true, + importance => ?IMPORTANCE_HIDDEN, desc => ?DESC(vm_dist_collector) } )}, @@ -77,7 +100,7 @@ fields("prometheus") -> #{ default => enabled, required => true, - hidden => true, + importance => ?IMPORTANCE_HIDDEN, desc => ?DESC(mnesia_collector) } )}, @@ -87,7 +110,7 @@ fields("prometheus") -> #{ default => enabled, required => true, - hidden => true, + importance => ?IMPORTANCE_HIDDEN, desc => ?DESC(vm_statistics_collector) } )}, @@ -97,7 +120,7 @@ fields("prometheus") -> #{ default => enabled, required => true, - hidden => true, + importance => ?IMPORTANCE_HIDDEN, desc => ?DESC(vm_system_info_collector) } )}, @@ -107,7 +130,7 @@ fields("prometheus") -> #{ default => enabled, required => true, - hidden => true, + importance => ?IMPORTANCE_HIDDEN, desc => ?DESC(vm_memory_collector) } )}, @@ -117,7 +140,7 @@ fields("prometheus") -> #{ default => enabled, required => true, - hidden => true, + importance => ?IMPORTANCE_HIDDEN, desc => ?DESC(vm_msacc_collector) } )} @@ -126,6 +149,23 @@ fields("prometheus") -> desc("prometheus") -> ?DESC(prometheus); desc(_) -> undefined. +convert_headers(Headers) when is_map(Headers) -> + maps:fold( + fun(K, V, Acc) -> + [{binary_to_list(K), binary_to_list(V)} | Acc] + end, + [], + Headers + ); +convert_headers(Headers) when is_list(Headers) -> + Headers. + +validate_push_gateway_server(Url) -> + case uri_string:parse(Url) of + #{scheme := S} when S =:= "https" orelse S =:= "http" -> ok; + _ -> {error, "Invalid url"} + end. + %% for CI test, CI don't load the whole emqx_conf_schema. translation(Name) -> emqx_conf_schema:translation(Name). diff --git a/apps/emqx_prometheus/test/emqx_prometheus_SUITE.erl b/apps/emqx_prometheus/test/emqx_prometheus_SUITE.erl index b9df1103b..77d9902a2 100644 --- a/apps/emqx_prometheus/test/emqx_prometheus_SUITE.erl +++ b/apps/emqx_prometheus/test/emqx_prometheus_SUITE.erl @@ -27,6 +27,8 @@ "prometheus {\n" " push_gateway_server = \"http://127.0.0.1:9091\"\n" " interval = \"1s\"\n" + " headers = { Authorization = \"some-authz-tokens\"}\n" + " job_name = \"${name}~${host}\"\n" " enable = true\n" " vm_dist_collector = enabled\n" " mnesia_collector = enabled\n" @@ -85,6 +87,25 @@ t_collector_no_crash_test(_) -> prometheus_text_format:format(), ok. +t_assert_push(_) -> + meck:new(httpc, [passthrough]), + Self = self(), + AssertPush = fun(Method, Req = {Url, Headers, ContentType, _Data}, HttpOpts, Opts) -> + ?assertEqual(post, Method), + ?assertMatch("http://127.0.0.1:9091/metrics/job/test~127.0.0.1", Url), + ?assertEqual([{"Authorization", "some-authz-tokens"}], Headers), + ?assertEqual("text/plain", ContentType), + Self ! 
pass, + meck:passthrough([Method, Req, HttpOpts, Opts]) + end, + meck:expect(httpc, request, AssertPush), + ?assertMatch(ok, emqx_prometheus_sup:start_child(emqx_prometheus)), + receive + pass -> ok + after 2000 -> + ct:fail(assert_push_request_failed) + end. + t_only_for_coverage(_) -> ?assertEqual("5.0.0", emqx_prometheus_proto_v1:introduced_in()), ok. diff --git a/apps/emqx_prometheus/test/emqx_prometheus_api_SUITE.erl b/apps/emqx_prometheus/test/emqx_prometheus_api_SUITE.erl index 0968100b8..e29d46720 100644 --- a/apps/emqx_prometheus/test/emqx_prometheus_api_SUITE.erl +++ b/apps/emqx_prometheus/test/emqx_prometheus_api_SUITE.erl @@ -66,7 +66,7 @@ t_prometheus_api(_) -> Auth = emqx_mgmt_api_test_util:auth_header_(), {ok, Response} = emqx_mgmt_api_test_util:request_api(get, Path, "", Auth), - Conf = emqx_json:decode(Response, [return_maps]), + Conf = emqx_utils_json:decode(Response, [return_maps]), ?assertMatch( #{ <<"push_gateway_server">> := _, @@ -84,7 +84,7 @@ t_prometheus_api(_) -> NewConf = Conf#{<<"interval">> => <<"2s">>, <<"vm_statistics_collector">> => <<"disabled">>}, {ok, Response2} = emqx_mgmt_api_test_util:request_api(put, Path, "", Auth, NewConf), - Conf2 = emqx_json:decode(Response2, [return_maps]), + Conf2 = emqx_utils_json:decode(Response2, [return_maps]), ?assertMatch(NewConf, Conf2), ?assertEqual({ok, []}, application:get_env(prometheus, vm_statistics_collector_metrics)), ?assertEqual({ok, all}, application:get_env(prometheus, vm_memory_collector_metrics)), @@ -92,6 +92,12 @@ t_prometheus_api(_) -> NewConf1 = Conf#{<<"enable">> => (not Enable)}, {ok, _Response3} = emqx_mgmt_api_test_util:request_api(put, Path, "", Auth, NewConf1), ?assertEqual((not Enable), undefined =/= erlang:whereis(emqx_prometheus)), + + ConfWithoutScheme = Conf#{<<"push_gateway_server">> => "127.0.0.1:8081"}, + ?assertMatch( + {error, {"HTTP/1.1", 400, _}}, + emqx_mgmt_api_test_util:request_api(put, Path, "", Auth, ConfWithoutScheme) + ), ok. t_stats_api(_) -> @@ -100,7 +106,7 @@ t_stats_api(_) -> Headers = [{"accept", "application/json"}, Auth], {ok, Response} = emqx_mgmt_api_test_util:request_api(get, Path, "", Headers), - Data = emqx_json:decode(Response, [return_maps]), + Data = emqx_utils_json:decode(Response, [return_maps]), ?assertMatch(#{<<"client">> := _, <<"delivery">> := _}, Data), {ok, _} = emqx_mgmt_api_test_util:request_api(get, Path, "", Auth), diff --git a/apps/emqx_psk/i18n/emqx_psk_i18n.conf b/apps/emqx_psk/i18n/emqx_psk_i18n.conf deleted file mode 100644 index 6bba9c6d5..000000000 --- a/apps/emqx_psk/i18n/emqx_psk_i18n.conf +++ /dev/null @@ -1,60 +0,0 @@ -emqx_psk_schema { - - psk_authentication { - desc { - en: """PSK stands for 'Pre-Shared Keys'. -This config to enable TLS-PSK authentication. - -Important! Make sure the SSL listener with only tlsv1.2 enabled, and also PSK cipher suites -configured, such as RSA-PSK-AES256-GCM-SHA384. - -See listener SSL options config for more details. - -The IDs and secrets can be provided from a file which is configurable by the init_file field. -""" - zh: """此配置用于启用 TLS-PSK 身份验证。 - -PSK 是 “Pre-Shared-Keys” 的缩写。 - -注意: 确保 SSL 监听器仅启用了 'tlsv1.2',并且配置了PSK 密码套件,例如 'RSA-PSK-AES256-GCM-SHA384'。 - -可以通过查看监听器中的 SSL 选项,了解更多详细信息。 - -可以通过配置 'init_file' 来设置初始化的 ID 和 密钥""" - } - } - - enable { - desc { - en: "Whether to enable TLS PSK support" - zh: "是否开启 TLS PSK 支持" - } - } - - init_file { - desc { - en: """If init_file is specified, EMQX will import PSKs from the file into the built-in database at startup for use by the runtime. 
-The file has to be structured line-by-line, each line must be in the format of PSKIdentity:SharedSecret. -For example: mydevice1:c2VjcmV0""" - zh: """如果设置了初始化文件,EMQX 将在启动时从初始化文件中导入 PSK 信息到内建数据库中。 -这个文件需要按行进行组织,每一行必须遵守如下格式: PSKIdentity:SharedSecret -例如: mydevice1:c2VjcmV0""" - } - } - - separator { - desc { - en: "The separator between PSKIdentity and SharedSecret in the PSK file" - - zh: "PSK 文件中 PSKIdentitySharedSecret 之间的分隔符" - } - } - - chunk_size { - desc { - en: "The size of each chunk used to import to the built-in database from PSK file" - zh: "将 PSK 文件导入到内建数据时每个块的大小" - } - } - -} diff --git a/apps/emqx_resource/i18n/emqx_resource_schema_i18n.conf b/apps/emqx_resource/i18n/emqx_resource_schema_i18n.conf deleted file mode 100644 index 332dfdd8c..000000000 --- a/apps/emqx_resource/i18n/emqx_resource_schema_i18n.conf +++ /dev/null @@ -1,161 +0,0 @@ -emqx_resource_schema { - - resource_opts { - desc { - en: """Resource options.""" - zh: """资源相关的选项。""" - } - label { - en: """Resource Options""" - zh: """资源选项""" - } - } - - creation_opts { - desc { - en: """Creation options.""" - zh: """资源启动相关的选项。""" - } - label { - en: """Creation Options""" - zh: """资源启动选项""" - } - } - - worker_pool_size { - desc { - en: """The number of buffer workers. Only applicable for egress type bridges. -For bridges only have ingress direction data flow, it can be set to 0 otherwise must be greater than 0.""" - zh: """缓存队列 worker 数量。仅对 egress 类型的桥接有意义。当桥接仅有 ingress 方向时,可设置为 0,否则必须大于 0)。""" - } - label { - en: """Buffer Pool Size""" - zh: """缓存池大小""" - } - } - - health_check_interval { - desc { - en: """Health check interval, in milliseconds.""" - zh: """健康检查间隔,单位毫秒。""" - } - label { - en: """Health Check Interval""" - zh: """健康检查间隔""" - } - } - - start_after_created { - desc { - en: """Whether start the resource right after created.""" - zh: """是否在创建资源后立即启动资源。""" - } - label { - en: """Start After Created""" - zh: """创建后立即启动""" - } - } - - start_timeout { - desc { - en: """If 'start_after_created' enabled, how long time do we wait for the resource get started, in milliseconds.""" - zh: """如果选择了创建后立即启动资源,此选项用来设置等待资源启动的超时时间,单位毫秒。""" - } - label { - en: """Start Timeout""" - zh: """启动超时时间""" - } - } - - auto_restart_interval { - desc { - en: """The auto restart interval after the resource is disconnected, in milliseconds.""" - zh: """资源断开以后,自动重连的时间间隔,单位毫秒。""" - } - label { - en: """Auto Restart Interval""" - zh: """自动重连间隔""" - } - } - - query_mode { - desc { - en: """Query mode. Optional 'sync/async', default 'sync'.""" - zh: """请求模式。可选 '同步/异步',默认为'同步'模式。""" - } - label { - en: """Query mode""" - zh: """请求模式""" - } - } - - enable_batch { - desc { - en: """Batch mode enabled.""" - zh: """启用批量模式。""" - } - label { - en: """Enable batch""" - zh: """启用批量模式""" - } - } - - enable_queue { - desc { - en: """Enable disk buffer queue (only applicable for egress bridges). -When Enabled, messages will be buffered on disk when the bridge connection is down. 
-When disabled the messages are buffered in RAM only.""" - zh: """启用磁盘缓存队列(仅对 egress 方向桥接有用)。""" - } - label { - en: """Enable disk buffer queue""" - zh: """启用磁盘缓存队列""" - } - } - - async_inflight_window { - desc { - en: """Async query inflight window.""" - zh: """异步请求飞行队列窗口大小。""" - } - label { - en: """Async inflight window""" - zh: """异步请求飞行队列窗口""" - } - } - - batch_size { - desc { - en: """Maximum batch count.""" - zh: """批量请求大小。""" - } - label { - en: """Batch size""" - zh: """批量请求大小""" - } - } - - batch_time { - desc { - en: """Maximum batch waiting interval.""" - zh: """最大批量请求等待时间。""" - } - label { - en: """Batch time""" - zh: """批量等待间隔""" - } - } - - max_queue_bytes { - desc { - en: """Maximum number of bytes to buffer for each buffer worker.""" - zh: """每个缓存 worker 允许使用的最大字节数。""" - } - label { - en: """Max buffer queue size""" - zh: """缓存队列最大长度""" - } - } - - -} diff --git a/apps/emqx_resource/include/emqx_resource.hrl b/apps/emqx_resource/include/emqx_resource.hrl index 6929cece9..e6f86fb59 100644 --- a/apps/emqx_resource/include/emqx_resource.hrl +++ b/apps/emqx_resource/include/emqx_resource.hrl @@ -15,7 +15,6 @@ %%-------------------------------------------------------------------- -type resource_type() :: module(). -type resource_id() :: binary(). --type manager_id() :: binary(). -type raw_resource_config() :: binary() | raw_term_resource_config(). -type raw_term_resource_config() :: #{binary() => term()} | [raw_term_resource_config()]. -type resource_config() :: term(). @@ -29,7 +28,11 @@ -type query_opts() :: #{ %% The key used for picking a resource worker pick_key => term(), - async_reply_fun => reply_fun() + timeout => timeout(), + expire_at => infinity | integer(), + async_reply_fun => reply_fun(), + simple_query => boolean(), + is_buffer_supported => boolean() }. -type resource_data() :: #{ id := resource_id(), @@ -37,9 +40,9 @@ callback_mode := callback_mode(), query_mode := query_mode(), config := resource_config(), + error := term(), state := resource_state(), - status := resource_status(), - metrics := emqx_metrics_worker:metrics() + status := resource_status() }. -type resource_group() :: binary(). -type creation_opts() :: #{ @@ -64,14 +67,12 @@ %% If the resource disconnected, we can set to retry starting the resource %% periodically. auto_restart_interval => pos_integer(), - enable_batch => boolean(), batch_size => pos_integer(), batch_time => pos_integer(), - enable_queue => boolean(), - max_queue_bytes => pos_integer(), + max_buffer_bytes => pos_integer(), query_mode => query_mode(), resume_interval => pos_integer(), - async_inflight_window => pos_integer() + inflight_window => pos_integer() }. -type query_result() :: ok @@ -83,18 +84,17 @@ -define(WORKER_POOL_SIZE, 16). --define(DEFAULT_QUEUE_SEG_SIZE, 10 * 1024 * 1024). --define(DEFAULT_QUEUE_SEG_SIZE_RAW, <<"10MB">>). +-define(DEFAULT_BUFFER_BYTES, 256 * 1024 * 1024). +-define(DEFAULT_BUFFER_BYTES_RAW, <<"256MB">>). --define(DEFAULT_QUEUE_SIZE, 100 * 1024 * 1024). --define(DEFAULT_QUEUE_SIZE_RAW, <<"100MB">>). +-define(DEFAULT_REQUEST_TIMEOUT, timer:seconds(15)). %% count --define(DEFAULT_BATCH_SIZE, 100). +-define(DEFAULT_BATCH_SIZE, 1). %% milliseconds --define(DEFAULT_BATCH_TIME, 20). --define(DEFAULT_BATCH_TIME_RAW, <<"20ms">>). +-define(DEFAULT_BATCH_TIME, 0). +-define(DEFAULT_BATCH_TIME_RAW, <<"0ms">>). %% count -define(DEFAULT_INFLIGHT, 100). @@ -103,9 +103,17 @@ -define(HEALTHCHECK_INTERVAL, 15000). -define(HEALTHCHECK_INTERVAL_RAW, <<"15s">>). +%% milliseconds +-define(START_TIMEOUT, 5000). 
+-define(START_TIMEOUT_RAW, <<"5s">>). + +%% boolean +-define(START_AFTER_CREATED, true). +-define(START_AFTER_CREATED_RAW, <<"true">>). + %% milliseconds -define(AUTO_RESTART_INTERVAL, 60000). -define(AUTO_RESTART_INTERVAL_RAW, <<"60s">>). --define(TEST_ID_PREFIX, "_test_:"). +-define(TEST_ID_PREFIX, "_probe_:"). -define(RES_METRICS, resource_metrics). diff --git a/apps/emqx_resource/src/emqx_resource.app.src b/apps/emqx_resource/src/emqx_resource.app.src index 00389261b..3e264cb3e 100644 --- a/apps/emqx_resource/src/emqx_resource.app.src +++ b/apps/emqx_resource/src/emqx_resource.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_resource, [ {description, "Manager for all external resources"}, - {vsn, "0.1.4"}, + {vsn, "0.1.15"}, {registered, []}, {mod, {emqx_resource_app, []}}, {applications, [ diff --git a/apps/emqx_resource/src/emqx_resource.erl b/apps/emqx_resource/src/emqx_resource.erl index 30934e0e7..80f270b13 100644 --- a/apps/emqx_resource/src/emqx_resource.erl +++ b/apps/emqx_resource/src/emqx_resource.erl @@ -79,8 +79,7 @@ query/2, query/3, %% query the instance without batching and queuing messages. - simple_sync_query/2, - simple_async_query/3 + simple_sync_query/2 ]). %% Direct calls to the callback module @@ -104,6 +103,7 @@ list_instances_verbose/0, %% return the data of the instance get_instance/1, + get_metrics/1, fetch_creation_opts/1, %% return all the instances of the same resource type list_instances_by_type/1, @@ -111,7 +111,12 @@ list_group_instances/1 ]). --export([inc_received/1, apply_reply_fun/2]). +-export([apply_reply_fun/2]). + +-export_type([ + resource_id/0, + resource_data/0 +]). -optional_callbacks([ on_query/3, @@ -129,6 +134,9 @@ %% when calling emqx_resource:stop/1 -callback on_stop(resource_id(), resource_state()) -> term(). +%% when calling emqx_resource:get_callback_mode/1 +-callback callback_mode() -> callback_mode(). + %% when calling emqx_resource:query/3 -callback on_query(resource_id(), Request :: term(), resource_state()) -> query_result(). @@ -256,19 +264,21 @@ reset_metrics(ResId) -> query(ResId, Request) -> query(ResId, Request, #{}). --spec query(resource_id(), Request :: term(), emqx_resource_worker:query_opts()) -> +-spec query(resource_id(), Request :: term(), query_opts()) -> Result :: term(). query(ResId, Request, Opts) -> - case emqx_resource_manager:ets_lookup(ResId) of + case emqx_resource_manager:lookup_cached(ResId) of {ok, _Group, #{query_mode := QM, mod := Module}} -> IsBufferSupported = is_buffer_supported(Module), case {IsBufferSupported, QM} of {true, _} -> - emqx_resource_worker:simple_sync_query(ResId, Request); + %% only Kafka producer so far + Opts1 = Opts#{is_buffer_supported => true}, + emqx_resource_buffer_worker:simple_async_query(ResId, Request, Opts1); {false, sync} -> - emqx_resource_worker:sync_query(ResId, Request, Opts); + emqx_resource_buffer_worker:sync_query(ResId, Request, Opts); {false, async} -> - emqx_resource_worker:async_query(ResId, Request, Opts) + emqx_resource_buffer_worker:async_query(ResId, Request, Opts) end; {error, not_found} -> ?RESOURCE_ERROR(not_found, "resource not found") @@ -276,11 +286,7 @@ query(ResId, Request, Opts) -> -spec simple_sync_query(resource_id(), Request :: term()) -> Result :: term(). simple_sync_query(ResId, Request) -> - emqx_resource_worker:simple_sync_query(ResId, Request). - --spec simple_async_query(resource_id(), Request :: term(), reply_fun()) -> Result :: term(). 
-simple_async_query(ResId, Request, ReplyFun) -> - emqx_resource_worker:simple_async_query(ResId, Request, ReplyFun). + emqx_resource_buffer_worker:simple_sync_query(ResId, Request). -spec start(resource_id()) -> ok | {error, Reason :: term()}. start(ResId) -> @@ -312,7 +318,12 @@ set_resource_status_connecting(ResId) -> -spec get_instance(resource_id()) -> {ok, resource_group(), resource_data()} | {error, Reason :: term()}. get_instance(ResId) -> - emqx_resource_manager:lookup(ResId). + emqx_resource_manager:lookup_cached(ResId). + +-spec get_metrics(resource_id()) -> + emqx_metrics_worker:metrics(). +get_metrics(ResId) -> + emqx_resource_manager:get_metrics(ResId). -spec fetch_creation_opts(map()) -> creation_opts(). fetch_creation_opts(Opts) -> @@ -322,9 +333,12 @@ fetch_creation_opts(Opts) -> list_instances() -> [Id || #{id := Id} <- list_instances_verbose()]. --spec list_instances_verbose() -> [resource_data()]. +-spec list_instances_verbose() -> [_ResourceDataWithMetrics :: map()]. list_instances_verbose() -> - emqx_resource_manager:list_all(). + [ + Res#{metrics => get_metrics(ResId)} + || #{id := ResId} = Res <- emqx_resource_manager:list_all() + ]. -spec list_instances_by_type(module()) -> [resource_id()]. list_instances_by_type(ResourceType) -> @@ -354,22 +368,29 @@ is_buffer_supported(Module) -> false end. --spec call_start(manager_id(), module(), resource_config()) -> +-spec call_start(resource_id(), module(), resource_config()) -> {ok, resource_state()} | {error, Reason :: term()}. -call_start(MgrId, Mod, Config) -> - ?SAFE_CALL(Mod:on_start(MgrId, Config)). +call_start(ResId, Mod, Config) -> + try + Mod:on_start(ResId, Config) + catch + throw:Error -> + {error, Error}; + Kind:Error:Stacktrace -> + {error, #{exception => Kind, reason => Error, stacktrace => Stacktrace}} + end. --spec call_health_check(manager_id(), module(), resource_state()) -> +-spec call_health_check(resource_id(), module(), resource_state()) -> resource_status() | {resource_status(), resource_state()} | {resource_status(), resource_state(), term()} | {error, term()}. -call_health_check(MgrId, Mod, ResourceState) -> - ?SAFE_CALL(Mod:on_get_status(MgrId, ResourceState)). +call_health_check(ResId, Mod, ResourceState) -> + ?SAFE_CALL(Mod:on_get_status(ResId, ResourceState)). --spec call_stop(manager_id(), module(), resource_state()) -> term(). -call_stop(MgrId, Mod, ResourceState) -> - ?SAFE_CALL(Mod:on_stop(MgrId, ResourceState)). +-spec call_stop(resource_id(), module(), resource_state()) -> term(). +call_stop(ResId, Mod, ResourceState) -> + ?SAFE_CALL(Mod:on_stop(ResId, ResourceState)). -spec check_config(resource_type(), raw_resource_config()) -> {ok, resource_config()} | {error, term()}. @@ -467,8 +488,5 @@ apply_reply_fun(From, Result) -> %% ================================================================================= -inc_received(ResId) -> - emqx_metrics_worker:inc(?RES_METRICS, ResId, 'received'). - filter_instances(Filter) -> [Id || #{id := Id, mod := Mod} <- list_instances_verbose(), Filter(Id, Mod)]. diff --git a/apps/emqx_resource/src/emqx_resource_buffer_worker.erl b/apps/emqx_resource/src/emqx_resource_buffer_worker.erl new file mode 100644 index 000000000..2dd14c46b --- /dev/null +++ b/apps/emqx_resource/src/emqx_resource_buffer_worker.erl @@ -0,0 +1,1751 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved. 
+%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +%% This module implements async message sending, disk message queuing, +%% and message batching using ReplayQ. + +-module(emqx_resource_buffer_worker). + +-include("emqx_resource.hrl"). +-include("emqx_resource_errors.hrl"). +-include_lib("emqx/include/logger.hrl"). +-include_lib("stdlib/include/ms_transform.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +-behaviour(gen_statem). + +-export([ + start_link/3, + sync_query/3, + async_query/3, + block/1, + resume/1, + flush_worker/1 +]). + +-export([ + simple_sync_query/2, + simple_async_query/3 +]). + +-export([ + callback_mode/0, + init/1, + terminate/2, + code_change/3 +]). + +-export([running/3, blocked/3]). + +-export([queue_item_marshaller/1, estimate_size/1]). + +-export([handle_async_reply/2, handle_async_batch_reply/2, reply_call/2]). + +-export([clear_disk_queue_dir/2]). + +-elvis([{elvis_style, dont_repeat_yourself, disable}]). + +-define(COLLECT_REQ_LIMIT, 1000). +-define(SEND_REQ(FROM, REQUEST), {'$send_req', FROM, REQUEST}). +-define(QUERY(FROM, REQUEST, SENT, EXPIRE_AT), {query, FROM, REQUEST, SENT, EXPIRE_AT}). +-define(SIMPLE_QUERY(REQUEST), ?QUERY(undefined, REQUEST, false, infinity)). +-define(REPLY(FROM, SENT, RESULT), {reply, FROM, SENT, RESULT}). +-define(INFLIGHT_ITEM(Ref, BatchOrQuery, IsRetriable, WorkerMRef), + {Ref, BatchOrQuery, IsRetriable, WorkerMRef} +). +-define(ITEM_IDX, 2). +-define(RETRY_IDX, 3). +-define(WORKER_MREF_IDX, 4). + +-define(ENSURE_ASYNC_FLUSH(InflightTID, EXPR), + (fun() -> + IsFullBefore = is_inflight_full(InflightTID), + case (EXPR) of + blocked -> + ok; + ok -> + ok = maybe_flush_after_async_reply(IsFullBefore) + end + end)() +). + +-type id() :: binary(). +-type index() :: pos_integer(). +-type expire_at() :: infinity | integer(). +-type queue_query() :: ?QUERY(reply_fun(), request(), HasBeenSent :: boolean(), expire_at()). +-type request() :: term(). +-type request_from() :: undefined | gen_statem:from(). +-type request_timeout() :: infinity | timer:time(). +-type health_check_interval() :: timer:time(). +-type state() :: blocked | running. +-type inflight_key() :: integer(). +-type data() :: #{ + id := id(), + index := index(), + inflight_tid := ets:tid(), + async_workers := #{pid() => reference()}, + batch_size := pos_integer(), + batch_time := timer:time(), + queue := replayq:q(), + resume_interval := timer:time(), + tref := undefined | timer:tref() +}. + +callback_mode() -> [state_functions, state_enter]. + +start_link(Id, Index, Opts) -> + gen_statem:start_link(?MODULE, {Id, Index, Opts}, []). + +-spec sync_query(id(), request(), query_opts()) -> Result :: term(). 
+sync_query(Id, Request, Opts0) -> + ?tp(sync_query, #{id => Id, request => Request, query_opts => Opts0}), + Opts1 = ensure_timeout_query_opts(Opts0, sync), + Opts = ensure_expire_at(Opts1), + PickKey = maps:get(pick_key, Opts, self()), + Timeout = maps:get(timeout, Opts), + emqx_resource_metrics:matched_inc(Id), + pick_call(Id, PickKey, {query, Request, Opts}, Timeout). + +-spec async_query(id(), request(), query_opts()) -> Result :: term(). +async_query(Id, Request, Opts0) -> + ?tp(async_query, #{id => Id, request => Request, query_opts => Opts0}), + Opts1 = ensure_timeout_query_opts(Opts0, async), + Opts = ensure_expire_at(Opts1), + PickKey = maps:get(pick_key, Opts, self()), + emqx_resource_metrics:matched_inc(Id), + pick_cast(Id, PickKey, {query, Request, Opts}). + +%% simple query the resource without batching and queuing. +-spec simple_sync_query(id(), request()) -> term(). +simple_sync_query(Id, Request) -> + %% Note: since calling this function implies bypassing the + %% buffer workers, and each buffer worker index is used when + %% collecting gauge metrics, we use this dummy index. If this + %% call ends up calling buffering functions, that's a bug and + %% would mess up the metrics anyway. `undefined' is ignored by + %% `emqx_resource_metrics:*_shift/3'. + ?tp(simple_sync_query, #{id => Id, request => Request}), + Index = undefined, + QueryOpts = simple_query_opts(), + emqx_resource_metrics:matched_inc(Id), + Ref = make_request_ref(), + Result = call_query(force_sync, Id, Index, Ref, ?SIMPLE_QUERY(Request), QueryOpts), + _ = handle_query_result(Id, Result, _HasBeenSent = false), + Result. + +%% simple async-query the resource without batching and queuing. +-spec simple_async_query(id(), request(), query_opts()) -> term(). +simple_async_query(Id, Request, QueryOpts0) -> + ?tp(simple_async_query, #{id => Id, request => Request, query_opts => QueryOpts0}), + Index = undefined, + QueryOpts = maps:merge(simple_query_opts(), QueryOpts0), + emqx_resource_metrics:matched_inc(Id), + Ref = make_request_ref(), + Result = call_query(async_if_possible, Id, Index, Ref, ?SIMPLE_QUERY(Request), QueryOpts), + _ = handle_query_result(Id, Result, _HasBeenSent = false), + Result. + +simple_query_opts() -> + ensure_expire_at(#{simple_query => true, timeout => infinity}). + +-spec block(pid()) -> ok. +block(ServerRef) -> + gen_statem:cast(ServerRef, block). + +-spec resume(pid()) -> ok. +resume(ServerRef) -> + gen_statem:cast(ServerRef, resume). + +-spec flush_worker(pid()) -> ok. +flush_worker(ServerRef) -> + gen_statem:cast(ServerRef, flush). + +-spec init({id(), pos_integer(), map()}) -> gen_statem:init_result(state(), data()).
+init({Id, Index, Opts}) -> + process_flag(trap_exit, true), + true = gproc_pool:connect_worker(Id, {Id, Index}), + BatchSize = maps:get(batch_size, Opts, ?DEFAULT_BATCH_SIZE), + QueueOpts = replayq_opts(Id, Index, Opts), + Queue = replayq:open(QueueOpts), + emqx_resource_metrics:queuing_set(Id, Index, queue_count(Queue)), + emqx_resource_metrics:inflight_set(Id, Index, 0), + InflightWinSize = maps:get(inflight_window, Opts, ?DEFAULT_INFLIGHT), + InflightTID = inflight_new(InflightWinSize, Id, Index), + HealthCheckInterval = maps:get(health_check_interval, Opts, ?HEALTHCHECK_INTERVAL), + RequestTimeout = maps:get(request_timeout, Opts, ?DEFAULT_REQUEST_TIMEOUT), + BatchTime0 = maps:get(batch_time, Opts, ?DEFAULT_BATCH_TIME), + BatchTime = adjust_batch_time(Id, RequestTimeout, BatchTime0), + DefaultResumeInterval = default_resume_interval(RequestTimeout, HealthCheckInterval), + ResumeInterval = maps:get(resume_interval, Opts, DefaultResumeInterval), + Data = #{ + id => Id, + index => Index, + inflight_tid => InflightTID, + async_workers => #{}, + batch_size => BatchSize, + batch_time => BatchTime, + queue => Queue, + resume_interval => ResumeInterval, + tref => undefined + }, + ?tp(buffer_worker_init, #{id => Id, index => Index, queue_opts => QueueOpts}), + {ok, running, Data}. + +running(enter, _, #{tref := _Tref} = Data) -> + ?tp(buffer_worker_enter_running, #{id => maps:get(id, Data), tref => _Tref}), + %% According to `gen_statem' laws, we mustn't call `maybe_flush' + %% directly because it may decide to return `{next_state, blocked, _}', + %% and that's an invalid response for a state enter call. + %% Returning a next event from a state enter call is also + %% prohibited. + {keep_state, ensure_flush_timer(Data, 0)}; +running(cast, resume, _St) -> + keep_state_and_data; +running(cast, flush, Data) -> + flush(Data); +running(cast, block, St) -> + {next_state, blocked, St}; +running(info, ?SEND_REQ(_ReplyTo, _Req) = Request0, Data) -> + handle_query_requests(Request0, Data); +running(info, {flush, Ref}, St = #{tref := {_TRef, Ref}}) -> + flush(St#{tref := undefined}); +running(info, {flush, _Ref}, _St) -> + ?tp(discarded_stale_flush, #{}), + keep_state_and_data; +running(info, {'DOWN', _MRef, process, Pid, Reason}, Data0 = #{async_workers := AsyncWorkers0}) when + is_map_key(Pid, AsyncWorkers0) +-> + ?SLOG(info, #{msg => async_worker_died, state => running, reason => Reason}), + handle_async_worker_down(Data0, Pid); +running(info, Info, _St) -> + ?SLOG(error, #{msg => unexpected_msg, state => running, info => Info}), + keep_state_and_data. 
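%% A sketch for illustration, not from this changeset: the `running' clauses
%% above depend on a flush timer whose message carries a unique reference, so
%% that a `{flush, Ref}' from an already-replaced timer can be discarded. The
%% real `ensure_flush_timer'/`cancel_flush_timer' are defined elsewhere in
%% this module and may differ:
ensure_flush_timer_sketch(Data = #{tref := undefined}, Timeout) ->
    Ref = make_ref(),
    TRef = erlang:send_after(Timeout, self(), {flush, Ref}),
    Data#{tref := {TRef, Ref}};
ensure_flush_timer_sketch(Data, _Timeout) ->
    %% a flush is already scheduled
    Data.

cancel_flush_timer_sketch(Data = #{tref := undefined}) ->
    Data;
cancel_flush_timer_sketch(Data = #{tref := {TRef, _Ref}}) ->
    _ = erlang:cancel_timer(TRef),
    Data#{tref := undefined}.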
+ +blocked(enter, _, #{resume_interval := ResumeT} = St0) -> + ?tp(buffer_worker_enter_blocked, #{}), + %% discard the old timer, new timer will be started when entering running state again + St = cancel_flush_timer(St0), + {keep_state, St, {state_timeout, ResumeT, unblock}}; +blocked(cast, block, _St) -> + keep_state_and_data; +blocked(cast, resume, St) -> + resume_from_blocked(St); +blocked(cast, flush, St) -> + resume_from_blocked(St); +blocked(state_timeout, unblock, St) -> + resume_from_blocked(St); +blocked(info, ?SEND_REQ(_ReplyTo, _Req) = Request0, Data0) -> + Data = collect_and_enqueue_query_requests(Request0, Data0), + {keep_state, Data}; +blocked(info, {flush, _Ref}, _Data) -> + %% ignore stale timer + keep_state_and_data; +blocked(info, {'DOWN', _MRef, process, Pid, Reason}, Data0 = #{async_workers := AsyncWorkers0}) when + is_map_key(Pid, AsyncWorkers0) +-> + ?SLOG(info, #{msg => async_worker_died, state => blocked, reason => Reason}), + handle_async_worker_down(Data0, Pid); +blocked(info, Info, _Data) -> + ?SLOG(error, #{msg => unexpected_msg, state => blocked, info => Info}), + keep_state_and_data. + +terminate(_Reason, #{id := Id, index := Index, queue := Q}) -> + _ = replayq:close(Q), + emqx_resource_metrics:inflight_set(Id, Index, 0), + %% since we want volatile queues, this will be 0 after + %% termination. + emqx_resource_metrics:queuing_set(Id, Index, 0), + gproc_pool:disconnect_worker(Id, {Id, Index}), + ok. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + +%%============================================================================== +-define(PICK(ID, KEY, PID, EXPR), + try + case gproc_pool:pick_worker(ID, KEY) of + PID when is_pid(PID) -> + EXPR; + _ -> + ?RESOURCE_ERROR(worker_not_created, "resource not created") + end + catch + error:badarg -> + ?RESOURCE_ERROR(worker_not_created, "resource not created"); + error:timeout -> + ?RESOURCE_ERROR(timeout, "call resource timeout") + end +). + +pick_call(Id, Key, Query, Timeout) -> + ?PICK(Id, Key, Pid, begin + MRef = erlang:monitor(process, Pid, [{alias, reply_demonitor}]), + ReplyTo = {fun ?MODULE:reply_call/2, [MRef]}, + erlang:send(Pid, ?SEND_REQ(ReplyTo, Query)), + receive + {MRef, Response} -> + erlang:demonitor(MRef, [flush]), + Response; + {'DOWN', MRef, process, Pid, Reason} -> + error({worker_down, Reason}) + after Timeout -> + erlang:demonitor(MRef, [flush]), + receive + {MRef, Response} -> + Response + after 0 -> + error(timeout) + end + end + end). + +pick_cast(Id, Key, Query) -> + ?PICK(Id, Key, Pid, begin + ReplyTo = undefined, + erlang:send(Pid, ?SEND_REQ(ReplyTo, Query)), + ok + end). + +resume_from_blocked(Data) -> + ?tp(buffer_worker_resume_from_blocked_enter, #{}), + #{ + id := Id, + index := Index, + inflight_tid := InflightTID + } = Data, + Now = now_(), + case inflight_get_first_retriable(InflightTID, Now) of + none -> + case is_inflight_full(InflightTID) of + true -> + {keep_state, Data}; + false -> + {next_state, running, Data} + end; + {expired, Ref, Batch} -> + IsAcked = ack_inflight(InflightTID, Ref, Id, Index), + IsAcked andalso emqx_resource_metrics:dropped_expired_inc(Id, length(Batch)), + ?tp(buffer_worker_retry_expired, #{expired => Batch}), + resume_from_blocked(Data); + {single, Ref, Query} -> + %% We retry messages in the inflight window synchronously, because if + %% we sent them async, they would be appended to the end of the inflight window again.
+ retry_inflight_sync(Ref, Query, Data); + {batch, Ref, NotExpired, []} -> + retry_inflight_sync(Ref, NotExpired, Data); + {batch, Ref, NotExpired, Expired} -> + NumExpired = length(Expired), + ok = update_inflight_item(InflightTID, Ref, NotExpired, NumExpired), + emqx_resource_metrics:dropped_expired_inc(Id, NumExpired), + ?tp(buffer_worker_retry_expired, #{expired => Expired}), + %% We retry messages in the inflight window synchronously, because if + %% we sent them async, they would be appended to the end of the inflight window again. + retry_inflight_sync(Ref, NotExpired, Data) + end. + +retry_inflight_sync(Ref, QueryOrBatch, Data0) -> + #{ + id := Id, + inflight_tid := InflightTID, + index := Index, + resume_interval := ResumeT + } = Data0, + ?tp(buffer_worker_retry_inflight, #{query_or_batch => QueryOrBatch, ref => Ref}), + QueryOpts = #{simple_query => false}, + Result = call_query(force_sync, Id, Index, Ref, QueryOrBatch, QueryOpts), + ReplyResult = + case QueryOrBatch of + ?QUERY(ReplyTo, _, HasBeenSent, _ExpireAt) -> + Reply = ?REPLY(ReplyTo, HasBeenSent, Result), + reply_caller_defer_metrics(Id, Reply, QueryOpts); + [?QUERY(_, _, _, _) | _] = Batch -> + batch_reply_caller_defer_metrics(Id, Result, Batch, QueryOpts) + end, + case ReplyResult of + %% Send failed because resource is down + {nack, PostFn} -> + PostFn(), + ?tp( + buffer_worker_retry_inflight_failed, + #{ + ref => Ref, + query_or_batch => QueryOrBatch + } + ), + {keep_state, Data0, {state_timeout, ResumeT, unblock}}; + %% Send ok or failed but the resource is working + {ack, PostFn} -> + IsAcked = ack_inflight(InflightTID, Ref, Id, Index), + %% we need to defer bumping the counters until after + %% `inflight_drop' to avoid the race condition when an + %% inflight request might get completed concurrently with + %% the retry, bumping them twice. Since both inflight + %% requests (repeated and original) have the same `Ref', + %% we bump the counter when removing it from the table. + IsAcked andalso PostFn(), + ?tp( + buffer_worker_retry_inflight_succeeded, + #{ + ref => Ref, + query_or_batch => QueryOrBatch + } + ), + resume_from_blocked(Data0) + end. + +%% Called during the `running' state only. +-spec handle_query_requests(?SEND_REQ(request_from(), request()), data()) -> + gen_statem:event_handler_result(state(), data()). +handle_query_requests(Request0, Data0) -> + Data = collect_and_enqueue_query_requests(Request0, Data0), + maybe_flush(Data). + +collect_and_enqueue_query_requests(Request0, Data0) -> + #{ + id := Id, + index := Index, + queue := Q + } = Data0, + Requests = collect_requests([Request0], ?COLLECT_REQ_LIMIT), + Queries = + lists:map( + fun + (?SEND_REQ(undefined = _ReplyTo, {query, Req, Opts})) -> + ReplyFun = maps:get(async_reply_fun, Opts, undefined), + HasBeenSent = false, + ExpireAt = maps:get(expire_at, Opts), + ?QUERY(ReplyFun, Req, HasBeenSent, ExpireAt); + (?SEND_REQ(ReplyTo, {query, Req, Opts})) -> + HasBeenSent = false, + ExpireAt = maps:get(expire_at, Opts), + ?QUERY(ReplyTo, Req, HasBeenSent, ExpireAt) + end, + Requests + ), + {Overflown, NewQ} = append_queue(Id, Index, Q, Queries), + ok = reply_overflown(Overflown), + Data0#{queue := NewQ}. + +reply_overflown([]) -> + ok; +reply_overflown([?QUERY(ReplyTo, _Req, _HasBeenSent, _ExpireAt) | More]) -> + do_reply_caller(ReplyTo, {error, buffer_overflow}), + reply_overflown(More).
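%% A hedged caller-side sketch, not from this changeset: overflown queries are
%% answered with `{error, buffer_overflow}' via `do_reply_caller/2' just
%% below, which applies a `{Fun, Args}' reply fun as `Fun(Args ++ [Result])'.
%% The resource id and request shape here are made up:
observe_overflow_sketch(ResId) ->
    ReplyFun = {fun(Ctx, Result) -> logger:warning("~p: ~p", [Ctx, Result]) end, [overflow_probe]},
    emqx_resource_buffer_worker:async_query(
        ResId,
        {send_message, #{}},
        #{async_reply_fun => ReplyFun}
    ).
%% On overflow the fun runs as Fun(overflow_probe, {error, buffer_overflow}).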
+ +do_reply_caller(undefined, _Result) -> + ok; +do_reply_caller({F, Args}, {async_return, Result}) -> + %% this is an early return to async caller, the retry + %% decision has to be made by the caller + do_reply_caller({F, Args}, Result); +do_reply_caller({F, Args}, Result) when is_function(F) -> + _ = erlang:apply(F, Args ++ [Result]), + ok. + +maybe_flush(Data0) -> + #{ + batch_size := BatchSize, + queue := Q + } = Data0, + QueueCount = queue_count(Q), + case QueueCount >= BatchSize of + true -> + flush(Data0); + false -> + {keep_state, ensure_flush_timer(Data0)} + end. + +%% Called during the `running' state only. +-spec flush(data()) -> gen_statem:event_handler_result(state(), data()). +flush(Data0) -> + #{ + id := Id, + index := Index, + batch_size := BatchSize, + inflight_tid := InflightTID, + queue := Q0 + } = Data0, + Data1 = cancel_flush_timer(Data0), + CurrentCount = queue_count(Q0), + IsFull = is_inflight_full(InflightTID), + ?tp_ignore_side_effects_in_prod(buffer_worker_flush, #{ + queued => CurrentCount, + is_inflight_full => IsFull, + inflight => inflight_count(InflightTID) + }), + case {CurrentCount, IsFull} of + {0, _} -> + ?tp_ignore_side_effects_in_prod(buffer_worker_queue_drained, #{ + inflight => inflight_count(InflightTID) + }), + {keep_state, Data1}; + {_, true} -> + ?tp(buffer_worker_flush_but_inflight_full, #{}), + Data2 = ensure_flush_timer(Data1), + {keep_state, Data2}; + {_, false} -> + ?tp(buffer_worker_flush_before_pop, #{}), + {Q1, QAckRef, Batch} = replayq:pop(Q0, #{count_limit => BatchSize}), + Data2 = Data1#{queue := Q1}, + ?tp(buffer_worker_flush_before_sieve_expired, #{}), + Now = now_(), + %% if the request has expired, the caller is no longer + %% waiting for a response. + case sieve_expired_requests(Batch, Now) of + {[], _AllExpired} -> + ok = replayq:ack(Q1, QAckRef), + emqx_resource_metrics:dropped_expired_inc(Id, length(Batch)), + emqx_resource_metrics:queuing_set(Id, Index, queue_count(Q1)), + ?tp(buffer_worker_flush_all_expired, #{batch => Batch}), + flush(Data2); + {NotExpired, Expired} -> + NumExpired = length(Expired), + emqx_resource_metrics:dropped_expired_inc(Id, NumExpired), + IsBatch = (BatchSize > 1), + %% We *must* use the new queue, because we currently can't + %% `nack' a `pop'. + %% Maybe we could re-open the queue? + ?tp( + buffer_worker_flush_potentially_partial, + #{expired => Expired, not_expired => NotExpired} + ), + Ref = make_request_ref(), + do_flush(Data2, #{ + is_batch => IsBatch, + batch => NotExpired, + ref => Ref, + ack_ref => QAckRef + }) + end + end. + +-spec do_flush(data(), #{ + is_batch := boolean(), + batch := [queue_query()], + ack_ref := replayq:ack_ref(), + ref := inflight_key() +}) -> + gen_statem:event_handler_result(state(), data()). +do_flush( + #{queue := Q1} = Data0, + #{ + is_batch := false, + batch := Batch, + ref := Ref, + ack_ref := QAckRef + } +) -> + #{ + id := Id, + index := Index, + inflight_tid := InflightTID + } = Data0, + %% unwrap when not batching (i.e., batch size == 1) + [?QUERY(ReplyTo, _, HasBeenSent, _ExpireAt) = Request] = Batch, + QueryOpts = #{inflight_tid => InflightTID, simple_query => false}, + Result = call_query(async_if_possible, Id, Index, Ref, Request, QueryOpts), + Reply = ?REPLY(ReplyTo, HasBeenSent, Result), + case reply_caller(Id, Reply, QueryOpts) of + %% Failed; remove the request from the queue, as we cannot pop + %% from it again, but we'll retry it using the inflight table. 
+ nack -> + ok = replayq:ack(Q1, QAckRef), + %% we set it atomically just below; a limitation of having + %% to use tuples for atomic ets updates + IsRetriable = true, + WorkerMRef0 = undefined, + InflightItem = ?INFLIGHT_ITEM(Ref, Request, IsRetriable, WorkerMRef0), + %% we must append again to the table to ensure that the + %% request will be retried (i.e., it might not have been + %% inserted during `call_query' if the resource was down + %% and/or if it was a sync request). + inflight_append(InflightTID, InflightItem, Id, Index), + mark_inflight_as_retriable(InflightTID, Ref), + {Data1, WorkerMRef} = ensure_async_worker_monitored(Data0, Result), + store_async_worker_reference(InflightTID, Ref, WorkerMRef), + emqx_resource_metrics:queuing_set(Id, Index, queue_count(Q1)), + ?tp( + buffer_worker_flush_nack, + #{ + ref => Ref, + is_retriable => IsRetriable, + batch_or_query => Request, + result => Result + } + ), + {next_state, blocked, Data1}; + %% Success; just ack. + ack -> + ok = replayq:ack(Q1, QAckRef), + %% Async requests are acked later when the async worker + %% calls the corresponding callback function. Also, we + %% must ensure the async worker is being monitored for + %% such requests. + IsUnrecoverableError = is_unrecoverable_error(Result), + case is_async_return(Result) of + true when IsUnrecoverableError -> + ack_inflight(InflightTID, Ref, Id, Index); + true -> + ok; + false -> + ack_inflight(InflightTID, Ref, Id, Index) + end, + {Data1, WorkerMRef} = ensure_async_worker_monitored(Data0, Result), + store_async_worker_reference(InflightTID, Ref, WorkerMRef), + emqx_resource_metrics:queuing_set(Id, Index, queue_count(Q1)), + ?tp( + buffer_worker_flush_ack, + #{ + batch_or_query => Request, + result => Result + } + ), + CurrentCount = queue_count(Q1), + case CurrentCount > 0 of + true -> + ?tp(buffer_worker_flush_ack_reflush, #{ + batch_or_query => Request, result => Result, queue_count => CurrentCount + }), + flush_worker(self()); + false -> + ?tp_ignore_side_effects_in_prod(buffer_worker_queue_drained, #{ + inflight => inflight_count(InflightTID) + }), + ok + end, + {keep_state, Data1} + end; +do_flush(#{queue := Q1} = Data0, #{ + is_batch := true, + batch := Batch, + ref := Ref, + ack_ref := QAckRef +}) -> + #{ + id := Id, + index := Index, + batch_size := BatchSize, + inflight_tid := InflightTID + } = Data0, + QueryOpts = #{inflight_tid => InflightTID, simple_query => false}, + Result = call_query(async_if_possible, Id, Index, Ref, Batch, QueryOpts), + case batch_reply_caller(Id, Result, Batch, QueryOpts) of + %% Failed; remove the request from the queue, as we cannot pop + %% from it again, but we'll retry it using the inflight table. + nack -> + ok = replayq:ack(Q1, QAckRef), + %% we set it atomically just below; a limitation of having + %% to use tuples for atomic ets updates + IsRetriable = true, + WorkerMRef0 = undefined, + InflightItem = ?INFLIGHT_ITEM(Ref, Batch, IsRetriable, WorkerMRef0), + %% we must append again to the table to ensure that the + %% request will be retried (i.e., it might not have been + %% inserted during `call_query' if the resource was down + %% and/or if it was a sync request). 
+ inflight_append(InflightTID, InflightItem, Id, Index), + mark_inflight_as_retriable(InflightTID, Ref), + {Data1, WorkerMRef} = ensure_async_worker_monitored(Data0, Result), + store_async_worker_reference(InflightTID, Ref, WorkerMRef), + emqx_resource_metrics:queuing_set(Id, Index, queue_count(Q1)), + ?tp( + buffer_worker_flush_nack, + #{ + ref => Ref, + is_retriable => IsRetriable, + batch_or_query => Batch, + result => Result + } + ), + {next_state, blocked, Data1}; + %% Success; just ack. + ack -> + ok = replayq:ack(Q1, QAckRef), + %% Async requests are acked later when the async worker + %% calls the corresponding callback function. Also, we + %% must ensure the async worker is being monitored for + %% such requests. + IsUnrecoverableError = is_unrecoverable_error(Result), + case is_async_return(Result) of + true when IsUnrecoverableError -> + ack_inflight(InflightTID, Ref, Id, Index); + true -> + ok; + false -> + ack_inflight(InflightTID, Ref, Id, Index) + end, + {Data1, WorkerMRef} = ensure_async_worker_monitored(Data0, Result), + store_async_worker_reference(InflightTID, Ref, WorkerMRef), + emqx_resource_metrics:queuing_set(Id, Index, queue_count(Q1)), + CurrentCount = queue_count(Q1), + ?tp( + buffer_worker_flush_ack, + #{ + batch_or_query => Batch, + result => Result, + queue_count => CurrentCount + } + ), + Data2 = + case {CurrentCount > 0, CurrentCount >= BatchSize} of + {false, _} -> + ?tp_ignore_side_effects_in_prod(buffer_worker_queue_drained, #{ + inflight => inflight_count(InflightTID) + }), + Data1; + {true, true} -> + ?tp(buffer_worker_flush_ack_reflush, #{ + batch_or_query => Batch, + result => Result, + queue_count => CurrentCount, + batch_size => BatchSize + }), + flush_worker(self()), + Data1; + {true, false} -> + ensure_flush_timer(Data1) + end, + {keep_state, Data2} + end. + +batch_reply_caller(Id, BatchResult, Batch, QueryOpts) -> + {ShouldBlock, PostFn} = batch_reply_caller_defer_metrics(Id, BatchResult, Batch, QueryOpts), + PostFn(), + ShouldBlock. + +batch_reply_caller_defer_metrics(Id, BatchResult, Batch, QueryOpts) -> + %% the `Mod:on_batch_query/3` returns a single result for a batch, + %% so we need to expand + Replies = lists:map( + fun(?QUERY(FROM, _REQUEST, SENT, _EXPIRE_AT)) -> + ?REPLY(FROM, SENT, BatchResult) + end, + Batch + ), + {ShouldAck, PostFns} = + lists:foldl( + fun(Reply, {_ShouldAck, PostFns}) -> + %% _ShouldAck should be the same as ShouldAck starting from the second reply + {ShouldAck, PostFn} = reply_caller_defer_metrics(Id, Reply, QueryOpts), + {ShouldAck, [PostFn | PostFns]} + end, + {ack, []}, + Replies + ), + PostFn = fun() -> lists:foreach(fun(F) -> F() end, lists:reverse(PostFns)) end, + {ShouldAck, PostFn}. + +reply_caller(Id, Reply, QueryOpts) -> + {ShouldAck, PostFn} = reply_caller_defer_metrics(Id, Reply, QueryOpts), + PostFn(), + ShouldAck. + +%% Should only reply to the caller when the decision is final (not +%% retriable). See comment on `handle_query_result_pure'. 
+reply_caller_defer_metrics(Id, ?REPLY(undefined, HasBeenSent, Result), _QueryOpts) -> + handle_query_result_pure(Id, Result, HasBeenSent); +reply_caller_defer_metrics(Id, ?REPLY(ReplyTo, HasBeenSent, Result), QueryOpts) -> + IsSimpleQuery = maps:get(simple_query, QueryOpts, false), + IsUnrecoverableError = is_unrecoverable_error(Result), + {ShouldAck, PostFn} = handle_query_result_pure(Id, Result, HasBeenSent), + case {ShouldAck, Result, IsUnrecoverableError, IsSimpleQuery} of + {ack, {async_return, _}, true, _} -> + ok = do_reply_caller(ReplyTo, Result); + {ack, {async_return, _}, false, _} -> + ok; + {_, _, _, true} -> + ok = do_reply_caller(ReplyTo, Result); + {nack, _, _, _} -> + ok; + {ack, _, _, _} -> + ok = do_reply_caller(ReplyTo, Result) + end, + {ShouldAck, PostFn}. + +handle_query_result(Id, Result, HasBeenSent) -> + {ShouldBlock, PostFn} = handle_query_result_pure(Id, Result, HasBeenSent), + PostFn(), + ShouldBlock. + +%% We should always retry (nack), except when: +%% * resource is not found +%% * resource is stopped +%% * the result is a success (or at least a delayed result) +%% We also retry even sync requests. In that case, we shouldn't reply +%% the caller until one of those final results above happen. +handle_query_result_pure(_Id, ?RESOURCE_ERROR_M(exception, Msg), _HasBeenSent) -> + PostFn = fun() -> + ?SLOG(error, #{msg => resource_exception, info => Msg}), + ok + end, + {nack, PostFn}; +handle_query_result_pure(_Id, ?RESOURCE_ERROR_M(NotWorking, _), _HasBeenSent) when + NotWorking == not_connected; NotWorking == blocked +-> + {nack, fun() -> ok end}; +handle_query_result_pure(Id, ?RESOURCE_ERROR_M(not_found, Msg), _HasBeenSent) -> + PostFn = fun() -> + ?SLOG(error, #{id => Id, msg => resource_not_found, info => Msg}), + emqx_resource_metrics:dropped_resource_not_found_inc(Id), + ok + end, + {ack, PostFn}; +handle_query_result_pure(Id, ?RESOURCE_ERROR_M(stopped, Msg), _HasBeenSent) -> + PostFn = fun() -> + ?SLOG(error, #{id => Id, msg => resource_stopped, info => Msg}), + emqx_resource_metrics:dropped_resource_stopped_inc(Id), + ok + end, + {ack, PostFn}; +handle_query_result_pure(Id, ?RESOURCE_ERROR_M(Reason, _), _HasBeenSent) -> + PostFn = fun() -> + ?SLOG(error, #{id => Id, msg => other_resource_error, reason => Reason}), + ok + end, + {nack, PostFn}; +handle_query_result_pure(Id, {error, Reason} = Error, HasBeenSent) -> + case is_unrecoverable_error(Error) of + true -> + PostFn = + fun() -> + ?SLOG(error, #{id => Id, msg => unrecoverable_error, reason => Reason}), + inc_sent_failed(Id, HasBeenSent), + ok + end, + {ack, PostFn}; + false -> + PostFn = + fun() -> + ?SLOG(error, #{id => Id, msg => send_error, reason => Reason}), + ok + end, + {nack, PostFn} + end; +handle_query_result_pure(Id, {async_return, Result}, HasBeenSent) -> + handle_query_async_result_pure(Id, Result, HasBeenSent); +handle_query_result_pure(Id, Result, HasBeenSent) -> + PostFn = fun() -> + assert_ok_result(Result), + inc_sent_success(Id, HasBeenSent), + ok + end, + {ack, PostFn}. 
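%% A sketch for illustration, not from this changeset: the clauses above
%% delegate the ack/nack decision for `{error, _}' results to
%% `is_unrecoverable_error/1', which is defined elsewhere in this module. The
%% sketch below is consistent with its call sites here (it must also see
%% through `{async_return, _}' wrappers):
is_unrecoverable_error_sketch({error, {unrecoverable_error, _Reason}}) -> true;
is_unrecoverable_error_sketch({async_return, Result}) -> is_unrecoverable_error_sketch(Result);
is_unrecoverable_error_sketch(_Other) -> false.
%% So, for example: `ok' and `{error, {unrecoverable_error, R}}' are acked
%% (the latter counted as a send failure), while `{error, timeout}' and
%% `?RESOURCE_ERROR_M(not_connected, _)' are nacked and retried.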
+ +handle_query_async_result_pure(Id, {error, Reason} = Error, HasBeenSent) -> + case is_unrecoverable_error(Error) of + true -> + PostFn = + fun() -> + ?SLOG(error, #{id => Id, msg => unrecoverable_error, reason => Reason}), + inc_sent_failed(Id, HasBeenSent), + ok + end, + {ack, PostFn}; + false -> + PostFn = fun() -> + ?SLOG(error, #{id => Id, msg => async_send_error, reason => Reason}), + ok + end, + {nack, PostFn} + end; +handle_query_async_result_pure(_Id, {ok, Pid}, _HasBeenSent) when is_pid(Pid) -> + {ack, fun() -> ok end}; +handle_query_async_result_pure(_Id, ok, _HasBeenSent) -> + {ack, fun() -> ok end}. + +handle_async_worker_down(Data0, Pid) -> + #{async_workers := AsyncWorkers0} = Data0, + {WorkerMRef, AsyncWorkers} = maps:take(Pid, AsyncWorkers0), + Data = Data0#{async_workers := AsyncWorkers}, + mark_inflight_items_as_retriable(Data, WorkerMRef), + {keep_state, Data}. + +-spec call_query(force_sync | async_if_possible, _, _, _, _, _) -> _. +call_query(QM, Id, Index, Ref, Query, QueryOpts) -> + ?tp(call_query_enter, #{id => Id, query => Query, query_mode => QM}), + case emqx_resource_manager:lookup_cached(Id) of + {ok, _Group, #{status := stopped}} -> + ?RESOURCE_ERROR(stopped, "resource stopped or disabled"); + {ok, _Group, Resource} -> + do_call_query(QM, Id, Index, Ref, Query, QueryOpts, Resource); + {error, not_found} -> + ?RESOURCE_ERROR(not_found, "resource not found") + end. + +do_call_query(QM, Id, Index, Ref, Query, #{is_buffer_supported := true} = QueryOpts, Resource) -> + %% The connector supports buffering, so send even in the disconnected state + #{mod := Mod, state := ResSt, callback_mode := CBM} = Resource, + CallMode = call_mode(QM, CBM), + apply_query_fun(CallMode, Mod, Id, Index, Ref, Query, ResSt, QueryOpts); +do_call_query(QM, Id, Index, Ref, Query, QueryOpts, #{status := connected} = Resource) -> + %% when calling from the buffer worker or other simple queries, + %% only apply the query fun when it's at connected status + #{mod := Mod, state := ResSt, callback_mode := CBM} = Resource, + CallMode = call_mode(QM, CBM), + apply_query_fun(CallMode, Mod, Id, Index, Ref, Query, ResSt, QueryOpts); +do_call_query(_QM, _Id, _Index, _Ref, _Query, _QueryOpts, _Data) -> + ?RESOURCE_ERROR(not_connected, "resource not connected"). + +-define(APPLY_RESOURCE(NAME, EXPR, REQ), + try + %% if the callback module (connector) wants to return an error that + %% makes the current resource go into the `blocked` state, it should + %% return `{error, {recoverable_error, Reason}}` + EXPR + catch + %% For convenience, and to make the code in the callbacks cleaner, error + %% exceptions in the two following formats are translated to the + %% corresponding return values. The receiver of the return values + %% recognizes these special return formats and uses them to decide + %% whether a request should be retried. + error:{unrecoverable_error, Msg} -> + {error, {unrecoverable_error, Msg}}; + error:{recoverable_error, Msg} -> + {error, {recoverable_error, Msg}}; + ERR:REASON:STACKTRACE -> + ?RESOURCE_ERROR(exception, #{ + name => NAME, + id => Id, + request => REQ, + error => {ERR, REASON}, + stacktrace => STACKTRACE + }) + end +).
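%% A hedged sketch, not from this changeset: a connector `on_query/3' honoring
%% the error contract documented in `?APPLY_RESOURCE' above (`my_driver' and
%% its return values are hypothetical):
on_query_sketch(_ResId, Request, #{conn := Conn} = _State) ->
    case my_driver:send(Conn, Request) of
        ok ->
            ok;
        {error, timeout} ->
            %% transient failure: the buffer worker blocks and retries
            {error, {recoverable_error, timeout}};
        {error, Reason} ->
            %% permanent failure: the request is dropped and counted as failed
            {error, {unrecoverable_error, Reason}}
    end.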
+ +apply_query_fun(sync, Mod, Id, _Index, _Ref, ?QUERY(_, Request, _, _) = _Query, ResSt, _QueryOpts) -> + ?tp(call_query, #{id => Id, mod => Mod, query => _Query, res_st => ResSt, call_mode => sync}), + ?APPLY_RESOURCE(call_query, Mod:on_query(Id, Request, ResSt), Request); +apply_query_fun(async, Mod, Id, Index, Ref, ?QUERY(_, Request, _, _) = Query, ResSt, QueryOpts) -> + ?tp(call_query_async, #{ + id => Id, mod => Mod, query => Query, res_st => ResSt, call_mode => async + }), + InflightTID = maps:get(inflight_tid, QueryOpts, undefined), + ?APPLY_RESOURCE( + call_query_async, + begin + ReplyFun = fun ?MODULE:handle_async_reply/2, + ReplyContext = #{ + buffer_worker => self(), + resource_id => Id, + worker_index => Index, + inflight_tid => InflightTID, + request_ref => Ref, + query_opts => QueryOpts, + min_query => minimize(Query) + }, + IsRetriable = false, + WorkerMRef = undefined, + InflightItem = ?INFLIGHT_ITEM(Ref, Query, IsRetriable, WorkerMRef), + ok = inflight_append(InflightTID, InflightItem, Id, Index), + Result = Mod:on_query_async(Id, Request, {ReplyFun, [ReplyContext]}, ResSt), + {async_return, Result} + end, + Request + ); +apply_query_fun(sync, Mod, Id, _Index, _Ref, [?QUERY(_, _, _, _) | _] = Batch, ResSt, _QueryOpts) -> + ?tp(call_batch_query, #{ + id => Id, mod => Mod, batch => Batch, res_st => ResSt, call_mode => sync + }), + Requests = lists:map(fun(?QUERY(_ReplyTo, Request, _, _ExpireAt)) -> Request end, Batch), + ?APPLY_RESOURCE(call_batch_query, Mod:on_batch_query(Id, Requests, ResSt), Batch); +apply_query_fun(async, Mod, Id, Index, Ref, [?QUERY(_, _, _, _) | _] = Batch, ResSt, QueryOpts) -> + ?tp(call_batch_query_async, #{ + id => Id, mod => Mod, batch => Batch, res_st => ResSt, call_mode => async + }), + InflightTID = maps:get(inflight_tid, QueryOpts, undefined), + ?APPLY_RESOURCE( + call_batch_query_async, + begin + ReplyFun = fun ?MODULE:handle_async_batch_reply/2, + ReplyContext = #{ + buffer_worker => self(), + resource_id => Id, + worker_index => Index, + inflight_tid => InflightTID, + request_ref => Ref, + query_opts => QueryOpts, + min_batch => minimize(Batch) + }, + Requests = lists:map( + fun(?QUERY(_ReplyTo, Request, _, _ExpireAt)) -> Request end, Batch + ), + IsRetriable = false, + WorkerMRef = undefined, + InflightItem = ?INFLIGHT_ITEM(Ref, Batch, IsRetriable, WorkerMRef), + ok = inflight_append(InflightTID, InflightItem, Id, Index), + Result = Mod:on_batch_query_async(Id, Requests, {ReplyFun, [ReplyContext]}, ResSt), + {async_return, Result} + end, + Batch + ). + +handle_async_reply( + #{ + request_ref := Ref, + inflight_tid := InflightTID, + query_opts := Opts + } = ReplyContext, + Result +) -> + case maybe_handle_unknown_async_reply(InflightTID, Ref, Opts) of + discard -> + ok; + continue -> + ?ENSURE_ASYNC_FLUSH(InflightTID, handle_async_reply1(ReplyContext, Result)) + end. + +handle_async_reply1( + #{ + request_ref := Ref, + inflight_tid := InflightTID, + resource_id := Id, + worker_index := Index, + min_query := ?QUERY(_, _, _, ExpireAt) = _Query + } = ReplyContext, + Result +) -> + ?tp( + handle_async_reply_enter, + #{batch_or_query => [_Query], ref => Ref, result => Result} + ), + Now = now_(), + case is_expired(ExpireAt, Now) of + true -> + IsAcked = ack_inflight(InflightTID, Ref, Id, Index), + IsAcked andalso emqx_resource_metrics:late_reply_inc(Id), + ?tp(handle_async_reply_expired, #{expired => [_Query]}), + ok; + false -> + do_handle_async_reply(ReplyContext, Result) + end. 
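%% Plausible sketches, not from this changeset, of the small time helpers used
%% in the expiry check above; the time unit is an internal detail, monotonic
%% milliseconds are assumed here:
now_sketch() ->
    erlang:monotonic_time(millisecond).

is_expired_sketch(infinity = _ExpireAt, _Now) ->
    false;
is_expired_sketch(ExpireAt, Now) ->
    Now >= ExpireAt.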
+ +do_handle_async_reply( + #{ + query_opts := QueryOpts, + resource_id := Id, + request_ref := Ref, + worker_index := Index, + buffer_worker := Pid, + inflight_tid := InflightTID, + min_query := ?QUERY(ReplyTo, _, Sent, _ExpireAt) = _Query + }, + Result +) -> + %% NOTE: 'inflight' is the count of messages that were sent async + %% but received no ACK, NOT the number of messages queued in the + %% inflight window. + {Action, PostFn} = reply_caller_defer_metrics( + Id, ?REPLY(ReplyTo, Sent, Result), QueryOpts + ), + + ?tp(handle_async_reply, #{ + action => Action, + batch_or_query => [_Query], + ref => Ref, + result => Result + }), + case Action of + nack -> + %% Keep retrying. + ok = mark_inflight_as_retriable(InflightTID, Ref), + ok = ?MODULE:block(Pid), + blocked; + ack -> + ok = do_async_ack(InflightTID, Ref, Id, Index, PostFn, QueryOpts) + end. + +handle_async_batch_reply( + #{ + inflight_tid := InflightTID, + request_ref := Ref, + query_opts := Opts + } = ReplyContext, + Result +) -> + case maybe_handle_unknown_async_reply(InflightTID, Ref, Opts) of + discard -> + ok; + continue -> + ?ENSURE_ASYNC_FLUSH(InflightTID, handle_async_batch_reply1(ReplyContext, Result)) + end. + +handle_async_batch_reply1( + #{ + inflight_tid := InflightTID, + request_ref := Ref, + min_batch := Batch + } = ReplyContext, + Result +) -> + ?tp( + handle_async_reply_enter, + #{batch_or_query => Batch, ref => Ref, result => Result} + ), + Now = now_(), + case sieve_expired_requests(Batch, Now) of + {_NotExpired, []} -> + %% this is the critical code path, + %% we try not to do ets:lookup in this case + %% because the batch can be quite big + do_handle_async_batch_reply(ReplyContext, Result); + {_NotExpired, _Expired} -> + %% at least one is expired + %% the batch from reply context is minimized, so it cannot be used + %% to update the inflight items, hence discard Batch and lookup the RealBatch + ?tp(handle_async_reply_expired, #{expired => _Expired}), + handle_async_batch_reply2(ets:lookup(InflightTID, Ref), ReplyContext, Result, Now) + end. + +handle_async_batch_reply2([], _, _, _) -> + %% this usually should never happen unless the async callback is being evaluated concurrently + ok; +handle_async_batch_reply2([Inflight], ReplyContext, Result, Now) -> + ?INFLIGHT_ITEM(_, RealBatch, _IsRetriable, _WorkerMRef) = Inflight, + #{ + resource_id := Id, + worker_index := Index, + inflight_tid := InflightTID, + request_ref := Ref, + min_batch := Batch + } = ReplyContext, + %% All batch items share the same HasBeenSent flag + %% So we just take the original flag from the ReplyContext batch + %% and put it back to the batch found in inflight table + %% which must have already been set to `false` + [?QUERY(_ReplyTo, _, HasBeenSent, _ExpireAt) | _] = Batch, + {RealNotExpired0, RealExpired} = sieve_expired_requests(RealBatch, Now), + RealNotExpired = + lists:map( + fun(?QUERY(ReplyTo, CoreReq, _HasBeenSent, ExpireAt)) -> + ?QUERY(ReplyTo, CoreReq, HasBeenSent, ExpireAt) + end, + RealNotExpired0 + ), + NumExpired = length(RealExpired), + emqx_resource_metrics:late_reply_inc(Id, NumExpired), + case RealNotExpired of + [] -> + %% all expired, no need to update back the inflight batch + _ = ack_inflight(InflightTID, Ref, Id, Index), + ok; + _ -> + %% some queries are not expired, put them back to the inflight batch + %% so it can be either acked now or retried later + ok = update_inflight_item(InflightTID, Ref, RealNotExpired, NumExpired), + do_handle_async_batch_reply(ReplyContext#{min_batch := RealNotExpired}, Result) + end. 
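%% For illustration, not from this changeset: every call site expects
%% `sieve_expired_requests/2' to split a batch into `{NotExpired, Expired}';
%% a sketch consistent with that shape:
sieve_expired_requests_sketch(Batch, Now) ->
    lists:partition(
        fun(?QUERY(_ReplyTo, _CoreReq, _HasBeenSent, ExpireAt)) ->
            not is_expired(ExpireAt, Now)
        end,
        Batch
    ).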
+ +do_handle_async_batch_reply( + #{ + buffer_worker := Pid, + resource_id := Id, + worker_index := Index, + inflight_tid := InflightTID, + request_ref := Ref, + min_batch := Batch, + query_opts := QueryOpts + }, + Result +) -> + {Action, PostFn} = batch_reply_caller_defer_metrics(Id, Result, Batch, QueryOpts), + ?tp(handle_async_reply, #{ + action => Action, + batch_or_query => Batch, + ref => Ref, + result => Result + }), + case Action of + nack -> + %% Keep retrying. + ok = mark_inflight_as_retriable(InflightTID, Ref), + ok = ?MODULE:block(Pid), + blocked; + ack -> + ok = do_async_ack(InflightTID, Ref, Id, Index, PostFn, QueryOpts) + end. + +do_async_ack(InflightTID, Ref, Id, Index, PostFn, QueryOpts) -> + IsKnownRef = ack_inflight(InflightTID, Ref, Id, Index), + case maps:get(simple_query, QueryOpts, false) of + true -> + PostFn(); + false when IsKnownRef -> + PostFn(); + false -> + ok + end, + ok. + +maybe_flush_after_async_reply(_WasFullBeforeReplyHandled = false) -> + %% the inflight table was not full before the async reply was handled; + %% after it is handled, the inflight table must be even smaller, + %% hence we can rely on the buffer worker's flush timer to trigger + %% the next flush + ?tp(skip_flushing_worker, #{}), + ok; +maybe_flush_after_async_reply(_WasFullBeforeReplyHandled = true) -> + %% the inflight table was full before handling the async reply + ?tp(do_flushing_worker, #{}), + ok = ?MODULE:flush_worker(self()). + +%% check if the async reply is valid, +%% e.g. if a connector evaluates the callback more than once: +%% 1. If the request was previously deleted from the inflight table because it +%% either succeeded or expired, this function logs a +%% warning message and returns a 'discard' instruction. +%% 2. If the request previously failed and is now pending a retry, +%% then this function returns 'continue', as there is no way to +%% tell whether this reply is stale or not. +maybe_handle_unknown_async_reply(undefined, _Ref, #{simple_query := true}) -> + continue; +maybe_handle_unknown_async_reply(InflightTID, Ref, #{}) -> + try ets:member(InflightTID, Ref) of + true -> + continue; + false -> + ?tp( + warning, + unknown_async_reply_discarded, + #{inflight_key => Ref} + ), + discard + catch + error:badarg -> + %% shutdown ? + discard + end. + +%%============================================================================== +%% operations for queue +queue_item_marshaller(Bin) when is_binary(Bin) -> + binary_to_term(Bin); +queue_item_marshaller(Item) -> + term_to_binary(Item). + +estimate_size(QItem) -> + erlang:external_size(QItem). + +-spec append_queue(id(), index(), replayq:q(), [queue_query()]) -> + {[queue_query()], replayq:q()}. +append_queue(Id, Index, Q, Queries) -> + %% this assertion is to ensure that we never append a raw binary, + %% because the marshaller would mistake it for an already-marshalled item.
+ false = is_binary(hd(Queries)), + Q0 = replayq:append(Q, Queries), + {Overflown, Q2} = + case replayq:overflow(Q0) of + OverflownBytes when OverflownBytes =< 0 -> + {[], Q0}; + OverflownBytes -> + PopOpts = #{bytes_limit => OverflownBytes, count_limit => 999999999}, + {Q1, QAckRef, Items2} = replayq:pop(Q0, PopOpts), + ok = replayq:ack(Q1, QAckRef), + Dropped = length(Items2), + emqx_resource_metrics:dropped_queue_full_inc(Id, Dropped), + ?SLOG(info, #{ + msg => buffer_worker_overflow, + resource_id => Id, + worker_index => Index, + dropped => Dropped + }), + {Items2, Q1} + end, + emqx_resource_metrics:queuing_set(Id, Index, queue_count(Q2)), + ?tp( + buffer_worker_appended_to_queue, + #{ + id => Id, + items => Queries, + queue_count => queue_count(Q2), + overflown => length(Overflown) + } + ), + {Overflown, Q2}. + +%%============================================================================== +%% the inflight queue for async query +-define(MAX_SIZE_REF, max_size). +-define(SIZE_REF, size). +-define(BATCH_COUNT_REF, batch_count). +-define(INITIAL_TIME_REF, initial_time). +-define(INITIAL_MONOTONIC_TIME_REF, initial_monotonic_time). + +inflight_new(InfltWinSZ, Id, Index) -> + TableId = ets:new( + emqx_resource_buffer_worker_inflight_tab, + [ordered_set, public, {write_concurrency, true}] + ), + inflight_append(TableId, {?MAX_SIZE_REF, InfltWinSZ}, Id, Index), + %% we use this counter because we might deal with batches as + %% elements. + inflight_append(TableId, {?SIZE_REF, 0}, Id, Index), + inflight_append(TableId, {?BATCH_COUNT_REF, 0}, Id, Index), + inflight_append(TableId, {?INITIAL_TIME_REF, erlang:system_time()}, Id, Index), + inflight_append( + TableId, {?INITIAL_MONOTONIC_TIME_REF, make_request_ref()}, Id, Index + ), + TableId. + +-spec inflight_get_first_retriable(ets:tid(), integer()) -> + none + | {expired, inflight_key(), [queue_query()]} + | {single, inflight_key(), queue_query()} + | {batch, inflight_key(), _NotExpired :: [queue_query()], _Expired :: [queue_query()]}. +inflight_get_first_retriable(InflightTID, Now) -> + MatchSpec = + ets:fun2ms( + fun(?INFLIGHT_ITEM(Ref, BatchOrQuery, IsRetriable, _WorkerMRef)) when + IsRetriable =:= true + -> + {Ref, BatchOrQuery} + end + ), + case ets:select(InflightTID, MatchSpec, _Limit = 1) of + '$end_of_table' -> + none; + {[{Ref, Query = ?QUERY(_ReplyTo, _CoreReq, _HasBeenSent, ExpireAt)}], _Continuation} -> + case is_expired(ExpireAt, Now) of + true -> + {expired, Ref, [Query]}; + false -> + {single, Ref, Query} + end; + {[{Ref, Batch = [_ | _]}], _Continuation} -> + case sieve_expired_requests(Batch, Now) of + {[], _AllExpired} -> + {expired, Ref, Batch}; + {NotExpired, Expired} -> + {batch, Ref, NotExpired, Expired} + end + end. + +is_inflight_full(undefined) -> + false; +is_inflight_full(InflightTID) -> + [{_, MaxSize}] = ets:lookup(InflightTID, ?MAX_SIZE_REF), + %% we consider number of batches rather than number of messages + %% because one batch request may hold several messages. + Size = inflight_count(InflightTID), + Size >= MaxSize. + +inflight_count(InflightTID) -> + emqx_utils_ets:lookup_value(InflightTID, ?BATCH_COUNT_REF, 0). + +inflight_num_msgs(InflightTID) -> + [{_, Size}] = ets:lookup(InflightTID, ?SIZE_REF), + Size. 
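Worth spelling out the accounting rule above: the inflight window is measured in batches (the ?BATCH_COUNT_REF row), while the gauge reported to the metrics subsystem counts individual messages (the ?SIZE_REF row). A standalone sketch of the two counters, using a bare ETS table shaped like the inflight table; the metrics calls and macros are omitted and the names are assumptions:

demo_window_accounting() ->
    Tab = ets:new(demo_inflight, [ordered_set, public]),
    true = ets:insert(Tab, [{max_size, 2}, {size, 0}, {batch_count, 0}]),
    %% appending one batch of 10 messages: one window slot, ten messages
    _ = ets:update_counter(Tab, batch_count, {2, 1}),
    _ = ets:update_counter(Tab, size, {2, 10}),
    [{_, Max}] = ets:lookup(Tab, max_size),
    %% false: one batch in a window of two leaves room for another batch,
    %% regardless of how many messages the first one carried
    ets:lookup_element(Tab, batch_count, 2) >= Max.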
+ +inflight_append(undefined, _InflightItem, _Id, _Index) -> + ok; +inflight_append( + InflightTID, + ?INFLIGHT_ITEM(Ref, [?QUERY(_, _, _, _) | _] = Batch0, IsRetriable, WorkerMRef), + Id, + Index +) -> + Batch = mark_as_sent(Batch0), + InflightItem = ?INFLIGHT_ITEM(Ref, Batch, IsRetriable, WorkerMRef), + IsNew = ets:insert_new(InflightTID, InflightItem), + BatchSize = length(Batch), + IsNew andalso inc_inflight(InflightTID, BatchSize), + emqx_resource_metrics:inflight_set(Id, Index, inflight_num_msgs(InflightTID)), + ?tp(buffer_worker_appended_to_inflight, #{item => InflightItem, is_new => IsNew}), + ok; +inflight_append( + InflightTID, + ?INFLIGHT_ITEM( + Ref, ?QUERY(_ReplyTo, _Req, _HasBeenSent, _ExpireAt) = Query0, IsRetriable, WorkerMRef + ), + Id, + Index +) -> + Query = mark_as_sent(Query0), + InflightItem = ?INFLIGHT_ITEM(Ref, Query, IsRetriable, WorkerMRef), + IsNew = ets:insert_new(InflightTID, InflightItem), + IsNew andalso inc_inflight(InflightTID, 1), + emqx_resource_metrics:inflight_set(Id, Index, inflight_num_msgs(InflightTID)), + ?tp(buffer_worker_appended_to_inflight, #{item => InflightItem, is_new => IsNew}), + ok; +inflight_append(InflightTID, {Ref, Data}, _Id, _Index) -> + ets:insert(InflightTID, {Ref, Data}), + %% this is a metadata row being inserted; therefore, we don't bump + %% the inflight metric. + ok. + +%% a request was already appended and originally not retriable, but an +%% error occurred and it is now retriable. +mark_inflight_as_retriable(undefined, _Ref) -> + ok; +mark_inflight_as_retriable(InflightTID, Ref) -> + _ = ets:update_element(InflightTID, Ref, {?RETRY_IDX, true}), + %% the old worker's DOWN should not affect this inflight any more + _ = ets:update_element(InflightTID, Ref, {?WORKER_MREF_IDX, erased}), + ok. + +%% Track each worker pid only once. +ensure_async_worker_monitored( + Data0 = #{async_workers := AsyncWorkers}, {async_return, {ok, WorkerPid}} = _Result +) when + is_pid(WorkerPid), is_map_key(WorkerPid, AsyncWorkers) +-> + WorkerMRef = maps:get(WorkerPid, AsyncWorkers), + {Data0, WorkerMRef}; +ensure_async_worker_monitored( + Data0 = #{async_workers := AsyncWorkers0}, {async_return, {ok, WorkerPid}} +) when + is_pid(WorkerPid) +-> + WorkerMRef = monitor(process, WorkerPid), + AsyncWorkers = AsyncWorkers0#{WorkerPid => WorkerMRef}, + Data = Data0#{async_workers := AsyncWorkers}, + {Data, WorkerMRef}; +ensure_async_worker_monitored(Data0, _Result) -> + {Data0, undefined}. + +store_async_worker_reference(undefined = _InflightTID, _Ref, _WorkerMRef) -> + ok; +store_async_worker_reference(_InflightTID, _Ref, undefined = _WorkerRef) -> + ok; +store_async_worker_reference(InflightTID, Ref, WorkerMRef) when + is_reference(WorkerMRef) +-> + _ = ets:update_element( + InflightTID, Ref, {?WORKER_MREF_IDX, WorkerMRef} + ), + ok. + +ack_inflight(undefined, _Ref, _Id, _Index) -> + false; +ack_inflight(InflightTID, Ref, Id, Index) -> + {Count, Removed} = + case ets:take(InflightTID, Ref) of + [?INFLIGHT_ITEM(Ref, ?QUERY(_, _, _, _), _IsRetriable, _WorkerMRef)] -> + {1, true}; + [?INFLIGHT_ITEM(Ref, [?QUERY(_, _, _, _) | _] = Batch, _IsRetriable, _WorkerMRef)] -> + {length(Batch), true}; + [] -> + {0, false} + end, + ok = dec_inflight_remove(InflightTID, Count, Removed), + IsKnownRef = (Count > 0), + case IsKnownRef of + true -> + emqx_resource_metrics:inflight_set(Id, Index, inflight_num_msgs(InflightTID)); + false -> + ok + end, + IsKnownRef. 
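`ack_inflight/4' above leans on ets:take/2 being an atomic lookup-and-delete, which is what makes a duplicate ack (e.g. a connector calling the reply callback twice) count at most once. A minimal sketch of that property:

demo_take_once() ->
    Tab = ets:new(demo, [set, public]),
    true = ets:insert(Tab, {ref1, payload}),
    [{ref1, payload}] = ets:take(Tab, ref1),
    %% a second ack for the same ref finds nothing and is a no-op
    [] = ets:take(Tab, ref1).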
+ +mark_inflight_items_as_retriable(Data, WorkerMRef) -> + #{inflight_tid := InflightTID} = Data, + IsRetriable = true, + MatchSpec = + ets:fun2ms( + fun(?INFLIGHT_ITEM(Ref, BatchOrQuery, _IsRetriable, WorkerMRef0)) when + WorkerMRef =:= WorkerMRef0 + -> + ?INFLIGHT_ITEM(Ref, BatchOrQuery, IsRetriable, WorkerMRef0) + end + ), + _NumAffected = ets:select_replace(InflightTID, MatchSpec), + ?tp(buffer_worker_async_agent_down, #{num_affected => _NumAffected}), + ok. + +%% used to update a batch after dropping expired individual queries. +update_inflight_item(InflightTID, Ref, NewBatch, NumExpired) -> + _ = ets:update_element(InflightTID, Ref, {?ITEM_IDX, NewBatch}), + ok = dec_inflight_update(InflightTID, NumExpired). + +inc_inflight(InflightTID, Count) -> + _ = ets:update_counter(InflightTID, ?SIZE_REF, {2, Count}), + _ = ets:update_counter(InflightTID, ?BATCH_COUNT_REF, {2, 1}), + ok. + +dec_inflight_remove(_InflightTID, _Count = 0, _Removed = false) -> + ok; +dec_inflight_remove(InflightTID, _Count = 0, _Removed = true) -> + _ = ets:update_counter(InflightTID, ?BATCH_COUNT_REF, {2, -1, 0, 0}), + ok; +dec_inflight_remove(InflightTID, Count, _Removed = true) when Count > 0 -> + %% If Count > 0, it must have been removed + _ = ets:update_counter(InflightTID, ?BATCH_COUNT_REF, {2, -1, 0, 0}), + _ = ets:update_counter(InflightTID, ?SIZE_REF, {2, -Count, 0, 0}), + ok. + +dec_inflight_update(_InflightTID, _Count = 0) -> + ok; +dec_inflight_update(InflightTID, Count) when Count > 0 -> + _ = ets:update_counter(InflightTID, ?SIZE_REF, {2, -Count, 0, 0}), + ok. + +%%============================================================================== + +inc_sent_failed(Id, _HasBeenSent = true) -> + emqx_resource_metrics:retried_failed_inc(Id); +inc_sent_failed(Id, _HasBeenSent) -> + emqx_resource_metrics:failed_inc(Id). + +inc_sent_success(Id, _HasBeenSent = true) -> + emqx_resource_metrics:retried_success_inc(Id); +inc_sent_success(Id, _HasBeenSent) -> + emqx_resource_metrics:success_inc(Id). + +call_mode(force_sync, _) -> sync; +call_mode(async_if_possible, always_sync) -> sync; +call_mode(async_if_possible, async_if_possible) -> async. + +assert_ok_result(ok) -> + true; +assert_ok_result({async_return, R}) -> + assert_ok_result(R); +assert_ok_result(R) when is_tuple(R) -> + try + ok = erlang:element(1, R) + catch + error:{badmatch, _} -> + error({not_ok_result, R}) + end; +assert_ok_result(R) -> + error({not_ok_result, R}). + +queue_count(Q) -> + replayq:count(Q). + +disk_queue_dir(Id, Index) -> + QDir0 = binary_to_list(Id) ++ ":" ++ integer_to_list(Index), + QDir = filename:join([emqx:data_dir(), "bufs", node(), QDir0]), + emqx_utils:safe_filename(QDir). + +clear_disk_queue_dir(Id, Index) -> + ReplayQDir = disk_queue_dir(Id, Index), + case file:del_dir_r(ReplayQDir) of + {error, enoent} -> + ok; + Res -> + Res + end. + +ensure_flush_timer(Data = #{batch_time := T}) -> + ensure_flush_timer(Data, T). + +ensure_flush_timer(Data = #{tref := undefined}, 0) -> + %% if the batch_time is 0, we don't need to start a timer, which + %% can be costly at high rates. + Ref = make_ref(), + self() ! {flush, Ref}, + Data#{tref => {Ref, Ref}}; +ensure_flush_timer(Data = #{tref := undefined}, T) -> + Ref = make_ref(), + TRef = erlang:send_after(T, self(), {flush, Ref}), + Data#{tref => {TRef, Ref}}; +ensure_flush_timer(Data, _T) -> + Data. + +cancel_flush_timer(St = #{tref := undefined}) -> + St; +cancel_flush_timer(St = #{tref := {TRef, _Ref}}) -> + _ = erlang:cancel_timer(TRef), + St#{tref => undefined}. 
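The decrement helpers above use the four-tuple ets:update_counter/3 operation {Pos, Incr, Threshold, SetValue}: if the decrement would cross the threshold, the set-value is stored instead, so the counters are clamped at zero even if decrements race ahead of increments. A minimal sketch:

demo_clamped_decrement() ->
    Tab = ets:new(demo, [set, public]),
    true = ets:insert(Tab, {size, 1}),
    0 = ets:update_counter(Tab, size, {2, -1, 0, 0}),
    %% already at the floor: stays 0 instead of going negative
    0 = ets:update_counter(Tab, size, {2, -1, 0, 0}).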
+ +-spec make_request_ref() -> inflight_key(). +make_request_ref() -> + now_(). + +collect_requests(Acc, Limit) -> + Count = length(Acc), + do_collect_requests(Acc, Count, Limit). + +do_collect_requests(Acc, Count, Limit) when Count >= Limit -> + lists:reverse(Acc); +do_collect_requests(Acc, Count, Limit) -> + receive + ?SEND_REQ(_ReplyTo, _Req) = Request -> + do_collect_requests([Request | Acc], Count + 1, Limit) + after 0 -> + lists:reverse(Acc) + end. + +mark_as_sent(Batch) when is_list(Batch) -> + lists:map(fun mark_as_sent/1, Batch); +mark_as_sent(?QUERY(ReplyTo, Req, _HasBeenSent, ExpireAt)) -> + HasBeenSent = true, + ?QUERY(ReplyTo, Req, HasBeenSent, ExpireAt). + +is_unrecoverable_error({error, {unrecoverable_error, _}}) -> + true; +is_unrecoverable_error({error, {recoverable_error, _}}) -> + false; +is_unrecoverable_error({async_return, Result}) -> + is_unrecoverable_error(Result); +is_unrecoverable_error({error, _}) -> + %% TODO: delete this clause. + %% Ideally all errors except for 'unrecoverable_error' should be + %% retried, including DB schema errors. + true; +is_unrecoverable_error(_) -> + false. + +is_async_return({async_return, _}) -> + true; +is_async_return(_) -> + false. + +sieve_expired_requests(Batch, Now) -> + lists:partition( + fun(?QUERY(_ReplyTo, _CoreReq, _HasBeenSent, ExpireAt)) -> + not is_expired(ExpireAt, Now) + end, + Batch + ). + +-spec is_expired(infinity | integer(), integer()) -> boolean(). +is_expired(infinity = _ExpireAt, _Now) -> + false; +is_expired(ExpireAt, Now) -> + Now > ExpireAt. + +now_() -> + erlang:monotonic_time(nanosecond). + +-spec ensure_timeout_query_opts(query_opts(), sync | async) -> query_opts(). +ensure_timeout_query_opts(#{timeout := _} = Opts, _SyncOrAsync) -> + Opts; +ensure_timeout_query_opts(#{} = Opts0, sync) -> + Opts0#{timeout => ?DEFAULT_REQUEST_TIMEOUT}; +ensure_timeout_query_opts(#{} = Opts0, async) -> + Opts0#{timeout => infinity}. + +-spec ensure_expire_at(query_opts()) -> query_opts(). +ensure_expire_at(#{expire_at := _} = Opts) -> + Opts; +ensure_expire_at(#{timeout := infinity} = Opts) -> + Opts#{expire_at => infinity}; +ensure_expire_at(#{timeout := TimeoutMS} = Opts) -> + TimeoutNS = erlang:convert_time_unit(TimeoutMS, millisecond, nanosecond), + ExpireAt = now_() + TimeoutNS, + Opts#{expire_at => ExpireAt}. + +%% no need to keep the request for async reply handler +minimize(?QUERY(_, _, _, _) = Q) -> + do_minimize(Q); +minimize(L) when is_list(L) -> + lists:map(fun do_minimize/1, L). + +-ifdef(TEST). +do_minimize(?QUERY(_ReplyTo, _Req, _Sent, _ExpireAt) = Query) -> Query. +-else. +do_minimize(?QUERY(ReplyTo, _Req, Sent, ExpireAt)) -> ?QUERY(ReplyTo, [], Sent, ExpireAt). +-endif. + +%% To avoid message loss due to misconfigurations, we adjust +%% `batch_time' based on `request_timeout'. If `batch_time' > +%% `request_timeout', all requests will timeout before being sent if +%% the message rate is low. Even worse if `pool_size' is high. +%% We cap `batch_time' at `request_timeout div 2' as a rule of thumb. +adjust_batch_time(_Id, _RequestTimeout = infinity, BatchTime0) -> + BatchTime0; +adjust_batch_time(Id, RequestTimeout, BatchTime0) -> + BatchTime = max(0, min(BatchTime0, RequestTimeout div 2)), + case BatchTime =:= BatchTime0 of + false -> + ?SLOG(info, #{ + id => Id, + msg => adjusting_buffer_worker_batch_time, + new_batch_time => BatchTime + }); + true -> + ok + end, + BatchTime. 
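Since every deadline above is an absolute monotonic timestamp in nanoseconds, a worked example of the `ensure_expire_at/1' arithmetic may help; the 5-second timeout is an arbitrary assumption:

demo_expire_at() ->
    TimeoutMS = 5000,
    %% 5000 ms -> 5000000000 ns past the current monotonic time
    TimeoutNS = erlang:convert_time_unit(TimeoutMS, millisecond, nanosecond),
    ExpireAt = erlang:monotonic_time(nanosecond) + TimeoutNS,
    %% is_expired/2 compares the same monotonic clock against this deadline
    false = erlang:monotonic_time(nanosecond) > ExpireAt,
    ExpireAt.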
+
+replayq_opts(Id, Index, Opts) ->
+    BufferMode = maps:get(buffer_mode, Opts, memory_only),
+    TotalBytes = maps:get(max_buffer_bytes, Opts, ?DEFAULT_BUFFER_BYTES),
+    case BufferMode of
+        memory_only ->
+            #{
+                mem_only => true,
+                marshaller => fun ?MODULE:queue_item_marshaller/1,
+                max_total_bytes => TotalBytes,
+                sizer => fun ?MODULE:estimate_size/1
+            };
+        volatile_offload ->
+            SegBytes0 = maps:get(buffer_seg_bytes, Opts, TotalBytes),
+            SegBytes = min(SegBytes0, TotalBytes),
+            #{
+                dir => disk_queue_dir(Id, Index),
+                marshaller => fun ?MODULE:queue_item_marshaller/1,
+                max_total_bytes => TotalBytes,
+                %% we don't want to retain the queue after
+                %% resource restarts.
+                offload => {true, volatile},
+                seg_bytes => SegBytes,
+                sizer => fun ?MODULE:estimate_size/1
+            }
+    end.
+
+%% The request timeout should be greater than the resume interval, as
+%% the latter defines how often the buffer worker tries to unblock. If the
+%% request timeout is <= the resume interval and the buffer worker is ever
+%% blocked, then all queued requests will basically fail without being
+%% attempted.
+-spec default_resume_interval(request_timeout(), health_check_interval()) -> timer:time().
+default_resume_interval(_RequestTimeout = infinity, HealthCheckInterval) ->
+    max(1, HealthCheckInterval);
+default_resume_interval(RequestTimeout, HealthCheckInterval) ->
+    max(1, min(HealthCheckInterval, RequestTimeout div 3)).
+
+-spec reply_call(reference(), term()) -> ok.
+reply_call(Alias, Response) ->
+    %% Since we use a reference created with `{alias,
+    %% reply_demonitor}', after we `demonitor' it in case of a
+    %% timeout, we won't send any more messages that the caller is no
+    %% longer expecting. Using `gen_statem:reply({pid(),
+    %% reference()}, _)' would still send a late reply even after the
+    %% demonitor.
+    erlang:send(Alias, {Alias, Response}),
+    ok.
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+adjust_batch_time_test_() ->
+    %% just for logging
+    Id = some_id,
+    [
+        {"batch time smaller than request_time/2",
+            ?_assertEqual(
+                100,
+                adjust_batch_time(Id, 500, 100)
+            )},
+        {"batch time equal to request_time/2",
+            ?_assertEqual(
+                100,
+                adjust_batch_time(Id, 200, 100)
+            )},
+        {"batch time greater than request_time/2",
+            ?_assertEqual(
+                50,
+                adjust_batch_time(Id, 100, 100)
+            )},
+        {"batch time smaller than request_time/2 (request_time = infinity)",
+            ?_assertEqual(
+                100,
+                adjust_batch_time(Id, infinity, 100)
+            )}
+    ].
+-endif.
diff --git a/apps/emqx_resource/src/emqx_resource_worker_sup.erl b/apps/emqx_resource/src/emqx_resource_buffer_worker_sup.erl
similarity index 82%
rename from apps/emqx_resource/src/emqx_resource_worker_sup.erl
rename to apps/emqx_resource/src/emqx_resource_buffer_worker_sup.erl
index b6557620c..104ad7ade 100644
--- a/apps/emqx_resource/src/emqx_resource_worker_sup.erl
+++ b/apps/emqx_resource/src/emqx_resource_buffer_worker_sup.erl
@@ -13,7 +13,7 @@
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
--module(emqx_resource_worker_sup).
+-module(emqx_resource_buffer_worker_sup).
-behaviour(supervisor).

%%%=============================================================================
@@ -23,7 +23,7 @@
%% External API
-export([start_link/0]).

--export([start_workers/2, stop_workers/2]).
+-export([start_workers/2, stop_workers/2, worker_pids/1]).

%% Callbacks
-export([init/1]).
@@ -52,6 +52,7 @@ init([]) ->
    ChildSpecs = [],
    {ok, {SupFlags, ChildSpecs}}.
+-spec start_workers(emqx_resource:resource_id(), _Opts :: #{atom() => _}) -> ok. start_workers(ResId, Opts) -> WorkerPoolSize = worker_pool_size(Opts), _ = ensure_worker_pool(ResId, hash, [{size, WorkerPoolSize}]), @@ -63,17 +64,28 @@ start_workers(ResId, Opts) -> lists:seq(1, WorkerPoolSize) ). +-spec stop_workers(emqx_resource:resource_id(), _Opts :: #{atom() => _}) -> ok. stop_workers(ResId, Opts) -> WorkerPoolSize = worker_pool_size(Opts), lists:foreach( fun(Idx) -> - ensure_worker_removed(ResId, Idx) + _ = ensure_worker_removed(ResId, Idx), + ensure_disk_queue_dir_absent(ResId, Idx) end, lists:seq(1, WorkerPoolSize) ), ensure_worker_pool_removed(ResId), ok. +-spec worker_pids(emqx_resource:resource_id()) -> [pid()]. +worker_pids(ResId) -> + lists:map( + fun({_Name, Pid}) -> + Pid + end, + gproc_pool:active_workers(ResId) + ). + %%%============================================================================= %%% Internal %%%============================================================================= @@ -98,7 +110,7 @@ ensure_worker_added(ResId, Idx) -> -define(CHILD_ID(MOD, RESID, INDEX), {MOD, RESID, INDEX}). ensure_worker_started(ResId, Idx, Opts) -> - Mod = emqx_resource_worker, + Mod = emqx_resource_buffer_worker, Spec = #{ id => ?CHILD_ID(Mod, ResId, Idx), start => {Mod, start_link, [ResId, Idx, Opts]}, @@ -115,7 +127,7 @@ ensure_worker_started(ResId, Idx, Opts) -> end. ensure_worker_removed(ResId, Idx) -> - ChildId = ?CHILD_ID(emqx_resource_worker, ResId, Idx), + ChildId = ?CHILD_ID(emqx_resource_buffer_worker, ResId, Idx), case supervisor:terminate_child(?SERVER, ChildId) of ok -> Res = supervisor:delete_child(?SERVER, ChildId), @@ -127,10 +139,10 @@ ensure_worker_removed(ResId, Idx) -> {error, Reason} end. -ensure_worker_pool_removed(ResId) -> - try - gproc_pool:delete(ResId) - catch - error:badarg -> ok - end, +ensure_disk_queue_dir_absent(ResourceId, Index) -> + ok = emqx_resource_buffer_worker:clear_disk_queue_dir(ResourceId, Index), + ok. + +ensure_worker_pool_removed(ResId) -> + gproc_pool:force_delete(ResId), ok. diff --git a/apps/emqx_resource/src/emqx_resource_manager.erl b/apps/emqx_resource/src/emqx_resource_manager.erl index 26ce5d6f7..f42d3c1b5 100644 --- a/apps/emqx_resource/src/emqx_resource_manager.erl +++ b/apps/emqx_resource/src/emqx_resource_manager.erl @@ -17,8 +17,8 @@ -behaviour(gen_statem). -include("emqx_resource.hrl"). --include("emqx_resource_utils.hrl"). -include_lib("emqx/include/logger.hrl"). +-include_lib("snabbkaffe/include/trace.hrl"). % API -export([ @@ -36,29 +36,30 @@ lookup/1, list_all/0, list_group/1, - ets_lookup/1, + lookup_cached/1, get_metrics/1, reset_metrics/1 ]). -export([ - set_resource_status_connecting/1, - manager_id_to_resource_id/1 + set_resource_status_connecting/1 ]). % Server --export([start_link/6]). +-export([start_link/5]). % Behaviour -export([init/1, callback_mode/0, handle_event/4, terminate/3]). % State record -record(data, { - id, manager_id, group, mod, callback_mode, query_mode, config, opts, status, state, error + id, group, mod, callback_mode, query_mode, config, opts, status, state, error, pid }). -type data() :: #data{}. --define(ETS_TABLE, ?MODULE). +-define(NAME(ResId), {n, l, {?MODULE, ResId}}). +-define(REF(ResId), {via, gproc, ?NAME(ResId)}). + -define(WAIT_FOR_RESOURCE_DELAY, 100). -define(T_OPERATION, 5000). -define(T_LOOKUP, 1000). @@ -69,13 +70,6 @@ %% API %%------------------------------------------------------------------------------ -make_manager_id(ResId) -> - emqx_resource:generate_id(ResId). 
- -manager_id_to_resource_id(MgrId) -> - [ResId, _Index] = string:split(MgrId, ":", trailing), - ResId. - %% @doc Called from emqx_resource when starting a resource instance. %% %% Triggers the emqx_resource_manager_sup supervisor to actually create @@ -92,8 +86,7 @@ ensure_resource(ResId, Group, ResourceType, Config, Opts) -> {ok, _Group, Data} -> {ok, Data}; {error, not_found} -> - MgrId = set_new_owner(ResId), - create_and_return_data(MgrId, ResId, Group, ResourceType, Config, Opts) + create_and_return_data(ResId, Group, ResourceType, Config, Opts) end. %% @doc Called from emqx_resource when recreating a resource which may or may not exist @@ -103,28 +96,22 @@ recreate(ResId, ResourceType, NewConfig, Opts) -> case lookup(ResId) of {ok, Group, #{mod := ResourceType, status := _} = _Data} -> _ = remove(ResId, false), - MgrId = set_new_owner(ResId), - create_and_return_data(MgrId, ResId, Group, ResourceType, NewConfig, Opts); + create_and_return_data(ResId, Group, ResourceType, NewConfig, Opts); {ok, _, #{mod := Mod}} when Mod =/= ResourceType -> {error, updating_to_incorrect_resource_type}; {error, not_found} -> {error, not_found} end. -create_and_return_data(MgrId, ResId, Group, ResourceType, Config, Opts) -> - create(MgrId, ResId, Group, ResourceType, Config, Opts), +create_and_return_data(ResId, Group, ResourceType, Config, Opts) -> + _ = create(ResId, Group, ResourceType, Config, Opts), {ok, _Group, Data} = lookup(ResId), {ok, Data}. -%% internal configs --define(START_AFTER_CREATED, true). -%% in milliseconds --define(START_TIMEOUT, 5000). - %% @doc Create a resource_manager and wait until it is running -create(MgrId, ResId, Group, ResourceType, Config, Opts) -> +create(ResId, Group, ResourceType, Config, Opts) -> % The state machine will make the actual call to the callback/resource module after init - ok = emqx_resource_manager_sup:ensure_child(MgrId, ResId, Group, ResourceType, Config, Opts), + ok = emqx_resource_manager_sup:ensure_child(ResId, Group, ResourceType, Config, Opts), ok = emqx_metrics_worker:create_metrics( ?RES_METRICS, ResId, @@ -134,9 +121,10 @@ create(MgrId, ResId, Group, ResourceType, Config, Opts) -> 'retried.success', 'retried.failed', 'success', + 'late_reply', 'failed', 'dropped', - 'dropped.queue_not_enabled', + 'dropped.expired', 'dropped.queue_full', 'dropped.resource_not_found', 'dropped.resource_stopped', @@ -151,7 +139,7 @@ create(MgrId, ResId, Group, ResourceType, Config, Opts) -> %% buffer, so there is no need for resource workers ok; false -> - ok = emqx_resource_worker_sup:start_workers(ResId, Opts), + ok = emqx_resource_buffer_worker_sup:start_workers(ResId, Opts), case maps:get(start_after_created, Opts, ?START_AFTER_CREATED) of true -> wait_for_ready(ResId, maps:get(start_timeout, Opts, ?START_TIMEOUT)); @@ -168,13 +156,18 @@ create(MgrId, ResId, Group, ResourceType, Config, Opts) -> ok | {error, Reason :: term()}. 
create_dry_run(ResourceType, Config) -> ResId = make_test_id(), - MgrId = set_new_owner(ResId), - ok = emqx_resource_manager_sup:ensure_child( - MgrId, ResId, <<"dry_run">>, ResourceType, Config, #{} - ), - case wait_for_ready(ResId, 15000) of + Opts = + case is_map(Config) of + true -> maps:get(resource_opts, Config, #{}); + false -> #{} + end, + ok = emqx_resource_manager_sup:ensure_child(ResId, <<"dry_run">>, ResourceType, Config, Opts), + case wait_for_ready(ResId, 5000) of ok -> remove(ResId); + {error, Reason} -> + _ = remove(ResId), + {error, Reason}; timeout -> _ = remove(ResId), {error, timeout} @@ -195,7 +188,7 @@ remove(ResId, ClearMetrics) when is_binary(ResId) -> restart(ResId, Opts) when is_binary(ResId) -> case safe_call(ResId, restart, ?T_OPERATION) of ok -> - wait_for_ready(ResId, maps:get(start_timeout, Opts, 5000)), + _ = wait_for_ready(ResId, maps:get(start_timeout, Opts, 5000)), ok; {error, _Reason} = Error -> Error @@ -206,7 +199,7 @@ restart(ResId, Opts) when is_binary(ResId) -> start(ResId, Opts) -> case safe_call(ResId, start, ?T_OPERATION) of ok -> - wait_for_ready(ResId, maps:get(start_timeout, Opts, 5000)), + _ = wait_for_ready(ResId, maps:get(start_timeout, Opts, 5000)), ok; {error, _Reason} = Error -> Error @@ -231,17 +224,18 @@ set_resource_status_connecting(ResId) -> -spec lookup(resource_id()) -> {ok, resource_group(), resource_data()} | {error, not_found}. lookup(ResId) -> case safe_call(ResId, lookup, ?T_LOOKUP) of - {error, timeout} -> ets_lookup(ResId); + {error, timeout} -> lookup_cached(ResId); Result -> Result end. -%% @doc Lookup the group and data of a resource --spec ets_lookup(resource_id()) -> {ok, resource_group(), resource_data()} | {error, not_found}. -ets_lookup(ResId) -> - case read_cache(ResId) of - {Group, Data} -> - {ok, Group, data_record_to_external_map_with_metrics(Data)}; - not_found -> +%% @doc Lookup the group and data of a resource from the cache +-spec lookup_cached(resource_id()) -> {ok, resource_group(), resource_data()} | {error, not_found}. +lookup_cached(ResId) -> + try read_cache(ResId) of + Data = #data{group = Group} -> + {ok, Group, data_record_to_external_map(Data)} + catch + error:badarg -> {error, not_found} end. @@ -255,22 +249,18 @@ reset_metrics(ResId) -> emqx_metrics_worker:reset_metrics(?RES_METRICS, ResId). %% @doc Returns the data for all resources --spec list_all() -> [resource_data()] | []. +-spec list_all() -> [resource_data()]. list_all() -> - try - [ - data_record_to_external_map_with_metrics(Data) - || {_Id, _Group, Data} <- ets:tab2list(?ETS_TABLE) - ] - catch - error:badarg -> [] - end. + lists:map( + fun data_record_to_external_map/1, + gproc:select({local, names}, [{{?NAME('_'), '_', '$1'}, [], ['$1']}]) + ). %% @doc Returns a list of ids for all the resources in a group -spec list_group(resource_group()) -> [resource_id()]. list_group(Group) -> - List = ets:match(?ETS_TABLE, {'$1', Group, '_'}), - lists:flatten(List). + Guard = {'==', {element, #data.group, '$1'}, Group}, + gproc:select({local, names}, [{{?NAME('$2'), '_', '$1'}, [Guard], ['$2']}]). -spec health_check(resource_id()) -> {ok, resource_status()} | {error, term()}. 
health_check(ResId) ->
@@ -279,10 +269,9 @@
%% Server start/stop callbacks

%% @doc Function called from the supervisor to actually start the server
-start_link(MgrId, ResId, Group, ResourceType, Config, Opts) ->
+start_link(ResId, Group, ResourceType, Config, Opts) ->
    Data = #data{
        id = ResId,
-        manager_id = MgrId,
        group = Group,
        mod = ResourceType,
        callback_mode = emqx_resource:get_callback_mode(ResourceType),
@@ -293,26 +282,30 @@
        query_mode = maps:get(query_mode, Opts, sync),
        config = Config,
        opts = Opts,
-        status = connecting,
        state = undefined,
        error = undefined
    },
-    Module = atom_to_binary(?MODULE),
-    ProcName = binary_to_atom(<<Module/binary, "_", MgrId/binary>>, utf8),
-    gen_statem:start_link({local, ProcName}, ?MODULE, {Data, Opts}, []).
+    gen_statem:start_link(?REF(ResId), ?MODULE, {Data, Opts}, []).

-init({Data, Opts}) ->
+init({DataIn, Opts}) ->
    process_flag(trap_exit, true),
-    %% init the cache so that lookup/1 will always return something
-    insert_cache(Data#data.id, Data#data.group, Data),
-    case maps:get(start_after_created, Opts, true) of
-        true -> {ok, connecting, Data, {next_event, internal, start_resource}};
-        false -> {ok, stopped, Data}
+    Data = DataIn#data{pid = self()},
+    case maps:get(start_after_created, Opts, ?START_AFTER_CREATED) of
+        true ->
+            %% init the cache so that lookup/1 will always return something
+            UpdatedData = update_state(Data#data{status = connecting}),
+            {ok, connecting, UpdatedData, {next_event, internal, start_resource}};
+        false ->
+            %% init the cache so that lookup/1 will always return something
+            UpdatedData = update_state(Data#data{status = stopped}),
+            {ok, stopped, UpdatedData}
    end.

+terminate({shutdown, removed}, _State, _Data) ->
+    ok;
terminate(_Reason, _State, Data) ->
-    _ = maybe_clear_alarm(Data#data.id),
-    delete_cache(Data#data.id, Data#data.manager_id),
+    _ = maybe_stop_resource(Data),
+    _ = erase_cache(Data),
    ok.

%% Behavior callback
@@ -323,34 +316,32 @@
callback_mode() -> [handle_event_function, state_enter].
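The manager is now addressed through gproc instead of a per-manager atom: ?REF(ResId) expands to a {via, gproc, Name} tuple, and the same gproc name entry doubles as the cache slot that insert_cache/read_cache use further down. A minimal standalone sketch of the pattern, assuming a running gproc application; the key and value below are made up:

demo_gproc_registry() ->
    Key = {n, l, {demo_manager, <<"res1">>}},
    true = gproc:reg(Key, #{status => connecting}),
    %% the owner can refresh the attached value...
    true = gproc:set_value(Key, #{status => connected}),
    %% ...and any process can read it without messaging the owner
    #{status := connected} = gproc:lookup_value(Key).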
% Called during testing to force a specific state
handle_event({call, From}, set_resource_status_connecting, _State, Data) ->
-    {next_state, connecting, Data#data{status = connecting}, [{reply, From, ok}]};
+    UpdatedData = update_state(Data#data{status = connecting}, Data),
+    {next_state, connecting, UpdatedData, [{reply, From, ok}]};
% Called when the resource is to be restarted
handle_event({call, From}, restart, _State, Data) ->
-    _ = stop_resource(Data),
-    start_resource(Data, From);
-% Called when the resource is to be started
-handle_event({call, From}, start, stopped, Data) ->
+    DataNext = stop_resource(Data),
+    start_resource(DataNext, From);
+% Called when the resource is to be started (also used for manual reconnect)
+handle_event({call, From}, start, State, Data) when
+    State =:= stopped orelse
+        State =:= disconnected
+->
    start_resource(Data, From);
handle_event({call, From}, start, _State, _Data) ->
    {keep_state_and_data, [{reply, From, ok}]};
-% Called when the resource received a `quit` message
-handle_event(info, quit, stopped, _Data) ->
-    {stop, {shutdown, quit}};
-handle_event(info, quit, _State, Data) ->
-    _ = stop_resource(Data),
-    {stop, {shutdown, quit}};
% Called when the resource is to be stopped
handle_event({call, From}, stop, stopped, _Data) ->
    {keep_state_and_data, [{reply, From, ok}]};
handle_event({call, From}, stop, _State, Data) ->
-    Result = stop_resource(Data),
-    {next_state, stopped, Data, [{reply, From, Result}]};
+    UpdatedData = stop_resource(Data),
+    {next_state, stopped, update_state(UpdatedData, Data), [{reply, From, ok}]};
% Called when a resource is to be stopped and removed.
handle_event({call, From}, {remove, ClearMetrics}, _State, Data) ->
    handle_remove_event(From, ClearMetrics, Data);
% Called when the state-data of the resource is being looked up.
handle_event({call, From}, lookup, _State, #data{group = Group} = Data) ->
-    Reply = {ok, Group, data_record_to_external_map_with_metrics(Data)},
+    Reply = {ok, Group, data_record_to_external_map(Data)},
    {keep_state_and_data, [{reply, From, Reply}]};
% Called when doing a manual health check.
handle_event({call, From}, health_check, stopped, _Data) -> @@ -359,11 +350,9 @@ handle_event({call, From}, health_check, stopped, _Data) -> handle_event({call, From}, health_check, _State, Data) -> handle_manually_health_check(From, Data); % State: CONNECTING -handle_event(enter, _OldState, connecting, Data) -> - UpdatedData = Data#data{status = connecting}, - insert_cache(Data#data.id, Data#data.group, Data), - Actions = [{state_timeout, 0, health_check}], - {keep_state, UpdatedData, Actions}; +handle_event(enter, _OldState, connecting = State, Data) -> + ok = log_state_consistency(State, Data), + {keep_state_and_data, [{state_timeout, 0, health_check}]}; handle_event(internal, start_resource, connecting, Data) -> start_resource(Data, undefined); handle_event(state_timeout, health_check, connecting, Data) -> @@ -371,27 +360,24 @@ handle_event(state_timeout, health_check, connecting, Data) -> %% State: CONNECTED %% The connected state is entered after a successful on_start/2 of the callback mod %% and successful health_checks -handle_event(enter, _OldState, connected, Data) -> - UpdatedData = Data#data{status = connected}, - insert_cache(Data#data.id, Data#data.group, UpdatedData), - _ = emqx_alarm:deactivate(Data#data.id), - Actions = [{state_timeout, health_check_interval(Data#data.opts), health_check}], - {next_state, connected, UpdatedData, Actions}; +handle_event(enter, _OldState, connected = State, Data) -> + ok = log_state_consistency(State, Data), + _ = emqx_alarm:safe_deactivate(Data#data.id), + ?tp(resource_connected_enter, #{}), + {keep_state_and_data, health_check_actions(Data)}; handle_event(state_timeout, health_check, connected, Data) -> handle_connected_health_check(Data); %% State: DISCONNECTED -handle_event(enter, _OldState, disconnected, Data) -> - UpdatedData = Data#data{status = disconnected}, - insert_cache(Data#data.id, Data#data.group, UpdatedData), - handle_disconnected_state_enter(UpdatedData); +handle_event(enter, _OldState, disconnected = State, Data) -> + ok = log_state_consistency(State, Data), + {keep_state_and_data, retry_actions(Data)}; handle_event(state_timeout, auto_retry, disconnected, Data) -> start_resource(Data, undefined); %% State: STOPPED %% The stopped state is entered after the resource has been explicitly stopped -handle_event(enter, _OldState, stopped, Data) -> - UpdatedData = Data#data{status = stopped}, - insert_cache(Data#data.id, Data#data.group, UpdatedData), - {next_state, stopped, UpdatedData}; +handle_event(enter, _OldState, stopped = State, Data) -> + ok = log_state_consistency(State, Data), + {keep_state_and_data, []}; % Ignore all other events handle_event(EventType, EventData, State, Data) -> ?SLOG( @@ -406,62 +392,41 @@ handle_event(EventType, EventData, State, Data) -> ), keep_state_and_data. +log_state_consistency(State, #data{status = State} = Data) -> + log_cache_consistency(read_cache(Data#data.id), Data); +log_state_consistency(State, Data) -> + ?tp(warning, "inconsistent_state", #{ + state => State, + data => Data + }). + +log_cache_consistency(Data, Data) -> + ok; +log_cache_consistency(DataCached, Data) -> + ?tp(warning, "inconsistent_cache", #{ + cache => DataCached, + data => Data + }). 
+
%%------------------------------------------------------------------------------
%% internal functions
%%------------------------------------------------------------------------------
-insert_cache(ResId, Group, Data = #data{manager_id = MgrId}) ->
-    case get_owner(ResId) of
-        not_found ->
-            ets:insert(?ETS_TABLE, {ResId, Group, Data});
-        MgrId ->
-            ets:insert(?ETS_TABLE, {ResId, Group, Data});
-        _ ->
-            ?SLOG(error, #{
-                msg => get_resource_owner_failed,
-                resource_id => ResId,
-                action => quit_resource
-            }),
-            self() ! quit
-    end.
+insert_cache(ResId, Data = #data{}) ->
+    gproc:set_value(?NAME(ResId), Data).

read_cache(ResId) ->
-    case ets:lookup(?ETS_TABLE, ResId) of
-        [{_Id, Group, Data}] -> {Group, Data};
-        [] -> not_found
+    gproc:lookup_value(?NAME(ResId)).
+
+erase_cache(_Data = #data{id = ResId}) ->
+    gproc:unreg(?NAME(ResId)).
+
+try_read_cache(ResId) ->
+    try
+        read_cache(ResId)
+    catch
+        error:badarg -> not_found
    end.

-delete_cache(ResId, MgrId) ->
-    case get_owner(ResId) of
-        MgrIdNow when MgrIdNow == not_found; MgrIdNow == MgrId ->
-            do_delete_cache(ResId);
-        _ ->
-            ok
-    end.
-
-do_delete_cache(<<?TEST_ID_PREFIX, _/binary>> = ResId) ->
-    ets:delete(?ETS_TABLE, {owner, ResId}),
-    ets:delete(?ETS_TABLE, ResId);
-do_delete_cache(ResId) ->
-    ets:delete(?ETS_TABLE, ResId).
-
-set_new_owner(ResId) ->
-    MgrId = make_manager_id(ResId),
-    ok = set_owner(ResId, MgrId),
-    MgrId.
-
-set_owner(ResId, MgrId) ->
-    ets:insert(?ETS_TABLE, {{owner, ResId}, MgrId}),
-    ok.
-
-get_owner(ResId) ->
-    case ets:lookup(?ETS_TABLE, {owner, ResId}) of
-        [{_, MgrId}] -> MgrId;
-        [] -> not_found
-    end.
-
-handle_disconnected_state_enter(Data) ->
-    {next_state, disconnected, Data, retry_actions(Data)}.
-
retry_actions(Data) ->
    case maps:get(auto_restart_interval, Data#data.opts, ?AUTO_RESTART_INTERVAL) of
        undefined ->
@@ -470,61 +435,72 @@
        [{state_timeout, RetryInterval, auto_retry}]
    end.

+health_check_actions(Data) ->
+    [{state_timeout, health_check_interval(Data#data.opts), health_check}].
+
handle_remove_event(From, ClearMetrics, Data) ->
-    stop_resource(Data),
-    ok = emqx_resource_worker_sup:stop_workers(Data#data.id, Data#data.opts),
+    _ = stop_resource(Data),
+    ok = emqx_resource_buffer_worker_sup:stop_workers(Data#data.id, Data#data.opts),
    case ClearMetrics of
        true -> ok = emqx_metrics_worker:clear_metrics(?RES_METRICS, Data#data.id);
        false -> ok
    end,
-    {stop_and_reply, normal, [{reply, From, ok}]}.
+    _ = erase_cache(Data),
+    {stop_and_reply, {shutdown, removed}, [{reply, From, ok}]}.
start_resource(Data, From) ->
    %% in case emqx_resource:call_start/2 hangs, lookup/1 can read the status from the cache
-    insert_cache(Data#data.id, Data#data.group, Data),
-    case emqx_resource:call_start(Data#data.manager_id, Data#data.mod, Data#data.config) of
+    case emqx_resource:call_start(Data#data.id, Data#data.mod, Data#data.config) of
        {ok, ResourceState} ->
-            UpdatedData = Data#data{state = ResourceState, status = connecting},
+            UpdatedData = Data#data{status = connecting, state = ResourceState},
            %% Perform an initial health_check immediately before transitioning into a connected state
            Actions = maybe_reply([{state_timeout, 0, health_check}], From, ok),
-            {next_state, connecting, UpdatedData, Actions};
+            {next_state, connecting, update_state(UpdatedData, Data), Actions};
        {error, Reason} = Err ->
-            ?SLOG(error, #{
+            ?SLOG(warning, #{
                msg => start_resource_failed,
                id => Data#data.id,
                reason => Reason
            }),
-            _ = maybe_alarm(disconnected, Data#data.id),
+            _ = maybe_alarm(disconnected, Data#data.id, Err, Data#data.error),
            %% Keep track of the error reason why the connection did not work,
            %% so that the Reason can be returned when the verification call is made.
-            UpdatedData = Data#data{error = Reason},
+            UpdatedData = Data#data{status = disconnected, error = Err},
            Actions = maybe_reply(retry_actions(UpdatedData), From, Err),
-            {next_state, disconnected, UpdatedData, Actions}
    end.

-stop_resource(#data{state = undefined, id = ResId} = _Data) ->
-    _ = maybe_clear_alarm(ResId),
-    ok = emqx_metrics_worker:reset_metrics(?RES_METRICS, ResId),
-    ok;
-stop_resource(Data) ->
+maybe_stop_resource(#data{status = Status} = Data) when Status /= stopped ->
+    stop_resource(Data);
+maybe_stop_resource(#data{status = stopped} = Data) ->
+    Data.
+
+stop_resource(#data{state = ResState, id = ResId} = Data) ->
    %% We don't care about the return value of Mod:on_stop/2.
    %% The callback mod should make sure the resource is stopped after on_stop/2
    %% returns.
-    ResId = Data#data.id,
-    _ = emqx_resource:call_stop(Data#data.manager_id, Data#data.mod, Data#data.state),
+    case ResState /= undefined of
+        true ->
+            emqx_resource:call_stop(Data#data.id, Data#data.mod, ResState);
+        false ->
+            ok
+    end,
    _ = maybe_clear_alarm(ResId),
    ok = emqx_metrics_worker:reset_metrics(?RES_METRICS, ResId),
-    ok.
+    Data#data{status = stopped}.

make_test_id() ->
-    RandId = iolist_to_binary(emqx_misc:gen_id(16)),
+    RandId = iolist_to_binary(emqx_utils:gen_id(16)),
    <<?TEST_ID_PREFIX, RandId/binary>>.

handle_manually_health_check(From, Data) ->
-    with_health_check(Data, fun(Status, UpdatedData) ->
-        Actions = [{reply, From, {ok, Status}}],
-        {next_state, Status, UpdatedData, Actions}
-    end).
+    with_health_check(
+        Data,
+        fun(Status, UpdatedData) ->
+            Actions = [{reply, From, {ok, Status}}],
+            {next_state, Status, UpdatedData, Actions}
+        end
+    ).
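Both the manual path above and the periodic state timeouts funnel into `with_health_check' below, which ultimately calls the connector's health-check callback. A hypothetical connector sketch showing the result shapes that `parse_health_check_result' accepts; the driver name and state shape are assumptions:

on_get_status(_ResId, #{pool_name := Pool} = State) ->
    case my_driver:ping(Pool) of
        pong -> connected;                        %% bare status
        {error, timeout} -> {connecting, State};  %% status + new state
        {error, Reason} -> {disconnected, State, Reason}
    end.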
handle_connecting_health_check(Data) ->
    with_health_check(
@@ -533,8 +509,7 @@
        (connected, UpdatedData) ->
            {next_state, connected, UpdatedData};
        (connecting, UpdatedData) ->
-            Actions = [{state_timeout, health_check_interval(Data#data.opts), health_check}],
-            {keep_state, UpdatedData, Actions};
+            {keep_state, UpdatedData, health_check_actions(UpdatedData)};
        (disconnected, UpdatedData) ->
            {next_state, disconnected, UpdatedData}
    end
@@ -545,10 +520,9 @@
handle_connected_health_check(Data) ->
    with_health_check(
        Data,
        fun
            (connected, UpdatedData) ->
-                Actions = [{state_timeout, health_check_interval(Data#data.opts), health_check}],
-                {keep_state, UpdatedData, Actions};
+                {keep_state, UpdatedData, health_check_actions(UpdatedData)};
            (Status, UpdatedData) ->
-                ?SLOG(error, #{
+                ?SLOG(warning, #{
                    msg => health_check_failed,
                    id => Data#data.id,
                    status => Status
@@ -557,53 +531,70 @@
    end
    ).

-with_health_check(Data, Func) ->
+with_health_check(#data{state = undefined} = Data, Func) ->
+    Func(disconnected, Data);
+with_health_check(#data{error = PrevError} = Data, Func) ->
    ResId = Data#data.id,
-    HCRes = emqx_resource:call_health_check(Data#data.manager_id, Data#data.mod, Data#data.state),
+    HCRes = emqx_resource:call_health_check(Data#data.id, Data#data.mod, Data#data.state),
    {Status, NewState, Err} = parse_health_check_result(HCRes, Data),
-    _ = maybe_alarm(Status, ResId),
-    ok = maybe_resume_resource_workers(Status),
+    _ = maybe_alarm(Status, ResId, Err, PrevError),
+    ok = maybe_resume_resource_workers(ResId, Status),
    UpdatedData = Data#data{
        state = NewState, status = Status, error = Err
    },
-    insert_cache(ResId, UpdatedData#data.group, UpdatedData),
-    Func(Status, UpdatedData).
+    Func(Status, update_state(UpdatedData, Data)).
+
+update_state(Data) ->
+    update_state(Data, undefined).
+
+update_state(DataWas, DataWas) ->
+    DataWas;
+update_state(Data, _DataWas) ->
+    _ = insert_cache(Data#data.id, Data),
+    Data.

health_check_interval(Opts) ->
    maps:get(health_check_interval, Opts, ?HEALTHCHECK_INTERVAL).

-maybe_alarm(connected, _ResId) ->
+maybe_alarm(connected, _ResId, _Error, _PrevError) ->
    ok;
-maybe_alarm(_Status, <<?TEST_ID_PREFIX, _/binary>>) ->
+maybe_alarm(_Status, <<?TEST_ID_PREFIX, _/binary>>, _Error, _PrevError) ->
    ok;
-maybe_alarm(_Status, ResId) ->
-    emqx_alarm:activate(
+%% Assume that alarm is already active
+maybe_alarm(_Status, _ResId, Error, Error) ->
+    ok;
+maybe_alarm(_Status, ResId, Error, _PrevError) ->
+    HrError =
+        case Error of
+            {error, undefined} -> <<"Unknown reason">>;
+            {error, Reason} -> emqx_utils:readable_error_msg(Reason)
+        end,
+    emqx_alarm:safe_activate(
        ResId,
        #{resource_id => ResId, reason => resource_down},
-        <<"resource down: ", ResId/binary>>
-    ).
+        <<"resource down: ", HrError/binary>>
+    ),
+    ?tp(resource_activate_alarm, #{resource_id => ResId}).

-maybe_resume_resource_workers(connected) ->
+maybe_resume_resource_workers(ResId, connected) ->
    lists:foreach(
-        fun({_, Pid, _, _}) ->
-            emqx_resource_worker:resume(Pid)
-        end,
-        supervisor:which_children(emqx_resource_worker_sup)
+        fun emqx_resource_buffer_worker:resume/1,
+        emqx_resource_buffer_worker_sup:worker_pids(ResId)
    );
-maybe_resume_resource_workers(_) ->
+maybe_resume_resource_workers(_, _) ->
    ok.

maybe_clear_alarm(<<?TEST_ID_PREFIX, _/binary>>) ->
    ok;
maybe_clear_alarm(ResId) ->
-    emqx_alarm:deactivate(ResId).
+    emqx_alarm:safe_deactivate(ResId).
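A small point about `update_state/2' above: its first clause matches only when the old and new records are the very same term, so the gproc cache write is skipped for no-op health checks. The trick in isolation:

maybe_store(Same, Same) -> skipped;          %% both arguments bind to one value
maybe_store(New, _Old) -> {stored, New}.     %% any real change falls through

%% maybe_store(a, a) -> skipped
%% maybe_store(b, a) -> {stored, b}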
parse_health_check_result(Status, Data) when ?IS_STATUS(Status) ->
-    {Status, Data#data.state, undefined};
+    {Status, Data#data.state, status_to_error(Status)};
parse_health_check_result({Status, NewState}, _Data) when ?IS_STATUS(Status) ->
-    {Status, NewState, undefined};
+    {Status, NewState, status_to_error(Status)};
parse_health_check_result({Status, NewState, Error}, _Data) when ?IS_STATUS(Status) ->
-    {Status, NewState, Error};
+    {Status, NewState, {error, Error}};
parse_health_check_result({error, Error}, Data) ->
    ?SLOG(
        error,
@@ -613,36 +604,47 @@
            reason => Error
        }
    ),
-    {disconnected, Data#data.state, Error}.
+    {disconnected, Data#data.state, {error, Error}}.
+
+status_to_error(connected) ->
+    undefined;
+status_to_error(_) ->
+    {error, undefined}.
+
+%% Compatibility
+external_error({error, Reason}) -> Reason;
+external_error(Other) -> Other.

maybe_reply(Actions, undefined, _Reply) ->
    Actions;
maybe_reply(Actions, From, Reply) ->
    [{reply, From, Reply} | Actions].

--spec data_record_to_external_map_with_metrics(data()) -> resource_data().
-data_record_to_external_map_with_metrics(Data) ->
+-spec data_record_to_external_map(data()) -> resource_data().
+data_record_to_external_map(Data) ->
    #{
        id => Data#data.id,
+        error => external_error(Data#data.error),
        mod => Data#data.mod,
        callback_mode => Data#data.callback_mode,
        query_mode => Data#data.query_mode,
        config => Data#data.config,
        status => Data#data.status,
-        state => Data#data.state,
-        metrics => get_metrics(Data#data.id)
+        state => Data#data.state
    }.

--spec wait_for_ready(resource_id(), integer()) -> ok | timeout.
+-spec wait_for_ready(resource_id(), integer()) -> ok | timeout | {error, term()}.
wait_for_ready(ResId, WaitTime) ->
    do_wait_for_ready(ResId, WaitTime div ?WAIT_FOR_RESOURCE_DELAY).

do_wait_for_ready(_ResId, 0) ->
    timeout;
do_wait_for_ready(ResId, Retry) ->
-    case ets_lookup(ResId) of
-        {ok, _Group, #{status := connected}} ->
+    case try_read_cache(ResId) of
+        #data{status = connected} ->
            ok;
+        #data{status = disconnected, error = Err} ->
+            {error, external_error(Err)};
        _ ->
            timer:sleep(?WAIT_FOR_RESOURCE_DELAY),
            do_wait_for_ready(ResId, Retry - 1)
@@ -650,10 +652,7 @@

safe_call(ResId, Message, Timeout) ->
    try
-        Module = atom_to_binary(?MODULE),
-        MgrId = get_owner(ResId),
-        ProcName = binary_to_existing_atom(<<Module/binary, "_", MgrId/binary>>, utf8),
-        gen_statem:call(ProcName, Message, {clean_timeout, Timeout})
+        gen_statem:call(?REF(ResId), Message, {clean_timeout, Timeout})
    catch
        error:badarg ->
            {error, not_found};
diff --git a/apps/emqx_resource/src/emqx_resource_manager_sup.erl b/apps/emqx_resource/src/emqx_resource_manager_sup.erl
index 5b731d6cf..2f442cd56 100644
--- a/apps/emqx_resource/src/emqx_resource_manager_sup.erl
+++ b/apps/emqx_resource/src/emqx_resource_manager_sup.erl
@@ -17,23 +17,20 @@
-behaviour(supervisor).

--export([ensure_child/6]).
+-export([ensure_child/5]).

-export([start_link/0]).

-export([init/1]).

-ensure_child(MgrId, ResId, Group, ResourceType, Config, Opts) ->
-    _ = supervisor:start_child(?MODULE, [MgrId, ResId, Group, ResourceType, Config, Opts]),
+ensure_child(ResId, Group, ResourceType, Config, Opts) ->
+    _ = supervisor:start_child(?MODULE, [ResId, Group, ResourceType, Config, Opts]),
    ok.

start_link() ->
    supervisor:start_link({local, ?MODULE}, ?MODULE, []).
init([]) -> - TabOpts = [named_table, set, public, {read_concurrency, true}], - _ = ets:new(emqx_resource_manager, TabOpts), - ChildSpecs = [ #{ id => emqx_resource_manager, @@ -44,6 +41,5 @@ init([]) -> modules => [emqx_resource_manager] } ], - SupFlags = #{strategy => simple_one_for_one, intensity => 10, period => 10}, {ok, {SupFlags, ChildSpecs}}. diff --git a/apps/emqx_resource/src/emqx_resource_metrics.erl b/apps/emqx_resource/src/emqx_resource_metrics.erl index ff764ab3c..df28d893b 100644 --- a/apps/emqx_resource/src/emqx_resource_metrics.erl +++ b/apps/emqx_resource/src/emqx_resource_metrics.erl @@ -24,9 +24,6 @@ ]). -export([ - batching_set/3, - batching_shift/3, - batching_get/1, inflight_set/3, inflight_get/1, queuing_set/3, @@ -37,12 +34,12 @@ dropped_other_inc/1, dropped_other_inc/2, dropped_other_get/1, + dropped_expired_inc/1, + dropped_expired_inc/2, + dropped_expired_get/1, dropped_queue_full_inc/1, dropped_queue_full_inc/2, dropped_queue_full_get/1, - dropped_queue_not_enabled_inc/1, - dropped_queue_not_enabled_inc/2, - dropped_queue_not_enabled_get/1, dropped_resource_not_found_inc/1, dropped_resource_not_found_inc/2, dropped_resource_not_found_get/1, @@ -52,9 +49,15 @@ failed_inc/1, failed_inc/2, failed_get/1, + late_reply_inc/1, + late_reply_inc/2, + late_reply_get/1, matched_inc/1, matched_inc/2, matched_get/1, + received_inc/1, + received_inc/2, + received_get/1, retried_inc/1, retried_inc/2, retried_get/1, @@ -77,16 +80,17 @@ events() -> [ [?TELEMETRY_PREFIX, Event] || Event <- [ - batching, dropped_other, + dropped_expired, dropped_queue_full, - dropped_queue_not_enabled, dropped_resource_not_found, dropped_resource_stopped, + late_reply, failed, inflight, matched, queuing, + received, retried_failed, retried_success, success @@ -118,22 +122,26 @@ handle_telemetry_event( dropped_other -> emqx_metrics_worker:inc(?RES_METRICS, ID, 'dropped', Val), emqx_metrics_worker:inc(?RES_METRICS, ID, 'dropped.other', Val); + dropped_expired -> + emqx_metrics_worker:inc(?RES_METRICS, ID, 'dropped', Val), + emqx_metrics_worker:inc(?RES_METRICS, ID, 'dropped.expired', Val); dropped_queue_full -> emqx_metrics_worker:inc(?RES_METRICS, ID, 'dropped', Val), emqx_metrics_worker:inc(?RES_METRICS, ID, 'dropped.queue_full', Val); - dropped_queue_not_enabled -> - emqx_metrics_worker:inc(?RES_METRICS, ID, 'dropped', Val), - emqx_metrics_worker:inc(?RES_METRICS, ID, 'dropped.queue_not_enabled', Val); dropped_resource_not_found -> emqx_metrics_worker:inc(?RES_METRICS, ID, 'dropped', Val), emqx_metrics_worker:inc(?RES_METRICS, ID, 'dropped.resource_not_found', Val); dropped_resource_stopped -> emqx_metrics_worker:inc(?RES_METRICS, ID, 'dropped', Val), emqx_metrics_worker:inc(?RES_METRICS, ID, 'dropped.resource_stopped', Val); + late_reply -> + emqx_metrics_worker:inc(?RES_METRICS, ID, 'late_reply', Val); failed -> emqx_metrics_worker:inc(?RES_METRICS, ID, 'failed', Val); matched -> emqx_metrics_worker:inc(?RES_METRICS, ID, 'matched', Val); + received -> + emqx_metrics_worker:inc(?RES_METRICS, ID, 'received', Val); retried_failed -> emqx_metrics_worker:inc(?RES_METRICS, ID, 'retried', Val), emqx_metrics_worker:inc(?RES_METRICS, ID, 'failed', Val), @@ -154,8 +162,6 @@ handle_telemetry_event( _HandlerConfig ) -> case Event of - batching -> - emqx_metrics_worker:set_gauge(?RES_METRICS, ID, WorkerID, 'batching', Val); inflight -> emqx_metrics_worker:set_gauge(?RES_METRICS, ID, WorkerID, 'inflight', Val); queuing -> @@ -163,45 +169,12 @@ handle_telemetry_event( _ -> ok end; -handle_telemetry_event( - 
[?TELEMETRY_PREFIX, Event], - _Measurements = #{gauge_shift := Val}, - _Metadata = #{resource_id := ID, worker_id := WorkerID}, - _HandlerConfig -) -> - case Event of - batching -> - emqx_metrics_worker:shift_gauge(?RES_METRICS, ID, WorkerID, 'batching', Val); - _ -> - ok - end; handle_telemetry_event(_EventName, _Measurements, _Metadata, _HandlerConfig) -> ok. %% Gauges (value can go both up and down): %% -------------------------------------- -%% @doc Count of messages that are currently accumulated in memory waiting for -%% being sent in one batch -batching_set(ID, WorkerID, Val) -> - telemetry:execute( - [?TELEMETRY_PREFIX, batching], - #{gauge_set => Val}, - #{resource_id => ID, worker_id => WorkerID} - ). - -batching_shift(_ID, _WorkerID = undefined, _Val) -> - ok; -batching_shift(ID, WorkerID, Val) -> - telemetry:execute( - [?TELEMETRY_PREFIX, batching], - #{gauge_shift => Val}, - #{resource_id => ID, worker_id => WorkerID} - ). - -batching_get(ID) -> - emqx_metrics_worker:get_gauge(?RES_METRICS, ID, 'batching'). - %% @doc Count of batches of messages that are currently %% queuing. [Gauge] queuing_set(ID, WorkerID, Val) -> @@ -233,6 +206,8 @@ inflight_get(ID) -> dropped_inc(ID) -> dropped_inc(ID, 1). +dropped_inc(_ID, 0) -> + ok; dropped_inc(ID, Val) -> telemetry:execute([?TELEMETRY_PREFIX, dropped], #{counter_inc => Val}, #{resource_id => ID}). @@ -243,6 +218,8 @@ dropped_get(ID) -> dropped_other_inc(ID) -> dropped_other_inc(ID, 1). +dropped_other_inc(_ID, 0) -> + ok; dropped_other_inc(ID, Val) -> telemetry:execute([?TELEMETRY_PREFIX, dropped_other], #{counter_inc => Val}, #{ resource_id => ID @@ -251,10 +228,40 @@ dropped_other_inc(ID, Val) -> dropped_other_get(ID) -> emqx_metrics_worker:get(?RES_METRICS, ID, 'dropped.other'). +%% @doc Count of messages dropped due to being expired before being sent. +dropped_expired_inc(ID) -> + dropped_expired_inc(ID, 1). + +dropped_expired_inc(_ID, 0) -> + ok; +dropped_expired_inc(ID, Val) -> + telemetry:execute([?TELEMETRY_PREFIX, dropped_expired], #{counter_inc => Val}, #{ + resource_id => ID + }). + +dropped_expired_get(ID) -> + emqx_metrics_worker:get(?RES_METRICS, ID, 'dropped.expired'). + +%% @doc Count of messages that were sent but received a late reply. +late_reply_inc(ID) -> + late_reply_inc(ID, 1). + +late_reply_inc(_ID, 0) -> + ok; +late_reply_inc(ID, Val) -> + telemetry:execute([?TELEMETRY_PREFIX, late_reply], #{counter_inc => Val}, #{ + resource_id => ID + }). + +late_reply_get(ID) -> + emqx_metrics_worker:get(?RES_METRICS, ID, 'late_reply'). + %% @doc Count of messages dropped because the queue was full dropped_queue_full_inc(ID) -> dropped_queue_full_inc(ID, 1). +dropped_queue_full_inc(_ID, 0) -> + ok; dropped_queue_full_inc(ID, Val) -> telemetry:execute([?TELEMETRY_PREFIX, dropped_queue_full], #{counter_inc => Val}, #{ resource_id => ID @@ -263,22 +270,12 @@ dropped_queue_full_inc(ID, Val) -> dropped_queue_full_get(ID) -> emqx_metrics_worker:get(?RES_METRICS, ID, 'dropped.queue_full'). -%% @doc Count of messages dropped because the queue was not enabled -dropped_queue_not_enabled_inc(ID) -> - dropped_queue_not_enabled_inc(ID, 1). - -dropped_queue_not_enabled_inc(ID, Val) -> - telemetry:execute([?TELEMETRY_PREFIX, dropped_queue_not_enabled], #{counter_inc => Val}, #{ - resource_id => ID - }). - -dropped_queue_not_enabled_get(ID) -> - emqx_metrics_worker:get(?RES_METRICS, ID, 'dropped.queue_not_enabled'). 
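For orientation amid these hunks: every `*_inc' helper in this module reduces to a telemetry:execute/3 call carrying a counter_inc measurement, which the handler above folds into one or two metric counters. A hypothetical emission for the new dropped.expired counter; the resource id is made up:

%% what dropped_expired_inc(Id, 3) boils down to
telemetry:execute(
    [?TELEMETRY_PREFIX, dropped_expired],
    #{counter_inc => 3},
    #{resource_id => <<"bridge:demo:1">>}
).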
- %% @doc Count of messages dropped because the resource was not found dropped_resource_not_found_inc(ID) -> dropped_resource_not_found_inc(ID, 1). +dropped_resource_not_found_inc(_ID, 0) -> + ok; dropped_resource_not_found_inc(ID, Val) -> telemetry:execute([?TELEMETRY_PREFIX, dropped_resource_not_found], #{counter_inc => Val}, #{ resource_id => ID @@ -291,6 +288,8 @@ dropped_resource_not_found_get(ID) -> dropped_resource_stopped_inc(ID) -> dropped_resource_stopped_inc(ID, 1). +dropped_resource_stopped_inc(_ID, 0) -> + ok; dropped_resource_stopped_inc(ID, Val) -> telemetry:execute([?TELEMETRY_PREFIX, dropped_resource_stopped], #{counter_inc => Val}, #{ resource_id => ID @@ -303,16 +302,32 @@ dropped_resource_stopped_get(ID) -> matched_inc(ID) -> matched_inc(ID, 1). +matched_inc(_ID, 0) -> + ok; matched_inc(ID, Val) -> telemetry:execute([?TELEMETRY_PREFIX, matched], #{counter_inc => Val}, #{resource_id => ID}). matched_get(ID) -> emqx_metrics_worker:get(?RES_METRICS, ID, 'matched'). +%% @doc The number of messages that have been received from a bridge +received_inc(ID) -> + received_inc(ID, 1). + +received_inc(_ID, 0) -> + ok; +received_inc(ID, Val) -> + telemetry:execute([?TELEMETRY_PREFIX, received], #{counter_inc => Val}, #{resource_id => ID}). + +received_get(ID) -> + emqx_metrics_worker:get(?RES_METRICS, ID, 'received'). + %% @doc The number of times message sends have been retried retried_inc(ID) -> retried_inc(ID, 1). +retried_inc(_ID, 0) -> + ok; retried_inc(ID, Val) -> telemetry:execute([?TELEMETRY_PREFIX, retried], #{counter_inc => Val}, #{resource_id => ID}). @@ -323,6 +338,8 @@ retried_get(ID) -> failed_inc(ID) -> failed_inc(ID, 1). +failed_inc(_ID, 0) -> + ok; failed_inc(ID, Val) -> telemetry:execute([?TELEMETRY_PREFIX, failed], #{counter_inc => Val}, #{resource_id => ID}). @@ -333,6 +350,8 @@ failed_get(ID) -> retried_failed_inc(ID) -> retried_failed_inc(ID, 1). +retried_failed_inc(_ID, 0) -> + ok; retried_failed_inc(ID, Val) -> telemetry:execute([?TELEMETRY_PREFIX, retried_failed], #{counter_inc => Val}, #{ resource_id => ID @@ -345,6 +364,8 @@ retried_failed_get(ID) -> retried_success_inc(ID) -> retried_success_inc(ID, 1). +retried_success_inc(_ID, 0) -> + ok; retried_success_inc(ID, Val) -> telemetry:execute([?TELEMETRY_PREFIX, retried_success], #{counter_inc => Val}, #{ resource_id => ID @@ -357,6 +378,8 @@ retried_success_get(ID) -> success_inc(ID) -> success_inc(ID, 1). +success_inc(_ID, 0) -> + ok; success_inc(ID, Val) -> telemetry:execute([?TELEMETRY_PREFIX, success], #{counter_inc => Val}, #{resource_id => ID}). diff --git a/apps/emqx_plugin_libs/src/emqx_plugin_libs_pool.erl b/apps/emqx_resource/src/emqx_resource_pool.erl similarity index 77% rename from apps/emqx_plugin_libs/src/emqx_plugin_libs_pool.erl rename to apps/emqx_resource/src/emqx_resource_pool.erl index 289d39032..913b29c86 100644 --- a/apps/emqx_plugin_libs/src/emqx_plugin_libs_pool.erl +++ b/apps/emqx_resource/src/emqx_resource_pool.erl @@ -14,31 +14,27 @@ %% limitations under the License. %%-------------------------------------------------------------------- --module(emqx_plugin_libs_pool). +-module(emqx_resource_pool). -export([ - start_pool/3, - stop_pool/1, - pool_name/1, - health_check_ecpool_workers/2, - health_check_ecpool_workers/3 + start/3, + stop/1, + health_check_workers/2, + health_check_workers/3 ]). -include_lib("emqx/include/logger.hrl"). -define(HEALTH_CHECK_TIMEOUT, 15000). -pool_name(ID) when is_binary(ID) -> - list_to_atom(binary_to_list(ID)). 
- -start_pool(Name, Mod, Options) -> +start(Name, Mod, Options) -> case ecpool:start_sup_pool(Name, Mod, Options) of {ok, _} -> ?SLOG(info, #{msg => "start_ecpool_ok", pool_name => Name}), ok; {error, {already_started, _Pid}} -> - stop_pool(Name), - start_pool(Name, Mod, Options); + stop(Name), + start(Name, Mod, Options); {error, Reason} -> NReason = parse_reason(Reason), ?SLOG(error, #{ @@ -49,7 +45,7 @@ start_pool(Name, Mod, Options) -> {error, {start_pool_failed, Name, NReason}} end. -stop_pool(Name) -> +stop(Name) -> case ecpool:stop_sup_pool(Name) of ok -> ?SLOG(info, #{msg => "stop_ecpool_ok", pool_name => Name}); @@ -64,21 +60,22 @@ stop_pool(Name) -> error({stop_pool_failed, Name, Reason}) end. -health_check_ecpool_workers(PoolName, CheckFunc) -> - health_check_ecpool_workers(PoolName, CheckFunc, ?HEALTH_CHECK_TIMEOUT). +health_check_workers(PoolName, CheckFunc) -> + health_check_workers(PoolName, CheckFunc, ?HEALTH_CHECK_TIMEOUT). -health_check_ecpool_workers(PoolName, CheckFunc, Timeout) when is_function(CheckFunc) -> +health_check_workers(PoolName, CheckFunc, Timeout) -> Workers = [Worker || {_WorkerName, Worker} <- ecpool:workers(PoolName)], DoPerWorker = fun(Worker) -> case ecpool_worker:client(Worker) of {ok, Conn} -> - erlang:is_process_alive(Conn) andalso CheckFunc(Conn); + erlang:is_process_alive(Conn) andalso + ecpool_worker:exec(Worker, CheckFunc, Timeout); _ -> false end end, - try emqx_misc:pmap(DoPerWorker, Workers, Timeout) of + try emqx_utils:pmap(DoPerWorker, Workers, Timeout) of [_ | _] = Status -> lists:all(fun(St) -> St =:= true end, Status); [] -> diff --git a/apps/emqx_resource/src/emqx_resource_sup.erl b/apps/emqx_resource/src/emqx_resource_sup.erl index ea31b8b6b..4d9abb03d 100644 --- a/apps/emqx_resource/src/emqx_resource_sup.erl +++ b/apps/emqx_resource/src/emqx_resource_sup.erl @@ -39,8 +39,8 @@ init([]) -> modules => [emqx_resource_manager_sup] }, WorkerSup = #{ - id => emqx_resource_worker_sup, - start => {emqx_resource_worker_sup, start_link, []}, + id => emqx_resource_buffer_worker_sup, + start => {emqx_resource_buffer_worker_sup, start_link, []}, restart => permanent, shutdown => infinity, type => supervisor diff --git a/apps/emqx_resource/src/emqx_resource_worker.erl b/apps/emqx_resource/src/emqx_resource_worker.erl deleted file mode 100644 index 5f7cdf7e0..000000000 --- a/apps/emqx_resource/src/emqx_resource_worker.erl +++ /dev/null @@ -1,758 +0,0 @@ -%%-------------------------------------------------------------------- -%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%%-------------------------------------------------------------------- - -%% This module implements async message sending, disk message queuing, -%% and message batching using ReplayQ. - --module(emqx_resource_worker). - --include("emqx_resource.hrl"). --include("emqx_resource_utils.hrl"). --include("emqx_resource_errors.hrl"). --include_lib("emqx/include/logger.hrl"). 
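The key behavioral change in health_check_workers/3 above is that the check function now runs inside the pool worker process via ecpool_worker:exec/3 (bounded by Timeout), rather than being applied directly to the client pid in the caller. A hedged sketch of how a connector might use the renamed helper; the module, pool name, and probe are made up:

-module(my_connector_status).
-export([on_get_status/2]).

%% do_check/1 receives each worker's client connection and executes
%% inside the ecpool worker process.
on_get_status(_InstId, #{pool_name := PoolName}) ->
    case emqx_resource_pool:health_check_workers(PoolName, fun do_check/1) of
        true -> connected;
        false -> disconnected
    end.

do_check(Conn) ->
    %% hypothetical liveness probe on the connection process
    is_pid(Conn) andalso erlang:is_process_alive(Conn).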
--include_lib("snabbkaffe/include/snabbkaffe.hrl"). - --behaviour(gen_statem). - --export([ - start_link/3, - sync_query/3, - async_query/3, - block/1, - block/2, - resume/1 -]). - --export([ - simple_sync_query/2, - simple_async_query/3 -]). - --export([ - callback_mode/0, - init/1, - terminate/2, - code_change/3 -]). - --export([running/3, blocked/3]). - --export([queue_item_marshaller/1, estimate_size/1]). - --export([reply_after_query/7, batch_reply_after_query/7]). - --define(Q_ITEM(REQUEST), {q_item, REQUEST}). - --define(QUERY(FROM, REQUEST, SENT), {query, FROM, REQUEST, SENT}). --define(REPLY(FROM, REQUEST, SENT, RESULT), {reply, FROM, REQUEST, SENT, RESULT}). --define(EXPAND(RESULT, BATCH), [ - ?REPLY(FROM, REQUEST, SENT, RESULT) - || ?QUERY(FROM, REQUEST, SENT) <- BATCH -]). - --type id() :: binary(). --type query() :: {query, from(), request()}. --type request() :: term(). --type from() :: pid() | reply_fun(). - --callback batcher_flush(Acc :: [{from(), request()}], CbState :: term()) -> - {{from(), result()}, NewCbState :: term()}. - -callback_mode() -> [state_functions, state_enter]. - -start_link(Id, Index, Opts) -> - gen_statem:start_link({local, name(Id, Index)}, ?MODULE, {Id, Index, Opts}, []). - --spec sync_query(id(), request(), query_opts()) -> Result :: term(). -sync_query(Id, Request, Opts) -> - PickKey = maps:get(pick_key, Opts, self()), - Timeout = maps:get(timeout, Opts, infinity), - pick_call(Id, PickKey, {query, Request, Opts}, Timeout). - --spec async_query(id(), request(), query_opts()) -> Result :: term(). -async_query(Id, Request, Opts) -> - PickKey = maps:get(pick_key, Opts, self()), - pick_cast(Id, PickKey, {query, Request, Opts}). - -%% simple query the resource without batching and queuing messages. --spec simple_sync_query(id(), request()) -> Result :: term(). -simple_sync_query(Id, Request) -> - %% Note: since calling this function implies in bypassing the - %% buffer workers, and each buffer worker index is used when - %% collecting gauge metrics, we use this dummy index. If this - %% call ends up calling buffering functions, that's a bug and - %% would mess up the metrics anyway. `undefined' is ignored by - %% `emqx_resource_metrics:*_shift/3'. - Index = undefined, - Result = call_query(sync, Id, Index, ?QUERY(self(), Request, false), #{}), - _ = handle_query_result(Id, Result, false, false), - Result. - --spec simple_async_query(id(), request(), reply_fun()) -> Result :: term(). -simple_async_query(Id, Request, ReplyFun) -> - %% Note: since calling this function implies in bypassing the - %% buffer workers, and each buffer worker index is used when - %% collecting gauge metrics, we use this dummy index. If this - %% call ends up calling buffering functions, that's a bug and - %% would mess up the metrics anyway. `undefined' is ignored by - %% `emqx_resource_metrics:*_shift/3'. - Index = undefined, - Result = call_query(async, Id, Index, ?QUERY(ReplyFun, Request, false), #{}), - _ = handle_query_result(Id, Result, false, false), - Result. - --spec block(pid() | atom()) -> ok. -block(ServerRef) -> - gen_statem:cast(ServerRef, block). - --spec block(pid() | atom(), [query()]) -> ok. -block(ServerRef, Query) -> - gen_statem:cast(ServerRef, {block, Query}). - --spec resume(pid() | atom()) -> ok. -resume(ServerRef) -> - gen_statem:cast(ServerRef, resume). 
- -init({Id, Index, Opts}) -> - process_flag(trap_exit, true), - true = gproc_pool:connect_worker(Id, {Id, Index}), - Name = name(Id, Index), - BatchSize = maps:get(batch_size, Opts, ?DEFAULT_BATCH_SIZE), - SegBytes0 = maps:get(queue_seg_bytes, Opts, ?DEFAULT_QUEUE_SEG_SIZE), - TotalBytes = maps:get(max_queue_bytes, Opts, ?DEFAULT_QUEUE_SIZE), - SegBytes = min(SegBytes0, TotalBytes), - Queue = - case maps:get(enable_queue, Opts, false) of - true -> - replayq:open(#{ - dir => disk_queue_dir(Id, Index), - seg_bytes => SegBytes, - max_total_bytes => TotalBytes, - sizer => fun ?MODULE:estimate_size/1, - marshaller => fun ?MODULE:queue_item_marshaller/1 - }); - false -> - undefined - end, - emqx_resource_metrics:queuing_set(Id, Index, queue_count(Queue)), - emqx_resource_metrics:batching_set(Id, Index, 0), - emqx_resource_metrics:inflight_set(Id, Index, 0), - InfltWinSZ = maps:get(async_inflight_window, Opts, ?DEFAULT_INFLIGHT), - ok = inflight_new(Name, InfltWinSZ, Id, Index), - HCItvl = maps:get(health_check_interval, Opts, ?HEALTHCHECK_INTERVAL), - St = #{ - id => Id, - index => Index, - name => Name, - enable_batch => maps:get(enable_batch, Opts, false), - batch_size => BatchSize, - batch_time => maps:get(batch_time, Opts, ?DEFAULT_BATCH_TIME), - queue => Queue, - resume_interval => maps:get(resume_interval, Opts, HCItvl), - acc => [], - acc_left => BatchSize, - tref => undefined - }, - {ok, blocked, St, {next_event, cast, resume}}. - -running(enter, _, _St) -> - keep_state_and_data; -running(cast, resume, _St) -> - keep_state_and_data; -running(cast, block, St) -> - {next_state, blocked, St}; -running( - cast, {block, [?QUERY(_, _, _) | _] = Batch}, #{id := Id, index := Index, queue := Q} = St -) when - is_list(Batch) --> - Q1 = maybe_append_queue(Id, Index, Q, [?Q_ITEM(Query) || Query <- Batch]), - {next_state, blocked, St#{queue := Q1}}; -running({call, From}, {query, Request, _Opts}, St) -> - query_or_acc(From, Request, St); -running(cast, {query, Request, Opts}, St) -> - ReplyFun = maps:get(async_reply_fun, Opts, undefined), - query_or_acc(ReplyFun, Request, St); -running(info, {flush, Ref}, St = #{tref := {_TRef, Ref}}) -> - flush(St#{tref := undefined}); -running(info, {flush, _Ref}, _St) -> - keep_state_and_data; -running(info, Info, _St) -> - ?SLOG(error, #{msg => unexpected_msg, info => Info}), - keep_state_and_data. 
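The deleted init/1 opens the on-disk buffer with a custom sizer and marshaller. A self-contained sketch of the same replayq usage, with a made-up directory and sizes; the wrapping tuple mirrors the deleted queue_item_marshaller/1:

-module(replayq_sketch).
-export([open/0, push/2]).

%% Items are wrapped and serialized with the external term format.
marshaller({q_item, _} = Item) -> term_to_binary(Item);
marshaller(Bin) when is_binary(Bin) -> binary_to_term(Bin).

estimate_size(Item) -> size(marshaller(Item)).

open() ->
    replayq:open(#{
        dir => "/tmp/replayq_sketch",
        seg_bytes => 10 * 1024 * 1024,
        max_total_bytes => 100 * 1024 * 1024,
        sizer => fun estimate_size/1,
        marshaller => fun marshaller/1
    }).

%% Appending returns the updated queue handle.
push(Q, Item) ->
    replayq:append(Q, [{q_item, Item}]).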
- -blocked(enter, _, #{resume_interval := ResumeT} = _St) -> - {keep_state_and_data, {state_timeout, ResumeT, resume}}; -blocked(cast, block, _St) -> - keep_state_and_data; -blocked( - cast, {block, [?QUERY(_, _, _) | _] = Batch}, #{id := Id, index := Index, queue := Q} = St -) when - is_list(Batch) --> - Q1 = maybe_append_queue(Id, Index, Q, [?Q_ITEM(Query) || Query <- Batch]), - {keep_state, St#{queue := Q1}}; -blocked(cast, resume, St) -> - do_resume(St); -blocked(state_timeout, resume, St) -> - do_resume(St); -blocked({call, From}, {query, Request, _Opts}, #{id := Id, index := Index, queue := Q} = St) -> - Error = ?RESOURCE_ERROR(blocked, "resource is blocked"), - _ = reply_caller(Id, ?REPLY(From, Request, false, Error)), - {keep_state, St#{ - queue := maybe_append_queue(Id, Index, Q, [?Q_ITEM(?QUERY(From, Request, false))]) - }}; -blocked(cast, {query, Request, Opts}, #{id := Id, index := Index, queue := Q} = St) -> - ReplyFun = maps:get(async_reply_fun, Opts, undefined), - Error = ?RESOURCE_ERROR(blocked, "resource is blocked"), - _ = reply_caller(Id, ?REPLY(ReplyFun, Request, false, Error)), - {keep_state, St#{ - queue := maybe_append_queue(Id, Index, Q, [?Q_ITEM(?QUERY(ReplyFun, Request, false))]) - }}. - -terminate(_Reason, #{id := Id, index := Index, queue := Q}) -> - GaugeFns = - [ - fun emqx_resource_metrics:batching_set/3, - fun emqx_resource_metrics:inflight_set/3 - ], - lists:foreach(fun(Fn) -> Fn(Id, Index, 0) end, GaugeFns), - emqx_resource_metrics:queuing_set(Id, Index, queue_count(Q)), - gproc_pool:disconnect_worker(Id, {Id, Index}). - -code_change(_OldVsn, State, _Extra) -> - {ok, State}. - -%%============================================================================== --define(PICK(ID, KEY, EXPR), - try gproc_pool:pick_worker(ID, KEY) of - Pid when is_pid(Pid) -> - EXPR; - _ -> - ?RESOURCE_ERROR(worker_not_created, "resource not created") - catch - error:badarg -> - ?RESOURCE_ERROR(worker_not_created, "resource not created"); - exit:{timeout, _} -> - ?RESOURCE_ERROR(timeout, "call resource timeout") - end -). - -pick_call(Id, Key, Query, Timeout) -> - ?PICK(Id, Key, gen_statem:call(Pid, Query, {clean_timeout, Timeout})). - -pick_cast(Id, Key, Query) -> - ?PICK(Id, Key, gen_statem:cast(Pid, Query)). - -do_resume(#{id := Id, name := Name} = St) -> - case inflight_get_first(Name) of - empty -> - retry_queue(St); - {Ref, FirstQuery} -> - %% We retry msgs in inflight window sync, as if we send them - %% async, they will be appended to the end of inflight window again. - retry_inflight_sync(Id, Ref, FirstQuery, Name, St) - end. 
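The worker's two-state design, where entering blocked arms a state_timeout that periodically attempts to resume, is a standard gen_statem idiom. A minimal runnable sketch with hypothetical names:

-module(two_state_sketch).
-behaviour(gen_statem).
-export([start_link/0, callback_mode/0, init/1]).
-export([running/3, blocked/3]).

-define(RESUME_INTERVAL, 300).

start_link() -> gen_statem:start_link(?MODULE, [], []).

callback_mode() -> [state_functions, state_enter].

init([]) -> {ok, running, #{}}.

running(enter, _OldState, _Data) -> keep_state_and_data;
running(cast, block, Data) -> {next_state, blocked, Data}.

%% Entering blocked arms the timer; when it fires we try to resume.
blocked(enter, _OldState, _Data) ->
    {keep_state_and_data, {state_timeout, ?RESUME_INTERVAL, resume}};
blocked(state_timeout, resume, Data) ->
    %% a real worker would first retry inflight and queued requests here
    {next_state, running, Data}.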
- -retry_queue(#{queue := undefined} = St) -> - {next_state, running, St}; -retry_queue( - #{ - queue := Q, - id := Id, - index := Index, - enable_batch := false, - resume_interval := ResumeT - } = St -) -> - case get_first_n_from_queue(Q, 1) of - [] -> - {next_state, running, St}; - [?QUERY(_, Request, HasSent) = Query] -> - QueryOpts = #{inflight_name => maps:get(name, St)}, - Result = call_query(configured, Id, Index, Query, QueryOpts), - case reply_caller(Id, ?REPLY(undefined, Request, HasSent, Result)) of - true -> - {keep_state, St, {state_timeout, ResumeT, resume}}; - false -> - retry_queue(St#{queue := drop_head(Q, Id, Index)}) - end - end; -retry_queue( - #{ - queue := Q, - id := Id, - index := Index, - enable_batch := true, - batch_size := BatchSize, - resume_interval := ResumeT - } = St -) -> - case get_first_n_from_queue(Q, BatchSize) of - [] -> - {next_state, running, St}; - Batch0 -> - QueryOpts = #{inflight_name => maps:get(name, St)}, - Result = call_query(configured, Id, Index, Batch0, QueryOpts), - %% The caller has been replied with ?RESOURCE_ERROR(blocked, _) before saving into the queue, - %% we now change the 'from' field to 'undefined' so it will not reply the caller again. - Batch = [?QUERY(undefined, Request, HasSent) || ?QUERY(_, Request, HasSent) <- Batch0], - case batch_reply_caller(Id, Result, Batch) of - true -> - {keep_state, St, {state_timeout, ResumeT, resume}}; - false -> - retry_queue(St#{queue := drop_first_n_from_queue(Q, length(Batch), Id, Index)}) - end - end. - -retry_inflight_sync( - Id, - Ref, - ?QUERY(_, _, HasSent) = Query, - Name, - #{index := Index, resume_interval := ResumeT} = St0 -) -> - Result = call_query(sync, Id, Index, Query, #{}), - case handle_query_result(Id, Result, HasSent, false) of - %% Send failed because resource down - true -> - {keep_state, St0, {state_timeout, ResumeT, resume}}; - %% Send ok or failed but the resource is working - false -> - inflight_drop(Name, Ref, Id, Index), - do_resume(St0) - end. - -query_or_acc( - From, - Request, - #{ - enable_batch := true, - acc := Acc, - acc_left := Left, - index := Index, - id := Id - } = St0 -) -> - Acc1 = [?QUERY(From, Request, false) | Acc], - emqx_resource_metrics:batching_shift(Id, Index, 1), - St = St0#{acc := Acc1, acc_left := Left - 1}, - case Left =< 1 of - true -> flush(St); - false -> {keep_state, ensure_flush_timer(St)} - end; -query_or_acc(From, Request, #{enable_batch := false, queue := Q, id := Id, index := Index} = St) -> - QueryOpts = #{ - inflight_name => maps:get(name, St) - }, - Result = call_query(configured, Id, Index, ?QUERY(From, Request, false), QueryOpts), - case reply_caller(Id, ?REPLY(From, Request, false, Result)) of - true -> - Query = ?QUERY(From, Request, false), - {next_state, blocked, St#{queue := maybe_append_queue(Id, Index, Q, [?Q_ITEM(Query)])}}; - false -> - {keep_state, St} - end. - -flush(#{acc := []} = St) -> - {keep_state, St}; -flush( - #{ - id := Id, - index := Index, - acc := Batch0, - batch_size := Size, - queue := Q0 - } = St -) -> - Batch = lists:reverse(Batch0), - QueryOpts = #{ - inflight_name => maps:get(name, St) - }, - emqx_resource_metrics:batching_shift(Id, Index, -length(Batch)), - Result = call_query(configured, Id, Index, Batch, QueryOpts), - St1 = cancel_flush_timer(St#{acc_left := Size, acc := []}), - case batch_reply_caller(Id, Result, Batch) of - true -> - Q1 = maybe_append_queue(Id, Index, Q0, [?Q_ITEM(Query) || Query <- Batch]), - {next_state, blocked, St1#{queue := Q1}}; - false -> - {keep_state, St1} - end. 
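query_or_acc/flush above implement count-triggered batching: requests are prepended to an accumulator and flushed once batch_size is reached or the flush timer fires, with a reverse restoring arrival order. A tiny sketch of just that accumulation logic, detached from the worker:

-module(batch_acc_sketch).
-export([new/1, add/2]).

new(BatchSize) ->
    #{acc => [], left => BatchSize, size => BatchSize}.

%% Prepend for O(1) insertion; flushing reverses so the batch keeps
%% arrival order, exactly as the deleted flush/1 does.
add(Req, #{acc := Acc, left := Left, size := Size} = St) when Left =< 1 ->
    Batch = lists:reverse([Req | Acc]),
    {flush, Batch, St#{acc := [], left := Size}};
add(Req, #{acc := Acc, left := Left} = St) ->
    {ok, St#{acc := [Req | Acc], left := Left - 1}}.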
- -batch_reply_caller(Id, BatchResult, Batch) -> - lists:foldl( - fun(Reply, BlockWorker) -> - reply_caller(Id, Reply, BlockWorker) - end, - false, - %% the `Mod:on_batch_query/3` returns a single result for a batch, - %% so we need to expand - ?EXPAND(BatchResult, Batch) - ). - -reply_caller(Id, Reply) -> - reply_caller(Id, Reply, false). - -reply_caller(Id, ?REPLY(undefined, _, HasSent, Result), BlockWorker) -> - handle_query_result(Id, Result, HasSent, BlockWorker); -reply_caller(Id, ?REPLY({ReplyFun, Args}, _, HasSent, Result), BlockWorker) when - is_function(ReplyFun) --> - _ = - case Result of - {async_return, _} -> no_reply_for_now; - _ -> apply(ReplyFun, Args ++ [Result]) - end, - handle_query_result(Id, Result, HasSent, BlockWorker); -reply_caller(Id, ?REPLY(From, _, HasSent, Result), BlockWorker) -> - gen_statem:reply(From, Result), - handle_query_result(Id, Result, HasSent, BlockWorker). - -handle_query_result(Id, ?RESOURCE_ERROR_M(exception, Msg), HasSent, BlockWorker) -> - ?SLOG(error, #{msg => resource_exception, info => Msg}), - inc_sent_failed(Id, HasSent), - BlockWorker; -handle_query_result(_Id, ?RESOURCE_ERROR_M(NotWorking, _), _HasSent, _) when - NotWorking == not_connected; NotWorking == blocked --> - true; -handle_query_result(Id, ?RESOURCE_ERROR_M(not_found, Msg), _HasSent, BlockWorker) -> - ?SLOG(error, #{id => Id, msg => resource_not_found, info => Msg}), - emqx_resource_metrics:dropped_resource_not_found_inc(Id), - BlockWorker; -handle_query_result(Id, ?RESOURCE_ERROR_M(stopped, Msg), _HasSent, BlockWorker) -> - ?SLOG(error, #{id => Id, msg => resource_stopped, info => Msg}), - emqx_resource_metrics:dropped_resource_stopped_inc(Id), - BlockWorker; -handle_query_result(Id, ?RESOURCE_ERROR_M(Reason, _), _HasSent, BlockWorker) -> - ?SLOG(error, #{id => Id, msg => other_resource_error, reason => Reason}), - emqx_resource_metrics:dropped_other_inc(Id), - BlockWorker; -handle_query_result(Id, {error, {recoverable_error, Reason}}, _HasSent, _BlockWorker) -> - %% the message will be queued in replayq or inflight window, - %% i.e. the counter 'queuing' or 'dropped' will increase, so we pretend that we have not - %% sent this message. - ?SLOG(warning, #{id => Id, msg => recoverable_error, reason => Reason}), - true; -handle_query_result(Id, {error, Reason}, HasSent, BlockWorker) -> - ?SLOG(error, #{id => Id, msg => send_error, reason => Reason}), - inc_sent_failed(Id, HasSent), - BlockWorker; -handle_query_result(_Id, {async_return, inflight_full}, _HasSent, _BlockWorker) -> - true; -handle_query_result(Id, {async_return, {error, Msg}}, HasSent, BlockWorker) -> - ?SLOG(error, #{id => Id, msg => async_send_error, info => Msg}), - inc_sent_failed(Id, HasSent), - BlockWorker; -handle_query_result(_Id, {async_return, ok}, _HasSent, BlockWorker) -> - BlockWorker; -handle_query_result(Id, Result, HasSent, BlockWorker) -> - assert_ok_result(Result), - inc_sent_success(Id, HasSent), - BlockWorker. 
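handle_query_result/4 boils every outcome down to one boolean: should the worker block (keeping the request buffered) or proceed? A condensed sketch of that decision table, keeping only the recoverable/unrecoverable distinction from the clauses above:

-module(result_class_sketch).
-export([should_block/1]).

%% true  -> resource is down or overloaded; block and retry later.
%% false -> final outcome (success or permanent failure); move on.
should_block({error, {recoverable_error, _Reason}}) -> true;
should_block({async_return, inflight_full}) -> true;
should_block({async_return, ok}) -> false;
should_block({error, _Reason}) -> false;
should_block(ok) -> false.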
- -call_query(QM0, Id, Index, Query, QueryOpts) -> - ?tp(call_query_enter, #{id => Id, query => Query}), - case emqx_resource_manager:ets_lookup(Id) of - {ok, _Group, #{mod := Mod, state := ResSt, status := connected} = Data} -> - QM = - case QM0 of - configured -> maps:get(query_mode, Data); - _ -> QM0 - end, - CM = maps:get(callback_mode, Data), - emqx_resource_metrics:matched_inc(Id), - apply_query_fun(call_mode(QM, CM), Mod, Id, Index, Query, ResSt, QueryOpts); - {ok, _Group, #{status := stopped}} -> - emqx_resource_metrics:matched_inc(Id), - ?RESOURCE_ERROR(stopped, "resource stopped or disabled"); - {ok, _Group, #{status := S}} when S == connecting; S == disconnected -> - emqx_resource_metrics:matched_inc(Id), - ?RESOURCE_ERROR(not_connected, "resource not connected"); - {error, not_found} -> - ?RESOURCE_ERROR(not_found, "resource not found") - end. - --define(APPLY_RESOURCE(NAME, EXPR, REQ), - try - %% if the callback module (connector) wants to return an error that - %% makes the current resource goes into the `blocked` state, it should - %% return `{error, {recoverable_error, Reason}}` - EXPR - catch - ERR:REASON:STACKTRACE -> - ?RESOURCE_ERROR(exception, #{ - name => NAME, - id => Id, - request => REQ, - error => {ERR, REASON}, - stacktrace => STACKTRACE - }) - end -). - -apply_query_fun(sync, Mod, Id, _Index, ?QUERY(_, Request, _) = _Query, ResSt, _QueryOpts) -> - ?tp(call_query, #{id => Id, mod => Mod, query => _Query, res_st => ResSt}), - ?APPLY_RESOURCE(call_query, Mod:on_query(Id, Request, ResSt), Request); -apply_query_fun(async, Mod, Id, Index, ?QUERY(_, Request, _) = Query, ResSt, QueryOpts) -> - ?tp(call_query_async, #{id => Id, mod => Mod, query => Query, res_st => ResSt}), - Name = maps:get(inflight_name, QueryOpts, undefined), - ?APPLY_RESOURCE( - call_query_async, - case inflight_is_full(Name) of - true -> - {async_return, inflight_full}; - false -> - ReplyFun = fun ?MODULE:reply_after_query/7, - Ref = make_message_ref(), - Args = [self(), Id, Index, Name, Ref, Query], - ok = inflight_append(Name, Ref, Query, Id, Index), - Result = Mod:on_query_async(Id, Request, {ReplyFun, Args}, ResSt), - {async_return, Result} - end, - Request - ); -apply_query_fun(sync, Mod, Id, _Index, [?QUERY(_, _, _) | _] = Batch, ResSt, _QueryOpts) -> - ?tp(call_batch_query, #{id => Id, mod => Mod, batch => Batch, res_st => ResSt}), - Requests = [Request || ?QUERY(_From, Request, _) <- Batch], - ?APPLY_RESOURCE(call_batch_query, Mod:on_batch_query(Id, Requests, ResSt), Batch); -apply_query_fun(async, Mod, Id, Index, [?QUERY(_, _, _) | _] = Batch, ResSt, QueryOpts) -> - ?tp(call_batch_query_async, #{id => Id, mod => Mod, batch => Batch, res_st => ResSt}), - Name = maps:get(inflight_name, QueryOpts, undefined), - ?APPLY_RESOURCE( - call_batch_query_async, - case inflight_is_full(Name) of - true -> - {async_return, inflight_full}; - false -> - ReplyFun = fun ?MODULE:batch_reply_after_query/7, - Ref = make_message_ref(), - Args = {ReplyFun, [self(), Id, Index, Name, Ref, Batch]}, - Requests = [Request || ?QUERY(_From, Request, _) <- Batch], - ok = inflight_append(Name, Ref, Batch, Id, Index), - Result = Mod:on_batch_query_async(Id, Requests, Args, ResSt), - {async_return, Result} - end, - Batch - ). - -reply_after_query(Pid, Id, Index, Name, Ref, ?QUERY(From, Request, HasSent), Result) -> - %% NOTE: 'inflight' is the count of messages that were sent async - %% but received no ACK, NOT the number of messages queued in the - %% inflight window. 
- case reply_caller(Id, ?REPLY(From, Request, HasSent, Result)) of - true -> - ?MODULE:block(Pid); - false -> - drop_inflight_and_resume(Pid, Name, Ref, Id, Index) - end. - -batch_reply_after_query(Pid, Id, Index, Name, Ref, Batch, Result) -> - %% NOTE: 'inflight' is the count of messages that were sent async - %% but received no ACK, NOT the number of messages queued in the - %% inflight window. - case batch_reply_caller(Id, Result, Batch) of - true -> - ?MODULE:block(Pid); - false -> - drop_inflight_and_resume(Pid, Name, Ref, Id, Index) - end. - -drop_inflight_and_resume(Pid, Name, Ref, Id, Index) -> - case inflight_is_full(Name) of - true -> - inflight_drop(Name, Ref, Id, Index), - ?MODULE:resume(Pid); - false -> - inflight_drop(Name, Ref, Id, Index) - end. - -%%============================================================================== -%% operations for queue -queue_item_marshaller(?Q_ITEM(_) = I) -> - term_to_binary(I); -queue_item_marshaller(Bin) when is_binary(Bin) -> - binary_to_term(Bin). - -estimate_size(QItem) -> - size(queue_item_marshaller(QItem)). - -maybe_append_queue(Id, _Index, undefined, _Items) -> - emqx_resource_metrics:dropped_queue_not_enabled_inc(Id), - undefined; -maybe_append_queue(Id, Index, Q, Items) -> - Q2 = - case replayq:overflow(Q) of - Overflow when Overflow =< 0 -> - Q; - Overflow -> - PopOpts = #{bytes_limit => Overflow, count_limit => 999999999}, - {Q1, QAckRef, Items2} = replayq:pop(Q, PopOpts), - ok = replayq:ack(Q1, QAckRef), - Dropped = length(Items2), - emqx_resource_metrics:dropped_queue_full_inc(Id), - ?SLOG(error, #{msg => drop_query, reason => queue_full, dropped => Dropped}), - Q1 - end, - Q3 = replayq:append(Q2, Items), - emqx_resource_metrics:queuing_set(Id, Index, replayq:count(Q3)), - Q3. - -get_first_n_from_queue(Q, N) -> - get_first_n_from_queue(Q, N, []). - -get_first_n_from_queue(_Q, 0, Acc) -> - lists:reverse(Acc); -get_first_n_from_queue(Q, N, Acc) when N > 0 -> - case replayq:peek(Q) of - empty -> Acc; - ?Q_ITEM(Query) -> get_first_n_from_queue(Q, N - 1, [Query | Acc]) - end. - -drop_first_n_from_queue(Q, 0, _Id, _Index) -> - Q; -drop_first_n_from_queue(Q, N, Id, Index) when N > 0 -> - drop_first_n_from_queue(drop_head(Q, Id, Index), N - 1, Id, Index). - -drop_head(Q, Id, Index) -> - {NewQ, AckRef, _} = replayq:pop(Q, #{count_limit => 1}), - ok = replayq:ack(NewQ, AckRef), - emqx_resource_metrics:queuing_set(Id, Index, replayq:count(NewQ)), - NewQ. - -%%============================================================================== -%% the inflight queue for async query --define(SIZE_REF, -1). -inflight_new(Name, InfltWinSZ, Id, Index) -> - _ = ets:new(Name, [named_table, ordered_set, public, {write_concurrency, true}]), - inflight_append(Name, ?SIZE_REF, {max_size, InfltWinSZ}, Id, Index), - ok. - -inflight_get_first(Name) -> - case ets:next(Name, ?SIZE_REF) of - '$end_of_table' -> - empty; - Ref -> - case ets:lookup(Name, Ref) of - [Object] -> - Object; - [] -> - %% it might have been dropped - inflight_get_first(Name) - end - end. - -inflight_is_full(undefined) -> - false; -inflight_is_full(Name) -> - [{_, {max_size, MaxSize}}] = ets:lookup(Name, ?SIZE_REF), - Size = inflight_size(Name), - Size >= MaxSize. - -inflight_size(Name) -> - %% Note: we subtract 1 because there's a metadata row that hold - %% the maximum size value. - MetadataRowCount = 1, - case ets:info(Name, size) of - undefined -> 0; - Size -> max(0, Size - MetadataRowCount) - end. 
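The deleted inflight table keeps its maximum size in a metadata row keyed by the reserved ref -1, which sorts before every real ref in the ordered_set (real refs are positive monotonic integers from make_message_ref/0). A runnable sketch of the same bookkeeping:

-module(inflight_sketch).
-export([new/2, is_full/1, count/1]).

-define(SIZE_REF, -1).

new(Name, MaxSize) ->
    _ = ets:new(Name, [named_table, ordered_set, public]),
    true = ets:insert(Name, {?SIZE_REF, {max_size, MaxSize}}),
    ok.

%% One row is metadata, so subtract it from the raw table size.
count(Name) ->
    case ets:info(Name, size) of
        undefined -> 0;
        Size -> max(0, Size - 1)
    end.

is_full(Name) ->
    [{_, {max_size, MaxSize}}] = ets:lookup(Name, ?SIZE_REF),
    count(Name) >= MaxSize.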
- -inflight_append(undefined, _Ref, _Query, _Id, _Index) -> - ok; -inflight_append(Name, Ref, [?QUERY(_, _, _) | _] = Batch, Id, Index) -> - ets:insert(Name, {Ref, [?QUERY(From, Req, true) || ?QUERY(From, Req, _) <- Batch]}), - emqx_resource_metrics:inflight_set(Id, Index, inflight_size(Name)), - ok; -inflight_append(Name, Ref, ?QUERY(From, Req, _), Id, Index) -> - ets:insert(Name, {Ref, ?QUERY(From, Req, true)}), - emqx_resource_metrics:inflight_set(Id, Index, inflight_size(Name)), - ok; -inflight_append(Name, Ref, Data, _Id, _Index) -> - ets:insert(Name, {Ref, Data}), - %% this is a metadata row being inserted; therefore, we don't bump - %% the inflight metric. - ok. - -inflight_drop(undefined, _, _Id, _Index) -> - ok; -inflight_drop(Name, Ref, Id, Index) -> - ets:delete(Name, Ref), - emqx_resource_metrics:inflight_set(Id, Index, inflight_size(Name)), - ok. - -%%============================================================================== - -inc_sent_failed(Id, _HasSent = true) -> - emqx_resource_metrics:retried_failed_inc(Id); -inc_sent_failed(Id, _HasSent) -> - emqx_resource_metrics:failed_inc(Id). - -inc_sent_success(Id, _HasSent = true) -> - emqx_resource_metrics:retried_success_inc(Id); -inc_sent_success(Id, _HasSent) -> - emqx_resource_metrics:success_inc(Id). - -call_mode(sync, _) -> sync; -call_mode(async, always_sync) -> sync; -call_mode(async, async_if_possible) -> async. - -assert_ok_result(ok) -> - true; -assert_ok_result({async_return, R}) -> - assert_ok_result(R); -assert_ok_result(R) when is_tuple(R) -> - try - ok = erlang:element(1, R) - catch - error:{badmatch, _} -> - error({not_ok_result, R}) - end; -assert_ok_result(R) -> - error({not_ok_result, R}). - -queue_count(undefined) -> - 0; -queue_count(Q) -> - replayq:count(Q). - --spec name(id(), integer()) -> atom(). -name(Id, Index) -> - Mod = atom_to_list(?MODULE), - Id1 = binary_to_list(Id), - Index1 = integer_to_list(Index), - list_to_atom(lists:concat([Mod, ":", Id1, ":", Index1])). - -disk_queue_dir(Id, Index) -> - QDir = binary_to_list(Id) ++ ":" ++ integer_to_list(Index), - filename:join([emqx:data_dir(), "resource_worker", node(), QDir]). - -ensure_flush_timer(St = #{tref := undefined, batch_time := T}) -> - Ref = make_ref(), - TRef = erlang:send_after(T, self(), {flush, Ref}), - St#{tref => {TRef, Ref}}; -ensure_flush_timer(St) -> - St. - -cancel_flush_timer(St = #{tref := undefined}) -> - St; -cancel_flush_timer(St = #{tref := {TRef, _Ref}}) -> - _ = erlang:cancel_timer(TRef), - St#{tref => undefined}. - -make_message_ref() -> - erlang:unique_integer([monotonic, positive]). 
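The ensure_flush_timer/cancel_flush_timer pair above tags each timer with a fresh ref so that a stale {flush, Ref} message from an already-cancelled timer can be recognized and dropped (the running/3 clause matches on the stored ref). A sketch of the idiom in isolation:

-module(flush_timer_sketch).
-export([ensure/2, cancel/1]).

%% Arm the timer only if none is pending; keep the ref so that a late
%% {flush, Ref} from a cancelled timer is distinguishable from the
%% live one and can be ignored.
ensure(#{tref := undefined} = St, TimeoutMs) ->
    Ref = make_ref(),
    TRef = erlang:send_after(TimeoutMs, self(), {flush, Ref}),
    St#{tref := {TRef, Ref}};
ensure(St, _TimeoutMs) ->
    St.

cancel(#{tref := undefined} = St) ->
    St;
cancel(#{tref := {TRef, _Ref}} = St) ->
    _ = erlang:cancel_timer(TRef),
    St#{tref := undefined}.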
diff --git a/apps/emqx_resource/src/schema/emqx_resource_schema.erl b/apps/emqx_resource/src/schema/emqx_resource_schema.erl index 0e25d01b8..3b4fb66e5 100644 --- a/apps/emqx_resource/src/schema/emqx_resource_schema.erl +++ b/apps/emqx_resource/src/schema/emqx_resource_schema.erl @@ -35,39 +35,66 @@ fields("resource_opts") -> {resource_opts, mk( ref(?MODULE, "creation_opts"), - #{ - required => false, - default => #{}, - desc => ?DESC(<<"resource_opts">>) - } + resource_opts_meta() )} ]; fields("creation_opts") -> [ + {buffer_mode, fun buffer_mode/1}, {worker_pool_size, fun worker_pool_size/1}, {health_check_interval, fun health_check_interval/1}, + {resume_interval, fun resume_interval/1}, + {start_after_created, fun start_after_created/1}, + {start_timeout, fun start_timeout/1}, {auto_restart_interval, fun auto_restart_interval/1}, {query_mode, fun query_mode/1}, - {async_inflight_window, fun async_inflight_window/1}, + {request_timeout, fun request_timeout/1}, + {inflight_window, fun inflight_window/1}, {enable_batch, fun enable_batch/1}, {batch_size, fun batch_size/1}, {batch_time, fun batch_time/1}, {enable_queue, fun enable_queue/1}, - {max_queue_bytes, fun max_queue_bytes/1} + {max_buffer_bytes, fun max_buffer_bytes/1}, + {buffer_seg_bytes, fun buffer_seg_bytes/1} ]. +resource_opts_meta() -> + #{ + required => false, + default => #{}, + desc => ?DESC(<<"resource_opts">>) + }. + worker_pool_size(type) -> non_neg_integer(); worker_pool_size(desc) -> ?DESC("worker_pool_size"); worker_pool_size(default) -> ?WORKER_POOL_SIZE; worker_pool_size(required) -> false; worker_pool_size(_) -> undefined. +resume_interval(type) -> emqx_schema:duration_ms(); +resume_interval(importance) -> ?IMPORTANCE_HIDDEN; +resume_interval(desc) -> ?DESC("resume_interval"); +resume_interval(required) -> false; +resume_interval(_) -> undefined. + health_check_interval(type) -> emqx_schema:duration_ms(); health_check_interval(desc) -> ?DESC("health_check_interval"); health_check_interval(default) -> ?HEALTHCHECK_INTERVAL_RAW; health_check_interval(required) -> false; health_check_interval(_) -> undefined. +start_after_created(type) -> boolean(); +start_after_created(desc) -> ?DESC("start_after_created"); +start_after_created(default) -> ?START_AFTER_CREATED_RAW; +start_after_created(required) -> false; +start_after_created(_) -> undefined. + +start_timeout(type) -> emqx_schema:duration_ms(); +start_timeout(desc) -> ?DESC("start_timeout"); +start_timeout(default) -> ?START_TIMEOUT_RAW; +start_timeout(required) -> false; +start_timeout(_) -> undefined. + auto_restart_interval(type) -> hoconsc:union([infinity, emqx_schema:duration_ms()]); auto_restart_interval(desc) -> ?DESC("auto_restart_interval"); auto_restart_interval(default) -> ?AUTO_RESTART_INTERVAL_RAW; @@ -80,23 +107,31 @@ query_mode(default) -> async; query_mode(required) -> false; query_mode(_) -> undefined. +request_timeout(type) -> hoconsc:union([infinity, emqx_schema:duration_ms()]); +request_timeout(desc) -> ?DESC("request_timeout"); +request_timeout(default) -> <<"15s">>; +request_timeout(_) -> undefined. + enable_batch(type) -> boolean(); enable_batch(required) -> false; enable_batch(default) -> true; +enable_batch(deprecated) -> {since, "v5.0.14"}; enable_batch(desc) -> ?DESC("enable_batch"); enable_batch(_) -> undefined. enable_queue(type) -> boolean(); enable_queue(required) -> false; enable_queue(default) -> false; +enable_queue(deprecated) -> {since, "v5.0.14"}; enable_queue(desc) -> ?DESC("enable_queue"); enable_queue(_) -> undefined. 
-async_inflight_window(type) -> pos_integer(); -async_inflight_window(desc) -> ?DESC("async_inflight_window"); -async_inflight_window(default) -> ?DEFAULT_INFLIGHT; -async_inflight_window(required) -> false; -async_inflight_window(_) -> undefined. +inflight_window(type) -> pos_integer(); +inflight_window(aliases) -> [async_inflight_window]; +inflight_window(desc) -> ?DESC("inflight_window"); +inflight_window(default) -> ?DEFAULT_INFLIGHT; +inflight_window(required) -> false; +inflight_window(_) -> undefined. batch_size(type) -> pos_integer(); batch_size(desc) -> ?DESC("batch_size"); @@ -110,11 +145,24 @@ batch_time(default) -> ?DEFAULT_BATCH_TIME_RAW; batch_time(required) -> false; batch_time(_) -> undefined. -max_queue_bytes(type) -> emqx_schema:bytesize(); -max_queue_bytes(desc) -> ?DESC("max_queue_bytes"); -max_queue_bytes(default) -> ?DEFAULT_QUEUE_SIZE_RAW; -max_queue_bytes(required) -> false; -max_queue_bytes(_) -> undefined. +max_buffer_bytes(type) -> emqx_schema:bytesize(); +max_buffer_bytes(aliases) -> [max_queue_bytes]; +max_buffer_bytes(desc) -> ?DESC("max_buffer_bytes"); +max_buffer_bytes(default) -> ?DEFAULT_BUFFER_BYTES_RAW; +max_buffer_bytes(required) -> false; +max_buffer_bytes(_) -> undefined. -desc("creation_opts") -> - ?DESC("creation_opts"). +buffer_mode(type) -> enum([memory_only, volatile_offload]); +buffer_mode(desc) -> ?DESC("buffer_mode"); +buffer_mode(default) -> memory_only; +buffer_mode(required) -> false; +buffer_mode(importance) -> ?IMPORTANCE_HIDDEN; +buffer_mode(_) -> undefined. + +buffer_seg_bytes(type) -> emqx_schema:bytesize(); +buffer_seg_bytes(desc) -> ?DESC("buffer_seg_bytes"); +buffer_seg_bytes(required) -> false; +buffer_seg_bytes(importance) -> ?IMPORTANCE_HIDDEN; +buffer_seg_bytes(_) -> undefined. + +desc("creation_opts") -> ?DESC("creation_opts"). diff --git a/apps/emqx_resource/test/emqx_connector_demo.erl b/apps/emqx_resource/test/emqx_connector_demo.erl index 7af0607cb..96e22c6b6 100644 --- a/apps/emqx_resource/test/emqx_connector_demo.erl +++ b/apps/emqx_resource/test/emqx_connector_demo.erl @@ -17,6 +17,7 @@ -module(emqx_connector_demo). -include_lib("typerefl/include/types.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). -behaviour(emqx_resource). @@ -28,6 +29,7 @@ on_query/3, on_query_async/4, on_batch_query/3, + on_batch_query_async/4, on_get_status/2 ]). @@ -36,6 +38,8 @@ %% callbacks for emqx_resource config schema -export([roots/0]). +-define(CM_KEY, {?MODULE, callback_mode}). + roots() -> [ {name, fun name/1}, @@ -51,7 +55,6 @@ register(required) -> true; register(default) -> false; register(_) -> undefined. --define(CM_KEY, {?MODULE, callback_mode}). callback_mode() -> persistent_term:get(?CM_KEY). @@ -59,26 +62,21 @@ set_callback_mode(Mode) -> persistent_term:put(?CM_KEY, Mode). on_start(_InstId, #{create_error := true}) -> + ?tp(connector_demo_start_error, #{}), error("some error"); -on_start(InstId, #{name := Name, stop_error := true} = Opts) -> - Register = maps:get(register, Opts, false), - {ok, Opts#{ - id => InstId, - stop_error => true, - pid => spawn_counter_process(Name, Register) - }}; on_start(InstId, #{name := Name} = Opts) -> Register = maps:get(register, Opts, false), + StopError = maps:get(stop_error, Opts, false), {ok, Opts#{ id => InstId, + stop_error => StopError, pid => spawn_counter_process(Name, Register) }}. on_stop(_InstId, #{stop_error := true}) -> {error, stop_error}; on_stop(_InstId, #{pid := Pid}) -> - erlang:exit(Pid, shutdown), - ok. + stop_counter_process(Pid). 
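Stepping back to the schema changes above: the migration leans on two hocon field attributes. `aliases` keeps the old keys (async_inflight_window, max_queue_bytes) readable under the new names, and `deprecated => {since, ...}` retires enable_batch/enable_queue without breaking existing configs. A sketch combining both in the same function-per-attribute style; the field names here are hypothetical:

-module(schema_alias_sketch).
-export([new_name/1, old_flag/1]).

%% Renamed field: `new_name' still accepts the legacy `old_name' key
%% from existing configuration files.
new_name(type) -> pos_integer();
new_name(aliases) -> [old_name];
new_name(default) -> 100;
new_name(required) -> false;
new_name(desc) -> <<"renamed field, still readable under its old key">>;
new_name(_) -> undefined.

%% Retired flag: still parses, but is marked deprecated.
old_flag(type) -> boolean();
old_flag(deprecated) -> {since, "v5.0.14"};
old_flag(required) -> false;
old_flag(_) -> undefined.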
on_query(_InstId, get_state, State) -> {ok, State}; @@ -87,16 +85,35 @@ on_query(_InstId, get_state_failed, State) -> on_query(_InstId, block, #{pid := Pid}) -> Pid ! block, ok; +on_query(_InstId, block_now, #{pid := Pid}) -> + Pid ! block, + {error, {resource_error, #{reason => blocked, msg => blocked}}}; on_query(_InstId, resume, #{pid := Pid}) -> Pid ! resume, ok; +on_query(_InstId, {big_payload, Payload}, #{pid := Pid}) -> + ReqRef = make_ref(), + From = {self(), ReqRef}, + Pid ! {From, {big_payload, Payload}}, + receive + {ReqRef, ok} -> + ?tp(connector_demo_big_payload, #{payload => Payload}), + ok; + {ReqRef, incorrect_status} -> + {error, {recoverable_error, incorrect_status}} + after 1000 -> + {error, timeout} + end; on_query(_InstId, {inc_counter, N}, #{pid := Pid}) -> ReqRef = make_ref(), From = {self(), ReqRef}, Pid ! {From, {inc, N}}, receive - {ReqRef, ok} -> ok; - {ReqRef, incorrect_status} -> {error, {recoverable_error, incorrect_status}} + {ReqRef, ok} -> + ?tp(connector_demo_inc_counter, #{n => N}), + ok; + {ReqRef, incorrect_status} -> + {error, {recoverable_error, incorrect_status}} after 1000 -> {error, timeout} end; @@ -117,28 +134,83 @@ on_query(_InstId, get_counter, #{pid := Pid}) -> {ReqRef, Num} -> {ok, Num} after 1000 -> {error, timeout} - end. + end; +on_query(_InstId, {sleep_before_reply, For}, #{pid := Pid}) -> + ?tp(connector_demo_sleep, #{mode => sync, for => For}), + ReqRef = make_ref(), + From = {self(), ReqRef}, + Pid ! {From, {sleep_before_reply, For}}, + receive + {ReqRef, Result} -> + Result + after 1000 -> + {error, timeout} + end; +on_query(_InstId, {sync_sleep_before_reply, SleepFor}, _State) -> + %% This simulates a slow sync call + timer:sleep(SleepFor), + {ok, slept}. +on_query_async(_InstId, block, ReplyFun, #{pid := Pid}) -> + Pid ! {block, ReplyFun}, + {ok, Pid}; +on_query_async(_InstId, resume, ReplyFun, #{pid := Pid}) -> + Pid ! {resume, ReplyFun}, + {ok, Pid}; on_query_async(_InstId, {inc_counter, N}, ReplyFun, #{pid := Pid}) -> Pid ! {inc, N, ReplyFun}, - ok; + {ok, Pid}; on_query_async(_InstId, get_counter, ReplyFun, #{pid := Pid}) -> Pid ! {get, ReplyFun}, - ok. + {ok, Pid}; +on_query_async(_InstId, block_now, ReplyFun, #{pid := Pid}) -> + Pid ! {block_now, ReplyFun}, + {ok, Pid}; +on_query_async(_InstId, {big_payload, Payload}, ReplyFun, #{pid := Pid}) -> + Pid ! {big_payload, Payload, ReplyFun}, + {ok, Pid}; +on_query_async(_InstId, {sleep_before_reply, For}, ReplyFun, #{pid := Pid}) -> + ?tp(connector_demo_sleep, #{mode => async, for => For}), + Pid ! {{sleep_before_reply, For}, ReplyFun}, + {ok, Pid}. on_batch_query(InstId, BatchReq, State) -> - %% Requests can be either 'get_counter' or 'inc_counter', but cannot be mixed. + %% Requests can be either 'get_counter' or 'inc_counter', but + %% cannot be mixed. case hd(BatchReq) of {inc_counter, _} -> - batch_inc_counter(InstId, BatchReq, State); + batch_inc_counter(sync, InstId, BatchReq, State); get_counter -> - batch_get_counter(InstId, State) + batch_get_counter(sync, InstId, State); + {big_payload, _Payload} -> + batch_big_payload(sync, InstId, BatchReq, State); + {random_reply, Num} -> + %% async batch retried + make_random_reply(Num) end. -batch_inc_counter(InstId, BatchReq, State) -> +on_batch_query_async(InstId, BatchReq, ReplyFunAndArgs, #{pid := Pid} = State) -> + %% Requests can be of multiple types, but cannot be mixed. 
+    case hd(BatchReq) of
+        {inc_counter, _} ->
+            batch_inc_counter({async, ReplyFunAndArgs}, InstId, BatchReq, State);
+        get_counter ->
+            batch_get_counter({async, ReplyFunAndArgs}, InstId, State);
+        block_now ->
+            on_query_async(InstId, block_now, ReplyFunAndArgs, State);
+        {big_payload, _Payload} ->
+            batch_big_payload({async, ReplyFunAndArgs}, InstId, BatchReq, State);
+        {random_reply, Num} ->
+            %% taking only the first Num in the batch should be random enough
+            Pid ! {{random_reply, Num}, ReplyFunAndArgs},
+            {ok, Pid}
+    end.
+
+batch_inc_counter(CallMode, InstId, BatchReq, State) ->
     TotalN = lists:foldl(
         fun
             ({inc_counter, N}, Total) ->
+                ?tp(connector_demo_batch_inc_individual, #{n => N}),
                 Total + N;
             (Req, _Total) ->
                 error({mixed_requests_not_allowed, {inc_counter, Req}})
@@ -146,12 +218,33 @@ batch_inc_counter(InstId, BatchReq, State) ->
         0,
         BatchReq
     ),
-    on_query(InstId, {inc_counter, TotalN}, State).
+    case CallMode of
+        sync ->
+            on_query(InstId, {inc_counter, TotalN}, State);
+        {async, ReplyFunAndArgs} ->
+            on_query_async(InstId, {inc_counter, TotalN}, ReplyFunAndArgs, State)
+    end.
 
-batch_get_counter(InstId, State) ->
-    on_query(InstId, get_counter, State).
+batch_get_counter(sync, InstId, State) ->
+    on_query(InstId, get_counter, State);
+batch_get_counter({async, ReplyFunAndArgs}, InstId, State) ->
+    on_query_async(InstId, get_counter, ReplyFunAndArgs, State).
+
+batch_big_payload(sync, InstId, Batch, State) ->
+    [Res | _] = lists:map(
+        fun(Req = {big_payload, _}) -> on_query(InstId, Req, State) end,
+        Batch
+    ),
+    Res;
+batch_big_payload({async, ReplyFunAndArgs}, InstId, Batch, State = #{pid := Pid}) ->
+    lists:foreach(
+        fun(Req = {big_payload, _}) -> on_query_async(InstId, Req, ReplyFunAndArgs, State) end,
+        Batch
+    ),
+    {ok, Pid}.
 
 on_get_status(_InstId, #{health_check_error := true}) ->
+    ?tp(connector_demo_health_check_error, #{}),
     disconnected;
 on_get_status(_InstId, #{pid := Pid}) ->
     timer:sleep(300),
@@ -165,8 +258,21 @@ spawn_counter_process(Name, Register) ->
     true = maybe_register(Name, Pid, Register),
     Pid.
 
+stop_counter_process(Pid) ->
+    true = erlang:is_process_alive(Pid),
+    true = erlang:exit(Pid, shutdown),
+    receive
+        {'EXIT', Pid, shutdown} -> ok
+    after 5000 ->
+        {error, timeout}
+    end.
+
 counter_loop() ->
-    counter_loop(#{counter => 0, status => running, incorrect_status_count => 0}).
+    counter_loop(#{
+        counter => 0,
+        status => running,
+        incorrect_status_count => 0
+    }).
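Every reworked on_query_async clause now returns {ok, Pid} and defers the result through the ReplyFunAndArgs closure, which the counter process later fires via apply_reply/2 (defined further below in this file). A distilled sketch of that contract:

-module(async_reply_sketch).
-export([on_query_async/3, apply_reply/2]).

%% The caller hands us {Fun, Args}; the connector must eventually call
%% apply(Fun, Args ++ [Result]) exactly once per request.
on_query_async(Request, {_Fun, _Args} = ReplyFunAndArgs, WorkerPid) ->
    WorkerPid ! {Request, ReplyFunAndArgs},
    {ok, WorkerPid}.

%% Called by the worker once Result is known.
apply_reply({ReplyFun, Args}, Result) when is_function(ReplyFun) ->
    apply(ReplyFun, Args ++ [Result]).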
 counter_loop(
     #{
@@ -180,14 +286,33 @@ counter_loop(
         block ->
             ct:pal("counter recv: ~p", [block]),
             State#{status => blocked};
+        {block, ReplyFun} ->
+            ct:pal("counter recv: ~p", [block]),
+            apply_reply(ReplyFun, ok),
+            State#{status => blocked};
+        {block_now, ReplyFun} ->
+            ct:pal("counter recv: ~p", [block_now]),
+            apply_reply(
+                ReplyFun, {error, {resource_error, #{reason => blocked, msg => blocked}}}
+            ),
+            State#{status => blocked};
         resume ->
             {messages, Msgs} = erlang:process_info(self(), messages),
             ct:pal("counter recv: ~p, buffered msgs: ~p", [resume, length(Msgs)]),
             State#{status => running};
+        {resume, ReplyFun} ->
+            {messages, Msgs} = erlang:process_info(self(), messages),
+            ct:pal("counter recv: ~p, buffered msgs: ~p", [resume, length(Msgs)]),
+            apply_reply(ReplyFun, ok),
+            State#{status => running};
         {inc, N, ReplyFun} when Status == running ->
             %ct:pal("async counter recv: ~p", [{inc, N}]),
             apply_reply(ReplyFun, ok),
+            ?tp(connector_demo_inc_counter_async, #{n => N}),
             State#{counter => Num + N};
+        {big_payload, _Payload, ReplyFun} when Status == blocked ->
+            apply_reply(ReplyFun, {error, {recoverable_error, blocked}}),
+            State;
         {{FromPid, ReqRef}, {inc, N}} when Status == running ->
             %ct:pal("sync counter recv: ~p", [{inc, N}]),
             FromPid ! {ReqRef, ok},
@@ -195,6 +320,12 @@ counter_loop(
         {{FromPid, ReqRef}, {inc, _N}} when Status == blocked ->
             FromPid ! {ReqRef, incorrect_status},
             State#{incorrect_status_count := IncorrectCount + 1};
+        {{FromPid, ReqRef}, {big_payload, _Payload}} when Status == blocked ->
+            FromPid ! {ReqRef, incorrect_status},
+            State#{incorrect_status_count := IncorrectCount + 1};
+        {{FromPid, ReqRef}, {big_payload, _Payload}} when Status == running ->
+            FromPid ! {ReqRef, ok},
+            State;
         {get, ReplyFun} ->
             apply_reply(ReplyFun, Num),
             State;
@@ -203,10 +334,45 @@ counter_loop(
             State;
         {{FromPid, ReqRef}, get} ->
             FromPid ! {ReqRef, Num},
+            State;
+        {{random_reply, RandNum}, ReplyFun} ->
+            %% a well-behaved connector should reply once and only once for
+            %% each (batch) request, but here we reply with random results a
+            %% random number of times; since 'ok' is among the results, the
+            %% buffer worker should eventually drain the buffer (and the
+            %% inflight table)
+            ReplyCount = 1 + (RandNum rem 3),
+            Results = make_random_replies(ReplyCount),
+            %% add a delay to trigger inflight full
+            lists:foreach(
+                fun(Result) ->
+                    timer:sleep(rand:uniform(5)),
+                    apply_reply(ReplyFun, Result)
+                end,
+                Results
+            ),
+            State;
+        {{sleep_before_reply, _} = SleepQ, ReplyFun} ->
+            apply_reply(ReplyFun, handle_query(async, SleepQ, Status)),
+            State;
+        {{FromPid, ReqRef}, {sleep_before_reply, _} = SleepQ} ->
+            FromPid ! {ReqRef, handle_query(sync, SleepQ, Status)},
             State
     end,
     counter_loop(NewState).
 
+handle_query(Mode, {sleep_before_reply, For} = Query, Status) ->
+    ok = timer:sleep(For),
+    Result =
+        case Status of
+            running -> ok;
+            blocked -> {error, {recoverable_error, blocked}}
+        end,
+    ?tp(connector_demo_sleep_handled, #{
+        mode => Mode, query => Query, slept => For, result => Result
+    }),
+    Result.
+
 maybe_register(Name, Pid, true) ->
     ct:pal("---- Register Name: ~p", [Name]),
     ct:pal("---- whereis(): ~p", [whereis(Name)]),
@@ -216,3 +382,18 @@ maybe_register(_Name, _Pid, false) ->
 
 apply_reply({ReplyFun, Args}, Result) when is_function(ReplyFun) ->
     apply(ReplyFun, Args ++ [Result]).
+
+make_random_replies(0) ->
+    [];
+make_random_replies(N) ->
+    [make_random_reply(N) | make_random_replies(N - 1)].
+ +make_random_reply(N) -> + case rand:uniform(3) of + 1 -> + {ok, N}; + 2 -> + {error, {recoverable_error, N}}; + 3 -> + {error, {unrecoverable_error, N}} + end. diff --git a/apps/emqx_resource/test/emqx_resource_SUITE.erl b/apps/emqx_resource/test/emqx_resource_SUITE.erl index 8a6b179d5..809f101a8 100644 --- a/apps/emqx_resource/test/emqx_resource_SUITE.erl +++ b/apps/emqx_resource/test/emqx_resource_SUITE.erl @@ -19,17 +19,18 @@ -compile(export_all). -include_lib("eunit/include/eunit.hrl"). --include_lib("common_test/include/ct.hrl"). --include("emqx_resource.hrl"). -include_lib("stdlib/include/ms_transform.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). -define(TEST_RESOURCE, emqx_connector_demo). -define(ID, <<"id">>). +-define(ID1, <<"id1">>). -define(DEFAULT_RESOURCE_GROUP, <<"default">>). -define(RESOURCE_ERROR(REASON), {error, {resource_error, #{reason := REASON}}}). -define(TRACE_OPTS, #{timetrap => 10000, timeout => 1000}). +-import(emqx_common_test_helpers, [on_exit/1]). + all() -> emqx_common_test_helpers:all(?MODULE). @@ -37,11 +38,15 @@ groups() -> []. init_per_testcase(_, Config) -> + ct:timetrap({seconds, 30}), emqx_connector_demo:set_callback_mode(always_sync), Config. end_per_testcase(_, _Config) -> - _ = emqx_resource:remove(?ID). + snabbkaffe:stop(), + _ = emqx_resource:remove(?ID), + emqx_common_test_helpers:call_janitor(), + ok. init_per_suite(Config) -> code:ensure_loaded(?TEST_RESOURCE), @@ -67,111 +72,156 @@ t_check_config(_) -> {error, _} = emqx_resource:check_config(?TEST_RESOURCE, #{invalid => config}). t_create_remove(_) -> - {error, _} = emqx_resource:check_and_create_local( - ?ID, - ?DEFAULT_RESOURCE_GROUP, - ?TEST_RESOURCE, - #{unknown => test_resource} - ), + ?check_trace( + begin + ?assertMatch( + {error, _}, + emqx_resource:check_and_create_local( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{unknown => test_resource} + ) + ), - {ok, _} = emqx_resource:create( - ?ID, - ?DEFAULT_RESOURCE_GROUP, - ?TEST_RESOURCE, - #{name => test_resource} - ), + ?assertMatch( + {ok, _}, + emqx_resource:create( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource} + ) + ), - {ok, _} = emqx_resource:recreate( - ?ID, - ?TEST_RESOURCE, - #{name => test_resource}, - #{} - ), - {ok, #{pid := Pid}} = emqx_resource:query(?ID, get_state), + ?assertMatch( + {ok, _}, + emqx_resource:recreate( + ?ID, + ?TEST_RESOURCE, + #{name => test_resource}, + #{} + ) + ), - ?assert(is_process_alive(Pid)), + {ok, #{pid := Pid}} = emqx_resource:query(?ID, get_state), - ok = emqx_resource:remove(?ID), - {error, _} = emqx_resource:remove(?ID), + ?assert(is_process_alive(Pid)), - ?assertNot(is_process_alive(Pid)). + ?assertEqual(ok, emqx_resource:remove(?ID)), + ?assertMatch({error, _}, emqx_resource:remove(?ID)), + + ?assertNot(is_process_alive(Pid)) + end, + fun(Trace) -> + ?assertEqual([], ?of_kind("inconsistent_state", Trace)), + ?assertEqual([], ?of_kind("inconsistent_cache", Trace)) + end + ). 
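The suite's recurring synchronization idiom, visible throughout the reworked tests below, is to subscribe to a trace event before producing load and then block on snabbkaffe:receive_events/1, replacing the old fixed timer:sleep calls. A hedged sketch of the idiom; the event name is hypothetical, the snabbkaffe calls are the ones used in this suite:

-module(snk_wait_sketch).
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-export([wait_for/1]).

%% Wait for N occurrences of an event instead of sleeping for a
%% fixed amount of time.
wait_for(N) ->
    {ok, SRef} = snabbkaffe:subscribe(
        ?match_event(#{?snk_kind := my_event}),
        N,
        _Timeout = 10_000
    ),
    %% ... produce the load that emits `my_event' N times here ...
    {ok, Events} = snabbkaffe:receive_events(SRef),
    length(Events) =:= N.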
t_create_remove_local(_) -> - {error, _} = emqx_resource:check_and_create_local( - ?ID, - ?DEFAULT_RESOURCE_GROUP, - ?TEST_RESOURCE, - #{unknown => test_resource} - ), + ?check_trace( + begin + ?assertMatch( + {error, _}, + emqx_resource:check_and_create_local( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{unknown => test_resource} + ) + ), - {ok, _} = emqx_resource:create_local( - ?ID, - ?DEFAULT_RESOURCE_GROUP, - ?TEST_RESOURCE, - #{name => test_resource} - ), + ?assertMatch( + {ok, _}, + emqx_resource:create_local( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource} + ) + ), - emqx_resource:recreate_local( - ?ID, - ?TEST_RESOURCE, - #{name => test_resource}, - #{} - ), - {ok, #{pid := Pid}} = emqx_resource:query(?ID, get_state), + emqx_resource:recreate_local( + ?ID, + ?TEST_RESOURCE, + #{name => test_resource}, + #{} + ), - ?assert(is_process_alive(Pid)), + {ok, #{pid := Pid}} = emqx_resource:query(?ID, get_state), - emqx_resource:set_resource_status_connecting(?ID), + ?assert(is_process_alive(Pid)), - emqx_resource:recreate_local( - ?ID, - ?TEST_RESOURCE, - #{name => test_resource}, - #{} - ), + emqx_resource:set_resource_status_connecting(?ID), - ok = emqx_resource:remove_local(?ID), - {error, _} = emqx_resource:remove_local(?ID), + emqx_resource:recreate_local( + ?ID, + ?TEST_RESOURCE, + #{name => test_resource}, + #{} + ), - ?assertMatch( - ?RESOURCE_ERROR(not_found), - emqx_resource:query(?ID, get_state) - ), - ?assertNot(is_process_alive(Pid)). + ?assertEqual(ok, emqx_resource:remove_local(?ID)), + ?assertMatch({error, _}, emqx_resource:remove_local(?ID)), + + ?assertMatch( + ?RESOURCE_ERROR(not_found), + emqx_resource:query(?ID, get_state) + ), + + ?assertNot(is_process_alive(Pid)) + end, + fun(Trace) -> + ?assertEqual([], ?of_kind("inconsistent_state", Trace)), + ?assertEqual([], ?of_kind("inconsistent_cache", Trace)) + end + ). t_do_not_start_after_created(_) -> - {ok, _} = emqx_resource:create_local( - ?ID, - ?DEFAULT_RESOURCE_GROUP, - ?TEST_RESOURCE, - #{name => test_resource}, - #{start_after_created => false} - ), - %% the resource should remain `disconnected` after created - timer:sleep(200), - ?assertMatch( - ?RESOURCE_ERROR(stopped), - emqx_resource:query(?ID, get_state) - ), - ?assertMatch( - {ok, _, #{status := stopped}}, - emqx_resource:get_instance(?ID) - ), + ?check_trace( + begin + ?assertMatch( + {ok, _}, + emqx_resource:create_local( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource}, + #{start_after_created => false} + ) + ), + %% the resource should remain `disconnected` after created + timer:sleep(200), + ?assertMatch( + ?RESOURCE_ERROR(stopped), + emqx_resource:query(?ID, get_state) + ), + ?assertMatch( + {ok, _, #{status := stopped}}, + emqx_resource:get_instance(?ID) + ), - %% start the resource manually.. - ok = emqx_resource:start(?ID), - {ok, #{pid := Pid}} = emqx_resource:query(?ID, get_state), - ?assert(is_process_alive(Pid)), + %% start the resource manually.. 
+ ?assertEqual(ok, emqx_resource:start(?ID)), + {ok, #{pid := Pid}} = emqx_resource:query(?ID, get_state), + ?assert(is_process_alive(Pid)), - %% restart the resource - ok = emqx_resource:restart(?ID), - ?assertNot(is_process_alive(Pid)), - {ok, #{pid := Pid2}} = emqx_resource:query(?ID, get_state), - ?assert(is_process_alive(Pid2)), + %% restart the resource + ?assertEqual(ok, emqx_resource:restart(?ID)), + ?assertNot(is_process_alive(Pid)), + {ok, #{pid := Pid2}} = emqx_resource:query(?ID, get_state), + ?assert(is_process_alive(Pid2)), - ok = emqx_resource:remove_local(?ID), + ?assertEqual(ok, emqx_resource:remove_local(?ID)), - ?assertNot(is_process_alive(Pid2)). + ?assertNot(is_process_alive(Pid2)) + end, + fun(Trace) -> + ?assertEqual([], ?of_kind("inconsistent_state", Trace)), + ?assertEqual([], ?of_kind("inconsistent_cache", Trace)) + end + ). t_query(_) -> {ok, _} = emqx_resource:create_local( @@ -207,12 +257,17 @@ t_query_counter(_) -> ok = emqx_resource:remove_local(?ID). t_batch_query_counter(_) -> + BatchSize = 100, {ok, _} = emqx_resource:create_local( ?ID, ?DEFAULT_RESOURCE_GROUP, ?TEST_RESOURCE, #{name => test_resource, register => true}, - #{enable_batch => true, query_mode => sync} + #{ + batch_size => BatchSize, + batch_time => 100, + query_mode => sync + } ), ?check_trace( @@ -221,19 +276,37 @@ t_batch_query_counter(_) -> fun(Result, Trace) -> ?assertMatch({ok, 0}, Result), QueryTrace = ?of_kind(call_batch_query, Trace), - ?assertMatch([#{batch := [{query, _, get_counter, _}]}], QueryTrace) + ?assertMatch([#{batch := [{query, _, get_counter, _, _}]}], QueryTrace) end ), + NMsgs = 1_000, ?check_trace( ?TRACE_OPTS, - inc_counter_in_parallel(1000), + begin + NEvents = round(math:ceil(NMsgs / BatchSize)), + {ok, SRef} = snabbkaffe:subscribe( + ?match_event(#{?snk_kind := connector_demo_inc_counter}), + NEvents, + _Timeout = 10_000 + ), + inc_counter_in_parallel(NMsgs), + {ok, _} = snabbkaffe:receive_events(SRef), + ok + end, fun(Trace) -> - QueryTrace = ?of_kind(call_batch_query, Trace), - ?assertMatch([#{batch := BatchReq} | _] when length(BatchReq) > 1, QueryTrace) + QueryTrace = [ + Event + || Event = #{ + ?snk_kind := call_batch_query, + batch := BatchReq + } <- Trace, + length(BatchReq) > 1 + ], + ?assertMatch([_ | _], QueryTrace) end ), - {ok, 1000} = emqx_resource:query(?ID, get_counter), + {ok, NMsgs} = emqx_resource:query(?ID, get_counter), ok = emqx_resource:remove_local(?ID). @@ -243,20 +316,28 @@ t_query_counter_async_query(_) -> ?DEFAULT_RESOURCE_GROUP, ?TEST_RESOURCE, #{name => test_resource, register => true}, - #{query_mode => async, enable_batch => false} + #{query_mode => async, batch_size => 1} ), ?assertMatch({ok, 0}, emqx_resource:simple_sync_query(?ID, get_counter)), + NMsgs = 1_000, ?check_trace( ?TRACE_OPTS, - inc_counter_in_parallel(1000), + begin + {ok, SRef} = snabbkaffe:subscribe( + ?match_event(#{?snk_kind := connector_demo_inc_counter}), + NMsgs, + _Timeout = 60_000 + ), + inc_counter_in_parallel(NMsgs), + {ok, _} = snabbkaffe:receive_events(SRef), + ok + end, fun(Trace) -> - %% the callback_mode if 'emqx_connector_demo' is 'always_sync'. + %% the callback_mode of 'emqx_connector_demo' is 'always_sync'. QueryTrace = ?of_kind(call_query, Trace), - ?assertMatch([#{query := {query, _, {inc_counter, 1}, _}} | _], QueryTrace) + ?assertMatch([#{query := {query, _, {inc_counter, 1}, _, _}} | _], QueryTrace) end ), - %% wait for 1s to make sure all the aysnc query is sent to the resource. 
- timer:sleep(1000), %% simple query ignores the query_mode and batching settings in the resource_worker ?check_trace( ?TRACE_OPTS, @@ -265,10 +346,10 @@ t_query_counter_async_query(_) -> ?assertMatch({ok, 1000}, Result), %% the callback_mode if 'emqx_connector_demo' is 'always_sync'. QueryTrace = ?of_kind(call_query, Trace), - ?assertMatch([#{query := {query, _, get_counter, _}}], QueryTrace) + ?assertMatch([#{query := {query, _, get_counter, _, _}}], QueryTrace) end ), - {ok, _, #{metrics := #{counters := C}}} = emqx_resource:get_instance(?ID), + #{counters := C} = emqx_resource:get_metrics(?ID), ?assertMatch(#{matched := 1002, 'success' := 1002, 'failed' := 0}, C), ok = emqx_resource:remove_local(?ID). @@ -285,20 +366,32 @@ t_query_counter_async_callback(_) -> ?DEFAULT_RESOURCE_GROUP, ?TEST_RESOURCE, #{name => test_resource, register => true}, - #{query_mode => async, enable_batch => false, async_inflight_window => 1000000} + #{ + query_mode => async, + batch_size => 1, + inflight_window => 1000000 + } ), ?assertMatch({ok, 0}, emqx_resource:simple_sync_query(?ID, get_counter)), + NMsgs = 1_000, ?check_trace( ?TRACE_OPTS, - inc_counter_in_parallel(1000, ReqOpts), + begin + {ok, SRef} = snabbkaffe:subscribe( + ?match_event(#{?snk_kind := connector_demo_inc_counter_async}), + NMsgs, + _Timeout = 60_000 + ), + inc_counter_in_parallel(NMsgs, ReqOpts), + {ok, _} = snabbkaffe:receive_events(SRef), + ok + end, fun(Trace) -> QueryTrace = ?of_kind(call_query_async, Trace), - ?assertMatch([#{query := {query, _, {inc_counter, 1}, _}} | _], QueryTrace) + ?assertMatch([#{query := {query, _, {inc_counter, 1}, _, _}} | _], QueryTrace) end ), - %% wait for 1s to make sure all the aysnc query is sent to the resource. - timer:sleep(1000), %% simple query ignores the query_mode and batching settings in the resource_worker ?check_trace( ?TRACE_OPTS, @@ -306,10 +399,10 @@ t_query_counter_async_callback(_) -> fun(Result, Trace) -> ?assertMatch({ok, 1000}, Result), QueryTrace = ?of_kind(call_query, Trace), - ?assertMatch([#{query := {query, _, get_counter, _}}], QueryTrace) + ?assertMatch([#{query := {query, _, get_counter, _, _}}], QueryTrace) end ), - {ok, _, #{metrics := #{counters := C}}} = emqx_resource:get_instance(?ID), + #{counters := C} = emqx_resource:get_metrics(?ID), ?assertMatch(#{matched := 1002, 'success' := 1002, 'failed' := 0}, C), ?assertMatch(1000, ets:info(Tab0, size)), ?assert( @@ -325,12 +418,29 @@ t_query_counter_async_callback(_) -> t_query_counter_async_inflight(_) -> emqx_connector_demo:set_callback_mode(async_if_possible), + MetricsTab = ets:new(metrics_tab, [ordered_set, public]), + ok = telemetry:attach_many( + ?FUNCTION_NAME, + emqx_resource_metrics:events(), + fun(Event, Measurements, Meta, _Config) -> + ets:insert( + MetricsTab, + {erlang:monotonic_time(), #{ + event => Event, measurements => Measurements, metadata => Meta + }} + ), + ok + end, + unused_config + ), + on_exit(fun() -> telemetry:detach(?FUNCTION_NAME) end), Tab0 = ets:new(?FUNCTION_NAME, [bag, public]), - Insert0 = fun(Tab, Result) -> - ets:insert(Tab, {make_ref(), Result}) + Insert0 = fun(Tab, Ref, Result) -> + ct:pal("inserting ~p", [{Ref, Result}]), + ets:insert(Tab, {Ref, Result}) end, - ReqOpts = #{async_reply_fun => {Insert0, [Tab0]}}, + ReqOpts = fun() -> #{async_reply_fun => {Insert0, [Tab0, make_ref()]}} end, WindowSize = 15, {ok, _} = emqx_resource:create_local( ?ID, @@ -339,11 +449,10 @@ t_query_counter_async_inflight(_) -> #{name => test_resource, register => true}, #{ query_mode => async, - 
enable_batch => false, - async_inflight_window => WindowSize, + batch_size => 1, + inflight_window => WindowSize, worker_pool_size => 1, - resume_interval => 300, - enable_queue => false + resume_interval => 300 } ), ?assertMatch({ok, 0}, emqx_resource:simple_sync_query(?ID, get_counter)), @@ -353,85 +462,353 @@ t_query_counter_async_inflight(_) -> %% send async query to make the inflight window full ?check_trace( - ?TRACE_OPTS, - inc_counter_in_parallel(WindowSize, ReqOpts), + {_, {ok, _}} = + ?wait_async_action( + %% one more so that inflight would be already full upon last query + inc_counter_in_parallel(WindowSize + 1, ReqOpts), + #{?snk_kind := buffer_worker_flush_but_inflight_full}, + 1_000 + ), fun(Trace) -> QueryTrace = ?of_kind(call_query_async, Trace), - ?assertMatch([#{query := {query, _, {inc_counter, 1}, _}} | _], QueryTrace) + ?assertMatch([#{query := {query, _, {inc_counter, 1}, _, _}} | _], QueryTrace) end ), - - %% this will block the resource_worker as the inflight window is full now - ok = emqx_resource:query(?ID, {inc_counter, 1}), + tap_metrics(?LINE), ?assertMatch(0, ets:info(Tab0, size)), - %% sleep to make the resource_worker resume some times - timer:sleep(2000), + tap_metrics(?LINE), %% send query now will fail because the resource is blocked. Insert = fun(Tab, Ref, Result) -> - ets:insert(Tab, {Ref, Result}) + ct:pal("inserting ~p", [{Ref, Result}]), + ets:insert(Tab, {Ref, Result}), + ?tp(tmp_query_inserted, #{}) end, - ok = emqx_resource:query(?ID, {inc_counter, 1}, #{ - async_reply_fun => {Insert, [Tab0, tmp_query]} - }), - timer:sleep(100), - ?assertMatch([{_, {error, {resource_error, #{reason := blocked}}}}], ets:take(Tab0, tmp_query)), + %% since this counts as a failure, it'll be enqueued and retried + %% later, when the resource is unblocked. + {ok, {ok, _}} = + ?wait_async_action( + emqx_resource:query(?ID, {inc_counter, 99}, #{ + async_reply_fun => {Insert, [Tab0, tmp_query]} + }), + #{?snk_kind := buffer_worker_appended_to_queue}, + 1_000 + ), + tap_metrics(?LINE), - %% all response should be received after the resource is resumed. + %% all responses should be received after the resource is resumed. + {ok, SRef0} = snabbkaffe:subscribe( + ?match_event(#{?snk_kind := connector_demo_inc_counter_async}), + %% +2 because the tmp_query above will be retried and succeed + %% this time. + WindowSize + 2, + _Timeout0 = 10_000 + ), ?assertMatch(ok, emqx_resource:simple_sync_query(?ID, resume)), - timer:sleep(1000), - ?assertEqual(WindowSize, ets:info(Tab0, size)), + tap_metrics(?LINE), + {ok, _} = snabbkaffe:receive_events(SRef0), + tap_metrics(?LINE), + %% since the previous tmp_query was enqueued to be retried, we + %% take it again from the table; this time, it should have + %% succeeded. + ?assertMatch([{tmp_query, ok}], ets:take(Tab0, tmp_query)), %% send async query, this time everything should be ok. 
Num = 10, ?check_trace( - ?TRACE_OPTS, - inc_counter_in_parallel(Num, ReqOpts), + begin + {ok, SRef} = snabbkaffe:subscribe( + ?match_event(#{?snk_kind := connector_demo_inc_counter_async}), + Num, + _Timeout0 = 10_000 + ), + inc_counter_in_parallel_increasing(Num, 1, ReqOpts), + {ok, _} = snabbkaffe:receive_events(SRef), + ok + end, fun(Trace) -> QueryTrace = ?of_kind(call_query_async, Trace), - ?assertMatch([#{query := {query, _, {inc_counter, 1}, _}} | _], QueryTrace) + ?assertMatch([#{query := {query, _, {inc_counter, _}, _, _}} | _], QueryTrace), + ?assertEqual(WindowSize + Num + 1, ets:info(Tab0, size), #{tab => ets:tab2list(Tab0)}), + tap_metrics(?LINE), + ok end ), - timer:sleep(1000), - ?assertEqual(WindowSize + Num, ets:info(Tab0, size)), %% block the resource ?assertMatch(ok, emqx_resource:simple_sync_query(?ID, block)), %% again, send async query to make the inflight window full ?check_trace( - ?TRACE_OPTS, - inc_counter_in_parallel(WindowSize, ReqOpts), + {_, {ok, _}} = + ?wait_async_action( + %% one more so that inflight would be already full upon last query + inc_counter_in_parallel(WindowSize + 1, ReqOpts), + #{?snk_kind := buffer_worker_flush_but_inflight_full}, + 1_000 + ), fun(Trace) -> QueryTrace = ?of_kind(call_query_async, Trace), - ?assertMatch([#{query := {query, _, {inc_counter, 1}, _}} | _], QueryTrace) + ?assertMatch([#{query := {query, _, {inc_counter, 1}, _, _}} | _], QueryTrace) end ), %% this will block the resource_worker - ok = emqx_resource:query(?ID, {inc_counter, 1}), + ok = emqx_resource:query(?ID, {inc_counter, 4}), - Sent = WindowSize + Num + WindowSize, + Sent = WindowSize + 1 + Num + WindowSize + 1, + {ok, SRef1} = snabbkaffe:subscribe( + ?match_event(#{?snk_kind := connector_demo_inc_counter_async}), + WindowSize + 1, + _Timeout0 = 10_000 + ), ?assertMatch(ok, emqx_resource:simple_sync_query(?ID, resume)), - timer:sleep(1000), - ?assertEqual(Sent, ets:info(Tab0, size)), + {ok, _} = snabbkaffe:receive_events(SRef1), + ?assertEqual(Sent, ets:info(Tab0, size), #{tab => ets:tab2list(Tab0)}), + tap_metrics(?LINE), {ok, Counter} = emqx_resource:simple_sync_query(?ID, get_counter), ct:pal("get_counter: ~p, sent: ~p", [Counter, Sent]), ?assert(Sent =< Counter), - {ok, _, #{metrics := #{counters := C}}} = emqx_resource:get_instance(?ID), - ct:pal("metrics: ~p", [C]), - {ok, IncorrectStatusCount} = emqx_resource:simple_sync_query(?ID, get_incorrect_status_count), - %% The `simple_sync_query' we just did also increases the matched - %% count, hence the + 1. - ExtraSimpleCallCount = IncorrectStatusCount + 1, + %% give the metrics some time to stabilize. + ct:sleep(1000), + #{counters := C, gauges := G} = tap_metrics(?LINE), ?assertMatch( - #{matched := M, success := Ss, dropped := Dp, 'retried.success' := Rs} when - M == Ss + Dp - Rs + ExtraSimpleCallCount, - C, #{ - metrics => C, - extra_simple_call_count => ExtraSimpleCallCount + counters := + #{matched := M, success := Ss, dropped := Dp}, + gauges := #{queuing := Qing, inflight := Infl} + } when + M == Ss + Dp + Qing + Infl, + #{counters => C, gauges => G}, + #{ + metrics => #{counters => C, gauges => G}, + results => ets:tab2list(Tab0), + metrics_trace => ets:tab2list(MetricsTab) + } + ), + ?assert( + lists:all( + fun + ({_, ok}) -> true; + (_) -> false + end, + ets:tab2list(Tab0) + ) + ), + ok = emqx_resource:remove_local(?ID). 
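The inflight tests above consistently replace fixed `timer:sleep/1` pauses with snabbkaffe subscriptions, which keeps them deterministic under load. A minimal sketch of that idiom, assuming the suite's macros and helpers are in scope (the function name `wait_for_n_increments/1` is illustrative only):

    %% Subscribe before triggering the work, then block until exactly
    %% NEvents matching trace events arrive (or the timeout hits).
    wait_for_n_increments(NEvents) ->
        {ok, SRef} = snabbkaffe:subscribe(
            ?match_event(#{?snk_kind := connector_demo_inc_counter}),
            NEvents,
            _Timeout = 10_000
        ),
        inc_counter_in_parallel(NEvents),
        {ok, _Events} = snabbkaffe:receive_events(SRef),
        ok.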
+ +t_query_counter_async_inflight_batch(_) -> + emqx_connector_demo:set_callback_mode(async_if_possible), + MetricsTab = ets:new(metrics_tab, [ordered_set, public]), + ok = telemetry:attach_many( + ?FUNCTION_NAME, + emqx_resource_metrics:events(), + fun(Event, Measurements, Meta, _Config) -> + ets:insert( + MetricsTab, + {erlang:monotonic_time(), #{ + event => Event, measurements => Measurements, metadata => Meta + }} + ), + ok + end, + unused_config + ), + on_exit(fun() -> telemetry:detach(?FUNCTION_NAME) end), + + Tab0 = ets:new(?FUNCTION_NAME, [bag, public]), + Insert0 = fun(Tab, Ref, Result) -> + ct:pal("inserting ~p", [{Ref, Result}]), + ets:insert(Tab, {Ref, Result}) + end, + ReqOpts = fun() -> #{async_reply_fun => {Insert0, [Tab0, make_ref()]}} end, + BatchSize = 2, + WindowSize = 15, + {ok, _} = emqx_resource:create_local( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource, register => true}, + #{ + query_mode => async, + batch_size => BatchSize, + batch_time => 100, + inflight_window => WindowSize, + worker_pool_size => 1, + resume_interval => 300 + } + ), + ?assertMatch({ok, 0}, emqx_resource:simple_sync_query(?ID, get_counter)), + + %% block the resource + ?assertMatch(ok, emqx_resource:simple_sync_query(?ID, block)), + + %% send async query to make the inflight window full + NumMsgs = BatchSize * WindowSize, + ?check_trace( + {_, {ok, _}} = + ?wait_async_action( + %% a batch more so that inflight would be already full upon last query + inc_counter_in_parallel(NumMsgs + BatchSize, ReqOpts), + #{?snk_kind := buffer_worker_flush_but_inflight_full}, + 5_000 + ), + fun(Trace) -> + QueryTrace = [ + Event + || Event = #{ + ?snk_kind := call_batch_query_async, + batch := [ + {query, _, {inc_counter, 1}, _, _}, + {query, _, {inc_counter, 1}, _, _} + ] + } <- + Trace + ], + ?assertMatch([_ | _], QueryTrace) + end + ), + tap_metrics(?LINE), + + Sent1 = NumMsgs + BatchSize, + + ?check_trace( + begin + %% this will block the resource_worker as the inflight window is full now + {ok, {ok, _}} = + ?wait_async_action( + emqx_resource:query(?ID, {inc_counter, 2}, ReqOpts()), + #{?snk_kind := buffer_worker_flush_but_inflight_full}, + 5_000 + ), + ?assertMatch(0, ets:info(Tab0, size)), + ok + end, + [] + ), + + Sent2 = Sent1 + 1, + + tap_metrics(?LINE), + %% send query now will fail because the resource is blocked. + Insert = fun(Tab, Ref, Result) -> + ct:pal("inserting ~p", [{Ref, Result}]), + ets:insert(Tab, {Ref, Result}), + ?tp(tmp_query_inserted, #{}) + end, + %% since this counts as a failure, it'll be enqueued and retried + %% later, when the resource is unblocked. + {ok, {ok, _}} = + ?wait_async_action( + emqx_resource:query(?ID, {inc_counter, 3}, #{ + async_reply_fun => {Insert, [Tab0, tmp_query]} + }), + #{?snk_kind := buffer_worker_appended_to_queue}, + 1_000 + ), + tap_metrics(?LINE), + + %% all responses should be received after the resource is resumed. + {ok, SRef0} = snabbkaffe:subscribe( + ?match_event(#{?snk_kind := connector_demo_inc_counter_async}), + %% +2 because the tmp_query above will be retried and succeed + %% this time. + WindowSize + 2, + 5_000 + ), + ?assertMatch(ok, emqx_resource:simple_sync_query(?ID, resume)), + tap_metrics(?LINE), + {ok, _} = snabbkaffe:receive_events(SRef0), + %% since the previous tmp_query was enqueued to be retried, we + %% take it again from the table; this time, it should have + %% succeeded. 
+ ?assertEqual([{tmp_query, ok}], ets:take(Tab0, tmp_query)), + ?assertEqual(Sent2, ets:info(Tab0, size), #{tab => ets:tab2list(Tab0)}), + tap_metrics(?LINE), + + %% send async query, this time everything should be ok. + NumBatches1 = 3, + NumMsgs1 = BatchSize * NumBatches1, + ?check_trace( + ?TRACE_OPTS, + begin + {ok, SRef} = snabbkaffe:subscribe( + ?match_event(#{?snk_kind := connector_demo_inc_counter_async}), + NumBatches1, + 5_000 + ), + inc_counter_in_parallel(NumMsgs1, ReqOpts), + {ok, _} = snabbkaffe:receive_events(SRef), + ok + end, + fun(Trace) -> + QueryTrace = ?of_kind(call_batch_query_async, Trace), + ?assertMatch( + [#{batch := [{query, _, {inc_counter, _}, _, _} | _]} | _], + QueryTrace + ) + end + ), + + Sent3 = Sent2 + NumMsgs1, + + ?assertEqual(Sent3, ets:info(Tab0, size), #{tab => ets:tab2list(Tab0)}), + tap_metrics(?LINE), + + %% block the resource + ?assertMatch(ok, emqx_resource:simple_sync_query(?ID, block)), + %% again, send async query to make the inflight window full + ?check_trace( + {_, {ok, _}} = + ?wait_async_action( + %% a batch more so that inflight would be already full upon last query + inc_counter_in_parallel(NumMsgs + BatchSize, ReqOpts), + #{?snk_kind := buffer_worker_flush_but_inflight_full}, + 5_000 + ), + fun(Trace) -> + QueryTrace = ?of_kind(call_batch_query_async, Trace), + ?assertMatch( + [#{batch := [{query, _, {inc_counter, _}, _, _} | _]} | _], + QueryTrace + ) + end + ), + + Sent4 = Sent3 + NumMsgs + BatchSize, + + %% this will block the resource_worker + ok = emqx_resource:query(?ID, {inc_counter, 1}), + + {ok, SRef1} = snabbkaffe:subscribe( + ?match_event(#{?snk_kind := connector_demo_inc_counter_async}), + WindowSize + 1, + 5_000 + ), + ?assertMatch(ok, emqx_resource:simple_sync_query(?ID, resume)), + {ok, _} = snabbkaffe:receive_events(SRef1), + ?assertEqual(Sent4, ets:info(Tab0, size), #{tab => ets:tab2list(Tab0)}), + + {ok, Counter} = emqx_resource:simple_sync_query(?ID, get_counter), + ct:pal("get_counter: ~p, sent: ~p", [Counter, Sent4]), + ?assert(Sent4 =< Counter), + + %% give the metrics some time to stabilize. + ct:sleep(1000), + #{counters := C, gauges := G} = tap_metrics(?LINE), + ?assertMatch( + #{ + counters := + #{matched := M, success := Ss, dropped := Dp}, + gauges := #{queuing := Qing, inflight := Infl} + } when + M == Ss + Dp + Qing + Infl, + #{counters => C, gauges => G}, + #{ + metrics => #{counters => C, gauges => G}, + results => ets:tab2list(Tab0), + metrics_trace => ets:tab2list(MetricsTab) } ), ?assert( @@ -446,149 +823,210 @@ t_query_counter_async_inflight(_) -> ok = emqx_resource:remove_local(?ID). t_healthy_timeout(_) -> - {ok, _} = emqx_resource:create_local( - ?ID, - ?DEFAULT_RESOURCE_GROUP, - ?TEST_RESOURCE, - #{name => <<"bad_not_atom_name">>, register => true}, - %% the ?TEST_RESOURCE always returns the `Mod:on_get_status/2` 300ms later. - #{health_check_interval => 200} - ), - ?assertMatch( - ?RESOURCE_ERROR(not_connected), - emqx_resource:query(?ID, get_state) - ), - ok = emqx_resource:remove_local(?ID). + ?check_trace( + begin + ?assertMatch( + {ok, _}, + emqx_resource:create_local( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => <<"bad_not_atom_name">>, register => true}, + %% the ?TEST_RESOURCE always returns the `Mod:on_get_status/2` 300ms later. 
+ #{health_check_interval => 200} + ) + ), + ?assertMatch( + {error, {resource_error, #{reason := timeout}}}, + emqx_resource:query(?ID, get_state, #{timeout => 1_000}) + ), + ?assertMatch( + {ok, _Group, #{status := disconnected}}, emqx_resource_manager:lookup(?ID) + ), + ?assertEqual(ok, emqx_resource:remove_local(?ID)) + end, + fun(Trace) -> + ?assertEqual([], ?of_kind("inconsistent_state", Trace)), + ?assertEqual([], ?of_kind("inconsistent_cache", Trace)) + end + ). t_healthy(_) -> - {ok, _} = emqx_resource:create_local( - ?ID, - ?DEFAULT_RESOURCE_GROUP, - ?TEST_RESOURCE, - #{name => test_resource} - ), - {ok, #{pid := Pid}} = emqx_resource:query(?ID, get_state), - timer:sleep(300), - emqx_resource:set_resource_status_connecting(?ID), + ?check_trace( + begin + ?assertMatch( + {ok, _}, + emqx_resource:create_local( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource} + ) + ), + {ok, #{pid := Pid}} = emqx_resource:query(?ID, get_state), + timer:sleep(300), + emqx_resource:set_resource_status_connecting(?ID), - {ok, connected} = emqx_resource:health_check(?ID), - ?assertMatch( - [#{status := connected}], - emqx_resource:list_instances_verbose() - ), + ?assertEqual({ok, connected}, emqx_resource:health_check(?ID)), + ?assertMatch( + [#{status := connected}], + emqx_resource:list_instances_verbose() + ), - erlang:exit(Pid, shutdown), + erlang:exit(Pid, shutdown), - ?assertEqual({ok, disconnected}, emqx_resource:health_check(?ID)), + ?assertEqual({ok, disconnected}, emqx_resource:health_check(?ID)), - ?assertMatch( - [#{status := disconnected}], - emqx_resource:list_instances_verbose() - ), + ?assertMatch( + [#{status := disconnected}], + emqx_resource:list_instances_verbose() + ), - ok = emqx_resource:remove_local(?ID). + ?assertEqual(ok, emqx_resource:remove_local(?ID)) + end, + fun(Trace) -> + ?assertEqual([], ?of_kind("inconsistent_state", Trace)), + ?assertEqual([], ?of_kind("inconsistent_cache", Trace)) + end + ). 
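The health-check tests above, like most tests touched by this refactor, wrap their bodies in `?check_trace` and then assert that no state or cache inconsistency events were recorded. A hypothetical helper condensing that shared post-condition:

    %% Reusable trace checker asserting that the resource manager never
    %% emitted an inconsistency event while the test body ran.
    no_inconsistency_checker() ->
        fun(Trace) ->
            ?assertEqual([], ?of_kind("inconsistent_state", Trace)),
            ?assertEqual([], ?of_kind("inconsistent_cache", Trace))
        end.

Passing `no_inconsistency_checker()` as the checker argument to `?check_trace` would replace the inline funs repeated throughout these tests.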
t_stop_start(_) -> - {error, _} = emqx_resource:check_and_create( - ?ID, - ?DEFAULT_RESOURCE_GROUP, - ?TEST_RESOURCE, - #{unknown => test_resource} - ), + ?check_trace( + begin + ?assertMatch( + {error, _}, + emqx_resource:check_and_create( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{unknown => test_resource} + ) + ), - {ok, _} = emqx_resource:check_and_create( - ?ID, - ?DEFAULT_RESOURCE_GROUP, - ?TEST_RESOURCE, - #{<<"name">> => <<"test_resource">>} - ), + ?assertMatch( + {ok, _}, + emqx_resource:check_and_create( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{<<"name">> => <<"test_resource">>} + ) + ), - %% add some metrics to test their persistence - WorkerID0 = <<"worker:0">>, - WorkerID1 = <<"worker:1">>, - emqx_resource_metrics:batching_set(?ID, WorkerID0, 2), - emqx_resource_metrics:batching_set(?ID, WorkerID1, 3), - ?assertEqual(5, emqx_resource_metrics:batching_get(?ID)), + %% add some metrics to test their persistence + WorkerID0 = <<"worker:0">>, + WorkerID1 = <<"worker:1">>, + emqx_resource_metrics:inflight_set(?ID, WorkerID0, 2), + emqx_resource_metrics:inflight_set(?ID, WorkerID1, 3), + ?assertEqual(5, emqx_resource_metrics:inflight_get(?ID)), - {ok, _} = emqx_resource:check_and_recreate( - ?ID, - ?TEST_RESOURCE, - #{<<"name">> => <<"test_resource">>}, - #{} - ), + ?assertMatch( + {ok, _}, + emqx_resource:check_and_recreate( + ?ID, + ?TEST_RESOURCE, + #{<<"name">> => <<"test_resource">>}, + #{} + ) + ), - {ok, #{pid := Pid0}} = emqx_resource:query(?ID, get_state), + {ok, #{pid := Pid0}} = emqx_resource:query(?ID, get_state), - ?assert(is_process_alive(Pid0)), + ?assert(is_process_alive(Pid0)), - %% metrics are reset when recreating - ?assertEqual(0, emqx_resource_metrics:batching_get(?ID)), + %% metrics are reset when recreating + %% depending on timing, might show the request we just did. + ct:sleep(500), + ?assertEqual(0, emqx_resource_metrics:inflight_get(?ID)), - ok = emqx_resource:stop(?ID), + ok = emqx_resource:stop(?ID), - ?assertNot(is_process_alive(Pid0)), + ?assertNot(is_process_alive(Pid0)), - ?assertMatch( - ?RESOURCE_ERROR(stopped), - emqx_resource:query(?ID, get_state) - ), + ?assertMatch( + ?RESOURCE_ERROR(stopped), + emqx_resource:query(?ID, get_state) + ), - ok = emqx_resource:restart(?ID), - timer:sleep(300), + ?assertEqual(ok, emqx_resource:restart(?ID)), + timer:sleep(300), - {ok, #{pid := Pid1}} = emqx_resource:query(?ID, get_state), + {ok, #{pid := Pid1}} = emqx_resource:query(?ID, get_state), - ?assert(is_process_alive(Pid1)), + ?assert(is_process_alive(Pid1)), - %% now stop while resetting the metrics - emqx_resource_metrics:batching_set(?ID, WorkerID0, 1), - emqx_resource_metrics:batching_set(?ID, WorkerID1, 4), - ?assertEqual(5, emqx_resource_metrics:batching_get(?ID)), - ok = emqx_resource:stop(?ID), - ?assertEqual(0, emqx_resource_metrics:batching_get(?ID)), + %% now stop while resetting the metrics + ct:sleep(500), + emqx_resource_metrics:inflight_set(?ID, WorkerID0, 1), + emqx_resource_metrics:inflight_set(?ID, WorkerID1, 4), + ?assertEqual(5, emqx_resource_metrics:inflight_get(?ID)), + ?assertEqual(ok, emqx_resource:stop(?ID)), + ?assertEqual(0, emqx_resource_metrics:inflight_get(?ID)) + end, - ok. + fun(Trace) -> + ?assertEqual([], ?of_kind("inconsistent_state", Trace)), + ?assertEqual([], ?of_kind("inconsistent_cache", Trace)) + end + ). 
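t_stop_start above exercises the per-worker `inflight` gauge that replaces the removed `batching` metric. The aggregation semantics in miniature, using the same calls as the test (worker IDs and values are illustrative):

    %% Gauges are set per worker ID and read back as the sum over all
    %% workers; they reset to zero on recreate and stop, as asserted in
    %% t_stop_start.
    demo_inflight_gauge() ->
        emqx_resource_metrics:inflight_set(?ID, <<"worker:0">>, 2),
        emqx_resource_metrics:inflight_set(?ID, <<"worker:1">>, 3),
        ?assertEqual(5, emqx_resource_metrics:inflight_get(?ID)).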
t_stop_start_local(_) -> - {error, _} = emqx_resource:check_and_create_local( - ?ID, - ?DEFAULT_RESOURCE_GROUP, - ?TEST_RESOURCE, - #{unknown => test_resource} - ), + ?check_trace( + begin + ?assertMatch( + {error, _}, + emqx_resource:check_and_create_local( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{unknown => test_resource} + ) + ), - {ok, _} = emqx_resource:check_and_create_local( - ?ID, - ?DEFAULT_RESOURCE_GROUP, - ?TEST_RESOURCE, - #{<<"name">> => <<"test_resource">>} - ), + ?assertMatch( + {ok, _}, + emqx_resource:check_and_create_local( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{<<"name">> => <<"test_resource">>} + ) + ), - {ok, _} = emqx_resource:check_and_recreate_local( - ?ID, - ?TEST_RESOURCE, - #{<<"name">> => <<"test_resource">>}, - #{} - ), + ?assertMatch( + {ok, _}, + emqx_resource:check_and_recreate_local( + ?ID, + ?TEST_RESOURCE, + #{<<"name">> => <<"test_resource">>}, + #{} + ) + ), - {ok, #{pid := Pid0}} = emqx_resource:query(?ID, get_state), + {ok, #{pid := Pid0}} = emqx_resource:query(?ID, get_state), - ?assert(is_process_alive(Pid0)), + ?assert(is_process_alive(Pid0)), - ok = emqx_resource:stop(?ID), + ?assertEqual(ok, emqx_resource:stop(?ID)), - ?assertNot(is_process_alive(Pid0)), + ?assertNot(is_process_alive(Pid0)), - ?assertMatch( - ?RESOURCE_ERROR(stopped), - emqx_resource:query(?ID, get_state) - ), + ?assertMatch( + ?RESOURCE_ERROR(stopped), + emqx_resource:query(?ID, get_state) + ), - ok = emqx_resource:restart(?ID), + ?assertEqual(ok, emqx_resource:restart(?ID)), - {ok, #{pid := Pid1}} = emqx_resource:query(?ID, get_state), + {ok, #{pid := Pid1}} = emqx_resource:query(?ID, get_state), - ?assert(is_process_alive(Pid1)). + ?assert(is_process_alive(Pid1)) + end, + fun(Trace) -> + ?assertEqual([], ?of_kind("inconsistent_state", Trace)), + ?assertEqual([], ?of_kind("inconsistent_cache", Trace)) + end + ). t_list_filter(_) -> {ok, _} = emqx_resource:create_local( @@ -617,20 +1055,22 @@ t_list_filter(_) -> ). t_create_dry_run_local(_) -> - ets:match_delete(emqx_resource_manager, {{owner, '$1'}, '_'}), lists:foreach( fun(_) -> create_dry_run_local_succ() end, lists:seq(1, 10) ), - [] = ets:match(emqx_resource_manager, {{owner, '$1'}, '_'}). + ?retry( + 100, + 5, + ?assertEqual( + [], + emqx_resource:list_instances_verbose() + ) + ). create_dry_run_local_succ() -> - case whereis(test_resource) of - undefined -> ok; - Pid -> exit(Pid, kill) - end, ?assertEqual( ok, emqx_resource:create_dry_run_local( @@ -641,23 +1081,35 @@ create_dry_run_local_succ() -> ?assertEqual(undefined, whereis(test_resource)). t_create_dry_run_local_failed(_) -> + ct:timetrap({seconds, 120}), + ct:pal("creating with creation error"), Res1 = emqx_resource:create_dry_run_local( ?TEST_RESOURCE, #{create_error => true} ), ?assertMatch({error, _}, Res1), + ct:pal("creating with health check error"), Res2 = emqx_resource:create_dry_run_local( ?TEST_RESOURCE, #{name => test_resource, health_check_error => true} ), ?assertMatch({error, _}, Res2), + ct:pal("creating with stop error"), Res3 = emqx_resource:create_dry_run_local( ?TEST_RESOURCE, #{name => test_resource, stop_error => true} ), - ?assertEqual(ok, Res3). + ?assertEqual(ok, Res3), + ?retry( + 100, + 5, + ?assertEqual( + [], + emqx_resource:list_instances_verbose() + ) + ). t_test_func(_) -> ?assertEqual(ok, erlang:apply(emqx_resource_validator:not_empty("not_empty"), [<<"someval">>])), @@ -689,21 +1141,1739 @@ t_auto_retry(_) -> ), ?assertEqual(ok, Res). 
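The dry-run tests above switch from poking at the `emqx_resource_manager` ETS table to polling the public API with `?retry`, since dry-run instances are now cleaned up asynchronously. The polling idiom in isolation (helper name is illustrative):

    %% ?retry(Interval, NAttempts, Expr) re-evaluates Expr until it
    %% stops crashing or the attempts run out.
    wait_for_dry_run_cleanup() ->
        ?retry(
            _Interval = 100,
            _NAttempts = 5,
            ?assertEqual([], emqx_resource:list_instances_verbose())
        ).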
+t_health_check_disconnected(_) -> + ?check_trace( + begin + _ = emqx_resource:create_local( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource, create_error => true}, + #{auto_retry_interval => 100} + ), + ?assertEqual( + {ok, disconnected}, + emqx_resource:health_check(?ID) + ) + end, + fun(Trace) -> + ?assertEqual([], ?of_kind("inconsistent_state", Trace)), + ?assertEqual([], ?of_kind("inconsistent_cache", Trace)) + end + ). + +t_unblock_only_required_buffer_workers(_) -> + {ok, _} = emqx_resource:create( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource}, + #{ + query_mode => async, + batch_size => 5, + batch_time => 100 + } + ), + lists:foreach( + fun emqx_resource_buffer_worker:block/1, + emqx_resource_buffer_worker_sup:worker_pids(?ID) + ), + emqx_resource:create( + ?ID1, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource}, + #{ + query_mode => async, + batch_size => 5, + batch_time => 100 + } + ), + %% creation of `?ID1` should not have unblocked `?ID`'s buffer workers + %% so we should see resumes now (`buffer_worker_enter_running`). + ?check_trace( + ?wait_async_action( + lists:foreach( + fun emqx_resource_buffer_worker:resume/1, + emqx_resource_buffer_worker_sup:worker_pids(?ID) + ), + #{?snk_kind := buffer_worker_enter_running}, + 5000 + ), + fun(Trace) -> + ?assertMatch( + [#{id := ?ID} | _], + ?of_kind(buffer_worker_enter_running, Trace) + ) + end + ). + +t_retry_batch(_Config) -> + {ok, _} = emqx_resource:create( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource}, + #{ + query_mode => async, + batch_size => 5, + batch_time => 100, + worker_pool_size => 1, + resume_interval => 1_000 + } + ), + + ?assertMatch(ok, emqx_resource:simple_sync_query(?ID, block)), + Matched0 = emqx_resource_metrics:matched_get(?ID), + ?assertEqual(1, Matched0), + + %% these requests will batch together and fail; the buffer worker + %% will enter the `blocked' state and they'll be retried later, + %% after it unblocks. + Payloads = lists:seq(1, 5), + NumPayloads = length(Payloads), + ExpectedCount = 15, + + ?check_trace( + begin + {ok, {ok, _}} = + ?wait_async_action( + lists:foreach( + fun(N) -> + ok = emqx_resource:query(?ID, {inc_counter, N}) + end, + Payloads + ), + #{?snk_kind := buffer_worker_enter_blocked}, + 5_000 + ), + %% now the individual messages should have been counted + Matched1 = emqx_resource_metrics:matched_get(?ID), + ?assertEqual(Matched0 + NumPayloads, Matched1), + + %% wait for two more retries while the failure is enabled; the + %% batch shall remain enqueued. + {ok, _} = + snabbkaffe:block_until( + ?match_n_events(2, #{?snk_kind := buffer_worker_retry_inflight_failed}), + 5_000 + ), + %% should not have increased the matched count with the retries + Matched2 = emqx_resource_metrics:matched_get(?ID), + ?assertEqual(Matched1, Matched2), + + %% now unblock the buffer worker so it may retry the batch, + %% but it'll still fail + {ok, {ok, _}} = + ?wait_async_action( + ok = emqx_resource:simple_sync_query(?ID, resume), + #{?snk_kind := buffer_worker_retry_inflight_succeeded}, + 5_000 + ), + %% 1 more because of the `resume' call + Matched3 = emqx_resource_metrics:matched_get(?ID), + ?assertEqual(Matched2 + 1, Matched3), + + {ok, Counter} = emqx_resource:simple_sync_query(?ID, get_counter), + {Counter, Matched3} + end, + fun({Counter, Matched3}, Trace) -> + %% 1 original attempt + 2 failed retries + final + %% successful attempt. 
+            %% each time should be the original batch (no duplicate
+            %% elements or reordering).
+            ExpectedSeenPayloads = lists:flatten(lists:duplicate(4, Payloads)),
+            Trace1 = lists:sublist(
+                ?projection(n, ?of_kind(connector_demo_batch_inc_individual, Trace)),
+                length(ExpectedSeenPayloads)
+            ),
+            ?assertEqual(ExpectedSeenPayloads, Trace1),
+            ?assertMatch(
+                [#{n := ExpectedCount}],
+                ?of_kind(connector_demo_inc_counter, Trace)
+            ),
+            ?assertEqual(ExpectedCount, Counter),
+            %% matched should count only the original requests, not the retries:
+            %% the 5 messages (counted once each) plus one `block', one `resume'
+            %% and one `get_counter' call; the `get_counter' call happened after
+            %% Matched3 was read, hence the + 1 below.
+            Matched4 = emqx_resource_metrics:matched_get(?ID),
+            ?assertEqual(Matched3 + 1, Matched4),
+            ok
+        end
+    ),
+    ok.
+
+t_delete_and_re_create_with_same_name(_Config) ->
+    NumBufferWorkers = 2,
+    {ok, _} = emqx_resource:create(
+        ?ID,
+        ?DEFAULT_RESOURCE_GROUP,
+        ?TEST_RESOURCE,
+        #{name => test_resource},
+        #{
+            query_mode => sync,
+            batch_size => 1,
+            worker_pool_size => NumBufferWorkers,
+            buffer_mode => volatile_offload,
+            buffer_seg_bytes => 100,
+            resume_interval => 1_000
+        }
+    ),
+    %% pre-condition: we should have just created a new queue
+    Queuing0 = emqx_resource_metrics:queuing_get(?ID),
+    Inflight0 = emqx_resource_metrics:inflight_get(?ID),
+    ?assertEqual(0, Queuing0),
+    ?assertEqual(0, Inflight0),
+    ?check_trace(
+        begin
+            ?assertMatch(ok, emqx_resource:simple_sync_query(?ID, block)),
+            NumRequests = 10,
+            {ok, SRef} = snabbkaffe:subscribe(
+                ?match_event(#{?snk_kind := buffer_worker_enter_blocked}),
+                NumBufferWorkers,
+                _Timeout = 5_000
+            ),
+            %% ensure replayq offloads to disk
+            Payload = binary:copy(<<"a">>, 119),
+            lists:foreach(
+                fun(N) ->
+                    spawn_link(fun() ->
+                        {error, _} =
+                            emqx_resource:query(
+                                ?ID,
+                                {big_payload, <<(integer_to_binary(N))/binary, Payload/binary>>}
+                            )
+                    end)
+                end,
+                lists:seq(1, NumRequests)
+            ),
+
+            {ok, _} = snabbkaffe:receive_events(SRef),
+
+            %% ensure that stuff got enqueued into disk
+            tap_metrics(?LINE),
+            Queuing1 = emqx_resource_metrics:queuing_get(?ID),
+            Inflight1 = emqx_resource_metrics:inflight_get(?ID),
+            ?assert(Queuing1 > 0),
+            ?assertEqual(2, Inflight1),
+
+            %% now, we delete the resource
+            process_flag(trap_exit, true),
+            ok = emqx_resource:remove_local(?ID),
+            ?assertEqual({error, not_found}, emqx_resource_manager:lookup(?ID)),
+
+            %% re-create the resource with the *same name*
+            {{ok, _}, {ok, _Events}} =
+                ?wait_async_action(
+                    emqx_resource:create(
+                        ?ID,
+                        ?DEFAULT_RESOURCE_GROUP,
+                        ?TEST_RESOURCE,
+                        #{name => test_resource},
+                        #{
+                            query_mode => async,
+                            batch_size => 1,
+                            worker_pool_size => 2,
+                            buffer_seg_bytes => 100,
+                            resume_interval => 1_000
+                        }
+                    ),
+                    #{?snk_kind := buffer_worker_enter_running},
+                    5_000
+                ),
+
+            %% it shouldn't have anything enqueued, as it's a fresh resource
+            Queuing2 = emqx_resource_metrics:queuing_get(?ID),
+            Inflight2 = emqx_resource_metrics:inflight_get(?ID),
+            ?assertEqual(0, Queuing2),
+            ?assertEqual(0, Inflight2),
+
+            ok
+        end,
+        []
+    ),
+    ok.
+
+%% check that, if we configure a max queue size too small, then we
+%% never send requests and always overflow.
+t_always_overflow(_Config) ->
+    {ok, _} = emqx_resource:create(
+        ?ID,
+        ?DEFAULT_RESOURCE_GROUP,
+        ?TEST_RESOURCE,
+        #{name => test_resource},
+        #{
+            query_mode => sync,
+            batch_size => 1,
+            worker_pool_size => 1,
+            max_buffer_bytes => 1,
+            resume_interval => 1_000
+        }
+    ),
+    ?check_trace(
+        begin
+            Payload = binary:copy(<<"a">>, 100),
+            %% the payload exceeds max_buffer_bytes, so the request is
+            %% rejected with `buffer_overflow' and never sent.
+            ?assertEqual(
+                {error, buffer_overflow},
+                emqx_resource:query(
+                    ?ID,
+                    {big_payload, Payload},
+                    #{timeout => 500}
+                )
+            ),
+            ok
+        end,
+        fun(Trace) ->
+            ?assertEqual([], ?of_kind(call_query_enter, Trace)),
+            ok
+        end
+    ),
+    ok.
+
+t_retry_sync_inflight(_Config) ->
+    ResumeInterval = 1_000,
+    emqx_connector_demo:set_callback_mode(always_sync),
+    {ok, _} = emqx_resource:create(
+        ?ID,
+        ?DEFAULT_RESOURCE_GROUP,
+        ?TEST_RESOURCE,
+        #{name => test_resource},
+        #{
+            query_mode => sync,
+            batch_size => 1,
+            worker_pool_size => 1,
+            resume_interval => ResumeInterval
+        }
+    ),
+    QueryOpts = #{},
+    ?check_trace(
+        begin
+            %% now really make the resource go into `blocked' state.
+            %% this results in a retriable error when sync.
+            ok = emqx_resource:simple_sync_query(?ID, block),
+            TestPid = self(),
+            {_, {ok, _}} =
+                ?wait_async_action(
+                    spawn_link(fun() ->
+                        Res = emqx_resource:query(?ID, {big_payload, <<"a">>}, QueryOpts),
+                        TestPid ! {res, Res}
+                    end),
+                    #{?snk_kind := buffer_worker_retry_inflight_failed},
+                    ResumeInterval * 2
+                ),
+            {ok, {ok, _}} =
+                ?wait_async_action(
+                    ok = emqx_resource:simple_sync_query(?ID, resume),
+                    #{?snk_kind := buffer_worker_retry_inflight_succeeded},
+                    ResumeInterval * 3
+                ),
+            receive
+                {res, Res} ->
+                    ?assertEqual(ok, Res)
+            after 5_000 ->
+                ct:fail("no response")
+            end,
+            ok
+        end,
+        [fun ?MODULE:assert_sync_retry_fail_then_succeed_inflight/1]
+    ),
+    ok.
+
+t_retry_sync_inflight_batch(_Config) ->
+    ResumeInterval = 1_000,
+    emqx_connector_demo:set_callback_mode(always_sync),
+    {ok, _} = emqx_resource:create(
+        ?ID,
+        ?DEFAULT_RESOURCE_GROUP,
+        ?TEST_RESOURCE,
+        #{name => test_resource},
+        #{
+            query_mode => sync,
+            batch_size => 2,
+            batch_time => 200,
+            worker_pool_size => 1,
+            resume_interval => ResumeInterval
+        }
+    ),
+    QueryOpts = #{},
+    ?check_trace(
+        begin
+            %% make the resource go into `blocked' state. this
+            %% results in a retriable error when sync.
+            ok = emqx_resource:simple_sync_query(?ID, block),
+            process_flag(trap_exit, true),
+            TestPid = self(),
+            {_, {ok, _}} =
+                ?wait_async_action(
+                    spawn_link(fun() ->
+                        Res = emqx_resource:query(?ID, {big_payload, <<"a">>}, QueryOpts),
+                        TestPid ! {res, Res}
+                    end),
+                    #{?snk_kind := buffer_worker_retry_inflight_failed},
+                    ResumeInterval * 2
+                ),
+            {ok, {ok, _}} =
+                ?wait_async_action(
+                    ok = emqx_resource:simple_sync_query(?ID, resume),
+                    #{?snk_kind := buffer_worker_retry_inflight_succeeded},
+                    ResumeInterval * 3
+                ),
+            receive
+                {res, Res} ->
+                    ?assertEqual(ok, Res)
+            after 5_000 ->
+                ct:fail("no response")
+            end,
+            ok
+        end,
+        [fun ?MODULE:assert_sync_retry_fail_then_succeed_inflight/1]
+    ),
+    ok.
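A sync query only returns once its request finally succeeds, so the two sync retry tests above have to issue the query from a spawned process and collect the reply as a message while the test process blocks and resumes the resource. The skeleton of that pattern (a sketch; the block/resume steps are elided):

    sync_query_in_background() ->
        TestPid = self(),
        spawn_link(fun() ->
            Res = emqx_resource:query(?ID, {big_payload, <<"a">>}, #{}),
            TestPid ! {res, Res}
        end),
        %% ... block, await the failed retry, then resume, as above ...
        receive
            {res, Res} -> ?assertEqual(ok, Res)
        after 5_000 -> ct:fail("no response")
        end.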
+ +t_retry_async_inflight(_Config) -> + ResumeInterval = 1_000, + emqx_connector_demo:set_callback_mode(async_if_possible), + {ok, _} = emqx_resource:create( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource}, + #{ + query_mode => async, + batch_size => 1, + worker_pool_size => 1, + resume_interval => ResumeInterval + } + ), + QueryOpts = #{}, + ?check_trace( + begin + %% block + ok = emqx_resource:simple_sync_query(?ID, block), + + %% then send an async request; that should be retriable. + {ok, {ok, _}} = + ?wait_async_action( + emqx_resource:query(?ID, {big_payload, <<"b">>}, QueryOpts), + #{?snk_kind := buffer_worker_retry_inflight_failed}, + ResumeInterval * 2 + ), + + %% will reply with success after the resource is healed + {ok, {ok, _}} = + ?wait_async_action( + emqx_resource:simple_sync_query(?ID, resume), + #{?snk_kind := buffer_worker_enter_running}, + ResumeInterval * 2 + ), + ok + end, + [fun ?MODULE:assert_async_retry_fail_then_succeed_inflight/1] + ), + ok. + +t_retry_async_inflight_full(_Config) -> + ResumeInterval = 1_000, + AsyncInflightWindow = 5, + emqx_connector_demo:set_callback_mode(async_if_possible), + {ok, _} = emqx_resource:create( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => ?FUNCTION_NAME}, + #{ + query_mode => async, + inflight_window => AsyncInflightWindow, + batch_size => 1, + worker_pool_size => 1, + resume_interval => ResumeInterval + } + ), + ?check_trace( + #{timetrap => 15_000}, + begin + %% block + ok = emqx_resource:simple_sync_query(?ID, block), + + {ok, {ok, _}} = + ?wait_async_action( + inc_counter_in_parallel( + AsyncInflightWindow * 2, + fun() -> + For = (ResumeInterval div 4) + rand:uniform(ResumeInterval div 4), + {sleep_before_reply, For} + end, + #{async_reply_fun => {fun(Res) -> ct:pal("Res = ~p", [Res]) end, []}} + ), + #{?snk_kind := buffer_worker_flush_but_inflight_full}, + ResumeInterval * 2 + ), + + %% will reply with success after the resource is healed + {ok, {ok, _}} = + ?wait_async_action( + emqx_resource:simple_sync_query(?ID, resume), + #{?snk_kind := buffer_worker_enter_running} + ), + ok + end, + [ + fun(Trace) -> + ?assertMatch([#{} | _], ?of_kind(buffer_worker_flush_but_inflight_full, Trace)) + end + ] + ), + ?assertEqual(0, emqx_resource_metrics:inflight_get(?ID)), + ok. 
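t_retry_async_inflight_full passes the query itself as a zero-arity fun so that every spawned request draws a fresh random sleep; `inc_counter_in_parallel/3` resolves such thunks through `maybe_apply/1` (see the helpers at the end of this suite). A sketch of such a generator:

    %% Evaluated once per request, yielding a randomized sleep relative
    %% to the resume interval so replies straddle the retry window.
    random_sleep_query(ResumeInterval) ->
        fun() ->
            For = (ResumeInterval div 4) + rand:uniform(ResumeInterval div 4),
            {sleep_before_reply, For}
        end.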
+ +%% this test case is to ensure the buffer worker will not go crazy even +%% if the underlying connector is misbehaving: evaluate async callbacks multiple times +t_async_reply_multi_eval(_Config) -> + ResumeInterval = 5, + TotalTime = 5_000, + AsyncInflightWindow = 3, + TotalQueries = AsyncInflightWindow * 5, + emqx_connector_demo:set_callback_mode(async_if_possible), + {ok, _} = emqx_resource:create( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => ?FUNCTION_NAME}, + #{ + query_mode => async, + inflight_window => AsyncInflightWindow, + batch_size => 3, + batch_time => 10, + worker_pool_size => 1, + resume_interval => ResumeInterval + } + ), + %% block + ok = emqx_resource:simple_sync_query(?ID, block), + inc_counter_in_parallel( + TotalQueries, + fun() -> + Rand = rand:uniform(1000), + {random_reply, Rand} + end, + #{} + ), + ?retry( + ResumeInterval, + TotalTime div ResumeInterval, + begin + Metrics = tap_metrics(?LINE), + #{ + counters := Counters, + gauges := #{queuing := 0, inflight := 0} + } = Metrics, + #{ + matched := Matched, + success := Success, + dropped := Dropped, + late_reply := LateReply, + failed := Failed + } = Counters, + ?assertEqual(TotalQueries, Matched - 1), + ?assertEqual(Matched, Success + Dropped + LateReply + Failed) + end + ). + +t_retry_async_inflight_batch(_Config) -> + ResumeInterval = 1_000, + emqx_connector_demo:set_callback_mode(async_if_possible), + {ok, _} = emqx_resource:create( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource}, + #{ + query_mode => async, + batch_size => 2, + batch_time => 200, + worker_pool_size => 1, + resume_interval => ResumeInterval + } + ), + QueryOpts = #{}, + ?check_trace( + begin + %% block + ok = emqx_resource:simple_sync_query(?ID, block), + + %% then send an async request; that should be retriable. + {ok, {ok, _}} = + ?wait_async_action( + emqx_resource:query(?ID, {big_payload, <<"b">>}, QueryOpts), + #{?snk_kind := buffer_worker_retry_inflight_failed}, + ResumeInterval * 2 + ), + + %% will reply with success after the resource is healed + {ok, {ok, _}} = + ?wait_async_action( + emqx_resource:simple_sync_query(?ID, resume), + #{?snk_kind := buffer_worker_enter_running}, + ResumeInterval * 2 + ), + ok + end, + [fun ?MODULE:assert_async_retry_fail_then_succeed_inflight/1] + ), + ok. + +%% check that we monitor async worker pids and abort their inflight +%% requests if they die. 
+t_async_pool_worker_death(_Config) -> + ResumeInterval = 1_000, + NumBufferWorkers = 2, + emqx_connector_demo:set_callback_mode(async_if_possible), + {ok, _} = emqx_resource:create( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource}, + #{ + query_mode => async, + batch_size => 1, + worker_pool_size => NumBufferWorkers, + resume_interval => ResumeInterval + } + ), + Tab0 = ets:new(?FUNCTION_NAME, [bag, public]), + Insert0 = fun(Tab, Ref, Result) -> + ct:pal("inserting ~p", [{Ref, Result}]), + ets:insert(Tab, {Ref, Result}) + end, + ReqOpts = fun() -> #{async_reply_fun => {Insert0, [Tab0, make_ref()]}} end, + ?check_trace( + begin + ok = emqx_resource:simple_sync_query(?ID, block), + + NumReqs = 10, + {ok, SRef0} = + snabbkaffe:subscribe( + ?match_event(#{?snk_kind := buffer_worker_appended_to_inflight}), + NumReqs, + 1_000 + ), + inc_counter_in_parallel_increasing(NumReqs, 1, ReqOpts), + {ok, _} = snabbkaffe:receive_events(SRef0), + + Inflight0 = emqx_resource_metrics:inflight_get(?ID), + ?assertEqual(NumReqs, Inflight0), + + %% grab one of the worker pids and kill it + {ok, #{pid := Pid0}} = emqx_resource:simple_sync_query(?ID, get_state), + MRef = monitor(process, Pid0), + ct:pal("will kill ~p", [Pid0]), + exit(Pid0, kill), + receive + {'DOWN', MRef, process, Pid0, killed} -> + ct:pal("~p killed", [Pid0]), + ok + after 200 -> + ct:fail("worker should have died") + end, + + %% inflight requests should have been marked as retriable + wait_until_all_marked_as_retriable(NumReqs), + Inflight1 = emqx_resource_metrics:inflight_get(?ID), + ?assertEqual(NumReqs, Inflight1), + + NumReqs + end, + fun(NumReqs, Trace) -> + Events = ?of_kind(buffer_worker_async_agent_down, Trace), + %% At least one buffer worker should have marked its + %% requests as retriable. If a single one has + %% received all requests, that's all we got. + ?assertMatch([_ | _], Events), + %% All requests distributed over all buffer workers + %% should have been marked as retriable, by the time + %% the inflight has been drained. + ?assertEqual( + NumReqs, + lists:sum([N || #{num_affected := N} <- Events]) + ), + ok + end + ), + ok. + +t_expiration_sync_before_sending(_Config) -> + emqx_connector_demo:set_callback_mode(always_sync), + {ok, _} = emqx_resource:create( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource}, + #{ + query_mode => sync, + batch_size => 1, + worker_pool_size => 1, + resume_interval => 1_000 + } + ), + do_t_expiration_before_sending(sync). + +t_expiration_sync_batch_before_sending(_Config) -> + emqx_connector_demo:set_callback_mode(always_sync), + {ok, _} = emqx_resource:create( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource}, + #{ + query_mode => sync, + batch_size => 2, + batch_time => 100, + worker_pool_size => 1, + resume_interval => 1_000 + } + ), + do_t_expiration_before_sending(sync). + +t_expiration_async_before_sending(_Config) -> + emqx_connector_demo:set_callback_mode(async_if_possible), + {ok, _} = emqx_resource:create( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource}, + #{ + query_mode => async, + batch_size => 1, + worker_pool_size => 1, + resume_interval => 1_000 + } + ), + do_t_expiration_before_sending(async). 
+ +t_expiration_async_batch_before_sending(_Config) -> + emqx_connector_demo:set_callback_mode(async_if_possible), + {ok, _} = emqx_resource:create( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource}, + #{ + query_mode => async, + batch_size => 2, + batch_time => 100, + worker_pool_size => 1, + resume_interval => 1_000 + } + ), + do_t_expiration_before_sending(async). + +do_t_expiration_before_sending(QueryMode) -> + ?check_trace( + begin + ok = emqx_resource:simple_sync_query(?ID, block), + + ?force_ordering( + #{?snk_kind := buffer_worker_flush_before_pop}, + #{?snk_kind := delay_enter} + ), + ?force_ordering( + #{?snk_kind := delay}, + #{?snk_kind := buffer_worker_flush_before_sieve_expired} + ), + + TimeoutMS = 100, + spawn_link(fun() -> + case QueryMode of + sync -> + ?assertMatch( + {error, {resource_error, #{reason := timeout}}}, + emqx_resource:query(?ID, {inc_counter, 99}, #{timeout => TimeoutMS}) + ); + async -> + ?assertEqual( + ok, emqx_resource:query(?ID, {inc_counter, 99}, #{timeout => TimeoutMS}) + ) + end + end), + spawn_link(fun() -> + ?tp(delay_enter, #{}), + ct:sleep(2 * TimeoutMS), + ?tp(delay, #{}), + ok + end), + + {ok, _} = ?block_until(#{?snk_kind := buffer_worker_flush_all_expired}, 4 * TimeoutMS), + ok + end, + fun(Trace) -> + ?assertMatch( + [#{batch := [{query, _, {inc_counter, 99}, _, _}]}], + ?of_kind(buffer_worker_flush_all_expired, Trace) + ), + Metrics = tap_metrics(?LINE), + ?assertMatch( + #{ + counters := #{ + matched := 2, + %% the block call + success := 1, + dropped := 1, + 'dropped.expired' := 1, + retried := 0, + failed := 0 + } + }, + Metrics + ), + ok + end + ), + ok. + +t_expiration_sync_before_sending_partial_batch(_Config) -> + emqx_connector_demo:set_callback_mode(always_sync), + {ok, _} = emqx_resource:create( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource}, + #{ + query_mode => sync, + batch_size => 2, + batch_time => 100, + worker_pool_size => 1, + resume_interval => 1_000 + } + ), + install_telemetry_handler(?FUNCTION_NAME), + do_t_expiration_before_sending_partial_batch(sync). + +t_expiration_async_before_sending_partial_batch(_Config) -> + emqx_connector_demo:set_callback_mode(async_if_possible), + {ok, _} = emqx_resource:create( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource}, + #{ + query_mode => async, + batch_size => 2, + batch_time => 100, + worker_pool_size => 1, + resume_interval => 1_000 + } + ), + install_telemetry_handler(?FUNCTION_NAME), + do_t_expiration_before_sending_partial_batch(async). 
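do_t_expiration_before_sending above gets a deterministic interleaving out of `?force_ordering`: the buffer worker must flush before the artificial delay starts, and the delay must finish before expired entries are sieved out. The handshake in isolation (a sketch using the same trace points):

    install_expiration_ordering(TimeoutMS) ->
        ?force_ordering(
            #{?snk_kind := buffer_worker_flush_before_pop},
            #{?snk_kind := delay_enter}
        ),
        ?force_ordering(
            #{?snk_kind := delay},
            #{?snk_kind := buffer_worker_flush_before_sieve_expired}
        ),
        %% the delay process emits the two marker events bracketing a
        %% sleep of twice the query timeout
        spawn_link(fun() ->
            ?tp(delay_enter, #{}),
            ct:sleep(2 * TimeoutMS),
            ?tp(delay, #{}),
            ok
        end).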
+ +do_t_expiration_before_sending_partial_batch(QueryMode) -> + ?check_trace( + begin + ok = emqx_resource:simple_sync_query(?ID, block), + + ?force_ordering( + #{?snk_kind := buffer_worker_flush_before_pop}, + #{?snk_kind := delay_enter} + ), + ?force_ordering( + #{?snk_kind := delay}, + #{?snk_kind := buffer_worker_flush_before_sieve_expired} + ), + + Pid0 = + spawn_link(fun() -> + ?assertEqual( + ok, emqx_resource:query(?ID, {inc_counter, 99}, #{timeout => infinity}) + ), + ?tp(infinity_query_returned, #{}) + end), + TimeoutMS = 100, + Pid1 = + spawn_link(fun() -> + case QueryMode of + sync -> + ?assertMatch( + {error, {resource_error, #{reason := timeout}}}, + emqx_resource:query(?ID, {inc_counter, 199}, #{timeout => TimeoutMS}) + ); + async -> + ?assertEqual( + ok, + emqx_resource:query(?ID, {inc_counter, 199}, #{timeout => TimeoutMS}) + ) + end + end), + Pid2 = + spawn_link(fun() -> + ?tp(delay_enter, #{}), + ct:sleep(2 * TimeoutMS), + ?tp(delay, #{}), + ok + end), + + {ok, _} = ?block_until( + #{?snk_kind := buffer_worker_flush_potentially_partial}, 4 * TimeoutMS + ), + ok = emqx_resource:simple_sync_query(?ID, resume), + case QueryMode of + async -> + {ok, _} = ?block_until( + #{ + ?snk_kind := handle_async_reply, + action := ack, + batch_or_query := [{query, _, {inc_counter, 99}, _, _}] + }, + 10 * TimeoutMS + ); + sync -> + %% more time because it needs to retry if sync + {ok, _} = ?block_until(#{?snk_kind := infinity_query_returned}, 20 * TimeoutMS) + end, + + lists:foreach( + fun(Pid) -> + unlink(Pid), + exit(Pid, kill) + end, + [Pid0, Pid1, Pid2] + ), + ok + end, + fun(Trace) -> + ?assertMatch( + [ + #{ + expired := [{query, _, {inc_counter, 199}, _, _}], + not_expired := [{query, _, {inc_counter, 99}, _, _}] + } + ], + ?of_kind(buffer_worker_flush_potentially_partial, Trace) + ), + wait_until_gauge_is(inflight, 0, 500), + Metrics = tap_metrics(?LINE), + case QueryMode of + async -> + ?assertMatch( + #{ + counters := #{ + matched := 4, + %% the block call, the request with + %% infinity timeout, and the resume + %% call. + success := 3, + dropped := 1, + 'dropped.expired' := 1, + %% was sent successfully and held by + %% the test connector. + retried := 0, + failed := 0 + } + }, + Metrics + ); + sync -> + ?assertMatch( + #{ + counters := #{ + matched := 4, + %% the block call, the request with + %% infinity timeout, and the resume + %% call. + success := 3, + dropped := 1, + 'dropped.expired' := 1, + %% currently, the test connector + %% replies with an error that may make + %% the buffer worker retry. + retried := Retried, + failed := 0 + } + } when Retried =< 1, + Metrics + ) + end, + ok + end + ), + ok. + +t_expiration_async_after_reply(_Config) -> + emqx_connector_demo:set_callback_mode(async_if_possible), + {ok, _} = emqx_resource:create( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource}, + #{ + query_mode => async, + batch_size => 1, + worker_pool_size => 1, + resume_interval => 1_000 + } + ), + install_telemetry_handler(?FUNCTION_NAME), + do_t_expiration_async_after_reply(single). + +t_expiration_async_batch_after_reply(_Config) -> + emqx_connector_demo:set_callback_mode(async_if_possible), + {ok, _} = emqx_resource:create( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource}, + #{ + query_mode => async, + batch_size => 3, + batch_time => 100, + worker_pool_size => 1, + resume_interval => 2_000 + } + ), + install_telemetry_handler(?FUNCTION_NAME), + do_t_expiration_async_after_reply(batch). 
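In the async partial-batch case above, completion is detected by blocking on the connector's ack for the surviving half of the batch rather than by sleeping. That synchronization step in isolation (a sketch reusing the event shape from the helper):

    await_surviving_ack(TimeoutMS) ->
        {ok, _} = ?block_until(
            #{
                ?snk_kind := handle_async_reply,
                action := ack,
                batch_or_query := [{query, _, {inc_counter, 99}, _, _}]
            },
            10 * TimeoutMS
        ),
        ok.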
+ +do_t_expiration_async_after_reply(IsBatch) -> + ?check_trace( + begin + NAcks = + case IsBatch of + batch -> 1; + single -> 3 + end, + ?force_ordering( + #{?snk_kind := buffer_worker_flush_ack}, + NAcks, + #{?snk_kind := delay_enter}, + _Guard = true + ), + ?force_ordering( + #{?snk_kind := delay}, + #{ + ?snk_kind := handle_async_reply_enter, + batch_or_query := [{query, _, {inc_counter, 199}, _, _} | _] + } + ), + + TimeoutMS = 100, + ?assertEqual( + ok, + emqx_resource:query(?ID, {inc_counter, 199}, #{timeout => TimeoutMS}) + ), + ?assertEqual( + ok, + emqx_resource:query(?ID, {inc_counter, 299}, #{timeout => TimeoutMS}) + ), + ?assertEqual( + ok, emqx_resource:query(?ID, {inc_counter, 99}, #{timeout => infinity}) + ), + Pid0 = + spawn_link(fun() -> + ?tp(delay_enter, #{}), + ct:sleep(2 * TimeoutMS), + ?tp(delay, #{}), + ok + end), + + {ok, _} = ?block_until( + #{?snk_kind := buffer_worker_flush_potentially_partial}, 4 * TimeoutMS + ), + {ok, _} = ?block_until( + #{?snk_kind := handle_async_reply_expired}, 10 * TimeoutMS + ), + wait_telemetry_event(success, #{n_events => 1, timeout => 4_000}), + + unlink(Pid0), + exit(Pid0, kill), + ok + end, + fun(Trace) -> + case IsBatch of + batch -> + ?assertMatch( + [ + #{ + expired := [ + {query, _, {inc_counter, 199}, _, _}, + {query, _, {inc_counter, 299}, _, _} + ] + } + ], + ?of_kind(handle_async_reply_expired, Trace) + ); + single -> + ?assertMatch( + [ + #{expired := [{query, _, {inc_counter, 199}, _, _}]}, + #{expired := [{query, _, {inc_counter, 299}, _, _}]} + ], + ?of_kind(handle_async_reply_expired, Trace) + ) + end, + Metrics = tap_metrics(?LINE), + ?assertMatch( + #{ + counters := #{ + matched := 3, + %% the request with infinity timeout. + success := 1, + dropped := 0, + late_reply := 2, + retried := 0, + failed := 0 + } + }, + Metrics + ), + ok + end + ), + ok. + +t_expiration_batch_all_expired_after_reply(_Config) -> + ResumeInterval = 300, + emqx_connector_demo:set_callback_mode(async_if_possible), + {ok, _} = emqx_resource:create( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource}, + #{ + query_mode => async, + batch_size => 3, + batch_time => 100, + worker_pool_size => 1, + resume_interval => ResumeInterval + } + ), + ?check_trace( + begin + ?force_ordering( + #{?snk_kind := buffer_worker_flush_ack}, + #{?snk_kind := delay_enter} + ), + ?force_ordering( + #{?snk_kind := delay}, + #{ + ?snk_kind := handle_async_reply_enter, + batch_or_query := [{query, _, {inc_counter, 199}, _, _} | _] + } + ), + + TimeoutMS = 200, + ?assertEqual( + ok, + emqx_resource:query(?ID, {inc_counter, 199}, #{timeout => TimeoutMS}) + ), + ?assertEqual( + ok, + emqx_resource:query(?ID, {inc_counter, 299}, #{timeout => TimeoutMS}) + ), + Pid0 = + spawn_link(fun() -> + ?tp(delay_enter, #{}), + ct:sleep(2 * TimeoutMS), + ?tp(delay, #{}), + ok + end), + + {ok, _} = ?block_until( + #{?snk_kind := handle_async_reply_expired}, 10 * TimeoutMS + ), + + unlink(Pid0), + exit(Pid0, kill), + ok + end, + fun(Trace) -> + ?assertMatch( + [ + #{ + expired := [ + {query, _, {inc_counter, 199}, _, _}, + {query, _, {inc_counter, 299}, _, _} + ] + } + ], + ?of_kind(handle_async_reply_expired, Trace) + ), + Metrics = tap_metrics(?LINE), + ?assertMatch( + #{ + counters := #{ + matched := 2, + success := 0, + dropped := 0, + late_reply := 2, + retried := 0, + failed := 0 + }, + gauges := #{ + inflight := 0, + queuing := 0 + } + }, + Metrics + ), + ok + end + ), + ok. 
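The metrics assertions in these expiration tests all check the same accounting identity once the gauges drain. A sketch of the invariant, assuming an idle resource (nothing queued or in flight):

    %% Every matched request must be accounted for as success, dropped,
    %% late_reply or failed once queuing and inflight reach zero.
    assert_metrics_settled() ->
        #{counters := C, gauges := G} = emqx_resource:get_metrics(?ID),
        #{
            matched := Matched,
            success := Success,
            dropped := Dropped,
            late_reply := LateReply,
            failed := Failed
        } = C,
        ?assertMatch(#{queuing := 0, inflight := 0}, G),
        ?assertEqual(Matched, Success + Dropped + LateReply + Failed).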
+ +t_expiration_retry(_Config) -> + emqx_connector_demo:set_callback_mode(always_sync), + {ok, _} = emqx_resource:create( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource}, + #{ + query_mode => sync, + batch_size => 1, + worker_pool_size => 1, + resume_interval => 300 + } + ), + do_t_expiration_retry(). + +t_expiration_retry_batch(_Config) -> + emqx_connector_demo:set_callback_mode(always_sync), + {ok, _} = emqx_resource:create( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource}, + #{ + query_mode => sync, + batch_size => 2, + batch_time => 100, + worker_pool_size => 1, + resume_interval => 300 + } + ), + do_t_expiration_retry(). + +do_t_expiration_retry() -> + ResumeInterval = 300, + ?check_trace( + begin + ok = emqx_resource:simple_sync_query(?ID, block), + + {ok, SRef0} = snabbkaffe:subscribe( + ?match_event(#{?snk_kind := buffer_worker_flush_nack}), + 1, + 200 + ), + TimeoutMS = 100, + %% the request that expires must be first, so it's the + %% head of the inflight table (and retriable). + {ok, SRef1} = snabbkaffe:subscribe( + ?match_event(#{?snk_kind := buffer_worker_appended_to_queue}), + 1, + ResumeInterval * 2 + ), + spawn_link(fun() -> + ?assertMatch( + {error, {resource_error, #{reason := timeout}}}, + emqx_resource:query( + ?ID, + {inc_counter, 1}, + #{timeout => TimeoutMS} + ) + ) + end), + Pid1 = + spawn_link(fun() -> + receive + go -> ok + end, + ?assertEqual( + ok, + emqx_resource:query( + ?ID, + {inc_counter, 2}, + #{timeout => infinity} + ) + ) + end), + {ok, _} = snabbkaffe:receive_events(SRef1), + Pid1 ! go, + {ok, _} = snabbkaffe:receive_events(SRef0), + + {ok, _} = + ?block_until( + #{?snk_kind := buffer_worker_retry_expired}, + ResumeInterval * 10 + ), + + {ok, {ok, _}} = + ?wait_async_action( + emqx_resource:simple_sync_query(?ID, resume), + #{?snk_kind := buffer_worker_retry_inflight_succeeded}, + ResumeInterval * 5 + ), + + ok + end, + fun(Trace) -> + ?assertMatch( + [#{expired := [{query, _, {inc_counter, 1}, _, _}]}], + ?of_kind(buffer_worker_retry_expired, Trace) + ), + Metrics = tap_metrics(?LINE), + ?assertMatch( + #{ + gauges := #{ + inflight := 0, + queuing := 0 + } + }, + Metrics + ), + ok + end + ), + ok. 
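When a test needs to see the same event kind several times, the suite combines `snabbkaffe:block_until/2` with `?match_n_events`, as the retry-expiration tests do. In isolation (a sketch):

    %% Wait for N buffer_worker_retry_expired events, with a deadline
    %% proportional to the resume interval.
    await_n_expired_retries(N, ResumeInterval) ->
        {ok, _} = snabbkaffe:block_until(
            ?match_n_events(N, #{?snk_kind := buffer_worker_retry_expired}),
            ResumeInterval * 10
        ),
        ok.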
+ +t_expiration_retry_batch_multiple_times(_Config) -> + ResumeInterval = 300, + emqx_connector_demo:set_callback_mode(always_sync), + {ok, _} = emqx_resource:create( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource}, + #{ + query_mode => sync, + batch_size => 2, + batch_time => 100, + worker_pool_size => 1, + resume_interval => ResumeInterval + } + ), + ?check_trace( + begin + ok = emqx_resource:simple_sync_query(?ID, block), + + {ok, SRef} = snabbkaffe:subscribe( + ?match_event(#{?snk_kind := buffer_worker_flush_nack}), + 1, + 200 + ), + TimeoutMS = 100, + spawn_link(fun() -> + ?assertMatch( + {error, {resource_error, #{reason := timeout}}}, + emqx_resource:query( + ?ID, + {inc_counter, 1}, + #{timeout => TimeoutMS} + ) + ) + end), + spawn_link(fun() -> + ?assertMatch( + {error, {resource_error, #{reason := timeout}}}, + emqx_resource:query( + ?ID, + {inc_counter, 2}, + #{timeout => ResumeInterval + TimeoutMS} + ) + ) + end), + {ok, _} = snabbkaffe:receive_events(SRef), + + {ok, _} = + snabbkaffe:block_until( + ?match_n_events(2, #{?snk_kind := buffer_worker_retry_expired}), + ResumeInterval * 10 + ), + + ok + end, + fun(Trace) -> + ?assertMatch( + [ + #{expired := [{query, _, {inc_counter, 1}, _, _}]}, + #{expired := [{query, _, {inc_counter, 2}, _, _}]} + ], + ?of_kind(buffer_worker_retry_expired, Trace) + ), + ok + end + ), + ok. + +t_recursive_flush(_Config) -> + emqx_connector_demo:set_callback_mode(async_if_possible), + {ok, _} = emqx_resource:create( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource}, + #{ + query_mode => async, + batch_size => 1, + worker_pool_size => 1 + } + ), + do_t_recursive_flush(). + +t_recursive_flush_batch(_Config) -> + emqx_connector_demo:set_callback_mode(async_if_possible), + {ok, _} = emqx_resource:create( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource}, + #{ + query_mode => async, + batch_size => 2, + batch_time => 10_000, + worker_pool_size => 1 + } + ), + do_t_recursive_flush(). + +do_t_recursive_flush() -> + ?check_trace( + begin + Timeout = 1_000, + Pid = spawn_link(fun S() -> + emqx_resource:query(?ID, {inc_counter, 1}), + S() + end), + %% we want two reflushes to happen before we analyze the + %% trace, so that we get a single full interaction + {ok, _} = snabbkaffe:block_until( + ?match_n_events(2, #{?snk_kind := buffer_worker_flush_ack_reflush}), Timeout + ), + unlink(Pid), + exit(Pid, kill), + ok + end, + fun(Trace) -> + %% check that a recursive flush leads to a new call to flush/1 + Pairs = ?find_pairs( + #{?snk_kind := buffer_worker_flush_ack_reflush}, + #{?snk_kind := buffer_worker_flush}, + Trace + ), + ?assert(lists:any(fun(E) -> E end, [true || {pair, _, _} <- Pairs])) + end + ), + ok. + +t_call_mode_uncoupled_from_query_mode(_Config) -> + DefaultOpts = #{ + batch_size => 1, + batch_time => 5, + worker_pool_size => 1 + }, + ?check_trace( + begin + %% We check that we can call the buffer workers with async + %% calls, even if the underlying connector itself only + %% supports sync calls. 
+ emqx_connector_demo:set_callback_mode(always_sync), + {ok, _} = emqx_resource:create( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource}, + DefaultOpts#{query_mode => async} + ), + ?tp_span( + async_query_sync_driver, + #{}, + ?assertMatch( + {ok, {ok, _}}, + ?wait_async_action( + emqx_resource:query(?ID, {inc_counter, 1}), + #{?snk_kind := buffer_worker_flush_ack}, + 500 + ) + ) + ), + ?assertEqual(ok, emqx_resource:remove_local(?ID)), + + %% And we check the converse: a connector that allows async + %% calls can be called synchronously, but the underlying + %% call should be async. + emqx_connector_demo:set_callback_mode(async_if_possible), + {ok, _} = emqx_resource:create( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource}, + DefaultOpts#{query_mode => sync} + ), + ?tp_span( + sync_query_async_driver, + #{}, + ?assertEqual(ok, emqx_resource:query(?ID, {inc_counter, 2})) + ), + ?assertEqual(ok, emqx_resource:remove_local(?ID)), + ?tp(sync_query_async_driver, #{}), + ok + end, + fun(Trace0) -> + Trace1 = trace_between_span(Trace0, async_query_sync_driver), + ct:pal("async query calling sync driver\n ~p", [Trace1]), + ?assert( + ?strict_causality( + #{?snk_kind := async_query, request := {inc_counter, 1}}, + #{?snk_kind := call_query, call_mode := sync}, + Trace1 + ) + ), + + Trace2 = trace_between_span(Trace0, sync_query_async_driver), + ct:pal("sync query calling async driver\n ~p", [Trace2]), + ?assert( + ?strict_causality( + #{?snk_kind := sync_query, request := {inc_counter, 2}}, + #{?snk_kind := call_query_async}, + Trace2 + ) + ), + ok + end + ). + +%% The default mode is currently `memory_only'. +t_volatile_offload_mode(_Config) -> + MaxBufferBytes = 1_000, + DefaultOpts = #{ + max_buffer_bytes => MaxBufferBytes, + worker_pool_size => 1 + }, + ?check_trace( + begin + emqx_connector_demo:set_callback_mode(async_if_possible), + %% Create without any specified segment bytes; should + %% default to equal max bytes. + ?assertMatch( + {ok, _}, + emqx_resource:create( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource}, + DefaultOpts#{buffer_mode => volatile_offload} + ) + ), + ?assertEqual(ok, emqx_resource:remove_local(?ID)), + + %% Create with segment bytes < max bytes + ?assertMatch( + {ok, _}, + emqx_resource:create( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource}, + DefaultOpts#{ + buffer_mode => volatile_offload, + buffer_seg_bytes => MaxBufferBytes div 2 + } + ) + ), + ?assertEqual(ok, emqx_resource:remove_local(?ID)), + %% Create with segment bytes = max bytes + ?assertMatch( + {ok, _}, + emqx_resource:create( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource}, + DefaultOpts#{ + buffer_mode => volatile_offload, + buffer_seg_bytes => MaxBufferBytes + } + ) + ), + ?assertEqual(ok, emqx_resource:remove_local(?ID)), + + %% Create with segment bytes > max bytes; should normalize + %% to max bytes. 
+ ?assertMatch( + {ok, _}, + emqx_resource:create( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource}, + DefaultOpts#{ + buffer_mode => volatile_offload, + buffer_seg_bytes => 2 * MaxBufferBytes + } + ) + ), + ?assertEqual(ok, emqx_resource:remove_local(?ID)), + + ok + end, + fun(Trace) -> + HalfMaxBufferBytes = MaxBufferBytes div 2, + ?assertMatch( + [ + #{ + dir := _, + max_total_bytes := MaxTotalBytes, + seg_bytes := MaxTotalBytes, + offload := {true, volatile} + }, + #{ + dir := _, + max_total_bytes := MaxTotalBytes, + %% uses the specified value since it's smaller + %% than max bytes. + seg_bytes := HalfMaxBufferBytes, + offload := {true, volatile} + }, + #{ + dir := _, + max_total_bytes := MaxTotalBytes, + seg_bytes := MaxTotalBytes, + offload := {true, volatile} + }, + #{ + dir := _, + max_total_bytes := MaxTotalBytes, + seg_bytes := MaxTotalBytes, + offload := {true, volatile} + } + ], + ?projection(queue_opts, ?of_kind(buffer_worker_init, Trace)) + ), + ok + end + ). + +t_late_call_reply(_Config) -> + emqx_connector_demo:set_callback_mode(always_sync), + RequestTimeout = 500, + ?assertMatch( + {ok, _}, + emqx_resource:create( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource}, + #{ + buffer_mode => memory_only, + request_timeout => RequestTimeout, + query_mode => sync + } + ) + ), + ?check_trace( + begin + %% Sleep for longer than the request timeout; by then the caller will + %% already have received a timeout error, but the resource will + %% still send a late message with the reply. + %% The demo connector will reply with `{error, timeout}' after 1 s. + SleepFor = RequestTimeout + 500, + ?assertMatch( + {error, {resource_error, #{reason := timeout}}}, + emqx_resource:query( + ?ID, + {sync_sleep_before_reply, SleepFor}, + #{timeout => RequestTimeout} + ) + ), + %% Our process shouldn't receive any late messages. + receive + LateReply -> + ct:fail("received late reply: ~p", [LateReply]) + after SleepFor -> + ok + end, + ok + end, + [] + ), + ok. + +t_resource_create_error_activate_alarm_once(_) -> + do_t_resource_activate_alarm_once( + #{name => test_resource, create_error => true}, + connector_demo_start_error + ). + +t_resource_health_check_error_activate_alarm_once(_) -> + do_t_resource_activate_alarm_once( + #{name => test_resource, health_check_error => true}, + connector_demo_health_check_error + ). + +do_t_resource_activate_alarm_once(ResourceConfig, SubscribeEvent) -> + ?check_trace( + begin + ?wait_async_action( + emqx_resource:create_local( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + ResourceConfig, + #{auto_restart_interval => 100, health_check_interval => 100} + ), + #{?snk_kind := resource_activate_alarm, resource_id := ?ID} + ), + ?assertMatch([#{activated := true, name := ?ID}], emqx_alarm:get_alarms(activated)), + {ok, SubRef} = snabbkaffe:subscribe( + ?match_event(#{?snk_kind := SubscribeEvent}), 4, 7000 + ), + ?assertMatch({ok, [_, _, _, _]}, snabbkaffe:receive_events(SubRef)) + end, + fun(Trace) -> + ?assertMatch([_], ?of_kind(resource_activate_alarm, Trace)) + end + ). + %%------------------------------------------------------------------------------ %% Helpers %%------------------------------------------------------------------------------ -inc_counter_in_parallel(N) -> - inc_counter_in_parallel(N, #{}). -inc_counter_in_parallel(N, Opts) -> +inc_counter_in_parallel(N) -> + inc_counter_in_parallel(N, {inc_counter, 1}, #{}).
+ +inc_counter_in_parallel(N, Opts0) -> + inc_counter_in_parallel(N, {inc_counter, 1}, Opts0). + +inc_counter_in_parallel(N, Query, Opts) -> Parent = self(), Pids = [ erlang:spawn(fun() -> - emqx_resource:query(?ID, {inc_counter, 1}, Opts), + emqx_resource:query(?ID, maybe_apply(Query), maybe_apply(Opts)), Parent ! {complete, self()} end) || _ <- lists:seq(1, N) ], + [ + receive + {complete, Pid} -> ok + after 1000 -> + ct:fail({wait_for_query_timeout, Pid}) + end + || Pid <- Pids + ], + ok. + +inc_counter_in_parallel_increasing(N, StartN, Opts) -> + Parent = self(), + Pids = [ + erlang:spawn(fun() -> + emqx_resource:query(?ID, {inc_counter, M}, maybe_apply(Opts)), + Parent ! {complete, self()} + end) + || M <- lists:seq(StartN, StartN + N - 1) + ], [ receive {complete, Pid} -> ok @@ -713,9 +2883,172 @@ inc_counter_in_parallel(N, Opts) -> || Pid <- Pids ]. +maybe_apply(FunOrTerm) -> + maybe_apply(FunOrTerm, []). + +maybe_apply(Fun, Args) when is_function(Fun) -> + erlang:apply(Fun, Args); +maybe_apply(Term, _Args) -> + Term. + bin_config() -> <<"\"name\": \"test_resource\"">>. config() -> {ok, Config} = hocon:binary(bin_config()), Config. + +tap_metrics(Line) -> + #{counters := C, gauges := G} = emqx_resource:get_metrics(?ID), + ct:pal("metrics (l. ~b): ~p", [Line, #{counters => C, gauges => G}]), + #{counters => C, gauges => G}. + +install_telemetry_handler(TestCase) -> + Tid = ets:new(TestCase, [ordered_set, public]), + HandlerId = TestCase, + TestPid = self(), + _ = telemetry:attach_many( + HandlerId, + emqx_resource_metrics:events(), + fun(EventName, Measurements, Metadata, _Config) -> + Data = #{ + name => EventName, + measurements => Measurements, + metadata => Metadata + }, + ets:insert(Tid, {erlang:monotonic_time(), Data}), + TestPid ! {telemetry, Data}, + ok + end, + unused_config + ), + on_exit(fun() -> + telemetry:detach(HandlerId), + ets:delete(Tid) + end), + put({?MODULE, telemetry_table}, Tid), + Tid. + +wait_until_gauge_is(GaugeName, ExpectedValue, Timeout) -> + Events = receive_all_events(GaugeName, Timeout), + case length(Events) > 0 andalso lists:last(Events) of + #{measurements := #{gauge_set := ExpectedValue}} -> + ok; + #{measurements := #{gauge_set := Value}} -> + ct:fail( + "gauge ~p didn't reach expected value ~p; last value: ~p", + [GaugeName, ExpectedValue, Value] + ); + false -> + ct:pal("no ~p gauge events received!", [GaugeName]) + end. + +receive_all_events(EventName, Timeout) -> + receive_all_events(EventName, Timeout, []). + +receive_all_events(EventName, Timeout, Acc) -> + receive + {telemetry, #{name := [_, _, EventName]} = Event} -> + receive_all_events(EventName, Timeout, [Event | Acc]) + after Timeout -> + lists:reverse(Acc) + end. + +wait_telemetry_event(EventName) -> + wait_telemetry_event(EventName, #{timeout => 5_000, n_events => 1}). + +wait_telemetry_event( + EventName, + Opts0 +) -> + DefaultOpts = #{timeout => 5_000, n_events => 1}, + #{timeout := Timeout, n_events := NEvents} = maps:merge(DefaultOpts, Opts0), + wait_n_events(NEvents, Timeout, EventName). + +wait_n_events(NEvents, _Timeout, _EventName) when NEvents =< 0 -> + ok; +wait_n_events(NEvents, Timeout, EventName) -> + TelemetryTable = get({?MODULE, telemetry_table}), + receive + {telemetry, #{name := [_, _, EventName]}} -> + wait_n_events(NEvents - 1, Timeout, EventName) + after Timeout -> + RecordedEvents = ets:tab2list(TelemetryTable), + ct:pal("recorded events: ~p", [RecordedEvents]), + error({timeout_waiting_for_telemetry, EventName}) + end. 
+ +assert_sync_retry_fail_then_succeed_inflight(Trace) -> + ct:pal(" ~p", [Trace]), + ?assert( + ?strict_causality( + #{?snk_kind := buffer_worker_flush_nack, ref := _Ref}, + #{?snk_kind := buffer_worker_retry_inflight_failed, ref := _Ref}, + Trace + ) + ), + %% not strict causality because it might retry more than once + %% before restoring the resource health. + ?assert( + ?causality( + #{?snk_kind := buffer_worker_retry_inflight_failed, ref := _Ref}, + #{?snk_kind := buffer_worker_retry_inflight_succeeded, ref := _Ref}, + Trace + ) + ), + ok. + +assert_async_retry_fail_then_succeed_inflight(Trace) -> + ct:pal(" ~p", [Trace]), + ?assert( + ?strict_causality( + #{?snk_kind := handle_async_reply, action := nack}, + #{?snk_kind := buffer_worker_retry_inflight_failed, ref := _Ref}, + Trace + ) + ), + %% not strict causality because it might retry more than once + %% before restoring the resource health. + ?assert( + ?causality( + #{?snk_kind := buffer_worker_retry_inflight_failed, ref := _Ref}, + #{?snk_kind := buffer_worker_retry_inflight_succeeded, ref := _Ref}, + Trace + ) + ), + ok. + +trace_between_span(Trace0, Marker) -> + {Trace1, [_ | _]} = ?split_trace_at(#{?snk_kind := Marker, ?snk_span := {complete, _}}, Trace0), + {[_ | _], [_ | Trace2]} = ?split_trace_at(#{?snk_kind := Marker, ?snk_span := start}, Trace1), + Trace2. + +wait_until_all_marked_as_retriable(NumExpected) when NumExpected =< 0 -> + ok; +wait_until_all_marked_as_retriable(NumExpected) -> + Seen = #{}, + do_wait_until_all_marked_as_retriable(NumExpected, Seen). + +do_wait_until_all_marked_as_retriable(NumExpected, _Seen) when NumExpected =< 0 -> + ok; +do_wait_until_all_marked_as_retriable(NumExpected, Seen) -> + Res = ?block_until( + #{?snk_kind := buffer_worker_async_agent_down, ?snk_meta := #{pid := P}} when + not is_map_key(P, Seen), + 10_000 + ), + case Res of + {timeout, Evts} -> + ct:pal("events so far:\n ~p", [Evts]), + ct:fail("timeout waiting for events"); + {ok, #{num_affected := NumAffected, ?snk_meta := #{pid := Pid}}} -> + ct:pal("affected: ~p; pid: ~p", [NumAffected, Pid]), + case NumAffected >= NumExpected of + true -> + ok; + false -> + do_wait_until_all_marked_as_retriable(NumExpected - NumAffected, Seen#{ + Pid => true + }) + end + end. diff --git a/apps/emqx_retainer/i18n/emqx_retainer_api_i18n.conf b/apps/emqx_retainer/i18n/emqx_retainer_api_i18n.conf deleted file mode 100644 index aced87076..000000000 --- a/apps/emqx_retainer/i18n/emqx_retainer_api_i18n.conf +++ /dev/null @@ -1,143 +0,0 @@ -emqx_retainer_api { - - get_config_api { - desc { - en: "View config" - zh: "查看配置内容" - } - } - - config_content { - desc { - en: "The config content" - zh: "配置内容" - } - } - - config_not_found { - desc { - en: "Config not found." - zh: "配置不存在" - } - } - - update_retainer_api { - desc { - en: "Update retainer config." - zh: "更新配置" - } - } - - update_config_success { - desc { - en: "Update configs successfully." - zh: "配置更新成功" - } - } - - update_config_failed { - desc { - en: "Update config failed" - zh: "配置更新失败" - } - } - - list_retained_api { - desc { - en: "List retained messages." - zh: "查看保留消息列表" - } - } - - retained_list { - desc { - en: "Retained messages list." - zh: "保留消息列表" - } - } - - unsupported_backend { - desc { - en: "Unsupported backend." - zh: "不支持的后端" - } - } - - lookup_api { - desc { - en: "Lookup a message by a topic without wildcards." - zh: "通过不带通配符的主题查看对应的保留消息" - } - } - - message_detail { - desc { - en: "Details of the message." 
- zh: "消息详情" - } - } - - message_not_exist { - desc { - en: "Viewed message doesn't exist." - zh: "消息不存在" - } - } - - delete_matching_api { - desc { - en: "Delete matching messages." - zh: "删除对应的消息" - } - } - - topic { - desc { - en: "Topic." - zh: "主题" - } - } - - msgid { - desc { - en: "Message ID." - zh: "消息 ID" - } - } - - qos { - desc { - en: "QoS." - zh: "QoS" - } - } - - publish_at { - desc { - en: "Message publish time, RFC 3339 format." - zh: "消息发送时间, RFC 3339 格式" - } - } - - from_clientid { - desc { - en: "The clientid of publisher." - zh: "发布者的 ClientID" - } - } - - from_username { - desc { - en: "The username of publisher." - zh: "发布者的用户名" - } - } - - payload { - desc { - en: "Payload." - zh: "消息内容" - } - } - -} diff --git a/apps/emqx_retainer/i18n/emqx_retainer_i18n.conf b/apps/emqx_retainer/i18n/emqx_retainer_i18n.conf deleted file mode 100644 index 1808f3fe7..000000000 --- a/apps/emqx_retainer/i18n/emqx_retainer_i18n.conf +++ /dev/null @@ -1,107 +0,0 @@ -emqx_retainer_schema { - - enable { - desc { - en: "Enable retainer feature" - zh: "是否开启消息保留功能" - } - } - - msg_expiry_interval { - desc { - en: "Message retention time. 0 means message will never be expired." - zh: "消息保留时间。0 代表永久保留" - } - } - - flow_control { - desc { - en: "Flow control." - zh: "流控设置" - } - } - - msg_clear_interval { - desc { - en: """Periodic interval for cleaning up expired messages. -Never clear if the value is 0. - """ - zh: "消息清理间隔。0 代表不进行清理" - } - } - - max_payload_size { - desc { - en: "Maximum retained message size." - zh: "消息大小最大值" - } - } - - stop_publish_clear_msg { - desc { - en: """When the retained flag of the `PUBLISH` message is set and Payload is empty, -whether to continue to publish the message. -See: -http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc398718038""" - zh: """是否不发送保留消息的清理消息,在 MQTT 5.0 中如果一条保留消息的消息体为空,则会清除掉之前存储 -的对应的保留消息,通过这个值控制是否停止发送清理消息""" - } - } - - backend { - desc { - en: "Settings for the database storing the retained messages." - zh: "保留消息的存储后端" - } - } - - mnesia_config_type { - desc { - en: "Backend type." - zh: "后端类型" - } - } - - mnesia_config_storage_type { - desc { - en: "Specifies whether the messages are stored in RAM or persisted on disc." - zh: "选择消息是存放在磁盘还是内存中" - } - } - - max_retained_messages { - desc { - en: "Maximum number of retained messages. 0 means no limit." - zh: "消息保留的数量上限。0 表示无限" - } - } - - batch_read_number { - desc { - en: "Size of the batch when reading messages from storage. 0 means no limit." - zh: "从存储后端批量加载时的每批数量上限,0 代表一次性读取" - } - } - - batch_deliver_number { - desc { - en: "The number of retained messages can be delivered per batch." - zh: "批量派发时每批的数量。0 代表一次性全部派发" - } - } - - batch_deliver_limiter { - desc { - en: """The rate limiter name for retained messages' delivery. -Limiter helps to avoid delivering too many messages to the client at once, which may cause the client to block or crash, or drop messages due to exceeding the size of the message queue. -The names of the available rate limiters are taken from the existing rate limiters under `limiter.batch`. 
-If this field is empty, limiter is not used.""" - zh: """批量发送的限流器的名称。 -限流器可以用来防止短时间内向客户端发送太多的消息,从而避免过多的消息导致客户端队列堵塞甚至崩溃。 -这个名称需要是指向 `limiter.batch` 下的一个真实存在的限流器。 -如果这个字段为空,则不使用限流器。 - """ - } - } - -} diff --git a/apps/emqx_retainer/rebar.config b/apps/emqx_retainer/rebar.config index 7e791f90f..a178e10a1 100644 --- a/apps/emqx_retainer/rebar.config +++ b/apps/emqx_retainer/rebar.config @@ -1,6 +1,9 @@ %% -*- mode: erlang -*- -{deps, [{emqx, {path, "../emqx"}}]}. +{deps, [ + {emqx, {path, "../emqx"}}, + {emqx_utils, {path, "../emqx_utils"}} +]}. {edoc_opts, [{preprocess, true}]}. {erl_opts, [ @@ -27,7 +30,7 @@ {profiles, [ {test, [ {deps, [ - {emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.5.0"}}} + {emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.8.5"}}} ]} ]} ]}. diff --git a/apps/emqx_retainer/src/emqx_retainer.app.src b/apps/emqx_retainer/src/emqx_retainer.app.src index f61468d9b..7bfc8ee4e 100644 --- a/apps/emqx_retainer/src/emqx_retainer.app.src +++ b/apps/emqx_retainer/src/emqx_retainer.app.src @@ -2,10 +2,10 @@ {application, emqx_retainer, [ {description, "EMQX Retainer"}, % strict semver, bump manually! - {vsn, "5.0.8"}, + {vsn, "5.0.12"}, {modules, []}, {registered, [emqx_retainer_sup]}, - {applications, [kernel, stdlib, emqx]}, + {applications, [kernel, stdlib, emqx, emqx_ctl]}, {mod, {emqx_retainer_app, []}}, {env, []}, {licenses, ["Apache-2.0"]}, diff --git a/apps/emqx_retainer/src/emqx_retainer.erl b/apps/emqx_retainer/src/emqx_retainer.erl index aa1260033..b81ea2446 100644 --- a/apps/emqx_retainer/src/emqx_retainer.erl +++ b/apps/emqx_retainer/src/emqx_retainer.erl @@ -321,16 +321,23 @@ update_config( OldConf ) -> #{ - backend := BackendCfg, + backend := #{ + type := BackendType, + storage_type := StorageType + }, msg_clear_interval := ClearInterval } = NewConf, - #{backend := OldBackendCfg} = OldConf, - - StorageType = maps:get(type, BackendCfg), - OldStrorageType = maps:get(type, OldBackendCfg), - case OldStrorageType of - StorageType -> + #{ + backend := #{ + type := OldBackendType, + storage_type := OldStorageType + } + } = OldConf, + SameBackendType = BackendType =:= OldBackendType, + SameStorageType = StorageType =:= OldStorageType, + case SameBackendType andalso SameStorageType of + true -> State#{ clear_timer := check_timer( ClearTimer, @@ -338,7 +345,7 @@ update_config( clear_expired ) }; - _ -> + false -> State2 = disable_retainer(State), enable_retainer(State2, NewConf) end. diff --git a/apps/emqx_retainer/src/emqx_retainer_api.erl b/apps/emqx_retainer/src/emqx_retainer_api.erl index fa11b00f4..7b1337140 100644 --- a/apps/emqx_retainer/src/emqx_retainer_api.erl +++ b/apps/emqx_retainer/src/emqx_retainer_api.erl @@ -166,7 +166,7 @@ config(put, #{body := Body}) -> %%------------------------------------------------------------------------------ lookup_retained(get, #{query_string := Qs}) -> Page = maps:get(<<"page">>, Qs, 1), - Limit = maps:get(<<"limit">>, Qs, emqx_mgmt:max_row_limit()), + Limit = maps:get(<<"limit">>, Qs, emqx_mgmt:default_row_limit()), {ok, Msgs} = emqx_retainer_mnesia:page_read(undefined, undefined, Page, Limit), {200, #{ data => [format_message(Msg) || Msg <- Msgs], diff --git a/apps/emqx_retainer/src/emqx_retainer_dispatcher.erl b/apps/emqx_retainer/src/emqx_retainer_dispatcher.erl index 454a65eb3..7b0a0dc2a 100644 --- a/apps/emqx_retainer/src/emqx_retainer_dispatcher.erl +++ b/apps/emqx_retainer/src/emqx_retainer_dispatcher.erl @@ -91,7 +91,7 @@ worker() -> | ignore. 
start_link(Pool, Id) -> gen_server:start_link( - {local, emqx_misc:proc_name(?MODULE, Id)}, + {local, emqx_utils:proc_name(?MODULE, Id)}, ?MODULE, [Pool, Id], [{hibernate_after, 1000}] @@ -156,7 +156,7 @@ handle_cast({dispatch, Context, Pid, Topic}, #{limiter := Limiter} = State) -> {ok, Limiter2} = dispatch(Context, Pid, Topic, undefined, Limiter), {noreply, State#{limiter := Limiter2}}; handle_cast({refresh_limiter, Conf}, State) -> - BucketCfg = emqx_map_lib:deep_get([flow_control, batch_deliver_limiter], Conf, undefined), + BucketCfg = emqx_utils_maps:deep_get([flow_control, batch_deliver_limiter], Conf, undefined), {ok, Limiter} = emqx_limiter_server:connect(?APP, internal, BucketCfg), {noreply, State#{limiter := Limiter}}; handle_cast(Msg, State) -> diff --git a/apps/emqx_retainer/src/emqx_retainer_mnesia.erl b/apps/emqx_retainer/src/emqx_retainer_mnesia.erl index 69a6a877a..2137d49f2 100644 --- a/apps/emqx_retainer/src/emqx_retainer_mnesia.erl +++ b/apps/emqx_retainer/src/emqx_retainer_mnesia.erl @@ -146,7 +146,9 @@ store_retained(_, Msg = #message{topic = Topic}) -> reason => table_is_full }); false -> - do_store_retained(Msg, Tokens, ExpiryTime) + do_store_retained(Msg, Tokens, ExpiryTime), + ?tp(message_retained, #{topic => Topic}), + ok end. clear_expired(_) -> @@ -624,7 +626,7 @@ do_reindex_batch(QC, Done) -> {Status, Done + length(Topics)}. wait_dispatch_complete(Timeout) -> - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), {Results, []} = emqx_retainer_proto_v2:wait_dispatch_complete(Nodes, Timeout), lists:all( fun(Result) -> Result =:= ok end, @@ -647,7 +649,7 @@ active_indices() -> {dirty_indices(read), dirty_indices(write)}. are_indices_updated(Indices) -> - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), case emqx_retainer_proto_v2:active_mnesia_indices(Nodes) of {Results, []} -> lists:all( diff --git a/apps/emqx_retainer/src/emqx_retainer_schema.erl b/apps/emqx_retainer/src/emqx_retainer_schema.erl index 472ecc284..74f1e2371 100644 --- a/apps/emqx_retainer/src/emqx_retainer_schema.erl +++ b/apps/emqx_retainer/src/emqx_retainer_schema.erl @@ -41,25 +41,26 @@ fields("retainer") -> sc( emqx_schema:duration_ms(), msg_expiry_interval, - "0s" + <<"0s">> )}, {msg_clear_interval, sc( emqx_schema:duration_ms(), msg_clear_interval, - "0s" + <<"0s">> )}, {flow_control, sc( ?R_REF(flow_control), flow_control, - #{} + #{}, + ?IMPORTANCE_HIDDEN )}, {max_payload_size, sc( emqx_schema:bytesize(), max_payload_size, - "1MB" + <<"1MB">> )}, {stop_publish_clear_msg, sc( @@ -125,7 +126,9 @@ desc(_) -> %% hoconsc:mk(Type, #{desc => ?DESC(DescId)}). sc(Type, DescId, Default) -> - hoconsc:mk(Type, #{default => Default, desc => ?DESC(DescId)}). + sc(Type, DescId, Default, ?DEFAULT_IMPORTANCE). +sc(Type, DescId, Default, Importance) -> + hoconsc:mk(Type, #{default => Default, desc => ?DESC(DescId), importance => Importance}). backend_config() -> hoconsc:mk(hoconsc:ref(?MODULE, mnesia_config), #{desc => ?DESC(backend)}). 
diff --git a/apps/emqx_retainer/test/emqx_retainer_SUITE.erl b/apps/emqx_retainer/test/emqx_retainer_SUITE.erl index 845f07802..c90ec6b2b 100644 --- a/apps/emqx_retainer/test/emqx_retainer_SUITE.erl +++ b/apps/emqx_retainer/test/emqx_retainer_SUITE.erl @@ -687,11 +687,19 @@ t_deliver_when_banned(_) -> }), timer:sleep(100), - snabbkaffe:start_trace(), - {ok, #{}, [0]} = emqtt:subscribe(C1, <<"retained/+">>, [{qos, 0}, {rh, 0}]), - timer:sleep(500), - Trace = snabbkaffe:collect_trace(), + snabbkaffe:start_trace(), + {ok, SubRef} = + snabbkaffe:subscribe( + ?match_event(#{?snk_kind := ignore_retained_message_deliver}), + _NEvents = 3, + _Timeout = 10000, + 0 + ), + + {ok, #{}, [0]} = emqtt:subscribe(C1, <<"retained/+">>, [{qos, 0}, {rh, 0}]), + + {ok, Trace} = snabbkaffe:receive_events(SubRef), ?assertEqual(3, length(?of_kind(ignore_retained_message_deliver, Trace))), snabbkaffe:stop(), emqx_banned:delete(Who), @@ -750,23 +758,22 @@ with_conf(ConfMod, Case) -> end. make_limiter_cfg(Rate) -> - Infinity = emqx_limiter_schema:infinity_value(), Client = #{ rate => Rate, initial => 0, - capacity => Infinity, + burst => 0, low_watermark => 1, divisible => false, max_retry_time => timer:seconds(5), failure_strategy => force }, - #{client => Client, rate => Infinity, initial => 0, capacity => Infinity}. + #{client => Client, rate => Rate, initial => 0, burst => 0}. make_limiter_json(Rate) -> Client = #{ <<"rate">> => Rate, <<"initial">> => 0, - <<"capacity">> => <<"infinity">>, + <<"burst">> => <<"0">>, <<"low_watermark">> => 0, <<"divisible">> => <<"false">>, <<"max_retry_time">> => <<"5s">>, @@ -776,5 +783,5 @@ make_limiter_json(Rate) -> <<"client">> => Client, <<"rate">> => <<"infinity">>, <<"initial">> => 0, - <<"capacity">> => <<"infinity">> + <<"burst">> => <<"0">> }. diff --git a/apps/emqx_retainer/test/emqx_retainer_api_SUITE.erl b/apps/emqx_retainer/test/emqx_retainer_api_SUITE.erl index aee6aa4e4..61eee0510 100644 --- a/apps/emqx_retainer/test/emqx_retainer_api_SUITE.erl +++ b/apps/emqx_retainer/test/emqx_retainer_api_SUITE.erl @@ -72,7 +72,7 @@ t_config(_Config) -> ), UpdateConf = fun(Enable) -> - RawConf = emqx_json:decode(ConfJson, [return_maps]), + RawConf = emqx_utils_json:decode(ConfJson, [return_maps]), UpdateJson = RawConf#{<<"enable">> := Enable}, {ok, UpdateResJson} = request_api( put, @@ -81,7 +81,7 @@ t_config(_Config) -> auth_header_(), UpdateJson ), - UpdateRawConf = emqx_json:decode(UpdateResJson, [return_maps]), + UpdateRawConf = emqx_utils_json:decode(UpdateResJson, [return_maps]), ?assertEqual(Enable, maps:get(<<"enable">>, UpdateRawConf)) end, @@ -103,11 +103,12 @@ t_messages(_) -> end, ?check_trace( - ?wait_async_action( - lists:foreach(Each, lists:seq(1, 5)), - #{?snk_kind := message_retained, topic := <<"retained/A">>}, - 500 - ), + {ok, {ok, _}} = + ?wait_async_action( + lists:foreach(Each, lists:seq(1, 5)), + #{?snk_kind := message_retained, topic := <<"retained/A">>}, + 500 + ), [] ), @@ -149,11 +150,12 @@ t_messages_page(_) -> end, ?check_trace( - ?wait_async_action( - lists:foreach(Each, lists:seq(1, 5)), - #{?snk_kind := message_retained, topic := <<"retained/A">>}, - 500 - ), + {ok, {ok, _}} = + ?wait_async_action( + lists:foreach(Each, lists:seq(1, 5)), + #{?snk_kind := message_retained, topic := <<"retained/A">>}, + 500 + ), [] ), Page = 4, @@ -219,12 +221,98 @@ t_lookup_and_delete(_) -> ok = emqtt:disconnect(C1). 
+t_change_storage_type(_Config) -> + Path = api_path(["mqtt", "retainer"]), + {ok, ConfJson} = request_api(get, Path), + RawConf = emqx_utils_json:decode(ConfJson, [return_maps]), + %% pre-conditions + ?assertMatch( + #{ + <<"backend">> := #{ + <<"type">> := <<"built_in_database">>, + <<"storage_type">> := <<"ram">> + }, + <<"enable">> := true + }, + RawConf + ), + ?assertEqual(ram_copies, mnesia:table_info(?TAB_INDEX_META, storage_type)), + ?assertEqual(ram_copies, mnesia:table_info(?TAB_MESSAGE, storage_type)), + ?assertEqual(ram_copies, mnesia:table_info(?TAB_INDEX, storage_type)), + %% insert some retained messages + {ok, C0} = emqtt:start_link([{clean_start, true}, {proto_ver, v5}]), + {ok, _} = emqtt:connect(C0), + ok = snabbkaffe:start_trace(), + Topic = <<"retained">>, + Payload = <<"retained">>, + {ok, {ok, _}} = + ?wait_async_action( + emqtt:publish(C0, Topic, Payload, [{qos, 0}, {retain, true}]), + #{?snk_kind := message_retained, topic := Topic}, + 500 + ), + emqtt:stop(C0), + ok = snabbkaffe:stop(), + {ok, MsgsJson0} = request_api(get, api_path(["mqtt", "retainer", "messages"])), + #{data := Msgs0, meta := _} = decode_json(MsgsJson0), + ?assertEqual(1, length(Msgs0)), + + ChangedConf = emqx_utils_maps:deep_merge( + RawConf, + #{ + <<"backend">> => + #{<<"storage_type">> => <<"disc">>} + } + ), + {ok, UpdateResJson} = request_api( + put, + Path, + [], + auth_header_(), + ChangedConf + ), + UpdatedRawConf = emqx_utils_json:decode(UpdateResJson, [return_maps]), + ?assertMatch( + #{ + <<"backend">> := #{ + <<"type">> := <<"built_in_database">>, + <<"storage_type">> := <<"disc">> + }, + <<"enable">> := true + }, + UpdatedRawConf + ), + ?assertEqual(disc_copies, mnesia:table_info(?TAB_INDEX_META, storage_type)), + ?assertEqual(disc_copies, mnesia:table_info(?TAB_MESSAGE, storage_type)), + ?assertEqual(disc_copies, mnesia:table_info(?TAB_INDEX, storage_type)), + %% retained messages must be kept across the storage switch + {ok, MsgsJson1} = request_api(get, api_path(["mqtt", "retainer", "messages"])), + #{data := Msgs1, meta := _} = decode_json(MsgsJson1), + ?assertEqual(1, length(Msgs1)), + {ok, C1} = emqtt:start_link([{clean_start, true}, {proto_ver, v5}]), + {ok, _} = emqtt:connect(C1), + {ok, _, _} = emqtt:subscribe(C1, Topic), + + receive + {publish, #{topic := T, payload := P, retain := R}} -> + ?assertEqual(Payload, P), + ?assertEqual(Topic, T), + ?assert(R), + ok + after 500 -> + emqtt:stop(C1), + ct:fail("should have preserved retained messages") + end, + emqtt:stop(C1), + + ok. + %%-------------------------------------------------------------------- %% HTTP Request %%-------------------------------------------------------------------- decode_json(Data) -> - BinJson = emqx_json:decode(Data, [return_maps]), - emqx_map_lib:unsafe_atom_key_map(BinJson). + BinJson = emqx_utils_json:decode(Data, [return_maps]), + emqx_utils_maps:unsafe_atom_key_map(BinJson). %%-------------------------------------------------------------------- %% Internal funcs diff --git a/apps/emqx_rule_engine/README.md b/apps/emqx_rule_engine/README.md index 2485ff534..2c2e43db3 100644 --- a/apps/emqx_rule_engine/README.md +++ b/apps/emqx_rule_engine/README.md @@ -1,23 +1,46 @@ -# emqx-rule-engine +# EMQX Rule Engine -IoT Rule Engine +The rule engine's goal is to provide a simple and flexible way to transform and +reroute the messages coming to the EMQX broker. For example, one message +containing measurements from multiple sensors of different types can be +transformed into multiple messages.
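+ +For instance, given a payload like `{"sensors": [{"name": "a", "temp": 20}, {"name": "b", "temp": 21}]}` (a hypothetical sketch: the payload shape and topic are invented for illustration), the rule language's `FOREACH` form, which binds each array element to `item`, can emit one output per element: + +``` +FOREACH payload.sensors DO item.name, item.temp FROM "t/sensors" +``` + +Each element of `payload.sensors` then produces its own result for the rule's actions to handle.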
+ + +## Concepts + +A rule is quite simple. It describes which messages it affects by +specifying a topic filter and a set of conditions that need to be met. If a +message matches the topic filter and all the conditions are met, the rule is +triggered. The rule can then transform the message and route it to a different +topic, or send it to another service (defined by an EMQX bridge). The rule +engine's message data transformation is designed to work well with structured data +such as JSON, Avro, and Protobuf. + + +A rule consists of three parts, **MATCH**, **TRANSFORM**, and **ACTIONS**, +described below: + +* **MATCH** - The rule's trigger condition. The rule is triggered when a message + arrives that matches the topic filter and all the specified conditions are met. +* **TRANSFORM** - The rule's data transformation. The rule can select data from the + incoming message and transform it into a new message. +* **ACTIONS** - The rule's action(s). A rule can have one or more actions, which + are executed when the rule is triggered. An action can route the message to a + different topic, or send it to another service (defined by + an EMQX bridge). -## Concept -``` -iot rule "Rule Name" - when - match TopicFilters and Conditions - select - para1 = val1 - para2 = val2 - then - take action(#{para2 => val1, #para2 => val2}) -``` ## Architecture +The following diagram shows how the rule engine is integrated with the EMQX +message broker. Incoming messages are checked against the rules, and if a rule +matches, it is triggered with the message as input. The rule can then transform +or split the message, route it to a different topic, or send it to another +service (defined by an EMQX bridge). + + ``` |-----------------| Pub ---->| Message Routing |----> Sub @@ -28,11 +51,33 @@ iot rule "Rule Name" | Rule Engine | |-----------------| | | - Backends Services Bridges + Services Bridges (defined by EMQX bridges) ``` -## SQL for Rule query statement +## Domain-Specific Language for Rules + +The **MATCH** and **TRANSFORM** parts of a rule are specified in a +domain-specific language that looks similar to SQL. The following is an example of a +rule engine statement. The `from "topic/a"` part specifies the topic filter +(only messages to the topic `topic/a` will be considered). The `where t > 50` +part specifies the condition that needs to be met for the rule to be triggered. +The `select id, time, temperature as t` part specifies the data transformation +(the selected fields will remain in the transformed message payload). The `as +t` part renames the `temperature` field to `t` in the +output message; the alias `t` can also be used in the `where` part of the rule. + ``` -select id, time, temperature as t from "topic/a" where t > 50; +select id, time, temperature as t from "topic/a" where t > 50 ``` + +This just scratches the surface of what is possible with the rule engine. The +full documentation is available at [EMQX Rule +Engine](https://www.emqx.io/docs/en/v5.0/data-integration/rules.html). For +example, there are many built-in functions that can be used in the rule engine +language to help with transformations and matching. One of the [built-in +functions lets you run JQ +queries](https://www.emqx.io/docs/en/v5.0/data-integration/rule-sql-jq.html), +enabling complex transformations of the message (see the sketch below).
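+ +As a small, hypothetical sketch of that jq function (the `readings` payload field and the topic are invented for illustration; `jq` takes a jq filter program and the value to run it on, per the linked documentation), the following statement converts an array of Celsius readings to Fahrenheit during the transformation step: + +``` +SELECT jq('.readings | map(. * 1.8 + 32)', payload) AS fahrenheit FROM "sensors/celsius" +``` + +The jq program runs over the decoded payload, and its results are bound to the `fahrenheit` field of the output message.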
+ diff --git a/apps/emqx_rule_engine/i18n/emqx_rule_api_schema.conf b/apps/emqx_rule_engine/i18n/emqx_rule_api_schema.conf deleted file mode 100644 index 88a5313ad..000000000 --- a/apps/emqx_rule_engine/i18n/emqx_rule_api_schema.conf +++ /dev/null @@ -1,685 +0,0 @@ -emqx_rule_api_schema { - - event_event_type { - desc { - en: "Event Type" - zh: "事件类型" - } - label: { - en: "Event Type" - zh: "事件类型" - } - } - - event_id { - desc { - en: "Message ID" - zh: "消息 ID" - } - label: { - en: "Message ID" - zh: "消息 ID" - } - } - - event_clientid { - desc { - en: "The Client ID" - zh: "客户端 ID" - } - label: { - en: "Client ID" - zh: "客户端 ID" - } - } - - event_username { - desc { - en: "The User Name" - zh: "" - } - label: { - en: "Username" - zh: "用户名" - } - } - - event_payload { - desc { - en: "The Message Payload" - zh: "消息负载" - } - label: { - en: "Message Payload" - zh: "消息负载" - } - } - - event_peerhost { - desc { - en: "The IP Address of the Peer Client" - zh: "对等客户端的 IP 地址" - } - label: { - en: "Peer IP Address" - zh: "对等客户端的 IP" - } - } - - event_topic { - desc { - en: "Message Topic" - zh: "消息主题" - } - label: { - en: "Message Topic" - zh: "消息主题" - } - } - - event_publish_received_at { - desc { - en: "The Time that this Message is Received" - zh: "消息被接受的时间" - } - label: { - en: "Message Received Time" - zh: "消息被接受的时间" - } - } - - event_qos { - desc { - en: "The Message QoS" - zh: "消息的 QoS" - } - label: { - en: "Message QoS" - zh: "消息 QoS" - } - } - - event_from_clientid { - desc { - en: "The Client ID" - zh: "事件来源客户端的 ID" - } - label: { - en: "Client ID" - zh: "客户端 ID" - } - } - - event_from_username { - desc { - en: "The User Name" - zh: "事件来源客户端的用户名" - } - label: { - en: "Username" - zh: "用户名" - } - } - - event_mountpoint { - desc { - en: "The Mountpoint" - zh: "挂载点" - } - label: { - en: "Mountpoint" - zh: "挂载点" - } - } - - event_peername { - desc { - en: "The IP Address and Port of the Peer Client" - zh: "对等客户端的 IP 地址和端口" - } - label: { - en: "IP Address And Port" - zh: "IP 地址和端口" - } - } - - event_sockname { - desc { - en: "The IP Address and Port of the Local Listener" - zh: "本地监听的 IP 地址和端口" - } - label: { - en: "IP Address And Port" - zh: "IP 地址和端口" - } - } - - event_proto_name { - desc { - en: "Protocol Name" - zh: "协议名称" - } - label: { - en: "Protocol Name" - zh: "协议名称" - } - } - - event_proto_ver { - desc { - en: "Protocol Version" - zh: "协议版本" - } - label: { - en: "Protocol Version" - zh: "协议版本" - } - } - - event_keepalive { - desc { - en: "KeepAlive" - zh: "保持连接" - } - label: { - en: "KeepAlive" - zh: "保持连接" - } - } - - event_clean_start { - desc { - en: "Clean Start" - zh: "清除会话" - } - label: { - en: "Clean Start" - zh: "清除会话" - } - } - - event_expiry_interval { - desc { - en: "Expiry Interval" - zh: "到期间隔" - } - label: { - en: "Expiry Interval" - zh: "到期间隔" - } - } - - event_is_bridge { - desc { - en: "Is Bridge" - zh: "是否桥接" - } - label: { - en: "Is Bridge" - zh: "是否桥接" - } - } - - event_connected_at { - desc { - en: "The Time that this Client is Connected" - zh: "客户端连接完成时的时刻" - } - label: { - en: "Connected Time" - zh: "连接完成时的时刻" - } - } - - event_action { - desc { - en: "Publish or Subscribe" - zh: "订阅或发布" - } - label: { - en: "Publish or Subscribe" - zh: "订阅或发布" - } - } - - event_authz_source { - desc { - en: "Cache, Plugs or Default" - zh: "缓存,插件或者默认值" - } - label: { - en: "Auth Source" - zh: "认证源" - } - } - - event_result { - desc { - en: "Allow or Deny" - zh: "允许或禁止" - } - label: { - en: "Auth Result" - zh: "认证结果" - } - } - - event_server { - desc { - en: "The IP address (or 
hostname) and port of the MQTT broker, in IP:Port format" - zh: "MQTT broker的 IP 地址(或主机名)和端口,采用 IP:Port 格式" - } - label: { - en: "Server IP And Port" - zh: "服务器 IP 地址和端口" - } - } - - event_dup { - desc { - en: "The DUP flag of the MQTT message" - zh: "MQTT 消息的 DUP 标志" - } - label: { - en: "DUP Flag" - zh: "DUP 标志" - } - } - - event_retain { - desc { - en: "If is a retain message" - zh: "是否是保留消息" - } - label: { - en: "Retain Message" - zh: "保留消息" - } - } - - event_ctx_dropped { - desc { - en: "The Reason for Dropping" - zh: "消息被丢弃的原因" - } - label: { - en: "Dropped Reason" - zh: "丢弃原因" - } - } - - event_ctx_disconnected_reason { - desc { - en: "The Reason for Disconnect" - zh: "断开连接的原因" - } - label: { - en: "Disconnect Reason" - zh: "断开连接原因" - } - } - - event_ctx_disconnected_da { - desc { - en: "The Time that this Client is Disconnected" - zh: "客户端断开连接的时刻" - } - label: { - en: "Disconnected Time" - zh: "客户端断开连接时刻" - } - } - - event_ctx_connack_reason_code { - desc { - en: "The reason code" - zh: "错误码" - } - label: { - en: "Reason Code" - zh: "错误码" - } - } - - rule_id { - desc { - en: "The ID of the rule" - zh: "规则的 ID " - } - label: { - en: "Rule ID" - zh: "规则 ID " - } - } - - node_node { - desc { - en: "The node name" - zh: "节点名字" - } - label: { - en: "Node Name" - zh: "节点名字" - } - } - - metrics_sql_matched { - desc { - en: "How much times the FROM clause of the SQL is matched." - zh: "SQL 的 FROM 子句匹配的次数。" - } - label: { - en: "Matched" - zh: "命中数" - } - } - - metrics_sql_matched_rate { - desc { - en: "The rate of matched, times/second" - zh: "命中速率,次/秒" - } - label: { - en: "命中速率" - zh: "Matched Rate" - } - } - - metrics_sql_matched_rate_max { - desc { - en: "The max rate of matched, times/second" - zh: "最大命中速率,次/秒" - } - label: { - en: "Max Matched Rate" - zh: "最大命中速率" - } - } - - metrics_sql_matched_rate_last5m { - desc { - en: "The average rate of matched in last 5 minutes, times/second" - zh: "5分钟平均命中速率,次/秒" - } - label: { - en: "Average Matched Rate" - zh: "平均命中速率" - } - } - - metrics_sql_passed { - desc { - en: "How much times the SQL is passed" - zh: "SQL 通过的次数" - } - label: { - en: "SQL Passed" - zh: "SQL 通过" - } - } - - metrics_sql_failed { - desc { - en: "How much times the SQL is failed" - zh: "SQL 失败的次数" - } - label: { - en: "SQL Failed" - zh: "SQL 失败" - } - } - - metrics_sql_failed_exception { - desc { - en: "How much times the SQL is failed due to exceptions. This may because of a crash when calling a SQL function, or trying to do arithmetic operation on undefined variables" - zh: "SQL 由于执行异常而失败的次数。 这可能是因为调用 SQL 函数时崩溃,或者试图对未定义的变量进行算术运算" - } - label: { - en: "SQL Exception" - zh: "SQL 执行异常" - } - } - - metrics_sql_failed_unknown { - desc { - en: "How much times the SQL is failed due to an unknown error." - zh: "由于未知错误导致 SQL 失败的次数。" - } - label: { - en: "SQL Unknown Error" - zh: "SQL 未知错误" - } - } - - metrics_actions_total { - desc { - en: "How much times the actions are called by the rule. This value may several times of 'matched', depending on the number of the actions of the rule." - zh: "规则调用输出的次数。 该值可能是“sql.matched”的几倍,具体取决于规则输出的数量。" - } - label: { - en: "Action Total" - zh: "调用输出次数" - } - } - - metrics_actions_success { - desc { - en: "How much times the rule success to call the actions." - zh: "规则成功调用输出的次数。" - } - label: { - en: "Success Action" - zh: "成功调用输出次数" - } - } - - metrics_actions_failed { - desc { - en: "How much times the rule failed to call the actions." 
- zh: "规则调用输出失败的次数。" - } - label: { - en: "Failed Action" - zh: "调用输出失败次数" - } - } - - metrics_actions_failed_out_of_service { - desc { - en: "How much times the rule failed to call actions due to the action is out of service. For example, a bridge is disabled or stopped." - zh: "由于输出停止服务而导致规则调用输出失败的次数。 例如,桥接被禁用或停止。" - } - label: { - en: "Fail Action" - zh: "调用输出失败次数" - } - } - - metrics_actions_failed_unknown { - desc { - en: "How much times the rule failed to call actions due to to an unknown error." - zh: "由于未知错误,规则调用输出失败的次数。" - } - label: { - en: "Fail Action" - zh: "调用输出失败次数" - } - } - - test_context { - desc { - en: "The context of the event for testing" - zh: "测试事件的上下文" - } - label: { - en: "Event Conetxt" - zh: "事件上下文" - } - } - - test_sql { - desc { - en: "The SQL of the rule for testing" - zh: "测试的 SQL" - } - label: { - en: "Test SQL" - zh: "测试 SQL" - } - } - - rs_event { - desc { - en: "The event topics" - zh: "事件主题" - } - label: { - en: "Event Topics" - zh: "事件主题" - } - } - - rs_title { - desc { - en: "The title" - zh: "标题" - } - label: { - en: "Title" - zh: "标题" - } - } - - rs_description { - desc { - en: "The description" - zh: "描述" - } - label: { - en: "Description" - zh: "描述" - } - } - - rs_columns { - desc { - en: "The columns" - zh: "列" - } - label: { - en: "Column" - zh: "列" - } - } - - rs_test_columns { - desc { - en: "The test columns" - zh: "测试列" - } - label: { - en: "Test Columns" - zh: "测试列" - } - } - - rs_sql_example { - desc { - en: "The sql_example" - zh: "SQL 例子" - } - label: { - en: "SQL Example" - zh: "SQL 例子" - } - } - - ri_metrics { - desc { - en: "The metrics of the rule" - zh: "规则的计数器" - } - label: { - en: "Rule Metrics" - zh: "规则计数器" - } - } - - ri_node_metrics { - desc { - en: "The metrics of the rule for each node" - zh: "每个节点的规则计数器" - } - label: { - en: "Each Node Rule Metrics" - zh: "每个节点规则计数器" - } - } - - ri_from { - desc { - en: "The topics of the rule" - zh: "规则指定的主题" - } - label: { - en: "Topics of Rule" - zh: "规则指定的主题" - } - } - - ri_created_at { - desc { - en: "The created time of the rule" - zh: "规则创建时间" - } - label: { - en: "Rule Create Time" - zh: "规则创建时间" - } - } - - root_rule_creation { - desc { - en: "Schema for creating rules" - zh: "用于创建规则的 Schema" - } - label: { - en: "Create Schema" - zh: "用于创建规则的 Schema" - } - } - - root_rule_info { - desc { - en: "Schema for rule info" - zh: "用于规则信息的 Schema" - } - label: { - en: "Info Schema" - zh: "用于规则信息的 Schema" - } - } - - root_rule_events { - desc { - en: "Schema for rule events" - zh: "用于事件的 Schema" - } - label: { - en: "Rule Events Schema" - zh: "用于规则事件的 Schema" - } - } - - root_rule_test { - desc { - en: "Schema for testing rules" - zh: "用于规则测试的 Schema" - } - label: { - en: "Rule Test Schema" - zh: "用于规则测试的 Schema" - } - } - -} diff --git a/apps/emqx_rule_engine/i18n/emqx_rule_engine_api.conf b/apps/emqx_rule_engine/i18n/emqx_rule_engine_api.conf deleted file mode 100644 index 39fc3186c..000000000 --- a/apps/emqx_rule_engine/i18n/emqx_rule_engine_api.conf +++ /dev/null @@ -1,151 +0,0 @@ -emqx_rule_engine_api { - - api1 { - desc { - en: "List all rules" - zh: "列出所有规则" - } - label: { - en: "List All Rules" - zh: "列出所有规则" - } - } - api1_enable { - desc { - en: "Filter enable/disable rules" - zh: "根据规则是否开启条件过滤" - } - } - - api1_from { - desc { - en: "Filter rules by from(topic), exact match" - zh: "根据规则来源 Topic 过滤, 需要完全匹配" - } - } - - api1_like_id { - desc { - en: "Filter rules by id, Substring matching" - zh: "根据规则 id 过滤, 使用子串模糊匹配" - } - } - - api1_like_from { - desc { - en: "Filter rules by 
from(topic), Substring matching" - zh: "根据规则来源 Topic 过滤, 使用子串模糊匹配" - } - } - - api1_like_description { - desc { - en: "Filter rules by description, Substring matching" - zh: "根据规则描述过滤, 使用子串模糊匹配" - } - } - api1_match_from { - desc { - en: "Filter rules by from(topic), Mqtt topic matching" - zh: "根据规则来源 Topic 过滤, 使用 MQTT Topic 匹配" - } - } - - api2 { - desc { - en: "Create a new rule using given Id" - zh: "通过指定 ID 创建规则" - } - label: { - en: "Create Rule By ID" - zh: "通过指定 ID 创建规则" - } - } - - api3 { - desc { - en: "List all events can be used in rules" - zh: "列出所有能被规则使用的事件" - } - label: { - en: "List All Events Can Be Used In Rule" - zh: "列出所有能被规则使用的事件" - } - } - - api4 { - desc { - en: "Get a rule by given Id" - zh: "通过 ID 查询规则" - } - label: { - en: "Get Rule" - zh: "查询规则" - } - } - - api4_1 { - desc { - en: "Get a rule's metrics by given Id" - zh: "通过给定的 Id 获得规则的指标数据" - } - label: { - en: "Get Metric" - zh: "获得指标数据" - } - } - - api5 { - desc { - en: "Update a rule by given Id to all nodes in the cluster" - zh: "通过 ID 更新集群里所有节点上的规则" - } - label: { - en: "Update Cluster Rule" - zh: "更新集群规则" - } - } - - api6 { - desc { - en: "Delete a rule by given Id from all nodes in the cluster" - zh: "通过 ID 删除集群里所有节点上的规则" - } - label: { - en: "Delete Cluster Rule" - zh: "删除集群规则" - } - } - - api7 { - desc { - en: "Reset a rule metrics" - zh: "重置规则计数" - } - label: { - en: "Reset Rule Metrics" - zh: "重置规则计数" - } - } - - api8 { - desc { - en: "Test a rule" - zh: "测试一个规则" - } - label: { - en: "Test Rule" - zh: "测试规则" - } - } - desc9 { - desc { - en: "List of rules" - zh: "列出所有规则" - } - label: { - en: "List Rules" - zh: "列出所有规则" - } - } -} diff --git a/apps/emqx_rule_engine/i18n/emqx_rule_engine_schema.conf b/apps/emqx_rule_engine/i18n/emqx_rule_engine_schema.conf deleted file mode 100644 index bc5735c67..000000000 --- a/apps/emqx_rule_engine/i18n/emqx_rule_engine_schema.conf +++ /dev/null @@ -1,397 +0,0 @@ -emqx_rule_engine_schema { - - rules_name { - desc { - en: "The name of the rule" - zh: "规则名字" - } - label: { - en: "Rule Name" - zh: "规则名字" - } - } - - rules_sql { - desc { - en: """ -SQL query to transform the messages. -Example: SELECT * FROM "test/topic" WHERE payload.x = 1 -""" - zh: """ -用于处理消息的 SQL 。 -示例:SELECT * FROM "test/topic" WHERE payload.x = 1 -""" - } - label: { - en: "Rule SQL" - zh: "规则 SQL" - } - } - - rules_actions { - desc { - en: """ -A list of actions of the rule. -An action can be a string that refers to the channel ID of an EMQX bridge, or an object -that refers to a function. -There a some built-in functions like "republish" and "console", and we also support user -provided functions in the format: "{module}:{function}". -The actions in the list are executed sequentially. -This means that if one of the action is executing slowly, all the following actions will not -be executed until it returns. -If one of the action crashed, all other actions come after it will still be executed, in the -original order. -If there's any error when running an action, there will be an error message, and the 'failure' -counter of the function action or the bridge channel will increase. 
-""" - zh: """ -规则的动作列表。 -动作可以是指向 EMQX bridge 的引用,也可以是一个指向函数的对象。 -我们支持一些内置函数,如“republish”和“console”,我们还支持用户提供的函数,它的格式为:“{module}:{function}”。 -列表中的动作按顺序执行。这意味着如果其中一个动作执行缓慢,则以下所有动作都不会被执行直到它返回。 -如果其中一个动作崩溃,在它之后的所有动作仍然会被按照原始顺序执行。 -如果运行动作时出现任何错误,则会出现错误消息,并且相应的计数器会增加。 -""" - } - label: { - en: "Rule Action List" - zh: "动作列表" - } - } - - rules_enable { - desc { - en: "Enable or disable the rule" - zh: "启用或禁用规则引擎" - } - label: { - en: "Enable Or Disable Rule" - zh: "启用或禁用规则引擎" - } - } - - rules_metadata { - desc { - en: "Rule metadata, do not change manually" - zh: "规则的元数据,不要手动修改" - } - label: { - en: "Rule metadata" - zh: "规则的元数据" - } - } - - rules_description { - desc { - en: "The description of the rule" - zh: "规则的描述" - } - label: { - en: "Rule Description" - zh: "规则描述" - } - } - - republish_function { - desc { - en: """Republish the message as a new MQTT message""" - zh: """将消息重新发布为新的 MQTT 消息""" - } - label: { - en: "Republish Function" - zh: "重新发布函数" - } - } - - console_function { - desc { - en: """Print the actions to the console""" - zh: "将输出打印到控制台" - } - label: { - en: "Console Function" - zh: "控制台函数" - } - } - - user_provided_function_function { - desc { - en: """ -The user provided function. Should be in the format: '{module}:{function}'. -Where {module} is the Erlang callback module and {function} is the Erlang function. - -To write your own function, checkout the function console and -republish in the source file: -apps/emqx_rule_engine/src/emqx_rule_actions.erl as an example. -""" - zh: """ -用户提供的函数。 格式应为:'{module}:{function}'。 -其中 {module} 是 Erlang 回调模块, {function} 是 Erlang 函数。 -要编写自己的函数,请检查源文件:apps/emqx_rule_engine/src/emqx_rule_actions.erl 中的示例函数 consolerepublish 。 -""" - } - label: { - en: "User Provided Function" - zh: "用户提供的函数" - } - } - - user_provided_function_args { - desc { - en: """ -The args will be passed as the 3rd argument to module:function/3, -checkout the function console and republish in the source file: -apps/emqx_rule_engine/src/emqx_rule_actions.erl as an example. -""" - zh: """ -用户提供的参数将作为函数 module:function/3 的第三个参数, -请检查源文件:apps/emqx_rule_engine/src/emqx_rule_actions.erl 中的示例函数 consolerepublish 。 -""" - } - label: { - en: "User Provided Function Args" - zh: "用户提供函数的参数" - } - } - - republish_args_topic { - desc { - en: """ -The target topic of message to be re-published. -Template with variables is allowed, see description of the 'republish_args'. -""" - zh: """ -重新发布消息的目标主题。 -允许使用带有变量的模板,请参阅“republish_args”的描述。 -""" - } - label: { - en: "Target Topic" - zh: "目标主题" - } - } - - republish_args_qos { - desc { - en: """ -The qos of the message to be re-published. -Template with variables is allowed, see description of the 'republish_args'. -Defaults to ${qos}. If variable ${qos} is not found from the selected result of the rule, -0 is used. -""" - zh: """ -要重新发布的消息的 qos。允许使用带有变量的模板,请参阅“republish_args”的描述。 -默认为 ${qos}。 如果从规则的选择结果中没有找到变量 ${qos},则使用 0。 -""" - } - label: { - en: "Message QoS" - zh: "消息 QoS 等级" - } - } - - republish_args_retain { - desc { - en: """ -The 'retain' flag of the message to be re-published. -Template with variables is allowed, see description of the 'republish_args'. -Defaults to ${retain}. If variable ${retain} is not found from the selected result -of the rule, false is used. 
-""" - zh: """ -要重新发布的消息的“保留”标志。允许使用带有变量的模板,请参阅“republish_args”的描述。 -默认为 ${retain}。 如果从所选结果中未找到变量 ${retain},则使用 false。 -""" - } - label: { - en: "Retain Flag" - zh: "保留消息标志" - } - } - - republish_args_payload { - desc { - en: """ -The payload of the message to be re-published. -Template with variables is allowed, see description of the 'republish_args'. -Defaults to ${payload}. If variable ${payload} is not found from the selected result -of the rule, then the string "undefined" is used. -""" - zh: """ -要重新发布的消息的有效负载。允许使用带有变量的模板,请参阅“republish_args”的描述。 -默认为 ${payload}。 如果从所选结果中未找到变量 ${payload},则使用字符串 "undefined"。 -""" - } - label: { - en: "Message Payload" - zh: "消息负载" - } - } - republish_args_user_properties { - desc { - en: """ -From which variable should the MQTT message's User-Property pairs be taken from. -The value must be a map. -You may configure it to ${pub_props.'User-Property'} or -use SELECT *,pub_props.'User-Property' as user_properties -to forward the original user properties to the republished message. -You may also call map_put function like -map_put('my-prop-name', 'my-prop-value', user_properties) as user_properties -to inject user properties. -NOTE: MQTT spec allows duplicated user property names, but EMQX Rule-Engine does not. -""" - zh: """ -指定使用哪个变量来填充 MQTT 消息的 User-Property 列表。这个变量的值必须是一个 map 类型。 -可以设置成 ${pub_props.'User-Property'} 或者 -使用 SELECT *,pub_props.'User-Property' as user_properties 来把源 MQTT 消息 -的 User-Property 列表用于填充。 -也可以使用 map_put 函数来添加新的 User-Property, -map_put('my-prop-name', 'my-prop-value', user_properties) as user_properties -注意:MQTT 协议允许一个消息中出现多次同一个 property 名,但是 EMQX 的规则引擎不允许。 -""" - } - } - - rule_engine_ignore_sys_message { - desc { - en: "When set to 'true' (default), rule-engine will ignore messages published to $SYS topics." - zh: "当设置为“true”(默认)时,规则引擎将忽略发布到 $SYS 主题的消息。" - } - label: { - en: "Ignore Sys Message" - zh: "忽略系统消息" - } - } - - rule_engine_rules { - desc { - en: """The rules""" - zh: "规则" - } - label: { - en: "Rules" - zh: "规则" - } - } - - rule_engine_jq_function_default_timeout { - desc { - en: "Default timeout for the `jq` rule engine function" - zh: "规则引擎内建函数 `jq` 默认时间限制" - } - label: { - en: "Rule engine jq function default timeout" - zh: "规则引擎 jq 函数时间限制" - } - } - - rule_engine_jq_implementation_module { - desc { - en: "The implementation module for the jq rule engine function. The two options are jq_nif and jq_port. With the jq_nif option an Erlang NIF library is used while with the jq_port option an implementation based on Erlang port programs is used. The jq_nif option (the default option) is the fastest implementation of the two but jq_port is safer as the jq programs will not execute in the same process as the Erlang VM." 
- zh: "jq 规则引擎功能的实现模块。可用的两个选项是 jq_nif 和 jq_port。jq_nif 使用 Erlang NIF 库访问 jq 库,而 jq_port 使用基于 Erlang Port 的实现。jq_nif 方式(默认选项)是这两个选项中最快的实现,但 jq_port 方式更安全,因为这种情况下 jq 程序不会在 Erlang VM 进程中执行。" - } - label: { - en: "JQ Implementation Module" - zh: "JQ 实现模块" - } - } - - desc_rule_engine { - desc { - en: """Configuration for the EMQX Rule Engine.""" - zh: """配置 EMQX 规则引擎。""" - } - label: { - en: "Rule Engine Configuration" - zh: "配置规则引擎" - } - } - - desc_rules { - desc { - en: """Configuration for a rule.""" - zh: """配置规则""" - } - label: { - en: "Rule Configuration" - zh: "配置规则" - } - } - - desc_builtin_action_republish { - desc { - en: """Configuration for a built-in action.""" - zh: """配置重新发布。""" - } - label: { - en: "Republish Configuration" - zh: "配置重新发布" - } - } - - desc_builtin_action_console { - desc { - en: """Configuration for a built-in action.""" - zh: """配置打印到控制台""" - } - label: { - en: "Action Console Configuration" - zh: "配置打印到控制台" - } - } - - desc_user_provided_function { - desc { - en: """Configuration for a built-in action.""" - zh: """配置用户函数""" - } - label: { - en: "User Provid Function Configuration" - zh: "配置用户函数" - } - } - - desc_republish_args { - desc { - en: """The arguments of the built-in 'republish' action.One can use variables in the args. -The variables are selected by the rule. For example, if the rule SQL is defined as following: - - SELECT clientid, qos, payload FROM "t/1" - -Then there are 3 variables available: clientid, qos and -payload. And if we've set the args to: - - { - topic = "t/${clientid}" - qos = "${qos}" - payload = "msg: ${payload}" - } - -When the rule is triggered by an MQTT message with payload = `hello`, qos = 1, -clientid = `Steve`, the rule will republish a new MQTT message to topic `t/Steve`, -payload = `msg: hello`, and `qos = 1`.""" - zh: """ -内置 'republish' 动作的参数。 -可以在参数中使用变量。 -变量是规则中选择的字段。 例如规则 SQL 定义如下: - - SELECT clientid, qos, payload FROM "t/1" - -然后有 3 个变量可用:clientidqospayload。 如果我们将参数设置为: - - { - topic = "t/${clientid}" - qos = "${qos}" - payload = "msg: ${payload}" - } - -当收到一条消息 payload = `hello`, qos = 1, clientid = `Steve` 时,将重新发布一条新的 MQTT 消息到主题 `t/Steve` -消息内容为 payload = `msg: hello`, and `qos = 1""" - } - label: { - en: "Republish Args" - zh: "重新发布参数" - } - } - -} diff --git a/apps/emqx_rule_engine/rebar.config b/apps/emqx_rule_engine/rebar.config index 110caa33d..07c53d3e3 100644 --- a/apps/emqx_rule_engine/rebar.config +++ b/apps/emqx_rule_engine/rebar.config @@ -1,7 +1,8 @@ %% -*- mode: erlang -*- {deps, [ - {emqx, {path, "../emqx"}} + {emqx, {path, "../emqx"}}, + {emqx_utils, {path, "../emqx_utils"}} ]}. {erl_opts, [ diff --git a/apps/emqx_rule_engine/src/emqx_rule_actions.erl b/apps/emqx_rule_engine/src/emqx_rule_actions.erl index c4a6e2e73..a9f24ddcd 100644 --- a/apps/emqx_rule_engine/src/emqx_rule_actions.erl +++ b/apps/emqx_rule_engine/src/emqx_rule_actions.erl @@ -175,7 +175,7 @@ safe_publish(RuleId, Topic, QoS, Flags, Payload, PubProps) -> flags = Flags, headers = #{ republish_by => RuleId, - properties => emqx_misc:pub_props_to_packet(PubProps) + properties => emqx_utils:pub_props_to_packet(PubProps) }, topic = Topic, payload = Payload, @@ -213,7 +213,7 @@ replace_simple_var(Val, _Data, _Default) -> Val. format_msg([], Selected) -> - emqx_json:encode(Selected); + emqx_utils_json:encode(Selected); format_msg(Tokens, Selected) -> emqx_plugin_libs_rule:proc_tmpl(Tokens, Selected). 
diff --git a/apps/emqx_rule_engine/src/emqx_rule_api_schema.erl b/apps/emqx_rule_engine/src/emqx_rule_api_schema.erl index 23c2aab50..c9926f56f 100644 --- a/apps/emqx_rule_engine/src/emqx_rule_api_schema.erl +++ b/apps/emqx_rule_engine/src/emqx_rule_api_schema.erl @@ -26,7 +26,7 @@ -export([roots/0, fields/1]). --type tag() :: rule_creation | rule_test. +-type tag() :: rule_creation | rule_test | rule_engine. -spec check_params(map(), tag()) -> {ok, map()} | {error, term()}. check_params(Params, Tag) -> @@ -48,12 +48,15 @@ check_params(Params, Tag) -> roots() -> [ + {"rule_engine", sc(ref("rule_engine"), #{desc => ?DESC("root_rule_engine")})}, {"rule_creation", sc(ref("rule_creation"), #{desc => ?DESC("root_rule_creation")})}, {"rule_info", sc(ref("rule_info"), #{desc => ?DESC("root_rule_info")})}, {"rule_events", sc(ref("rule_events"), #{desc => ?DESC("root_rule_events")})}, {"rule_test", sc(ref("rule_test"), #{desc => ?DESC("root_rule_test")})} ]. +fields("rule_engine") -> + emqx_rule_engine_schema:rule_engine_settings(); fields("rule_creation") -> emqx_rule_engine_schema:fields("rules"); fields("rule_info") -> diff --git a/apps/emqx_rule_engine/src/emqx_rule_date.erl b/apps/emqx_rule_engine/src/emqx_rule_date.erl index a41beb20d..aeb5d7a1b 100644 --- a/apps/emqx_rule_engine/src/emqx_rule_date.erl +++ b/apps/emqx_rule_engine/src/emqx_rule_date.erl @@ -88,20 +88,20 @@ parse_date(TimeUnit, Offset, FormatString, InputString) -> calendar:rfc3339_to_system_time(Str, [{unit, TimeUnit}]). mlist(R) -> - %% %H Shows hour in 24-hour format [15] + %% %H Shows hour in 24-hour format [15] [ {$H, R#result.hour}, - %% %M Displays minutes [00-59] + %% %M Displays minutes [00-59] {$M, R#result.minute}, - %% %S Displays seconds [00-59] + %% %S Displays seconds [00-59] {$S, R#result.second}, - %% %y Displays year YYYY [2021] + %% %y Displays year YYYY [2021] {$y, R#result.year}, - %% %m Displays the number of the month [01-12] + %% %m Displays the number of the month [01-12] {$m, R#result.month}, - %% %d Displays the number of the month [01-12] + %% %d Displays the day of the month [01-31] {$d, R#result.day}, - %% %Z Displays Time zone + %% %Z Displays Time zone {$Z, R#result.zone} ]. @@ -223,20 +223,20 @@ parse_zone(Input) -> mlist1() -> maps:from_list( - %% %H Shows hour in 24-hour format [15] + %% %H Shows hour in 24-hour format [15] [ {$H, fun(Input) -> parse_int_times(2, Input) end}, - %% %M Displays minutes [00-59] + %% %M Displays minutes [00-59] {$M, fun(Input) -> parse_int_times(2, Input) end}, - %% %S Displays seconds [00-59] + %% %S Displays seconds [00-59] {$S, fun(Input) -> parse_second(Input) end}, - %% %y Displays year YYYY [2021] + %% %y Displays year YYYY [2021] {$y, fun(Input) -> parse_int_times(4, Input) end}, - %% %m Displays the number of the month [01-12] + %% %m Displays the number of the month [01-12] {$m, fun(Input) -> parse_int_times(2, Input) end}, - %% %d Displays the number of the month [01-12] + %% %d Displays the day of the month [01-31] {$d, fun(Input) -> parse_int_times(2, Input) end}, - %% %Z Displays Time zone + %% %Z Displays Time zone {$Z, fun(Input) -> parse_zone(Input) end} ] ). diff --git a/apps/emqx_rule_engine/src/emqx_rule_engine.app.src b/apps/emqx_rule_engine/src/emqx_rule_engine.app.src index edca35839..94a48fb35 100644 --- a/apps/emqx_rule_engine/src/emqx_rule_engine.app.src +++ b/apps/emqx_rule_engine/src/emqx_rule_engine.app.src @@ -2,10 +2,10 @@ {application, emqx_rule_engine, [ {description, "EMQX Rule Engine"}, % strict semver, bump manually!
- {vsn, "5.0.6"}, + {vsn, "5.0.16"}, {modules, []}, {registered, [emqx_rule_engine_sup, emqx_rule_engine]}, - {applications, [kernel, stdlib, rulesql, getopt]}, + {applications, [kernel, stdlib, rulesql, getopt, emqx_ctl]}, {mod, {emqx_rule_engine_app, []}}, {env, []}, {licenses, ["Apache-2.0"]}, diff --git a/apps/emqx_rule_engine/src/emqx_rule_engine.erl b/apps/emqx_rule_engine/src/emqx_rule_engine.erl index 0fab91389..9dd94970b 100644 --- a/apps/emqx_rule_engine/src/emqx_rule_engine.erl +++ b/apps/emqx_rule_engine/src/emqx_rule_engine.erl @@ -115,7 +115,7 @@ start_link() -> %%------------------------------------------------------------------------------ post_config_update(_, _Req, NewRules, OldRules, _AppEnvs) -> #{added := Added, removed := Removed, changed := Updated} = - emqx_map_lib:diff_maps(NewRules, OldRules), + emqx_utils_maps:diff_maps(NewRules, OldRules), maps_foreach( fun({Id, {_Old, New}}) -> {ok, _} = update_rule(New#{id => bin(Id)}) @@ -213,11 +213,12 @@ get_rules_with_same_event(Topic) -> ]. -spec get_rule_ids_by_action(action_name()) -> [rule_id()]. -get_rule_ids_by_action(ActionName) when is_binary(ActionName) -> +get_rule_ids_by_action(BridgeId) when is_binary(BridgeId) -> [ Id - || #{actions := Acts, id := Id} <- get_rules(), - lists:any(fun(A) -> A =:= ActionName end, Acts) + || #{actions := Acts, id := Id, from := Froms} <- get_rules(), + forwards_to_bridge(Acts, BridgeId) orelse + references_ingress_bridge(Froms, BridgeId) ]; get_rule_ids_by_action(#{function := FuncName}) when is_binary(FuncName) -> {Mod, Fun} = @@ -268,7 +269,7 @@ load_hooks_for_rule(#{from := Topics}) -> maybe_add_metrics_for_rule(Id) -> case emqx_metrics_worker:has_metrics(rule_metrics, Id) of true -> - ok; + ok = reset_metrics_for_rule(Id); false -> ok = emqx_metrics_worker:create_metrics(rule_metrics, Id, ?METRICS, ?RATE_METRICS) end. @@ -317,9 +318,9 @@ get_basic_usage_info() -> NumRules = length(EnabledRules), ReferencedBridges = lists:foldl( - fun(#{actions := Actions, from := From}, Acc) -> - BridgeIDs0 = [BridgeID || <<"$bridges/", BridgeID/binary>> <- From], - BridgeIDs1 = lists:filter(fun is_binary/1, Actions), + fun(#{actions := Actions, from := Froms}, Acc) -> + BridgeIDs0 = get_referenced_hookpoints(Froms), + BridgeIDs1 = get_egress_bridges(Actions), tally_referenced_bridges(BridgeIDs0 ++ BridgeIDs1, Acc) end, #{}, @@ -340,7 +341,10 @@ get_basic_usage_info() -> tally_referenced_bridges(BridgeIDs, Acc0) -> lists:foldl( fun(BridgeID, Acc) -> - {BridgeType, _BridgeName} = emqx_bridge_resource:parse_bridge_id(BridgeID), + {BridgeType, _BridgeName} = emqx_bridge_resource:parse_bridge_id( + BridgeID, + #{atom_name => false} + ), maps:update_with( BridgeType, fun(X) -> X + 1 end, @@ -478,3 +482,28 @@ contains_actions(Actions, Mod0, Func0) -> end, Actions ). + +forwards_to_bridge(Actions, BridgeId) -> + lists:any(fun(A) -> A =:= BridgeId end, Actions). + +references_ingress_bridge(Froms, BridgeId) -> + lists:member( + BridgeId, + [ + RefBridgeId + || From <- Froms, + {ok, RefBridgeId} <- + [emqx_bridge_resource:bridge_hookpoint_to_bridge_id(From)] + ] + ). + +get_referenced_hookpoints(Froms) -> + [ + BridgeID + || From <- Froms, + {ok, BridgeID} <- + [emqx_bridge_resource:bridge_hookpoint_to_bridge_id(From)] + ]. + +get_egress_bridges(Actions) -> + lists:filter(fun is_binary/1, Actions). 
diff --git a/apps/emqx_rule_engine/src/emqx_rule_engine_api.erl b/apps/emqx_rule_engine/src/emqx_rule_engine_api.erl index 95c028a1e..d66f2c1c9 100644 --- a/apps/emqx_rule_engine/src/emqx_rule_engine_api.erl +++ b/apps/emqx_rule_engine/src/emqx_rule_engine_api.erl @@ -32,6 +32,7 @@ %% API callbacks -export([ + '/rule_engine'/2, '/rule_events'/2, '/rule_test'/2, '/rules'/2, @@ -41,7 +42,7 @@ ]). %% query callback --export([qs2ms/2, run_fuzzy_match/2, format_rule_resp/1]). +-export([qs2ms/2, run_fuzzy_match/2, format_rule_info_resp/1]). -define(ERR_BADARGS(REASON), begin R0 = err_msg(REASON), @@ -134,6 +135,7 @@ api_spec() -> paths() -> [ + "/rule_engine", "/rule_events", "/rule_test", "/rules", @@ -145,6 +147,9 @@ paths() -> error_schema(Code, Message) when is_atom(Code) -> emqx_dashboard_swagger:error_codes([Code], list_to_binary(Message)). +rule_engine_schema() -> + ref(emqx_rule_api_schema, "rule_engine"). + rule_creation_schema() -> ref(emqx_rule_api_schema, "rule_creation"). @@ -180,11 +185,11 @@ schema("/rules") -> ref(emqx_dashboard_swagger, page), ref(emqx_dashboard_swagger, limit) ], - summary => <<"List Rules">>, + summary => <<"List rules">>, responses => #{ 200 => [ - {data, mk(array(rule_info_schema()), #{desc => ?DESC("desc9")})}, + {data, mk(array(rule_info_schema()), #{desc => ?DESC("api1_resp")})}, {meta, mk(ref(emqx_dashboard_swagger, meta), #{})} ], 400 => error_schema('BAD_REQUEST', "Invalid Parameters") @@ -193,7 +198,7 @@ schema("/rules") -> post => #{ tags => [<<"rules">>], description => ?DESC("api2"), - summary => <<"Create a Rule">>, + summary => <<"Create a rule">>, 'requestBody' => rule_creation_schema(), responses => #{ 400 => error_schema('BAD_REQUEST', "Invalid Parameters"), @@ -207,7 +212,7 @@ schema("/rule_events") -> get => #{ tags => [<<"rules">>], description => ?DESC("api3"), - summary => <<"List Events">>, + summary => <<"List rule events">>, responses => #{ 200 => mk(ref(emqx_rule_api_schema, "rule_events"), #{}) } @@ -219,7 +224,7 @@ schema("/rules/:id") -> get => #{ tags => [<<"rules">>], description => ?DESC("api4"), - summary => <<"Get a Rule">>, + summary => <<"Get rule">>, parameters => param_path_id(), responses => #{ 404 => error_schema('NOT_FOUND', "Rule not found"), @@ -229,7 +234,7 @@ schema("/rules/:id") -> put => #{ tags => [<<"rules">>], description => ?DESC("api5"), - summary => <<"Update a Rule">>, + summary => <<"Update rule">>, parameters => param_path_id(), 'requestBody' => rule_creation_schema(), responses => #{ @@ -240,7 +245,7 @@ schema("/rules/:id") -> delete => #{ tags => [<<"rules">>], description => ?DESC("api6"), - summary => <<"Delete a Rule">>, + summary => <<"Delete rule">>, parameters => param_path_id(), responses => #{ 204 => <<"Delete rule successfully">> @@ -253,7 +258,7 @@ schema("/rules/:id/metrics") -> get => #{ tags => [<<"rules">>], description => ?DESC("api4_1"), - summary => <<"Get a Rule's Metrics">>, + summary => <<"Get rule metrics">>, parameters => param_path_id(), responses => #{ 404 => error_schema('NOT_FOUND', "Rule not found"), @@ -267,7 +272,7 @@ schema("/rules/:id/metrics/reset") -> put => #{ tags => [<<"rules">>], description => ?DESC("api7"), - summary => <<"Reset a Rule Metrics">>, + summary => <<"Reset rule metrics">>, parameters => param_path_id(), responses => #{ 404 => error_schema('NOT_FOUND', "Rule not found"), @@ -281,7 +286,7 @@ schema("/rule_test") -> post => #{ tags => [<<"rules">>], description => ?DESC("api8"), - summary => <<"Test a Rule">>, + summary => <<"Test a rule">>, 'requestBody' 
=> rule_test_schema(), responses => #{ 400 => error_schema('BAD_REQUEST', "Invalid Parameters"), @@ -289,6 +294,26 @@ schema("/rule_test") -> 200 => <<"Rule Test Pass">> } } + }; +schema("/rule_engine") -> + #{ + 'operationId' => '/rule_engine', + get => #{ + tags => [<<"rules">>], + description => ?DESC("api9"), + responses => #{ + 200 => rule_engine_schema() + } + }, + put => #{ + tags => [<<"rules">>], + description => ?DESC("api10"), + 'requestBody' => rule_engine_schema(), + responses => #{ + 200 => rule_engine_schema(), + 400 => error_schema('BAD_REQUEST', "Invalid request") + } + } }. param_path_id() -> @@ -309,7 +334,7 @@ param_path_id() -> QueryString, ?RULE_QS_SCHEMA, fun ?MODULE:qs2ms/2, - fun ?MODULE:format_rule_resp/1 + fun ?MODULE:format_rule_info_resp/1 ) of {error, page_limit_invalid} -> @@ -318,7 +343,7 @@ param_path_id() -> {200, Result} end; '/rules'(post, #{body := Params0}) -> - case maps:get(<<"id">>, Params0, list_to_binary(emqx_misc:gen_id(8))) of + case maps:get(<<"id">>, Params0, list_to_binary(emqx_utils:gen_id(8))) of <<>> -> {400, #{code => 'BAD_REQUEST', message => <<"empty rule id is not allowed">>}}; Id -> @@ -331,7 +356,7 @@ param_path_id() -> case emqx_conf:update(ConfPath, Params, #{override_to => cluster}) of {ok, #{post_config_update := #{emqx_rule_engine := AllRules}}} -> [Rule] = get_one_rule(AllRules, Id), - {201, format_rule_resp(Rule)}; + {201, format_rule_info_resp(Rule)}; {error, Reason} -> ?SLOG(error, #{ msg => "create_rule_failed", @@ -362,7 +387,7 @@ param_path_id() -> '/rules/:id'(get, #{bindings := #{id := Id}}) -> case emqx_rule_engine:get_rule(Id) of {ok, Rule} -> - {200, format_rule_resp(Rule)}; + {200, format_rule_info_resp(Rule)}; not_found -> {404, #{code => 'NOT_FOUND', message => <<"Rule Id Not Found">>}} end; @@ -372,7 +397,7 @@ param_path_id() -> case emqx_conf:update(ConfPath, Params, #{override_to => cluster}) of {ok, #{post_config_update := #{emqx_rule_engine := AllRules}}} -> [Rule] = get_one_rule(AllRules, Id), - {200, format_rule_resp(Rule)}; + {200, format_rule_info_resp(Rule)}; {error, Reason} -> ?SLOG(error, #{ msg => "update_rule_failed", @@ -419,17 +444,40 @@ param_path_id() -> {404, #{code => 'NOT_FOUND', message => <<"Rule Id Not Found">>}} end. +'/rule_engine'(get, _Params) -> + {200, format_rule_engine_resp(emqx_conf:get([rule_engine]))}; +'/rule_engine'(put, #{body := Params}) -> + case rule_engine_update(Params) of + {ok, Config} -> + {200, format_rule_engine_resp(Config)}; + {error, Reason} -> + {400, #{code => 'BAD_REQUEST', message => ?ERR_BADARGS(Reason)}} + end. + %%------------------------------------------------------------------------------ %% Internal functions %%------------------------------------------------------------------------------ -err_msg(Msg) -> emqx_misc:readable_error_msg(Msg). +err_msg({RuleError, {_E, Reason, _S}}) -> + emqx_utils:readable_error_msg(encode_nested_error(RuleError, Reason)); +err_msg({Reason, _Details}) -> + emqx_utils:readable_error_msg(Reason); +err_msg(Msg) -> + emqx_utils:readable_error_msg(Msg). -format_rule_resp(Rules) when is_list(Rules) -> - [format_rule_resp(R) || R <- Rules]; -format_rule_resp({Id, Rule}) -> - format_rule_resp(Rule#{id => Id}); -format_rule_resp(#{ +encode_nested_error(RuleError, Reason) when is_tuple(Reason) -> + encode_nested_error(RuleError, element(1, Reason)); +encode_nested_error(RuleError, Reason) -> + case emqx_utils_json:safe_encode([{RuleError, Reason}]) of + {ok, Json} -> + Json; + _ -> + {RuleError, Reason} + end. 
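A sketch of what `err_msg/1` and `encode_nested_error/2` above produce for a nested rule error; the concrete tags come from the assertions in the API test suite further down:

```erlang
%% {select_and_transform_error, {error, {decode_json_failed, _}, _Stack}} is
%% flattened to its innermost tag and JSON-encoded for the 400 response body.
{ok, Json} =
    emqx_utils_json:safe_encode([{select_and_transform_error, decode_json_failed}]),
%% Json is <<"{\"select_and_transform_error\":\"decode_json_failed\"}">>.
```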
+ +format_rule_info_resp({Id, Rule}) -> + format_rule_info_resp(Rule#{id => Id}); +format_rule_info_resp(#{ id := Id, name := Name, created_at := CreatedAt, @@ -450,6 +498,9 @@ format_rule_resp(#{ description => Descr }. +format_rule_engine_resp(Config) -> + maps:remove(rules, Config). + format_datetime(Timestamp, Unit) -> list_to_binary(calendar:system_time_to_rfc3339(Timestamp, [{unit, Unit}])). @@ -514,7 +565,7 @@ get_rule_metrics(Id) -> end, [ Format(Node, emqx_plugin_libs_proto_v1:get_metrics(Node, rule_metrics, Id)) - || Node <- mria_mnesia:running_nodes() + || Node <- mria:running_nodes() ]. aggregate_metrics(AllMetrics) -> @@ -646,3 +697,14 @@ run_fuzzy_match(E = {_Id, #{from := Topics}}, [{from, like, Pattern} | Fuzzy]) - run_fuzzy_match(E, Fuzzy); run_fuzzy_match(E, [_ | Fuzzy]) -> run_fuzzy_match(E, Fuzzy). + +rule_engine_update(Params) -> + case emqx_rule_api_schema:check_params(Params, rule_engine) of + {ok, _CheckedParams} -> + {ok, #{config := Config}} = emqx_conf:update([rule_engine], Params, #{ + override_to => cluster + }), + {ok, Config}; + {error, Reason} -> + {error, Reason} + end. diff --git a/apps/emqx_rule_engine/src/emqx_rule_engine_app.erl b/apps/emqx_rule_engine/src/emqx_rule_engine_app.erl index d7ea6c6d1..14d2b1f95 100644 --- a/apps/emqx_rule_engine/src/emqx_rule_engine_app.erl +++ b/apps/emqx_rule_engine/src/emqx_rule_engine_app.erl @@ -25,7 +25,7 @@ -export([stop/1]). start(_Type, _Args) -> - _ = ets:new(?RULE_TAB, [named_table, public, set, {read_concurrency, true}]), + _ = ets:new(?RULE_TAB, [named_table, public, ordered_set, {read_concurrency, true}]), ok = emqx_rule_events:reload(), SupRet = emqx_rule_engine_sup:start_link(), ok = emqx_rule_engine:load_rules(), diff --git a/apps/emqx_rule_engine/src/emqx_rule_engine_schema.erl b/apps/emqx_rule_engine/src/emqx_rule_engine_schema.erl index cbe7dae82..bc8cae07a 100644 --- a/apps/emqx_rule_engine/src/emqx_rule_engine_schema.erl +++ b/apps/emqx_rule_engine/src/emqx_rule_engine_schema.erl @@ -23,44 +23,31 @@ -export([ namespace/0, + tags/0, roots/0, fields/1, desc/1, - post_config_update/5 + post_config_update/5, + rule_engine_settings/0 ]). -export([validate_sql/1]). namespace() -> rule_engine. -roots() -> ["rule_engine"]. +tags() -> + [<<"Rule Engine">>]. + +roots() -> [{"rule_engine", ?HOCON(?R_REF("rule_engine"), #{importance => ?IMPORTANCE_LOW})}]. fields("rule_engine") -> - [ - {ignore_sys_message, - ?HOCON(boolean(), #{default => true, desc => ?DESC("rule_engine_ignore_sys_message")})}, - {rules, - ?HOCON(hoconsc:map("id", ?R_REF("rules")), #{ - desc => ?DESC("rule_engine_rules"), default => #{} - })}, - {jq_function_default_timeout, - ?HOCON( - emqx_schema:duration_ms(), - #{ - default => "10s", - desc => ?DESC("rule_engine_jq_function_default_timeout") - } - )}, - {jq_implementation_module, - ?HOCON( - hoconsc:enum([jq_nif, jq_port]), - #{ - default => jq_nif, - mapping => "jq.jq_implementation_module", - desc => ?DESC("rule_engine_jq_implementation_module") - } - )} - ]; + rule_engine_settings() ++ + [ + {rules, + ?HOCON(hoconsc:map("id", ?R_REF("rules")), #{ + desc => ?DESC("rule_engine_rules"), default => #{} + })} + ]; fields("rules") -> [ rule_name(), @@ -223,6 +210,31 @@ actions() -> qos() -> ?UNION([emqx_schema:qos(), binary()]). 
+rule_engine_settings() -> + [ + {ignore_sys_message, + ?HOCON(boolean(), #{default => true, desc => ?DESC("rule_engine_ignore_sys_message")})}, + {jq_function_default_timeout, + ?HOCON( + emqx_schema:duration_ms(), + #{ + default => <<"10s">>, + desc => ?DESC("rule_engine_jq_function_default_timeout") + } + )}, + {jq_implementation_module, + ?HOCON( + hoconsc:enum([jq_nif, jq_port]), + #{ + default => jq_nif, + mapping => "jq.jq_implementation_module", + desc => ?DESC("rule_engine_jq_implementation_module"), + deprecated => {since, "v5.0.22"}, + importance => ?IMPORTANCE_HIDDEN + } + )} + ]. + validate_sql(Sql) -> case emqx_rule_sqlparser:parse(Sql) of {ok, _Result} -> ok; diff --git a/apps/emqx_rule_engine/src/emqx_rule_events.erl b/apps/emqx_rule_engine/src/emqx_rule_events.erl index 0c962f1fa..7f14f6d8b 100644 --- a/apps/emqx_rule_engine/src/emqx_rule_events.erl +++ b/apps/emqx_rule_engine/src/emqx_rule_events.erl @@ -20,6 +20,7 @@ -include_lib("emqx/include/emqx.hrl"). -include_lib("emqx/include/logger.hrl"). -include_lib("emqx/include/emqx_hooks.hrl"). +-include_lib("emqx_bridge/include/emqx_bridge_resource.hrl"). -export([ reload/0, @@ -1011,7 +1012,7 @@ hook_fun_name(HookPoint) -> HookFunName. %% return static function references to help static code checks -hook_fun(<<"$bridges/", _/binary>>) -> fun ?MODULE:on_bridge_message_received/2; +hook_fun(?BRIDGE_HOOKPOINT(_)) -> fun ?MODULE:on_bridge_message_received/2; hook_fun('client.connected') -> fun ?MODULE:on_client_connected/3; hook_fun('client.disconnected') -> fun ?MODULE:on_client_disconnected/4; hook_fun('client.connack') -> fun ?MODULE:on_client_connack/4; @@ -1034,7 +1035,7 @@ ntoa(undefined) -> undefined; ntoa({IpAddr, Port}) -> iolist_to_binary([inet:ntoa(IpAddr), ":", integer_to_list(Port)]); ntoa(IpAddr) -> iolist_to_binary(inet:ntoa(IpAddr)). -event_name(<<"$bridges/", _/binary>> = Bridge) -> Bridge; +event_name(?BRIDGE_HOOKPOINT(_) = Bridge) -> Bridge; event_name(<<"$events/client_connected">>) -> 'client.connected'; event_name(<<"$events/client_disconnected">>) -> 'client.disconnected'; event_name(<<"$events/client_connack">>) -> 'client.connack'; @@ -1047,7 +1048,7 @@ event_name(<<"$events/message_dropped">>) -> 'message.dropped'; event_name(<<"$events/delivery_dropped">>) -> 'delivery.dropped'; event_name(_) -> 'message.publish'. -event_topic(<<"$bridges/", _/binary>> = Bridge) -> Bridge; +event_topic(?BRIDGE_HOOKPOINT(_) = Bridge) -> Bridge; event_topic('client.connected') -> <<"$events/client_connected">>; event_topic('client.disconnected') -> <<"$events/client_disconnected">>; event_topic('client.connack') -> <<"$events/client_connack">>; diff --git a/apps/emqx_rule_engine/src/emqx_rule_funcs.erl b/apps/emqx_rule_engine/src/emqx_rule_funcs.erl index b8bfeb84c..02163f95b 100644 --- a/apps/emqx_rule_engine/src/emqx_rule_funcs.erl +++ b/apps/emqx_rule_engine/src/emqx_rule_funcs.erl @@ -227,9 +227,20 @@ now_timestamp/1, format_date/3, format_date/4, + date_to_unix_ts/3, date_to_unix_ts/4 ]). +%% MongoDB specific date functions. These functions return a date tuple. The +%% MongoDB bridge converts such date tuples to a MongoDB date type. The +%% following functions are therefore only useful for rules with at least one +%% MongoDB action. +-export([ + mongo_date/0, + mongo_date/1, + mongo_date/2 +]). + %% Proc Dict Func -export([ proc_dict_get/1, @@ -643,10 +654,10 @@ map(Data) -> emqx_plugin_libs_rule:map(Data). bin2hexstr(Bin) when is_binary(Bin) -> - emqx_misc:bin_to_hexstr(Bin, upper). 
+ emqx_utils:bin_to_hexstr(Bin, upper). hexstr2bin(Str) when is_binary(Str) -> - emqx_misc:hexstr_to_bin(Str). + emqx_utils:hexstr_to_bin(Str). %%------------------------------------------------------------------------------ %% NULL Funcs @@ -944,7 +955,7 @@ sha256(S) when is_binary(S) -> hash(sha256, S). hash(Type, Data) -> - emqx_misc:bin_to_hexstr(crypto:hash(Type, Data), lower). + emqx_utils:bin_to_hexstr(crypto:hash(Type, Data), lower). %%------------------------------------------------------------------------------ %% gzip Funcs @@ -987,10 +998,10 @@ base64_decode(Data) when is_binary(Data) -> base64:decode(Data). json_encode(Data) -> - emqx_json:encode(Data). + emqx_utils_json:encode(Data). json_decode(Data) -> - emqx_json:decode(Data, [return_maps]). + emqx_utils_json:decode(Data, [return_maps]). term_encode(Term) -> erlang:term_to_binary(Term). @@ -1085,6 +1096,9 @@ format_date(TimeUnit, Offset, FormatString, TimeEpoch) -> ) ). +date_to_unix_ts(TimeUnit, FormatString, InputString) -> + date_to_unix_ts(TimeUnit, "Z", FormatString, InputString). + date_to_unix_ts(TimeUnit, Offset, FormatString, InputString) -> emqx_rule_date:parse_date( time_unit(TimeUnit), @@ -1097,26 +1111,27 @@ date_to_unix_ts(TimeUnit, Offset, FormatString, InputString) -> %% Here the emqx_rule_funcs module acts as a proxy, forwarding %% the function handling to the worker module. %% @end -% '$handle_undefined_function'(schema_decode, [SchemaId, Data|MoreArgs]) -> -% emqx_schema_parser:decode(SchemaId, Data, MoreArgs); -% '$handle_undefined_function'(schema_decode, Args) -> -% error({args_count_error, {schema_decode, Args}}); - -% '$handle_undefined_function'(schema_encode, [SchemaId, Term|MoreArgs]) -> -% emqx_schema_parser:encode(SchemaId, Term, MoreArgs); -% '$handle_undefined_function'(schema_encode, Args) -> -% error({args_count_error, {schema_encode, Args}}); - -% '$handle_undefined_function'(sprintf, [Format|Args]) -> -% erlang:apply(fun sprintf_s/2, [Format, Args]); - -% '$handle_undefined_function'(Fun, Args) -> -% error({sql_function_not_supported, function_literal(Fun, Args)}). - +-if(?EMQX_RELEASE_EDITION == ee). +%% EE +'$handle_undefined_function'(schema_decode, [SchemaId, Data | MoreArgs]) -> + emqx_ee_schema_registry_serde:decode(SchemaId, Data, MoreArgs); +'$handle_undefined_function'(schema_decode, Args) -> + error({args_count_error, {schema_decode, Args}}); +'$handle_undefined_function'(schema_encode, [SchemaId, Term | MoreArgs]) -> + emqx_ee_schema_registry_serde:encode(SchemaId, Term, MoreArgs); +'$handle_undefined_function'(schema_encode, Args) -> + error({args_count_error, {schema_encode, Args}}); '$handle_undefined_function'(sprintf, [Format | Args]) -> erlang:apply(fun sprintf_s/2, [Format, Args]); '$handle_undefined_function'(Fun, Args) -> error({sql_function_not_supported, function_literal(Fun, Args)}). +-else. +%% CE +'$handle_undefined_function'(sprintf, [Format | Args]) -> + erlang:apply(fun sprintf_s/2, [Format, Args]); +'$handle_undefined_function'(Fun, Args) -> + error({sql_function_not_supported, function_literal(Fun, Args)}). +-endif. map_path(Key) -> {path, [{key, P} || P <- string:split(Key, ".", all)]}. @@ -1134,3 +1149,21 @@ function_literal(Fun, [FArg | Args]) when is_atom(Fun), is_list(Args) -> ) ++ ")"; function_literal(Fun, Args) -> {invalid_func, {Fun, Args}}. + +mongo_date() -> + erlang:timestamp(). + +mongo_date(MillisecondsTimestamp) -> + convert_timestamp(MillisecondsTimestamp). 
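A worked example of the millisecond-to-Erlang-timestamp conversion performed by `convert_timestamp/1` below (the input value is illustrative):

```erlang
%% mongo_date/1 on 1596124531250 ms since the epoch:
%%   MicroTimestamp = 1596124531250000
%%   MegaSecs       = 1596124531250000 div 1000_000_000_000 = 1596
%%   Secs           = 1596124531 - 1596 * 1000_000          = 124531
%%   MicroSecs      = 1596124531250000 rem 1000_000         = 250000
{1596, 124531, 250000} = emqx_rule_funcs:mongo_date(1596124531250).
```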
+ +mongo_date(Timestamp, Unit) -> + InsertedTimeUnit = time_unit(Unit), + ScaledEpoch = erlang:convert_time_unit(Timestamp, InsertedTimeUnit, millisecond), + convert_timestamp(ScaledEpoch). + +convert_timestamp(MillisecondsTimestamp) -> + MicroTimestamp = MillisecondsTimestamp * 1000, + MegaSecs = MicroTimestamp div 1000_000_000_000, + Secs = MicroTimestamp div 1000_000 - MegaSecs * 1000_000, + MicroSecs = MicroTimestamp rem 1000_000, + {MegaSecs, Secs, MicroSecs}. diff --git a/apps/emqx_rule_engine/src/emqx_rule_maps.erl b/apps/emqx_rule_engine/src/emqx_rule_maps.erl index 3e0ebc72d..3dfffca46 100644 --- a/apps/emqx_rule_engine/src/emqx_rule_maps.erl +++ b/apps/emqx_rule_engine/src/emqx_rule_maps.erl @@ -26,9 +26,13 @@ unsafe_atom_key_map/1 ]). +-include_lib("emqx/include/emqx_placeholder.hrl"). + nested_get(Key, Data) -> nested_get(Key, Data, undefined). +nested_get({var, ?PH_VAR_THIS}, Data, _Default) -> + Data; nested_get({var, Key}, Data, Default) -> general_map_get({key, Key}, Data, Data, Default); nested_get({path, Path}, Data, Default) when is_list(Path) -> @@ -82,7 +86,7 @@ general_map_put(Key, Val, Map, OrgData) -> ). general_find(KeyOrIndex, Data, OrgData, Handler) when is_binary(Data) -> - try emqx_json:decode(Data, [return_maps]) of + try emqx_utils_json:decode(Data, [return_maps]) of Json -> general_find(KeyOrIndex, Json, OrgData, Handler) catch _:_ -> Handler(not_found) diff --git a/apps/emqx_rule_engine/src/emqx_rule_runtime.erl b/apps/emqx_rule_engine/src/emqx_rule_runtime.erl index ed6cd22de..d7412d03c 100644 --- a/apps/emqx_rule_engine/src/emqx_rule_runtime.erl +++ b/apps/emqx_rule_engine/src/emqx_rule_runtime.erl @@ -495,7 +495,7 @@ cache_payload(DecodedP) -> safe_decode_and_cache(MaybeJson) -> try - cache_payload(emqx_json:decode(MaybeJson, [return_maps])) + cache_payload(emqx_utils_json:decode(MaybeJson, [return_maps])) catch _:_ -> error({decode_json_failed, MaybeJson}) end. @@ -508,12 +508,12 @@ nested_put(Alias, Val, Columns0) -> emqx_rule_maps:nested_put(Alias, Val, Columns). -define(IS_RES_DOWN(R), R == stopped; R == not_connected; R == not_found). -inc_action_metrics(ok, RuleId) -> - emqx_metrics_worker:inc(rule_metrics, RuleId, 'actions.success'); inc_action_metrics({error, {recoverable_error, _}}, RuleId) -> emqx_metrics_worker:inc(rule_metrics, RuleId, 'actions.failed.out_of_service'); inc_action_metrics(?RESOURCE_ERROR_M(R, _), RuleId) when ?IS_RES_DOWN(R) -> emqx_metrics_worker:inc(rule_metrics, RuleId, 'actions.failed.out_of_service'); +inc_action_metrics({error, {unrecoverable_error, _}}, RuleId) -> + emqx_metrics_worker:inc(rule_metrics, RuleId, 'actions.failed'); inc_action_metrics(R, RuleId) -> case is_ok_result(R) of false -> @@ -525,7 +525,9 @@ inc_action_metrics(R, RuleId) -> is_ok_result(ok) -> true; +is_ok_result({async_return, R}) -> + is_ok_result(R); is_ok_result(R) when is_tuple(R) -> ok == erlang:element(1, R); -is_ok_result(ok) -> +is_ok_result(_) -> false. diff --git a/apps/emqx_rule_engine/src/emqx_rule_sqltester.erl b/apps/emqx_rule_engine/src/emqx_rule_sqltester.erl index ff966b912..455efe389 100644 --- a/apps/emqx_rule_engine/src/emqx_rule_sqltester.erl +++ b/apps/emqx_rule_engine/src/emqx_rule_sqltester.erl @@ -40,11 +40,15 @@ test(#{sql := Sql, context := Context}) -> test_rule(Sql, Select, Context, EventTopics) end; {error, Reason} -> + ?SLOG(debug, #{ + msg => "rulesql_parse_error", + detail => Reason + }), {error, Reason} end. 
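A usage sketch for `emqx_rule_sqltester:test/1` above, using the same argument shape as the test suite below:

```erlang
%% Returns {ok, Columns} on success; parse failures now also emit a debug log.
{ok, #{<<"clientid">> := <<"abc">>, <<"tp">> := <<"t/tt">>}} =
    emqx_rule_sqltester:test(#{
        sql => <<"select clientid, topic as tp from \"t/tt\"">>,
        context => #{clientid => <<"abc">>, topic => <<"t/tt">>}
    }).
```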
test_rule(Sql, Select, Context, EventTopics) -> - RuleId = iolist_to_binary(["sql_tester:", emqx_misc:gen_id(16)]), + RuleId = iolist_to_binary(["sql_tester:", emqx_utils:gen_id(16)]), ok = emqx_rule_engine:maybe_add_metrics_for_rule(RuleId), Rule = #{ id => RuleId, diff --git a/apps/emqx_rule_engine/test/emqx_rule_engine_SUITE.erl b/apps/emqx_rule_engine/test/emqx_rule_engine_SUITE.erl index c986cd365..eb253e516 100644 --- a/apps/emqx_rule_engine/test/emqx_rule_engine_SUITE.erl +++ b/apps/emqx_rule_engine/test/emqx_rule_engine_SUITE.erl @@ -25,6 +25,8 @@ -include_lib("eunit/include/eunit.hrl"). -include_lib("common_test/include/ct.hrl"). +-import(emqx_common_test_helpers, [on_exit/1]). + %%-define(PROPTEST(M,F), true = proper:quickcheck(M:F())). -define(TMP_RULEID, atom_to_binary(?FUNCTION_NAME)). @@ -198,8 +200,11 @@ init_per_testcase(_TestCase, Config) -> end_per_testcase(t_events, Config) -> ets:delete(events_record_tab), - ok = delete_rule(?config(hook_points_rules, Config)); + ok = delete_rule(?config(hook_points_rules, Config)), + emqx_common_test_helpers:call_janitor(), + ok; end_per_testcase(_TestCase, _Config) -> + emqx_common_test_helpers:call_janitor(), ok. %%------------------------------------------------------------------------------ @@ -609,7 +614,9 @@ t_event_client_disconnected_normal(_Config) -> receive {publish, #{topic := T, payload := Payload}} -> ?assertEqual(RepubT, T), - ?assertMatch(#{<<"reason">> := <<"normal">>}, emqx_json:decode(Payload, [return_maps])) + ?assertMatch( + #{<<"reason">> := <<"normal">>}, emqx_utils_json:decode(Payload, [return_maps]) + ) after 1000 -> ct:fail(wait_for_repub_disconnected_normal) end, @@ -646,7 +653,9 @@ t_event_client_disconnected_kicked(_Config) -> receive {publish, #{topic := T, payload := Payload}} -> ?assertEqual(RepubT, T), - ?assertMatch(#{<<"reason">> := <<"kicked">>}, emqx_json:decode(Payload, [return_maps])) + ?assertMatch( + #{<<"reason">> := <<"kicked">>}, emqx_utils_json:decode(Payload, [return_maps]) + ) after 1000 -> ct:fail(wait_for_repub_disconnected_kicked) end, @@ -687,7 +696,7 @@ t_event_client_disconnected_discarded(_Config) -> {publish, #{topic := T, payload := Payload}} -> ?assertEqual(RepubT, T), ?assertMatch( - #{<<"reason">> := <<"discarded">>}, emqx_json:decode(Payload, [return_maps]) + #{<<"reason">> := <<"discarded">>}, emqx_utils_json:decode(Payload, [return_maps]) ) after 1000 -> ct:fail(wait_for_repub_disconnected_discarded) @@ -732,7 +741,7 @@ t_event_client_disconnected_takenover(_Config) -> {publish, #{topic := T, payload := Payload}} -> ?assertEqual(RepubT, T), ?assertMatch( - #{<<"reason">> := <<"takenover">>}, emqx_json:decode(Payload, [return_maps]) + #{<<"reason">> := <<"takenover">>}, emqx_utils_json:decode(Payload, [return_maps]) ) after 1000 -> ct:fail(wait_for_repub_disconnected_discarded) @@ -2629,6 +2638,39 @@ t_sqlparse_invalid_json(_Config) -> } ) ). 
+ +t_sqlparse_both_string_types_in_from(_Config) -> + %% Here is an SQL select statement with both string types in the FROM clause + SqlSelect = + "select clientid, topic as tp " + "from 't/tt', \"$events/client_connected\" ", + ?assertMatch( + {ok, #{<<"clientid">> := <<"abc">>, <<"tp">> := <<"t/tt">>}}, + emqx_rule_sqltester:test( + #{ + sql => SqlSelect, + context => #{clientid => <<"abc">>, topic => <<"t/tt">>} + } + ) + ), + %% Here is an SQL foreach statement with both string types in the FROM clause + SqlForeach = + "foreach payload.sensors " + "from 't/#', \"$events/client_connected\" ", + ?assertMatch( + {ok, []}, + emqx_rule_sqltester:test( + #{ + sql => SqlForeach, + context => + #{ + payload => <<"{\"sensors\": 1}">>, + topic => <<"t/a">> + } + } + ) + ). + %%------------------------------------------------------------------------------ %% Test cases for telemetry functions %%------------------------------------------------------------------------------ @@ -2683,6 +2725,24 @@ t_get_basic_usage_info_1(_Config) -> ), ok. +t_get_rule_ids_by_action_reference_ingress_bridge(_Config) -> + BridgeId = <<"mqtt:ingress">>, + RuleId = <<"rule:ingress_bridge_referenced">>, + {ok, _} = + emqx_rule_engine:create_rule( + #{ + id => RuleId, + sql => <<"select 1 from \"$bridges/", BridgeId/binary, "\"">>, + actions => [#{function => console}] + } + ), + on_exit(fun() -> emqx_rule_engine:delete_rule(RuleId) end), + ?assertMatch( + [RuleId], + emqx_rule_engine:get_rule_ids_by_action(BridgeId) + ), + ok. + %%------------------------------------------------------------------------------ %% Internal helpers %%------------------------------------------------------------------------------ @@ -2744,7 +2804,7 @@ verify_event(EventName) -> [ begin %% verify fields can be formatted to JSON string - _ = emqx_json:encode(Fields), + _ = emqx_utils_json:encode(Fields), %% verify metadata fields verify_metadata_fields(EventName, Fields), %% verify available fields for each event name diff --git a/apps/emqx_rule_engine/test/emqx_rule_engine_api_SUITE.erl b/apps/emqx_rule_engine/test/emqx_rule_engine_api_SUITE.erl index cbe43eaa6..8d7546fca 100644 --- a/apps/emqx_rule_engine/test/emqx_rule_engine_api_SUITE.erl +++ b/apps/emqx_rule_engine/test/emqx_rule_engine_api_SUITE.erl @@ -23,6 +23,14 @@ -include_lib("common_test/include/ct.hrl"). -define(CONF_DEFAULT, <<"rule_engine {rules {}}">>). +-define(SIMPLE_RULE(NAME_SUFFIX), #{ + <<"description">> => <<"A simple rule">>, + <<"enable">> => true, + <<"actions">> => [#{<<"function">> => <<"console">>}], + <<"sql">> => <<"SELECT * from \"t/1\"">>, + <<"name">> => <<"test_rule", NAME_SUFFIX/binary>> +}). +-define(SIMPLE_RULE(ID, NAME_SUFFIX), ?SIMPLE_RULE(NAME_SUFFIX)#{<<"id">> => ID}). all() -> emqx_common_test_helpers:all(?MODULE). @@ -37,15 +45,21 @@ end_per_suite(_Config) -> emqx_common_test_helpers:stop_apps([emqx_conf, emqx_rule_engine]), ok. +init_per_testcase(t_crud_rule_api, Config) -> + meck:new(emqx_utils_json, [passthrough]), + init_per_testcase(common, Config); init_per_testcase(_, Config) -> Config. +end_per_testcase(t_crud_rule_api, Config) -> + meck:unload(emqx_utils_json), + end_per_testcase(common, Config); end_per_testcase(_, _Config) -> {200, #{data := Rules}} = emqx_rule_engine_api:'/rules'(get, #{query_string => #{}}), lists:foreach( fun(#{id := Id}) -> - emqx_rule_engine_api:'/rules/:id'( + {204} = emqx_rule_engine_api:'/rules/:id'( delete, #{bindings => #{id => Id}} ) @@ -54,45 +68,38 @@ end_per_testcase(_, _Config) -> ). 
t_crud_rule_api(_Config) -> - RuleID = <<"my_rule">>, - Params0 = #{ - <<"description">> => <<"A simple rule">>, - <<"enable">> => true, - <<"id">> => RuleID, - <<"actions">> => [#{<<"function">> => <<"console">>}], - <<"sql">> => <<"SELECT * from \"t/1\"">>, - <<"name">> => <<"test_rule">> - }, - {201, Rule} = emqx_rule_engine_api:'/rules'(post, #{body => Params0}), - %% if we post again with the same params, it return with 400 "rule id already exists" - ?assertMatch( - {400, #{code := _, message := _Message}}, - emqx_rule_engine_api:'/rules'(post, #{body => Params0}) - ), + RuleId = <<"my_rule">>, + Rule = simple_rule_fixture(RuleId, <<>>), + ?assertEqual(RuleId, maps:get(id, Rule)), - ?assertEqual(RuleID, maps:get(id, Rule)), {200, #{data := Rules}} = emqx_rule_engine_api:'/rules'(get, #{query_string => #{}}), ct:pal("RList : ~p", [Rules]), ?assert(length(Rules) > 0), + %% if we post again with the same id, it return with 400 "rule id already exists" + ?assertMatch( + {400, #{code := _, message := _Message}}, + emqx_rule_engine_api:'/rules'(post, #{body => ?SIMPLE_RULE(RuleId, <<"some_other">>)}) + ), + {204} = emqx_rule_engine_api:'/rules/:id/metrics/reset'(put, #{ - bindings => #{id => RuleID} + bindings => #{id => RuleId} }), - {200, Rule1} = emqx_rule_engine_api:'/rules/:id'(get, #{bindings => #{id => RuleID}}), + {200, Rule1} = emqx_rule_engine_api:'/rules/:id'(get, #{bindings => #{id => RuleId}}), ct:pal("RShow : ~p", [Rule1]), ?assertEqual(Rule, Rule1), - {200, Metrics} = emqx_rule_engine_api:'/rules/:id/metrics'(get, #{bindings => #{id => RuleID}}), + {200, Metrics} = emqx_rule_engine_api:'/rules/:id/metrics'(get, #{bindings => #{id => RuleId}}), ct:pal("RMetrics : ~p", [Metrics]), - ?assertMatch(#{id := RuleID, metrics := _, node_metrics := _}, Metrics), + ?assertMatch(#{id := RuleId, metrics := _, node_metrics := _}, Metrics), {200, Rule2} = emqx_rule_engine_api:'/rules/:id'(put, #{ - bindings => #{id => RuleID}, - body => Params0#{<<"sql">> => <<"select * from \"t/b\"">>} + bindings => #{id => RuleId}, + body => ?SIMPLE_RULE(RuleId)#{<<"sql">> => <<"select * from \"t/b\"">>} }), - {200, Rule3} = emqx_rule_engine_api:'/rules/:id'(get, #{bindings => #{id => RuleID}}), + {200, Rule3} = emqx_rule_engine_api:'/rules/:id'(get, #{bindings => #{id => RuleId}}), %ct:pal("RShow : ~p", [Rule3]), ?assertEqual(Rule3, Rule2), ?assertEqual(<<"select * from \"t/b\"">>, maps:get(sql, Rule3)), @@ -109,50 +116,77 @@ t_crud_rule_api(_Config) -> {204}, emqx_rule_engine_api:'/rules/:id'( delete, - #{bindings => #{id => RuleID}} + #{bindings => #{id => RuleId}} ) ), %ct:pal("Show After Deleted: ~p", [NotFound]), ?assertMatch( {404, #{code := _, message := _Message}}, - emqx_rule_engine_api:'/rules/:id'(get, #{bindings => #{id => RuleID}}) + emqx_rule_engine_api:'/rules/:id'(get, #{bindings => #{id => RuleId}}) ), + {400, #{ + code := 'BAD_REQUEST', + message := SelectAndTransformJsonError + }} = + emqx_rule_engine_api:'/rule_test'( + post, + test_rule_params(<<"SELECT\n payload.msg\nFROM\n \"t/#\"">>, <<"{\"msg\": \"hel">>) + ), + ?assertMatch( + #{<<"select_and_transform_error">> := <<"decode_json_failed">>}, + emqx_utils_json:decode(SelectAndTransformJsonError, [return_maps]) + ), + {400, #{ + code := 'BAD_REQUEST', + message := SelectAndTransformBadArgError + }} = + emqx_rule_engine_api:'/rule_test'( + post, + test_rule_params( + <<"SELECT\n payload.msg > 1\nFROM\n \"t/#\"">>, <<"{\"msg\": \"hello\"}">> + ) + ), + ?assertMatch( + #{<<"select_and_transform_error">> := <<"badarg">>}, + 
emqx_utils_json:decode(SelectAndTransformBadArgError, [return_maps]) + ), + {400, #{ + code := 'BAD_REQUEST', + message := BadSqlMessage + }} = emqx_rule_engine_api:'/rule_test'( + post, + test_rule_params( + <<"BAD_SQL">>, <<"{\"msg\": \"hello\"}">> + ) + ), + ?assertMatch({match, _}, re:run(BadSqlMessage, "syntax error")), + meck:expect(emqx_utils_json, safe_encode, 1, {error, foo}), ?assertMatch( {400, #{ code := 'BAD_REQUEST', - message := <<"{select_and_transform_error,{error,{decode_json_failed,", _/binary>> + message := <<"{select_and_transform_error,badarg}">> }}, - emqx_rule_engine_api:'/rule_test'(post, test_rule_params()) + emqx_rule_engine_api:'/rule_test'( + post, + test_rule_params( + <<"SELECT\n payload.msg > 1\nFROM\n \"t/#\"">>, <<"{\"msg\": \"hello\"}">> + ) + ) ), ok. t_list_rule_api(_Config) -> - AddIds = - lists:map( - fun(Seq0) -> - Seq = integer_to_binary(Seq0), - Params = #{ - <<"description">> => <<"A simple rule">>, - <<"enable">> => true, - <<"actions">> => [#{<<"function">> => <<"console">>}], - <<"sql">> => <<"SELECT * from \"t/1\"">>, - <<"name">> => <<"test_rule", Seq/binary>> - }, - {201, #{id := Id}} = emqx_rule_engine_api:'/rules'(post, #{body => Params}), - Id - end, - lists:seq(1, 20) - ), - + AddIds = rules_fixture(20), + ct:pal("rule ids: ~p", [AddIds]), {200, #{data := Rules, meta := #{count := Count}}} = emqx_rule_engine_api:'/rules'(get, #{query_string => #{}}), ?assertEqual(20, length(AddIds)), ?assertEqual(20, length(Rules)), ?assertEqual(20, Count), - [RuleID | _] = AddIds, + [RuleId | _] = AddIds, UpdateParams = #{ <<"description">> => <<"中文的描述也能搜索"/utf8>>, <<"enable">> => false, @@ -161,7 +195,7 @@ t_list_rule_api(_Config) -> <<"name">> => <<"test_rule_update1">> }, {200, _Rule2} = emqx_rule_engine_api:'/rules/:id'(put, #{ - bindings => #{id => RuleID}, + bindings => #{id => RuleId}, body => UpdateParams }), QueryStr1 = #{query_string => #{<<"enable">> => false}}, @@ -184,24 +218,90 @@ t_list_rule_api(_Config) -> {200, Result5} = emqx_rule_engine_api:'/rules'(get, QueryStr5), ?assertEqual(maps:get(data, Result1), maps:get(data, Result5)), - QueryStr6 = #{query_string => #{<<"like_id">> => RuleID}}, + QueryStr6 = #{query_string => #{<<"like_id">> => RuleId}}, {200, Result6} = emqx_rule_engine_api:'/rules'(get, QueryStr6), ?assertEqual(maps:get(data, Result1), maps:get(data, Result6)), ok. -test_rule_params() -> +t_reset_metrics_on_disable(_Config) -> + #{id := RuleId} = simple_rule_fixture(), + + %% generate some fake metrics + emqx_metrics_worker:inc(rule_metrics, RuleId, 'matched', 10), + emqx_metrics_worker:inc(rule_metrics, RuleId, 'passed', 10), + {200, #{metrics := Metrics0}} = emqx_rule_engine_api:'/rules/:id/metrics'( + get, + #{bindings => #{id => RuleId}} + ), + ?assertMatch(#{passed := 10, matched := 10}, Metrics0), + + %% disable the rule; metrics should be reset + {200, _Rule2} = emqx_rule_engine_api:'/rules/:id'(put, #{ + bindings => #{id => RuleId}, + body => #{<<"enable">> => false} + }), + + {200, #{metrics := Metrics1}} = emqx_rule_engine_api:'/rules/:id/metrics'( + get, + #{bindings => #{id => RuleId}} + ), + ?assertMatch(#{passed := 0, matched := 0}, Metrics1), + ok. 
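The test above relies on the `maybe_add_metrics_for_rule/1` change earlier in this diff: metrics that already exist are now reset when a rule is re-created or updated. A minimal sketch, assuming `reset_metrics_for_rule/1` is exported as the `metrics/reset` endpoint suggests:

```erlang
%% Bump a counter, then reset it; the assertions above expect zeroed values.
emqx_metrics_worker:inc(rule_metrics, RuleId, 'matched', 10),
ok = emqx_rule_engine:reset_metrics_for_rule(RuleId).
```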
+ +test_rule_params(Sql, Payload) -> #{ body => #{ <<"context">> => #{ <<"clientid">> => <<"c_emqx">>, <<"event_type">> => <<"message_publish">>, - <<"payload">> => <<"{\"msg\": \"hel">>, + <<"payload">> => Payload, <<"qos">> => 1, <<"topic">> => <<"t/a">>, <<"username">> => <<"u_emqx">> }, - <<"sql">> => - <<"SELECT\n payload.msg\nFROM\n \"t/#\"">> + <<"sql">> => Sql } }. + +t_rule_engine(_) -> + _ = simple_rule_fixture(), + {200, Config} = emqx_rule_engine_api:'/rule_engine'(get, #{}), + ?assert(not maps:is_key(rules, Config)), + {200, #{ + jq_function_default_timeout := 12000 + % hidden! jq_implementation_module := jq_port + }} = emqx_rule_engine_api:'/rule_engine'(put, #{ + body => #{ + <<"jq_function_default_timeout">> => <<"12s">>, + <<"jq_implementation_module">> => <<"jq_port">> + } + }), + SomeRule = #{<<"sql">> => <<"SELECT * FROM \"t/#\"">>}, + {400, _} = emqx_rule_engine_api:'/rule_engine'(put, #{ + body => #{<<"rules">> => #{<<"some_rule">> => SomeRule}} + }), + {400, _} = emqx_rule_engine_api:'/rule_engine'(put, #{body => #{<<"something">> => <<"weird">>}}). + +rules_fixture(N) -> + lists:map( + fun(Seq0) -> + Seq = integer_to_binary(Seq0), + #{id := Id} = simple_rule_fixture(Seq), + Id + end, + lists:seq(1, N) + ). + +simple_rule_fixture() -> + simple_rule_fixture(<<>>). + +simple_rule_fixture(NameSuffix) -> + create_rule(?SIMPLE_RULE(NameSuffix)). + +simple_rule_fixture(Id, NameSuffix) -> + create_rule(?SIMPLE_RULE(Id, NameSuffix)). + +create_rule(Params) -> + {201, Rule} = emqx_rule_engine_api:'/rules'(post, #{body => Params}), + Rule. diff --git a/apps/emqx_rule_engine/test/emqx_rule_funcs_SUITE.erl b/apps/emqx_rule_engine/test/emqx_rule_funcs_SUITE.erl index 5d78f5e4a..d88637312 100644 --- a/apps/emqx_rule_engine/test/emqx_rule_funcs_SUITE.erl +++ b/apps/emqx_rule_engine/test/emqx_rule_funcs_SUITE.erl @@ -28,7 +28,7 @@ init_per_suite(Config) -> application:load(emqx_conf), - ConfigConf = <<"rule_engine {jq_function_default_timeout {}}">>, + ConfigConf = <<"rule_engine {jq_function_default_timeout=10s}">>, ok = emqx_common_test_helpers:load_config(emqx_rule_engine_schema, ConfigConf), Config. @@ -686,25 +686,10 @@ t_jq(_) -> %% Got timeout as expected got_timeout end, - ConfigRootKey = emqx_rule_engine_schema:namespace(), - DefaultTimeOut = emqx_config:get([ - ConfigRootKey, - jq_function_default_timeout - ]), - case DefaultTimeOut =< 15000 of - true -> - got_timeout = - try - apply_func(jq, [TOProgram, <<"-2">>]) - catch - throw:{jq_exception, {timeout, _}} -> - %% Got timeout as expected - got_timeout - end; - false -> - %% Skip test as we don't want it to take to long time to run - ok - end. + ?assertThrow( + {jq_exception, {timeout, _}}, + apply_func(jq, [TOProgram, <<"-2">>]) + ). ascii_string() -> list(range(0, 127)). 
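A round-trip sketch for the `date_to_unix_ts/3` variant added earlier in this diff: when no explicit offset argument is given, the offset is taken from the `%Z` field of the input string, which is what the new property below verifies:

```erlang
Ts = 1596124531,
Formatted = emqx_rule_funcs:format_date(
    <<"second">>, <<"+08:00">>, <<"%y-%m-%d-%H:%M:%S%Z">>, Ts
),
%% No offset argument: the +08:00 in Formatted is honored when parsing back.
Ts = emqx_rule_funcs:date_to_unix_ts(<<"second">>, <<"%y-%m-%d-%H:%M:%S%Z">>, Formatted).
```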
@@ -973,7 +958,7 @@ prop_format_date_fun() -> Args1 = [<<"second">>, <<"+07:00">>, <<"%m--%d--%y---%H:%M:%S%Z">>], ?FORALL( S, - erlang:system_time(second), + range(0, 4000000000), S == apply_func( date_to_unix_ts, @@ -989,7 +974,7 @@ prop_format_date_fun() -> Args2 = [<<"millisecond">>, <<"+04:00">>, <<"--%m--%d--%y---%H:%M:%S%Z">>], ?FORALL( S, - erlang:system_time(millisecond), + range(0, 4000000000), S == apply_func( date_to_unix_ts, @@ -1005,7 +990,7 @@ prop_format_date_fun() -> Args = [<<"second">>, <<"+08:00">>, <<"%y-%m-%d-%H:%M:%S%Z">>], ?FORALL( S, - erlang:system_time(second), + range(0, 4000000000), S == apply_func( date_to_unix_ts, @@ -1017,6 +1002,24 @@ prop_format_date_fun() -> ) ] ) + ), + %% When no offset is specified, the offset should be taken from the formatted time string + ArgsNoOffset = [<<"second">>, <<"%y-%m-%d-%H:%M:%S%Z">>], + ArgsOffset = [<<"second">>, <<"+08:00">>, <<"%y-%m-%d-%H:%M:%S%Z">>], + ?FORALL( + S, + range(0, 4000000000), + S == + apply_func( + date_to_unix_ts, + ArgsNoOffset ++ + [ + apply_func( + format_date, + ArgsOffset ++ [S] + ) + ] + ) ). %%------------------------------------------------------------------------------ diff --git a/apps/emqx_slow_subs/README.md b/apps/emqx_slow_subs/README.md new file mode 100644 index 000000000..8b83508c2 --- /dev/null +++ b/apps/emqx_slow_subs/README.md @@ -0,0 +1,47 @@ +# EMQX Slow Subscriptions + +This application measures the latency (time spent) of a message from its arrival at EMQX until its processing and transmission complete. + +If the latency exceeds a specified threshold, the application adds the subscriber and topic information to a list of slow subscriptions, or updates the existing record. + +For more information, see [Slow Subscriptions](https://www.emqx.io/docs/en/v5.0/observability/slow-subscribers-statistics.html). + +# Usage + +Add the section below to `emqx.conf` to enable this application: + +```yaml slow_subs { enable = true threshold = "500ms" expire_interval = "300s" top_k_num = 10 stats_type = whole } ``` + +# Configurations + +**threshold**: Latency threshold for statistics; only messages with latency exceeding this value are collected. + +Minimum value: 100ms Default value: 500ms + +**expire_interval**: Eviction time of a record, counted from its creation; records that are not triggered again within this period are removed from the ranking list. + +Default value: 300s + +**top_k_num**: Maximum number of records in the slow subscription statistics table. + +Maximum value: 1,000 Default value: 10 + +**stats_type**: How the latency is calculated: - **whole**: From the time the message arrives at EMQX until the message transmission completes - **internal**: From the time the message arrives at EMQX until EMQX starts delivering it - **response**: From the time EMQX starts delivering the message until the message transmission completes + +Default value: whole + +# Contributing +Please see our [contributing.md](../../CONTRIBUTING.md).
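A small sketch of reading the effective settings at runtime, assuming the HOCON section above has been loaded (duration fields are parsed to integer milliseconds):

```erlang
%% threshold = "500ms" and expire_interval = "300s" become integers:
500 = emqx_conf:get([slow_subs, threshold]),
300000 = emqx_conf:get([slow_subs, expire_interval]),
10 = emqx_conf:get([slow_subs, top_k_num]).
```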
diff --git a/apps/emqx_slow_subs/i18n/emqx_slow_subs_api_i18n.conf b/apps/emqx_slow_subs/i18n/emqx_slow_subs_api_i18n.conf deleted file mode 100644 index 92862bc98..000000000 --- a/apps/emqx_slow_subs/i18n/emqx_slow_subs_api_i18n.conf +++ /dev/null @@ -1,66 +0,0 @@ -emqx_slow_subs_api { - - clear_records_api { - desc { - en: "Clear current data and re count slow topic" - zh: "清除当前记录,然后重新开始统计" - } - } - - get_records_api { - desc { - en: "View slow topics statistics record data" - zh: "查看慢订阅的统计数据" - } - } - - get_setting_api { - desc { - en: "View slow subs settings" - zh: "查看配置" - } - } - - update_setting_api { - desc { - en: "Update slow subs settings" - zh: "更新配置" - } - } - - clientid { - desc { - en: "Message clientid" - zh: "消息的客户端 ID" - } - } - - node { - desc { - en: "Message node name" - zh: "消息的节点名称" - } - } - - topic { - desc { - en: "Message topic" - zh: "消息的主题" - } - } - - timespan { - desc { - en: "Timespan for message transmission" - zh: "消息的传输耗时" - } - } - - last_update_time { - desc { - en: "The timestamp of last update" - zh: "记录的更新时间戳" - } - } - -} diff --git a/apps/emqx_slow_subs/i18n/emqx_slow_subs_i18n.conf b/apps/emqx_slow_subs/i18n/emqx_slow_subs_i18n.conf deleted file mode 100644 index e65e802c2..000000000 --- a/apps/emqx_slow_subs/i18n/emqx_slow_subs_i18n.conf +++ /dev/null @@ -1,38 +0,0 @@ -emqx_slow_subs_schema { - - enable { - desc { - en: "Enable this feature" - zh: "开启慢订阅" - } - } - - threshold { - desc { - en: "The latency threshold for statistics" - zh: "慢订阅统计的阈值" - } - } - - expire_interval { - desc { - en: "The eviction time of the record, which in the statistics record table" - zh: "慢订阅记录的有效时间" - } - } - - top_k_num { - desc { - en: "The maximum number of records in the slow subscription statistics record table" - zh: "慢订阅统计表的记录数量上限" - } - } - - stats_type { - desc { - en: "The method to calculate the latency" - zh: "慢订阅的统计类型" - } - } - -} diff --git a/apps/emqx_slow_subs/rebar.config b/apps/emqx_slow_subs/rebar.config index 9f17b7657..dee2902a5 100644 --- a/apps/emqx_slow_subs/rebar.config +++ b/apps/emqx_slow_subs/rebar.config @@ -1,5 +1,8 @@ %% -*- mode: erlang -*- -{deps, [{emqx, {path, "../emqx"}}]}. +{deps, [ + {emqx, {path, "../emqx"}}, + {emqx_utils, {path, "../emqx_utils"}} +]}. {project_plugins, [erlfmt]}. diff --git a/apps/emqx_slow_subs/src/emqx_slow_subs.app.src b/apps/emqx_slow_subs/src/emqx_slow_subs.app.src index 866655b61..a06ff2595 100644 --- a/apps/emqx_slow_subs/src/emqx_slow_subs.app.src +++ b/apps/emqx_slow_subs/src/emqx_slow_subs.app.src @@ -1,7 +1,7 @@ {application, emqx_slow_subs, [ {description, "EMQX Slow Subscribers Statistics"}, % strict semver, bump manually! - {vsn, "1.0.2"}, + {vsn, "1.0.5"}, {modules, []}, {registered, [emqx_slow_subs_sup]}, {applications, [kernel, stdlib, emqx]}, diff --git a/apps/emqx_slow_subs/src/emqx_slow_subs_api.erl b/apps/emqx_slow_subs/src/emqx_slow_subs_api.erl index 8ebdd50c3..311bcf62e 100644 --- a/apps/emqx_slow_subs/src/emqx_slow_subs_api.erl +++ b/apps/emqx_slow_subs/src/emqx_slow_subs_api.erl @@ -147,5 +147,5 @@ settings(put, #{body := Body}) -> end. rpc_call(Fun) -> - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), Fun(Nodes). diff --git a/apps/emqx_slow_subs/src/emqx_slow_subs_schema.erl b/apps/emqx_slow_subs/src/emqx_slow_subs_schema.erl index 8ae015ae4..47ea18c3c 100644 --- a/apps/emqx_slow_subs/src/emqx_slow_subs_schema.erl +++ b/apps/emqx_slow_subs/src/emqx_slow_subs_schema.erl @@ -22,7 +22,8 @@ namespace() -> "slow_subs". -roots() -> ["slow_subs"]. 
+roots() -> + [{"slow_subs", ?HOCON(?R_REF("slow_subs"), #{importance => ?IMPORTANCE_HIDDEN})}]. fields("slow_subs") -> [ @@ -30,13 +31,13 @@ fields("slow_subs") -> {threshold, sc( emqx_schema:duration_ms(), - "500ms", + <<"500ms">>, threshold )}, {expire_interval, sc( emqx_schema:duration_ms(), - "300s", + <<"300s">>, expire_interval )}, {top_k_num, diff --git a/apps/emqx_slow_subs/test/emqx_slow_subs_api_SUITE.erl b/apps/emqx_slow_subs/test/emqx_slow_subs_api_SUITE.erl index 5b5ed063f..5196868c7 100644 --- a/apps/emqx_slow_subs/test/emqx_slow_subs_api_SUITE.erl +++ b/apps/emqx_slow_subs/test/emqx_slow_subs_api_SUITE.erl @@ -108,7 +108,7 @@ t_get_history(_) -> "page=1&limit=10", auth_header_() ), - #{<<"data">> := [First | _]} = emqx_json:decode(Data, [return_maps]), + #{<<"data">> := [First | _]} = emqx_utils_json:decode(Data, [return_maps]), ?assertMatch( #{ @@ -165,8 +165,8 @@ t_settting(_) -> ?assertEqual(Conf2#{stats_type := <<"internal">>}, GetReturn). decode_json(Data) -> - BinJosn = emqx_json:decode(Data, [return_maps]), - emqx_map_lib:unsafe_atom_key_map(BinJosn). + BinJson = emqx_utils_json:decode(Data, [return_maps]), + emqx_utils_maps:unsafe_atom_key_map(BinJson). request_api(Method, Url, Auth) -> request_api(Method, Url, [], Auth, []). @@ -187,7 +187,7 @@ request_api(Method, Url, QueryParams, Auth, Body) -> "" -> Url; _ -> Url ++ "?" ++ QueryParams end, - do_request_api(Method, {NewUrl, [Auth], "application/json", emqx_json:encode(Body)}). + do_request_api(Method, {NewUrl, [Auth], "application/json", emqx_utils_json:encode(Body)}). do_request_api(Method, Request) -> ct:pal("Method: ~p, Request: ~p", [Method, Request]), @@ -203,13 +203,7 @@ do_request_api(Method, Request) -> end. auth_header_() -> - AppId = <<"admin">>, - AppSecret = <<"public">>, - auth_header_(binary_to_list(AppId), binary_to_list(AppSecret)). - -auth_header_(User, Pass) -> - Encoded = base64:encode_to_string(lists:append([User, ":", Pass])), - {"Authorization", "Basic " ++ Encoded}. + emqx_mgmt_api_test_util:auth_header_(). api_path(Parts) -> ?HOST ++ filename:join([?BASE_PATH, ?API_VERSION] ++ Parts).
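For reference, a sketch of what the `decode_json/1` helper above does with `emqx_utils_maps:unsafe_atom_key_map/1` ("unsafe" because it may create new atoms from arbitrary binary keys):

```erlang
#{stats_type := <<"whole">>, top_k_num := 10} =
    emqx_utils_maps:unsafe_atom_key_map(
        #{<<"stats_type">> => <<"whole">>, <<"top_k_num">> => 10}
    ).
```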
diff --git a/apps/emqx_statsd/i18n/emqx_statsd_api_i18n.conf b/apps/emqx_statsd/i18n/emqx_statsd_api_i18n.conf deleted file mode 100644 index 2721188bd..000000000 --- a/apps/emqx_statsd/i18n/emqx_statsd_api_i18n.conf +++ /dev/null @@ -1,16 +0,0 @@ -emqx_statsd_api { - - get_statsd_config_api { - desc { - en: """List the configuration of StatsD metrics collection and push service.""" - zh: """列出 StatsD 指标采集和推送服务的的配置。""" - } - } - - update_statsd_config_api { - desc { - en: """Update the configuration of StatsD metrics collection and push service.""" - zh: """更新 StatsD 指标采集和推送服务的配置。""" - } - } -} diff --git a/apps/emqx_statsd/i18n/emqx_statsd_schema_i18n.conf b/apps/emqx_statsd/i18n/emqx_statsd_schema_i18n.conf deleted file mode 100644 index 46d654a46..000000000 --- a/apps/emqx_statsd/i18n/emqx_statsd_schema_i18n.conf +++ /dev/null @@ -1,61 +0,0 @@ -emqx_statsd_schema { - - get_statsd_config_api { - desc { - en: """List the configuration of StatsD metrics collection and push service.""" - zh: """列出 StatsD 指标采集和推送服务的的配置。""" - } - } - - update_statsd_config_api { - desc { - en: """Update the configuration of StatsD metrics collection and push service.""" - zh: """更新 StatsD 指标采集和推送服务的配置。""" - } - } - - statsd { - desc { - en: """StatsD metrics collection and push configuration.""" - zh: """StatsD 指标采集与推送配置。""" - } - label { - en: """StatsD""" - zh: """StatsD""" - } - } - - server { - desc { - en: """StatsD server address.""" - zh: """StatsD 服务器地址。""" - } - } - - sample_interval { - desc { - en: """The sampling interval for metrics.""" - zh: """指标的采样间隔。""" - } - } - - flush_interval { - desc { - en: """The push interval for metrics.""" - zh: """指标的推送间隔。""" - } - } - tags { - desc { - en: """The tags for metrics.""" - zh: """指标的标签。""" - } - } - - enable { - desc { - en: """Enable or disable StatsD metrics collection and push service.""" - zh: """启用或禁用 StatsD 指标采集和推送服务。""" - } - } -} diff --git a/apps/emqx_statsd/rebar.config b/apps/emqx_statsd/rebar.config index bb9a14272..a1383d920 100644 --- a/apps/emqx_statsd/rebar.config +++ b/apps/emqx_statsd/rebar.config @@ -3,6 +3,7 @@ {erl_opts, [debug_info]}. {deps, [ {emqx, {path, "../emqx"}}, + {emqx_utils, {path, "../emqx_utils"}}, {estatsd, {git, "https://github.com/emqx/estatsd", {tag, "0.1.0"}}} ]}. diff --git a/apps/emqx_statsd/src/emqx_statsd.app.src b/apps/emqx_statsd/src/emqx_statsd.app.src index 638c5a33b..87fc8c596 100644 --- a/apps/emqx_statsd/src/emqx_statsd.app.src +++ b/apps/emqx_statsd/src/emqx_statsd.app.src @@ -1,14 +1,15 @@ %% -*- mode: erlang -*- {application, emqx_statsd, [ {description, "EMQX Statsd"}, - {vsn, "5.0.4"}, + {vsn, "5.0.9"}, {registered, []}, {mod, {emqx_statsd_app, []}}, {applications, [ kernel, stdlib, estatsd, - emqx + emqx, + emqx_management ]}, {env, []}, {modules, []}, diff --git a/apps/emqx_statsd/src/emqx_statsd.erl b/apps/emqx_statsd/src/emqx_statsd.erl index defaf78e0..b2d726b07 100644 --- a/apps/emqx_statsd/src/emqx_statsd.erl +++ b/apps/emqx_statsd/src/emqx_statsd.erl @@ -38,7 +38,7 @@ ]). %% Interface --export([start_link/0]). +-export([start_link/1]). %% Internal Exports -export([ @@ -53,9 +53,9 @@ -define(SAMPLE_TIMEOUT, sample_timeout). %% Remove after 5.1.x -start() -> check_multicall_result(emqx_statsd_proto_v1:start(mria_mnesia:running_nodes())). -stop() -> check_multicall_result(emqx_statsd_proto_v1:stop(mria_mnesia:running_nodes())). -restart() -> check_multicall_result(emqx_statsd_proto_v1:restart(mria_mnesia:running_nodes())). 
+start() -> check_multicall_result(emqx_statsd_proto_v1:start(mria:running_nodes())). +stop() -> check_multicall_result(emqx_statsd_proto_v1:stop(mria:running_nodes())). +restart() -> check_multicall_result(emqx_statsd_proto_v1:restart(mria:running_nodes())). do_start() -> emqx_statsd_sup:ensure_child_started(?APP). @@ -68,25 +68,26 @@ do_restart() -> ok = do_start(), ok. -start_link() -> - gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). +start_link(Conf) -> + gen_server:start_link({local, ?MODULE}, ?MODULE, Conf, []). -init([]) -> +init(Conf) -> process_flag(trap_exit, true), #{ tags := TagsRaw, server := Server, sample_time_interval := SampleTimeInterval, flush_time_interval := FlushTimeInterval - } = emqx_conf:get([statsd]), - {Host, Port} = emqx_schema:parse_server(Server, ?SERVER_PARSE_OPTS), + } = Conf, + FlushTimeInterval1 = flush_interval(FlushTimeInterval, SampleTimeInterval), + #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, ?SERVER_PARSE_OPTS), Tags = maps:fold(fun(K, V, Acc) -> [{to_bin(K), to_bin(V)} | Acc] end, [], TagsRaw), Opts = [{tags, Tags}, {host, Host}, {port, Port}, {prefix, <<"emqx">>}], {ok, Pid} = estatsd:start_link(Opts), {ok, ensure_timer(#{ sample_time_interval => SampleTimeInterval, - flush_time_interval => FlushTimeInterval, + flush_time_interval => FlushTimeInterval1, estatsd_pid => Pid })}. @@ -105,7 +106,7 @@ handle_info( timer := Ref } ) -> - Metrics = emqx_metrics:all() ++ emqx_stats:getstats() ++ emqx_vm_data(), + Metrics = emqx_metrics:all() ++ emqx_stats:getstats() ++ emqx_mgmt:vm_stats(), SampleRate = SampleTimeInterval / FlushTimeInterval, StatsdMetrics = [ {gauge, Name, Value, SampleRate, []} @@ -129,22 +130,21 @@ terminate(_Reason, #{estatsd_pid := Pid}) -> %% Internal function %%------------------------------------------------------------------------------ -emqx_vm_data() -> - Idle = - case cpu_sup:util([detailed]) of - %% Not support for Windows - {_, 0, 0, _} -> 0; - {_Num, _Use, IdleList, _} -> proplists:get_value(idle, IdleList, 0) - end, - RunQueue = erlang:statistics(run_queue), - [ - {run_queue, RunQueue}, - {cpu_idle, Idle}, - {cpu_use, 100 - Idle} - ] ++ emqx_vm:mem_info(). +flush_interval(FlushInterval, SampleInterval) when FlushInterval >= SampleInterval -> + FlushInterval; +flush_interval(_FlushInterval, SampleInterval) -> + ?SLOG( + warning, + #{ + msg => + "Configured flush_time_interval is lower than sample_time_interval, " + "setting: flush_time_interval = sample_time_interval." + } + ), + SampleInterval. ensure_timer(State = #{sample_time_interval := SampleTimeInterval}) -> - State#{timer => emqx_misc:start_timer(SampleTimeInterval, ?SAMPLE_TIMEOUT)}. + State#{timer => emqx_utils:start_timer(SampleTimeInterval, ?SAMPLE_TIMEOUT)}. 
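A worked sketch of the `flush_interval/2` clamping above (internal to `emqx_statsd`):

```erlang
%% A flush interval shorter than the sample interval is raised to the sample
%% interval (with a warning logged):
4000 = flush_interval(2000, 4000),
%% Otherwise the configured value is kept:
30000 = flush_interval(30000, 4000).
```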
check_multicall_result({Results, []}) -> case diff --git a/apps/emqx_statsd/src/emqx_statsd_api.erl b/apps/emqx_statsd/src/emqx_statsd_api.erl index b1b3601aa..4ee144d57 100644 --- a/apps/emqx_statsd/src/emqx_statsd_api.erl +++ b/apps/emqx_statsd/src/emqx_statsd_api.erl @@ -49,6 +49,7 @@ schema("/statsd") -> 'operationId' => statsd, get => #{ + deprecated => true, description => ?DESC(get_statsd_config_api), tags => ?API_TAG_STATSD, responses => @@ -56,6 +57,7 @@ schema("/statsd") -> }, put => #{ + deprecated => true, description => ?DESC(update_statsd_config_api), tags => ?API_TAG_STATSD, 'requestBody' => statsd_config_schema(), @@ -77,9 +79,9 @@ statsd_config_schema() -> statsd_example() -> #{ enable => true, - flush_time_interval => "30s", - sample_time_interval => "30s", - server => "127.0.0.1:8125", + flush_time_interval => <<"30s">>, + sample_time_interval => <<"30s">>, + server => <<"127.0.0.1:8125">>, tags => #{} }. diff --git a/apps/emqx_statsd/src/emqx_statsd_config.erl b/apps/emqx_statsd/src/emqx_statsd_config.erl index b818d2691..6bc430956 100644 --- a/apps/emqx_statsd/src/emqx_statsd_config.erl +++ b/apps/emqx_statsd/src/emqx_statsd_config.erl @@ -45,9 +45,9 @@ remove_handler() -> ok = emqx_config_handler:remove_handler(?STATSD), ok. -post_config_update(?STATSD, _Req, #{enable := true}, _Old, _AppEnvs) -> +post_config_update(?STATSD, _Req, #{enable := true} = New, _Old, _AppEnvs) -> emqx_statsd_sup:ensure_child_stopped(?APP), - emqx_statsd_sup:ensure_child_started(?APP); + emqx_statsd_sup:ensure_child_started(?APP, New); post_config_update(?STATSD, _Req, #{enable := false}, _Old, _AppEnvs) -> emqx_statsd_sup:ensure_child_stopped(?APP); post_config_update(_ConfPath, _Req, _NewConf, _OldConf, _AppEnvs) -> diff --git a/apps/emqx_statsd/src/emqx_statsd_schema.erl b/apps/emqx_statsd/src/emqx_statsd_schema.erl index 1e5aa6e5f..01decc6f7 100644 --- a/apps/emqx_statsd/src/emqx_statsd_schema.erl +++ b/apps/emqx_statsd/src/emqx_statsd_schema.erl @@ -32,7 +32,8 @@ namespace() -> "statsd". -roots() -> ["statsd"]. +roots() -> + [{"statsd", hoconsc:mk(hoconsc:ref(?MODULE, "statsd"), #{importance => ?IMPORTANCE_HIDDEN})}]. fields("statsd") -> [ @@ -61,12 +62,12 @@ server() -> emqx_schema:servers_sc(Meta, ?SERVER_PARSE_OPTS). sample_interval(type) -> emqx_schema:duration_ms(); -sample_interval(default) -> "30s"; +sample_interval(default) -> <<"30s">>; sample_interval(desc) -> ?DESC(?FUNCTION_NAME); sample_interval(_) -> undefined. flush_interval(type) -> emqx_schema:duration_ms(); -flush_interval(default) -> "30s"; +flush_interval(default) -> <<"30s">>; flush_interval(desc) -> ?DESC(?FUNCTION_NAME); flush_interval(_) -> undefined. diff --git a/apps/emqx_statsd/src/emqx_statsd_sup.erl b/apps/emqx_statsd/src/emqx_statsd_sup.erl index 2845fb505..35c1d332c 100644 --- a/apps/emqx_statsd/src/emqx_statsd_sup.erl +++ b/apps/emqx_statsd/src/emqx_statsd_sup.erl @@ -25,6 +25,7 @@ -export([ start_link/0, ensure_child_started/1, + ensure_child_started/2, ensure_child_stopped/1 ]). @@ -45,7 +46,11 @@ start_link() -> -spec ensure_child_started(atom()) -> ok. ensure_child_started(Mod) when is_atom(Mod) -> - assert_started(supervisor:start_child(?MODULE, ?CHILD(Mod, []))). + ensure_child_started(Mod, emqx_conf:get([statsd], #{})). + +-spec ensure_child_started(atom(), map()) -> ok. +ensure_child_started(Mod, Conf) when is_atom(Mod) -> + assert_started(supervisor:start_child(?MODULE, ?CHILD(Mod, [Conf]))). %% @doc Stop the child worker process. -spec ensure_child_stopped(any()) -> ok. 
@@ -61,9 +66,9 @@ ensure_child_stopped(ChildId) -> init([]) -> Children = - case emqx_conf:get([statsd, enable], false) of - true -> [?CHILD(emqx_statsd, [])]; - false -> [] + case emqx_conf:get([statsd], #{}) of + #{enable := true} = Conf -> [?CHILD(emqx_statsd, [Conf])]; + _ -> [] end, {ok, {{one_for_one, 100, 3600}, Children}}. diff --git a/apps/emqx_statsd/test/emqx_statsd_SUITE.erl b/apps/emqx_statsd/test/emqx_statsd_SUITE.erl index 2f8fa5a69..8b8709c27 100644 --- a/apps/emqx_statsd/test/emqx_statsd_SUITE.erl +++ b/apps/emqx_statsd/test/emqx_statsd_SUITE.erl @@ -33,15 +33,33 @@ "tags {\"t1\" = \"good\", test = 100}\n" "}\n" >>). +-define(BAD_CONF, << + "\n" + "statsd {\n" + "enable = true\n" + "flush_time_interval = 4s\n" + "sample_time_interval = 4s\n" + "server = \"\"\n" + "tags {\"t1\" = \"good\", test = 100}\n" + "}\n" +>>). + +-define(DEFAULT_CONF, << + "\n" + "statsd {\n" + "enable = true\n" + "flush_time_interval = 4s\n" + "sample_time_interval = 4s\n" + "tags {\"t1\" = \"good\", test = 100}\n" + "}\n" +>>). init_per_suite(Config) -> emqx_common_test_helpers:start_apps( [emqx_conf, emqx_dashboard, emqx_statsd], fun set_special_configs/1 ), - ok = emqx_common_test_helpers:load_config(emqx_statsd_schema, ?BASE_CONF, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_statsd_schema, ?BASE_CONF), Config. end_per_suite(_Config) -> @@ -55,6 +73,29 @@ set_special_configs(_) -> all() -> emqx_common_test_helpers:all(?MODULE). +t_server_validator(_) -> + Server0 = emqx_conf:get_raw([statsd, server]), + ?assertThrow( + #{ + kind := validation_error, + path := "statsd.server", + reason := "cannot_be_empty", + value := "" + }, + emqx_common_test_helpers:load_config(emqx_statsd_schema, ?BAD_CONF) + ), + %% default + ok = emqx_common_test_helpers:load_config(emqx_statsd_schema, ?DEFAULT_CONF), + DefaultServer = default_server(), + ?assertEqual(DefaultServer, emqx_conf:get_raw([statsd, server])), + DefaultServerStr = binary_to_list(DefaultServer), + ?assertEqual(DefaultServerStr, emqx_conf:get([statsd, server])), + %% recover + ok = emqx_common_test_helpers:load_config(emqx_statsd_schema, ?BASE_CONF), + Server2 = emqx_conf:get_raw([statsd, server]), + ?assertMatch(Server0, Server2), + ok. + t_statsd(_) -> {ok, Socket} = gen_udp:open(8126, [{active, true}]), receive @@ -113,12 +154,51 @@ t_kill_exit(_) -> ?assertNotEqual(Estatsd, Estatsd1), ok. 
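The supervisor and config-handler changes above thread the validated `statsd` config map into the worker's start arguments instead of letting the worker re-read global config in `init/1`; `post_config_update` then restarts the child with the new map. A rough sketch of the resulting update cycle (config values are hypothetical):

```erlang
%% Hypothetical update cycle, mirroring post_config_update/5 above:
NewConf = #{
    enable => true,
    server => <<"127.0.0.1:8125">>,
    sample_time_interval => 10000, %% durations arrive as millisecond integers
    flush_time_interval => 30000,
    tags => #{}
},
ok = emqx_statsd_sup:ensure_child_stopped(emqx_statsd),
%% the gen_server's init/1 now pattern-matches this map directly
ok = emqx_statsd_sup:ensure_child_started(emqx_statsd, NewConf).
```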
+t_config_update(_) -> + OldRawConf = emqx_conf:get_raw([statsd]), + {ok, _} = emqx_statsd_config:update(OldRawConf#{<<"enable">> => true}), + CommonKeys = [flush_time_interval, sample_time_interval], + OldConf = emqx_conf:get([statsd]), + OldStatsDState = sys:get_state(emqx_statsd), + OldPid = erlang:whereis(emqx_statsd), + ?assertEqual(maps:with(CommonKeys, OldConf), maps:with(CommonKeys, OldStatsDState)), + NewRawConfExpect = OldRawConf#{ + <<"flush_time_interval">> := <<"42s">>, + <<"sample_time_interval">> := <<"42s">> + }, + try + {ok, _} = emqx_statsd_config:update(NewRawConfExpect), + NewRawConf = emqx_conf:get_raw([statsd]), + NewConf = emqx_conf:get([statsd]), + NewStatsDState = sys:get_state(emqx_statsd), + NewPid = erlang:whereis(emqx_statsd), + ?assertNotEqual(OldRawConf, NewRawConf), + ?assertEqual(NewRawConfExpect, NewRawConf), + ?assertEqual(maps:with(CommonKeys, NewConf), maps:with(CommonKeys, NewStatsDState)), + ?assertNotEqual(OldPid, NewPid) + after + {ok, _} = emqx_statsd_config:update(OldRawConf) + end, + %% bad server url + BadRawConf = OldRawConf#{<<"server">> := <<"">>}, + {error, #{ + kind := validation_error, + path := "statsd.server", + reason := "cannot_be_empty", + value := "" + }} = emqx_statsd_config:update(BadRawConf), + ok. + request(Method) -> request(Method, []). request(Method, Body) -> case request(Method, uri(["statsd"]), Body) of {ok, 200, Res} -> - {ok, emqx_json:decode(Res, [return_maps])}; + {ok, emqx_utils_json:decode(Res, [return_maps])}; {ok, _Status, _} -> error end. + +default_server() -> + {server, Schema} = lists:keyfind(server, 1, emqx_statsd_schema:fields("statsd")), + hocon_schema:field_schema(Schema, default). diff --git a/apps/emqx_utils/README.md b/apps/emqx_utils/README.md new file mode 100644 index 000000000..f8c386f3d --- /dev/null +++ b/apps/emqx_utils/README.md @@ -0,0 +1,24 @@ +# Erlang utility library for EMQX + +## Overview + +`emqx_utils` is a collection of utility functions for EMQX, organized into +several modules. It provides various functionalities to make it easier to work +with EMQX, such as binary manipulations, maps, JSON en- and decoding, ets table +handling, data conversions, and more. + +## Features + +- `emqx_utils`: unsorted helper functions, formerly known as `emqx_misc` - NEEDS WORK +- `emqx_utils_api`: collection of helper functions for API responses +- `emqx_utils_binary`: binary reverse, join, trim etc +- `emqx_utils_ets`: convenience functions for creating and looking up data in ets tables. +- `emqx_utils_json`: JSON encoding and decoding +- `emqx_utils_maps`: convenience functions for map lookup and manipulation like + deep_get etc. + +## Contributing + +Please see our [contributing guidelines](../../CONTRIBUTING.md) for information +on how to contribute to `emqx_utils`. We welcome bug reports, feature requests, +and pull requests. diff --git a/apps/emqx_utils/include/emqx_utils_api.hrl b/apps/emqx_utils/include/emqx_utils_api.hrl new file mode 100644 index 000000000..bfc8e0a53 --- /dev/null +++ b/apps/emqx_utils/include/emqx_utils_api.hrl @@ -0,0 +1,36 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2021-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. 
+%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-ifndef(EMQX_API_LIB_HRL). +-define(EMQX_API_LIB_HRL, true). + +-define(ERROR_MSG(CODE, REASON), #{code => CODE, message => emqx_utils:readable_error_msg(REASON)}). + +-define(OK(CONTENT), {200, CONTENT}). + +-define(NO_CONTENT, 204). + +-define(BAD_REQUEST(CODE, REASON), {400, ?ERROR_MSG(CODE, REASON)}). +-define(BAD_REQUEST(REASON), ?BAD_REQUEST('BAD_REQUEST', REASON)). + +-define(NOT_FOUND(REASON), {404, ?ERROR_MSG('NOT_FOUND', REASON)}). + +-define(INTERNAL_ERROR(REASON), {500, ?ERROR_MSG('INTERNAL_ERROR', REASON)}). + +-define(NOT_IMPLEMENTED, 501). + +-define(SERVICE_UNAVAILABLE(REASON), {503, ?ERROR_MSG('SERVICE_UNAVAILABLE', REASON)}). +-endif. diff --git a/apps/emqx_utils/rebar.config b/apps/emqx_utils/rebar.config new file mode 100644 index 000000000..4c39cfe64 --- /dev/null +++ b/apps/emqx_utils/rebar.config @@ -0,0 +1,11 @@ +%% -*- mode: erlang -*- + +{erl_opts, [ + debug_info +]}. + +{deps, [ + {jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}} +]}. + +{project_plugins, [erlfmt]}. diff --git a/apps/emqx_utils/src/emqx_utils.app.src b/apps/emqx_utils/src/emqx_utils.app.src new file mode 100644 index 000000000..dff55bc86 --- /dev/null +++ b/apps/emqx_utils/src/emqx_utils.app.src @@ -0,0 +1,27 @@ +%% -*- mode: erlang -*- +{application, emqx_utils, [ + {description, "Miscellaneous utilities for EMQX apps"}, + % strict semver, bump manually! + {vsn, "5.0.1"}, + {modules, [ + emqx_utils, + emqx_utils_api, + emqx_utils_binary, + emqx_utils_ets, + emqx_utils_json, + emqx_utils_maps + ]}, + {registered, []}, + {applications, [ + kernel, + stdlib, + jiffy + ]}, + {env, []}, + {licenses, ["Apache-2.0"]}, + {maintainers, ["EMQX Team "]}, + {links, [ + {"Homepage", "https://emqx.io/"}, + {"Github", "https://github.com/emqx/emqx"} + ]} +]}. diff --git a/apps/emqx/src/emqx_misc.erl b/apps/emqx_utils/src/emqx_utils.erl similarity index 74% rename from apps/emqx/src/emqx_misc.erl rename to apps/emqx_utils/src/emqx_utils.erl index 0e7b29869..e9b2a1f9e 100644 --- a/apps/emqx/src/emqx_misc.erl +++ b/apps/emqx_utils/src/emqx_utils.erl @@ -14,14 +14,12 @@ %% limitations under the License. %%-------------------------------------------------------------------- --module(emqx_misc). +-module(emqx_utils). -compile(inline). +%% [TODO] Cleanup so the instruction below is not necessary. -elvis([{elvis_style, god_modules, disable}]). --include("types.hrl"). --include("logger.hrl"). - -export([ merge_opts/2, maybe_apply/2, @@ -55,7 +53,8 @@ readable_error_msg/1, safe_to_existing_atom/1, safe_to_existing_atom/2, - pub_props_to_packet/1 + pub_props_to_packet/1, + safe_filename/1 ]). -export([ @@ -68,7 +67,9 @@ nolink_apply/2 ]). --export([clamp/3]). +-export([clamp/3, redact/1, redact/2, is_redacted/2, is_redacted/3]). + +-type maybe(T) :: undefined | T. -dialyzer({nowarn_function, [nolink_apply/2]}). @@ -220,6 +221,7 @@ drain_down(Cnt, Acc) -> %% `ok': There is nothing out of the ordinary. 
%% `shutdown': Some numbers (message queue length hit the limit), %% hence shutdown for greater good (system stability). +%% [FIXME] cross-dependency on `emqx_types`. -spec check_oom(emqx_types:oom_policy()) -> ok | {shutdown, term()}. check_oom(Policy) -> check_oom(self(), Policy). @@ -228,7 +230,7 @@ check_oom(Policy) -> check_oom(_Pid, #{enable := false}) -> ok; check_oom(Pid, #{ - max_message_queue_len := MaxQLen, + max_mailbox_size := MaxQLen, max_heap_size := MaxHeapSize }) -> case process_info(Pid, [message_queue_len, total_heap_size]) of @@ -245,7 +247,7 @@ do_check_oom([]) -> ok; do_check_oom([{Val, Max, Reason} | Rest]) -> case is_integer(Max) andalso (0 < Max) andalso (Max < Val) of - true -> {shutdown, Reason}; + true -> {shutdown, #{reason => Reason, value => Val, max => Max}}; false -> do_check_oom(Rest) end. @@ -278,6 +280,7 @@ proc_name(Mod, Id) -> list_to_atom(lists:concat([Mod, "_", Id])). %% Get Proc's Stats. +%% [FIXME] cross-dependency on `emqx_types`. -spec proc_stats() -> emqx_types:stats(). proc_stats() -> proc_stats(self()). @@ -544,10 +547,23 @@ readable_error_msg(Error) -> {ok, Msg} -> Msg; false -> - iolist_to_binary(io_lib:format("~0p", [Error])) + to_hr_error(Error) end end. +to_hr_error(nxdomain) -> + <<"Could not resolve host">>; +to_hr_error(econnrefused) -> + <<"Connection refused">>; +to_hr_error({unauthorized_client, _}) -> + <<"Unauthorized client">>; +to_hr_error({not_authorized, _}) -> + <<"Not authorized">>; +to_hr_error({malformed_username_or_password, _}) -> + <<"Bad username or password">>; +to_hr_error(Error) -> + iolist_to_binary(io_lib:format("~0p", [Error])). + try_to_existing_atom(Convert, Data, Encoding) -> try Convert(Data, Encoding) of Atom -> @@ -556,6 +572,88 @@ try_to_existing_atom(Convert, Data, Encoding) -> _:Reason -> {error, Reason} end. +is_sensitive_key(token) -> true; +is_sensitive_key("token") -> true; +is_sensitive_key(<<"token">>) -> true; +is_sensitive_key(password) -> true; +is_sensitive_key("password") -> true; +is_sensitive_key(<<"password">>) -> true; +is_sensitive_key(secret) -> true; +is_sensitive_key("secret") -> true; +is_sensitive_key(<<"secret">>) -> true; +is_sensitive_key(secret_key) -> true; +is_sensitive_key("secret_key") -> true; +is_sensitive_key(<<"secret_key">>) -> true; +is_sensitive_key(security_token) -> true; +is_sensitive_key("security_token") -> true; +is_sensitive_key(<<"security_token">>) -> true; +is_sensitive_key(aws_secret_access_key) -> true; +is_sensitive_key("aws_secret_access_key") -> true; +is_sensitive_key(<<"aws_secret_access_key">>) -> true; +is_sensitive_key(_) -> false. + +redact(Term) -> + do_redact(Term, fun is_sensitive_key/1). + +redact(Term, Checker) -> + do_redact(Term, fun(V) -> + is_sensitive_key(V) orelse Checker(V) + end). + +do_redact(L, Checker) when is_list(L) -> + lists:map(fun(E) -> do_redact(E, Checker) end, L); +do_redact(M, Checker) when is_map(M) -> + maps:map( + fun(K, V) -> + do_redact(K, V, Checker) + end, + M + ); +do_redact({Key, Value}, Checker) -> + case Checker(Key) of + true -> + {Key, redact_v(Value)}; + false -> + {do_redact(Key, Checker), do_redact(Value, Checker)} + end; +do_redact(T, Checker) when is_tuple(T) -> + Elements = erlang:tuple_to_list(T), + Redact = do_redact(Elements, Checker), + erlang:list_to_tuple(Redact); +do_redact(Any, _Checker) -> + Any. + +do_redact(K, V, Checker) -> + case Checker(K) of + true -> + redact_v(V); + false -> + do_redact(V, Checker) + end. + +-define(REDACT_VAL, "******"). 
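For orientation, here is how the `redact`/`is_redacted` helpers introduced in this hunk are expected to behave (a sketch mirroring the unit tests further down; binary values get a binary mask, everything else the `?REDACT_VAL` string):

```erlang
%% sensitive keys are masked wherever they occur, at any nesting depth
#{password := <<"******">>} = emqx_utils:redact(#{password => <<"s3cret">>}),
{token, "******"} = emqx_utils:redact({token, my_token}),
#{opts := [{secret, "******"}, {host, "example.org"}]} =
    emqx_utils:redact(#{opts => [{secret, foo}, {host, "example.org"}]}),
%% a custom checker extends the built-in sensitive-key list
#{passcode := "******"} =
    emqx_utils:redact(#{passcode => 1234}, fun(K) -> K =:= passcode end),
%% and is_redacted/2 recognizes already-masked values
true = emqx_utils:is_redacted(password, <<"******">>).
```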
+redact_v(V) when is_binary(V) -> <<?REDACT_VAL>>; +%% The HOCON schema system may generate sensitive values with this format +redact_v([{str, Bin}]) when is_binary(Bin) -> + [{str, <<?REDACT_VAL>>}]; +redact_v(_V) -> + ?REDACT_VAL. + +is_redacted(K, V) -> + do_is_redacted(K, V, fun is_sensitive_key/1). + +is_redacted(K, V, Fun) -> + do_is_redacted(K, V, fun(E) -> + is_sensitive_key(E) orelse Fun(E) + end). + +do_is_redacted(K, ?REDACT_VAL, Fun) -> + Fun(K); +do_is_redacted(K, <<?REDACT_VAL>>, Fun) -> + Fun(K); +do_is_redacted(_K, _V, _Fun) -> + false. + -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). @@ -568,6 +666,62 @@ ipv6_probe_test() -> ok end. +redact_test_() -> + Case = fun(Type, KeyT) -> + Key = + case Type of + atom -> KeyT; + string -> erlang:atom_to_list(KeyT); + binary -> erlang:atom_to_binary(KeyT) + end, + + ?assert(is_sensitive_key(Key)), + + %% direct + ?assertEqual({Key, ?REDACT_VAL}, redact({Key, foo})), + ?assertEqual(#{Key => ?REDACT_VAL}, redact(#{Key => foo})), + ?assertEqual({Key, Key, Key}, redact({Key, Key, Key})), + ?assertEqual({[{Key, ?REDACT_VAL}], bar}, redact({[{Key, foo}], bar})), + + %% 1 level nested + ?assertEqual([{Key, ?REDACT_VAL}], redact([{Key, foo}])), + ?assertEqual([#{Key => ?REDACT_VAL}], redact([#{Key => foo}])), + + %% 2 level nested + ?assertEqual(#{opts => [{Key, ?REDACT_VAL}]}, redact(#{opts => [{Key, foo}]})), + ?assertEqual(#{opts => #{Key => ?REDACT_VAL}}, redact(#{opts => #{Key => foo}})), + ?assertEqual({opts, [{Key, ?REDACT_VAL}]}, redact({opts, [{Key, foo}]})), + + %% 3 level nested + ?assertEqual([#{opts => [{Key, ?REDACT_VAL}]}], redact([#{opts => [{Key, foo}]}])), + ?assertEqual([{opts, [{Key, ?REDACT_VAL}]}], redact([{opts, [{Key, foo}]}])), + ?assertEqual([{opts, [#{Key => ?REDACT_VAL}]}], redact([{opts, [#{Key => foo}]}])) + end, + + Types = [atom, string, binary], + Keys = [ + token, + password, + secret + ], + [{case_name(Type, Key), fun() -> Case(Type, Key) end} || Key <- Keys, Type <- Types]. + +redact2_test_() -> + Case = fun(Key, Checker) -> + ?assertEqual({Key, ?REDACT_VAL}, redact({Key, foo}, Checker)), + ?assertEqual(#{Key => ?REDACT_VAL}, redact(#{Key => foo}, Checker)), + ?assertEqual({Key, Key, Key}, redact({Key, Key, Key}, Checker)), + ?assertEqual({[{Key, ?REDACT_VAL}], bar}, redact({[{Key, foo}], bar}, Checker)) + end, + + Checker = fun(E) -> E =:= passcode end, + + Keys = [secret, passcode], + [{case_name(atom, Key), fun() -> Case(Key, Checker) end} || Key <- Keys]. + +case_name(Type, Key) -> + lists:concat([Type, "-", Key]). + -endif. pub_props_to_packet(Properties) -> @@ -583,3 +737,11 @@ pub_props_to_packet(Properties) -> true end, maps:filtermap(F, Properties). + +%% fix filename by replacing characters which could be invalid on some filesystems +%% with safe ones +-spec safe_filename(binary() | unicode:chardata()) -> binary() | [unicode:chardata()]. +safe_filename(Filename) when is_binary(Filename) -> + binary:replace(Filename, <<":">>, <<"-">>, [global]); +safe_filename(Filename) when is_list(Filename) -> + lists:flatten(string:replace(Filename, ":", "-", all)). diff --git a/apps/emqx_utils/src/emqx_utils_api.erl b/apps/emqx_utils/src/emqx_utils_api.erl new file mode 100644 index 000000000..a1bc97cd6 --- /dev/null +++ b/apps/emqx_utils/src/emqx_utils_api.erl @@ -0,0 +1,77 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_utils_api). + +-export([ + with_node/2, + with_node_or_cluster/2 +]). + +-include("emqx_utils_api.hrl"). + +-define(NODE_NOT_FOUND(NODE), ?NOT_FOUND(<<"Node not found: ", NODE/binary>>)). + +%%-------------------------------------------------------------------- +%% exported API +%%-------------------------------------------------------------------- +-spec with_node(binary() | atom(), fun((atom()) -> {ok, term()} | {error, term()})) -> + ?OK(term()) | ?NOT_FOUND(binary()) | ?BAD_REQUEST(term()). +with_node(Node0, Fun) -> + case lookup_node(Node0) of + {ok, Node} -> + handle_result(Fun(Node)); + not_found -> + ?NODE_NOT_FOUND(Node0) + end. + +-spec with_node_or_cluster(binary() | atom(), fun((atom()) -> {ok, term()} | {error, term()})) -> + ?OK(term()) | ?NOT_FOUND(iolist()) | ?BAD_REQUEST(term()). +with_node_or_cluster(<<"all">>, Fun) -> + handle_result(Fun(all)); +with_node_or_cluster(Node, Fun) -> + with_node(Node, Fun). + +%%-------------------------------------------------------------------- +%% Internal +%%-------------------------------------------------------------------- + +-spec lookup_node(atom() | binary()) -> {ok, atom()} | not_found. +lookup_node(BinNode) when is_binary(BinNode) -> + case emqx_utils:safe_to_existing_atom(BinNode, utf8) of + {ok, Node} -> + is_running_node(Node); + _Error -> + not_found + end; +lookup_node(Node) when is_atom(Node) -> + is_running_node(Node). + +-spec is_running_node(atom()) -> {ok, atom()} | not_found. +is_running_node(Node) -> + case lists:member(Node, mria:running_nodes()) of + true -> + {ok, Node}; + false -> + not_found + end. + +handle_result({ok, Result}) -> + ?OK(Result); +handle_result({error, Reason}) -> + ?BAD_REQUEST(Reason); +handle_result({HTTPCode, Content}) when is_integer(HTTPCode) -> + {HTTPCode, Content}. diff --git a/apps/emqx_gateway/src/lwm2m/binary_util.erl b/apps/emqx_utils/src/emqx_utils_binary.erl similarity index 97% rename from apps/emqx_gateway/src/lwm2m/binary_util.erl rename to apps/emqx_utils/src/emqx_utils_binary.erl index 68ac7a0d7..26976496d 100644 --- a/apps/emqx_gateway/src/lwm2m/binary_util.erl +++ b/apps/emqx_utils/src/emqx_utils_binary.erl @@ -1,4 +1,6 @@ %%-------------------------------------------------------------------- +%% Original file taken from https://github.com/arcusfelis/binary2 +%% Copyright (c) 2016 Michael Uvarov %% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,9 +16,7 @@ %% limitations under the License. %%-------------------------------------------------------------------- --module(binary_util). - -%% copied from https://github.com/arcusfelis/binary2 +-module(emqx_utils_binary). 
%% Bytes -export([ diff --git a/apps/emqx/src/emqx_tables.erl b/apps/emqx_utils/src/emqx_utils_ets.erl similarity index 98% rename from apps/emqx/src/emqx_tables.erl rename to apps/emqx_utils/src/emqx_utils_ets.erl index ffdf7d891..099152675 100644 --- a/apps/emqx/src/emqx_tables.erl +++ b/apps/emqx_utils/src/emqx_utils_ets.erl @@ -14,7 +14,7 @@ %% limitations under the License. %%-------------------------------------------------------------------- --module(emqx_tables). +-module(emqx_utils_ets). -export([ new/1, diff --git a/apps/emqx/src/emqx_json.erl b/apps/emqx_utils/src/emqx_utils_json.erl similarity index 91% rename from apps/emqx/src/emqx_json.erl rename to apps/emqx_utils/src/emqx_utils_json.erl index 7827b98c9..df7388c94 100644 --- a/apps/emqx/src/emqx_json.erl +++ b/apps/emqx_utils/src/emqx_utils_json.erl @@ -14,7 +14,7 @@ %% limitations under the License. %%-------------------------------------------------------------------- --module(emqx_json). +-module(emqx_utils_json). -compile(inline). @@ -46,6 +46,10 @@ ]} ). +-export([is_json/1]). + +-compile({inline, [is_json/1]}). + -type encode_options() :: jiffy:encode_options(). -type decode_options() :: jiffy:decode_options(). @@ -79,7 +83,7 @@ safe_encode(Term, Opts) -> end. -spec decode(json_text()) -> json_term(). -decode(Json) -> decode(Json, []). +decode(Json) -> decode(Json, [return_maps]). -spec decode(json_text(), decode_options()) -> json_term(). decode(Json, Opts) -> @@ -100,6 +104,10 @@ safe_decode(Json, Opts) -> {error, Reason} end. +-spec is_json(json_text()) -> boolean(). +is_json(Json) -> + element(1, safe_decode(Json)) =:= ok. + %%-------------------------------------------------------------------- %% Helpers %%-------------------------------------------------------------------- @@ -117,6 +125,8 @@ to_ejson([{_, _} | _] = L) -> {[{K, to_ejson(V)} || {K, V} <- L]}; to_ejson(L) when is_list(L) -> [to_ejson(E) || E <- L]; +to_ejson(M) when is_map(M) -> + maps:map(fun(_K, V) -> to_ejson(V) end, M); to_ejson(T) -> T. diff --git a/apps/emqx/src/emqx_map_lib.erl b/apps/emqx_utils/src/emqx_utils_maps.erl similarity index 95% rename from apps/emqx/src/emqx_map_lib.erl rename to apps/emqx_utils/src/emqx_utils_maps.erl index 5455fe9e7..d1c3ed649 100644 --- a/apps/emqx/src/emqx_map_lib.erl +++ b/apps/emqx_utils/src/emqx_utils_maps.erl @@ -13,7 +13,7 @@ %% See the License for the specific language governing permissions and %% limitations under the License. %%-------------------------------------------------------------------- --module(emqx_map_lib). +-module(emqx_utils_maps). -export([ deep_get/2, @@ -41,14 +41,13 @@ -type config_key_path() :: [config_key()]. -type convert_fun() :: fun((...) -> {K1 :: any(), V1 :: any()} | drop). +-define(CONFIG_NOT_FOUND_MAGIC, '$0tFound'). %%----------------------------------------------------------------- -spec deep_get(config_key_path(), map()) -> term(). deep_get(ConfKeyPath, Map) -> - Ref = make_ref(), - Res = deep_get(ConfKeyPath, Map, Ref), - case Res =:= Ref of - true -> error({config_not_found, ConfKeyPath}); - false -> Res + case deep_get(ConfKeyPath, Map, ?CONFIG_NOT_FOUND_MAGIC) of + ?CONFIG_NOT_FOUND_MAGIC -> error({config_not_found, ConfKeyPath}); + Res -> Res end. -spec deep_get(config_key_path(), map(), term()) -> term(). @@ -152,7 +151,7 @@ deep_convert(Val, _, _Args) -> -spec unsafe_atom_key_map(#{binary() | atom() => any()}) -> #{atom() => any()}. unsafe_atom_key_map(Map) -> - covert_keys_to_atom(Map, fun(K) -> binary_to_atom(K, utf8) end). 
+ convert_keys_to_atom(Map, fun(K) -> binary_to_atom(K, utf8) end). -spec binary_key_map(map()) -> map(). binary_key_map(Map) -> @@ -167,7 +166,7 @@ binary_key_map(Map) -> -spec safe_atom_key_map(#{binary() | atom() => any()}) -> #{atom() => any()}. safe_atom_key_map(Map) -> - covert_keys_to_atom(Map, fun(K) -> binary_to_existing_atom(K, utf8) end). + convert_keys_to_atom(Map, fun(K) -> binary_to_existing_atom(K, utf8) end). -spec jsonable_map(map() | list()) -> map() | list(). jsonable_map(Map) -> @@ -210,6 +209,7 @@ binary_string_kv(K, V, JsonableFun) -> {K1, V1} -> {binary_string(K1), V1} end. +%% [FIXME] this doesn't belong here binary_string([]) -> []; binary_string(Val) when is_list(Val) -> @@ -221,7 +221,7 @@ binary_string(Val) -> Val. %%--------------------------------------------------------------------------- -covert_keys_to_atom(BinKeyMap, Conv) -> +convert_keys_to_atom(BinKeyMap, Conv) -> deep_convert( BinKeyMap, fun @@ -332,7 +332,7 @@ deep_filter(M, F) when is_map(M) -> if_only_to_toggle_enable(OldConf, Conf) -> #{added := Added, removed := Removed, changed := Updated} = - emqx_map_lib:diff_maps(OldConf, Conf), + emqx_utils_maps:diff_maps(OldConf, Conf), case {Added, Removed, Updated} of {Added, Removed, #{enable := _} = Updated} when map_size(Added) =:= 0, diff --git a/apps/emqx/test/emqx_misc_SUITE.erl b/apps/emqx_utils/test/emqx_utils_SUITE.erl similarity index 66% rename from apps/emqx/test/emqx_misc_SUITE.erl rename to apps/emqx_utils/test/emqx_utils_SUITE.erl index c2bd751fa..6c6bcf8d3 100644 --- a/apps/emqx/test/emqx_misc_SUITE.erl +++ b/apps/emqx_utils/test/emqx_utils_SUITE.erl @@ -14,7 +14,7 @@ %% limitations under the License. %%-------------------------------------------------------------------- --module(emqx_misc_SUITE). +-module(emqx_utils_SUITE). -compile(export_all). -compile(nowarn_export_all). @@ -32,7 +32,7 @@ all() -> emqx_common_test_helpers:all(?MODULE). t_merge_opts(_) -> - Opts = emqx_misc:merge_opts(?SOCKOPTS, [ + Opts = emqx_utils:merge_opts(?SOCKOPTS, [ raw, binary, {backlog, 1024}, @@ -57,58 +57,59 @@ t_merge_opts(_) -> ). t_maybe_apply(_) -> - ?assertEqual(undefined, emqx_misc:maybe_apply(fun(A) -> A end, undefined)), - ?assertEqual(a, emqx_misc:maybe_apply(fun(A) -> A end, a)). + ?assertEqual(undefined, emqx_utils:maybe_apply(fun(A) -> A end, undefined)), + ?assertEqual(a, emqx_utils:maybe_apply(fun(A) -> A end, a)). t_run_fold(_) -> - ?assertEqual(1, emqx_misc:run_fold([], 1, state)), + ?assertEqual(1, emqx_utils:run_fold([], 1, state)), Add = fun(I, St) -> I + St end, Mul = fun(I, St) -> I * St end, - ?assertEqual(6, emqx_misc:run_fold([Add, Mul], 1, 2)). + ?assertEqual(6, emqx_utils:run_fold([Add, Mul], 1, 2)). t_pipeline(_) -> - ?assertEqual({ok, input, state}, emqx_misc:pipeline([], input, state)), + ?assertEqual({ok, input, state}, emqx_utils:pipeline([], input, state)), Funs = [ fun(_I, _St) -> ok end, fun(_I, St) -> {ok, St + 1} end, fun(I, St) -> {ok, I + 1, St + 1} end, fun(I, St) -> {ok, I * 2, St * 2} end ], - ?assertEqual({ok, 4, 6}, emqx_misc:pipeline(Funs, 1, 1)), + ?assertEqual({ok, 4, 6}, emqx_utils:pipeline(Funs, 1, 1)), ?assertEqual( - {error, undefined, 1}, emqx_misc:pipeline([fun(_I) -> {error, undefined} end], 1, 1) + {error, undefined, 1}, emqx_utils:pipeline([fun(_I) -> {error, undefined} end], 1, 1) ), ?assertEqual( - {error, undefined, 2}, emqx_misc:pipeline([fun(_I, _St) -> {error, undefined, 2} end], 1, 1) + {error, undefined, 2}, + emqx_utils:pipeline([fun(_I, _St) -> {error, undefined, 2} end], 1, 1) ). 
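A quick reading of `emqx_utils:pipeline/3` semantics as pinned down by `t_pipeline` above (a sketch; each fun may keep, replace, or error out on the input/state pair):

```erlang
Funs = [
    fun(_In, _St) -> ok end,                 %% keep input and state
    fun(_In, St) -> {ok, St + 1} end,        %% replace state only
    fun(In, St) -> {ok, In + 1, St + 1} end  %% replace input and state
],
{ok, 2, 3} = emqx_utils:pipeline(Funs, 1, 1),
%% {error, Reason} short-circuits; the state reached so far is returned
{error, boom, 1} = emqx_utils:pipeline([fun(_In) -> {error, boom} end], 1, 1).
```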
t_start_timer(_) -> - TRef = emqx_misc:start_timer(1, tmsg), + TRef = emqx_utils:start_timer(1, tmsg), timer:sleep(2), ?assertEqual([{timeout, TRef, tmsg}], drain()), - ok = emqx_misc:cancel_timer(TRef). + ok = emqx_utils:cancel_timer(TRef). t_cancel_timer(_) -> - Timer = emqx_misc:start_timer(0, foo), - ok = emqx_misc:cancel_timer(Timer), + Timer = emqx_utils:start_timer(0, foo), + ok = emqx_utils:cancel_timer(Timer), ?assertEqual([], drain()), - ok = emqx_misc:cancel_timer(undefined). + ok = emqx_utils:cancel_timer(undefined). t_proc_name(_) -> - ?assertEqual(emqx_pool_1, emqx_misc:proc_name(emqx_pool, 1)). + ?assertEqual(emqx_pool_1, emqx_utils:proc_name(emqx_pool, 1)). t_proc_stats(_) -> Pid1 = spawn(fun() -> exit(normal) end), timer:sleep(10), - ?assertEqual([], emqx_misc:proc_stats(Pid1)), + ?assertEqual([], emqx_utils:proc_stats(Pid1)), Pid2 = spawn(fun() -> - ?assertMatch([{mailbox_len, 0} | _], emqx_misc:proc_stats()), + ?assertMatch([{mailbox_len, 0} | _], emqx_utils:proc_stats()), timer:sleep(200) end), timer:sleep(10), Pid2 ! msg, timer:sleep(10), - ?assertMatch([{mailbox_len, 1} | _], emqx_misc:proc_stats(Pid2)). + ?assertMatch([{mailbox_len, 1} | _], emqx_utils:proc_stats(Pid2)). t_drain_deliver(_) -> self() ! {deliver, t1, m1}, @@ -118,35 +119,38 @@ t_drain_deliver(_) -> {deliver, t1, m1}, {deliver, t2, m2} ], - emqx_misc:drain_deliver(2) + emqx_utils:drain_deliver(2) ). t_drain_down(_) -> {Pid1, _Ref1} = erlang:spawn_monitor(fun() -> ok end), {Pid2, _Ref2} = erlang:spawn_monitor(fun() -> ok end), timer:sleep(100), - ?assertEqual([Pid1, Pid2], lists:sort(emqx_misc:drain_down(2))), - ?assertEqual([], emqx_misc:drain_down(1)). + ?assertEqual([Pid1, Pid2], lists:sort(emqx_utils:drain_down(2))), + ?assertEqual([], emqx_utils:drain_down(1)). t_index_of(_) -> - try emqx_misc:index_of(a, []) of + try emqx_utils:index_of(a, []) of _ -> ct:fail(should_throw_error) catch error:Reason -> ?assertEqual(badarg, Reason) end, - ?assertEqual(3, emqx_misc:index_of(a, [b, c, a, e, f])). + ?assertEqual(3, emqx_utils:index_of(a, [b, c, a, e, f])). t_check(_) -> Policy = #{ - max_message_queue_len => 10, + max_mailbox_size => 10, max_heap_size => 1024 * 1024 * 8, enable => true }, [self() ! {msg, I} || I <- lists:seq(1, 5)], - ?assertEqual(ok, emqx_misc:check_oom(Policy)), + ?assertEqual(ok, emqx_utils:check_oom(Policy)), [self() ! {msg, I} || I <- lists:seq(1, 6)], - ?assertEqual({shutdown, message_queue_too_long}, emqx_misc:check_oom(Policy)). + ?assertEqual( + {shutdown, #{reason => message_queue_too_long, value => 11, max => 10}}, + emqx_utils:check_oom(Policy) + ). drain() -> drain([]). @@ -159,22 +163,22 @@ drain(Acc) -> end. t_rand_seed(_) -> - ?assert(is_tuple(emqx_misc:rand_seed())). + ?assert(is_tuple(emqx_utils:rand_seed())). t_now_to_secs(_) -> - ?assert(is_integer(emqx_misc:now_to_secs(os:timestamp()))). + ?assert(is_integer(emqx_utils:now_to_secs(os:timestamp()))). t_now_to_ms(_) -> - ?assert(is_integer(emqx_misc:now_to_ms(os:timestamp()))). + ?assert(is_integer(emqx_utils:now_to_ms(os:timestamp()))). t_gen_id(_) -> - ?assertEqual(10, length(emqx_misc:gen_id(10))), - ?assertEqual(20, length(emqx_misc:gen_id(20))). + ?assertEqual(10, length(emqx_utils:gen_id(10))), + ?assertEqual(20, length(emqx_utils:gen_id(20))). 
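Two behavioural changes are pinned down by `t_check` above: the policy key is renamed from `max_message_queue_len` to `max_mailbox_size`, and the shutdown reason now carries the offending measurement. A sketch of handling the new shape (threshold values are illustrative):

```erlang
Policy = #{
    enable => true,
    max_mailbox_size => 10, %% renamed from max_message_queue_len
    max_heap_size => 8 * 1024 * 1024
},
case emqx_utils:check_oom(Policy) of
    ok ->
        ok;
    {shutdown, #{reason := Reason, value := Value, max := Max}} ->
        %% the limit and the measured value now travel with the reason,
        %% so log messages can say exactly what was exceeded
        {stop, {Reason, Value, Max}}
end.
```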
t_pmap_normal(_) -> ?assertEqual( [5, 7, 9], - emqx_misc:pmap( + emqx_utils:pmap( fun({A, B}) -> A + B end, [{2, 3}, {3, 4}, {4, 5}] ) @@ -183,7 +187,7 @@ t_pmap_normal(_) -> t_pmap_timeout(_) -> ?assertExit( timeout, - emqx_misc:pmap( + emqx_utils:pmap( fun (timeout) -> ct:sleep(1000); ({A, B}) -> A + B @@ -196,7 +200,7 @@ t_pmap_timeout(_) -> t_pmap_exception(_) -> ?assertError( foobar, - emqx_misc:pmap( + emqx_utils:pmap( fun (error) -> error(foobar); ({A, B}) -> A + B diff --git a/apps/emqx_utils/test/emqx_utils_api_SUITE.erl b/apps/emqx_utils/test/emqx_utils_api_SUITE.erl new file mode 100644 index 000000000..3ed3cd250 --- /dev/null +++ b/apps/emqx_utils/test/emqx_utils_api_SUITE.erl @@ -0,0 +1,101 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_utils_api_SUITE). + +-compile(export_all). +-compile(nowarn_export_all). + +-include("emqx_utils_api.hrl"). +-include_lib("eunit/include/eunit.hrl"). + +-define(DUMMY, dummy_module). + +all() -> emqx_common_test_helpers:all(?MODULE). + +init_per_suite(Config) -> + emqx_common_test_helpers:boot_modules(all), + emqx_common_test_helpers:start_apps([]), + Config. + +end_per_suite(_Config) -> + emqx_common_test_helpers:stop_apps([]). + +init_per_testcase(_Case, Config) -> + meck:new(?DUMMY, [non_strict]), + meck:expect(?DUMMY, expect_not_called, 1, fun(Node) -> throw({blow_this_up, Node}) end), + meck:expect(?DUMMY, expect_success, 1, {ok, success}), + meck:expect(?DUMMY, expect_error, 1, {error, error}), + Config. + +end_per_testcase(_Case, _Config) -> + meck:unload(?DUMMY). + +t_with_node(_) -> + test_with(fun emqx_utils_api:with_node/2, [<<"all">>]). + +t_with_node_or_cluster(_) -> + test_with(fun emqx_utils_api:with_node_or_cluster/2, []), + meck:reset(?DUMMY), + ?assertEqual( + ?OK(success), + emqx_utils_api:with_node_or_cluster( + <<"all">>, + fun ?DUMMY:expect_success/1 + ) + ), + ?assertMatch([{_, {?DUMMY, expect_success, [all]}, {ok, success}}], meck:history(?DUMMY)). + +%% helpers +test_with(TestFun, ExtraBadNodes) -> + % make sure this is an atom + 'unknownnode@unknownnohost', + BadNodes = + [ + <<"undefined">>, + <<"this_should_not_be_an_atom">>, + <<"unknownnode@unknownnohost">> + ] ++ ExtraBadNodes, + [ensure_not_found(TestFun(N, fun ?DUMMY:expect_not_called/1)) || N <- BadNodes], + ensure_not_called(?DUMMY, expect_not_called), + ensure_not_existing_atom(<<"this_should_not_be_an_atom">>), + + GoodNode = node(), + + ?assertEqual( + ?OK(success), + TestFun(GoodNode, fun ?DUMMY:expect_success/1) + ), + + ?assertEqual( + ?BAD_REQUEST(error), + TestFun(GoodNode, fun ?DUMMY:expect_error/1) + ), + ok. + +ensure_not_found(Result) -> + ?assertMatch({404, _}, Result). + +ensure_not_called(Mod, Fun) -> + ?assert(not meck:called(Mod, Fun, '_')). 
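Typical call-site shape for `emqx_utils_api:with_node/2`, per the suite above: unknown or stopped nodes become a 404 without creating new atoms, `{ok, _}` becomes a 200, and `{error, _}` a 400. The handler below is a hypothetical sketch, not an API from the patch:

```erlang
get_node_memory(NodeBin) ->
    emqx_utils_api:with_node(NodeBin, fun(Node) ->
        %% only reached when NodeBin names a known, running node
        {ok, rpc:call(Node, erlang, memory, [])}
    end).
```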
+ +ensure_not_existing_atom(Bin) -> + try binary_to_existing_atom(Bin) of + _ -> throw(is_atom) + catch + error:badarg -> + ok + end. diff --git a/apps/emqx_utils/test/emqx_utils_binary_tests.erl b/apps/emqx_utils/test/emqx_utils_binary_tests.erl new file mode 100644 index 000000000..79851dca5 --- /dev/null +++ b/apps/emqx_utils/test/emqx_utils_binary_tests.erl @@ -0,0 +1,213 @@ +%%-------------------------------------------------------------------- +%% Original file taken from https://github.com/arcusfelis/binary2 +%% Copyright (c) 2016 Michael Uvarov +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +-module(emqx_utils_binary_tests). + +-import(emqx_utils_binary, [ + trim/1, + ltrim/1, + rtrim/1, + trim/2, + ltrim/2, + rtrim/2, + reverse/1, + inverse/1, + join/2, + suffix/2, + prefix/2, + duplicate/2, + union/2, + intersection/2, + subtract/2, + optimize_patterns/1 +]). + +-include_lib("eunit/include/eunit.hrl"). + +trim1_test_() -> + [ + ?_assertEqual(trim(<<>>), <<>>), + ?_assertEqual(trim(<<0, 0, 0>>), <<>>), + ?_assertEqual(trim(<<1, 2, 3>>), <<1, 2, 3>>), + ?_assertEqual(trim(<<0, 1, 2>>), <<1, 2>>), + ?_assertEqual(trim(<<0, 0, 1, 2>>), <<1, 2>>), + ?_assertEqual(trim(<<1, 2, 0, 0>>), <<1, 2>>), + ?_assertEqual(trim(<<0, 1, 2, 0>>), <<1, 2>>), + ?_assertEqual(trim(<<0, 0, 0, 1, 2, 0, 0, 0>>), <<1, 2>>) + ]. + +ltrim1_test_() -> + [ + ?_assertEqual(ltrim(<<>>), <<>>), + ?_assertEqual(ltrim(<<0, 0, 0>>), <<>>), + ?_assertEqual(ltrim(<<1, 2, 3>>), <<1, 2, 3>>), + ?_assertEqual(ltrim(<<0, 1, 2>>), <<1, 2>>), + ?_assertEqual(ltrim(<<0, 0, 1, 2>>), <<1, 2>>), + ?_assertEqual(ltrim(<<1, 2, 0, 0>>), <<1, 2, 0, 0>>), + ?_assertEqual(ltrim(<<0, 1, 2, 0>>), <<1, 2, 0>>), + ?_assertEqual(ltrim(<<0, 0, 0, 1, 2, 0, 0, 0>>), <<1, 2, 0, 0, 0>>) + ]. + +rtrim1_test_() -> + [ + ?_assertEqual(rtrim(<<>>), <<>>), + ?_assertEqual(rtrim(<<1, 2, 3>>), <<1, 2, 3>>), + ?_assertEqual(rtrim(<<0, 0, 0>>), <<>>), + ?_assertEqual(rtrim(<<0, 1, 2>>), <<0, 1, 2>>), + ?_assertEqual(rtrim(<<0, 0, 1, 2>>), <<0, 0, 1, 2>>), + ?_assertEqual(rtrim(<<1, 2, 0, 0>>), <<1, 2>>), + ?_assertEqual(rtrim(<<0, 1, 2, 0>>), <<0, 1, 2>>), + ?_assertEqual(rtrim(<<0, 0, 0, 1, 2, 0, 0, 0>>), <<0, 0, 0, 1, 2>>) + ]. + +trim2_test_() -> + [ + ?_assertEqual(trim(<<5>>, 5), <<>>), + ?_assertEqual(trim(<<5, 1, 2, 5>>, 5), <<1, 2>>), + ?_assertEqual(trim(<<5, 5, 5, 1, 2, 0, 0, 0>>, 5), <<1, 2, 0, 0, 0>>) + ]. + +ltrim2_test_() -> + [ + ?_assertEqual(ltrim(<<5>>, 5), <<>>), + ?_assertEqual(ltrim(<<5, 1, 2, 5>>, 5), <<1, 2, 5>>), + ?_assertEqual(ltrim(<<5, 5, 5, 1, 2, 0, 0, 0>>, 5), <<1, 2, 0, 0, 0>>) + ]. + +rtrim2_test_() -> + [ + ?_assertEqual(rtrim(<<5>>, 5), <<>>), + ?_assertEqual(rtrim(<<5, 1, 2, 5>>, 5), <<5, 1, 2>>), + ?_assertEqual(rtrim(<<5, 5, 5, 1, 2, 0, 0, 0>>, 5), <<5, 5, 5, 1, 2, 0, 0, 0>>) + ]. + +mtrim2_test_() -> + [ + ?_assertEqual(trim(<<5>>, [1, 5]), <<>>), + ?_assertEqual(trim(<<5, 1, 2, 5>>, [1, 5]), <<2>>), + ?_assertEqual(trim(<<5, 1, 2, 5>>, [1, 2, 5]), <<>>), + ?_assertEqual(trim(<<5, 5, 5, 1, 2, 0, 0, 0>>, [1, 5]), <<2, 0, 0, 0>>) + ]. + +mltrim2_test_() -> + [ + ?_assertEqual(ltrim(<<5>>, [1, 5]), <<>>), + ?_assertEqual(ltrim(<<5, 1, 2, 5>>, [1, 5]), <<2, 5>>), + ?_assertEqual(ltrim(<<5, 1, 2, 5>>, [2, 5]), <<1, 2, 5>>), + ?_assertEqual(ltrim(<<5, 5, 5, 1, 2, 0, 0, 0>>, [1, 5]), <<2, 0, 0, 0>>) + ]. 
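For reference while reading these tests: `trim/1` strips `0` bytes from both ends, and the second argument of `trim/2` names the byte, or list of bytes, to strip (a sketch mirroring the assertions):

```erlang
<<1, 2>> = emqx_utils_binary:trim(<<0, 0, 1, 2, 0>>),       %% default: strip 0x00
<<1, 2>> = emqx_utils_binary:trim(<<5, 1, 2, 5>>, 5),       %% strip a given byte
<<2>>    = emqx_utils_binary:trim(<<5, 1, 2, 5>>, [1, 5]),  %% strip a byte set
<<1, 2, 5>> = emqx_utils_binary:ltrim(<<5, 1, 2, 5>>, 5).   %% left-only variant
```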
+ +mrtrim2_test_() -> + [ + ?_assertEqual(rtrim(<<5>>, [1, 5]), <<>>), + ?_assertEqual(rtrim(<<5, 1, 2, 5>>, [1, 5]), <<5, 1, 2>>), + ?_assertEqual(rtrim(<<5, 1, 2, 5>>, [2, 5]), <<5, 1>>), + ?_assertEqual(rtrim(<<5, 5, 5, 1, 2, 0, 0, 0>>, [1, 5]), <<5, 5, 5, 1, 2, 0, 0, 0>>), + ?_assertEqual(rtrim(<<5, 5, 5, 1, 2, 0, 0, 0>>, [0, 5]), <<5, 5, 5, 1, 2>>) + ]. + +reverse_test_() -> + [?_assertEqual(reverse(<<0, 1, 2>>), <<2, 1, 0>>)]. + +join_test_() -> + [ + ?_assertEqual(join([<<1, 2>>, <<3, 4>>, <<5, 6>>], <<0>>), <<1, 2, 0, 3, 4, 0, 5, 6>>), + ?_assertEqual( + join([<<"abc">>, <<"def">>, <<"xyz">>], <<"|">>), + <<"abc|def|xyz">> + ), + ?_assertEqual( + join([<<>>, <<"|">>, <<"x|z">>], <<"|">>), + <<"|||x|z">> + ), + ?_assertEqual( + join([<<"abc">>, <<"def">>, <<"xyz">>], <<>>), + <<"abcdefxyz">> + ), + ?_assertEqual(join([], <<"|">>), <<>>) + ]. + +duplicate_test_() -> + [ + ?_assertEqual(duplicate(5, <<1, 2>>), <<1, 2, 1, 2, 1, 2, 1, 2, 1, 2>>), + ?_assertEqual(duplicate(50, <<0>>), <<0:400>>) + ]. + +suffix_test_() -> + [ + ?_assertEqual(suffix(<<1, 2, 3, 4, 5>>, 2), <<4, 5>>), + ?_assertError(badarg, prefix(<<1, 2, 3, 4, 5>>, 25)) + ]. + +prefix_test_() -> + [ + ?_assertEqual(prefix(<<1, 2, 3, 4, 5>>, 2), <<1, 2>>), + ?_assertError(badarg, prefix(<<1, 2, 3, 4, 5>>, 25)) + ]. + +union_test_() -> + [ + ?_assertEqual( + union( + <<2#0011011:7>>, + <<2#1011110:7>> + ), + <<2#1011111:7>> + ) + ]. + +inverse_test_() -> + [ + ?_assertEqual(inverse(inverse(<<0, 1, 2>>)), <<0, 1, 2>>), + ?_assertEqual(inverse(<<0>>), <<255>>), + ?_assertEqual(inverse(<<2#1:1>>), <<2#0:1>>), + ?_assertEqual(inverse(<<2#0:1>>), <<2#1:1>>), + ?_assertEqual( + inverse(<<2#01:2>>), + <<2#10:2>> + ), + ?_assertEqual( + inverse(<<2#0011011:7>>), + <<2#1100100:7>> + ) + ]. + +intersection_test_() -> + [ + ?_assertEqual( + intersection( + <<2#0011011>>, + <<2#1011110>> + ), + <<2#0011010>> + ) + ]. + +subtract_test_() -> + [ + ?_assertEqual( + subtract( + <<2#0011011>>, + <<2#1011110>> + ), + <<2#0000001>> + ) + ]. + +optimize_patterns_test_() -> + [ + ?_assertEqual( + [<<"t">>], + optimize_patterns([<<"t">>, <<"test">>]) + ), + ?_assertEqual( + [<<"t">>], + optimize_patterns([<<"t">>, <<"t">>, <<"test">>]) + ), + ?_assertEqual( + [<<"t">>], + optimize_patterns([<<"test">>, <<"t">>, <<"t">>]) + ) + ]. diff --git a/apps/emqx/test/emqx_tables_SUITE.erl b/apps/emqx_utils/test/emqx_utils_ets_SUITE.erl similarity index 73% rename from apps/emqx/test/emqx_tables_SUITE.erl rename to apps/emqx_utils/test/emqx_utils_ets_SUITE.erl index ad53e7139..13bf427fd 100644 --- a/apps/emqx/test/emqx_tables_SUITE.erl +++ b/apps/emqx_utils/test/emqx_utils_ets_SUITE.erl @@ -14,7 +14,7 @@ %% limitations under the License. %%-------------------------------------------------------------------- --module(emqx_tables_SUITE). +-module(emqx_utils_ets_SUITE). -compile(export_all). -compile(nowarn_export_all). @@ -26,19 +26,19 @@ all() -> emqx_common_test_helpers:all(?MODULE). t_new(_) -> - ok = emqx_tables:new(?TAB), - ok = emqx_tables:new(?TAB, [{read_concurrency, true}]), + ok = emqx_utils_ets:new(?TAB), + ok = emqx_utils_ets:new(?TAB, [{read_concurrency, true}]), ?assertEqual(?TAB, ets:info(?TAB, name)). t_lookup_value(_) -> - ok = emqx_tables:new(?TAB, []), + ok = emqx_utils_ets:new(?TAB, []), true = ets:insert(?TAB, {key, val}), - ?assertEqual(val, emqx_tables:lookup_value(?TAB, key)), - ?assertEqual(undefined, emqx_tables:lookup_value(?TAB, badkey)). 
+ ?assertEqual(val, emqx_utils_ets:lookup_value(?TAB, key)), + ?assertEqual(undefined, emqx_utils_ets:lookup_value(?TAB, badkey)). t_delete(_) -> - ok = emqx_tables:new(?TAB, []), + ok = emqx_utils_ets:new(?TAB, []), ?assertEqual(?TAB, ets:info(?TAB, name)), - ok = emqx_tables:delete(?TAB), - ok = emqx_tables:delete(?TAB), + ok = emqx_utils_ets:delete(?TAB), + ok = emqx_utils_ets:delete(?TAB), ?assertEqual(undefined, ets:info(?TAB, name)). diff --git a/apps/emqx/test/emqx_json_SUITE.erl b/apps/emqx_utils/test/emqx_utils_json_SUITE.erl similarity index 83% rename from apps/emqx/test/emqx_json_SUITE.erl rename to apps/emqx_utils/test/emqx_utils_json_SUITE.erl index a0bf48e4e..daf31b440 100644 --- a/apps/emqx/test/emqx_json_SUITE.erl +++ b/apps/emqx_utils/test/emqx_utils_json_SUITE.erl @@ -14,7 +14,7 @@ %% limitations under the License. %%-------------------------------------------------------------------- --module(emqx_json_SUITE). +-module(emqx_utils_json_SUITE). -compile(export_all). -compile(nowarn_export_all). @@ -22,7 +22,7 @@ -include_lib("eunit/include/eunit.hrl"). -import( - emqx_json, + emqx_utils_json, [ encode/1, decode/1, @@ -51,7 +51,7 @@ %% #{<<"foo">> => <<"bar">>} -> {"foo": "bar"} -> #{<<"foo">> => <<"bar">>} %%-------------------------------------------------------------------- -%% but in emqx_json, we use the jsx style for it: +%% but in emqx_utils_json, we use the jsx style for it: %%-------------------------------------------------------------------- %% Erlang JSON Erlang %% ------------------------------------------------------------------- @@ -84,10 +84,10 @@ t_decode_encode(_) -> 1.25 = decode(encode(1.25)), [] = decode(encode([])), [true, 1] = decode(encode([true, 1])), - [{}] = decode(encode([{}])), - [{<<"foo">>, <<"bar">>}] = decode(encode([{foo, bar}])), - [{<<"foo">>, <<"bar">>}] = decode(encode([{<<"foo">>, <<"bar">>}])), - [[{<<"foo">>, <<"bar">>}]] = decode(encode([[{<<"foo">>, <<"bar">>}]])), + [{}] = decode(encode([{}]), []), + [{<<"foo">>, <<"bar">>}] = decode(encode([{foo, bar}]), []), + [{<<"foo">>, <<"bar">>}] = decode(encode([{<<"foo">>, <<"bar">>}]), []), + [[{<<"foo">>, <<"bar">>}]] = decode(encode([[{<<"foo">>, <<"bar">>}]]), []), [ [ {<<"foo">>, <<"bar">>}, @@ -101,7 +101,8 @@ t_decode_encode(_) -> {<<"a">>, <<"b">>} ], [{<<"x">>, <<"y">>}] - ]) + ]), + [] ), #{<<"foo">> := <<"bar">>} = decode(encode(#{<<"foo">> => <<"bar">>}), [return_maps]), JsonText = <<"{\"bool\":true,\"int\":10,\"foo\":\"bar\"}">>, @@ -110,8 +111,12 @@ t_decode_encode(_) -> <<"int">> => 10, <<"foo">> => <<"bar">> }, - ?assertEqual(JsonText, encode({decode(JsonText)})), - ?assertEqual(JsonMaps, decode(JsonText, [return_maps])). + ?assertEqual(JsonText, encode({decode(JsonText, [])})), + ?assertEqual(JsonMaps, decode(JsonText, [return_maps])), + ?assertEqual( + #{<<"foo">> => #{<<"bar">> => <<"baz">>}}, + decode(encode(#{<<"foo">> => [{<<"bar">>, <<"baz">>}]})) + ). 
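The decode change above flips the default: `decode/1` now returns maps (`[return_maps]`), where it previously returned the jsx-style proplist, and callers that still want the old behaviour pass `[]` explicitly, which is exactly what the updated assertions do. A sketch:

```erlang
#{<<"a">> := 1} = emqx_utils_json:decode(<<"{\"a\":1}">>),    %% new default: maps
[{<<"a">>, 1}] = emqx_utils_json:decode(<<"{\"a\":1}">>, []), %% old jsx-style result
true = emqx_utils_json:is_json(<<"[1,2,3]">>),
false = emqx_utils_json:is_json(<<"not json">>).
```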
t_safe_decode_encode(_) -> safe_encode_decode(null), @@ -123,16 +128,20 @@ t_safe_decode_encode(_) -> 1.25 = safe_encode_decode(1.25), [] = safe_encode_decode([]), [true, 1] = safe_encode_decode([true, 1]), - [{}] = decode(encode([{}])), + [{}] = decode(encode([{}]), []), [{<<"foo">>, <<"bar">>}] = safe_encode_decode([{foo, bar}]), [{<<"foo">>, <<"bar">>}] = safe_encode_decode([{<<"foo">>, <<"bar">>}]), [[{<<"foo">>, <<"bar">>}]] = safe_encode_decode([[{<<"foo">>, <<"bar">>}]]), - {ok, Json} = emqx_json:safe_encode(#{<<"foo">> => <<"bar">>}), - {ok, #{<<"foo">> := <<"bar">>}} = emqx_json:safe_decode(Json, [return_maps]). + {ok, Json} = emqx_utils_json:safe_encode(#{<<"foo">> => <<"bar">>}), + {ok, #{<<"foo">> := <<"bar">>}} = emqx_utils_json:safe_decode(Json, [return_maps]). safe_encode_decode(Term) -> - {ok, Json} = emqx_json:safe_encode(Term), - case emqx_json:safe_decode(Json) of + {ok, Json} = emqx_utils_json:safe_encode(Term), + case emqx_utils_json:safe_decode(Json, []) of {ok, {NTerm}} -> NTerm; {ok, NTerm} -> NTerm end. + +t_is_json(_) -> + ?assert(emqx_utils_json:is_json(<<"{}">>)), + ?assert(not emqx_utils_json:is_json(<<"foo">>)). diff --git a/apps/emqx/test/emqx_map_lib_tests.erl b/apps/emqx_utils/test/emqx_utils_maps_tests.erl similarity index 76% rename from apps/emqx/test/emqx_map_lib_tests.erl rename to apps/emqx_utils/test/emqx_utils_maps_tests.erl index 894811d7c..506851f0a 100644 --- a/apps/emqx/test/emqx_map_lib_tests.erl +++ b/apps/emqx_utils/test/emqx_utils_maps_tests.erl @@ -14,7 +14,7 @@ %% limitations under the License. %%-------------------------------------------------------------------- --module(emqx_map_lib_tests). +-module(emqx_utils_maps_tests). -include_lib("eunit/include/eunit.hrl"). best_effort_recursive_sum_test_() -> @@ -22,21 +22,21 @@ best_effort_recursive_sum_test_() -> [ ?_assertEqual( #{foo => 3}, - emqx_map_lib:best_effort_recursive_sum(#{foo => 1}, #{foo => 2}, DummyLogger) + emqx_utils_maps:best_effort_recursive_sum(#{foo => 1}, #{foo => 2}, DummyLogger) ), ?_assertEqual( #{foo => 3, bar => 6.0}, - emqx_map_lib:best_effort_recursive_sum( + emqx_utils_maps:best_effort_recursive_sum( #{foo => 1, bar => 2.0}, #{foo => 2, bar => 4.0}, DummyLogger ) ), ?_assertEqual( #{foo => 1, bar => 2}, - emqx_map_lib:best_effort_recursive_sum(#{foo => 1}, #{bar => 2}, DummyLogger) + emqx_utils_maps:best_effort_recursive_sum(#{foo => 1}, #{bar => 2}, DummyLogger) ), ?_assertEqual( #{foo => #{bar => 42}}, - emqx_map_lib:best_effort_recursive_sum( + emqx_utils_maps:best_effort_recursive_sum( #{foo => #{bar => 2}}, #{foo => #{bar => 40}}, DummyLogger ) ), @@ -45,7 +45,9 @@ best_effort_recursive_sum_test_() -> Logger = fun(What) -> Self ! 
{log, What} end, ?assertEqual( #{foo => 1, bar => 2}, - emqx_map_lib:best_effort_recursive_sum(#{foo => 1, bar => 2}, #{bar => bar}, Logger) + emqx_utils_maps:best_effort_recursive_sum( + #{foo => 1, bar => 2}, #{bar => bar}, Logger + ) ), receive {log, Log} -> @@ -55,55 +57,55 @@ best_effort_recursive_sum_test_() -> end, ?_assertEqual( #{}, - emqx_map_lib:best_effort_recursive_sum( + emqx_utils_maps:best_effort_recursive_sum( #{foo => foo}, #{foo => bar}, DummyLogger ) ), ?_assertEqual( #{foo => 1}, - emqx_map_lib:best_effort_recursive_sum( + emqx_utils_maps:best_effort_recursive_sum( #{foo => 1}, #{foo => bar}, DummyLogger ) ), ?_assertEqual( #{foo => 1}, - emqx_map_lib:best_effort_recursive_sum( + emqx_utils_maps:best_effort_recursive_sum( #{foo => bar}, #{foo => 1}, DummyLogger ) ), ?_assertEqual( #{foo => #{bar => 1}}, - emqx_map_lib:best_effort_recursive_sum( + emqx_utils_maps:best_effort_recursive_sum( #{foo => #{bar => 1}}, #{foo => 1}, DummyLogger ) ), ?_assertEqual( #{foo => #{bar => 1}}, - emqx_map_lib:best_effort_recursive_sum( + emqx_utils_maps:best_effort_recursive_sum( #{foo => 1}, #{foo => #{bar => 1}}, DummyLogger ) ), ?_assertEqual( #{foo => #{bar => 1}}, - emqx_map_lib:best_effort_recursive_sum( + emqx_utils_maps:best_effort_recursive_sum( #{foo => 1, bar => ignored}, #{foo => #{bar => 1}}, DummyLogger ) ), ?_assertEqual( #{foo => #{bar => 2}, bar => #{foo => 1}}, - emqx_map_lib:best_effort_recursive_sum( + emqx_utils_maps:best_effort_recursive_sum( #{foo => 1, bar => #{foo => 1}}, #{foo => #{bar => 2}, bar => 2}, DummyLogger ) ), ?_assertEqual( #{foo => #{bar => 2}, bar => #{foo => 1}}, - emqx_map_lib:best_effort_recursive_sum( + emqx_utils_maps:best_effort_recursive_sum( #{foo => #{bar => 2}, bar => 2}, #{foo => 1, bar => #{foo => 1}}, DummyLogger ) ), ?_assertEqual( #{foo => #{bar => #{}}}, - emqx_map_lib:best_effort_recursive_sum( + emqx_utils_maps:best_effort_recursive_sum( #{foo => #{bar => #{foo => []}}}, #{foo => 1}, DummyLogger ) ) diff --git a/apps/emqx/test/props/prop_emqx_json.erl b/apps/emqx_utils/test/props/prop_emqx_utils_json.erl similarity index 97% rename from apps/emqx/test/props/prop_emqx_json.erl rename to apps/emqx_utils/test/props/prop_emqx_utils_json.erl index 2bc079634..0be1508da 100644 --- a/apps/emqx/test/props/prop_emqx_json.erl +++ b/apps/emqx_utils/test/props/prop_emqx_utils_json.erl @@ -14,10 +14,10 @@ %% limitations under the License. %%-------------------------------------------------------------------- --module(prop_emqx_json). +-module(prop_emqx_utils_json). -import( - emqx_json, + emqx_utils_json, [ decode/1, decode/2, @@ -66,7 +66,7 @@ prop_object_proplist_to_proplist() -> begin {ok, J} = safe_encode(T), {ok, T} = safe_decode(J), - T = decode(encode(T)), + T = decode(encode(T), []), true end ). @@ -108,7 +108,7 @@ prop_object_map_to_proplist() -> T = to_list(T0), {ok, J} = safe_encode(T0), {ok, T} = safe_decode(J), - T = decode(encode(T0)), + T = decode(encode(T0), []), true end ). diff --git a/bin/emqx b/bin/emqx index c5bf88149..07684af8e 100755 --- a/bin/emqx +++ b/bin/emqx @@ -76,6 +76,12 @@ logwarn() { fi } +logdebug() { + if [ "$DEBUG" -eq 1 ]; then + echo "DEBUG: $*" + fi +} + die() { set +x logerr "$1" @@ -85,7 +91,7 @@ die() { assert_node_alive() { if ! relx_nodetool "ping" > /dev/null; then - die "node_is_not_running!" 
1 + exit 1 fi } @@ -153,13 +159,13 @@ usage() { echo "Print EMQX installation root dir" ;; eval) - echo "Evaluate an Erlang or Elixir expression in the EMQX node" + echo "Evaluate an Erlang expression in the EMQX node." ;; - eval-erl) - echo "Evaluate an Erlang expression in the EMQX node, even on Elixir node" + eval-ex) + echo "Evaluate an Elixir expression in the EMQX node. Only applies to Elixir node" ;; versions) - echo "List installed EMQX versions and their status" + echo "List installed EMQX release versions and their status" ;; unpack) echo "Usage: $REL_NAME unpack [VERSION]" @@ -217,12 +223,12 @@ usage() { echo " ctl: Administration commands, execute '$REL_NAME ctl help' for more details" echo '' echo "More:" - echo " Shell attach: remote_console | attach" - echo " Up/Down-grade: upgrade | downgrade | install | uninstall" - echo " Install info: ertspath | root_dir" - echo " Runtime info: pid | ping | versions" + echo " Shell attach: remote_console | attach" +# echo " Up/Down-grade: upgrade | downgrade | install | uninstall | versions" # TODO enable when supported + echo " Install Info: ertspath | root_dir" + echo " Runtime Status: pid | ping" echo " Validate Config: check_config" - echo " Advanced: console_clean | escript | rpc | rpcterms | eval | eval-erl" + echo " Advanced: console_clean | escript | rpc | rpcterms | eval | eval-ex" echo '' echo "Execute '$REL_NAME COMMAND help' for more information" ;; @@ -230,6 +236,7 @@ usage() { } COMMAND="${1:-}" +GREP='grep --color=never' if [ -z "$COMMAND" ]; then usage 'help' @@ -297,8 +304,10 @@ if [ "$ES" -ne 0 ]; then fi # Make sure log directory exists -mkdir -p "$RUNNER_LOG_DIR" +mkdir -p "$EMQX_LOG_DIR" +# turn off debug as this is static +set +x COMPATIBILITY_CHECK=' io:format("BEAM_OK~n", []), try @@ -321,56 +330,47 @@ COMPATIBILITY_CHECK=' end, halt(0). ' +[ "$DEBUG" -eq 1 ] && set -x compatiblity_info() { # RELEASE_LIB is used by Elixir # set crash-dump bytes to zero to ensure no crash dump is generated when erl crashes env ERL_CRASH_DUMP_BYTES=0 "$BINDIR/$PROGNAME" \ -noshell \ - -boot_var RELEASE_LIB "$ERTS_LIB_DIR/lib" \ -boot "$REL_DIR/start_clean" \ + -boot_var RELEASE_LIB "$ERTS_LIB_DIR/lib" \ -eval "$COMPATIBILITY_CHECK" } # Collect Erlang/OTP runtime sanity and compatibility in one go -if [ "$IS_BOOT_COMMAND" = 'yes' ]; then +maybe_use_portable_dynlibs() { # Read BUILD_INFO early as the next commands may mess up the shell BUILD_INFO="$(cat "${REL_DIR}/BUILD_INFO")" COMPATIBILITY_INFO="$(compatiblity_info 2>/dev/null || true)" - if ! (echo -e "$COMPATIBILITY_INFO" | grep -q 'CRYPTO_OK'); then + if ! (echo -e "$COMPATIBILITY_INFO" | $GREP -q 'CRYPTO_OK'); then ## failed to start, might be due to missing libs, try to be portable export LD_LIBRARY_PATH="${LD_LIBRARY_PATH:-$DYNLIBS_DIR}" if [ "$LD_LIBRARY_PATH" != "$DYNLIBS_DIR" ]; then export LD_LIBRARY_PATH="$DYNLIBS_DIR:$LD_LIBRARY_PATH" fi ## Turn off debug, because COMPATIBILITY_INFO needs to capture stderr - set +x COMPATIBILITY_INFO="$(compatiblity_info 2>&1 || true)" - if ! (echo -e "$COMPATIBILITY_INFO" | grep -q 'BEAM_OK'); then + if ! (echo -e "$COMPATIBILITY_INFO" | $GREP -q 'BEAM_OK'); then ## not able to start beam.smp - set +x logerr "$COMPATIBILITY_INFO" logerr "Please ensure it is running on the correct platform:" logerr "$BUILD_INFO" logerr "Version=$REL_VSN" logerr "Required dependencies: openssl-1.1.1 (libcrypto), libncurses and libatomic1" exit 1 - elif ! (echo -e "$COMPATIBILITY_INFO" | grep -q 'CRYPTO_OK'); then + elif ! 
(echo -e "$COMPATIBILITY_INFO" | $GREP -q 'CRYPTO_OK'); then ## not able to start crypto app - set +x logerr "$COMPATIBILITY_INFO" exit 2 fi - logerr "Using libs from '${DYNLIBS_DIR}' due to missing from the OS." + logwarn "Using libs from '${DYNLIBS_DIR}' due to missing from the OS." fi - [ "$DEBUG" -eq 1 ] && set -x -fi - -# Warn the user if ulimit -n is less than 1024 -ULIMIT_F=$(ulimit -n) -if [ "$ULIMIT_F" -lt 1024 ]; then - logwarn "ulimit -n is ${ULIMIT_F}; 1024 is the recommended minimum." -fi +} SED_REPLACE="sed -i " case $(sed --help 2>&1) in @@ -396,9 +396,7 @@ relx_get_pid() { remsh() { # Generate a unique id used to allow multiple remsh to the same node # transparently - id="remsh$(relx_gen_id)-${NAME}" - # Get the node's ticktime so that we use the same thing. - TICKTIME="$(relx_nodetool rpcterms net_kernel get_net_ticktime)" + id="remsh$(gen_node_id)-${NAME}" # shellcheck disable=SC2086 # Setup remote shell command to control node @@ -426,7 +424,7 @@ remsh() { } # Generate a random id -relx_gen_id() { +gen_node_id() { od -t u -N 4 /dev/urandom | head -n1 | awk '{print $2 % 1000}' } @@ -437,9 +435,8 @@ call_nodetool() { # Control a node relx_nodetool() { command="$1"; shift - ERL_FLAGS="${ERL_FLAGS:-} $EPMD_ARGS" \ - call_nodetool "$NAME_TYPE" "$NAME" \ - -setcookie "$COOKIE" "$command" "$@" + ERL_FLAGS="${ERL_FLAGS:-} $EPMD_ARGS -setcookie $COOKIE" \ + call_nodetool "$NAME_TYPE" "$NAME" "$command" "$@" } call_hocon() { @@ -447,15 +444,52 @@ call_hocon() { || die "call_hocon_failed: $*" $? } +find_emqx_process() { + ## Find the running node from 'ps -ef' + ## * The grep args like '[e]mqx' but not 'emqx' is to avoid greping the grep command itself + ## * The running 'remsh' and 'nodetool' processes must be excluded + if [ -n "${EMQX_NODE__NAME:-}" ]; then + # if node name is provided, filter by node name + # shellcheck disable=SC2009 + ps -ef | $GREP '[e]mqx' | $GREP -v -E '(remsh|nodetool)' | $GREP -E "\s-s?name\s${EMQX_NODE__NAME}" | $GREP -oE "\-[r]oot ${RUNNER_ROOT_DIR}.*" || true + else + # shellcheck disable=SC2009 + ps -ef | $GREP '[e]mqx' | $GREP -v -E '(remsh|nodetool)' | $GREP -oE "\-[r]oot ${RUNNER_ROOT_DIR}.*" || true + fi +} + ## Resolve boot configs in a batch ## This is because starting the Erlang beam with all modules loaded ## and parsing HOCON config + environment variables is a non-trivial task -CONF_KEYS=( 'node.data_dir' 'node.name' 'node.cookie' 'node.db_backend' 'cluster.proto_dist' ) +CONF_KEYS=( 'node.data_dir' 'node.name' 'node.cookie' 'node.db_backend' 'cluster.proto_dist' 'node.dist_net_ticktime' ) if [ "$IS_ENTERPRISE" = 'yes' ]; then CONF_KEYS+=( 'license.key' ) fi +## To be backward compatible, read and then unset EMQX_NODE_NAME +if [ -n "${EMQX_NODE_NAME:-}" ]; then + export EMQX_NODE__NAME="${EMQX_NODE_NAME}" + unset EMQX_NODE_NAME +fi + +# Turn off debug as the ps output can be quite noisy +set +x + +PS_LINE="$(find_emqx_process)" +logdebug "PS_LINE=$PS_LINE" +RUNNING_NODES_COUNT="$(echo -e "$PS_LINE" | sed '/^\s*$/d' | wc -l)" +[ "$RUNNING_NODES_COUNT" -gt 1 ] && logdebug "More than one running node found: count=$RUNNING_NODES_COUNT" + if [ "$IS_BOOT_COMMAND" = 'yes' ]; then + if [ "$RUNNING_NODES_COUNT" -gt 0 ] && [ "$COMMAND" != 'check_config' ]; then + running_node_name=$(echo -e "$PS_LINE" | $GREP -oE "\s-s?name.*" | awk '{print $2}' || true) + if [ -n "$running_node_name" ] && [ "$running_node_name" = "${EMQX_NODE__NAME:-}" ]; then + echo "Node ${running_node_name} is already running!" 
+ exit 1 + fi + fi + [ -f "$EMQX_ETC_DIR"/emqx.conf ] || die "emqx.conf is not found in $EMQX_ETC_DIR" 1 + maybe_use_portable_dynlibs if [ "${EMQX_BOOT_CONFIGS:-}" = '' ]; then EMQX_BOOT_CONFIGS="$(call_hocon -s "$SCHEMA_MOD" -c "$EMQX_ETC_DIR"/emqx.conf multi_get "${CONF_KEYS[@]}")" ## export here so the 'console' command recursively called from @@ -463,37 +497,73 @@ if [ "$IS_BOOT_COMMAND" = 'yes' ]; then export EMQX_BOOT_CONFIGS fi else - # For non-boot commands, we try to get data_dir and ssl_dist_optfile from 'ps -ef' output - # shellcheck disable=SC2009 - PS_LINE="$(ps -ef | grep "\-[r]oot $RUNNER_ROOT_DIR" || true)" - if [ "$(echo -e "$PS_LINE" | wc -l)" -eq 1 ]; then - ## only one emqx node is running - ## strip 'emqx_data_dir ' and ' --' because the dir in between may contain spaces - DATA_DIR="$(echo -e "$PS_LINE" | grep -oE "\-emqx_data_dir.*" | sed -E 's#.+emqx_data_dir[[:blank:]]##g' | sed -E 's#[[:blank:]]--$##g' || true)" - if [ "$DATA_DIR" = '' ]; then - ## this should not happen unless -emqx_data_dir is not set - die "node_is_not_running!" 1 - fi - # get ssl_dist_optfile option - SSL_DIST_OPTFILE="$(echo -e "$PS_LINE" | grep -oE '\-ssl_dist_optfile\s.+\s' | awk '{print $2}' || true)" + # For non-boot commands, we need below runtime facts to connect to the running node: + # 1. The running node name; + # 2. The Erlang cookie in use by the running node name; + # 3. SSL options if the node is using TLS for Erlang distribution; + # 4. Erlang kernel application's net_ticktime config. + # + # There are 3 sources of truth to get those runtime information. + # Listed in the order of preference: + # 1. The boot command (which can be inspected from 'ps -ef' command output) + # 2. The generated vm.