chore: Sync master

commit b5443c2981

@@ -1,9 +1,15 @@
MYSQL_TAG=8
REDIS_TAG=6
REDIS_TAG=7.0
MONGO_TAG=5
PGSQL_TAG=13
LDAP_TAG=2.4.50
INFLUXDB_TAG=2.5.0
TDENGINE_TAG=3.0.2.4
DYNAMO_TAG=1.21.0
CASSANDRA_TAG=3.11.6
OPENTS_TAG=9aa7f88

MS_IMAGE_ADDR=mcr.microsoft.com/mssql/server
SQLSERVER_TAG=2019-CU19-ubuntu-20.04

TARGET=emqx/emqx
EMQX_TAG=build-alpine-amd64
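
These tags feed the docker-compose files further down. A minimal sketch of a one-off override, assuming docker compose is invoked so that these values are present in the environment (hypothetical invocation, not part of this commit):

```bash
# Start only the Redis single-node TCP service with a tag overriding
# the one pinned in this .env file.
REDIS_TAG=7.0 docker compose -f .ci/docker-compose-file/docker-compose-redis-single-tcp.yaml up -d
```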

@@ -1,64 +0,0 @@
.PHONY: help up down ct ct-all bash run

define usage
make -f .ci/docker-compose-file/Makefile.local up
make -f .ci/docker-compose-file/Makefile.local ct CONTAINER=erlang SUITE=apps/emqx_authn/test/emqx_authn_mnesia_SUITE.erl
make -f .ci/docker-compose-file/Makefile.local down
endef
export usage

help:
	@echo "$$usage"

up:
	env \
	MYSQL_TAG=8 \
	REDIS_TAG=6 \
	MONGO_TAG=5 \
	PGSQL_TAG=13 \
	docker compose \
	-f .ci/docker-compose-file/docker-compose.yaml \
	-f .ci/docker-compose-file/docker-compose-mongo-single-tcp.yaml \
	-f .ci/docker-compose-file/docker-compose-mongo-single-tls.yaml \
	-f .ci/docker-compose-file/docker-compose-mysql-tcp.yaml \
	-f .ci/docker-compose-file/docker-compose-mysql-tls.yaml \
	-f .ci/docker-compose-file/docker-compose-pgsql-tcp.yaml \
	-f .ci/docker-compose-file/docker-compose-pgsql-tls.yaml \
	-f .ci/docker-compose-file/docker-compose-redis-single-tcp.yaml \
	-f .ci/docker-compose-file/docker-compose-redis-single-tls.yaml \
	-f .ci/docker-compose-file/docker-compose-redis-sentinel-tcp.yaml \
	-f .ci/docker-compose-file/docker-compose-redis-sentinel-tls.yaml \
	-f .ci/docker-compose-file/docker-compose-redis-cluster-tcp.yaml \
	-f .ci/docker-compose-file/docker-compose-redis-cluster-tls.yaml \
	-f .ci/docker-compose-file/docker-compose-toxiproxy.yaml \
	up -d --build --remove-orphans

down:
	docker compose \
	-f .ci/docker-compose-file/docker-compose.yaml \
	-f .ci/docker-compose-file/docker-compose-mongo-single-tcp.yaml \
	-f .ci/docker-compose-file/docker-compose-mongo-single-tls.yaml \
	-f .ci/docker-compose-file/docker-compose-mysql-tcp.yaml \
	-f .ci/docker-compose-file/docker-compose-mysql-tls.yaml \
	-f .ci/docker-compose-file/docker-compose-pgsql-tcp.yaml \
	-f .ci/docker-compose-file/docker-compose-pgsql-tls.yaml \
	-f .ci/docker-compose-file/docker-compose-redis-single-tcp.yaml \
	-f .ci/docker-compose-file/docker-compose-redis-single-tls.yaml \
	-f .ci/docker-compose-file/docker-compose-redis-sentinel-tcp.yaml \
	-f .ci/docker-compose-file/docker-compose-redis-sentinel-tls.yaml \
	-f .ci/docker-compose-file/docker-compose-redis-cluster-tcp.yaml \
	-f .ci/docker-compose-file/docker-compose-redis-cluster-tls.yaml \
	-f .ci/docker-compose-file/docker-compose-toxiproxy.yaml \
	down --remove-orphans

ct:
	docker exec -i "$(CONTAINER)" bash -c "rebar3 ct --name 'test@127.0.0.1' -v --suite $(SUITE)"

ct-all:
	docker exec -i "$(CONTAINER)" bash -c "make ct"

bash:
	docker exec -it "$(CONTAINER)" bash

run:
	docker exec -it "$(CONTAINER)" bash -c "make run";

@@ -0,0 +1,4 @@
ARG CASSANDRA_TAG=3.11.6
FROM cassandra:${CASSANDRA_TAG}
COPY cassandra.yaml /etc/cassandra/cassandra.yaml
CMD ["cassandra", "-f"]
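
Built in isolation, the image above needs only the build arg. A sketch, assuming the Dockerfile and cassandra.yaml live in a ./cassandra build context as in the compose file further down:

```bash
# Build the patched Cassandra image with the tag pinned in .env (3.11.6).
docker build --build-arg CASSANDRA_TAG=3.11.6 -t emqx-cassandra ./cassandra
```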

File diff suppressed because it is too large

@@ -0,0 +1,23 @@
Certificate and Key files for testing

## Cassandra (v3.x)

### How to convert server PEM to JKS Format

1. Convert server.crt and server.key to server.p12

```bash
openssl pkcs12 -export -in server.crt -inkey server.key -out server.p12 -name "certificate"
```

2. Convert server.p12 to server.jks

```bash
keytool -importkeystore -srckeystore server.p12 -srcstoretype pkcs12 -destkeystore server.jks
```

### How to convert CA PEM certificate to truststore.jks

```bash
keytool -import -file ca.pem -keystore truststore.jks
```
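
Either store can be sanity-checked after conversion; a sketch using keytool's list mode (the store password is whatever was chosen during the import steps above):

```bash
# Each command should report exactly one entry if the conversion succeeded.
keytool -list -v -keystore server.jks
keytool -list -v -keystore truststore.jks
```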

@@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAzs74tdftT7xGMGXQSoX/nnFkFAOjNtEVOI3bChzR+w6Xwo8Z
OUiOuOjynKvsJeltdmc0L+cbHZh7j+aHuAqVYxavqaqhFneF0f03t17qju9AixoV
JXgNT3ru56aZFa6Ov6NhfZfRirGnbNrg2RhuNeYZ4TYLH7iMR36exNFP83glXwXM
inMd1tsHL7xHLf3KjCbkusA5ncFWcpIUtpuWVn9aAE402dN7BJWfAbkQ4Y3VToR1
P/T+W6WBldv0i2WlNbfiuAzuapA3EzJwoyTrG2Qyz7EtXM8XZdOZ6oJmW4s7c4V/
FBT5knNtmXTt78xBBlIPFas5BAJIeV4eADx9MwIDAQABAoIBAQCZTvcynpJuxIxn
vmItjK5U/4wIBjZNIawQk6BoG7tR2JyJ/1jcjTw4OX/4wr450JRz7MfUJweD5hDb
OTMtLLNXlG6+YR4vsIUEiSlvhy5srVH0jG5Wq2t6mxBVq7vaRd/OkshnuU79+Pq7
iHqclS7GSACxYkXWyxE6wtPh5aTWP8joK/LvYFiOqKPilUnLZ4hBhmL7CRUCZ0ZA
QGNyEhlmiAL+LNKW2RLXPBxlKX21X78ahUQmkkTM0lBK9x6hm4dD3SpLqmZyQQ9M
UfiMbU6XOYlDva/USZzrvTDlRf9uCG9QOsZzngP1aIy8Cq3QHECOeMIPO9WQLMll
SyY+SpyJAoGBAP4fhnbDpQC6ekd9TNoU9GE/FNNNGKLh82GDgnGcWU/oIzv8GlaR
rkEHTb6aRoPpjTxWIjJpScs9kycC+7N3oNo9rub4s5UvllI+EgQ95+j/5fnZx6gO
la8ousLy1hTYu9C0nTWdTV3YtfC0l0opn7Friv5QafNmhSn74DqrH0BHAoGBANBV
/NhBDAH1PHzYA+XuNLYTLv56Q4osmoen17nPnFNWb1TtWblzb0yWp86GGDFcs8CZ
eH0mXCRUzGMSWtOHe4CbIm2brAYXuL2t6+DZ1A22gsnW5avNrosZRS7eN7BE7DDj
5cp9+Es9UWnArzJU7jSWwAtA6o47WHfHU/pqRB21AoGAGx6eKPqEF2nPNuXmV7e4
xNAIluw5XtiiMpvoRdubpG1vpS0oWmi9oe73mwm30MgR7Ih8qciWuXvewmENH3/6
yI+gpMGR2K/1aN166rz4jOMSVfGp3wN/cev00m0774mZsZI03M3mvccs031ST/XV
Nwf1E2Ldi747I9nfeiNc+G0CgYEAslFHD1ntiyd6VGkYPQ978nPM/2dqs7OluILC
tHmslfAfbpOQ/ph9JRK2IqDHyEhOWoWBiazxpO8n2Yx2TSNjZBpkh2h8/uIC7+cT
Q+tuAya6H0ReZISx5sEEZC8zfx4fA2Gs53qWsN+U9W1FB1GGaWC2k2tG1+KXwD3N
9UJLdxkCgYBB96dsfT7nXmy0JLUz0rQ4umBje6H5uvuaevWdVMEptHB+O7+6CAse
OVwqlFLQ4QC7s4/P9FQwfr/0uMRInB1aC043Haa1LbiRcRIlSuBDUezK5xidUbz+
uB/ABkwwEuqW3Ns1+QieJyyfoNYKZ2v0RtYxBuieKOpUCm3oNFZRWg==
-----END RSA PRIVATE KEY-----

@@ -0,0 +1,25 @@
-----BEGIN CERTIFICATE-----
MIIEMjCCAhoCFCOrAvLNRztbFFcN0zrCQXoj73cHMA0GCSqGSIb3DQEBCwUAMDQx
EjAQBgNVBAoMCUVNUVggVGVzdDEeMBwGA1UEAwwVQ2VydGlmaWNhdGUgQXV0aG9y
aXR5MB4XDTIzMDMxNzA5MzgzMVoXDTMzMDMxNDA5MzgzMVowdzELMAkGA1UEBhMC
U0UxEjAQBgNVBAgMCVN0b2NraG9sbTESMBAGA1UEBwwJU3RvY2tob2xtMRIwEAYD
VQQKDAlNeU9yZ05hbWUxGDAWBgNVBAsMD015U2VydmljZUNsaWVudDESMBAGA1UE
AwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzs74
tdftT7xGMGXQSoX/nnFkFAOjNtEVOI3bChzR+w6Xwo8ZOUiOuOjynKvsJeltdmc0
L+cbHZh7j+aHuAqVYxavqaqhFneF0f03t17qju9AixoVJXgNT3ru56aZFa6Ov6Nh
fZfRirGnbNrg2RhuNeYZ4TYLH7iMR36exNFP83glXwXMinMd1tsHL7xHLf3KjCbk
usA5ncFWcpIUtpuWVn9aAE402dN7BJWfAbkQ4Y3VToR1P/T+W6WBldv0i2WlNbfi
uAzuapA3EzJwoyTrG2Qyz7EtXM8XZdOZ6oJmW4s7c4V/FBT5knNtmXTt78xBBlIP
Fas5BAJIeV4eADx9MwIDAQABMA0GCSqGSIb3DQEBCwUAA4ICAQBHgfJgMjTgWZXG
eyzIVxaqzWTLxrT7zPy09Mw4qsAl1TfWg9/r8nuskq4bjBQuKm0k9H0HQXz//eFC
Qn85qTHyAmZok6c4ljO2P+kTIl3nkKk5zudmeCTy3W9YBdyWvDXQ/GhbywIfO+1Y
fYA82I5rXVg4c9fUVTNczUFyDNcZzoJoqCS8jwFDtNR0N/fptJN14j8pnYvNV+4c
hZ+pcnhSoz7dD8WjyYCc/QCajJdTyb15i072HxuGmhwltjnwIE/2xfeXCCeUTzsJ
8h4/ABRu9VEqjqDQHepXIflYuVhU38SL0f4ly7neMXmytAbXwGLVM+ME81HG60Bw
8hkfSwKBbEkhUmD6+V1bdUz14I6HjWJt/INtFU+O+MYZbIFt4ep9GKLV3nk97CyL
fwDv5b4WXdC68iWMZqSrADAXr+VG3DgHqpNItj0XmhY6ihmt5tA3Z6IZJj45TShA
vRqTCx3Hf6EO3zf4KCrzaPSSSfVLnGKftA/6oz3bl8EK2e2M44lOspRk4l9k+iBR
sfHPmpiWY0hIiFtd3LD/uGDSBcGkKjU/fLvJZXJpVXwmT9pmK9LzkAPOK1rr97e9
esHqwe1bo3z7IdeREZ0wdxqGL3BNpm4f1NaIzV/stX+vScau0AyFYXzumjeBIpKa
Gt0A+dZnUfWG6qn5NiRENXxFQSppaA==
-----END CERTIFICATE-----

Binary file not shown.
Binary file not shown.
Binary file not shown.

@@ -0,0 +1,678 @@
<?xml version="1.0"?>
<!--
  NOTE: User and query level settings are set up in "users.xml" file.
  If you have accidentally specified user-level settings here, server won't start.
  You can either move the settings to the right place inside "users.xml" file
  or add <skip_check_for_incorrect_settings>1</skip_check_for_incorrect_settings> here.
-->
<yandex>
    <logger>
        <!-- Possible levels: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105 -->
        <level>trace</level>
        <log>/var/log/clickhouse-server/clickhouse-server.log</log>
        <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
        <size>1000M</size>
        <count>10</count>
        <!-- <console>1</console> --> <!-- Default behavior is autodetection (log to console if not daemon mode and is tty) -->

        <!-- Per level overrides (legacy):

          For example to suppress logging of the ConfigReloader you can use:
          NOTE: levels.logger is reserved, see below.
        -->
        <!--
        <levels>
          <ConfigReloader>none</ConfigReloader>
        </levels>
        -->

        <!-- Per level overrides:

          For example to suppress logging of the RBAC for default user you can use:
          (But please note that the logger name may be changed from version to version, even after minor upgrade)
        -->
        <!--
        <levels>
          <logger>
            <name>ContextAccess (default)</name>
            <level>none</level>
          </logger>
          <logger>
            <name>DatabaseOrdinary (test)</name>
            <level>none</level>
          </logger>
        </levels>
        -->
    </logger>

    <send_crash_reports>
        <!-- Changing <enabled> to true allows sending crash reports to -->
        <!-- the ClickHouse core developers team via Sentry https://sentry.io -->
        <!-- Doing so at least in pre-production environments is highly appreciated -->
        <enabled>false</enabled>
        <!-- Change <anonymize> to true if you don't feel comfortable attaching the server hostname to the crash report -->
        <anonymize>false</anonymize>
        <!-- Default endpoint should be changed to different Sentry DSN only if you have -->
        <!-- some in-house engineers or hired consultants who're going to debug ClickHouse issues for you -->
        <endpoint>https://6f33034cfe684dd7a3ab9875e57b1c8d@o388870.ingest.sentry.io/5226277</endpoint>
    </send_crash_reports>

    <!--display_name>production</display_name--> <!-- It is the name that will be shown in the client -->
    <http_port>8123</http_port>
    <tcp_port>9000</tcp_port>
    <mysql_port>9004</mysql_port>
    <!-- For HTTPS and SSL over native protocol. -->
    <!--
    <https_port>8443</https_port>
    <tcp_port_secure>9440</tcp_port_secure>
    -->
    <!-- Used with https_port and tcp_port_secure. Full ssl options list: https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h#L71 -->
    <openSSL>
        <server> <!-- Used for https server AND secure tcp port -->
            <!-- openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt -->
            <certificateFile>/etc/clickhouse-server/server.crt</certificateFile>
            <privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile>
            <!-- openssl dhparam -out /etc/clickhouse-server/dhparam.pem 4096 -->
            <dhParamsFile>/etc/clickhouse-server/dhparam.pem</dhParamsFile>
            <verificationMode>none</verificationMode>
            <loadDefaultCAFile>true</loadDefaultCAFile>
            <cacheSessions>true</cacheSessions>
            <disableProtocols>sslv2,sslv3</disableProtocols>
            <preferServerCiphers>true</preferServerCiphers>
        </server>

        <client> <!-- Used for connecting to https dictionary source and secured Zookeeper communication -->
            <loadDefaultCAFile>true</loadDefaultCAFile>
            <cacheSessions>true</cacheSessions>
            <disableProtocols>sslv2,sslv3</disableProtocols>
            <preferServerCiphers>true</preferServerCiphers>
            <!-- Use for self-signed: <verificationMode>none</verificationMode> -->
            <invalidCertificateHandler>
                <!-- Use for self-signed: <name>AcceptCertificateHandler</name> -->
                <name>RejectCertificateHandler</name>
            </invalidCertificateHandler>
        </client>
    </openSSL>

    <!-- Default root page on http[s] server. For example load UI from https://tabix.io/ when opening http://localhost:8123 -->
    <!--
    <http_server_default_response><![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]></http_server_default_response>
    -->

    <!-- Port for communication between replicas. Used for data exchange. -->
    <interserver_http_port>9009</interserver_http_port>

    <!-- Hostname that is used by other replicas to request this server.
      If not specified, then it is determined analogously to 'hostname -f' command.
      This setting could be used to switch replication to another network interface.
    -->
    <!--
    <interserver_http_host>example.yandex.ru</interserver_http_host>
    -->

    <!-- Listen specified host. use :: (wildcard IPv6 address), if you want to accept connections both with IPv4 and IPv6 from everywhere. -->
    <!-- <listen_host>::</listen_host> -->
    <!-- Same for hosts with disabled ipv6: -->
    <!-- <listen_host>0.0.0.0</listen_host> -->

    <!-- Default values - try listen localhost on ipv4 and ipv6: -->
    <!--
    <listen_host>::1</listen_host>
    <listen_host>127.0.0.1</listen_host>
    -->
    <!-- Don't exit if ipv6 or ipv4 unavailable, but listen_host with this protocol specified -->
    <!-- <listen_try>0</listen_try> -->

    <!-- Allow listen on same address:port -->
    <!-- <listen_reuse_port>0</listen_reuse_port> -->

    <!-- <listen_backlog>64</listen_backlog> -->

    <max_connections>4096</max_connections>
    <keep_alive_timeout>3</keep_alive_timeout>

    <!-- Maximum number of concurrent queries. -->
    <max_concurrent_queries>100</max_concurrent_queries>

    <!-- Maximum memory usage (resident set size) for server process.
      Zero value or unset means default. Default is "max_server_memory_usage_to_ram_ratio" of available physical RAM.
      If the value is larger than "max_server_memory_usage_to_ram_ratio" of available physical RAM, it will be cut down.

      The constraint is checked on query execution time.
      If a query tries to allocate memory and the current memory usage plus allocation is greater
      than specified threshold, exception will be thrown.

      It is not practical to set this constraint to small values like just a few gigabytes,
      because memory allocator will keep this amount of memory in caches and the server will deny service of queries.
    -->
    <max_server_memory_usage>0</max_server_memory_usage>

    <!-- Maximum number of threads in the Global thread pool.
      This will default to a maximum of 10000 threads if not specified.
      This setting will be useful in scenarios where there are a large number
      of distributed queries that are running concurrently but are idling most
      of the time, in which case a higher number of threads might be required.
    -->

    <max_thread_pool_size>10000</max_thread_pool_size>

    <!-- On memory constrained environments you may have to set this to value larger than 1.
    -->
    <max_server_memory_usage_to_ram_ratio>10</max_server_memory_usage_to_ram_ratio>

    <!-- Simple server-wide memory profiler. Collect a stack trace at every peak allocation step (in bytes).
      Data will be stored in system.trace_log table with query_id = empty string.
      Zero means disabled.
    -->
    <total_memory_profiler_step>4194304</total_memory_profiler_step>

    <!-- Collect random allocations and deallocations and write them into system.trace_log with 'MemorySample' trace_type.
      The probability is for every alloc/free regardless to the size of the allocation.
      Note that sampling happens only when the amount of untracked memory exceeds the untracked memory limit,
      which is 4 MiB by default but can be lowered if 'total_memory_profiler_step' is lowered.
      You may want to set 'total_memory_profiler_step' to 1 for extra fine grained sampling.
    -->
    <total_memory_tracker_sample_probability>0</total_memory_tracker_sample_probability>

    <!-- Set limit on number of open files (default: maximum). This setting makes sense on Mac OS X because getrlimit() fails to retrieve
      correct maximum value. -->
    <!-- <max_open_files>262144</max_open_files> -->

    <!-- Size of cache of uncompressed blocks of data, used in tables of MergeTree family.
      In bytes. Cache is single for server. Memory is allocated only on demand.
      Cache is used when 'use_uncompressed_cache' user setting turned on (off by default).
      Uncompressed cache is advantageous only for very short queries and in rare cases.
    -->
    <uncompressed_cache_size>8589934592</uncompressed_cache_size>

    <!-- Approximate size of mark cache, used in tables of MergeTree family.
      In bytes. Cache is single for server. Memory is allocated only on demand.
      You should not lower this value.
    -->
    <mark_cache_size>5368709120</mark_cache_size>


    <!-- Path to data directory, with trailing slash. -->
    <path>/var/lib/clickhouse/</path>

    <!-- Path to temporary data for processing hard queries. -->
    <tmp_path>/var/lib/clickhouse/tmp/</tmp_path>

    <!-- Policy from the <storage_configuration> for the temporary files.
      If not set <tmp_path> is used, otherwise <tmp_path> is ignored.

      Notes:
      - move_factor is ignored
      - keep_free_space_bytes is ignored
      - max_data_part_size_bytes is ignored
      - you must have exactly one volume in that policy
    -->
    <!-- <tmp_policy>tmp</tmp_policy> -->

    <!-- Directory with user provided files that are accessible by 'file' table function. -->
    <user_files_path>/var/lib/clickhouse/user_files/</user_files_path>

    <!-- Path to folder where users and roles created by SQL commands are stored. -->
    <access_control_path>/var/lib/clickhouse/access/</access_control_path>

    <!-- Path to configuration file with users, access rights, profiles of settings, quotas. -->
    <users_config>/etc/clickhouse-server/users.xml</users_config>

    <!-- Default profile of settings. -->
    <default_profile>default</default_profile>

    <!-- System profile of settings. These settings are used by internal processes (Buffer storage, Distributed DDL worker and so on). -->
    <!-- <system_profile>default</system_profile> -->

    <!-- Default database. -->
    <default_database>default</default_database>

    <!-- Server time zone could be set here.

      Time zone is used when converting between String and DateTime types,
      when printing DateTime in text formats and parsing DateTime from text,
      it is used in date and time related functions, if specific time zone was not passed as an argument.

      Time zone is specified as identifier from IANA time zone database, like UTC or Africa/Abidjan.
      If not specified, system time zone at server startup is used.

      Please note, that server could display time zone alias instead of specified name.
      Example: W-SU is an alias for Europe/Moscow and Zulu is an alias for UTC.
    -->
    <!-- <timezone>Europe/Moscow</timezone> -->

    <!-- You can specify umask here (see "man umask"). Server will apply it on startup.
      Number is always parsed as octal. Default umask is 027 (other users cannot read logs, data files, etc; group can only read).
    -->
    <!-- <umask>022</umask> -->

    <!-- Perform mlockall after startup to lower first queries latency
      and to prevent clickhouse executable from being paged out under high IO load.
      Enabling this option is recommended but will lead to increased startup time for up to a few seconds.
    -->
    <mlock_executable>true</mlock_executable>

    <!-- Configuration of clusters that could be used in Distributed tables.
      https://clickhouse.tech/docs/en/operations/table_engines/distributed/
    -->
    <remote_servers incl="clickhouse_remote_servers" >
        <!-- Test only shard config for testing distributed storage -->
        <test_shard_localhost>
            <shard>
                <!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
                <!-- <internal_replication>false</internal_replication> -->
                <!-- Optional. Shard weight when writing data. Default: 1. -->
                <!-- <weight>1</weight> -->
                <replica>
                    <host>localhost</host>
                    <port>9000</port>
                    <!-- Optional. Priority of the replica for load_balancing. Default: 1 (less value has more priority). -->
                    <!-- <priority>1</priority> -->
                </replica>
            </shard>
        </test_shard_localhost>
        <test_cluster_two_shards_localhost>
            <shard>
                <replica>
                    <host>localhost</host>
                    <port>9000</port>
                </replica>
            </shard>
            <shard>
                <replica>
                    <host>localhost</host>
                    <port>9000</port>
                </replica>
            </shard>
        </test_cluster_two_shards_localhost>
        <test_cluster_two_shards>
            <shard>
                <replica>
                    <host>127.0.0.1</host>
                    <port>9000</port>
                </replica>
            </shard>
            <shard>
                <replica>
                    <host>127.0.0.2</host>
                    <port>9000</port>
                </replica>
            </shard>
        </test_cluster_two_shards>
        <test_shard_localhost_secure>
            <shard>
                <replica>
                    <host>localhost</host>
                    <port>9440</port>
                    <secure>1</secure>
                </replica>
            </shard>
        </test_shard_localhost_secure>
        <test_unavailable_shard>
            <shard>
                <replica>
                    <host>localhost</host>
                    <port>9000</port>
                </replica>
            </shard>
            <shard>
                <replica>
                    <host>localhost</host>
                    <port>1</port>
                </replica>
            </shard>
        </test_unavailable_shard>
    </remote_servers>

    <!-- The list of hosts allowed to use in URL-related storage engines and table functions.
      If this section is not present in configuration, all hosts are allowed.
    -->
    <remote_url_allow_hosts>
        <!-- Host should be specified exactly as in URL. The name is checked before DNS resolution.
          Example: "yandex.ru", "yandex.ru." and "www.yandex.ru" are different hosts.
          If port is explicitly specified in URL, the host:port is checked as a whole.
          If host specified here without port, any port with this host allowed.
          "yandex.ru" -> "yandex.ru:443", "yandex.ru:80" etc. is allowed, but "yandex.ru:80" -> only "yandex.ru:80" is allowed.
          If the host is specified as IP address, it is checked as specified in URL. Example: "[2a02:6b8:a::a]".
          If there are redirects and support for redirects is enabled, every redirect (the Location field) is checked.
        -->

        <!-- Regular expression can be specified. RE2 engine is used for regexps.
          Regexps are not aligned: don't forget to add ^ and $. Also don't forget to escape dot (.) metacharacter
          (forgetting to do so is a common source of error).
        -->
    </remote_url_allow_hosts>

    <!-- If element has 'incl' attribute, then for it's value will be used corresponding substitution from another file.
      By default, path to file with substitutions is /etc/metrika.xml. It could be changed in config in 'include_from' element.
      Values for substitutions are specified in /yandex/name_of_substitution elements in that file.
    -->

    <!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
      Optional. If you don't use replicated tables, you could omit that.

      See https://clickhouse.yandex/docs/en/table_engines/replication/
    -->

    <zookeeper incl="zookeeper-servers" optional="true" />

    <!-- Substitutions for parameters of replicated tables.
      Optional. If you don't use replicated tables, you could omit that.

      See https://clickhouse.yandex/docs/en/table_engines/replication/#creating-replicated-tables
    -->
    <macros incl="macros" optional="true" />


    <!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->
    <builtin_dictionaries_reload_interval>3600</builtin_dictionaries_reload_interval>


    <!-- Maximum session timeout, in seconds. Default: 3600. -->
    <max_session_timeout>3600</max_session_timeout>

    <!-- Default session timeout, in seconds. Default: 60. -->
    <default_session_timeout>60</default_session_timeout>

    <!-- Sending data to Graphite for monitoring. Several sections can be defined. -->
    <!--
      interval - send every X second
      root_path - prefix for keys
      hostname_in_path - append hostname to root_path (default = true)
      metrics - send data from table system.metrics
      events - send data from table system.events
      asynchronous_metrics - send data from table system.asynchronous_metrics
    -->
    <!--
    <graphite>
        <host>localhost</host>
        <port>42000</port>
        <timeout>0.1</timeout>
        <interval>60</interval>
        <root_path>one_min</root_path>
        <hostname_in_path>true</hostname_in_path>

        <metrics>true</metrics>
        <events>true</events>
        <events_cumulative>false</events_cumulative>
        <asynchronous_metrics>true</asynchronous_metrics>
    </graphite>
    <graphite>
        <host>localhost</host>
        <port>42000</port>
        <timeout>0.1</timeout>
        <interval>1</interval>
        <root_path>one_sec</root_path>

        <metrics>true</metrics>
        <events>true</events>
        <events_cumulative>false</events_cumulative>
        <asynchronous_metrics>false</asynchronous_metrics>
    </graphite>
    -->

    <!-- Serve endpoint for Prometheus monitoring. -->
    <!--
      endpoint - metrics path (relative to root, starting with "/")
      port - port to setup server. If not defined or 0 then http_port is used
      metrics - send data from table system.metrics
      events - send data from table system.events
      asynchronous_metrics - send data from table system.asynchronous_metrics
      status_info - send data from different component from CH, ex: Dictionaries status
    -->
    <!--
    <prometheus>
        <endpoint>/metrics</endpoint>
        <port>9363</port>

        <metrics>true</metrics>
        <events>true</events>
        <asynchronous_metrics>true</asynchronous_metrics>
        <status_info>true</status_info>
    </prometheus>
    -->

    <!-- Query log. Used only for queries with setting log_queries = 1. -->
    <query_log>
        <!-- What table to insert data. If the table does not exist, it will be created.
          When query log structure is changed after system update,
          then old table will be renamed and new table will be created automatically.
        -->
        <database>system</database>
        <table>query_log</table>
        <!--
          PARTITION BY expr https://clickhouse.yandex/docs/en/table_engines/custom_partitioning_key/
          Example:
            event_date
            toMonday(event_date)
            toYYYYMM(event_date)
            toStartOfHour(event_time)
        -->
        <partition_by>toYYYYMM(event_date)</partition_by>

        <!-- Instead of partition_by, you can provide full engine expression (starting with ENGINE = ) with parameters,
          Example: <engine>ENGINE = MergeTree PARTITION BY toYYYYMM(event_date) ORDER BY (event_date, event_time) SETTINGS index_granularity = 1024</engine>
        -->

        <!-- Interval of flushing data. -->
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
    </query_log>

    <!-- Trace log. Stores stack traces collected by query profilers.
      See query_profiler_real_time_period_ns and query_profiler_cpu_time_period_ns settings. -->
    <trace_log>
        <database>system</database>
        <table>trace_log</table>

        <partition_by>toYYYYMM(event_date)</partition_by>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
    </trace_log>

    <!-- Query thread log. Has information about all threads participated in query execution.
      Used only for queries with setting log_query_threads = 1. -->
    <query_thread_log>
        <database>system</database>
        <table>query_thread_log</table>
        <partition_by>toYYYYMM(event_date)</partition_by>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
    </query_thread_log>

    <!-- Uncomment if use part log.
      Part log contains information about all actions with parts in MergeTree tables (creation, deletion, merges, downloads).
    <part_log>
        <database>system</database>
        <table>part_log</table>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
    </part_log>
    -->

    <!-- Uncomment to write text log into table.
      Text log contains all information from usual server log but stores it in structured and efficient way.
      The level of the messages that goes to the table can be limited (<level>), if not specified all messages will go to the table.
    <text_log>
        <database>system</database>
        <table>text_log</table>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
        <level></level>
    </text_log>
    -->

    <!-- Metric log contains rows with current values of ProfileEvents, CurrentMetrics collected with "collect_interval_milliseconds" interval. -->
    <metric_log>
        <database>system</database>
        <table>metric_log</table>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
        <collect_interval_milliseconds>1000</collect_interval_milliseconds>
    </metric_log>

    <!--
      Asynchronous metric log contains values of metrics from
      system.asynchronous_metrics.
    -->
    <asynchronous_metric_log>
        <database>system</database>
        <table>asynchronous_metric_log</table>
        <!--
          Asynchronous metrics are updated once a minute, so there is
          no need to flush more often.
        -->
        <flush_interval_milliseconds>60000</flush_interval_milliseconds>
    </asynchronous_metric_log>

    <!-- Parameters for embedded dictionaries, used in Yandex.Metrica.
      See https://clickhouse.yandex/docs/en/dicts/internal_dicts/
    -->

    <!-- Path to file with region hierarchy. -->
    <!-- <path_to_regions_hierarchy_file>/opt/geo/regions_hierarchy.txt</path_to_regions_hierarchy_file> -->

    <!-- Path to directory with files containing names of regions -->
    <!-- <path_to_regions_names_files>/opt/geo/</path_to_regions_names_files> -->


    <!-- Configuration of external dictionaries. See:
      https://clickhouse.yandex/docs/en/dicts/external_dicts/
    -->
    <dictionaries_config>*_dictionary.xml</dictionaries_config>

    <!-- Uncomment if you want data to be compressed 30-100% better.
      Don't do that if you just started using ClickHouse.
    -->
    <compression incl="clickhouse_compression">
        <!--
        <!- - Set of variants. Checked in order. Last matching case wins. If nothing matches, lz4 will be used. - ->
        <case>

            <!- - Conditions. All must be satisfied. Some conditions may be omitted. - ->
            <min_part_size>10000000000</min_part_size>        <!- - Min part size in bytes. - ->
            <min_part_size_ratio>0.01</min_part_size_ratio>   <!- - Min size of part relative to whole table size. - ->

            <!- - What compression method to use. - ->
            <method>zstd</method>
        </case>
        -->
    </compression>

    <!-- Allow to execute distributed DDL queries (CREATE, DROP, ALTER, RENAME) on cluster.
      Works only if ZooKeeper is enabled. Comment it if such functionality isn't required. -->
    <distributed_ddl>
        <!-- Path in ZooKeeper to queue with DDL queries -->
        <path>/clickhouse/task_queue/ddl</path>

        <!-- Settings from this profile will be used to execute DDL queries -->
        <!-- <profile>default</profile> -->
    </distributed_ddl>

    <!-- Settings to fine tune MergeTree tables. See documentation in source code, in MergeTreeSettings.h -->
    <!--
    <merge_tree>
        <max_suspicious_broken_parts>5</max_suspicious_broken_parts>
    </merge_tree>
    -->

    <!-- Protection from accidental DROP.
      If size of a MergeTree table is greater than max_table_size_to_drop (in bytes) then table could not be dropped with any DROP query.
      If you want to delete one table and don't want to change clickhouse-server config, you could create special file <clickhouse-path>/flags/force_drop_table and make DROP once.
      By default max_table_size_to_drop is 50GB; max_table_size_to_drop=0 allows to DROP any tables.
      The same for max_partition_size_to_drop.
      Uncomment to disable protection.
    -->
    <!-- <max_table_size_to_drop>0</max_table_size_to_drop> -->
    <!-- <max_partition_size_to_drop>0</max_partition_size_to_drop> -->

    <!-- Example of parameters for GraphiteMergeTree table engine -->
    <graphite_rollup_example>
        <pattern>
            <regexp>click_cost</regexp>
            <function>any</function>
            <retention>
                <age>0</age>
                <precision>3600</precision>
            </retention>
            <retention>
                <age>86400</age>
                <precision>60</precision>
            </retention>
        </pattern>
        <default>
            <function>max</function>
            <retention>
                <age>0</age>
                <precision>60</precision>
            </retention>
            <retention>
                <age>3600</age>
                <precision>300</precision>
            </retention>
            <retention>
                <age>86400</age>
                <precision>3600</precision>
            </retention>
        </default>
    </graphite_rollup_example>

    <!-- Directory in <clickhouse-path> containing schema files for various input formats.
      The directory will be created if it doesn't exist.
    -->
    <format_schema_path>/var/lib/clickhouse/format_schemas/</format_schema_path>

    <!-- Uncomment to use query masking rules.
      name - name for the rule (optional)
      regexp - RE2 compatible regular expression (mandatory)
      replace - substitution string for sensitive data (optional, by default - six asterisks)
    <query_masking_rules>
        <rule>
            <name>hide SSN</name>
            <regexp>\b\d{3}-\d{2}-\d{4}\b</regexp>
            <replace>000-00-0000</replace>
        </rule>
    </query_masking_rules>
    -->

    <!-- Uncomment to use custom http handlers.
      rules are checked from top to bottom, first match runs the handler
        url - to match request URL, you can use 'regex:' prefix to use regex match (optional)
        methods - to match request method, you can use commas to separate multiple method matches (optional)
        headers - to match request headers, match each child element (child element name is header name), you can use 'regex:' prefix to use regex match (optional)
      handler is request handler
        type - supported types: static, dynamic_query_handler, predefined_query_handler
        query - use with predefined_query_handler type, executes query when the handler is called
        query_param_name - use with dynamic_query_handler type, extracts and executes the value corresponding to the <query_param_name> value in HTTP request params
        status - use with static type, response status code
        content_type - use with static type, response content-type
        response_content - use with static type, response content sent to client; when using the prefix 'file://' or 'config://', the content is read from the file or configuration and sent to the client.

    <http_handlers>
        <rule>
            <url>/</url>
            <methods>POST,GET</methods>
            <headers><pragma>no-cache</pragma></headers>
            <handler>
                <type>dynamic_query_handler</type>
                <query_param_name>query</query_param_name>
            </handler>
        </rule>

        <rule>
            <url>/predefined_query</url>
            <methods>POST,GET</methods>
            <handler>
                <type>predefined_query_handler</type>
                <query>SELECT * FROM system.settings</query>
            </handler>
        </rule>

        <rule>
            <handler>
                <type>static</type>
                <status>200</status>
                <content_type>text/plain; charset=UTF-8</content_type>
                <response_content>config://http_server_default_response</response_content>
            </handler>
        </rule>
    </http_handlers>
    -->

    <!-- Uncomment to disable ClickHouse internal DNS caching. -->
    <!-- <disable_internal_dns_cache>1</disable_internal_dns_cache> -->
</yandex>

@@ -0,0 +1,110 @@
<?xml version="1.0"?>
<yandex>
    <!-- Profiles of settings. -->
    <profiles>
        <!-- Default settings. -->
        <default>
            <!-- Maximum memory usage for processing single query, in bytes. -->
            <max_memory_usage>10000000000</max_memory_usage>

            <!-- Use cache of uncompressed blocks of data. Meaningful only for processing many of very short queries. -->
            <use_uncompressed_cache>0</use_uncompressed_cache>

            <!-- How to choose between replicas during distributed query processing.
              random - choose random replica from set of replicas with minimum number of errors
              nearest_hostname - from set of replicas with minimum number of errors, choose replica
                with minimum number of different symbols between replica's hostname and local hostname
                (Hamming distance).
              in_order - first live replica is chosen in specified order.
              first_or_random - if the first replica has a higher number of errors, pick a random one from replicas with minimum number of errors.
            -->
            <load_balancing>random</load_balancing>
        </default>

        <!-- Profile that allows only read queries. -->
        <readonly>
            <readonly>1</readonly>
        </readonly>
    </profiles>

    <!-- Users and ACL. -->
    <users>
        <!-- If user name was not specified, 'default' user is used. -->
        <default>
            <!-- Password could be specified in plaintext or in SHA256 (in hex format).

              If you want to specify password in plaintext (not recommended), place it in 'password' element.
              Example: <password>qwerty</password>.
              Password could be empty.

              If you want to specify SHA256, place it in 'password_sha256_hex' element.
              Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
              Restrictions of SHA256: impossibility to connect to ClickHouse using MySQL JS client (as of July 2019).

              If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
              Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>

              How to generate decent password:
              Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
              The first line will be the password and the second - the corresponding SHA256.

              How to generate double SHA1:
              Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
              The first line will be the password and the second - the corresponding double SHA1.
            -->
            <password>public</password>

            <!-- List of networks with open access.

              To open access from everywhere, specify:
                <ip>::/0</ip>

              To open access only from localhost, specify:
                <ip>::1</ip>
                <ip>127.0.0.1</ip>

              Each element of list has one of the following forms:
              <ip> IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
                2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
              <host> Hostname. Example: server01.yandex.ru.
                To check access, DNS query is performed, and all received addresses compared to peer address.
              <host_regexp> Regular expression for host names. Example, ^server\d\d-\d\d-\d\.yandex\.ru$
                To check access, DNS PTR query is performed for peer address and then regexp is applied.
                Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
                Strongly recommended that the regexp ends with $
              All results of DNS requests are cached till server restart.
            -->
            <networks incl="networks" replace="replace">
                <ip>::/0</ip>
            </networks>

            <!-- Settings profile for user. -->
            <profile>default</profile>

            <!-- Quota for user. -->
            <quota>default</quota>

            <!-- User can create other users and grant rights to them. -->
            <!-- <access_management>1</access_management> -->
        </default>
    </users>

    <!-- Quotas. -->
    <quotas>
        <!-- Name of quota. -->
        <default>
            <!-- Limits for time interval. You could specify many intervals with different limits. -->
            <interval>
                <!-- Length of interval. -->
                <duration>3600</duration>

                <!-- No limits. Just calculate resource usage for time interval. -->
                <queries>0</queries>
                <errors>0</errors>
                <result_rows>0</result_rows>
                <read_rows>0</read_rows>
                <execution_time>0</execution_time>
            </interval>
        </default>
    </quotas>
</yandex>

@@ -0,0 +1,32 @@
version: '3.9'

services:
  cassandra_server:
    container_name: cassandra
    build:
      context: ./cassandra
      args:
        CASSANDRA_TAG: ${CASSANDRA_TAG}
    image: emqx-cassandra
    restart: always
    environment:
      CASSANDRA_BROADCAST_ADDRESS: "1.2.3.4"
      CASSANDRA_RPC_ADDRESS: "0.0.0.0"
      HEAP_NEWSIZE: "128M"
      MAX_HEAP_SIZE: "2048M"
    volumes:
      - ./certs:/certs
    #ports:
    #  - "9042:9042"
    #  - "9142:9142"
    command:
      - /bin/bash
      - -c
      - |
        /opt/cassandra/bin/cassandra -f -R > /cassandra.log &
        /opt/cassandra/bin/cqlsh -u cassandra -p cassandra -e "CREATE KEYSPACE mqtt WITH REPLICATION = { 'class':'SimpleStrategy','replication_factor':1};"
        while [[ $$? -ne 0 ]];do sleep 5; /opt/cassandra/bin/cqlsh -u cassandra -p cassandra -e "CREATE KEYSPACE mqtt WITH REPLICATION = { 'class':'SimpleStrategy','replication_factor':1};"; done
        /opt/cassandra/bin/cqlsh -u cassandra -p cassandra -e "describe keyspaces;"
        tail -f /cassandra.log
    networks:
      - emqx_bridge

@@ -0,0 +1,16 @@
version: '3.9'

services:
  clickhouse:
    container_name: clickhouse
    image: clickhouse/clickhouse-server:23.1.2.9-alpine
    restart: always
    volumes:
      - ./clickhouse/users.xml:/etc/clickhouse-server/users.xml
      - ./clickhouse/config.xml:/etc/clickhouse-server/config.d/config.xml
    expose:
      - "8123"
    ports:
      - "8123:8123"
    networks:
      - emqx_bridge
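
With the container up, the HTTP port mapped above can be probed using the `default` user and the `public` password from users.xml; a hedged smoke test:

```bash
# Expect the literal output "1" if the server is up and the credentials match.
curl -u default:public 'http://localhost:8123/?query=SELECT%201'
```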

@@ -0,0 +1,15 @@
version: '3.9'

services:
  dynamodb-local:
    container_name: dynamo
    image: amazon/dynamodb-local:${DYNAMO_TAG}
    restart: always
    ports:
      - "8000:8000"
    environment:
      AWS_ACCESS_KEY_ID: root
      AWS_SECRET_ACCESS_KEY: public
      AWS_DEFAULT_REGION: us-west-2
    networks:
      - emqx_bridge

@@ -0,0 +1,31 @@
version: '3.9'

services:
  iotdb:
    container_name: iotdb
    hostname: iotdb
    image: apache/iotdb:1.1.0-standalone
    restart: always
    environment:
      - enable_rest_service=true
      - cn_internal_address=iotdb
      - cn_internal_port=10710
      - cn_consensus_port=10720
      - cn_target_config_node_list=iotdb:10710
      - dn_rpc_address=iotdb
      - dn_internal_address=iotdb
      - dn_rpc_port=6667
      - dn_mpp_data_exchange_port=10740
      - dn_schema_region_consensus_port=10750
      - dn_data_region_consensus_port=10760
      - dn_target_config_node_list=iotdb:10710
    # volumes:
    #   - ./data:/iotdb/data
    #   - ./logs:/iotdb/logs
    expose:
      - "18080"
    # IoTDB's REST interface, uncomment for local testing
    # ports:
    #   - "18080:18080"
    networks:
      - emqx_bridge

@@ -2,7 +2,7 @@ version: '3.9'

services:
  zookeeper:
    image: wurstmeister/zookeeper
    image: docker.io/library/zookeeper:3.6
    ports:
      - "2181:2181"
    container_name: zookeeper

@@ -10,51 +10,57 @@ services:
    networks:
      emqx_bridge:
  ssl_cert_gen:
    image: fredrikhgrelland/alpine-jdk11-openssl
    # see https://github.com/emqx/docker-images
    image: ghcr.io/emqx/certgen:latest
    container_name: ssl_cert_gen
    user: "${DOCKER_USER:-root}"
    volumes:
      - emqx-shared-secret:/var/lib/secret
      - ./kafka/generate-certs.sh:/bin/generate-certs.sh
    entrypoint: /bin/sh
    command: /bin/generate-certs.sh
      - /tmp/emqx-ci/emqx-shared-secret:/var/lib/secret
  kdc:
    hostname: kdc.emqx.net
    image: ghcr.io/emqx/emqx-builder/5.0-26:1.13.4-24.3.4.2-1-ubuntu20.04
    image: ghcr.io/emqx/emqx-builder/5.0-33:1.13.4-24.3.4.2-3-ubuntu20.04
    container_name: kdc.emqx.net
    expose:
      - 88  # kdc
      - 749 # admin server
    # ports:
    #   - 88:88
    #   - 749:749
    networks:
      emqx_bridge:
    volumes:
      - emqx-shared-secret:/var/lib/secret
      - /tmp/emqx-ci/emqx-shared-secret:/var/lib/secret
      - ./kerberos/krb5.conf:/etc/kdc/krb5.conf
      - ./kerberos/krb5.conf:/etc/krb5.conf
      - ./kerberos/run.sh:/usr/bin/run.sh
    command: run.sh
  kafka_1:
    image: wurstmeister/kafka:2.13-2.7.0
    ports:
      - "9092:9092"
      - "9093:9093"
      - "9094:9094"
      - "9095:9095"
    image: wurstmeister/kafka:2.13-2.8.1
    # ports:
    #   - "9192-9195:9192-9195"
    container_name: kafka-1.emqx.net
    hostname: kafka-1.emqx.net
    depends_on:
      - "kdc"
      - "zookeeper"
      - "ssl_cert_gen"
      kdc:
        condition: service_started
      zookeeper:
        condition: service_started
      ssl_cert_gen:
        condition: service_completed_successfully
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_LISTENERS: PLAINTEXT://:9092,SASL_PLAINTEXT://:9093,SSL://:9094,SASL_SSL://:9095
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-1.emqx.net:9092,SASL_PLAINTEXT://kafka-1.emqx.net:9093,SSL://kafka-1.emqx.net:9094,SASL_SSL://kafka-1.emqx.net:9095
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,SASL_PLAINTEXT:SASL_PLAINTEXT,SSL:SSL,SASL_SSL:SASL_SSL
      KAFKA_LISTENERS: PLAINTEXT://:9092,SASL_PLAINTEXT://:9093,SSL://:9094,SASL_SSL://:9095,LOCAL_PLAINTEXT://:9192,LOCAL_SASL_PLAINTEXT://:9193,LOCAL_SSL://:9194,LOCAL_SASL_SSL://:9195,TOXIPROXY_PLAINTEXT://:9292,TOXIPROXY_SASL_PLAINTEXT://:9293,TOXIPROXY_SSL://:9294,TOXIPROXY_SASL_SSL://:9295
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-1.emqx.net:9092,SASL_PLAINTEXT://kafka-1.emqx.net:9093,SSL://kafka-1.emqx.net:9094,SASL_SSL://kafka-1.emqx.net:9095,LOCAL_PLAINTEXT://localhost:9192,LOCAL_SASL_PLAINTEXT://localhost:9193,LOCAL_SSL://localhost:9194,LOCAL_SASL_SSL://localhost:9195,TOXIPROXY_PLAINTEXT://toxiproxy.emqx.net:9292,TOXIPROXY_SASL_PLAINTEXT://toxiproxy.emqx.net:9293,TOXIPROXY_SSL://toxiproxy.emqx.net:9294,TOXIPROXY_SASL_SSL://toxiproxy.emqx.net:9295
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,SASL_PLAINTEXT:SASL_PLAINTEXT,SSL:SSL,SASL_SSL:SASL_SSL,LOCAL_PLAINTEXT:PLAINTEXT,LOCAL_SASL_PLAINTEXT:SASL_PLAINTEXT,LOCAL_SSL:SSL,LOCAL_SASL_SSL:SASL_SSL,TOXIPROXY_PLAINTEXT:PLAINTEXT,TOXIPROXY_SASL_PLAINTEXT:SASL_PLAINTEXT,TOXIPROXY_SSL:SSL,TOXIPROXY_SASL_SSL:SASL_SSL
      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
      KAFKA_SASL_ENABLED_MECHANISMS: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512,GSSAPI
      KAFKA_SASL_KERBEROS_SERVICE_NAME: kafka
      KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAIN
      KAFKA_JMX_OPTS: "-Djava.security.auth.login.config=/etc/kafka/jaas.conf"
      KAFKA_OPTS: "-Djava.security.auth.login.config=/etc/kafka/jaas.conf"
      KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
      KAFKA_CREATE_TOPICS_NG: test-topic-one-partition:1:1,test-topic-two-partitions:2:1,test-topic-three-partitions:3:1,
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
      KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.auth.SimpleAclAuthorizer
      KAFKA_SSL_TRUSTSTORE_LOCATION: /var/lib/secret/kafka.truststore.jks
      KAFKA_SSL_TRUSTSTORE_PASSWORD: password

@@ -64,7 +70,7 @@ services:
    networks:
      emqx_bridge:
    volumes:
      - emqx-shared-secret:/var/lib/secret
      - /tmp/emqx-ci/emqx-shared-secret:/var/lib/secret
      - ./kafka/jaas.conf:/etc/kafka/jaas.conf
      - ./kafka/kafka-entrypoint.sh:/bin/kafka-entrypoint.sh
      - ./kerberos/krb5.conf:/etc/kdc/krb5.conf

@@ -13,10 +13,12 @@ services:
    networks:
      - emqx_bridge
    command:
      --bind-address "::"
      --character-set-server=utf8mb4
      --collation-server=utf8mb4_general_ci
      --explicit_defaults_for_timestamp=true
      --lower_case_table_names=1
      --max_allowed_packet=128M
      --skip-symbolic-links
      - --bind-address=0.0.0.0
      - --character-set-server=utf8mb4
      - --collation-server=utf8mb4_general_ci
      - --lower-case-table-names=1
      - --max-allowed-packet=128M
      # Severely limit maximum number of prepared statements the server must permit
      # so that we hit potential resource exhaustion earlier in tests.
      - --max-prepared-stmt-count=64
      - --skip-symbolic-links

@@ -23,9 +23,11 @@ services:
      - --port=3306
      - --character-set-server=utf8mb4
      - --collation-server=utf8mb4_general_ci
      - --explicit_defaults_for_timestamp=true
      - --lower_case_table_names=1
      - --max_allowed_packet=128M
      - --lower-case-table-names=1
      - --max-allowed-packet=128M
      # Severely limit maximum number of prepared statements the server must permit
      # so that we hit potential resource exhaustion earlier in tests.
      - --max-prepared-stmt-count=64
      - --ssl-ca=/etc/certs/ca-cert.pem
      - --ssl-cert=/etc/certs/server-cert.pem
      - --ssl-key=/etc/certs/server-key.pem

@@ -0,0 +1,9 @@
version: '3.9'

services:
  opents_server:
    container_name: opents
    image: petergrace/opentsdb-docker:${OPENTS_TAG}
    restart: always
    networks:
      - emqx_bridge

@@ -0,0 +1,11 @@
version: '3.9'

services:
  oracle_server:
    container_name: oracle
    image: oracleinanutshell/oracle-xe-11g:1.0.0
    restart: always
    environment:
      ORACLE_DISABLE_ASYNCH_IO: true
    networks:
      - emqx_bridge

@@ -0,0 +1,32 @@
version: '3'

services:
  pulsar:
    container_name: pulsar
    image: apachepulsar/pulsar:2.11.0
    # ports:
    #   - 6650:6650
    #   - 8080:8080
    networks:
      emqx_bridge:
    volumes:
      - ../../apps/emqx/etc/certs/cert.pem:/etc/certs/server.pem
      - ../../apps/emqx/etc/certs/key.pem:/etc/certs/key.pem
      - ../../apps/emqx/etc/certs/cacert.pem:/etc/certs/ca.pem
    restart: always
    command:
      - bash
      - "-c"
      - |
        sed -i 's/^advertisedAddress=/#advertisedAddress=/' conf/standalone.conf
        sed -ie 's/^brokerServicePort=.*/brokerServicePort=6649/' conf/standalone.conf
        sed -i 's/^bindAddress=/#bindAddress=/' conf/standalone.conf
        sed -i 's#^bindAddresses=#bindAddresses=plain:pulsar://0.0.0.0:6650,ssl:pulsar+ssl://0.0.0.0:6651,toxiproxy:pulsar://0.0.0.0:6652,toxiproxy_ssl:pulsar+ssl://0.0.0.0:6653#' conf/standalone.conf
        sed -i 's#^advertisedAddress=#advertisedAddress=plain:pulsar://pulsar:6650,ssl:pulsar+ssl://pulsar:6651,toxiproxy:pulsar://toxiproxy:6652,toxiproxy_ssl:pulsar+ssl://toxiproxy:6653#' conf/standalone.conf
        sed -i 's#^tlsCertificateFilePath=#tlsCertificateFilePath=/etc/certs/server.pem#' conf/standalone.conf
        sed -i 's#^tlsTrustCertsFilePath=#tlsTrustCertsFilePath=/etc/certs/ca.pem#' conf/standalone.conf
        sed -i 's#^tlsKeyFilePath=#tlsKeyFilePath=/etc/certs/key.pem#' conf/standalone.conf
        sed -i 's#^tlsProtocols=#tlsProtocols=TLSv1.3,TLSv1.2#' conf/standalone.conf
        sed -i 's#^tlsCiphers=#tlsCiphers=TLS_AES_256_GCM_SHA384#' conf/standalone.conf
        echo 'advertisedListeners=plain:pulsar://pulsar:6650,ssl:pulsar+ssl://pulsar:6651,toxiproxy:pulsar://toxiproxy:6652,toxiproxy_ssl:pulsar+ssl://toxiproxy:6653' >> conf/standalone.conf
        bin/pulsar standalone -nfw -nss
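
Once the standalone broker finishes starting, its health can be probed from inside the container; a sketch using the pulsar-admin CLI that ships with the image:

```bash
# "ok" indicates the broker accepted the health check.
docker exec -it pulsar bin/pulsar-admin brokers healthcheck
```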

@@ -0,0 +1,17 @@
version: '3.9'

services:
  rabbitmq:
    container_name: rabbitmq
    image: rabbitmq:3.11-management

    restart: always
    expose:
      - "15672"
      - "5672"
    # We don't want to take ports from the host
    # ports:
    #   - "15672:15672"
    #   - "5672:5672"
    networks:
      - emqx_bridge

@@ -1,11 +1,57 @@
version: '3.9'

services:
  redis_cluster:

  redis-cluster-1: &redis-node
    container_name: redis-cluster-1
    image: redis:${REDIS_TAG}
    container_name: redis-cluster
    volumes:
      - ./redis/:/data/conf
    command: bash -c "/bin/bash /data/conf/redis.sh --node cluster && tail -f /var/log/redis-server.log"
      - ./redis/cluster-tcp:/usr/local/etc/redis
    command: redis-server /usr/local/etc/redis/redis.conf
    networks:
      - emqx_bridge

  redis-cluster-2:
    <<: *redis-node
    container_name: redis-cluster-2

  redis-cluster-3:
    <<: *redis-node
    container_name: redis-cluster-3

  redis-cluster-4:
    <<: *redis-node
    container_name: redis-cluster-4

  redis-cluster-5:
    <<: *redis-node
    container_name: redis-cluster-5

  redis-cluster-6:
    <<: *redis-node
    container_name: redis-cluster-6

  redis-cluster-create:
    <<: *redis-node
    container_name: redis-cluster-create
    command: >
      redis-cli
      --cluster create
      redis-cluster-1:6379
      redis-cluster-2:6379
      redis-cluster-3:6379
      redis-cluster-4:6379
      redis-cluster-5:6379
      redis-cluster-6:6379
      --cluster-replicas 1
      --cluster-yes
      --pass "public"
      --no-auth-warning
    depends_on:
      - redis-cluster-1
      - redis-cluster-2
      - redis-cluster-3
      - redis-cluster-4
      - redis-cluster-5
      - redis-cluster-6
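
After redis-cluster-create exits, cluster health can be verified from any node; a sketch run inside one of the containers defined above:

```bash
# Confirms all 16384 hash slots are covered and each master has its replica.
docker exec -it redis-cluster-1 redis-cli --pass public --no-auth-warning \
  --cluster check redis-cluster-1:6379
```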

@@ -1,14 +1,59 @@
version: '3.9'
|
||||
|
||||
services:
|
||||
redis_cluster_tls:
|
||||
container_name: redis-cluster-tls
|
||||
|
||||
redis-cluster-tls-1: &redis-node
|
||||
container_name: redis-cluster-tls-1
|
||||
image: redis:${REDIS_TAG}
|
||||
volumes:
|
||||
- ../../apps/emqx/etc/certs/cacert.pem:/etc/certs/ca.crt
|
||||
- ../../apps/emqx/etc/certs/cert.pem:/etc/certs/redis.crt
|
||||
- ../../apps/emqx/etc/certs/key.pem:/etc/certs/redis.key
|
||||
- ./redis/:/data/conf
|
||||
command: bash -c "/bin/bash /data/conf/redis.sh --node cluster --tls-enabled && tail -f /var/log/redis-server.log"
|
||||
- ./redis/cluster-tls:/usr/local/etc/redis
|
||||
- ../../apps/emqx/etc/certs:/etc/certs
|
||||
command: redis-server /usr/local/etc/redis/redis.conf
|
||||
networks:
|
||||
- emqx_bridge
|
||||
|
||||
redis-cluster-tls-2:
|
||||
<<: *redis-node
|
||||
container_name: redis-cluster-tls-2
|
||||
|
||||
redis-cluster-tls-3:
|
||||
<<: *redis-node
|
||||
container_name: redis-cluster-tls-3
|
||||
|
||||
redis-cluster-tls-4:
|
||||
<<: *redis-node
|
||||
container_name: redis-cluster-tls-4
|
||||
|
||||
redis-cluster-tls-5:
|
||||
<<: *redis-node
|
||||
container_name: redis-cluster-tls-5
|
||||
|
||||
redis-cluster-tls-6:
|
||||
<<: *redis-node
|
||||
container_name: redis-cluster-tls-6
|
||||
|
||||
redis-cluster-tls-create:
|
||||
<<: *redis-node
|
||||
container_name: redis-cluster-tls-create
|
||||
command: >
|
||||
redis-cli
|
||||
--cluster create
|
||||
redis-cluster-tls-1:6389
|
||||
redis-cluster-tls-2:6389
|
||||
redis-cluster-tls-3:6389
|
||||
redis-cluster-tls-4:6389
|
||||
redis-cluster-tls-5:6389
|
||||
redis-cluster-tls-6:6389
|
||||
--cluster-replicas 1
|
||||
--cluster-yes
|
||||
--pass "public"
|
||||
--no-auth-warning
|
||||
--tls
|
||||
--insecure
|
||||
depends_on:
|
||||
- redis-cluster-tls-1
|
||||
- redis-cluster-tls-2
|
||||
- redis-cluster-tls-3
|
||||
- redis-cluster-tls-4
|
||||
- redis-cluster-tls-5
|
||||
- redis-cluster-tls-6
|
||||
|
||||
|
|
|
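The TLS cluster only serves on the tls-port (6389, per the cluster-tls redis.conf further down), so verification needs the TLS flags. A sketch, using --insecure to skip certificate verification against the test certs:

docker exec redis-cluster-tls-1 redis-cli -p 6389 --tls --insecure --pass public --no-auth-warning cluster info | grep cluster_state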
@ -1,11 +1,41 @@
version: '3.9'
version: "3"

services:
  redis_sentinel_server:

  redis-sentinel-master:
    container_name: redis-sentinel-master
    image: redis:${REDIS_TAG}
    volumes:
      - ./redis/sentinel-tcp:/usr/local/etc/redis
    command: redis-server /usr/local/etc/redis/master.conf
    networks:
      - emqx_bridge

  redis-sentinel-slave:
    container_name: redis-sentinel-slave
    image: redis:${REDIS_TAG}
    volumes:
      - ./redis/sentinel-tcp:/usr/local/etc/redis
    command: redis-server /usr/local/etc/redis/slave.conf
    networks:
      - emqx_bridge
    depends_on:
      - redis-sentinel-master

  redis-sentinel:
    container_name: redis-sentinel
    image: redis:${REDIS_TAG}
    volumes:
      - ./redis/:/data/conf
    command: bash -c "/bin/bash /data/conf/redis.sh --node sentinel && tail -f /var/log/redis-server.log"
      - ./redis/sentinel-tcp/sentinel-base.conf:/usr/local/etc/redis/sentinel-base.conf
    depends_on:
      - redis-sentinel-master
      - redis-sentinel-slave
    command: >
      bash -c "cp -f /usr/local/etc/redis/sentinel-base.conf /usr/local/etc/redis/sentinel.conf &&
               redis-sentinel /usr/local/etc/redis/sentinel.conf"
    networks:
      - emqx_bridge
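A quick way to confirm the sentinel is monitoring the right master (the mymaster name comes from sentinel-base.conf further down):

docker exec redis-sentinel redis-cli -p 26379 sentinel get-master-addr-by-name mymaster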
@ -1,14 +1,44 @@
version: '3.9'
version: "3"

services:
  redis_sentinel_server_tls:

  redis-sentinel-tls-master:
    container_name: redis-sentinel-tls-master
    image: redis:${REDIS_TAG}
    volumes:
      - ./redis/sentinel-tls:/usr/local/etc/redis
      - ../../apps/emqx/etc/certs:/etc/certs
    command: redis-server /usr/local/etc/redis/master.conf
    networks:
      - emqx_bridge

  redis-sentinel-tls-slave:
    container_name: redis-sentinel-tls-slave
    image: redis:${REDIS_TAG}
    volumes:
      - ./redis/sentinel-tls:/usr/local/etc/redis
      - ../../apps/emqx/etc/certs:/etc/certs
    command: redis-server /usr/local/etc/redis/slave.conf
    networks:
      - emqx_bridge
    depends_on:
      - redis-sentinel-tls-master

  redis-sentinel-tls:
    container_name: redis-sentinel-tls
    image: redis:${REDIS_TAG}
    volumes:
      - ../../apps/emqx/etc/certs/cacert.pem:/etc/certs/ca.crt
      - ../../apps/emqx/etc/certs/cert.pem:/etc/certs/redis.crt
      - ../../apps/emqx/etc/certs/key.pem:/etc/certs/redis.key
      - ./redis/:/data/conf
    command: bash -c "/bin/bash /data/conf/redis.sh --node sentinel --tls-enabled && tail -f /var/log/redis-server.log"
      - ./redis/sentinel-tls/sentinel-base.conf:/usr/local/etc/redis/sentinel-base.conf
      - ../../apps/emqx/etc/certs:/etc/certs
    depends_on:
      - redis-sentinel-tls-master
      - redis-sentinel-tls-slave
    command: >
      bash -c "cp -f /usr/local/etc/redis/sentinel-base.conf /usr/local/etc/redis/sentinel.conf &&
               redis-sentinel /usr/local/etc/redis/sentinel.conf"
    networks:
      - emqx_bridge
@ -0,0 +1,34 @@
version: '3.9'

services:
  mqnamesrv:
    image: apache/rocketmq:4.9.4
    container_name: rocketmq_namesrv
    # ports:
    #   - 9876:9876
    volumes:
      - ./rocketmq/logs:/opt/logs
      - ./rocketmq/store:/opt/store
    command: ./mqnamesrv
    networks:
      - emqx_bridge

  mqbroker:
    image: apache/rocketmq:4.9.4
    container_name: rocketmq_broker
    # ports:
    #   - 10909:10909
    #   - 10911:10911
    volumes:
      - ./rocketmq/logs:/opt/logs
      - ./rocketmq/store:/opt/store
      - ./rocketmq/conf/broker.conf:/etc/rocketmq/broker.conf
    environment:
      NAMESRV_ADDR: "rocketmq_namesrv:9876"
      JAVA_OPTS: " -Duser.home=/opt -Drocketmq.broker.diskSpaceWarningLevelRatio=0.99"
      JAVA_OPT_EXT: "-server -Xms512m -Xmx512m -Xmn512m"
    command: ./mqbroker -c /etc/rocketmq/broker.conf
    depends_on:
      - mqnamesrv
    networks:
      - emqx_bridge
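Once the broker has registered with the name server, cluster membership can be checked with the mqadmin tool bundled in the RocketMQ distribution; a sketch, assuming the tool sits next to mqbroker in the image's working directory:

# expect broker-a (from broker.conf below) to show up in DefaultCluster
docker exec rocketmq_broker sh -c './mqadmin clusterList -n rocketmq_namesrv:9876'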
@ -0,0 +1,19 @@
version: '3.9'

services:
  sql_server:
    container_name: sqlserver
    # See also:
    # https://mcr.microsoft.com/en-us/product/mssql/server/about
    # https://hub.docker.com/_/microsoft-mssql-server
    image: ${MS_IMAGE_ADDR}:${SQLSERVER_TAG}
    environment:
      # See also:
      # https://learn.microsoft.com/en-us/sql/linux/sql-server-linux-configure-environment-variables
      ACCEPT_EULA: "Y"
      MSSQL_SA_PASSWORD: "mqtt_public1"
    restart: always
    # ports:
    #   - "1433:1433"
    networks:
      - emqx_bridge
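The server takes a little while to accept logins after start. A minimal readiness probe, assuming the sqlcmd tool at its usual location inside the mssql image:

docker exec sqlserver /opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P mqtt_public1 -Q 'SELECT 1'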
@ -0,0 +1,11 @@
version: '3.9'

services:
  tdengine_server:
    container_name: tdengine
    image: tdengine/tdengine:${TDENGINE_TAG}
    restart: always
    ports:
      - "6041:6041"
    networks:
      - emqx_bridge
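Port 6041 is taosAdapter's REST endpoint, so the service can be probed from the host with plain HTTP; a sketch, assuming TDengine's default root/taosdata credentials:

curl -s -u root:taosdata -d 'show databases;' http://localhost:6041/rest/sql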
@ -6,15 +6,28 @@ services:
    image: ghcr.io/shopify/toxiproxy:2.5.0
    restart: always
    networks:
      - emqx_bridge
      emqx_bridge:
        aliases:
          - toxiproxy
          - toxiproxy.emqx.net
    volumes:
      - "./toxiproxy.json:/config/toxiproxy.json"
    ports:
      - 8474:8474
      - 8086:8086
      - 8087:8087
      - 11433:1433
      - 13306:3306
      - 13307:3307
      - 15432:5432
      - 15433:5433
      - 16041:6041
      - 18000:8000
      - 19876:9876
      - 19042:9042
      - 19142:9142
      - 14242:4242
      - 28080:18080
    command:
      - "-host=0.0.0.0"
      - "-config=/config/toxiproxy.json"
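With 8474 published, proxies can be inspected and perturbed from the host through toxiproxy's HTTP API; a sketch, using a proxy name from toxiproxy.json further down:

# list configured proxies
curl -s http://localhost:8474/proxies
# inject 1s of latency into the pgsql_tcp proxy
curl -s -X POST http://localhost:8474/proxies/pgsql_tcp/toxics \
  -d '{"type":"latency","attributes":{"latency":1000}}'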
@ -3,7 +3,7 @@ version: '3.9'
services:
  erlang:
    container_name: erlang
    image: ${DOCKER_CT_RUNNER_IMAGE:-ghcr.io/emqx/emqx-builder/5.0-26:1.13.4-24.3.4.2-1-ubuntu20.04}
    image: ${DOCKER_CT_RUNNER_IMAGE:-ghcr.io/emqx/emqx-builder/5.0-33:1.13.4-24.3.4.2-3-ubuntu20.04}
    env_file:
      - conf.env
    environment:
@ -16,14 +16,18 @@ services:
      GITHUB_REF: ${GITHUB_REF}
    networks:
      - emqx_bridge
    ports:
      - 28083:18083
      - 2883:1883
    volumes:
      - ../..:/emqx
      - emqx-shared-secret:/var/lib/secret
      - /tmp/emqx-ci/emqx-shared-secret:/var/lib/secret
      - ./kerberos/krb5.conf:/etc/kdc/krb5.conf
      - ./kerberos/krb5.conf:/etc/krb5.conf
      # - ./odbc/odbcinst.ini:/etc/odbcinst.ini
    working_dir: /emqx
    tty: true
    user: "${UID_GID}"
    user: "${DOCKER_USER:-root}"

networks:
  emqx_bridge:
@ -37,6 +41,3 @@ networks:
        gateway: 172.100.239.1
      - subnet: 2001:3200:3200::/64
        gateway: 2001:3200:3200::1

volumes: # add this section
  emqx-shared-secret: # does not need anything underneath this
@ -1,46 +0,0 @@
#!/usr/bin/bash

set -euo pipefail

set -x

# Source https://github.com/zmstone/docker-kafka/blob/master/generate-certs.sh

HOST="*."
DAYS=3650
PASS="password"

cd /var/lib/secret/

# Delete old files
(rm ca.key ca.crt server.key server.csr server.crt client.key client.csr client.crt server.p12 kafka.keystore.jks kafka.truststore.jks 2>/dev/null || true)

ls

echo '== Generate self-signed server and client certificates'
echo '= generate CA'
openssl req -new -x509 -keyout ca.key -out ca.crt -days $DAYS -nodes -subj "/C=SE/ST=Stockholm/L=Stockholm/O=brod/OU=test/CN=$HOST"

echo '= generate server certificate request'
openssl req -newkey rsa:2048 -sha256 -keyout server.key -out server.csr -days "$DAYS" -nodes -subj "/C=SE/ST=Stockholm/L=Stockholm/O=brod/OU=test/CN=$HOST"

echo '= sign server certificate'
openssl x509 -req -CA ca.crt -CAkey ca.key -in server.csr -out server.crt -days "$DAYS" -CAcreateserial

echo '= generate client certificate request'
openssl req -newkey rsa:2048 -sha256 -keyout client.key -out client.csr -days "$DAYS" -nodes -subj "/C=SE/ST=Stockholm/L=Stockholm/O=brod/OU=test/CN=$HOST"

echo '== sign client certificate'
openssl x509 -req -CA ca.crt -CAkey ca.key -in client.csr -out client.crt -days $DAYS -CAserial ca.srl

echo '= Convert self-signed certificate to PKCS#12 format'
openssl pkcs12 -export -name "$HOST" -in server.crt -inkey server.key -out server.p12 -CAfile ca.crt -passout pass:"$PASS"

echo '= Import PKCS#12 into a java keystore'

echo $PASS | keytool -importkeystore -destkeystore kafka.keystore.jks -srckeystore server.p12 -srcstoretype pkcs12 -alias "$HOST" -storepass "$PASS"


echo '= Import CA into java truststore'

echo yes | keytool -keystore kafka.truststore.jks -alias CARoot -import -file ca.crt -storepass "$PASS"
@ -17,6 +17,7 @@ timeout $TIMEOUT bash -c 'until [ -f /var/lib/secret/kafka.keytab ]; do sleep 1;
echo "+++++++ Wait until SSL certs are generated ++++++++"

timeout $TIMEOUT bash -c 'until [ -f /var/lib/secret/kafka.truststore.jks ]; do sleep 1; done'
keytool -list -v -keystore /var/lib/secret/kafka.keystore.jks -storepass password

sleep 3
@ -0,0 +1,9 @@
|
|||
[ms-sql]
|
||||
Description=Microsoft ODBC Driver 17 for SQL Server
|
||||
Driver=/opt/microsoft/msodbcsql17/lib64/libmsodbcsql-17.10.so.2.1
|
||||
UsageCount=1
|
||||
|
||||
[ODBC Driver 17 for SQL Server]
|
||||
Description=Microsoft ODBC Driver 17 for SQL Server
|
||||
Driver=/opt/microsoft/msodbcsql17/lib64/libmsodbcsql-17.10.so.2.1
|
||||
UsageCount=1
|
|
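With the driver registered under the ms-sql name, a DSN-less connection can be tried from the ct runner with unixODBC's isql; a sketch (server name and SA password taken from the sqlserver compose file above; the -k flag, where supported, makes isql treat the argument as a full connection string):

isql -v -k "Driver={ms-sql};Server=sqlserver,1433;UID=sa;PWD=mqtt_public1"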
@ -1,7 +1,7 @@
ARG BUILD_FROM=postgres:13
FROM ${BUILD_FROM}
ARG POSTGRES_USER=postgres
COPY --chown=$POSTGRES_USER ./pgsql/pg_hba.conf /var/lib/postgresql/pg_hba.conf
COPY --chown=$POSTGRES_USER ./pgsql/pg_hba_tls.conf /var/lib/postgresql/pg_hba.conf
COPY --chown=$POSTGRES_USER certs/server.key /var/lib/postgresql/server.key
COPY --chown=$POSTGRES_USER certs/server.crt /var/lib/postgresql/server.crt
COPY --chown=$POSTGRES_USER certs/ca.crt /var/lib/postgresql/root.crt
@ -0,0 +1,8 @@
|
|||
# TYPE DATABASE USER CIDR-ADDRESS METHOD
|
||||
local all all trust
|
||||
# TODO: also test with `cert`? will require client certs
|
||||
hostssl all all 0.0.0.0/0 password
|
||||
hostssl all all ::/0 password
|
||||
|
||||
hostssl all www-data 0.0.0.0/0 cert clientcert=1
|
||||
hostssl all postgres 0.0.0.0/0 cert clientcert=1
|
|
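These rules admit password logins over TLS from anywhere, plus cert-authenticated logins for the two named roles. A quick check from the host through the pgsql_tls toxiproxy mapping (host port 15433, per the toxiproxy compose file above); the password is whatever the pgsql container was provisioned with:

psql "host=localhost port=15433 user=postgres sslmode=require" -c 'SELECT 1;'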
@ -1,3 +0,0 @@
r700?i.log
nodes.700?.conf
*.rdb
@ -0,0 +1,18 @@
bind :: 0.0.0.0
port 6379
requirepass public

cluster-enabled yes

masterauth public

protected-mode no
daemonize no

loglevel notice
logfile ""

always-show-logo no
save ""
appendonly no
@ -0,0 +1,28 @@
bind :: 0.0.0.0
port 6379
requirepass public

cluster-enabled yes

masterauth public

tls-port 6389
tls-cert-file /etc/certs/cert.pem
tls-key-file /etc/certs/key.pem
tls-ca-cert-file /etc/certs/cacert.pem
tls-auth-clients no

tls-replication yes
tls-cluster yes

protected-mode no
daemonize no

loglevel notice
logfile ""

always-show-logo no
save ""
appendonly no
@ -1,12 +0,0 @@
daemonize yes
bind 0.0.0.0 ::
logfile /var/log/redis-server.log
protected-mode no
requirepass public
masterauth public

tls-cert-file /etc/certs/redis.crt
tls-key-file /etc/certs/redis.key
tls-ca-cert-file /etc/certs/ca.crt
tls-replication yes
tls-cluster yes
@ -1,6 +0,0 @@
daemonize yes
bind 0.0.0.0 ::
logfile /var/log/redis-server.log
protected-mode no
requirepass public
masterauth public
@ -1,126 +0,0 @@
#!/bin/bash

set -x

LOCAL_IP=$(hostname -i | grep -oE '((25[0-5]|(2[0-4]|1[0-9]|[1-9]|)[0-9])\.){3}(25[0-5]|(2[0-4]|1[0-9]|[1-9]|)[0-9])' | head -n 1)

node=single
tls=false
while [[ $# -gt 0 ]]
do
    key="$1"

    case $key in
        -n|--node)
            node="$2"
            shift # past argument
            shift # past value
            ;;
        --tls-enabled)
            tls=true
            shift # past argument
            ;;
        *)
            shift # past argument
            ;;
    esac
done

rm -f \
    /data/conf/r7000i.log \
    /data/conf/r7001i.log \
    /data/conf/r7002i.log \
    /data/conf/nodes.7000.conf \
    /data/conf/nodes.7001.conf \
    /data/conf/nodes.7002.conf

if [ "$node" = "cluster" ]; then
    if $tls; then
        redis-server /data/conf/redis-tls.conf --port 7000 --cluster-config-file /data/conf/nodes.7000.conf \
            --tls-port 8000 --cluster-enabled yes
        redis-server /data/conf/redis-tls.conf --port 7001 --cluster-config-file /data/conf/nodes.7001.conf \
            --tls-port 8001 --cluster-enabled yes
        redis-server /data/conf/redis-tls.conf --port 7002 --cluster-config-file /data/conf/nodes.7002.conf \
            --tls-port 8002 --cluster-enabled yes
    else
        redis-server /data/conf/redis.conf --port 7000 --cluster-config-file /data/conf/nodes.7000.conf \
            --cluster-enabled yes
        redis-server /data/conf/redis.conf --port 7001 --cluster-config-file /data/conf/nodes.7001.conf \
            --cluster-enabled yes
        redis-server /data/conf/redis.conf --port 7002 --cluster-config-file /data/conf/nodes.7002.conf \
            --cluster-enabled yes
    fi
elif [ "$node" = "sentinel" ]; then
    if $tls; then
        redis-server /data/conf/redis-tls.conf --port 7000 --cluster-config-file /data/conf/nodes.7000.conf \
            --tls-port 8000 --cluster-enabled no
        redis-server /data/conf/redis-tls.conf --port 7001 --cluster-config-file /data/conf/nodes.7001.conf \
            --tls-port 8001 --cluster-enabled no --slaveof "$LOCAL_IP" 8000
        redis-server /data/conf/redis-tls.conf --port 7002 --cluster-config-file /data/conf/nodes.7002.conf \
            --tls-port 8002 --cluster-enabled no --slaveof "$LOCAL_IP" 8000

    else
        redis-server /data/conf/redis.conf --port 7000 --cluster-config-file /data/conf/nodes.7000.conf \
            --cluster-enabled no
        redis-server /data/conf/redis.conf --port 7001 --cluster-config-file /data/conf/nodes.7001.conf \
            --cluster-enabled no --slaveof "$LOCAL_IP" 7000
        redis-server /data/conf/redis.conf --port 7002 --cluster-config-file /data/conf/nodes.7002.conf \
            --cluster-enabled no --slaveof "$LOCAL_IP" 7000
    fi
fi

REDIS_LOAD_FLG=true

while $REDIS_LOAD_FLG;
do
    sleep 1
    redis-cli --pass public --no-auth-warning -p 7000 info 1> /data/conf/r7000i.log 2> /dev/null
    if ! [ -s /data/conf/r7000i.log ]; then
        continue
    fi
    redis-cli --pass public --no-auth-warning -p 7001 info 1> /data/conf/r7001i.log 2> /dev/null
    if ! [ -s /data/conf/r7001i.log ]; then
        continue
    fi
    redis-cli --pass public --no-auth-warning -p 7002 info 1> /data/conf/r7002i.log 2> /dev/null;
    if ! [ -s /data/conf/r7002i.log ]; then
        continue
    fi
    if [ "$node" = "cluster" ] ; then
        if $tls; then
            yes "yes" | redis-cli --cluster create "$LOCAL_IP:8000" "$LOCAL_IP:8001" "$LOCAL_IP:8002" \
                --pass public --no-auth-warning \
                --tls true --cacert /etc/certs/ca.crt \
                --cert /etc/certs/redis.crt --key /etc/certs/redis.key
        else
            yes "yes" | redis-cli --cluster create "$LOCAL_IP:7000" "$LOCAL_IP:7001" "$LOCAL_IP:7002" \
                --pass public --no-auth-warning
        fi
    elif [ "$node" = "sentinel" ]; then
        tee /_sentinel.conf>/dev/null << EOF
port 26379
bind 0.0.0.0 ::
daemonize yes
logfile /var/log/redis-server.log
dir /tmp
EOF
        if $tls; then
            cat >>/_sentinel.conf<<EOF
tls-port 26380
tls-replication yes
tls-cert-file /etc/certs/redis.crt
tls-key-file /etc/certs/redis.key
tls-ca-cert-file /etc/certs/ca.crt
sentinel monitor mymaster $LOCAL_IP 8000 1
EOF
        else
            cat >>/_sentinel.conf<<EOF
sentinel monitor mymaster $LOCAL_IP 7000 1
EOF
        fi
        redis-server /_sentinel.conf --sentinel
    fi
    REDIS_LOAD_FLG=false
done

exit 0;
@ -0,0 +1,14 @@
bind :: 0.0.0.0
port 6379
requirepass public

protected-mode no
daemonize no

loglevel notice
logfile ""

always-show-logo no
save ""
appendonly no
@ -0,0 +1,7 @@
sentinel resolve-hostnames yes
bind :: 0.0.0.0

sentinel monitor mymaster redis-sentinel-master 6379 1
sentinel auth-pass mymaster public
sentinel down-after-milliseconds mymaster 10000
sentinel failover-timeout mymaster 20000
@ -0,0 +1,17 @@
bind :: 0.0.0.0
port 6379
requirepass public

replicaof redis-sentinel-master 6379
masterauth public

protected-mode no
daemonize no

loglevel notice
logfile ""

always-show-logo no
save ""
appendonly no
@ -0,0 +1,20 @@
bind :: 0.0.0.0
port 6379
requirepass public

tls-port 6389
tls-cert-file /etc/certs/cert.pem
tls-key-file /etc/certs/key.pem
tls-ca-cert-file /etc/certs/cacert.pem
tls-auth-clients no

protected-mode no
daemonize no

loglevel notice
logfile ""

always-show-logo no
save ""
appendonly no
@ -0,0 +1,14 @@
sentinel resolve-hostnames yes
bind :: 0.0.0.0

tls-port 26380
tls-replication yes
tls-cert-file /etc/certs/cert.pem
tls-key-file /etc/certs/key.pem
tls-ca-cert-file /etc/certs/cacert.pem
tls-auth-clients no

sentinel monitor mymaster redis-sentinel-tls-master 6389 1
sentinel auth-pass mymaster public
sentinel down-after-milliseconds mymaster 10000
sentinel failover-timeout mymaster 20000
@ -0,0 +1,24 @@
bind :: 0.0.0.0
port 6379
requirepass public

replicaof redis-sentinel-tls-master 6389
masterauth public

tls-port 6389
tls-replication yes
tls-cert-file /etc/certs/cert.pem
tls-key-file /etc/certs/key.pem
tls-ca-cert-file /etc/certs/cacert.pem
tls-auth-clients no

protected-mode no
daemonize no

loglevel notice
logfile ""

always-show-logo no
save ""
appendonly no
@ -0,0 +1,22 @@
brokerClusterName=DefaultCluster
brokerName=broker-a
brokerId=0

brokerIP1=rocketmq_broker

defaultTopicQueueNums=4
autoCreateTopicEnable=true
autoCreateSubscriptionGroup=true

listenPort=10911
deleteWhen=04

fileReservedTime=120
mapedFileSizeCommitLog=1073741824
mapedFileSizeConsumeQueue=300000
diskMaxUsedSpaceRatio=100
maxMessageSize=65536

brokerRole=ASYNC_MASTER

flushDiskType=ASYNC_FLUSH
@ -20,8 +20,8 @@ esac

{
    echo "HOCON_ENV_OVERRIDE_PREFIX=EMQX_"
    echo "EMQX_ZONES__DEFAULT__MQTT__RETRY_INTERVAL=2s"
    echo "EMQX_ZONES__DEFAULT__MQTT__MAX_TOPIC_ALIAS=10"
    echo "EMQX_MQTT__RETRY_INTERVAL=2s"
    echo "EMQX_MQTT__MAX_TOPIC_ALIAS=10"
    echo "EMQX_AUTHORIZATION__SOURCES=[]"
    echo "EMQX_AUTHORIZATION__NO_MATCH=allow"
} >> .ci/docker-compose-file/conf.cluster.env
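For reference, EMQX expands these overrides by stripping the EMQX_ prefix, lowercasing, and turning '__' into '.', so the two new lines should be equivalent to the HOCON settings below (a sketch of the resulting config, not part of the diff):

# mqtt.retry_interval = 2s
# mqtt.max_topic_alias = 10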
@ -29,7 +29,7 @@ esac
is_node_up() {
    local node="$1"
    docker exec -i "$node" \
        bash -c "emqx eval-erl \"['emqx@node1.emqx.io','emqx@node2.emqx.io'] = maps:get(running_nodes, ekka_cluster:info()).\"" > /dev/null 2>&1
        bash -c "emqx eval \"['emqx@node1.emqx.io','emqx@node2.emqx.io'] = maps:get(running_nodes, ekka_cluster:info()).\"" > /dev/null 2>&1
}

is_node_listening() {
@ -29,5 +29,107 @@
    "listen": "0.0.0.0:6379",
    "upstream": "redis:6379",
    "enabled": true
  },
  {
    "name": "pgsql_tcp",
    "listen": "0.0.0.0:5432",
    "upstream": "pgsql:5432",
    "enabled": true
  },
  {
    "name": "pgsql_tls",
    "listen": "0.0.0.0:5433",
    "upstream": "pgsql-tls:5432",
    "enabled": true
  },
  {
    "name": "tdengine_restful",
    "listen": "0.0.0.0:6041",
    "upstream": "tdengine:6041",
    "enabled": true
  },
  {
    "name": "dynamo",
    "listen": "0.0.0.0:8000",
    "upstream": "dynamo:8000",
    "enabled": true
  },
  {
    "name": "kafka_plain",
    "listen": "0.0.0.0:9292",
    "upstream": "kafka-1.emqx.net:9292",
    "enabled": true
  },
  {
    "name": "kafka_sasl_plain",
    "listen": "0.0.0.0:9293",
    "upstream": "kafka-1.emqx.net:9293",
    "enabled": true
  },
  {
    "name": "kafka_ssl",
    "listen": "0.0.0.0:9294",
    "upstream": "kafka-1.emqx.net:9294",
    "enabled": true
  },
  {
    "name": "kafka_sasl_ssl",
    "listen": "0.0.0.0:9295",
    "upstream": "kafka-1.emqx.net:9295",
    "enabled": true
  },
  {
    "name": "rocketmq",
    "listen": "0.0.0.0:9876",
    "upstream": "rocketmq_namesrv:9876",
    "enabled": true
  },
  {
    "name": "cassa_tcp",
    "listen": "0.0.0.0:9042",
    "upstream": "cassandra:9042",
    "enabled": true
  },
  {
    "name": "cassa_tls",
    "listen": "0.0.0.0:9142",
    "upstream": "cassandra:9142",
    "enabled": true
  },
  {
    "name": "sqlserver",
    "listen": "0.0.0.0:1433",
    "upstream": "sqlserver:1433",
    "enabled": true
  },
  {
    "name": "opents",
    "listen": "0.0.0.0:4242",
    "upstream": "opents:4242",
    "enabled": true
  },
  {
    "name": "pulsar_plain",
    "listen": "0.0.0.0:6652",
    "upstream": "pulsar:6652",
    "enabled": true
  },
  {
    "name": "pulsar_tls",
    "listen": "0.0.0.0:6653",
    "upstream": "pulsar:6653",
    "enabled": true
  },
  {
    "name": "oracle",
    "listen": "0.0.0.0:1521",
    "upstream": "oracle:1521",
    "enabled": true
  },
  {
    "name": "iotdb",
    "listen": "0.0.0.0:18080",
    "upstream": "iotdb:18080",
    "enabled": true
  }
]
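Each entry here is addressable by name through the admin API on 8474, which is how test suites simulate outages; a sketch:

# cut the rocketmq proxy, run the failure-path test, then restore it
curl -s -X POST http://localhost:8474/proxies/rocketmq -d '{"enabled":false}'
curl -s -X POST http://localhost:8474/proxies/rocketmq -d '{"enabled":true}'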
@ -20,8 +20,3 @@ indent_size = 4
# Tab indentation (no size specified)
[Makefile]
indent_style = tab

# Matches the exact files either package.json or .travis.yml
[{.travis.yml}]
indent_style = space
indent_size = 2
@ -1,43 +1,29 @@
## Default
* @zmstone @ieQu1 @terry-xiaoyu @qzhuyan @HJianBo @zhongwencool

## MQTT
/apps/emqx_connector/src/mqtt/ @qzhuyan
/apps/emqx/*/*mqtt* @qzhuyan
* @emqx/emqx-review-board

## apps
/apps/emqx/ @lafirest @thalesmg @HJianBo @ieQu1
/apps/emqx_authn/ @savonarola @JimMoen @HJianBo
/apps/emqx_authz/ @savonarola @JimMoen @HJianBo
/apps/emqx_auto_subscribe/ @thalesmg @HJianBo
/apps/emqx_bridge/ @terry-xiaoyu @thalesmg
/apps/emqx_conf/ @ieQu1 @thalesmg
/apps/emqx_connector/ @terry-xiaoyu @JimMoen
/apps/emqx_dashboard/ @lafirest @JimMoen
/apps/emqx_exhook/ @lafirest @HJianBo @JimMoen
/apps/emqx_gateway/ @HJianBo @lafirest
/apps/emqx_machine/ @thalesmg @terry-xiaoyu @ieQu1
/apps/emqx_management/ @HJianBo @lafirest @sstrigler
/apps/emqx_modules/ @thalesmg @terry-xiaoyu @HJianBo
/apps/emqx_plugin_libs/ @terry-xiaoyu @lafirest
/apps/emqx_plugins/ @thalesmg @JimMoen @ieQu1
/apps/emqx_prometheus/ @JimMoen @ieQu1
/apps/emqx_psk/ @lafirest @thalesmg @terry-xiaoyu
/apps/emqx_resource/ @terry-xiaoyu @thalesmg
/apps/emqx_replay/ @ieQu1
/apps/emqx_retainer/ @lafirest @ieQu1 @thalesmg
/apps/emqx_rule_engine/ @terry-xiaoyu @HJianBo @kjellwinblad
/apps/emqx_slow_subs/ @lafirest @HJianBo
/apps/emqx_statsd/ @JimMoen @HJianBo

## other
/lib-ee/ @thalesmg
/bin/ @zmstone @thalesmg @terry-xiaoyu @id
/rel/ @zmstone @thalesmg @id

/apps/emqx/ @emqx/emqx-review-board @lafirest
/apps/emqx_authn/ @emqx/emqx-review-board @JimMoen @savonarola
/apps/emqx_authz/ @emqx/emqx-review-board @JimMoen @savonarola
/apps/emqx_connector/ @emqx/emqx-review-board @JimMoen
/apps/emqx_dashboard/ @emqx/emqx-review-board @JimMoen @lafirest
/apps/emqx_exhook/ @emqx/emqx-review-board @JimMoen @lafirest
/apps/emqx_gateway/ @emqx/emqx-review-board @lafirest
/apps/emqx_management/ @emqx/emqx-review-board @lafirest @sstrigler
/apps/emqx_plugin_libs/ @emqx/emqx-review-board @lafirest
/apps/emqx_plugins/ @emqx/emqx-review-board @JimMoen
/apps/emqx_prometheus/ @emqx/emqx-review-board @JimMoen
/apps/emqx_psk/ @emqx/emqx-review-board @lafirest
/apps/emqx_retainer/ @emqx/emqx-review-board @lafirest
/apps/emqx_rule_engine/ @emqx/emqx-review-board @kjellwinblad
/apps/emqx_slow_subs/ @emqx/emqx-review-board @lafirest
/apps/emqx_statsd/ @emqx/emqx-review-board @JimMoen
/apps/emqx_replay @emqx/emqx-review-board @ieQu1
## CI
/.github/ @id
/.ci/ @id
/scripts/ @id
/build @id
/deploy/ @id
/deploy/ @emqx/emqx-review-board @Rory-Z

## @Meggielqk owns all files in any i18n directory anywhere in the project
/i18n/ @Meggielqk

## no owner for changelogs, anyone can approve
/changes
@ -1,7 +0,0 @@
Fixes <issue-or-jira-number>

## PR Checklist
Please convert it to a draft if any of the following conditions are not met. Reviewers may skip over until all the items are checked:

- [ ] If changed package build ci, pass [this action](https://github.com/emqx/emqx/actions/workflows/build_packages.yaml) (manual trigger)
- [ ] Change log has been added to `changes/` dir for user-facing artifacts update
@ -1 +0,0 @@
Fixes <issue-or-jira-number>
@ -1,12 +0,0 @@
Fixes <issue-or-jira-number>

## PR Checklist
Please convert it to a draft if any of the following conditions are not met. Reviewers may skip over until all the items are checked:

- [ ] Added tests for the changes
- [ ] Changed lines covered in coverage report
- [ ] Change log has been added to `changes/` dir
- [ ] `appup` files updated (execute `scripts/update-appup.sh emqx`)
- [ ] For internal contributor: there is a jira ticket to track this change
- [ ] If there should be document changes, a PR to emqx-docs.git is sent, or a jira ticket is created to follow up
- [ ] In case of non-backward compatible changes, reviewer should check this item as a write-off, and add details in **Backward Compatibility** section
@ -1,11 +0,0 @@
Fixes <issue-or-jira-number>

## PR Checklist
Please convert it to a draft if any of the following conditions are not met. Reviewers may skip over until all the items are checked:

- [ ] Added tests for the changes
- [ ] Changed lines covered in coverage report
- [ ] Change log has been added to `changes/` dir
- [ ] For internal contributor: there is a jira ticket to track this change
- [ ] If there should be document changes, a PR to emqx-docs.git is sent, or a jira ticket is created to follow up
- [ ] Schema changes are backward compatible
@ -1,81 +0,0 @@
name: 'Docker meta'
inputs:
  profile:
    required: true
    type: string
  registry:
    required: true
    type: string
  arch:
    required: true
    type: string
  otp:
    required: true
    type: string
  elixir:
    required: false
    type: string
    default: ''
  builder_base:
    required: true
    type: string
  owner:
    required: true
    type: string
  docker_tags:
    required: true
    type: string

outputs:
  emqx_name:
    description: "EMQX name"
    value: ${{ steps.pre-meta.outputs.emqx_name }}
  version:
    description: "docker image version"
    value: ${{ steps.meta.outputs.version }}
  tags:
    description: "docker image tags"
    value: ${{ steps.meta.outputs.tags }}
  labels:
    description: "docker image labels"
    value: ${{ steps.meta.outputs.labels }}

runs:
  using: composite
  steps:
    - name: prepare for docker/metadata-action
      id: pre-meta
      shell: bash
      run: |
        emqx_name=${{ inputs.profile }}
        img_suffix=${{ inputs.arch }}
        img_labels="org.opencontainers.image.otp.version=${{ inputs.otp }}"
        if [ -n "${{ inputs.elixir }}" ]; then
          emqx_name="emqx-elixir"
          img_suffix="elixir-${{ inputs.arch }}"
          img_labels="org.opencontainers.image.elixir.version=${{ inputs.elixir }}\n${img_labels}"
        fi
        if [ "${{ inputs.profile }}" = "emqx" ]; then
          img_labels="org.opencontainers.image.edition=Opensource\n${img_labels}"
        fi
        if [ "${{ inputs.profile }}" = "emqx-enterprise" ]; then
          img_labels="org.opencontainers.image.edition=Enterprise\n${img_labels}"
        fi
        if [[ "${{ inputs.builder_base }}" =~ "alpine" ]]; then
          img_suffix="${img_suffix}-alpine"
        fi
        echo "emqx_name=${emqx_name}" >> $GITHUB_OUTPUT
        echo "img_suffix=${img_suffix}" >> $GITHUB_OUTPUT
        echo "img_labels=${img_labels}" >> $GITHUB_OUTPUT
        echo "img_name=${{ inputs.registry }}/${{ inputs.owner }}/${{ inputs.profile }}" >> $GITHUB_OUTPUT
    - uses: docker/metadata-action@v4
      id: meta
      with:
        images:
          ${{ steps.pre-meta.outputs.img_name }}
        flavor: |
          suffix=-${{ steps.pre-meta.outputs.img_suffix }}
        tags: |
          type=raw,value=${{ inputs.docker_tags }}
        labels:
          ${{ steps.pre-meta.outputs.img_labels }}
@ -3,7 +3,7 @@ inputs:
  profile: # emqx, emqx-enterprise
    required: true
    type: string
  otp: # 25.1.2-2, 24.3.4.2-1
  otp: # 25.1.2-2, 24.3.4.2-2
    required: true
    type: string
  os:
@ -0,0 +1,21 @@
|
|||
Fixes <issue-or-jira-number>
|
||||
|
||||
<!-- Make sure to target release-50 branch if this PR is intended to fix the issues for the release candidate. -->
|
||||
|
||||
## Summary
|
||||
copilot:summary
|
||||
|
||||
## PR Checklist
|
||||
Please convert it to a draft if any of the following conditions are not met. Reviewers may skip over until all the items are checked:
|
||||
|
||||
- [ ] Added tests for the changes
|
||||
- [ ] Changed lines covered in coverage report
|
||||
- [ ] Change log has been added to `changes/{ce,ee}/(feat|perf|fix)-<PR-id>.en.md` files
|
||||
- [ ] For internal contributor: there is a jira ticket to track this change
|
||||
- [ ] If there should be document changes, a PR to emqx-docs.git is sent, or a jira ticket is created to follow up
|
||||
- [ ] Schema changes are backward compatible
|
||||
|
||||
## Checklist for CI (.github/workflows) changes
|
||||
|
||||
- [ ] If changed package build workflow, pass [this action](https://github.com/emqx/emqx/actions/workflows/build_packages.yaml) (manual trigger)
|
||||
- [ ] Change log has been added to `changes/` dir for user-facing artifacts update
|
|
@ -4,7 +4,7 @@ on: [pull_request]

jobs:
  check_apps_version:
    runs-on: ubuntu-20.04
    runs-on: ubuntu-22.04

    steps:
      - uses: actions/checkout@v3
@ -9,27 +9,30 @@ on:
    tags:
      - v*
      - e*
  release:
    types:
      - published
      - docker-latest-*
  workflow_dispatch:
    inputs:
      branch_or_tag:
        required: false
      profile:
        required: false
        default: 'emqx'
      is_latest:
        required: false
        default: false

jobs:
  prepare:
    runs-on: ubuntu-20.04
    runs-on: ubuntu-22.04
    # prepare source with any OTP version, no need for a matrix
    container: "ghcr.io/emqx/emqx-builder/5.0-26:1.13.4-24.3.4.2-1-ubuntu20.04"
    container: "ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-24.3.4.2-3-ubuntu22.04"

    outputs:
      BUILD_PROFILE: ${{ steps.get_profile.outputs.BUILD_PROFILE }}
      IS_DOCKER_LATEST: ${{ steps.get_profile.outputs.IS_DOCKER_LATEST }}
      PROFILE: ${{ steps.get_profile.outputs.PROFILE }}
      EDITION: ${{ steps.get_profile.outputs.EDITION }}
      IS_LATEST: ${{ steps.get_profile.outputs.IS_LATEST }}
      IS_EXACT_TAG: ${{ steps.get_profile.outputs.IS_EXACT_TAG }}
      DOCKER_TAG_VERSION: ${{ steps.get_profile.outputs.DOCKER_TAG_VERSION }}
      VERSION: ${{ steps.get_profile.outputs.VERSION }}

    steps:
      - uses: actions/checkout@v3
@ -40,59 +43,61 @@ jobs:

      - name: Get profiles to build
        id: get_profile
        env:
          INPUTS_PROFILE: ${{ github.event.inputs.profile }}
        run: |
          cd source
          tag=${{ github.ref }}
          # tag docker-latest-ce or docker-latest-ee
          if git describe --tags --exact --match 'docker-latest-*' 2>/dev/null; then
            echo 'docker_latest=true due to docker-latest-* tag'
            docker_latest=true
          elif [ "${{ github.event_name }}" = "release" ]; then
            echo 'docker_latest=true due to release'
            docker_latest=true
            echo 'is_latest=true due to docker-latest-* tag'
            is_latest=true
          elif [ "${{ inputs.is_latest }}" = "true" ]; then
            echo 'is_latest=true due to manual input from workflow_dispatch'
            is_latest=true
          else
            echo 'docker_latest=false'
            docker_latest=false
            echo 'is_latest=false'
            is_latest=false
          fi
          if git describe --tags --match "[v|e]*" --exact; then
          # resolve profile
          if git describe --tags --match "v*" --exact; then
            echo "This is an exact git tag, will publish images"
            is_exact='true'
            PROFILE=emqx
          elif git describe --tags --match "e*" --exact; then
            echo "This is an exact git tag, will publish images"
            is_exact='true'
            PROFILE=emqx-enterprise
          else
            echo "This is NOT an exact git tag, will not publish images"
            is_exact='false'
          fi
          case $tag in
            refs/tags/v*)
              PROFILE='emqx'

          case "${PROFILE:-$INPUTS_PROFILE}" in
            emqx)
              EDITION='Opensource'
              ;;
            refs/tags/e*)
              PROFILE=emqx-enterprise
            emqx-enterprise)
              EDITION='Enterprise'
              ;;
            *)
              PROFILE=${{ github.event.inputs.profile }}
              case "$PROFILE" in
                emqx)
                  true
                  ;;
                emqx-enterprise)
                  true
                  ;;
                *)
                  echo "ERROR: Failed to resolve build profile"
                  exit 1
                  ;;
              esac
              echo "ERROR: Failed to resolve build profile"
              exit 1
              ;;
          esac

          VSN="$(./pkg-vsn.sh "$PROFILE")"
          echo "Building $PROFILE image with tag $VSN (latest=$docker_latest)"
          echo "IS_DOCKER_LATEST=$docker_latest" >> $GITHUB_OUTPUT
          echo "Building emqx/$PROFILE:$VSN image (latest=$is_latest)"
          echo "Push = $is_exact"
          echo "IS_LATEST=$is_latest" >> $GITHUB_OUTPUT
          echo "IS_EXACT_TAG=$is_exact" >> $GITHUB_OUTPUT
          echo "BUILD_PROFILE=$PROFILE" >> $GITHUB_OUTPUT
          echo "DOCKER_TAG_VERSION=$VSN" >> $GITHUB_OUTPUT
          echo "PROFILE=$PROFILE" >> $GITHUB_OUTPUT
          echo "EDITION=$EDITION" >> $GITHUB_OUTPUT
          echo "VERSION=$VSN" >> $GITHUB_OUTPUT
      - name: get_all_deps
        env:
          PROFILE: ${{ steps.get_profile.outputs.PROFILE }}
        run: |
          make -C source deps-all
          PROFILE=$PROFILE make -C source deps-$PROFILE
          zip -ryq source.zip source/* source/.[^.]*
      - uses: actions/upload-artifact@v3
        with:
@ -100,37 +105,33 @@ jobs:
          path: source.zip

  docker:
    runs-on: ${{ matrix.arch[1] }}
    runs-on: ubuntu-22.04
    needs: prepare

    strategy:
      fail-fast: false
      matrix:
        arch:
          - [amd64, ubuntu-20.04]
          - [arm64, aws-arm64]
        profile:
          - ${{ needs.prepare.outputs.BUILD_PROFILE }}
          - "${{ needs.prepare.outputs.PROFILE }}"
        registry:
          - 'docker.io'
          - 'public.ecr.aws'
        os:
          - [alpine3.15.1, "alpine:3.15.1", "deploy/docker/Dockerfile.alpine"]
          - [debian11, "debian:11-slim", "deploy/docker/Dockerfile"]
        # NOTE: 'otp' and 'elixir' are to configure emqx-builder image
        # only support latest otp and elixir, not a matrix
        builder:
          - 5.0-26 # update to latest
          - 5.0-34 # update to latest
        otp:
          - 24.3.4.2-1 # switch to 25 once ready to release 5.1
          - 24.3.4.2-3 # switch to 25 once ready to release 5.1
        elixir:
          - 1.13.4 # update to latest
          - 'no_elixir'
          - '1.13.4' # update to latest
        exclude: # TODO: publish enterprise to ecr too?
          - registry: 'public.ecr.aws'
            profile: emqx-enterprise

    steps:
      - uses: AutoModality/action-clean@v1
        if: matrix.arch[1] == 'aws-arm64'
      - uses: actions/download-artifact@v3
        with:
          name: source
@ -138,16 +139,17 @@ jobs:
      - name: unzip source code
        run: unzip -q source.zip

      - uses: docker/setup-qemu-action@v2
      - uses: docker/setup-buildx-action@v2

      - name: Login for docker.
      - name: Login to hub.docker.com
        uses: docker/login-action@v2
        if: matrix.registry == 'docker.io'
        with:
          username: ${{ secrets.DOCKER_HUB_USER }}
          password: ${{ secrets.DOCKER_HUB_TOKEN }}

      - name: Login for AWS ECR
      - name: Login to AWS ECR
        uses: docker/login-action@v2
        if: matrix.registry == 'public.ecr.aws'
        with:
@ -156,229 +158,44 @@ jobs:
          password: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          ecr: true

      - uses: ./source/.github/actions/docker-meta
      - name: prepare for docker/metadata-action
        id: pre-meta
        shell: bash
        run: |
          extra_labels=
          img_suffix=
          if [ "${{ matrix.elixir }}" != 'no_elixir' ]; then
            img_suffix="-elixir"
            extra_labels="org.opencontainers.image.elixir.version=${{ matrix.elixir }}"
          fi

          echo "img_suffix=$img_suffix" >> $GITHUB_OUTPUT
          echo "extra_labels=$extra_labels" >> $GITHUB_OUTPUT

      - uses: docker/metadata-action@v4
        id: meta
        with:
          profile: ${{ matrix.profile }}
          registry: ${{ matrix.registry }}
          arch: ${{ matrix.arch[0] }}
          otp: ${{ matrix.otp }}
          builder_base: ${{ matrix.os[0] }}
          owner: ${{ github.repository_owner }}
          docker_tags: ${{ needs.prepare.outputs.DOCKER_TAG_VERSION }}
          images: |
            ${{ matrix.registry }}/${{ github.repository_owner }}/${{ matrix.profile }}
          flavor: |
            suffix=${{ steps.pre-meta.outputs.img_suffix }}
          tags: |
            type=raw,value=${{ needs.prepare.outputs.VERSION }}
            type=raw,value=latest,enable=${{ needs.prepare.outputs.IS_LATEST }}
          labels: |
            org.opencontainers.image.otp.version=${{ matrix.otp }}
            org.opencontainers.image.edition=${{ needs.prepare.outputs.EDITION }}
            ${{ steps.pre-meta.outputs.extra_labels }}

      - uses: docker/build-push-action@v3
        with:
          push: ${{ needs.prepare.outputs.IS_EXACT_TAG == 'true' || github.repository_owner != 'emqx' }}
          pull: true
          no-cache: true
          platforms: linux/${{ matrix.arch[0] }}
          platforms: linux/amd64,linux/arm64
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          build-args: |
            BUILD_FROM=ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os[0] }}
            RUN_FROM=${{ matrix.os[1] }}
            EMQX_NAME=${{ steps.meta.outputs.emqx_name }}
            EMQX_NAME=${{ matrix.profile }}${{ steps.pre-meta.outputs.img_suffix }}
          file: source/${{ matrix.os[2] }}
          context: source

      - name: Docker Hub Description
        if: matrix.registry == 'docker.io'
        uses: peter-evans/dockerhub-description@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_PASSWORD }}
          repository: "emqx/${{ needs.prepare.outputs.BUILD_PROFILE }}"
          readme-filepath: ./source/deploy/docker/README.md
          short-description: "The most scalable open-source MQTT broker for IoT, IIoT, connected vehicles, and more."

  docker-elixir:
    runs-on: ${{ matrix.arch[1] }}
    needs: prepare
    # do not build elixir images for ee for now
    if: needs.prepare.outputs.BUILD_PROFILE == 'emqx'

    strategy:
      fail-fast: false
      matrix:
        arch:
          - [amd64, ubuntu-20.04]
          - [arm64, aws-arm64]
        profile:
          - ${{ needs.prepare.outputs.BUILD_PROFILE }}
        registry:
          - 'docker.io'
        os:
          - [debian11, "debian:11-slim", "deploy/docker/Dockerfile"]
        builder:
          - 5.0-26 # update to latest
        otp:
          - 25.1.2-2 # update to latest
        elixir:
          - 1.13.4 # update to latest

    steps:
      - uses: AutoModality/action-clean@v1
        if: matrix.arch[1] == 'aws-arm64'
      - uses: actions/download-artifact@v3
        with:
          name: source
          path: .
      - name: unzip source code
        run: unzip -q source.zip

      - uses: docker/setup-buildx-action@v2

      - name: Login for docker.
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKER_HUB_USER }}
          password: ${{ secrets.DOCKER_HUB_TOKEN }}

      - uses: ./source/.github/actions/docker-meta
        id: meta
        with:
          profile: ${{ matrix.profile }}
          registry: ${{ matrix.registry }}
          arch: ${{ matrix.arch[0] }}
          otp: ${{ matrix.otp }}
          elixir: ${{ matrix.elixir }}
          builder_base: ${{ matrix.os[0] }}
          owner: ${{ github.repository_owner }}
          docker_tags: ${{ needs.prepare.outputs.DOCKER_TAG_VERSION }}

      - uses: docker/build-push-action@v3
        with:
          push: ${{ needs.prepare.outputs.IS_EXACT_TAG == 'true' || github.repository_owner != 'emqx' }}
          pull: true
          no-cache: true
          platforms: linux/${{ matrix.arch[0] }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          build-args: |
            BUILD_FROM=ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os[0] }}
            RUN_FROM=${{ matrix.os[1] }}
            EMQX_NAME=${{ steps.meta.outputs.emqx_name }}
          file: source/${{ matrix.os[2] }}
          context: source

  docker-push-multi-arch-manifest:
    # note, we only run on amd64
    if: needs.prepare.outputs.IS_EXACT_TAG
    needs:
      - prepare
      - docker
    runs-on: ${{ matrix.arch[1] }}
    strategy:
      fail-fast: false
      matrix:
        arch:
          - [amd64, ubuntu-20.04]
        profile:
          - ${{ needs.prepare.outputs.BUILD_PROFILE }}
        os:
          - [alpine3.15.1, "alpine:3.15.1", "deploy/docker/Dockerfile.alpine"]
          - [debian11, "debian:11-slim", "deploy/docker/Dockerfile"]
        # NOTE: only support latest otp version, not a matrix
        otp:
          - 24.3.4.2-1 # switch to 25 once ready to release 5.1
        registry:
          - 'docker.io'
          - 'public.ecr.aws'
        exclude:
          - registry: 'public.ecr.aws'
            profile: emqx-enterprise

    steps:
      - uses: actions/download-artifact@v3
        with:
          name: source
          path: .

      - name: unzip source code
        run: unzip -q source.zip

      - uses: docker/login-action@v2
        if: matrix.registry == 'docker.io'
        with:
          username: ${{ secrets.DOCKER_HUB_USER }}
          password: ${{ secrets.DOCKER_HUB_TOKEN }}

      - uses: docker/login-action@v2
        if: matrix.registry == 'public.ecr.aws'
        with:
          registry: public.ecr.aws
          username: ${{ secrets.AWS_ACCESS_KEY_ID }}
          password: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          ecr: true

      - uses: ./source/.github/actions/docker-meta
        id: meta
        with:
          profile: ${{ matrix.profile }}
          registry: ${{ matrix.registry }}
          arch: ${{ matrix.arch[0] }}
          otp: ${{ matrix.otp }}
          builder_base: ${{ matrix.os[0] }}
          owner: ${{ github.repository_owner }}
          docker_tags: ${{ needs.prepare.outputs.DOCKER_TAG_VERSION }}

      - name: update manifest for multiarch image
        working-directory: source
        run: |
          is_latest="${{ needs.prepare.outputs.IS_DOCKER_LATEST }}"
          scripts/docker-create-push-manifests.sh "${{ steps.meta.outputs.tags }}" "$is_latest"

  docker-elixir-push-multi-arch-manifest:
    # note, we only run on amd64
    # do not build enterprise elixir images for now
    if: needs.prepare.outputs.IS_EXACT_TAG == 'true' && needs.prepare.outputs.BUILD_PROFILE == 'emqx'
    needs:
      - prepare
      - docker-elixir
    runs-on: ${{ matrix.arch[1] }}
    strategy:
      fail-fast: false
      matrix:
        arch:
          - [amd64, ubuntu-20.04]
        profile:
          - ${{ needs.prepare.outputs.BUILD_PROFILE }}
        # NOTE: for docker, only support latest otp version, not a matrix
        otp:
          - 25.1.2-2 # update to latest
        elixir:
          - 1.13.4 # update to latest
        registry:
          - 'docker.io'

    steps:
      - uses: actions/download-artifact@v3
        with:
          name: source
          path: .

      - name: unzip source code
        run: unzip -q source.zip

      - uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKER_HUB_USER }}
          password: ${{ secrets.DOCKER_HUB_TOKEN }}

      - uses: ./source/.github/actions/docker-meta
        id: meta
        with:
          profile: ${{ matrix.profile }}
          registry: ${{ matrix.registry }}
          arch: ${{ matrix.arch[0] }}
          otp: ${{ matrix.otp }}
          elixir: ${{ matrix.elixir }}
          builder_base: ${{ matrix.os[0] }}
          owner: ${{ github.repository_owner }}
          docker_tags: ${{ needs.prepare.outputs.DOCKER_TAG_VERSION }}

      - name: update manifest for multiarch image
        working-directory: source
        run: |
          scripts/docker-create-push-manifests.sh "${{ steps.meta.outputs.tags }}" false
@ -5,8 +5,6 @@ concurrency:
  cancel-in-progress: true

on:
  schedule:
    - cron: '0 */6 * * *'
  push:
    branches:
      - 'ci/**'
@ -22,24 +20,22 @@ on:

jobs:
  prepare:
    runs-on: ubuntu-20.04
    container: ghcr.io/emqx/emqx-builder/5.0-26:1.13.4-24.3.4.2-1-ubuntu20.04
    runs-on: ubuntu-22.04
    container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-24.3.4.2-3-ubuntu22.04
    outputs:
      BUILD_PROFILE: ${{ steps.get_profile.outputs.BUILD_PROFILE }}
      IS_EXACT_TAG: ${{ steps.get_profile.outputs.IS_EXACT_TAG }}
      VERSION: ${{ steps.get_profile.outputs.VERSION }}
    steps:
      - uses: actions/checkout@v3
        with:
          ref: ${{ github.event.inputs.branch_or_tag }} # when input is not given, the event tag is used
          path: source
          fetch-depth: 0

      - name: Get profile to build
        id: get_profile
        run: |
          cd source
          git config --global --add safe.directory "$(pwd)"
          git config --global --add safe.directory "$GITHUB_WORKSPACE"
          tag=${{ github.ref }}
          if git describe --tags --match "[v|e]*" --exact; then
            echo "WARN: This is an exact git tag, will publish release"
@ -75,47 +71,36 @@ jobs:
          ;;
          esac
          echo "BUILD_PROFILE=$PROFILE" >> $GITHUB_OUTPUT
      - name: get_all_deps
        run: |
          make -C source deps-all
          zip -ryq source.zip source/* source/.[^.]*
      - uses: actions/upload-artifact@v3
        with:
          name: source
          path: source.zip
          echo "VERSION=$(./pkg-vsn.sh $PROFILE)" >> $GITHUB_OUTPUT

  windows:
    runs-on: windows-2019
    if: startsWith(github.ref_name, 'v')
    needs: prepare
    strategy:
      fail-fast: false
      matrix:
        profile: # for now only CE for windows
          - emqx
    steps:
      - uses: actions/download-artifact@v3
      - uses: actions/checkout@v3
        with:
          name: source
          path: .
      - name: unzip source code
        run: Expand-Archive -Path source.zip -DestinationPath ./
          ref: ${{ github.event.inputs.branch_or_tag }}
          fetch-depth: 0

      - uses: ilammy/msvc-dev-cmd@v1.12.0
      - uses: emqx/setup-beam@v1.16.1-emqx
      - uses: erlef/setup-beam@v1.15.2
        with:
          otp-version: 24.3.4.6
      - name: build
        env:
          PYTHON: python
          DIAGNOSTIC: 1
        working-directory: source
        run: |
          # ensure crypto app (openssl)
          erl -eval "erlang:display(crypto:info_lib())" -s init stop
          make ${{ matrix.profile }}-tgz
      - name: run emqx
        timeout-minutes: 5
        working-directory: source
        run: |
          ./_build/${{ matrix.profile }}/rel/emqx/bin/emqx start
          Start-Sleep -s 5
@ -130,15 +115,7 @@ jobs:
        if: success()
        with:
          name: ${{ matrix.profile }}
          path: source/_packages/${{ matrix.profile }}/
      - name: Send notification to Slack
        uses: slackapi/slack-github-action@v1.23.0
        if: failure() && github.event_name == 'schedule'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
        with:
          payload: |
            {"text": "Scheduled run of ${{ github.workflow }}@Windows failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}
          path: _packages/${{ matrix.profile }}/

  mac:
    needs: prepare
|
@ -148,22 +125,18 @@ jobs:
|
|||
profile:
|
||||
- ${{ needs.prepare.outputs.BUILD_PROFILE }}
|
||||
otp:
|
||||
- 24.3.4.2-1
|
||||
- 24.3.4.2-3
|
||||
os:
|
||||
- macos-11
|
||||
- macos-12
|
||||
- macos-12-arm64
|
||||
runs-on: ${{ matrix.os }}
|
||||
steps:
|
||||
- uses: emqx/self-hosted-cleanup-action@v1.0.3
|
||||
- uses: actions/download-artifact@v3
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
name: source
|
||||
path: .
|
||||
- name: unzip source code
|
||||
run: |
|
||||
ln -s . source
|
||||
unzip -o -q source.zip
|
||||
rm source source.zip
|
||||
ref: ${{ github.event.inputs.branch_or_tag }}
|
||||
fetch-depth: 0
|
||||
- uses: ./.github/actions/package-macos
|
||||
with:
|
||||
profile: ${{ matrix.profile }}
|
||||
|
@ -178,18 +151,12 @@ jobs:
        with:
          name: ${{ matrix.profile }}
          path: _packages/${{ matrix.profile }}/
      - name: Send notification to Slack
        uses: slackapi/slack-github-action@v1.23.0
        if: failure() && github.event_name == 'schedule'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
        with:
          payload: |
            {"text": "Scheduled run of ${{ github.workflow }}@${{ matrix.os }} failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}

  linux:
    needs: prepare
    runs-on: ${{ matrix.build_machine }}
    # always run in builder container because the host might have the wrong OTP version etc.
    # otherwise buildx.sh does not run docker if arch and os matches the target arch and os.
    container:
      image: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os }}"

@ -199,45 +166,47 @@ jobs:
        profile:
          - ${{ needs.prepare.outputs.BUILD_PROFILE }}
        otp:
          - 24.3.4.2-1
          - 24.3.4.2-3
        arch:
          - amd64
          - arm64
        os:
          - ubuntu22.04
          - ubuntu20.04
          - ubuntu18.04
          - debian11
          - debian10
          - el9
          - el8
          - el7
          - amzn2
        build_machine:
          - aws-arm64
          - ubuntu-20.04
          - ubuntu-22.04
        builder:
          - 5.0-26
          - 5.0-34
        elixir:
          - 1.13.4
        exclude:
          - arch: arm64
            build_machine: ubuntu-20.04
            build_machine: ubuntu-22.04
          - arch: amd64
            build_machine: aws-arm64
        include:
          - profile: emqx
            otp: 25.1.2-2
            otp: 25.1.2-3
            arch: amd64
            os: ubuntu20.04
            build_machine: ubuntu-20.04
            builder: 5.0-26
            os: ubuntu22.04
            build_machine: ubuntu-22.04
            builder: 5.0-34
            elixir: 1.13.4
            release_with: elixir
          - profile: emqx
            otp: 25.1.2-2
            otp: 25.1.2-3
            arch: amd64
            os: amzn2
            build_machine: ubuntu-20.04
            builder: 5.0-26
            build_machine: ubuntu-22.04
            builder: 5.0-34
            elixir: 1.13.4
            release_with: elixir
@ -248,24 +217,20 @@ jobs:
    steps:
      - uses: AutoModality/action-clean@v1
        if: matrix.build_machine == 'aws-arm64'
      - uses: actions/download-artifact@v3

      - uses: actions/checkout@v3
        with:
          name: source
          path: .
      - name: unzip source code
        run: unzip -q source.zip
          ref: ${{ github.event.inputs.branch_or_tag }}
          fetch-depth: 0

      - name: build emqx packages
        working-directory: source
        env:
          BUILDER: ${{ matrix.builder }}
          ELIXIR: ${{ matrix.elixir }}
          OTP: ${{ matrix.otp }}
          PROFILE: ${{ matrix.profile }}
          ARCH: ${{ matrix.arch }}
          SYSTEM: ${{ matrix.os }}
        run: |
          set -eu
          git config --global --add safe.directory "/__w/emqx/emqx"
          git config --global --add safe.directory "$GITHUB_WORKSPACE"
          # Align path for CMake caches
          if [ ! "$PWD" = "/emqx" ]; then
            ln -s $PWD /emqx
@ -274,7 +239,8 @@ jobs:
|
|||
echo "pwd is $PWD"
|
||||
PKGTYPES="tgz pkg"
|
||||
IS_ELIXIR="no"
|
||||
if [ ${{ matrix.release_with }} == 'elixir' ]; then
|
||||
WITH_ELIXIR=${{ matrix.release_with }}
|
||||
if [ "${WITH_ELIXIR:-}" == 'elixir' ]; then
|
||||
PKGTYPES="tgz"
|
||||
# set Elixir build flag
|
||||
IS_ELIXIR="yes"
|
||||
|
@@ -286,26 +252,18 @@ jobs:
            --pkgtype "${PKGTYPE}" \
            --arch "${ARCH}" \
            --elixir "${IS_ELIXIR}" \
            --builder "ghcr.io/emqx/emqx-builder/${BUILDER}:${ELIXIR}-${OTP}-${SYSTEM}"
            --builder "force_host"
        done
    - uses: actions/upload-artifact@v3
      if: success()
      with:
        name: ${{ matrix.profile }}
        path: source/_packages/${{ matrix.profile }}/
    - name: Send notification to Slack
      uses: slackapi/slack-github-action@v1.23.0
      if: failure() && github.event_name == 'schedule'
      env:
        SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
      with:
        payload: |
          {"text": "Scheduled run of ${{ github.workflow }}@${{ matrix.os }} failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}
        path: _packages/${{ matrix.profile }}/

  publish_artifacts:
    runs-on: ubuntu-20.04
    runs-on: ubuntu-22.04
    needs: [prepare, mac, linux]
    if: ${{ needs.prepare.outputs.IS_EXACT_TAG }}
    if: needs.prepare.outputs.IS_EXACT_TAG == 'true'
    strategy:
      fail-fast: false
      matrix:

@@ -328,7 +286,7 @@ jobs:
          echo "$(cat $var.sha256) $var" | sha256sum -c || exit 1
        done
        cd -
    - uses: aws-actions/configure-aws-credentials@v1-node16
    - uses: aws-actions/configure-aws-credentials@v2
      with:
        aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
        aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -0,0 +1,127 @@
name: Scheduled build packages

concurrency:
  group: build-${{ github.event_name }}-${{ github.ref }}
  cancel-in-progress: true

on:
  schedule:
    - cron: '0 */6 * * *'
  workflow_dispatch:

jobs:
  linux:
    if: github.repository_owner == 'emqx'
    runs-on: aws-${{ matrix.arch }}
    # always run in builder container because the host might have the wrong OTP version etc.
    # otherwise buildx.sh does not run docker if arch and os matches the target arch and os.
    container:
      image: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os }}"

    strategy:
      fail-fast: false
      matrix:
        profile:
          - ['emqx', 'master']
          - ['emqx-enterprise', 'release-50']
        otp:
          - 24.3.4.2-3
        arch:
          - amd64
        os:
          - debian10
          - amzn2
        builder:
          - 5.0-34
        elixir:
          - 1.13.4

    defaults:
      run:
        shell: bash

    steps:
    - uses: emqx/self-hosted-cleanup-action@v1.0.3
    - uses: actions/checkout@v3
      with:
        ref: ${{ matrix.profile[1] }}
        fetch-depth: 0

    - name: build emqx packages
      env:
        ELIXIR: ${{ matrix.elixir }}
        PROFILE: ${{ matrix.profile[0] }}
        ARCH: ${{ matrix.arch }}
      run: |
        set -eu
        git config --global --add safe.directory "$GITHUB_WORKSPACE"
        PKGTYPES="tgz pkg"
        IS_ELIXIR="no"
        for PKGTYPE in ${PKGTYPES};
        do
          ./scripts/buildx.sh \
            --profile "${PROFILE}" \
            --pkgtype "${PKGTYPE}" \
            --arch "${ARCH}" \
            --elixir "${IS_ELIXIR}" \
            --builder "force_host"
        done
    - uses: actions/upload-artifact@v3
      if: success()
      with:
        name: ${{ matrix.profile[0] }}
        path: _packages/${{ matrix.profile[0] }}/
    - name: Send notification to Slack
      uses: slackapi/slack-github-action@v1.23.0
      if: failure()
      env:
        SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
      with:
        payload: |
          {"text": "Scheduled build of ${{ matrix.profile[0] }} package for ${{ matrix.os }} failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}

  mac:
    runs-on: ${{ matrix.os }}
    if: github.repository_owner == 'emqx'

    strategy:
      fail-fast: false
      matrix:
        profile:
          - emqx
        branch:
          - master
        otp:
          - 24.3.4.2-3
        os:
          - macos-12
          - macos-12-arm64

    steps:
    - uses: emqx/self-hosted-cleanup-action@v1.0.3
    - uses: actions/checkout@v3
      with:
        ref: ${{ matrix.branch }}
        fetch-depth: 0
    - uses: ./.github/actions/package-macos
      with:
        profile: ${{ matrix.profile }}
        otp: ${{ matrix.otp }}
        os: ${{ matrix.os }}
        apple_id_password: ${{ secrets.APPLE_ID_PASSWORD }}
        apple_developer_identity: ${{ secrets.APPLE_DEVELOPER_IDENTITY }}
        apple_developer_id_bundle: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE }}
        apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }}
    - uses: actions/upload-artifact@v3
      if: success()
      with:
        name: ${{ matrix.profile }}
        path: _packages/${{ matrix.profile }}/
    - name: Send notification to Slack
      uses: slackapi/slack-github-action@v1.23.0
      if: failure()
      env:
        SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
      with:
        payload: |
          {"text": "Scheduled build of ${{ matrix.profile }} package for ${{ matrix.os }} failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}
@@ -8,6 +8,7 @@ on:
  push:
    branches:
      - master
      - release-50
  pull_request:
    # GitHub pull_request action is by default triggered when
    # opened reopened or synchronize,

@@ -29,13 +30,14 @@ jobs:
      fail-fast: false
      matrix:
        profile:
          - ["emqx", "24.3.4.2-1", "el7"]
          - ["emqx", "25.1.2-2", "ubuntu20.04"]
          - ["emqx-enterprise", "24.3.4.2-1", "ubuntu20.04"]
          - ["emqx", "24.3.4.2-3", "el7", "erlang"]
          - ["emqx", "25.1.2-3", "ubuntu22.04", "elixir"]
          - ["emqx-enterprise", "24.3.4.2-3", "amzn2", "erlang"]
          - ["emqx-enterprise", "25.1.2-3", "ubuntu20.04", "erlang"]
        builder:
          - 5.0-26
          - 5.0-34
        elixir:
          - 1.13.4
          - '1.13.4'

    container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.profile[1] }}-${{ matrix.profile[2] }}"
@@ -52,22 +54,22 @@ jobs:
        run: |
          git config --global --add safe.directory "$GITHUB_WORKSPACE"
      - name: build and test tgz package
        if: matrix.profile[3] == 'erlang'
        run: |
          make ${EMQX_NAME}-tgz
          ./scripts/pkg-tests.sh ${EMQX_NAME}-tgz
      - name: run static checks
        if: contains(matrix.os, 'ubuntu')
        run: |
          make static_checks
      - name: build and test deb/rpm packages
        if: matrix.profile[3] == 'erlang'
        run: |
          make ${EMQX_NAME}-pkg
          ./scripts/pkg-tests.sh ${EMQX_NAME}-pkg
      - name: build and test tgz package (Elixir)
        if: matrix.profile[3] == 'elixir'
        run: |
          make ${EMQX_NAME}-elixir-tgz
          ./scripts/pkg-tests.sh ${EMQX_NAME}-elixir-tgz
      - name: build and test deb/rpm packages (Elixir)
        if: matrix.profile[3] == 'elixir'
        run: |
          make ${EMQX_NAME}-elixir-pkg
          ./scripts/pkg-tests.sh ${EMQX_NAME}-elixir-pkg

@@ -80,7 +82,7 @@ jobs:
          name: "${{ matrix.profile[0] }}_schema_dump"
          path: |
            scripts/spellcheck
            _build/${{ matrix.profile[0] }}/lib/emqx_dashboard/priv/www/static/schema.json
            _build/docgen/${{ matrix.profile[0] }}/schema-en.json

  windows:
    runs-on: windows-2019
@@ -94,7 +96,7 @@ jobs:
    steps:
    - uses: actions/checkout@v3
    - uses: ilammy/msvc-dev-cmd@v1.12.0
    - uses: emqx/setup-beam@v1.16.1-emqx
    - uses: erlef/setup-beam@v1.15.2
      with:
        otp-version: ${{ matrix.otp }}
    - name: build

@@ -130,7 +132,7 @@ jobs:
        - emqx
        - emqx-enterprise
      otp:
        - 24.3.4.2-1
        - 24.3.4.2-3
      os:
        - macos-11
        - macos-12-arm64
@@ -156,6 +158,59 @@ jobs:
          name: ${{ matrix.os }}
          path: _packages/**/*

  docker:
    runs-on: ubuntu-22.04

    strategy:
      fail-fast: false
      matrix:
        profile:
          - ["emqx", "5.0.16"]
          - ["emqx-enterprise", "5.0.1"]

    steps:
    - uses: actions/checkout@v3
    - name: prepare
      run: |
        EMQX_NAME=${{ matrix.profile[0] }}
        PKG_VSN=${PKG_VSN:-$(./pkg-vsn.sh $EMQX_NAME)}
        EMQX_IMAGE_TAG=emqx/$EMQX_NAME:test
        EMQX_IMAGE_OLD_VERSION_TAG=emqx/$EMQX_NAME:${{ matrix.profile[1] }}
        echo "EMQX_NAME=$EMQX_NAME" >> $GITHUB_ENV
        echo "PKG_VSN=$PKG_VSN" >> $GITHUB_ENV
        echo "EMQX_IMAGE_TAG=$EMQX_IMAGE_TAG" >> $GITHUB_ENV
        echo "EMQX_IMAGE_OLD_VERSION_TAG=$EMQX_IMAGE_OLD_VERSION_TAG" >> $GITHUB_ENV
    - uses: docker/setup-buildx-action@v2
    - name: build and export to Docker
      uses: docker/build-push-action@v4
      with:
        context: .
        file: ./deploy/docker/Dockerfile
        load: true
        tags: ${{ env.EMQX_IMAGE_TAG }}
        build-args: |
          EMQX_NAME=${{ env.EMQX_NAME }}
    - name: test docker image
      run: |
        CID=$(docker run -d --rm -P $EMQX_IMAGE_TAG)
        HTTP_PORT=$(docker inspect --format='{{(index (index .NetworkSettings.Ports "18083/tcp") 0).HostPort}}' $CID)
        ./scripts/test/emqx-smoke-test.sh localhost $HTTP_PORT
        docker stop $CID
    - name: test two nodes cluster with proto_dist=inet_tls in docker
      run: |
        ./scripts/test/start-two-nodes-in-docker.sh -P $EMQX_IMAGE_TAG $EMQX_IMAGE_OLD_VERSION_TAG
        HTTP_PORT=$(docker inspect --format='{{(index (index .NetworkSettings.Ports "18083/tcp") 0).HostPort}}' haproxy)
        ./scripts/test/emqx-smoke-test.sh localhost $HTTP_PORT
        # cleanup
        ./scripts/test/start-two-nodes-in-docker.sh -c
    - name: export docker image
      run: |
        docker save $EMQX_IMAGE_TAG | gzip > $EMQX_NAME-$PKG_VSN.tar.gz
    - uses: actions/upload-artifact@v3
      with:
        name: "${{ matrix.profile[0] }}-docker"
        path: "${{ env.EMQX_NAME }}-${{ env.PKG_VSN }}.tar.gz"

  spellcheck:
    needs: linux
    strategy:

@@ -172,4 +227,4 @@ jobs:
          path: /tmp/
      - name: Run spellcheck
        run: |
          bash /tmp/scripts/spellcheck/spellcheck.sh /tmp/_build/${{ matrix.profile }}/lib/emqx_dashboard/priv/www/static/schema.json
          bash /tmp/scripts/spellcheck/spellcheck.sh /tmp/_build/docgen/${{ matrix.profile }}/schema-en.json
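Note on the smoke-test steps above: because the container is started with `-P`, the dashboard's container port 18083 lands on a random host port, which is then recovered through Docker's Go-template inspection. A minimal standalone sketch of that lookup (image tag and probed endpoint are illustrative):

    # publish all exposed ports to random host ports
    CID=$(docker run -d --rm -P emqx/emqx:test)   # image tag is illustrative
    # ask the engine which host port backs container port 18083/tcp
    HTTP_PORT=$(docker inspect \
      --format='{{(index (index .NetworkSettings.Ports "18083/tcp") 0).HostPort}}' "$CID")
    curl -s "http://localhost:$HTTP_PORT/api/v5/status"   # probe the mapped port
    docker stop "$CID"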
@@ -1,11 +1,12 @@
name: Check Rebar Dependencies

on: [pull_request, push]
on:
  pull_request:

jobs:
  check_deps_integrity:
    runs-on: ubuntu-20.04
    container: ghcr.io/emqx/emqx-builder/5.0-26:1.13.4-25.1.2-2-ubuntu20.04
    runs-on: ubuntu-22.04
    container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-25.1.2-3-ubuntu22.04

    steps:
      - uses: actions/checkout@v3

@@ -4,8 +4,8 @@ on: [pull_request]

jobs:
  code_style_check:
    runs-on: ubuntu-20.04
    container: "ghcr.io/emqx/emqx-builder/5.0-26:1.13.4-25.1.2-2-ubuntu20.04"
    runs-on: ubuntu-22.04
    container: "ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-25.1.2-3-ubuntu22.04"
    steps:
    - uses: actions/checkout@v3
      with:
@@ -2,13 +2,14 @@

name: Check Elixir Release Applications

on: [pull_request, push]
on:
  pull_request:

jobs:
  elixir_apps_check:
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    # just use the latest builder
    container: "ghcr.io/emqx/emqx-builder/5.0-26:1.13.4-25.1.2-2-ubuntu20.04"
    container: "ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-25.1.2-3-ubuntu22.04"

    strategy:
      fail-fast: false

@@ -35,6 +36,7 @@ jobs:
        run: ./scripts/check-elixir-applications.exs
        env:
          MIX_ENV: ${{ matrix.profile }}
          PROFILE: ${{ matrix.profile }}
      # - name: check applications started with emqx_machine
      #   run: ./scripts/check-elixir-emqx-machine-boot-discrepancies.exs
      #   env:
@@ -2,12 +2,13 @@

name: Elixir Dependency Version Check

on: [pull_request, push]
on:
  pull_request:

jobs:
  elixir_deps_check:
    runs-on: ubuntu-20.04
    container: ghcr.io/emqx/emqx-builder/5.0-26:1.13.4-25.1.2-2-ubuntu20.04
    runs-on: ubuntu-22.04
    container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-25.1.2-3-ubuntu22.04

    steps:
      - name: Checkout

@@ -22,7 +23,18 @@ jobs:
          mix local.hex --force
          mix local.rebar --force
          mix deps.get
        # we check only enterprise because `rebar3 tree`, even if an
        # enterprise app is excluded from `project_app_dirs` in
        # `rebar.config.erl`, will still list dependencies from it.
        # Since the enterprise profile is a superset of the
        # community one and thus more complete, we use the former.
        env:
          MIX_ENV: emqx-enterprise
          PROFILE: emqx-enterprise
      - name: check elixir deps
        run: ./scripts/check-elixir-deps-discrepancies.exs
        env:
          MIX_ENV: emqx-enterprise
          PROFILE: emqx-enterprise

...
@@ -11,13 +11,13 @@ on:

jobs:
  elixir_release_build:
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    strategy:
      matrix:
        profile:
          - emqx
          - emqx-enterprise
    container: ghcr.io/emqx/emqx-builder/5.0-26:1.13.4-25.1.2-2-ubuntu20.04
    container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-25.1.2-3-ubuntu22.04
    steps:
      - name: Checkout
        uses: actions/checkout@v3
@@ -0,0 +1,26 @@
---

name: Keep master green

on:
  schedule:
    # run hourly
    - cron: "0 * * * *"
  workflow_dispatch:

jobs:
  rerun-failed-jobs:
    runs-on: ubuntu-22.04
    if: github.repository_owner == 'emqx'
    permissions:
      checks: read
      actions: write
    steps:
      - uses: actions/checkout@v3

      - name: run script
        shell: bash
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          python3 scripts/rerun-failed-checks.py
@@ -0,0 +1,126 @@
name: Performance Test Suite

on:
  push:
    branches:
      - 'perf/**'
  schedule:
    - cron: '0 1 * * *'
  workflow_dispatch:
    inputs:
      ref:
        required: false

jobs:
  prepare:
    runs-on: ubuntu-latest
    if: github.repository_owner == 'emqx'
    container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-25.1.2-3-ubuntu20.04
    outputs:
      BENCH_ID: ${{ steps.prepare.outputs.BENCH_ID }}
      PACKAGE_FILE: ${{ steps.package_file.outputs.PACKAGE_FILE }}

    steps:
    - uses: actions/checkout@v3
      with:
        fetch-depth: 0
        ref: ${{ github.event.inputs.ref }}
    - name: Work around https://github.com/actions/checkout/issues/766
      run: |
        git config --global --add safe.directory "$GITHUB_WORKSPACE"
    - id: prepare
      run: |
        echo "EMQX_NAME=emqx" >> $GITHUB_ENV
        echo "CODE_PATH=$GITHUB_WORKSPACE" >> $GITHUB_ENV
        echo "BENCH_ID=$(date --utc +%F)/emqx-$(./pkg-vsn.sh emqx)" >> $GITHUB_OUTPUT
    - name: Build deb package
      run: |
        make ${EMQX_NAME}-pkg
        ./scripts/pkg-tests.sh ${EMQX_NAME}-pkg
    - name: Get package file name
      id: package_file
      run: |
        echo "PACKAGE_FILE=$(find _packages/emqx -name 'emqx-*.deb' | head -n 1 | xargs basename)" >> $GITHUB_OUTPUT
    - uses: actions/upload-artifact@v3
      with:
        name: emqx-ubuntu20.04
        path: _packages/emqx/${{ steps.package_file.outputs.PACKAGE_FILE }}

  tf_emqx_perf_test:
    runs-on: ubuntu-latest
    needs:
      - prepare
    env:
      TF_VAR_package_file: ${{ needs.prepare.outputs.PACKAGE_FILE }}
      TF_VAR_grafana_api_key: ${{ secrets.TF_EMQX_PERF_TEST_GRAFANA_API_KEY }}
      TF_AWS_REGION: eu-north-1
      TF_VAR_test_duration: 1800

    steps:
    - name: Configure AWS Credentials
      uses: aws-actions/configure-aws-credentials@v2
      with:
        aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_PERF_TEST }}
        aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_PERF_TEST }}
        aws-region: eu-north-1
    - name: Checkout tf-emqx-performance-test
      uses: actions/checkout@v3
      with:
        repository: emqx/tf-emqx-performance-test
        path: tf-emqx-performance-test
    - uses: actions/download-artifact@v3
      with:
        name: emqx-ubuntu20.04
        path: tf-emqx-performance-test/
    - name: Setup Terraform
      uses: hashicorp/setup-terraform@v2
      with:
        terraform_wrapper: false
    - name: 1on1 scenario
      id: scenario_1on1
      working-directory: ./tf-emqx-performance-test
      timeout-minutes: 60
      env:
        TF_VAR_bench_id: "${{ needs.prepare.outputs.BENCH_ID }}/1on1"
        TF_VAR_use_emqttb: 1
        TF_VAR_use_emqtt_bench: 0
        TF_VAR_emqttb_instance_count: 2
        TF_VAR_emqttb_instance_type: "c5.large"
        TF_VAR_emqttb_scenario: "@pub --topic 't/%n' --pubinterval 10ms --qos 1 --publatency 50ms --size 16 --num-clients 25000 @sub --topic 't/%n' --num-clients 25000"
        TF_VAR_emqx_instance_type: "c5.xlarge"
        TF_VAR_emqx_instance_count: 3
      run: |
        terraform init
        terraform apply -auto-approve
        ./wait-emqttb.sh
        ./fetch-metrics.sh
        MESSAGES_RECEIVED=$(cat metrics.json | jq '[.[]."messages.received"] | add')
        MESSAGES_SENT=$(cat metrics.json | jq '[.[]."messages.sent"] | add')
        echo MESSAGES_DROPPED=$(cat metrics.json | jq '[.[]."messages.dropped"] | add') >> $GITHUB_OUTPUT
        echo PUB_MSG_RATE=$(($MESSAGES_RECEIVED / $TF_VAR_test_duration)) >> $GITHUB_OUTPUT
        echo SUB_MSG_RATE=$(($MESSAGES_SENT / $TF_VAR_test_duration)) >> $GITHUB_OUTPUT
        terraform destroy -auto-approve
    - name: Send notification to Slack
      uses: slackapi/slack-github-action@v1.23.0
      env:
        SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
      with:
        payload: |
          {"text": "Performance test result for 1on1 scenario (50k pub, 50k sub): ${{ job.status }}\nhttps://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Pub message rate*: ${{ steps.scenario_1on1.outputs.PUB_MSG_RATE }}\n*Sub message rate*: ${{ steps.scenario_1on1.outputs.SUB_MSG_RATE }}\nDropped messages: ${{ steps.scenario_1on1.outputs.MESSAGES_DROPPED }}"}
    - name: terraform destroy
      if: always()
      working-directory: ./tf-emqx-performance-test
      run: |
        terraform destroy -auto-approve
    - uses: actions/upload-artifact@v3
      if: success()
      with:
        name: metrics
        path: "./tf-emqx-performance-test/metrics.json"
    - uses: actions/upload-artifact@v3
      if: failure()
      with:
        name: terraform
        path: |
          ./tf-emqx-performance-test/.terraform
          ./tf-emqx-performance-test/*.tfstate
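The rate math in the 1on1 scenario sums each counter across load-generator instances and divides by the configured test duration. A quick way to replay it outside CI; the sample metrics file below is only illustrative of what fetch-metrics.sh writes (one object per emqttb instance):

    # sample metrics.json: one object per load-generator instance
    cat > metrics.json <<'EOF'
    [{"messages.received": 9000000, "messages.sent": 9100000, "messages.dropped": 0},
     {"messages.received": 9050000, "messages.sent": 9070000, "messages.dropped": 3}]
    EOF
    TEST_DURATION=1800
    # sum the counter across instances, then divide by the test duration
    RECEIVED=$(jq '[.[]."messages.received"] | add' metrics.json)
    echo "pub msg rate: $((RECEIVED / TEST_DURATION)) msg/s"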
@@ -15,11 +15,11 @@ on:

jobs:
  upload:
    runs-on: ubuntu-20.04
    runs-on: ubuntu-22.04
    strategy:
      fail-fast: false
    steps:
    - uses: aws-actions/configure-aws-credentials@v1-node16
    - uses: aws-actions/configure-aws-credentials@v2
      with:
        aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
        aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -53,25 +53,15 @@ jobs:
        BUCKET=${{ secrets.AWS_S3_BUCKET }}
        OUTPUT_DIR=${{ steps.profile.outputs.s3dir }}
        aws s3 cp --recursive s3://$BUCKET/$OUTPUT_DIR/${{ github.ref_name }} packages
        cd packages
        DEFAULT_BEAM_PLATFORM='otp24.3.4.2-1'
        # all packages including full-name and default-name are uploaded to s3
        # but we only upload default-name packages (and elixir) as github artifacts
        # so we rename (overwrite) non-default packages before uploading
        while read -r fname; do
          default_fname=$(echo "$fname" | sed "s/-${DEFAULT_BEAM_PLATFORM}//g")
          echo "$fname -> $default_fname"
          mv -f "$fname" "$default_fname"
        done < <(find . -maxdepth 1 -type f | grep -E "emqx(-enterprise)?-5\.[0-9]+\.[0-9]+.*-${DEFAULT_BEAM_PLATFORM}" | grep -v elixir)
    - uses: alexellis/upload-assets@0.4.0
      env:
        GITHUB_TOKEN: ${{ github.token }}
      with:
        asset_paths: '["packages/*"]'
    - name: update to emqx.io
      if: github.event_name == 'release' || inputs.publish_release_artefacts
      if: startsWith(github.ref_name, 'v') && (github.event_name == 'release' || inputs.publish_release_artefacts)
      run: |
        set -e -x -u
        set -eux
        curl -w %{http_code} \
          --insecure \
          -H "Content-Type: application/json" \
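The rename loop above strips the default BEAM-platform suffix so the published artifact carries the short package name. A standalone sketch of the same transformation (the filename is illustrative):

    DEFAULT_BEAM_PLATFORM='otp24.3.4.2-1'
    fname='emqx-5.0.24-otp24.3.4.2-1-ubuntu22.04-amd64.deb'   # illustrative name
    # drop the "-otp..." platform segment to get the default artifact name
    default_fname=$(echo "$fname" | sed "s/-${DEFAULT_BEAM_PLATFORM}//g")
    echo "$fname -> $default_fname"
    # prints: emqx-5.0.24-otp24.3.4.2-1-ubuntu22.04-amd64.deb -> emqx-5.0.24-ubuntu22.04-amd64.deb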
@@ -79,14 +69,35 @@ jobs:
          -X POST \
          -d "{\"repo\":\"emqx/emqx\", \"tag\": \"${{ github.ref_name }}\" }" \
          ${{ secrets.EMQX_IO_RELEASE_API }}
    - name: update homebrew packages
      if: steps.profile.outputs.profile == 'emqx' && (github.event_name == 'release' || inputs.publish_release_artefacts)
    - name: Push to packagecloud.io
      env:
        PROFILE: ${{ steps.profile.outputs.profile }}
        VERSION: ${{ steps.profile.outputs.version }}
        PACKAGECLOUD_TOKEN: ${{ secrets.PACKAGECLOUD_TOKEN }}
      run: |
        if [ -z $(echo $version | grep -oE "(alpha|beta|rc)\.[0-9]") ]; then
          curl --silent --show-error \
            -H "Authorization: token ${{ secrets.CI_GIT_TOKEN }}" \
            -H "Accept: application/vnd.github.v3+json" \
            -X POST \
            -d "{\"ref\":\"v1.0.4\",\"inputs\":{\"version\": \"${{ github.ref_name }}\"}}" \
            "https://api.github.com/repos/emqx/emqx-ci-helper/actions/workflows/update_emqx_homebrew.yaml/dispatches"
        set -eu
        REPO=$PROFILE
        if [ $PROFILE = 'emqx-enterprise' ]; then
          REPO='emqx-enterprise5'
        fi
        function push() {
          docker run -t --rm -e PACKAGECLOUD_TOKEN=$PACKAGECLOUD_TOKEN -v $(pwd)/$2:/w/$2 -w /w ghcr.io/emqx/package_cloud push emqx/$REPO/$1 $2
        }
        push "debian/buster" "packages/$PROFILE-$VERSION-debian10-amd64.deb"
        push "debian/buster" "packages/$PROFILE-$VERSION-debian10-arm64.deb"
        push "debian/bullseye" "packages/$PROFILE-$VERSION-debian11-amd64.deb"
        push "debian/bullseye" "packages/$PROFILE-$VERSION-debian11-arm64.deb"
        push "ubuntu/bionic" "packages/$PROFILE-$VERSION-ubuntu18.04-amd64.deb"
        push "ubuntu/bionic" "packages/$PROFILE-$VERSION-ubuntu18.04-arm64.deb"
        push "ubuntu/focal" "packages/$PROFILE-$VERSION-ubuntu20.04-amd64.deb"
        push "ubuntu/focal" "packages/$PROFILE-$VERSION-ubuntu20.04-arm64.deb"
        push "ubuntu/jammy" "packages/$PROFILE-$VERSION-ubuntu22.04-amd64.deb"
        push "ubuntu/jammy" "packages/$PROFILE-$VERSION-ubuntu22.04-arm64.deb"
        push "el/6" "packages/$PROFILE-$VERSION-amzn2-amd64.rpm"
        push "el/6" "packages/$PROFILE-$VERSION-amzn2-arm64.rpm"
        push "el/7" "packages/$PROFILE-$VERSION-el7-amd64.rpm"
        push "el/7" "packages/$PROFILE-$VERSION-el7-arm64.rpm"
        push "el/8" "packages/$PROFILE-$VERSION-el8-amd64.rpm"
        push "el/8" "packages/$PROFILE-$VERSION-el8-arm64.rpm"
        push "el/9" "packages/$PROFILE-$VERSION-el9-amd64.rpm"
        push "el/9" "packages/$PROFILE-$VERSION-el9-arm64.rpm"
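The push helper wraps the packagecloud CLI in a container so nothing has to be installed on the runner. The long run of per-distro calls could equally be driven from a table; a hedged, hypothetical sketch of that variant (same image and helper, abbreviated file list):

    # same container-wrapped push, driven from "distro file" rows
    push() {
      docker run -t --rm -e PACKAGECLOUD_TOKEN="$PACKAGECLOUD_TOKEN" \
        -v "$(pwd)/$2:/w/$2" -w /w \
        ghcr.io/emqx/package_cloud push "emqx/$REPO/$1" "$2"
    }
    while read -r distro file; do
      push "$distro" "$file"
    done <<EOF
    debian/bullseye packages/$PROFILE-$VERSION-debian11-amd64.deb
    ubuntu/jammy packages/$PROFILE-$VERSION-ubuntu22.04-amd64.deb
    EOF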
@@ -12,26 +12,26 @@ jobs:
    strategy:
      matrix:
        builder:
          - 5.0-26
          - 5.0-34
        otp:
          - 24.3.4.2-1
          - 25.1.2-2
          - 24.3.4.2-3
          - 25.1.2-3
        # no need to use more than 1 version of Elixir, since tests
        # run using only Erlang code. This is needed just to specify
        # the base image.
        elixir:
          - 1.13.4
        os:
          - ubuntu20.04
          - ubuntu22.04
        arch:
          - amd64
        runs-on:
          - aws-amd64
          - ubuntu-20.04
          - ubuntu-22.04
        use-self-hosted:
          - ${{ github.repository_owner == 'emqx' }}
        exclude:
          - runs-on: ubuntu-20.04
          - runs-on: ubuntu-22.04
            use-self-hosted: true
          - runs-on: aws-amd64
            use-self-hosted: false
@@ -7,16 +7,17 @@ concurrency:
on:
  push:
    branches:
      - '**'
      - master
      - 'ci/**'
    tags:
      - v*
  pull_request:

jobs:
  prepare:
    runs-on: ubuntu-20.04
    runs-on: ubuntu-22.04
    # prepare source with any OTP version, no need for a matrix
    container: ghcr.io/emqx/emqx-builder/5.0-26:1.13.4-24.3.4.2-1-alpine3.15.1
    container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-24.3.4.2-3-debian11

    steps:
      - uses: actions/checkout@v3

@@ -33,7 +34,7 @@ jobs:
          path: source.zip

  docker_test:
    runs-on: ubuntu-20.04
    runs-on: ubuntu-22.04
    needs: prepare

    strategy:
@@ -47,17 +48,17 @@ jobs:
        - mnesia
        - rlog
      os:
        - ["alpine3.15.1", "alpine:3.15.1"]
        - ["debian11", "debian:11-slim"]
      builder:
        - 5.0-26
        - 5.0-34
      otp:
        - 24.3.4.2-1
        - 24.3.4.2-3
      elixir:
        - 1.13.4
      arch:
        - amd64
    steps:
    - uses: emqx/setup-beam@v1.16.1-emqx
    - uses: erlef/setup-beam@v1.15.2
      with:
        otp-version: 24.3.4.6
    - uses: actions/download-artifact@v3

@@ -107,7 +108,7 @@ jobs:
      docker exec node1.emqx.io node_dump

  helm_test:
    runs-on: ubuntu-20.04
    runs-on: ubuntu-22.04
    needs: prepare

    strategy:
@@ -122,9 +123,9 @@ jobs:
      os:
        - ["debian11", "debian:11-slim"]
      builder:
        - 5.0-26
        - 5.0-34
      otp:
        - 24.3.4.2-1
        - 24.3.4.2-3
      elixir:
        - 1.13.4
      arch:

@@ -132,7 +133,7 @@ jobs:
      # - emqx-enterprise # TODO test enterprise

    steps:
    - uses: emqx/setup-beam@v1.16.1-emqx
    - uses: erlef/setup-beam@v1.15.2
      with:
        otp-version: 24.3.4.6
    - uses: actions/download-artifact@v3

@@ -166,8 +167,8 @@ jobs:
        --set image.pullPolicy=Never \
        --set image.tag=$EMQX_TAG \
        --set emqxAclConfig="" \
        --set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__RETRY_INTERVAL=2s \
        --set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__MAX_TOPIC_ALIAS=10 \
        --set emqxConfig.EMQX_MQTT__RETRY_INTERVAL=2s \
        --set emqxConfig.EMQX_MQTT__MAX_TOPIC_ALIAS=10 \
        --set emqxConfig.EMQX_AUTHORIZATION__SOURCES=[] \
        --set emqxConfig.EMQX_AUTHORIZATION__NO_MATCH=allow \
        deploy/charts/${{ matrix.profile }} \

@@ -184,8 +185,8 @@ jobs:
        --set image.pullPolicy=Never \
        --set image.tag=$EMQX_TAG \
        --set emqxAclConfig="" \
        --set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__RETRY_INTERVAL=2s \
        --set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__MAX_TOPIC_ALIAS=10 \
        --set emqxConfig.EMQX_MQTT__RETRY_INTERVAL=2s \
        --set emqxConfig.EMQX_MQTT__MAX_TOPIC_ALIAS=10 \
        --set emqxConfig.EMQX_AUTHORIZATION__SOURCES=[] \
        --set emqxConfig.EMQX_AUTHORIZATION__NO_MATCH=allow \
        deploy/charts/${{ matrix.profile }} \
@@ -201,12 +202,25 @@ jobs:
          echo "waiting emqx started";
          sleep 10;
        done
    - name: Get Token
      timeout-minutes: 1
      run: |
        kubectl port-forward service/${{ matrix.profile }} 18083:18083 > /dev/null &

        while
          [ "$(curl --silent -X 'GET' 'http://127.0.0.1:18083/api/v5/status' | tail -n1)" != "emqx is running" ]
        do
          echo "waiting emqx"
          sleep 1
        done

        echo "TOKEN=$(curl --silent -X 'POST' 'http://127.0.0.1:18083/api/v5/login' -H 'accept: application/json' -H 'Content-Type: application/json' -d '{"username": "admin","password": "public"}' | jq -r ".token")" >> $GITHUB_ENV

    - name: Check cluster
      timeout-minutes: 10
      run: |
        kubectl port-forward service/${{ matrix.profile }} 18083:18083 > /dev/null &
        while
          [ "$(curl --silent --basic -u admin:public -X GET http://127.0.0.1:18083/api/v5/cluster| jq '.nodes|length')" != "3" ];
          [ "$(curl --silent -H "Authorization: Bearer $TOKEN" -X GET http://127.0.0.1:18083/api/v5/cluster| jq '.nodes|length')" != "3" ];
        do
          echo "waiting ${{ matrix.profile }} cluster scale"
          sleep 1
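The new Get Token step is the whole dashboard-API handshake in miniature: poll /api/v5/status until the broker answers, then trade the default credentials for a bearer token. A minimal local replay of the same calls (default admin credentials assumed, as in the chart):

    BASE='http://127.0.0.1:18083'
    # wait until the dashboard reports the broker as up
    until [ "$(curl --silent "$BASE/api/v5/status" | tail -n1)" = "emqx is running" ]; do
      sleep 1
    done
    # exchange username/password for a bearer token, then call an authenticated endpoint
    TOKEN=$(curl --silent -X POST "$BASE/api/v5/login" \
      -H 'Content-Type: application/json' \
      -d '{"username": "admin", "password": "public"}' | jq -r '.token')
    curl --silent -H "Authorization: Bearer $TOKEN" "$BASE/api/v5/cluster" | jq '.nodes | length'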
@@ -4,41 +4,13 @@ on: [pull_request]

jobs:
  run_gitlint:
    runs-on: ubuntu-20.04
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout source code
        uses: actions/checkout@v3
      - name: Install gitlint
        run: |
          sudo apt-get update
          sudo apt install gitlint
      - name: Set auth header
        if: endsWith(github.repository, 'enterprise')
        run: |
          echo 'AUTH_HEADER<<EOF' >> $GITHUB_ENV
          echo "Authorization: token ${{ secrets.CI_GIT_TOKEN }}" >> $GITHUB_ENV
          echo 'EOF' >> $GITHUB_ENV
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Run gitlint
        shell: bash
        run: |
          pr_number=$(echo $GITHUB_REF | awk 'BEGIN { FS = "/" } ; { print $3 }')
          messages="$(curl --silent --show-error \
            --header "${{ env.AUTH_HEADER }}" \
            --header "Accept: application/vnd.github.v3+json" \
            "https://api.github.com/repos/${GITHUB_REPOSITORY}/pulls/${pr_number}/commits")"
          len=$(echo $messages | jq length)
          result=true
          for i in $( seq 0 $(($len - 1)) ); do
            message=$(echo $messages | jq -r .[$i].commit.message)
            echo "commit message: $message"
            status=0
            echo $message | gitlint -C ./.github/workflows/.gitlint || status=$?
            if [ $status -ne 0 ]; then
              result=false
            fi
          done
          if ! ${result} ; then
            echo "Some of the commit messages are not structured as The Conventional Commits specification. Please check CONTRIBUTING.md for our process on PR."
            exit 1
          fi
          echo "success"
          set -ex
          docker run --ulimit nofile=1024 -v $(pwd):/repo -w /repo ghcr.io/emqx/gitlint --commits ${{ github.event.pull_request.base.sha }}..$GITHUB_SHA --config .github/workflows/.gitlint
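The replacement one-liner drops the per-commit API loop in favor of the containerized gitlint, which also makes the check reproducible locally before pushing. A sketch using the same image and config (the commit range is illustrative):

    # lint the last 5 commit messages with the same image and config as CI
    docker run --rm -v "$(pwd):/repo" -w /repo \
      ghcr.io/emqx/gitlint --commits HEAD~5..HEAD --config .github/workflows/.gitlint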
@@ -10,11 +11,11 @@ on:

jobs:
  build_emqx_for_jmeter_tests:
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    outputs:
      version: ${{ steps.build_docker.outputs.version}}
    steps:
    - uses: emqx/setup-beam@v1.16.1-emqx
    - uses: erlef/setup-beam@v1.15.2
      with:
        otp-version: 24.3.4.6
    - name: download jmeter

@@ -44,7 +44,7 @@ jobs:
        path: ./emqx.tar

  advanced_feat:
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04

    strategy:
      fail-fast: false

@@ -57,7 +57,7 @@ jobs:

    needs: build_emqx_for_jmeter_tests
    steps:
    - uses: emqx/setup-beam@v1.16.1-emqx
    - uses: erlef/setup-beam@v1.15.2
      with:
        otp-version: 24.3.4.6
    - uses: actions/checkout@v3

@@ -92,7 +92,7 @@ jobs:
    - uses: actions/checkout@v3
      with:
        repository: emqx/emqx-fvt
        ref: broker-autotest-v2
        ref: broker-autotest-v5
        path: scripts
    - uses: actions/setup-java@v3
      with:

@@ -126,7 +126,7 @@ jobs:
    - name: check logs
      run: |
        if cat jmeter_logs/${{ matrix.scripts_type }}.jtl | grep -e '<failure>true</failure>' > /dev/null 2>&1; then
          echo "check logs filed"
          echo "check logs failed"
          exit 1
        fi
    - uses: actions/upload-artifact@v3

@@ -136,7 +136,7 @@ jobs:
        path: ./jmeter_logs

  pgsql_authn_authz:
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04

    strategy:
      fail-fast: false

@@ -153,7 +153,7 @@ jobs:

    needs: build_emqx_for_jmeter_tests
    steps:
    - uses: emqx/setup-beam@v1.16.1-emqx
    - uses: erlef/setup-beam@v1.15.2
      with:
        otp-version: 24.3.4.6
    - uses: actions/checkout@v3

@@ -191,7 +191,7 @@ jobs:
    - uses: actions/checkout@v3
      with:
        repository: emqx/emqx-fvt
        ref: broker-autotest-v2
        ref: broker-autotest-v5
        path: scripts
    - uses: actions/setup-java@v3
      with:

@@ -235,7 +235,7 @@ jobs:
    - name: check logs
      run: |
        if cat jmeter_logs/${{ matrix.scripts_type }}_${{ matrix.pgsql_tag }}.jtl | grep -e '<failure>true</failure>' > /dev/null 2>&1; then
          echo "check logs filed"
          echo "check logs failed"
          exit 1
        fi
    - uses: actions/upload-artifact@v3

@@ -245,7 +245,7 @@ jobs:
        path: ./jmeter_logs

  mysql_authn_authz:
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04

    strategy:
      fail-fast: false

@@ -259,7 +259,7 @@ jobs:

    needs: build_emqx_for_jmeter_tests
    steps:
    - uses: emqx/setup-beam@v1.16.1-emqx
    - uses: erlef/setup-beam@v1.15.2
      with:
        otp-version: 24.3.4.6
    - uses: actions/checkout@v3

@@ -297,7 +297,7 @@ jobs:
    - uses: actions/checkout@v3
      with:
        repository: emqx/emqx-fvt
        ref: broker-autotest-v2
        ref: broker-autotest-v5
        path: scripts
    - uses: actions/setup-java@v3
      with:

@@ -341,7 +341,7 @@ jobs:
    - name: check logs
      run: |
        if cat jmeter_logs/${{ matrix.scripts_type }}_${{ matrix.mysql_tag }}.jtl | grep -e '<failure>true</failure>' > /dev/null 2>&1; then
          echo "check logs filed"
          echo "check logs failed"
          exit 1
        fi
    - uses: actions/upload-artifact@v3

@@ -351,7 +351,7 @@ jobs:
        path: ./jmeter_logs

  JWT_authn:
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04

    strategy:
      fail-fast: false

@@ -361,7 +361,7 @@ jobs:

    needs: build_emqx_for_jmeter_tests
    steps:
    - uses: emqx/setup-beam@v1.16.1-emqx
    - uses: erlef/setup-beam@v1.15.2
      with:
        otp-version: 24.3.4.6
    - uses: actions/checkout@v3

@@ -396,7 +396,7 @@ jobs:
    - uses: actions/checkout@v3
      with:
        repository: emqx/emqx-fvt
        ref: broker-autotest-v2
        ref: broker-autotest-v5
        path: scripts
    - name: run jwks_server
      timeout-minutes: 10

@@ -439,7 +439,7 @@ jobs:
    - name: check logs
      run: |
        if cat jmeter_logs/${{ matrix.scripts_type }}.jtl | grep -e '<failure>true</failure>' > /dev/null 2>&1; then
          echo "check logs filed"
          echo "check logs failed"
          exit 1
        fi
    - uses: actions/upload-artifact@v3

@@ -449,7 +449,7 @@ jobs:
        path: ./jmeter_logs

  built_in_database_authn_authz:
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04

    strategy:
      fail-fast: false

@@ -460,7 +460,7 @@ jobs:

    needs: build_emqx_for_jmeter_tests
    steps:
    - uses: emqx/setup-beam@v1.16.1-emqx
    - uses: erlef/setup-beam@v1.15.2
      with:
        otp-version: 24.3.4.6
    - uses: actions/checkout@v3

@@ -496,7 +496,7 @@ jobs:
    - uses: actions/checkout@v3
      with:
        repository: emqx/emqx-fvt
        ref: broker-autotest-v2
        ref: broker-autotest-v5
        path: scripts
    - uses: actions/setup-java@v3
      with:

@@ -531,7 +531,7 @@ jobs:
    - name: check logs
      run: |
        if cat jmeter_logs/${{ matrix.scripts_type }}_${{ matrix.mysql_tag }}.jtl | grep -e '<failure>true</failure>' > /dev/null 2>&1; then
          echo "check logs filed"
          echo "check logs failed"
          exit 1
        fi
    - uses: actions/upload-artifact@v3

@@ -541,7 +541,7 @@ jobs:
        path: ./jmeter_logs

  delete-artifact:
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    needs: [advanced_feat,pgsql_authn_authz,JWT_authn,mysql_authn_authz,built_in_database_authn_authz]
    steps:
    - uses: geekyeggo/delete-artifact@v2
@@ -4,19 +4,18 @@ concurrency:
  group: relup-${{ github.event_name }}-${{ github.ref }}
  cancel-in-progress: true

on:
  push:
    branches:
      - '**'
    tags:
      - v*
      - e*
  pull_request:
# on:
#   push:
#     branches:
#       - '**'
#     tags:
#       - e*
#   pull_request:

jobs:
  relup_test_plan:
    runs-on: ubuntu-20.04
    container: "ghcr.io/emqx/emqx-builder/5.0-26:1.13.4-24.3.4.2-1-ubuntu20.04"
    runs-on: ubuntu-22.04
    container: "ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-24.3.4.2-3-ubuntu22.04"
    outputs:
      CUR_EE_VSN: ${{ steps.find-versions.outputs.CUR_EE_VSN }}
      OLD_VERSIONS: ${{ steps.find-versions.outputs.OLD_VERSIONS }}

@@ -43,7 +42,7 @@ jobs:
      run: |
        set -x
        cd emqx
        make emqx-tgz
        export PROFILE='emqx-enterprise'
        make emqx-enterprise-tgz
    - uses: actions/upload-artifact@v3
      name: Upload built emqx and test scenario

@@ -59,7 +58,7 @@ jobs:
    needs:
      - relup_test_plan
    if: needs.relup_test_plan.outputs.OLD_VERSIONS != '[]'
    runs-on: ubuntu-20.04
    runs-on: ubuntu-22.04
    strategy:
      fail-fast: false
      matrix:

@@ -72,7 +71,7 @@ jobs:
      shell: bash
    steps:
    # setup Erlang to run lux
    - uses: emqx/setup-beam@v1.16.1-emqx
    - uses: erlef/setup-beam@v1.15.2
      with:
        otp-version: 24.3.4.6
    - uses: actions/checkout@v3
@@ -7,227 +7,260 @@ concurrency:
on:
  push:
    branches:
      - '**'
      - master
      - 'ci/**'
    tags:
      - v*
      - e*
  pull_request:

env:
  IS_CI: "yes"

jobs:
  build-matrix:
    runs-on: ubuntu-latest
    outputs:
      prepare: ${{ steps.matrix.outputs.prepare }}
      host: ${{ steps.matrix.outputs.host }}
      docker: ${{ steps.matrix.outputs.docker }}
      runs-on: ${{ steps.runner.outputs.runs-on }}
    steps:
      - uses: actions/checkout@v3
      - name: Build matrix
        id: matrix
        run: |
          APPS="$(./scripts/find-apps.sh --ci)"
          MATRIX="$(echo "${APPS}" | jq -c '
            [
              (.[] | select(.profile == "emqx") | . + {
                builder: "5.0-26",
                otp: "25.1.2-2",
                elixir: "1.13.4"
              }),
              (.[] | select(.profile == "emqx-enterprise") | . + {
                builder: "5.0-26",
                otp: ["24.3.4.2-1", "25.1.2-2"][],
                elixir: "1.13.4"
              })
            ]
          ')"
          echo "${MATRIX}" | jq
          MATRIX_PREPARE="$(echo "${MATRIX}" | jq -c 'map({profile, builder, otp, elixir}) | unique')"
          MATRIX_HOST="$(echo "${MATRIX}" | jq -c 'map(select(.runner == "host"))')"
          MATRIX_DOCKER="$(echo "${MATRIX}" | jq -c 'map(select(.runner == "docker"))')"
          echo "prepare=${MATRIX_PREPARE}" | tee -a $GITHUB_OUTPUT
          echo "host=${MATRIX_HOST}" | tee -a $GITHUB_OUTPUT
          echo "docker=${MATRIX_DOCKER}" | tee -a $GITHUB_OUTPUT
      - name: Choose runner host
        id: runner
        run: |
          RUNS_ON="ubuntu-20.04"
          ${{ github.repository_owner == 'emqx' }} && RUNS_ON="aws-amd64"
          echo "runs-on=${RUNS_ON}" | tee -a $GITHUB_OUTPUT
  build-matrix:
    runs-on: ubuntu-22.04
    outputs:
      prepare: ${{ steps.matrix.outputs.prepare }}
      host: ${{ steps.matrix.outputs.host }}
      docker: ${{ steps.matrix.outputs.docker }}
      runs-on: ${{ steps.runner.outputs.runs-on }}
    steps:
      - uses: actions/checkout@v3
      - name: Build matrix
        id: matrix
        run: |
          APPS="$(./scripts/find-apps.sh --ci)"
          MATRIX="$(echo "${APPS}" | jq -c '
            [
              (.[] | select(.profile == "emqx") | . + {
                builder: "5.0-34",
                otp: "25.1.2-3",
                elixir: "1.13.4"
              }),
              (.[] | select(.profile == "emqx-enterprise") | . + {
                builder: "5.0-34",
                otp: ["24.3.4.2-3", "25.1.2-3"][],
                elixir: "1.13.4"
              })
            ]
          ')"
          echo "${MATRIX}" | jq
          MATRIX_PREPARE="$(echo "${MATRIX}" | jq -c 'map({profile, builder, otp, elixir}) | unique')"
          MATRIX_HOST="$(echo "${MATRIX}" | jq -c 'map(select(.runner == "host"))')"
          MATRIX_DOCKER="$(echo "${MATRIX}" | jq -c 'map(select(.runner == "docker"))')"
          echo "prepare=${MATRIX_PREPARE}" | tee -a $GITHUB_OUTPUT
          echo "host=${MATRIX_HOST}" | tee -a $GITHUB_OUTPUT
          echo "docker=${MATRIX_DOCKER}" | tee -a $GITHUB_OUTPUT
      - name: Choose runner host
        id: runner
        run: |
          RUNS_ON="ubuntu-22.04"
          ${{ github.repository_owner == 'emqx' }} && RUNS_ON="aws-amd64"
          echo "runs-on=${RUNS_ON}" | tee -a $GITHUB_OUTPUT
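The jq filter in build-matrix fans each enterprise app entry out into one matrix row per OTP version via the `[...][]` iterator. A standalone sketch with a hand-written app list (the sample entries only illustrate the shape that find-apps.sh --ci emits):

    # two sample app entries; real ones come from ./scripts/find-apps.sh --ci
    APPS='[{"app":"apps/emqx","profile":"emqx-enterprise","runner":"docker"},
           {"app":"apps/emqx_conf","profile":"emqx","runner":"host"}]'
    # ["a","b"][] is a generator, so each selected app yields one row per OTP version
    echo "$APPS" | jq -c '[ .[] | select(.profile == "emqx-enterprise")
                                | . + {otp: ["24.3.4.2-3", "25.1.2-3"][]} ]'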

  prepare:
    runs-on: aws-amd64
    needs: [build-matrix]
    strategy:
      fail-fast: false
      matrix:
        include: ${{ fromJson(needs.build-matrix.outputs.prepare) }}
    container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu20.04"
    steps:
      - uses: AutoModality/action-clean@v1
      - uses: actions/checkout@v3
        with:
          path: source
      - name: get_all_deps
        working-directory: source
        env:
          PROFILE: ${{ matrix.profile }}
          #DIAGNOSTIC: 1
        run: |
          make ensure-rebar3
          # fetch all deps and compile
          make ${{ matrix.profile }}
          make test-compile
          cd ..
          zip -ryq source.zip source/* source/.[^.]*
      - uses: actions/upload-artifact@v3
        with:
          name: source-${{ matrix.profile }}-${{ matrix.otp }}
          path: source.zip
  prepare:
    runs-on: ${{ needs.build-matrix.outputs.runs-on }}
    needs: [build-matrix]
    strategy:
      fail-fast: false
      matrix:
        include: ${{ fromJson(needs.build-matrix.outputs.prepare) }}
    container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu22.04"
    steps:
      - uses: AutoModality/action-clean@v1
      - uses: actions/checkout@v3
        with:
          path: source
      - name: get_all_deps
        working-directory: source
        env:
          PROFILE: ${{ matrix.profile }}
        run: |
          make ensure-rebar3
          # fetch all deps and compile
          make ${{ matrix.profile }}-compile
          make test-compile
          cd ..
          zip -ryq source.zip source/* source/.[^.]*
      - uses: actions/upload-artifact@v3
        with:
          name: source-${{ matrix.profile }}-${{ matrix.otp }}
          path: source.zip
  eunit_and_proper:
    needs:
      - build-matrix
      - prepare
    runs-on: ${{ needs.build-matrix.outputs.runs-on }}
    strategy:
      fail-fast: false
      matrix:
        include: ${{ fromJson(needs.build-matrix.outputs.prepare) }}

    defaults:
      run:
        shell: bash
    container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu20.04"

    steps:
      - uses: AutoModality/action-clean@v1
      - uses: actions/download-artifact@v3
        with:
          name: source-${{ matrix.profile }}-${{ matrix.otp }}
          path: .
      - name: unzip source code
        run: unzip -o -q source.zip
      # produces eunit.coverdata
      - name: eunit
        env:
          PROFILE: ${{ matrix.profile }}
          CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }}
        working-directory: source
        run: make eunit

      # produces proper.coverdata
      - name: proper
        env:
          PROFILE: ${{ matrix.profile }}
          CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }}
        working-directory: source
        run: make proper

      - uses: actions/upload-artifact@v3
        with:
          name: coverdata
          path: source/_build/test/cover

  ct_docker:
    needs:
      - build-matrix
      - prepare
    runs-on: ${{ needs.build-matrix.outputs.runs-on }}
    strategy:
      fail-fast: false
      matrix:
        include: ${{ fromJson(needs.build-matrix.outputs.docker) }}

    defaults:
      run:
        shell: bash

    steps:
      - uses: AutoModality/action-clean@v1
      - uses: actions/download-artifact@v3
        with:
          name: source-${{ matrix.profile }}-${{ matrix.otp }}
          path: .
      - name: unzip source code
        run: unzip -q source.zip
      - name: run tests
        working-directory: source
        env:
          DOCKER_CT_RUNNER_IMAGE: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu20.04"
          MONGO_TAG: 5
          MYSQL_TAG: 8
          PGSQL_TAG: 13
          REDIS_TAG: 6
          INFLUXDB_TAG: 2.5.0
          PROFILE: ${{ matrix.profile }}
          CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }}
        run: ./scripts/ct/run.sh --app ${{ matrix.app }}
      - uses: actions/upload-artifact@v3
        with:
          name: coverdata
          path: source/_build/test/cover
      - uses: actions/upload-artifact@v3
        if: failure()
        with:
          name: logs-${{ matrix.profile }}-${{ matrix.prefix }}-${{ matrix.otp }}
          path: source/_build/test/logs

  ct:
    needs:
      - build-matrix
      - prepare
    runs-on: ${{ needs.build-matrix.outputs.runs-on }}
    strategy:
      fail-fast: false
      matrix:
        include: ${{ fromJson(needs.build-matrix.outputs.host) }}

    container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu20.04"
    defaults:
      run:
        shell: bash

    steps:
      - uses: AutoModality/action-clean@v1
      - uses: actions/download-artifact@v3
        with:
          name: source-${{ matrix.profile }}-${{ matrix.otp }}
          path: .
      - name: unzip source code
        run: unzip -q source.zip

      # produces $PROFILE-<app-name>.coverdata
      - name: run common test
        working-directory: source
        env:
          PROFILE: ${{ matrix.profile }}
          CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }}
        run: |
          make "${{ matrix.app }}-ct"
      - uses: actions/upload-artifact@v3
        with:
          name: coverdata
          path: source/_build/test/cover
          if-no-files-found: warn # do not fail if no coverdata found
      - uses: actions/upload-artifact@v3
        if: failure()
        with:
          name: logs-${{ matrix.profile }}-${{ matrix.prefix }}-${{ matrix.otp }}
          path: source/_build/test/logs

  make_cover:
    needs:
      - eunit_and_proper
      - ct
      - ct_docker
    runs-on: ubuntu-20.04
    container: "ghcr.io/emqx/emqx-builder/5.0-26:1.13.4-24.3.4.2-1-ubuntu20.04"
    steps:
  static_checks:
    needs:
      - build-matrix
      - prepare
    runs-on: ${{ needs.build-matrix.outputs.runs-on }}
    strategy:
      fail-fast: false
      matrix:
        include: ${{ fromJson(needs.build-matrix.outputs.prepare) }}
    container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu22.04"
    steps:
      - uses: AutoModality/action-clean@v1
      - uses: actions/download-artifact@v3
        with:
          name: source-emqx-enterprise-24.3.4.2-1
          name: source-${{ matrix.profile }}-${{ matrix.otp }}
          path: .
      - name: unzip source code
        run: unzip -o -q source.zip
      - uses: actions/cache@v3
        with:
          path: "source/emqx_dialyzer_${{ matrix.otp }}_plt"
          key: rebar3-dialyzer-plt-${{ matrix.profile }}-${{ matrix.otp }}
      - name: run static checks
        env:
          PROFILE: ${{ matrix.profile }}
        working-directory: source
        run: make static_checks

  eunit_and_proper:
    needs:
      - build-matrix
      - prepare
    runs-on: ${{ needs.build-matrix.outputs.runs-on }}
    strategy:
      fail-fast: false
      matrix:
        include: ${{ fromJson(needs.build-matrix.outputs.prepare) }}

    defaults:
      run:
        shell: bash
    container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu22.04"

    steps:
      - uses: AutoModality/action-clean@v1
      - uses: actions/download-artifact@v3
        with:
          name: source-${{ matrix.profile }}-${{ matrix.otp }}
          path: .
      - name: unzip source code
        run: unzip -o -q source.zip
      # produces eunit.coverdata
      - name: eunit
        env:
          PROFILE: ${{ matrix.profile }}
          CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }}
        working-directory: source
        run: make eunit

      # produces proper.coverdata
      - name: proper
        env:
          PROFILE: ${{ matrix.profile }}
          CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }}
        working-directory: source
        run: make proper

      - uses: actions/upload-artifact@v3
        with:
          name: coverdata
          path: source/_build/test/cover

  ct_docker:
    needs:
      - build-matrix
      - prepare
    runs-on: ${{ needs.build-matrix.outputs.runs-on }}
    strategy:
      fail-fast: false
      matrix:
        include: ${{ fromJson(needs.build-matrix.outputs.docker) }}

    defaults:
      run:
        shell: bash

    steps:
      - uses: AutoModality/action-clean@v1
      - uses: actions/download-artifact@v3
        with:
          name: source-${{ matrix.profile }}-${{ matrix.otp }}
          path: .
      - name: unzip source code
        run: unzip -q source.zip
      - name: run tests
        working-directory: source
        env:
          DOCKER_CT_RUNNER_IMAGE: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu22.04"
          MONGO_TAG: "5"
          MYSQL_TAG: "8"
          PGSQL_TAG: "13"
          REDIS_TAG: "7.0"
          INFLUXDB_TAG: "2.5.0"
          TDENGINE_TAG: "3.0.2.4"
          OPENTS_TAG: "9aa7f88"
          PROFILE: ${{ matrix.profile }}
          CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }}
        run: ./scripts/ct/run.sh --ci --app ${{ matrix.app }}
      - uses: actions/upload-artifact@v3
        with:
          name: coverdata
          path: source/_build/test/cover
      - uses: actions/upload-artifact@v3
        if: failure()
        with:
          name: logs-${{ matrix.profile }}-${{ matrix.prefix }}-${{ matrix.otp }}
          path: source/_build/test/logs

  ct:
    needs:
      - build-matrix
      - prepare
    runs-on: ${{ needs.build-matrix.outputs.runs-on }}
    strategy:
      fail-fast: false
      matrix:
        include: ${{ fromJson(needs.build-matrix.outputs.host) }}

    container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu22.04"
    defaults:
      run:
        shell: bash

    steps:
      - uses: AutoModality/action-clean@v1
      - uses: actions/download-artifact@v3
        with:
          name: source-${{ matrix.profile }}-${{ matrix.otp }}
          path: .
      - name: unzip source code
        run: unzip -q source.zip

      # produces $PROFILE-<app-name>.coverdata
      - name: run common test
        working-directory: source
        env:
          PROFILE: ${{ matrix.profile }}
          CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }}
        run: |
          make "${{ matrix.app }}-ct"
      - uses: actions/upload-artifact@v3
        with:
          name: coverdata
          path: source/_build/test/cover
          if-no-files-found: warn # do not fail if no coverdata found
      - uses: actions/upload-artifact@v3
        if: failure()
        with:
          name: logs-${{ matrix.profile }}-${{ matrix.prefix }}-${{ matrix.otp }}
          path: source/_build/test/logs

  make_cover:
    needs:
      - eunit_and_proper
      - ct
      - ct_docker
    runs-on: ubuntu-22.04
    container: "ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-24.3.4.2-3-ubuntu22.04"
    steps:
      - uses: AutoModality/action-clean@v1
      - uses: actions/download-artifact@v3
        with:
          name: source-emqx-enterprise-24.3.4.2-3
          path: .
      - name: unzip source code
        run: unzip -q source.zip
@@ -256,15 +289,15 @@ jobs:
        if: failure()
        run: cat rebar3.crashdump

  # do this in a separate job
  upload_coverdata:
    needs: make_cover
    runs-on: ubuntu-20.04
    steps:
      - name: Coveralls Finished
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          curl -v -k https://coveralls.io/webhook \
            --header "Content-Type: application/json" \
            --data "{\"repo_name\":\"$GITHUB_REPOSITORY\",\"repo_token\":\"$GITHUB_TOKEN\",\"payload\":{\"build_num\":$GITHUB_RUN_ID,\"status\":\"done\"}}" || true
  # do this in a separate job
  upload_coverdata:
    needs: make_cover
    runs-on: ubuntu-22.04
    steps:
      - name: Coveralls Finished
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          curl -v -k https://coveralls.io/webhook \
            --header "Content-Type: application/json" \
            --data "{\"repo_name\":\"$GITHUB_REPOSITORY\",\"repo_token\":\"$GITHUB_TOKEN\",\"payload\":{\"build_num\":$GITHUB_RUN_ID,\"status\":\"done\"}}" || true
@@ -1,10 +1,11 @@
name: Shellcheck

on: [pull_request, push]
on:
  pull_request:

jobs:
  shellcheck:
    runs-on: ubuntu-20.04
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout source code
        uses: actions/checkout@v3

@@ -10,7 +10,8 @@ on:

jobs:
  stale:
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    if: github.repository_owner == 'emqx'
    permissions:
      issues: write
      pull-requests: none
@@ -11,11 +11,11 @@ on:

jobs:
  upload:
    runs-on: ubuntu-20.04
    runs-on: ubuntu-22.04
    strategy:
      fail-fast: false
    steps:
    - uses: aws-actions/configure-aws-credentials@v1-node16
    - uses: aws-actions/configure-aws-credentials@v2
      with:
        aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
        aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -43,8 +43,7 @@ tmp/
_packages
elvis
emqx_dialyzer_*_plt
*/emqx_dashboard/priv/www
*/emqx_dashboard/priv/i18n.conf
*/emqx_dashboard/priv/
dist.zip
scripts/git-token
apps/*/etc/*.all

@@ -67,6 +66,9 @@ mix.lock
apps/emqx/test/emqx_static_checks_data/master.bpapi
# rendered configurations
*.conf.rendered
*.conf.rendered.*
lux_logs/
/.prepare
bom.json
ct_run*/
apps/emqx_conf/etc/emqx.conf.all.rendered*
@@ -1,2 +1,2 @@
erlang 24.3.4.2-1
erlang 24.3.4.2-3
elixir 1.13.4-otp-24
APL.txt

@@ -186,7 +186,7 @@
        same "printed page" as the copyright notice for easier
        identification within third-party archives.

    Copyright {yyyy} {name of copyright owner}
    Copyright (c) 2016-2023 EMQ Technologies Co., Ltd.

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
@@ -55,7 +55,7 @@ Must be one of the following:
- **chore**: Updating grunt tasks etc; no production code change
- **perf**: A code change that improves performance
- **test**: Adding missing tests, refactoring tests; no production code change
- **build**: Changes that affect the CI/CD pipeline or build system or external dependencies (example scopes: travis, jenkins, makefile)
- **build**: Changes that affect the CI/CD pipeline or build system or external dependencies (example scopes: jenkins, makefile)
- **ci**: Changes provided by DevOps for CI purposes.
- **revert**: Reverts a previous commit.
LICENSE
@ -1,7 +1,7 @@
Source code in this repository is variously licensed under the licenses below.

For EMQX: Apache License 2.0, see APL.txt,
which applies to all source files except for the lib-ee sub-directory.
For Default: Apache License 2.0, see APL.txt,
which applies to all source files except for folders covered by the Business Source License.

For EMQX Enterprise (since version 5.0): Business Source License 1.1,
see lib-ee/BSL.txt, which applies to source code in the lib-ee sub-directory.
see apps/emqx_bridge_kafka/BSL.txt as an example; please check the license files under the sub-directories of apps.
Makefile
@ -2,12 +2,8 @@ REBAR = $(CURDIR)/rebar3
BUILD = $(CURDIR)/build
SCRIPTS = $(CURDIR)/scripts
export EMQX_RELUP ?= true
export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.0-26:1.13.4-24.3.4.2-1-debian11
export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-24.3.4.2-2-debian11
export EMQX_DEFAULT_RUNNER = debian:11-slim
export OTP_VSN ?= $(shell $(CURDIR)/scripts/get-otp-vsn.sh)
export ELIXIR_VSN ?= $(shell $(CURDIR)/scripts/get-elixir-vsn.sh)
export EMQX_DASHBOARD_VERSION ?= v1.1.4
export EMQX_EE_DASHBOARD_VERSION ?= e1.0.1-beta.9
export EMQX_REL_FORM ?= tgz
export QUICER_DOWNLOAD_FROM_RELEASE = 1
ifeq ($(OS),Windows_NT)
@ -17,6 +13,22 @@ else
	FIND=find
endif

# Dashboard version
# from https://github.com/emqx/emqx-dashboard5
export EMQX_DASHBOARD_VERSION ?= v1.2.4-1
export EMQX_EE_DASHBOARD_VERSION ?= e1.0.6

# `:=` must be used here, otherwise the `$(shell ...)` would be re-evaluated
# every time the variable is used, so the shell script would be executed tons of times.
# In make 4.4+, for backward compatibility, the value from the original environment is used.
# https://github.com/emqx/emqx/pull/10627
ifeq ($(strip $(OTP_VSN)),)
  export OTP_VSN := $(shell $(SCRIPTS)/get-otp-vsn.sh)
endif
ifeq ($(strip $(ELIXIR_VSN)),)
  export ELIXIR_VSN := $(shell $(SCRIPTS)/get-elixir-vsn.sh)
endif
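To make the `:=` vs `?=`/`=` distinction concrete, here is a minimal sketch (illustrative only, not part of this Makefile):

# With `=` (lazy), $(shell ...) re-runs on every expansion of the variable;
# with `:=` (immediate), it runs exactly once, when the Makefile is parsed.
NOW_LAZY  = $(shell date +%s%N)
NOW_ONCE := $(shell date +%s%N)

demo:
	@echo "$(NOW_LAZY) vs $(NOW_LAZY)"   # usually two different timestamps
	@echo "$(NOW_ONCE) vs $(NOW_ONCE)"   # the same value twice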

PROFILE ?= emqx
REL_PROFILES := emqx emqx-enterprise
PKG_PROFILES := emqx-pkg emqx-enterprise-pkg
@ -73,24 +85,36 @@ proper: $(REBAR)
test-compile: $(REBAR) merge-config
	$(REBAR) as test compile

.PHONY: $(REL_PROFILES:%=%-compile)
$(REL_PROFILES:%=%-compile): $(REBAR) merge-config
	$(REBAR) as $(@:%-compile=%) compile

.PHONY: ct
ct: $(REBAR) merge-config
	@ENABLE_COVER_COMPILE=1 $(REBAR) ct --name $(CT_NODE_NAME) -c -v --cover_export_name $(CT_COVER_EXPORT_PREFIX)-ct

## only check bpapi for enterprise profile because it's a super-set.
.PHONY: static_checks
static_checks:
	@$(REBAR) as check do dialyzer, xref, ct --suite apps/emqx/test/emqx_static_checks --readable $(CT_READABLE)
	@$(REBAR) as check do xref, dialyzer
	@if [ "$${PROFILE}" = 'emqx-enterprise' ]; then $(REBAR) ct --suite apps/emqx/test/emqx_static_checks --readable $(CT_READABLE); fi
	./scripts/check-i18n-style.sh

APPS=$(shell $(SCRIPTS)/find-apps.sh)

.PHONY: $(APPS:%=%-ct)
define gen-app-ct-target
$1-ct: $(REBAR)
	@$(SCRIPTS)/pre-compile.sh $(PROFILE)
	@ENABLE_COVER_COMPILE=1 $(REBAR) ct -c -v \
		--name $(CT_NODE_NAME) \
		--cover_export_name $(CT_COVER_EXPORT_PREFIX)-$(subst /,-,$1) \
		--suite $(shell $(SCRIPTS)/find-suites.sh $1)
$1-ct: $(REBAR) merge-config
	$(eval SUITES := $(shell $(SCRIPTS)/find-suites.sh $1))
ifneq ($(SUITES),)
	@ENABLE_COVER_COMPILE=1 $(REBAR) ct -c -v \
		--readable=$(CT_READABLE) \
		--name $(CT_NODE_NAME) \
		--cover_export_name $(CT_COVER_EXPORT_PREFIX)-$(subst /,-,$1) \
		--suite $(SUITES)
else
	@echo 'No suites found for $1'
endif
endef
$(foreach app,$(APPS),$(eval $(call gen-app-ct-target,$(app))))
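The eval/call pair above stamps out one `ct` target per app found by find-apps.sh. As an illustrative sketch (the app name is hypothetical, not from this diff), `$(call gen-app-ct-target,apps/emqx_authn)` expands to roughly:

apps/emqx_authn-ct: $(REBAR) merge-config
	@ENABLE_COVER_COMPILE=1 $(REBAR) ct -c -v \
		--readable=$(CT_READABLE) \
		--name $(CT_NODE_NAME) \
		--cover_export_name $(CT_COVER_EXPORT_PREFIX)-apps-emqx_authn \
		--suite <suites found by find-suites.sh>

so `make apps/emqx_authn-ct` runs only that app's test suites.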
@ -103,7 +127,7 @@ endef
$(foreach app,$(APPS),$(eval $(call gen-app-prop-target,$(app))))

.PHONY: ct-suite
ct-suite: $(REBAR)
ct-suite: $(REBAR) merge-config
ifneq ($(TESTCASE),)
ifneq ($(GROUP),)
	$(REBAR) ct -v --readable=$(CT_READABLE) --name $(CT_NODE_NAME) --suite $(SUITE) --case $(TESTCASE) --group $(GROUP)
@ -130,6 +154,11 @@ COMMON_DEPS := $(REBAR)
$(REL_PROFILES:%=%): $(COMMON_DEPS)
	@$(BUILD) $(@) rel

.PHONY: compile $(PROFILES:%=compile-%)
compile: $(PROFILES:%=compile-%)
$(PROFILES:%=compile-%):
	@$(BUILD) $(@:compile-%=%) apps

## Not calling rebar3 clean because
## 1. rebar3 clean relies on rebar3, meaning it reads config, fetches dependencies etc.
## 2. it's slow
@ -148,7 +177,9 @@ $(PROFILES:%=clean-%):
.PHONY: clean-all
clean-all:
	@rm -f rebar.lock
	@rm -rf deps
	@rm -rf _build
	@rm -f emqx_dialyzer_*_plt

.PHONY: deps-all
deps-all: $(REBAR) $(PROFILES:%=deps-%)
@ -212,11 +243,15 @@ endef
$(foreach pt,$(PKG_PROFILES),$(eval $(call gen-pkg-target,$(pt))))

.PHONY: run
run: $(PROFILE) quickrun
run: compile-$(PROFILE) quickrun

.PHONY: quickrun
quickrun:
	./_build/$(PROFILE)/rel/emqx/bin/emqx console
	./dev -p $(PROFILE)

## Take the currently set PROFILE
docker:
	@$(BUILD) $(PROFILE) docker

## docker target is to create docker instructions
.PHONY: $(REL_PROFILES:%=%-docker) $(REL_PROFILES:%=%-elixir-docker)
@ -230,7 +265,6 @@ $(foreach zt,$(ALL_DOCKERS),$(eval $(call gen-docker-target,$(zt))))
.PHONY:
merge-config:
	@$(SCRIPTS)/merge-config.escript
	@$(SCRIPTS)/merge-i18n.escript

## elixir target is to create release packages using Elixir's Mix
.PHONY: $(REL_PROFILES:%=%-elixir) $(PKG_PROFILES:%=%-elixir)
@ -1,7 +1,7 @@
# EMQX

[](https://github.com/emqx/emqx/releases)
[](https://travis-ci.org/emqx/emqx)
[](https://github.com/emqx/emqx/actions/workflows/run_test_cases.yaml)
[](https://coveralls.io/github/emqx/emqx?branch=master)
[](https://hub.docker.com/r/emqx/emqx)
[](https://slack-invite.emqx.io/)
@ -11,9 +11,6 @@
[](https://www.youtube.com/channel/UCir_r04HIsLjf2qqyZ4A8Cg)


[English](./README.md) | 简体中文 | [русский](./README-RU.md)

EMQX 是一款全球下载量超千万的大规模分布式物联网 MQTT 服务器,单集群支持 1 亿物联网设备连接,消息分发时延低于 1 毫秒。为高可靠、高性能的物联网实时数据移动、处理和集成提供动力,助力企业构建关键业务的 IoT 平台与应用。

EMQX 自 2013 年在 GitHub 发布开源版本以来,获得了来自 50 多个国家和地区的 20000 余家企业用户的广泛认可,累计连接物联网关键设备超过 1 亿台。
|
我们选取了各个编程语言中热门的 MQTT 客户端 SDK,并提供代码示例,帮助您快速掌握 MQTT 客户端库的使用。

- [MQTT X](https://mqttx.app/zh)
- [MQTTX](https://mqttx.app/zh)

  优雅的跨平台 MQTT 5.0 客户端工具,提供了桌面端、命令行、Web 三种版本,帮助您更快的开发和调试 MQTT 服务和应用。
@ -1,7 +1,7 @@
# Брокер EMQX

[](https://github.com/emqx/emqx/releases)
[](https://travis-ci.org/emqx/emqx)
[](https://github.com/emqx/emqx/actions/workflows/run_test_cases.yaml)
[](https://coveralls.io/github/emqx/emqx?branch=master)
[](https://hub.docker.com/r/emqx/emqx)
[](https://slack-invite.emqx.io/)
@ -9,7 +9,6 @@
[](https://twitter.com/EMQTech)
[](https://www.youtube.com/channel/UC5FjR77ErAxvZENEWzQaO5Q)

[English](./README.md) | [简体中文](./README-CN.md) | русский

*EMQX* — это самый масштабируемый и популярный высокопроизводительный MQTT брокер с полностью открытым кодом для интернета вещей, межмашинного взаимодействия и мобильных приложений. EMQX может поддерживать более чем 100 миллионов одновременных соединений на одном кластере с задержкой в 1 миллисекунду, а также принимать и обрабатывать миллионы MQTT сообщений в секунду.
@ -72,7 +71,7 @@ docker run -d --name emqx -p 1883:1883 -p 8083:8083 -p 8883:8883 -p 8084:8084 -p
Мы выбрали популярные SDK клиентов MQTT на различных языках программирования и предоставили примеры кода, которые помогут вам быстро понять, как использовать клиенты MQTT.

- [MQTT X](https://mqttx.app/)
- [MQTTX](https://mqttx.app/)

  Элегантный кроссплатформенный клиент MQTT 5.0, в виде десктопного приложения, приложения для командной строки и веб-приложения, чтобы помочь вам быстрее разрабатывать и отлаживать службы и приложения MQTT.
README.md
@ -1,7 +1,7 @@
# EMQX

[](https://github.com/emqx/emqx/releases)
[](https://travis-ci.org/emqx/emqx)
[](https://github.com/emqx/emqx/actions/workflows/run_test_cases.yaml)
[](https://coveralls.io/github/emqx/emqx?branch=master)
[](https://hub.docker.com/r/emqx/emqx)
[](https://slack-invite.emqx.io/)
@ -10,9 +10,6 @@
[](https://www.youtube.com/channel/UC5FjR77ErAxvZENEWzQaO5Q)


English | [简体中文](./README-CN.md) | [русский](./README-RU.md)

EMQX is the world's most scalable open-source MQTT broker, with high performance that connects 100M+ IoT devices in one cluster while maintaining a throughput of one million messages per second and sub-millisecond latency.

EMQX supports multiple open standard protocols like MQTT, HTTP, QUIC, and WebSocket. It's 100% compliant with the MQTT 5.0 and 3.x standards, and secures bi-directional communication with MQTT over TLS/SSL and various authentication mechanisms.
|
## Get Started

#### EMQX Cloud
#### Run EMQX in the Cloud

The simplest way to set up EMQX is to create a managed deployment with EMQX Cloud. You can [try EMQX Cloud for free](https://www.emqx.com/en/signup?utm_source=github.com&utm_medium=referral&utm_campaign=emqx-readme-to-cloud&continue=https://cloud-intl.emqx.com/console/deployments/0?oper=new), no credit card required.
@ -62,6 +59,7 @@ For more organised improvement proposals, you can send pull requests to [EIP](ht
## Get Involved

- Follow [@EMQTech on Twitter](https://twitter.com/EMQTech).
- Join our [Slack](https://slack-invite.emqx.io/).
- If you have a specific question, check out our [discussion forums](https://github.com/emqx/emqx/discussions).
- For general discussions, join us on the [official Discord](https://discord.gg/xYGf3fQnES) team.
- Keep updated on [EMQX YouTube](https://www.youtube.com/channel/UC5FjR77ErAxvZENEWzQaO5Q) by subscribing.
|
We have selected popular MQTT client SDKs in various programming languages and provided code examples to help you quickly understand the use of MQTT clients.

- [MQTT X](https://mqttx.app/)
- [MQTTX](https://mqttx.app/)

  An elegant cross-platform MQTT 5.0 client tool that provides desktop, command line, and web to help you develop and debug MQTT services and applications faster.
@ -1,39 +0,0 @@
listeners.tcp.default {
    bind = "0.0.0.0:1883"
    max_connections = 1024000
}

listeners.ssl.default {
    bind = "0.0.0.0:8883"
    max_connections = 512000
    ssl_options {
        keyfile = "{{ platform_etc_dir }}/certs/key.pem"
        certfile = "{{ platform_etc_dir }}/certs/cert.pem"
        cacertfile = "{{ platform_etc_dir }}/certs/cacert.pem"
    }
}

listeners.ws.default {
    bind = "0.0.0.0:8083"
    max_connections = 1024000
    websocket.mqtt_path = "/mqtt"
}

listeners.wss.default {
    bind = "0.0.0.0:8084"
    max_connections = 512000
    websocket.mqtt_path = "/mqtt"
    ssl_options {
        keyfile = "{{ platform_etc_dir }}/certs/key.pem"
        certfile = "{{ platform_etc_dir }}/certs/cert.pem"
        cacertfile = "{{ platform_etc_dir }}/certs/cacert.pem"
    }
}

# listeners.quic.default {
#     enabled = true
#     bind = "0.0.0.0:14567"
#     max_connections = 1024000
#     keyfile = "{{ platform_etc_dir }}/certs/key.pem"
#     certfile = "{{ platform_etc_dir }}/certs/cert.pem"
#}
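For reference, the default TCP listener above can be exercised with an MQTT client tool such as MQTTX CLI; the topic and payload below are an illustrative sketch, not part of this diff:

mqttx pub -h 127.0.0.1 -p 1883 -t 't/1' -m 'hello'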
@ -1,7 +1,9 @@
%% The options in the {server, Opts} tuple are used when calling ssl:ssl_accept/3,
%% and the options in the {client, Opts} tuple are used when calling ssl:connect/4.
%%
%% More information at: http://erlang.org/doc/apps/ssl/ssl_distribution.html
%% This additional config file is used when the config 'cluster.proto_dist' in emqx.conf is set to 'inet_tls',
%% which means the EMQX nodes will connect to each other over TLS.
%% For more information about inter-broker security, see: https://docs.emqx.com/en/enterprise/v5.0/deploy/cluster/security.html

%% For more technical details, see: http://erlang.org/doc/apps/ssl/ssl_distribution.html

[{server,
  [
   %{log_level, debug}, %% NOTE: debug level logging impacts performance, and requires the EMQX logging level to be set to 'debug' as well
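As an illustrative sketch (the option values here are assumptions, not part of this diff), a filled-in server section might look like:

[{server,
  [{certfile, "etc/certs/cert.pem"},
   {keyfile, "etc/certs/key.pem"},
   {secure_renegotiate, true}]}].

combined with `cluster.proto_dist = inet_tls` in emqx.conf, as the comments above describe.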
@ -24,9 +24,6 @@
## Sets the maximum number of atoms the virtual machine can handle.
#+t 1048576

## Set the location of crash dumps
#-env ERL_CRASH_DUMP {{ platform_log_dir }}/crash.dump

## Set how many times generational garbage collections can be done without
## forcing a fullsweep collection.
-env ERL_FULLSWEEP_AFTER 1000
@ -40,11 +37,6 @@
## Prevent user from accidentally calling a function from the prompt that could harm a running system.
-stdlib restricted_shell emqx_restricted_shell

## Specifies the net_kernel tick time in seconds.
## This is the approximate time a connected node may be unresponsive until
## it is considered down and thereby disconnected.
-kernel net_ticktime 120

## Sets the distribution buffer busy limit (dist_buf_busy_limit).
## Preferably set in `emqx.conf`,
#+zdbbl 8192
@ -121,3 +113,6 @@
## Mnesia thresholds
-mnesia dump_log_write_threshold 5000
-mnesia dump_log_time_threshold 60000

## Disable os_mon's disksup by default
-os_mon start_disksup false
@ -1,190 +0,0 @@
emqx_limiter_schema {

  failure_strategy {
    desc {
      en: """The strategy to apply after all retries have failed."""
      zh: """当所有的重试都失败后的处理策略"""
    }
    label: {
      en: """Failure Strategy"""
      zh: """失败策略"""
    }
  }

  max_retry_time {
    desc {
      en: """The maximum retry time after a failed token acquisition."""
      zh: """申请失败后,尝试重新申请的时长最大值"""
    }
    label: {
      en: """Max Retry Time"""
      zh: """最大重试时间"""
    }
  }

  divisible {
    desc {
      en: """Whether the number of requested tokens can be split."""
      zh: """申请的令牌数是否可以被分割"""
    }
    label: {
      en: """Divisible"""
      zh: """是否可分割"""
    }
  }

  client_bucket_capacity {
    desc {
      en: """The token capacity per user."""
      zh: """每个使用者的令牌容量上限"""
    }
    label: {
      en: """Capacity"""
      zh: """容量"""
    }
  }

  capacity {
    desc {
      en: """The capacity of this token bucket."""
      zh: """该令牌桶的容量"""
    }
    label: {
      en: """Capacity"""
      zh: """容量"""
    }
  }

  low_watermark {
    desc {
      en: """If the remaining tokens are lower than this value,
the check/consume will succeed, but it will be forced to wait for a short period of time."""
      zh: """当桶中剩余的令牌数低于这个值,即使令牌申请成功了,也会被强制暂停一会儿"""
    }
    label: {
      en: """Low Watermark"""
      zh: """低水位线"""
    }
  }

  initial {
    desc {
      en: """The initial number of tokens for this bucket."""
      zh: """桶中的初始令牌数"""
    }
    label: {
      en: """Initial"""
      zh: """初始令牌数"""
    }
  }

  rate {
    desc {
      en: """The token generation rate for this bucket."""
      zh: """桶的令牌生成速率"""
    }
    label: {
      en: """Rate"""
      zh: """速率"""
    }
  }

  client {
    desc {
      en: """The rate limit for each user of the bucket."""
      zh: """对桶的每个使用者的速率控制设置"""
    }
    label: {
      en: """Per Client"""
      zh: """每个使用者的限制"""
    }
  }

  bucket_cfg {
    desc {
      en: """Bucket configs."""
      zh: """桶的配置"""
    }
    label: {
      en: """Buckets"""
      zh: """桶的配置"""
    }
  }

  burst {
    desc {
      en: """The burst rate. This value is added on top of rate:<br/>
burst + rate = the maximum rate that can be reached while the limiter is bursting."""
      zh: """突发速率。
突发速率允许短时间内速率超过设置的速率值,突发速率 + 速率 = 当前桶能达到的最大速率值"""
    }
    label: {
      en: """Burst"""
      zh: """突发速率"""
    }
  }
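To make the burst arithmetic concrete (the numbers are illustrative only, not defaults from this schema): with rate = 1000 tokens/s and burst = 500 tokens/s, the bucket refills at 1000 tokens/s in steady state, and may briefly reach 1000 + 500 = 1500 tokens/s during a burst.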

  message_routing {
    desc {
      en: """The message routing limiter.
This is used to limit the forwarding rate for this EMQX node.
Once the limit is reached, new publishes will be refused."""
      zh: """消息派发速率控制器。
这个用来控制当前节点内的消息派发速率,当达到最大速率后,新的推送将会被拒绝"""
    }
    label: {
      en: """Message Routing"""
      zh: """消息派发"""
    }
  }

  connection {
    desc {
      en: """The connection limiter.
This is used to limit the connection rate for this EMQX node.
Once the limit is reached, new connections will be refused."""
      zh: """连接速率控制器。
这个用来控制当前节点上的连接速率,当达到最大速率后,新的连接将会被拒绝"""
    }
    label: {
      en: """Connection"""
      zh: """连接速率"""
    }
  }

  message_in {
    desc {
      en: """The message-in limiter.
This is used to limit the number of inbound messages for this EMQX node.
Once the limit is reached, the restricted client will be slowed down or even suspended for a while."""
      zh: """流入速率控制器。
这个用来控制当前节点上的消息流入速率,当达到最大速率后,会话将会被限速甚至被强制挂起一小段时间"""
    }
    label: {
      en: """Message In"""
      zh: """消息流入速率"""
    }
  }

  bytes_in {
    desc {
      en: """The bytes_in limiter.
This is used to limit the inbound bytes rate for this EMQX node.
Each message consumes as many tokens as its binary size.
Once the limit is reached, the restricted client will be slowed down or even suspended for a while."""
      zh: """流入字节率控制器。
这个是用来控制当前节点上的数据流入的字节率,每条消息将会消耗和其二进制大小等量的令牌,当达到最大速率后,会话将会被限速甚至被强制挂起一小段时间"""
    }
    label: {
      en: """Bytes In"""
      zh: """流入字节率"""
    }
  }

  internal {
    desc {
      en: """Limiter for EMQX internal apps."""
      zh: """EMQX 内部功能所用限制器。"""
    }
  }
}
@ -0,0 +1,31 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%%     http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------

%% This file contains common macros for testing.
%% It must not be used anywhere except in test suites.

-include_lib("snabbkaffe/include/snabbkaffe.hrl").

%% Runs Code, then waits up to Timeout ms for a snabbkaffe trace event
%% matching EventMatch; the assertion fails if no such event is observed.
-define(assertWaitEvent(Code, EventMatch, Timeout),
    ?assertMatch(
        {_, {ok, EventMatch}},
        ?wait_async_action(
            Code,
            EventMatch,
            Timeout
        )
    )
).
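A usage sketch for the macro (the event kind `bridge_connected` and the function under test are hypothetical, not from this diff):

%% Assert that the code under test emits a matching snabbkaffe
%% trace event within 5000 ms of being run.
?assertWaitEvent(
    emqx_bridge:start(BridgeId),
    #{?snk_kind := bridge_connected},
    5000
),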