Compare commits

..

3 Commits

Author SHA1 Message Date
Zaiming Shi f5b70457c8 build: prepare to disclose ee code 2021-11-18 16:13:09 +01:00
Zaiming Shi 67f7dfbc6e chore: pin Erlang/OTP 24.1.5-2 2021-11-18 16:12:40 +01:00
Zaiming Shi 6b85b2413b build: delete unused keydelete 2021-11-18 15:41:09 +01:00
3619 changed files with 85816 additions and 503140 deletions

.ci/build_packages/tests.sh Executable file

@ -0,0 +1,228 @@
#!/bin/bash
set -x -e -u

export DEBUG=1
export CODE_PATH=${CODE_PATH:-"/emqx"}
export EMQX_NAME=${EMQX_NAME:-"emqx"}
export PACKAGE_PATH="${CODE_PATH}/_packages/${EMQX_NAME}"
export RELUP_PACKAGE_PATH="${CODE_PATH}/_upgrade_base"

case "$(uname -m)" in
    x86_64)
        ARCH='amd64'
        ;;
    aarch64)
        ARCH='arm64'
        ;;
    arm*)
        ARCH=arm
        ;;
esac
export ARCH
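
# Set up the test environment: create the package output directory, clone the
# paho.mqtt.testing interoperability suite (branch develop-4.0) and install pytest.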
emqx_prepare(){
    mkdir -p "${PACKAGE_PATH}"
    if [ ! -d "/paho-mqtt-testing" ]; then
        git clone -b develop-4.0 https://github.com/emqx/paho.mqtt.testing.git /paho-mqtt-testing
    fi
    pip3 install pytest
}
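
# Smoke-test every package found in PACKAGE_PATH. zip packages are unpacked and
# started directly; deb and rpm packages are installed, exercised via run_test
# and then removed again.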
emqx_test(){
    cd "${PACKAGE_PATH}"
    for var in "$PACKAGE_PATH"/"${EMQX_NAME}"-*;do
        case ${var##*.} in
            "zip")
                packagename=$(basename "${PACKAGE_PATH}/${EMQX_NAME}"-*.zip)
                unzip -q "${PACKAGE_PATH}/${packagename}"
                export EMQX_ZONES__DEFAULT__MQTT__SERVER_KEEPALIVE=60
                export EMQX_MQTT__MAX_TOPIC_ALIAS=10
                export EMQX_LOG__CONSOLE_HANDLER__LEVEL=debug
                export EMQX_LOG__FILE_HANDLERS__DEFAULT__LEVEL=debug
                if [[ $(arch) == *arm* || $(arch) == aarch64 ]]; then
                    export EMQX_LISTENERS__QUIC__DEFAULT__ENABLED=false
                    export WAIT_FOR_ERLANG_STOP=120
                fi
                # sed -i '/emqx_telemetry/d' "${PACKAGE_PATH}"/emqx/data/loaded_plugins
                echo "running ${packagename} start"
                if ! "${PACKAGE_PATH}"/emqx/bin/emqx start; then
                    cat "${PACKAGE_PATH}"/emqx/log/erlang.log.1 || true
                    cat "${PACKAGE_PATH}"/emqx/log/emqx.log.1 || true
                    exit 1
                fi
                IDLE_TIME=0
                while ! curl http://127.0.0.1:18083/api/v5/status >/dev/null 2>&1; do
                    if [ $IDLE_TIME -gt 10 ]
                    then
                        echo "emqx running error"
                        exit 1
                    fi
                    sleep 10
                    IDLE_TIME=$((IDLE_TIME+1))
                done
                pytest -v /paho-mqtt-testing/interoperability/test_client/V5/test_connect.py::test_basic
                if ! "${PACKAGE_PATH}"/emqx/bin/emqx stop; then
                    cat "${PACKAGE_PATH}"/emqx/log/erlang.log.1 || true
                    cat "${PACKAGE_PATH}"/emqx/log/emqx.log.1 || true
                    exit 1
                fi
                echo "running ${packagename} stop"
                rm -rf "${PACKAGE_PATH}"/emqx
                ;;
            "deb")
                packagename=$(basename "${PACKAGE_PATH}/${EMQX_NAME}"-*.deb)
                dpkg -i "${PACKAGE_PATH}/${packagename}"
                if [ "$(dpkg -l |grep emqx |awk '{print $1}')" != "ii" ]
                then
                    echo "package install error"
                    exit 1
                fi
                echo "running ${packagename} start"
                run_test
                echo "running ${packagename} stop"
                dpkg -r "${EMQX_NAME}"
                if [ "$(dpkg -l |grep emqx |awk '{print $1}')" != "rc" ]
                then
                    echo "package remove error"
                    exit 1
                fi
                dpkg -P "${EMQX_NAME}"
                if dpkg -l |grep -q emqx
                then
                    echo "package uninstall error"
                    exit 1
                fi
                ;;
            "rpm")
                packagename=$(basename "${PACKAGE_PATH}/${EMQX_NAME}"-*.rpm)
                if [[ "${ARCH}" == "amd64" && $(rpm -E '%{rhel}') == 7 ]] ;
                then
                    # EMQX OTP requires openssl11 to have TLS1.3 support
                    yum install -y openssl11;
                fi
                rpm -ivh "${PACKAGE_PATH}/${packagename}"
                if ! rpm -q emqx | grep -q emqx; then
                    echo "package install error"
                    exit 1
                fi
                echo "running ${packagename} start"
                run_test
                echo "running ${packagename} stop"
                rpm -e "${EMQX_NAME}"
                if [ "$(rpm -q emqx)" != "package emqx is not installed" ];then
                    echo "package uninstall error"
                    exit 1
                fi
                ;;
        esac
    done
}
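
# Exercise an installed (deb/rpm) package: append test settings to emqx_vars,
# start the broker, run a basic MQTT v5 connect test and make sure it stops
# cleanly; on Debian/Ubuntu the service wrapper is tested as well.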
run_test(){
    # sed -i '/emqx_telemetry/d' /var/lib/emqx/loaded_plugins
    emqx_env_vars=$(dirname "$(readlink "$(command -v emqx)")")/../releases/emqx_vars
    if [ -f "$emqx_env_vars" ];
    then
        tee -a "$emqx_env_vars" <<EOF
export EMQX_ZONES__DEFAULT__MQTT__SERVER_KEEPALIVE=60
export EMQX_MQTT__MAX_TOPIC_ALIAS=10
export EMQX_LOG__CONSOLE_HANDLER__LEVEL=debug
export EMQX_LOG__FILE_HANDLERS__DEFAULT__LEVEL=debug
EOF
        ## for ARM, due to CI env issue, skip start of quic listener for the moment
        [[ $(arch) == *arm* || $(arch) == aarch64 ]] && tee -a "$emqx_env_vars" <<EOF
export EMQX_LISTENERS__QUIC__DEFAULT__ENABLED=false
export WAIT_FOR_ERLANG_STOP=120
EOF
    else
        echo "Error: cannot locate emqx_vars"
        exit 1
    fi
    if ! emqx 'start'; then
        cat /var/log/emqx/erlang.log.1 || true
        cat /var/log/emqx/emqx.log.1 || true
        exit 1
    fi
    IDLE_TIME=0
    while ! curl http://127.0.0.1:18083/api/v5/status >/dev/null 2>&1; do
        if [ $IDLE_TIME -gt 10 ]
        then
            echo "emqx running error"
            exit 1
        fi
        sleep 10
        IDLE_TIME=$((IDLE_TIME+1))
    done
    pytest -v /paho-mqtt-testing/interoperability/test_client/V5/test_connect.py::test_basic
    # shellcheck disable=SC2009 # pgrep does not support Extended Regular Expressions
    ps -ef | grep -E '\-progname\s.+emqx\s'
    if ! emqx 'stop'; then
        # shellcheck disable=SC2009 # pgrep does not support Extended Regular Expressions
        ps -ef | grep -E '\-progname\s.+emqx\s'
        echo "ERROR: failed_to_stop_emqx_with_the_stop_command"
        cat /var/log/emqx/erlang.log.1 || true
        cat /var/log/emqx/emqx.log.1 || true
        exit 1
    fi
    if [ "$(sed -n '/^ID=/p' /etc/os-release | sed -r 's/ID=(.*)/\1/g' | sed 's/"//g')" = ubuntu ] \
       || [ "$(sed -n '/^ID=/p' /etc/os-release | sed -r 's/ID=(.*)/\1/g' | sed 's/"//g')" = debian ] ;then
        if ! service emqx start; then
            cat /var/log/emqx/erlang.log.1 || true
            cat /var/log/emqx/emqx.log.1 || true
            exit 1
        fi
        IDLE_TIME=0
        while ! curl http://127.0.0.1:18083/api/v5/status >/dev/null 2>&1; do
            if [ $IDLE_TIME -gt 10 ]
            then
                echo "emqx service error"
                exit 1
            fi
            sleep 10
            IDLE_TIME=$((IDLE_TIME+1))
        done
        service emqx stop
    fi
}
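
# Hot-upgrade (relup) test: for every older zip package found in
# RELUP_PACKAGE_PATH, start that version, install the freshly built
# TARGET_VERSION on top of it and verify it becomes the permanent release.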
relup_test(){
    TARGET_VERSION="$("$CODE_PATH"/pkg-vsn.sh)"
    if [ -d "${RELUP_PACKAGE_PATH}" ];then
        cd "${RELUP_PACKAGE_PATH}"
        find . -maxdepth 1 -name "${EMQX_NAME}-*-${ARCH}.zip" |
        while read -r pkg; do
            packagename=$(basename "${pkg}")
            unzip "$packagename"
            if ! ./emqx/bin/emqx start; then
                cat emqx/log/erlang.log.1 || true
                cat emqx/log/emqx.log.1 || true
                exit 1
            fi
            ./emqx/bin/emqx_ctl status
            ./emqx/bin/emqx versions
            cp "${PACKAGE_PATH}/${EMQX_NAME}"-*-"${TARGET_VERSION}-${ARCH}".zip ./emqx/releases
            ./emqx/bin/emqx install "${TARGET_VERSION}"
            [ "$(./emqx/bin/emqx versions |grep permanent | awk '{print $2}')" = "${TARGET_VERSION}" ] || exit 1
            ./emqx/bin/emqx_ctl status
            ./emqx/bin/emqx stop
            rm -rf emqx
        done
    fi
}

emqx_prepare
emqx_test
relup_test


@ -1,28 +1,8 @@
MYSQL_TAG=8
REDIS_TAG=7.0
MONGO_TAG=5
REDIS_TAG=6
MONGO_TAG=4
PGSQL_TAG=13
LDAP_TAG=2.4.50
INFLUXDB_TAG=2.5.0
TDENGINE_TAG=3.0.2.4
DYNAMO_TAG=1.21.0
CASSANDRA_TAG=3.11
MINIO_TAG=RELEASE.2023-03-20T20-16-18Z
OPENTS_TAG=9aa7f88
KINESIS_TAG=2.1
HSTREAMDB_TAG=v0.19.3
HSTREAMDB_ZK_TAG=3.8.1
MS_IMAGE_ADDR=mcr.microsoft.com/mssql/server
SQLSERVER_TAG=2019-CU19-ubuntu-20.04
# Password for the 'elastic' user (at least 6 characters)
ELASTIC_PASSWORD="emqx123"
# Password for the 'kibana_system' user (at least 6 characters)
KIBANA_PASSWORD="emqx123"
# Version of Elastic products
ELASTIC_TAG=8.11.4
LICENSE=basic
TARGET=emqx/emqx
EMQX_TAG=build-alpine-amd64

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1,23 +0,0 @@
Certificate and Key files for testing
## Cassandra (v3.x)
### How to convert server PEM to JKS Format
1. Convert server.crt and server.key to server.p12
```bash
openssl pkcs12 -export -in server.crt -inkey server.key -out server.p12 -name "certificate"
```
2. Convert server.p12 to server.jks
```bash
keytool -importkeystore -srckeystore server.p12 -srcstoretype pkcs12 -destkeystore server.jks
```
### How to convert CA PEM certificate to truststore.jks
```bash
keytool -import -file ca.pem -keystore truststore.jks
```
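
To sanity-check the resulting stores, `keytool -list` can be used (a minimal sketch; `changeit` is only a placeholder for whatever store password was chosen during the steps above):
```bash
# Inspect the converted keystore and the CA truststore.
# Replace "changeit" with the password entered during conversion.
keytool -list -v -keystore server.jks -storepass changeit
keytool -list -keystore truststore.jks -storepass changeit
```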


@ -1,29 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIE5DCCAswCCQCF3o0gIdaNDjANBgkqhkiG9w0BAQsFADA0MRIwEAYDVQQKDAlF
TVFYIFRlc3QxHjAcBgNVBAMMFUNlcnRpZmljYXRlIEF1dGhvcml0eTAeFw0yMTEy
MzAwODQxMTFaFw00OTA1MTcwODQxMTFaMDQxEjAQBgNVBAoMCUVNUVggVGVzdDEe
MBwGA1UEAwwVQ2VydGlmaWNhdGUgQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEF
AAOCAg8AMIICCgKCAgEAqmqSrxyH16j63QhqGLT1UO8I+m6BM3HfnJQM8laQdtJ0
WgHqCh0/OphH3S7v4SfF4fNJDEJWMWuuzJzU9cTqHPLzhvo3+ZHcMIENgtY2p2Cf
7AQjEqFViEDyv2ZWNEe76BJeShntdY5NZr4gIPar99YGG/Ln8YekspleV+DU38rE
EX9WzhgBr02NN9z4NzIxeB+jdvPnxcXs3WpUxzfnUjOQf/T1tManvSdRbFmKMbxl
A8NLYK3oAYm8EbljWUINUNN6loqYhbigKv8bvo5S4xvRqmX86XB7sc0SApngtNcg
O0EKn8z/KVPDskE+8lMfGMiU2e2Tzw6Rph57mQPOPtIp5hPiKRik7ST9n0p6piXW
zRLplJEzSjf40I1u+VHmpXlWI/Fs8b1UkDSMiMVJf0LyWb4ziBSZOY2LtZzWHbWj
LbNgxQcwSS29tKgUwfEFmFcm+iOM59cPfkl2IgqVLh5h4zmKJJbfQKSaYb5fcKRf
50b1qsN40VbR3Pk/0lJ0/WqgF6kZCExmT1qzD5HJES/5grjjKA4zIxmHOVU86xOF
ouWvtilVR4PGkzmkFvwK5yRhBUoGH/A9BurhqOc0QCGay1kqHQFA6se4JJS+9KOS
x8Rn1Nm6Pi7sd6Le3cKmHTlyl5a/ofKqTCX2Qh+v/7y62V1V1wnoh3ipRjdPTnMC
AwEAATANBgkqhkiG9w0BAQsFAAOCAgEARCqaocvlMFUQjtFtepO2vyG1krn11xJ0
e7md26i+g8SxCCYqQ9IqGmQBg0Im8fyNDKRN/LZoj5+A4U4XkG1yya91ZIrPpWyF
KUiRAItchNj3g1kHmI2ckl1N//6Kpx3DPaS7qXZaN3LTExf6Ph+StE1FnS0wVF+s
tsNIf6EaQ+ZewW3pjdlLeAws3jvWKUkROc408Ngvx74zbbKo/zAC4tz8oH9ZcpsT
WD8enVVEeUQKI6ItcpZ9HgTI9TFWgfZ1vYwvkoRwNIeabYI62JKmLEo2vGfGwWKr
c+GjnJ/tlVI2DpPljfWOnQ037/7yyJI/zo65+HPRmGRD6MuW/BdPDYOvOZUTcQKh
kANi5THSbJJgZcG3jb1NLebaUQ1H0zgVjn0g3KhUV+NJQYk8RQ7rHtB+MySqTKlM
kRkRjfTfR0Ykxpks7Mjvsb6NcZENf08ZFPd45+e/ptsxpiKu4e4W4bV7NZDvNKf9
0/aD3oGYNMiP7s+KJ1lRSAjnBuG21Yk8FpzG+yr8wvJhV8aFgNQ5wIH86SuUTmN0
5bVzFEIcUejIwvGoQEctNHBlOwHrb7zmB6OwyZeMapdXBQ+9UDhYg8ehDqdDOdfn
wsBcnjD2MwNhlE1hjL+tZWLNwSHiD6xx3LvNoXZu2HK8Cp3SOrkE69cFghYMIZZb
T+fp6tNL6LE=
-----END CERTIFICATE-----


@ -1,51 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIJKQIBAAKCAgEAqmqSrxyH16j63QhqGLT1UO8I+m6BM3HfnJQM8laQdtJ0WgHq
Ch0/OphH3S7v4SfF4fNJDEJWMWuuzJzU9cTqHPLzhvo3+ZHcMIENgtY2p2Cf7AQj
EqFViEDyv2ZWNEe76BJeShntdY5NZr4gIPar99YGG/Ln8YekspleV+DU38rEEX9W
zhgBr02NN9z4NzIxeB+jdvPnxcXs3WpUxzfnUjOQf/T1tManvSdRbFmKMbxlA8NL
YK3oAYm8EbljWUINUNN6loqYhbigKv8bvo5S4xvRqmX86XB7sc0SApngtNcgO0EK
n8z/KVPDskE+8lMfGMiU2e2Tzw6Rph57mQPOPtIp5hPiKRik7ST9n0p6piXWzRLp
lJEzSjf40I1u+VHmpXlWI/Fs8b1UkDSMiMVJf0LyWb4ziBSZOY2LtZzWHbWjLbNg
xQcwSS29tKgUwfEFmFcm+iOM59cPfkl2IgqVLh5h4zmKJJbfQKSaYb5fcKRf50b1
qsN40VbR3Pk/0lJ0/WqgF6kZCExmT1qzD5HJES/5grjjKA4zIxmHOVU86xOFouWv
tilVR4PGkzmkFvwK5yRhBUoGH/A9BurhqOc0QCGay1kqHQFA6se4JJS+9KOSx8Rn
1Nm6Pi7sd6Le3cKmHTlyl5a/ofKqTCX2Qh+v/7y62V1V1wnoh3ipRjdPTnMCAwEA
AQKCAgAoIMA5i7ZRCfFIatrQxoudayvqDGtP+dh1vkbuKYQK9rN/HkRF7W0eFw2U
/6BsnDj0Y50nzdcN/BVFCQj8dknKV0sQ1Yqosbfvk/Pigx6Ley0tHixEDsldNC30
89wIo3uTwf+B42kO7Vs8fjiCipMj4Lm/iwsizJXzmDmm58I4kD5rAFkoXm7HILPI
G7g3BxKu/oQ3VmeVIm4MFSWxY3CM4qd7+eqBjuWgnMmHge4QmBQRNsNhGJIxCoXG
hqjmM69/AM009Z3EnxzYAwo9bLYH1F0iirFrJpl53JgJFMLc0ms8iKw/xL2wtZC3
QLXZycjgxRqH1nGfqAaT30mrVkISFnfNdWmILcBqAQs7lsUL1dYrd7RjwhMsRCy7
KMNR7IlevtjgqRXON6xhJELhXoexubAq5giVLkhwREQIYNr6Cq9WAg6C30oYZMoL
EBTtRciyq/S0Tp2gsUI5beWIhe3B83ZDFc6mxqwhOrd+9kK3gRba3KX1m1Ikp60T
JqFCVzm1vVrcQUJm2xDSeP6d5qSkE+9LEsI+oJhBj7mNHZtkcTXBev8uCgm3QAbB
X/9vH+jhio3RgvK1rSsLwUou40MS81xZOBSyXvixgefQpnbAoI1Ou5wekBB36gek
i9OqKFxmI7f0rwVXcSFmXr/vpXi6UOeGsvz+icbGoGrnPKHswQKCAQEA1SYj/KHe
o/9fPYBAOp66jab8gKB2QcnIskXiEpO1bbCrKfJ0mGmcKiRJJbynC6JGwiWCrLvp
Qgkwk67jziUrCaJ8kEWuK3wTR+I+i/XLQOv2iPSouBHXAeokUHlRNJTrEC23dCyg
jvgQXE7OEwk0UHTUsNm40Whv4uGgJTAkwALCWJyhTazF2xpKTyifQ+zcxFdAipte
T5ErlrHIMJkDo4OzMBfXHcuxb5YG87eU1wxZ+76CcYv51xu8TY6cUyNsXM4OA6UT
drgfaQXVpCMCdbGbh0RwBC4spgWRk9F5m5w78K0ZWI2PSykCOyVySh+dgURnn2kE
Cmzo70TjvEPYRQKCAQEAzK0qULtaqIbm7efcktWjv/eGBeEl5nSMalgJEFCJkTYw
UerLDUKmLClc8dBzrybAglx9hJpWYYg6qioc+TxhIF4b8lNo6QZ/LElZRriv6GdC
tP3kiqLCBUWOQkLp3GZRoixdeF0snll0YeV+eQLuGQWMn2Kkjb0J1VHdvuMSKL2p
PHieogwsuE2FNslctVUxOc+ph1/JKq2cEsYZkVKEn79FS63AXlpzz3px4mCDN5Lm
BK5BRgbP+HiZo3ac4L2DoxpMuLCGoIN6X3iJXINo1akuNtp/1n4AX6LXZytI4UGk
xBeAhBnP7QghtAi1u63ZYUE70cVAPV7ybG+JxVgDVwKCAQEAxdmWc+1I/Y+RN0Qx
2nf2EICdR0QrIRwNmDU4CShks0HXT6OHyOXXGGMAJvA7Wpgx+Arbhj0S4sIm/h7L
xFFJ5rKVz1Fuv1x3hTUj+8SW+1dMS4pWhi3BFzzgonZKA3Xrz+Ovsz2td6gZf6WC
sbbMgZZAyzv9yxuXJ9FpVruekUC+Z4RUUgZ6zctUiK/bTjCyJ+oZtc9MNq04+bNi
cIHIF+Kq1Ix8mGK3/C0VnOqeVRNY/02yRXW53osXOiKTRrTN5EM8TPPQ4lU8ir7o
tWft45OOG3xSQf8eYKkwnTZHHENkfB4hNcqI5SpWsNIsiVNZX2FAkn7nSkoX2eln
Pxz2xQKCAQA/tedmGeuuac+YXoQacMX4C2R8kAjsI3tR3vVzTp6DxQpldWCfUA/J
z1ZPL0PTUYy7B29Kx3/7/BvGvDUon9Lb8G9ijvQpFQyhDHPtv6+B+CKblCx/uwoJ
+gy+M3X4VSE0CftObDJnWBESKA2mPXM/9qo/MsVmGWHmNQWBVc1hQShc2m8GoiOJ
exfsZeGl0E7yX+G1cet8jW33qhJrWfROhYtcc0leFWnXO5YXkVNHCULwUg2fbp9u
CJxKdbF/g35mVtlq5AgEDukYrryTP5RybaclC/6fFbmoC1hhlOeqtnRDVc17UU2X
yuAy2kM3mHYB//xO38ePUu7DMjUAaNUhAoIBAQCpgXw+8oxbXWJUz9xfiJIaDI5d
O2KLkywv+JYUZPHGwb6MjiQ+fh2NOPvdoAy8I4/BBVtelD8BRQIWxvhQZUXJEwxh
mi4gUGw08TUNGqhK6v//sNYo2ssn4VWcJcxdSjwlLVAD1BdpP6OeHKElAPxzsrOW
3AmOdc7qe1OnH6hxPG7p8wvUFkdnJOpATcaysUD++xYZt/cj0OyUhhUIpu0RGHgB
RkfL+yLjOCaHTMkpPVZzjL2RBa14ouX+PmA14Zd4gOOjnayFr9Pmvpi0T3dctnu9
S0+AuKLxU3skSp6L+Sr74QsvgtZOShkMjxLQfPJCW/pKKlqLAuDLTPMqGtrO
-----END RSA PRIVATE KEY-----


@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAzs74tdftT7xGMGXQSoX/nnFkFAOjNtEVOI3bChzR+w6Xwo8Z
OUiOuOjynKvsJeltdmc0L+cbHZh7j+aHuAqVYxavqaqhFneF0f03t17qju9AixoV
JXgNT3ru56aZFa6Ov6NhfZfRirGnbNrg2RhuNeYZ4TYLH7iMR36exNFP83glXwXM
inMd1tsHL7xHLf3KjCbkusA5ncFWcpIUtpuWVn9aAE402dN7BJWfAbkQ4Y3VToR1
P/T+W6WBldv0i2WlNbfiuAzuapA3EzJwoyTrG2Qyz7EtXM8XZdOZ6oJmW4s7c4V/
FBT5knNtmXTt78xBBlIPFas5BAJIeV4eADx9MwIDAQABAoIBAQCZTvcynpJuxIxn
vmItjK5U/4wIBjZNIawQk6BoG7tR2JyJ/1jcjTw4OX/4wr450JRz7MfUJweD5hDb
OTMtLLNXlG6+YR4vsIUEiSlvhy5srVH0jG5Wq2t6mxBVq7vaRd/OkshnuU79+Pq7
iHqclS7GSACxYkXWyxE6wtPh5aTWP8joK/LvYFiOqKPilUnLZ4hBhmL7CRUCZ0ZA
QGNyEhlmiAL+LNKW2RLXPBxlKX21X78ahUQmkkTM0lBK9x6hm4dD3SpLqmZyQQ9M
UfiMbU6XOYlDva/USZzrvTDlRf9uCG9QOsZzngP1aIy8Cq3QHECOeMIPO9WQLMll
SyY+SpyJAoGBAP4fhnbDpQC6ekd9TNoU9GE/FNNNGKLh82GDgnGcWU/oIzv8GlaR
rkEHTb6aRoPpjTxWIjJpScs9kycC+7N3oNo9rub4s5UvllI+EgQ95+j/5fnZx6gO
la8ousLy1hTYu9C0nTWdTV3YtfC0l0opn7Friv5QafNmhSn74DqrH0BHAoGBANBV
/NhBDAH1PHzYA+XuNLYTLv56Q4osmoen17nPnFNWb1TtWblzb0yWp86GGDFcs8CZ
eH0mXCRUzGMSWtOHe4CbIm2brAYXuL2t6+DZ1A22gsnW5avNrosZRS7eN7BE7DDj
5cp9+Es9UWnArzJU7jSWwAtA6o47WHfHU/pqRB21AoGAGx6eKPqEF2nPNuXmV7e4
xNAIluw5XtiiMpvoRdubpG1vpS0oWmi9oe73mwm30MgR7Ih8qciWuXvewmENH3/6
yI+gpMGR2K/1aN166rz4jOMSVfGp3wN/cev00m0774mZsZI03M3mvccs031ST/XV
Nwf1E2Ldi747I9nfeiNc+G0CgYEAslFHD1ntiyd6VGkYPQ978nPM/2dqs7OluILC
tHmslfAfbpOQ/ph9JRK2IqDHyEhOWoWBiazxpO8n2Yx2TSNjZBpkh2h8/uIC7+cT
Q+tuAya6H0ReZISx5sEEZC8zfx4fA2Gs53qWsN+U9W1FB1GGaWC2k2tG1+KXwD3N
9UJLdxkCgYBB96dsfT7nXmy0JLUz0rQ4umBje6H5uvuaevWdVMEptHB+O7+6CAse
OVwqlFLQ4QC7s4/P9FQwfr/0uMRInB1aC043Haa1LbiRcRIlSuBDUezK5xidUbz+
uB/ABkwwEuqW3Ns1+QieJyyfoNYKZ2v0RtYxBuieKOpUCm3oNFZRWg==
-----END RSA PRIVATE KEY-----


@ -1,25 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIEMjCCAhoCFCOrAvLNRztbFFcN0zrCQXoj73cHMA0GCSqGSIb3DQEBCwUAMDQx
EjAQBgNVBAoMCUVNUVggVGVzdDEeMBwGA1UEAwwVQ2VydGlmaWNhdGUgQXV0aG9y
aXR5MB4XDTIzMDMxNzA5MzgzMVoXDTMzMDMxNDA5MzgzMVowdzELMAkGA1UEBhMC
U0UxEjAQBgNVBAgMCVN0b2NraG9sbTESMBAGA1UEBwwJU3RvY2tob2xtMRIwEAYD
VQQKDAlNeU9yZ05hbWUxGDAWBgNVBAsMD015U2VydmljZUNsaWVudDESMBAGA1UE
AwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzs74
tdftT7xGMGXQSoX/nnFkFAOjNtEVOI3bChzR+w6Xwo8ZOUiOuOjynKvsJeltdmc0
L+cbHZh7j+aHuAqVYxavqaqhFneF0f03t17qju9AixoVJXgNT3ru56aZFa6Ov6Nh
fZfRirGnbNrg2RhuNeYZ4TYLH7iMR36exNFP83glXwXMinMd1tsHL7xHLf3KjCbk
usA5ncFWcpIUtpuWVn9aAE402dN7BJWfAbkQ4Y3VToR1P/T+W6WBldv0i2WlNbfi
uAzuapA3EzJwoyTrG2Qyz7EtXM8XZdOZ6oJmW4s7c4V/FBT5knNtmXTt78xBBlIP
Fas5BAJIeV4eADx9MwIDAQABMA0GCSqGSIb3DQEBCwUAA4ICAQBHgfJgMjTgWZXG
eyzIVxaqzWTLxrT7zPy09Mw4qsAl1TfWg9/r8nuskq4bjBQuKm0k9H0HQXz//eFC
Qn85qTHyAmZok6c4ljO2P+kTIl3nkKk5zudmeCTy3W9YBdyWvDXQ/GhbywIfO+1Y
fYA82I5rXVg4c9fUVTNczUFyDNcZzoJoqCS8jwFDtNR0N/fptJN14j8pnYvNV+4c
hZ+pcnhSoz7dD8WjyYCc/QCajJdTyb15i072HxuGmhwltjnwIE/2xfeXCCeUTzsJ
8h4/ABRu9VEqjqDQHepXIflYuVhU38SL0f4ly7neMXmytAbXwGLVM+ME81HG60Bw
8hkfSwKBbEkhUmD6+V1bdUz14I6HjWJt/INtFU+O+MYZbIFt4ep9GKLV3nk97CyL
fwDv5b4WXdC68iWMZqSrADAXr+VG3DgHqpNItj0XmhY6ihmt5tA3Z6IZJj45TShA
vRqTCx3Hf6EO3zf4KCrzaPSSSfVLnGKftA/6oz3bl8EK2e2M44lOspRk4l9k+iBR
sfHPmpiWY0hIiFtd3LD/uGDSBcGkKjU/fLvJZXJpVXwmT9pmK9LzkAPOK1rr97e9
esHqwe1bo3z7IdeREZ0wdxqGL3BNpm4f1NaIzV/stX+vScau0AyFYXzumjeBIpKa
Gt0A+dZnUfWG6qn5NiRENXxFQSppaA==
-----END CERTIFICATE-----


@ -1,24 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIEBDCCAeygAwIBAgIJAKTICmq1Lg6cMA0GCSqGSIb3DQEBCwUAMDQxEjAQBgNV
BAoMCUVNUVggVGVzdDEeMBwGA1UEAwwVQ2VydGlmaWNhdGUgQXV0aG9yaXR5MB4X
DTIxMTIzMDA4NDExMloXDTQ5MDUxNzA4NDExMlowKzESMBAGA1UECgwJRU1RWCBU
ZXN0MRUwEwYDVQQDDAxhdXRobi1zZXJ2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IB
DwAwggEKAoIBAQCdCXfM/j28fsi3vhxmHoy2UUz/VDcTJudadVNqTOQZPuqW5lex
309yYcZqThfT2ZSVIH92ags6aNxr4Uv9vGTkPW22kAiK41imeAj+HLmvByxqfv+s
JlB5YcHXMGQCcFZOaOtabuJ0nmqxO0OWU9CIeE5PWlnVyWM1cvYxtQQLg4BSP8X/
ohFBERaBn0yU0IYTFxo+9A1LB5utnWiv7A/5fZVFBkAdrGMPxcuEF49oynbW4WpN
kn1jY+89BrBvLk+lMZCTI2dRnE5tqt+kD6Ejh3eWRiONoS6sm9rIrH/OMEqEXhfi
bgZZu8rL0o1YL7SATJERBNuvcJpQl7We5UCbAgMBAAGjIjAgMAsGA1UdDwQEAwIF
oDARBglghkgBhvhCAQEEBAMCBkAwDQYJKoZIhvcNAQELBQADggIBAAydWowM0rS5
CgrVsuSUnUntXkIIu9YziI8mKWm8K5sp8lqtVovitVFuG19Y3Ve8r2pIibpBvOKZ
ocr+uUgrZrGGXU3x9/p+miTcHm5M9guPzmN6JbKZ65yIAN9po5CjrczFShqxIQly
ye+5C7/Metf6KM43lLKefDkUgccASKa4KhvP84/Jc8jEKP2cQ5I84yaRyeJgDnJ0
XY6Nu1yn1BLrw9dq5ZcoBYR94aVPnSR63zE58cJ99r8AOSk/Tl7phKNAS7mP94NH
RVTW4R/xGMT/iVz4x9exfeVfAX5fVAPIOXV5VKownmM/WfhICHxNLi++m9nO9sn6
tHT+3ViYUbilhcPlXVgTiVWJrFuoxbPTON4yIxgT3VQz47Oqnx37jeufbb7bGiJW
H/GEtn5pDPbiHbu6j+GK98uTN7OoTM5L81nbct6evEz6sK2T5Ve5Ro2IWWeG7xlB
3+FIK1pzl5OHpLJTED/DKNxt1qlhnjTGSz902fBORYvTCTdpSfGnrUMjJOP0rGHH
81WFMfc6ucsN4zGXVHHUNuNaUp1HprUy4g7ipTXkRn9oyOXkYKMGMX9T2aUeEnXO
U9ij61TrGA+lZENsbFKD/UcLRr4GY21TKj9dKjKyIoru/qDHrtJkSObQlcgOwS7D
ctaGcj4es0ByT2PX/mDqJoMip3E4E11O
-----END CERTIFICATE-----


@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAnQl3zP49vH7It74cZh6MtlFM/1Q3EybnWnVTakzkGT7qluZX
sd9PcmHGak4X09mUlSB/dmoLOmjca+FL/bxk5D1ttpAIiuNYpngI/hy5rwcsan7/
rCZQeWHB1zBkAnBWTmjrWm7idJ5qsTtDllPQiHhOT1pZ1cljNXL2MbUEC4OAUj/F
/6IRQREWgZ9MlNCGExcaPvQNSwebrZ1or+wP+X2VRQZAHaxjD8XLhBePaMp21uFq
TZJ9Y2PvPQawby5PpTGQkyNnUZxObarfpA+hI4d3lkYjjaEurJvayKx/zjBKhF4X
4m4GWbvKy9KNWC+0gEyREQTbr3CaUJe1nuVAmwIDAQABAoIBABl6dMZ8pXWUuGof
XSowYLIf5Lc0aa8gy76AdKU1jniOHa+X9bh1O8WaGYAb5X/IuHOtjyCeOe4jH0gd
iJ/FVjU1xjwtiEVId5SiuwrHjFTafBlXO5IpsTrQYovQXRmMMmSMX0sP3IwBO9w/
ekrElHvf0QzM4vBtuTvtyAXukZZwYWvdJK8GXc7NE0xTNSe0C+f2MS0ZAuWP7g8K
1WgRO+8pb11sK4CAl+yD6Lyf7JVlouTcsYdeRF5o7yuEQ2qlz3+vxPwfMIpONKel
kK5nUUc8OGhHQpkO+ZZXh5fIWkaKFJKMzoAh8pj2HFAfK93s64f3LHu75sum05Gc
RUCSafkCgYEAympLWe+cmq8XyUqQnsQ7hHfc5VKa33YDTEkO/ZncnnNA/k4yH+r7
LGgMZD1zC5R3pRFEET9pUOrlx7Z489Bc1Z9Y+9dDpwg9DRrvkt1/MpxOI2Lk8tiJ
lLU/uRTQXQmHFoEBg6i2CDIZyP/qccCS0zIcMQJDq6WaTfXyJ5k0LOUCgYEAxpvi
l7t9RPIQXTEfWiD3iN11QwZYjZ3c6CfW2iaucPYJZDclk6BO1Chdw55cELbfj4bh
7lMxDYpyOQrEwIXYk1a8IY6VOFFMmOQfCfECm5XNTvz//5vYxYlB8ERdhM7opAYG
YsAyR/+BVEyhG6NXy4sh5Q49YgfrjVMdYmBSX38CgYEAx2BF0lNzNOXsjwgURV5S
pZuPCI8CH8PVYcnAq0lnhudNiHArbUb+mvHt6rqgXDKkWwITws1sBhkptjrlDnsZ
Rg3MD1wsthUmVYdHnajxBj/xs2dQzmc9tS2Gk96Nkma1GhR+EloW2yHGRjbVjbA6
ry53mEp7r1HSGKJ+IEUGoIUCgYEAiRS7FyNPWTECXnAzRZAPiiXgc7yDjmtxN8OX
pcahDFKlNMhjZTt2bTTXUteQj/DI6VWdx1MgPkpagEiQeJlpXHi3LSoukEp85eI+
EiyJMj35ERXK0/ALdHxCSMXHDo2JQPzvl2U0z0DpUPf7Ewpw5IpJgMGNWIZC7K57
T5VQBZ0CgYAcAG1KYZYD+Sb14jJLSD6JqnJBrcv8e6wEAnA+0vuEv09FfgeB4MNZ
FwRR8FQDL8V2QcvsauwcwNOf9m9K8goCV9YKTcFw5Tl0m3uYzCIDVdyZI85NgBS0
m//eODmUYg1gMOi9LfnKgtrW7EURrCNj3Pgt87g7WDiSY+qGB0IzzQ==
-----END RSA PRIVATE KEY-----


@ -1,678 +0,0 @@
<?xml version="1.0"?>
<!--
NOTE: User and query level settings are set up in "users.xml" file.
If you have accidentally specified user-level settings here, the server won't start.
You can either move the settings to the right place inside "users.xml" file
or add <skip_check_for_incorrect_settings>1</skip_check_for_incorrect_settings> here.
-->
<yandex>
<logger>
<!-- Possible levels: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105 -->
<level>trace</level>
<log>/var/log/clickhouse-server/clickhouse-server.log</log>
<errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
<size>1000M</size>
<count>10</count>
<!-- <console>1</console> --> <!-- Default behavior is autodetection (log to console if not daemon mode and is tty) -->
<!-- Per level overrides (legacy):
For example to suppress logging of the ConfigReloader you can use:
NOTE: levels.logger is reserved, see below.
-->
<!--
<levels>
<ConfigReloader>none</ConfigReloader>
</levels>
-->
<!-- Per level overrides:
For example to suppress logging of the RBAC for default user you can use:
(But please note that the logger name may change from version to version, even after a minor upgrade)
-->
<!--
<levels>
<logger>
<name>ContextAccess (default)</name>
<level>none</level>
</logger>
<logger>
<name>DatabaseOrdinary (test)</name>
<level>none</level>
</logger>
</levels>
-->
</logger>
<send_crash_reports>
<!-- Changing <enabled> to true allows sending crash reports to -->
<!-- the ClickHouse core developers team via Sentry https://sentry.io -->
<!-- Doing so at least in pre-production environments is highly appreciated -->
<enabled>false</enabled>
<!-- Change <anonymize> to true if you don't feel comfortable attaching the server hostname to the crash report -->
<anonymize>false</anonymize>
<!-- Default endpoint should be changed to different Sentry DSN only if you have -->
<!-- some in-house engineers or hired consultants who're going to debug ClickHouse issues for you -->
<endpoint>https://6f33034cfe684dd7a3ab9875e57b1c8d@o388870.ingest.sentry.io/5226277</endpoint>
</send_crash_reports>
<!--display_name>production</display_name--> <!-- It is the name that will be shown in the client -->
<http_port>8123</http_port>
<tcp_port>9000</tcp_port>
<mysql_port>9004</mysql_port>
<!-- For HTTPS and SSL over native protocol. -->
<!--
<https_port>8443</https_port>
<tcp_port_secure>9440</tcp_port_secure>
-->
<!-- Used with https_port and tcp_port_secure. Full ssl options list: https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h#L71 -->
<openSSL>
<server> <!-- Used for https server AND secure tcp port -->
<!-- openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt -->
<certificateFile>/etc/clickhouse-server/server.crt</certificateFile>
<privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile>
<!-- openssl dhparam -out /etc/clickhouse-server/dhparam.pem 4096 -->
<dhParamsFile>/etc/clickhouse-server/dhparam.pem</dhParamsFile>
<verificationMode>none</verificationMode>
<loadDefaultCAFile>true</loadDefaultCAFile>
<cacheSessions>true</cacheSessions>
<disableProtocols>sslv2,sslv3</disableProtocols>
<preferServerCiphers>true</preferServerCiphers>
</server>
<client> <!-- Used for connecting to https dictionary source and secured Zookeeper communication -->
<loadDefaultCAFile>true</loadDefaultCAFile>
<cacheSessions>true</cacheSessions>
<disableProtocols>sslv2,sslv3</disableProtocols>
<preferServerCiphers>true</preferServerCiphers>
<!-- Use for self-signed: <verificationMode>none</verificationMode> -->
<invalidCertificateHandler>
<!-- Use for self-signed: <name>AcceptCertificateHandler</name> -->
<name>RejectCertificateHandler</name>
</invalidCertificateHandler>
</client>
</openSSL>
<!-- Default root page on http[s] server. For example load UI from https://tabix.io/ when opening http://localhost:8123 -->
<!--
<http_server_default_response><![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]></http_server_default_response>
-->
<!-- Port for communication between replicas. Used for data exchange. -->
<interserver_http_port>9009</interserver_http_port>
<!-- Hostname that is used by other replicas to request this server.
If not specified, it is determined analogously to the 'hostname -f' command.
This setting could be used to switch replication to another network interface.
-->
<!--
<interserver_http_host>example.yandex.ru</interserver_http_host>
-->
<!-- Listen specified host. use :: (wildcard IPv6 address), if you want to accept connections both with IPv4 and IPv6 from everywhere. -->
<!-- <listen_host>::</listen_host> -->
<!-- Same for hosts with disabled ipv6: -->
<!-- <listen_host>0.0.0.0</listen_host> -->
<!-- Default values - try listen localhost on ipv4 and ipv6: -->
<!--
<listen_host>::1</listen_host>
<listen_host>127.0.0.1</listen_host>
-->
<!-- Don't exit if IPv6 or IPv4 is unavailable but a listen_host with this protocol is specified -->
<!-- <listen_try>0</listen_try> -->
<!-- Allow listen on same address:port -->
<!-- <listen_reuse_port>0</listen_reuse_port> -->
<!-- <listen_backlog>64</listen_backlog> -->
<max_connections>4096</max_connections>
<keep_alive_timeout>3</keep_alive_timeout>
<!-- Maximum number of concurrent queries. -->
<max_concurrent_queries>100</max_concurrent_queries>
<!-- Maximum memory usage (resident set size) for server process.
Zero value or unset means default. Default is "max_server_memory_usage_to_ram_ratio" of available physical RAM.
If the value is larger than "max_server_memory_usage_to_ram_ratio" of available physical RAM, it will be cut down.
The constraint is checked on query execution time.
If a query tries to allocate memory and the current memory usage plus allocation is greater
than specified threshold, exception will be thrown.
It is not practical to set this constraint to small values like just a few gigabytes,
because memory allocator will keep this amount of memory in caches and the server will deny service of queries.
-->
<max_server_memory_usage>0</max_server_memory_usage>
<!-- Maximum number of threads in the Global thread pool.
This will default to a maximum of 10000 threads if not specified.
This setting will be useful in scenarios where there are a large number
of distributed queries that are running concurrently but are idling most
of the time, in which case a higher number of threads might be required.
-->
<max_thread_pool_size>10000</max_thread_pool_size>
<!-- On memory constrained environments you may have to set this to value larger than 1.
-->
<max_server_memory_usage_to_ram_ratio>10</max_server_memory_usage_to_ram_ratio>
<!-- Simple server-wide memory profiler. Collect a stack trace at every peak allocation step (in bytes).
Data will be stored in system.trace_log table with query_id = empty string.
Zero means disabled.
-->
<total_memory_profiler_step>4194304</total_memory_profiler_step>
<!-- Collect random allocations and deallocations and write them into system.trace_log with 'MemorySample' trace_type.
The probability is for every alloc/free regardless to the size of the allocation.
Note that sampling happens only when the amount of untracked memory exceeds the untracked memory limit,
which is 4 MiB by default but can be lowered if 'total_memory_profiler_step' is lowered.
You may want to set 'total_memory_profiler_step' to 1 for extra fine grained sampling.
-->
<total_memory_tracker_sample_probability>0</total_memory_tracker_sample_probability>
<!-- Set limit on number of open files (default: maximum). This setting makes sense on Mac OS X because getrlimit() fails to retrieve
correct maximum value. -->
<!-- <max_open_files>262144</max_open_files> -->
<!-- Size of cache of uncompressed blocks of data, used in tables of MergeTree family.
In bytes. Cache is single for server. Memory is allocated only on demand.
Cache is used when 'use_uncompressed_cache' user setting turned on (off by default).
Uncompressed cache is advantageous only for very short queries and in rare cases.
-->
<uncompressed_cache_size>8589934592</uncompressed_cache_size>
<!-- Approximate size of mark cache, used in tables of MergeTree family.
In bytes. Cache is single for server. Memory is allocated only on demand.
You should not lower this value.
-->
<mark_cache_size>5368709120</mark_cache_size>
<!-- Path to data directory, with trailing slash. -->
<path>/var/lib/clickhouse/</path>
<!-- Path to temporary data for processing hard queries. -->
<tmp_path>/var/lib/clickhouse/tmp/</tmp_path>
<!-- Policy from the <storage_configuration> for the temporary files.
If not set <tmp_path> is used, otherwise <tmp_path> is ignored.
Notes:
- move_factor is ignored
- keep_free_space_bytes is ignored
- max_data_part_size_bytes is ignored
- you must have exactly one volume in that policy
-->
<!-- <tmp_policy>tmp</tmp_policy> -->
<!-- Directory with user provided files that are accessible by 'file' table function. -->
<user_files_path>/var/lib/clickhouse/user_files/</user_files_path>
<!-- Path to folder where users and roles created by SQL commands are stored. -->
<access_control_path>/var/lib/clickhouse/access/</access_control_path>
<!-- Path to configuration file with users, access rights, profiles of settings, quotas. -->
<users_config>/etc/clickhouse-server/users.xml</users_config>
<!-- Default profile of settings. -->
<default_profile>default</default_profile>
<!-- System profile of settings. These settings are used by internal processes (Buffer storage, Distributed DDL worker and so on). -->
<!-- <system_profile>default</system_profile> -->
<!-- Default database. -->
<default_database>default</default_database>
<!-- Server time zone could be set here.
Time zone is used when converting between String and DateTime types,
when printing DateTime in text formats and parsing DateTime from text,
it is used in date and time related functions, if specific time zone was not passed as an argument.
Time zone is specified as identifier from IANA time zone database, like UTC or Africa/Abidjan.
If not specified, system time zone at server startup is used.
Please note, that server could display time zone alias instead of specified name.
Example: W-SU is an alias for Europe/Moscow and Zulu is an alias for UTC.
-->
<!-- <timezone>Europe/Moscow</timezone> -->
<!-- You can specify umask here (see "man umask"). Server will apply it on startup.
Number is always parsed as octal. Default umask is 027 (other users cannot read logs, data files, etc; group can only read).
-->
<!-- <umask>022</umask> -->
<!-- Perform mlockall after startup to lower first queries latency
and to prevent clickhouse executable from being paged out under high IO load.
Enabling this option is recommended but will lead to increased startup time for up to a few seconds.
-->
<mlock_executable>true</mlock_executable>
<!-- Configuration of clusters that could be used in Distributed tables.
https://clickhouse.tech/docs/en/operations/table_engines/distributed/
-->
<remote_servers incl="clickhouse_remote_servers" >
<!-- Test only shard config for testing distributed storage -->
<test_shard_localhost>
<shard>
<!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
<!-- <internal_replication>false</internal_replication> -->
<!-- Optional. Shard weight when writing data. Default: 1. -->
<!-- <weight>1</weight> -->
<replica>
<host>localhost</host>
<port>9000</port>
<!-- Optional. Priority of the replica for load_balancing. Default: 1 (less value has more priority). -->
<!-- <priority>1</priority> -->
</replica>
</shard>
</test_shard_localhost>
<test_cluster_two_shards_localhost>
<shard>
<replica>
<host>localhost</host>
<port>9000</port>
</replica>
</shard>
<shard>
<replica>
<host>localhost</host>
<port>9000</port>
</replica>
</shard>
</test_cluster_two_shards_localhost>
<test_cluster_two_shards>
<shard>
<replica>
<host>127.0.0.1</host>
<port>9000</port>
</replica>
</shard>
<shard>
<replica>
<host>127.0.0.2</host>
<port>9000</port>
</replica>
</shard>
</test_cluster_two_shards>
<test_shard_localhost_secure>
<shard>
<replica>
<host>localhost</host>
<port>9440</port>
<secure>1</secure>
</replica>
</shard>
</test_shard_localhost_secure>
<test_unavailable_shard>
<shard>
<replica>
<host>localhost</host>
<port>9000</port>
</replica>
</shard>
<shard>
<replica>
<host>localhost</host>
<port>1</port>
</replica>
</shard>
</test_unavailable_shard>
</remote_servers>
<!-- The list of hosts allowed to use in URL-related storage engines and table functions.
If this section is not present in configuration, all hosts are allowed.
-->
<remote_url_allow_hosts>
<!-- Host should be specified exactly as in URL. The name is checked before DNS resolution.
Example: "yandex.ru", "yandex.ru." and "www.yandex.ru" are different hosts.
If port is explicitly specified in URL, the host:port is checked as a whole.
If host specified here without port, any port with this host allowed.
"yandex.ru" -> "yandex.ru:443", "yandex.ru:80" etc. is allowed, but "yandex.ru:80" -> only "yandex.ru:80" is allowed.
If the host is specified as IP address, it is checked as specified in URL. Example: "[2a02:6b8:a::a]".
If there are redirects and support for redirects is enabled, every redirect (the Location field) is checked.
-->
<!-- Regular expression can be specified. RE2 engine is used for regexps.
Regexps are not aligned: don't forget to add ^ and $. Also don't forget to escape dot (.) metacharacter
(forgetting to do so is a common source of error).
-->
</remote_url_allow_hosts>
<!-- If an element has an 'incl' attribute, the corresponding substitution from another file will be used as its value.
By default, the path to the file with substitutions is /etc/metrika.xml. It can be changed via the 'include_from' element in the config.
Values for substitutions are specified in /yandex/name_of_substitution elements in that file.
-->
<!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
Optional. If you don't use replicated tables, you could omit that.
See https://clickhouse.yandex/docs/en/table_engines/replication/
-->
<zookeeper incl="zookeeper-servers" optional="true" />
<!-- Substitutions for parameters of replicated tables.
Optional. If you don't use replicated tables, you could omit that.
See https://clickhouse.yandex/docs/en/table_engines/replication/#creating-replicated-tables
-->
<macros incl="macros" optional="true" />
<!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->
<builtin_dictionaries_reload_interval>3600</builtin_dictionaries_reload_interval>
<!-- Maximum session timeout, in seconds. Default: 3600. -->
<max_session_timeout>3600</max_session_timeout>
<!-- Default session timeout, in seconds. Default: 60. -->
<default_session_timeout>60</default_session_timeout>
<!-- Sending data to Graphite for monitoring. Several sections can be defined. -->
<!--
interval - send every X second
root_path - prefix for keys
hostname_in_path - append hostname to root_path (default = true)
metrics - send data from table system.metrics
events - send data from table system.events
asynchronous_metrics - send data from table system.asynchronous_metrics
-->
<!--
<graphite>
<host>localhost</host>
<port>42000</port>
<timeout>0.1</timeout>
<interval>60</interval>
<root_path>one_min</root_path>
<hostname_in_path>true</hostname_in_path>
<metrics>true</metrics>
<events>true</events>
<events_cumulative>false</events_cumulative>
<asynchronous_metrics>true</asynchronous_metrics>
</graphite>
<graphite>
<host>localhost</host>
<port>42000</port>
<timeout>0.1</timeout>
<interval>1</interval>
<root_path>one_sec</root_path>
<metrics>true</metrics>
<events>true</events>
<events_cumulative>false</events_cumulative>
<asynchronous_metrics>false</asynchronous_metrics>
</graphite>
-->
<!-- Serve an endpoint for Prometheus monitoring. -->
<!--
endpoint - metrics path (relative to root, starting with "/")
port - port on which to serve. If not defined or 0, http_port is used
metrics - send data from table system.metrics
events - send data from table system.events
asynchronous_metrics - send data from table system.asynchronous_metrics
status_info - send data from different component from CH, ex: Dictionaries status
-->
<!--
<prometheus>
<endpoint>/metrics</endpoint>
<port>9363</port>
<metrics>true</metrics>
<events>true</events>
<asynchronous_metrics>true</asynchronous_metrics>
<status_info>true</status_info>
</prometheus>
-->
<!-- Query log. Used only for queries with setting log_queries = 1. -->
<query_log>
<!-- Which table to insert data into. If the table does not exist, it will be created.
When the query log structure changes after a system update,
the old table will be renamed and a new table will be created automatically.
-->
<database>system</database>
<table>query_log</table>
<!--
PARTITION BY expr https://clickhouse.yandex/docs/en/table_engines/custom_partitioning_key/
Example:
event_date
toMonday(event_date)
toYYYYMM(event_date)
toStartOfHour(event_time)
-->
<partition_by>toYYYYMM(event_date)</partition_by>
<!-- Instead of partition_by, you can provide full engine expression (starting with ENGINE = ) with parameters,
Example: <engine>ENGINE = MergeTree PARTITION BY toYYYYMM(event_date) ORDER BY (event_date, event_time) SETTINGS index_granularity = 1024</engine>
-->
<!-- Interval of flushing data. -->
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
</query_log>
<!-- Trace log. Stores stack traces collected by query profilers.
See query_profiler_real_time_period_ns and query_profiler_cpu_time_period_ns settings. -->
<trace_log>
<database>system</database>
<table>trace_log</table>
<partition_by>toYYYYMM(event_date)</partition_by>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
</trace_log>
<!-- Query thread log. Has information about all threads participated in query execution.
Used only for queries with setting log_query_threads = 1. -->
<query_thread_log>
<database>system</database>
<table>query_thread_log</table>
<partition_by>toYYYYMM(event_date)</partition_by>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
</query_thread_log>
<!-- Uncomment if use part log.
Part log contains information about all actions with parts in MergeTree tables (creation, deletion, merges, downloads).
<part_log>
<database>system</database>
<table>part_log</table>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
</part_log>
-->
<!-- Uncomment to write text log into table.
Text log contains all information from usual server log but stores it in structured and efficient way.
The level of the messages that go to the table can be limited (<level>); if not specified, all messages will go to the table.
<text_log>
<database>system</database>
<table>text_log</table>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<level></level>
</text_log>
-->
<!-- Metric log contains rows with current values of ProfileEvents, CurrentMetrics collected with "collect_interval_milliseconds" interval. -->
<metric_log>
<database>system</database>
<table>metric_log</table>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<collect_interval_milliseconds>1000</collect_interval_milliseconds>
</metric_log>
<!--
Asynchronous metric log contains values of metrics from
system.asynchronous_metrics.
-->
<asynchronous_metric_log>
<database>system</database>
<table>asynchronous_metric_log</table>
<!--
Asynchronous metrics are updated once a minute, so there is
no need to flush more often.
-->
<flush_interval_milliseconds>60000</flush_interval_milliseconds>
</asynchronous_metric_log>
<!-- Parameters for embedded dictionaries, used in Yandex.Metrica.
See https://clickhouse.yandex/docs/en/dicts/internal_dicts/
-->
<!-- Path to file with region hierarchy. -->
<!-- <path_to_regions_hierarchy_file>/opt/geo/regions_hierarchy.txt</path_to_regions_hierarchy_file> -->
<!-- Path to directory with files containing names of regions -->
<!-- <path_to_regions_names_files>/opt/geo/</path_to_regions_names_files> -->
<!-- Configuration of external dictionaries. See:
https://clickhouse.yandex/docs/en/dicts/external_dicts/
-->
<dictionaries_config>*_dictionary.xml</dictionaries_config>
<!-- Uncomment if you want data to be compressed 30-100% better.
Don't do that if you just started using ClickHouse.
-->
<compression incl="clickhouse_compression">
<!--
<!- - Set of variants. Checked in order. Last matching case wins. If nothing matches, lz4 will be used. - ->
<case>
<!- - Conditions. All must be satisfied. Some conditions may be omitted. - ->
<min_part_size>10000000000</min_part_size> <!- - Min part size in bytes. - ->
<min_part_size_ratio>0.01</min_part_size_ratio> <!- - Min size of part relative to whole table size. - ->
<!- - What compression method to use. - ->
<method>zstd</method>
</case>
-->
</compression>
<!-- Allow to execute distributed DDL queries (CREATE, DROP, ALTER, RENAME) on cluster.
Works only if ZooKeeper is enabled. Comment it if such functionality isn't required. -->
<distributed_ddl>
<!-- Path in ZooKeeper to queue with DDL queries -->
<path>/clickhouse/task_queue/ddl</path>
<!-- Settings from this profile will be used to execute DDL queries -->
<!-- <profile>default</profile> -->
</distributed_ddl>
<!-- Settings to fine tune MergeTree tables. See documentation in source code, in MergeTreeSettings.h -->
<!--
<merge_tree>
<max_suspicious_broken_parts>5</max_suspicious_broken_parts>
</merge_tree>
-->
<!-- Protection from accidental DROP.
If the size of a MergeTree table is greater than max_table_size_to_drop (in bytes), the table cannot be dropped with any DROP query.
If you want to delete one table and don't want to change the clickhouse-server config, you can create the special file <clickhouse-path>/flags/force_drop_table and run DROP once.
By default max_table_size_to_drop is 50GB; max_table_size_to_drop=0 allows dropping any table.
The same for max_partition_size_to_drop.
Uncomment to disable protection.
-->
<!-- <max_table_size_to_drop>0</max_table_size_to_drop> -->
<!-- <max_partition_size_to_drop>0</max_partition_size_to_drop> -->
<!-- Example of parameters for GraphiteMergeTree table engine -->
<graphite_rollup_example>
<pattern>
<regexp>click_cost</regexp>
<function>any</function>
<retention>
<age>0</age>
<precision>3600</precision>
</retention>
<retention>
<age>86400</age>
<precision>60</precision>
</retention>
</pattern>
<default>
<function>max</function>
<retention>
<age>0</age>
<precision>60</precision>
</retention>
<retention>
<age>3600</age>
<precision>300</precision>
</retention>
<retention>
<age>86400</age>
<precision>3600</precision>
</retention>
</default>
</graphite_rollup_example>
<!-- Directory in <clickhouse-path> containing schema files for various input formats.
The directory will be created if it doesn't exist.
-->
<format_schema_path>/var/lib/clickhouse/format_schemas/</format_schema_path>
<!-- Uncomment to use query masking rules.
name - name for the rule (optional)
regexp - RE2 compatible regular expression (mandatory)
replace - substitution string for sensitive data (optional, by default - six asterisks)
<query_masking_rules>
<rule>
<name>hide SSN</name>
<regexp>\b\d{3}-\d{2}-\d{4}\b</regexp>
<replace>000-00-0000</replace>
</rule>
</query_masking_rules>
-->
<!-- Uncomment to use custom http handlers.
rules are checked from top to bottom, first match runs the handler
url - to match request URL, you can use 'regex:' prefix to use regex match(optional)
methods - to match request method, you can use commas to separate multiple method matches(optional)
headers - to match request headers, match each child element(child element name is header name), you can use 'regex:' prefix to use regex match(optional)
handler is request handler
type - supported types: static, dynamic_query_handler, predefined_query_handler
query - use with predefined_query_handler type, executes query when the handler is called
query_param_name - use with dynamic_query_handler type, extracts and executes the value corresponding to the <query_param_name> value in HTTP request params
status - use with static type, response status code
content_type - use with static type, response content-type
response_content - use with static type; the response content sent to the client. When using the 'file://' or 'config://' prefix, the content is read from the file or configuration and sent to the client.
<http_handlers>
<rule>
<url>/</url>
<methods>POST,GET</methods>
<headers><pragma>no-cache</pragma></headers>
<handler>
<type>dynamic_query_handler</type>
<query_param_name>query</query_param_name>
</handler>
</rule>
<rule>
<url>/predefined_query</url>
<methods>POST,GET</methods>
<handler>
<type>predefined_query_handler</type>
<query>SELECT * FROM system.settings</query>
</handler>
</rule>
<rule>
<handler>
<type>static</type>
<status>200</status>
<content_type>text/plain; charset=UTF-8</content_type>
<response_content>config://http_server_default_response</response_content>
</handler>
</rule>
</http_handlers>
-->
<!-- Uncomment to disable ClickHouse internal DNS caching. -->
<!-- <disable_internal_dns_cache>1</disable_internal_dns_cache> -->
</yandex>


@ -1,110 +0,0 @@
<?xml version="1.0"?>
<yandex>
<!-- Profiles of settings. -->
<profiles>
<!-- Default settings. -->
<default>
<!-- Maximum memory usage for processing single query, in bytes. -->
<max_memory_usage>10000000000</max_memory_usage>
<!-- Use cache of uncompressed blocks of data. Meaningful only for processing many of very short queries. -->
<use_uncompressed_cache>0</use_uncompressed_cache>
<!-- How to choose between replicas during distributed query processing.
random - choose random replica from set of replicas with minimum number of errors
nearest_hostname - from set of replicas with minimum number of errors, choose replica
with minimum number of different symbols between replica's hostname and local hostname
(Hamming distance).
in_order - first live replica is chosen in specified order.
first_or_random - if the first replica has a higher number of errors, pick a random one from the replicas with the minimum number of errors.
-->
<load_balancing>random</load_balancing>
</default>
<!-- Profile that allows only read queries. -->
<readonly>
<readonly>1</readonly>
</readonly>
</profiles>
<!-- Users and ACL. -->
<users>
<!-- If user name was not specified, 'default' user is used. -->
<default>
<!-- Password could be specified in plaintext or in SHA256 (in hex format).
If you want to specify password in plaintext (not recommended), place it in 'password' element.
Example: <password>qwerty</password>.
Password could be empty.
If you want to specify SHA256, place it in 'password_sha256_hex' element.
Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
Restrictions of SHA256: impossibility to connect to ClickHouse using MySQL JS client (as of July 2019).
If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>
How to generate decent password:
Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
The first line will be the password and the second the corresponding SHA256.
How to generate double SHA1:
Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
The first line will be the password and the second the corresponding double SHA1.
-->
<password>public</password>
<!-- List of networks with open access.
To open access from everywhere, specify:
<ip>::/0</ip>
To open access only from localhost, specify:
<ip>::1</ip>
<ip>127.0.0.1</ip>
Each element of list has one of the following forms:
<ip> IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
<host> Hostname. Example: server01.yandex.ru.
To check access, DNS query is performed, and all received addresses compared to peer address.
<host_regexp> Regular expression for host names. Example, ^server\d\d-\d\d-\d\.yandex\.ru$
To check access, DNS PTR query is performed for peer address and then regexp is applied.
Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
It is strongly recommended that the regexp ends with $
All results of DNS requests are cached till server restart.
-->
<networks incl="networks" replace="replace">
<ip>::/0</ip>
</networks>
<!-- Settings profile for user. -->
<profile>default</profile>
<!-- Quota for user. -->
<quota>default</quota>
<!-- User can create other users and grant rights to them. -->
<!-- <access_management>1</access_management> -->
</default>
</users>
<!-- Quotas. -->
<quotas>
<!-- Name of quota. -->
<default>
<!-- Limits for time interval. You could specify many intervals with different limits. -->
<interval>
<!-- Length of interval. -->
<duration>3600</duration>
<!-- No limits. Just calculate resource usage for time interval. -->
<queries>0</queries>
<errors>0</errors>
<result_rows>0</result_rows>
<read_rows>0</read_rows>
<execution_time>0</execution_time>
</interval>
</default>
</quotas>
</yandex>


@ -1,7 +0,0 @@
MONGO_USERNAME=emqx
MONGO_PASSWORD=passw0rd
MONGO_AUTHSOURCE=admin
# See "Environment Variables" @ https://hub.docker.com/_/mongo
MONGO_INITDB_ROOT_USERNAME=${MONGO_USERNAME}
MONGO_INITDB_ROOT_PASSWORD=${MONGO_PASSWORD}


@ -1,24 +0,0 @@
version: '3.9'
services:
  azurite:
    container_name: azurite
    image: mcr.microsoft.com/azure-storage/azurite:3.30.0
    restart: always
    expose:
      - "10000"
    # ports:
    #   - "10000:10000"
    networks:
      - emqx_bridge
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:10000"]
      interval: 30s
      timeout: 5s
      retries: 4
    command:
      - azurite-blob
      - "--blobHost"
      - 0.0.0.0
      - "-d"
      - debug.log


@ -1,38 +0,0 @@
version: '3.9'

x-cassandra: &cassandra
  restart: always
  image: public.ecr.aws/docker/library/cassandra:${CASSANDRA_TAG:-3.11}
  environment:
    CASSANDRA_BROADCAST_ADDRESS: "1.2.3.4"
    CASSANDRA_RPC_ADDRESS: "0.0.0.0"
    HEAP_NEWSIZE: "128M"
    MAX_HEAP_SIZE: "2048M"
  #ports:
  #  - "9042:9042"
  #  - "9142:9142"
  command:
    - /bin/bash
    - -c
    - |
      /opt/cassandra/bin/cassandra -f -R > /cassandra.log &
      /opt/cassandra/bin/cqlsh -u cassandra -p cassandra -e "CREATE KEYSPACE mqtt WITH REPLICATION = { 'class':'SimpleStrategy','replication_factor':1};"
      while [[ $$? -ne 0 ]];do sleep 5; /opt/cassandra/bin/cqlsh -u cassandra -p cassandra -e "CREATE KEYSPACE mqtt WITH REPLICATION = { 'class':'SimpleStrategy','replication_factor':1};"; done
      /opt/cassandra/bin/cqlsh -u cassandra -p cassandra -e "describe keyspaces;"
      tail -f /cassandra.log
  networks:
    - emqx_bridge

services:
  cassandra_server:
    <<: *cassandra
    container_name: cassandra
    volumes:
      - ./certs:/certs
      - ./cassandra/cassandra.yaml:/etc/cassandra/cassandra.yaml
  cassandra_noauth_server:
    <<: *cassandra
    container_name: cassandra_noauth
    volumes:
      - ./certs:/certs
      - ./cassandra/cassandra_noauth.yaml:/etc/cassandra/cassandra.yaml


@ -1,16 +0,0 @@
version: '3.9'
services:
  clickhouse:
    container_name: clickhouse
    image: clickhouse/clickhouse-server:23.1.2.9-alpine
    restart: always
    volumes:
      - ./clickhouse/users.xml:/etc/clickhouse-server/users.xml
      - ./clickhouse/config.xml:/etc/clickhouse-server/config.d/config.xml
    expose:
      - "8123"
    ports:
      - "8123:8123"
    networks:
      - emqx_bridge


@ -1,30 +0,0 @@
version: '3.9'
services:
  couchbase:
    container_name: couchbase
    hostname: couchbase
    image: ghcr.io/emqx/couchbase:1.0.0
    restart: always
    expose:
      - 8091-8093
    # ports:
    #   - "8091-8093:8091-8093"
    networks:
      - emqx_bridge
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8093/admin/ping"]
      interval: 30s
      timeout: 5s
      retries: 4
    environment:
      - CLUSTER=localhost
      - USER=admin
      - PASS=public
      - PORT=8091
      - RAMSIZEMB=2048
      - RAMSIZEINDEXMB=512
      - RAMSIZEFTSMB=512
      - BUCKETS=mqtt
      - BUCKETSIZES=100
      - AUTOREBALANCE=true


@ -1,15 +0,0 @@
version: '3.9'
services:
  dynamodb-local:
    container_name: dynamo
    image: public.ecr.aws/aws-dynamodb-local/aws-dynamodb-local:${DYNAMO_TAG}
    restart: always
    ports:
      - "8000:8000"
    environment:
      AWS_ACCESS_KEY_ID: root
      AWS_SECRET_ACCESS_KEY: public
      AWS_DEFAULT_REGION: us-west-2
    networks:
      - emqx_bridge


@ -1,111 +0,0 @@
version: "3.9"
# hint: run the following if the container fails to start locally
# sysctl -w vm.max_map_count=262144
services:
setup:
image: public.ecr.aws/elastic/elasticsearch:${ELASTIC_TAG}
volumes:
- ./elastic:/usr/share/elasticsearch/config/certs
user: "0"
command: >
bash -c '
if [ x${ELASTIC_PASSWORD} == x ]; then
echo "Set the ELASTIC_PASSWORD environment variable in the .env file";
exit 1;
elif [ x${KIBANA_PASSWORD} == x ]; then
echo "Set the KIBANA_PASSWORD environment variable in the .env file";
exit 1;
fi;
echo "Setting file permissions"
chown -R root:root config/certs;
find . -type d -exec chmod 750 \{\} \;;
find . -type f -exec chmod 640 \{\} \;;
echo "Waiting for Elasticsearch availability";
until curl -s --cacert config/certs/ca/ca.crt https://es01:9200 | grep -q "missing authentication credentials"; do sleep 30; done;
echo "Setting kibana_system password";
until curl -s -X POST --cacert config/certs/ca/ca.crt -u "elastic:${ELASTIC_PASSWORD}" -H "Content-Type: application/json" https://es01:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done;
echo "All done!";
'
healthcheck:
test: ["CMD-SHELL", "[ -f config/certs/ca/ca.crt ]"]
interval: 1s
timeout: 5s
retries: 120
es01:
depends_on:
setup:
condition: service_healthy
image: public.ecr.aws/elastic/elasticsearch:${ELASTIC_TAG}
container_name: elasticsearch
hostname: elasticsearch
volumes:
- ./elastic:/usr/share/elasticsearch/config/certs
- esdata01:/usr/share/elasticsearch/data
ports:
- 9200:9200
environment:
- node.name=es01
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
- bootstrap.memory_lock=true
- discovery.type=single-node
- xpack.security.enabled=true
- xpack.security.http.ssl.enabled=true
- xpack.security.http.ssl.key=certs/es01/es01.key
- xpack.security.http.ssl.certificate=certs/es01/es01.crt
- xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.license.self_generated.type=${LICENSE}
mem_limit: 4G
ulimits:
memlock:
soft: -1
hard: -1
healthcheck:
test:
[
"CMD-SHELL",
"curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'",
]
interval: 10s
timeout: 10s
retries: 120
restart: always
networks:
- emqx_bridge
kibana:
depends_on:
es01:
condition: service_healthy
image: public.ecr.aws/elastic/kibana:${ELASTIC_TAG}
volumes:
- ./elastic:/usr/share/kibana/config/certs
- kibanadata:/usr/share/kibana/data
ports:
- 5601:5601
environment:
- SERVERNAME=kibana
- ELASTICSEARCH_HOSTS=https://es01:9200
- ELASTICSEARCH_USERNAME=kibana_system
- ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD}
- ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt
mem_limit: 1073741824
healthcheck:
test:
[
"CMD-SHELL",
"curl -s -I http://localhost:5601 | grep -q 'HTTP/1.1 302 Found'",
]
interval: 10s
timeout: 10s
retries: 120
restart: always
networks:
- emqx_bridge
volumes:
esdata01:
driver: local
kibanadata:
driver: local


@ -1,29 +0,0 @@
x-default-emqx: &default-emqx
  env_file:
    - conf.cluster.env
  healthcheck:
    test: ["CMD", "/opt/emqx/bin/emqx_ctl", "status"]
    interval: 5s
    timeout: 25s
    retries: 5

services:
  emqx1:
    <<: *default-emqx
    container_name: node1.emqx.io
    restart: on-failure
    environment:
      - "EMQX_HOST=node1.emqx.io"
      - "EMQX_NODE__DB_BACKEND=mnesia"
      - "EMQX_NODE__DB_ROLE=core"

  emqx2:
    <<: *default-emqx
    container_name: node2.emqx.io
    depends_on:
      - emqx1
    restart: on-failure
    environment:
      - "EMQX_HOST=node2.emqx.io"
      - "EMQX_NODE__DB_BACKEND=mnesia"
      - "EMQX_NODE__DB_ROLE=core"


@ -1,4 +1,5 @@
x-default-emqx: &default-emqx
image: $TARGET:$EMQX_TAG
env_file:
- conf.cluster.env
healthcheck:
@ -11,11 +12,10 @@ services:
emqx1:
<<: *default-emqx
container_name: node1.emqx.io
restart: on-failure
environment:
- "EMQX_HOST=node1.emqx.io"
- "EMQX_NODE__DB_BACKEND=rlog"
- "EMQX_NODE__DB_ROLE=core"
- "EMQX_CLUSTER__DB_BACKEND=rlog"
- "EMQX_CLUSTER__RLOG__ROLE=core"
- "EMQX_CLUSTER__STATIC__SEEDS=[emqx@node1.emqx.io]"
- "EMQX_LISTENERS__TCP__DEFAULT__PROXY_PROTOCOL=false"
- "EMQX_LISTENERS__WS__DEFAULT__PROXY_PROTOCOL=false"
@ -23,14 +23,11 @@ services:
emqx2:
<<: *default-emqx
container_name: node2.emqx.io
depends_on:
- emqx1
restart: on-failure
environment:
- "EMQX_HOST=node2.emqx.io"
- "EMQX_NODE__DB_BACKEND=rlog"
- "EMQX_NODE__DB_ROLE=replicant"
- "EMQX_CLUSTER__CORE_NODES=emqx@node1.emqx.io"
- "EMQX_CLUSTER__DB_BACKEND=rlog"
- "EMQX_CLUSTER__RLOG__ROLE=replicant"
- "EMQX_CLUSTER__RLOG__CORE_NODES=emqx@node1.emqx.io"
- "EMQX_CLUSTER__STATIC__SEEDS=[emqx@node1.emqx.io]"
- "EMQX_LISTENERS__TCP__DEFAULT__PROXY_PROTOCOL=false"
- "EMQX_LISTENERS__WS__DEFAULT__PROXY_PROTOCOL=false"


@ -1,7 +1,7 @@
version: '3.9'
x-default-emqx: &default-emqx
image: ${_EMQX_DOCKER_IMAGE_TAG}
image: $TARGET:$EMQX_TAG
env_file:
- conf.cluster.env
healthcheck:
@ -13,15 +13,13 @@ x-default-emqx: &default-emqx
services:
haproxy:
container_name: haproxy
image: public.ecr.aws/docker/library/haproxy:2.4
image: haproxy:2.4
depends_on:
- emqx1
- emqx2
volumes:
- ./haproxy/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg
- ../../apps/emqx/etc/certs/cert.pem:/usr/local/etc/haproxy/certs/cert.pem
- ../../apps/emqx/etc/certs/key.pem:/usr/local/etc/haproxy/certs/key.pem
- ../../apps/emqx/etc/certs/cacert.pem:/usr/local/etc/haproxy/certs/cacert.pem
- ../../apps/emqx/etc/certs:/usr/local/etc/haproxy/certs
ports:
- "18083:18083"
# - "1883:1883"
@ -36,7 +34,7 @@ services:
- -c
- |
set -x
cat /usr/local/etc/haproxy/certs/cert.pem /usr/local/etc/haproxy/certs/key.pem > /var/lib/haproxy/emqx.pem
cat /usr/local/etc/haproxy/certs/cert.pem /usr/local/etc/haproxy/certs/key.pem > /tmp/emqx.pem
haproxy -f /usr/local/etc/haproxy/haproxy.cfg
emqx1:

View File

@ -1,23 +0,0 @@
version: '3.9'
services:
gcp_emulator:
container_name: gcp_emulator
image: gcr.io/google.com/cloudsdktool/google-cloud-cli:435.0.1-emulators
restart: always
expose:
- "8085"
# ports:
# - "8085:8085"
networks:
- emqx_bridge
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8085"]
interval: 30s
timeout: 5s
retries: 4
command:
- bash
- "-c"
- |
gcloud beta emulators pubsub start --project=emqx-pubsub --host-port=0.0.0.0:8085 --impersonate-service-account test@emqx.iam.gserviceaccount.com

View File

@ -1,22 +0,0 @@
version: '3.9'
services:
greptimedb:
container_name: greptimedb
hostname: greptimedb
image: greptime/greptimedb:v0.7.1
expose:
- "4000"
- "4001"
# uncomment for local testing
# ports:
# - "4000:4000"
# - "4001:4001"
restart: always
networks:
- emqx_bridge
command:
standalone start
--user-provider=static_user_provider:cmd:greptime_user=greptime_pwd
--http-addr="0.0.0.0:4000"
--rpc-addr="0.0.0.0:4001"

View File

@ -1,132 +0,0 @@
version: "3.5"
services:
hserver:
image: hstreamdb/hstream:${HSTREAMDB_TAG}
container_name: hstreamdb
depends_on:
zookeeper:
condition: service_started
hstore:
condition: service_healthy
# ports:
# - "127.0.0.1:6570:6570"
expose:
- 6570
networks:
- emqx_bridge
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- /tmp:/tmp
- data_store:/data/store
command:
- bash
- "-c"
- |
set -e
/usr/local/script/wait-for-storage.sh hstore 6440 zookeeper 2181 600 \
/usr/local/bin/hstream-server \
--bind-address 0.0.0.0 --port 6570 \
--internal-port 6571 \
--server-id 100 \
--seed-nodes "$$(hostname -I | awk '{print $$1}'):6571" \
--advertised-address $$(hostname -I | awk '{print $$1}') \
--metastore-uri zk://zookeeper:2181 \
--store-config /data/store/logdevice.conf \
--store-admin-host hstore --store-admin-port 6440 \
--store-log-level warning \
--io-tasks-path /tmp/io/tasks \
--io-tasks-network emqx_bridge
hstore:
image: hstreamdb/hstream:${HSTREAMDB_TAG}
networks:
- emqx_bridge
volumes:
- data_store:/data/store
command:
- bash
- "-c"
- |
set -ex
# N.B. "enable-dscp-reflection=false" is required for linux kernel which
# doesn't support dscp reflection, e.g. centos7.
/usr/local/bin/ld-dev-cluster --root /data/store \
--use-tcp --tcp-host $$(hostname -I | awk '{print $$1}') \
--user-admin-port 6440 \
--param enable-dscp-reflection=false \
--no-interactive \
> /data/store/hstore.log 2>&1
healthcheck:
test: ["CMD", "grep", "LogDevice Cluster running", "/data/store/hstore.log"]
interval: 10s
timeout: 10s
retries: 60
start_period: 60s
zookeeper:
image: zookeeper:${HSTREAMDB_ZK_TAG}
expose:
- 2181
networks:
- emqx_bridge
volumes:
- data_zk_data:/data
- data_zk_datalog:/datalog
## The three containers `hstream-exporter`, `prometheus`, and `console`
## are for the HStreamDB Web Console.
## But the HStreamDB Console is not supported in v0.15.0
## because the HStreamApi proto changed.
# hstream-exporter:
# depends_on:
# hserver:
# condition: service_completed_successfully
# image: hstreamdb/hstream-exporter
# networks:
# - hstream-quickstart
# command:
# - bash
# - "-c"
# - |
# set -ex
# hstream-exporter --addr hstream://hserver:6570
# prometheus:
# image: prom/prometheus
# expose:
# - 9097
# networks:
# - hstream-quickstart
# ports:
# - "9097:9090"
# volumes:
# - $PWD/prometheus:/etc/prometheus
# console:
# image: hstreamdb/hstream-console
# depends_on:
# - hserver
# expose:
# - 5177
# networks:
# - hstream-quickstart
# environment:
# - SERVER_PORT=5177
# - PROMETHEUS_URL=http://prometheus:9097
# - HSTREAM_PUBLIC_ADDRESS=hstream.example.com
# - HSTREAM_PRIVATE_ADDRESS=hserver:6570
# ports:
# - "5177:5177"
# networks:
# hstream-quickstart:
# name: hstream-quickstart
volumes:
data_store:
name: quickstart_data_store
data_zk_data:
name: quickstart_data_zk_data
data_zk_datalog:
name: quickstart_data_zk_datalog

View File

@ -1,36 +0,0 @@
version: '3.9'
services:
influxdb_server_tcp:
container_name: influxdb_tcp
image: public.ecr.aws/docker/library/influxdb:${INFLUXDB_TAG}
expose:
- "8086"
- "8089/udp"
- "8083"
# ports:
# - "8086:8086"
environment:
DOCKER_INFLUXDB_INIT_MODE: setup
DOCKER_INFLUXDB_INIT_USERNAME: root
DOCKER_INFLUXDB_INIT_PASSWORD: emqx@123
DOCKER_INFLUXDB_INIT_ORG: emqx
DOCKER_INFLUXDB_INIT_BUCKET: mqtt
DOCKER_INFLUXDB_INIT_ADMIN_TOKEN: abcdefg
volumes:
- "./influxdb/setup-v1.sh:/docker-entrypoint-initdb.d/setup-v1.sh"
restart: always
networks:
- emqx_bridge
# networks:
# emqx_bridge:
# driver: bridge
# name: emqx_bridge
# ipam:
# driver: default
# config:
# - subnet: 172.100.239.0/24
# gateway: 172.100.239.1
# - subnet: 2001:3200:3200::/64
# gateway: 2001:3200:3200::1

View File

@ -1,42 +0,0 @@
version: '3.9'
services:
influxdb_server_tls:
container_name: influxdb_tls
image: public.ecr.aws/docker/library/influxdb:${INFLUXDB_TAG}
expose:
- "8086"
- "8089/udp"
- "8083"
# ports:
# - "8087:8086"
environment:
DOCKER_INFLUXDB_INIT_MODE: setup
DOCKER_INFLUXDB_INIT_USERNAME: root
DOCKER_INFLUXDB_INIT_PASSWORD: emqx@123
DOCKER_INFLUXDB_INIT_ORG: emqx
DOCKER_INFLUXDB_INIT_BUCKET: mqtt
DOCKER_INFLUXDB_INIT_ADMIN_TOKEN: abcdefg
volumes:
- ./certs/server.crt:/etc/influxdb/cert.pem
- ./certs/server.key:/etc/influxdb/key.pem
- "./influxdb/setup-v1.sh:/docker-entrypoint-initdb.d/setup-v1.sh"
command:
- influxd
- --tls-cert=/etc/influxdb/cert.pem
- --tls-key=/etc/influxdb/key.pem
restart: always
networks:
- emqx_bridge
# networks:
# emqx_bridge:
# driver: bridge
# name: emqx_bridge
# ipam:
# driver: default
# config:
# - subnet: 172.100.239.0/24
# gateway: 172.100.239.1
# - subnet: 2001:3200:3200::/64
# gateway: 2001:3200:3200::1

View File

@ -1,90 +0,0 @@
version: '3.9'
services:
iotdb_1_3_0:
container_name: iotdb130
hostname: iotdb130
image: apache/iotdb:1.3.0-standalone
restart: always
environment:
- enable_rest_service=true
- cn_internal_address=iotdb130
- cn_internal_port=10710
- cn_consensus_port=10720
- cn_seed_config_node=iotdb130:10710
- dn_rpc_address=iotdb130
- dn_internal_address=iotdb130
- dn_rpc_port=6667
- dn_mpp_data_exchange_port=10740
- dn_schema_region_consensus_port=10750
- dn_data_region_consensus_port=10760
- dn_seed_config_node=iotdb130:10710
# volumes:
# - ./data:/iotdb/data
# - ./logs:/iotdb/logs
expose:
- "18080"
# IoTDB's REST interface, uncomment for local testing
# ports:
# - "18080:18080"
networks:
- emqx_bridge
iotdb_1_1_0:
container_name: iotdb110
hostname: iotdb110
image: apache/iotdb:1.1.0-standalone
restart: always
environment:
- enable_rest_service=true
- cn_internal_address=iotdb110
- cn_internal_port=10710
- cn_consensus_port=10720
- cn_target_config_node_list=iotdb110:10710
- dn_rpc_address=iotdb110
- dn_internal_address=iotdb110
- dn_rpc_port=6667
- dn_mpp_data_exchange_port=10740
- dn_schema_region_consensus_port=10750
- dn_data_region_consensus_port=10760
- dn_target_config_node_list=iotdb110:10710
# volumes:
# - ./data:/iotdb/data
# - ./logs:/iotdb/logs
expose:
- "18080"
# IoTDB's REST interface, uncomment for local testing
# ports:
# - "18080:18080"
networks:
- emqx_bridge
iotdb_0_13:
container_name: iotdb013
hostname: iotdb013
image: apache/iotdb:0.13.4-node
restart: always
environment:
- enable_rest_service=true
- cn_internal_address=iotdb013
- cn_internal_port=10710
- cn_consensus_port=10720
- cn_target_config_node_list=iotdb013:10710
- dn_rpc_address=iotdb013
- dn_internal_address=iotdb013
- dn_rpc_port=6667
- dn_mpp_data_exchange_port=10740
- dn_schema_region_consensus_port=10750
- dn_data_region_consensus_port=10760
- dn_target_config_node_list=iotdb013:10710
volumes:
- ./iotdb013/iotdb-rest.properties:/iotdb/conf/iotdb-rest.properties
# - ./data:/iotdb/data
# - ./logs:/iotdb/logs
expose:
- "18080"
# IoTDB's REST interface, uncomment for local testing
# ports:
# - "18080:18080"
networks:
- emqx_bridge

View File

@ -1,79 +0,0 @@
version: '3.9'
services:
zookeeper:
image: public.ecr.aws/docker/library/zookeeper:3.6
ports:
- "2181:2181"
container_name: zookeeper
hostname: zookeeper
networks:
emqx_bridge:
ssl_cert_gen:
# see https://github.com/emqx/docker-images
image: ghcr.io/emqx/certgen:latest
container_name: ssl_cert_gen
user: "${DOCKER_USER:-root}"
volumes:
- /tmp/emqx-ci/emqx-shared-secret:/var/lib/secret
kdc:
hostname: kdc.emqx.net
image: ghcr.io/emqx/emqx-builder/5.3-9:1.15.7-26.2.5-3-ubuntu22.04
container_name: kdc.emqx.net
expose:
- 88 # kdc
- 749 # admin server
# ports:
# - 88:88
# - 749:749
networks:
emqx_bridge:
volumes:
- /tmp/emqx-ci/emqx-shared-secret:/var/lib/secret
- ./kerberos/krb5.conf:/etc/kdc/krb5.conf
- ./kerberos/krb5.conf:/etc/krb5.conf
- ./kerberos/run.sh:/usr/bin/run.sh
command: run.sh
kafka_1:
image: wurstmeister/kafka:2.13-2.8.1
# ports:
# - "9192-9195:9192-9195"
container_name: kafka-1.emqx.net
hostname: kafka-1.emqx.net
depends_on:
kdc:
condition: service_started
zookeeper:
condition: service_started
ssl_cert_gen:
condition: service_completed_successfully
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_LISTENERS: PLAINTEXT://:9092,SASL_PLAINTEXT://:9093,SSL://:9094,SASL_SSL://:9095,LOCAL_PLAINTEXT://:9192,LOCAL_SASL_PLAINTEXT://:9193,LOCAL_SSL://:9194,LOCAL_SASL_SSL://:9195,TOXIPROXY_PLAINTEXT://:9292,TOXIPROXY_SASL_PLAINTEXT://:9293,TOXIPROXY_SSL://:9294,TOXIPROXY_SASL_SSL://:9295
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-1.emqx.net:9092,SASL_PLAINTEXT://kafka-1.emqx.net:9093,SSL://kafka-1.emqx.net:9094,SASL_SSL://kafka-1.emqx.net:9095,LOCAL_PLAINTEXT://localhost:9192,LOCAL_SASL_PLAINTEXT://localhost:9193,LOCAL_SSL://localhost:9194,LOCAL_SASL_SSL://localhost:9195,TOXIPROXY_PLAINTEXT://toxiproxy.emqx.net:9292,TOXIPROXY_SASL_PLAINTEXT://toxiproxy.emqx.net:9293,TOXIPROXY_SSL://toxiproxy.emqx.net:9294,TOXIPROXY_SASL_SSL://toxiproxy.emqx.net:9295
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,SASL_PLAINTEXT:SASL_PLAINTEXT,SSL:SSL,SASL_SSL:SASL_SSL,LOCAL_PLAINTEXT:PLAINTEXT,LOCAL_SASL_PLAINTEXT:SASL_PLAINTEXT,LOCAL_SSL:SSL,LOCAL_SASL_SSL:SASL_SSL,TOXIPROXY_PLAINTEXT:PLAINTEXT,TOXIPROXY_SASL_PLAINTEXT:SASL_PLAINTEXT,TOXIPROXY_SSL:SSL,TOXIPROXY_SASL_SSL:SASL_SSL
KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
KAFKA_SASL_ENABLED_MECHANISMS: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512,GSSAPI
KAFKA_SASL_KERBEROS_SERVICE_NAME: kafka
KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAIN
KAFKA_OPTS: "-Djava.security.auth.login.config=/etc/kafka/jaas.conf"
KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
KAFKA_CREATE_TOPICS_NG: test-topic-one-partition:1:1,test-topic-two-partitions:2:1,test-topic-three-partitions:3:1,
KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.auth.SimpleAclAuthorizer
KAFKA_SSL_TRUSTSTORE_LOCATION: /var/lib/secret/kafka.truststore.jks
KAFKA_SSL_TRUSTSTORE_PASSWORD: password
KAFKA_SSL_KEYSTORE_LOCATION: /var/lib/secret/kafka.keystore.jks
KAFKA_SSL_KEYSTORE_PASSWORD: password
KAFKA_SSL_KEY_PASSWORD: password
networks:
emqx_bridge:
volumes:
- /tmp/emqx-ci/emqx-shared-secret:/var/lib/secret
- ./kafka/jaas.conf:/etc/kafka/jaas.conf
- ./kafka/kafka-entrypoint.sh:/bin/kafka-entrypoint.sh
- ./kerberos/krb5.conf:/etc/kdc/krb5.conf
- ./kerberos/krb5.conf:/etc/krb5.conf
command: kafka-entrypoint.sh

View File

@ -1,12 +0,0 @@
version: '3.9'
services:
kinesis:
container_name: kinesis
image: public.ecr.aws/localstack/localstack:2.1
environment:
- KINESIS_ERROR_PROBABILITY=0.0
- KINESIS_LATENCY=0
restart: always
networks:
- emqx_bridge

View File

@ -6,13 +6,11 @@ services:
build:
context: ../..
dockerfile: .ci/docker-compose-file/openldap/Dockerfile
ulimits:
nofile: 1024
image: openldap
#ports:
# - "389:389"
volumes:
- ./certs/ca.crt:/etc/certs/ca.crt
args:
LDAP_TAG: ${LDAP_TAG}
image: openldap
ports:
- 389:389
restart: always
networks:
- emqx_bridge

View File

@ -1,21 +0,0 @@
version: '3.7'
services:
minio:
hostname: minio
image: quay.io/minio/minio:${MINIO_TAG}
command: server --address ":9000" --console-address ":9001" /minio-data
expose:
- "9000"
- "9001"
ports:
- "9000:9000"
- "9001:9001"
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 30s
timeout: 5s
retries: 3
networks:
emqx_bridge:

View File

@ -1,23 +0,0 @@
version: '3.7'
services:
minio_tls:
hostname: minio-tls
image: quay.io/minio/minio:${MINIO_TAG}
command: server --certs-dir /etc/certs --address ":9100" --console-address ":9101" /minio-data
volumes:
- ./certs/server.crt:/etc/certs/public.crt
- ./certs/server.key:/etc/certs/private.key
expose:
- "9100"
- "9101"
ports:
- "9100:9100"
- "9101:9101"
healthcheck:
test: ["CMD", "curl", "-k", "-f", "https://localhost:9100/minio/health/live"]
interval: 30s
timeout: 5s
retries: 3
networks:
emqx_bridge:

View File

@ -4,7 +4,7 @@ services:
mongo1:
hostname: mongo1
container_name: mongo1
image: public.ecr.aws/docker/library/mongo:${MONGO_TAG}
image: mongo:${MONGO_TAG}
environment:
MONGO_INITDB_DATABASE: mqtt
networks:
@ -18,11 +18,11 @@ services:
--ipv6
--bind_ip_all
--replSet rs0
mongo2:
hostname: mongo2
container_name: mongo2
image: public.ecr.aws/docker/library/mongo:${MONGO_TAG}
image: mongo:${MONGO_TAG}
environment:
MONGO_INITDB_DATABASE: mqtt
networks:
@ -40,7 +40,7 @@ services:
mongo3:
hostname: mongo3
container_name: mongo3
image: public.ecr.aws/docker/library/mongo:${MONGO_TAG}
image: mongo:${MONGO_TAG}
environment:
MONGO_INITDB_DATABASE: mqtt
networks:
@ -54,10 +54,10 @@ services:
--ipv6
--bind_ip_all
--replSet rs0
mongo_rs_client:
image: public.ecr.aws/docker/library/mongo:${MONGO_TAG}
container_name: mongo_rs_client
mongo_client:
image: mongo:${MONGO_TAG}
container_name: mongo_client
networks:
- emqx_bridge
depends_on:

View File

@ -4,7 +4,7 @@ services:
mongo1:
hostname: mongo1
container_name: mongo1
image: public.ecr.aws/docker/library/mongo:${MONGO_TAG}
image: mongo:${MONGO_TAG}
environment:
MONGO_INITDB_DATABASE: mqtt
networks:
@ -27,7 +27,7 @@ services:
mongo2:
hostname: mongo2
container_name: mongo2
image: public.ecr.aws/docker/library/mongo:${MONGO_TAG}
image: mongo:${MONGO_TAG}
environment:
MONGO_INITDB_DATABASE: mqtt
networks:
@ -50,7 +50,7 @@ services:
mongo3:
hostname: mongo3
container_name: mongo3
image: public.ecr.aws/docker/library/mongo:${MONGO_TAG}
image: mongo:${MONGO_TAG}
environment:
MONGO_INITDB_DATABASE: mqtt
networks:
@ -71,7 +71,7 @@ services:
mongod --ipv6 --bind_ip_all --tlsMode requireTLS --tlsCertificateKeyFile /etc/certs/mongodb.pem --replSet rs0
mongo_client:
image: public.ecr.aws/docker/library/mongo:${MONGO_TAG}
image: mongo:${MONGO_TAG}
container_name: mongo_client
networks:
- emqx_bridge

View File

@ -1,90 +0,0 @@
version: "3"
services:
mongosharded1:
hostname: mongosharded1
container_name: mongosharded1
image: public.ecr.aws/docker/library/mongo:${MONGO_TAG}
environment:
MONGO_INITDB_DATABASE: mqtt
networks:
- emqx_bridge
expose:
- 27017
ports:
- 27014:27017
restart: always
command:
--configsvr
--replSet cfg0
--port 27017
--ipv6
--bind_ip_all
mongosharded2:
hostname: mongosharded2
container_name: mongosharded2
image: public.ecr.aws/docker/library/mongo:${MONGO_TAG}
environment:
MONGO_INITDB_DATABASE: mqtt
networks:
- emqx_bridge
expose:
- 27017
ports:
- 27015:27017
restart: always
command:
--shardsvr
--replSet rs0
--port 27017
--ipv6
--bind_ip_all
mongosharded3:
hostname: mongosharded3
container_name: mongosharded3
image: public.ecr.aws/docker/library/mongo:${MONGO_TAG}
environment:
MONGO_INITDB_DATABASE: mqtt
networks:
- emqx_bridge
expose:
- 27017
ports:
- 27016:27017
restart: always
entrypoint: mongos
command:
--configdb cfg0/mongosharded1:27017
--port 27017
--ipv6
--bind_ip_all
mongosharded_client:
image: public.ecr.aws/docker/library/mongo:${MONGO_TAG}
container_name: mongosharded_client
networks:
- emqx_bridge
depends_on:
- mongosharded1
- mongosharded2
- mongosharded3
command:
- /bin/bash
- -c
- |
while ! mongo --host mongosharded1 --eval 'db.runCommand("ping").ok' --quiet >/dev/null 2>&1 ; do
sleep 1
done
mongo --host mongosharded1 --eval "rs.initiate( { _id : 'cfg0', configsvr: true, members: [ { _id : 0, host : 'mongosharded1:27017' } ] })"
while ! mongo --host mongosharded2 --eval 'db.runCommand("ping").ok' --quiet >/dev/null 2>&1 ; do
sleep 1
done
mongo --host mongosharded2 --eval "rs.initiate( { _id : 'rs0', members: [ { _id : 0, host : 'mongosharded2:27017' } ] })"
mongo --host mongosharded2 --eval "rs.status()"
while ! mongo --host mongosharded3 --eval 'db.runCommand("ping").ok' --quiet >/dev/null 2>&1 ; do
sleep 1
done
mongo --host mongosharded3 --eval "sh.addShard('rs0/mongosharded2:27017')"
mongo --host mongosharded3 --eval "sh.enableSharding('mqtt')"

View File

@ -2,16 +2,15 @@ version: '3.9'
services:
mongo_server:
container_name: mongo
image: public.ecr.aws/docker/library/mongo:${MONGO_TAG}
container_name: mongo
image: mongo:${MONGO_TAG}
restart: always
environment:
MONGO_INITDB_DATABASE: mqtt
networks:
- emqx_bridge
ports:
- "27017:27017"
env_file:
- .env
- credentials.env
command:
--ipv6
--bind_ip_all

View File

@ -1,30 +1,23 @@
version: '3.9'
services:
mongo_server_tls:
container_name: mongo-tls
image: public.ecr.aws/docker/library/mongo:${MONGO_TAG}
mongo_server:
container_name: mongo
image: mongo:${MONGO_TAG}
restart: always
environment:
MONGO_INITDB_DATABASE: mqtt
volumes:
- ./certs/server.crt:/etc/certs/cert.pem
- ./certs/server.key:/etc/certs/key.pem
- ./certs/ca.crt:/etc/certs/cacert.pem
- ../../apps/emqx/etc/certs/cert.pem:/etc/certs/cert.pem
- ../../apps/emqx/etc/certs/key.pem:/etc/certs/key.pem
networks:
- emqx_bridge
ports:
- "27018:27017"
- "27017:27017"
command:
- /bin/bash
- -c
- |
cat /etc/certs/key.pem /etc/certs/cert.pem > /etc/certs/mongodb.pem
mongod --ipv6 --bind_ip_all \
--tlsOnNormalPorts \
--tlsMode requireSSL \
--tlsCertificateKeyFile /etc/certs/mongodb.pem \
--tlsCAFile /etc/certs/cacert.pem \
--tlsDisabledProtocols TLS1_0,TLS1_1 \
--setParameter opensslCipherConfig='HIGH:!EXPORT:!aNULL:!DHE:!kDHE@STRENGTH'
cat /etc/certs/key.pem /etc/certs/cert.pem > /etc/certs/mongodb.pem
mongod --ipv6 --bind_ip_all --sslMode requireSSL --sslPEMKeyFile /etc/certs/mongodb.pem

View File

@ -3,22 +3,18 @@ version: '3.9'
services:
mysql_server:
container_name: mysql
image: public.ecr.aws/docker/library/mysql:${MYSQL_TAG}
image: mysql:${MYSQL_TAG}
restart: always
ports:
- "3306:3306"
environment:
MYSQL_ROOT_PASSWORD: public
MYSQL_DATABASE: mqtt
networks:
- emqx_bridge
command:
- --bind-address=0.0.0.0
- --character-set-server=utf8mb4
- --collation-server=utf8mb4_general_ci
- --lower-case-table-names=1
- --max-allowed-packet=128M
# Severely limit maximum number of prepared statements the server must permit
# so that we hit potential resource exhaustion earlier in tests.
- --max-prepared-stmt-count=64
- --skip-symbolic-links
--bind-address "::"
--character-set-server=utf8mb4
--collation-server=utf8mb4_general_ci
--explicit_defaults_for_timestamp=true
--lower_case_table_names=1
--max_allowed_packet=128M
--skip-symbolic-links

View File

@ -1,37 +1,47 @@
version: '3.9'
services:
mysql_server_tls:
container_name: mysql-tls
image: public.ecr.aws/docker/library/mysql:${MYSQL_TAG}
mysql_server:
container_name: mysql
image: mysql:${MYSQL_TAG}
restart: always
environment:
MYSQL_ROOT_PASSWORD: public
MYSQL_DATABASE: mqtt
MYSQL_USER: user
MYSQL_USER: ssluser
MYSQL_PASSWORD: public
volumes:
- ./certs/ca.crt:/etc/certs/ca-cert.pem
- ./certs/server.crt:/etc/certs/server-cert.pem
- ./certs/server.key:/etc/certs/server-key.pem
- ../../apps/emqx/etc/certs/cacert.pem:/etc/certs/ca-cert.pem
- ../../apps/emqx/etc/certs/cert.pem:/etc/certs/server-cert.pem
- ../../apps/emqx/etc/certs/key.pem:/etc/certs/server-key.pem
ports:
- "3307:3306"
- "3306:3306"
networks:
- emqx_bridge
command:
- --bind-address=0.0.0.0
- --port=3306
- --character-set-server=utf8mb4
- --collation-server=utf8mb4_general_ci
- --lower-case-table-names=1
- --max-allowed-packet=128M
# Severely limit maximum number of prepared statements the server must permit
# so that we hit potential resource exhaustion earlier in tests.
- --max-prepared-stmt-count=64
- --ssl-ca=/etc/certs/ca-cert.pem
- --ssl-cert=/etc/certs/server-cert.pem
- --ssl-key=/etc/certs/server-key.pem
- --require-secure-transport=ON
- --tls-version=TLSv1.2,TLSv1.3
- --ssl-cipher=ECDHE-RSA-AES256-GCM-SHA384
--bind-address "::"
--character-set-server=utf8mb4
--collation-server=utf8mb4_general_ci
--explicit_defaults_for_timestamp=true
--lower_case_table_names=1
--max_allowed_packet=128M
--skip-symbolic-links
--ssl-ca=/etc/certs/ca-cert.pem
--ssl-cert=/etc/certs/server-cert.pem
--ssl-key=/etc/certs/server-key.pem
mysql_client:
container_name: mysql_client
image: mysql:${MYSQL_TAG}
networks:
- emqx_bridge
depends_on:
- mysql_server
command:
- /bin/bash
- -c
- |
service mysql start
echo "show tables;" | mysql -h mysql_server -u root -ppublic mqtt mqtt
while [[ $$? -ne 0 ]];do echo "show tables;" | mysql -h mysql_server -u root -ppublic mqtt; done
echo "ALTER USER 'ssluser'@'%' REQUIRE X509;" | mysql -h mysql_server -u root -ppublic mqtt

View File

@ -1,9 +0,0 @@
version: '3.9'
services:
opents_server:
container_name: opents
image: petergrace/opentsdb-docker:${OPENTS_TAG}
restart: always
networks:
- emqx_bridge

View File

@ -1,11 +0,0 @@
version: '3.9'
services:
oracle_server:
container_name: oracle
image: oracleinanutshell/oracle-xe-11g:1.0.0
restart: always
environment:
ORACLE_DISABLE_ASYNCH_IO: true
networks:
- emqx_bridge

View File

@ -1,69 +0,0 @@
version: '3.9'
services:
jaeger-all-in-one:
image: jaegertracing/all-in-one:1.51.0
container_name: jaeger.emqx.net
hostname: jaeger.emqx.net
networks:
- emqx_bridge
restart: always
# ports:
# - "16686:16686"
user: "${DOCKER_USER:-root}"
# Collector
otel-collector:
image: otel/opentelemetry-collector:0.90.0
container_name: otel-collector.emqx.net
hostname: otel-collector.emqx.net
networks:
- emqx_bridge
restart: always
command: ["--config=/etc/otel-collector-config.yaml", "${OTELCOL_ARGS}"]
volumes:
- ./otel:/etc/
# ports:
# - "1888:1888" # pprof extension
# - "8888:8888" # Prometheus metrics exposed by the collector
# - "8889:8889" # Prometheus exporter metrics
# - "13133:13133" # health_check extension
# - "4317:4317" # OTLP gRPC receiver
# - "4318:4318" # OTLP http receiver
# - "55679:55679" # zpages extension
depends_on:
- jaeger-all-in-one
user: "${DOCKER_USER:-root}"
# Collector
otel-collector-tls:
image: otel/opentelemetry-collector:0.90.0
container_name: otel-collector-tls.emqx.net
hostname: otel-collector-tls.emqx.net
networks:
- emqx_bridge
restart: always
command: ["--config=/etc/otel-collector-config-tls.yaml", "${OTELCOL_ARGS}"]
volumes:
- ./otel:/etc/
- ./certs:/etc/certs
# ports:
# - "14317:4317" # OTLP gRPC receiver
depends_on:
- jaeger-all-in-one
user: "${DOCKER_USER:-root}"
#networks:
# emqx_bridge:
# driver: bridge
# name: emqx_bridge
# enable_ipv6: true
# ipam:
# driver: default
# config:
# - subnet: 172.100.239.0/24
# gateway: 172.100.239.1
# - subnet: 2001:3200:3200::/64
# gateway: 2001:3200:3200::1
#

View File

@ -3,7 +3,7 @@ version: '3.9'
services:
pgsql_server:
container_name: pgsql
image: public.ecr.aws/docker/library/postgres:${PGSQL_TAG}
image: postgres:${PGSQL_TAG}
restart: always
environment:
POSTGRES_PASSWORD: public

View File

@ -1,14 +1,14 @@
version: '3.9'
services:
pgsql_server_tls:
container_name: pgsql-tls
pgsql_server:
container_name: pgsql
build:
context: ./
dockerfile: ./pgsql/Dockerfile
context: ../..
dockerfile: .ci/docker-compose-file/pgsql/Dockerfile
args:
POSTGRES_USER: postgres
BUILD_FROM: public.ecr.aws/docker/library/postgres:${PGSQL_TAG}
BUILD_FROM: postgres:${PGSQL_TAG}
image: emqx_pgsql:${PGSQL_TAG}
restart: always
environment:
@ -16,7 +16,7 @@ services:
POSTGRES_USER: root
POSTGRES_PASSWORD: public
ports:
- "5433:5432"
- "5432:5432"
command:
- -c
- ssl=on
@ -28,7 +28,5 @@ services:
- ssl_ca_file=/var/lib/postgresql/root.crt
- -c
- hba_file=/var/lib/postgresql/pg_hba.conf
# - -c
# - ssl_min_protocol_version=TLSv1.2
networks:
- emqx_bridge

View File

@ -1,32 +0,0 @@
version: '3'
services:
pulsar:
container_name: pulsar
image: apachepulsar/pulsar:2.11.0
# ports:
# - 6650:6650
# - 8080:8080
networks:
emqx_bridge:
volumes:
- ../../apps/emqx/etc/certs/cert.pem:/etc/certs/server.pem
- ../../apps/emqx/etc/certs/key.pem:/etc/certs/key.pem
- ../../apps/emqx/etc/certs/cacert.pem:/etc/certs/ca.pem
restart: always
command:
- bash
- "-c"
- |
sed -i 's/^advertisedAddress=/#advertisedAddress=/' conf/standalone.conf
sed -ie 's/^brokerServicePort=.*/brokerServicePort=6649/' conf/standalone.conf
sed -i 's/^bindAddress=/#bindAddress=/' conf/standalone.conf
sed -i 's#^bindAddresses=#bindAddresses=plain:pulsar://0.0.0.0:6650,ssl:pulsar+ssl://0.0.0.0:6651,toxiproxy:pulsar://0.0.0.0:6652,toxiproxy_ssl:pulsar+ssl://0.0.0.0:6653#' conf/standalone.conf
sed -i 's#^advertisedAddress=#advertisedAddress=plain:pulsar://pulsar:6650,ssl:pulsar+ssl://pulsar:6651,toxiproxy:pulsar://toxiproxy:6652,toxiproxy_ssl:pulsar+ssl://toxiproxy:6653#' conf/standalone.conf
sed -i 's#^tlsCertificateFilePath=#tlsCertificateFilePath=/etc/certs/server.pem#' conf/standalone.conf
sed -i 's#^tlsTrustCertsFilePath=#tlsTrustCertsFilePath=/etc/certs/ca.pem#' conf/standalone.conf
sed -i 's#^tlsKeyFilePath=#tlsKeyFilePath=/etc/certs/key.pem#' conf/standalone.conf
sed -i 's#^tlsProtocols=#tlsProtocols=TLSv1.3,TLSv1.2#' conf/standalone.conf
sed -i 's#^tlsCiphers=#tlsCiphers=TLS_AES_256_GCM_SHA384#' conf/standalone.conf
echo 'advertisedListeners=plain:pulsar://pulsar:6650,ssl:pulsar+ssl://pulsar:6651,toxiproxy:pulsar://toxiproxy:6652,toxiproxy_ssl:pulsar+ssl://toxiproxy:6653' >> conf/standalone.conf
bin/pulsar standalone -nfw -nss

View File

@ -2,8 +2,8 @@ version: '3.9'
services:
python:
container_name: python
image: public.ecr.aws/docker/library/python:3.9.16-alpine3.18
container_name: python
image: python:3.7.2-alpine3.9
depends_on:
- emqx1
- emqx2
@ -12,3 +12,4 @@ services:
emqx_bridge:
volumes:
- ./python:/scripts

View File

@ -1,24 +0,0 @@
version: '3.9'
services:
rabbitmq:
container_name: rabbitmq
image: public.ecr.aws/docker/library/rabbitmq:3.11-management
restart: always
expose:
- "15672"
- "5672"
- "5671"
# We don't want to take ports from the host
#ports:
# - "15672:15672"
# - "5672:5672"
# - "5671:5671"
volumes:
- ./certs/ca.crt:/opt/certs/ca.crt
- ./certs/server.crt:/opt/certs/server.crt
- ./certs/server.key:/opt/certs/server.key
- ./rabbitmq/20-tls.conf:/etc/rabbitmq/conf.d/20-tls.conf
networks:
- emqx_bridge

View File

@ -1,57 +1,11 @@
version: '3.9'
services:
redis-cluster-1: &redis-node
container_name: redis-cluster-1
image: public.ecr.aws/docker/library/redis:${REDIS_TAG}
services:
redis_server:
image: redis:${REDIS_TAG}
container_name: redis
volumes:
- ./redis/cluster-tcp:/usr/local/etc/redis
command: redis-server /usr/local/etc/redis/redis.conf
- ./redis/:/data/conf
command: bash -c "/bin/bash /data/conf/redis.sh --node cluster && tail -f /var/log/redis-server.log"
networks:
- emqx_bridge
redis-cluster-2:
<<: *redis-node
container_name: redis-cluster-2
redis-cluster-3:
<<: *redis-node
container_name: redis-cluster-3
redis-cluster-4:
<<: *redis-node
container_name: redis-cluster-4
redis-cluster-5:
<<: *redis-node
container_name: redis-cluster-5
redis-cluster-6:
<<: *redis-node
container_name: redis-cluster-6
redis-cluster-create:
<<: *redis-node
container_name: redis-cluster-create
command: >
redis-cli
--cluster create
redis-cluster-1:6379
redis-cluster-2:6379
redis-cluster-3:6379
redis-cluster-4:6379
redis-cluster-5:6379
redis-cluster-6:6379
--cluster-replicas 1
--cluster-yes
--pass "public"
--no-auth-warning
depends_on:
- redis-cluster-1
- redis-cluster-2
- redis-cluster-3
- redis-cluster-4
- redis-cluster-5
- redis-cluster-6

View File

@ -1,59 +1,14 @@
version: '3.9'
services:
redis-cluster-tls-1: &redis-node
container_name: redis-cluster-tls-1
image: public.ecr.aws/docker/library/redis:${REDIS_TAG}
services:
redis_server:
container_name: redis
image: redis:${REDIS_TAG}
volumes:
- ./redis/cluster-tls:/usr/local/etc/redis
- ../../apps/emqx/etc/certs:/etc/certs
command: redis-server /usr/local/etc/redis/redis.conf
- ../../apps/emqx/etc/certs/cacert.pem:/etc/certs/ca.crt
- ../../apps/emqx/etc/certs/cert.pem:/etc/certs/redis.crt
- ../../apps/emqx/etc/certs/key.pem:/etc/certs/redis.key
- ./redis/:/data/conf
command: bash -c "/bin/bash /data/conf/redis.sh --node cluster --tls-enabled && tail -f /var/log/redis-server.log"
networks:
- emqx_bridge
redis-cluster-tls-2:
<<: *redis-node
container_name: redis-cluster-tls-2
redis-cluster-tls-3:
<<: *redis-node
container_name: redis-cluster-tls-3
redis-cluster-tls-4:
<<: *redis-node
container_name: redis-cluster-tls-4
redis-cluster-tls-5:
<<: *redis-node
container_name: redis-cluster-tls-5
redis-cluster-tls-6:
<<: *redis-node
container_name: redis-cluster-tls-6
redis-cluster-tls-create:
<<: *redis-node
container_name: redis-cluster-tls-create
command: >
redis-cli
--cluster create
redis-cluster-tls-1:6389
redis-cluster-tls-2:6389
redis-cluster-tls-3:6389
redis-cluster-tls-4:6389
redis-cluster-tls-5:6389
redis-cluster-tls-6:6389
--cluster-replicas 1
--cluster-yes
--pass "public"
--no-auth-warning
--tls
--insecure
depends_on:
- redis-cluster-tls-1
- redis-cluster-tls-2
- redis-cluster-tls-3
- redis-cluster-tls-4
- redis-cluster-tls-5
- redis-cluster-tls-6

View File

@ -1,41 +1,11 @@
version: "3"
version: '3.9'
services:
redis-sentinel-master:
container_name: redis-sentinel-master
image: public.ecr.aws/docker/library/redis:${REDIS_TAG}
redis_server:
container_name: redis
image: redis:${REDIS_TAG}
volumes:
- ./redis/sentinel-tcp:/usr/local/etc/redis
command: redis-server /usr/local/etc/redis/master.conf
- ./redis/:/data/conf
command: bash -c "/bin/bash /data/conf/redis.sh --node sentinel && tail -f /var/log/redis-server.log"
networks:
- emqx_bridge
redis-sentinel-slave:
container_name: redis-sentinel-slave
image: public.ecr.aws/docker/library/redis:${REDIS_TAG}
volumes:
- ./redis/sentinel-tcp:/usr/local/etc/redis
command: redis-server /usr/local/etc/redis/slave.conf
networks:
- emqx_bridge
depends_on:
- redis-sentinel-master
redis-sentinel:
container_name: redis-sentinel
image: public.ecr.aws/docker/library/redis:${REDIS_TAG}
volumes:
- ./redis/sentinel-tcp/sentinel-base.conf:/usr/local/etc/redis/sentinel-base.conf
depends_on:
- redis-sentinel-master
- redis-sentinel-slave
command: >
bash -c "cp -f /usr/local/etc/redis/sentinel-base.conf /usr/local/etc/redis/sentinel.conf &&
redis-sentinel /usr/local/etc/redis/sentinel.conf"
networks:
- emqx_bridge

View File

@ -1,44 +1,14 @@
version: "3"
version: '3.9'
services:
redis-sentinel-tls-master:
container_name: redis-sentinel-tls-master
image: public.ecr.aws/docker/library/redis:${REDIS_TAG}
redis_server:
container_name: redis
image: redis:${REDIS_TAG}
volumes:
- ./redis/sentinel-tls:/usr/local/etc/redis
- ../../apps/emqx/etc/certs:/etc/certs
command: redis-server /usr/local/etc/redis/master.conf
- ../../apps/emqx/etc/certs/cacert.pem:/etc/certs/ca.crt
- ../../apps/emqx/etc/certs/cert.pem:/etc/certs/redis.crt
- ../../apps/emqx/etc/certs/key.pem:/etc/certs/redis.key
- ./redis/:/data/conf
command: bash -c "/bin/bash /data/conf/redis.sh --node sentinel --tls-enabled && tail -f /var/log/redis-server.log"
networks:
- emqx_bridge
redis-sentinel-tls-slave:
container_name: redis-sentinel-tls-slave
image: public.ecr.aws/docker/library/redis:${REDIS_TAG}
volumes:
- ./redis/sentinel-tls:/usr/local/etc/redis
- ../../apps/emqx/etc/certs:/etc/certs
command: redis-server /usr/local/etc/redis/slave.conf
networks:
- emqx_bridge
depends_on:
- redis-sentinel-tls-master
redis-sentinel-tls:
container_name: redis-sentinel-tls
image: public.ecr.aws/docker/library/redis:${REDIS_TAG}
volumes:
- ./redis/sentinel-tls/sentinel-base.conf:/usr/local/etc/redis/sentinel-base.conf
- ../../apps/emqx/etc/certs:/etc/certs
depends_on:
- redis-sentinel-tls-master
- redis-sentinel-tls-slave
command: >
bash -c "cp -f /usr/local/etc/redis/sentinel-base.conf /usr/local/etc/redis/sentinel.conf &&
redis-sentinel /usr/local/etc/redis/sentinel.conf"
networks:
- emqx_bridge

View File

@ -2,13 +2,14 @@ version: '3.9'
services:
redis_server:
container_name: redis
image: public.ecr.aws/docker/library/redis:${REDIS_TAG}
volumes:
- ./redis/single-tcp:/usr/local/etc/redis/
container_name: redis
image: redis:${REDIS_TAG}
ports:
- "6379:6379"
command: redis-server /usr/local/etc/redis/redis.conf
command:
- redis-server
- "--bind 0.0.0.0 ::"
- --requirepass public
restart: always
networks:
- emqx_bridge

View File

@ -1,17 +1,21 @@
version: '3.9'
services:
redis_server_tls:
container_name: redis-tls
image: public.ecr.aws/docker/library/redis:${REDIS_TAG}
redis_server:
container_name: redis
image: redis:${REDIS_TAG}
volumes:
- ./certs/server.crt:/etc/certs/redis.crt
- ./certs/server.key:/etc/certs/redis.key
- ./certs/ca.crt:/etc/certs/ca.crt
- ./redis/single-tls:/usr/local/etc/redis
ports:
- "6380:6380"
command: redis-server /usr/local/etc/redis/redis.conf
- ../../apps/emqx/etc/certs/cacert.pem:/etc/certs/ca.crt
- ../../apps/emqx/etc/certs/cert.pem:/etc/certs/redis.crt
- ../../apps/emqx/etc/certs/key.pem:/etc/certs/redis.key
command:
- redis-server
- "--bind 0.0.0.0 ::"
- --requirepass public
- --tls-port 6380
- --tls-cert-file /etc/certs/redis.crt
- --tls-key-file /etc/certs/redis.key
- --tls-ca-cert-file /etc/certs/ca.crt
restart: always
networks:
emqx_bridge:
- emqx_bridge

View File

@ -1,41 +0,0 @@
version: '3.9'
services:
mqnamesrvssl:
image: apache/rocketmq:4.9.4
container_name: rocketmq_namesrv_ssl
# ports:
# - 9876:9876
volumes:
- ./rocketmq/logs_ssl:/opt/logs
- ./rocketmq/store_ssl:/opt/store
environment:
JAVA_OPT: "-Dtls.server.mode=enforcing"
command: ./mqnamesrv
networks:
- emqx_bridge
mqbrokerssl:
image: apache/rocketmq:4.9.4
container_name: rocketmq_broker_ssl
# ports:
# - 10909:10909
# - 10911:10911
volumes:
- ./rocketmq/logs_ssl:/opt/logs
- ./rocketmq/store_ssl:/opt/store
- ./rocketmq/conf_ssl/broker.conf:/etc/rocketmq/broker.conf
- ./rocketmq/conf_ssl/plain_acl.yml:/home/rocketmq/rocketmq-4.9.4/conf/plain_acl.yml
environment:
NAMESRV_ADDR: "rocketmq_namesrv_ssl:9876"
JAVA_OPTS: " -Duser.home=/opt -Drocketmq.broker.diskSpaceWarningLevelRatio=0.99"
JAVA_OPT_EXT: "-server -Xms512m -Xmx512m -Xmn512m -Dtls.server.mode=enforcing"
command: ./mqbroker -c /etc/rocketmq/broker.conf
depends_on:
- mqnamesrvssl
networks:
- emqx_bridge
networks:
emqx_bridge:
driver: bridge

View File

@ -1,35 +0,0 @@
version: '3.9'
services:
mqnamesrv:
image: apache/rocketmq:4.9.4
container_name: rocketmq_namesrv
# ports:
# - 9876:9876
volumes:
- ./rocketmq/logs:/opt/logs
- ./rocketmq/store:/opt/store
command: ./mqnamesrv
networks:
- emqx_bridge
mqbroker:
image: apache/rocketmq:4.9.4
container_name: rocketmq_broker
# ports:
# - 10909:10909
# - 10911:10911
volumes:
- ./rocketmq/logs:/opt/logs
- ./rocketmq/store:/opt/store
- ./rocketmq/conf/broker.conf:/etc/rocketmq/broker.conf
- ./rocketmq/conf/plain_acl.yml:/home/rocketmq/rocketmq-4.9.4/conf/plain_acl.yml
environment:
NAMESRV_ADDR: "rocketmq_namesrv:9876"
JAVA_OPTS: " -Duser.home=/opt -Drocketmq.broker.diskSpaceWarningLevelRatio=0.99"
JAVA_OPT_EXT: "-server -Xms512m -Xmx512m -Xmn512m"
command: ./mqbroker -c /etc/rocketmq/broker.conf
depends_on:
- mqnamesrv
networks:
- emqx_bridge

View File

@ -1,19 +0,0 @@
version: '3.9'
services:
sql_server:
container_name: sqlserver
# See also:
# https://mcr.microsoft.com/en-us/product/mssql/server/about
# https://hub.docker.com/_/microsoft-mssql-server
image: ${MS_IMAGE_ADDR}:${SQLSERVER_TAG}
environment:
# See also:
# https://learn.microsoft.com/en-us/sql/linux/sql-server-linux-configure-environment-variables
ACCEPT_EULA: "Y"
MSSQL_SA_PASSWORD: "mqtt_public1"
restart: always
# ports:
# - "1433:1433"
networks:
- emqx_bridge

View File

@ -1,11 +0,0 @@
version: '3.9'
services:
tdengine_server:
container_name: tdengine
image: tdengine/tdengine:${TDENGINE_TAG}
restart: always
ports:
- "6041:6041"
networks:
- emqx_bridge

View File

@ -1,63 +0,0 @@
version: '3.9'
services:
toxiproxy:
container_name: toxiproxy
image: ghcr.io/shopify/toxiproxy:2.5.0
restart: always
networks:
emqx_bridge:
aliases:
- toxiproxy
- toxiproxy.emqx.net
volumes:
- "./toxiproxy.json:/config/toxiproxy.json"
ports:
# Toxiproxy management API
- 8474:8474
# InfluxDB
- 8086:8086
# InfluxDB TLS
- 8087:8087
# SQL Server
- 11433:1433
# MySQL
- 13306:3306
# MySQL TLS
- 13307:3307
# PostgreSQL
- 15432:5432
# PostgreSQL TLS
- 15433:5433
# TDEngine
- 16041:6041
# DynamoDB
- 18000:8000
# RocketMQ
- 19876:9876
# Cassandra
- 19042:9042
# Cassandra TLS
- 19142:9142
# Cassandra No Auth
- 19043:9043
# Cassandra TLS No Auth
- 19143:9143
# S3
- 19000:19000
# S3 TLS
- 19100:19100
# IOTDB (3 total)
- 14242:4242
- 28080:18080
- 38080:38080
# HStreamDB
- 15670:5670
# Kinesis
- 4566:4566
# GreptimeDB
- 4000:4000
- 4001:4001
command:
- "-host=0.0.0.0"
- "-config=/config/toxiproxy.json"

View File

@ -1,34 +1,45 @@
version: '3.9'
services:
erlang:
container_name: erlang
image: ${DOCKER_CT_RUNNER_IMAGE:-ghcr.io/emqx/emqx-builder/5.3-9:1.15.7-26.2.5-3-ubuntu22.04}
erlang23:
container_name: erlang23
image: ghcr.io/emqx/emqx-builder/5.0:23.2.7.2-emqx-2-ubuntu20.04
env_file:
- credentials.env
- conf.env
environment:
GITHUB_ACTIONS: ${GITHUB_ACTIONS:-}
GITHUB_TOKEN: ${GITHUB_TOKEN:-}
GITHUB_RUN_ID: ${GITHUB_RUN_ID:-}
GITHUB_SHA: ${GITHUB_SHA:-}
GITHUB_RUN_NUMBER: ${GITHUB_RUN_NUMBER:-}
GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME:-}
GITHUB_REF: ${GITHUB_REF:-}
GITHUB_ACTIONS: ${GITHUB_ACTIONS}
GITHUB_TOKEN: ${GITHUB_TOKEN}
GITHUB_RUN_ID: ${GITHUB_RUN_ID}
GITHUB_SHA: ${GITHUB_SHA}
GITHUB_RUN_NUMBER: ${GITHUB_RUN_NUMBER}
GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME}
GITHUB_REF: ${GITHUB_REF}
networks:
- emqx_bridge
volumes:
- ../..:/emqx
working_dir: /emqx
tty: true
erlang24:
container_name: erlang24
image: ghcr.io/emqx/emqx-builder/5.0:24.1.1-emqx-1-ubuntu20.04
env_file:
- conf.env
environment:
GITHUB_ACTIONS: ${GITHUB_ACTIONS}
GITHUB_TOKEN: ${GITHUB_TOKEN}
GITHUB_RUN_ID: ${GITHUB_RUN_ID}
GITHUB_SHA: ${GITHUB_SHA}
GITHUB_RUN_NUMBER: ${GITHUB_RUN_NUMBER}
GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME}
GITHUB_REF: ${GITHUB_REF}
networks:
- emqx_bridge
ports:
- 28083:18083
- 2883:1883
volumes:
- ../..:/emqx
- /tmp/emqx-ci/emqx-shared-secret:/var/lib/secret
- ./kerberos/krb5.conf:/etc/kdc/krb5.conf
- ./kerberos/krb5.conf:/etc/krb5.conf
# - ./odbc/odbcinst.ini:/etc/odbcinst.ini
working_dir: /emqx
tty: true
user: "${DOCKER_USER:-root}"
networks:
emqx_bridge:

View File

@ -1,20 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDSjCCAjKgAwIBAgIVAIrN275DCtGnotTPpxwvQ5751N4OMA0GCSqGSIb3DQEB
CwUAMDQxMjAwBgNVBAMTKUVsYXN0aWMgQ2VydGlmaWNhdGUgVG9vbCBBdXRvZ2Vu
ZXJhdGVkIENBMB4XDTI0MDExNjAyMzIyMFoXDTI3MDExNTAyMzIyMFowNDEyMDAG
A1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5lcmF0ZWQgQ0Ew
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCy0nwiEurUkIPFMLV1weVM
pPk/AlwZUzqjkeL44gsY53XI9Q05w/sL9u6PzwrXgTCFWNXzI9+MoAtp8phPkn14
cmg5/3sLe9YcFVFjYK/MoljlUbPDj+4dgk8l+w5FRSi0+JN5krUm7rYk9lojAkeS
fX8RU7ekKGbjBXIFtPxX5GNadu9RidR5GkHM3XroAIoris8bFOzMgFn9iybYnkhq
0S+Hpv0A8FVxzle0KNbPpsIkxXH2DnP2iPTDym9xJNl9Iv9MPtj9XaamH7TmXcSt
MbjkAudKsCw4bRuhHonM16DIUr8sX5UcRcAWyJ1x1qpZaOzMdh2VdYAHNuOsZwzJ
AgMBAAGjUzBRMB0GA1UdDgQWBBTAyDlp8NZfPe8NCGVlHJSVclGOhTAfBgNVHSME
GDAWgBTAyDlp8NZfPe8NCGVlHJSVclGOhTAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
SIb3DQEBCwUAA4IBAQAeIUXRKmC53iirY4P49YspLafspAMf4ndMFQAp+Oc223Vs
hQC4axNoYnUdzWDH6LioAN7P826xNPqtXvTZF9fmeX7K8Nm9Kdj+for+QQI3j6+X
zq98VVkACb8b/Mc9Nac/WBbv/1IKyKgNNta7//WNPgAFolOfti/C0NLsPcKhrM9L
mGbvRX8ZjH8pVJ0YTy4/xfDcF7G/Lxl4Yvb0ZXpuQbvE1+Y0h5aoTNshT/skJxC4
iyVseYr21s3pptKcr6H9KZuSdZe5pbEo+81nT15w+50aswFLk9GCYh5UsQ+1jkRK
cKgxP93i6x8BVbQJGKi1A1jhauSKX2IpWZQsHy4p
-----END CERTIFICATE-----

View File

@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAstJ8IhLq1JCDxTC1dcHlTKT5PwJcGVM6o5Hi+OILGOd1yPUN
OcP7C/buj88K14EwhVjV8yPfjKALafKYT5J9eHJoOf97C3vWHBVRY2CvzKJY5VGz
w4/uHYJPJfsORUUotPiTeZK1Ju62JPZaIwJHkn1/EVO3pChm4wVyBbT8V+RjWnbv
UYnUeRpBzN166ACKK4rPGxTszIBZ/Ysm2J5IatEvh6b9APBVcc5XtCjWz6bCJMVx
9g5z9oj0w8pvcSTZfSL/TD7Y/V2mph+05l3ErTG45ALnSrAsOG0boR6JzNegyFK/
LF+VHEXAFsidcdaqWWjszHYdlXWABzbjrGcMyQIDAQABAoIBAAZOLXYanmjpIRpX
h7h7oikYEplWDRcQBBvvKZaOyuchhznTKTiZmF0xQ3Ny8J4Ndj9ndODWSZxI6uod
FaGNp+qytwnfgDBVGSVDm6tyRfSkX1fTsA/j3/iupvmO/w9yezdZYgLaCVTyex31
yVMdchZgYjYDUpEBYzJbV2xL18+GBRmmPjdXumlpcJqcclxjOQJSu/1WCGVfn/e/
64NQpAm7NSKLqeUl32g0/DvUpmYRfmf7ZjVUjePaJQU6sw5/N+3V9F1hYs8VSWz0
OMzYIfUcvixw+VWx5bu0nWt98FirhsQPjCTThD+DHP6koXGrdXpeMOQE1YZmoV5T
vP0X+FECgYEA5dsKVDQFL67muqz3CNRVM0xDWACCoa8789hYoxvhd1iO3e4kwXBa
ABPcZckioq+HiQ4UIxC2AhQ1FuTeIUTq7LZ0HtAAdKFi48U4LzmPhNUpG1E/HbJ3
GQbi4u1cAzGYuhdywktgBhn9bJ4XB7+X3815Y9qKkuRcwtXgKGDy8HkCgYEAxyly
vc7NBkLfIAmkOsm6VXfvfBTEUBUGi6+k1rarTUxWFIgRuk4FHywwWUTdxWBKJz3n
HNNJb/g7CcufdhLTuWVHQtJDxYf2cJjoi+Kf7/i/Qs9Nyhokj5Mnh6KlZQOWXpZd
Gwn/O13NeDxt1TIVO2xp6zY4FhVEPvaHuxsMCtECgYA7/eR/P6iO3nZoCJbdXhXy
spftEw0FSCg8p53SzIcXUCzRrcM4HavP0181zb5VebzFP8Bvun/WoRGOLSPwyP0L
1T8Pf7huuGSIEERuxvY3dC8raxQvGxJMnOiA0/Ss/Lfg8hfIsEWashPb0pMuOYpZ
JlblgfejCSlQzOOZhlxB+QKBgQCKmizRLV9/0QAJAsy5YPR9UJdpCebJOKiyg806
5Ct5AvwRE9UKjAuCczU+mu+f0fApOSpi5CQCeYVUvtG90UJpjrM2LLCfgoyeNbv4
xgG6dqlcbHrdgK4bATUMbsOd9g4qy4gGLkHi5df9qkhhi5Y9Iajg2X3U2H4DN3yk
WSFbUQKBgQCLz333qWOuT3OBv+EYxHDQUS4YG+dReUos+v0iPJzu+spnfibBF5IC
RjHIhPsdN1byNB0naXOkkz4tUlLGXv6umFgDtQvy/2rxvxQmUGp/WY1VM2+164Xe
NEWdMEU6UckCoMO77kw8JosKhmXCYaSW5bWwnXuEpOj9WWpwjKtxlA==
-----END RSA PRIVATE KEY-----

View File

@ -1,20 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDQDCCAiigAwIBAgIUe90yOBN1KBxOEr2jro3epamZksIwDQYJKoZIhvcNAQEL
BQAwNDEyMDAGA1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5l
cmF0ZWQgQ0EwHhcNMjQwMTE2MDIzMjIyWhcNMjcwMTE1MDIzMjIyWjAPMQ0wCwYD
VQQDEwRlczAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxGEL71pV
j8qoUxEuL7qjRSeS1eHxeKhu2jqEZb7iA1o/7b/26QuYAkoYL+WuJNfYjg5F/O8W
VVuAYIlN6a/mC6wT2t3pX4YSrdp+i3gtAC/LX+8mAeqMQPD+4jitOwjOsYzbuFCb
nYl86dnFPl/+Pmj20mtZ+Wt7oIPD88j6+r5qgv59pHICxS7Cq304LDTRQbNoT8HO
4c9VGGGtWIdtrqiYrz1OVefkffMrvFt77v6dKHn8g5tSyfQUDCoEKtTOc3Pe5zCB
vIMs6HaapoSkl8XdpFHQ712PCZRebAMCrVcPYQ3r8e9GYmLY/NhxEn3dWTqRhHeg
UD13O8o1aBWonwIDAQABo28wbTAdBgNVHQ4EFgQUXvGJtSf2/mLOK17AzUridtCV
xWwwHwYDVR0jBBgwFoAUwMg5afDWXz3vDQhlZRyUlXJRjoUwIAYDVR0RBBkwF4IJ
bG9jYWxob3N0hwR/AAABggRlczAxMAkGA1UdEwQCMAAwDQYJKoZIhvcNAQELBQAD
ggEBACaNq3ZqrbsGvbEtrf6kJGIsTokTFHeVJUSYmt1ZZzDFLSepXAC/J8gphV45
B+YSlkDPNTwMYlf7TUYY872zkdqOXN9r0NUx8MzVAX0+rux0RJba5GGUvJGZDNMX
WM5z9ry1KjQSQ1bSoRQOD3QArmBmhvikHjLc97Vqt56N0wA/ztXWOpNZX/TXmast
aXlUbcfQE73Cdq9tW1ATXwbQ2Gf7vVAUT3zjZSZbNdgPuBicGJHf85Fhjm2ND4+R
sjLIOQ2YgVxNHYbueScc6lJM5RNK194K7WrEQnRyGHT3NaDUm0FFNl//aQeq1ZVw
6gaUYlkTFauXwEYMDK901cWFaBE=
-----END CERTIFICATE-----

View File

@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAxGEL71pVj8qoUxEuL7qjRSeS1eHxeKhu2jqEZb7iA1o/7b/2
6QuYAkoYL+WuJNfYjg5F/O8WVVuAYIlN6a/mC6wT2t3pX4YSrdp+i3gtAC/LX+8m
AeqMQPD+4jitOwjOsYzbuFCbnYl86dnFPl/+Pmj20mtZ+Wt7oIPD88j6+r5qgv59
pHICxS7Cq304LDTRQbNoT8HO4c9VGGGtWIdtrqiYrz1OVefkffMrvFt77v6dKHn8
g5tSyfQUDCoEKtTOc3Pe5zCBvIMs6HaapoSkl8XdpFHQ712PCZRebAMCrVcPYQ3r
8e9GYmLY/NhxEn3dWTqRhHegUD13O8o1aBWonwIDAQABAoIBADJ3A/Om4az5dcce
96EBU9q+IDBBh2Wr1wzSk9p3sqoM47fLqH5b4dzYwJ1yZw2FwFtFFLw6jqExyexE
7JY8gyAFwPZyJ3pKQHuX1gQuRlYxchB9quU8Kn230LA+w1mT2lXrLj2PzWWvAsAv
m837KiFMpP0O5EjB07u8kLsRr1mG6QQ24Kc8oxd7xLXIiPzSvsOpYwo9hmIWENd5
kyA7oSa9EmN3TRTkKOHI7cFQ3DqIGdO71waUofKOdx39DyHS2YKWxDE/LUjkS9zw
1AyZG09l4uowyLRqwYhivEq9Za6rdc64yheuHatAM9kC2AOcVcsCPZquIe90k4t1
L7e9CAECgYEA1W483xTW8ngzxv9MMuPiW+PwVGRpyQrbO6OZOxdWEYfhrZlk5wlW
XK2T85jqooJwMWPTk1F49vZ9WN2KuLkL65GlkEtkFbxmOiFJjXuWwycbFSk05hPs
4AESBYHieaSPcwYhvLeG6g4PFyeqmbAGnKsJaj2ylPwDBOc7LgVlqAECgYEA64wo
gZwaj5SlP8M/OqGH04UVYr1kP/Eq6eiDfMyV5exy+pyzofZyNKUfJfw6sGgyRRHx
OVxlnPMsZ8zbdOXsvUEIeavpwDfQcp5eAURL65I6GMLsx2QpfiN2mDe1MqQW0jct
UleFaURgS84KHLE0+tBBg906jOHGjsE7Q3lyUJ8CgYBYYPev4K9JZGD8bEcfY6Ie
Lvsb1yC+8VHrFkmjYHxxcfUPr89KpGEwq2fynUW72YufyBiajkgq69Ln84U4DNhU
ydDnOXDOV191fsc4YQ8C7LSYRKH1DBcwgwD1at1fRbdpCAb8YHrrfLre+bv5PBzg
zyps5fOHIfwWEbI90lpQAQKBgQDoMMqBMTtxi+r1lucOScrVtFuncOCQs5BE8cIj
1JxzAQk6iBv/LSvZP2gcDq5f1Oaw9YXfsHguJfwA+ozeiAQ9bw0Gu3N52sstIXWz
M/rO5d9FJ2k3CEJqqFSwqkGBAQXKBUA06jeF1DREpX+MVxbNo1rhvMOJusn7UPm1
gtMwKwKBgQCfRzFO10ITwrw8rcRZwO9Axgqf11V7xn6qpgRxj4h0HOErVTCN1H0b
vE3Pz7cxS/g9vFRP37TuqBLfGVzPt9LAEFwCWPeZJLROBLHyu8XrhTbQx+sI2/pe
SBEJAQAHtYasFTE0sBEKNEY2rIt1c29XZhyhhtNKD9gRN/gB355wLg==
-----END RSA PRIVATE KEY-----

View File

@ -1,7 +0,0 @@
instances:
- name: es01
dns:
- es01
- localhost
ip:
- 127.0.0.1

View File

@ -30,12 +30,24 @@ defaults
##----------------------------------------------------------------
## API
##----------------------------------------------------------------
frontend emqx_mgmt
mode tcp
option tcplog
bind *:18083
default_backend emqx_mgmt_back
frontend emqx_dashboard
mode tcp
option tcplog
bind *:18083
default_backend emqx_dashboard_back
backend emqx_mgmt_back
mode http
# balance static-rr
server emqx-1 node1.emqx.io:18083
server emqx-2 node2.emqx.io:18083
backend emqx_dashboard_back
mode http
# balance static-rr
@ -83,13 +95,13 @@ backend emqx_ws_back
frontend emqx_ssl
mode tcp
option tcplog
bind *:8883 ssl crt /var/lib/haproxy/emqx.pem ca-file /usr/local/etc/haproxy/certs/cacert.pem verify required no-sslv3
bind *:8883 ssl crt /tmp/emqx.pem ca-file /usr/local/etc/haproxy/certs/cacert.pem verify required no-sslv3
default_backend emqx_ssl_back
frontend emqx_wss
mode tcp
option tcplog
bind *:8084 ssl crt /var/lib/haproxy/emqx.pem ca-file /usr/local/etc/haproxy/certs/cacert.pem verify required no-sslv3
bind *:8084 ssl crt /tmp/emqx.pem ca-file /usr/local/etc/haproxy/certs/cacert.pem verify required no-sslv3
default_backend emqx_wss_back
backend emqx_ssl_back

View File

@ -1,16 +0,0 @@
#!/usr/bin/env bash
set -e
# influx v1 dbrp create \
# --bucket-id ${DOCKER_INFLUXDB_INIT_BUCKET_ID} \
# --db ${V1_DB_NAME} \
# --rp ${V1_RP_NAME} \
# --default \
# --org ${DOCKER_INFLUXDB_INIT_ORG}
influx v1 auth create \
--username "${DOCKER_INFLUXDB_INIT_USERNAME}" \
--password "${DOCKER_INFLUXDB_INIT_PASSWORD}" \
--write-bucket "${DOCKER_INFLUXDB_INIT_BUCKET_ID}" \
--org "${DOCKER_INFLUXDB_INIT_ORG}"

View File

@ -1,58 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
####################
### REST Service Configuration
####################
# Is the REST service enabled
enable_rest_service=true
# the binding port of the REST service
# rest_service_port=18080
# the default row limit to a REST query response when the rowSize parameter is not given in request
# rest_query_default_row_size_limit=10000
# the expiration time of the user login information cache (in seconds)
# cache_expire_in_seconds=28800
# maximum number of users can be stored in the user login cache.
# cache_max_num=100
# init capacity of users can be stored in the user login cache.
# cache_init_num=10
# is SSL enabled
# enable_https=false
# SSL key store path
# key_store_path=
# SSL key store password
# key_store_pwd=
# SSL trust store path
# trust_store_path=
# SSL trust store password.
# trust_store_pwd=
# SSL timeout (in seconds)
# idle_timeout_in_seconds=50000

View File

@ -1,16 +0,0 @@
KafkaServer {
org.apache.kafka.common.security.plain.PlainLoginModule required
user_admin="password"
user_emqxuser="password";
org.apache.kafka.common.security.scram.ScramLoginModule required
username="admin"
password="password";
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/var/lib/secret/kafka.keytab"
principal="kafka/kafka-1.emqx.net@KDC.EMQX.NET";
};

View File

@ -1,60 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
TIMEOUT=60
echo "+++++++ Sleep for a while to make sure that old keytab and truststore is deleted ++++++++"
sleep 5
echo "+++++++ Wait until Kerberos Keytab is created ++++++++"
timeout $TIMEOUT bash -c 'until [ -f /var/lib/secret/kafka.keytab ]; do sleep 1; done'
echo "+++++++ Wait until SSL certs are generated ++++++++"
timeout $TIMEOUT bash -c 'until [ -f /var/lib/secret/kafka.truststore.jks ]; do sleep 1; done'
keytool -list -v -keystore /var/lib/secret/kafka.keystore.jks -storepass password
sleep 3
echo "+++++++ Starting Kafka ++++++++"
# fork start Kafka
start-kafka.sh &
SERVER=localhost
PORT1=9092
PORT2=9093
TIMEOUT=60
echo "+++++++ Wait until Kafka ports are up ++++++++"
# shellcheck disable=SC2016
timeout $TIMEOUT bash -c 'until printf "" 2>>/dev/null >>/dev/tcp/$0/$1; do sleep 1; done' $SERVER $PORT1
# shellcheck disable=SC2016
timeout $TIMEOUT bash -c 'until printf "" 2>>/dev/null >>/dev/tcp/$0/$1; do sleep 1; done' $SERVER $PORT2
echo "+++++++ Run config commands ++++++++"
kafka-configs.sh --bootstrap-server localhost:9092 --alter --add-config 'SCRAM-SHA-256=[iterations=8192,password=password],SCRAM-SHA-512=[password=password]' --entity-type users --entity-name emqxuser
echo "+++++++ Creating Kafka Topics ++++++++"
# create topics after re-configuration
# there seems to be a race condition when creating the topics (too early)
env KAFKA_CREATE_TOPICS="$KAFKA_CREATE_TOPICS_NG" KAFKA_PORT="$PORT1" create-topics.sh
# create a topic with max.message.bytes=100
/opt/kafka/bin/kafka-topics.sh --create --bootstrap-server "${SERVER}:${PORT1}" --topic max-100-bytes --partitions 1 --replication-factor 1 --config max.message.bytes=100
echo "+++++++ Wait until Kafka ports are down ++++++++"
bash -c 'while printf "" 2>>/dev/null >>/dev/tcp/$0/$1; do sleep 1; done' $SERVER $PORT1
echo "+++++++ Kafka ports are down ++++++++"

View File

@ -1,23 +0,0 @@
[libdefaults]
default_realm = KDC.EMQX.NET
ticket_lifetime = 24h
renew_lifetime = 7d
forwardable = true
rdns = false
dns_lookup_kdc = no
dns_lookup_realm = no
[realms]
KDC.EMQX.NET = {
kdc = kdc
admin_server = kadmin
}
[domain_realm]
kdc.emqx.net = KDC.EMQX.NET
.kdc.emqx.net = KDC.EMQX.NET
[logging]
kdc = FILE:/var/log/kerberos/krb5kdc.log
admin_server = FILE:/var/log/kerberos/kadmin.log
default = FILE:/var/log/kerberos/krb5lib.log

View File

@ -1,25 +0,0 @@
#!/bin/sh
echo "Remove old keytabs"
rm -f /var/lib/secret/kafka.keytab > /dev/null 2>&1
rm -f /var/lib/secret/rig.keytab > /dev/null 2>&1
echo "Create realm"
kdb5_util -P emqx -r KDC.EMQX.NET create -s
echo "Add principals"
kadmin.local -w password -q "add_principal -randkey kafka/kafka-1.emqx.net@KDC.EMQX.NET"
kadmin.local -w password -q "add_principal -randkey rig@KDC.EMQX.NET" > /dev/null
echo "Create keytabs"
kadmin.local -w password -q "ktadd -k /var/lib/secret/kafka.keytab -norandkey kafka/kafka-1.emqx.net@KDC.EMQX.NET " > /dev/null
kadmin.local -w password -q "ktadd -k /var/lib/secret/rig.keytab -norandkey rig@KDC.EMQX.NET " > /dev/null
echo STARTING KDC
/usr/sbin/krb5kdc -n

View File

@ -1,9 +0,0 @@
[ms-sql]
Description=Microsoft ODBC Driver 17 for SQL Server
Driver=/opt/microsoft/msodbcsql17/lib64/libmsodbcsql-17.10.so.2.1
UsageCount=1
[ODBC Driver 17 for SQL Server]
Description=Microsoft ODBC Driver 17 for SQL Server
Driver=/opt/microsoft/msodbcsql17/lib64/libmsodbcsql-17.10.so.2.1
UsageCount=1

View File

@ -1,11 +1,18 @@
FROM docker.io/zmstone/openldap:2.5.16@sha256:a813922115a1d1f1b974399595921d1778fae22b3f1ee15dcfa8cfa89700dbc7
FROM buildpack-deps:stretch
ARG LDAP_TAG=2.4.50
RUN apt-get update && apt-get install -y groff groff-base
RUN wget ftp://ftp.openldap.org/pub/OpenLDAP/openldap-release/openldap-${LDAP_TAG}.tgz \
&& gunzip -c openldap-${LDAP_TAG}.tgz | tar xvfB - \
&& cd openldap-${LDAP_TAG} \
&& ./configure && make depend && make && make install \
&& cd .. && rm -rf openldap-${LDAP_TAG}
COPY .ci/docker-compose-file/openldap/slapd.conf /usr/local/etc/openldap/slapd.conf
COPY apps/emqx_ldap/test/data/emqx.io.ldif /usr/local/etc/openldap/schema/emqx.io.ldif
COPY apps/emqx_ldap/test/data/emqx.schema /usr/local/etc/openldap/schema/emqx.schema
COPY .ci/docker-compose-file/certs/ca.crt /usr/local/etc/openldap/cacert.pem
COPY .ci/docker-compose-file/certs/server.crt /usr/local/etc/openldap/cert.pem
COPY .ci/docker-compose-file/certs/server.key /usr/local/etc/openldap/key.pem
COPY apps/emqx_auth_ldap/emqx.io.ldif /usr/local/etc/openldap/schema/emqx.io.ldif
COPY apps/emqx_auth_ldap/emqx.schema /usr/local/etc/openldap/schema/emqx.schema
COPY apps/emqx_auth_ldap/test/certs/*.pem /usr/local/etc/openldap/
RUN mkdir -p /usr/local/etc/openldap/data \
&& slapadd -l /usr/local/etc/openldap/schema/emqx.io.ldif -f /usr/local/etc/openldap/slapd.conf

View File

@ -1,61 +0,0 @@
# LDAP authentication
To run manual tests with the default docker-compose files, expose the openldap container port by uncommenting the `ports` config in `docker-compose-ldap.yaml`.
To start openldap:
```
docker-compose -f ./.ci/docker-compose-file/docker-compose.yaml -f ./.ci/docker-compose-file/docker-compose-ldap.yaml up -d
```
## LDAP database
The LDAP database is populated from the files below (source path, then in-container destination):
```
apps/emqx_ldap/test/data/emqx.io.ldif /usr/local/etc/openldap/schema/emqx.io.ldif
apps/emqx_ldap/test/data/emqx.schema /usr/local/etc/openldap/schema/emqx.schema
```
## Minimal EMQX config
```
authentication = [
{
backend = ldap
base_dn = "uid=${username},ou=testdevice,dc=emqx,dc=io"
filter = "(& (objectClass=mqttUser) (uid=${username}))"
mechanism = password_based
method {
is_superuser_attribute = isSuperuser
password_attribute = userPassword
type = hash
}
password = public
pool_size = 8
query_timeout = "5s"
request_timeout = "10s"
server = "localhost:1389"
username = "cn=root,dc=emqx,dc=io"
}
]
```
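Before pointing EMQX at the directory, a quick bind check confirms that the root DN and password are accepted. A minimal sketch, assuming the container's LDAP port is published on `localhost:389` as in the `ldapsearch` example below:
```
# simple bind as the root DN; prints the bound DN on success
ldapwhoami -x -H ldap://localhost:389 -D "cn=root,dc=emqx,dc=io" -w public
```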
## Example ldapsearch command
```
ldapsearch -x -H ldap://localhost:389 -D "cn=root,dc=emqx,dc=io" -W -b "uid=mqttuser0007,ou=testdevice,dc=emqx,dc=io" "(&(objectClass=mqttUser)(uid=mqttuser0007))"
```
## Example mqttx command
The client password hashes are generated from their username.
```
# disabled user
mqttx pub -t 't/1' -h localhost -p 1883 -m x -u mqttuser0006 -P mqttuser0006
# enabled super-user
mqttx pub -t 't/1' -h localhost -p 1883 -m x -u mqttuser0007 -P mqttuser0007
```
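To watch these publishes end to end, a subscriber can be started in another terminal. A sketch reusing the enabled super-user credentials:
```
# subscribe on the same topic and observe the test messages arriving
mqttx sub -t 't/1' -h localhost -p 1883 -u mqttuser0007 -P mqttuser0007
```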

View File

@ -1,13 +1,14 @@
include /usr/local/etc/openldap/schema/core.schema
include /usr/local/etc/openldap/schema/cosine.schema
include /usr/local/etc/openldap/schema/inetorgperson.schema
include /usr/local/etc/openldap/schema/ppolicy.schema
include /usr/local/etc/openldap/schema/emqx.schema
TLSCACertificateFile /usr/local/etc/openldap/cacert.pem
TLSCertificateFile /usr/local/etc/openldap/cert.pem
TLSCertificateKeyFile /usr/local/etc/openldap/key.pem
database mdb
database bdb
suffix "dc=emqx,dc=io"
rootdn "cn=root,dc=emqx,dc=io"
rootpw {SSHA}eoF7NhNrejVYYyGHqnt+MdKNBh4r1w3W

View File

@ -1,6 +0,0 @@
certs
hostname
hosts
otel-collector.json
otel-collector-tls.json
resolv.conf

View File

@ -1,52 +0,0 @@
receivers:
otlp:
protocols:
grpc:
tls:
ca_file: /etc/certs/ca.crt
cert_file: /etc/certs/server.crt
key_file: /etc/certs/server.key
http:
tls:
ca_file: /etc/certs/ca.crt
cert_file: /etc/certs/server.crt
key_file: /etc/certs/server.key
exporters:
logging:
verbosity: detailed
otlp:
endpoint: jaeger.emqx.net:4317
tls:
insecure: true
debug:
verbosity: detailed
file:
path: /etc/otel-collector-tls.json
processors:
batch:
# send data immediately
timeout: 0
extensions:
health_check:
zpages:
endpoint: :55679
service:
extensions: [zpages, health_check]
pipelines:
traces:
receivers: [otlp]
processors: [batch]
exporters: [logging, otlp]
metrics:
receivers: [otlp]
processors: [batch]
exporters: [logging]
logs:
receivers: [otlp]
processors: [batch]
exporters: [logging, file]

View File

@ -1,51 +0,0 @@
receivers:
otlp:
protocols:
grpc:
tls:
# ca_file: /etc/ca.pem
# cert_file: /etc/server.pem
# key_file: /etc/server.key
http:
tls:
# ca_file: /etc/ca.pem
# cert_file: /etc/server.pem
# key_file: /etc/server.key
exporters:
logging:
verbosity: detailed
otlp:
endpoint: jaeger.emqx.net:4317
tls:
insecure: true
debug:
verbosity: detailed
file:
path: /etc/otel-collector.json
processors:
batch:
# send data immediately
timeout: 0
extensions:
health_check:
zpages:
endpoint: :55679
service:
extensions: [zpages, health_check]
pipelines:
traces:
receivers: [otlp]
processors: [batch]
exporters: [logging, otlp]
metrics:
receivers: [otlp]
processors: [batch]
exporters: [logging]
logs:
receivers: [otlp]
processors: [batch]
exporters: [logging, file]

View File

@ -1,10 +1,10 @@
ARG BUILD_FROM=public.ecr.aws/docker/library/postgres:13@sha256:fa69de30d02652cfdfb68166692e5186f6972c17f83c89c71ac8ff0916d46ae3
ARG BUILD_FROM=postgres:11
FROM ${BUILD_FROM}
ARG POSTGRES_USER=postgres
COPY --chown=$POSTGRES_USER ./pgsql/pg_hba_tls.conf /var/lib/postgresql/pg_hba.conf
COPY --chown=$POSTGRES_USER certs/server.key /var/lib/postgresql/server.key
COPY --chown=$POSTGRES_USER certs/server.crt /var/lib/postgresql/server.crt
COPY --chown=$POSTGRES_USER certs/ca.crt /var/lib/postgresql/root.crt
COPY --chown=$POSTGRES_USER .ci/docker-compose-file/pgsql/pg_hba.conf /var/lib/postgresql/pg_hba.conf
COPY --chown=$POSTGRES_USER apps/emqx/etc/certs/key.pem /var/lib/postgresql/server.key
COPY --chown=$POSTGRES_USER apps/emqx/etc/certs/cert.pem /var/lib/postgresql/server.crt
COPY --chown=$POSTGRES_USER apps/emqx/etc/certs/cacert.pem /var/lib/postgresql/root.crt
RUN chmod 600 /var/lib/postgresql/pg_hba.conf
RUN chmod 600 /var/lib/postgresql/server.key
RUN chmod 600 /var/lib/postgresql/server.crt

View File

@ -1,8 +0,0 @@
# TYPE DATABASE USER CIDR-ADDRESS METHOD
local all all trust
# TODO: also test with `cert`? will require client certs
hostssl all all 0.0.0.0/0 password
hostssl all all ::/0 password
hostssl all www-data 0.0.0.0/0 cert clientcert=1
hostssl all postgres 0.0.0.0/0 cert clientcert=1

View File

@ -1,3 +0,0 @@

View File

@ -6,9 +6,6 @@
set -x
set +e
# shellcheck disable=SC3028 disable=SC3054
SCRIPT_DIR="$( dirname -- "$( readlink -f -- "$0"; )"; )"
EMQX_TEST_DB_BACKEND=$1
if [ "$EMQX_TEST_DB_BACKEND" = "rlog" ]
then
@ -21,14 +18,13 @@ else
fi
apk update && apk add git curl
git clone -b develop-5.0 https://github.com/emqx/paho.mqtt.testing.git /paho.mqtt.testing
git clone -b develop-4.0 https://github.com/emqx/paho.mqtt.testing.git /paho.mqtt.testing
pip install pytest
pip install --require-hashes -r "$SCRIPT_DIR/requirements.txt"
pytest --retries 3 -v /paho.mqtt.testing/interoperability/test_client/V5/test_connect.py -k test_basic --host "$TARGET_HOST"
pytest -v /paho.mqtt.testing/interoperability/test_client/V5/test_connect.py -k test_basic --host "$TARGET_HOST"
RESULT=$?
pytest --retries 3 -v /paho.mqtt.testing/interoperability/test_client --host "$TARGET_HOST"
pytest -v /paho.mqtt.testing/interoperability/test_client --host "$TARGET_HOST"
RESULT=$(( RESULT + $? ))
# pytest -v /paho.mqtt.testing/interoperability/test_cluster --host1 "node1.emqx.io" --host2 "node2.emqx.io"

View File

@ -1,21 +0,0 @@
pytest-retry==1.6.1 \
--hash=sha256:3d420afc08e61ed3be28ecbb544371041b1b8e5fea7c94eb97cefa0d4ea9825c \
--hash=sha256:3d663159a9be4d6878705822cf27a0976f99ec1bc4f2d9494e80403b17f700f2
pytest==7.4.4 \
--hash=sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280 \
--hash=sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8
pluggy==1.3.0 \
--hash=sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12 \
--hash=sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7
iniconfig==2.0.0 \
--hash=sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3 \
--hash=sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374
tomli==2.0.1 \
--hash=sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc \
--hash=sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f
exceptiongroup==1.2.0 \
--hash=sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14 \
--hash=sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68
packaging==23.2 \
--hash=sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5 \
--hash=sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7

View File

@ -1,7 +0,0 @@
listeners.ssl.default = 5671
ssl_options.cacertfile = /opt/certs/ca.crt
ssl_options.certfile = /opt/certs/server.crt
ssl_options.keyfile = /opt/certs/server.key
ssl_options.verify = verify_peer
ssl_options.fail_if_no_peer_cert = true

View File

@ -1,19 +0,0 @@
bind :: 0.0.0.0
port 6379
cluster-enabled yes
masteruser default
masterauth public
aclfile /usr/local/etc/redis/users.acl
protected-mode no
daemonize no
loglevel notice
logfile ""
always-show-logo no
save ""
appendonly no

View File

@ -1,2 +0,0 @@
user default on >public ~* &* +@all
user test_user on >test_passwd ~* &* +@all

View File

@ -1,29 +0,0 @@
bind :: 0.0.0.0
port 6379
cluster-enabled yes
masteruser default
masterauth public
aclfile /usr/local/etc/redis/users.acl
tls-port 6389
tls-cert-file /etc/certs/cert.pem
tls-key-file /etc/certs/key.pem
tls-ca-cert-file /etc/certs/cacert.pem
tls-auth-clients no
tls-replication yes
tls-cluster yes
protected-mode no
daemonize no
loglevel notice
logfile ""
always-show-logo no
save ""
appendonly no

View File

@ -1,2 +0,0 @@
user default on >public ~* &* +@all
user test_user on >test_passwd ~* &* +@all

View File

@ -0,0 +1,11 @@
daemonize yes
bind 0.0.0.0 ::
logfile /var/log/redis-server.log
tls-cert-file /etc/certs/redis.crt
tls-key-file /etc/certs/redis.key
tls-ca-cert-file /etc/certs/ca.crt
tls-replication yes
tls-cluster yes
protected-mode no
requirepass public
masterauth public

View File

@ -0,0 +1,5 @@
daemonize yes
bind 0.0.0.0 ::
logfile /var/log/redis-server.log
requirepass public
masterauth public

View File

@ -0,0 +1,125 @@
#!/bin/bash
set -x
LOCAL_IP=$(hostname -i | grep -oE '((25[0-5]|(2[0-4]|1[0-9]|[1-9]|)[0-9])\.){3}(25[0-5]|(2[0-4]|1[0-9]|[1-9]|)[0-9])' | head -n 1)
node=single
tls=false
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
-n|--node)
node="$2"
shift # past argument
shift # past value
;;
-t|--tls-enabled)
tls="$2"
shift # past argument
shift # past value
;;
*)
shift # past argument
;;
esac
done
rm -f \
/data/conf/r7000i.log \
/data/conf/r7001i.log \
/data/conf/r7002i.log \
/data/conf/nodes.7000.conf \
/data/conf/nodes.7001.conf \
/data/conf/nodes.7002.conf ;
if [ "${node}" = "cluster" ] ; then
if $tls ; then
redis-server /data/conf/redis-tls.conf --port 7000 --cluster-config-file /data/conf/nodes.7000.conf \
--tls-port 8000 --cluster-enabled yes ;
redis-server /data/conf/redis-tls.conf --port 7001 --cluster-config-file /data/conf/nodes.7001.conf \
--tls-port 8001 --cluster-enabled yes;
redis-server /data/conf/redis-tls.conf --port 7002 --cluster-config-file /data/conf/nodes.7002.conf \
--tls-port 8002 --cluster-enabled yes;
else
redis-server /data/conf/redis.conf --port 7000 --cluster-config-file /data/conf/nodes.7000.conf --cluster-enabled yes;
redis-server /data/conf/redis.conf --port 7001 --cluster-config-file /data/conf/nodes.7001.conf --cluster-enabled yes;
redis-server /data/conf/redis.conf --port 7002 --cluster-config-file /data/conf/nodes.7002.conf --cluster-enabled yes;
fi
elif [ "${node}" = "sentinel" ] ; then
if $tls ; then
redis-server /data/conf/redis-tls.conf --port 7000 --cluster-config-file /data/conf/nodes.7000.conf \
--tls-port 8000 --cluster-enabled no;
redis-server /data/conf/redis-tls.conf --port 7001 --cluster-config-file /data/conf/nodes.7001.conf \
--tls-port 8001 --cluster-enabled no --slaveof "$LOCAL_IP" 8000;
redis-server /data/conf/redis-tls.conf --port 7002 --cluster-config-file /data/conf/nodes.7002.conf \
--tls-port 8002 --cluster-enabled no --slaveof "$LOCAL_IP" 8000;
else
redis-server /data/conf/redis.conf --port 7000 --cluster-config-file /data/conf/nodes.7000.conf \
--cluster-enabled no;
redis-server /data/conf/redis.conf --port 7001 --cluster-config-file /data/conf/nodes.7001.conf \
--cluster-enabled no --slaveof "$LOCAL_IP" 7000;
redis-server /data/conf/redis.conf --port 7002 --cluster-config-file /data/conf/nodes.7002.conf \
--cluster-enabled no --slaveof "$LOCAL_IP" 7000;
fi
fi
REDIS_LOAD_FLG=true;
while $REDIS_LOAD_FLG;
do
sleep 1;
redis-cli --pass public --no-auth-warning -p 7000 info 1> /data/conf/r7000i.log 2> /dev/null;
if [ -s /data/conf/r7000i.log ]; then
:
else
continue;
fi
redis-cli --pass public --no-auth-warning -p 7001 info 1> /data/conf/r7001i.log 2> /dev/null;
if [ -s /data/conf/r7001i.log ]; then
:
else
continue;
fi
redis-cli --pass public --no-auth-warning -p 7002 info 1> /data/conf/r7002i.log 2> /dev/null;
if [ -s /data/conf/r7002i.log ]; then
:
else
continue;
fi
if [ "${node}" = "cluster" ] ; then
if $tls ; then
yes "yes" | redis-cli --cluster create "$LOCAL_IP:8000" "$LOCAL_IP:8001" "$LOCAL_IP:8002" --pass public --no-auth-warning --tls true --cacert /etc/certs/ca.crt --cert /etc/certs/redis.crt --key /etc/certs/redis.key;
else
yes "yes" | redis-cli --cluster create "$LOCAL_IP:7000" "$LOCAL_IP:7001" "$LOCAL_IP:7002" --pass public --no-auth-warning;
fi
elif [ "${node}" = "sentinel" ] ; then
tee /_sentinel.conf>/dev/null << EOF
port 26379
bind 0.0.0.0 ::
daemonize yes
logfile /var/log/redis-server.log
dir /tmp
EOF
if $tls ; then
cat >>/_sentinel.conf<<EOF
tls-port 26380
tls-replication yes
tls-cert-file /etc/certs/redis.crt
tls-key-file /etc/certs/redis.key
tls-ca-cert-file /etc/certs/ca.crt
sentinel monitor mymaster $LOCAL_IP 8000 1
EOF
else
cat >>/_sentinel.conf<<EOF
sentinel monitor mymaster $LOCAL_IP 7000 1
EOF
fi
redis-server /_sentinel.conf --sentinel;
fi
REDIS_LOAD_FLG=false;
done
exit 0;

View File

@ -1,14 +0,0 @@
bind :: 0.0.0.0
port 6379
aclfile /usr/local/etc/redis/users.acl
protected-mode no
daemonize no
loglevel notice
logfile ""
always-show-logo no
save ""
appendonly no

View File

@ -1,7 +0,0 @@
sentinel resolve-hostnames yes
bind :: 0.0.0.0
sentinel monitor mytcpmaster redis-sentinel-master 6379 1
sentinel auth-pass mytcpmaster public
sentinel down-after-milliseconds mytcpmaster 10000
sentinel failover-timeout mytcpmaster 20000

View File

@ -1,18 +0,0 @@
bind :: 0.0.0.0
port 6379
replicaof redis-sentinel-master 6379
masteruser default
masterauth public
aclfile /usr/local/etc/redis/users.acl
protected-mode no
daemonize no
loglevel notice
logfile ""
always-show-logo no
save ""
appendonly no

View File

@ -1,2 +0,0 @@
user default on >public ~* &* +@all
user test_user on >test_passwd ~* &* +@all

Some files were not shown because too many files have changed in this diff.