Compare commits

..

7 Commits

Author SHA1 Message Date
ieQu1 f37d9739a5 Merge pull request #11238 from ieQu1/customer-e502/patch (chore(ekka): Update version to 0.15.5) 2023-07-12 16:50:33 +02:00
ieQu1 2808523419 fix: Dialyzer 2023-07-12 11:06:46 +02:00
ieQu1 7fdcca784d fix: Disable waiting for shards on boot 2023-07-12 10:32:25 +02:00
ieQu1 6aca61d121 chore: Fix CI 2023-07-11 11:32:06 +02:00
ieQu1 71f73f68d1 chore(ekka): Update version to 0.15.5 2023-07-10 12:26:50 +02:00
JimMoen 6c36152612 fix: reboot `emqx_license` app for join cluster 2023-05-23 17:01:31 +08:00
JimMoen 326c71205d chore: compatibility make 4.4+ (Port from [PR#10627](https://github.com/emqx/emqx/pull/10627)) 2023-05-23 16:54:17 +08:00
3714 changed files with 102033 additions and 379646 deletions

View File

@ -6,23 +6,5 @@ LDAP_TAG=2.4.50
INFLUXDB_TAG=2.5.0
TDENGINE_TAG=3.0.2.4
DYNAMO_TAG=1.21.0
CASSANDRA_TAG=3.11
MINIO_TAG=RELEASE.2023-03-20T20-16-18Z
OPENTS_TAG=9aa7f88
KINESIS_TAG=2.1
HSTREAMDB_TAG=v0.19.3
HSTREAMDB_ZK_TAG=3.8.1
MS_IMAGE_ADDR=mcr.microsoft.com/mssql/server
SQLSERVER_TAG=2019-CU19-ubuntu-20.04
# Password for the 'elastic' user (at least 6 characters)
ELASTIC_PASSWORD="emqx123"
# Password for the 'kibana_system' user (at least 6 characters)
KIBANA_PASSWORD="emqx123"
# Version of Elastic products
ELASTIC_TAG=8.11.4
LICENSE=basic
TARGET=emqx/emqx

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,23 +0,0 @@
Certificate and Key files for testing
## Cassandra (v3.x)
### How to convert server PEM to JKS Format
1. Convert server.crt and server.key to server.p12
```bash
openssl pkcs12 -export -in server.crt -inkey server.key -out server.p12 -name "certificate"
```
2. Convert server.p12 to server.jks
```bash
keytool -importkeystore -srckeystore server.p12 -srcstoretype pkcs12 -destkeystore server.jks
```
### How to convert CA PEM certificate to truststore.jks
```bash
keytool -import -file ca.pem -keystore truststore.jks
```
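To sanity-check the resulting stores, `keytool -list` can be used — a minimal sketch, assuming a JDK `keytool` is on the PATH and the files were created as above:
```bash
# Inspect the server keystore and the CA truststore created above
keytool -list -v -keystore server.jks
keytool -list -keystore truststore.jks
```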

View File

@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAzs74tdftT7xGMGXQSoX/nnFkFAOjNtEVOI3bChzR+w6Xwo8Z
OUiOuOjynKvsJeltdmc0L+cbHZh7j+aHuAqVYxavqaqhFneF0f03t17qju9AixoV
JXgNT3ru56aZFa6Ov6NhfZfRirGnbNrg2RhuNeYZ4TYLH7iMR36exNFP83glXwXM
inMd1tsHL7xHLf3KjCbkusA5ncFWcpIUtpuWVn9aAE402dN7BJWfAbkQ4Y3VToR1
P/T+W6WBldv0i2WlNbfiuAzuapA3EzJwoyTrG2Qyz7EtXM8XZdOZ6oJmW4s7c4V/
FBT5knNtmXTt78xBBlIPFas5BAJIeV4eADx9MwIDAQABAoIBAQCZTvcynpJuxIxn
vmItjK5U/4wIBjZNIawQk6BoG7tR2JyJ/1jcjTw4OX/4wr450JRz7MfUJweD5hDb
OTMtLLNXlG6+YR4vsIUEiSlvhy5srVH0jG5Wq2t6mxBVq7vaRd/OkshnuU79+Pq7
iHqclS7GSACxYkXWyxE6wtPh5aTWP8joK/LvYFiOqKPilUnLZ4hBhmL7CRUCZ0ZA
QGNyEhlmiAL+LNKW2RLXPBxlKX21X78ahUQmkkTM0lBK9x6hm4dD3SpLqmZyQQ9M
UfiMbU6XOYlDva/USZzrvTDlRf9uCG9QOsZzngP1aIy8Cq3QHECOeMIPO9WQLMll
SyY+SpyJAoGBAP4fhnbDpQC6ekd9TNoU9GE/FNNNGKLh82GDgnGcWU/oIzv8GlaR
rkEHTb6aRoPpjTxWIjJpScs9kycC+7N3oNo9rub4s5UvllI+EgQ95+j/5fnZx6gO
la8ousLy1hTYu9C0nTWdTV3YtfC0l0opn7Friv5QafNmhSn74DqrH0BHAoGBANBV
/NhBDAH1PHzYA+XuNLYTLv56Q4osmoen17nPnFNWb1TtWblzb0yWp86GGDFcs8CZ
eH0mXCRUzGMSWtOHe4CbIm2brAYXuL2t6+DZ1A22gsnW5avNrosZRS7eN7BE7DDj
5cp9+Es9UWnArzJU7jSWwAtA6o47WHfHU/pqRB21AoGAGx6eKPqEF2nPNuXmV7e4
xNAIluw5XtiiMpvoRdubpG1vpS0oWmi9oe73mwm30MgR7Ih8qciWuXvewmENH3/6
yI+gpMGR2K/1aN166rz4jOMSVfGp3wN/cev00m0774mZsZI03M3mvccs031ST/XV
Nwf1E2Ldi747I9nfeiNc+G0CgYEAslFHD1ntiyd6VGkYPQ978nPM/2dqs7OluILC
tHmslfAfbpOQ/ph9JRK2IqDHyEhOWoWBiazxpO8n2Yx2TSNjZBpkh2h8/uIC7+cT
Q+tuAya6H0ReZISx5sEEZC8zfx4fA2Gs53qWsN+U9W1FB1GGaWC2k2tG1+KXwD3N
9UJLdxkCgYBB96dsfT7nXmy0JLUz0rQ4umBje6H5uvuaevWdVMEptHB+O7+6CAse
OVwqlFLQ4QC7s4/P9FQwfr/0uMRInB1aC043Haa1LbiRcRIlSuBDUezK5xidUbz+
uB/ABkwwEuqW3Ns1+QieJyyfoNYKZ2v0RtYxBuieKOpUCm3oNFZRWg==
-----END RSA PRIVATE KEY-----

View File

@ -1,25 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIEMjCCAhoCFCOrAvLNRztbFFcN0zrCQXoj73cHMA0GCSqGSIb3DQEBCwUAMDQx
EjAQBgNVBAoMCUVNUVggVGVzdDEeMBwGA1UEAwwVQ2VydGlmaWNhdGUgQXV0aG9y
aXR5MB4XDTIzMDMxNzA5MzgzMVoXDTMzMDMxNDA5MzgzMVowdzELMAkGA1UEBhMC
U0UxEjAQBgNVBAgMCVN0b2NraG9sbTESMBAGA1UEBwwJU3RvY2tob2xtMRIwEAYD
VQQKDAlNeU9yZ05hbWUxGDAWBgNVBAsMD015U2VydmljZUNsaWVudDESMBAGA1UE
AwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzs74
tdftT7xGMGXQSoX/nnFkFAOjNtEVOI3bChzR+w6Xwo8ZOUiOuOjynKvsJeltdmc0
L+cbHZh7j+aHuAqVYxavqaqhFneF0f03t17qju9AixoVJXgNT3ru56aZFa6Ov6Nh
fZfRirGnbNrg2RhuNeYZ4TYLH7iMR36exNFP83glXwXMinMd1tsHL7xHLf3KjCbk
usA5ncFWcpIUtpuWVn9aAE402dN7BJWfAbkQ4Y3VToR1P/T+W6WBldv0i2WlNbfi
uAzuapA3EzJwoyTrG2Qyz7EtXM8XZdOZ6oJmW4s7c4V/FBT5knNtmXTt78xBBlIP
Fas5BAJIeV4eADx9MwIDAQABMA0GCSqGSIb3DQEBCwUAA4ICAQBHgfJgMjTgWZXG
eyzIVxaqzWTLxrT7zPy09Mw4qsAl1TfWg9/r8nuskq4bjBQuKm0k9H0HQXz//eFC
Qn85qTHyAmZok6c4ljO2P+kTIl3nkKk5zudmeCTy3W9YBdyWvDXQ/GhbywIfO+1Y
fYA82I5rXVg4c9fUVTNczUFyDNcZzoJoqCS8jwFDtNR0N/fptJN14j8pnYvNV+4c
hZ+pcnhSoz7dD8WjyYCc/QCajJdTyb15i072HxuGmhwltjnwIE/2xfeXCCeUTzsJ
8h4/ABRu9VEqjqDQHepXIflYuVhU38SL0f4ly7neMXmytAbXwGLVM+ME81HG60Bw
8hkfSwKBbEkhUmD6+V1bdUz14I6HjWJt/INtFU+O+MYZbIFt4ep9GKLV3nk97CyL
fwDv5b4WXdC68iWMZqSrADAXr+VG3DgHqpNItj0XmhY6ihmt5tA3Z6IZJj45TShA
vRqTCx3Hf6EO3zf4KCrzaPSSSfVLnGKftA/6oz3bl8EK2e2M44lOspRk4l9k+iBR
sfHPmpiWY0hIiFtd3LD/uGDSBcGkKjU/fLvJZXJpVXwmT9pmK9LzkAPOK1rr97e9
esHqwe1bo3z7IdeREZ0wdxqGL3BNpm4f1NaIzV/stX+vScau0AyFYXzumjeBIpKa
Gt0A+dZnUfWG6qn5NiRENXxFQSppaA==
-----END CERTIFICATE-----

View File

@ -1,7 +0,0 @@
MONGO_USERNAME=emqx
MONGO_PASSWORD=passw0rd
MONGO_AUTHSOURCE=admin
# See "Environment Variables" @ https://hub.docker.com/_/mongo
MONGO_INITDB_ROOT_USERNAME=${MONGO_USERNAME}
MONGO_INITDB_ROOT_PASSWORD=${MONGO_PASSWORD}

View File

@ -1,24 +0,0 @@
version: '3.9'
services:
azurite:
container_name: azurite
image: mcr.microsoft.com/azure-storage/azurite:3.30.0
restart: always
expose:
- "10000"
# ports:
# - "10000:10000"
networks:
- emqx_bridge
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:10000"]
interval: 30s
timeout: 5s
retries: 4
command:
- azurite-blob
- "--blobHost"
- 0.0.0.0
- "-d"
- debug.log

View File

@ -1,38 +0,0 @@
version: '3.9'
x-cassandra: &cassandra
restart: always
image: public.ecr.aws/docker/library/cassandra:${CASSANDRA_TAG:-3.11}
environment:
CASSANDRA_BROADCAST_ADDRESS: "1.2.3.4"
CASSANDRA_RPC_ADDRESS: "0.0.0.0"
HEAP_NEWSIZE: "128M"
MAX_HEAP_SIZE: "2048M"
#ports:
# - "9042:9042"
# - "9142:9142"
command:
- /bin/bash
- -c
- |
/opt/cassandra/bin/cassandra -f -R > /cassandra.log &
/opt/cassandra/bin/cqlsh -u cassandra -p cassandra -e "CREATE KEYSPACE mqtt WITH REPLICATION = { 'class':'SimpleStrategy','replication_factor':1};"
while [[ $$? -ne 0 ]];do sleep 5; /opt/cassandra/bin/cqlsh -u cassandra -p cassandra -e "CREATE KEYSPACE mqtt WITH REPLICATION = { 'class':'SimpleStrategy','replication_factor':1};"; done
/opt/cassandra/bin/cqlsh -u cassandra -p cassandra -e "describe keyspaces;"
tail -f /cassandra.log
networks:
- emqx_bridge
services:
cassandra_server:
<<: *cassandra
container_name: cassandra
volumes:
- ./certs:/certs
- ./cassandra/cassandra.yaml:/etc/cassandra/cassandra.yaml
cassandra_noauth_server:
<<: *cassandra
container_name: cassandra_noauth
volumes:
- ./certs:/certs
- ./cassandra/cassandra_noauth.yaml:/etc/cassandra/cassandra.yaml
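Once the `cassandra` container is healthy, the keyspace created by the startup loop can be verified from the host — a minimal sketch, assuming the container name and credentials from this compose file:
```bash
# List keyspaces through the same cqlsh binary the startup command uses
docker exec -it cassandra /opt/cassandra/bin/cqlsh -u cassandra -p cassandra -e "DESCRIBE KEYSPACES;"
```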

View File

@ -1,30 +0,0 @@
version: '3.9'
services:
couchbase:
container_name: couchbase
hostname: couchbase
image: ghcr.io/emqx/couchbase:1.0.0
restart: always
expose:
- 8091-8093
# ports:
# - "8091-8093:8091-8093"
networks:
- emqx_bridge
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8093/admin/ping"]
interval: 30s
timeout: 5s
retries: 4
environment:
- CLUSTER=localhost
- USER=admin
- PASS=public
- PORT=8091
- RAMSIZEMB=2048
- RAMSIZEINDEXMB=512
- RAMSIZEFTSMB=512
- BUCKETS=mqtt
- BUCKETSIZES=100
- AUTOREBALANCE=true

View File

@ -3,7 +3,7 @@ version: '3.9'
services:
dynamodb-local:
container_name: dynamo
image: public.ecr.aws/aws-dynamodb-local/aws-dynamodb-local:${DYNAMO_TAG}
image: amazon/dynamodb-local:${DYNAMO_TAG}
restart: always
ports:
- "8000:8000"

View File

@ -1,111 +0,0 @@
version: "3.9"
# hint: run the following if the container fails to start locally
# sysctl -w vm.max_map_count=262144
services:
setup:
image: public.ecr.aws/elastic/elasticsearch:${ELASTIC_TAG}
volumes:
- ./elastic:/usr/share/elasticsearch/config/certs
user: "0"
command: >
bash -c '
if [ x${ELASTIC_PASSWORD} == x ]; then
echo "Set the ELASTIC_PASSWORD environment variable in the .env file";
exit 1;
elif [ x${KIBANA_PASSWORD} == x ]; then
echo "Set the KIBANA_PASSWORD environment variable in the .env file";
exit 1;
fi;
echo "Setting file permissions"
chown -R root:root config/certs;
find . -type d -exec chmod 750 \{\} \;;
find . -type f -exec chmod 640 \{\} \;;
echo "Waiting for Elasticsearch availability";
until curl -s --cacert config/certs/ca/ca.crt https://es01:9200 | grep -q "missing authentication credentials"; do sleep 30; done;
echo "Setting kibana_system password";
until curl -s -X POST --cacert config/certs/ca/ca.crt -u "elastic:${ELASTIC_PASSWORD}" -H "Content-Type: application/json" https://es01:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done;
echo "All done!";
'
healthcheck:
test: ["CMD-SHELL", "[ -f config/certs/ca/ca.crt ]"]
interval: 1s
timeout: 5s
retries: 120
es01:
depends_on:
setup:
condition: service_healthy
image: public.ecr.aws/elastic/elasticsearch:${ELASTIC_TAG}
container_name: elasticsearch
hostname: elasticsearch
volumes:
- ./elastic:/usr/share/elasticsearch/config/certs
- esdata01:/usr/share/elasticsearch/data
ports:
- 9200:9200
environment:
- node.name=es01
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
- bootstrap.memory_lock=true
- discovery.type=single-node
- xpack.security.enabled=true
- xpack.security.http.ssl.enabled=true
- xpack.security.http.ssl.key=certs/es01/es01.key
- xpack.security.http.ssl.certificate=certs/es01/es01.crt
- xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.license.self_generated.type=${LICENSE}
mem_limit: 4G
ulimits:
memlock:
soft: -1
hard: -1
healthcheck:
test:
[
"CMD-SHELL",
"curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'",
]
interval: 10s
timeout: 10s
retries: 120
restart: always
networks:
- emqx_bridge
kibana:
depends_on:
es01:
condition: service_healthy
image: public.ecr.aws/elastic/kibana:${ELASTIC_TAG}
volumes:
- ./elastic:/usr/share/kibana/config/certs
- kibanadata:/usr/share/kibana/data
ports:
- 5601:5601
environment:
- SERVERNAME=kibana
- ELASTICSEARCH_HOSTS=https://es01:9200
- ELASTICSEARCH_USERNAME=kibana_system
- ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD}
- ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt
mem_limit: 1073741824
healthcheck:
test:
[
"CMD-SHELL",
"curl -s -I http://localhost:5601 | grep -q 'HTTP/1.1 302 Found'",
]
interval: 10s
timeout: 10s
retries: 120
restart: always
networks:
- emqx_bridge
volumes:
esdata01:
driver: local
kibanadata:
driver: local
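The same check the `es01` healthcheck performs can be run from the host through the published port — a minimal sketch, assuming `ELASTIC_PASSWORD` from the `.env` file is exported in the shell:
```bash
# -k skips CA verification; alternatively pass --cacert ./elastic/ca/ca.crt if that path exists locally
curl -sk -u "elastic:${ELASTIC_PASSWORD}" https://localhost:9200
```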

View File

@ -13,15 +13,13 @@ x-default-emqx: &default-emqx
services:
haproxy:
container_name: haproxy
image: public.ecr.aws/docker/library/haproxy:2.4
image: haproxy:2.4
depends_on:
- emqx1
- emqx2
volumes:
- ./haproxy/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg
- ../../apps/emqx/etc/certs/cert.pem:/usr/local/etc/haproxy/certs/cert.pem
- ../../apps/emqx/etc/certs/key.pem:/usr/local/etc/haproxy/certs/key.pem
- ../../apps/emqx/etc/certs/cacert.pem:/usr/local/etc/haproxy/certs/cacert.pem
- ../../apps/emqx/etc/certs:/usr/local/etc/haproxy/certs
ports:
- "18083:18083"
# - "1883:1883"
@ -36,7 +34,7 @@ services:
- -c
- |
set -x
cat /usr/local/etc/haproxy/certs/cert.pem /usr/local/etc/haproxy/certs/key.pem > /var/lib/haproxy/emqx.pem
cat /usr/local/etc/haproxy/certs/cert.pem /usr/local/etc/haproxy/certs/key.pem > /tmp/emqx.pem
haproxy -f /usr/local/etc/haproxy/haproxy.cfg
emqx1:

View File

@ -1,23 +0,0 @@
version: '3.9'
services:
gcp_emulator:
container_name: gcp_emulator
image: gcr.io/google.com/cloudsdktool/google-cloud-cli:435.0.1-emulators
restart: always
expose:
- "8085"
# ports:
# - "8085:8085"
networks:
- emqx_bridge
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8085"]
interval: 30s
timeout: 5s
retries: 4
command:
- bash
- "-c"
- |
gcloud beta emulators pubsub start --project=emqx-pubsub --host-port=0.0.0.0:8085 --impersonate-service-account test@emqx.iam.gserviceaccount.com
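Client libraries find the emulator through an environment variable — a minimal sketch, assuming port 8085 is published to the host (the `ports` mapping above is commented out) or the client runs on the `emqx_bridge` network:
```bash
# Route Pub/Sub client libraries to the emulator instead of the real service
export PUBSUB_EMULATOR_HOST=localhost:8085
export PUBSUB_PROJECT_ID=emqx-pubsub   # project name used in the start command above
```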

View File

@ -1,22 +0,0 @@
version: '3.9'
services:
greptimedb:
container_name: greptimedb
hostname: greptimedb
image: greptime/greptimedb:v0.7.1
expose:
- "4000"
- "4001"
# uncomment for local testing
# ports:
# - "4000:4000"
# - "4001:4001"
restart: always
networks:
- emqx_bridge
command:
standalone start
--user-provider=static_user_provider:cmd:greptime_user=greptime_pwd
--http-addr="0.0.0.0:4000"
--rpc-addr="0.0.0.0:4001"
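A quick liveness probe against the HTTP port — a minimal sketch, assuming the commented `ports` mapping is enabled and that this GreptimeDB version exposes the usual `/health` endpoint:
```bash
# Expects HTTP 200 when the standalone instance is up
curl -f http://localhost:4000/health
```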

View File

@ -1,132 +0,0 @@
version: "3.5"
services:
hserver:
image: hstreamdb/hstream:${HSTREAMDB_TAG}
container_name: hstreamdb
depends_on:
zookeeper:
condition: service_started
hstore:
condition: service_healthy
# ports:
# - "127.0.0.1:6570:6570"
expose:
- 6570
networks:
- emqx_bridge
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- /tmp:/tmp
- data_store:/data/store
command:
- bash
- "-c"
- |
set -e
/usr/local/script/wait-for-storage.sh hstore 6440 zookeeper 2181 600 \
/usr/local/bin/hstream-server \
--bind-address 0.0.0.0 --port 6570 \
--internal-port 6571 \
--server-id 100 \
--seed-nodes "$$(hostname -I | awk '{print $$1}'):6571" \
--advertised-address $$(hostname -I | awk '{print $$1}') \
--metastore-uri zk://zookeeper:2181 \
--store-config /data/store/logdevice.conf \
--store-admin-host hstore --store-admin-port 6440 \
--store-log-level warning \
--io-tasks-path /tmp/io/tasks \
--io-tasks-network emqx_bridge
hstore:
image: hstreamdb/hstream:${HSTREAMDB_TAG}
networks:
- emqx_bridge
volumes:
- data_store:/data/store
command:
- bash
- "-c"
- |
set -ex
# N.B. "enable-dscp-reflection=false" is required for linux kernel which
# doesn't support dscp reflection, e.g. centos7.
/usr/local/bin/ld-dev-cluster --root /data/store \
--use-tcp --tcp-host $$(hostname -I | awk '{print $$1}') \
--user-admin-port 6440 \
--param enable-dscp-reflection=false \
--no-interactive \
> /data/store/hstore.log 2>&1
healthcheck:
test: ["CMD", "grep", "LogDevice Cluster running", "/data/store/hstore.log"]
interval: 10s
timeout: 10s
retries: 60
start_period: 60s
zookeeper:
image: zookeeper:${HSTREAMDB_ZK_TAG}
expose:
- 2181
networks:
- emqx_bridge
volumes:
- data_zk_data:/data
- data_zk_datalog:/datalog
## The three containers `hstream-exporter`, `prometheus`, and `console`
## are for the HStreamDB Web Console,
## but the HStreamDB Console is not supported in v0.15.0
## because the HStreamApi proto changed
# hstream-exporter:
# depends_on:
# hserver:
# condition: service_completed_successfully
# image: hstreamdb/hstream-exporter
# networks:
# - hstream-quickstart
# command:
# - bash
# - "-c"
# - |
# set -ex
# hstream-exporter --addr hstream://hserver:6570
# prometheus:
# image: prom/prometheus
# expose:
# - 9097
# networks:
# - hstream-quickstart
# ports:
# - "9097:9090"
# volumes:
# - $PWD/prometheus:/etc/prometheus
# console:
# image: hstreamdb/hstream-console
# depends_on:
# - hserver
# expose:
# - 5177
# networks:
# - hstream-quickstart
# environment:
# - SERVER_PORT=5177
# - PROMETHEUS_URL=http://prometheus:9097
# - HSTREAM_PUBLIC_ADDRESS=hstream.example.com
# - HSTREAM_PRIVATE_ADDRESS=hserver:6570
# ports:
# - "5177:5177"
# networks:
# hstream-quickstart:
# name: hstream-quickstart
volumes:
data_store:
name: quickstart_data_store
data_zk_data:
name: quickstart_data_zk_data
data_zk_datalog:
name: quickstart_data_zk_datalog

View File

@ -3,7 +3,7 @@ version: '3.9'
services:
influxdb_server_tcp:
container_name: influxdb_tcp
image: public.ecr.aws/docker/library/influxdb:${INFLUXDB_TAG}
image: influxdb:${INFLUXDB_TAG}
expose:
- "8086"
- "8089/udp"

View File

@ -3,7 +3,7 @@ version: '3.9'
services:
influxdb_server_tls:
container_name: influxdb_tls
image: public.ecr.aws/docker/library/influxdb:${INFLUXDB_TAG}
image: influxdb:${INFLUXDB_TAG}
expose:
- "8086"
- "8089/udp"

View File

@ -1,90 +0,0 @@
version: '3.9'
services:
iotdb_1_3_0:
container_name: iotdb130
hostname: iotdb130
image: apache/iotdb:1.3.0-standalone
restart: always
environment:
- enable_rest_service=true
- cn_internal_address=iotdb130
- cn_internal_port=10710
- cn_consensus_port=10720
- cn_seed_config_node=iotdb130:10710
- dn_rpc_address=iotdb130
- dn_internal_address=iotdb130
- dn_rpc_port=6667
- dn_mpp_data_exchange_port=10740
- dn_schema_region_consensus_port=10750
- dn_data_region_consensus_port=10760
- dn_seed_config_node=iotdb130:10710
# volumes:
# - ./data:/iotdb/data
# - ./logs:/iotdb/logs
expose:
- "18080"
# IoTDB's REST interface, uncomment for local testing
# ports:
# - "18080:18080"
networks:
- emqx_bridge
iotdb_1_1_0:
container_name: iotdb110
hostname: iotdb110
image: apache/iotdb:1.1.0-standalone
restart: always
environment:
- enable_rest_service=true
- cn_internal_address=iotdb110
- cn_internal_port=10710
- cn_consensus_port=10720
- cn_target_config_node_list=iotdb110:10710
- dn_rpc_address=iotdb110
- dn_internal_address=iotdb110
- dn_rpc_port=6667
- dn_mpp_data_exchange_port=10740
- dn_schema_region_consensus_port=10750
- dn_data_region_consensus_port=10760
- dn_target_config_node_list=iotdb110:10710
# volumes:
# - ./data:/iotdb/data
# - ./logs:/iotdb/logs
expose:
- "18080"
# IoTDB's REST interface, uncomment for local testing
# ports:
# - "18080:18080"
networks:
- emqx_bridge
iotdb_0_13:
container_name: iotdb013
hostname: iotdb013
image: apache/iotdb:0.13.4-node
restart: always
environment:
- enable_rest_service=true
- cn_internal_address=iotdb013
- cn_internal_port=10710
- cn_consensus_port=10720
- cn_target_config_node_list=iotdb013:10710
- dn_rpc_address=iotdb013
- dn_internal_address=iotdb013
- dn_rpc_port=6667
- dn_mpp_data_exchange_port=10740
- dn_schema_region_consensus_port=10750
- dn_data_region_consensus_port=10760
- dn_target_config_node_list=iotdb013:10710
volumes:
- ./iotdb013/iotdb-rest.properties:/iotdb/conf/iotdb-rest.properties
# - ./data:/iotdb/data
# - ./logs:/iotdb/logs
expose:
- "18080"
# IoTDB's REST interface, uncomment for local testing
# ports:
# - "18080:18080"
networks:
- emqx_bridge

View File

@ -2,7 +2,7 @@ version: '3.9'
services:
zookeeper:
image: public.ecr.aws/docker/library/zookeeper:3.6
image: docker.io/library/zookeeper:3.6
ports:
- "2181:2181"
container_name: zookeeper
@ -18,7 +18,7 @@ services:
- /tmp/emqx-ci/emqx-shared-secret:/var/lib/secret
kdc:
hostname: kdc.emqx.net
image: ghcr.io/emqx/emqx-builder/5.3-9:1.15.7-26.2.5-3-ubuntu22.04
image: ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-24.3.4.2-2-ubuntu20.04
container_name: kdc.emqx.net
expose:
- 88 # kdc

View File

@ -1,12 +0,0 @@
version: '3.9'
services:
kinesis:
container_name: kinesis
image: public.ecr.aws/localstack/localstack:2.1
environment:
- KINESIS_ERROR_PROBABILITY=0.0
- KINESIS_LATENCY=0
restart: always
networks:
- emqx_bridge

View File

@ -0,0 +1,16 @@
version: '3.9'
services:
ldap_server:
container_name: ldap
build:
context: ../..
dockerfile: .ci/docker-compose-file/openldap/Dockerfile
args:
LDAP_TAG: ${LDAP_TAG}
image: openldap
ports:
- 389:389
restart: always
networks:
- emqx_bridge

View File

@ -1,18 +0,0 @@
version: '3.9'
services:
ldap_server:
container_name: ldap
build:
context: ../..
dockerfile: .ci/docker-compose-file/openldap/Dockerfile
ulimits:
nofile: 1024
image: openldap
#ports:
# - "389:389"
volumes:
- ./certs/ca.crt:/etc/certs/ca.crt
restart: always
networks:
- emqx_bridge

View File

@ -1,21 +0,0 @@
version: '3.7'
services:
minio:
hostname: minio
image: quay.io/minio/minio:${MINIO_TAG}
command: server --address ":9000" --console-address ":9001" /minio-data
expose:
- "9000"
- "9001"
ports:
- "9000:9000"
- "9001:9001"
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 30s
timeout: 5s
retries: 3
networks:
emqx_bridge:
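The healthcheck above can also be run from the host, since ports 9000/9001 are published — a minimal sketch:
```bash
# Same liveness probe the compose healthcheck uses
curl -f http://localhost:9000/minio/health/live
```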

View File

@ -1,23 +0,0 @@
version: '3.7'
services:
minio_tls:
hostname: minio-tls
image: quay.io/minio/minio:${MINIO_TAG}
command: server --certs-dir /etc/certs --address ":9100" --console-address ":9101" /minio-data
volumes:
- ./certs/server.crt:/etc/certs/public.crt
- ./certs/server.key:/etc/certs/private.key
expose:
- "9100"
- "9101"
ports:
- "9100:9100"
- "9101:9101"
healthcheck:
test: ["CMD", "curl", "-k", "-f", "https://localhost:9100/minio/health/live"]
interval: 30s
timeout: 5s
retries: 3
networks:
emqx_bridge:

View File

@ -4,7 +4,7 @@ services:
mongo1:
hostname: mongo1
container_name: mongo1
image: public.ecr.aws/docker/library/mongo:${MONGO_TAG}
image: mongo:${MONGO_TAG}
environment:
MONGO_INITDB_DATABASE: mqtt
networks:
@ -22,7 +22,7 @@ services:
mongo2:
hostname: mongo2
container_name: mongo2
image: public.ecr.aws/docker/library/mongo:${MONGO_TAG}
image: mongo:${MONGO_TAG}
environment:
MONGO_INITDB_DATABASE: mqtt
networks:
@ -40,7 +40,7 @@ services:
mongo3:
hostname: mongo3
container_name: mongo3
image: public.ecr.aws/docker/library/mongo:${MONGO_TAG}
image: mongo:${MONGO_TAG}
environment:
MONGO_INITDB_DATABASE: mqtt
networks:
@ -56,7 +56,7 @@ services:
--replSet rs0
mongo_rs_client:
image: public.ecr.aws/docker/library/mongo:${MONGO_TAG}
image: mongo:${MONGO_TAG}
container_name: mongo_rs_client
networks:
- emqx_bridge

View File

@ -4,7 +4,7 @@ services:
mongo1:
hostname: mongo1
container_name: mongo1
image: public.ecr.aws/docker/library/mongo:${MONGO_TAG}
image: mongo:${MONGO_TAG}
environment:
MONGO_INITDB_DATABASE: mqtt
networks:
@ -27,7 +27,7 @@ services:
mongo2:
hostname: mongo2
container_name: mongo2
image: public.ecr.aws/docker/library/mongo:${MONGO_TAG}
image: mongo:${MONGO_TAG}
environment:
MONGO_INITDB_DATABASE: mqtt
networks:
@ -50,7 +50,7 @@ services:
mongo3:
hostname: mongo3
container_name: mongo3
image: public.ecr.aws/docker/library/mongo:${MONGO_TAG}
image: mongo:${MONGO_TAG}
environment:
MONGO_INITDB_DATABASE: mqtt
networks:
@ -71,7 +71,7 @@ services:
mongod --ipv6 --bind_ip_all --tlsMode requireTLS --tlsCertificateKeyFile /etc/certs/mongodb.pem --replSet rs0
mongo_client:
image: public.ecr.aws/docker/library/mongo:${MONGO_TAG}
image: mongo:${MONGO_TAG}
container_name: mongo_client
networks:
- emqx_bridge

View File

@ -4,7 +4,7 @@ services:
mongosharded1:
hostname: mongosharded1
container_name: mongosharded1
image: public.ecr.aws/docker/library/mongo:${MONGO_TAG}
image: mongo:${MONGO_TAG}
environment:
MONGO_INITDB_DATABASE: mqtt
networks:
@ -24,7 +24,7 @@ services:
mongosharded2:
hostname: mongosharded2
container_name: mongosharded2
image: public.ecr.aws/docker/library/mongo:${MONGO_TAG}
image: mongo:${MONGO_TAG}
environment:
MONGO_INITDB_DATABASE: mqtt
networks:
@ -44,7 +44,7 @@ services:
mongosharded3:
hostname: mongosharded3
container_name: mongosharded3
image: public.ecr.aws/docker/library/mongo:${MONGO_TAG}
image: mongo:${MONGO_TAG}
environment:
MONGO_INITDB_DATABASE: mqtt
networks:
@ -62,7 +62,7 @@ services:
--bind_ip_all
mongosharded_client:
image: public.ecr.aws/docker/library/mongo:${MONGO_TAG}
image: mongo:${MONGO_TAG}
container_name: mongosharded_client
networks:
- emqx_bridge

View File

@ -3,15 +3,12 @@ version: '3.9'
services:
mongo_server:
container_name: mongo
image: public.ecr.aws/docker/library/mongo:${MONGO_TAG}
image: mongo:${MONGO_TAG}
restart: always
networks:
- emqx_bridge
ports:
- "27017:27017"
env_file:
- .env
- credentials.env
command:
--ipv6
--bind_ip_all
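With port 27017 published and the credentials from `credentials.env` shown earlier (`MONGO_USERNAME=emqx`, `MONGO_PASSWORD=passw0rd`, `MONGO_AUTHSOURCE=admin`), the connection can be tested from the host — a minimal sketch, assuming `mongosh` is installed:
```bash
# Connect to the mqtt database, authenticating against the admin database
mongosh "mongodb://emqx:passw0rd@localhost:27017/mqtt?authSource=admin"
```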

View File

@ -3,7 +3,7 @@ version: '3.9'
services:
mongo_server_tls:
container_name: mongo-tls
image: public.ecr.aws/docker/library/mongo:${MONGO_TAG}
image: mongo:${MONGO_TAG}
restart: always
environment:
MONGO_INITDB_DATABASE: mqtt

View File

@ -3,7 +3,7 @@ version: '3.9'
services:
mysql_server:
container_name: mysql
image: public.ecr.aws/docker/library/mysql:${MYSQL_TAG}
image: mysql:${MYSQL_TAG}
restart: always
ports:
- "3306:3306"

View File

@ -3,7 +3,7 @@ version: '3.9'
services:
mysql_server_tls:
container_name: mysql-tls
image: public.ecr.aws/docker/library/mysql:${MYSQL_TAG}
image: mysql:${MYSQL_TAG}
restart: always
environment:
MYSQL_ROOT_PASSWORD: public

View File

@ -1,9 +0,0 @@
version: '3.9'
services:
opents_server:
container_name: opents
image: petergrace/opentsdb-docker:${OPENTS_TAG}
restart: always
networks:
- emqx_bridge

View File

@ -1,11 +0,0 @@
version: '3.9'
services:
oracle_server:
container_name: oracle
image: oracleinanutshell/oracle-xe-11g:1.0.0
restart: always
environment:
ORACLE_DISABLE_ASYNCH_IO: true
networks:
- emqx_bridge

View File

@ -1,69 +0,0 @@
version: '3.9'
services:
jaeger-all-in-one:
image: jaegertracing/all-in-one:1.51.0
container_name: jaeger.emqx.net
hostname: jaeger.emqx.net
networks:
- emqx_bridge
restart: always
# ports:
# - "16686:16686"
user: "${DOCKER_USER:-root}"
# Collector
otel-collector:
image: otel/opentelemetry-collector:0.90.0
container_name: otel-collector.emqx.net
hostname: otel-collector.emqx.net
networks:
- emqx_bridge
restart: always
command: ["--config=/etc/otel-collector-config.yaml", "${OTELCOL_ARGS}"]
volumes:
- ./otel:/etc/
# ports:
# - "1888:1888" # pprof extension
# - "8888:8888" # Prometheus metrics exposed by the collector
# - "8889:8889" # Prometheus exporter metrics
# - "13133:13133" # health_check extension
# - "4317:4317" # OTLP gRPC receiver
# - "4318:4318" # OTLP http receiver
# - "55679:55679" # zpages extension
depends_on:
- jaeger-all-in-one
user: "${DOCKER_USER:-root}"
# Collector
otel-collector-tls:
image: otel/opentelemetry-collector:0.90.0
container_name: otel-collector-tls.emqx.net
hostname: otel-collector-tls.emqx.net
networks:
- emqx_bridge
restart: always
command: ["--config=/etc/otel-collector-config-tls.yaml", "${OTELCOL_ARGS}"]
volumes:
- ./otel:/etc/
- ./certs:/etc/certs
# ports:
# - "14317:4317" # OTLP gRPC receiver
depends_on:
- jaeger-all-in-one
user: "${DOCKER_USER:-root}"
#networks:
# emqx_bridge:
# driver: bridge
# name: emqx_bridge
# enable_ipv6: true
# ipam:
# driver: default
# config:
# - subnet: 172.100.239.0/24
# gateway: 172.100.239.1
# - subnet: 2001:3200:3200::/64
# gateway: 2001:3200:3200::1
#

View File

@ -3,7 +3,7 @@ version: '3.9'
services:
pgsql_server:
container_name: pgsql
image: public.ecr.aws/docker/library/postgres:${PGSQL_TAG}
image: postgres:${PGSQL_TAG}
restart: always
environment:
POSTGRES_PASSWORD: public

View File

@ -8,7 +8,7 @@ services:
dockerfile: ./pgsql/Dockerfile
args:
POSTGRES_USER: postgres
BUILD_FROM: public.ecr.aws/docker/library/postgres:${PGSQL_TAG}
BUILD_FROM: postgres:${PGSQL_TAG}
image: emqx_pgsql:${PGSQL_TAG}
restart: always
environment:

View File

@ -1,32 +0,0 @@
version: '3'
services:
pulsar:
container_name: pulsar
image: apachepulsar/pulsar:2.11.0
# ports:
# - 6650:6650
# - 8080:8080
networks:
emqx_bridge:
volumes:
- ../../apps/emqx/etc/certs/cert.pem:/etc/certs/server.pem
- ../../apps/emqx/etc/certs/key.pem:/etc/certs/key.pem
- ../../apps/emqx/etc/certs/cacert.pem:/etc/certs/ca.pem
restart: always
command:
- bash
- "-c"
- |
sed -i 's/^advertisedAddress=/#advertisedAddress=/' conf/standalone.conf
sed -ie 's/^brokerServicePort=.*/brokerServicePort=6649/' conf/standalone.conf
sed -i 's/^bindAddress=/#bindAddress=/' conf/standalone.conf
sed -i 's#^bindAddresses=#bindAddresses=plain:pulsar://0.0.0.0:6650,ssl:pulsar+ssl://0.0.0.0:6651,toxiproxy:pulsar://0.0.0.0:6652,toxiproxy_ssl:pulsar+ssl://0.0.0.0:6653#' conf/standalone.conf
sed -i 's#^advertisedAddress=#advertisedAddress=plain:pulsar://pulsar:6650,ssl:pulsar+ssl://pulsar:6651,toxiproxy:pulsar://toxiproxy:6652,toxiproxy_ssl:pulsar+ssl://toxiproxy:6653#' conf/standalone.conf
sed -i 's#^tlsCertificateFilePath=#tlsCertificateFilePath=/etc/certs/server.pem#' conf/standalone.conf
sed -i 's#^tlsTrustCertsFilePath=#tlsTrustCertsFilePath=/etc/certs/ca.pem#' conf/standalone.conf
sed -i 's#^tlsKeyFilePath=#tlsKeyFilePath=/etc/certs/key.pem#' conf/standalone.conf
sed -i 's#^tlsProtocols=#tlsProtocols=TLSv1.3,TLSv1.2#' conf/standalone.conf
sed -i 's#^tlsCiphers=#tlsCiphers=TLS_AES_256_GCM_SHA384#' conf/standalone.conf
echo 'advertisedListeners=plain:pulsar://pulsar:6650,ssl:pulsar+ssl://pulsar:6651,toxiproxy:pulsar://toxiproxy:6652,toxiproxy_ssl:pulsar+ssl://toxiproxy:6653' >> conf/standalone.conf
bin/pulsar standalone -nfw -nss

View File

@ -3,7 +3,7 @@ version: '3.9'
services:
python:
container_name: python
image: public.ecr.aws/docker/library/python:3.9.16-alpine3.18
image: python:3.7.2-alpine3.9
depends_on:
- emqx1
- emqx2
@ -12,3 +12,4 @@ services:
emqx_bridge:
volumes:
- ./python:/scripts

View File

@ -1,24 +0,0 @@
version: '3.9'
services:
rabbitmq:
container_name: rabbitmq
image: public.ecr.aws/docker/library/rabbitmq:3.11-management
restart: always
expose:
- "15672"
- "5672"
- "5671"
# We don't want to take ports from the host
#ports:
# - "15672:15672"
# - "5672:5672"
# - "5671:5671"
volumes:
- ./certs/ca.crt:/opt/certs/ca.crt
- ./certs/server.crt:/opt/certs/server.crt
- ./certs/server.key:/opt/certs/server.key
- ./rabbitmq/20-tls.conf:/etc/rabbitmq/conf.d/20-tls.conf
networks:
- emqx_bridge

View File

@ -3,7 +3,7 @@ services:
redis-cluster-1: &redis-node
container_name: redis-cluster-1
image: public.ecr.aws/docker/library/redis:${REDIS_TAG}
image: redis:${REDIS_TAG}
volumes:
- ./redis/cluster-tcp:/usr/local/etc/redis
command: redis-server /usr/local/etc/redis/redis.conf

View File

@ -3,7 +3,7 @@ services:
redis-cluster-tls-1: &redis-node
container_name: redis-cluster-tls-1
image: public.ecr.aws/docker/library/redis:${REDIS_TAG}
image: redis:${REDIS_TAG}
volumes:
- ./redis/cluster-tls:/usr/local/etc/redis
- ../../apps/emqx/etc/certs:/etc/certs

View File

@ -4,7 +4,7 @@ services:
redis-sentinel-master:
container_name: redis-sentinel-master
image: public.ecr.aws/docker/library/redis:${REDIS_TAG}
image: redis:${REDIS_TAG}
volumes:
- ./redis/sentinel-tcp:/usr/local/etc/redis
command: redis-server /usr/local/etc/redis/master.conf
@ -13,7 +13,7 @@ services:
redis-sentinel-slave:
container_name: redis-sentinel-slave
image: public.ecr.aws/docker/library/redis:${REDIS_TAG}
image: redis:${REDIS_TAG}
volumes:
- ./redis/sentinel-tcp:/usr/local/etc/redis
command: redis-server /usr/local/etc/redis/slave.conf
@ -24,7 +24,7 @@ services:
redis-sentinel:
container_name: redis-sentinel
image: public.ecr.aws/docker/library/redis:${REDIS_TAG}
image: redis:${REDIS_TAG}
volumes:
- ./redis/sentinel-tcp/sentinel-base.conf:/usr/local/etc/redis/sentinel-base.conf
depends_on:

View File

@ -4,7 +4,7 @@ services:
redis-sentinel-tls-master:
container_name: redis-sentinel-tls-master
image: public.ecr.aws/docker/library/redis:${REDIS_TAG}
image: redis:${REDIS_TAG}
volumes:
- ./redis/sentinel-tls:/usr/local/etc/redis
- ../../apps/emqx/etc/certs:/etc/certs
@ -14,7 +14,7 @@ services:
redis-sentinel-tls-slave:
container_name: redis-sentinel-tls-slave
image: public.ecr.aws/docker/library/redis:${REDIS_TAG}
image: redis:${REDIS_TAG}
volumes:
- ./redis/sentinel-tls:/usr/local/etc/redis
- ../../apps/emqx/etc/certs:/etc/certs
@ -26,7 +26,7 @@ services:
redis-sentinel-tls:
container_name: redis-sentinel-tls
image: public.ecr.aws/docker/library/redis:${REDIS_TAG}
image: redis:${REDIS_TAG}
volumes:
- ./redis/sentinel-tls/sentinel-base.conf:/usr/local/etc/redis/sentinel-base.conf
- ../../apps/emqx/etc/certs:/etc/certs

View File

@ -3,12 +3,13 @@ version: '3.9'
services:
redis_server:
container_name: redis
image: public.ecr.aws/docker/library/redis:${REDIS_TAG}
volumes:
- ./redis/single-tcp:/usr/local/etc/redis/
image: redis:${REDIS_TAG}
ports:
- "6379:6379"
command: redis-server /usr/local/etc/redis/redis.conf
command:
- redis-server
- "--bind 0.0.0.0 ::"
- --requirepass public
restart: always
networks:
- emqx_bridge
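A quick connectivity check against the published port, using the `requirepass` value from the command above — a minimal sketch, assuming `redis-cli` is installed on the host:
```bash
# Expect PONG
redis-cli -h 127.0.0.1 -p 6379 -a public ping
```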

View File

@ -3,15 +3,23 @@ version: '3.9'
services:
redis_server_tls:
container_name: redis-tls
image: public.ecr.aws/docker/library/redis:${REDIS_TAG}
image: redis:${REDIS_TAG}
volumes:
- ./certs/server.crt:/etc/certs/redis.crt
- ./certs/server.key:/etc/certs/redis.key
- ./certs/ca.crt:/etc/certs/ca.crt
- ./redis/single-tls:/usr/local/etc/redis
ports:
- "6380:6380"
command: redis-server /usr/local/etc/redis/redis.conf
command:
- redis-server
- "--bind 0.0.0.0 ::"
- --requirepass public
- --tls-port 6380
- --tls-cert-file /etc/certs/redis.crt
- --tls-key-file /etc/certs/redis.key
- --tls-ca-cert-file /etc/certs/ca.crt
- --tls-protocols "TLSv1.3"
- --tls-ciphersuites "TLS_CHACHA20_POLY1305_SHA256"
restart: always
networks:
emqx_bridge:
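The TLS listener can be probed with `redis-cli`, reusing the same `./certs` directory that is mounted into the container — a minimal sketch (certificate subject names may still need to match the hostname used):
```bash
# Expect PONG; a client certificate is supplied because Redis defaults to tls-auth-clients yes
redis-cli -h localhost -p 6380 -a public --tls \
  --cacert ./certs/ca.crt --cert ./certs/server.crt --key ./certs/server.key ping
```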

View File

@ -1,41 +0,0 @@
version: '3.9'
services:
mqnamesrvssl:
image: apache/rocketmq:4.9.4
container_name: rocketmq_namesrv_ssl
# ports:
# - 9876:9876
volumes:
- ./rocketmq/logs_ssl:/opt/logs
- ./rocketmq/store_ssl:/opt/store
environment:
JAVA_OPT: "-Dtls.server.mode=enforcing"
command: ./mqnamesrv
networks:
- emqx_bridge
mqbrokerssl:
image: apache/rocketmq:4.9.4
container_name: rocketmq_broker_ssl
# ports:
# - 10909:10909
# - 10911:10911
volumes:
- ./rocketmq/logs_ssl:/opt/logs
- ./rocketmq/store_ssl:/opt/store
- ./rocketmq/conf_ssl/broker.conf:/etc/rocketmq/broker.conf
- ./rocketmq/conf_ssl/plain_acl.yml:/home/rocketmq/rocketmq-4.9.4/conf/plain_acl.yml
environment:
NAMESRV_ADDR: "rocketmq_namesrv_ssl:9876"
JAVA_OPTS: " -Duser.home=/opt -Drocketmq.broker.diskSpaceWarningLevelRatio=0.99"
JAVA_OPT_EXT: "-server -Xms512m -Xmx512m -Xmn512m -Dtls.server.mode=enforcing"
command: ./mqbroker -c /etc/rocketmq/broker.conf
depends_on:
- mqnamesrvssl
networks:
- emqx_bridge
networks:
emqx_bridge:
driver: bridge

View File

@ -1,35 +0,0 @@
version: '3.9'
services:
mqnamesrv:
image: apache/rocketmq:4.9.4
container_name: rocketmq_namesrv
# ports:
# - 9876:9876
volumes:
- ./rocketmq/logs:/opt/logs
- ./rocketmq/store:/opt/store
command: ./mqnamesrv
networks:
- emqx_bridge
mqbroker:
image: apache/rocketmq:4.9.4
container_name: rocketmq_broker
# ports:
# - 10909:10909
# - 10911:10911
volumes:
- ./rocketmq/logs:/opt/logs
- ./rocketmq/store:/opt/store
- ./rocketmq/conf/broker.conf:/etc/rocketmq/broker.conf
- ./rocketmq/conf/plain_acl.yml:/home/rocketmq/rocketmq-4.9.4/conf/plain_acl.yml
environment:
NAMESRV_ADDR: "rocketmq_namesrv:9876"
JAVA_OPTS: " -Duser.home=/opt -Drocketmq.broker.diskSpaceWarningLevelRatio=0.99"
JAVA_OPT_EXT: "-server -Xms512m -Xmx512m -Xmn512m"
command: ./mqbroker -c /etc/rocketmq/broker.conf
depends_on:
- mqnamesrv
networks:
- emqx_bridge

View File

@ -1,19 +0,0 @@
version: '3.9'
services:
sql_server:
container_name: sqlserver
# See also:
# https://mcr.microsoft.com/en-us/product/mssql/server/about
# https://hub.docker.com/_/microsoft-mssql-server
image: ${MS_IMAGE_ADDR}:${SQLSERVER_TAG}
environment:
# See also:
# https://learn.microsoft.com/en-us/sql/linux/sql-server-linux-configure-environment-variables
ACCEPT_EULA: "Y"
MSSQL_SA_PASSWORD: "mqtt_public1"
restart: always
# ports:
# - "1433:1433"
networks:
- emqx_bridge
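Once the server is up, the SA login defined above can be verified inside the container — a minimal sketch, assuming the 2019 image's usual `mssql-tools` path (it differs in newer images):
```bash
# Run a trivial query as the SA user
docker exec -it sqlserver /opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P 'mqtt_public1' -Q 'SELECT @@VERSION'
```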

View File

@ -13,51 +13,15 @@ services:
volumes:
- "./toxiproxy.json:/config/toxiproxy.json"
ports:
# Toxiproxy management API
- 8474:8474
# InfluxDB
- 8086:8086
# InfluxDB TLS
- 8087:8087
# SQL Server
- 11433:1433
# MySQL
- 13306:3306
# MySQL TLS
- 13307:3307
# PostgreSQL
- 15432:5432
# PostgreSQL TLS
- 15433:5433
# TDEngine
- 16041:6041
# DynamoDB
- 18000:8000
# RocketMQ
- 19876:9876
# Cassandra
- 19042:9042
# Cassandra TLS
- 19142:9142
# Cassandra No Auth
- 19043:9043
# Cassandra TLS No Auth
- 19143:9143
# S3
- 19000:19000
# S3 TLS
- 19100:19100
# IOTDB (3 total)
- 14242:4242
- 28080:18080
- 38080:38080
# HStreamDB
- 15670:5670
# Kinesis
- 4566:4566
# GreptimeDB
- 4000:4000
- 4001:4001
command:
- "-host=0.0.0.0"
- "-config=/config/toxiproxy.json"

View File

@ -3,18 +3,17 @@ version: '3.9'
services:
erlang:
container_name: erlang
image: ${DOCKER_CT_RUNNER_IMAGE:-ghcr.io/emqx/emqx-builder/5.3-9:1.15.7-26.2.5-3-ubuntu22.04}
image: ${DOCKER_CT_RUNNER_IMAGE:-ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-24.3.4.2-2-ubuntu20.04}
env_file:
- credentials.env
- conf.env
environment:
GITHUB_ACTIONS: ${GITHUB_ACTIONS:-}
GITHUB_TOKEN: ${GITHUB_TOKEN:-}
GITHUB_RUN_ID: ${GITHUB_RUN_ID:-}
GITHUB_SHA: ${GITHUB_SHA:-}
GITHUB_RUN_NUMBER: ${GITHUB_RUN_NUMBER:-}
GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME:-}
GITHUB_REF: ${GITHUB_REF:-}
GITHUB_ACTIONS: ${GITHUB_ACTIONS}
GITHUB_TOKEN: ${GITHUB_TOKEN}
GITHUB_RUN_ID: ${GITHUB_RUN_ID}
GITHUB_SHA: ${GITHUB_SHA}
GITHUB_RUN_NUMBER: ${GITHUB_RUN_NUMBER}
GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME}
GITHUB_REF: ${GITHUB_REF}
networks:
- emqx_bridge
ports:
@ -25,7 +24,6 @@ services:
- /tmp/emqx-ci/emqx-shared-secret:/var/lib/secret
- ./kerberos/krb5.conf:/etc/kdc/krb5.conf
- ./kerberos/krb5.conf:/etc/krb5.conf
# - ./odbc/odbcinst.ini:/etc/odbcinst.ini
working_dir: /emqx
tty: true
user: "${DOCKER_USER:-root}"

View File

@ -1,20 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDSjCCAjKgAwIBAgIVAIrN275DCtGnotTPpxwvQ5751N4OMA0GCSqGSIb3DQEB
CwUAMDQxMjAwBgNVBAMTKUVsYXN0aWMgQ2VydGlmaWNhdGUgVG9vbCBBdXRvZ2Vu
ZXJhdGVkIENBMB4XDTI0MDExNjAyMzIyMFoXDTI3MDExNTAyMzIyMFowNDEyMDAG
A1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5lcmF0ZWQgQ0Ew
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCy0nwiEurUkIPFMLV1weVM
pPk/AlwZUzqjkeL44gsY53XI9Q05w/sL9u6PzwrXgTCFWNXzI9+MoAtp8phPkn14
cmg5/3sLe9YcFVFjYK/MoljlUbPDj+4dgk8l+w5FRSi0+JN5krUm7rYk9lojAkeS
fX8RU7ekKGbjBXIFtPxX5GNadu9RidR5GkHM3XroAIoris8bFOzMgFn9iybYnkhq
0S+Hpv0A8FVxzle0KNbPpsIkxXH2DnP2iPTDym9xJNl9Iv9MPtj9XaamH7TmXcSt
MbjkAudKsCw4bRuhHonM16DIUr8sX5UcRcAWyJ1x1qpZaOzMdh2VdYAHNuOsZwzJ
AgMBAAGjUzBRMB0GA1UdDgQWBBTAyDlp8NZfPe8NCGVlHJSVclGOhTAfBgNVHSME
GDAWgBTAyDlp8NZfPe8NCGVlHJSVclGOhTAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
SIb3DQEBCwUAA4IBAQAeIUXRKmC53iirY4P49YspLafspAMf4ndMFQAp+Oc223Vs
hQC4axNoYnUdzWDH6LioAN7P826xNPqtXvTZF9fmeX7K8Nm9Kdj+for+QQI3j6+X
zq98VVkACb8b/Mc9Nac/WBbv/1IKyKgNNta7//WNPgAFolOfti/C0NLsPcKhrM9L
mGbvRX8ZjH8pVJ0YTy4/xfDcF7G/Lxl4Yvb0ZXpuQbvE1+Y0h5aoTNshT/skJxC4
iyVseYr21s3pptKcr6H9KZuSdZe5pbEo+81nT15w+50aswFLk9GCYh5UsQ+1jkRK
cKgxP93i6x8BVbQJGKi1A1jhauSKX2IpWZQsHy4p
-----END CERTIFICATE-----

View File

@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAstJ8IhLq1JCDxTC1dcHlTKT5PwJcGVM6o5Hi+OILGOd1yPUN
OcP7C/buj88K14EwhVjV8yPfjKALafKYT5J9eHJoOf97C3vWHBVRY2CvzKJY5VGz
w4/uHYJPJfsORUUotPiTeZK1Ju62JPZaIwJHkn1/EVO3pChm4wVyBbT8V+RjWnbv
UYnUeRpBzN166ACKK4rPGxTszIBZ/Ysm2J5IatEvh6b9APBVcc5XtCjWz6bCJMVx
9g5z9oj0w8pvcSTZfSL/TD7Y/V2mph+05l3ErTG45ALnSrAsOG0boR6JzNegyFK/
LF+VHEXAFsidcdaqWWjszHYdlXWABzbjrGcMyQIDAQABAoIBAAZOLXYanmjpIRpX
h7h7oikYEplWDRcQBBvvKZaOyuchhznTKTiZmF0xQ3Ny8J4Ndj9ndODWSZxI6uod
FaGNp+qytwnfgDBVGSVDm6tyRfSkX1fTsA/j3/iupvmO/w9yezdZYgLaCVTyex31
yVMdchZgYjYDUpEBYzJbV2xL18+GBRmmPjdXumlpcJqcclxjOQJSu/1WCGVfn/e/
64NQpAm7NSKLqeUl32g0/DvUpmYRfmf7ZjVUjePaJQU6sw5/N+3V9F1hYs8VSWz0
OMzYIfUcvixw+VWx5bu0nWt98FirhsQPjCTThD+DHP6koXGrdXpeMOQE1YZmoV5T
vP0X+FECgYEA5dsKVDQFL67muqz3CNRVM0xDWACCoa8789hYoxvhd1iO3e4kwXBa
ABPcZckioq+HiQ4UIxC2AhQ1FuTeIUTq7LZ0HtAAdKFi48U4LzmPhNUpG1E/HbJ3
GQbi4u1cAzGYuhdywktgBhn9bJ4XB7+X3815Y9qKkuRcwtXgKGDy8HkCgYEAxyly
vc7NBkLfIAmkOsm6VXfvfBTEUBUGi6+k1rarTUxWFIgRuk4FHywwWUTdxWBKJz3n
HNNJb/g7CcufdhLTuWVHQtJDxYf2cJjoi+Kf7/i/Qs9Nyhokj5Mnh6KlZQOWXpZd
Gwn/O13NeDxt1TIVO2xp6zY4FhVEPvaHuxsMCtECgYA7/eR/P6iO3nZoCJbdXhXy
spftEw0FSCg8p53SzIcXUCzRrcM4HavP0181zb5VebzFP8Bvun/WoRGOLSPwyP0L
1T8Pf7huuGSIEERuxvY3dC8raxQvGxJMnOiA0/Ss/Lfg8hfIsEWashPb0pMuOYpZ
JlblgfejCSlQzOOZhlxB+QKBgQCKmizRLV9/0QAJAsy5YPR9UJdpCebJOKiyg806
5Ct5AvwRE9UKjAuCczU+mu+f0fApOSpi5CQCeYVUvtG90UJpjrM2LLCfgoyeNbv4
xgG6dqlcbHrdgK4bATUMbsOd9g4qy4gGLkHi5df9qkhhi5Y9Iajg2X3U2H4DN3yk
WSFbUQKBgQCLz333qWOuT3OBv+EYxHDQUS4YG+dReUos+v0iPJzu+spnfibBF5IC
RjHIhPsdN1byNB0naXOkkz4tUlLGXv6umFgDtQvy/2rxvxQmUGp/WY1VM2+164Xe
NEWdMEU6UckCoMO77kw8JosKhmXCYaSW5bWwnXuEpOj9WWpwjKtxlA==
-----END RSA PRIVATE KEY-----

View File

@ -1,20 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDQDCCAiigAwIBAgIUe90yOBN1KBxOEr2jro3epamZksIwDQYJKoZIhvcNAQEL
BQAwNDEyMDAGA1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5l
cmF0ZWQgQ0EwHhcNMjQwMTE2MDIzMjIyWhcNMjcwMTE1MDIzMjIyWjAPMQ0wCwYD
VQQDEwRlczAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxGEL71pV
j8qoUxEuL7qjRSeS1eHxeKhu2jqEZb7iA1o/7b/26QuYAkoYL+WuJNfYjg5F/O8W
VVuAYIlN6a/mC6wT2t3pX4YSrdp+i3gtAC/LX+8mAeqMQPD+4jitOwjOsYzbuFCb
nYl86dnFPl/+Pmj20mtZ+Wt7oIPD88j6+r5qgv59pHICxS7Cq304LDTRQbNoT8HO
4c9VGGGtWIdtrqiYrz1OVefkffMrvFt77v6dKHn8g5tSyfQUDCoEKtTOc3Pe5zCB
vIMs6HaapoSkl8XdpFHQ712PCZRebAMCrVcPYQ3r8e9GYmLY/NhxEn3dWTqRhHeg
UD13O8o1aBWonwIDAQABo28wbTAdBgNVHQ4EFgQUXvGJtSf2/mLOK17AzUridtCV
xWwwHwYDVR0jBBgwFoAUwMg5afDWXz3vDQhlZRyUlXJRjoUwIAYDVR0RBBkwF4IJ
bG9jYWxob3N0hwR/AAABggRlczAxMAkGA1UdEwQCMAAwDQYJKoZIhvcNAQELBQAD
ggEBACaNq3ZqrbsGvbEtrf6kJGIsTokTFHeVJUSYmt1ZZzDFLSepXAC/J8gphV45
B+YSlkDPNTwMYlf7TUYY872zkdqOXN9r0NUx8MzVAX0+rux0RJba5GGUvJGZDNMX
WM5z9ry1KjQSQ1bSoRQOD3QArmBmhvikHjLc97Vqt56N0wA/ztXWOpNZX/TXmast
aXlUbcfQE73Cdq9tW1ATXwbQ2Gf7vVAUT3zjZSZbNdgPuBicGJHf85Fhjm2ND4+R
sjLIOQ2YgVxNHYbueScc6lJM5RNK194K7WrEQnRyGHT3NaDUm0FFNl//aQeq1ZVw
6gaUYlkTFauXwEYMDK901cWFaBE=
-----END CERTIFICATE-----

View File

@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAxGEL71pVj8qoUxEuL7qjRSeS1eHxeKhu2jqEZb7iA1o/7b/2
6QuYAkoYL+WuJNfYjg5F/O8WVVuAYIlN6a/mC6wT2t3pX4YSrdp+i3gtAC/LX+8m
AeqMQPD+4jitOwjOsYzbuFCbnYl86dnFPl/+Pmj20mtZ+Wt7oIPD88j6+r5qgv59
pHICxS7Cq304LDTRQbNoT8HO4c9VGGGtWIdtrqiYrz1OVefkffMrvFt77v6dKHn8
g5tSyfQUDCoEKtTOc3Pe5zCBvIMs6HaapoSkl8XdpFHQ712PCZRebAMCrVcPYQ3r
8e9GYmLY/NhxEn3dWTqRhHegUD13O8o1aBWonwIDAQABAoIBADJ3A/Om4az5dcce
96EBU9q+IDBBh2Wr1wzSk9p3sqoM47fLqH5b4dzYwJ1yZw2FwFtFFLw6jqExyexE
7JY8gyAFwPZyJ3pKQHuX1gQuRlYxchB9quU8Kn230LA+w1mT2lXrLj2PzWWvAsAv
m837KiFMpP0O5EjB07u8kLsRr1mG6QQ24Kc8oxd7xLXIiPzSvsOpYwo9hmIWENd5
kyA7oSa9EmN3TRTkKOHI7cFQ3DqIGdO71waUofKOdx39DyHS2YKWxDE/LUjkS9zw
1AyZG09l4uowyLRqwYhivEq9Za6rdc64yheuHatAM9kC2AOcVcsCPZquIe90k4t1
L7e9CAECgYEA1W483xTW8ngzxv9MMuPiW+PwVGRpyQrbO6OZOxdWEYfhrZlk5wlW
XK2T85jqooJwMWPTk1F49vZ9WN2KuLkL65GlkEtkFbxmOiFJjXuWwycbFSk05hPs
4AESBYHieaSPcwYhvLeG6g4PFyeqmbAGnKsJaj2ylPwDBOc7LgVlqAECgYEA64wo
gZwaj5SlP8M/OqGH04UVYr1kP/Eq6eiDfMyV5exy+pyzofZyNKUfJfw6sGgyRRHx
OVxlnPMsZ8zbdOXsvUEIeavpwDfQcp5eAURL65I6GMLsx2QpfiN2mDe1MqQW0jct
UleFaURgS84KHLE0+tBBg906jOHGjsE7Q3lyUJ8CgYBYYPev4K9JZGD8bEcfY6Ie
Lvsb1yC+8VHrFkmjYHxxcfUPr89KpGEwq2fynUW72YufyBiajkgq69Ln84U4DNhU
ydDnOXDOV191fsc4YQ8C7LSYRKH1DBcwgwD1at1fRbdpCAb8YHrrfLre+bv5PBzg
zyps5fOHIfwWEbI90lpQAQKBgQDoMMqBMTtxi+r1lucOScrVtFuncOCQs5BE8cIj
1JxzAQk6iBv/LSvZP2gcDq5f1Oaw9YXfsHguJfwA+ozeiAQ9bw0Gu3N52sstIXWz
M/rO5d9FJ2k3CEJqqFSwqkGBAQXKBUA06jeF1DREpX+MVxbNo1rhvMOJusn7UPm1
gtMwKwKBgQCfRzFO10ITwrw8rcRZwO9Axgqf11V7xn6qpgRxj4h0HOErVTCN1H0b
vE3Pz7cxS/g9vFRP37TuqBLfGVzPt9LAEFwCWPeZJLROBLHyu8XrhTbQx+sI2/pe
SBEJAQAHtYasFTE0sBEKNEY2rIt1c29XZhyhhtNKD9gRN/gB355wLg==
-----END RSA PRIVATE KEY-----

View File

@ -1,7 +0,0 @@
instances:
- name: es01
dns:
- es01
- localhost
ip:
- 127.0.0.1
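This instances file drives certificate generation for `es01` — a minimal sketch of regenerating the PEM pair with `elasticsearch-certutil`, assuming it is run inside an Elasticsearch container with the CA available at the paths shown (flags follow standard certutil usage and may need adjusting per version):
```bash
# Generate es01.crt/es01.key signed by the existing CA, then unpack next to it
bin/elasticsearch-certutil cert --silent --pem \
  --in config/certs/instances.yml \
  --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key \
  -out config/certs/certs.zip
unzip config/certs/certs.zip -d config/certs
```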

View File

@ -83,13 +83,13 @@ backend emqx_ws_back
frontend emqx_ssl
mode tcp
option tcplog
bind *:8883 ssl crt /var/lib/haproxy/emqx.pem ca-file /usr/local/etc/haproxy/certs/cacert.pem verify required no-sslv3
bind *:8883 ssl crt /tmp/emqx.pem ca-file /usr/local/etc/haproxy/certs/cacert.pem verify required no-sslv3
default_backend emqx_ssl_back
frontend emqx_wss
mode tcp
option tcplog
bind *:8084 ssl crt /var/lib/haproxy/emqx.pem ca-file /usr/local/etc/haproxy/certs/cacert.pem verify required no-sslv3
bind *:8084 ssl crt /tmp/emqx.pem ca-file /usr/local/etc/haproxy/certs/cacert.pem verify required no-sslv3
default_backend emqx_wss_back
backend emqx_ssl_back

View File

@ -1,58 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
####################
### REST Service Configuration
####################
# Is the REST service enabled
enable_rest_service=true
# the binding port of the REST service
# rest_service_port=18080
# the default row limit to a REST query response when the rowSize parameter is not given in request
# rest_query_default_row_size_limit=10000
# the expiration time of the user login information cache (in seconds)
# cache_expire_in_seconds=28800
# maximum number of users can be stored in the user login cache.
# cache_max_num=100
# init capacity of users can be stored in the user login cache.
# cache_init_num=10
# is SSL enabled
# enable_https=false
# SSL key store path
# key_store_path=
# SSL key store password
# key_store_pwd=
# SSL trust store path
# trust_store_path=
# SSL trust store password.
# trust_store_pwd=
# SSL timeout (in seconds)
# idle_timeout_in_seconds=50000

View File

@ -49,9 +49,6 @@ echo "+++++++ Creating Kafka Topics ++++++++"
# there seems to be a race condition when creating the topics (too early)
env KAFKA_CREATE_TOPICS="$KAFKA_CREATE_TOPICS_NG" KAFKA_PORT="$PORT1" create-topics.sh
# create a topic with max.message.bytes=100
/opt/kafka/bin/kafka-topics.sh --create --bootstrap-server "${SERVER}:${PORT1}" --topic max-100-bytes --partitions 1 --replication-factor 1 --config max.message.bytes=100
echo "+++++++ Wait until Kafka ports are down ++++++++"
bash -c 'while printf "" 2>>/dev/null >>/dev/tcp/$0/$1; do sleep 1; done' $SERVER $PORT1

View File

@ -1,9 +0,0 @@
[ms-sql]
Description=Microsoft ODBC Driver 17 for SQL Server
Driver=/opt/microsoft/msodbcsql17/lib64/libmsodbcsql-17.10.so.2.1
UsageCount=1
[ODBC Driver 17 for SQL Server]
Description=Microsoft ODBC Driver 17 for SQL Server
Driver=/opt/microsoft/msodbcsql17/lib64/libmsodbcsql-17.10.so.2.1
UsageCount=1

View File

@ -1,11 +1,18 @@
FROM docker.io/zmstone/openldap:2.5.16@sha256:a813922115a1d1f1b974399595921d1778fae22b3f1ee15dcfa8cfa89700dbc7
FROM buildpack-deps:stretch
ARG LDAP_TAG=2.4.50
RUN apt-get update && apt-get install -y groff groff-base
RUN wget ftp://ftp.openldap.org/pub/OpenLDAP/openldap-release/openldap-${LDAP_TAG}.tgz \
&& gunzip -c openldap-${LDAP_TAG}.tgz | tar xvfB - \
&& cd openldap-${LDAP_TAG} \
&& ./configure && make depend && make && make install \
&& cd .. && rm -rf openldap-${LDAP_TAG}
COPY .ci/docker-compose-file/openldap/slapd.conf /usr/local/etc/openldap/slapd.conf
COPY apps/emqx_ldap/test/data/emqx.io.ldif /usr/local/etc/openldap/schema/emqx.io.ldif
COPY apps/emqx_ldap/test/data/emqx.schema /usr/local/etc/openldap/schema/emqx.schema
COPY .ci/docker-compose-file/certs/ca.crt /usr/local/etc/openldap/cacert.pem
COPY .ci/docker-compose-file/certs/server.crt /usr/local/etc/openldap/cert.pem
COPY .ci/docker-compose-file/certs/server.key /usr/local/etc/openldap/key.pem
COPY apps/emqx_authn/test/data/emqx.io.ldif /usr/local/etc/openldap/schema/emqx.io.ldif
COPY apps/emqx_authn/test/data/emqx.schema /usr/local/etc/openldap/schema/emqx.schema
COPY apps/emqx_authn/test/data/certs/*.pem /usr/local/etc/openldap/
RUN mkdir -p /usr/local/etc/openldap/data \
&& slapadd -l /usr/local/etc/openldap/schema/emqx.io.ldif -f /usr/local/etc/openldap/slapd.conf

View File

@ -1,61 +0,0 @@
# LDAP authentication
To run manual tests with the default docker-compose files:
Expose the openldap container port by uncommenting the `ports` config in `docker-compose-ldap.yaml`.
To start openldap:
```
docker-compose -f ./.ci/docker-compose-file/docker-compose.yaml -f ./.ci/docker-compose-file/docker-compose-ldap.yaml up -d
```
## LDAP database
The LDAP database is populated from the files below:
```
apps/emqx_ldap/test/data/emqx.io.ldif /usr/local/etc/openldap/schema/emqx.io.ldif
apps/emqx_ldap/test/data/emqx.schema /usr/local/etc/openldap/schema/emqx.schema
```
## Minimal EMQX config
```
authentication = [
{
backend = ldap
base_dn = "uid=${username},ou=testdevice,dc=emqx,dc=io"
filter = "(& (objectClass=mqttUser) (uid=${username}))"
mechanism = password_based
method {
is_superuser_attribute = isSuperuser
password_attribute = userPassword
type = hash
}
password = public
pool_size = 8
query_timeout = "5s"
request_timeout = "10s"
server = "localhost:1389"
username = "cn=root,dc=emqx,dc=io"
}
]
```
## Example ldapsearch command
```
ldapsearch -x -H ldap://localhost:389 -D "cn=root,dc=emqx,dc=io" -W -b "uid=mqttuser0007,ou=testdevice,dc=emqx,dc=io" "(&(objectClass=mqttUser)(uid=mqttuser0007))"
```
## Example mqttx command
The client password hashes are generated from their usernames.
```
# disabled user
mqttx pub -t 't/1' -h localhost -p 1883 -m x -u mqttuser0006 -P mqttuser0006
# enabled super-user
mqttx pub -t 't/1' -h localhost -p 1883 -m x -u mqttuser0007 -P mqttuser0007
```

View File

@ -1,13 +1,14 @@
include /usr/local/etc/openldap/schema/core.schema
include /usr/local/etc/openldap/schema/cosine.schema
include /usr/local/etc/openldap/schema/inetorgperson.schema
include /usr/local/etc/openldap/schema/ppolicy.schema
include /usr/local/etc/openldap/schema/emqx.schema
TLSCACertificateFile /usr/local/etc/openldap/cacert.pem
TLSCertificateFile /usr/local/etc/openldap/cert.pem
TLSCertificateKeyFile /usr/local/etc/openldap/key.pem
database mdb
database bdb
suffix "dc=emqx,dc=io"
rootdn "cn=root,dc=emqx,dc=io"
rootpw {SSHA}eoF7NhNrejVYYyGHqnt+MdKNBh4r1w3W

View File

@ -1,6 +0,0 @@
certs
hostname
hosts
otel-collector.json
otel-collector-tls.json
resolv.conf

View File

@ -1,52 +0,0 @@
receivers:
otlp:
protocols:
grpc:
tls:
ca_file: /etc/certs/ca.crt
cert_file: /etc/certs/server.crt
key_file: /etc/certs/server.key
http:
tls:
ca_file: /etc/certs/ca.crt
cert_file: /etc/certs/server.crt
key_file: /etc/certs/server.key
exporters:
logging:
verbosity: detailed
otlp:
endpoint: jaeger.emqx.net:4317
tls:
insecure: true
debug:
verbosity: detailed
file:
path: /etc/otel-collector-tls.json
processors:
batch:
# send data immediately
timeout: 0
extensions:
health_check:
zpages:
endpoint: :55679
service:
extensions: [zpages, health_check]
pipelines:
traces:
receivers: [otlp]
processors: [batch]
exporters: [logging, otlp]
metrics:
receivers: [otlp]
processors: [batch]
exporters: [logging]
logs:
receivers: [otlp]
processors: [batch]
exporters: [logging, file]

View File

@ -1,51 +0,0 @@
receivers:
otlp:
protocols:
grpc:
tls:
# ca_file: /etc/ca.pem
# cert_file: /etc/server.pem
# key_file: /etc/server.key
http:
tls:
# ca_file: /etc/ca.pem
# cert_file: /etc/server.pem
# key_file: /etc/server.key
exporters:
logging:
verbosity: detailed
otlp:
endpoint: jaeger.emqx.net:4317
tls:
insecure: true
debug:
verbosity: detailed
file:
path: /etc/otel-collector.json
processors:
batch:
# send data immediately
timeout: 0
extensions:
health_check:
zpages:
endpoint: :55679
service:
extensions: [zpages, health_check]
pipelines:
traces:
receivers: [otlp]
processors: [batch]
exporters: [logging, otlp]
metrics:
receivers: [otlp]
processors: [batch]
exporters: [logging]
logs:
receivers: [otlp]
processors: [batch]
exporters: [logging, file]

View File

@ -1,4 +1,4 @@
ARG BUILD_FROM=public.ecr.aws/docker/library/postgres:13@sha256:fa69de30d02652cfdfb68166692e5186f6972c17f83c89c71ac8ff0916d46ae3
ARG BUILD_FROM=postgres:13
FROM ${BUILD_FROM}
ARG POSTGRES_USER=postgres
COPY --chown=$POSTGRES_USER ./pgsql/pg_hba_tls.conf /var/lib/postgresql/pg_hba.conf

View File

@ -6,9 +6,6 @@
set -x
set +e
# shellcheck disable=SC3028 disable=SC3054
SCRIPT_DIR="$( dirname -- "$( readlink -f -- "$0"; )"; )"
EMQX_TEST_DB_BACKEND=$1
if [ "$EMQX_TEST_DB_BACKEND" = "rlog" ]
then
@ -21,14 +18,13 @@ else
fi
apk update && apk add git curl
git clone -b develop-5.0 https://github.com/emqx/paho.mqtt.testing.git /paho.mqtt.testing
git clone -b develop-4.0 https://github.com/emqx/paho.mqtt.testing.git /paho.mqtt.testing
pip install pytest==6.2.5
pip install --require-hashes -r "$SCRIPT_DIR/requirements.txt"
pytest --retries 3 -v /paho.mqtt.testing/interoperability/test_client/V5/test_connect.py -k test_basic --host "$TARGET_HOST"
pytest -v /paho.mqtt.testing/interoperability/test_client/V5/test_connect.py -k test_basic --host "$TARGET_HOST"
RESULT=$?
pytest --retries 3 -v /paho.mqtt.testing/interoperability/test_client --host "$TARGET_HOST"
pytest -v /paho.mqtt.testing/interoperability/test_client --host "$TARGET_HOST"
RESULT=$(( RESULT + $? ))
# pytest -v /paho.mqtt.testing/interoperability/test_cluster --host1 "node1.emqx.io" --host2 "node2.emqx.io"

View File

@ -1,21 +0,0 @@
pytest-retry==1.6.1 \
--hash=sha256:3d420afc08e61ed3be28ecbb544371041b1b8e5fea7c94eb97cefa0d4ea9825c \
--hash=sha256:3d663159a9be4d6878705822cf27a0976f99ec1bc4f2d9494e80403b17f700f2
pytest==7.4.4 \
--hash=sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280 \
--hash=sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8
pluggy==1.3.0 \
--hash=sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12 \
--hash=sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7
iniconfig==2.0.0 \
--hash=sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3 \
--hash=sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374
tomli==2.0.1 \
--hash=sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc \
--hash=sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f
exceptiongroup==1.2.0 \
--hash=sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14 \
--hash=sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68
packaging==23.2 \
--hash=sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5 \
--hash=sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7

View File

@ -1,7 +0,0 @@
listeners.ssl.default = 5671
ssl_options.cacertfile = /opt/certs/ca.crt
ssl_options.certfile = /opt/certs/server.crt
ssl_options.keyfile = /opt/certs/server.key
ssl_options.verify = verify_peer
ssl_options.fail_if_no_peer_cert = true
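Since `fail_if_no_peer_cert` is enabled, a client certificate is required to complete the handshake — a minimal sketch probing the listener with `openssl s_client`, assuming the command runs from the compose directory so `./certs` resolves and port 5671 is reachable from the host:
```bash
# Mutual-TLS handshake check against the RabbitMQ TLS listener
openssl s_client -connect localhost:5671 -brief \
  -CAfile ./certs/ca.crt -cert ./certs/server.crt -key ./certs/server.key
```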

View File

@ -1,11 +1,10 @@
bind :: 0.0.0.0
port 6379
requirepass public
cluster-enabled yes
masteruser default
masterauth public
aclfile /usr/local/etc/redis/users.acl
protected-mode no
daemonize no

View File

@ -1,2 +0,0 @@
user default on >public ~* &* +@all
user test_user on >test_passwd ~* &* +@all

View File

@ -1,11 +1,10 @@
bind :: 0.0.0.0
port 6379
requirepass public
cluster-enabled yes
masteruser default
masterauth public
aclfile /usr/local/etc/redis/users.acl
tls-port 6389
tls-cert-file /etc/certs/cert.pem

View File

@ -1,2 +0,0 @@
user default on >public ~* &* +@all
user test_user on >test_passwd ~* &* +@all

View File

@ -1,6 +1,6 @@
bind :: 0.0.0.0
port 6379
aclfile /usr/local/etc/redis/users.acl
requirepass public
protected-mode no
daemonize no

View File

@ -1,7 +1,7 @@
sentinel resolve-hostnames yes
bind :: 0.0.0.0
sentinel monitor mytcpmaster redis-sentinel-master 6379 1
sentinel auth-pass mytcpmaster public
sentinel down-after-milliseconds mytcpmaster 10000
sentinel failover-timeout mytcpmaster 20000
sentinel monitor mymaster redis-sentinel-master 6379 1
sentinel auth-pass mymaster public
sentinel down-after-milliseconds mymaster 10000
sentinel failover-timeout mymaster 20000

View File

@ -1,10 +1,9 @@
bind :: 0.0.0.0
port 6379
requirepass public
replicaof redis-sentinel-master 6379
masteruser default
masterauth public
aclfile /usr/local/etc/redis/users.acl
protected-mode no
daemonize no

View File

@ -1,2 +0,0 @@
user default on >public ~* &* +@all
user test_user on >test_passwd ~* &* +@all

View File

@ -1,6 +1,6 @@
bind :: 0.0.0.0
port 6379
aclfile /usr/local/etc/redis/users.acl
requirepass public
tls-port 6389
tls-cert-file /etc/certs/cert.pem

View File

@ -8,7 +8,7 @@ tls-key-file /etc/certs/key.pem
tls-ca-cert-file /etc/certs/cacert.pem
tls-auth-clients no
sentinel monitor mytlsmaster redis-sentinel-tls-master 6389 1
sentinel auth-pass mytlsmaster public
sentinel down-after-milliseconds mytlsmaster 10000
sentinel failover-timeout mytlsmaster 20000
sentinel monitor mymaster redis-sentinel-tls-master 6389 1
sentinel auth-pass mymaster public
sentinel down-after-milliseconds mymaster 10000
sentinel failover-timeout mymaster 20000

View File

@ -1,10 +1,9 @@
bind :: 0.0.0.0
port 6379
requirepass public
replicaof redis-sentinel-tls-master 6389
masteruser default
masterauth public
aclfile /usr/local/etc/redis/users.acl
tls-port 6389
tls-replication yes

View File

@ -1,2 +0,0 @@
user default on >public ~* &* +@all
user test_user on >test_passwd ~* &* +@all

View File

@ -1,3 +0,0 @@
bind :: 0.0.0.0
port 6379
aclfile /usr/local/etc/redis/users.acl

View File

@ -1,2 +0,0 @@
user default on >public ~* &* +@all
user test_user on >test_passwd ~* &* +@all

View File

@ -1,9 +0,0 @@
bind :: 0.0.0.0
aclfile /usr/local/etc/redis/users.acl
tls-port 6380
tls-cert-file /etc/certs/redis.crt
tls-key-file /etc/certs/redis.key
tls-ca-cert-file /etc/certs/ca.crt
tls-protocols "TLSv1.3"
tls-ciphersuites "TLS_CHACHA20_POLY1305_SHA256"

View File

@ -1,2 +0,0 @@
user default on >public ~* &* +@all
user test_user on >test_passwd ~* &* +@all

View File

@ -1,24 +0,0 @@
brokerClusterName=DefaultCluster
brokerName=broker-a
brokerId=0
brokerIP1=rocketmq_broker
defaultTopicQueueNums=4
autoCreateTopicEnable=true
autoCreateSubscriptionGroup=true
listenPort=10911
deleteWhen=04
fileReservedTime=120
mapedFileSizeCommitLog=1073741824
mapedFileSizeConsumeQueue=300000
diskMaxUsedSpaceRatio=100
maxMessageSize=65536
brokerRole=ASYNC_MASTER
flushDiskType=ASYNC_FLUSH
aclEnable=true

View File

@ -1,12 +0,0 @@
globalWhiteRemoteAddresses:
accounts:
- accessKey: RocketMQ
secretKey: 12345678
whiteRemoteAddress:
admin: false
defaultTopicPerm: DENY
defaultGroupPerm: PUB|SUB
topicPerms:
- TopicTest=PUB|SUB
- Topic2=PUB|SUB

View File

@ -1,24 +0,0 @@
brokerClusterName=DefaultClusterSSL
brokerName=broker-a
brokerId=0
brokerIP1=rocketmq_broker_ssl
defaultTopicQueueNums=4
autoCreateTopicEnable=true
autoCreateSubscriptionGroup=true
listenPort=10911
deleteWhen=04
fileReservedTime=120
mapedFileSizeCommitLog=1073741824
mapedFileSizeConsumeQueue=300000
diskMaxUsedSpaceRatio=100
maxMessageSize=65536
brokerRole=ASYNC_MASTER
flushDiskType=ASYNC_FLUSH
aclEnable=true

View File

@ -1,12 +0,0 @@
globalWhiteRemoteAddresses:
accounts:
- accessKey: RocketMQ
secretKey: 12345678
whiteRemoteAddress:
admin: false
defaultTopicPerm: DENY
defaultGroupPerm: PUB|SUB
topicPerms:
- TopicTest=PUB|SUB
- Topic2=PUB|SUB

View File

@ -20,8 +20,8 @@ esac
{
echo "HOCON_ENV_OVERRIDE_PREFIX=EMQX_"
echo "EMQX_MQTT__RETRY_INTERVAL=2s"
echo "EMQX_MQTT__MAX_TOPIC_ALIAS=10"
echo "EMQX_ZONES__DEFAULT__MQTT__RETRY_INTERVAL=2s"
echo "EMQX_ZONES__DEFAULT__MQTT__MAX_TOPIC_ALIAS=10"
echo "EMQX_AUTHORIZATION__SOURCES=[]"
echo "EMQX_AUTHORIZATION__NO_MATCH=allow"
} >> .ci/docker-compose-file/conf.cluster.env
@ -29,7 +29,7 @@ esac
is_node_up() {
local node="$1"
docker exec -i "$node" \
bash -c "emqx eval \"['emqx@node1.emqx.io','emqx@node2.emqx.io'] = maps:get(running_nodes, ekka_cluster:info()).\"" > /dev/null 2>&1
bash -c "emqx eval-erl \"['emqx@node1.emqx.io','emqx@node2.emqx.io'] = maps:get(running_nodes, ekka_cluster:info()).\"" > /dev/null 2>&1
}
is_node_listening() {

View File

@ -77,155 +77,5 @@
"listen": "0.0.0.0:9295",
"upstream": "kafka-1.emqx.net:9295",
"enabled": true
},
{
"name": "rocketmq",
"listen": "0.0.0.0:9876",
"upstream": "rocketmq_namesrv:9876",
"enabled": true
},
{
"name": "cassa_tcp",
"listen": "0.0.0.0:9042",
"upstream": "cassandra:9042",
"enabled": true
},
{
"name": "cassa_tls",
"listen": "0.0.0.0:9142",
"upstream": "cassandra:9142",
"enabled": true
},
{
"name": "cassa_no_auth_tcp",
"listen": "0.0.0.0:9043",
"upstream": "cassandra_noauth:9042",
"enabled": true
},
{
"name": "cassa_no_auth_tls",
"listen": "0.0.0.0:9143",
"upstream": "cassandra_noauth:9142",
"enabled": true
},
{
"name": "sqlserver",
"listen": "0.0.0.0:1433",
"upstream": "sqlserver:1433",
"enabled": true
},
{
"name": "opents",
"listen": "0.0.0.0:4242",
"upstream": "opents:4242",
"enabled": true
},
{
"name": "pulsar_plain",
"listen": "0.0.0.0:6652",
"upstream": "pulsar:6652",
"enabled": true
},
{
"name": "pulsar_tls",
"listen": "0.0.0.0:6653",
"upstream": "pulsar:6653",
"enabled": true
},
{
"name": "oracle",
"listen": "0.0.0.0:1521",
"upstream": "oracle:1521",
"enabled": true
},
{
"name": "iotdb110",
"listen": "0.0.0.0:18080",
"upstream": "iotdb110:18080",
"enabled": true
},
{
"name": "iotdb130",
"listen": "0.0.0.0:28080",
"upstream": "iotdb130:18080",
"enabled": true
},
{
"name": "iotdb013",
"listen": "0.0.0.0:38080",
"upstream": "iotdb013:18080",
"enabled": true
},
{
"name": "minio_tcp",
"listen": "0.0.0.0:19000",
"upstream": "minio:9000",
"enabled": true
},
{
"name": "minio_tls",
"listen": "0.0.0.0:19100",
"upstream": "minio-tls:9100",
"enabled": true
},
{
"name": "gcp_emulator",
"listen": "0.0.0.0:8085",
"upstream": "gcp_emulator:8085",
"enabled": true
},
{
"name": "hstreamdb",
"listen": "0.0.0.0:6570",
"upstream": "hstreamdb:6570",
"enabled": true
},
{
"name": "greptimedb_http",
"listen": "0.0.0.0:4000",
"upstream": "greptimedb:4000",
"enabled": true
},
{
"name": "greptimedb_grpc",
"listen": "0.0.0.0:4001",
"upstream": "greptimedb:4001",
"enabled": true
},
{
"name": "kinesis",
"listen": "0.0.0.0:4566",
"upstream": "kinesis:4566",
"enabled": true
},
{
"name": "ldap_tcp",
"listen": "0.0.0.0:389",
"upstream": "ldap:389",
"enabled": true
},
{
"name": "ldap_ssl",
"listen": "0.0.0.0:636",
"upstream": "ldap:636",
"enabled": true
},
{
"name": "elasticsearch",
"listen": "0.0.0.0:9200",
"upstream": "elasticsearch:9200",
"enabled": true
},
{
"name": "azurite_plain",
"listen": "0.0.0.0:10000",
"upstream": "azurite:10000",
"enabled": true
},
{
"name": "couchbase",
"listen": "0.0.0.0:8093",
"upstream": "couchbase:8093",
"enabled": true
}
]

View File

@ -1,7 +1,8 @@
%% -*- mode: erlang -*-
{erl_opts, [debug_info]}.
{deps, [
{deps,
[
{minirest, {git, "https://github.com/emqx/minirest.git", {tag, "1.3.7"}}}
]}.

View File

@ -1,12 +1,12 @@
%% -*- mode: erlang -*-
{application, http_server, [
{description, "An HTTP server application"},
{application, http_server,
[{description, "An HTTP server application"},
{vsn, "0.2.0"},
{registered, []},
% {mod, {http_server_app, []}},
{modules, []},
{applications, [
kernel,
{applications,
[kernel,
stdlib,
minirest
]},

Some files were not shown because too many files have changed in this diff.