diff --git a/.ci/acl_migration_test/build.sh b/.ci/acl_migration_test/build.sh
new file mode 100755
index 000000000..b7c779f15
--- /dev/null
+++ b/.ci/acl_migration_test/build.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+set -xe
+
+cd "$EMQX_PATH"
+
+rm -rf _build _upgrade_base
+
+mkdir _upgrade_base
+pushd _upgrade_base
+ wget "https://s3-us-west-2.amazonaws.com/packages.emqx/emqx-ce/v${EMQX_BASE}/emqx-ubuntu20.04-${EMQX_BASE}-amd64.zip"
+popd
+
+make emqx-zip
diff --git a/.ci/acl_migration_test/prepare.sh b/.ci/acl_migration_test/prepare.sh
new file mode 100755
index 000000000..07706867a
--- /dev/null
+++ b/.ci/acl_migration_test/prepare.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+set -xe
+
+mkdir -p "$TEST_PATH"
+cd "$TEST_PATH"
+
+cp ../"$EMQX_PATH"/_upgrade_base/*.zip ./
+unzip ./*.zip
+
+cp ../"$EMQX_PATH"/_packages/emqx/*.zip ./emqx/releases/
+
+git clone --depth 1 https://github.com/terry-xiaoyu/one_more_emqx.git
+
+./one_more_emqx/one_more_emqx.sh emqx2
diff --git a/.ci/acl_migration_test/suite.sh b/.ci/acl_migration_test/suite.sh
new file mode 100755
index 000000000..69c024c8d
--- /dev/null
+++ b/.ci/acl_migration_test/suite.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+set -xe
+
+export EMQX_PATH="$1"
+export EMQX_BASE="$2"
+
+export TEST_PATH="emqx_test"
+
+./build.sh
+
+VERSION=$("$EMQX_PATH"/pkg-vsn.sh)
+export VERSION
+
+./prepare.sh
+
+./test.sh
diff --git a/.ci/acl_migration_test/test.sh b/.ci/acl_migration_test/test.sh
new file mode 100755
index 000000000..b214a0a52
--- /dev/null
+++ b/.ci/acl_migration_test/test.sh
@@ -0,0 +1,121 @@
+#!/bin/bash
+
+set -e
+
+EMQX_ENDPOINT="http://localhost:8081/api/v4/acl"
+EMQX2_ENDPOINT="http://localhost:8917/api/v4/acl"
+
+function run() {
+ emqx="$1"
+ shift
+
+ echo "[$emqx]" "$@"
+
+ pushd "$TEST_PATH/$emqx"
+ "$@"
+ popd
+}
+
+function post_rule() {
+ endpoint="$1"
+ rule="$2"
+ echo -n "->($endpoint) "
+ curl -s -u admin:public -X POST "$endpoint" -d "$rule"
+ echo
+}
+
+function verify_clientid_rule() {
+ endpoint="$1"
+ id="$2"
+ echo -n "<-($endpoint) "
+ curl -s -u admin:public "$endpoint/clientid/$id" | grep "$id" || (echo "verify rule for client $id failed" && return 1)
+}
+
+# Run nodes
+
+run emqx ./bin/emqx start
+run emqx2 ./bin/emqx start
+
+run emqx ./bin/emqx_ctl plugins load emqx_auth_mnesia
+run emqx2 ./bin/emqx_ctl plugins load emqx_auth_mnesia
+
+run emqx2 ./bin/emqx_ctl cluster join 'emqx@127.0.0.1'
+
+# Add ACL rule to unupgraded EMQX nodes
+
+post_rule "$EMQX_ENDPOINT" '{"clientid": "CLIENT1_A","topic": "t", "action": "pub", "access": "allow"}'
+post_rule "$EMQX2_ENDPOINT" '{"clientid": "CLIENT1_B","topic": "t", "action": "pub", "access": "allow"}'
+
+# Upgrade emqx2 node
+
+run emqx2 ./bin/emqx install "$VERSION"
+sleep 60
+
+# Verify upgrade blocked
+
+run emqx2 ./bin/emqx eval 'emqx_acl_mnesia_migrator:is_old_table_migrated().' | grep false || (echo "emqx2 shouldn't have migrated" && exit 1)
+
+# Verify old rules on both nodes
+
+verify_clientid_rule "$EMQX_ENDPOINT" 'CLIENT1_A'
+verify_clientid_rule "$EMQX2_ENDPOINT" 'CLIENT1_A'
+
+verify_clientid_rule "$EMQX_ENDPOINT" 'CLIENT1_B'
+verify_clientid_rule "$EMQX2_ENDPOINT" 'CLIENT1_B'
+
+# Add ACL on OLD and NEW node, verify on all nodes
+
+post_rule "$EMQX_ENDPOINT" '{"clientid": "CLIENT2_A","topic": "t", "action": "pub", "access": "allow"}'
+post_rule "$EMQX2_ENDPOINT" '{"clientid": "CLIENT2_B","topic": "t", "action": "pub", "access": "allow"}'
+
+verify_clientid_rule "$EMQX_ENDPOINT" 'CLIENT2_A'
+verify_clientid_rule "$EMQX2_ENDPOINT" 'CLIENT2_A'
+
+verify_clientid_rule "$EMQX_ENDPOINT" 'CLIENT2_B'
+verify_clientid_rule "$EMQX2_ENDPOINT" 'CLIENT2_B'
+
+# Upgrade emqx node
+
+run emqx ./bin/emqx install "$VERSION"
+
+# Wait for upgrade
+
+sleep 60
+
+# Verify if upgrade occurred
+
+run emqx ./bin/emqx eval 'emqx_acl_mnesia_migrator:is_old_table_migrated().' | grep true || (echo "emqx should have migrated" && exit 1)
+run emqx2 ./bin/emqx eval 'emqx_acl_mnesia_migrator:is_old_table_migrated().' | grep true || (echo "emqx2 should have migrated" && exit 1)
+
+# Verify rules are kept
+
+verify_clientid_rule "$EMQX_ENDPOINT" 'CLIENT1_A'
+verify_clientid_rule "$EMQX2_ENDPOINT" 'CLIENT1_A'
+
+verify_clientid_rule "$EMQX_ENDPOINT" 'CLIENT1_B'
+verify_clientid_rule "$EMQX2_ENDPOINT" 'CLIENT1_B'
+
+verify_clientid_rule "$EMQX_ENDPOINT" 'CLIENT2_A'
+verify_clientid_rule "$EMQX2_ENDPOINT" 'CLIENT2_A'
+
+verify_clientid_rule "$EMQX_ENDPOINT" 'CLIENT2_B'
+verify_clientid_rule "$EMQX2_ENDPOINT" 'CLIENT2_B'
+
+# Add ACL on OLD and NEW node, verify on all nodes
+
+post_rule "$EMQX_ENDPOINT" '{"clientid": "CLIENT3_A","topic": "t", "action": "pub", "access": "allow"}'
+post_rule "$EMQX2_ENDPOINT" '{"clientid": "CLIENT3_B","topic": "t", "action": "pub", "access": "allow"}'
+
+verify_clientid_rule "$EMQX_ENDPOINT" 'CLIENT3_A'
+verify_clientid_rule "$EMQX2_ENDPOINT" 'CLIENT3_A'
+
+verify_clientid_rule "$EMQX_ENDPOINT" 'CLIENT3_B'
+verify_clientid_rule "$EMQX2_ENDPOINT" 'CLIENT3_B'
+
+# Stop nodes
+
+run emqx ./bin/emqx stop
+run emqx2 ./bin/emqx stop
+
+echo "Success!"
+
diff --git a/.ci/docker-compose-file/docker-compose-emqx-broker-cluster.yaml b/.ci/docker-compose-file/docker-compose-emqx-broker-cluster.yaml
new file mode 100644
index 000000000..5d21010b9
--- /dev/null
+++ b/.ci/docker-compose-file/docker-compose-emqx-broker-cluster.yaml
@@ -0,0 +1,99 @@
+version: '3.9'
+
+services:
+ haproxy:
+ container_name: haproxy
+ image: haproxy:2.3
+ depends_on:
+ - emqx1
+ - emqx2
+ volumes:
+ - ./haproxy/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg
+ - ../../etc/certs:/usr/local/etc/haproxy/certs
+ ports:
+ - "18083:18083"
+ # - "1883:1883"
+ # - "8883:8883"
+ # - "8083:8083"
+ # - "5683:5683/udp"
+ # - "9999:9999"
+ # - "8084:8084"
+ networks:
+ - emqx_bridge
+ working_dir: /usr/local/etc/haproxy
+ command:
+ - bash
+ - -c
+ - |
+ cat /usr/local/etc/haproxy/certs/cert.pem /usr/local/etc/haproxy/certs/key.pem > /usr/local/etc/haproxy/certs/emqx.pem
+ haproxy -f /usr/local/etc/haproxy/haproxy.cfg
+
+ emqx1:
+ restart: always
+ container_name: node1.emqx.io
+ image: $TARGET:$EMQX_TAG
+ env_file:
+ - conf.cluster.env
+ volumes:
+ - etc:/opt/emqx/etc
+ environment:
+ - "EMQX_HOST=node1.emqx.io"
+ ports:
+ - "11881:18083"
+# - "1883:1883"
+ command:
+ - /bin/sh
+ - -c
+ - |
+ sed -i "s 127.0.0.1 $$(ip route show |grep "link" |awk '{print $$1}') g" /opt/emqx/etc/acl.conf
+ sed -i '/emqx_telemetry/d' /opt/emqx/data/loaded_plugins
+ /opt/emqx/bin/emqx foreground
+ healthcheck:
+ test: ["CMD", "/opt/emqx/bin/emqx_ctl", "status"]
+ interval: 5s
+ timeout: 25s
+ retries: 5
+ networks:
+ emqx_bridge:
+ aliases:
+ - node1.emqx.io
+
+ emqx2:
+ restart: always
+ container_name: node2.emqx.io
+ image: $TARGET:$EMQX_TAG
+ env_file:
+ - conf.cluster.env
+ volumes:
+ - etc:/opt/emqx/etc
+ environment:
+ - "EMQX_HOST=node2.emqx.io"
+ ports:
+ - "11882:18083"
+ command:
+ - /bin/sh
+ - -c
+ - |
+ sed -i "s 127.0.0.1 $$(ip route show |grep "link" |awk '{print $$1}') g" /opt/emqx/etc/acl.conf
+ sed -i '/emqx_telemetry/d' /opt/emqx/data/loaded_plugins
+ /opt/emqx/bin/emqx foreground
+ healthcheck:
+ test: ["CMD", "/opt/emqx/bin/emqx", "ping"]
+ interval: 5s
+ timeout: 25s
+ retries: 5
+ networks:
+ emqx_bridge:
+ aliases:
+ - node2.emqx.io
+volumes:
+ etc:
+networks:
+ emqx_bridge:
+ driver: bridge
+ name: emqx_bridge
+ ipam:
+ driver: default
+ config:
+ - subnet: 172.100.239.0/24
+ gateway: 172.100.239.1
diff --git a/.ci/docker-compose-file/docker-compose-emqx-cluster.yaml b/.ci/docker-compose-file/docker-compose-emqx-cluster.yaml
index 18e1bb6cc..3655928e7 100644
--- a/.ci/docker-compose-file/docker-compose-emqx-cluster.yaml
+++ b/.ci/docker-compose-file/docker-compose-emqx-cluster.yaml
@@ -27,6 +27,7 @@ services:
haproxy -f /usr/local/etc/haproxy/haproxy.cfg
emqx1:
+ restart: always
container_name: node1.emqx.io
image: $TARGET:$EMQX_TAG
env_file:
@@ -51,6 +52,7 @@ services:
- node1.emqx.io
emqx2:
+ restart: always
container_name: node2.emqx.io
image: $TARGET:$EMQX_TAG
env_file:
diff --git a/.ci/docker-compose-file/docker-compose-enterprise-tomcat-tcp.yaml b/.ci/docker-compose-file/docker-compose-enterprise-tomcat-tcp.yaml
new file mode 100644
index 000000000..90306919f
--- /dev/null
+++ b/.ci/docker-compose-file/docker-compose-enterprise-tomcat-tcp.yaml
@@ -0,0 +1,10 @@
+version: '3.9'
+
+services:
+ web_server:
+ container_name: Tomcat
+ build:
+ context: ./http-service
+ image: web-server
+ networks:
+ - emqx_bridge
diff --git a/.ci/docker-compose-file/http-service/Dockerfile b/.ci/docker-compose-file/http-service/Dockerfile
new file mode 100644
index 000000000..df1f7f98c
--- /dev/null
+++ b/.ci/docker-compose-file/http-service/Dockerfile
@@ -0,0 +1,15 @@
+FROM tomcat:10.0.5
+
+RUN wget https://downloads.apache.org/maven/maven-3/3.6.3/binaries/apache-maven-3.6.3-bin.zip \
+ && unzip apache-maven-3.6.3-bin.zip \
+ && mv apache-maven-3.6.3 /opt/apache-maven-3.6.3/ \
+ && ln -s /opt/apache-maven-3.6.3/ /opt/maven
+ENV M2_HOME=/opt/maven
+ENV M2=$M2_HOME/bin
+ENV PATH=$M2:$PATH
+COPY ./web-server /code
+WORKDIR /code
+RUN mvn package -Dmaven.skip.test=true
+RUN mv ./target/emqx-web-0.0.1.war /usr/local/tomcat/webapps/emqx-web.war
+EXPOSE 8080
+CMD ["/usr/local/tomcat/bin/catalina.sh","run"]
diff --git a/.ci/docker-compose-file/http-service/web-server/pom.xml b/.ci/docker-compose-file/http-service/web-server/pom.xml
new file mode 100644
index 000000000..7dfd4135e
--- /dev/null
+++ b/.ci/docker-compose-file/http-service/web-server/pom.xml
@@ -0,0 +1,65 @@
+
+ 4.0.0
+ emqx-web
+ emqx-web
+ 0.0.1
+ war
+
+
+ mysql
+ mysql-connector-java
+ 8.0.16
+
+
+ commons-dbutils
+ commons-dbutils
+ 1.7
+
+
+ commons-logging
+ commons-logging
+ 1.2
+
+
+ commons-dbcp
+ commons-dbcp
+ 1.4
+
+
+ commons-pool
+ commons-pool
+ 1.6
+
+
+ jakarta.servlet
+ jakarta.servlet-api
+ 5.0.0
+ provided
+
+
+
+
+
+ src/main/reousrce
+
+ **/*.java
+
+
+
+
+
+ maven-compiler-plugin
+ 3.8.1
+
+ 1.8
+ 1.8
+
+
+
+ maven-war-plugin
+ 3.2.3
+
+
+
+
+
\ No newline at end of file
diff --git a/.ci/docker-compose-file/http-service/web-server/src/main/java/com/emqx/dao/AuthDAO.java b/.ci/docker-compose-file/http-service/web-server/src/main/java/com/emqx/dao/AuthDAO.java
new file mode 100644
index 000000000..61340df42
--- /dev/null
+++ b/.ci/docker-compose-file/http-service/web-server/src/main/java/com/emqx/dao/AuthDAO.java
@@ -0,0 +1,54 @@
+package com.emqx.dao;
+
+import java.io.IOException;
+import java.sql.SQLException;
+
+import org.apache.commons.dbutils.QueryRunner;
+import org.apache.commons.dbutils.handlers.ScalarHandler;
+
+import com.emqx.util.EmqxDatabaseUtil;
+
+public class AuthDAO {
+
+ public String getUserName(String userName) throws IOException, SQLException {
+ QueryRunner runner = new QueryRunner(EmqxDatabaseUtil.getDataSource());
+ String sql = "select password from http_user where username='"+userName+"'";
+ String password =runner.query(sql, new ScalarHandler());
+ return password;
+ }
+
+ public String getClient(String clientid) throws IOException, SQLException {
+ QueryRunner runner = new QueryRunner(EmqxDatabaseUtil.getDataSource());
+ String sql = "select password from http_user where clientid='"+clientid+"'";
+ String password =runner.query(sql, new ScalarHandler());
+ return password;
+ }
+
+ public String getUserAccess(String userName) throws IOException, SQLException {
+ QueryRunner runner = new QueryRunner(EmqxDatabaseUtil.getDataSource());
+ String sql = "select access from http_acl where username='"+userName+"'";
+ String access =runner.query(sql, new ScalarHandler());
+ return access;
+ }
+
+ public String getUserTopic(String userName) throws IOException, SQLException {
+ QueryRunner runner = new QueryRunner(EmqxDatabaseUtil.getDataSource());
+ String sql = "select topic from http_acl where username='"+userName+"'";
+ String topic =runner.query(sql, new ScalarHandler());
+ return topic;
+ }
+
+ public String getClientAccess(String clientid) throws IOException, SQLException {
+ QueryRunner runner = new QueryRunner(EmqxDatabaseUtil.getDataSource());
+ String sql = "select access from http_acl where clientid='"+clientid+"'";
+ String access =runner.query(sql, new ScalarHandler());
+ return access;
+ }
+
+ public String getClientTopic(String clientid) throws IOException, SQLException {
+ QueryRunner runner = new QueryRunner(EmqxDatabaseUtil.getDataSource());
+ String sql = "select topic from http_acl where clientid='"+clientid+"'";
+ String topic =runner.query(sql, new ScalarHandler());
+ return topic;
+ }
+}
diff --git a/.ci/docker-compose-file/http-service/web-server/src/main/java/com/emqx/dao/DBUtilsTest.java b/.ci/docker-compose-file/http-service/web-server/src/main/java/com/emqx/dao/DBUtilsTest.java
new file mode 100644
index 000000000..9836d4b11
--- /dev/null
+++ b/.ci/docker-compose-file/http-service/web-server/src/main/java/com/emqx/dao/DBUtilsTest.java
@@ -0,0 +1,45 @@
+package com.emqx.dao;
+
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.sql.SQLException;
+import java.util.Properties;
+
+import org.apache.commons.dbcp.BasicDataSource;
+import org.apache.commons.dbutils.QueryRunner;
+import org.apache.commons.dbutils.handlers.ColumnListHandler;
+import org.apache.commons.dbutils.handlers.ScalarHandler;
+import org.apache.commons.dbutils.handlers.columns.StringColumnHandler;
+
+
+public class DBUtilsTest {
+
+ public static void main(String args[]) throws FileNotFoundException, IOException, SQLException {
+ Properties property = new Properties();//流文件
+
+ property.load(DBUtilsTest.class.getClassLoader().getResourceAsStream("database.properties"));
+
+ BasicDataSource dataSource = new BasicDataSource();
+ dataSource.setDriverClassName(property.getProperty("jdbc.driver"));
+ dataSource.setUrl(property.getProperty("jdbc.url"));
+ dataSource.setUsername(property.getProperty("jdbc.username"));
+ dataSource.setPassword(property.getProperty("jdbc.password"));
+
+ // 初始化连接数 if(initialSize!=null)
+ //dataSource.setInitialSize(Integer.parseInt(initialSize));
+
+ // 最小空闲连接 if(minIdle!=null)
+ //dataSource.setMinIdle(Integer.parseInt(minIdle));
+
+ // 最大空闲连接 if(maxIdle!=null)
+ //dataSource.setMaxIdle(Integer.parseInt(maxIdle));
+
+ QueryRunner runner = new QueryRunner(dataSource);
+ String sql="select username from mqtt_user where id=1";
+ String result = runner.query(sql, new ScalarHandler());
+
+ System.out.println(result);
+
+ }
+}
diff --git a/.ci/docker-compose-file/http-service/web-server/src/main/java/com/emqx/servlet/AclServlet.java b/.ci/docker-compose-file/http-service/web-server/src/main/java/com/emqx/servlet/AclServlet.java
new file mode 100644
index 000000000..85915d550
--- /dev/null
+++ b/.ci/docker-compose-file/http-service/web-server/src/main/java/com/emqx/servlet/AclServlet.java
@@ -0,0 +1,103 @@
+package com.emqx.servlet;
+
+import java.io.IOException;
+import java.sql.SQLException;
+
+import com.emqx.dao.AuthDAO;
+
+import jakarta.servlet.ServletException;
+import jakarta.servlet.http.HttpServlet;
+import jakarta.servlet.http.HttpServletRequest;
+import jakarta.servlet.http.HttpServletResponse;
+
+public class AclServlet extends HttpServlet {
+
+ @Override
+ protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
+ // TODO Auto-generated method stub
+ doPost(req, resp);
+ }
+ @Override
+ protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
+ String clientid = req.getParameter("clientid");
+ String username = req.getParameter("username");
+ String access = req.getParameter("access");
+ String topic = req.getParameter("topic");
+ //String password = req.getParameter("password");
+
+ //step0: password is not null, or not pass.
+
+ AuthDAO dao = new AuthDAO();
+ try {
+ //step1: check username access&topic
+ if(username != null) {
+ String access_1 = dao.getUserAccess(username);
+ String topic_1 = dao.getUserTopic(username);
+
+ if(access.equals(access_1)) {
+ if(topic.equals(topic_1)) {
+ resp.setStatus(200);
+ }
+ else {
+ if(clientid != null){
+ String access_2 = dao.getClientAccess(clientid);
+ String topic_2 = dao.getClientTopic(clientid);
+ if(access.equals(access_2)) {
+ if(topic.equals(topic_2)) {
+ resp.setStatus(200);
+ }
+ else {
+ resp.setStatus(400);
+ }
+ }else {
+ resp.setStatus(400);
+ }
+ }else {
+ resp.setStatus(400);
+ }
+ }
+ }else {//step2.1: username password is not match, then check clientid password
+ if(clientid != null){
+ String access_3 = dao.getClientAccess(clientid);
+ String topic_3 = dao.getClientTopic(clientid);
+ if(access.equals(access_3)) {
+ if(topic.equals(topic_3)) {
+ resp.setStatus(200);
+ }
+ else {
+ resp.setStatus(400);
+ }
+ }else {
+ resp.setStatus(400);
+ }
+ }else {
+ resp.setStatus(400);
+ }
+ }
+ }else {//step2.2: username is null, then check clientid password
+ if(clientid != null){
+ String access_4 = dao.getClientAccess(clientid);
+ String topic_4 = dao.getClientTopic(clientid);
+ if(access.equals(access_4)) {
+ if(topic.equals(topic_4)) {
+ resp.setStatus(200);
+ }
+ else {
+ resp.setStatus(400);
+ }
+ }else {
+ resp.setStatus(400);
+ }
+ }else {
+ resp.setStatus(400);
+ }
+ }
+ } catch (IOException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ } catch (SQLException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+ }
+}
diff --git a/.ci/docker-compose-file/http-service/web-server/src/main/java/com/emqx/servlet/AuthServlet.java b/.ci/docker-compose-file/http-service/web-server/src/main/java/com/emqx/servlet/AuthServlet.java
new file mode 100644
index 000000000..a59ca7567
--- /dev/null
+++ b/.ci/docker-compose-file/http-service/web-server/src/main/java/com/emqx/servlet/AuthServlet.java
@@ -0,0 +1,72 @@
+package com.emqx.servlet;
+
+import java.io.IOException;
+import java.sql.SQLException;
+
+import com.emqx.dao.AuthDAO;
+
+import jakarta.servlet.ServletException;
+import jakarta.servlet.http.HttpServlet;
+import jakarta.servlet.http.HttpServletRequest;
+import jakarta.servlet.http.HttpServletResponse;
+
+public class AuthServlet extends HttpServlet {
+
+ @Override
+ protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
+ // TODO Auto-generated method stub
+ doPost(req, resp);
+ }
+ @Override
+ protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
+ String clientid = req.getParameter("clientid");
+ String username =req.getParameter("username");
+ String password = req.getParameter("password");
+
+ //step0: password is not null, or not pass.
+ if(password == null) {
+ resp.setStatus(400);
+ return;
+ }
+ AuthDAO dao = new AuthDAO();
+ try {
+ //step1: check username password
+ if(username != null) {
+ String password_d = dao.getUserName(username);
+
+ if(password.equals(password_d)) {
+ resp.setStatus(200);
+ //200
+ }else {//step2.1: username password is not match, then check clientid password
+ if(clientid != null){
+ String password_c = dao.getClient(clientid);
+ if(password.equals(password_c)) {
+ resp.setStatus(200);
+ }else {
+ resp.setStatus(400);
+ }
+ }else {
+ resp.setStatus(400);
+ }
+ }
+ }else {//step2.2: username is null, then check clientid password
+ if(clientid != null){
+ String password_c = dao.getClient(clientid);
+ if(password.equals(password_c)) {
+ resp.setStatus(200);
+ }else {
+ resp.setStatus(400);
+ }
+ }else {
+ resp.setStatus(400);
+ }
+ }
+ } catch (IOException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ } catch (SQLException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+ }
+}
diff --git a/.ci/docker-compose-file/http-service/web-server/src/main/java/com/emqx/util/EmqxDatabaseUtil.java b/.ci/docker-compose-file/http-service/web-server/src/main/java/com/emqx/util/EmqxDatabaseUtil.java
new file mode 100644
index 000000000..b8fb0f229
--- /dev/null
+++ b/.ci/docker-compose-file/http-service/web-server/src/main/java/com/emqx/util/EmqxDatabaseUtil.java
@@ -0,0 +1,27 @@
+package com.emqx.util;
+
+import java.io.IOException;
+import java.util.Properties;
+
+import javax.sql.DataSource;
+
+import org.apache.commons.dbcp.BasicDataSource;
+
+import com.emqx.dao.DBUtilsTest;
+
+public class EmqxDatabaseUtil {
+
+ public static DataSource getDataSource() throws IOException {
+ Properties property = new Properties();// 流文件
+
+ property.load(EmqxDatabaseUtil.class.getClassLoader().getResourceAsStream("database.properties"));
+
+ BasicDataSource dataSource = new BasicDataSource();
+ dataSource.setDriverClassName(property.getProperty("jdbc.driver"));
+ dataSource.setUrl(property.getProperty("jdbc.url"));
+ dataSource.setUsername(property.getProperty("jdbc.username"));
+ dataSource.setPassword(property.getProperty("jdbc.password"));
+
+ return dataSource;
+ }
+}
diff --git a/.ci/docker-compose-file/http-service/web-server/src/main/reousrce/database.properties b/.ci/docker-compose-file/http-service/web-server/src/main/reousrce/database.properties
new file mode 100644
index 000000000..11886f347
--- /dev/null
+++ b/.ci/docker-compose-file/http-service/web-server/src/main/reousrce/database.properties
@@ -0,0 +1,4 @@
+jdbc.driver= com.mysql.jdbc.Driver
+jdbc.url= jdbc:mysql://mysql_server:3306/mqtt
+jdbc.username= root
+jdbc.password= public
\ No newline at end of file
diff --git a/.ci/docker-compose-file/http-service/web-server/src/main/webapp/META-INF/MANIFEST.MF b/.ci/docker-compose-file/http-service/web-server/src/main/webapp/META-INF/MANIFEST.MF
new file mode 100644
index 000000000..254272e1c
--- /dev/null
+++ b/.ci/docker-compose-file/http-service/web-server/src/main/webapp/META-INF/MANIFEST.MF
@@ -0,0 +1,3 @@
+Manifest-Version: 1.0
+Class-Path:
+
diff --git a/.ci/docker-compose-file/http-service/web-server/src/main/webapp/WEB-INF/web.xml b/.ci/docker-compose-file/http-service/web-server/src/main/webapp/WEB-INF/web.xml
new file mode 100644
index 000000000..e779a4541
--- /dev/null
+++ b/.ci/docker-compose-file/http-service/web-server/src/main/webapp/WEB-INF/web.xml
@@ -0,0 +1,31 @@
+
+
+ emqx-web
+
+ Auth
+ com.emqx.servlet.AuthServlet
+
+
+ Acl
+ com.emqx.servlet.AclServlet
+
+
+ Auth
+ /auth
+
+
+ Acl
+ /acl
+
+
+ index.html
+ index.htm
+ index.jsp
+ default.html
+ default.htm
+ default.jsp
+
+
\ No newline at end of file
diff --git a/.ci/docker-compose-file/http-service/web-server/src/main/webapp/index.html b/.ci/docker-compose-file/http-service/web-server/src/main/webapp/index.html
new file mode 100644
index 000000000..2db63b2ea
--- /dev/null
+++ b/.ci/docker-compose-file/http-service/web-server/src/main/webapp/index.html
@@ -0,0 +1,10 @@
+
+
+
+
+love
+
+
+It's lucky, jiabanxiang.
+
+
\ No newline at end of file
diff --git a/.ci/fvt_tests/relup.lux b/.ci/fvt_tests/relup.lux
index 93889dad5..2940f5ce0 100644
--- a/.ci/fvt_tests/relup.lux
+++ b/.ci/fvt_tests/relup.lux
@@ -23,10 +23,7 @@
?SH-PROMPT
!cd emqx
- !export EMQX_LOG__CONSOLE_HANDLER__ENABLE=true
- !export EMQX_LOG__CONSOLE_HANDLER__LEVEL=debug
- !export EMQX_LOG__PRIMARY_LEVEL=debug
- !export EMQX_ZONES__DEFAULT__LISTENERS__MQTT_WSS__BIND="0.0.0.0:8085"
+ !export EMQX_LOG__LEVEL=debug
!./bin/emqx start
?EMQ X .* is started successfully!
@@ -39,9 +36,7 @@
?SH-PROMPT
!cd emqx2
- !export EMQX_LOG__CONSOLE_HANDLER__ENABLE=true
- !export EMQX_LOG__CONSOLE_HANDLER__LEVEL=debug
- !export EMQX_LOG__PRIMARY_LEVEL=debug
+ !export EMQX_LOG__LEVEL=debug
!./bin/emqx start
?EMQ X .* is started successfully!
diff --git a/.github/workflows/build_packages.yaml b/.github/workflows/build_packages.yaml
index 33b4d1e7b..befa37912 100644
--- a/.github/workflows/build_packages.yaml
+++ b/.github/workflows/build_packages.yaml
@@ -468,7 +468,7 @@ jobs:
-H "Authorization: token ${{ secrets.CI_GIT_TOKEN }}" \
-H "Accept: application/vnd.github.v3+json" \
-X POST \
- -d "{\"ref\":\"v1.0.2\",\"inputs\":{\"version\": \"${{ env.version }}\", \"emqx_ee\": \"true\"}}" \
+ -d "{\"ref\":\"v1.0.3\",\"inputs\":{\"version\": \"${{ env.version }}\", \"emqx_ee\": \"true\"}}" \
"https://api.github.com/repos/emqx/emqx-ci-helper/actions/workflows/update_emqx_repos.yaml/dispatches"
- name: update repo.emqx.io
if: github.event_name == 'release' && endsWith(github.repository, 'emqx') && matrix.profile == 'emqx'
@@ -477,7 +477,7 @@ jobs:
-H "Authorization: token ${{ secrets.CI_GIT_TOKEN }}" \
-H "Accept: application/vnd.github.v3+json" \
-X POST \
- -d "{\"ref\":\"v1.0.2\",\"inputs\":{\"version\": \"${{ env.version }}\", \"emqx_ce\": \"true\"}}" \
+ -d "{\"ref\":\"v1.0.3\",\"inputs\":{\"version\": \"${{ env.version }}\", \"emqx_ce\": \"true\"}}" \
"https://api.github.com/repos/emqx/emqx-ci-helper/actions/workflows/update_emqx_repos.yaml/dispatches"
- name: update homebrew packages
if: github.event_name == 'release' && endsWith(github.repository, 'emqx') && matrix.profile == 'emqx'
@@ -487,7 +487,7 @@ jobs:
-H "Authorization: token ${{ secrets.CI_GIT_TOKEN }}" \
-H "Accept: application/vnd.github.v3+json" \
-X POST \
- -d "{\"ref\":\"v1.0.2\",\"inputs\":{\"version\": \"${{ env.version }}\"}}" \
+ -d "{\"ref\":\"v1.0.3\",\"inputs\":{\"version\": \"${{ env.version }}\"}}" \
"https://api.github.com/repos/emqx/emqx-ci-helper/actions/workflows/update_emqx_homebrew.yaml/dispatches"
fi
- uses: geekyeggo/delete-artifact@v1
diff --git a/.github/workflows/run_acl_migration_tests.yaml b/.github/workflows/run_acl_migration_tests.yaml
new file mode 100644
index 000000000..855d9463c
--- /dev/null
+++ b/.github/workflows/run_acl_migration_tests.yaml
@@ -0,0 +1,22 @@
+name: ACL fix & migration integration tests
+
+on: workflow_dispatch
+
+jobs:
+ test:
+ runs-on: ubuntu-20.04
+ container: emqx/build-env:erl23.2.7.2-emqx-2-ubuntu20.04
+ strategy:
+ fail-fast: true
+ env:
+ BASE_VERSION: "4.3.0"
+ steps:
+ - uses: actions/checkout@v2
+ with:
+ path: emqx
+ - name: Prepare scripts
+ run: |
+ cp ./emqx/.ci/acl_migration_test/*.sh ./
+ - name: Run tests
+ run: |
+ ./suite.sh emqx "$BASE_VERSION"
diff --git a/.github/workflows/run_automate_tests.yaml b/.github/workflows/run_automate_tests.yaml
new file mode 100644
index 000000000..e654e87c2
--- /dev/null
+++ b/.github/workflows/run_automate_tests.yaml
@@ -0,0 +1,437 @@
+name: Integration Test Suites
+
+on:
+ push:
+ tags:
+ - "v4.*"
+ pull_request:
+ branches:
+ - "main-v4.*"
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ outputs:
+ imgname: ${{ steps.build_docker.outputs.imgname}}
+ version: ${{ steps.build_docker.outputs.version}}
+ steps:
+ - uses: actions/checkout@v2
+ - name: build docker
+ id: build_docker
+ run: |
+ if [ -f EMQX_ENTERPRISE ]; then
+ echo "https://ci%40emqx.io:${{ secrets.CI_GIT_TOKEN }}@github.com" > $HOME/.git-credentials
+ git config --global credential.helper store
+ echo "${{ secrets.CI_GIT_TOKEN }}" >> scripts/git-token
+ make deps-emqx-ee
+ fi
+ make docker
+ echo "::set-output name=version::$(./pkg-vsn.sh)"
+ if [ -f EMQX_ENTERPRISE ]; then
+ echo "::set-output name=imgname::emqx-ee"
+ else
+ echo "::set-output name=imgname::emqx"
+ fi
+ - uses: actions/upload-artifact@v2
+ with:
+ name: emqx-docker-image-zip
+ path: _packages/${{ steps.build_docker.outputs.imgname }}/${{ steps.build_docker.outputs.imgname }}-docker-${{ steps.build_docker.outputs.version }}.zip
+
+ webhook:
+ runs-on: ubuntu-latest
+
+ strategy:
+ fail-fast: false
+ matrix:
+ webhook_type:
+ - webhook_data_bridge
+
+ needs: build
+ steps:
+ - uses: actions/checkout@v2
+ - uses: actions/download-artifact@v2
+ with:
+ name: emqx-docker-image-zip
+ path: /tmp
+ - name: load docker image
+ env:
+ imgname: ${{ needs.build.outputs.imgname}}
+ version: ${{ needs.build.outputs.version }}
+ run: |
+ unzip -q /tmp/${imgname}-docker-${version}.zip -d /tmp
+ docker load < /tmp/${imgname}-docker-${version}
+ - name: docker compose up
+ timeout-minutes: 5
+ env:
+ TARGET: emqx/${{ needs.build.outputs.imgname }}
+ EMQX_TAG: ${{ needs.build.outputs.version }}
+ run: |
+ docker-compose \
+ -f .ci/docker-compose-file/docker-compose-emqx-cluster.yaml \
+ up -d --build
+ - uses: actions/checkout@v2
+ with:
+ repository: emqx/emqx-svt-web-server
+ ref: web-server-1.0
+ path: emqx-svt-web-server
+ - uses: actions/download-artifact@v2
+ - name: run webserver in docker
+ run: |
+ cd ./emqx-svt-web-server/svtserver
+ mvn clean package
+ cd target
+ docker run --name webserver --network emqx_bridge -d -v $(pwd)/svtserver-0.0.1.jar:/webserver/svtserver-0.0.1.jar --workdir /webserver openjdk:8-jdk bash \
+ -c "java -jar svtserver-0.0.1.jar"
+ - name: wait docker compose up
+ timeout-minutes: 5
+ run: |
+ while [ "$(docker inspect -f '{{ .State.Health.Status}}' node1.emqx.io)" != "healthy" ] || [ "$(docker inspect -f '{{ .State.Health.Status}}' node2.emqx.io)" != "healthy" ]; do
+ echo "['$(date -u +"%Y-%m-%dT%H:%M:%SZ")']:waiting emqx";
+ sleep 5;
+ done
+ docker ps -a
+ echo HAPROXY_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' haproxy) >> $GITHUB_ENV
+ echo WEB_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' webserver) >> $GITHUB_ENV
+ - uses: actions/checkout@v2
+ with:
+ repository: emqx/emqx-fvt
+ ref: integration_test_suites
+ path: scripts
+ - uses: actions/setup-java@v1
+ with:
+ java-version: '8.0.282' # The JDK version to make available on the path.
+ java-package: jdk # (jre, jdk, or jdk+fx) - defaults to jdk
+ architecture: x64 # (x64 or x86) - defaults to x64
+ - name: install jmeter
+ timeout-minutes: 10
+ env:
+ JMETER_VERSION: 5.3
+ run: |
+ wget --no-verbose --no-check-certificate -O /tmp/apache-jmeter.tgz https://downloads.apache.org/jmeter/binaries/apache-jmeter-$JMETER_VERSION.tgz
+ cd /tmp && tar -xvf apache-jmeter.tgz
+ echo "jmeter.save.saveservice.output_format=xml" >> /tmp/apache-jmeter-$JMETER_VERSION/user.properties
+ echo "jmeter.save.saveservice.response_data.on_error=true" >> /tmp/apache-jmeter-$JMETER_VERSION/user.properties
+ wget --no-verbose -O /tmp/apache-jmeter-$JMETER_VERSION/lib/ext/mqtt-xmeter-2.0.2-jar-with-dependencies.jar https://raw.githubusercontent.com/xmeter-net/mqtt-jmeter/master/Download/v2.0.2/mqtt-xmeter-2.0.2-jar-with-dependencies.jar
+ ln -s /tmp/apache-jmeter-$JMETER_VERSION /opt/jmeter
+ - name: run jmeter
+ run: |
+ /opt/jmeter/bin/jmeter.sh \
+ -Jjmeter.save.saveservice.output_format=xml -n \
+ -t scripts/.ci/automate-test-suite/${{ matrix.webhook_type }}.jmx \
+ -Demqx_ip=$HAPROXY_IP \
+ -Dweb_ip=$WEB_IP \
+ -l jmeter_logs/webhook_${{ matrix.webhook_type }}.jtl \
+ -j jmeter_logs/logs/webhook_${{ matrix.webhook_type }}.log
+ - name: check logs
+ run: |
+ if cat jmeter_logs/webhook_${{ matrix.webhook_type }}.jtl | grep -e 'true' > /dev/null 2>&1; then
+ echo "check logs failed"
+ exit 1
+ fi
+ - uses: actions/upload-artifact@v1
+ if: always()
+ with:
+ name: jmeter_logs
+ path: ./jmeter_logs
+
+ mysql:
+ runs-on: ubuntu-latest
+
+ strategy:
+ fail-fast: false
+ matrix:
+ mysql_tag:
+ - 5.7
+ - 8
+ mysql_type:
+ - mysql_auth_acl
+
+ needs: build
+ steps:
+ - uses: actions/checkout@v2
+ - uses: actions/download-artifact@v2
+ with:
+ name: emqx-docker-image-zip
+ path: /tmp
+ - name: load docker image
+ env:
+ imgname: ${{ needs.build.outputs.imgname }}
+ version: ${{ needs.build.outputs.version }}
+ run: |
+ unzip -q /tmp/${imgname}-docker-${version}.zip -d /tmp
+ docker load < /tmp/${imgname}-docker-${version}
+ - name: docker compose up
+ timeout-minutes: 5
+ env:
+ TARGET: emqx/${{ needs.build.outputs.imgname }}
+ EMQX_TAG: ${{ needs.build.outputs.version }}
+ MYSQL_TAG: ${{ matrix.mysql_tag }}
+ run: |
+ docker-compose \
+ -f .ci/docker-compose-file/docker-compose-emqx-cluster.yaml \
+ -f .ci/docker-compose-file/docker-compose-mysql-tls.yaml \
+ up -d --build
+ - name: wait docker compose up
+ timeout-minutes: 5
+ run: |
+ while [ "$(docker inspect -f '{{ .State.Health.Status}}' node1.emqx.io)" != "healthy" ] || [ "$(docker inspect -f '{{ .State.Health.Status}}' node2.emqx.io)" != "healthy" ]; do
+ echo "['$(date -u +"%Y-%m-%dT%H:%M:%SZ")']:waiting emqx";
+ sleep 5;
+ done
+ while [ $(docker ps -a --filter name=client --filter exited=0 | wc -l) \
+ != $(docker ps -a --filter name=client | wc -l) ]; do
+ sleep 1
+ done
+ docker ps -a
+ echo HAPROXY_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' haproxy) >> $GITHUB_ENV
+ echo MYSQL_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' mysql) >> $GITHUB_ENV
+ - uses: actions/checkout@v2
+ with:
+ repository: emqx/emqx-fvt
+ ref: integration_test_suites
+ path: scripts
+ - uses: actions/setup-java@v1
+ with:
+ java-version: '8.0.282' # The JDK version to make available on the path.
+ java-package: jdk # (jre, jdk, or jdk+fx) - defaults to jdk
+ architecture: x64 # (x64 or x86) - defaults to x64
+ - name: install jmeter
+ timeout-minutes: 10
+ env:
+ JMETER_VERSION: 5.3
+ run: |
+ wget --no-verbose --no-check-certificate -O /tmp/apache-jmeter.tgz https://downloads.apache.org/jmeter/binaries/apache-jmeter-$JMETER_VERSION.tgz
+ cd /tmp && tar -xvf apache-jmeter.tgz
+ echo "jmeter.save.saveservice.output_format=xml" >> /tmp/apache-jmeter-$JMETER_VERSION/user.properties
+ echo "jmeter.save.saveservice.response_data.on_error=true" >> /tmp/apache-jmeter-$JMETER_VERSION/user.properties
+ wget --no-verbose -O /tmp/apache-jmeter-$JMETER_VERSION/lib/ext/mqtt-xmeter-2.0.2-jar-with-dependencies.jar https://raw.githubusercontent.com/xmeter-net/mqtt-jmeter/master/Download/v2.0.2/mqtt-xmeter-2.0.2-jar-with-dependencies.jar
+ ln -s /tmp/apache-jmeter-$JMETER_VERSION /opt/jmeter
+ - name: install jmeter plugin
+ run: |
+ wget --no-verbose -O "/opt/jmeter/lib/mysql-connector-java-8.0.16.jar" https://repo1.maven.org/maven2/mysql/mysql-connector-java/8.0.16/mysql-connector-java-8.0.16.jar
+ - name: run jmeter
+ run: |
+ /opt/jmeter/bin/jmeter.sh \
+ -Jjmeter.save.saveservice.output_format=xml -n \
+ -t scripts/.ci/automate-test-suite/${{ matrix.mysql_type }}.jmx \
+ -Droute="apps/emqx_auth_mysql/test/emqx_auth_mysql_SUITE_data" \
+ -Dmysql_ip=$MYSQL_IP \
+ -Demqx_ip=$HAPROXY_IP \
+ -Ddbname="mqtt" \
+ -Dmysql_user="ssluser" \
+ -Ddb_user="root" \
+ -Dmysql_pwd="public" \
+ -Dconfig_path="/tmp/etc" \
+ -Ddocker_path=".ci/docker-compose-file" \
+ -l jmeter_logs/${{ matrix.mysql_type }}_${{ matrix.mysql_tag }}.jtl \
+ -j jmeter_logs/logs/${{ matrix.mysql_type }}_${{ matrix.mysql_tag }}.log
+ - name: check logs
+ run: |
+ if cat jmeter_logs/${{ matrix.mysql_type }}_${{ matrix.mysql_tag }}.jtl | grep -e 'true' > /dev/null 2>&1; then
+ echo "check logs failed"
+ exit 1
+ fi
+ - uses: actions/upload-artifact@v1
+ if: always()
+ with:
+ name: jmeter_logs
+ path: ./jmeter_logs
+
+
+ postgresql:
+ runs-on: ubuntu-latest
+
+ strategy:
+ fail-fast: false
+ matrix:
+ pgsql_type:
+ - pgsql_auth_acl
+ pgsql_tag:
+ - 9
+ - 10
+ - 11
+ - 12
+ - 13
+
+ needs: build
+ steps:
+ - uses: actions/checkout@v2
+ - uses: actions/download-artifact@v2
+ with:
+ name: emqx-docker-image-zip
+ path: /tmp
+ - name: load docker image
+ env:
+ imgname: ${{ needs.build.outputs.imgname }}
+ version: ${{ needs.build.outputs.version }}
+ run: |
+ unzip -q /tmp/${imgname}-docker-${version}.zip -d /tmp
+ docker load < /tmp/${imgname}-docker-${version}
+ - name: docker compose up
+ timeout-minutes: 5
+ env:
+ TARGET: emqx/${{ needs.build.outputs.imgname }}
+ EMQX_TAG: ${{ needs.build.outputs.version }}
+ PGSQL_TAG: ${{ matrix.pgsql_tag }}
+ run: |
+ docker-compose \
+ -f .ci/docker-compose-file/docker-compose-emqx-broker-cluster.yaml \
+ -f .ci/docker-compose-file/docker-compose-pgsql-tls.yaml \
+ up -d --build
+ - name: wait docker compose up
+ timeout-minutes: 5
+ run: |
+ while [ "$(docker inspect -f '{{ .State.Health.Status}}' node1.emqx.io)" != "healthy" ] || [ "$(docker inspect -f '{{ .State.Health.Status}}' node2.emqx.io)" != "healthy" ]; do
+ echo "['$(date -u +"%Y-%m-%dT%H:%M:%SZ")']:waiting emqx";
+ sleep 5;
+ done
+ docker ps -a
+ echo HAPROXY_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' haproxy) >> $GITHUB_ENV
+ echo PGSQL_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' pgsql) >> $GITHUB_ENV
+ echo CONFIG_PATH=$(docker inspect -f '{{ range .Mounts }}{{ if eq .Name "docker-compose-file_etc" }}{{ .Source }}{{ end }}{{ end }}' node1.emqx.io) >> $GITHUB_ENV
+ - uses: actions/checkout@v2
+ with:
+ repository: emqx/emqx-fvt
+ ref: integration_test_suites
+ path: scripts
+ - uses: actions/setup-java@v1
+ with:
+ java-version: '8.0.282' # The JDK version to make available on the path.
+ java-package: jdk # (jre, jdk, or jdk+fx) - defaults to jdk
+ architecture: x64 # (x64 or x86) - defaults to x64
+ - name: install jmeter
+ timeout-minutes: 10
+ env:
+ JMETER_VERSION: 5.3
+ run: |
+ wget --no-verbose --no-check-certificate -O /tmp/apache-jmeter.tgz https://downloads.apache.org/jmeter/binaries/apache-jmeter-$JMETER_VERSION.tgz
+ cd /tmp && tar -xvf apache-jmeter.tgz
+ echo "jmeter.save.saveservice.output_format=xml" >> /tmp/apache-jmeter-$JMETER_VERSION/user.properties
+ echo "jmeter.save.saveservice.response_data.on_error=true" >> /tmp/apache-jmeter-$JMETER_VERSION/user.properties
+ wget --no-verbose -O /tmp/apache-jmeter-$JMETER_VERSION/lib/ext/mqtt-xmeter-2.0.2-jar-with-dependencies.jar https://raw.githubusercontent.com/xmeter-net/mqtt-jmeter/master/Download/v2.0.2/mqtt-xmeter-2.0.2-jar-with-dependencies.jar
+ ln -s /tmp/apache-jmeter-$JMETER_VERSION /opt/jmeter
+ - name: install jmeter plugin
+ run: |
+ wget --no-verbose -O "/opt/jmeter/lib/postgresql-42.2.18.jar" https://repo1.maven.org/maven2/org/postgresql/postgresql/42.2.18/postgresql-42.2.18.jar
+ - name: run jmeter
+ run: |
+ sudo /opt/jmeter/bin/jmeter.sh \
+ -Jjmeter.save.saveservice.output_format=xml -n \
+ -t scripts/.ci/automate-test-suite/${{ matrix.pgsql_type }}.jmx \
+ -Droute="apps/emqx_auth_pgsql/test/emqx_auth_pgsql_SUITE_data" \
+ -Dca_name="ca.pem" \
+ -Dkey_name="client-key.pem" \
+ -Dcert_name="client-cert.pem" \
+ -Ddb_ip=$PGSQL_IP \
+ -Dpgsql_ip=$PGSQL_IP \
+ -Demqx_ip=$HAPROXY_IP \
+ -Dpgsql_user="root" \
+ -Dpgsql_pwd="public" \
+ -Ddbname="mqtt" \
+ -Dpgsql_db="mqtt" \
+ -Dport="5432" \
+ -Dconfig_path=$CONFIG_PATH \
+ -Ddocker_path=".ci/docker-compose-file" \
+ -l jmeter_logs/${{ matrix.pgsql_type }}_${{ matrix.pgsql_tag }}.jtl \
+ -j jmeter_logs/logs/${{ matrix.pgsql_type }}_${{ matrix.pgsql_tag }}.log
+ - name: check logs
+ run: |
+ if cat jmeter_logs/${{ matrix.pgsql_type }}_${{ matrix.pgsql_tag }}.jtl | grep -e 'true' > /dev/null 2>&1; then
+ echo "check logs failed"
+ exit 1
+ fi
+ - uses: actions/upload-artifact@v1
+ if: always()
+ with:
+ name: jmeter_logs
+ path: ./jmeter_logs
+
+ http:
+ runs-on: ubuntu-latest
+
+ needs: build
+ steps:
+ - uses: actions/checkout@v2
+ - uses: actions/download-artifact@v2
+ with:
+ name: emqx-docker-image-zip
+ path: /tmp
+ - name: load docker image
+ env:
+ imgname: ${{ needs.build.outputs.imgname }}
+ version: ${{ needs.build.outputs.version }}
+ run: |
+ unzip -q /tmp/${imgname}-docker-${version}.zip -d /tmp
+ docker load < /tmp/${imgname}-docker-${version}
+ - name: docker compose up
+ timeout-minutes: 5
+ env:
+ TARGET: emqx/${{ needs.build.outputs.imgname }}
+ EMQX_TAG: ${{ needs.build.outputs.version }}
+ MYSQL_TAG: 8
+ run: |
+ docker-compose \
+ -f .ci/docker-compose-file/docker-compose-emqx-broker-cluster.yaml \
+ -f .ci/docker-compose-file/docker-compose-mysql-tcp.yaml \
+ -f .ci/docker-compose-file/docker-compose-enterprise-tomcat-tcp.yaml \
+ up -d --build
+ - name: wait docker compose up
+ timeout-minutes: 5
+ run: |
+ while [ "$(docker inspect -f '{{ .State.Health.Status}}' node1.emqx.io)" != "healthy" ] || [ "$(docker inspect -f '{{ .State.Health.Status}}' node2.emqx.io)" != "healthy" ]; do
+ echo "['$(date -u +"%Y-%m-%dT%H:%M:%SZ")']:waiting emqx";
+ sleep 5;
+ done
+ docker ps -a
+ echo HAPROXY_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' haproxy) >> $GITHUB_ENV
+ echo HTTP_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' Tomcat) >> $GITHUB_ENV
+ echo MYSQL_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' mysql) >> $GITHUB_ENV
+ echo CONFIG_PATH=$(docker inspect -f '{{ range .Mounts }}{{ if eq .Name "docker-compose-file_etc" }}{{ .Source }}{{ end }}{{ end }}' node1.emqx.io) >> $GITHUB_ENV
+ - uses: actions/checkout@v2
+ with:
+ repository: emqx/emqx-fvt
+ ref: integration_test_suites
+ path: scripts
+ - uses: actions/setup-java@v1
+ with:
+ java-version: '8.0.282' # The JDK version to make available on the path.
+ java-package: jdk # (jre, jdk, or jdk+fx) - defaults to jdk
+ architecture: x64 # (x64 or x86) - defaults to x64
+ - name: install jmeter
+ timeout-minutes: 10
+ env:
+ JMETER_VERSION: 5.3
+ run: |
+ wget --no-verbose --no-check-certificate -O /tmp/apache-jmeter.tgz https://downloads.apache.org/jmeter/binaries/apache-jmeter-$JMETER_VERSION.tgz
+ cd /tmp && tar -xvf apache-jmeter.tgz
+ echo "jmeter.save.saveservice.output_format=xml" >> /tmp/apache-jmeter-$JMETER_VERSION/user.properties
+ echo "jmeter.save.saveservice.response_data.on_error=true" >> /tmp/apache-jmeter-$JMETER_VERSION/user.properties
+ wget --no-verbose -O /tmp/apache-jmeter-$JMETER_VERSION/lib/ext/mqtt-xmeter-2.0.2-jar-with-dependencies.jar https://raw.githubusercontent.com/xmeter-net/mqtt-jmeter/master/Download/v2.0.2/mqtt-xmeter-2.0.2-jar-with-dependencies.jar
+ ln -s /tmp/apache-jmeter-$JMETER_VERSION /opt/jmeter
+ - name: install jmeter plugin
+ run: |
+ wget --no-verbose -O "/opt/jmeter/lib/mysql-connector-java-8.0.16.jar" https://repo1.maven.org/maven2/mysql/mysql-connector-java/8.0.16/mysql-connector-java-8.0.16.jar
+ - name: run jmeter
+ run: |
+ sudo /opt/jmeter/bin/jmeter.sh \
+ -Jjmeter.save.saveservice.output_format=xml -n \
+ -t scripts/.ci/automate-test-suite/http_auth_acl.jmx \
+ -Dmysql_ip=$MYSQL_IP \
+ -Demqx_ip=$HAPROXY_IP \
+ -Dweb_server_ip=$HTTP_IP \
+ -Dconfig_path=$CONFIG_PATH \
+ -Ddocker_path=".ci/docker-compose-file" \
+ -l jmeter_logs/http_auth_acl.jtl \
+ -j jmeter_logs/logs/http_auth_acl.log
+ - name: check logs
+ run: |
+ if cat jmeter_logs/http_auth_acl.jtl | grep -e 'true' > /dev/null 2>&1; then
+ echo "check logs failed"
+ sudo cat /var/lib/docker/volumes/docker-compose-file_etc/_data/emqx.conf
+ exit 1
+ fi
+ - uses: actions/upload-artifact@v1
+ if: always()
+ with:
+ name: jmeter_logs
+ path: ./jmeter_logs
diff --git a/apps/emqx_auth_mnesia/include/emqx_auth_mnesia.hrl b/apps/emqx_auth_mnesia/include/emqx_auth_mnesia.hrl
index 034bd4f30..143f6b61e 100644
--- a/apps/emqx_auth_mnesia/include/emqx_auth_mnesia.hrl
+++ b/apps/emqx_auth_mnesia/include/emqx_auth_mnesia.hrl
@@ -1,21 +1,47 @@
-define(APP, emqx_auth_mnesia).
--type(login():: {clientid, binary()}
+-type(login() :: {clientid, binary()}
| {username, binary()}).
+-type(acl_target() :: login() | all).
+
+-type(acl_target_type() :: clientid | username | all).
+
+-type(access():: allow | deny).
+-type(action():: pub | sub).
+-type(legacy_action():: action() | pubsub).
+-type(created_at():: integer()).
+
-record(emqx_user, {
login :: login(),
password :: binary(),
- created_at :: integer()
+ created_at :: created_at()
}).
--record(emqx_acl, {
- filter:: {login() | all, emqx_topic:topic()},
- action :: pub | sub | pubsub,
- access :: allow | deny,
- created_at :: integer()
+-define(ACL_TABLE, emqx_acl).
+
+-define(MIGRATION_MARK_KEY, emqx_acl2_migration_started).
+
+-record(?ACL_TABLE, {
+ filter :: {acl_target(), emqx_topic:topic()} | ?MIGRATION_MARK_KEY,
+ action :: legacy_action(),
+ access :: access(),
+ created_at :: created_at()
}).
+-define(MIGRATION_MARK_RECORD, #?ACL_TABLE{filter = ?MIGRATION_MARK_KEY, action = pub, access = deny, created_at = 0}).
+
+-type(rule() :: {access(), action(), emqx_topic:topic(), created_at()}).
+
+-define(ACL_TABLE2, emqx_acl2).
+
+-record(?ACL_TABLE2, {
+ who :: acl_target(),
+ rules :: [ rule() ]
+ }).
+
+-type(acl_record() :: {acl_target(), emqx_topic:topic(), action(), access(), created_at()}).
+
-record(auth_metrics, {
success = 'client.auth.success',
failure = 'client.auth.failure',
diff --git a/apps/emqx_auth_mnesia/src/emqx_acl_mnesia.erl b/apps/emqx_auth_mnesia/src/emqx_acl_mnesia.erl
index c21955182..1e29d9121 100644
--- a/apps/emqx_auth_mnesia/src/emqx_acl_mnesia.erl
+++ b/apps/emqx_auth_mnesia/src/emqx_acl_mnesia.erl
@@ -18,24 +18,16 @@
-include("emqx_auth_mnesia.hrl").
--include_lib("stdlib/include/ms_transform.hrl").
-
--define(TABLE, emqx_acl).
-
%% ACL Callbacks
-export([ init/0
, register_metrics/0
, check_acl/5
, description/0
- ]).
+ ]).
init() ->
- ok = ekka_mnesia:create_table(emqx_acl, [
- {type, bag},
- {disc_copies, [node()]},
- {attributes, record_info(fields, emqx_acl)},
- {storage_properties, [{ets, [{read_concurrency, true}]}]}]),
- ok = ekka_mnesia:copy_table(emqx_acl, disc_copies).
+ ok = emqx_acl_mnesia_db:create_table(),
+ ok = emqx_acl_mnesia_db:create_table2().
-spec(register_metrics() -> ok).
register_metrics() ->
@@ -46,12 +38,12 @@ check_acl(ClientInfo = #{ clientid := Clientid }, PubSub, Topic, _NoMatchAction,
Acls = case Username of
undefined ->
- emqx_acl_mnesia_cli:lookup_acl({clientid, Clientid}) ++
- emqx_acl_mnesia_cli:lookup_acl(all);
+ emqx_acl_mnesia_db:lookup_acl({clientid, Clientid}) ++
+ emqx_acl_mnesia_db:lookup_acl(all);
_ ->
- emqx_acl_mnesia_cli:lookup_acl({clientid, Clientid}) ++
- emqx_acl_mnesia_cli:lookup_acl({username, Username}) ++
- emqx_acl_mnesia_cli:lookup_acl(all)
+ emqx_acl_mnesia_db:lookup_acl({clientid, Clientid}) ++
+ emqx_acl_mnesia_db:lookup_acl({username, Username}) ++
+ emqx_acl_mnesia_db:lookup_acl(all)
end,
case match(ClientInfo, PubSub, Topic, Acls) of
@@ -83,7 +75,6 @@ match(ClientInfo, PubSub, Topic, [ {_, ACLTopic, Action, Access, _} | Acls]) ->
match_topic(ClientInfo, Topic, ACLTopic) when is_binary(Topic) ->
emqx_topic:match(Topic, feed_var(ClientInfo, ACLTopic)).
-match_actions(_, pubsub) -> true;
match_actions(subscribe, sub) -> true;
match_actions(publish, pub) -> true;
match_actions(_, _) -> false.
diff --git a/apps/emqx_auth_mnesia/src/emqx_acl_mnesia_api.erl b/apps/emqx_auth_mnesia/src/emqx_acl_mnesia_api.erl
index fbd044d3f..10615b3e0 100644
--- a/apps/emqx_auth_mnesia/src/emqx_acl_mnesia_api.erl
+++ b/apps/emqx_auth_mnesia/src/emqx_acl_mnesia_api.erl
@@ -16,8 +16,6 @@
-module(emqx_acl_mnesia_api).
--include("emqx_auth_mnesia.hrl").
-
-include_lib("stdlib/include/ms_transform.hrl").
-import(proplists, [ get_value/2
@@ -99,26 +97,22 @@
]).
list_clientid(_Bindings, Params) ->
- MatchSpec = ets:fun2ms(
- fun({emqx_acl, {{clientid, Clientid}, Topic}, Action, Access, CreatedAt}) -> {{clientid,Clientid}, Topic, Action,Access, CreatedAt} end),
- return({ok, emqx_auth_mnesia_api:paginate(emqx_acl, MatchSpec, Params, fun emqx_acl_mnesia_cli:comparing/2, fun format/1)}).
+ Table = emqx_acl_mnesia_db:login_acl_table(clientid),
+ return({ok, emqx_auth_mnesia_api:paginate_qh(Table, count(Table), Params, fun emqx_acl_mnesia_db:comparing/2, fun format/1)}).
list_username(_Bindings, Params) ->
- MatchSpec = ets:fun2ms(
- fun({emqx_acl, {{username, Username}, Topic}, Action, Access, CreatedAt}) -> {{username, Username}, Topic, Action,Access, CreatedAt} end),
- return({ok, emqx_auth_mnesia_api:paginate(emqx_acl, MatchSpec, Params, fun emqx_acl_mnesia_cli:comparing/2, fun format/1)}).
+ Table = emqx_acl_mnesia_db:login_acl_table(username),
+ return({ok, emqx_auth_mnesia_api:paginate_qh(Table, count(Table), Params, fun emqx_acl_mnesia_db:comparing/2, fun format/1)}).
list_all(_Bindings, Params) ->
- MatchSpec = ets:fun2ms(
- fun({emqx_acl, {all, Topic}, Action, Access, CreatedAt}) -> {all, Topic, Action,Access, CreatedAt}end
- ),
- return({ok, emqx_auth_mnesia_api:paginate(emqx_acl, MatchSpec, Params, fun emqx_acl_mnesia_cli:comparing/2, fun format/1)}).
+ Table = emqx_acl_mnesia_db:login_acl_table(all),
+ return({ok, emqx_auth_mnesia_api:paginate_qh(Table, count(Table), Params, fun emqx_acl_mnesia_db:comparing/2, fun format/1)}).
lookup(#{clientid := Clientid}, _Params) ->
- return({ok, format(emqx_acl_mnesia_cli:lookup_acl({clientid, urldecode(Clientid)}))});
+ return({ok, format(emqx_acl_mnesia_db:lookup_acl({clientid, urldecode(Clientid)}))});
lookup(#{username := Username}, _Params) ->
- return({ok, format(emqx_acl_mnesia_cli:lookup_acl({username, urldecode(Username)}))}).
+ return({ok, format(emqx_acl_mnesia_db:lookup_acl({username, urldecode(Username)}))}).
add(_Bindings, Params) ->
[ P | _] = Params,
@@ -152,7 +146,7 @@ do_add(Params) ->
Access = get_value(<<"access">>, Params),
Re = case validate([login, topic, action, access], [Login, Topic, Action, Access]) of
ok ->
- emqx_acl_mnesia_cli:add_acl(Login, Topic, erlang:binary_to_atom(Action, utf8), erlang:binary_to_atom(Access, utf8));
+ emqx_acl_mnesia_db:add_acl(Login, Topic, erlang:binary_to_atom(Action, utf8), erlang:binary_to_atom(Access, utf8));
Err -> Err
end,
maps:merge(#{topic => Topic,
@@ -165,15 +159,19 @@ do_add(Params) ->
end).
delete(#{clientid := Clientid, topic := Topic}, _) ->
- return(emqx_acl_mnesia_cli:remove_acl({clientid, urldecode(Clientid)}, urldecode(Topic)));
+ return(emqx_acl_mnesia_db:remove_acl({clientid, urldecode(Clientid)}, urldecode(Topic)));
delete(#{username := Username, topic := Topic}, _) ->
- return(emqx_acl_mnesia_cli:remove_acl({username, urldecode(Username)}, urldecode(Topic)));
+ return(emqx_acl_mnesia_db:remove_acl({username, urldecode(Username)}, urldecode(Topic)));
delete(#{topic := Topic}, _) ->
- return(emqx_acl_mnesia_cli:remove_acl(all, urldecode(Topic))).
+ return(emqx_acl_mnesia_db:remove_acl(all, urldecode(Topic))).
%%------------------------------------------------------------------------------
%% Interval Funcs
%%------------------------------------------------------------------------------
+
+count(QH) ->
+ qlc:fold(fun(_, Count) -> Count + 1 end, 0, QH).
+
format({{clientid, Clientid}, Topic, Action, Access, _CreatedAt}) ->
#{clientid => Clientid, topic => Topic, action => Action, access => Access};
format({{username, Username}, Topic, Action, Access, _CreatedAt}) ->
diff --git a/apps/emqx_auth_mnesia/src/emqx_acl_mnesia_cli.erl b/apps/emqx_auth_mnesia/src/emqx_acl_mnesia_cli.erl
index 302a81637..145f0ede8 100644
--- a/apps/emqx_auth_mnesia/src/emqx_acl_mnesia_cli.erl
+++ b/apps/emqx_auth_mnesia/src/emqx_acl_mnesia_cli.erl
@@ -16,110 +16,28 @@
-module(emqx_acl_mnesia_cli).
--include("emqx_auth_mnesia.hrl").
--include_lib("emqx/include/logger.hrl").
--include_lib("stdlib/include/ms_transform.hrl").
--define(TABLE, emqx_acl).
-
-%% Acl APIs
--export([ add_acl/4
- , lookup_acl/1
- , all_acls/0
- , all_acls/1
- , remove_acl/2
- ]).
-
-export([cli/1]).
--export([comparing/2]).
-%%--------------------------------------------------------------------
-%% Acl API
-%%--------------------------------------------------------------------
-
-%% @doc Add Acls
--spec(add_acl(login() | all, emqx_topic:topic(), pub | sub | pubsub, allow | deny) ->
- ok | {error, any()}).
-add_acl(Login, Topic, Action, Access) ->
- Filter = {Login, Topic},
- Acl = #?TABLE{
- filter = Filter,
- action = Action,
- access = Access,
- created_at = erlang:system_time(millisecond)
- },
- ret(mnesia:transaction(
- fun() ->
- OldRecords = mnesia:wread({?TABLE, Filter}),
- case Action of
- pubsub ->
- update_permission(pub, Acl, OldRecords),
- update_permission(sub, Acl, OldRecords);
- _ ->
- update_permission(Action, Acl, OldRecords)
- end
- end)).
-
-%% @doc Lookup acl by login
--spec(lookup_acl(login() | all) -> list()).
-lookup_acl(undefined) -> [];
-lookup_acl(Login) ->
- MatchSpec = ets:fun2ms(fun({?TABLE, {Filter, ACLTopic}, Action, Access, CreatedAt})
- when Filter =:= Login ->
- {Filter, ACLTopic, Action, Access, CreatedAt}
- end),
- lists:sort(fun comparing/2, ets:select(?TABLE, MatchSpec)).
-
-%% @doc Remove acl
--spec(remove_acl(login() | all, emqx_topic:topic()) -> ok | {error, any()}).
-remove_acl(Login, Topic) ->
- ret(mnesia:transaction(fun mnesia:delete/1, [{?TABLE, {Login, Topic}}])).
-
-%% @doc All logins
--spec(all_acls() -> list()).
-all_acls() ->
- all_acls(clientid) ++
- all_acls(username) ++
- all_acls(all).
-
-all_acls(clientid) ->
- MatchSpec = ets:fun2ms(
- fun({?TABLE, {{clientid, Clientid}, Topic}, Action, Access, CreatedAt}) ->
- {{clientid, Clientid}, Topic, Action, Access, CreatedAt}
- end),
- lists:sort(fun comparing/2, ets:select(?TABLE, MatchSpec));
-all_acls(username) ->
- MatchSpec = ets:fun2ms(
- fun({?TABLE, {{username, Username}, Topic}, Action, Access, CreatedAt}) ->
- {{username, Username}, Topic, Action, Access, CreatedAt}
- end),
- lists:sort(fun comparing/2, ets:select(?TABLE, MatchSpec));
-all_acls(all) ->
- MatchSpec = ets:fun2ms(
- fun({?TABLE, {all, Topic}, Action, Access, CreatedAt}) ->
- {all, Topic, Action, Access, CreatedAt}
- end
- ),
- lists:sort(fun comparing/2, ets:select(?TABLE, MatchSpec)).
%%--------------------------------------------------------------------
%% ACL Cli
%%--------------------------------------------------------------------
cli(["list"]) ->
- [print_acl(Acl) || Acl <- all_acls()];
+ [print_acl(Acl) || Acl <- emqx_acl_mnesia_db:all_acls()];
cli(["list", "clientid"]) ->
- [print_acl(Acl) || Acl <- all_acls(clientid)];
+ [print_acl(Acl) || Acl <- emqx_acl_mnesia_db:all_acls(clientid)];
cli(["list", "username"]) ->
- [print_acl(Acl) || Acl <- all_acls(username)];
+ [print_acl(Acl) || Acl <- emqx_acl_mnesia_db:all_acls(username)];
cli(["list", "_all"]) ->
- [print_acl(Acl) || Acl <- all_acls(all)];
+ [print_acl(Acl) || Acl <- emqx_acl_mnesia_db:all_acls(all)];
cli(["add", "clientid", Clientid, Topic, Action, Access]) ->
case validate(action, Action) andalso validate(access, Access) of
true ->
- case add_acl(
+ case emqx_acl_mnesia_db:add_acl(
{clientid, iolist_to_binary(Clientid)},
iolist_to_binary(Topic),
list_to_existing_atom(Action),
@@ -135,7 +53,7 @@ cli(["add", "clientid", Clientid, Topic, Action, Access]) ->
cli(["add", "username", Username, Topic, Action, Access]) ->
case validate(action, Action) andalso validate(access, Access) of
true ->
- case add_acl(
+ case emqx_acl_mnesia_db:add_acl(
{username, iolist_to_binary(Username)},
iolist_to_binary(Topic),
list_to_existing_atom(Action),
@@ -151,7 +69,7 @@ cli(["add", "username", Username, Topic, Action, Access]) ->
cli(["add", "_all", Topic, Action, Access]) ->
case validate(action, Action) andalso validate(access, Access) of
true ->
- case add_acl(
+ case emqx_acl_mnesia_db:add_acl(
all,
iolist_to_binary(Topic),
list_to_existing_atom(Action),
@@ -165,16 +83,16 @@ cli(["add", "_all", Topic, Action, Access]) ->
end;
cli(["show", "clientid", Clientid]) ->
- [print_acl(Acl) || Acl <- lookup_acl({clientid, iolist_to_binary(Clientid)})];
+ [print_acl(Acl) || Acl <- emqx_acl_mnesia_db:lookup_acl({clientid, iolist_to_binary(Clientid)})];
cli(["show", "username", Username]) ->
- [print_acl(Acl) || Acl <- lookup_acl({username, iolist_to_binary(Username)})];
+ [print_acl(Acl) || Acl <- emqx_acl_mnesia_db:lookup_acl({username, iolist_to_binary(Username)})];
cli(["del", "clientid", Clientid, Topic])->
cli(["delete", "clientid", Clientid, Topic]);
cli(["delete", "clientid", Clientid, Topic])->
- case remove_acl({clientid, iolist_to_binary(Clientid)}, iolist_to_binary(Topic)) of
+ case emqx_acl_mnesia_db:remove_acl({clientid, iolist_to_binary(Clientid)}, iolist_to_binary(Topic)) of
ok -> emqx_ctl:print("ok~n");
{error, Reason} -> emqx_ctl:print("Error: ~p~n", [Reason])
end;
@@ -183,7 +101,7 @@ cli(["del", "username", Username, Topic])->
cli(["delete", "username", Username, Topic]);
cli(["delete", "username", Username, Topic])->
- case remove_acl({username, iolist_to_binary(Username)}, iolist_to_binary(Topic)) of
+ case emqx_acl_mnesia_db:remove_acl({username, iolist_to_binary(Username)}, iolist_to_binary(Topic)) of
ok -> emqx_ctl:print("ok~n");
{error, Reason} -> emqx_ctl:print("Error: ~p~n", [Reason])
end;
@@ -192,7 +110,7 @@ cli(["del", "_all", Topic])->
cli(["delete", "_all", Topic]);
cli(["delete", "_all", Topic])->
- case remove_acl(all, iolist_to_binary(Topic)) of
+ case emqx_acl_mnesia_db:remove_acl(all, iolist_to_binary(Topic)) of
ok -> emqx_ctl:print("ok~n");
{error, Reason} -> emqx_ctl:print("Error: ~p~n", [Reason])
end;
@@ -215,13 +133,6 @@ cli(_) ->
%% Internal functions
%%--------------------------------------------------------------------
-comparing({_, _, _, _, CreatedAt1},
- {_, _, _, _, CreatedAt2}) ->
- CreatedAt1 >= CreatedAt2.
-
-ret({atomic, ok}) -> ok;
-ret({aborted, Error}) -> {error, Error}.
-
validate(action, "pub") -> true;
validate(action, "sub") -> true;
validate(action, "pubsub") -> true;
@@ -244,27 +155,3 @@ print_acl({all, Topic, Action, Access, _}) ->
"Acl($all topic = ~p action = ~p access = ~p)~n",
[Topic, Action, Access]
).
-
-update_permission(Action, Acl0, OldRecords) ->
- Acl = Acl0 #?TABLE{action = Action},
- maybe_delete_shadowed_records(Action, OldRecords),
- mnesia:write(Acl).
-
-maybe_delete_shadowed_records(_, []) ->
- ok;
-maybe_delete_shadowed_records(Action1, [Rec = #emqx_acl{action = Action2} | Rest]) ->
- if Action1 =:= Action2 ->
- ok = mnesia:delete_object(Rec);
- Action2 =:= pubsub ->
- %% Perform migration from the old data format on the
- %% fly. This is needed only for the enterprise version,
- %% delete this branch on 5.0
- mnesia:delete_object(Rec),
- mnesia:write(Rec#?TABLE{action = other_action(Action1)});
- true ->
- ok
- end,
- maybe_delete_shadowed_records(Action1, Rest).
-
-other_action(pub) -> sub;
-other_action(sub) -> pub.
diff --git a/apps/emqx_auth_mnesia/src/emqx_acl_mnesia_db.erl b/apps/emqx_auth_mnesia/src/emqx_acl_mnesia_db.erl
new file mode 100644
index 000000000..b483e59df
--- /dev/null
+++ b/apps/emqx_auth_mnesia/src/emqx_acl_mnesia_db.erl
@@ -0,0 +1,339 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2020-2021 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%--------------------------------------------------------------------
+
+-module(emqx_acl_mnesia_db).
+
+-include("emqx_auth_mnesia.hrl").
+-include_lib("stdlib/include/ms_transform.hrl").
+-include_lib("stdlib/include/qlc.hrl").
+
+%% ACL APIs
+-export([ create_table/0
+ , create_table2/0
+ ]).
+
+-export([ add_acl/4
+ , lookup_acl/1
+ , all_acls_export/0
+ , all_acls/0
+ , all_acls/1
+ , remove_acl/2
+ , merge_acl_records/3
+ , login_acl_table/1
+ , is_migration_started/0
+ ]).
+
+-export([comparing/2]).
+
+%%--------------------------------------------------------------------
+%% ACL API
+%%--------------------------------------------------------------------
+
+%% @doc Create table `emqx_acl` of old format rules
+%% A disc-copies `bag` table; `read_concurrency` favors the frequent reads done
+%% during ACL checks over the rare writes.
+-spec(create_table() -> ok).
+create_table() ->
+ ok = ekka_mnesia:create_table(?ACL_TABLE, [
+ {type, bag},
+ {disc_copies, [node()]},
+ {attributes, record_info(fields, ?ACL_TABLE)},
+ {storage_properties, [{ets, [{read_concurrency, true}]}]}]),
+ ok = ekka_mnesia:copy_table(?ACL_TABLE, disc_copies).
+
+%% @doc Create table `emqx_acl2` of new format rules
+%% Unlike ?ACL_TABLE this is an `ordered_set`: one record per login holding all
+%% of that login's rules (see merge_acl_records/3).
+-spec(create_table2() -> ok).
+create_table2() ->
+ ok = ekka_mnesia:create_table(?ACL_TABLE2, [
+ {type, ordered_set},
+ {disc_copies, [node()]},
+ {attributes, record_info(fields, ?ACL_TABLE2)},
+ {storage_properties, [{ets, [{read_concurrency, true}]}]}]),
+ ok = ekka_mnesia:copy_table(?ACL_TABLE2, disc_copies).
+
+%% @doc Add Acls
+%% Writes to the new-format table once the migration marker is present,
+%% otherwise to the legacy table; the check and write share one transaction
+%% so a concurrently starting migration cannot lose the rule.
+-spec(add_acl(acl_target(), emqx_topic:topic(), legacy_action(), access()) ->
+ ok | {error, any()}).
+add_acl(Login, Topic, Action, Access) ->
+ ret(mnesia:transaction(fun() ->
+ case is_migration_started() of
+ true -> add_acl_new(Login, Topic, Action, Access);
+ false -> add_acl_old(Login, Topic, Action, Access)
+ end
+ end)).
+
+%% @doc Lookup acl by login
+%% Reads both tables (dirty ETS reads) and merges the results, newest rules
+%% winning; returns rules sorted by comparing/2 (most recent first).
+-spec(lookup_acl(acl_target()) -> list(acl_record())).
+lookup_acl(undefined) -> [];
+lookup_acl(Login) ->
+ % After migration to ?ACL_TABLE2, ?ACL_TABLE never has any rules. This lookup should be removed later.
+ MatchSpec = ets:fun2ms(fun(#?ACL_TABLE{filter = {Filter, _}} = Rec)
+ when Filter =:= Login -> Rec
+ end),
+ OldRecs = ets:select(?ACL_TABLE, MatchSpec),
+
+ NewAcls = ets:lookup(?ACL_TABLE2, Login),
+ MergedAcl = merge_acl_records(Login, OldRecs, NewAcls),
+ lists:sort(fun comparing/2, acl_to_list(MergedAcl)).
+
+%% @doc Remove ACL
+%% Deletes the {Login, Topic} rules from both tables in one transaction; the
+%% whole new-format record is dropped when no rules for the login remain.
+-spec remove_acl(acl_target(), emqx_topic:topic()) -> ok | {error, any()}.
+remove_acl(Login, Topic) ->
+ ret(mnesia:transaction(fun() ->
+ mnesia:delete({?ACL_TABLE, {Login, Topic}}),
+ case mnesia:wread({?ACL_TABLE2, Login}) of
+ [] -> ok;
+ [#?ACL_TABLE2{rules = Rules} = Acl] ->
+ %% Keep only rules for other topics; drop the record if empty.
+ case delete_topic_rules(Topic, Rules) of
+ [] -> mnesia:delete({?ACL_TABLE2, Login});
+ [_ | _] = RemainingRules ->
+ mnesia:write(Acl#?ACL_TABLE2{rules = RemainingRules})
+ end
+ end
+ end)).
+
+%% @doc All ACL rules
+%% Concatenation of the three target types; each sub-list is already sorted
+%% by all_acls/1, but the concatenation itself is not re-sorted.
+-spec(all_acls() -> list(acl_record())).
+all_acls() ->
+ all_acls(username) ++
+ all_acls(clientid) ++
+ all_acls(all).
+
+%% @doc All ACL rules of specified type
+%% Evaluates the combined-table QLC handle (dirty ETS traversal) and sorts
+%% newest-first via comparing/2.
+-spec(all_acls(acl_target_type()) -> list(acl_record())).
+all_acls(AclTargetType) ->
+ lists:sort(fun comparing/2, qlc:eval(login_acl_table(AclTargetType))).
+
+%% @doc All ACL rules fetched transactionally
+%% Same traversal as all_acls/0 but inside a single mnesia transaction and
+%% using transactional reads, giving a consistent snapshot for export.
+-spec(all_acls_export() -> list(acl_record())).
+all_acls_export() ->
+ AclTargetTypes = [username, clientid, all],
+ MatchSpecNew = lists:flatmap(fun login_match_spec_new/1, AclTargetTypes),
+ MatchSpecOld = lists:flatmap(fun login_match_spec_old/1, AclTargetTypes),
+
+ {atomic, Records} = mnesia:transaction(
+ fun() ->
+ QH = acl_table(MatchSpecNew, MatchSpecOld, fun mnesia:table/2, fun lookup_mnesia/2),
+ qlc:eval(QH)
+ end),
+ Records.
+
+%% @doc QLC table of logins matching spec
+%% Dirty (ets-based) variant of the combined old+new table traversal; see
+%% all_acls_export/0 for the transactional counterpart.
+-spec(login_acl_table(acl_target_type()) -> qlc:query_handle()).
+login_acl_table(AclTargetType) ->
+ MatchSpecNew = login_match_spec_new(AclTargetType),
+ MatchSpecOld = login_match_spec_old(AclTargetType),
+ acl_table(MatchSpecNew, MatchSpecOld, fun ets:table/2, fun lookup_ets/2).
+
+%% @doc Combine old `emqx_acl` ACL records with a new `emqx_acl2` ACL record for a given login
+%% New-format rules take precedence: merge_rules/2 drops any old rule with the
+%% same {Action, Topic} as a new one.
+-spec(merge_acl_records(acl_target(), [#?ACL_TABLE{}], [#?ACL_TABLE2{}]) -> #?ACL_TABLE2{}).
+merge_acl_records(Login, OldRecs, Acls) ->
+ OldRules = old_recs_to_rules(OldRecs),
+ NewRules = case Acls of
+ [] -> [];
+ [#?ACL_TABLE2{rules = Rules}] -> Rules
+ end,
+ #?ACL_TABLE2{who = Login, rules = merge_rules(NewRules, OldRules)}.
+
+%% @doc Checks if background migration of ACL rules from `emqx_acl` to `emqx_acl2` format started.
+%% Should be run in transaction
+%% The marker record lives in the old table itself, so readers of that table
+%% see the marker and the remaining rules under the same lock.
+-spec(is_migration_started() -> boolean()).
+is_migration_started() ->
+ case mnesia:read({?ACL_TABLE, ?MIGRATION_MARK_KEY}) of
+ [?MIGRATION_MARK_RECORD | _] -> true;
+ [] -> false
+ end.
+
+%%--------------------------------------------------------------------
+%% Internal functions
+%%--------------------------------------------------------------------
+
+%% Insert a rule into the new-format table (one record per login).
+%% Must run inside a transaction: wread locks the login's record while the
+%% merged rule list is written back. `pubsub` is expanded by normalize_rule/1.
+add_acl_new(Login, Topic, Action, Access) ->
+ Rule = {Access, Action, Topic, erlang:system_time(millisecond)},
+ Rules = normalize_rule(Rule),
+ OldAcl = mnesia:wread({?ACL_TABLE2, Login}),
+ NewAcl = case OldAcl of
+ [#?ACL_TABLE2{rules = OldRules} = Acl] ->
+ Acl#?ACL_TABLE2{rules = merge_rules(Rules, OldRules)};
+ [] ->
+ #?ACL_TABLE2{who = Login, rules = Rules}
+ end,
+ mnesia:write(NewAcl).
+
+%% Insert a rule into the legacy bag table, one record per action.
+%% `pubsub` is stored as separate pub and sub records; shadowed older records
+%% for the same {Login, Topic} are removed/rewritten by update_permission/3.
+%% Must run inside a transaction (uses wread/mnesia:write).
+add_acl_old(Login, Topic, Action, Access) ->
+ Filter = {Login, Topic},
+ Acl = #?ACL_TABLE{
+ filter = Filter,
+ action = Action,
+ access = Access,
+ created_at = erlang:system_time(millisecond)
+ },
+ OldRecords = mnesia:wread({?ACL_TABLE, Filter}),
+ case Action of
+ pubsub ->
+ update_permission(pub, Acl, OldRecords),
+ update_permission(sub, Acl, OldRecords);
+ _ ->
+ update_permission(Action, Acl, OldRecords)
+ end.
+
+%% Convert legacy #emqx_acl records into new-format rule tuples
+%% {Access, Action, Topic, CreatedAt}.
+old_recs_to_rules(OldRecs) ->
+ lists:flatmap(fun old_rec_to_rules/1, OldRecs).
+
+old_rec_to_rules(#?ACL_TABLE{filter = {_, Topic}, action = Action, access = Access, created_at = CreatedAt}) ->
+ normalize_rule({Access, Action, Topic, CreatedAt}).
+
+%% Expand the legacy combined `pubsub` action into separate pub and sub rules;
+%% plain pub/sub rules pass through as a singleton list.
+normalize_rule({Access, pubsub, Topic, CreatedAt}) ->
+ [{Access, pub, Topic, CreatedAt}, {Access, sub, Topic, CreatedAt}];
+normalize_rule({Access, Action, Topic, CreatedAt}) ->
+ [{Access, Action, Topic, CreatedAt}].
+
+%% Fold each new rule into the old rule list; a new rule replaces any old
+%% rule with the same {Action, Topic} pair (new rules win).
+merge_rules([], OldRules) -> OldRules;
+merge_rules([NewRule | RestNewRules], OldRules) ->
+ merge_rules(RestNewRules, merge_rule(NewRule, OldRules)).
+
+merge_rule({_, Action, Topic, _ } = NewRule, OldRules) ->
+ [NewRule | lists:filter(
+ fun({_, OldAction, OldTopic, _}) ->
+ {Action, Topic} =/= {OldAction, OldTopic}
+ end, OldRules)].
+
+%% Flatten a new-format record into the external 5-tuple acl_record() form:
+%% {Login, Topic, Action, Access, CreatedAt}.
+acl_to_list(#?ACL_TABLE2{who = Login, rules = Rules}) ->
+ [{Login, Topic, Action, Access, CreatedAt} || {Access, Action, Topic, CreatedAt} <- Rules].
+
+%% Drop every rule whose topic equals Topic (used by remove_acl/2).
+delete_topic_rules(Topic, Rules) ->
+ [Rule || {_, _, T, _} = Rule <- Rules, T =/= Topic].
+
+%% Sort order for acl_record() tuples: newer CreatedAt first; records with
+%% equal timestamps fall back to full-term comparison so the order is total
+%% and deterministic.
+comparing({_, _, _, _, CreatedAt} = Rec1,
+ {_, _, _, _, CreatedAt} = Rec2) ->
+ Rec1 >= Rec2;
+
+comparing({_, _, _, _, CreatedAt1},
+ {_, _, _, _, CreatedAt2}) ->
+ CreatedAt1 >= CreatedAt2.
+
+%% Match specs selecting records of one target type (all | username | clientid)
+%% from the legacy table; the old key is {Login, Topic}.
+login_match_spec_old(all) ->
+ ets:fun2ms(fun(#?ACL_TABLE{filter = {all, _}} = Record) ->
+ Record
+ end);
+
+login_match_spec_old(Type) when (Type =:= username) or (Type =:= clientid) ->
+ ets:fun2ms(fun(#?ACL_TABLE{filter = {{RecordType, _}, _}} = Record)
+ when RecordType =:= Type -> Record
+ end).
+
+%% Same selection for the new-format table, keyed by login alone.
+login_match_spec_new(all) ->
+ ets:fun2ms(fun(#?ACL_TABLE2{who = all} = Record) ->
+ Record
+ end);
+
+login_match_spec_new(Type) when (Type =:= username) or (Type =:= clientid) ->
+ ets:fun2ms(fun(#?ACL_TABLE2{who = {RecordType, _}} = Record)
+ when RecordType =:= Type -> Record
+ end).
+
+%% Build a qlc table that lazily yields merged rules from both tables.
+%% TableFun is ets:table/2 or mnesia:table/2 and LookupFun the matching
+%% lookup, so the same traversal works for dirty and transactional reads.
+acl_table(MatchSpecNew, MatchSpecOld, TableFun, LookupFun) ->
+ TraverseFun =
+ fun() ->
+ CursorNew =
+ qlc:cursor(
+ TableFun(?ACL_TABLE2, [{traverse, {select, MatchSpecNew}}])),
+ CursorOld =
+ qlc:cursor(
+ TableFun(?ACL_TABLE, [{traverse, {select, MatchSpecOld}}])),
+ traverse_new(CursorNew, CursorOld, #{}, LookupFun)
+ end,
+
+ qlc:table(TraverseFun, []).
+
+
+% These are traverse funs for qlc table created by `acl_table/4`.
+% Traversing consumes memory: it collects logins present in `?ACL_TABLE` and
+% at the same time having rules in `?ACL_TABLE2`.
+% Such records appear if ACLs are inserted before migration started.
+% After migration, number of such logins is zero, so traversing starts working in
+% constant memory.
+
+%% Phase 1: walk new-format records one at a time, merging in any legacy
+%% records for the same {Login, Topic} keys; those keys are remembered in
+%% FoundKeys so phase 2 (traverse_old) does not emit them again.
+%% Returns qlc continuation style: Answers ++ fun() -> More end.
+traverse_new(CursorNew, CursorOld, FoundKeys, LookupFun) ->
+ Acls = qlc:next_answers(CursorNew, 1),
+ case Acls of
+ [] ->
+ qlc:delete_cursor(CursorNew),
+ traverse_old(CursorOld, FoundKeys);
+ [#?ACL_TABLE2{who = Login, rules = Rules} = Acl] ->
+ Keys = lists:usort([{Login, Topic} || {_, _, Topic, _} <- Rules]),
+ OldRecs = lists:flatmap(fun(Key) -> LookupFun(?ACL_TABLE, Key) end, Keys),
+ MergedAcl = merge_acl_records(Login, OldRecs, [Acl]),
+ NewFoundKeys =
+ lists:foldl(fun(#?ACL_TABLE{filter = Key}, Found) -> maps:put(Key, true, Found) end,
+ FoundKeys,
+ OldRecs),
+ case acl_to_list(MergedAcl) of
+ [] ->
+ traverse_new(CursorNew, CursorOld, NewFoundKeys, LookupFun);
+ List ->
+ List ++ fun() -> traverse_new(CursorNew, CursorOld, NewFoundKeys, LookupFun) end
+ end
+ end.
+
+%% Phase 2: emit remaining legacy records, skipping {Login, Topic} keys that
+%% phase 1 already merged into new-format answers (FoundKeys). Legacy `pubsub`
+%% rows fan out into pub+sub records via normalize_rule/1.
+traverse_old(CursorOld, FoundKeys) ->
+ OldAcls = qlc:next_answers(CursorOld),
+ case OldAcls of
+ [] ->
+ qlc:delete_cursor(CursorOld),
+ [];
+ _ ->
+ Records = [ {Login, Topic, Action, Access, CreatedAt}
+ || #?ACL_TABLE{filter = {Login, Topic}, action = LegacyAction, access = Access, created_at = CreatedAt} <- OldAcls,
+ {_, Action, _, _} <- normalize_rule({Access, LegacyAction, Topic, CreatedAt}),
+ not maps:is_key({Login, Topic}, FoundKeys)
+ ],
+ %% All answers in this batch may have been filtered out; keep pulling
+ %% batches until something is emitted or the cursor is exhausted.
+ case Records of
+ [] -> traverse_old(CursorOld, FoundKeys);
+ List -> List ++ fun() -> traverse_old(CursorOld, FoundKeys) end
+ end
+ end.
+
+%% LookupFun used by the transactional traversal (all_acls_export/0).
+lookup_mnesia(Tab, Key) ->
+ mnesia:read({Tab, Key}).
+
+%% LookupFun used by the dirty traversal (login_acl_table/1).
+lookup_ets(Tab, Key) ->
+ ets:lookup(Tab, Key).
+
+%% Write a legacy record for a single action, first removing (or rewriting)
+%% older records it would shadow. Must run inside a transaction.
+update_permission(Action, Acl0, OldRecords) ->
+ Acl = Acl0 #?ACL_TABLE{action = Action},
+ maybe_delete_shadowed_records(Action, OldRecords),
+ mnesia:write(Acl).
+
+%% Remove legacy records shadowed by a new rule for Action1: an exact action
+%% match is deleted outright, while an old combined `pubsub` record is split —
+%% deleted and rewritten to keep only the non-shadowed half.
+maybe_delete_shadowed_records(_, []) ->
+ ok;
+maybe_delete_shadowed_records(Action1, [Rec = #emqx_acl{action = Action2} | Rest]) ->
+ if Action1 =:= Action2 ->
+ ok = mnesia:delete_object(Rec);
+ Action2 =:= pubsub ->
+ %% Perform migration from the old data format on the
+ %% fly. This is needed only for the enterprise version,
+ %% delete this branch on 5.0
+ mnesia:delete_object(Rec),
+ mnesia:write(Rec#?ACL_TABLE{action = other_action(Action1)});
+ true ->
+ ok
+ end,
+ maybe_delete_shadowed_records(Action1, Rest).
+
+%% The complementary half of a split `pubsub` rule.
+other_action(pub) -> sub;
+other_action(sub) -> pub.
+
+%% Map an mnesia transaction result to the module's ok | {error, Reason} API.
+ret({atomic, ok}) -> ok;
+ret({aborted, Error}) -> {error, Error}.
diff --git a/apps/emqx_auth_mnesia/src/emqx_acl_mnesia_migrator.erl b/apps/emqx_auth_mnesia/src/emqx_acl_mnesia_migrator.erl
new file mode 100644
index 000000000..864f00884
--- /dev/null
+++ b/apps/emqx_auth_mnesia/src/emqx_acl_mnesia_migrator.erl
@@ -0,0 +1,215 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2020-2021 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%--------------------------------------------------------------------
+
+-module(emqx_acl_mnesia_migrator).
+
+-include("emqx_auth_mnesia.hrl").
+-include_lib("emqx/include/logger.hrl").
+-include_lib("snabbkaffe/include/snabbkaffe.hrl").
+
+-behaviour(gen_statem).
+
+-define(CHECK_ALL_NODES_INTERVAL, 60000).
+
+-type(migration_delay_reason() :: old_nodes | bad_nodes).
+
+-export([
+ callback_mode/0,
+ init/1
+]).
+
+-export([
+ waiting_all_nodes/3,
+ checking_old_table/3,
+ migrating/3
+]).
+
+-export([
+ start_link/0,
+ start_link/1,
+ start_supervised/0,
+ stop_supervised/0,
+ migrate_records/0,
+ is_migrating_on_node/1,
+ is_old_table_migrated/0
+]).
+
+%%--------------------------------------------------------------------
+%% External interface
+%%--------------------------------------------------------------------
+
+%% Start the migrator statem. The 1-arity forms exist so tests can run
+%% several differently-named instances; the default name is ?MODULE.
+start_link() ->
+ start_link(?MODULE).
+
+start_link(Name) when is_atom(Name) ->
+ start_link(#{
+ name => Name
+ });
+
+start_link(#{name := Name} = Opts) ->
+ gen_statem:start_link({local, Name}, ?MODULE, Opts, []).
+
+%% (Re)start the migrator under emqx_auth_mnesia_sup; used by the appup
+%% upgrade instructions. A noproc exit (supervisor not running) is treated
+%% as success so hot upgrades don't fail on apps that are stopped.
+start_supervised() ->
+ try
+ {ok, _} = supervisor:restart_child(emqx_auth_mnesia_sup, ?MODULE),
+ ok
+ catch
+ exit:{noproc, _} -> ok
+ end.
+
+%% Stop and remove the migrator child; used by the appup downgrade path.
+stop_supervised() ->
+ try
+ ok = supervisor:terminate_child(emqx_auth_mnesia_sup, ?MODULE),
+ ok = supervisor:delete_child(emqx_auth_mnesia_sup, ?MODULE)
+ catch
+ exit:{noproc, _} -> ok
+ end.
+
+%%--------------------------------------------------------------------
+%% gen_statem callbacks
+%%--------------------------------------------------------------------
+
+callback_mode() -> state_functions.
+
+%% Ensure both ACL tables exist, then enter `waiting_all_nodes` with an
+%% immediate state timeout so the first cluster check happens right away.
+%% `get_nodes` is injectable via Opts for tests.
+init(Opts) ->
+ ok = emqx_acl_mnesia_db:create_table(),
+ ok = emqx_acl_mnesia_db:create_table2(),
+ Name = maps:get(name, Opts, ?MODULE),
+ CheckNodesInterval = maps:get(check_nodes_interval, Opts, ?CHECK_ALL_NODES_INTERVAL),
+ GetNodes = maps:get(get_nodes, Opts, fun all_nodes/0),
+ Data =
+ #{name => Name,
+ check_nodes_interval => CheckNodesInterval,
+ get_nodes => GetNodes},
+ {ok, waiting_all_nodes, Data, [{state_timeout, 0, check_nodes}]}.
+
+%%--------------------------------------------------------------------
+%% state callbacks
+%%--------------------------------------------------------------------
+
+%% Wait until every cluster node runs the migrator (i.e. no node still on the
+%% old code); retry every CheckNodesInterval ms while any node is old or
+%% unreachable, so migration never starts on a mixed-version cluster.
+waiting_all_nodes(state_timeout, check_nodes, Data) ->
+ #{name := Name, check_nodes_interval := CheckNodesInterval, get_nodes := GetNodes} = Data,
+ case is_all_nodes_migrating(Name, GetNodes()) of
+ true ->
+ ?tp(info, emqx_acl_mnesia_migrator_check_old_table, #{}),
+ {next_state, checking_old_table, Data, [{next_event, internal, check_old_table}]};
+ {false, Reason, Nodes} ->
+ ?tp(info,
+ emqx_acl_mnesia_migrator_bad_nodes_delay,
+ #{delay => CheckNodesInterval,
+ reason => Reason,
+ name => Name,
+ nodes => Nodes}),
+ {keep_state_and_data, [{state_timeout, CheckNodesInterval, check_nodes}]}
+ end.
+
+%% If the old table holds only the migration marker, the work is done and the
+%% process hibernates in `finished`; otherwise run (another round of) the
+%% migration. migrating/3 loops back here, re-checking until the table drains.
+checking_old_table(internal, check_old_table, Data) ->
+ case is_old_table_migrated() of
+ true ->
+ ?tp(info, emqx_acl_mnesia_migrator_finish, #{}),
+ {next_state, finished, Data, [{hibernate, true}]};
+ false ->
+ ?tp(info, emqx_acl_mnesia_migrator_start_migration, #{}),
+ {next_state, migrating, Data, [{next_event, internal, check_old_table}]}
+ end.
+
+migrating(internal, start_migration, Data) ->
+ ok = migrate_records(),
+ {next_state, checking_old_table, Data, [{next_event, internal, check_old_table}]}.
+
+%% @doc Returns `true` if migration is started in the local node, otherwise crash.
+%% Called remotely via rpc:multicall from is_all_nodes_migrating/2; nodes
+%% running old code have no such function and show up in the multicall badrpc
+%% results instead.
+-spec(is_migrating_on_node(atom()) -> true).
+is_migrating_on_node(Name) ->
+ true = is_pid(erlang:whereis(Name)).
+
+%% @doc Run migration of records
+%% Writes the migration marker first (so new writes go to the new table),
+%% then moves legacy records one key at a time until the old table is empty.
+-spec(migrate_records() -> ok).
+migrate_records() ->
+ ok = add_migration_mark(),
+ Key = peek_record(),
+ do_migrate_records(Key).
+
+%% @doc Check that the migrator process is running on all given nodes.
+%% Returns `true`, or `{false, old_nodes | bad_nodes, Nodes}` naming the nodes
+%% that answered wrongly (old code) or could not be reached at all.
+-spec(is_all_nodes_migrating(atom(), list(node())) -> true | {false, migration_delay_reason(), list(node())}).
+is_all_nodes_migrating(Name, Nodes) ->
+ case rpc:multicall(Nodes, ?MODULE, is_migrating_on_node, [Name]) of
+ {Results, []} ->
+ OldNodes = [ Node || {Node, Result} <- lists:zip(Nodes, Results), Result =/= true ],
+ case OldNodes of
+ [] -> true;
+ _ -> {false, old_nodes, OldNodes}
+ end;
+ {_, [_BadNode | _] = BadNodes} ->
+ {false, bad_nodes, BadNodes}
+ end.
+
+%%--------------------------------------------------------------------
+%% Internal functions
+%%--------------------------------------------------------------------
+
+%% Default `get_nodes` fun: every node in the ekka cluster.
+all_nodes() ->
+ ekka_mnesia:cluster_nodes(all).
+
+%% True only when the old table contains the migration marker and nothing
+%% else. An empty table (first key '$end_of_table') means migration has not
+%% even been marked as started, so it counts as not migrated.
+is_old_table_migrated() ->
+ Result =
+ mnesia:transaction(fun() ->
+ case mnesia:first(?ACL_TABLE) of
+ ?MIGRATION_MARK_KEY ->
+ case mnesia:next(?ACL_TABLE, ?MIGRATION_MARK_KEY) of
+ '$end_of_table' -> true;
+ _OtherKey -> false
+ end;
+ '$end_of_table' -> false;
+ _OtherKey -> false
+ end
+ end),
+ case Result of
+ {atomic, true} ->
+ true;
+ _ ->
+ false
+ end.
+
+%% Insert the migration marker into the old table; idempotent since the
+%% marker record is a fixed term.
+add_migration_mark() ->
+ {atomic, ok} = mnesia:transaction(fun() -> mnesia:write(?MIGRATION_MARK_RECORD) end),
+ ok.
+
+%% First real (non-marker) key in the old table, or '$end_of_table'.
+%% Dirty reads are fine here: migrate_one_record/1 re-checks under wread.
+peek_record() ->
+ Key = mnesia:dirty_first(?ACL_TABLE),
+ case Key of
+ ?MIGRATION_MARK_KEY ->
+ mnesia:dirty_next(?ACL_TABLE, Key);
+ _ -> Key
+ end.
+
+%% Drain the old table key by key; each key is moved in its own transaction
+%% so a crash mid-migration loses at most in-flight work, and peek_record/0
+%% is re-read after every step to pick up concurrent inserts.
+do_migrate_records('$end_of_table') -> ok;
+do_migrate_records({_Login, _Topic} = Key) ->
+ ?tp(emqx_acl_mnesia_migrator_record_selected, #{key => Key}),
+ _ = mnesia:transaction(fun migrate_one_record/1, [Key]),
+ do_migrate_records(peek_record()).
+
+%% Move all legacy records under Key into the login's new-format record and
+%% delete them from the old table. The record may have vanished between the
+%% dirty peek and this wread — that is logged and ignored.
+migrate_one_record({Login, _Topic} = Key) ->
+ case mnesia:wread({?ACL_TABLE, Key}) of
+ [] ->
+ ?tp(emqx_acl_mnesia_migrator_record_missed, #{key => Key}),
+ record_missing;
+ OldRecs ->
+ Acls = mnesia:wread({?ACL_TABLE2, Login}),
+ UpdatedAcl = emqx_acl_mnesia_db:merge_acl_records(Login, OldRecs, Acls),
+ ok = mnesia:write(UpdatedAcl),
+ ok = mnesia:delete({?ACL_TABLE, Key}),
+ ?tp(emqx_acl_mnesia_migrator_record_migrated, #{key => Key})
+ end.
diff --git a/apps/emqx_auth_mnesia/src/emqx_auth_mnesia.app.src b/apps/emqx_auth_mnesia/src/emqx_auth_mnesia.app.src
index e61a22f0a..b15c7fdd3 100644
--- a/apps/emqx_auth_mnesia/src/emqx_auth_mnesia.app.src
+++ b/apps/emqx_auth_mnesia/src/emqx_auth_mnesia.app.src
@@ -1,6 +1,6 @@
{application, emqx_auth_mnesia,
[{description, "EMQ X Authentication with Mnesia"},
- {vsn, "4.3.3"}, % strict semver, bump manually
+ {vsn, "4.3.4"}, % strict semver, bump manually
{modules, []},
{registered, []},
{applications, [kernel,stdlib,mnesia]},
diff --git a/apps/emqx_auth_mnesia/src/emqx_auth_mnesia.appup.src b/apps/emqx_auth_mnesia/src/emqx_auth_mnesia.appup.src
index abe359eef..82df99b3a 100644
--- a/apps/emqx_auth_mnesia/src/emqx_auth_mnesia.appup.src
+++ b/apps/emqx_auth_mnesia/src/emqx_auth_mnesia.appup.src
@@ -1,22 +1,31 @@
%% -*- mode: erlang -*-
{VSN,
- [{"4.3.2",
- [{load_module,emqx_acl_mnesia_api,brutal_purge,soft_purge,[]},
- {load_module,emqx_auth_mnesia_api,brutal_purge,soft_purge,[]}]},
- {"4.3.1",
- [{load_module,emqx_acl_mnesia_api,brutal_purge,soft_purge,[]},
- {load_module,emqx_auth_mnesia_api,brutal_purge,soft_purge,[]}]},
- {"4.3.0",
- [{load_module,emqx_acl_mnesia_api,brutal_purge,soft_purge,[]},
- {load_module,emqx_auth_mnesia_api,brutal_purge,soft_purge,[]}]},
- {<<".*">>,[]}],
- [{"4.3.2",
- [{load_module,emqx_acl_mnesia_api,brutal_purge,soft_purge,[]},
- {load_module,emqx_auth_mnesia_api,brutal_purge,soft_purge,[]}]},
- {"4.3.1",
- [{load_module,emqx_acl_mnesia_api,brutal_purge,soft_purge,[]},
- {load_module,emqx_auth_mnesia_api,brutal_purge,soft_purge,[]}]},
- {"4.3.0",
- [{load_module,emqx_acl_mnesia_api,brutal_purge,soft_purge,[]},
- {load_module,emqx_auth_mnesia_api,brutal_purge,soft_purge,[]}]},
- {<<".*">>,[]}]}.
+ [
+ {<<"4.3.[0-3]">>, [
+ {add_module,emqx_acl_mnesia_db},
+ {add_module,emqx_acl_mnesia_migrator, [emqx_acl_mnesia_db]},
+ {update, emqx_auth_mnesia_sup, supervisor},
+ {apply, {emqx_acl_mnesia_migrator, start_supervised, []}},
+ {load_module,emqx_auth_mnesia_api, brutal_purge,soft_purge,[]},
+ {load_module,emqx_acl_mnesia, brutal_purge,soft_purge,[]},
+ {load_module,emqx_acl_mnesia_api, brutal_purge,soft_purge,[]},
+ {load_module,emqx_acl_mnesia_cli, brutal_purge,soft_purge,[]}
+ ]},
+ {<<".*">>, [
+ ]}
+ ],
+ [
+ {<<"4.3.[0-3]">>, [
+ {apply, {emqx_acl_mnesia_migrator, stop_supervised, []}},
+ {update, emqx_auth_mnesia_sup, supervisor},
+ {load_module,emqx_acl_mnesia_cli, brutal_purge,soft_purge,[]},
+ {load_module,emqx_acl_mnesia_api, brutal_purge,soft_purge,[]},
+ {load_module,emqx_auth_mnesia_api, brutal_purge,soft_purge,[]},
+ {load_module,emqx_acl_mnesia, brutal_purge,soft_purge,[]},
+ {delete_module,emqx_acl_mnesia_migrator},
+ {delete_module,emqx_acl_mnesia_db}
+ ]},
+ {<<".*">>, [
+ ]}
+ ]
+}.
diff --git a/apps/emqx_auth_mnesia/src/emqx_auth_mnesia_api.erl b/apps/emqx_auth_mnesia/src/emqx_auth_mnesia_api.erl
index 9d9fff6f6..da24ddd53 100644
--- a/apps/emqx_auth_mnesia/src/emqx_auth_mnesia_api.erl
+++ b/apps/emqx_auth_mnesia/src/emqx_auth_mnesia_api.erl
@@ -23,7 +23,7 @@
-import(proplists, [get_value/2]).
-import(minirest, [return/1]).
--export([paginate/5]).
+-export([paginate_qh/5]).
-export([ list_clientid/2
, lookup_clientid/2
@@ -212,9 +212,12 @@ delete_username(#{username := Username}, _) ->
%% Paging Query
%%------------------------------------------------------------------------------
-paginate(Tables, MatchSpec, Params, ComparingFun, RowFun) ->
- Qh = query_handle(Tables, MatchSpec),
- Count = count(Tables, MatchSpec),
+paginate(Table, MatchSpec, Params, ComparingFun, RowFun) ->
+ Qh = query_handle(Table, MatchSpec),
+ Count = count(Table, MatchSpec),
+ paginate_qh(Qh, Count, Params, ComparingFun, RowFun).
+
+paginate_qh(Qh, Count, Params, ComparingFun, RowFun) ->
Page = page(Params),
Limit = limit(Params),
Cursor = qlc:cursor(Qh),
@@ -231,24 +234,12 @@ paginate(Tables, MatchSpec, Params, ComparingFun, RowFun) ->
query_handle(Table, MatchSpec) when is_atom(Table) ->
Options = {traverse, {select, MatchSpec}},
- qlc:q([R|| R <- ets:table(Table, Options)]);
-query_handle([Table], MatchSpec) when is_atom(Table) ->
- Options = {traverse, {select, MatchSpec}},
- qlc:q([R|| R <- ets:table(Table, Options)]);
-query_handle(Tables, MatchSpec) ->
- Options = {traverse, {select, MatchSpec}},
- qlc:append([qlc:q([E || E <- ets:table(T, Options)]) || T <- Tables]).
+ qlc:q([R || R <- ets:table(Table, Options)]).
count(Table, MatchSpec) when is_atom(Table) ->
[{MatchPattern, Where, _Re}] = MatchSpec,
NMatchSpec = [{MatchPattern, Where, [true]}],
- ets:select_count(Table, NMatchSpec);
-count([Table], MatchSpec) when is_atom(Table) ->
- [{MatchPattern, Where, _Re}] = MatchSpec,
- NMatchSpec = [{MatchPattern, Where, [true]}],
- ets:select_count(Table, NMatchSpec);
-count(Tables, MatchSpec) ->
- lists:sum([count(T, MatchSpec) || T <- Tables]).
+ ets:select_count(Table, NMatchSpec).
page(Params) ->
binary_to_integer(proplists:get_value(<<"_page">>, Params, <<"1">>)).
diff --git a/apps/emqx_auth_mnesia/src/emqx_auth_mnesia_sup.erl b/apps/emqx_auth_mnesia/src/emqx_auth_mnesia_sup.erl
index 3784eaaf6..2099eba8c 100644
--- a/apps/emqx_auth_mnesia/src/emqx_auth_mnesia_sup.erl
+++ b/apps/emqx_auth_mnesia/src/emqx_auth_mnesia_sup.erl
@@ -33,4 +33,16 @@ start_link() ->
%%--------------------------------------------------------------------
init([]) ->
- {ok, {{one_for_one, 10, 100}, []}}.
\ No newline at end of file
+ {ok, {{one_for_one, 10, 100}, [
+ child_spec(emqx_acl_mnesia_migrator, worker, [])
+ ]}}.
+
+%% Build a permanent worker child spec for module M, started via
+%% M:start_link(Args...) with a 5s shutdown grace period.
+child_spec(M, worker, Args) ->
+ #{id => M,
+ start => {M, start_link, Args},
+ restart => permanent,
+ shutdown => 5000,
+ type => worker,
+ modules => [M]
+ }.
+
diff --git a/apps/emqx_auth_mnesia/test/emqx_acl_mnesia_SUITE.erl b/apps/emqx_auth_mnesia/test/emqx_acl_mnesia_SUITE.erl
index 8ace680da..eb1ea74f3 100644
--- a/apps/emqx_auth_mnesia/test/emqx_acl_mnesia_SUITE.erl
+++ b/apps/emqx_auth_mnesia/test/emqx_acl_mnesia_SUITE.erl
@@ -22,6 +22,7 @@
-include("emqx_auth_mnesia.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
+-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-import(emqx_ct_http, [ request_api/3
, request_api/5
@@ -39,10 +40,15 @@ all() ->
emqx_ct:all(?MODULE).
groups() ->
- [].
+ [{async_migration_tests, [sequence], [
+ t_old_and_new_acl_migration_by_migrator,
+ t_old_and_new_acl_migration_repeated_by_migrator,
+ t_migration_concurrency
+ ]}].
init_per_suite(Config) ->
emqx_ct_helpers:start_apps([emqx_modules, emqx_management, emqx_auth_mnesia], fun set_special_configs/1),
+ supervisor:terminate_child(emqx_auth_mnesia_sup, emqx_acl_mnesia_migrator),
create_default_app(),
Config.
@@ -50,14 +56,32 @@ end_per_suite(_Config) ->
delete_default_app(),
emqx_ct_helpers:stop_apps([emqx_modules, emqx_management, emqx_auth_mnesia]).
-init_per_testcase(t_check_acl_as_clientid, Config) ->
+init_per_testcase_clean(_, Config) ->
+ mnesia:clear_table(?ACL_TABLE),
+ mnesia:clear_table(?ACL_TABLE2),
+ Config.
+
+init_per_testcase_emqx_hook(t_check_acl_as_clientid, Config) ->
emqx:hook('client.check_acl', fun emqx_acl_mnesia:check_acl/5, [#{key_as => clientid}]),
Config;
-
-init_per_testcase(_, Config) ->
+init_per_testcase_emqx_hook(_, Config) ->
emqx:hook('client.check_acl', fun emqx_acl_mnesia:check_acl/5, [#{key_as => username}]),
Config.
+init_per_testcase_migration(t_management_before_migration, Config) ->
+ Config;
+init_per_testcase_migration(_, Config) ->
+ emqx_acl_mnesia_migrator:migrate_records(),
+ Config.
+
+init_per_testcase(Case, Config) ->
+ PerTestInitializers = [
+ fun init_per_testcase_clean/2,
+ fun init_per_testcase_migration/2,
+ fun init_per_testcase_emqx_hook/2
+ ],
+ lists:foldl(fun(Init, Conf) -> Init(Case, Conf) end, Config, PerTestInitializers).
+
end_per_testcase(_, Config) ->
emqx:unhook('client.check_acl', fun emqx_acl_mnesia:check_acl/5),
Config.
@@ -76,25 +100,34 @@ set_special_configs(_App) ->
%% Testcases
%%------------------------------------------------------------------------------
-t_management(_Config) ->
- clean_all_acls(),
- ?assertEqual("Acl with Mnesia", emqx_acl_mnesia:description()),
- ?assertEqual([], emqx_acl_mnesia_cli:all_acls()),
+t_management_before_migration(_Config) ->
+ {atomic, IsStarted} = mnesia:transaction(fun emqx_acl_mnesia_db:is_migration_started/0),
+ ?assertNot(IsStarted),
+ run_acl_tests().
- ok = emqx_acl_mnesia_cli:add_acl({clientid, <<"test_clientid">>}, <<"topic/%c">>, sub, allow),
- ok = emqx_acl_mnesia_cli:add_acl({clientid, <<"test_clientid">>}, <<"topic/+">>, pub, deny),
- ok = emqx_acl_mnesia_cli:add_acl({username, <<"test_username">>}, <<"topic/%u">>, sub, deny),
- ok = emqx_acl_mnesia_cli:add_acl({username, <<"test_username">>}, <<"topic/+">>, pub, allow),
- ok = emqx_acl_mnesia_cli:add_acl(all, <<"#">>, pubsub, deny),
+t_management_after_migration(_Config) ->
+ {atomic, IsStarted} = mnesia:transaction(fun emqx_acl_mnesia_db:is_migration_started/0),
+ ?assert(IsStarted),
+ run_acl_tests().
+
+run_acl_tests() ->
+ ?assertEqual("Acl with Mnesia", emqx_acl_mnesia:description()),
+ ?assertEqual([], emqx_acl_mnesia_db:all_acls()),
+
+ ok = emqx_acl_mnesia_db:add_acl({clientid, <<"test_clientid">>}, <<"topic/%c">>, sub, allow),
+ ok = emqx_acl_mnesia_db:add_acl({clientid, <<"test_clientid">>}, <<"topic/+">>, pub, deny),
+ ok = emqx_acl_mnesia_db:add_acl({username, <<"test_username">>}, <<"topic/%u">>, sub, deny),
+ ok = emqx_acl_mnesia_db:add_acl({username, <<"test_username">>}, <<"topic/+">>, pub, allow),
+ ok = emqx_acl_mnesia_db:add_acl(all, <<"#">>, pubsub, deny),
%% Sleeps below are needed to hide the race condition between
%% mnesia and ets dirty select in check_acl, that make this test
%% flaky
timer:sleep(100),
- ?assertEqual(2, length(emqx_acl_mnesia_cli:lookup_acl({clientid, <<"test_clientid">>}))),
- ?assertEqual(2, length(emqx_acl_mnesia_cli:lookup_acl({username, <<"test_username">>}))),
- ?assertEqual(2, length(emqx_acl_mnesia_cli:lookup_acl(all))),
- ?assertEqual(6, length(emqx_acl_mnesia_cli:all_acls())),
+ ?assertEqual(2, length(emqx_acl_mnesia_db:lookup_acl({clientid, <<"test_clientid">>}))),
+ ?assertEqual(2, length(emqx_acl_mnesia_db:lookup_acl({username, <<"test_username">>}))),
+ ?assertEqual(2, length(emqx_acl_mnesia_db:lookup_acl(all))),
+ ?assertEqual(6, length(emqx_acl_mnesia_db:all_acls())),
User1 = #{zone => external, clientid => <<"test_clientid">>},
User2 = #{zone => external, clientid => <<"no_exist">>, username => <<"test_username">>},
@@ -110,30 +143,30 @@ t_management(_Config) ->
deny = emqx_access_control:check_acl(User3, publish, <<"topic/A/B">>),
%% Test merging of pubsub capability:
- ok = emqx_acl_mnesia_cli:add_acl({clientid, <<"test_clientid">>}, <<"topic/mix">>, pubsub, deny),
+ ok = emqx_acl_mnesia_db:add_acl({clientid, <<"test_clientid">>}, <<"topic/mix">>, pubsub, deny),
timer:sleep(100),
deny = emqx_access_control:check_acl(User1, subscribe, <<"topic/mix">>),
deny = emqx_access_control:check_acl(User1, publish, <<"topic/mix">>),
- ok = emqx_acl_mnesia_cli:add_acl({clientid, <<"test_clientid">>}, <<"topic/mix">>, pub, allow),
+ ok = emqx_acl_mnesia_db:add_acl({clientid, <<"test_clientid">>}, <<"topic/mix">>, pub, allow),
timer:sleep(100),
deny = emqx_access_control:check_acl(User1, subscribe, <<"topic/mix">>),
allow = emqx_access_control:check_acl(User1, publish, <<"topic/mix">>),
- ok = emqx_acl_mnesia_cli:add_acl({clientid, <<"test_clientid">>}, <<"topic/mix">>, pubsub, allow),
+ ok = emqx_acl_mnesia_db:add_acl({clientid, <<"test_clientid">>}, <<"topic/mix">>, pubsub, allow),
timer:sleep(100),
allow = emqx_access_control:check_acl(User1, subscribe, <<"topic/mix">>),
allow = emqx_access_control:check_acl(User1, publish, <<"topic/mix">>),
- ok = emqx_acl_mnesia_cli:add_acl({clientid, <<"test_clientid">>}, <<"topic/mix">>, sub, deny),
+ ok = emqx_acl_mnesia_db:add_acl({clientid, <<"test_clientid">>}, <<"topic/mix">>, sub, deny),
timer:sleep(100),
deny = emqx_access_control:check_acl(User1, subscribe, <<"topic/mix">>),
allow = emqx_access_control:check_acl(User1, publish, <<"topic/mix">>),
- ok = emqx_acl_mnesia_cli:add_acl({clientid, <<"test_clientid">>}, <<"topic/mix">>, pub, deny),
+ ok = emqx_acl_mnesia_db:add_acl({clientid, <<"test_clientid">>}, <<"topic/mix">>, pub, deny),
timer:sleep(100),
deny = emqx_access_control:check_acl(User1, subscribe, <<"topic/mix">>),
deny = emqx_access_control:check_acl(User1, publish, <<"topic/mix">>),
%% Test implicit migration of pubsub to pub and sub:
- ok = emqx_acl_mnesia_cli:remove_acl({clientid, <<"test_clientid">>}, <<"topic/mix">>),
- ok = mnesia:dirty_write(#emqx_acl{
+ ok = emqx_acl_mnesia_db:remove_acl({clientid, <<"test_clientid">>}, <<"topic/mix">>),
+ ok = mnesia:dirty_write(#?ACL_TABLE{
filter = {{clientid, <<"test_clientid">>}, <<"topic/mix">>},
action = pubsub,
access = allow,
@@ -142,24 +175,130 @@ t_management(_Config) ->
timer:sleep(100),
allow = emqx_access_control:check_acl(User1, subscribe, <<"topic/mix">>),
allow = emqx_access_control:check_acl(User1, publish, <<"topic/mix">>),
- ok = emqx_acl_mnesia_cli:add_acl({clientid, <<"test_clientid">>}, <<"topic/mix">>, pub, deny),
+ ok = emqx_acl_mnesia_db:add_acl({clientid, <<"test_clientid">>}, <<"topic/mix">>, pub, deny),
timer:sleep(100),
allow = emqx_access_control:check_acl(User1, subscribe, <<"topic/mix">>),
deny = emqx_access_control:check_acl(User1, publish, <<"topic/mix">>),
- ok = emqx_acl_mnesia_cli:add_acl({clientid, <<"test_clientid">>}, <<"topic/mix">>, sub, deny),
+ ok = emqx_acl_mnesia_db:add_acl({clientid, <<"test_clientid">>}, <<"topic/mix">>, sub, deny),
timer:sleep(100),
deny = emqx_access_control:check_acl(User1, subscribe, <<"topic/mix">>),
deny = emqx_access_control:check_acl(User1, publish, <<"topic/mix">>),
- ok = emqx_acl_mnesia_cli:remove_acl({clientid, <<"test_clientid">>}, <<"topic/%c">>),
- ok = emqx_acl_mnesia_cli:remove_acl({clientid, <<"test_clientid">>}, <<"topic/+">>),
- ok = emqx_acl_mnesia_cli:remove_acl({clientid, <<"test_clientid">>}, <<"topic/mix">>),
- ok = emqx_acl_mnesia_cli:remove_acl({username, <<"test_username">>}, <<"topic/%u">>),
- ok = emqx_acl_mnesia_cli:remove_acl({username, <<"test_username">>}, <<"topic/+">>),
- ok = emqx_acl_mnesia_cli:remove_acl(all, <<"#">>),
+ ok = emqx_acl_mnesia_db:remove_acl({clientid, <<"test_clientid">>}, <<"topic/%c">>),
+ ok = emqx_acl_mnesia_db:remove_acl({clientid, <<"test_clientid">>}, <<"topic/+">>),
+ ok = emqx_acl_mnesia_db:remove_acl({clientid, <<"test_clientid">>}, <<"topic/mix">>),
+ ok = emqx_acl_mnesia_db:remove_acl({username, <<"test_username">>}, <<"topic/%u">>),
+ ok = emqx_acl_mnesia_db:remove_acl({username, <<"test_username">>}, <<"topic/+">>),
+ ok = emqx_acl_mnesia_db:remove_acl(all, <<"#">>),
timer:sleep(100),
- ?assertEqual([], emqx_acl_mnesia_cli:all_acls()).
+ ?assertEqual([], emqx_acl_mnesia_db:all_acls()).
+
+t_old_and_new_acl_combination(_Config) ->
+ create_conflicting_records(),
+
+ ?assertEqual(combined_conflicting_records(), emqx_acl_mnesia_db:all_acls()),
+ ?assertEqual(
+ lists:usort(combined_conflicting_records()),
+ lists:usort(emqx_acl_mnesia_db:all_acls_export())).
+
+t_old_and_new_acl_migration(_Config) ->
+ create_conflicting_records(),
+ emqx_acl_mnesia_migrator:migrate_records(),
+
+ ?assertEqual(combined_conflicting_records(), emqx_acl_mnesia_db:all_acls()),
+ ?assertEqual(
+ lists:usort(combined_conflicting_records()),
+ lists:usort(emqx_acl_mnesia_db:all_acls_export())),
+
+ % check that old table is not populated anymore
+ ok = emqx_acl_mnesia_db:add_acl({clientid, <<"test_clientid">>}, <<"topic/%c">>, sub, allow),
+ ?assert(emqx_acl_mnesia_migrator:is_old_table_migrated()).
+
+
+t_migration_concurrency(_Config) ->
+ Key = {{clientid,<<"client6">>}, <<"t">>},
+ Record = #?ACL_TABLE{filter = Key, action = pubsub, access = deny, created_at = 0},
+ {atomic, ok} = mnesia:transaction(fun mnesia:write/1, [Record]),
+
+ LockWaitAndDelete =
+ fun() ->
+ [_Rec] = mnesia:wread({?ACL_TABLE, Key}),
+ {{Pid, Ref}, _} =
+ ?wait_async_action(spawn_monitor(fun emqx_acl_mnesia_migrator:migrate_records/0),
+ #{?snk_kind := emqx_acl_mnesia_migrator_record_selected},
+ 1000),
+ mnesia:delete({?ACL_TABLE, Key}),
+ {Pid, Ref}
+ end,
+
+ ?check_trace(
+ begin
+ {atomic, {Pid, Ref}} = mnesia:transaction(LockWaitAndDelete),
+ receive {'DOWN', Ref, process, Pid, _} -> ok end
+ end,
+ fun(_, Trace) ->
+ ?assertMatch([_], ?of_kind(emqx_acl_mnesia_migrator_record_missed, Trace))
+ end),
+
+ ?assert(emqx_acl_mnesia_migrator:is_old_table_migrated()),
+ ?assertEqual([], emqx_acl_mnesia_db:all_acls()).
+
+
+t_old_and_new_acl_migration_by_migrator(_Config) ->
+ create_conflicting_records(),
+
+ meck:new(fake_nodes, [non_strict]),
+ meck:expect(fake_nodes, all, fun() -> [node(), 'somebadnode@127.0.0.1'] end),
+
+ ?check_trace(
+ begin
+ % check all nodes every 30 ms
+ {ok, _} = emqx_acl_mnesia_migrator:start_link(#{
+ name => ct_migrator,
+ check_nodes_interval => 30,
+ get_nodes => fun fake_nodes:all/0
+ }),
+ timer:sleep(100)
+ end,
+ fun(_, Trace) ->
+ ?assertEqual([], ?of_kind(emqx_acl_mnesia_migrator_start_migration, Trace))
+ end),
+
+ ?check_trace(
+ begin
+ meck:expect(fake_nodes, all, fun() -> [node()] end),
+ timer:sleep(100)
+ end,
+ fun(_, Trace) ->
+ ?assertMatch([_], ?of_kind(emqx_acl_mnesia_migrator_finish, Trace))
+ end),
+
+ meck:unload(fake_nodes),
+
+ ?assertEqual(combined_conflicting_records(), emqx_acl_mnesia_db:all_acls()),
+ ?assert(emqx_acl_mnesia_migrator:is_old_table_migrated()).
+
+t_old_and_new_acl_migration_repeated_by_migrator(_Config) ->
+ create_conflicting_records(),
+ emqx_acl_mnesia_migrator:migrate_records(),
+
+ ?check_trace(
+ begin
+ {ok, _} = emqx_acl_mnesia_migrator:start_link(ct_migrator),
+ timer:sleep(100)
+ end,
+ fun(_, Trace) ->
+ ?assertEqual([], ?of_kind(emqx_acl_mnesia_migrator_start_migration, Trace)),
+ ?assertMatch([_], ?of_kind(emqx_acl_mnesia_migrator_finish, Trace))
+ end).
+
+t_start_stop_supervised(_Config) ->
+ ?assertEqual(undefined, whereis(emqx_acl_mnesia_migrator)),
+ ok = emqx_acl_mnesia_migrator:start_supervised(),
+ ?assert(is_pid(whereis(emqx_acl_mnesia_migrator))),
+ ok = emqx_acl_mnesia_migrator:stop_supervised(),
+ ?assertEqual(undefined, whereis(emqx_acl_mnesia_migrator)).
t_acl_cli(_Config) ->
meck:new(emqx_ctl, [non_strict, passthrough]),
@@ -168,8 +307,6 @@ t_acl_cli(_Config) ->
meck:expect(emqx_ctl, usage, fun(Usages) -> emqx_ctl:format_usage(Usages) end),
meck:expect(emqx_ctl, usage, fun(Cmd, Descr) -> emqx_ctl:format_usage(Cmd, Descr) end),
- clean_all_acls(),
-
?assertEqual(0, length(emqx_acl_mnesia_cli:cli(["list"]))),
emqx_acl_mnesia_cli:cli(["add", "clientid", "test_clientid", "topic/A", "pub", "deny"]),
@@ -202,8 +339,6 @@ t_acl_cli(_Config) ->
meck:unload(emqx_ctl).
t_rest_api(_Config) ->
- clean_all_acls(),
-
Params1 = [#{<<"clientid">> => <<"test_clientid">>,
<<"topic">> => <<"topic/A">>,
<<"action">> => <<"pub">>,
@@ -273,13 +408,24 @@ t_rest_api(_Config) ->
{ok, Res3} = request_http_rest_list(["$all"]),
?assertMatch([], get_http_data(Res3)).
-%%------------------------------------------------------------------------------
-%% Helpers
-%%------------------------------------------------------------------------------
-clean_all_acls() ->
- [ mnesia:dirty_delete({emqx_acl, Login})
- || Login <- mnesia:dirty_all_keys(emqx_acl)].
+create_conflicting_records() ->
+ Records = [
+ #?ACL_TABLE{filter = {{clientid,<<"client6">>}, <<"t">>}, action = pubsub, access = deny, created_at = 0},
+ #?ACL_TABLE{filter = {{clientid,<<"client5">>}, <<"t">>}, action = pubsub, access = deny, created_at = 1},
+ #?ACL_TABLE2{who = {clientid,<<"client5">>}, rules = [{allow, sub, <<"t">>, 2}]}
+ ],
+ mnesia:transaction(fun() -> lists:foreach(fun mnesia:write/1, Records) end).
+
+
+combined_conflicting_records() ->
+ % pubsub's are split, ACL_TABLE2 rules shadow ACL_TABLE rules
+ [
+ {{clientid,<<"client5">>},<<"t">>,sub,allow,2},
+ {{clientid,<<"client5">>},<<"t">>,pub,deny,1},
+ {{clientid,<<"client6">>},<<"t">>,sub,deny,0},
+ {{clientid,<<"client6">>},<<"t">>,pub,deny,0}
+ ].
%%--------------------------------------------------------------------
%% HTTP Request
diff --git a/apps/emqx_auth_mongo/rebar.config b/apps/emqx_auth_mongo/rebar.config
index f44e69543..78442c00b 100644
--- a/apps/emqx_auth_mongo/rebar.config
+++ b/apps/emqx_auth_mongo/rebar.config
@@ -1,6 +1,6 @@
{deps,
%% NOTE: mind poolboy version when updating mongodb-erlang version
- [{mongodb, {git,"https://github.com/emqx/mongodb-erlang", {tag, "v3.0.7"}}},
+ [{mongodb, {git,"https://github.com/emqx/mongodb-erlang", {tag, "v3.0.10"}}},
%% mongodb-erlang uses a special fork https://github.com/comtihon/poolboy.git
%% (which has overflow_ttl feature added).
%% However, it references `{branch, "master}` (commit 9c06a9a on 2021-04-07).
diff --git a/apps/emqx_auth_redis/test/emqx_auth_redis_SUITE_data/certs/redis.crt b/apps/emqx_auth_redis/test/emqx_auth_redis_SUITE_data/certs/redis.crt
index 5eefadf62..582a7bae2 100644
--- a/apps/emqx_auth_redis/test/emqx_auth_redis_SUITE_data/certs/redis.crt
+++ b/apps/emqx_auth_redis/test/emqx_auth_redis_SUITE_data/certs/redis.crt
@@ -1,23 +1,23 @@
-----BEGIN CERTIFICATE-----
-MIID1zCCAb8CCQC/+qKgZd+m/DANBgkqhkiG9w0BAQsFADA1MRMwEQYDVQQKDApS
-ZWRpcyBUZXN0MR4wHAYDVQQDDBVDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMjAx
-MDI5MDEzNDE2WhcNMjExMDI5MDEzNDE2WjAmMRMwEQYDVQQKDApSZWRpcyBUZXN0
+MIID1zCCAb8CCQC/+qKgZd+m/jANBgkqhkiG9w0BAQsFADA1MRMwEQYDVQQKDApS
+ZWRpcyBUZXN0MR4wHAYDVQQDDBVDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMjEx
+MTAxMDgwMDU1WhcNMzExMDMwMDgwMDU1WjAmMRMwEQYDVQQKDApSZWRpcyBUZXN0
MQ8wDQYDVQQDDAZTZXJ2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
AQDSs3bQ9sYi2AhFuHU75Ryk1HHSgfzA6pQAJilmJdTy0s5vyiWe1HQJaWkMcS5V
GVzGMK+c+OBqtXtDDninL3betg1YPMjSCOjPMOTC1H9K7+effwf7Iwpnw9Zro8mb
TEmMslIYhhcDedzT9Owli4QAgbgTn4l1BYuKX9CLrrKFtnr21miKu3ydViy9q7T1
pib3eigvAyk7X2fadHFArGEttsXrD6cetPPkSF/1OLWNlqzUKXzhSyrBXzO44Kks
fwR/EpTiES9g4dNOL2wvKS/YE1fNKhiCENrNxTXQo1l0yOdm2+MeyOeHFzRuS0b/
-+uGDFOPPi04KXeO6dQ5olBCPAgMBAAEwDQYJKoZIhvcNAQELBQADggIBADn0E2vG
-iQWe8/I7VbBdPhPNupVNcLvew10eIHxY2g5vSruCSVRQTgk8itVMRmDQxbb7gdDW
-jnCRbxykxbLjM9iCRljnOCsIcTi7qO7JRl8niV8dtEpPOs9lZxEdNXjIV1iZoWf3
-arBbPQSyQZvTQHG6qbFnyCdMMyyXGGvEPGQDaBiKH+Ko1qeAbCi0zupChYvxmtZ8
-hSTPlMFezDT9bKoNY0pkJSELfokEPU/Pn6Lz/NVbdzmCMjVa/xmF3s31g+DGhz95
-4AyOnCr6o0aydPVVV3pB/BCezNXPUxpp53BG0w/K2f2DnKYCvGvJbqDAaJ8bG/J1
-EFSOmwobdwVxJz3KNubmo1qJ6xOl/YT7yyqPRQRM1SY8nZW+YcoJSZjOe8wJVlob
-d0bOwN1C3HQwomyMWes187bEQP6Y36HuEbR1fK8yIOzGsGDKRFAFwQwMgw2M91lr
-EJIP5NRD3OZRuiYDiVfVhDZDaNahrAMZUcPCgeCAwc4YG6Gp2sDtdorOl4kIJYWE
-BbBZ0Jplq9+g6ciu5ChjAW8iFl0Ae5U24MxPGXnrxiRF4WWxLeZMVLXLDvlPqReD
-CHII5ifyvGEt5+RhqtZC/L+HimL+5wQgOlntqhUdLb6yWRz7YW37PFMnUXU3MXe9
-uY7m73ZLluXiLojcZxU2+cx89u5FOJxrYtrj
++uGDFOPPi04KXeO6dQ5olBCPAgMBAAEwDQYJKoZIhvcNAQELBQADggIBALRSylnk
+JJhEFRniuQ+H1kbfZlVOqnSqGkm38r8X76dnYRZfkFlxVzU2q5HPnSdiL9D3JrrH
+P7wIA5zjr76zK7GPJjkRExRZy5sTLSpsGp7YIGAZ19J3KsDVHSOvPTl38c6L217a
+YzPeQL5IrrW55URmA5PZFu3lsm9z7CNguw1wn2pCNNB+r/cRl4iELehZJT891CQe
+nV9a1YfHY/DkDoMnmrKqmeYdvje8n1uSqTnIV/wNiASU36ztxxD8ZmwprxxbjLSs
+aBjBvsR/eBHbWrz2W1dc5ppgGLuCkiEKmh6/IWX/ZQqyBCzZkmFNiTs8QiLtmoC4
+2bXkPVSyq5wT7eisGbRdcY9vGDtoW/WZOmFVA4XEDVx8M9fb4speHwoHRuTfWsA0
+6Y8P9XpYjG2tQoPpxrZaRshZ+SiHWPov7cAvY34szFePfTWR8gzbL6SgpDz30ceh
+XIuTArOMQMhfWHn3NaOc6hlkRsoviNhc5IXR9VjIdaNJCamEoLVNWZsvHJCUiP10
+yx+9/0a9vI6G+i8oKQ+eKJsfP8Ikoiolf7vU6M+/1kF+sSMxGjFwkMCxLgZB67+a
+m9kw83sVfykWLQ3eRwhdBz0/JiiYtDbbtyqgs3kPhJs9SGZUhDc/7R0lTWf4zxoJ
+l3y7pn/3nJvYrGX7uCBbWPUuqWeHVM9Ip6AZ
-----END CERTIFICATE-----
diff --git a/apps/emqx_lwm2m/etc/emqx_lwm2m.conf b/apps/emqx_lwm2m/etc/emqx_lwm2m.conf
index 968b8fd19..0aa061b1c 100644
--- a/apps/emqx_lwm2m/etc/emqx_lwm2m.conf
+++ b/apps/emqx_lwm2m/etc/emqx_lwm2m.conf
@@ -146,4 +146,4 @@ lwm2m.dtls.ciphers = ECDHE-ECDSA-AES256-GCM-SHA384,ECDHE-RSA-AES256-GCM-SHA384,E
## Note that 'lwm2m.dtls.ciphers' and 'lwm2m.dtls.psk_ciphers' cannot
## be configured at the same time.
## See 'https://tools.ietf.org/html/rfc4279#section-2'.
-#lwm2m.dtls.psk_ciphers = PSK-AES128-CBC-SHA,PSK-AES256-CBC-SHA,PSK-3DES-EDE-CBC-SHA,PSK-RC4-SHA
+#lwm2m.dtls.psk_ciphers = RSA-PSK-AES256-GCM-SHA384,RSA-PSK-AES256-CBC-SHA384,RSA-PSK-AES128-GCM-SHA256,RSA-PSK-AES128-CBC-SHA256,RSA-PSK-AES256-CBC-SHA,RSA-PSK-AES128-CBC-SHA
diff --git a/apps/emqx_lwm2m/priv/emqx_lwm2m.schema b/apps/emqx_lwm2m/priv/emqx_lwm2m.schema
index bf5f144e0..ded81df05 100644
--- a/apps/emqx_lwm2m/priv/emqx_lwm2m.schema
+++ b/apps/emqx_lwm2m/priv/emqx_lwm2m.schema
@@ -185,7 +185,7 @@ end}.
OldCert = cuttlefish:conf_get("lwm2m.certfile", Conf, undefined),
%% Ciphers
- SplitFun = fun(undefined) -> undefined; (S) -> string:tokens(S, ",") end,
+ SplitFun = fun(undefined) -> []; (S) -> string:tokens(S, ",") end,
Ciphers =
case cuttlefish:conf_get("lwm2m.dtls.ciphers", Conf, undefined) of
undefined ->
@@ -198,16 +198,17 @@ end}.
undefined ->
[];
C2 ->
- Psk = lists:map(fun("PSK-AES128-CBC-SHA") -> {psk, aes_128_cbc, sha};
- ("PSK-AES256-CBC-SHA") -> {psk, aes_256_cbc, sha};
- ("PSK-3DES-EDE-CBC-SHA") -> {psk, '3des_ede_cbc', sha};
- ("PSK-RC4-SHA") -> {psk, rc4_128, sha}
- end, SplitFun(C2)),
+ Psk = lists:map(fun("PSK-AES128-CBC-SHA") -> "RSA-PSK-AES128-CBC-SHA";
+ ("PSK-AES256-CBC-SHA") -> "RSA-PSK-AES256-CBC-SHA";
+ ("PSK-3DES-EDE-CBC-SHA") -> "RSA-PSK-3DES-EDE-CBC-SHA";
+ ("PSK-RC4-SHA") -> "RSA-PSK-RC4-SHA";
+ (Suite) -> Suite
+ end, SplitFun(C2)),
[{ciphers, Psk}, {user_lookup_fun, {fun emqx_psk:lookup/3, <<>>}}]
end,
Ciphers /= []
- andalso PskCiphers /= []
- andalso cuttlefish:invalid("The 'lwm2m.dtls.ciphers' and 'lwm2m.dtls.psk_ciphers' cannot exist simultaneously."),
+ andalso PskCiphers /= []
+ andalso cuttlefish:invalid("The 'lwm2m.dtls.ciphers' and 'lwm2m.dtls.psk_ciphers' cannot coexist"),
NCiphers = Ciphers ++ PskCiphers,
diff --git a/apps/emqx_lwm2m/src/emqx_lwm2m.app.src b/apps/emqx_lwm2m/src/emqx_lwm2m.app.src
index f4afe8fbc..551cf8d07 100644
--- a/apps/emqx_lwm2m/src/emqx_lwm2m.app.src
+++ b/apps/emqx_lwm2m/src/emqx_lwm2m.app.src
@@ -1,6 +1,6 @@
{application,emqx_lwm2m,
[{description,"EMQ X LwM2M Gateway"},
- {vsn, "4.3.3"}, % strict semver, bump manually!
+ {vsn, "4.3.4"}, % strict semver, bump manually!
{modules,[]},
{registered,[emqx_lwm2m_sup]},
{applications,[kernel,stdlib,lwm2m_coap]},
diff --git a/apps/emqx_lwm2m/src/emqx_lwm2m.appup.src b/apps/emqx_lwm2m/src/emqx_lwm2m.appup.src
index 07af339fd..600cf236b 100644
--- a/apps/emqx_lwm2m/src/emqx_lwm2m.appup.src
+++ b/apps/emqx_lwm2m/src/emqx_lwm2m.appup.src
@@ -1,19 +1,21 @@
%% -*-: erlang -*-
-{"4.3.3",
+{"4.3.4",
[
- {<<"4.3.[0-1]">>, [
+ {<<"4\\.3\\.[0-1]">>, [
{restart_application, emqx_lwm2m}
]},
{"4.3.2", [
{load_module, emqx_lwm2m_message, brutal_purge, soft_purge, []}
- ]}
+ ]},
+ {"4.3.3", []} %% only config change
],
[
- {<<"4.3.[0-1]">>, [
+ {<<"4\\.3\\.[0-1]">>, [
{restart_application, emqx_lwm2m}
]},
{"4.3.2", [
{load_module, emqx_lwm2m_message, brutal_purge, soft_purge, []}
- ]}
+ ]},
+ {"4.3.3", []} %% only config change
]
}.
diff --git a/apps/emqx_management/src/emqx_management.app.src b/apps/emqx_management/src/emqx_management.app.src
index 94c22f693..405b4c244 100644
--- a/apps/emqx_management/src/emqx_management.app.src
+++ b/apps/emqx_management/src/emqx_management.app.src
@@ -1,6 +1,6 @@
{application, emqx_management,
[{description, "EMQ X Management API and CLI"},
- {vsn, "4.3.7"}, % strict semver, bump manually!
+ {vsn, "4.3.8"}, % strict semver, bump manually!
{modules, []},
{registered, [emqx_management_sup]},
{applications, [kernel,stdlib,minirest]},
diff --git a/apps/emqx_management/src/emqx_management.appup.src b/apps/emqx_management/src/emqx_management.appup.src
index a4e1e6a16..e50724d6d 100644
--- a/apps/emqx_management/src/emqx_management.appup.src
+++ b/apps/emqx_management/src/emqx_management.appup.src
@@ -1,13 +1,13 @@
%% -*- mode: erlang -*-
{VSN,
- [ {<<"4.3.[0-9]">>,
+ [ {<<"4\\.3\\.[0-7]+">>,
[ {apply,{minirest,stop_http,['http:management']}},
{apply,{minirest,stop_http,['https:management']}},
{restart_application, emqx_management}
]},
{<<".*">>, []}
],
- [ {<<"4.3.[0-9]">>,
+ [ {<<"4\\.3\\.[0-7]+">>,
[ {apply,{minirest,stop_http,['http:management']}},
{apply,{minirest,stop_http,['https:management']}},
{restart_application, emqx_management}
diff --git a/apps/emqx_management/src/emqx_mgmt_api_pubsub.erl b/apps/emqx_management/src/emqx_mgmt_api_pubsub.erl
index 84763f403..53ca022bb 100644
--- a/apps/emqx_management/src/emqx_mgmt_api_pubsub.erl
+++ b/apps/emqx_management/src/emqx_mgmt_api_pubsub.erl
@@ -158,7 +158,7 @@ do_subscribe(ClientId, Topics, QoS) ->
_ -> ok
end.
-do_publish(ClientId, _Topics, _Qos, _Retain, _Payload) when not is_binary(ClientId) ->
+do_publish(ClientId, _Topics, _Qos, _Retain, _Payload) when not (is_binary(ClientId) or (ClientId =:= undefined)) ->
{ok, ?ERROR8, <<"bad clientid: must be string">>};
do_publish(_ClientId, [], _Qos, _Retain, _Payload) ->
{ok, ?ERROR15, bad_topic};
diff --git a/apps/emqx_management/src/emqx_mgmt_cli.erl b/apps/emqx_management/src/emqx_mgmt_cli.erl
index db5dd47d0..95f5121cd 100644
--- a/apps/emqx_management/src/emqx_mgmt_cli.erl
+++ b/apps/emqx_management/src/emqx_mgmt_cli.erl
@@ -191,10 +191,8 @@ clients(["show", ClientId]) ->
if_client(ClientId, fun print/1);
clients(["kick", ClientId]) ->
- case emqx_cm:kick_session(bin(ClientId)) of
- ok -> emqx_ctl:print("ok~n");
- _ -> emqx_ctl:print("Not Found.~n")
- end;
+ ok = emqx_cm:kick_session(bin(ClientId)),
+ emqx_ctl:print("ok~n");
clients(_) ->
emqx_ctl:usage([{"clients list", "List all clients"},
diff --git a/apps/emqx_management/src/emqx_mgmt_data_backup.erl b/apps/emqx_management/src/emqx_mgmt_data_backup.erl
index 3e2fe784b..6e467a8ba 100644
--- a/apps/emqx_management/src/emqx_mgmt_data_backup.erl
+++ b/apps/emqx_management/src/emqx_mgmt_data_backup.erl
@@ -118,18 +118,18 @@ export_auth_mnesia() ->
end.
export_acl_mnesia() ->
- case ets:info(emqx_acl) of
+ case ets:info(emqx_acl2) of
undefined -> [];
_ ->
- lists:map(fun({_, Filter, Action, Access, CreatedAt}) ->
- Filter1 = case Filter of
- {{Type, TypeValue}, Topic} ->
+ lists:map(fun({Login, Topic, Action, Access, CreatedAt}) ->
+ Filter1 = case Login of
+ {Type, TypeValue} ->
[{type, Type}, {type_value, TypeValue}, {topic, Topic}];
- {Type, Topic} ->
+ Type ->
[{type, Type}, {topic, Topic}]
end,
Filter1 ++ [{action, Action}, {access, Access}, {created_at, CreatedAt}]
- end, ets:tab2list(emqx_acl))
+ end, emqx_acl_mnesia_db:all_acls_export())
end.
-ifdef(EMQX_ENTERPRISE).
@@ -473,10 +473,9 @@ do_import_auth_mnesia(Auths) ->
end.
do_import_acl_mnesia_by_old_data(Acls) ->
- case ets:info(emqx_acl) of
+ case ets:info(emqx_acl2) of
undefined -> ok;
_ ->
- CreatedAt = erlang:system_time(millisecond),
lists:foreach(fun(#{<<"login">> := Login,
<<"topic">> := Topic,
<<"allow">> := Allow,
@@ -485,11 +484,11 @@ do_import_acl_mnesia_by_old_data(Acls) ->
true -> allow;
false -> deny
end,
- mnesia:dirty_write({emqx_acl, {{get_old_type(), Login}, Topic}, any_to_atom(Action), Allow1, CreatedAt})
+ emqx_acl_mnesia_db:add_acl({get_old_type(), Login}, Topic, any_to_atom(Action), Allow1)
end, Acls)
end.
do_import_acl_mnesia(Acls) ->
- case ets:info(emqx_acl) of
+ case ets:info(emqx_acl2) of
undefined -> ok;
_ ->
lists:foreach(fun(Map = #{<<"action">> := Action,
@@ -501,7 +500,7 @@ do_import_acl_mnesia(Acls) ->
Value ->
{any_to_atom(maps:get(<<"type">>, Map)), Value}
end,
- emqx_acl_mnesia_cli:add_acl(Login, Topic, any_to_atom(Action), any_to_atom(Access))
+ emqx_acl_mnesia_db:add_acl(Login, Topic, any_to_atom(Action), any_to_atom(Access))
end, Acls)
end.
diff --git a/apps/emqx_management/test/emqx_auth_mnesia_migration_SUITE.erl b/apps/emqx_management/test/emqx_auth_mnesia_migration_SUITE.erl
index 838529f03..7ccba161b 100644
--- a/apps/emqx_management/test/emqx_auth_mnesia_migration_SUITE.erl
+++ b/apps/emqx_management/test/emqx_auth_mnesia_migration_SUITE.erl
@@ -30,7 +30,7 @@ matrix() ->
, Version <- ["v4.2.10", "v4.1.5"]].
all() ->
- [t_import_4_0, t_import_4_1, t_import_4_2].
+ [t_import_4_0, t_import_4_1, t_import_4_2, t_export_import].
groups() ->
[{username, [], cases()}, {clientid, [], cases()}].
@@ -52,7 +52,8 @@ init_per_testcase(_, Config) ->
Config.
end_per_testcase(_, _Config) ->
- {atomic,ok} = mnesia:clear_table(emqx_acl),
+ {atomic,ok} = mnesia:clear_table(?ACL_TABLE),
+ {atomic,ok} = mnesia:clear_table(?ACL_TABLE2),
{atomic,ok} = mnesia:clear_table(emqx_user),
ok.
-ifdef(EMQX_ENTERPRISE).
@@ -138,25 +139,50 @@ t_import_4_2(Config) ->
test_import(clientid, {<<"client_for_test">>, <<"public">>}),
test_import(username, {<<"user_for_test">>, <<"public">>}),
- ?assertMatch([#emqx_acl{
- filter = {{Type,<<"emqx_c">>}, <<"Topic/A">>},
- action = pub,
- access = allow
- },
- #emqx_acl{
- filter = {{Type,<<"emqx_c">>}, <<"Topic/A">>},
- action = sub,
- access = allow
- }],
- lists:sort(ets:tab2list(emqx_acl))).
+ ?assertMatch([
+ {{username, <<"emqx_c">>}, <<"Topic/A">>, pub, allow, _},
+ {{username, <<"emqx_c">>}, <<"Topic/A">>, sub, allow, _}
+ ],
+ lists:sort(emqx_acl_mnesia_db:all_acls())).
-endif.
+t_export_import(_Config) ->
+ emqx_acl_mnesia_migrator:migrate_records(),
+
+ Records = [
+ #?ACL_TABLE2{who = {clientid,<<"client1">>}, rules = [{allow, sub, <<"t1">>, 1}]},
+ #?ACL_TABLE2{who = {clientid,<<"client2">>}, rules = [{allow, pub, <<"t2">>, 2}]}
+ ],
+ mnesia:transaction(fun() -> lists:foreach(fun mnesia:write/1, Records) end),
+ timer:sleep(100),
+
+ AclData = emqx_json:encode(emqx_mgmt_data_backup:export_acl_mnesia()),
+
+ mnesia:transaction(fun() ->
+ lists:foreach(fun(#?ACL_TABLE2{who = Who}) ->
+ mnesia:delete({?ACL_TABLE2, Who})
+ end,
+ Records)
+ end),
+
+ ?assertEqual([], emqx_acl_mnesia_db:all_acls()),
+
+ emqx_mgmt_data_backup:import_acl_mnesia(emqx_json:decode(AclData, [return_maps]), "4.3"),
+ timer:sleep(100),
+
+ ?assertMatch([
+ {{clientid, <<"client1">>}, <<"t1">>, sub, allow, _},
+ {{clientid, <<"client2">>}, <<"t2">>, pub, allow, _}
+ ], lists:sort(emqx_acl_mnesia_db:all_acls())).
+
do_import(File, Config) ->
do_import(File, Config, "{}").
do_import(File, Config, Overrides) ->
- mnesia:clear_table(emqx_acl),
+ mnesia:clear_table(?ACL_TABLE),
+ mnesia:clear_table(?ACL_TABLE2),
mnesia:clear_table(emqx_user),
+ emqx_acl_mnesia_migrator:migrate_records(),
Filename = filename:join(proplists:get_value(data_dir, Config), File),
emqx_mgmt_data_backup:import(Filename, Overrides).
@@ -172,4 +198,4 @@ test_import(clientid, {ClientID, Password}) ->
Req = #{clientid => ClientID,
password => Password},
?assertMatch({stop, #{auth_result := success}},
- emqx_auth_mnesia:check(Req, #{}, #{hash_type => sha256})).
\ No newline at end of file
+ emqx_auth_mnesia:check(Req, #{}, #{hash_type => sha256})).
diff --git a/apps/emqx_management/test/emqx_mgmt_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_SUITE.erl
index 6bac9b4c7..77d46b744 100644
--- a/apps/emqx_management/test/emqx_mgmt_SUITE.erl
+++ b/apps/emqx_management/test/emqx_mgmt_SUITE.erl
@@ -158,9 +158,9 @@ t_clients_cmd(_) ->
timer:sleep(300),
emqx_mgmt_cli:clients(["list"]),
?assertMatch({match, _}, re:run(emqx_mgmt_cli:clients(["show", "client12"]), "client12")),
- ?assertEqual((emqx_mgmt_cli:clients(["kick", "client12"])), "ok~n"),
+ ?assertEqual("ok~n", emqx_mgmt_cli:clients(["kick", "client12"])),
timer:sleep(500),
- ?assertMatch({match, _}, re:run(emqx_mgmt_cli:clients(["show", "client12"]), "Not Found")),
+ ?assertEqual("ok~n", emqx_mgmt_cli:clients(["kick", "client12"])),
receive
{'EXIT', T, _} ->
ok
diff --git a/apps/emqx_management/test/emqx_mgmt_api_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_SUITE.erl
index e0754a522..e45acfd42 100644
--- a/apps/emqx_management/test/emqx_mgmt_api_SUITE.erl
+++ b/apps/emqx_management/test/emqx_mgmt_api_SUITE.erl
@@ -223,8 +223,8 @@ t_clients(_) ->
timer:sleep(300),
- {ok, NotFound0} = request_api(delete, api_path(["clients", binary_to_list(ClientId1)]), auth_header_()),
- ?assertEqual(?ERROR12, get(<<"code">>, NotFound0)),
+ {ok, Ok1} = request_api(delete, api_path(["clients", binary_to_list(ClientId1)]), auth_header_()),
+ ?assertEqual(?SUCCESS, get(<<"code">>, Ok1)),
{ok, Clients6} = request_api(get, api_path(["clients"]), "_limit=100&_page=1", auth_header_()),
?assertEqual(1, maps:get(<<"count">>, get(<<"meta">>, Clients6))),
@@ -447,6 +447,19 @@ t_pubsub(_) ->
after 100 ->
false
end),
+
+ % no clientid
+ {ok, Code} = request_api(post, api_path(["mqtt/publish"]), [], auth_header_(),
+ #{<<"topic">> => <<"mytopic">>,
+ <<"qos">> => 1,
+ <<"payload">> => <<"hello">>}),
+ ?assert(receive
+ {publish, #{payload := <<"hello">>}} ->
+ true
+ after 100 ->
+ false
+ end),
+
%% json payload
{ok, Code} = request_api(post, api_path(["mqtt/publish"]), [], auth_header_(),
#{<<"clientid">> => ClientId,
@@ -491,9 +504,9 @@ t_pubsub(_) ->
ok = emqtt:disconnect(C1),
- ?assertEqual(2, emqx_metrics:val('messages.qos1.received') - Qos1Received),
+ ?assertEqual(3, emqx_metrics:val('messages.qos1.received') - Qos1Received),
?assertEqual(2, emqx_metrics:val('messages.qos2.received') - Qos2Received),
- ?assertEqual(4, emqx_metrics:val('messages.received') - Received).
+ ?assertEqual(5, emqx_metrics:val('messages.received') - Received).
loop([]) -> [];
diff --git a/apps/emqx_web_hook/src/emqx_web_hook_actions.erl b/apps/emqx_web_hook/src/emqx_web_hook_actions.erl
index 5ef13587a..79aefdb85 100644
--- a/apps/emqx_web_hook/src/emqx_web_hook_actions.erl
+++ b/apps/emqx_web_hook/src/emqx_web_hook_actions.erl
@@ -57,7 +57,7 @@
type => string,
default => <<"5s">>,
title => #{en => <<"Request Timeout">>,
- zh => <<"请求超时时间时间"/utf8>>},
+ zh => <<"请求超时时间"/utf8>>},
description => #{en => <<"Request Timeout In Seconds">>,
zh => <<"请求超时时间"/utf8>>}},
pool_size => #{order => 4,
diff --git a/bin/emqx b/bin/emqx
index e8fda9c2b..0662d4a46 100755
--- a/bin/emqx
+++ b/bin/emqx
@@ -20,6 +20,41 @@ mkdir -p "$RUNNER_LOG_DIR"
# Make sure data directory exists
mkdir -p "$RUNNER_DATA_DIR"
+export ROOTDIR="$RUNNER_ROOT_DIR"
+export ERTS_DIR="$ROOTDIR/erts-$ERTS_VSN"
+export BINDIR="$ERTS_DIR/bin"
+export EMU="beam"
+export PROGNAME="erl"
+DYNLIBS_DIR="$RUNNER_ROOT_DIR/dynlibs"
+ERTS_LIB_DIR="$ERTS_DIR/../lib"
+MNESIA_DATA_DIR="$RUNNER_DATA_DIR/mnesia/$NAME"
+
+# Echo to stderr on errors
+echoerr() { echo "$*" 1>&2; }
+
+check_eralng_start() {
+ "$BINDIR/$PROGNAME" -noshell -boot "$REL_DIR/start_clean" -s crypto start -s init stop
+}
+
+if ! check_eralng_start >/dev/null 2>&1; then
+ BUILT_ON="$(head -1 "${REL_DIR}/BUILT_ON")"
+ ## failed to start, might be due to missing libs, try to be portable
+ export LD_LIBRARY_PATH="$DYNLIBS_DIR:$LD_LIBRARY_PATH"
+ if ! check_eralng_start; then
+ ## it's hopeless
+ echoerr "FATAL: Unable to start Erlang (with libcrypto)."
+ echoerr "Please make sure it's running on the correct platform with all required dependencies."
+ echoerr "This EMQ X release is built for $BUILT_ON"
+ exit 1
+ fi
+ echoerr "WARNING: There seem to be missing dynamic libs from the OS. Using libs from ${DYNLIBS_DIR}"
+fi
+
+## backward compatible
+if [ -d "$ERTS_DIR/lib" ]; then
+ export LD_LIBRARY_PATH="$ERTS_DIR/lib:$LD_LIBRARY_PATH"
+fi
+
# cuttlefish try to read environment variables starting with "EMQX_"
export CUTTLEFISH_ENV_OVERRIDE_PREFIX='EMQX_'
@@ -120,9 +155,6 @@ if [ "$ULIMIT_F" -lt 1024 ]; then
echo "!!!!"
fi
-# Echo to stderr on errors
-echoerr() { echo "$@" 1>&2; }
-
# By default, use cuttlefish to generate app.config and vm.args
CUTTLEFISH="${USE_CUTTLEFISH:-yes}"
@@ -238,12 +270,22 @@ generate_config() {
sed '/^#/d' "$CUTTLE_GEN_ARG_FILE" | sed '/^$/d' | while IFS='' read -r ARG_LINE || [ -n "$ARG_LINE" ]; do
ARG_KEY=$(echo "$ARG_LINE" | awk '{$NF="";print}')
ARG_VALUE=$(echo "$ARG_LINE" | awk '{print $NF}')
- TMP_ARG_VALUE=$(grep "^$ARG_KEY" "$TMP_ARG_FILE" | awk '{print $NF}')
- if [ "$ARG_VALUE" != "$TMP_ARG_VALUE" ] ; then
- if [ -n "$TMP_ARG_VALUE" ]; then
- sh -c "$SED_REPLACE 's/^$ARG_KEY.*$/$ARG_LINE/' $TMP_ARG_FILE"
- else
- echo "$ARG_LINE" >> "$TMP_ARG_FILE"
+ if [ "$ARG_KEY" = '' ]; then
+ ## for the flags, e.g. -heart -emu_args etc
+ ARG_KEY=$(echo "$ARG_LINE" | awk '{print $1}')
+ ARG_VALUE=''
+ TMP_ARG_KEY=$(grep "^$ARG_KEY" "$TMP_ARG_FILE" | awk '{print $1}')
+ if [ "$TMP_ARG_KEY" = '' ]; then
+ echo "$ARG_KEY" >> "$TMP_ARG_FILE"
+ fi
+ else
+ TMP_ARG_VALUE=$(grep "^$ARG_KEY" "$TMP_ARG_FILE" | awk '{print $NF}')
+ if [ "$ARG_VALUE" != "$TMP_ARG_VALUE" ] ; then
+ if [ -n "$TMP_ARG_VALUE" ]; then
+ sh -c "$SED_REPLACE 's/^$ARG_KEY.*$/$ARG_LINE/' $TMP_ARG_FILE"
+ else
+ echo "$ARG_LINE" >> "$TMP_ARG_FILE"
+ fi
fi
fi
done
@@ -354,15 +396,6 @@ else
PROTO_DIST_ARG="-proto_dist $PROTO_DIST"
fi
-export ROOTDIR="$RUNNER_ROOT_DIR"
-export ERTS_DIR="$ROOTDIR/erts-$ERTS_VSN"
-export BINDIR="$ERTS_DIR/bin"
-export EMU="beam"
-export PROGNAME="erl"
-export LD_LIBRARY_PATH="$ERTS_DIR/lib:$LD_LIBRARY_PATH"
-ERTS_LIB_DIR="$ERTS_DIR/../lib"
-MNESIA_DATA_DIR="$RUNNER_DATA_DIR/mnesia/$NAME"
-
cd "$ROOTDIR"
# User can specify an sname without @hostname
diff --git a/build b/build
index be7813e66..be4f88672 100755
--- a/build
+++ b/build
@@ -15,18 +15,7 @@ cd -P -- "$(dirname -- "${BASH_SOURCE[0]}")"
PKG_VSN="${PKG_VSN:-$(./pkg-vsn.sh)}"
export PKG_VSN
-if [ "$(uname -s)" = 'Darwin' ]; then
- SYSTEM=macos
-elif [ "$(uname -s)" = 'Linux' ]; then
- if grep -q -i 'centos' /etc/*-release; then
- DIST='centos'
- VERSION_ID="$(rpm --eval '%{centos_ver}')"
- else
- DIST="$(sed -n '/^ID=/p' /etc/os-release | sed -r 's/ID=(.*)/\1/g' | sed 's/"//g')"
- VERSION_ID="$(sed -n '/^VERSION_ID=/p' /etc/os-release | sed -r 's/VERSION_ID=(.*)/\1/g' | sed 's/"//g')"
- fi
- SYSTEM="$(echo "${DIST}${VERSION_ID}" | sed -r 's/([a-zA-Z]*)-.*/\1/g')"
-fi
+SYSTEM="$(./scripts/get-distro.sh)"
ARCH="$(uname -m)"
case "$ARCH" in
@@ -46,8 +35,8 @@ export ARCH
## Support RPM and Debian based linux systems
##
if [ "$(uname -s)" = 'Linux' ]; then
- case "${DIST:-}" in
- ubuntu|debian|raspbian)
+ case "${SYSTEM:-}" in
+ ubuntu*|debian*|raspbian*)
PKGERDIR='deb'
;;
*)
@@ -98,6 +87,18 @@ make_relup() {
./rebar3 as "$PROFILE" relup --relname emqx --relvsn "${PKG_VSN}"
}
+cp_dyn_libs() {
+ local rel_dir="$1"
+ local target_dir="${rel_dir}/dynlibs"
+ if ! [ "$(uname -s)" = 'Linux' ]; then
+ return 0;
+ fi
+ mkdir -p "$target_dir"
+ while read -r so_file; do
+ cp -L "$so_file" "$target_dir/"
+ done < <(find "$rel_dir" -type f \( -name "*.so*" -o -name "beam.smp" \) -print0 | xargs -0 ldd | grep -E '^\s+.*=>\s(/lib|/usr)' | awk '{print $3}')
+}
+
## make_zip turns .tar.gz into a .zip with a slightly different name.
## It assumes the .tar.gz has been built -- relies on Makefile dependency
make_zip() {
@@ -117,6 +118,9 @@ make_zip() {
local zipball
zipball="${pkgpath}/${PROFILE}-${SYSTEM}-${PKG_VSN}-${ARCH}.zip"
tar zxf "${tarball}" -C "${tard}/emqx"
+ ## try to be portable for zip packages.
+ ## for DEB and RPM packages the dependencies are resolved by yum and apt
+ cp_dyn_libs "${tard}/emqx"
(cd "${tard}" && zip -qr - emqx) > "${zipball}"
}
diff --git a/deploy/docker/Dockerfile b/deploy/docker/Dockerfile
index 9e0ba9d5b..e362c6b73 100644
--- a/deploy/docker/Dockerfile
+++ b/deploy/docker/Dockerfile
@@ -41,7 +41,7 @@ LABEL org.label-schema.docker.dockerfile="Dockerfile" \
org.label-schema.url="https://emqx.io" \
org.label-schema.vcs-type="Git" \
org.label-schema.vcs-url="https://github.com/emqx/emqx" \
- maintainer="Raymond M Mouthaan , Huang Rui , EMQ X Team "
+ maintainer="EMQ X Team "
ARG QEMU_ARCH=x86_64
ARG EMQX_NAME=emqx
diff --git a/docker.mk b/docker.mk
index e2fe61d36..22f2b6e4f 100644
--- a/docker.mk
+++ b/docker.mk
@@ -1,8 +1,10 @@
#!/usr/bin/make -f
# -*- makefile -*-
-## default globals
-TARGET ?= emqx/emqx
+## default globals.
+## when built with `make docker` command the default profile is either emqx or emqx-ee (for enterprise)
+## or the TARGET variable can be set beforehand to force a different name
+TARGET ?= emqx/$(PROFILE)
QEMU_ARCH ?= x86_64
ARCH ?= amd64
QEMU_VERSION ?= v5.0.0-2
@@ -37,7 +39,7 @@ docker-prepare:
# enable experimental to use docker manifest command
@echo '{ "experimental": "enabled" }' | tee $$HOME/.docker/config.json
# enable experimental
- @echo '{ "experimental": true, "storage-driver": "overlay2", "max-concurrent-downloads": 50, "max-concurrent-uploads": 50 }' | tee /etc/docker/daemon.json
+ @echo '{ "experimental": true, "storage-driver": "overlay2", "max-concurrent-downloads": 50, "max-concurrent-uploads": 50 }' | tee /etc/docker/daemon.json
@service docker restart
.PHONY: docker-build
@@ -85,7 +87,7 @@ docker-tag:
.PHONY: docker-save
docker-save:
- @echo "DOCKER SAVE: Save Docker image."
+ @echo "DOCKER SAVE: Save Docker image."
@mkdir -p _packages/$(EMQX_NAME)
@@ -94,7 +96,7 @@ docker-save:
zip -r -m $(EMQX_NAME)-docker-$(PKG_VSN).zip $(EMQX_NAME)-docker-$(PKG_VSN); \
mv ./$(EMQX_NAME)-docker-$(PKG_VSN).zip _packages/$(EMQX_NAME)/$(EMQX_NAME)-docker-$(PKG_VSN).zip; \
fi
-
+
@for arch in $(ARCH_LIST); do \
if [ -n "$$(docker images -q $(TARGET):$(PKG_VSN)-$(OS)-$${arch})" ]; then \
docker save $(TARGET):$(PKG_VSN)-$(OS)-$${arch} > $(EMQX_NAME)-docker-$(PKG_VSN)-$(OS)-$${arch}; \
@@ -105,8 +107,8 @@ docker-save:
.PHONY: docker-push
docker-push:
- @echo "DOCKER PUSH: Push Docker image.";
- @echo "DOCKER PUSH: pushing - $(TARGET):$(PKG_VSN).";
+ @echo "DOCKER PUSH: Push Docker image.";
+ @echo "DOCKER PUSH: pushing - $(TARGET):$(PKG_VSN).";
@if [ -n "$$(docker images -q $(TARGET):$(PKG_VSN))" ]; then \
docker push $(TARGET):$(PKG_VSN); \
@@ -131,7 +133,7 @@ docker-manifest-list:
fi; \
done; \
eval $$version; \
- eval $$latest;
+ eval $$latest;
for arch in $(ARCH_LIST); do \
case $${arch} in \
@@ -166,10 +168,10 @@ docker-manifest-list:
fi; \
;; \
esac; \
- done;
+ done;
docker manifest inspect $(TARGET):$(PKG_VSN)
- docker manifest push $(TARGET):$(PKG_VSN);
+ docker manifest push $(TARGET):$(PKG_VSN);
docker manifest inspect $(TARGET):latest
docker manifest push $(TARGET):latest;
diff --git a/etc/BUILT_ON b/etc/BUILT_ON
index 2997223fa..43a77ec87 100644
--- a/etc/BUILT_ON
+++ b/etc/BUILT_ON
@@ -1 +1 @@
-{{built_on_arch}}
+{{built_on_platform}}
diff --git a/etc/emqx.conf b/etc/emqx.conf
index f24d7ada2..cf1279de1 100644
--- a/etc/emqx.conf
+++ b/etc/emqx.conf
@@ -199,6 +199,16 @@ node.data_dir = {{ platform_data_dir }}
## Heartbeat monitoring of an Erlang runtime system. Comment the line to disable
## heartbeat, or set the value as 'on'
##
+## Turning this on may cause the node to restart if it becomes unresponsive to
+## the heartbeat pings.
+##
+## NOTE: When managed by systemd (or other such supervision tools),
+## heart will probably only cause EMQ X to stop, but restart or not will
+## depend on systemd's restart strategy.
+## NOTE: When running in docker, the container will die as soon as the
+## heart process kills EMQ X, but restart or not will depend on container
+## supervision strategy, such as k8s restartPolicy.
+##
## Value: on
##
## vm.args: -heart
diff --git a/include/emqx_release.hrl b/include/emqx_release.hrl
index 833451de1..c89dde010 100644
--- a/include/emqx_release.hrl
+++ b/include/emqx_release.hrl
@@ -29,7 +29,7 @@
-ifndef(EMQX_ENTERPRISE).
--define(EMQX_RELEASE, {opensource, "4.3.8"}).
+-define(EMQX_RELEASE, {opensource, "4.3.9"}).
-else.
diff --git a/lib-ce/emqx_dashboard/src/emqx_dashboard.appup.src b/lib-ce/emqx_dashboard/src/emqx_dashboard.appup.src
index 4dc02511c..902585ffb 100644
--- a/lib-ce/emqx_dashboard/src/emqx_dashboard.appup.src
+++ b/lib-ce/emqx_dashboard/src/emqx_dashboard.appup.src
@@ -1,20 +1,18 @@
%% -*- mode: erlang -*-
{VSN,
- [ {<<"4.3.[0-9]">>,
+ [ {<<".*">>,
%% load all plugins
%% NOTE: this depends on the fact that emqx_dashboard is always
%% the last application gets upgraded
[ {apply, {emqx_rule_engine, load_providers, []}}
, {restart_application, emqx_dashboard}
, {apply, {emqx_plugins, load, []}}
- ]},
- {<<".*">>, []}
+ ]}
],
- [ {<<"4.3.[0-9]">>,
+ [ {<<".*">>,
[ {apply, {emqx_rule_engine, load_providers, []}}
, {restart_application, emqx_dashboard}
, {apply, {emqx_plugins, load, []}}
- ]},
- {<<".*">>, []}
+ ]}
]
}.
diff --git a/rebar.config b/rebar.config
index bd39f9816..5a8b0db40 100644
--- a/rebar.config
+++ b/rebar.config
@@ -55,7 +55,7 @@
, {recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}}
, {observer_cli, "1.6.1"} % NOTE: depends on recon 2.5.1
, {getopt, "1.0.1"}
- , {snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "0.13.0"}}}
+ , {snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "0.15.0"}}}
]}.
{xref_ignores,
diff --git a/rebar.config.erl b/rebar.config.erl
index 901027d2d..1000a2c92 100644
--- a/rebar.config.erl
+++ b/rebar.config.erl
@@ -173,11 +173,24 @@ relx(Vsn, RelType, PkgType) ->
, {vm_args,false}
, {release, {emqx, Vsn}, relx_apps(RelType)}
, {overlay, relx_overlay(RelType)}
- , {overlay_vars, [ {built_on_arch, rebar_utils:get_arch()}
+ , {overlay_vars, [ {built_on_platform, built_on()}
, {emqx_description, emqx_description(RelType, IsEnterprise)}
| overlay_vars(RelType, PkgType, IsEnterprise)]}
].
+built_on() ->
+ On = rebar_utils:get_arch(),
+ case distro() of
+ false -> On;
+ Distro -> On ++ "-" ++ Distro
+ end.
+
+distro() ->
+ case os:type() of
+ {unix, _} -> string:strip(os:cmd("scripts/get-distro.sh"), both, $\n);
+ _ -> false
+ end.
+
emqx_description(cloud, true) -> "EMQ X Enterprise";
emqx_description(cloud, false) -> "EMQ X Broker";
emqx_description(edge, _) -> "EMQ X Edge".
diff --git a/scripts/apps-version-check.sh b/scripts/apps-version-check.sh
index b070e3c45..596f7404a 100755
--- a/scripts/apps-version-check.sh
+++ b/scripts/apps-version-check.sh
@@ -18,7 +18,7 @@ while read -r app; do
changed="$(git diff --name-only "$latest_release"...HEAD \
-- "$app_path/src" \
-- "$app_path/priv" \
- -- "$app_path/c_src" | wc -l)"
+ -- "$app_path/c_src" | { grep -v -E 'appup\.src' || true; } | wc -l)"
if [ "$changed" -gt 0 ]; then
echo "$src_file needs a vsn bump"
bad_app_count=$(( bad_app_count + 1))
diff --git a/scripts/get-distro.sh b/scripts/get-distro.sh
new file mode 100755
index 000000000..ae52abba3
--- /dev/null
+++ b/scripts/get-distro.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+## This script prints Linux distro name and its version number
+## e.g. macos, centos8, ubuntu20.04
+
+set -euo pipefail
+
+if [ "$(uname -s)" = 'Darwin' ]; then
+ echo 'macos'
+elif [ "$(uname -s)" = 'Linux' ]; then
+ if grep -q -i 'centos' /etc/*-release; then
+ DIST='centos'
+ VERSION_ID="$(rpm --eval '%{centos_ver}')"
+ else
+ DIST="$(sed -n '/^ID=/p' /etc/os-release | sed -r 's/ID=(.*)/\1/g' | sed 's/"//g')"
+ VERSION_ID="$(sed -n '/^VERSION_ID=/p' /etc/os-release | sed -r 's/VERSION_ID=(.*)/\1/g' | sed 's/"//g')"
+ fi
+ echo "${DIST}${VERSION_ID}" | sed -r 's/([a-zA-Z]*)-.*/\1/g'
+fi
diff --git a/scripts/one-more-emqx-ee.sh b/scripts/one-more-emqx-ee.sh
new file mode 100644
index 000000000..f94681056
--- /dev/null
+++ b/scripts/one-more-emqx-ee.sh
@@ -0,0 +1,106 @@
+#!/bin/bash
+# shellcheck disable=2090
+###############
+## args and env validation
+###############
+
+if ! [ -d "emqx" ]; then
+ echo "[error] this script must be run at the same dir as the emqx"
+ exit 1
+fi
+
+if [ $# -eq 0 ]
+ then
+ echo "[error] a new emqx name should be provided!"
+ echo "Usage: ./one_more_emqx "
+ echo " e.g. ./one_more_emqx emqx2"
+ exit 1
+fi
+
+NEW_EMQX=$1
+if [ -d "$NEW_EMQX" ]; then
+ echo "[error] a dir named ${NEW_EMQX} already exists!"
+ exit 2
+fi
+echo creating "$NEW_EMQX" ...
+
+SED_REPLACE="sed -i "
+# shellcheck disable=2089
+case $(sed --help 2>&1) in
+ *GNU*) SED_REPLACE="sed -i ";;
+ *) SED_REPLACE="sed -i ''";;
+esac
+
+PORT_INC_=$(cksum <<< "$NEW_EMQX" | cut -f 1 -d ' ')
+PORT_INC=$((PORT_INC_ % 1000))
+echo using increment factor: $PORT_INC
+
+###############
+## helpers
+###############
+process_emqx_conf() {
+ echo "processing config file: $1"
+ $SED_REPLACE '/^#/d' "$1"
+ $SED_REPLACE '/^$/d' "$1"
+
+ for entry_ in "${entries_to_be_inc[@]}"
+ do
+ echo inc port for "$entry_"
+ ip_port_=$(grep -E "$entry_"'[ \t]*=' "$1" 2> /dev/null | tail -1 | cut -d = -f 2- | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')
+ echo -- from: "$ip_port_"
+ ip_=$(echo "$ip_port_" | cut -sd : -f 1)
+ port_=$(echo "$ip_port_" | cut -sd : -f 2)
+ if [ -z "$ip_" ]
+ then
+ new_ip_port=$(( ip_port_ + PORT_INC ))
+ else
+ new_ip_port="${ip_}:$(( port_ + PORT_INC ))"
+ fi
+ echo -- to: "$new_ip_port"
+ $SED_REPLACE 's|'"$entry_"'[ \t]*=.*|'"$entry_"' = '"$new_ip_port"'|g' "$1"
+ done
+}
+
+###############
+## main
+###############
+
+cp -r emqx "$NEW_EMQX"
+
+## change the rpc ports
+$SED_REPLACE 's|tcp_server_port[ \t]*=.*|tcp_server_port = 5369|g' emqx/etc/rpc.conf
+$SED_REPLACE 's|tcp_client_port[ \t]*=.*|tcp_client_port = 5370|g' emqx/etc/rpc.conf
+$SED_REPLACE 's|tcp_client_port[ \t]*=.*|tcp_client_port = 5369|g' "$NEW_EMQX/etc/rpc.conf"
+$SED_REPLACE 's|tcp_server_port[ \t]*=.*|tcp_server_port = 5370|g' "$NEW_EMQX/etc/rpc.conf"
+$SED_REPLACE 's|.*node\.name.*|node.name='"$NEW_EMQX"'@127.0.0.1|g' "$NEW_EMQX/etc/emqx.conf"
+
+conf_ext="*.conf"
+
+find "$NEW_EMQX" -name "${conf_ext}" | while read -r conf; do
+ if [ "${conf##*/}" = 'emqx.conf' ]
+ then
+ declare -a entries_to_be_inc=("node.dist_listen_min"
+ "node.dist_listen_max")
+ process_emqx_conf "$conf" "${entries_to_be_inc[@]}"
+ elif [ "${conf##*/}" = 'listeners.conf' ]
+ then
+ declare -a entries_to_be_inc=("listener.tcp.external"
+ "listener.tcp.internal"
+ "listener.ssl.external"
+ "listener.ws.external"
+ "listener.wss.external")
+ process_emqx_conf "$conf" "${entries_to_be_inc[@]}"
+ elif [ "${conf##*/}" = 'emqx_management.conf' ]
+ then
+ declare -a entries_to_be_inc=("management.listener.http"
+ "management.listener.https")
+ process_emqx_conf "$conf" "${entries_to_be_inc[@]}"
+ elif [ "${conf##*/}" = 'emqx_dashboard.conf' ]
+ then
+ declare -a entries_to_be_inc=("dashboard.listener.http"
+ "dashboard.listener.https")
+ process_emqx_conf "$conf" "${entries_to_be_inc[@]}"
+ else
+ echo "."
+ fi
+done
diff --git a/scripts/one-more-emqx.sh b/scripts/one-more-emqx.sh
new file mode 100644
index 000000000..d905f64c4
--- /dev/null
+++ b/scripts/one-more-emqx.sh
@@ -0,0 +1,102 @@
+#!/bin/bash
+# shellcheck disable=2090
+###############
+## args and env validation
+###############
+
+if ! [ -d "emqx" ]; then
+ echo "[error] this script must be run at the same dir as the emqx"
+ exit 1
+fi
+
+if [ $# -eq 0 ]
+ then
+ echo "[error] a new emqx name should be provided!"
+ echo "Usage: ./one_more_emqx "
+ echo " e.g. ./one_more_emqx emqx2"
+ exit 1
+fi
+
+NEW_EMQX=$1
+if [ -d "$NEW_EMQX" ]; then
+ echo "[error] a dir named ${NEW_EMQX} already exists!"
+ exit 2
+fi
+echo creating "$NEW_EMQX" ...
+
+SED_REPLACE="sed -i "
+# shellcheck disable=2089
+case $(sed --help 2>&1) in
+ *GNU*) SED_REPLACE="sed -i ";;
+ *) SED_REPLACE="sed -i ''";;
+esac
+
+PORT_INC_=$(cksum <<< "$NEW_EMQX" | cut -f 1 -d ' ')
+PORT_INC=$((PORT_INC_ % 1000))
+echo using increment factor: "$PORT_INC"
+
+###############
+## helpers
+###############
+process_emqx_conf() {
+ echo "processing config file: $1"
+ $SED_REPLACE '/^#/d' "$1"
+ $SED_REPLACE '/^$/d' "$1"
+ $SED_REPLACE 's|.*node\.name.*|node.name='"$NEW_EMQX"'@127.0.0.1|g' "$1"
+
+ for entry_ in "${entries_to_be_inc[@]}"
+ do
+ echo inc port for "$entry_"
+ ip_port_=$(grep -E "$entry_"'[ \t]*=' "$1" 2> /dev/null | tail -1 | cut -d = -f 2- | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')
+ echo -- from: "$ip_port_"
+ ip_=$(echo "$ip_port_" | cut -sd : -f 1)
+ port_=$(echo "$ip_port_" | cut -sd : -f 2)
+ if [ -z "$ip_" ]
+ then
+ new_ip_port=$(( ip_port_ + PORT_INC ))
+ else
+ new_ip_port="${ip_}:$(( port_ + PORT_INC ))"
+ fi
+ echo -- to: "$new_ip_port"
+ $SED_REPLACE 's|'"$entry_"'[ \t]*=.*|'"$entry_"' = '"$new_ip_port"'|g' "$1"
+ done
+}
+
+###############
+## main
+###############
+
+cp -r emqx "$NEW_EMQX"
+
+## change the rpc ports
+$SED_REPLACE 's|tcp_server_port[ \t]*=.*|tcp_server_port = 5369|g' emqx/etc/emqx.conf
+$SED_REPLACE 's|tcp_client_port[ \t]*=.*|tcp_client_port = 5370|g' emqx/etc/emqx.conf
+$SED_REPLACE 's|tcp_client_port[ \t]*=.*|tcp_client_port = 5369|g' "$NEW_EMQX/etc/emqx.conf"
+$SED_REPLACE 's|tcp_server_port[ \t]*=.*|tcp_server_port = 5370|g' "$NEW_EMQX/etc/emqx.conf"
+
+conf_ext="*.conf"
+find "$NEW_EMQX" -name "${conf_ext}" | while read -r conf; do
+ if [ "${conf##*/}" = 'emqx.conf' ]
+ then
+ declare -a entries_to_be_inc=("node.dist_listen_min"
+ "dist_listen_max"
+ "listener.tcp.external"
+ "listener.tcp.internal"
+ "listener.ssl.external"
+ "listener.ws.external"
+ "listener.wss.external")
+ process_emqx_conf "$conf" "${entries_to_be_inc[@]}"
+ elif [ "${conf##*/}" = 'emqx_management.conf' ]
+ then
+ declare -a entries_to_be_inc=("management.listener.http"
+ "management.listener.https")
+ process_emqx_conf "$conf" "${entries_to_be_inc[@]}"
+ elif [ "${conf##*/}" = 'emqx_dashboard.conf' ]
+ then
+ declare -a entries_to_be_inc=("dashboard.listener.http"
+ "dashboard.listener.https")
+ process_emqx_conf "$conf" "${entries_to_be_inc[@]}"
+ else
+ echo "."
+ fi
+done
diff --git a/src/emqx.appup.src b/src/emqx.appup.src
index d4f9d43ce..326c0aaf0 100644
--- a/src/emqx.appup.src
+++ b/src/emqx.appup.src
@@ -1,21 +1,27 @@
%% -*- mode: erlang -*-
{VSN,
[{"4.3.9",
- [{load_module,emqx_ws_connection,brutal_purge,soft_purge,[]},
+ [{load_module,emqx_channel,brutal_purge,soft_purge,[]},
+ {load_module,emqx_cm,brutal_purge,soft_purge,[]},
+ {load_module,emqx_ws_connection,brutal_purge,soft_purge,[]},
{load_module,emqx_pqueue,brutal_purge,soft_purge,[]},
{load_module,emqx_mqueue,brutal_purge,soft_purge,[]},
{load_module,emqx_frame,brutal_purge,soft_purge,[]},
{load_module,emqx_rpc,brutal_purge,soft_purge,[]},
{load_module,emqx_app,brutal_purge,soft_purge,[]}]},
{"4.3.8",
- [{load_module,emqx_ws_connection,brutal_purge,soft_purge,[]},
+ [{load_module,emqx_channel,brutal_purge,soft_purge,[]},
+ {load_module,emqx_cm,brutal_purge,soft_purge,[]},
+ {load_module,emqx_ws_connection,brutal_purge,soft_purge,[]},
{load_module,emqx_pqueue,brutal_purge,soft_purge,[]},
{load_module,emqx_mqueue,brutal_purge,soft_purge,[]},
{load_module,emqx_frame,brutal_purge,soft_purge,[]},
{load_module,emqx_rpc,brutal_purge,soft_purge,[]},
{load_module,emqx_app,brutal_purge,soft_purge,[]}]},
{"4.3.7",
- [{load_module,emqx_ws_connection,brutal_purge,soft_purge,[]},
+ [{load_module,emqx_channel,brutal_purge,soft_purge,[]},
+ {load_module,emqx_cm,brutal_purge,soft_purge,[]},
+ {load_module,emqx_ws_connection,brutal_purge,soft_purge,[]},
{load_module,emqx_alarm_handler,brutal_purge,soft_purge,[]},
{load_module,emqx_misc,brutal_purge,soft_purge,[]},
{load_module,emqx_pqueue,brutal_purge,soft_purge,[]},
@@ -24,7 +30,9 @@
{load_module,emqx_rpc,brutal_purge,soft_purge,[]},
{load_module,emqx_app,brutal_purge,soft_purge,[]}]},
{"4.3.6",
- [{load_module,emqx_ws_connection,brutal_purge,soft_purge,[]},
+ [{load_module,emqx_channel,brutal_purge,soft_purge,[]},
+ {load_module,emqx_cm,brutal_purge,soft_purge,[]},
+ {load_module,emqx_ws_connection,brutal_purge,soft_purge,[]},
{load_module,emqx_alarm_handler,brutal_purge,soft_purge,[]},
{load_module,emqx_misc,brutal_purge,soft_purge,[]},
{load_module,emqx_ctl,brutal_purge,soft_purge,[]},
@@ -34,7 +42,8 @@
{load_module,emqx_rpc,brutal_purge,soft_purge,[]},
{load_module,emqx_app,brutal_purge,soft_purge,[]}]},
{"4.3.5",
- [{load_module,emqx_ws_connection,brutal_purge,soft_purge,[]},
+ [{load_module,emqx_channel,brutal_purge,soft_purge,[]},
+ {load_module,emqx_ws_connection,brutal_purge,soft_purge,[]},
{load_module,emqx_alarm_handler,brutal_purge,soft_purge,[]},
{load_module,emqx_misc,brutal_purge,soft_purge,[]},
{load_module,emqx_cm,brutal_purge,soft_purge,[]},
@@ -46,7 +55,8 @@
{load_module,emqx_rpc,brutal_purge,soft_purge,[]},
{load_module,emqx_app,brutal_purge,soft_purge,[]}]},
{"4.3.4",
- [{load_module,emqx_ws_connection,brutal_purge,soft_purge,[]},
+ [{load_module,emqx_channel,brutal_purge,soft_purge,[]},
+ {load_module,emqx_ws_connection,brutal_purge,soft_purge,[]},
{load_module,emqx_alarm_handler,brutal_purge,soft_purge,[]},
{load_module,emqx_misc,brutal_purge,soft_purge,[]},
{load_module,emqx_cm,brutal_purge,soft_purge,[]},
@@ -59,7 +69,8 @@
{load_module,emqx_rpc,brutal_purge,soft_purge,[]},
{load_module,emqx_app,brutal_purge,soft_purge,[]}]},
{"4.3.3",
- [{load_module,emqx_alarm_handler,brutal_purge,soft_purge,[]},
+ [{load_module,emqx_channel,brutal_purge,soft_purge,[]},
+ {load_module,emqx_alarm_handler,brutal_purge,soft_purge,[]},
{load_module,emqx_misc,brutal_purge,soft_purge,[]},
{load_module,emqx_packet,brutal_purge,soft_purge,[]},
{load_module,emqx_shared_sub,brutal_purge,soft_purge,[]},
@@ -137,21 +148,27 @@
{load_module,emqx_app,brutal_purge,soft_purge,[]}]},
{<<".*">>,[]}],
[{"4.3.9",
- [{load_module,emqx_ws_connection,brutal_purge,soft_purge,[]},
+ [{load_module,emqx_channel,brutal_purge,soft_purge,[]},
+ {load_module,emqx_cm,brutal_purge,soft_purge,[]},
+ {load_module,emqx_ws_connection,brutal_purge,soft_purge,[]},
{load_module,emqx_pqueue,brutal_purge,soft_purge,[]},
{load_module,emqx_mqueue,brutal_purge,soft_purge,[]},
{load_module,emqx_frame,brutal_purge,soft_purge,[]},
{load_module,emqx_rpc,brutal_purge,soft_purge,[]},
{load_module,emqx_app,brutal_purge,soft_purge,[]}]},
{"4.3.8",
- [{load_module,emqx_ws_connection,brutal_purge,soft_purge,[]},
+ [{load_module,emqx_channel,brutal_purge,soft_purge,[]},
+ {load_module,emqx_cm,brutal_purge,soft_purge,[]},
+ {load_module,emqx_ws_connection,brutal_purge,soft_purge,[]},
{load_module,emqx_pqueue,brutal_purge,soft_purge,[]},
{load_module,emqx_mqueue,brutal_purge,soft_purge,[]},
{load_module,emqx_frame,brutal_purge,soft_purge,[]},
{load_module,emqx_rpc,brutal_purge,soft_purge,[]},
{load_module,emqx_app,brutal_purge,soft_purge,[]}]},
{"4.3.7",
- [{load_module,emqx_ws_connection,brutal_purge,soft_purge,[]},
+ [{load_module,emqx_channel,brutal_purge,soft_purge,[]},
+ {load_module,emqx_cm,brutal_purge,soft_purge,[]},
+ {load_module,emqx_ws_connection,brutal_purge,soft_purge,[]},
{load_module,emqx_pqueue,brutal_purge,soft_purge,[]},
{load_module,emqx_mqueue,brutal_purge,soft_purge,[]},
{load_module,emqx_alarm_handler,brutal_purge,soft_purge,[]},
@@ -160,7 +177,9 @@
{load_module,emqx_rpc,brutal_purge,soft_purge,[]},
{load_module,emqx_app,brutal_purge,soft_purge,[]}]},
{"4.3.6",
- [{load_module,emqx_ws_connection,brutal_purge,soft_purge,[]},
+ [{load_module,emqx_channel,brutal_purge,soft_purge,[]},
+ {load_module,emqx_cm,brutal_purge,soft_purge,[]},
+ {load_module,emqx_ws_connection,brutal_purge,soft_purge,[]},
{load_module,emqx_pqueue,brutal_purge,soft_purge,[]},
{load_module,emqx_mqueue,brutal_purge,soft_purge,[]},
{load_module,emqx_alarm_handler,brutal_purge,soft_purge,[]},
@@ -170,7 +189,8 @@
{load_module,emqx_rpc,brutal_purge,soft_purge,[]},
{load_module,emqx_app,brutal_purge,soft_purge,[]}]},
{"4.3.5",
- [{load_module,emqx_ws_connection,brutal_purge,soft_purge,[]},
+ [{load_module,emqx_channel,brutal_purge,soft_purge,[]},
+ {load_module,emqx_ws_connection,brutal_purge,soft_purge,[]},
{load_module,emqx_pqueue,brutal_purge,soft_purge,[]},
{load_module,emqx_mqueue,brutal_purge,soft_purge,[]},
{load_module,emqx_alarm_handler,brutal_purge,soft_purge,[]},
@@ -182,7 +202,8 @@
{load_module,emqx_rpc,brutal_purge,soft_purge,[]},
{load_module,emqx_app,brutal_purge,soft_purge,[]}]},
{"4.3.4",
- [{load_module,emqx_ws_connection,brutal_purge,soft_purge,[]},
+ [{load_module,emqx_channel,brutal_purge,soft_purge,[]},
+ {load_module,emqx_ws_connection,brutal_purge,soft_purge,[]},
{load_module,emqx_pqueue,brutal_purge,soft_purge,[]},
{load_module,emqx_mqueue,brutal_purge,soft_purge,[]},
{load_module,emqx_alarm_handler,brutal_purge,soft_purge,[]},
@@ -195,7 +216,8 @@
{load_module,emqx_rpc,brutal_purge,soft_purge,[]},
{load_module,emqx_app,brutal_purge,soft_purge,[]}]},
{"4.3.3",
- [{load_module,emqx_pqueue,brutal_purge,soft_purge,[]},
+ [{load_module,emqx_channel,brutal_purge,soft_purge,[]},
+ {load_module,emqx_pqueue,brutal_purge,soft_purge,[]},
{load_module,emqx_mqueue,brutal_purge,soft_purge,[]},
{load_module,emqx_alarm_handler,brutal_purge,soft_purge,[]},
{load_module,emqx_misc,brutal_purge,soft_purge,[]},
diff --git a/src/emqx_channel.erl b/src/emqx_channel.erl
index e3cbff692..7bfef472d 100644
--- a/src/emqx_channel.erl
+++ b/src/emqx_channel.erl
@@ -977,8 +977,11 @@ handle_info({sock_closed, Reason}, Channel =
Shutdown -> Shutdown
end;
-handle_info({sock_closed, Reason}, Channel = #channel{conn_state = disconnected}) ->
- ?LOG(error, "Unexpected sock_closed: ~p", [Reason]),
+handle_info({sock_closed, _Reason}, Channel = #channel{conn_state = disconnected}) ->
+ %% Since sock_closed messages can be generated multiple times,
+ %% we can simply ignore errors of this type in the disconnected state.
+ %% e.g. when the socket send function returns an error, there is already
+ %% a tcp_closed delivered to the process mailbox
{ok, Channel};
handle_info(clean_acl_cache, Channel) ->
diff --git a/src/emqx_cm.erl b/src/emqx_cm.erl
index 61982f569..23f078568 100644
--- a/src/emqx_cm.erl
+++ b/src/emqx_cm.erl
@@ -72,7 +72,7 @@
]).
%% Internal export
--export([stats_fun/0]).
+-export([stats_fun/0, clean_down/1]).
-type(chan_pid() :: pid()).
@@ -93,7 +93,9 @@
%% Server name
-define(CM, ?MODULE).
--define(T_TAKEOVER, 15000).
+-define(T_KICK, 5_000).
+-define(T_GET_INFO, 5_000).
+-define(T_TAKEOVER, 15_000).
%% @doc Start the channel manager.
-spec(start_link() -> startlink_ret()).
@@ -164,7 +166,7 @@ get_chan_info(ClientId, ChanPid) when node(ChanPid) == node() ->
error:badarg -> undefined
end;
get_chan_info(ClientId, ChanPid) ->
- rpc_call(node(ChanPid), get_chan_info, [ClientId, ChanPid]).
+ rpc_call(node(ChanPid), get_chan_info, [ClientId, ChanPid], ?T_GET_INFO).
%% @doc Update infos of the channel.
-spec(set_chan_info(emqx_types:clientid(), emqx_types:attrs()) -> boolean()).
@@ -189,7 +191,7 @@ get_chan_stats(ClientId, ChanPid) when node(ChanPid) == node() ->
error:badarg -> undefined
end;
get_chan_stats(ClientId, ChanPid) ->
- rpc_call(node(ChanPid), get_chan_stats, [ClientId, ChanPid]).
+ rpc_call(node(ChanPid), get_chan_stats, [ClientId, ChanPid], ?T_GET_INFO).
%% @doc Set channel's stats.
-spec(set_chan_stats(emqx_types:clientid(), emqx_types:stats()) -> boolean()).
@@ -257,7 +259,7 @@ takeover_session(ClientId) ->
takeover_session(ClientId, ChanPid);
ChanPids ->
[ChanPid|StalePids] = lists:reverse(ChanPids),
- ?LOG(error, "More than one channel found: ~p", [ChanPids]),
+ ?LOG(error, "more_than_one_channel_found: ~p", [ChanPids]),
lists:foreach(fun(StalePid) ->
catch discard_session(ClientId, StalePid)
end, StalePids),
@@ -269,77 +271,113 @@ takeover_session(ClientId, ChanPid) when node(ChanPid) == node() ->
undefined ->
{error, not_found};
ConnMod when is_atom(ConnMod) ->
+ %% TODO: if takeover times out, maybe kill the old?
Session = ConnMod:call(ChanPid, {takeover, 'begin'}, ?T_TAKEOVER),
{ok, ConnMod, ChanPid, Session}
end;
-
takeover_session(ClientId, ChanPid) ->
- rpc_call(node(ChanPid), takeover_session, [ClientId, ChanPid]).
+ rpc_call(node(ChanPid), takeover_session, [ClientId, ChanPid], ?T_TAKEOVER).
%% @doc Discard all the sessions identified by the ClientId.
-spec(discard_session(emqx_types:clientid()) -> ok).
discard_session(ClientId) when is_binary(ClientId) ->
case lookup_channels(ClientId) of
[] -> ok;
- ChanPids -> lists:foreach(fun(Pid) -> do_discard_session(ClientId, Pid) end, ChanPids)
+ ChanPids -> lists:foreach(fun(Pid) -> discard_session(ClientId, Pid) end, ChanPids)
end.
-do_discard_session(ClientId, Pid) ->
+%% @private Kick a local stale session to force it step down.
+%% If failed to kick (e.g. timeout) force a kill.
+%% Keeping the stale pid around, or returning error or raise an exception
+%% benefits nobody.
+-spec kick_or_kill(kick | discard, module(), pid()) -> ok.
+kick_or_kill(Action, ConnMod, Pid) ->
try
- discard_session(ClientId, Pid)
+ %% this is essentially a gen_server:call implemented in emqx_connection
+ %% and emqx_ws_connection.
+ %% the handle_call is implemented in emqx_channel
+ ok = apply(ConnMod, call, [Pid, Action, ?T_KICK])
catch
_ : noproc -> % emqx_ws_connection: call
- ?tp(debug, "session_already_gone", #{pid => Pid}),
- ok;
+ ok = ?tp(debug, "session_already_gone", #{pid => Pid, action => Action});
_ : {noproc, _} -> % emqx_connection: gen_server:call
- ?tp(debug, "session_already_gone", #{pid => Pid}),
- ok;
- _ : {'EXIT', {noproc, _}} -> % rpc_call/3
- ?tp(debug, "session_already_gone", #{pid => Pid}),
- ok;
+ ok = ?tp(debug, "session_already_gone", #{pid => Pid, action => Action});
+ _ : {shutdown, _} ->
+ ok = ?tp(debug, "session_already_shutdown", #{pid => Pid, action => Action});
_ : {{shutdown, _}, _} ->
- ?tp(debug, "session_already_shutdown", #{pid => Pid}),
- ok;
+ ok = ?tp(debug, "session_already_shutdown", #{pid => Pid, action => Action});
+ _ : {timeout, {gen_server, call, _}} ->
+ ?tp(warning, "session_kick_timeout",
+ #{pid => Pid,
+ action => Action,
+ stale_channel => stale_channel_info(Pid)
+ }),
+ ok = force_kill(Pid);
_ : Error : St ->
- ?tp(error, "failed_to_discard_session",
- #{pid => Pid, reason => Error, stacktrace=>St})
+ ?tp(error, "session_kick_exception",
+ #{pid => Pid,
+ action => Action,
+ reason => Error,
+ stacktrace => St,
+ stale_channel => stale_channel_info(Pid)
+ }),
+ ok = force_kill(Pid)
end.
-discard_session(ClientId, ChanPid) when node(ChanPid) == node() ->
- case get_chann_conn_mod(ClientId, ChanPid) of
- undefined -> ok;
- ConnMod when is_atom(ConnMod) ->
- ConnMod:call(ChanPid, discard, ?T_TAKEOVER)
- end;
+force_kill(Pid) ->
+ exit(Pid, kill),
+ ok.
+
+stale_channel_info(Pid) ->
+ process_info(Pid, [status, message_queue_len, current_stacktrace]).
discard_session(ClientId, ChanPid) ->
- rpc_call(node(ChanPid), discard_session, [ClientId, ChanPid]).
+ kick_session(discard, ClientId, ChanPid).
+
+kick_session(ClientId, ChanPid) ->
+ kick_session(kick, ClientId, ChanPid).
+
+%% @private This function is shared for session 'kick' and 'discard' (as the first arg Action).
+kick_session(Action, ClientId, ChanPid) when node(ChanPid) == node() ->
+ case get_chann_conn_mod(ClientId, ChanPid) of
+ undefined ->
+ %% already deregistered
+ ok;
+ ConnMod when is_atom(ConnMod) ->
+ ok = kick_or_kill(Action, ConnMod, ChanPid)
+ end;
+kick_session(Action, ClientId, ChanPid) ->
+ %% call remote node on the old APIs because we do not know if they have upgraded
+ %% to have kick_session/3
+ Function = case Action of
+ discard -> discard_session;
+ kick -> kick_session
+ end,
+ try
+ rpc_call(node(ChanPid), Function, [ClientId, ChanPid], ?T_KICK)
+ catch
+ Error : Reason ->
+ %% This should mostly be RPC failures.
+ %% However, if the node is still running the old version
+ %% code (prior to emqx app 4.3.10) some of the RPC handler
+ %% exceptions may get propagated to a new version node
+ ?LOG(error, "failed_to_kick_session_on_remote_node ~p: ~p ~p ~p",
+ [node(ChanPid), Action, Error, Reason])
+ end.
kick_session(ClientId) ->
case lookup_channels(ClientId) of
- [] -> {error, not_found};
- [ChanPid] ->
- kick_session(ClientId, ChanPid);
+ [] ->
+ ?LOG(warning, "kiecked_an_unknown_session ~ts", [ClientId]),
+ ok;
ChanPids ->
- [ChanPid|StalePids] = lists:reverse(ChanPids),
- ?LOG(error, "More than one channel found: ~p", [ChanPids]),
- lists:foreach(fun(StalePid) ->
- catch discard_session(ClientId, StalePid)
- end, StalePids),
- kick_session(ClientId, ChanPid)
+ case length(ChanPids) > 1 of
+ true -> ?LOG(info, "more_than_one_channel_found: ~p", [ChanPids]);
+ false -> ok
+ end,
+ lists:foreach(fun(Pid) -> kick_session(ClientId, Pid) end, ChanPids)
end.
-kick_session(ClientId, ChanPid) when node(ChanPid) == node() ->
- case get_chan_info(ClientId, ChanPid) of
- #{conninfo := #{conn_mod := ConnMod}} ->
- ConnMod:call(ChanPid, kick, ?T_TAKEOVER);
- undefined ->
- {error, not_found}
- end;
-
-kick_session(ClientId, ChanPid) ->
- rpc_call(node(ChanPid), kick_session, [ClientId, ChanPid]).
-
%% @doc Is clean start?
% is_clean_start(#{clean_start := false}) -> false;
% is_clean_start(_Attrs) -> true.
@@ -375,10 +413,16 @@ lookup_channels(local, ClientId) ->
[ChanPid || {_, ChanPid} <- ets:lookup(?CHAN_TAB, ClientId)].
%% @private
-rpc_call(Node, Fun, Args) ->
- case rpc:call(Node, ?MODULE, Fun, Args, 2 * ?T_TAKEOVER) of
- {badrpc, Reason} -> error(Reason);
- Res -> Res
+rpc_call(Node, Fun, Args, Timeout) ->
+ case rpc:call(Node, ?MODULE, Fun, Args, 2 * Timeout) of
+ {badrpc, Reason} ->
+ %% since emqx app 4.3.10, the 'kick' and 'discard' call handlers
+ %% should catch all exceptions and always return 'ok'.
+ %% This leaves 'badrpc' only possible when there is problem
+ %% calling the remote node.
+ error({badrpc, Reason});
+ Res ->
+ Res
end.
%% @private
@@ -411,7 +455,7 @@ handle_cast(Msg, State) ->
handle_info({'DOWN', _MRef, process, Pid, _Reason}, State = #{chan_pmon := PMon}) ->
ChanPids = [Pid | emqx_misc:drain_down(?BATCH_SIZE)],
{Items, PMon1} = emqx_pmon:erase_all(ChanPids, PMon),
- ok = emqx_pool:async_submit(fun lists:foreach/2, [fun clean_down/1, Items]),
+ ok = emqx_pool:async_submit(fun lists:foreach/2, [fun ?MODULE:clean_down/1, Items]),
{noreply, State#{chan_pmon := PMon1}};
handle_info(Info, State) ->
@@ -447,5 +491,5 @@ get_chann_conn_mod(ClientId, ChanPid) when node(ChanPid) == node() ->
error:badarg -> undefined
end;
get_chann_conn_mod(ClientId, ChanPid) ->
- rpc_call(node(ChanPid), get_chann_conn_mod, [ClientId, ChanPid]).
+ rpc_call(node(ChanPid), get_chann_conn_mod, [ClientId, ChanPid], ?T_GET_INFO).
diff --git a/test/emqx_cm_SUITE.erl b/test/emqx_cm_SUITE.erl
index 3c891240a..acafeb36f 100644
--- a/test/emqx_cm_SUITE.erl
+++ b/test/emqx_cm_SUITE.erl
@@ -32,6 +32,12 @@
conn_mod => emqx_connection,
receive_maximum => 100}}).
+-define(WAIT(PATTERN, TIMEOUT, RET),
+ fun() ->
+ receive PATTERN -> RET
+ after TIMEOUT -> error({timeout, ?LINE}) end
+ end()).
+
%%--------------------------------------------------------------------
%% CT callbacks
%%--------------------------------------------------------------------
@@ -180,25 +186,95 @@ t_open_session_race_condition(_) ->
ignored = gen_server:call(emqx_cm, ignore, infinity), %% sync
?assertEqual([], emqx_cm:lookup_channels(ClientId)).
-t_discard_session(_) ->
+t_kick_session_discard_normal(_) ->
+ test_kick_session(discard, normal).
+
+t_kick_session_discard_shutdown(_) ->
+ test_kick_session(discard, shutdown).
+
+t_kick_session_discard_shutdown_with_reason(_) ->
+ test_kick_session(discard, {shutdown, discard}).
+
+t_kick_session_discard_timeout(_) ->
+ test_kick_session(discard, timeout).
+
+t_kick_session_discard_noproc(_) ->
+ test_kick_session(discard, noproc).
+
+t_kick_session_kick_normal(_) ->
+ test_kick_session(discard, normal).
+
+t_kick_session_kick_shutdown(_) ->
+ test_kick_session(discard, shutdown).
+
+t_kick_session_kick_shutdown_with_reason(_) ->
+ test_kick_session(discard, {shutdown, discard}).
+
+t_kick_session_kick_timeout(_) ->
+ test_kick_session(discard, timeout).
+
+t_kick_session_kick_noproc(_) ->
+ test_kick_session(discard, noproc).
+
+test_kick_session(Action, Reason) ->
ClientId = rand_client_id(),
#{conninfo := ConnInfo} = ?ChanInfo,
- ok = emqx_cm:register_channel(ClientId, self(), ConnInfo),
+ FakeSessionFun =
+ fun Loop() ->
+ receive
+ {'$gen_call', From, A} when A =:= kick orelse
+ A =:= discard ->
+ case Reason of
+ normal ->
+ gen_server:reply(From, ok);
+ timeout ->
+ %% no response to the call
+ Loop();
+ _ ->
+ exit(Reason)
+ end;
+ Msg ->
+ ct:pal("(~p) fake_session_discarded ~p", [Action, Msg]),
+ Loop()
+ end
+ end,
+ {Pid1, _} = spawn_monitor(FakeSessionFun),
+ {Pid2, _} = spawn_monitor(FakeSessionFun),
+ ok = emqx_cm:register_channel(ClientId, Pid1, ConnInfo),
+ ok = emqx_cm:register_channel(ClientId, Pid1, ConnInfo),
+ ok = emqx_cm:register_channel(ClientId, Pid2, ConnInfo),
+ ?assertEqual([Pid1, Pid2], lists:sort(emqx_cm:lookup_channels(ClientId))),
+ case Reason of
+ noproc -> exit(Pid1, kill), exit(Pid2, kill);
+ _ -> ok
+ end,
+ ok = case Action of
+ kick -> emqx_cm:kick_session(ClientId);
+ discard -> emqx_cm:discard_session(ClientId)
+ end,
+ case Reason =:= timeout orelse Reason =:= noproc of
+ true ->
+ ?assertEqual(killed, ?WAIT({'DOWN', _, process, Pid1, R}, 2_000, R)),
+ ?assertEqual(killed, ?WAIT({'DOWN', _, process, Pid2, R}, 2_000, R));
+ false ->
+ ?assertEqual(Reason, ?WAIT({'DOWN', _, process, Pid1, R}, 2_000, R)),
+ ?assertEqual(Reason, ?WAIT({'DOWN', _, process, Pid2, R}, 2_000, R))
+ end,
+ ok = flush_emqx_pool(),
+ ?assertEqual([], emqx_cm:lookup_channels(ClientId)).
- ok = meck:new(emqx_connection, [passthrough, no_history]),
- ok = meck:expect(emqx_connection, call, fun(_, _) -> ok end),
- ok = meck:expect(emqx_connection, call, fun(_, _, _) -> ok end),
- ok = emqx_cm:discard_session(ClientId),
- ok = emqx_cm:register_channel(ClientId, self(), ConnInfo),
- ok = emqx_cm:discard_session(ClientId),
- ok = emqx_cm:unregister_channel(ClientId),
- ok = emqx_cm:register_channel(ClientId, self(), ConnInfo),
- ok = emqx_cm:discard_session(ClientId),
- ok = meck:expect(emqx_connection, call, fun(_, _) -> error(testing) end),
- ok = meck:expect(emqx_connection, call, fun(_, _, _) -> error(testing) end),
- ok = emqx_cm:discard_session(ClientId),
- ok = emqx_cm:unregister_channel(ClientId),
- ok = meck:unload(emqx_connection).
+%% Channel deregistration is delegated to emqx_pool as a sync tasks.
+%% The emqx_pool is pool of workers, and there is no way to know
+%% which worker was picked for the last deregistration task.
+%% This help function creates a large enough number of async tasks
+%% to sync with the pool workers.
+%% The number of tasks should be large enough to ensure all workers have
+%% the chance to work on at least one of the tasks.
+flush_emqx_pool() ->
+ Self = self(),
+ L = lists:seq(1, 1000),
+ lists:foreach(fun(I) -> emqx_pool:async_submit(fun() -> Self ! {done, I} end, []) end, L),
+ lists:foreach(fun(I) -> receive {done, I} -> ok end end, L).
t_discard_session_race(_) ->
ClientId = rand_client_id(),
@@ -231,27 +307,6 @@ t_takeover_session(_) ->
{ok, emqx_connection, _, test} = emqx_cm:takeover_session(<<"clientid">>),
emqx_cm:unregister_channel(<<"clientid">>).
-t_kick_session(_) ->
- Info = #{conninfo := ConnInfo} = ?ChanInfo,
- ok = meck:new(emqx_connection, [passthrough, no_history]),
- ok = meck:expect(emqx_connection, call, fun(_, _) -> test end),
- ok = meck:expect(emqx_connection, call, fun(_, _, _) -> test end),
- {error, not_found} = emqx_cm:kick_session(<<"clientid">>),
- ok = emqx_cm:register_channel(<<"clientid">>, self(), ConnInfo),
- ok = emqx_cm:insert_channel_info(<<"clientid">>, Info, []),
- test = emqx_cm:kick_session(<<"clientid">>),
- erlang:spawn_link(
- fun() ->
- ok = emqx_cm:register_channel(<<"clientid">>, self(), ConnInfo),
- ok = emqx_cm:insert_channel_info(<<"clientid">>, Info, []),
-
- timer:sleep(1000)
- end),
- ct:sleep(100),
- test = emqx_cm:kick_session(<<"clientid">>),
- ok = emqx_cm:unregister_channel(<<"clientid">>),
- ok = meck:unload(emqx_connection).
-
t_all_channels(_) ->
?assertEqual(true, is_list(emqx_cm:all_channels())).