feature(project): change to rebar3 umbrella project
This commit is contained in:
parent
7bea9920b5
commit
151d820058
181
Makefile
181
Makefile
|
@ -1,139 +1,56 @@
|
|||
## rebar3 is expected to sit next to this Makefile in the repo root.
REBAR = $(CURDIR)/rebar3

## Shallow-clone git dependencies for speed.
REBAR_GIT_CLONE_OPTIONS += --depth 1
export REBAR_GIT_CLONE_OPTIONS

## Release profiles; PROFILE may be overridden on the command line.
PROFILE ?= emqx
PROFILES := emqx emqx-edge
PKG_PROFILES := emqx-pkg emqx-edge-pkg

## All common-test suite files, sorted for a stable suite list.
SUITES_FILES := $(shell find test -name '*_SUITE.erl' | sort)

## Version defaults to the latest git tag unless supplied by the caller.
## Guarded `:=` so the escript runs at most once — the previous
## `export DEFAULT_VSN ?= $(shell ...)` was recursive and re-ran the
## escript on every expansion of DEFAULT_VSN.
ifndef DEFAULT_VSN
DEFAULT_VSN := $(shell ./get-lastest-tag.escript)
endif
export DEFAULT_VSN

## Strip a leading 'v' (opensource) or 'e' (enterprise) tag prefix.
ifneq ($(shell echo $(DEFAULT_VSN) | grep -oE "^[ev0-9]+\.[0-9]+(\.[0-9]+)?"),)
export PKG_VSN := $(patsubst v%,%,$(patsubst e%,%,$(DEFAULT_VSN)))
else
export PKG_VSN := $(patsubst v%,%,$(DEFAULT_VSN))
endif

## Suite names derived from the *_SUITE.erl file names (basename, stem).
CT_SUITES := $(foreach value,$(SUITES_FILES),$(shell val=$$(basename $(value) .erl); echo $${val%_*}))

CT_NODE_NAME = emqxct@127.0.0.1
RUN_NODE_NAME = emqxdebug@127.0.0.1
||||
.PHONY: default
## Default goal: build the selected release profile.
default: $(REBAR) $(PROFILE)

.PHONY: all
all: compile

.PHONY: tests
tests: eunit ct

.PHONY: run
## Start a development node with auto-reload (rebar3_auto plugin).
## Uses $(REBAR) — the repo-local rebar3 — instead of whatever
## `rebar3` happens to be first on PATH, for consistency with the
## rest of this Makefile.
run: run_setup unlock
	@$(REBAR) as test get-deps
	@$(REBAR) as test auto --name $(RUN_NODE_NAME) --script scripts/run_emqx.escript
|
||||
|
||||
.PHONY: run_setup
## Ensure the rebar3_auto plugin is listed first in the user's global
## rebar3 config (~/.config/rebar3/rebar.config) so `make run` can
## hot-reload code. The embedded Erlang snippet either (a) prepends
## rebar3_auto to an existing {plugins, ...} list (de-duplicating it
## first), or (b) creates the config file from scratch when consult
## fails (file missing).
run_setup:
	@erl -noshell -eval \
	"{ok, [[HOME]]} = init:get_argument(home), \
	FilePath = HOME ++ \"/.config/rebar3/rebar.config\", \
	case file:consult(FilePath) of \
	{ok, Term} -> \
	NewTerm = case lists:keyfind(plugins, 1, Term) of \
	false -> [{plugins, [rebar3_auto]} | Term]; \
	{plugins, OldPlugins} -> \
	NewPlugins0 = OldPlugins -- [rebar3_auto], \
	NewPlugins = [rebar3_auto | NewPlugins0], \
	lists:keyreplace(plugins, 1, Term, {plugins, NewPlugins}) \
	end, \
	ok = file:write_file(FilePath, [io_lib:format(\"~p.\n\", [I]) || I <- NewTerm]); \
	_Enoent -> \
	os:cmd(\"mkdir -p ~/.config/rebar3/ \"), \
	NewTerm=[{plugins, [rebar3_auto]}], \
	ok = file:write_file(FilePath, [io_lib:format(\"~p.\n\", [I]) || I <- NewTerm]) \
	end, \
	halt(0)."
|
||||
|
||||
.PHONY: shell
## Interactive dev shell with auto-reload.
shell:
	@$(REBAR) as test auto

.PHONY: compile
compile: unlock
	@$(REBAR) compile

.PHONY: unlock
## Drop rebar.lock so profile switches don't pin stale deps.
unlock:
	@$(REBAR) unlock

## `clean` also wipes generated/cloned artifacts via distclean.
clean: distclean

## Cuttlefish escript is built as a side effect of building the
## cuttlefish dependency.
CUTTLEFISH_SCRIPT := _build/default/lib/cuttlefish/cuttlefish

.PHONY: cover
cover:
	@$(REBAR) cover

.PHONY: coveralls
coveralls:
	@$(REBAR) as test coveralls send

.PHONY: xref
xref:
	@$(REBAR) xref

.PHONY: dialyzer
dialyzer:
	@$(REBAR) dialyzer
|
||||
|
||||
.PHONY: proper
proper:
	@$(REBAR) proper -d test/props -c

.PHONY: deps
deps:
	@$(REBAR) get-deps

.PHONY: eunit
eunit:
	@$(REBAR) eunit -v

.PHONY: ct_setup
## Compile the test profile and symlink etc/ and data/ into the build
## dir so suites resolve config/data paths relative to the emqx app.
ct_setup:
	$(REBAR) as test compile
	@mkdir -p data
	@if [ ! -f data/loaded_plugins ]; then touch data/loaded_plugins; fi
	@ln -s -f '../../../../etc' _build/test/lib/emqx/
	@ln -s -f '../../../../data' _build/test/lib/emqx/

.PHONY: ct
ct: ct_setup
	@$(REBAR) ct -v --name $(CT_NODE_NAME) --suite=$(shell echo $(foreach var,$(CT_SUITES),test/$(var)_SUITE) | tr ' ' ',')

## Run one single CT suite, e.g. `make ct-emqx_bridge`.
## NOTE: previously declared `.PHONY: $(SUITES:%=ct-%)` — SUITES is
## never defined; the rule below uses CT_SUITES, so the phony list
## must use the same variable or the targets are not phony at all.
.PHONY: $(CT_SUITES:%=ct-%)
$(CT_SUITES:%=ct-%): ct_setup
	@$(REBAR) ct -v --readable=false --name $(CT_NODE_NAME) --suite=$(@:ct-%=%)_SUITE --cover
|
||||
|
||||
.PHONY: app.config
## Generate app.config/vm.args for local testing via cuttlefish.
app.config: $(CUTTLEFISH_SCRIPT) etc/gen.emqx.conf
	$(CUTTLEFISH_SCRIPT) -l info -e etc/ -c etc/gen.emqx.conf -i priv/emqx.schema -d data/

$(CUTTLEFISH_SCRIPT):
	@$(REBAR) get-deps
	@if [ ! -f cuttlefish ]; then $(MAKE) -C _build/default/lib/cuttlefish; fi

.PHONY: bbmustache
## Fetch + build bbmustache once; `git clone` into an existing
## directory fails, so skip the clone when the checkout is already
## there (use `make gen-clean` to force a fresh clone). Compile is
## idempotent and always runs.
bbmustache:
	@if [ ! -d bbmustache ]; then git clone https://github.com/soranoba/bbmustache.git; fi
	@cd bbmustache && ./rebar3 compile && cd ..

# This hack is to generate a conf file for testing;
# relx overlay is used for the real release.
etc/gen.emqx.conf: bbmustache etc/emqx.conf
	@erl -noshell -pa bbmustache/_build/default/lib/bbmustache/ebin -eval \
	"{ok, Temp} = file:read_file('etc/emqx.conf'), \
	{ok, Vars0} = file:consult('vars'), \
	Vars = [{atom_to_list(N), list_to_binary(V)} || {N, V} <- Vars0], \
	Targ = bbmustache:render(Temp, Vars), \
	ok = file:write_file('etc/gen.emqx.conf', Targ), \
	halt(0)."

.PHONY: gen-clean
gen-clean:
	@rm -rf bbmustache
	@rm -f etc/gen.emqx.conf etc/emqx.conf.rendered
|
||||
all: $(REBAR) $(PROFILES)

.PHONY: distclean
## Single distclean rule. Previously a second `distclean:` rule also
## carried a recipe (`@rm -rf _build`), which makes GNU make warn about
## an overridden recipe; _build removal is already covered here.
distclean: gen-clean
	@rm -rf Mnesia.*
	@rm -rf _build cover deps logs log data
	@rm -f rebar.lock compile_commands.json cuttlefish erl_crash.dump

## EMQX_DESC as a target-specific exported variable. The previous
## parse-time `ifneq ($(shell echo $(@) | grep edge),)` could never
## match: automatic variables like $@ are empty while the makefile is
## being read, and an `export VAR=...` recipe line only affects its own
## sub-shell, never the following recipe line.
PROFILE_TARGETS := $(PROFILES) $(PROFILES:%=deps-%) $(PKG_PROFILES:%=deps-%)
$(filter %edge %edge-pkg,$(PROFILE_TARGETS)): export EMQX_DESC = EMQ X Edge
$(filter-out %edge %edge-pkg,$(PROFILE_TARGETS)): export EMQX_DESC = EMQ X Broker

.PHONY: $(PROFILES)
$(PROFILES): $(REBAR)
	$(REBAR) as $(@) release

.PHONY: $(PROFILES:%=build-%)
$(PROFILES:%=build-%): $(REBAR)
	$(REBAR) as $(@:build-%=%) compile

.PHONY: clean $(PROFILES:%=clean-%)
clean: $(PROFILES:%=clean-%)
$(PROFILES:%=clean-%): $(REBAR)
	@rm -rf _build/$(@:clean-%=%)
	@rm -rf _build/$(@:clean-%=%)+test

.PHONY: deps-all
deps-all: $(REBAR) $(PROFILES:%=deps-%) $(PKG_PROFILES:%=deps-%)

.PHONY: $(PROFILES:%=deps-%) $(PKG_PROFILES:%=deps-%)
$(PROFILES:%=deps-%) $(PKG_PROFILES:%=deps-%): $(REBAR)
	$(REBAR) as $(@:deps-%=%) get-deps

include packages.mk
include docker.mk
|
||||
|
|
|
@ -0,0 +1,636 @@
|
|||
#!/bin/sh
# -*- tab-width:4;indent-tabs-mode:nil -*-
# ex: ts=4 sw=4 et

set -e

# Resolve the real installation root even when $0 is a symlink.
# Quoted throughout so an install path containing spaces does not
# word-split (the previous unquoted $(dirname $(readlink $0 ...)) did).
ROOT_DIR="$(cd "$(dirname "$(readlink "$0" || echo "$0")")"/..; pwd -P)"
. "$ROOT_DIR/releases/emqx_vars"

RUNNER_SCRIPT="$RUNNER_BIN_DIR/$REL_NAME"
CODE_LOADING_MODE="${CODE_LOADING_MODE:-embedded}"
REL_DIR="$RUNNER_ROOT_DIR/releases/$REL_VSN"

WHOAMI=$(whoami)

# Make sure log directory exists
mkdir -p "$RUNNER_LOG_DIR"

# Make sure data directory exists
mkdir -p "$RUNNER_DATA_DIR"
|
||||
|
||||
relx_usage() {
|
||||
command="$1"
|
||||
|
||||
case "$command" in
|
||||
unpack)
|
||||
echo "Usage: $REL_NAME unpack [VERSION]"
|
||||
echo "Unpacks a release package VERSION, it assumes that this"
|
||||
echo "release package tarball has already been deployed at one"
|
||||
echo "of the following locations:"
|
||||
echo " releases/<relname>-<version>.tar.gz"
|
||||
echo " releases/<relname>-<version>.zip"
|
||||
;;
|
||||
install)
|
||||
echo "Usage: $REL_NAME install [VERSION]"
|
||||
echo "Installs a release package VERSION, it assumes that this"
|
||||
echo "release package tarball has already been deployed at one"
|
||||
echo "of the following locations:"
|
||||
echo " releases/<relname>-<version>.tar.gz"
|
||||
echo " releases/<relname>-<version>.zip"
|
||||
echo ""
|
||||
echo " --no-permanent Install release package VERSION but"
|
||||
echo " don't make it permanent"
|
||||
;;
|
||||
uninstall)
|
||||
echo "Usage: $REL_NAME uninstall [VERSION]"
|
||||
echo "Uninstalls a release VERSION, it will only accept"
|
||||
echo "versions that are not currently in use"
|
||||
;;
|
||||
upgrade)
|
||||
echo "Usage: $REL_NAME upgrade [VERSION]"
|
||||
echo "Upgrades the currently running release to VERSION, it assumes"
|
||||
echo "that a release package tarball has already been deployed at one"
|
||||
echo "of the following locations:"
|
||||
echo " releases/<relname>-<version>.tar.gz"
|
||||
echo " releases/<relname>-<version>.zip"
|
||||
echo ""
|
||||
echo " --no-permanent Install release package VERSION but"
|
||||
echo " don't make it permanent"
|
||||
;;
|
||||
downgrade)
|
||||
echo "Usage: $REL_NAME downgrade [VERSION]"
|
||||
echo "Downgrades the currently running release to VERSION, it assumes"
|
||||
echo "that a release package tarball has already been deployed at one"
|
||||
echo "of the following locations:"
|
||||
echo " releases/<relname>-<version>.tar.gz"
|
||||
echo " releases/<relname>-<version>.zip"
|
||||
echo ""
|
||||
echo " --no-permanent Install release package VERSION but"
|
||||
echo " don't make it permanent"
|
||||
;;
|
||||
*)
|
||||
echo "Usage: $REL_NAME {start|start_boot <file>|ertspath|foreground|stop|restart|reboot|pid|ping|console|console_clean|console_boot <file>|attach|remote_console|upgrade|downgrade|install|uninstall|versions|escript|rpc|rpcterms|eval|root_dir}"
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
# Simple way to check the correct user and fail early
check_user() {
    # Validate that the user running the script is the owner of the
    # RUN_DIR.
    if ([ "$RUNNER_USER" ] && [ "x$WHOAMI" != "x$RUNNER_USER" ]); then
        if [ "x$WHOAMI" != "xroot" ]; then
            echo "You need to be root or use sudo to run this command"
            exit 1
        fi
        CMD="\"$RUNNER_SCRIPT\" "
        for ARG in "$@"; do
            CMD="${CMD} \"$ARG\""
        done
        # This will drop privileges into the runner user.
        # It exec's in a new shell and the current shell will exit.
        exec su - $RUNNER_USER -c "$CMD"
    fi
}

# Make sure the user running this script is the owner and/or su to
# that user. "$@" is quoted so arguments containing whitespace are
# passed through intact (unquoted $@ re-splits them).
check_user "$@"
ES=$?
if [ "$ES" -ne 0 ]; then
    exit $ES
fi
|
||||
|
||||
if [ -z "$WITH_EPMD" ]; then
|
||||
EPMD_ARG="-start_epmd false -epmd_module ekka_epmd -proto_dist ekka"
|
||||
else
|
||||
EPMD_ARG="-start_epmd true $PROTO_DIST_ARG"
|
||||
fi
|
||||
|
||||
# Warn the user if ulimit -n is less than 1024
|
||||
ULIMIT_F=`ulimit -n`
|
||||
if [ "$ULIMIT_F" -lt 1024 ]; then
|
||||
echo "!!!!"
|
||||
echo "!!!! WARNING: ulimit -n is ${ULIMIT_F}; 1024 is the recommended minimum."
|
||||
echo "!!!!"
|
||||
fi
|
||||
|
||||
# Echo to stderr on errors
|
||||
echoerr() { echo "$@" 1>&2; }
|
||||
|
||||
# By default, use cuttlefish to generate app.config and vm.args
|
||||
CUTTLEFISH="${USE_CUTTLEFISH:-yes}"
|
||||
|
||||
SED_REPLACE="sed -i "
|
||||
case $(sed --help 2>&1) in
|
||||
*GNU*) SED_REPLACE="sed -i ";;
|
||||
*BusyBox*) SED_REPLACE="sed -i ";;
|
||||
*) SED_REPLACE="sed -i '' ";;
|
||||
esac
|
||||
|
||||
# Get node pid
|
||||
relx_get_pid() {
|
||||
if output="$(relx_nodetool rpcterms os getpid)"
|
||||
then
|
||||
echo "$output" | sed -e 's/"//g'
|
||||
return 0
|
||||
else
|
||||
echo "$output"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
relx_get_nodename() {
|
||||
id="longname$(relx_gen_id)-${NAME}"
|
||||
"$BINDIR/erl" -boot start_clean -eval '[Host] = tl(string:tokens(atom_to_list(node()),"@")), io:format("~s~n", [Host]), halt()' -noshell ${NAME_TYPE} $id
|
||||
}
|
||||
|
||||
# Connect to a remote node
|
||||
relx_rem_sh() {
|
||||
# Generate a unique id used to allow multiple remsh to the same node
|
||||
# transparently
|
||||
id="remsh$(relx_gen_id)-${NAME}"
|
||||
# Get the node's ticktime so that we use the same thing.
|
||||
TICKTIME="$(relx_nodetool rpcterms net_kernel get_net_ticktime)"
|
||||
|
||||
# Setup remote shell command to control node
|
||||
exec "$BINDIR/erl" "$NAME_TYPE" "$id" -remsh "$NAME" -boot start_clean \
|
||||
-boot_var ERTS_LIB_DIR "$ERTS_LIB_DIR" \
|
||||
-setcookie "$COOKIE" -hidden -kernel net_ticktime $TICKTIME $EPMD_ARG
|
||||
}
|
||||
|
||||
# Generate a random id
|
||||
relx_gen_id() {
|
||||
od -t x -N 4 /dev/urandom | head -n1 | awk '{print $2}'
|
||||
}
|
||||
|
||||
# Control a node
relx_nodetool() {
    command="$1"; shift

    # "$@" quoted so extra nodetool arguments containing spaces or
    # shell metacharacters reach escript as single arguments
    # (unquoted $@ re-splits and glob-expands them).
    ERL_FLAGS="$ERL_FLAGS $EPMD_ARG" \
    "$ERTS_DIR/bin/escript" "$ROOTDIR/bin/nodetool" "$NAME_TYPE" "$NAME" \
        -setcookie "$COOKIE" "$command" "$@"
}

# Run an escript in the node's environment
relx_escript() {
    shift; scriptpath="$1"; shift
    export RUNNER_ROOT_DIR

    # "$@" quoted for the same reason as in relx_nodetool above.
    "$ERTS_DIR/bin/escript" "$ROOTDIR/$scriptpath" "$@"
}
|
||||
|
||||
# Output a start command for the last argument of run_erl
|
||||
relx_start_command() {
|
||||
printf "exec \"%s\" \"%s\"" "$RUNNER_SCRIPT" \
|
||||
"$START_OPTION"
|
||||
}
|
||||
|
||||
# Function to generate app.config and vm.args
generate_config() {
    ## Delete the *.siz files first or it can't start after
    ## changing the config 'log.rotation.size'
    rm -rf "${RUNNER_LOG_DIR}"/*.siz

    if [ "$CUTTLEFISH" != "yes" ]; then
        # Note: we have added a parameter '-vm_args' to this. It
        # appears redundant but it is not! the erlang vm allows us to
        # access all arguments to the erl command EXCEPT '-args_file',
        # so in order to get access to this file location from within
        # the vm, we need to pass it in twice.
        CONFIG_ARGS=" -config $RUNNER_ETC_DIR/app.config -args_file $RUNNER_ETC_DIR/vm.args -vm_args $RUNNER_ETC_DIR/vm.args "
    else
        CONFIG_ARGS=`$ERTS_PATH/escript $RUNNER_ROOT_DIR/bin/cuttlefish -i $REL_DIR/emqx.schema -c $RUNNER_ETC_DIR/emqx.conf -d $RUNNER_DATA_DIR/configs generate`

        ## Merge cuttlefish generated *.args into the vm.args
        CUTTLE_GEN_ARG_FILE=`echo "$CONFIG_ARGS" | sed -n 's/^.*\(vm_args[[:space:]]\)//p' | awk '{print $1}'`
        TMP_ARG_FILE="$RUNNER_DATA_DIR/configs/vm.args.tmp"
        cp "$RUNNER_ETC_DIR/vm.args" "$TMP_ARG_FILE"
        echo "" >> "$TMP_ARG_FILE"
        sed '/^#/d' $CUTTLE_GEN_ARG_FILE | sed '/^$/d' | while IFS='' read -r ARG_LINE || [ -n "$ARG_LINE" ]; do
            ARG_KEY=`echo "$ARG_LINE" | awk '{$NF="";print}'`
            ARG_VALUE=`echo "$ARG_LINE" | awk '{print $NF}'`
            TMP_ARG_VALUE=`grep "^$ARG_KEY" "$TMP_ARG_FILE" | awk '{print $NF}'`
            if [ "$ARG_VALUE" != "$TMP_ARG_VALUE" ] ; then
                # Quoted test: the previous unquoted
                # `[ ! -z $TMP_ARG_VALUE ]` breaks when grep matched
                # multiple lines (multi-word value) and is fragile for
                # empty values.
                if [ -n "$TMP_ARG_VALUE" ]; then
                    sh -c "$SED_REPLACE 's/^$ARG_KEY.*$/$ARG_LINE/' $TMP_ARG_FILE"
                else
                    echo "$ARG_LINE" >> "$TMP_ARG_FILE"
                fi
            fi
        done
        mv -f "$TMP_ARG_FILE" "$CUTTLE_GEN_ARG_FILE"
    fi

    if ! relx_nodetool chkconfig $CONFIG_ARGS; then
        echoerr "Error reading $CONFIG_ARGS"
        exit 1
    fi
}
|
||||
|
||||
# Call bootstrapd for daemon commands like start/stop/console
|
||||
bootstrapd() {
|
||||
if [ -e "$RUNNER_DATA_DIR/.erlang.cookie" ]; then
|
||||
chown $RUNNER_USER $RUNNER_DATA_DIR/.erlang.cookie
|
||||
fi
|
||||
}
|
||||
|
||||
# Use $CWD/etc/sys.config if exists
|
||||
if [ -z "$RELX_CONFIG_PATH" ]; then
|
||||
if [ -f "$RUNNER_ETC_DIR/sys.config" ]; then
|
||||
RELX_CONFIG_PATH="-config $RUNNER_ETC_DIR/sys.config"
|
||||
else
|
||||
RELX_CONFIG_PATH=""
|
||||
fi
|
||||
fi
|
||||
|
||||
# Extract the target node name from node.args
if [ -z "$NAME_ARG" ]; then
    if [ ! -z "$EMQX_NODE_NAME" ]; then
        NODENAME="$EMQX_NODE_NAME"
    else
        # Scan for a running beam.smp once and reuse the result —
        # the previous code ran the identical ps|grep|awk pipeline
        # twice (once in the test, once in the assignment) and used
        # its output unquoted inside `[ ! -z ... ]`.
        RUNNING_NODENAME=$(ps -ef | grep "$ERTS_PATH/beam.smp" | grep -o -E '\-name (\S*)' | awk '{print $2}')
        if [ ! -z "$RUNNING_NODENAME" ]; then
            NODENAME="$RUNNING_NODENAME"
        else
            NODENAME=`egrep '^[ \t]*node.name[ \t]*=[ \t]*' "$RUNNER_ETC_DIR/emqx.conf" 2> /dev/null | tail -1 | cut -d = -f 2-`
        fi
    fi
    if [ -z "$NODENAME" ]; then
        echoerr "vm.args needs to have a -name parameter."
        echoerr "  -sname is not supported."
        echoerr "perhaps you do not have read permissions on $RUNNER_ETC_DIR/emqx.conf"
        exit 1
    else
        NAME_ARG="-name ${NODENAME# *}"
    fi
fi
|
||||
|
||||
# Extract the name type and name from the NAME_ARG for REMSH
|
||||
NAME_TYPE="$(echo "$NAME_ARG" | awk '{print $1}')"
|
||||
NAME="$(echo "$NAME_ARG" | awk '{print $2}')"
|
||||
|
||||
PIPE_DIR="${PIPE_DIR:-/$RUNNER_DATA_DIR/${WHOAMI}_erl_pipes/$NAME/}"
|
||||
|
||||
# Extract the target cookie
|
||||
if [ -z "$COOKIE_ARG" ]; then
|
||||
if [ ! -z "$EMQX_NODE_COOKIE" ]; then
|
||||
COOKIE="$EMQX_NODE_COOKIE"
|
||||
elif [ ! -z `ps -ef | grep "$ERTS_PATH/beam.smp" | grep -o -E '\-setcookie (\S*)' | awk '{print $2}'` ]; then
|
||||
COOKIE=`ps -ef | grep "$ERTS_PATH/beam.smp" | grep -o -E '\-setcookie (\S*)' | awk '{print $2}'`
|
||||
else
|
||||
COOKIE=`egrep '^[ \t]*node.cookie[ \t]*=[ \t]*' "$RUNNER_ETC_DIR/emqx.conf" 2> /dev/null | tail -1 | cut -d = -f 2-`
|
||||
fi
|
||||
if [ -z "$COOKIE" ]; then
|
||||
echoerr "vm.args needs to have a -setcookie parameter."
|
||||
echoerr "please check $RUNNER_ETC_DIR/emqx.conf"
|
||||
exit 1
|
||||
else
|
||||
COOKIE_ARG="-setcookie $COOKIE"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Extract cookie name from COOKIE_ARG
|
||||
COOKIE="$(echo "$COOKIE_ARG" | awk '{print $2}')"
|
||||
|
||||
# Support for IPv6 Dist. See: https://github.com/emqtt/emqttd/issues/1460
|
||||
PROTO_DIST=`egrep '^[ \t]*cluster.proto_dist[ \t]*=[ \t]*' "$RUNNER_ETC_DIR/emqx.conf" 2> /dev/null | tail -1 | cut -d = -f 2-`
|
||||
if [ -z "$PROTO_DIST" ]; then
|
||||
PROTO_DIST_ARG=""
|
||||
else
|
||||
PROTO_DIST_ARG="-proto_dist $PROTO_DIST"
|
||||
fi
|
||||
|
||||
export ROOTDIR="$RUNNER_ROOT_DIR"
|
||||
export ERTS_DIR="$ROOTDIR/erts-$ERTS_VSN"
|
||||
export BINDIR="$ERTS_DIR/bin"
|
||||
export EMU="beam"
|
||||
export PROGNAME="erl"
|
||||
export LD_LIBRARY_PATH="$ERTS_DIR/lib:$LD_LIBRARY_PATH"
|
||||
ERTS_LIB_DIR="$ERTS_DIR/../lib"
|
||||
MNESIA_DATA_DIR="$RUNNER_DATA_DIR/mnesia/$NAME"
|
||||
|
||||
cd "$ROOTDIR"
|
||||
|
||||
# User can specify an sname without @hostname
|
||||
# This will fail when creating remote shell
|
||||
# So here we check for @ and add @hostname if missing
|
||||
case $NAME in
|
||||
*@*)
|
||||
# Nothing to do
|
||||
;;
|
||||
*)
|
||||
NAME=$NAME@$(relx_get_nodename)
|
||||
;;
|
||||
esac
|
||||
|
||||
# Check the first argument for instructions
|
||||
case "$1" in
|
||||
start|start_boot)
|
||||
# Make sure a node IS not running
|
||||
if relx_nodetool "ping" >/dev/null 2>&1; then
|
||||
echo "Node is already running!"
|
||||
exit 1
|
||||
fi
|
||||
# Bootstrap daemon command (check perms & drop to $RUNNER_USER)
|
||||
bootstrapd $@
|
||||
|
||||
# Save this for later.
|
||||
CMD=$1
|
||||
case "$1" in
|
||||
start)
|
||||
shift
|
||||
START_OPTION="console"
|
||||
HEART_OPTION="start"
|
||||
;;
|
||||
start_boot)
|
||||
shift
|
||||
START_OPTION="console_boot"
|
||||
HEART_OPTION="start_boot"
|
||||
;;
|
||||
esac
|
||||
RUN_PARAM="$@"
|
||||
|
||||
# Set arguments for the heart command
|
||||
set -- "$RUNNER_SCRIPT" "$HEART_OPTION"
|
||||
[ "$RUN_PARAM" ] && set -- "$@" "$RUN_PARAM"
|
||||
|
||||
# Export the HEART_COMMAND
|
||||
HEART_COMMAND="$RUNNER_SCRIPT $CMD"
|
||||
export HEART_COMMAND
|
||||
|
||||
## See: http://erlang.org/doc/man/run_erl.html
|
||||
# Export the RUN_ERL_LOG_GENERATIONS
|
||||
export RUN_ERL_LOG_GENERATIONS=${RUN_ERL_LOG_GENERATIONS:-"5"}
|
||||
|
||||
# Export the RUN_ERL_LOG_MAXSIZE
|
||||
export RUN_ERL_LOG_MAXSIZE=${RUN_ERL_LOG_MAXSIZE:-"10485760"}
|
||||
|
||||
mkdir -p "$PIPE_DIR"
|
||||
|
||||
"$BINDIR/run_erl" -daemon "$PIPE_DIR" "$RUNNER_LOG_DIR" \
|
||||
"$(relx_start_command)"
|
||||
|
||||
WAIT_TIME=${WAIT_FOR_ERLANG:-15}
|
||||
while [ $WAIT_TIME -gt 0 ]; do
|
||||
if ! relx_nodetool "ping" >/dev/null 2>&1; then
|
||||
WAIT_TIME=`expr $WAIT_TIME - 1`
|
||||
sleep 1
|
||||
continue
|
||||
fi
|
||||
sleep 1
|
||||
if relx_nodetool "ping" >/dev/null 2>&1; then
|
||||
echo "$EMQX_DISCR $REL_VSN is started successfully!"
|
||||
exit 0
|
||||
fi
|
||||
done && echo "$EMQX_DISCR $REL_VSN failed to start within ${WAIT_FOR_ERLANG:-15} seconds,"
|
||||
echo "see the output of '$0 console' for more information."
|
||||
echo "If you want to wait longer, set the environment variable"
|
||||
echo "WAIT_FOR_ERLANG to the number of seconds to wait."
|
||||
exit 1
|
||||
;;
|
||||
|
||||
stop)
|
||||
# Wait for the node to completely stop...
|
||||
PID="$(relx_get_pid)"
|
||||
if ! relx_nodetool "stop"; then
|
||||
exit 1
|
||||
fi
|
||||
while $(kill -s 0 "$PID" 2>/dev/null); do
|
||||
sleep 1
|
||||
done
|
||||
;;
|
||||
|
||||
restart|reboot)
|
||||
echo "$EMQX_DISCR $REL_VSN is stopped: $($RUNNER_BIN_DIR/emqx stop)"
|
||||
$RUNNER_BIN_DIR/emqx start
|
||||
;;
|
||||
|
||||
pid)
|
||||
## Get the VM's pid
|
||||
if ! relx_get_pid; then
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
|
||||
ping)
|
||||
## See if the VM is alive
|
||||
if ! relx_nodetool "ping"; then
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
|
||||
escript)
|
||||
## Run an escript under the node's environment
|
||||
if ! relx_escript $@; then
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
|
||||
attach)
|
||||
# Make sure a node IS running
|
||||
if ! relx_nodetool "ping" > /dev/null; then
|
||||
echo "Node is not running!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Bootstrap daemon command (check perms & drop to $RUNNER_USER)
|
||||
bootstrapd $@
|
||||
|
||||
shift
|
||||
exec "$BINDIR/to_erl" "$PIPE_DIR"
|
||||
;;
|
||||
|
||||
remote_console)
|
||||
# Make sure a node IS running
|
||||
if ! relx_nodetool "ping" > /dev/null; then
|
||||
echo "Node is not running!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Bootstrap daemon command (check perms & drop to $RUNNER_USER)
|
||||
bootstrapd $@
|
||||
|
||||
shift
|
||||
relx_rem_sh
|
||||
;;
|
||||
|
||||
upgrade|downgrade|install|unpack|uninstall)
|
||||
if [ -z "$2" ]; then
|
||||
echo "Missing version argument"
|
||||
echo "Usage: $REL_NAME $1 {version}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
COMMAND="$1"; shift
|
||||
|
||||
# Make sure a node IS running
|
||||
if ! relx_nodetool "ping" > /dev/null; then
|
||||
echo "Node is not running!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
ERL_FLAGS="$ERL_FLAGS $EPMD_ARG" \
|
||||
exec "$BINDIR/escript" "$ROOTDIR/bin/install_upgrade.escript" \
|
||||
"$COMMAND" "{'$REL_NAME', \"$NAME_TYPE\", '$NAME', '$COOKIE'}" "$@"
|
||||
;;
|
||||
|
||||
versions)
|
||||
# Make sure a node IS running
|
||||
if ! relx_nodetool "ping" > /dev/null; then
|
||||
echo "Node is not running!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
COMMAND="$1"; shift
|
||||
|
||||
ERL_FLAGS="$ERL_FLAGS $EPMD_ARG" \
|
||||
exec "$BINDIR/escript" "$ROOTDIR/bin/install_upgrade.escript" \
|
||||
"versions" "{'$REL_NAME', \"$NAME_TYPE\", '$NAME', '$COOKIE'}" "$@"
|
||||
;;
|
||||
|
||||
console|console_clean|console_boot)
|
||||
# Bootstrap daemon command (check perms & drop to $RUNNER_USER)
|
||||
bootstrapd $@
|
||||
|
||||
# .boot file typically just $REL_NAME (ie, the app name)
|
||||
# however, for debugging, sometimes start_clean.boot is useful.
|
||||
# For e.g. 'setup', one may even want to name another boot script.
|
||||
case "$1" in
|
||||
console)
|
||||
if [ -f "$REL_DIR/$REL_NAME.boot" ]; then
|
||||
BOOTFILE="$REL_DIR/$REL_NAME"
|
||||
else
|
||||
BOOTFILE="$REL_DIR/start"
|
||||
fi
|
||||
;;
|
||||
console_clean)
|
||||
BOOTFILE="$ROOTDIR/bin/start_clean"
|
||||
;;
|
||||
console_boot)
|
||||
shift
|
||||
BOOTFILE="$1"
|
||||
shift
|
||||
;;
|
||||
esac
|
||||
|
||||
#generate app.config and vm.args
|
||||
generate_config
|
||||
|
||||
# Setup beam-required vars
|
||||
EMU="beam"
|
||||
PROGNAME="${0#*/}"
|
||||
|
||||
export EMU
|
||||
export PROGNAME
|
||||
|
||||
# Store passed arguments since they will be erased by `set`
|
||||
ARGS="$@"
|
||||
|
||||
# Build an array of arguments to pass to exec later on
|
||||
# Build it here because this command will be used for logging.
|
||||
set -- "$BINDIR/erlexec" -boot "$BOOTFILE" -mode "$CODE_LOADING_MODE" \
|
||||
-boot_var ERTS_LIB_DIR "$ERTS_LIB_DIR" \
|
||||
-mnesia dir "\"${MNESIA_DATA_DIR}\"" \
|
||||
$RELX_CONFIG_PATH $CONFIG_ARGS $EPMD_ARG
|
||||
|
||||
# Dump environment info for logging purposes
|
||||
echo "Exec: $@" -- ${1+$ARGS}
|
||||
echo "Root: $ROOTDIR"
|
||||
|
||||
# Log the startup
|
||||
echo "$RUNNER_ROOT_DIR"
|
||||
logger -t "$REL_NAME[$$]" "Starting up"
|
||||
|
||||
# Start the VM
|
||||
exec "$@" -- ${1+$ARGS}
|
||||
;;
|
||||
|
||||
foreground)
|
||||
# Bootstrap daemon command (check perms & drop to $RUNNER_USER)
|
||||
bootstrapd $@
|
||||
# start up the release in the foreground for use by runit
|
||||
# or other supervision services
|
||||
|
||||
#generate app.config and vm.args
|
||||
generate_config
|
||||
|
||||
[ -f "$REL_DIR/$REL_NAME.boot" ] && BOOTFILE="$REL_NAME" || BOOTFILE=start
|
||||
FOREGROUNDOPTIONS="-noshell -noinput +Bd"
|
||||
|
||||
# Setup beam-required vars
|
||||
EMU=beam
|
||||
PROGNAME="${0#*/}"
|
||||
|
||||
export EMU
|
||||
export PROGNAME
|
||||
|
||||
# Store passed arguments since they will be erased by `set`
|
||||
ARGS="$@"
|
||||
|
||||
# Build an array of arguments to pass to exec later on
|
||||
# Build it here because this command will be used for logging.
|
||||
set -- "$BINDIR/erlexec" $FOREGROUNDOPTIONS \
|
||||
-boot "$REL_DIR/$BOOTFILE" -mode "$CODE_LOADING_MODE" \
|
||||
-boot_var ERTS_LIB_DIR "$ERTS_LIB_DIR" \
|
||||
-mnesia dir "\"${MNESIA_DATA_DIR}\"" \
|
||||
$RELX_CONFIG_PATH $CONFIG_ARGS $EPMD_ARG
|
||||
|
||||
# Dump environment info for logging purposes
|
||||
echo "Exec: $@" -- ${1+$ARGS}
|
||||
echo "Root: $ROOTDIR"
|
||||
|
||||
# Start the VM
|
||||
exec "$@" -- ${1+$ARGS}
|
||||
;;
|
||||
ertspath)
|
||||
echo $ERTS_PATH
|
||||
;;
|
||||
rpc)
|
||||
# Make sure a node IS running
|
||||
if ! relx_nodetool "ping" > /dev/null; then
|
||||
echo "Node is not running!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
shift
|
||||
|
||||
relx_nodetool rpc $@
|
||||
;;
|
||||
rpcterms)
|
||||
# Make sure a node IS running
|
||||
if ! relx_nodetool "ping" > /dev/null; then
|
||||
echo "Node is not running!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
shift
|
||||
|
||||
relx_nodetool rpcterms $@
|
||||
;;
|
||||
root_dir)
|
||||
# Make sure a node IS running
|
||||
if ! relx_nodetool "ping" > /dev/null; then
|
||||
echo "Node is not running!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
shift
|
||||
relx_nodetool "eval" 'code:root_dir()'
|
||||
;;
|
||||
eval)
|
||||
# Make sure a node IS running
|
||||
if ! relx_nodetool "ping" > /dev/null; then
|
||||
echo "Node is not running!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
shift
|
||||
relx_nodetool "eval" $@
|
||||
;;
|
||||
*)
|
||||
relx_usage $1
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
exit 0
|
|
@ -0,0 +1,263 @@
|
|||
:: This batch file handles managing an Erlang node as a Windows service.
|
||||
::
|
||||
:: Commands provided:
|
||||
::
|
||||
:: * install - install the release as a Windows service
|
||||
:: * start - start the service and Erlang node
|
||||
:: * stop - stop the service and Erlang node
|
||||
:: * restart - run the stop command and start command
|
||||
:: * uninstall - uninstall the service and kill a running node
|
||||
:: * ping - check if the node is running
|
||||
:: * console - start the Erlang release in a `werl` Windows shell
|
||||
:: * attach - connect to a running node and open an interactive console
|
||||
:: * list - display a listing of installed Erlang services
|
||||
:: * usage - display available commands
|
||||
|
||||
:: Set variables that describe the release
|
||||
@set rel_name=emqx
|
||||
@set rel_vsn={{ rel_vsn }}
|
||||
@set erts_vsn={{ erts_vsn }}
|
||||
@set erl_opts={{ erl_opts }}
|
||||
|
||||
@set script=%~n0
|
||||
|
||||
:: Discover the release root directory from the directory
|
||||
:: of this script
|
||||
@set script_dir=%~dp0
|
||||
@for %%A in ("%script_dir%\..") do @(
|
||||
set rel_root_dir=%%~fA
|
||||
)
|
||||
@set rel_dir=%rel_root_dir%\releases\%rel_vsn%
|
||||
|
||||
@set etc_dir=%rel_root_dir%\etc
|
||||
@set lib_dir=%rel_root_dir%\lib
|
||||
@set data_dir=%rel_root_dir%\data
|
||||
@set emqx_conf=%etc_dir%\emqx.conf
|
||||
|
||||
@call :find_erts_dir
|
||||
@call :find_vm_args
|
||||
@call :find_sys_config
|
||||
@call :set_boot_script_var
|
||||
|
||||
@set service_name=%rel_name%_%rel_vsn%
|
||||
@set bindir=%erts_dir%\bin
|
||||
@set progname=erl.exe
|
||||
@set clean_boot_script=%rel_root_dir%\bin\start_clean
|
||||
@set erlsrv="%bindir%\erlsrv.exe"
|
||||
@set epmd="%bindir%\epmd.exe"
|
||||
@set escript="%bindir%\escript.exe"
|
||||
@set werl="%bindir%\werl.exe"
|
||||
@set erl_exe="%bindir%\erl.exe"
|
||||
@set nodetool="%rel_root_dir%\bin\nodetool"
|
||||
@set cuttlefish="%rel_root_dir%\bin\cuttlefish"
|
||||
@set node_type="-name"
|
||||
|
||||
:: Extract node name from emqx.conf
|
||||
@for /f "usebackq delims=\= tokens=2" %%I in (`findstr /b node\.name "%emqx_conf%"`) do @(
|
||||
@call :set_trim node_name %%I
|
||||
)
|
||||
|
||||
:: Extract node cookie from emqx.conf.
:: NOTE: the argument to :set_trim was previously `node_cookie=` — the
:: stray '=' (inconsistent with the node.name extraction above) made
:: the subroutine receive a mangled variable name.
@for /f "usebackq delims=\= tokens=2" %%I in (`findstr /b node\.cookie "%emqx_conf%"`) do @(
    @call :set_trim node_cookie %%I
)
|
||||
|
||||
:: Write the erl.ini file to set up paths relative to this script
|
||||
@call :write_ini
|
||||
|
||||
:: If a start.boot file is not present, copy one from the named .boot file
|
||||
@if not exist "%rel_dir%\start.boot" (
|
||||
copy "%rel_dir%\%rel_name%.boot" "%rel_dir%\start.boot" >nul
|
||||
)
|
||||
|
||||
@if "%1"=="install" @goto install
|
||||
@if "%1"=="uninstall" @goto uninstall
|
||||
@if "%1"=="start" @goto start
|
||||
@if "%1"=="stop" @goto stop
|
||||
@if "%1"=="restart" @call :stop && @goto start
|
||||
::@if "%1"=="upgrade" @goto relup
|
||||
::@if "%1"=="downgrade" @goto relup
|
||||
@if "%1"=="console" @goto console
|
||||
@if "%1"=="ping" @goto ping
|
||||
@if "%1"=="list" @goto list
|
||||
@if "%1"=="attach" @goto attach
|
||||
@if "%1"=="" @goto usage
|
||||
@echo Unknown command: "%1"
|
||||
|
||||
@goto :eof
|
||||
|
||||
:: ---- Locate the ERTS (Erlang runtime) directory ----
:: Prefer the erts-<vsn> bundled under the release root; otherwise fall
:: back to whichever "erl" is found on PATH.
:find_erts_dir
@set possible_erts_dir=%rel_root_dir%\erts-%erts_vsn%
@if exist "%possible_erts_dir%" (
    call :set_erts_dir_from_default
) else (
    call :set_erts_dir_from_erl
)
@goto :eof

:: Use the ERTS shipped with the release; root dir is the release root.
:set_erts_dir_from_default
@set erts_dir=%possible_erts_dir%
@set rootdir=%rel_root_dir%
@goto :eof

:: Derive the ERTS dir by asking the "erl" on PATH for its code root.
:set_erts_dir_from_erl
@for /f "delims=" %%i in ('where erl') do @(
    set erl=%%i
)
@set dir_cmd="%erl%" -noshell -eval "io:format(\"~s\", [filename:nativename(code:root_dir())])." -s init stop
@for /f %%i in ('%%dir_cmd%%') do @(
    set erl_root=%%i
)
@set erts_dir=%erl_root%\erts-%erts_vsn%
@set rootdir=%erl_root%
@goto :eof

:: If etc\vm.args exists, pass it to the VM via -args_file.
:find_vm_args
@set possible_vm=%etc_dir%\vm.args
@if exist %possible_vm% (
    set args_file=-args_file "%possible_vm%"
)
@goto :eof

:: If etc\sys.config exists, pass it to the VM via -config.
:find_sys_config
@set possible_sys=%etc_dir%\sys.config
@if exist %possible_sys% (
    set sys_config=-config "%possible_sys%"
)
@goto :eof

:: Ask nodetool for (and create) the per-node mnesia directory.
:create_mnesia_dir
@set create_dir_cmd=%escript% %nodetool% mnesia_dir %data_dir%\mnesia %node_name%
@for /f "delims=" %%Z in ('%%create_dir_cmd%%') do @(
    set mnesia_dir=%%Z
)
@goto :eof

:: Run cuttlefish to turn etc\emqx.conf into generated VM/app config args.
:generate_app_config
@set gen_config_cmd=%escript% %cuttlefish% -i %rel_dir%\emqx.schema -c %etc_dir%\emqx.conf -d %data_dir%\configs generate
@for /f "delims=" %%A in ('%%gen_config_cmd%%') do @(
    set generated_config_args=%%A
)
@goto :eof
|
||||
|
||||
:: Choose the boot script: the release-named .boot when present,
:: otherwise fall back to start.boot.
:set_boot_script_var
@if exist "%rel_dir%\%rel_name%.boot" (
    set boot_script=%rel_dir%\%rel_name%
) else (
    set boot_script=%rel_dir%\start
)
@goto :eof

:: Write erl.ini so erl.exe resolves Bindir/Rootdir relative to this
:: release (backslashes must be doubled inside the ini file).
:write_ini
@set erl_ini=%erts_dir%\bin\erl.ini
@set converted_bindir=%bindir:\=\\%
@set converted_rootdir=%rootdir:\=\\%
@echo [erlang] > "%erl_ini%"
@echo Bindir=%converted_bindir% >> "%erl_ini%"
@echo Progname=%progname% >> "%erl_ini%"
@echo Rootdir=%converted_rootdir% >> "%erl_ini%"
@goto :eof

:: Display usage information
:usage
@echo usage: %~n0 ^(install^|uninstall^|start^|stop^|restart^|console^|ping^|list^|attach^)
@goto :eof
|
||||
|
||||
:: Install the release as a Windows service,
:: or install the specified version passed as argument (relup path).
:install
@call :create_mnesia_dir
@call :generate_app_config
:: Install the service
@set args="-boot %boot_script% %sys_config% %generated_config_args% -mnesia dir '%mnesia_dir%'"
@set description=EMQX node %node_name% in %rootdir%
@if "" == "%2" (
    %erlsrv% add %service_name% %node_type% "%node_name%" -on restart -c "%description%" ^
             -i "emqx" -w "%rootdir%" -m %erl_exe% -args %args% ^
             -st "init:stop()."
    rem sc.exe requires a space after "start=" or the value is not
    rem parsed; also use %service_name% rather than a hard-coded name.
    sc config %service_name% start= delayed-auto
) else (
    rem relup and reldown -- "::" comments are unsafe inside a
    rem parenthesized block, so "rem" is used here
    goto relup
)

@goto :eof
|
||||
|
||||
:: Remove the Windows service and kill the port-mapper daemon.
:uninstall
@%erlsrv% remove %service_name%
@%epmd% -kill
@goto :eof

:: Start EMQX detached in a werl window (not via the service manager).
:start
:: window service?
:: @%erlsrv% start %service_name%
@call :create_mnesia_dir
@call :generate_app_config
@set args=-detached %sys_config% %generated_config_args% -mnesia dir '%mnesia_dir%'
@echo off
cd /d %rel_root_dir%
@echo on
@start "%rel_name%" %werl% -boot "%boot_script%" %args%
@goto :eof

:: Ask the running node to stop, via nodetool.
:stop
:: window service?
:: @%erlsrv% stop %service_name%
@%escript% %nodetool% %node_type% %node_name% -setcookie %node_cookie% stop
@goto :eof

:: Hot upgrade / downgrade: hand the package name to install_upgrade.escript.
:relup
@if "" == "%2" (
    echo Missing package argument
    echo Usage: %rel_name% %1 {package base name}
    echo NOTE {package base name} MUST NOT include the .tar.gz suffix
    set ERRORLEVEL=1
    exit /b %ERRORLEVEL%
)
@%escript% "%rootdir%/bin/install_upgrade.escript" "%rel_name%" "%node_name%" "%node_cookie%" "%2"
@goto :eof
|
||||
|
||||
:: Start an interactive console in a new werl window.
:console
@call :create_mnesia_dir
@call :generate_app_config
@set args=%sys_config% %generated_config_args% -mnesia dir '%mnesia_dir%'
@echo off
cd /d %rel_root_dir%
@echo on
@start "bin\%rel_name% console" %werl% -boot "%boot_script%" %args%
@echo emqx is started!
@goto :eof

:: Ping the running node
:ping
@%escript% %nodetool% ping %node_type% "%node_name%" -setcookie "%node_cookie%"
@goto :eof

:: List installed Erlang services
:list
@%erlsrv% list %service_name%
@goto :eof

:: Open a remote shell (-remsh) to the running node in a new window.
:attach
:: @start "%node_name% attach"
@start "%node_name% attach" %werl% -boot "%clean_boot_script%" ^
       -remsh %node_name% %node_type% console_%node_name% -setcookie %node_cookie%
@goto :eof

:: Assign %2 to the variable named by %1; cmd splits call arguments on
:: "=", which is what strips the delimiter from findstr output.
:set_trim
@set %1=%2
@goto :eof
|
||||
|
|
@ -0,0 +1,92 @@
|
|||
#!/bin/sh
# -*- tab-width:4;indent-tabs-mode:nil -*-
# ex: ts=4 sw=4 et
#
# emqx_ctl wrapper: resolve the release root, load the release variables,
# then forward the command line to the running node through nodetool.

set -e

# Resolve the release root even when invoked through a symlink. All path
# expansions are quoted so an install prefix containing spaces does not
# get word-split (the original left $0 and the dirname result unquoted).
ROOT_DIR="$(cd "$(dirname "$(readlink "$0" || echo "$0")")"/..; pwd -P)"
. "$ROOT_DIR/releases/emqx_vars"

# Echo to stderr on errors
echoerr() { echo "$@" 1>&2; }

# Without WITH_EPMD set, use ekka's epmd-less distribution.
if [ -z "$WITH_EPMD" ]; then
    EPMD_ARG="-start_epmd false -epmd_module ekka_epmd -proto_dist ekka"
else
    EPMD_ARG="-start_epmd true"
fi

# Ask a throw-away VM for the host part of this node's name.
relx_get_nodename() {
    id="longname$(relx_gen_id)-${NAME}"
    "$BINDIR/erl" -boot start_clean -eval '[Host] = tl(string:tokens(atom_to_list(node()),"@")), io:format("~s~n", [Host]), halt()' -noshell ${NAME_TYPE} $id
}

# Control a node: run "nodetool <command> [args...]" against $NAME.
relx_nodetool() {
    command="$1"; shift

    ERL_FLAGS="$ERL_FLAGS $EPMD_ARG $PROTO_DIST_ARG" \
    "$ERTS_DIR/bin/escript" "$ROOTDIR/bin/nodetool" "$NAME_TYPE" "$NAME" \
        -setcookie "$COOKIE" "$command" "$@"
}
|
||||
|
||||
|
||||
# Extract the target node name from node.args.
# Priority: EMQX_NODE_NAME env var, then the -name flag of a running
# beam.smp, then node.name from emqx.conf.
if [ -z "$NAME_ARG" ]; then
    # Run the (expensive) ps pipeline once and quote the result: the
    # original ran it twice inside an unquoted `[ ! -z \`...\` ]`, which
    # makes `[` fail (aborting under `set -e`) when several beam
    # processes match. head -1 keeps a single candidate.
    RUNNING_NAME="$(ps -ef | grep "$ERTS_PATH/beam.smp" | grep -o -E '\-name (\S*)' | awk '{print $2}' | head -1)"
    if [ ! -z "$EMQX_NODE_NAME" ]; then
        NODENAME="$EMQX_NODE_NAME"
    elif [ ! -z "$RUNNING_NAME" ]; then
        NODENAME="$RUNNING_NAME"
    else
        NODENAME=`egrep '^[ \t]*node.name[ \t]*=[ \t]*' $RUNNER_ETC_DIR/emqx.conf 2> /dev/null | tail -1 | cut -d = -f 2-`
    fi
    if [ -z "$NODENAME" ]; then
        echoerr "vm.args needs to have a -name parameter."
        echoerr "  -sname is not supported."
        echoerr "please check $RUNNER_ETC_DIR/emqx.conf"
        exit 1
    else
        NAME_ARG="-name ${NODENAME# *}"
    fi
fi

# Extract the name type and name from the NAME_ARG for REMSH
NAME_TYPE="$(echo "$NAME_ARG" | awk '{print $1}')"
NAME="$(echo "$NAME_ARG" | awk '{print $2}')"

# Extract the target cookie; lookup priority mirrors the node name above.
if [ -z "$COOKIE_ARG" ]; then
    RUNNING_COOKIE="$(ps -ef | grep "$ERTS_PATH/beam.smp" | grep -o -E '\-setcookie (\S*)' | awk '{print $2}' | head -1)"
    if [ ! -z "$EMQX_NODE_COOKIE" ]; then
        COOKIE="$EMQX_NODE_COOKIE"
    elif [ ! -z "$RUNNING_COOKIE" ]; then
        COOKIE="$RUNNING_COOKIE"
    else
        COOKIE=`egrep '^[ \t]*node.cookie[ \t]*=[ \t]*' $RUNNER_ETC_DIR/emqx.conf 2> /dev/null | tail -1 | cut -d = -f 2-`
    fi
    if [ -z "$COOKIE" ]; then
        echoerr "vm.args needs to have a -setcookie parameter."
        echoerr "please check $RUNNER_ETC_DIR/emqx.conf"
        exit 1
    else
        COOKIE_ARG="-setcookie $COOKIE"
    fi
fi

# Extract cookie name from COOKIE_ARG
COOKIE="$(echo "$COOKIE_ARG" | awk '{print $2}')"
|
||||
|
||||
# Support for IPv6 Dist. See: https://github.com/emqtt/emqttd/issues/1460
PROTO_DIST=`egrep '^[ \t]*cluster.proto_dist[ \t]*=[ \t]*' $RUNNER_ETC_DIR/emqx.conf 2> /dev/null | tail -1 | cut -d = -f 2-`
# Expand to "-proto_dist <value>" only when a value was configured.
PROTO_DIST_ARG="${PROTO_DIST:+-proto_dist $PROTO_DIST}"

# Locate ERTS relative to the release root and run from there so that
# relative paths (releases/..., bin/...) resolve.
export ROOTDIR="$RUNNER_ROOT_DIR"
export ERTS_DIR="$ROOTDIR/erts-$ERTS_VSN"
export BINDIR="$ERTS_DIR/bin"
cd "$ROOTDIR"

# Forward every CLI argument to emqx_ctl on the target node.
relx_nodetool rpc emqx_ctl run_command "$@"
|
||||
|
|
@ -0,0 +1,92 @@
|
|||
:: The batch file for emqx_ctl command

@set args=%*

:: Set variables that describe the release
@set rel_name=emqx
@set rel_vsn={{ rel_vsn }}
@set erts_vsn={{ erts_vsn }}
@set erl_opts={{ erl_opts }}

:: Discover the release root directory from the directory of this script
@set script_dir=%~dp0
@for %%A in ("%script_dir%\..") do @(
    set rel_root_dir=%%~fA
)
@set rel_dir=%rel_root_dir%\releases\%rel_vsn%
@set emqx_conf=%rel_root_dir%\etc\emqx.conf

@call :find_erts_dir

@set bindir=%erts_dir%\bin
@set progname=erl.exe
@set escript="%bindir%\escript.exe"
@set nodetool="%rel_root_dir%\bin\nodetool"
@set node_type="-name"

:: Extract node name from emqx.conf
@for /f "usebackq delims=\= tokens=2" %%I in (`findstr /b node\.name "%emqx_conf%"`) do @(
    @call :set_trim node_name %%I
)

:: Extract node cookie from emqx.conf (no trailing "=" after the variable
:: name: cmd treats "=" as an argument delimiter, so it was redundant and
:: inconsistent with the node_name call above)
@for /f "usebackq delims=\= tokens=2" %%I in (`findstr /b node\.cookie "%emqx_conf%"`) do @(
    @call :set_trim node_cookie %%I
)

:: Write the erl.ini file to set up paths relative to this script
@call :write_ini

:: If a start.boot file is not present, copy one from the named .boot file
@if not exist "%rel_dir%\start.boot" (
    copy "%rel_dir%\%rel_name%.boot" "%rel_dir%\start.boot" >nul
)

@%escript% %nodetool% %node_type% "%node_name%" -setcookie "%node_cookie%" rpc emqx_ctl run_command %args%

:: Exit here instead of falling through into the subroutines below.
@goto :eof
|
||||
|
||||
:: ---- Locate the ERTS (Erlang runtime) directory ----
:: Prefer the erts-<vsn> bundled under the release root; otherwise fall
:: back to whichever "erl" is found on PATH.
:find_erts_dir
@set possible_erts_dir=%rel_root_dir%\erts-%erts_vsn%
@if exist "%possible_erts_dir%" (
    call :set_erts_dir_from_default
) else (
    call :set_erts_dir_from_erl
)
@goto :eof

:: Use the ERTS shipped with the release; root dir is the release root.
:set_erts_dir_from_default
@set erts_dir=%possible_erts_dir%
@set rootdir=%rel_root_dir%
@goto :eof

:: Derive the ERTS dir by asking the "erl" on PATH for its code root.
:set_erts_dir_from_erl
@for /f "delims=" %%i in ('where erl') do @(
    set erl=%%i
)
@set dir_cmd="%erl%" -noshell -eval "io:format(\"~s\", [filename:nativename(code:root_dir())])." -s init stop
@for /f %%i in ('%%dir_cmd%%') do @(
    set erl_root=%%i
)
@set erts_dir=%erl_root%\erts-%erts_vsn%
@set rootdir=%erl_root%
@goto :eof

:: Write erl.ini so erl.exe resolves Bindir/Rootdir relative to this
:: release (backslashes must be doubled inside the ini file).
:write_ini
@set erl_ini=%erts_dir%\bin\erl.ini
@set converted_bindir=%bindir:\=\\%
@set converted_rootdir=%rootdir:\=\\%
@echo [erlang] > "%erl_ini%"
@echo Bindir=%converted_bindir% >> "%erl_ini%"
@echo Progname=%progname% >> "%erl_ini%"
@echo Rootdir=%converted_rootdir% >> "%erl_ini%"
@goto :eof

:: Assign %2 to the variable named by %1; cmd splits call arguments on
:: "=", which is what strips the delimiter from findstr output.
:set_trim
@set %1=%2
@goto :eof
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
#!/bin/sh
# Default values for the EMQX environment knobs. Each assignment applies
# only when the variable is unset or empty (same condition as the
# original's `[ "x" = "x$VAR" ]` test).

: "${EMQX_NODE_NAME:=emqx@127.0.0.1}"
: "${EMQX_NODE_COOKIE:=emqxsecretcookie}"
: "${EMQX_MAX_PACKET_SIZE:=64KB}"
: "${EMQX_MAX_PORTS:=65536}"
: "${EMQX_TCP_PORT:=1883}"
: "${EMQX_SSL_PORT:=8883}"
: "${EMQX_WS_PORT:=8083}"
: "${EMQX_WSS_PORT:=8084}"
||||
|
|
@ -0,0 +1,377 @@
|
|||
#!/usr/bin/env escript
|
||||
%%! -noshell -noinput
|
||||
%% -*- mode: erlang;erlang-indent-level: 4;indent-tabs-mode: nil -*-
|
||||
%% ex: ft=erlang ts=4 sw=4 et
|
||||
|
||||
-define(TIMEOUT, 300000).
-define(INFO(Fmt,Args), io:format(Fmt++"~n",Args)).

%% Entry point: main([Command, DistInfoString | Args]).
%% DistInfoString is an Erlang term literal of the shape
%% {RelName, NameTypeArg, NodeName, Cookie} supplied by the boot scripts.
main([Command0, DistInfoStr | CommandArgs]) ->
    %% convert the distribution info arguments string to an erlang term
    {ok, Tokens, _} = erl_scan:string(DistInfoStr ++ "."),
    {ok, DistInfo} = erl_parse:parse_term(Tokens),
    %% convert arguments into a proplist
    Opts = parse_arguments(CommandArgs),
    %% invoke the command passed as argument
    case Command0 of
        "install"   -> install(DistInfo, Opts);
        "unpack"    -> unpack(DistInfo, Opts);
        "upgrade"   -> upgrade(DistInfo, Opts);
        "downgrade" -> downgrade(DistInfo, Opts);
        "uninstall" -> uninstall(DistInfo, Opts);
        "versions"  -> versions(DistInfo, Opts);
        Other ->
            %% previously an unknown command crashed with a case_clause;
            %% report it and exit non-zero instead
            ?INFO("unknown command: ~s", [Other]),
            erlang:halt(1)
    end;
main(Args) ->
    ?INFO("unknown args: ~p", [Args]),
    erlang:halt(1).
|
||||
|
||||
%% Unpack (but do not install) a release package on the target node.
%% Prints a status line for every possible outcome; halts with exit
%% code 2 when the unpack fails.
unpack({RelName, NameTypeArg, NodeName, Cookie}, Opts) ->
    Node = start_distribution(NodeName, NameTypeArg, Cookie),
    Vsn = proplists:get_value(version, Opts),
    case unpack_release(RelName, Node, Vsn) of
        {ok, UnpackedVsn} ->
            ?INFO("Unpacked successfully: ~p", [UnpackedVsn]);
        old ->
            %% no need to unpack, has been installed previously
            ?INFO("Release ~s is marked old.", [Vsn]);
        unpacked ->
            ?INFO("Release ~s is already unpacked.", [Vsn]);
        current ->
            ?INFO("Release ~s is already installed and current.", [Vsn]);
        permanent ->
            ?INFO("Release ~s is already installed and set permanent.", [Vsn]);
        {error, Reason} ->
            ?INFO("Unpack failed: ~p.", [Reason]),
            print_existing_versions(Node),
            erlang:halt(2)
    end;
unpack(_, Args) ->
    ?INFO("unpack: unknown args ~p", [Args]).
|
||||
|
||||
%% Unpack (if needed) and install a release on the target node, making
%% it permanent unless Opts contains {permanent, false}.
install({RelName, NameTypeArg, NodeName, Cookie}, Opts) ->
    Node = start_distribution(NodeName, NameTypeArg, Cookie),
    Vsn = proplists:get_value(version, Opts),
    case unpack_release(RelName, Node, Vsn) of
        {ok, UnpackedVsn} ->
            ?INFO("Unpacked successfully: ~p.", [UnpackedVsn]),
            check_and_install(Node, UnpackedVsn),
            maybe_permafy(Node, RelName, UnpackedVsn, Opts);
        old ->
            %% no need to unpack, has been installed previously
            ?INFO("Release ~s is marked old, switching to it.", [Vsn]),
            check_and_install(Node, Vsn),
            maybe_permafy(Node, RelName, Vsn, Opts);
        unpacked ->
            ?INFO("Release ~s is already unpacked, now installing.", [Vsn]),
            check_and_install(Node, Vsn),
            maybe_permafy(Node, RelName, Vsn, Opts);
        current ->
            %% installed and current: at most promote it to permanent
            case proplists:get_value(permanent, Opts, true) of
                true ->
                    ?INFO("Release ~s is already installed and current, making permanent.",
                          [Vsn]),
                    permafy(Node, RelName, Vsn);
                false ->
                    ?INFO("Release ~s is already installed and current.",
                          [Vsn])
            end;
        permanent ->
            %% this release is marked permanent, however it might not be
            %% the one currently running
            case current_release_version(Node) of
                Vsn ->
                    ?INFO("Release ~s is already installed, running and set permanent.",
                          [Vsn]);
                RunningVsn ->
                    ?INFO("Release ~s is the currently running version.",
                          [RunningVsn]),
                    check_and_install(Node, Vsn),
                    maybe_permafy(Node, RelName, Vsn, Opts)
            end;
        {error, Reason} ->
            ?INFO("Unpack failed: ~p", [Reason]),
            print_existing_versions(Node),
            erlang:halt(2)
    end;
install(_, Args) ->
    ?INFO("install: unknown args ~p", [Args]).
|
||||
|
||||
%% "upgrade" and "downgrade" are both aliases for install/2.
upgrade(DistInfo, Args) ->
    install(DistInfo, Args).

downgrade(DistInfo, Args) ->
    install(DistInfo, Args).

%% Remove a non-running release version from the target node; refuses
%% (and halts) when the version is current or permanent.
uninstall({_RelName, NameTypeArg, NodeName, Cookie}, Opts) ->
    Node = start_distribution(NodeName, NameTypeArg, Cookie),
    Releases = which_releases(Node),
    Vsn = proplists:get_value(version, Opts),
    case proplists:get_value(Vsn, Releases) of
        undefined ->
            ?INFO("Release ~s is already uninstalled.", [Vsn]);
        old ->
            ?INFO("Release ~s is marked old, uninstalling it.", [Vsn]),
            remove_release(Node, Vsn);
        unpacked ->
            ?INFO("Release ~s is marked unpacked, uninstalling it",
                  [Vsn]),
            remove_release(Node, Vsn);
        current ->
            ?INFO("Uninstall failed: Release ~s is marked current.", [Vsn]),
            erlang:halt(2);
        permanent ->
            ?INFO("Uninstall failed: Release ~s is running.", [Vsn]),
            erlang:halt(2)
    end;
uninstall(_, Args) ->
    ?INFO("uninstall: unknown args ~p", [Args]).

%% Print every release version known to the target node.
versions({_RelName, NameTypeArg, NodeName, Cookie}, []) ->
    Node = start_distribution(NodeName, NameTypeArg, Cookie),
    print_existing_versions(Node).

%% Turn the raw command-line tail into a proplist:
%%   "--no-permanent" -> {permanent, false}
%%   anything else    -> {version, Vsn} (suffix after "/" stripped)
parse_arguments(Args) ->
    parse_arguments(Args, []).

parse_arguments([], Acc) -> Acc;
parse_arguments(["--no-permanent" | Rest], Acc) ->
    parse_arguments(Rest, [{permanent, false} | Acc]);
parse_arguments([VersionStr | Rest], Acc) ->
    parse_arguments(Rest, [{version, parse_version(VersionStr)} | Acc]).
|
||||
|
||||
%% Ensure Version is unpacked on TargetNode. Returns {ok, Vsn} after a
%% fresh unpack, the release's current status atom when it is already
%% known to the node, or {error, Reason}.
unpack_release(RelName, TargetNode, Version) ->
    case proplists:get_value(Version, which_releases(TargetNode)) of
        undefined ->
            %% not installed, so unpack tarball:
            %% look for a release package with the intended version in the
            %% following order:
            %%   releases/<relname>-<version>.tar.gz
            %%   releases/<version>/<relname>-<version>.tar.gz
            %%   releases/<version>/<relname>.tar.gz
            case find_and_link_release_package(Version, RelName) of
                {_, undefined} ->
                    {error, release_package_not_found};
                {ReleasePackage, ReleasePackageLink} ->
                    ?INFO("Release ~s not found, attempting to unpack ~s",
                          [Version, ReleasePackage]),
                    case rpc:call(TargetNode, release_handler, unpack_release,
                                  [ReleasePackageLink], ?TIMEOUT) of
                        {ok, Vsn} -> {ok, Vsn};
                        {error, _} = Error -> Error
                    end
            end;
        Status ->
            Status
    end.
|
||||
|
||||
%% 1. look for a release package tarball with the provided version in the
%%    following order:
%%      releases/<relname>-<version>.tar.gz
%%      releases/<version>/<relname>-<version>.tar.gz
%%      releases/<version>/<relname>.tar.gz
%% 2. create a symlink from a fixed location (ie. releases/<version>/<relname>.tar.gz)
%%    to the release package tarball found in 1.
%% 3. return a tuple with the paths to the release package and to the
%%    symlink that is to be provided to release handler
find_and_link_release_package(Version, RelName) ->
    RelNameStr = atom_to_list(RelName),
    %% release handler always receives the same link path,
    %% "<version>/<relname>" (relative to "releases/"), no matter where
    %% the tarball actually lives
    ReleaseHandlerPackageLink = filename:join(Version, RelNameStr),
    %% the symlink we create once the real package has been located
    ReleaseLink = filename:join(["releases", Version,
                                 RelNameStr ++ ".tar.gz"]),
    %% windows packages arrive as .zip; convert them to .tar.gz first
    ok = unpack_zipballs(RelNameStr, Version),
    Candidates = [filename:join(["releases",
                                 RelNameStr ++ "-" ++ Version ++ ".tar.gz"]),
                  filename:join(["releases", Version,
                                 RelNameStr ++ "-" ++ Version ++ ".tar.gz"]),
                  filename:join(["releases", Version,
                                 RelNameStr ++ ".tar.gz"])],
    case first_value(fun filelib:is_file/1, Candidates) of
        no_value ->
            {undefined, undefined};
        {ok, Filename} when is_list(Filename) andalso
                            Filename =:= ReleaseLink ->
            %% the package already sits exactly where the link would be
            %% created, so no link is needed
            {Filename, ReleaseHandlerPackageLink};
        {ok, Filename} when is_list(Filename) ->
            %% release handler expects the fixed name <relname>.tar.gz,
            %% so point a symlink at the tarball we actually found
            ok = filelib:ensure_dir(filename:join([filename:dirname(ReleaseLink), "dummy"])),
            case file:make_symlink(filename:absname(Filename), ReleaseLink) of
                ok ->
                    ok;
                {error, eperm} -> % windows!
                    {ok, _} = file:copy(filename:absname(Filename), ReleaseLink)
            end,
            {Filename, ReleaseHandlerPackageLink}
    end.
|
||||
|
||||
%% Convert any releases/<relname>-*<version>*.zip packages (as shipped
%% for windows) into the releases/<relname>-<version>.tar.gz that the
%% release handler expects. Works in a scratch dir under /tmp, then
%% restores the caller's working directory.
unpack_zipballs(RelNameStr, Version) ->
    {ok, Cwd} = file:get_cwd(),
    GzFile = filename:absname(filename:join(["releases", RelNameStr ++ "-" ++ Version ++ ".tar.gz"])),
    ZipFiles = filelib:wildcard(filename:join(["releases", RelNameStr ++ "-*" ++ Version ++ "*.zip"])),
    ?INFO("unzip ~p", [ZipFiles]),
    try
        lists:foreach(
          fun(Zip) ->
                  %% system_time() keeps scratch dirs unique per run
                  TmpTarDir = "/tmp/emqx_untar_" ++ integer_to_list(erlang:system_time()),
                  ok = filelib:ensure_dir(filename:join([TmpTarDir, "dummy"])),
                  {ok, _} = file:copy(Zip, filename:join([TmpTarDir, "emqx.zip"])),
                  ok = file:set_cwd(filename:join([TmpTarDir])),
                  {ok, _FileList} = zip:unzip("emqx.zip"),
                  ok = file:set_cwd(filename:join([TmpTarDir, "emqx"])),
                  ok = erl_tar:create(GzFile, filelib:wildcard("*"), [compressed])
          end, ZipFiles)
    after
        %% always restore the caller's cwd, even when a zip step fails;
        %% previously a failing set_cwd here was silently ignored
        ok = file:set_cwd(Cwd)
    end.
|
||||
|
||||
%% Return {ok, V} for the first list element for which Fun(V) is true,
%% or no_value when none matches.
first_value(_Fun, []) -> no_value;
first_value(Fun, [Value | Rest]) ->
    case Fun(Value) of
        true -> {ok, Value};
        false -> first_value(Fun, Rest)
    end.

%% Strip any "/..." suffix from a version string (e.g. "1.2.3/perm").
parse_version(V) when is_list(V) ->
    hd(string:tokens(V, "/")).
|
||||
|
||||
%% Stage sys.config/vm.args for Vsn next to the release, then ask the
%% target node's release_handler to check and install it. Halts the
%% script with a diagnostic on any failure.
check_and_install(TargetNode, Vsn) ->
    %% the running node knows which config files it booted with; copy
    %% them into releases/<Vsn>/ so the new release reuses them
    {ok, [[CurrAppConf]]} = rpc:call(TargetNode, init, get_argument, [config], ?TIMEOUT),
    {ok, [[CurrVmArgs]]} = rpc:call(TargetNode, init, get_argument, [vm_args], ?TIMEOUT),
    SysConfigDst = filename:join(["releases", Vsn, "sys.config"]),
    case filename:extension(CurrAppConf) of
        ".config" ->
            {ok, _} = file:copy(CurrAppConf, SysConfigDst);
        _ ->
            %% -config may have been passed without the extension
            {ok, _} = file:copy(CurrAppConf ++ ".config", SysConfigDst)
    end,
    {ok, _} = file:copy(CurrVmArgs, filename:join(["releases", Vsn, "vm.args"])),
    case rpc:call(TargetNode, release_handler,
                  check_install_release, [Vsn], ?TIMEOUT) of
        {ok, _OtherVsn, _Desc} ->
            ok;
        {error, Reason} ->
            ?INFO("ERROR: release_handler:check_install_release failed: ~p.", [Reason]),
            erlang:halt(3)
    end,
    case rpc:call(TargetNode, release_handler, install_release,
                  [Vsn, [{update_paths, true}]], ?TIMEOUT) of
        {ok, _, _} ->
            ?INFO("Installed Release: ~s.", [Vsn]),
            ok;
        {error, {no_such_release, Vsn}} ->
            VerList =
                iolist_to_binary(
                  [io_lib:format("* ~s\t~s~n", [V, S]) || {V, S} <- which_releases(TargetNode)]),
            ?INFO("Installed versions:~n~s", [VerList]),
            ?INFO("ERROR: Unable to revert to '~s' - not installed.", [Vsn]),
            erlang:halt(2);
        %% as described in http://erlang.org/doc/man/appup.html, when
        %% performing a relup with soft purge:
        %%   If the value is soft_purge, release_handler:install_release/1
        %%   returns {error,{old_processes,Mod}}
        {error, {old_processes, Mod}} ->
            ?INFO("ERROR: unable to install '~s' - old processes still running code from module ~p",
                  [Vsn, Mod]),
            erlang:halt(3);
        {error, Reason1} ->
            ?INFO("ERROR: release_handler:install_release failed: ~p", [Reason1]),
            erlang:halt(4)
    end.
|
||||
|
||||
%% Make Vsn permanent unless Opts says {permanent, false}.
maybe_permafy(TargetNode, RelName, Vsn, Opts) ->
    case proplists:get_value(permanent, Opts, true) of
        true -> permafy(TargetNode, RelName, Vsn);
        false -> ok
    end.

%% Mark Vsn permanent on the node, refresh the versioned helper scripts
%% in bin/, and append the new version info to releases/emqx_vars.
permafy(TargetNode, RelName, Vsn) ->
    RelNameStr = atom_to_list(RelName),
    ok = rpc:call(TargetNode, release_handler,
                  make_permanent, [Vsn], ?TIMEOUT),
    ?INFO("Made release permanent: ~p", [Vsn]),
    %% upgrade/downgrade the scripts by replacing them
    Scripts = [RelNameStr, RelNameStr ++ "_ctl", "cuttlefish", "nodetool",
               "install_upgrade.escript"],
    lists:foreach(
      fun(File) ->
              {ok, _} = file:copy(filename:join(["bin", File ++ "-" ++ Vsn]),
                                  filename:join(["bin", File]))
      end, Scripts),
    %% update the vars
    UpdatedVars = io_lib:format("REL_VSN=\"~s\"~nERTS_VSN=\"~s\"~n", [Vsn, erts_vsn()]),
    file:write_file(filename:absname(filename:join(["releases", "emqx_vars"])), UpdatedVars, [append]).
|
||||
|
||||
%% Remove Vsn via the node's release_handler; halts on failure.
remove_release(TargetNode, Vsn) ->
    case rpc:call(TargetNode, release_handler, remove_release, [Vsn], ?TIMEOUT) of
        ok ->
            ?INFO("Uninstalled Release: ~s", [Vsn]),
            ok;
        {error, Reason} ->
            ?INFO("ERROR: release_handler:remove_release failed: ~p", [Reason]),
            erlang:halt(3)
    end.

%% [{Version, Status}] for every release known to the target node.
which_releases(TargetNode) ->
    Releases = rpc:call(TargetNode, release_handler, which_releases, [], ?TIMEOUT),
    [{Vsn, Status} || {_Name, Vsn, _Apps, Status} <- Releases].
|
||||
|
||||
%% The running release version: the one marked `current', or failing
%% that, the one marked `permanent'.
current_release_version(TargetNode) ->
    Releases = rpc:call(TargetNode, release_handler, which_releases,
                        [], ?TIMEOUT),
    ByStatus = [{Status, Vsn} || {_Name, Vsn, _Apps, Status} <- Releases],
    %% current version takes priority over the permanent
    proplists:get_value(current, ByStatus,
                        proplists:get_value(permanent, ByStatus)).

%% Print "* <vsn>\t<status>" for every installed release.
print_existing_versions(TargetNode) ->
    VerList = iolist_to_binary([io_lib:format("* ~s\t~s~n", [V, S])
                                || {V, S} <- which_releases(TargetNode)]),
    ?INFO("Installed versions:~n~s", [VerList]).
|
||||
|
||||
%% Start a helper distributed node, connect to TargetNode and align the
%% target's cwd with ours; halts when the target does not answer pings.
start_distribution(TargetNode, NameTypeArg, Cookie) ->
    MyNode = make_script_node(TargetNode),
    {ok, _Pid} = net_kernel:start([MyNode, get_name_type(NameTypeArg)]),
    erlang:set_cookie(node(), Cookie),
    case {net_kernel:connect_node(TargetNode),
          net_adm:ping(TargetNode)} of
        {true, pong} ->
            ok;
        {_, pang} ->
            ?INFO("Node ~p not responding to pings.", [TargetNode]),
            erlang:halt(1)
    end,
    %% keep relative paths ("releases/...") meaningful on the target
    {ok, Cwd} = file:get_cwd(),
    ok = rpc:call(TargetNode, file, set_cwd, [Cwd], ?TIMEOUT),
    TargetNode.

%% Derive a unique helper node name from the target's, e.g.
%% emqx@host -> emqx_upgrader_<ospid>@host.
make_script_node(Node) ->
    [Name, Host] = string:tokens(atom_to_list(Node), "@"),
    list_to_atom(lists:concat([Name, "_upgrader_", os:getpid(), "@", Host])).
|
||||
|
||||
%% Map the CLI name-type flag onto net_kernel's atom:
%% "-sname" means shortnames, anything else longnames.
get_name_type("-sname") -> shortnames;
get_name_type(_) -> longnames.

%% Read the ERTS version from releases/start_erl.data
%% (format: "<erts_vsn> <rel_vsn>").
erts_vsn() ->
    {ok, Data} = file:read_file(filename:join(["releases", "start_erl.data"])),
    [ErtsVsn, _RelVsn] = string:tokens(binary_to_list(Data), " "),
    ErtsVsn.
|
|
@ -0,0 +1,294 @@
|
|||
#!/usr/bin/env escript
|
||||
%% -*- mode: erlang;erlang-indent-level: 4;indent-tabs-mode: nil -*-
|
||||
%% ex: ft=erlang ts=4 sw=4 et
|
||||
%% -------------------------------------------------------------------
|
||||
%%
|
||||
%% nodetool: Helper Script for interacting with live nodes
|
||||
%%
|
||||
%% -------------------------------------------------------------------
|
||||
-mode(compile).
|
||||
|
||||
%% Entry point.  Parses command-line options, connects (as a hidden
%% node) to the target node named by -name/-sname, then dispatches the
%% remaining arguments as a command against that node.
main(Args) ->
    case os:type() of
        {win32, nt} -> ok;
        _nix ->
            %% On non-Windows hosts, optionally launch epmd ourselves
            %% when -start_epmd true was passed to the emulator.
            case init:get_argument(start_epmd) of
                {ok, [["true"]]} ->
                    ok = start_epmd();
                _ ->
                    ok
            end
    end,
    %% These handlers print their result and halt the script themselves.
    ok = do_with_halt(Args, "mnesia_dir", fun create_mnesia_dir/2),
    ok = do_with_halt(Args, "chkconfig", fun("-config", X) -> chkconfig(X) end),
    ok = do_with_halt(Args, "chkconfig", fun chkconfig/1),
    %% -name/-sname: start our own distribution and remember the target
    %% node in the process dictionary (read back below).
    Args1 = do_with_ret(Args, "-name",
                        fun(TargetName) ->
                                ThisNode = append_node_suffix(TargetName, "_maint_"),
                                {ok, _} = net_kernel:start([ThisNode, longnames]),
                                put(target_node, nodename(TargetName))
                        end),
    Args2 = do_with_ret(Args1, "-sname",
                        fun(TargetName) ->
                                ThisNode = append_node_suffix(TargetName, "_maint_"),
                                {ok, _} = net_kernel:start([ThisNode, shortnames]),
                                put(target_node, nodename(TargetName))
                        end),
    RestArgs = do_with_ret(Args2, "-setcookie",
                           fun(Cookie) ->
                                   erlang:set_cookie(node(), list_to_atom(Cookie))
                           end),

    %% Needed for TLS distribution; failures are ignored deliberately
    %% (apps may already be running).
    [application:start(App) || App <- [crypto, public_key, ssl]],
    TargetNode = get(target_node),

    %% See if the node is currently running -- if it's not, we'll bail
    case {net_kernel:hidden_connect_node(TargetNode), net_adm:ping(TargetNode)} of
        {true, pong} ->
            ok;
        {false, pong} ->
            io:format(standard_error, "Failed to connect to node ~p .\n", [TargetNode]),
            halt(1);
        {_, pang} ->
            io:format(standard_error, "Node ~p not responding to pings.\n", [TargetNode]),
            halt(1)
    end,

    case RestArgs of
        ["getpid"] ->
            io:format("~p\n", [list_to_integer(rpc:call(TargetNode, os, getpid, []))]);
        ["ping"] ->
            %% If we got this far, the node already responded to a ping,
            %% so just dump a "pong"
            io:format("pong\n");
        ["stop"] ->
            %% Unload plugins first so they shut down cleanly, then stop.
            rpc:call(TargetNode, emqx_plugins, unload, [], 60000),
            io:format("~p\n", [rpc:call(TargetNode, init, stop, [], 60000)]);
        ["restart", "-config", ConfigFile | _RestArgs1] ->
            io:format("~p\n", [rpc:call(TargetNode, emqx, restart, [ConfigFile], 60000)]);
        ["rpc", Module, Function | RpcArgs] ->
            %% NOTE: all remaining CLI words are passed as ONE list
            %% argument, i.e. Module:Function(RpcArgs).
            case rpc:call(TargetNode, list_to_atom(Module), list_to_atom(Function),
                          [RpcArgs], 60000) of
                ok ->
                    ok;
                {error, cmd_not_found} ->
                    halt(1);
                {error, Reason} ->
                    io:format("RPC to ~s error: ~p\n", [TargetNode, Reason]),
                    halt(1);
                {badrpc, Reason} ->
                    io:format("RPC to ~s failed: ~p\n", [TargetNode, Reason]),
                    halt(1);
                _ ->
                    halt(1)
            end;
        ["rpc_infinity", Module, Function | RpcArgs] ->
            %% Same as "rpc" but with no RPC timeout.
            case rpc:call(TargetNode, list_to_atom(Module), list_to_atom(Function), [RpcArgs], infinity) of
                ok ->
                    ok;
                {badrpc, Reason} ->
                    io:format("RPC to ~p failed: ~p\n", [TargetNode, Reason]),
                    halt(1);
                _ ->
                    halt(1)
            end;
        ["rpcterms", Module, Function | ArgsAsString] ->
            %% Arguments are parsed as Erlang terms (see consult/1) and
            %% passed positionally; the result is printed as a term.
            case rpc:call(TargetNode, list_to_atom(Module), list_to_atom(Function),
                          consult(lists:flatten(ArgsAsString)), 60000) of
                {badrpc, Reason} ->
                    io:format("RPC to ~p failed: ~p\n", [TargetNode, Reason]),
                    halt(1);
                Other ->
                    io:format("~p\n", [Other])
            end;
        ["eval" | ListOfArgs] ->
            % shells may process args into more than one, and end up stripping
            % spaces, so this converts all of that to a single string to parse
            String = binary_to_list(
                       list_to_binary(
                         join(ListOfArgs," ")
                        )
                      ),

            % then just as a convenience to users, if they forgot a trailing
            % '.' add it for them.
            Normalized =
                case lists:reverse(String) of
                    [$. | _] -> String;
                    R -> lists:reverse([$. | R])
                end,

            % then scan and parse the string
            {ok, Scanned, _} = erl_scan:string(Normalized),
            {ok, Parsed } = erl_parse:parse_exprs(Scanned),

            % and evaluate it on the remote node
            case rpc:call(TargetNode, erl_eval, exprs, [Parsed, [] ]) of
                {value, Value, _} ->
                    io:format ("~p\n",[Value]);
                {badrpc, Reason} ->
                    io:format("RPC to ~p failed: ~p\n", [TargetNode, Reason]),
                    halt(1)
            end;
        Other ->
            io:format("Other: ~p\n", [Other]),
            io:format("Usage: nodetool {genconfig, chkconfig|getpid|ping|stop|restart|reboot|rpc|rpc_infinity|rpcterms|eval [Terms]} [RPC]\n")
    end,
    net_kernel:stop().
|
||||
|
||||
%% If option Name (followed by as many argument words as Handler's
%% arity) occurs in Args, apply Handler to those words and return the
%% remaining arguments; otherwise return Args unchanged.
do_with_ret(Args, Name, Handler) ->
    {arity, Arity} = erlang:fun_info(Handler, arity),
    case take_args(Args, Name, Arity) of
        {OptArgs, Remaining} ->
            _ = erlang:apply(Handler, OptArgs),
            Remaining;
        false ->
            Args
    end.
|
||||
|
||||
%% Like do_with_ret/3, but the Handler is expected to terminate the
%% escript itself (via halt/1).  Returns ok when the option is absent.
%% If the handler ever returns, that is a bug: report it and halt with
%% a non-zero code (?LINE doubles as a cheap unique exit code).
do_with_halt(Args, Name, Handler) ->
    {arity, Arity} = erlang:fun_info(Handler, arity),
    case take_args(Args, Name, Arity) of
        false ->
            ok;
        {Args1, _Rest} ->
            erlang:apply(Handler, Args1), %% should halt
            io:format(standard_error, "~s handler did not halt", [Name]),
            halt(?LINE)
    end.
|
||||
|
||||
%% Return {OptArgs, RemainingArgs} if option OptName (followed by
%% OptArity argument words) is found in Args, otherwise 'false'.
take_args(Args, OptName, 0) ->
    %% Zero-arity options carry no argument words.  The previous
    %% implementation returned `[]' here, which matched neither `false'
    %% nor `{_, _}' in the callers' case expressions and would crash
    %% with case_clause; return the same {OptArgs, Rest} shape as the
    %% OptArity > 0 path instead.
    case lists:member(OptName, Args) of
        true  -> {[], lists:delete(OptName, Args)};
        false -> false
    end;
take_args(Args, OptName, OptArity) ->
    take_args(Args, OptName, OptArity, _Scanned = []).

%% Scan Args left-to-right; Scanned accumulates (in reverse) the words
%% seen before the option name so they can be restored in Rest.
take_args([], _, _, _) -> false; %% no such option
take_args([Name | Rest], Name, Arity, Scanned) ->
    length(Rest) >= Arity orelse error({not_enough_args_for, Name}),
    {Result, Tail} = lists:split(Arity, Rest),
    {Result, lists:reverse(Scanned) ++ Tail};
take_args([Other | Rest], Name, Arity, Scanned) ->
    take_args(Rest, Name, Arity, [Other | Scanned]).
|
||||
|
||||
%% Launch epmd as a daemon; the command prints nothing on success, so
%% anything else fails the match.
start_epmd() ->
    Cmd = "\"" ++ epmd_path() ++ "\" -daemon",
    [] = os:cmd(Cmd),
    ok.
|
||||
|
||||
%% Locate the epmd executable: prefer one living next to this escript,
%% fall back to the system PATH, and halt(1) when neither exists.
epmd_path() ->
    ScriptDir = filename:dirname(escript:script_name()),
    case os:find_executable("epmd", ScriptDir) of
        false ->
            case os:find_executable("epmd") of
                false ->
                    io:format("Could not find epmd.~n"),
                    halt(1);
                PathEpmd ->
                    PathEpmd
            end;
        LocalEpmd ->
            LocalEpmd
    end.
|
||||
|
||||
%% Turn a (possibly host-less) node name string into a node atom,
%% borrowing this node's host part when none was given.
nodename(Name) ->
    case re:split(Name, "@", [{return, list}, unicode]) of
        [_Node, _Host] ->
            list_to_atom(Name);
        [Node] ->
            [_, Host] = re:split(atom_to_list(node()), "@", [{return, list}, unicode]),
            list_to_atom(Node ++ "@" ++ Host)
    end.
|
||||
|
||||
%% Insert Suffix plus the OS pid between the node and host parts of
%% Name, yielding a node atom unique to this escript invocation.
append_node_suffix(Name, Suffix) ->
    case re:split(Name, "@", [{return, list}, unicode]) of
        [Node, Host] ->
            list_to_atom(Node ++ Suffix ++ os:getpid() ++ "@" ++ Host);
        [Node] ->
            list_to_atom(Node ++ Suffix ++ os:getpid())
    end.
|
||||
|
||||
%% For windows???
%% Create (if needed) the mnesia directory DataDir/NodeName, print its
%% path for the calling script to capture, then halt with exit code 0.
create_mnesia_dir(DataDir, NodeName) ->
    MnesiaDir = filename:join(DataDir, NodeName),
    %% Result deliberately ignored — the directory may already exist.
    file:make_dir(MnesiaDir),
    io:format("~s", [MnesiaDir]),
    halt(0).
|
||||
|
||||
%% Validate an Erlang-terms config file.  Halts with 0 when the file is
%% readable and valid (or all problems are mere warnings), 1 otherwise.
chkconfig(File) ->
    case file:consult(File) of
        {ok, Terms} ->
            case validate(Terms) of
                ok ->
                    halt(0);
                {error, Problems} ->
                    lists:foreach(fun print_issue/1, Problems),
                    %% halt(1) if any problems were errors
                    halt(case [x || {error, _} <- Problems] of
                             [] -> 0;
                             _ -> 1
                         end)
            end;
        {error, {Line, Mod, Term}} ->
            %% NOTE(review): an iolist is passed where a format string
            %% is usually given; io:format accepts deep character lists
            %% as the format, so this works without ~-directives.
            io:format(standard_error, ["Error on line ", file:format_error({Line, Mod, Term}), "\n"], []),
            halt(1);
        {error, Error} ->
            io:format(standard_error, ["Error reading config file: ", File, " ", file:format_error(Error), "\n"], []),
            halt(1)
    end.
|
||||
|
||||
%%
%% Given a string or binary, parse it into a list of terms, ala file:consult/0
%%
consult(Str) when is_list(Str) ->
    consult([], Str, []);
consult(Bin) when is_binary(Bin)->
    consult([], binary_to_list(Bin), []).

%% Tokenise and parse one term at a time, threading erl_scan's
%% continuation.  Returns the accumulated terms in input order on eof,
%% or {error, Info} on a scan error.
consult(Cont, Str, Acc) ->
    case erl_scan:tokens(Cont, Str, 0) of
        {done, Result, Remaining} ->
            case Result of
                {ok, Tokens, _} ->
                    {ok, Term} = erl_parse:parse_term(Tokens),
                    consult([], Remaining, [Term | Acc]);
                {eof, _Other} ->
                    lists:reverse(Acc);
                {error, Info, _} ->
                    {error, Info}
            end;
        {more, Cont1} ->
            %% Scanner wants more input but none is left: feed eof so
            %% the final token run is flushed.
            consult(Cont1, eof, Acc)
    end.
|
||||
|
||||
%%
%% Run every validation fun against the config terms: 'ok' when all of
%% them return true, {error, Failures} otherwise.
%%
validate([Terms]) ->
    Results = lists:map(fun(ValidateFun) -> ValidateFun(Terms) end,
                        get_validation_funs()),
    case lists:filter(fun(Res) -> Res /= true end, Results) of
        [] ->
            ok;
        Failures ->
            {error, Failures}
    end.
|
||||
|
||||
%% Some initial and basic checks for the app.config file
%% Currently empty: no checks are performed, so validate/1 always
%% returns ok.
get_validation_funs() ->
    [ ].
|
||||
|
||||
%% Report a single validation problem on standard_error, prefixed by
%% its severity.
print_issue({warning, Text}) ->
    io:format(standard_error, "Warning in app.config: ~s~n", [Text]);
print_issue({error, Text}) ->
    io:format(standard_error, "Error in app.config: ~s~n", [Text]).
|
||||
|
||||
%% Local replacement for string:join/2 (being obsoleted).  lists:join/2
%% would do, but it only appeared in OTP 19, which is newer than the
%% oldest release this script must support, so we keep a private copy
%% and hope it covers most unicode use cases.
join([], Sep) when is_list(Sep) ->
    [];
join([First | Rest], Sep) ->
    lists:foldl(fun(Word, Acc) -> Acc ++ Sep ++ Word end, First, Rest).
|
|
@ -0,0 +1,178 @@
|
|||
#!/usr/bin/make -f
# -*- makefile -*-

## default globals (overridable from the command line / environment)
TARGET ?= emqx/emqx
QEMU_ARCH ?= x86_64
ARCH ?= amd64
QEMU_VERSION ?= v5.0.0-2
OS ?= alpine

## Derived values: expand once at parse time (:=) instead of on every
## reference — TARGET is fixed once the command line has been read.
EMQX_NAME := $(subst emqx/,,$(TARGET))
ARCH_LIST := amd64 arm64v8 arm32v7 i386 s390x
|
||||
|
||||
.PHONY: docker
## Full single-arch pipeline: build, tag and export the image.
docker: docker-build docker-tag docker-save
|
||||
|
||||
.PHONY: docker-prepare
## One-time host setup: enable the experimental `docker manifest`
## command for both the client and the daemon.
docker-prepare:
## Prepare the machine before any code installation scripts
# @echo "PREPARE: Setting up dependencies."
# @apt update -y
# @apt install --only-upgrade docker-ce -y

## Update docker configuration to enable docker manifest command
	@echo "PREPARE: Updating docker configuration"
	@mkdir -p $$HOME/.docker

# enable experimental to use docker manifest command
# NOTE(review): the tee calls below overwrite any existing
# config.json/daemon.json — intended for throwaway CI hosts only.
	@echo '{ "experimental": "enabled" }' | tee $$HOME/.docker/config.json
# enable experimental
	@echo '{ "experimental": true, "storage-driver": "overlay2", "max-concurrent-downloads": 50, "max-concurrent-uploads": 50 }' | tee /etc/docker/daemon.json
	@service docker restart
|
||||
|
||||
.PHONY: docker-build
## Build the single-arch image $(TARGET):build-$(OS)-$(ARCH), using
## qemu user-mode emulation so non-x86_64 images can be built on CI.
docker-build:
## Build Docker image
	@echo "DOCKER BUILD: Build Docker image."
	@echo "DOCKER BUILD: build version -> $(PKG_VSN)."
	@echo "DOCKER BUILD: arch - $(ARCH)."
	@echo "DOCKER BUILD: qemu arch - $(QEMU_ARCH)."
	@echo "DOCKER BUILD: docker repo - $(TARGET) "
	@echo "DOCKER BUILD: emqx name - $(EMQX_NAME)."
	@echo "DOCKER BUILD: emqx version - $(EMQX_DEPS_DEFAULT_VSN)."

## Prepare qemu to build images other than x86_64 on travis
	@echo "PREPARE: Qemu" \
	&& docker run --rm --privileged multiarch/qemu-user-static:register --reset

# Fetch the static qemu binary for the target arch into ./tmp, where
# the Dockerfile expects to find it.
	@mkdir -p tmp \
	&& cd tmp \
	&& curl -L -o qemu-$(QEMU_ARCH)-static.tar.gz https://github.com/multiarch/qemu-user-static/releases/download/$(QEMU_VERSION)/qemu-$(QEMU_ARCH)-static.tar.gz \
	&& tar xzf qemu-$(QEMU_ARCH)-static.tar.gz \
	&& cd -

	@docker build --no-cache \
		--build-arg EMQX_DEPS_DEFAULT_VSN=$(EMQX_DEPS_DEFAULT_VSN) \
		--build-arg BUILD_FROM=emqx/build-env:erl22.3-alpine-$(ARCH) \
		--build-arg RUN_FROM=$(ARCH)/alpine:3.11 \
		--build-arg EMQX_NAME=$(EMQX_NAME) \
		--build-arg QEMU_ARCH=$(QEMU_ARCH) \
		--tag $(TARGET):build-$(OS)-$(ARCH) \
		-f deploy/docker/Dockerfile .
|
||||
|
||||
.PHONY: docker-tag
## Tag every locally-present build-$(OS)-<arch> image with its
## versioned name; the amd64 image also becomes the bare $(PKG_VSN) tag.
docker-tag:
	@echo "DOCKER TAG: Tag Docker image."
	@for arch in $(ARCH_LIST); do \
		if [ -n "$$(docker images -q $(TARGET):build-$(OS)-$${arch})" ]; then \
			docker tag $(TARGET):build-$(OS)-$${arch} $(TARGET):$(PKG_VSN)-$(OS)-$${arch}; \
			echo "DOCKER TAG: $(TARGET):$(PKG_VSN)-$(OS)-$${arch}"; \
			if [ $${arch} = amd64 ]; then \
				docker tag $(TARGET):$(PKG_VSN)-$(OS)-amd64 $(TARGET):$(PKG_VSN); \
				echo "DOCKER TAG: $(TARGET):$(PKG_VSN)"; \
			fi; \
		fi; \
	done
|
||||
|
||||
.PHONY: docker-save
## Export each locally-present tagged image as a zip archive under
## _packages/$(EMQX_NAME)/ (zip -m removes the intermediate tarball).
docker-save:
	@echo "DOCKER SAVE: Save Docker image."

	@mkdir -p _packages/$(EMQX_NAME)

	@if [ -n "$$(docker images -q $(TARGET):$(PKG_VSN))" ]; then \
		docker save $(TARGET):$(PKG_VSN) > $(EMQX_NAME)-docker-$(PKG_VSN); \
		zip -r -m $(EMQX_NAME)-docker-$(PKG_VSN).zip $(EMQX_NAME)-docker-$(PKG_VSN); \
		mv ./$(EMQX_NAME)-docker-$(PKG_VSN).zip _packages/$(EMQX_NAME)/$(EMQX_NAME)-docker-$(PKG_VSN).zip; \
	fi

	@for arch in $(ARCH_LIST); do \
		if [ -n "$$(docker images -q $(TARGET):$(PKG_VSN)-$(OS)-$${arch})" ]; then \
			docker save $(TARGET):$(PKG_VSN)-$(OS)-$${arch} > $(EMQX_NAME)-docker-$(PKG_VSN)-$(OS)-$${arch}; \
			zip -r -m $(EMQX_NAME)-docker-$(PKG_VSN)-$(OS)-$${arch}.zip $(EMQX_NAME)-docker-$(PKG_VSN)-$(OS)-$${arch}; \
			mv ./$(EMQX_NAME)-docker-$(PKG_VSN)-$(OS)-$${arch}.zip _packages/$(EMQX_NAME)/$(EMQX_NAME)-docker-$(PKG_VSN)-$(OS)-$${arch}.zip; \
		fi; \
	done
|
||||
|
||||
.PHONY: docker-push
## Push the versioned image (which is also re-tagged and pushed as
## :latest) plus every per-arch image that exists locally.
docker-push:
	@echo "DOCKER PUSH: Push Docker image.";
	@echo "DOCKER PUSH: pushing - $(TARGET):$(PKG_VSN).";

	@if [ -n "$$(docker images -q $(TARGET):$(PKG_VSN))" ]; then \
		docker push $(TARGET):$(PKG_VSN); \
		docker tag $(TARGET):$(PKG_VSN) $(TARGET):latest; \
		docker push $(TARGET):latest; \
	fi;

	@for arch in $(ARCH_LIST); do \
		if [ -n "$$(docker images -q $(TARGET):$(PKG_VSN)-$(OS)-$${arch})" ]; then \
			docker push $(TARGET):$(PKG_VSN)-$(OS)-$${arch}; \
		fi; \
	done
|
||||
|
||||
.PHONY: docker-manifest-list
## Assemble multi-arch manifest lists for $(PKG_VSN) and :latest from
## whichever per-arch images exist locally, annotate each entry with
## its canonical os/arch(/variant) triple, then push both manifests.
docker-manifest-list:
# Build up the two `docker manifest create` command lines in shell
# variables, appending only arches whose image is present, then eval.
	version="docker manifest create --amend $(TARGET):$(PKG_VSN)"; \
	latest="docker manifest create --amend $(TARGET):latest"; \
	for arch in $(ARCH_LIST); do \
		if [ -n "$$(docker images -q $(TARGET):$(PKG_VSN)-$(OS)-$${arch})" ];then \
			version="$${version} $(TARGET):$(PKG_VSN)-$(OS)-$${arch} "; \
			latest="$${latest} $(TARGET):$(PKG_VSN)-$(OS)-$${arch} "; \
		fi; \
	done; \
	eval $$version; \
	eval $$latest;

# Map our arch naming (e.g. arm64v8) to docker's --arch/--variant.
	for arch in $(ARCH_LIST); do \
		case $${arch} in \
			"amd64") \
				if [ -n "$$(docker images -q $(TARGET):$(PKG_VSN)-$(OS)-$${arch})" ]; then \
					docker manifest annotate $(TARGET):$(PKG_VSN) $(TARGET):$(PKG_VSN)-$(OS)-amd64 --os=linux --arch=amd64; \
					docker manifest annotate $(TARGET):latest $(TARGET):$(PKG_VSN)-$(OS)-amd64 --os=linux --arch=amd64; \
				fi; \
				;; \
			"arm64v8") \
				if [ -n "$$(docker images -q $(TARGET):$(PKG_VSN)-$(OS)-$${arch})" ]; then \
					docker manifest annotate $(TARGET):$(PKG_VSN) $(TARGET):$(PKG_VSN)-$(OS)-arm64v8 --os=linux --arch=arm64 --variant=v8; \
					docker manifest annotate $(TARGET):latest $(TARGET):$(PKG_VSN)-$(OS)-arm64v8 --os=linux --arch=arm64 --variant=v8; \
				fi; \
				;; \
			"arm32v7") \
				if [ -n "$$(docker images -q $(TARGET):$(PKG_VSN)-$(OS)-$${arch})" ]; then \
					docker manifest annotate $(TARGET):$(PKG_VSN) $(TARGET):$(PKG_VSN)-$(OS)-arm32v7 --os=linux --arch=arm --variant=v7; \
					docker manifest annotate $(TARGET):latest $(TARGET):$(PKG_VSN)-$(OS)-arm32v7 --os=linux --arch=arm --variant=v7; \
				fi; \
				;; \
			"i386") \
				if [ -n "$$(docker images -q $(TARGET):$(PKG_VSN)-$(OS)-$${arch})" ]; then \
					docker manifest annotate $(TARGET):$(PKG_VSN) $(TARGET):$(PKG_VSN)-$(OS)-i386 --os=linux --arch=386; \
					docker manifest annotate $(TARGET):latest $(TARGET):$(PKG_VSN)-$(OS)-i386 --os=linux --arch=386; \
				fi; \
				;; \
			"s390x") \
				if [ -n "$$(docker images -q $(TARGET):$(PKG_VSN)-$(OS)-$${arch})" ]; then \
					docker manifest annotate $(TARGET):$(PKG_VSN) $(TARGET):$(PKG_VSN)-$(OS)-s390x --os=linux --arch=s390x; \
					docker manifest annotate $(TARGET):latest $(TARGET):$(PKG_VSN)-$(OS)-s390x --os=linux --arch=s390x; \
				fi; \
				;; \
		esac; \
	done;

	docker manifest inspect $(TARGET):$(PKG_VSN)
	docker manifest push $(TARGET):$(PKG_VSN);
	docker manifest inspect $(TARGET):latest
	docker manifest push $(TARGET):latest;
|
||||
|
||||
.PHONY: docker-clean
## Remove the versioned image and every per-arch image produced by
## docker-build/docker-tag.
docker-clean:
	@echo "DOCKER CLEAN: Clean Docker image."

	@if [ -n "$$(docker images -q $(TARGET):$(PKG_VSN))" ]; then docker rmi -f $$(docker images -q $(TARGET):$(PKG_VSN)); fi

# NOTE: the terminating `fi` needs a `;` before `done` — the previous
# `fi \` continuation produced the invalid shell token sequence
# `fi done` and made this recipe fail.
	@for arch in $(ARCH_LIST); do \
		if [ -n "$$(docker images -q $(TARGET):$(PKG_VSN)-$(OS)-$${arch})" ]; then \
			docker rmi -f $$(docker images -q $(TARGET):$(PKG_VSN)-$(OS)-$${arch}); \
		fi; \
	done
|
|
@ -0,0 +1,78 @@
|
|||
##--------------------------------------------------------------------
|
||||
## LDAP Auth Plugin
|
||||
##--------------------------------------------------------------------
|
||||
|
||||
## LDAP server list, separated by ','.
|
||||
##
|
||||
## Value: String
|
||||
auth.ldap.servers = 127.0.0.1
|
||||
|
||||
## LDAP server port.
|
||||
##
|
||||
## Value: Port
|
||||
auth.ldap.port = 389
|
||||
|
||||
## LDAP pool size
|
||||
##
|
||||
## Value: Number
|
||||
auth.ldap.pool = 8
|
||||
|
||||
## LDAP Bind DN.
|
||||
##
|
||||
## Value: DN
|
||||
auth.ldap.bind_dn = cn=root,dc=emqx,dc=io
|
||||
|
||||
## LDAP Bind Password.
|
||||
##
|
||||
## Value: String
|
||||
auth.ldap.bind_password = public
|
||||
|
||||
## LDAP query timeout.
|
||||
##
|
||||
## Value: Number
|
||||
auth.ldap.timeout = 30s
|
||||
|
||||
## Device DN.
|
||||
##
|
||||
## Variables:
|
||||
##
|
||||
## Value: DN
|
||||
auth.ldap.device_dn = ou=device,dc=emqx,dc=io
|
||||
|
||||
## Specified ObjectClass
|
||||
##
|
||||
## Variables:
|
||||
##
|
||||
## Value: string
|
||||
auth.ldap.match_objectclass = mqttUser
|
||||
|
||||
## attributetype for username
|
||||
##
|
||||
## Variables:
|
||||
##
|
||||
## Value: string
|
||||
auth.ldap.username.attributetype = uid
|
||||
|
||||
## attributetype for password
|
||||
##
|
||||
## Variables:
|
||||
##
|
||||
## Value: string
|
||||
auth.ldap.password.attributetype = userPassword
|
||||
|
||||
## Whether to enable SSL.
|
||||
##
|
||||
## Value: true | false
|
||||
auth.ldap.ssl = false
|
||||
|
||||
#auth.ldap.ssl.certfile = etc/certs/cert.pem
|
||||
|
||||
#auth.ldap.ssl.keyfile = etc/certs/key.pem
|
||||
|
||||
#auth.ldap.ssl.cacertfile = etc/certs/cacert.pem
|
||||
|
||||
#auth.ldap.ssl.verify = verify_peer
|
||||
|
||||
#auth.ldap.ssl.fail_if_no_peer_cert = true
|
||||
|
||||
#auth.ldap.ssl.server_name_indication = your_server_name
|
|
@ -0,0 +1,172 @@
|
|||
##--------------------------------------------------------------------
|
||||
## MongoDB Auth/ACL Plugin
|
||||
##--------------------------------------------------------------------
|
||||
|
||||
## MongoDB Topology Type.
|
||||
##
|
||||
## Value: single | unknown | sharded | rs
|
||||
auth.mongo.type = single
|
||||
|
||||
## The set name if type is rs.
|
||||
##
|
||||
## Value: String
|
||||
## auth.mongo.rs_set_name =
|
||||
|
||||
## MongoDB server list.
|
||||
##
|
||||
## Value: String
|
||||
##
|
||||
## Examples: 127.0.0.1:27017,127.0.0.2:27017...
|
||||
auth.mongo.server = 127.0.0.1:27017
|
||||
|
||||
## MongoDB pool size
|
||||
##
|
||||
## Value: Number
|
||||
auth.mongo.pool = 8
|
||||
|
||||
## MongoDB login user.
|
||||
##
|
||||
## Value: String
|
||||
## auth.mongo.login =
|
||||
|
||||
## MongoDB password.
|
||||
##
|
||||
## Value: String
|
||||
## auth.mongo.password =
|
||||
|
||||
## MongoDB AuthSource
|
||||
##
|
||||
## Value: String
|
||||
## Default: mqtt
|
||||
## auth.mongo.auth_source = admin
|
||||
|
||||
## MongoDB database
|
||||
##
|
||||
## Value: String
|
||||
auth.mongo.database = mqtt
|
||||
|
||||
## MongoDB query timeout
|
||||
##
|
||||
## Value: Duration
|
||||
## auth.mongo.query_timeout = 5s
|
||||
|
||||
## Whether to enable SSL connection.
|
||||
##
|
||||
## Value: true | false
|
||||
## auth.mongo.ssl = false
|
||||
|
||||
## SSL keyfile.
|
||||
##
|
||||
## Value: File
|
||||
## auth.mongo.ssl_opts.keyfile =
|
||||
|
||||
## SSL certfile.
|
||||
##
|
||||
## Value: File
|
||||
## auth.mongo.ssl_opts.certfile =
|
||||
|
||||
## SSL cacertfile.
|
||||
##
|
||||
## Value: File
|
||||
## auth.mongo.ssl_opts.cacertfile =
|
||||
|
||||
## MongoDB write mode.
|
||||
##
|
||||
## Value: unsafe | safe
|
||||
## auth.mongo.w_mode =
|
||||
|
||||
## Mongo read mode.
|
||||
##
|
||||
## Value: master | slave_ok
|
||||
## auth.mongo.r_mode =
|
||||
|
||||
## MongoDB topology options.
|
||||
auth.mongo.topology.pool_size = 1
|
||||
auth.mongo.topology.max_overflow = 0
|
||||
## auth.mongo.topology.overflow_ttl = 1000
|
||||
## auth.mongo.topology.overflow_check_period = 1000
|
||||
## auth.mongo.topology.local_threshold_ms = 1000
|
||||
## auth.mongo.topology.connect_timeout_ms = 20000
|
||||
## auth.mongo.topology.socket_timeout_ms = 100
|
||||
## auth.mongo.topology.server_selection_timeout_ms = 30000
|
||||
## auth.mongo.topology.wait_queue_timeout_ms = 1000
|
||||
## auth.mongo.topology.heartbeat_frequency_ms = 10000
|
||||
## auth.mongo.topology.min_heartbeat_frequency_ms = 1000
|
||||
|
||||
## -------------------------------------------------
|
||||
## Auth Query
|
||||
## -------------------------------------------------
|
||||
## Password hash.
|
||||
##
|
||||
## Value: plain | md5 | sha | sha256 | bcrypt
|
||||
auth.mongo.auth_query.password_hash = sha256
|
||||
|
||||
## sha256 with salt suffix
|
||||
## auth.mongo.auth_query.password_hash = sha256,salt
|
||||
|
||||
## sha256 with salt prefix
|
||||
## auth.mongo.auth_query.password_hash = salt,sha256
|
||||
|
||||
## bcrypt with salt prefix
|
||||
## auth.mongo.auth_query.password_hash = salt,bcrypt
|
||||
|
||||
## pbkdf2 with macfun iterations dklen
|
||||
## macfun: md4, md5, ripemd160, sha, sha224, sha256, sha384, sha512
|
||||
## auth.mongo.auth_query.password_hash = pbkdf2,sha256,1000,20
|
||||
|
||||
## Authentication query.
|
||||
auth.mongo.auth_query.collection = mqtt_user
|
||||
|
||||
## Password field(s) in the user document.
|
||||
##
|
||||
## Value: password | password,salt
|
||||
auth.mongo.auth_query.password_field = password
|
||||
|
||||
## Authentication Selector.
|
||||
##
|
||||
## Variables:
|
||||
## - %u: username
|
||||
## - %c: clientid
|
||||
## - %C: common name of client TLS cert
|
||||
## - %d: subject of client TLS cert
|
||||
##
|
||||
## auth.mongo.auth_query.selector = {Field}={Placeholder}
|
||||
auth.mongo.auth_query.selector = username=%u
|
||||
|
||||
## -------------------------------------------------
|
||||
## Super User Query
|
||||
## -------------------------------------------------
|
||||
auth.mongo.super_query.collection = mqtt_user
|
||||
auth.mongo.super_query.super_field = is_superuser
|
||||
#auth.mongo.super_query.selector = username=%u, clientid=%c
|
||||
auth.mongo.super_query.selector = username=%u
|
||||
|
||||
## ACL Selector.
|
||||
##
|
||||
## Multiple selectors could be combined with '$or'
|
||||
## when query acl from mongo.
|
||||
##
|
||||
## e.g.
|
||||
##
|
||||
## With following 2 selectors configured:
|
||||
##
|
||||
## auth.mongo.acl_query.selector.1 = username=%u
|
||||
## auth.mongo.acl_query.selector.2 = username=$all
|
||||
##
|
||||
## And if a client connected using username 'ilyas',
|
||||
## then the following mongo command will be used to
|
||||
## retrieve acl entries:
|
||||
##
|
||||
## db.mqtt_acl.find({$or: [{username: "ilyas"}, {username: "$all"}]});
|
||||
##
|
||||
## Variables:
|
||||
## - %u: username
|
||||
## - %c: clientid
|
||||
##
|
||||
## Examples:
|
||||
##
|
||||
## auth.mongo.acl_query.selector.1 = username=%u,clientid=%c
|
||||
## auth.mongo.acl_query.selector.2 = username=$all
|
||||
## auth.mongo.acl_query.selector.3 = clientid=$all
|
||||
auth.mongo.acl_query.collection = mqtt_acl
|
||||
auth.mongo.acl_query.selector = username=%u
|
|
@ -0,0 +1,110 @@
|
|||
##--------------------------------------------------------------------
|
||||
## PostgreSQL Auth/ACL Plugin
|
||||
##--------------------------------------------------------------------
|
||||
|
||||
## PostgreSQL server address.
|
||||
##
|
||||
## Value: Port | IP:Port
|
||||
##
|
||||
## Examples: 5432, 127.0.0.1:5432, localhost:5432
|
||||
auth.pgsql.server = 127.0.0.1:5432
|
||||
|
||||
## PostgreSQL pool size.
|
||||
##
|
||||
## Value: Number
|
||||
auth.pgsql.pool = 8
|
||||
|
||||
## PostgreSQL username.
|
||||
##
|
||||
## Value: String
|
||||
auth.pgsql.username = root
|
||||
|
||||
## PostgreSQL password.
|
||||
##
|
||||
## Value: String
|
||||
## auth.pgsql.password =
|
||||
|
||||
## PostgreSQL database.
|
||||
##
|
||||
## Value: String
|
||||
auth.pgsql.database = mqtt
|
||||
|
||||
## PostgreSQL database encoding.
|
||||
##
|
||||
## Value: String
|
||||
auth.pgsql.encoding = utf8
|
||||
|
||||
## Whether to enable SSL connection.
|
||||
##
|
||||
## Value: true | false
|
||||
auth.pgsql.ssl = false
|
||||
|
||||
## SSL keyfile.
|
||||
##
|
||||
## Value: File
|
||||
## auth.pgsql.ssl_opts.keyfile =
|
||||
|
||||
## SSL certfile.
|
||||
##
|
||||
## Value: File
|
||||
## auth.pgsql.ssl_opts.certfile =
|
||||
|
||||
## SSL cacertfile.
|
||||
##
|
||||
## Value: File
|
||||
## auth.pgsql.ssl_opts.cacertfile =
|
||||
|
||||
## Authentication query.
|
||||
##
|
||||
## Value: SQL
|
||||
##
|
||||
## Variables:
|
||||
## - %u: username
|
||||
## - %c: clientid
|
||||
## - %C: common name of client TLS cert
|
||||
## - %d: subject of client TLS cert
|
||||
##
|
||||
auth.pgsql.auth_query = select password from mqtt_user where username = '%u' limit 1
|
||||
|
||||
## Password hash.
|
||||
##
|
||||
## Value: plain | md5 | sha | sha256 | bcrypt
|
||||
auth.pgsql.password_hash = sha256
|
||||
|
||||
## sha256 with salt prefix
|
||||
## auth.pgsql.password_hash = salt,sha256
|
||||
|
||||
## sha256 with salt suffix
|
||||
## auth.pgsql.password_hash = sha256,salt
|
||||
|
||||
## bcrypt with salt prefix
|
||||
## auth.pgsql.password_hash = salt,bcrypt
|
||||
|
||||
## pbkdf2 with macfun iterations dklen
|
||||
## macfun: md4, md5, ripemd160, sha, sha224, sha256, sha384, sha512
|
||||
## auth.pgsql.password_hash = pbkdf2,sha256,1000,20
|
||||
|
||||
## Superuser query.
|
||||
##
|
||||
## Value: SQL
|
||||
##
|
||||
## Variables:
|
||||
## - %u: username
|
||||
## - %c: clientid
|
||||
## - %C: common name of client TLS cert
|
||||
## - %d: subject of client TLS cert
|
||||
##
|
||||
auth.pgsql.super_query = select is_superuser from mqtt_user where username = '%u' limit 1
|
||||
|
||||
## ACL query. Comment this query, the ACL will be disabled.
|
||||
##
|
||||
## Value: SQL
|
||||
##
|
||||
## Variables:
|
||||
## - %a: ipaddress
|
||||
## - %u: username
|
||||
## - %c: clientid
|
||||
##
|
||||
## Note: You can add the 'ORDER BY' statement to control the rules match order
|
||||
auth.pgsql.acl_query = select allow, ipaddr, username, clientid, access, topic from mqtt_acl where ipaddr = '%a' or username = '%u' or username = '$all' or clientid = '%c'
|
||||
|
|
@ -0,0 +1,97 @@
|
|||
##--------------------------------------------------------------------
|
||||
## Redis Auth/ACL Plugin
|
||||
##--------------------------------------------------------------------
|
||||
## Redis Server cluster type
|
||||
## single Single redis server
|
||||
## sentinel Redis cluster through sentinel
|
||||
## cluster Redis through cluster
|
||||
auth.redis.type = single
|
||||
|
||||
## Redis server address.
|
||||
##
|
||||
## Value: Port | IP:Port
|
||||
##
|
||||
## Single Redis Server: 127.0.0.1:6379, localhost:6379
|
||||
## Redis Sentinel: 127.0.0.1:26379,127.0.0.2:26379,127.0.0.3:26379
|
||||
## Redis Cluster: 127.0.0.1:6379,127.0.0.2:6379,127.0.0.3:6379
|
||||
auth.redis.server = 127.0.0.1:6379
|
||||
|
||||
## Redis sentinel cluster name.
|
||||
##
|
||||
## Value: String
|
||||
## auth.redis.sentinel = mymaster
|
||||
|
||||
## Redis pool size.
|
||||
##
|
||||
## Value: Number
|
||||
auth.redis.pool = 8
|
||||
|
||||
## Redis database no.
|
||||
##
|
||||
## Value: Number
|
||||
auth.redis.database = 0
|
||||
|
||||
## Redis password.
|
||||
##
|
||||
## Value: String
|
||||
## auth.redis.password =
|
||||
|
||||
## Redis query timeout
|
||||
##
|
||||
## Value: Duration
|
||||
## auth.redis.query_timeout = 5s
|
||||
|
||||
## Authentication query command.
|
||||
##
|
||||
## Value: Redis cmd
|
||||
##
|
||||
## Variables:
|
||||
## - %u: username
|
||||
## - %c: clientid
|
||||
## - %C: common name of client TLS cert
|
||||
## - %d: subject of client TLS cert
|
||||
##
|
||||
## Examples:
|
||||
## - HGET mqtt_user:%u password
|
||||
## - HMGET mqtt_user:%u password
|
||||
## - HMGET mqtt_user:%u password salt
|
||||
auth.redis.auth_cmd = HMGET mqtt_user:%u password
|
||||
|
||||
## Password hash.
|
||||
##
|
||||
## Value: plain | md5 | sha | sha256 | bcrypt
|
||||
auth.redis.password_hash = plain
|
||||
|
||||
## sha256 with salt prefix
|
||||
## auth.redis.password_hash = salt,sha256
|
||||
|
||||
## sha256 with salt suffix
|
||||
## auth.redis.password_hash = sha256,salt
|
||||
|
||||
## bcrypt with salt prefix
|
||||
## auth.redis.password_hash = salt,bcrypt
|
||||
|
||||
## pbkdf2 with macfun iterations dklen
|
||||
## macfun: md4, md5, ripemd160, sha, sha224, sha256, sha384, sha512
|
||||
## auth.redis.password_hash = pbkdf2,sha256,1000,20
|
||||
|
||||
## Superuser query command.
|
||||
##
|
||||
## Value: Redis cmd
|
||||
##
|
||||
## Variables:
|
||||
## - %u: username
|
||||
## - %c: clientid
|
||||
## - %C: common name of client TLS cert
|
||||
## - %d: subject of client TLS cert
|
||||
auth.redis.super_cmd = HGET mqtt_user:%u is_superuser
|
||||
|
||||
## ACL query command.
|
||||
##
|
||||
## Value: Redis cmd
|
||||
##
|
||||
## Variables:
|
||||
## - %u: username
|
||||
## - %c: clientid
|
||||
auth.redis.acl_cmd = HGETALL mqtt_acl:%u
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
##--------------------------------------------------------------------
|
||||
## EMQ X Lua Hook
|
||||
##--------------------------------------------------------------------
|
||||
|
|
@ -0,0 +1,136 @@
|
|||
##--------------------------------------------------------------------
|
||||
## LwM2M Gateway
|
||||
##--------------------------------------------------------------------
|
||||
|
||||
##--------------------------------------------------------------------
|
||||
## Protocols
|
||||
|
||||
# To limit the range of lifetime, in seconds
|
||||
lwm2m.lifetime_min = 1s
|
||||
lwm2m.lifetime_max = 86400s
|
||||
|
||||
# The time window for Q Mode, indicating that after how long time
|
||||
# the downlink commands sent to the client will be cached.
|
||||
#lwm2m.qmode_time_window = 22
|
||||
|
||||
# Auto send observer command to device
|
||||
# on | off
|
||||
#lwm2m.auto_observe = off
|
||||
|
||||
# The mountpoint (topic prefix) used for the lwm2m client after it is connected
|
||||
# Placeholders supported:
|
||||
# '%e': Endpoint Name
|
||||
# '%a': IP Address
|
||||
lwm2m.mountpoint = lwm2m/%e/
|
||||
|
||||
# The topic subscribed by the lwm2m client after it is connected
|
||||
# Placeholders supported:
|
||||
# '%e': Endpoint Name
|
||||
# '%a': IP Address
|
||||
lwm2m.topics.command = dn/#
|
||||
|
||||
# The topic to which the lwm2m client's response is published
|
||||
lwm2m.topics.response = up/resp
|
||||
|
||||
# The topic to which the lwm2m client's notify message is published
|
||||
lwm2m.topics.notify = up/notify
|
||||
|
||||
# The topic to which the lwm2m client's register message is published
|
||||
lwm2m.topics.register = up/resp
|
||||
|
||||
# The topic to which the lwm2m client's update message is published
|
||||
lwm2m.topics.update = up/resp
|
||||
|
||||
# Dir where the object definition files can be found
|
||||
lwm2m.xml_dir = {{ platform_etc_dir }}/lwm2m_xml
|
||||
|
||||
##--------------------------------------------------------------------
|
||||
## UDP Listener options
|
||||
|
||||
## The IP and port of the LwM2M Gateway
|
||||
##
|
||||
## Default: 0.0.0.0:5683
|
||||
## Examples:
|
||||
## lwm2m.bind.udp.x = 0.0.0.0:5683 | :::5683 | 127.0.0.1:5683 | ::1:5683
|
||||
lwm2m.bind.udp.1 = 0.0.0.0:5683
|
||||
#lwm2m.bind.udp.2 = 0.0.0.0:6683
|
||||
|
||||
## Socket options, used for performance tuning
|
||||
##
|
||||
## Examples:
|
||||
## lwm2m.opts.$name = $value
|
||||
## See: https://erlang.org/doc/man/gen_udp.html#type-option
|
||||
lwm2m.opts.buffer = 1024KB
|
||||
lwm2m.opts.recbuf = 1024KB
|
||||
lwm2m.opts.sndbuf = 1024KB
|
||||
lwm2m.opts.read_packets = 20
|
||||
|
||||
##--------------------------------------------------------------------
|
||||
## DTLS Listener Options
|
||||
|
||||
## The DTLS port that LwM2M is listening on.
|
||||
##
|
||||
## Default: 0.0.0.0:5684
|
||||
##
|
||||
## Examples:
|
||||
## lwm2m.bind.dtls.x = 0.0.0.0:5684 | :::5684 | 127.0.0.1:5684 | ::1:5684
|
||||
##
|
||||
lwm2m.bind.dtls.1 = 0.0.0.0:5684
|
||||
#lwm2m.bind.dtls.2 = 0.0.0.0:6684
|
||||
|
||||
## A server only does x509-path validation in mode verify_peer,
|
||||
## as it then sends a certificate request to the client (this
|
||||
## message is not sent if the verify option is verify_none).
|
||||
## You can then also want to specify option fail_if_no_peer_cert.
|
||||
## More information at: http://erlang.org/doc/man/ssl.html
|
||||
##
|
||||
## Value: verify_peer | verify_none
|
||||
#lwm2m.dtls.verify = verify_peer
|
||||
|
||||
## Private key file for DTLS
|
||||
##
|
||||
## Value: File
|
||||
lwm2m.dtls.keyfile = {{ platform_etc_dir }}/certs/key.pem
|
||||
|
||||
## Server certificate for DTLS.
|
||||
##
|
||||
## Value: File
|
||||
lwm2m.dtls.certfile = {{ platform_etc_dir }}/certs/cert.pem
|
||||
|
||||
## PEM-encoded CA certificates for DTLS
|
||||
##
|
||||
## Value: File
|
||||
#lwm2m.dtls.cacertfile = {{ platform_etc_dir }}/certs/cacert.pem
|
||||
|
||||
## Used together with {verify, verify_peer} by an SSL server. If set to true,
|
||||
## the server fails if the client does not have a certificate to send, that is,
|
||||
## sends an empty certificate.
|
||||
##
|
||||
## Value: true | false
|
||||
#lwm2m.dtls.fail_if_no_peer_cert = false
|
||||
|
||||
## This is the single most important configuration option of an Erlang SSL
|
||||
## application. Ciphers (and their ordering) define the way the client and
|
||||
## server encrypt information over the wire, from the initial Diffie-Helman
|
||||
## key exchange, the session key encryption algorithm and the message
|
||||
## digest algorithm. Selecting a good cipher suite is critical for the
|
||||
## application’s data security, confidentiality and performance.
|
||||
##
|
||||
## The cipher list above offers:
|
||||
##
|
||||
## A good balance between compatibility with older browsers.
|
||||
## It can get stricter for Machine-To-Machine scenarios.
|
||||
## Perfect Forward Secrecy.
|
||||
## No old/insecure encryption and HMAC algorithms
|
||||
##
|
||||
## Most of it was copied from Mozilla’s Server Side TLS article
|
||||
##
|
||||
## Value: Ciphers
|
||||
lwm2m.dtls.ciphers = ECDHE-ECDSA-AES256-GCM-SHA384,ECDHE-RSA-AES256-GCM-SHA384,ECDHE-ECDSA-AES256-SHA384,ECDHE-RSA-AES256-SHA384,ECDHE-ECDSA-DES-CBC3-SHA,ECDH-ECDSA-AES256-GCM-SHA384,ECDH-RSA-AES256-GCM-SHA384,ECDH-ECDSA-AES256-SHA384,ECDH-RSA-AES256-SHA384,DHE-DSS-AES256-GCM-SHA384,DHE-DSS-AES256-SHA256,AES256-GCM-SHA384,AES256-SHA256,ECDHE-ECDSA-AES128-GCM-SHA256,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES128-SHA256,ECDHE-RSA-AES128-SHA256,ECDH-ECDSA-AES128-GCM-SHA256,ECDH-RSA-AES128-GCM-SHA256,ECDH-ECDSA-AES128-SHA256,ECDH-RSA-AES128-SHA256,DHE-DSS-AES128-GCM-SHA256,DHE-DSS-AES128-SHA256,AES128-GCM-SHA256,AES128-SHA256,ECDHE-ECDSA-AES256-SHA,ECDHE-RSA-AES256-SHA,DHE-DSS-AES256-SHA,ECDH-ECDSA-AES256-SHA,ECDH-RSA-AES256-SHA,AES256-SHA,ECDHE-ECDSA-AES128-SHA,ECDHE-RSA-AES128-SHA,DHE-DSS-AES128-SHA,ECDH-ECDSA-AES128-SHA,ECDH-RSA-AES128-SHA,AES128-SHA
|
||||
|
||||
## Ciphers for TLS PSK.
|
||||
##
|
||||
## Note that 'lwm2m.dtls.ciphers' and 'lwm2m.dtls.psk_ciphers' cannot
|
||||
## be configured at the same time.
|
||||
## See 'https://tools.ietf.org/html/rfc4279#section-2'.
|
||||
#lwm2m.dtls.psk_ciphers = PSK-AES128-CBC-SHA,PSK-AES256-CBC-SHA,PSK-3DES-EDE-CBC-SHA,PSK-RC4-SHA
|
|
@ -0,0 +1,13 @@
|
|||
##--------------------------------------------------------------------
|
||||
## emqx_prometheus for EMQ X
|
||||
##--------------------------------------------------------------------
|
||||
|
||||
## The Prometheus Push Gateway URL address
|
||||
##
|
||||
## Note: You can comment out this line to disable it
|
||||
prometheus.push.gateway.server = http://127.0.0.1:9091
|
||||
|
||||
## The metrics data push interval (millisecond)
|
||||
##
|
||||
## Default: 15000
|
||||
prometheus.interval = 15000
|
|
@ -0,0 +1,2 @@
|
|||
psk.file.path = {{ platform_etc_dir }}/psk.txt
|
||||
psk.file.delimiter = :
|
|
@ -0,0 +1,115 @@
|
|||
######################################################################
|
||||
## Erlang VM Args for EMQ X Broker
|
||||
######################################################################
|
||||
|
||||
## NOTE:
|
||||
##
|
||||
## Arguments configured in this file might be overridden by configs from `emqx.conf`.
|
||||
##
|
||||
## Some basic VM arguments are to be configured in `emqx.conf`,
|
||||
## such as `node.name` for `-name` and `node.cookie` for `-setcookie`.
|
||||
|
||||
## Sets the maximum number of simultaneously existing processes for this system.
|
||||
+P 2097152
|
||||
|
||||
## Sets the maximum number of simultaneously existing ports for this system.
|
||||
+Q 1048576
|
||||
|
||||
## Sets the maximum number of ETS tables
|
||||
+e 262144
|
||||
|
||||
## Sets the maximum number of atoms the virtual machine can handle.
|
||||
#+t 1048576
|
||||
|
||||
## Set the location of crash dumps
|
||||
#-env ERL_CRASH_DUMP {{ platform_log_dir }}/crash.dump
|
||||
|
||||
## Set how many times generational garbage collections can be done without
|
||||
## forcing a fullsweep collection.
|
||||
-env ERL_FULLSWEEP_AFTER 1000
|
||||
|
||||
## Heartbeat management; auto-restarts VM if it dies or becomes unresponsive
|
||||
## (Disabled by default..use with caution!)
|
||||
#-heart
|
||||
|
||||
## Specify the erlang distributed protocol.
|
||||
## Can be one of: inet_tcp, inet6_tcp, inet_tls
|
||||
#-proto_dist inet_tcp
|
||||
|
||||
## Specify SSL Options in the file if using SSL for Erlang Distribution.
|
||||
## Used only when -proto_dist set to inet_tls
|
||||
#-ssl_dist_optfile {{ platform_etc_dir }}/ssl_dist.conf
|
||||
|
||||
## Specifies the net_kernel tick time in seconds.
|
||||
## This is the approximate time a connected node may be unresponsive until
|
||||
## it is considered down and thereby disconnected.
|
||||
-kernel net_ticktime 120
|
||||
|
||||
## Sets the distribution buffer busy limit (dist_buf_busy_limit).
|
||||
#+zdbbl 8192
|
||||
|
||||
## Sets default scheduler hint for port parallelism.
|
||||
+spp true
|
||||
|
||||
## Sets the number of threads in async thread pool. Valid range is 0-1024.
|
||||
## Increase the parameter if there are many simultaneous file I/O operations.
|
||||
+A 4
|
||||
|
||||
## Sets the default heap size of processes to the size Size.
|
||||
#+hms 233
|
||||
|
||||
## Sets the default binary virtual heap size of processes to the size Size.
|
||||
#+hmbs 46422
|
||||
|
||||
## Sets the default maximum heap size of processes to the size Size.
|
||||
## Defaults to 0, which means that no maximum heap size is used.
|
||||
## For more information, see process_flag(max_heap_size, MaxHeapSize).
|
||||
#+hmax 0
|
||||
|
||||
## Sets the default value for process flag message_queue_data. Defaults to on_heap.
|
||||
#+hmqd on_heap | off_heap
|
||||
|
||||
## Sets the number of IO pollsets to use when polling for I/O.
|
||||
#+IOp 1
|
||||
|
||||
## Sets the number of IO poll threads to use when polling for I/O.
|
||||
## Increase this for busy systems with many concurrent connections.
|
||||
+IOt 4
|
||||
|
||||
## Sets the number of scheduler threads to create and scheduler threads to set online.
|
||||
#+S 8:8
|
||||
|
||||
## Sets the number of dirty CPU scheduler threads to create and dirty CPU scheduler threads to set online.
|
||||
#+SDcpu 8:8
|
||||
|
||||
## Sets the number of dirty I/O scheduler threads to create.
|
||||
+SDio 8
|
||||
|
||||
## Suggested stack size, in kilowords, for scheduler threads.
|
||||
#+sss 32
|
||||
|
||||
## Suggested stack size, in kilowords, for dirty CPU scheduler threads.
|
||||
#+sssdcpu 40
|
||||
|
||||
## Suggested stack size, in kilowords, for dirty IO scheduler threads.
|
||||
#+sssdio 40
|
||||
|
||||
## Sets scheduler bind type.
|
||||
## Can be one of: u, ns, ts, ps, s, nnts, nnps, tnnps, db
|
||||
#+sbt db
|
||||
|
||||
## Sets a user-defined CPU topology.
|
||||
#+sct L0-3c0-3p0N0:L4-7c0-3p1N1
|
||||
|
||||
## Sets the mapping of warning messages for error_logger
|
||||
#+W w
|
||||
|
||||
## Sets time warp mode: no_time_warp | single_time_warp | multi_time_warp
|
||||
#+C no_time_warp
|
||||
|
||||
## Prevents loading information about source filenames and line numbers.
|
||||
#+L
|
||||
|
||||
## Specifies how long time (in milliseconds) to spend shutting down the system.
|
||||
## See: http://erlang.org/doc/man/erl.html
|
||||
-shutdown_time 30000
|
|
@ -0,0 +1,114 @@
|
|||
######################################################################
|
||||
## Erlang VM Args for EMQ X Edge
|
||||
######################################################################
|
||||
|
||||
## NOTE:
|
||||
##
|
||||
## Arguments configured in this file might be overridden by configs from `emqx.conf`.
|
||||
##
|
||||
## Some basic VM arguments are to be configured in `emqx.conf`,
|
||||
## such as `node.name` for `-name` and `node.cookie` for `-setcookie`.
|
||||
|
||||
## Sets the maximum number of simultaneously existing processes for this system.
|
||||
+P 16384
|
||||
## Sets the maximum number of simultaneously existing ports for this system.
|
||||
+Q 4096
|
||||
|
||||
## Sets the maximum number of ETS tables
|
||||
+e 512
|
||||
|
||||
## Sets the maximum number of atoms the virtual machine can handle.
|
||||
+t 262144
|
||||
|
||||
## Set the location of crash dumps
|
||||
-env ERL_CRASH_DUMP {{ platform_log_dir }}/crash.dump
|
||||
|
||||
## Set how many times generational garbage collections can be done without
|
||||
## forcing a fullsweep collection.
|
||||
-env ERL_FULLSWEEP_AFTER 0
|
||||
|
||||
## Heartbeat management; auto-restarts VM if it dies or becomes unresponsive
|
||||
## (Disabled by default..use with caution!)
|
||||
-heart
|
||||
|
||||
## Specify the erlang distributed protocol.
|
||||
## Can be one of: inet_tcp, inet6_tcp, inet_tls
|
||||
#-proto_dist inet_tcp
|
||||
|
||||
## Specify SSL Options in the file if using SSL for Erlang Distribution.
|
||||
## Used only when -proto_dist set to inet_tls
|
||||
#-ssl_dist_optfile {{ platform_etc_dir }}/ssl_dist.conf
|
||||
|
||||
## Specifies the net_kernel tick time in seconds.
|
||||
## This is the approximate time a connected node may be unresponsive until
|
||||
## it is considered down and thereby disconnected.
|
||||
#-kernel net_ticktime 60
|
||||
|
||||
## Sets the distribution buffer busy limit (dist_buf_busy_limit).
|
||||
+zdbbl 1024
|
||||
|
||||
## Sets default scheduler hint for port parallelism.
|
||||
+spp false
|
||||
|
||||
## Sets the number of threads in async thread pool. Valid range is 0-1024.
|
||||
## Increase the parameter if there are many simultaneous file I/O operations.
|
||||
+A 1
|
||||
|
||||
## Sets the default heap size of processes to the size Size.
|
||||
#+hms 233
|
||||
|
||||
## Sets the default binary virtual heap size of processes to the size Size.
|
||||
#+hmbs 46422
|
||||
|
||||
## Sets the default maximum heap size of processes to the size Size.
|
||||
## Defaults to 0, which means that no maximum heap size is used.
|
||||
## For more information, see process_flag(max_heap_size, MaxHeapSize).
|
||||
#+hmax 0
|
||||
|
||||
## Sets the default value for process flag message_queue_data. Defaults to on_heap.
|
||||
#+hmqd on_heap | off_heap
|
||||
|
||||
## Sets the number of IO pollsets to use when polling for I/O.
|
||||
+IOp 1
|
||||
|
||||
## Sets the number of IO poll threads to use when polling for I/O.
|
||||
+IOt 1
|
||||
|
||||
## Sets the number of scheduler threads to create and scheduler threads to set online.
|
||||
+S 1:1
|
||||
|
||||
## Sets the number of dirty CPU scheduler threads to create and dirty CPU scheduler threads to set online.
|
||||
+SDcpu 1:1
|
||||
|
||||
## Sets the number of dirty I/O scheduler threads to create.
|
||||
+SDio 1
|
||||
|
||||
## Suggested stack size, in kilowords, for scheduler threads.
|
||||
#+sss 32
|
||||
|
||||
## Suggested stack size, in kilowords, for dirty CPU scheduler threads.
|
||||
#+sssdcpu 40
|
||||
|
||||
## Suggested stack size, in kilowords, for dirty IO scheduler threads.
|
||||
#+sssdio 40
|
||||
|
||||
## Sets scheduler bind type.
|
||||
## Can be one of: u, ns, ts, ps, s, nnts, nnps, tnnps, db
|
||||
#+sbt db
|
||||
|
||||
## Sets a user-defined CPU topology.
|
||||
#+sct L0-3c0-3p0N0:L4-7c0-3p1N1
|
||||
|
||||
## Sets the mapping of warning messages for error_logger
|
||||
#+W w
|
||||
|
||||
## Sets time warp mode: no_time_warp | single_time_warp | multi_time_warp
|
||||
#+C no_time_warp
|
||||
|
||||
## Prevents loading information about source filenames and line numbers.
|
||||
+L
|
||||
|
||||
## Specifies how long time (in milliseconds) to spend shutting down the system.
|
||||
## See: http://erlang.org/doc/man/erl.html
|
||||
-shutdown_time 10000
|
||||
|
|
@ -0,0 +1,47 @@
|
|||
#!/usr/bin/env escript
|
||||
%% -*- mode: erlang;erlang-indent-level: 4;indent-tabs-mode: nil -*-
|
||||
%% ex: ft=erlang ts=4 sw=4 et
|
||||
%% -------------------------------------------------------------------
|
||||
%%
|
||||
%% nodetool: Helper Script for interacting with live nodes
|
||||
%%
|
||||
%% -------------------------------------------------------------------
|
||||
|
||||
main(_Args) ->
|
||||
io:format(getRef()).
|
||||
|
||||
comparingFun([C1|R1], [C2|R2]) when is_list(C1), is_list(C2);
|
||||
is_integer(C1), is_integer(C2) -> C1 < C2 orelse comparingFun(R1, R2);
|
||||
comparingFun([C1|R1], [C2|R2]) when is_integer(C1), is_list(C2) -> comparingFun(R1, R2);
|
||||
comparingFun([C1|_R1], [C2|_R2]) when is_list(C1), is_integer(C2) -> true;
|
||||
comparingFun(_, _) -> false.
|
||||
|
||||
sortFun(T1, T2) ->
|
||||
C = fun(T) ->
|
||||
[case catch list_to_integer(E) of
|
||||
I when is_integer(I) -> I;
|
||||
_ -> E
|
||||
end || E <- re:split(string:sub_string(T, 2), "[.-]", [{return, list}])]
|
||||
end,
|
||||
comparingFun(C(T1), C(T2)).
|
||||
|
||||
%% Resolve the most recent tag relevant to this checkout.  `git describe`
%% yields the nearest tag (or a bare commit id thanks to --always); all
%% "v*" tags pointing at that commit are then listed.  With no such tags
%% the result is "<BranchName>-<short-commit-id>", otherwise the highest
%% version according to sortFun/2.
latestTag(BranchName) ->
    Described = os:cmd("git describe --abbrev=1 --tags --always") -- "\n",
    CommitId = os:cmd(io_lib:format("git rev-parse --short ~s", [Described])) -- "\n",
    VTags = string:tokens(
              os:cmd(io_lib:format("git tag -l \"v*\" --points-at ~s", [CommitId])), "\n"),
    case VTags of
        [] -> BranchName ++ "-" ++ CommitId;
        _ -> lists:last(lists:sort(fun sortFun/2, VTags))
    end.
|
||||
|
||||
%% Current branch (or tag) name.  Inside a GitHub Actions run (detected
%% via GITHUB_RUN_ID) the name is derived from GITHUB_REF by stripping
%% the refs/heads/ or refs/tags/ prefix; otherwise it comes from the
%% local `git branch` output.
branch() ->
    case os:getenv("GITHUB_RUN_ID") of
        false ->
            os:cmd("git branch | grep -e '^*' | cut -d' ' -f 2") -- "\n";
        _ ->
            GithubRef = os:getenv("GITHUB_REF"),
            re:replace(GithubRef, "^refs/heads/|^refs/tags/", "",
                       [global, {return, list}])
    end.
|
||||
|
||||
%% The version string printed by main/1: long-lived branches (master,
%% dev/*, hotfix/*) are identified by their branch name; anything else
%% falls back to the latest version tag (see latestTag/1).
%%
%% Improvement: branch/0 shells out to git/env on every call; the
%% original invoked it up to twice per getRef/0 call, so the result is
%% now computed once and reused.
getRef() ->
    Branch = branch(),
    case re:run(Branch, "master|^dev/|^hotfix/", [{capture, none}]) of
        match -> Branch;
        _ -> latestTag(Branch)
    end.
|
|
@ -0,0 +1,40 @@
|
|||
-module(mod_config_template).

-export([render/1]).

%% Render a config term list by substituting "${key}" placeholder strings.
%% Placeholder definitions live under the `placeholders` key of Config as
%% {Key, Type, Replacement} tuples, where Type is `var` (replace the item
%% in place) or `elems` (splice a list of elements), and Replacement may
%% be a fun(Config) computing the value at render time.
render(Config) ->
    Placeholders = proplists:get_value(placeholders, Config),
    render_entry(proplists:delete(placeholders, Config), Placeholders).

render_entry(Config, Placeholders) ->
    render_entry(Config, Placeholders, Config).

%% Tuples are rendered element-wise and rebuilt.
render_entry(Entry, Placeholders, Config) when is_tuple(Entry) ->
    list_to_tuple(render_entry(tuple_to_list(Entry), Placeholders, Config));
%% Lists (including strings) are rebuilt item by item: `var` results are
%% appended as single elements, `elems` results are spliced in.
render_entry(Entry, Placeholders, Config) when is_list(Entry) ->
    lists:foldl(
      fun(Item, Acc) ->
              case render_item(Item, Placeholders, Config) of
                  {var, Fun} when is_function(Fun) ->
                      Acc ++ [Fun(Config)];
                  {var, Value} ->
                      Acc ++ [render_entry(Value, Placeholders, Config)];
                  {elems, Fun} when is_function(Fun) ->
                      Acc ++ Fun(Config);
                  {elems, Elems} ->
                      Acc ++ render_entry(Elems, Placeholders, Config)
              end
      end, [], Entry);
%% Anything else (atoms, integers, ...) passes through unchanged.
render_entry(Entry, _Placeholders, _Config) ->
    Entry.

%% Classify one list item: a "${key}" string is looked up in the
%% placeholder table (unknown keys are kept verbatim); nested tuples and
%% lists are rendered recursively; plain terms pass through as {var, _}.
render_item("${" ++ Key0 = Whole, Placeholders, Config) ->
    Key = string:trim(Key0, trailing, "}"),
    case lists:keyfind(Key, 1, Placeholders) of
        false -> {var, Whole};
        {_, Type, Replacement} ->
            {Type, render_entry(Replacement, Placeholders, Config)}
    end;
render_item(Entry, Placeholders, Config) when is_tuple(Entry); is_list(Entry) ->
    {var, render_entry(Entry, Placeholders, Config)};
render_item(Entry, _Placeholders, _Config) ->
    {var, Entry}.
|
|
@ -0,0 +1,34 @@
|
|||
-module(mod_project).

-export([ get_vsn/1
        , coveralls_configs/1
        ]).

%% Package version for rebar: read from the PKG_VSN environment variable
%% (exported by the Makefile) with the first "v" removed.  Crashes with
%% {env_undefined, "PKG_VSN"} when the variable is unset.
get_vsn(_Conf) ->
    case os:getenv("PKG_VSN") of
        false ->
            error({env_undefined, "PKG_VSN"});
        Vsn ->
            re:replace(Vsn, "v", "", [{return, list}])
    end.

%% Coveralls settings for CI runs.  Only populated inside GitHub Actions
%% when GITHUB_TOKEN is available; for pull_request events the PR number
%% (parsed from GITHUB_REF, e.g. "refs/pull/123/merge") is added too.
%% Outside CI this returns [].
coveralls_configs(_Config) ->
    case {os:getenv("GITHUB_ACTIONS"), os:getenv("GITHUB_TOKEN")} of
        {"true", Token} when is_list(Token) ->
            Base = [ {coveralls_repo_token, Token}
                   , {coveralls_service_job_id, os:getenv("GITHUB_RUN_ID")}
                   , {coveralls_commit_sha, os:getenv("GITHUB_SHA")}
                   , {coveralls_service_number, os:getenv("GITHUB_RUN_NUMBER")}
                   , {coveralls_coverdata, "_build/test/cover/*.coverdata"}
                   , {coveralls_service_name, "github"}
                   ],
            case os:getenv("GITHUB_EVENT_NAME") =:= "pull_request"
                 andalso string:tokens(os:getenv("GITHUB_REF"), "/") of
                [_, "pull", PRNO, _] ->
                    [{coveralls_service_pull_request, PRNO} | Base];
                _ ->
                    Base
            end;
        _ ->
            []
    end.
|
|
@ -0,0 +1,83 @@
|
|||
#!/usr/bin/make -f
# -*- makefile -*-

# Release profiles (plain) and their package-building counterparts.
PROFILES := emqx emqx-edge
PKG_PROFILES := emqx-pkg emqx-edge-pkg

# Detect the build platform once.  $(shell) forks a process at parse
# time, so the uname result is cached (the original ran it twice).
UNAME_S := $(shell uname -s)

ifeq ($(UNAME_S),Darwin)
SYSTEM := macos
else ifeq ($(UNAME_S),Linux)
# CentOS is special-cased via the release files and rpm's %{centos_ver}
# macro; every other distro is read from /etc/os-release.
ifneq ($(shell cat /etc/*-release |grep -o -i centos),)
ID := centos
VERSION_ID := $(shell rpm --eval '%{centos_ver}')
else
ID := $(shell sed -n '/^ID=/p' /etc/os-release | sed -r 's/ID=(.*)/\1/g' | sed 's/"//g' )
VERSION_ID := $(shell sed -n '/^VERSION_ID=/p' /etc/os-release | sed -r 's/VERSION_ID=(.*)/\1/g' | sed 's/"//g')
endif
# e.g. "ubuntu18.04" — the system name embedded in package/zip names.
SYSTEM := $(shell echo $(ID)$(VERSION_ID) | sed -r "s/([a-zA-Z]*)-.*/\1/g")
##
## Support RPM and Debian based linux systems
##
ifneq ($(filter $(ID),ubuntu debian raspbian),)
PKGERDIR := deb
else
PKGERDIR := rpm
endif
endif
|
||||
|
||||
# Build a relup (hot-upgrade) release for each plain profile.
# The _checkouts symlink lets rebar3 pick up the previously built libs;
# any previously produced zip for this profile is staged under
# tmp/relup_packages so relup can diff against it.
# FIX: the original used an unquoted `[ ! -z $$(...) ]`, which is fragile
# when the substitution is empty or contains spaces; use `[ -n "..." ]`.
.PHONY: $(PROFILES:%=relup-%)
$(PROFILES:%=relup-%): $(REBAR)
ifneq ($(OS),Windows_NT)
	@ln -snf _build/$(@:relup-%=%)/lib ./_checkouts
	@if [ -n "$$(ls | grep -E "$(@:relup-%=%)-$(SYSTEM)-(.*)-$$(uname -m).zip" | head -1)" ]; then \
		mkdir -p tmp/relup_packages/$(@:relup-%=%); \
		cp $(@:relup-%=%)-$(SYSTEM)-*-$$(uname -m).zip tmp/relup_packages/$(@:relup-%=%); \
	fi
	$(REBAR) as $(@:relup-%=%) relup
endif
|
||||
|
||||
# Assemble a release tarball for every profile (plain and -pkg).
#
# BUGFIX: the original wrapped the EMQX_DESC exports in
# `ifneq ($(shell echo $(@) |grep edge),)`.  Conditional directives are
# evaluated when the Makefile is read, when $@ is still empty, so the
# edge branch could never be taken and EMQX_DESC was always
# "EMQ X Broker" (with literal quotes, too).  A target-specific exported
# variable selects the right description at recipe time instead; the
# plain global default preserves the value for unrelated recipes.
export EMQX_DESC ?= EMQ X Broker
$(PROFILES:%=%-tar) $(PKG_PROFILES:%=%-tar): export EMQX_DESC = $(if $(findstring edge,$@),EMQ X Edge,EMQ X Broker)

.PHONY: $(PROFILES:%=%-tar) $(PKG_PROFILES:%=%-tar)
$(PROFILES:%=%-tar) $(PKG_PROFILES:%=%-tar): $(REBAR)
ifneq ($(OS),Windows_NT)
	@ln -snf _build/$(subst -tar,,$(@))/lib ./_checkouts
endif
	$(REBAR) as $(subst -tar,,$(@)) tar
|
||||
|
||||
# Package a previously built release tarball into a zip archive under
# _packages/<profile>/, repacked so the archive has a top-level emqx/ dir.
# FIX: recursive invocations must use $(MAKE), not a literal `make`, so
# that the jobserver, -n, and command-line variables propagate.
.PHONY: $(PROFILES:%=%-zip)
$(PROFILES:%=%-zip): $(REBAR)
# Only point releases (x.y.z with z >= 1) get a relup built first.
ifneq ($(shell echo $(PKG_VSN) | grep -oE "^[0-9]+\.[0-9]+\.[1-9]+?"),)
	$(MAKE) relup-$(subst -zip,,$(@))
endif
	$(MAKE) $(subst -zip,,$(@))-tar

	@tard="/tmp/emqx_untar_$(PKG_VSN)" \
	&& rm -rf "$${tard}" && mkdir -p "$${tard}/emqx" \
	&& prof="$(subst -zip,,$(@))" \
	&& relpath="$$(pwd)/_build/$${prof}/rel/emqx" \
	&& pkgpath="$$(pwd)/_packages/$${prof}" \
	&& mkdir -p $${pkgpath} \
	&& tarball="$${relpath}/emqx-$(PKG_VSN).tar.gz" \
	&& zipball="$${pkgpath}/$${prof}-$(SYSTEM)-$(PKG_VSN)-$$(uname -m).zip" \
	&& tar zxf "$${tarball}" -C "$${tard}/emqx" \
	&& cd "$${tard}" && zip -q -r "$${zipball}" ./emqx && cd -
|
||||
|
||||
# Build native OS packages (deb/rpm) for the *-pkg profiles.  On systems
# with no recognised packager (PKGERDIR empty), fall back to just the
# zip package.
# FIX: recursive invocations must use $(MAKE), not a literal `make`;
# also `$(PKG_PROFILES:%=%)` is an identity substitution, simplified.
.PHONY: $(PKG_PROFILES)
$(PKG_PROFILES): $(REBAR)
ifneq ($(PKGERDIR),)
	$(MAKE) $(subst -pkg,,$(@))-zip
	$(MAKE) $(@)-tar
	$(MAKE) -C deploy/packages/$(PKGERDIR) clean
	EMQX_REL=$$(pwd) EMQX_BUILD=$(@) PKG_VSN=$(PKG_VSN) SYSTEM=$(SYSTEM) $(MAKE) -C deploy/packages/$(PKGERDIR)
else
	$(MAKE) $(subst -pkg,,$(@))-zip
endif
|
||||
|
|
@ -76,7 +76,7 @@
|
|||
[ {gproc, {git, "https://github.com/uwiger/gproc", {tag, "0.8.0"}}}
|
||||
, {jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}}
|
||||
, {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.7.1"}}}
|
||||
, {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.7.2"}}}
|
||||
, {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.7.4"}}}
|
||||
, {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.7.4"}}}
|
||||
, {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.5.0"}}}
|
||||
, {cuttlefish, {git, "https://github.com/emqx/cuttlefish", {tag, "v3.0.0"}}}
|
||||
|
|
|
@ -1,20 +1,18 @@
|
|||
%% -*-: erlang -*-
|
||||
%%-*- mode: erlang -*-
|
||||
|
||||
case {os:getenv("GITHUB_ACTIONS"), os:getenv("GITHUB_TOKEN")} of
|
||||
{"true", Token} when is_list(Token) ->
|
||||
CONFIG1 = [{coveralls_repo_token, Token},
|
||||
{coveralls_service_job_id, os:getenv("GITHUB_RUN_ID")},
|
||||
{coveralls_commit_sha, os:getenv("GITHUB_SHA")},
|
||||
{coveralls_service_number, os:getenv("GITHUB_RUN_NUMBER")},
|
||||
{coveralls_coverdata, "_build/test/cover/*.coverdata"},
|
||||
{coveralls_service_name, "github"} | CONFIG],
|
||||
case os:getenv("GITHUB_EVENT_NAME") =:= "pull_request"
|
||||
andalso string:tokens(os:getenv("GITHUB_REF"), "/") of
|
||||
[_, "pull", PRNO, _] ->
|
||||
[{coveralls_service_pull_request, PRNO} | CONFIG1];
|
||||
_ ->
|
||||
CONFIG1
|
||||
end;
|
||||
_ ->
|
||||
CONFIG
|
||||
end.
|
||||
%% load the helper modules:
|
||||
CompileOpts = [verbose,report_errors,report_warnings,return_errors,binary],
|
||||
[case compile:file(ModFile, CompileOpts) of
|
||||
{ok, Mod, BinCode} ->
|
||||
{module, _} = code:load_binary(Mod, ModFile, BinCode);
|
||||
Error ->
|
||||
io:format("[error] compile:file(~p) failed: ~p~n", [ModFile, Error])
|
||||
end || ModFile <- filelib:wildcard("mods/*.erl")],
|
||||
|
||||
%% TODO: load the plugins here:
|
||||
%% get user plugins from a text file, fetch it and compile
|
||||
|
||||
%% render the rebar.config by evaluating the placeholders:
|
||||
CONFIG_R = mod_config_template:render(CONFIG),
|
||||
file:write_file("rebar.config.rendered", [io_lib:format("~p.\n", [I]) || I <- CONFIG_R]),
|
||||
CONFIG_R.
|
||||
|
|
8
vars
8
vars
|
@ -1,8 +0,0 @@
|
|||
%% vars here are for test only, not intended for release
|
||||
|
||||
{platform_bin_dir, "bin"}.
|
||||
{platform_data_dir, "data"}.
|
||||
{platform_etc_dir, "etc"}.
|
||||
{platform_lib_dir, "lib"}.
|
||||
{platform_log_dir, "log"}.
|
||||
{platform_plugins_dir, "plugins"}.
|
|
@ -0,0 +1,23 @@
|
|||
%% -*- mode: erlang;erlang-indent-level: 4;indent-tabs-mode: nil -*-
|
||||
%% ex: ft=erlang ts=4 sw=4 et
|
||||
|
||||
%% Platform-specific installation paths
|
||||
{platform_bin_dir, "bin"}.
|
||||
{platform_data_dir, "data"}.
|
||||
{platform_etc_dir, "etc"}.
|
||||
{platform_lib_dir, "lib"}.
|
||||
{platform_log_dir, "log"}.
|
||||
{platform_plugins_dir, "plugins"}.
|
||||
|
||||
%%
|
||||
%% bin/emqx
|
||||
%%
|
||||
{runner_root_dir, "$(cd $(dirname $(readlink $0 || echo $0))/..; pwd -P)"}.
|
||||
{runner_bin_dir, "$RUNNER_ROOT_DIR/bin"}.
|
||||
{runner_etc_dir, "$RUNNER_ROOT_DIR/etc"}.
|
||||
{runner_lib_dir, "$RUNNER_ROOT_DIR/lib"}.
|
||||
{runner_log_dir, "$RUNNER_ROOT_DIR/log"}.
|
||||
{runner_data_dir, "$RUNNER_ROOT_DIR/data"}.
|
||||
{pipe_dir, "/tmp/$RUNNER_SCRIPT/"}.
|
||||
{runner_user, ""}.
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
{enable_plugin_emqx_rule_engine, true}.
|
||||
{enable_plugin_emqx_bridge_mqtt, false}.
|
||||
{vm_args_file, "vm.args"}.
|
||||
{emqx_description, "EMQ X Broker"}.
|
|
@ -0,0 +1,4 @@
|
|||
{enable_plugin_emqx_rule_engine, false}.
|
||||
{enable_plugin_emqx_bridge_mqtt, true}.
|
||||
{vm_args_file, "vm.args.edge"}.
|
||||
{emqx_description, "EMQ X Edge"}.
|
|
@ -0,0 +1,22 @@
|
|||
%% -*- mode: erlang;erlang-indent-level: 4;indent-tabs-mode: nil -*-
|
||||
%% ex: ft=erlang ts=4 sw=4 et
|
||||
|
||||
%% Platform-specific installation paths
|
||||
{platform_bin_dir, ""}.
|
||||
{platform_data_dir, "/var/lib/emqx"}.
|
||||
{platform_etc_dir, "/etc/emqx"}.
|
||||
{platform_lib_dir, ""}.
|
||||
{platform_log_dir, "/var/log/emqx"}.
|
||||
{platform_plugins_dir, "/var/lib/emqx/plugins"}.
|
||||
|
||||
%%
|
||||
%% bin/emqx
|
||||
%%
|
||||
{runner_root_dir, "/usr/lib/emqx"}.
|
||||
{runner_bin_dir, "/usr/bin"}.
|
||||
{runner_etc_dir, "/etc/emqx"}.
|
||||
{runner_lib_dir, "$RUNNER_ROOT_DIR/lib"}.
|
||||
{runner_log_dir, "/var/log/emqx"}.
|
||||
{runner_data_dir, "/var/lib/emqx"}.
|
||||
{pipe_dir, "/tmp/$RUNNER_SCRIPT/"}.
|
||||
{runner_user, "emqx"}.
|
Loading…
Reference in New Issue