Merge pull request #10308 from thalesmg/test-increase-peer-timeout-v50

test(peer): increase init and startup timeout for peer nodes
commit a8f8228a12
Author: Thales Macedo Garitezi
Date: 2023-04-03 15:46:13 -03:00 (committed by GitHub)
5 changed files with 21 additions and 13 deletions


@@ -469,8 +469,8 @@ concurrent_materialized_view_writes: 32
 # accepting writes when the limit is exceeded until a flush completes,
 # and will trigger a flush based on memtable_cleanup_threshold
 # If omitted, Cassandra will set both to 1/4 the size of the heap.
-# memtable_heap_space_in_mb: 2048
-# memtable_offheap_space_in_mb: 2048
+memtable_heap_space_in_mb: 2048
+memtable_offheap_space_in_mb: 2048
 # memtable_cleanup_threshold is deprecated. The default calculation
 # is the only reasonable choice. See the comments on memtable_flush_writers


@@ -12,6 +12,8 @@ services:
     environment:
       CASSANDRA_BROADCAST_ADDRESS: "1.2.3.4"
       CASSANDRA_RPC_ADDRESS: "0.0.0.0"
+      HEAP_NEWSIZE: "128M"
+      MAX_HEAP_SIZE: "2048M"
     volumes:
       - ./certs:/certs
     #ports:


@@ -660,6 +660,7 @@ start_slave(Name, Opts) when is_list(Opts) ->
 start_slave(Name, Opts) when is_map(Opts) ->
     SlaveMod = maps:get(peer_mod, Opts, ct_slave),
     Node = node_name(Name),
+    put_peer_mod(Node, SlaveMod),
     DoStart =
         fun() ->
             case SlaveMod of
@@ -669,8 +670,8 @@ start_slave(Name, Opts) when is_map(Opts) ->
                     [
                         {kill_if_fail, true},
                         {monitor_master, true},
-                        {init_timeout, 10000},
-                        {startup_timeout, 10000},
+                        {init_timeout, 20_000},
+                        {startup_timeout, 20_000},
                         {erl_flags, erl_flags()}
                     ]
                 );
@@ -687,7 +688,6 @@ start_slave(Name, Opts) when is_map(Opts) ->
             throw(Other)
         end,
     pong = net_adm:ping(Node),
-    put_peer_mod(Node, SlaveMod),
     setup_node(Node, Opts),
     ok = snabbkaffe:forward_trace(Node),
     Node.
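Two things change in this helper: the peer init/startup timeouts double from 10000 to 20_000 ms (the underscore is only Erlang's digit separator, supported since OTP 23), and put_peer_mod/2 now runs before the start attempt rather than after the ping, so the node-to-backend mapping exists even when startup times out and cleanup still needs to stop a half-started node. A minimal sketch of what such a mapping might look like, assuming persistent_term storage and a hypothetical get_peer_mod/1 reader (the real helper may store it differently):

%% Hypothetical sketch: record which backend (ct_slave or peer) is
%% starting a node *before* attempting the start, so that teardown can
%% dispatch to the right Mod even if the start itself timed out.
put_peer_mod(Node, SlaveMod) ->
    persistent_term:put({peer_mod, Node}, SlaveMod).

get_peer_mod(Node) ->
    %% default to ct_slave when nothing was recorded
    persistent_term:get({peer_mod, Node}, ct_slave).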


@@ -65,7 +65,7 @@ terminate(_Reason, #{callbacks := Callbacks}) ->
 handle_call({push, Callback}, _From, State = #{callbacks := Callbacks}) ->
     {reply, ok, State#{callbacks := [Callback | Callbacks]}};
 handle_call(terminate, _From, State = #{callbacks := Callbacks}) ->
-    lists:foreach(fun(Fun) -> Fun() end, Callbacks),
+    lists:foreach(fun(Fun) -> catch Fun() end, Callbacks),
     {stop, normal, ok, State};
 handle_call(_Req, _From, State) ->
     {reply, error, State}.
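The janitor runs its registered cleanup callbacks when told to terminate; wrapping each invocation in catch means one crashing callback no longer aborts lists:foreach/2 and skips the rest (note the callbacks run in LIFO order, since push prepends). A small illustration with made-up callbacks:

%% With plain Fun(), the error(boom) callback would crash the foreach
%% and the last callback would never run; catch swallows the crash.
Callbacks = [
    fun() -> ok end,
    fun() -> error(boom) end,
    fun() -> io:format("cleanup still runs~n") end
],
lists:foreach(fun(Fun) -> catch Fun() end, Callbacks).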


@@ -388,7 +388,9 @@ end_per_testcase(_Testcase, Config) ->
                 maps:values(ProducersMapping)
             ),
             ok = wolff:stop_and_delete_supervised_client(KafkaProducerClientId),
-            emqx_common_test_helpers:call_janitor(),
+            %% in CI, apparently this needs more time since the
+            %% machines struggle with all the containers running...
+            emqx_common_test_helpers:call_janitor(60_000),
             ok = snabbkaffe:stop(),
             ok
     end.
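call_janitor/1 now takes an explicit timeout. Assuming it boils down to a gen_server:call/3 on the janitor process shown above (whose terminate handler replies ok before stopping), the sketch below shows why the number matters: gen_server:call defaults to 5000 ms, which a busy CI machine can easily exceed while tearing down containers. This is a hypothetical shape, not the actual helper:

%% Hypothetical sketch; get_janitor_pid/0 is a made-up lookup.
call_janitor(Timeout) ->
    JanitorPid = get_janitor_pid(),
    %% gen_server:call/3 exits the caller if no reply arrives in time,
    %% so a 60_000 ms budget avoids flaky end_per_testcase crashes.
    ok = gen_server:call(JanitorPid, terminate, Timeout).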
@@ -1664,7 +1666,7 @@ t_cluster_group(Config) ->
     || {Name, Opts} <- Cluster
    ],
    on_exit(fun() ->
-        lists:foreach(
+        emqx_misc:pmap(
            fun(N) ->
                ct:pal("stopping ~p", [N]),
                ok = emqx_common_test_helpers:stop_slave(N)
@@ -1875,7 +1877,7 @@ t_cluster_node_down(Config) ->
        Cluster
    ),
    on_exit(fun() ->
-        lists:foreach(
+        emqx_misc:pmap(
            fun(N) ->
                ct:pal("stopping ~p", [N]),
                ok = emqx_common_test_helpers:stop_slave(N)
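Both t_cluster_group and t_cluster_node_down switch their on_exit cleanup from lists:foreach/2 to emqx_misc:pmap/2, stopping the cluster's slave nodes concurrently instead of one by one, so teardown costs roughly the slowest stop rather than the sum of all of them. A minimal sketch of a pmap of this shape (the real emqx_misc:pmap also handles timeouts and error propagation):

%% Spawn one worker per element, then collect replies in list order.
pmap(Fun, List) ->
    Parent = self(),
    Refs = [
        begin
            Ref = make_ref(),
            spawn_link(fun() -> Parent ! {Ref, Fun(X)} end),
            Ref
        end
     || X <- List
    ],
    [
        receive
            {Ref, Result} -> Result
        end
     || Ref <- Refs
    ].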
@@ -1894,11 +1896,15 @@ t_cluster_node_down(Config) ->
     {ok, _} = snabbkaffe:receive_events(SRef0),
     lists:foreach(
         fun(N) ->
-            ?assertMatch(
-                {ok, _},
-                erpc:call(N, emqx_bridge, lookup, [BridgeId]),
-                #{node => N}
+            ?retry(
+                _Sleep1 = 100,
+                _Attempts1 = 50,
+                ?assertMatch(
+                    {ok, _},
+                    erpc:call(N, emqx_bridge, lookup, [BridgeId]),
+                    #{node => N}
+                )
             )
         end,
         Nodes
     ),
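Right after a node goes down, emqx_bridge:lookup/1 on the surviving nodes may briefly fail while the cluster converges, so the assertion is now retried: up to 50 attempts with a 100 ms sleep between them, i.e. roughly a 5-second budget. A sketch of what a ?retry(Sleep, Attempts, Expr) macro of this shape typically expands to (the actual macro presumably lives in one of EMQX's test header files):

%% Re-evaluate the expression until it stops crashing; on the last
%% attempt, let the exception escape so the assertion fails loudly.
retry(_Sleep, _Attempts = 1, Fun) ->
    Fun();
retry(Sleep, Attempts, Fun) when Attempts > 1 ->
    try
        Fun()
    catch
        _:_ ->
            timer:sleep(Sleep),
            retry(Sleep, Attempts - 1, Fun)
    end.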