Merge pull request #90 from basho/jd-test-audit

A Smörgåsbord of riak_test fixes
This commit is contained in:
Joe DeVivo 2012-11-16 10:46:58 -08:00
commit d8bdde7a97
13 changed files with 270 additions and 227 deletions

View File

@ -19,7 +19,7 @@ rm -rf $RT_DEST_DIR/current
mkdir $RT_DEST_DIR/current
cd $cwd
echo " - Copying devrel to $RT_DEST_DIR/current"
cp -a dev $RT_DEST_DIR/current
cp -p -P -R dev $RT_DEST_DIR/current
echo " - Writing $RT_DEST_DIR/current/VERSION"
echo -n $VERSION > $RT_DEST_DIR/current/VERSION
cd $RT_DEST_DIR

View File

@ -16,7 +16,7 @@ for rel in */dev; do
vsn=$(dirname "$rel")
echo " - Initializing $RT_DEST_DIR/$vsn"
mkdir "$RT_DEST_DIR/$vsn"
cp -a "$rel" "$RT_DEST_DIR/$vsn"
cp -p -P -R "$rel" "$RT_DEST_DIR/$vsn"
done
cd $RT_DEST_DIR
echo " - Creating the git repository"

View File

@ -8,7 +8,9 @@
{rtdev_path, [{root, "/tmp/rt"},
{current, "/tmp/rt/current"},
{"1.2.0", "/tmp/rt/riak-1.2.0"},
{previous, "/tmp/rt/riak-1.2.0"},
{"1.1.4", "/tmp/rt/riak-1.1.4"},
{legacy, "/tmp/rt/riak-1.1.4"},
{"1.0.3", "/tmp/rt/riak-1.0.3"}]},
{basho_bench, "/Users/joe/dev/basho/basho_bench"},
{spam_dir, "/Users/joe/dev/basho/riak_test/search-corpus/spam.0"},

View File

@ -19,8 +19,8 @@
{"1.2.0", "/tmp/rt/riak-1.2.0"},
{"1.1.4", "/tmp/rt/riak-1.1.4"},
{"1.0.3", "/tmp/rt/riak-1.0.3"}]},
{java, [{fat_be_url, "http://s3.amazonaws.com/builds.basho.com/riak-java-client/CURRENT/riak-client-1.0.6-SNAPSHOT-jar-with-dependencies-and-tests.jar"},
{tests_url, "http://s3.amazonaws.com/builds.basho.com/riak-java-client/CURRENT/riak-client-1.0.6-SNAPSHOT-tests.jar"}
{java, [{fat_be_url, "http://s3.amazonaws.com/builds.basho.com/riak-java-client/CURRENT/riak-client-1.0.7-SNAPSHOT-jar-with-dependencies-and-tests.jar"},
{tests_url, "http://s3.amazonaws.com/builds.basho.com/riak-java-client/CURRENT/riak-client-1.0.7-SNAPSHOT-tests.jar"}
]}
]}
]}.

View File

@ -85,9 +85,16 @@ execute(TestModule, TestMetaData) ->
Pid = spawn_link(TestModule, confirm, []),
Return = rec_loop(Pid, TestModule, TestMetaData),
{Status, Reason} = rec_loop(Pid, TestModule, TestMetaData),
group_leader(GroupLeader, self()),
Return.
case Status of
fail ->
ErrorHeader = "================ " ++ atom_to_list(TestModule) ++ " failure stack trace =====================",
ErrorFooter = [ $= || _X <- lists:seq(1,length(ErrorHeader))],
io:format("~s~n~p~n~s~n", [ErrorHeader, Reason, ErrorFooter]);
_ -> meh
end,
{Status, Reason}.
rec_loop(Pid, TestModule, TestMetaData) ->
receive

View File

@ -537,6 +537,8 @@ wait_until_unpingable(Node) ->
?assertEqual(ok, wait_until(Node, F)),
ok.
%% @doc Query capability information on Node over RPC.
%% With the atom 'all', returns the full capability report from
%% riak_core_capability:all/0; with any other Capability key, returns
%% the negotiated value via riak_core_capability:get/1.
%% Note: as an rpc:call/4, this can also return {badrpc, Reason} if
%% the node is unreachable.
capability(Node, all) ->
rpc:call(Node, riak_core_capability, all, []);
capability(Node, Capability) ->
rpc:call(Node, riak_core_capability, get, [Capability]).

View File

@ -55,8 +55,7 @@ run_riak(N, Path, Cmd) ->
setup_harness(_Test, _Args) ->
Path = relpath(root),
%% Stop all discoverable nodes, not just nodes we'll be using for this test.
RTDevPaths = [ DevPath || {_Name, DevPath} <- proplists:delete(root, rt:config(rtdev_path))],
rt:pmap(fun(X) -> stop_all(X ++ "/dev") end, RTDevPaths),
rt:pmap(fun(X) -> stop_all(X ++ "/dev") end, devpaths()),
%% Reset nodes to base state
lager:info("Resetting nodes to fresh state"),
@ -87,14 +86,19 @@ upgrade(Node, NewVersion) ->
stop(Node),
OldPath = relpath(Version),
NewPath = relpath(NewVersion),
C1 = io_lib:format("cp -a \"~s/dev/dev~b/data\" \"~s/dev/dev~b\"",
Commands = [
io_lib:format("cp -p -P -R \"~s/dev/dev~b/data\" \"~s/dev/dev~b\"",
[OldPath, N, NewPath, N]),
C2 = io_lib:format("cp -a \"~s/dev/dev~b/etc\" \"~s/dev/dev~b\"",
[OldPath, N, NewPath, N]),
lager:info("Running: ~s", [C1]),
os:cmd(C1),
lager:info("Running: ~s", [C2]),
os:cmd(C2),
io_lib:format("rm -rf ~s/dev/dev~b/data/*",
[OldPath, N]),
io_lib:format("cp -p -P -R \"~s/dev/dev~b/etc\" \"~s/dev/dev~b\"",
[OldPath, N, NewPath, N])
],
[ begin
lager:info("Running: ~s", [Cmd]),
os:cmd(Cmd)
end || Cmd <- Commands],
VersionMap = orddict:store(N, NewVersion, rt:config(rt_versions)),
rt:set_config(rt_versions, VersionMap),
start(Node),
@ -112,7 +116,7 @@ all_the_app_configs(DevPath) ->
update_app_config(all, Config) ->
lager:info("rtdev:update_app_config(all, ~p)", [Config]),
[ update_app_config(DevPath, Config) || {_Name, DevPath} <- proplists:delete(root, rt:config(rtdev_path))];
[ update_app_config(DevPath, Config) || DevPath <- devpaths()];
update_app_config(Node, Config) when is_atom(Node) ->
N = node_id(Node),
Path = relpath(node_version(N)),
@ -140,7 +144,7 @@ update_app_config_file(ConfigFile, Config) ->
get_backends() ->
Backends = lists:usort(
lists:flatten([ get_backends(DevPath) || {_Name, DevPath} <- proplists:delete(root, rt:config(rtdev_path))])),
lists:flatten([ get_backends(DevPath) || DevPath <- devpaths()])),
case Backends of
[riak_kv_bitcask_backend] -> bitcask;
[riak_kv_eleveldb_backend] -> eleveldb;
@ -221,7 +225,7 @@ stop_all(DevPath) ->
end,
lager:debug("Stopping Node... ~s ~~ ~s.", [Cmd, Status])
end,
rt:pmap(Stop, Devs);
[Stop(D) || D <- Devs];
_ -> lager:debug("~s is not a directory.", [DevPath])
end,
ok.
@ -401,11 +405,13 @@ get_version() ->
teardown() ->
%% Stop all discoverable nodes, not just nodes we'll be using for this test.
RTDevPaths = [ DevPath || {_Name, DevPath} <- proplists:delete(root, rt:config(rtdev_path))],
rt:pmap(fun(X) -> stop_all(X ++ "/dev") end, RTDevPaths).
[stop_all(X ++ "/dev") || X <- devpaths()].
whats_up() ->
io:format("Here's what's running...~n"),
Up = [rpc:call(Node, os, cmd, ["pwd"]) || Node <- nodes()],
[io:format(" ~s~n",[string:substr(Dir, 1, length(Dir)-1)]) || Dir <- Up].
%% @doc All distinct devrel paths from the rtdev_path config section,
%% excluding the 'root' entry. usort both sorts and deduplicates, so a
%% path listed under several version aliases is only processed once.
devpaths() ->
lists:usort([ DevPath || {_Name, DevPath} <- proplists:delete(root, rt:config(rtdev_path))]).

View File

@ -26,44 +26,50 @@ confirm() ->
%% Deploy a node to test against
lager:info("Deploy node to test command line"),
Nodes = rt:deploy_nodes(1),
[Node1] = Nodes,
?assertEqual(ok, rt:wait_until_nodes_ready([Node1])),
[Node] = rt:deploy_nodes(1),
?assertEqual(ok, rt:wait_until_nodes_ready([Node])),
%% It is possible to save some time grouping tests into whether riak
%% should be up or down when the test runs, but it is my opinion that the
%% the individual tests should handle riak in their own context at the
%% expense of testing time
console_test(Node1),
%% Verify node-up behavior
ping_up_test(Node),
attach_up_test(Node),
status_up_test(Node),
console_up_test(Node),
start_up_test(Node),
start_test(Node1),
ping_test(Node1),
restart_test(Node1),
attach_test(Node1),
status_test(Node1),
%% Stop the node, Verify node-down behavior
stop_test(Node),
ping_down_test(Node),
attach_down_test(Node),
status_down_test(Node),
console_test(Node),
start_test(Node),
pass.
%% With Node already running, `riak console` must refuse to start a
%% second instance and print "Node is already running".
console_up_test(Node) ->
lager:info("Node is already up, `riak console` should fail"),
{ok, ConsoleFail} = rt:riak(Node, ["console"]),
?assert(rt:str(ConsoleFail, "Node is already running")),
ok.
console_test(Node) ->
%% Make sure the cluster will start up with /usr/sbin/riak console, then quit
lager:info("Testing riak console on ~s", [Node]),
%% Ensure node is up to start with
rt:start_and_wait(Node),
lager:info("Node is already up, should fail"),
{ok, ConsoleFail} = rt:riak(Node, ["console"]),
?assert(rt:str(ConsoleFail, "Node is already running")),
%% Stop node, to test console working
rt:stop_and_wait(Node),
rt:console(Node, [{expect, "\(abort with ^G\)"},
{send, "riak_core_ring_manager:get_my_ring()."},
{expect, "dict,"}]),
{expect, "dict,"},
{send, "q()."},
{expect, "ok"}]),
rt:wait_until_unpingable(Node),
ok.
%% With Node already running, `riak start` must report
%% "Node is already running!" rather than launching a second instance.
start_up_test(Node) ->
%% Try starting again and check you get the node is already running message
lager:info("Testing riak start now will return 'already running'"),
{ok, StartOut} = rt:riak(Node, ["start"]),
?assert(rt:str(StartOut, "Node is already running!")),
ok.
@ -71,49 +77,40 @@ start_test(Node) ->
%% Test starting with /bin/riak start
lager:info("Testing riak start works on ~s", [Node]),
%% First stop riak
rt:stop_and_wait(Node),
{ok, StartPass} = rt:riak(Node, ["start"]),
?assertMatch(StartPass, ""),
%% Try starting again and check you get the node is already running message
lager:info("Testing riak start now will return 'already running'"),
{ok, StartOut} = rt:riak(Node, ["start"]),
?assert(rt:str(StartOut, "Node is already running!")),
rt:stop_and_wait(Node),
ok.
ping_test(Node) ->
stop_test(Node) ->
?assert(rt:is_pingable(Node)),
ok = rt:stop(Node),
?assertNot(rt:is_pingable(Node)),
ok.
ping_up_test(Node) ->
%% check /usr/sbin/riak ping
lager:info("Testing riak ping on ~s", [Node]),
%% ping / pong
rt:start_and_wait(Node),
%% rt:start_and_wait(Node),
lager:info("Node up, should ping"),
{ok, PongOut} = rt:riak(Node, ["ping"]),
?assert(rt:str(PongOut, "pong")),
ok.
ping_down_test(Node) ->
%% ping / pang
lager:info("Stopping Node"),
rt:stop_and_wait(Node),
lager:info("Node down, should pang"),
{ok, PangOut} = rt:riak(Node, ["ping"]),
?assert(rt:str(PangOut, "not responding to pings")),
ok.
attach_test(Node) ->
%% check /usr/sbin/riak attach')
%% Sort of a punt on this test, it tests that attach
%% connects to the pipe, but doesn't run any commands.
%% This is probably okay for a basic cmd line test
attach_up_test(Node) ->
lager:info("Testing riak attach"),
rt:start_and_wait(Node),
%{ok, AttachOut} = rt:riak(Node, ["attach"]),
%?assert(rt:str(AttachOut, "erlang.pipe.1 \(^D to exit\)")),
rt:attach(Node, [{expect, "\(^D to exit\)"},
{send, "riak_core_ring_manager:get_my_ring()."},
@ -122,38 +119,25 @@ attach_test(Node) ->
ok.
restart_test(Node) ->
lager:info("Testing riak restart on ~s", [Node]),
%% Riak should be running
rt:start_and_wait(Node),
%% Issue restart
{ok, RestartOut} = rt:riak(Node, ["restart"]),
?assert(rt:str(RestartOut, "ok")),
%% Its not that we don't trust you 'ok'
%% but make sure riak is really up
?assert(rt:is_pingable(Node)),
%% With Node stopped, `riak attach` must fail with "Node is not running!".
attach_down_test(Node) ->
lager:info("Testing riak attach while down"),
{ok, AttachOut} = rt:riak(Node, ["attach"]),
?assert(rt:str(AttachOut, "Node is not running!")),
ok.
status_test(Node) ->
status_up_test(Node) ->
lager:info("Test riak-admin status on ~s", [Node]),
% riak-admin status needs things completely started
% to work, so are more careful to wait
rt:start_and_wait(Node),
lager:info("Waiting for status from riak_kv"),
rt:wait_until_status_ready(Node),
lager:info("Now testing 'riak-admin status'"),
{ok, StatusOut} = rt:admin(Node, ["status"]),
io:format("Result of status: ~s", [StatusOut]),
?assert(rt:str(StatusOut, "1-minute stats")),
?assert(rt:str(StatusOut, "kernel_version")),
ok.
%% With Node stopped, `riak-admin status` must fail with
%% "Node is not running!".
status_down_test(Node) ->
lager:info("Test riak-admin status while down"),
{ok, StatusOut} = rt:admin(Node, ["status"]),
?assert(rt:str(StatusOut, "Node is not running!")),
ok.

View File

@ -32,17 +32,16 @@
confirm() ->
rt:config_or_os_env(basho_bench),
rt:config_or_os_env(spam_dir),
%% OldVsns = ["1.0.3", "1.1.4"],
OldVsns = ["1.1.4"],
[verify_upgrade(OldVsn) || OldVsn <- OldVsns],
verify_upgrade(),
lager:info("Test ~p passed", [?MODULE]),
pass.
verify_upgrade(OldVsn) ->
verify_upgrade() ->
%% Only run 2i for level
TestMetaData = riak_test_runner:metadata(),
%% Only run 2i for level
Backend = proplists:get_value(backend, TestMetaData),
OldVsn = proplists:get_value(upgrade_version, TestMetaData, previous),
Config = [{riak_search, [{enabled, true}]}],
%% Uncomment to use settings more prone to cause races

View File

@ -23,8 +23,6 @@
-import(rt, [deploy_nodes/1,
enable_search_hook/2,
get_os_env/1,
get_os_env/2,
get_ring/1,
join/2,
update_app_config/2]).
@ -34,20 +32,21 @@
%% @doc This test verifies that partition repair successfully repairs
%% all data after it has wiped out by a simulated disk crash.
confirm() ->
SpamDir = get_os_env("SPAM_DIR"),
RingSize = list_to_integer(get_os_env("RING_SIZE", "16")),
NVal = get_os_env("N_VAL", undefined),
KVBackend = get_os_env("KV_BACKEND", "bitcask"),
NumNodes = list_to_integer(get_os_env("NUM_NODES", "4")),
HOConcurrency = list_to_integer(get_os_env("HO_CONCURRENCY", "2")),
{KVBackendMod, KVDataDir} = backend_mod_dir(KVBackend),
SpamDir = rt:config_or_os_env(spam_dir),
RingSize = list_to_integer(rt:config_or_os_env(ring_size, "16")),
NVal = rt:config_or_os_env(n_val, undefined),
TestMetaData = riak_test_runner:metadata(),
KVBackend = proplists:get_value(backend, TestMetaData),
NumNodes = list_to_integer(rt:config_or_os_env(num_nodes, "4")),
HOConcurrency = list_to_integer(rt:config_or_os_env(ho_concurrency, "2")),
{_KVBackendMod, KVDataDir} = backend_mod_dir(KVBackend),
Bucket = <<"scotts_spam">>,
lager:info("Build a cluster"),
lager:info("riak_search enabled: true"),
lager:info("ring_creation_size: ~p", [RingSize]),
lager:info("n_val: ~p", [NVal]),
lager:info("KV backend: ~s", [KVBackend]),
lager:info("num nodes: ~p", [NumNodes]),
lager:info("riak_core handoff_concurrency: ~p", [HOConcurrency]),
lager:info("riak_core vnode_management_timer 1000"),
@ -59,10 +58,6 @@ confirm() ->
{vnode_management_timer, 1000},
{handoff_concurrency, HOConcurrency}
]},
{riak_kv,
[
{storage_backend, KVBackendMod}
]},
{riak_search,
[
{enabled, true}
@ -279,7 +274,7 @@ stash_search({_I,{_F,_T}}=K, _Postings=V, Stash) ->
%% @todo broken when run in the style of rtdev_mixed.
stash_path(Service, Partition) ->
Path = rt:config(rtdev_path) ++ "/dev/data_stash",
Path = rt:config(rtdev_path.root) ++ "/dev/data_stash",
Path ++ "/" ++ atom_to_list(Service) ++ "/" ++ integer_to_list(Partition) ++ ".stash".
file_list(Dir) ->
@ -304,12 +299,12 @@ wait_for_repair(Service, {Partition, Node}, Tries) ->
data_path(Node, Suffix, Partition) ->
[Name, _] = string:tokens(atom_to_list(Node), "@"),
Base = rt:config(rtdev_path) ++ "/dev/" ++ Name ++ "/data",
Base = rt:config(rtdev_path.current) ++ "/dev/" ++ Name ++ "/data",
Base ++ "/" ++ Suffix ++ "/" ++ integer_to_list(Partition).
backend_mod_dir("bitcask") ->
backend_mod_dir(bitcask) ->
{riak_kv_bitcask_backend, "bitcask"};
backend_mod_dir("leveldb") ->
backend_mod_dir(eleveldb) ->
{riak_kv_eleveldb_backend, "leveldb"}.
@ -338,7 +333,7 @@ set_search_schema_nval(Bucket, NVal) ->
%% than allowing the internal format to be modified and set you
%% must send the update in the external format.
BucketStr = binary_to_list(Bucket),
SearchCmd = ?FMT("~s/dev/dev1/bin/search-cmd", [rt:config(rtdev_path)]),
SearchCmd = ?FMT("~s/dev/dev1/bin/search-cmd", [rt:config(rtdev_path.current)]),
GetSchema = ?FMT("~s show-schema ~s > current-schema",
[SearchCmd, BucketStr]),
ModifyNVal = ?FMT("sed -E 's/n_val, [0-9]+/n_val, ~s/' "

View File

@ -1,46 +0,0 @@
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2012 Basho Technologies, Inc.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(upgrade).
-export([confirm/0]).
-include_lib("eunit/include/eunit.hrl").
confirm() ->
Nodes = rt:build_cluster(["1.0.3", "1.0.3", "1.1.4", current]),
[Node1, Node2, Node3, _Node4] = Nodes,
lager:info("Writing 100 keys"),
rt:systest_write(Node1, 100, 3),
?assertEqual([], rt:systest_read(Node1, 100, 1)),
rt:upgrade(Node1, current),
lager:info("Ensuring keys still exist"),
rt:stop(Node2),
rt:stop(Node3),
rt:systest_read(Node1, 100, 1),
%% ?assertEqual([], rt:systest_read(Node1, 100, 1)),
wait_until_readable(Node1, 100),
pass.
%% Block (via rt:wait_until) until a systest read of keys 1..N with
%% R=1 on Node reports no errors, i.e. all N keys are readable.
wait_until_readable(Node, N) ->
rt:wait_until(Node,
fun(_) ->
[] == rt:systest_read(Node, N, 1)
end).

View File

@ -22,34 +22,26 @@
-include_lib("eunit/include/eunit.hrl").
confirm() ->
OldVsns = ["1.0.3", "1.1.4"],
[build_cluster(OldVsn, current) || OldVsn <- OldVsns],
[build_cluster(current, OldVsn) || OldVsn <- OldVsns],
lager:info("Test ~p passed", [?MODULE]),
pass.
TestMetaData = riak_test_runner:metadata(),
OldVsn = proplists:get_value(upgrade_version, TestMetaData, previous),
Nodes = [Node1|_] = rt:build_cluster([OldVsn, OldVsn, OldVsn, OldVsn]),
build_cluster(Vsn1, Vsn2) ->
lager:info("Testing versions: ~p <- ~p", [Vsn1, Vsn2]),
Nodes = rt:deploy_nodes([Vsn1, Vsn2]),
[Node1, Node2] = Nodes,
lager:info("Writing 100 keys to ~p", [Node1]),
timer:sleep(1000),
rt:systest_write(Node1, 100, 3),
?assertEqual([], rt:systest_read(Node1, 100, 1)),
lager:info("Join ~p to ~p", [Node2, Node1]),
rt:join(Node2, Node1),
[upgrade(Node, current) || Node <- Nodes],
?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)),
?assertEqual(ok, rt:wait_until_ring_converged(Nodes)),
?assertEqual(ok, rt:wait_until_no_pending_changes(Nodes)),
?assertEqual([], rt:systest_read(Node1, 100, 1)),
(Vsn1 /= current) andalso rt:upgrade(Node1, current),
(Vsn2 /= current) andalso rt:upgrade(Node2, current),
%% Umm.. technically, it'd downgrade
[upgrade(Node, OldVsn) || Node <- Nodes],
pass.
upgrade(Node, NewVsn) ->
lager:info("Upgrading ~p to ~p", [Node, NewVsn]),
rt:upgrade(Node, NewVsn),
timer:sleep(1000),
lager:info("Ensuring keys still exist"),
rt:systest_read(Node1, 100, 1),
?assertEqual([], rt:systest_read(Node1, 100, 1)),
rt:systest_read(Node, 100, 1),
?assertEqual([], rt:systest_read(Node, 100, 1)),
ok.

View File

@ -21,65 +21,159 @@
-export([confirm/0]).
-include_lib("eunit/include/eunit.hrl").
%% 1.3 {riak_kv, anti_entropy} -> [disabled, enabled_v1]
confirm() ->
lager:info("Deploying mixed set of nodes"),
Nodes = rt:deploy_nodes([current, "0.14.2", "1.1.4", "1.0.3"]),
[Node1, Node2, Node3, Node4] = Nodes,
Nodes = rt:deploy_nodes([current, previous, legacy]),
[CNode, PNode, LNode] = Nodes,
lager:info("Verify vnode_routing == proxy"),
?assertEqual(ok, rt:wait_until_capability(Node1, {riak_core, vnode_routing}, proxy)),
lager:info("Verify staged_joins == true"),
?assertEqual(true, rt:capability(CNode, {riak_core, staged_joins})),
%% This test is written with the intent that 1.3 is 'current'
CCapabilities = rt:capability(CNode, all),
assert_capability(CCapabilities, {riak_kv, legacy_keylisting}, false),
assert_capability(CCapabilities, {riak_kv, listkeys_backpressure}, true),
assert_capability(CCapabilities, {riak_core, staged_joins}, true),
assert_capability(CCapabilities, {riak_kv, index_backpressure}, true),
assert_capability(CCapabilities, {riak_pipe, trace_format}, ordsets),
assert_capability(CCapabilities, {riak_kv, mapred_2i_pipe}, true),
assert_capability(CCapabilities, {riak_kv, mapred_system}, pipe),
assert_capability(CCapabilities, {riak_kv, vnode_vclocks}, true),
assert_capability(CCapabilities, {riak_core, vnode_routing}, proxy),
assert_supported(CCapabilities, {riak_core, staged_joins}, [true,false]),
assert_supported(CCapabilities, {riak_core, vnode_routing}, [proxy,legacy]),
assert_supported(CCapabilities, {riak_kv, index_backpressure}, [true,false]),
assert_supported(CCapabilities, {riak_kv, legacy_keylisting}, [false]),
assert_supported(CCapabilities, {riak_kv, listkeys_backpressure}, [true,false]),
assert_supported(CCapabilities, {riak_kv, mapred_2i_pipe}, [true,false]),
assert_supported(CCapabilities, {riak_kv, mapred_system}, [pipe]),
assert_supported(CCapabilities, {riak_kv, vnode_vclocks}, [true,false]),
assert_supported(CCapabilities, {riak_pipe, trace_format}, [ordsets,sets]),
lager:info("Crash riak_core_capability server"),
crash_capability_server(Node1),
crash_capability_server(CNode),
timer:sleep(1000),
lager:info("Verify vnode_routing == proxy after crash"),
?assertEqual(proxy, rt:capability(Node1, {riak_core, vnode_routing})),
lager:info("Verify staged_joins == true after crash"),
?assertEqual(true, rt:capability(CNode, {riak_core, staged_joins})),
lager:info("Building current + 0.14.2 cluster"),
rt:join(Node2, Node1),
?assertEqual(ok, rt:wait_until_all_members([Node1], [Node1, Node2])),
?assertEqual(ok, rt:wait_until_legacy_ringready(Node1)),
lager:info("Building current + legacy cluster"),
rt:join(LNode, CNode),
?assertEqual(ok, rt:wait_until_all_members([CNode], [CNode, LNode])),
?assertEqual(ok, rt:wait_until_legacy_ringready(CNode)),
lager:info("Verifying vnode_routing == legacy"),
?assertEqual(ok, rt:wait_until_capability(Node1, {riak_core, vnode_routing}, legacy)),
LCapabilities = rt:capability(CNode, all),
assert_capability(LCapabilities, {riak_kv, legacy_keylisting}, false),
assert_capability(LCapabilities, {riak_kv, listkeys_backpressure}, true),
assert_capability(LCapabilities, {riak_core, staged_joins}, false),
assert_capability(LCapabilities, {riak_kv, index_backpressure}, false),
assert_capability(LCapabilities, {riak_pipe, trace_format}, sets),
assert_capability(LCapabilities, {riak_kv, mapred_2i_pipe}, true),
assert_capability(LCapabilities, {riak_kv, mapred_system}, pipe),
assert_capability(LCapabilities, {riak_kv, vnode_vclocks}, true),
assert_capability(LCapabilities, {riak_core, vnode_routing}, proxy),
assert_supported(LCapabilities, {riak_core, staged_joins}, [true,false]),
assert_supported(LCapabilities, {riak_core, vnode_routing}, [proxy,legacy]),
assert_supported(LCapabilities, {riak_kv, index_backpressure}, [true,false]),
assert_supported(LCapabilities, {riak_kv, legacy_keylisting}, [false]),
assert_supported(LCapabilities, {riak_kv, listkeys_backpressure}, [true,false]),
assert_supported(LCapabilities, {riak_kv, mapred_2i_pipe}, [true,false]),
assert_supported(LCapabilities, {riak_kv, mapred_system}, [pipe]),
assert_supported(LCapabilities, {riak_kv, vnode_vclocks}, [true,false]),
assert_supported(LCapabilities, {riak_pipe, trace_format}, [ordsets,sets]),
lager:info("Crash riak_core_capability server"),
crash_capability_server(Node1),
crash_capability_server(CNode),
timer:sleep(1000),
lager:info("Verify vnode_routing == legacy after crash"),
?assertEqual(legacy, rt:capability(Node1, {riak_core, vnode_routing})),
lager:info("Verify staged_joins == false after crash"),
?assertEqual(false, rt:capability(CNode, {riak_core, staged_joins})),
lager:info("Adding 1.1.4 node to cluster"),
rt:join(Node3, Node2),
?assertEqual(ok, rt:wait_until_all_members([Node1], [Node1, Node2, Node3])),
?assertEqual(ok, rt:wait_until_legacy_ringready(Node1)),
lager:info("Adding previous node to cluster"),
rt:join(PNode, LNode),
?assertEqual(ok, rt:wait_until_all_members([CNode], [CNode, LNode, PNode])),
?assertEqual(ok, rt:wait_until_legacy_ringready(CNode)),
lager:info("Verifying vnode_routing == legacy"),
?assertEqual(legacy, rt:capability(Node1, {riak_core, vnode_routing})),
lager:info("Verify staged_joins == true after crash"),
?assertEqual(false, rt:capability(CNode, {riak_core, staged_joins})),
lager:info("Upgrade 0.14.2 node"),
rt:upgrade(Node2, current),
PCapabilities = rt:capability(CNode, all),
assert_capability(PCapabilities, {riak_kv, legacy_keylisting}, false),
assert_capability(PCapabilities, {riak_kv, listkeys_backpressure}, true),
assert_capability(PCapabilities, {riak_core, staged_joins}, false),
assert_capability(PCapabilities, {riak_kv, index_backpressure}, false),
assert_capability(PCapabilities, {riak_pipe, trace_format}, sets),
assert_capability(PCapabilities, {riak_kv, mapred_2i_pipe}, true),
assert_capability(PCapabilities, {riak_kv, mapred_system}, pipe),
assert_capability(PCapabilities, {riak_kv, vnode_vclocks}, true),
assert_capability(PCapabilities, {riak_core, vnode_routing}, proxy),
assert_supported(PCapabilities, {riak_core, staged_joins}, [true,false]),
assert_supported(PCapabilities, {riak_core, vnode_routing}, [proxy,legacy]),
assert_supported(PCapabilities, {riak_kv, index_backpressure}, [true,false]),
assert_supported(PCapabilities, {riak_kv, legacy_keylisting}, [false]),
assert_supported(PCapabilities, {riak_kv, listkeys_backpressure}, [true,false]),
assert_supported(PCapabilities, {riak_kv, mapred_2i_pipe}, [true,false]),
assert_supported(PCapabilities, {riak_kv, mapred_system}, [pipe]),
assert_supported(PCapabilities, {riak_kv, vnode_vclocks}, [true,false]),
assert_supported(PCapabilities, {riak_pipe, trace_format}, [ordsets,sets]),
lager:info("Verifying vnode_routing == proxy"),
?assertEqual(ok, rt:wait_until_capability(Node1, {riak_core, vnode_routing}, proxy)),
lager:info("Upgrade Legacy node"),
rt:upgrade(LNode, current),
lager:info("Adding 1.0.3 node to cluster"),
rt:join(Node4, Node1),
?assertEqual(ok, rt:wait_until_nodes_ready([Node1, Node2, Node3, Node4])),
lager:info("Verify staged_joins == true after upgrade of legacy -> current"),
?assertEqual(true, rt:capability(CNode, {riak_core, staged_joins})),
lager:info("Verifying vnode_routing == legacy"),
?assertEqual(legacy, rt:capability(Node1, {riak_core, vnode_routing})),
PCap2 = rt:capability(CNode, all),
assert_capability(PCap2, {riak_kv, legacy_keylisting}, false),
assert_capability(PCap2, {riak_kv, listkeys_backpressure}, true),
assert_capability(PCap2, {riak_core, staged_joins}, true),
assert_capability(PCap2, {riak_kv, index_backpressure}, false),
assert_capability(PCap2, {riak_pipe, trace_format}, sets),
assert_capability(PCap2, {riak_kv, mapred_2i_pipe}, true),
assert_capability(PCap2, {riak_kv, mapred_system}, pipe),
assert_capability(PCap2, {riak_kv, vnode_vclocks}, true),
assert_capability(PCap2, {riak_core, vnode_routing}, proxy),
assert_supported(PCap2, {riak_core, staged_joins}, [true,false]),
assert_supported(PCap2, {riak_core, vnode_routing}, [proxy,legacy]),
assert_supported(PCap2, {riak_kv, index_backpressure}, [true,false]),
assert_supported(PCap2, {riak_kv, legacy_keylisting}, [false]),
assert_supported(PCap2, {riak_kv, listkeys_backpressure}, [true,false]),
assert_supported(PCap2, {riak_kv, mapred_2i_pipe}, [true,false]),
assert_supported(PCap2, {riak_kv, mapred_system}, [pipe]),
assert_supported(PCap2, {riak_kv, vnode_vclocks}, [true,false]),
assert_supported(PCap2, {riak_pipe, trace_format}, [ordsets,sets]),
lager:info("Upgrading 1.0.3 node"),
rt:upgrade(Node4, current),
lager:info("Verifying vnode_routing changes to proxy"),
?assertEqual(ok, rt:wait_until_capability(Node1, {riak_core, vnode_routing}, proxy)),
lager:info("Upgrade 1.1.4 node"),
rt:upgrade(Node3, current),
lager:info("Upgrading Previous node"),
rt:upgrade(PNode, current),
lager:info("Verifying index_backpressue changes to true"),
?assertEqual(ok, rt:wait_until_capability(CNode, {riak_kv, index_backpressure}, true)),
lager:info("Verifying riak_pipe,trace_format changes to ordsets"),
?assertEqual(ok, rt:wait_until_capability(CNode, {riak_pipe, trace_format}, ordsets)),
CCap2 = rt:capability(CNode, all),
assert_capability(CCap2, {riak_kv, legacy_keylisting}, false),
assert_capability(CCap2, {riak_kv, listkeys_backpressure}, true),
assert_capability(CCap2, {riak_core, staged_joins}, true),
assert_capability(CCap2, {riak_kv, index_backpressure}, true),
assert_capability(CCap2, {riak_pipe, trace_format}, ordsets),
assert_capability(CCap2, {riak_kv, mapred_2i_pipe}, true),
assert_capability(CCap2, {riak_kv, mapred_system}, pipe),
assert_capability(CCap2, {riak_kv, vnode_vclocks}, true),
assert_capability(CCap2, {riak_core, vnode_routing}, proxy),
assert_supported(CCap2, {riak_core, staged_joins}, [true,false]),
assert_supported(CCap2, {riak_core, vnode_routing}, [proxy,legacy]),
assert_supported(CCap2, {riak_kv, index_backpressure}, [true,false]),
assert_supported(CCap2, {riak_kv, legacy_keylisting}, [false]),
assert_supported(CCap2, {riak_kv, listkeys_backpressure}, [true,false]),
assert_supported(CCap2, {riak_kv, mapred_2i_pipe}, [true,false]),
assert_supported(CCap2, {riak_kv, mapred_system}, [pipe]),
assert_supported(CCap2, {riak_kv, vnode_vclocks}, [true,false]),
assert_supported(CCap2, {riak_pipe, trace_format}, [ordsets,sets]),
%% All nodes are now current version. Test override behavior.
Override = fun(undefined, Prefer) ->
@ -101,23 +195,31 @@ confirm() ->
[rt:update_app_config(Node, Override(legacy, proxy)) || Node <- Nodes],
lager:info("Verify vnode_routing == legacy"),
?assertEqual(legacy, rt:capability(Node1, {riak_core, vnode_routing})),
?assertEqual(legacy, rt:capability(CNode, {riak_core, vnode_routing})),
lager:info("Override: (use: proxy), (prefer: legacy)"),
[rt:update_app_config(Node, Override(proxy, legacy)) || Node <- Nodes],
lager:info("Verify vnode_routing == proxy"),
?assertEqual(proxy, rt:capability(Node1, {riak_core, vnode_routing})),
?assertEqual(proxy, rt:capability(CNode, {riak_core, vnode_routing})),
lager:info("Override: (prefer: legacy)"),
[rt:update_app_config(Node, Override(undefined, legacy)) || Node <- Nodes],
lager:info("Verify vnode_routing == legacy"),
?assertEqual(legacy, rt:capability(Node1, {riak_core, vnode_routing})),
?assertEqual(legacy, rt:capability(CNode, {riak_core, vnode_routing})),
[rt:stop(Node) || Node <- Nodes],
pass.
%% Assert that Capability is negotiated to exactly Value in the
%% Capabilities proplist (as returned by rt:capability(Node, all)).
assert_capability(Capabilities, Capability, Value) ->
lager:info("Checking Capability Setting ~p =:= ~p", [Capability, Value]),
?assertEqual(Value, proplists:get_value(Capability, Capabilities)).
%% Assert the exact list of supported values advertised for Capability,
%% taken from the '$supported' sub-proplist of the capability report.
%% Value must match in both content and order (?assertEqual on lists).
assert_supported(Capabilities, Capability, Value) ->
lager:info("Checking Capability Supported Values ~p =:= ~p", [Capability, Value]),
?assertEqual(Value, proplists:get_value(Capability, proplists:get_value('$supported', Capabilities))).
%% Kill the registered riak_core_capability process on Node so the test
%% can verify capability values survive a server crash/restart.
%% NOTE(review): if whereis returns undefined (server not running),
%% the remote exit/2 call gets a non-pid argument — assumed not to
%% happen in these tests; confirm if reused elsewhere.
crash_capability_server(Node) ->
Pid = rpc:call(Node, erlang, whereis, [riak_core_capability]),
rpc:call(Node, erlang, exit, [Pid, kill]).