Mirror of https://github.com/valitydev/riak_test.git (synced 2024-11-06 08:35:22 +00:00)
Merge pull request #1187 from basho/kc-ts_util-update
Break ts_util into three utility modules
This commit is contained in: commit 077e434405
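The commit splits ts_util into three modules: ts_data keeps the DDL, test-data and assertion helpers; ts_setup owns cluster start-up, bucket-type creation/activation and client connections; ts_ops wraps client operations (put/get/query/insert) against a running cluster. A minimal sketch of the calling pattern before and after, assembled from calls that appear in the hunks below (the function name confirm_sketch/0 is illustrative, not part of the commit):

%% Before: one module did everything.
confirm_sketch() ->
    DDL = ts_util:get_ddl(),
    {Cluster, Conn} = ts_util:cluster_and_connect(multiple),
    {ok, _} = ts_util:create_and_activate_bucket_type(Cluster, DDL),
    ok = riakc_ts:put(Conn, ts_util:get_default_bucket(), ts_util:get_valid_select_data()),
    ts_util:single_query(Conn, ts_util:get_valid_qry()).

%% After: one module per concern.
confirm_sketch() ->
    DDL = ts_data:get_ddl(),
    Table = ts_data:get_default_bucket(),
    Cluster = ts_setup:start_cluster(3),
    {ok, _} = ts_setup:create_bucket_type(Cluster, DDL, Table),
    ok = ts_setup:activate_bucket_type(Cluster, Table),
    ok = ts_ops:put(Cluster, Table, ts_data:get_valid_select_data()),
    ts_ops:query(Cluster, ts_data:get_valid_qry()).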
@@ -27,14 +27,11 @@
 -export([confirm/0]).
 
 confirm() ->
-    DDL = ts_util:get_ddl(),
-    Expected =
-        {ok,
-         "GeoCheckin has been activated\n"
-         "\n"
-         "WARNING: Nodes in this cluster can no longer be\n"
-         "downgraded to a version of Riak prior to 2.0\n"},
-    Got = ts_util:create_and_activate_bucket_type(
-              ts_util:build_cluster(multiple), DDL),
+    DDL = ts_data:get_ddl(),
+    Expected = ok,
+    Cluster = ts_setup:start_cluster(3),
+    Table = ts_data:get_default_bucket(),
+    {ok, _} = ts_setup:create_bucket_type(Cluster, DDL, Table),
+    Got = ts_setup:activate_bucket_type(Cluster, Table),
     ?assertEqual(Expected, Got),
     pass.
@@ -227,7 +227,7 @@ query_in_mixed_version_cluster_test(Config) ->
         "WHERE a = 1 AND b = 1 AND c >= 1000 AND c <= 5000 ",
     % ct:pal("COVERAGE ~p", [riakc_ts:get_coverage(rt:pbc(Node_A), <<"grouptab1">>, Query)]),
     {ok, {Cols, Rows}} = run_query(rt:pbc(Node_A), Query),
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {rt_ignore_columns, ExpectedResultSet},
         {ok,{Cols, Rows}}
     ),
@@ -235,7 +235,7 @@ query_in_mixed_version_cluster_test(Config) ->
     %% Test that the 1.3 can query the current version
     %%
     {ok, {Cols, Rows}} = run_query(rt:pbc(Node_B), Query),
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {rt_ignore_columns, ExpectedResultSet},
         {ok,{Cols, Rows}}
     ).
@@ -42,7 +42,7 @@ run_tests(PvalP1, PvalP2) ->
     Data = make_data(PvalP1, PvalP2),
     io:format("Data to be written:\n~p\n...\n~p\n", [hd(Data), lists:last(Data)]),
 
-    Cluster = ts_util:build_cluster(multiple),
+    Cluster = ts_setup:start_cluster(3),
 
     %% use riak-admin to create a bucket
     TableDef = io_lib:format(
@@ -32,11 +32,12 @@ confirm() ->
     QuantumMS = 15 * 60 * 1000,
     UpperBoundExcl = QuantaTally * QuantumMS,
     TimesGeneration = fun() -> lists:seq(1, UpperBoundExcl-1, 3124) end,
-    DDL = ts_util:get_ddl(),
-    Data = ts_util:get_valid_select_data(TimesGeneration),
-    Nodes = ts_util:build_cluster(multiple),
-    Table = ts_util:get_default_bucket(),
-    ts_util:create_table(normal, Nodes, DDL, Table),
+    DDL = ts_data:get_ddl(),
+    Data = ts_data:get_valid_select_data(TimesGeneration),
+    Nodes = ts_setup:start_cluster(3),
+    Table = ts_data:get_default_bucket(),
+    {ok, _} = ts_setup:create_bucket_type(Nodes, DDL, Table),
+    ok = ts_setup:activate_bucket_type(Nodes, Table),
     ok = riakc_ts:put(rt:pbc(hd(Nodes)), Table, Data),
     %% First test on a small range well within the size of a normal query
     SmallData = lists:filter(fun({_, _, Time, _, _}) ->
@@ -50,7 +51,7 @@ confirm() ->
 
 test_replacement_quanta(Table, ExpectedData, Nodes, NumQuanta, QuantumMS) ->
     AdminPid = rt:pbc(lists:nth(3, Nodes)),
-    Qry = ts_util:get_valid_qry(-1, NumQuanta * QuantumMS),
+    Qry = ts_data:get_valid_qry(-1, NumQuanta * QuantumMS),
     {ok, CoverageEntries} = riakc_ts:get_coverage(AdminPid, Table, Qry),
     ?assertEqual(NumQuanta, length(CoverageEntries)),
 
@@ -101,7 +102,7 @@ test_replacement_quanta(Table, ExpectedData, Nodes, NumQuanta, QuantumMS) ->
 test_quanta_range(Table, ExpectedData, Nodes, NumQuanta, QuantumMS) ->
     AdminPid = rt:pbc(lists:nth(3, Nodes)),
     OtherPid = rt:pbc(lists:nth(2, Nodes)),
-    Qry = ts_util:get_valid_qry(-1, NumQuanta * QuantumMS),
+    Qry = ts_data:get_valid_qry(-1, NumQuanta * QuantumMS),
     {ok, CoverageEntries} = riakc_ts:get_coverage(AdminPid, Table, Qry),
     ?assertEqual(NumQuanta, length(CoverageEntries)),
 
@@ -167,7 +168,7 @@ check_data_against_range(Data, {_FieldName, {{Lower, LowerIncl}, {Upper, UpperIn
                   end,
                   Data)).
 
-%% Cheap and easy. ts_util gives us 3 nodes, we know the ports.
+%% Cheap and easy. ts_setup gives us 3 nodes, we know the ports.
 alternate_port(10017) ->
     10027;
 alternate_port(10027) ->
@@ -27,14 +27,10 @@
 -export([confirm/0]).
 
 confirm() ->
-    DDL = ts_util:get_ddl(),
-    Expected =
-        {ok,
-         "GeoCheckin created\n"
-         "\n"
-         "WARNING: After activating GeoCheckin, nodes in this cluster\n"
-         "can no longer be downgraded to a version of Riak prior to 2.0\n"},
-    Got = ts_util:create_bucket_type(
-              ts_util:build_cluster(multiple), DDL),
+    DDL = ts_data:get_ddl(),
+    Expected = ok,
+    Cluster = ts_setup:start_cluster(3),
+    Table = ts_data:get_default_bucket(),
+    {Got, _} = ts_setup:create_bucket_type(Cluster, DDL, Table),
     ?assertEqual(Expected, Got),
     pass.
@@ -36,7 +36,7 @@ suite() ->
      {with_quotes_prop, ?PROP_WITH_QUOTES}]).
 
 init_per_suite(Config) ->
-    Cluster = ts_util:build_cluster(multiple),
+    Cluster = ts_setup:start_cluster(3),
     [{cluster, Cluster} | Config].
 
 end_per_suite(_Config) ->
@@ -80,9 +80,10 @@ re_create_fail_test(Ctx) ->
     pass.
 
 describe_test(Ctx) ->
-    C = client_pid(Ctx),
-    Qry = io_lib:format("DESCRIBE ~s", [ts_util:get_default_bucket()]),
-    Got = ts_util:single_query(C, Qry),
+
+    Qry = io_lib:format("DESCRIBE ~s", [ts_data:get_default_bucket()]),
+    Cluster = ?config(cluster, Ctx),
+    Got = ts_ops:query(Cluster, Qry),
     ?assertEqual(table_described(), Got),
     pass.
 
@@ -90,8 +91,8 @@ get_put_data_test(Ctx) ->
     C = client_pid(Ctx),
     Data = [{<<"a">>, <<"b">>, 10101010, <<"not bad">>, 42.24}],
     Key = [<<"a">>, <<"b">>, 10101010],
-    ?assertMatch(ok, riakc_ts:put(C, ts_util:get_default_bucket(), Data)),
-    Got = riakc_ts:get(C, ts_util:get_default_bucket(), Key, []),
+    ?assertMatch(ok, riakc_ts:put(C, ts_data:get_default_bucket(), Data)),
+    Got = riakc_ts:get(C, ts_data:get_default_bucket(), Key, []),
     lager:info("get_put_data_test Got ~p", [Got]),
     ?assertMatch({ok, {_, Data}}, Got),
     pass.
@@ -117,14 +118,14 @@ get_set_property_test(Ctx) ->
 get_bucket_props_from_node(Node) ->
     rpc:call(
       Node, riak_core_claimant, get_bucket_type,
-      [list_to_binary(ts_util:get_default_bucket()), undefined, false]).
+      [list_to_binary(ts_data:get_default_bucket()), undefined, false]).
 
 client_pid(Ctx) ->
     Nodes = ?config(cluster, Ctx),
     rt:pbc(hd(Nodes)).
 
 ddl_common() ->
-    ts_util:get_ddl(small).
+    ts_data:get_ddl(small).
 
 table_described() ->
     {ok, {[<<"Column">>,<<"Type">>,<<"Is Null">>,<<"Primary Key">>, <<"Local Key">>, <<"Interval">>, <<"Unit">>],
@@ -34,7 +34,7 @@ suite() ->
     [{timetrap,{minutes,10}}].
 
 init_per_suite(Config) ->
-    [_Node|_] = Cluster = ts_util:build_cluster(multiple),
+    [_Node|_] = Cluster = ts_setup:start_cluster(3),
     [{cluster, Cluster} | Config].
 
 end_per_suite(_Config) ->
@@ -87,7 +87,7 @@ select_grouped_field_test(Ctx) ->
         "WHERE a = 1 AND b = 1 AND c >= 1 AND c <= 1000 "
         "GROUP BY c",
     {ok, {Cols, Rows}} = run_query(Ctx, Query),
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {rt_ignore_columns, [{1},{2},{3}]},
         {ok,{Cols, lists:sort(Rows)}}
     ).
@@ -111,7 +111,7 @@ group_by_2_test(Ctx) ->
         "WHERE a = 1 AND b = 1 AND c >= 1 AND c <= 1000 "
         "GROUP BY d",
     {ok, {Cols, Rows}} = run_query(Ctx, Query),
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {rt_ignore_columns, [{1,500.5},{2,500.5},{3,500.5}]},
         {ok,{Cols, lists:sort(Rows)}}
     ).
@@ -103,7 +103,7 @@ basic_table_hinted_handoff_test(Config) ->
         "SELECT * FROM mytab "
         "WHERE a = 1 AND b >= 1000 AND b <= 5000",
     ExpectedResultSet = [{1,B} || B <- lists:seq(1000,5000,1000)],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {rt_ignore_columns, ExpectedResultSet},
         run_query(rt:pbc(Node_B), Query, Config)
     ),
@@ -132,7 +132,7 @@ additional_columns_on_local_key_table_hinted_handoff_test(Config) ->
         "SELECT * FROM mytab "
         "WHERE a = 1 AND b >= 1000 AND b <= 5000",
     ExpectedResultSet = [{1,B,C} || B <- lists:seq(1000,5000,1000), C <- lists:seq(1000,5000,1000)],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {rt_ignore_columns, ExpectedResultSet},
         run_query(rt:pbc(Node_B), Query, Config)
     ),
@@ -34,7 +34,7 @@ suite() ->
     [{timetrap,{minutes,10}}].
 
 init_per_suite(Config) ->
-    [Node|_] = Cluster = ts_util:build_cluster(multiple),
+    [Node|_] = Cluster = ts_setup:start_cluster(3),
     Pid = rt:pbc(Node),
     % create tables and populate them with data
     create_data_def_1(Pid),
@@ -81,7 +81,7 @@ run_query(Ctx, Query) ->
 %%%
 
 create_data_def_1(Pid) ->
-    ts_util:assert_row_sets({ok, {[],[]}},riakc_ts:query(Pid, table_def_1())),
+    ts_data:assert_row_sets({ok, {[],[]}},riakc_ts:query(Pid, table_def_1())),
     ok = riakc_ts:put(Pid, <<"table1">>, [{1,1,N,1} || N <- lists:seq(1,6000)]).
 
 column_names_def_1() ->
@@ -100,7 +100,7 @@ select_exclusive_def_1_test(Ctx) ->
         "SELECT * FROM table1 WHERE a = 1 AND b = 1 AND c > 0 AND c < 11",
     Results =
         [{1,1,N,1} || N <- lists:seq(1,10)],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {ok, {column_names_def_1(), Results}},
         run_query(Ctx, Query)
     ).
@@ -110,7 +110,7 @@ select_exclusive_def_1_2_test(Ctx) ->
         "SELECT * FROM table1 WHERE a = 1 AND b = 1 AND c > 44 AND c < 54",
     Results =
         [{1,1,N,1} || N <- lists:seq(45,53)],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {ok, {column_names_def_1(), Results}},
         run_query(Ctx, Query)
     ).
@@ -120,7 +120,7 @@ select_exclusive_def_1_across_quanta_1_test(Ctx) ->
         "SELECT * FROM table1 WHERE a = 1 AND b = 1 AND c > 500 AND c < 1500",
     Results =
         [{1,1,N,1} || N <- lists:seq(501,1499)],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {ok, {column_names_def_1(), Results}},
         run_query(Ctx, Query)
     ).
@@ -131,7 +131,7 @@ select_exclusive_def_1_across_quanta_2_test(Ctx) ->
         "SELECT * FROM table1 WHERE a = 1 AND b = 1 AND c > 500 AND c < 4500",
     Results =
         [{1,1,N,1} || N <- lists:seq(501,4499)],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {ok, {column_names_def_1(), Results}},
         run_query(Ctx, Query)
     ).
@@ -141,7 +141,7 @@ select_inclusive_def_1_test(Ctx) ->
         "SELECT * FROM table1 WHERE a = 1 AND b = 1 AND c >= 11 AND c <= 20",
     Results =
         [{1,1,N,1} || N <- lists:seq(11,20)],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {ok, {column_names_def_1(), Results}},
         run_query(Ctx, Query)
     ).
@@ -178,7 +178,7 @@ where_clause_must_cover_the_partition_key_missing_c_test(Ctx) ->
 %%%
 
 create_data_def_2(Pid) ->
-    ts_util:assert_row_sets({ok, {[],[]}}, riakc_ts:query(Pid, table_def_2())),
+    ts_data:assert_row_sets({ok, {[],[]}}, riakc_ts:query(Pid, table_def_2())),
     ok = riakc_ts:put(Pid, <<"table2">>, [{N,1,1,1} || N <- lists:seq(1,200)]).
 
 table_def_2() ->
@@ -194,7 +194,7 @@ select_exclusive_def_2_test(Ctx) ->
         "SELECT * FROM table2 WHERE a > 0 AND a < 11",
     Results =
         [{N,1,1,1} || N <- lists:seq(1,10)],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {ok, {column_names_def_1(), Results}},
         run_query(Ctx, Query)
     ).
@@ -204,7 +204,7 @@ select_inclusive_def_2_test(Ctx) ->
         "SELECT * FROM table2 WHERE a >= 11 AND a <= 20",
     Results =
         [{N,1,1,1} || N <- lists:seq(11,20)],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {ok, {column_names_def_1(), Results}},
         run_query(Ctx, Query)
     ).
@@ -214,7 +214,7 @@ select_inclusive_def_2_test(Ctx) ->
 %%%
 
 create_data_def_3(Pid) ->
-    ts_util:assert_row_sets({ok, {[],[]}}, riakc_ts:query(Pid, table_def_3())),
+    ts_data:assert_row_sets({ok, {[],[]}}, riakc_ts:query(Pid, table_def_3())),
     ok = riakc_ts:put(Pid, <<"table3">>, [{1,N} || N <- lists:seq(1,200)]).
 
 column_names_def_3() ->
@@ -231,7 +231,7 @@ select_exclusive_def_3_test(Ctx) ->
         "SELECT * FROM table3 WHERE b > 0 AND b < 11 AND a = 1",
     Results =
         [{1,N} || N <- lists:seq(1,10)],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {ok, {column_names_def_3(), Results}},
         run_query(Ctx, Query)
     ).
@@ -241,7 +241,7 @@ select_inclusive_def_3_test(Ctx) ->
         "SELECT * FROM table3 WHERE b >= 11 AND b <= 20 AND a = 1",
     Results =
         [{1,N} || N <- lists:seq(11,20)],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {ok, {column_names_def_3(), Results}},
         run_query(Ctx, Query)
     ).
@@ -252,7 +252,7 @@ select_inclusive_def_3_test(Ctx) ->
 %%%
 
 create_data_def_4(Pid) ->
-    ts_util:assert_row_sets({ok, {[],[]}}, riakc_ts:query(Pid, table_def_4())),
+    ts_data:assert_row_sets({ok, {[],[]}}, riakc_ts:query(Pid, table_def_4())),
     ok = riakc_ts:put(Pid, <<"table4">>, [{1,1,N} || N <- lists:seq(1,200)]).
 
 column_names_def_4() ->
@@ -270,7 +270,7 @@ select_exclusive_def_4_test(Ctx) ->
         "SELECT * FROM table4 WHERE a = 1 AND b = 1 AND c > 0 AND c < 11",
     Results =
         [{1,1,N} || N <- lists:seq(1,10)],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {ok, {column_names_def_4(), Results}},
         run_query(Ctx, Query)
     ).
@@ -280,7 +280,7 @@ select_inclusive_def_4_test(Ctx) ->
         "SELECT * FROM table4 WHERE a = 1 AND b = 1 AND c >= 11 AND c <= 20",
     Results =
         [{1,1,N} || N <- lists:seq(11,20)],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {ok, {column_names_def_4(), Results}},
         run_query(Ctx, Query)
     ).
@@ -300,13 +300,13 @@ table_def_5() ->
     "PRIMARY KEY ((a,b,c),a,b,c))".
 
 create_data_def_5(Pid) ->
-    ts_util:assert_row_sets({ok, {[],[]}}, riakc_ts:query(Pid, table_def_5())),
+    ts_data:assert_row_sets({ok, {[],[]}}, riakc_ts:query(Pid, table_def_5())),
     ok = riakc_ts:put(Pid, <<"table5">>, [{1,1,N} || N <- lists:seq(1,200)]).
 
 select_def_5_test(Ctx) ->
     Query =
         "SELECT * FROM table5 WHERE a = 1 AND b = 1 AND c = 20",
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {ok, {column_names_def_5(), [{1,1,20}]}},
         run_query(Ctx, Query)
     ).
@@ -316,7 +316,7 @@ select_def_5_test(Ctx) ->
 %%%
 
 create_data_def_8(Pid) ->
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {ok, {[],[]}},
         riakc_ts:query(Pid,
             "CREATE TABLE table8 ("
@@ -332,7 +332,7 @@ d_equal_than_filter_test(Ctx) ->
     Query =
         "SELECT * FROM table8 "
         "WHERE a = 1 AND b = 1 AND c >= 2500 AND c <= 4500 AND d = 3000",
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {rt_ignore_columns, [{1,1,3000,3000}]},
         run_query(Ctx, Query)
     ).
@@ -343,7 +343,7 @@ d_greater_than_filter_test(Ctx) ->
         "WHERE a = 1 AND b = 1 AND c >= 2500 AND c <= 4500 AND d > 3000",
     Results =
         [{1,1,N,N} || N <- lists:seq(3001,4500)],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {rt_ignore_columns, Results},
         run_query(Ctx, Query)
     ).
@@ -354,7 +354,7 @@ d_greater_or_equal_to_filter_test(Ctx) ->
         "WHERE a = 1 AND b = 1 AND c >= 2500 AND c <= 4500 AND d >= 3000",
     Results =
         [{1,1,N,N} || N <- lists:seq(3000,4500)],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {rt_ignore_columns, Results},
         run_query(Ctx, Query)
     ).
@@ -365,7 +365,7 @@ d_not_filter_test(Ctx) ->
         "WHERE a = 1 AND b = 1 AND c >= 2500 AND c <= 4500 AND d != 3000",
     Results =
         [{1,1,N,N} || N <- lists:seq(2500,4500), N /= 3000],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {rt_ignore_columns, Results},
         run_query(Ctx, Query)
     ).
@@ -456,7 +456,7 @@ double_pk_double_boolean_lk_test(Ctx) ->
     Query =
         "SELECT * FROM double_pk_double_boolean_lk_test "
         "WHERE a = 0.5 AND b = true",
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {rt_ignore_columns, [{0.5,true}]},
         run_query(Ctx, Query)
     ).
@@ -476,7 +476,7 @@ boolean_pk_boolean_double_lk_test(Ctx) ->
     Query =
         "SELECT * FROM boolean_pk_boolean_double_lk_test "
        "WHERE a = false AND b = 0.5",
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {rt_ignore_columns, [{false,0.5}]},
         run_query(Ctx, Query)
     ).
@@ -532,7 +532,7 @@ all_types_1_test(Ctx) ->
          I <- ts_booleans()
          ,J <- Doubles
         ],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {rt_ignore_columns, Results},
         run_query(Ctx, Query)
     ).
@@ -552,7 +552,7 @@ all_types_or_filter_test(Ctx) ->
          J <- Doubles,
          I == true orelse J == 0.2
         ],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {rt_ignore_columns, Results},
         run_query(Ctx, Query)
     ).
@@ -591,7 +591,7 @@ all_booleans_test(Ctx) ->
     Results =
         [{true,true,true,Bd,Be,Bf,Bg} || Bd <- ts_booleans(), Be <- ts_booleans(),
                                          Bf <- ts_booleans(), Bg <- ts_booleans()],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {rt_ignore_columns,Results},
         run_query(Ctx, Query)
     ).
@@ -603,7 +603,7 @@ all_booleans_filter_on_g_test(Ctx) ->
     Results =
         [{true,true,true,Bd,Be,Bf,false} || Bd <- ts_booleans(), Be <- ts_booleans(),
                                             Bf <- ts_booleans()],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {rt_ignore_columns,Results},
         run_query(Ctx, Query)
     ).
@@ -614,7 +614,7 @@ all_booleans_filter_on_d_and_f_test(Ctx) ->
         "WHERE a = true AND b = true AND c = true AND d = false AND f = true",
     Results =
         [{true,true,true,false,Be,true,Bg} || Be <- ts_booleans(), Bg <- ts_booleans()],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {rt_ignore_columns,Results},
         run_query(Ctx, Query)
     ).
@@ -644,7 +644,7 @@ all_timestamps_across_quanta_test(Ctx) ->
         "WHERE a = 2 AND b > 200 AND b < 3000 AND c = 3",
     Results =
         [{2,B,3,4,5} || B <- lists:seq(300, 2900, 100)],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {rt_ignore_columns,Results},
         run_query(Ctx, Query)
     ).
@@ -655,7 +655,7 @@ all_timestamps_single_quanta_test(Ctx) ->
         "WHERE a = 2 AND b > 200 AND b <= 900 AND c = 3",
     Results =
         [{2,B,3,4,5} || B <- lists:seq(300, 900, 100)],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {rt_ignore_columns,Results},
         run_query(Ctx, Query)
     ).
@@ -42,7 +42,7 @@ suite() ->
     [{timetrap,{minutes,10}}].
 
 init_per_suite(Config) ->
-    Cluster = ts_util:build_cluster(single),
+    Cluster = ts_setup:start_cluster(1),
     [C1 | _] = [rt:pbc(Node) || Node <- Cluster],
     ok = create_tables(C1),
     Data = make_data(),
@@ -27,12 +27,15 @@
 -include_lib("eunit/include/eunit.hrl").
 
 confirm() ->
-    TestType = normal,
-    DDL = ts_util:get_ddl(),
-    Obj = [ts_util:get_valid_obj()],
+
+    Table = ts_data:get_default_bucket(),
+    DDL = ts_data:get_ddl(),
+    Obj = [ts_data:get_valid_obj()],
+    Cluster = ts_setup:start_cluster(3),
+    {ok, _} = ts_setup:create_bucket_type(Cluster, DDL, Table),
+    ok = ts_setup:activate_bucket_type(Cluster, Table),
     Expected = ok,
-    Got = ts_util:ts_put(
-              ts_util:cluster_and_connect(multiple), TestType, DDL, Obj),
+    Got = ts_ops:put(Cluster, Table, Obj),
     ?assertEqual(Expected, Got),
     pass.
 
@@ -34,7 +34,7 @@ suite() ->
     [{timetrap,{minutes,10}}].
 
 init_per_suite(Config) ->
-    [Node|_] = Cluster = ts_util:build_cluster(multiple),
+    [Node|_] = Cluster = ts_setup:start_cluster(3),
     create_data_def_1(rt:pbc(Node)),
     [{cluster, Cluster} | Config].
 
@@ -90,7 +90,7 @@ start_key_query_greater_than_1999_test(Ctx) ->
         "SELECT * FROM table1 WHERE a = 1 AND b = 1 AND c > 1999 AND c <= 3800",
     Results =
         [{1,1,N} || N <- lists:seq(2000,3800)],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {ok, {column_names_def_1(), Results}},
         riakc_ts:query(client_pid(Ctx), Query)
     ).
@@ -100,7 +100,7 @@ start_key_query_greater_than_2000_test(Ctx) ->
         "SELECT * FROM table1 WHERE a = 1 AND b = 1 AND c > 2000 AND c <= 3800",
     Results =
         [{1,1,N} || N <- lists:seq(2001,3800)],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {ok, {column_names_def_1(), Results}},
         riakc_ts:query(client_pid(Ctx), Query)
     ).
@@ -110,7 +110,7 @@ start_key_query_greater_than_2001_test(Ctx) ->
         "SELECT * FROM table1 WHERE a = 1 AND b = 1 AND c > 2001 AND c <= 3800",
     Results =
         [{1,1,N} || N <- lists:seq(2002,3800)],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {ok, {column_names_def_1(), Results}},
         riakc_ts:query(client_pid(Ctx), Query)
     ).
@@ -120,7 +120,7 @@ start_key_query_greater_or_equal_to_1999_test(Ctx) ->
         "SELECT * FROM table1 WHERE a = 1 AND b = 1 AND c >= 1999 AND c <= 3800",
     Results =
         [{1,1,N} || N <- lists:seq(1999,3800)],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {ok, {column_names_def_1(), Results}},
         riakc_ts:query(client_pid(Ctx), Query)
     ).
@@ -130,7 +130,7 @@ start_key_query_greater_or_equal_to_2000_test(Ctx) ->
         "SELECT * FROM table1 WHERE a = 1 AND b = 1 AND c >= 2000 AND c <= 3800",
     Results =
         [{1,1,N} || N <- lists:seq(2000,3800)],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {ok, {column_names_def_1(), Results}},
         riakc_ts:query(client_pid(Ctx), Query)
     ).
@@ -140,7 +140,7 @@ start_key_query_greater_or_equal_to_2001_test(Ctx) ->
         "SELECT * FROM table1 WHERE a = 1 AND b = 1 AND c > 2001 AND c <= 3800",
     Results =
         [{1,1,N} || N <- lists:seq(2002,3800)],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {ok, {column_names_def_1(), Results}},
         riakc_ts:query(client_pid(Ctx), Query)
     ).
@@ -154,7 +154,7 @@ end_key_query_less_than_3999_test(Ctx) ->
         "SELECT * FROM table1 WHERE a = 1 AND b = 1 AND c >= 2500 AND c < 3999",
     Results =
         [{1,1,N} || N <- lists:seq(2500,3998)],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {ok, {column_names_def_1(), Results}},
         riakc_ts:query(client_pid(Ctx), Query)
     ).
@@ -164,7 +164,7 @@ end_key_query_less_than_4000_test(Ctx) ->
         "SELECT * FROM table1 WHERE a = 1 AND b = 1 AND c >= 2500 AND c < 4000",
     Results =
         [{1,1,N} || N <- lists:seq(2500,3999)],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {ok, {column_names_def_1(), Results}},
         riakc_ts:query(client_pid(Ctx), Query)
     ).
@@ -174,7 +174,7 @@ end_key_query_less_than_4001_test(Ctx) ->
         "SELECT * FROM table1 WHERE a = 1 AND b = 1 AND c >= 2500 AND c < 4001",
     Results =
         [{1,1,N} || N <- lists:seq(2500,4000)],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {ok, {column_names_def_1(), Results}},
         riakc_ts:query(client_pid(Ctx), Query)
     ).
@@ -184,7 +184,7 @@ end_key_query_less_than_or_equal_to_3999_test(Ctx) ->
         "SELECT * FROM table1 WHERE a = 1 AND b = 1 AND c >= 2500 AND c <= 3999",
     Results =
         [{1,1,N} || N <- lists:seq(2500,3999)],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {ok, {column_names_def_1(), Results}},
         riakc_ts:query(client_pid(Ctx), Query)
     ).
@@ -194,7 +194,7 @@ end_key_query_less_than_or_equal_to_4000_test(Ctx) ->
         "SELECT * FROM table1 WHERE a = 1 AND b = 1 AND c >= 2500 AND c <= 4000",
     Results =
         [{1,1,N} || N <- lists:seq(2500,4000)],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {ok, {column_names_def_1(), Results}},
         riakc_ts:query(client_pid(Ctx), Query)
     ).
@@ -204,7 +204,7 @@ end_key_query_less_than_or_equal_to_4001_test(Ctx) ->
         "SELECT * FROM table1 WHERE a = 1 AND b = 1 AND c >= 2500 AND c <= 4001",
     Results =
         [{1,1,N} || N <- lists:seq(2500,4001)],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {ok, {column_names_def_1(), Results}},
         riakc_ts:query(client_pid(Ctx), Query)
     ).
@@ -214,7 +214,7 @@ start_key_query_greater_than_500_one_quantum_test(Ctx) ->
         "SELECT * FROM table1 WHERE a = 1 AND b = 1 AND c > 500 AND c < 700",
     Results =
         [{1,1,N} || N <- lists:seq(501,699)],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {ok, {column_names_def_1(), Results}},
         riakc_ts:query(client_pid(Ctx), Query)
     ).
@@ -224,7 +224,7 @@ start_key_query_greater_or_equal_to_500_one_quantum_test(Ctx) ->
         "SELECT * FROM table1 WHERE a = 1 AND b = 1 AND c >= 500 AND c < 700",
     Results =
         [{1,1,N} || N <- lists:seq(500,699)],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {ok, {column_names_def_1(), Results}},
         riakc_ts:query(client_pid(Ctx), Query)
     ).
@@ -234,7 +234,7 @@ start_key_query_greater_than_500_two_quantum_test(Ctx) ->
         "SELECT * FROM table1 WHERE a = 1 AND b = 1 AND c > 500 AND c < 1500",
     Results =
         [{1,1,N} || N <- lists:seq(501,1499)],
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {ok, {column_names_def_1(), Results}},
         riakc_ts:query(client_pid(Ctx), Query)
     ).
@@ -246,7 +246,7 @@ start_key_query_greater_than_500_two_quantum_test(Ctx) ->
 one_key_before_quantum_start_key_greater_than_or_equal_to_2999_test(Ctx) ->
     Query =
         "SELECT * FROM table1 WHERE a = 1 AND b = 1 AND c >= 2999 AND c < 3000",
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {rt_ignore_columns, [{1,1,2999}]},
         riakc_ts:query(client_pid(Ctx), Query)
     ).
@@ -254,7 +254,7 @@ one_key_before_quantum_start_key_greater_than_or_equal_to_2999_test(Ctx) ->
 one_key_before_quantum_start_key_greater_than_2998_test(Ctx) ->
     Query =
         "SELECT * FROM table1 WHERE a = 1 AND b = 1 AND c > 2998 AND c < 3000",
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {rt_ignore_columns, [{1,1,2999}]},
         riakc_ts:query(client_pid(Ctx), Query)
     ).
@@ -262,7 +262,7 @@ one_key_before_quantum_start_key_greater_than_2998_test(Ctx) ->
 one_key_after_quantum_end_key_less_than_3001_test(Ctx) ->
     Query =
         "SELECT * FROM table1 WHERE a = 1 AND b = 1 AND c > 2999 AND c < 3001",
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {rt_ignore_columns, [{1,1,3000}]},
         riakc_ts:query(client_pid(Ctx), Query)
     ).
@@ -270,7 +270,7 @@ one_key_after_quantum_end_key_less_than_3001_test(Ctx) ->
 one_key_after_quantum_less_than_or_equal_to_3000_test(Ctx) ->
     Query =
         "SELECT * FROM table1 WHERE a = 1 AND b = 1 AND c > 2999 AND c <= 3000",
-    ts_util:assert_row_sets(
+    ts_data:assert_row_sets(
         {rt_ignore_columns, [{1,1,3000}]},
         riakc_ts:query(client_pid(Ctx), Query)
     ).
@@ -27,7 +27,9 @@
 -include_lib("eunit/include/eunit.hrl").
 
 confirm() ->
-    ClusterConn = ts_util:cluster_and_connect(multiple),
+    Cluster = ts_setup:start_cluster(3),
+    Conn = ts_setup:conn(Cluster),
+    ClusterConn = {Cluster, Conn},
     ?assert(eqc:quickcheck(eqc:numtests(500, ?MODULE:prop_ts(ClusterConn)))),
     pass.
 
@@ -45,15 +47,15 @@ run_query(ClusterConn, NVal, NPuts, Q, NSpans) ->
     lager:debug("DDL is ~p~n", [DDL]),
 
     Data = make_data(NPuts, Q, NSpans),
 
     Query = make_query(Bucket, Q, NSpans),
 
-    {_Cluster, Conn} = ClusterConn,
-
-    {ok, _} = ts_util:create_and_activate_bucket_type(ClusterConn, DDL, Bucket, NVal),
+    {Cluster, Conn} = ClusterConn,
+    Table = ts_data:get_default_bucket(),
+    {ok, _} = ts_setup:create_bucket_type(Cluster, DDL, Table, NVal),
+    ok = ts_setup:activate_bucket_type(Cluster, Table),
     ok = riakc_ts:put(Conn, Bucket, Data),
-    {ok, {_, Got}} = ts_util:single_query(Conn, Query),
-
+    {ok, {_, Got}} = ts_ops:query(Cluster, Query),
     ?assertEqual(Data, Got),
 
     true.
@@ -84,8 +86,8 @@ make_data(NPuts, Q, NSpans) ->
     Series = <<"seriesX">>,
     Times = lists:seq(1, NPuts),
     [{Family, Series, trunc((X/NPuts) * Multi),
-      ts_util:get_varchar(),
-      ts_util:get_float()}
+      ts_data:get_varchar(),
+      ts_data:get_float()}
      || X <- Times].
 
 get_multi({No, y}) -> 365*24*60*60*1000 * No;
@@ -62,10 +62,14 @@ confirm() ->
     [ANodes, BNodes] = rt:build_clusters([{ClusterASize, Conf}, {NumNodes - ClusterASize, Conf}]),
 
     %% TS-ize the clusters
-    DDL = ts_util:get_ddl(),
-    Table = ts_util:get_default_bucket(),
-    ts_util:create_table(normal, ANodes, DDL, Table),
-    ts_util:create_table(normal, BNodes, DDL, Table),
+    DDL = ts_data:get_ddl(),
+    Table = ts_data:get_default_bucket(),
+
+    ts_setup:create_bucket_type(ANodes, DDL, Table),
+    ts_setup:activate_bucket_type(ANodes, Table),
+
+    ts_setup:create_bucket_type(BNodes, DDL, Table),
+    ts_setup:activate_bucket_type(BNodes, Table),
 
     replication(ANodes, BNodes, list_to_binary(Table), <<"hey look ma no w1c">>),
     test_ddl_comparison(ANodes, BNodes),
@@ -74,10 +78,15 @@ confirm() ->
 test_ddl_comparison(ANodes, BNodes) ->
     lager:info("Testing TS realtime fails with incompatible DDLs"),
     Table = "house_of_horrors",
-    SmallDDL = ts_util:get_ddl(small, Table),
-    BigDDL = ts_util:get_ddl(big, Table),
-    ts_util:create_table(normal, ANodes, SmallDDL, Table),
-    ts_util:create_table(normal, BNodes, BigDDL, Table),
+    SmallDDL = ts_data:get_ddl(small, Table),
+    BigDDL = ts_data:get_ddl(big, Table),
+
+    ts_setup:create_bucket_type(ANodes, SmallDDL, Table),
+    ts_setup:activate_bucket_type(ANodes, Table),
+
+    ts_setup:create_bucket_type(BNodes, BigDDL, Table),
+    ts_setup:activate_bucket_type(BNodes, Table),
+
     LeaderA = get_leader(hd(ANodes)),
     PortB = get_mgr_port(hd(BNodes)),
     prop_failure_replication_test(ANodes, BNodes, LeaderA, PortB, Table).
@@ -93,7 +102,7 @@ ts_num_records_present(Node, Lower, Upper, Table) when is_binary(Table) ->
     ts_num_records_present(Node, Lower, Upper, unicode:characters_to_list(Table));
 ts_num_records_present(Node, Lower, Upper, Table) ->
     %% Queries use strictly greater/less than
-    Qry = ts_util:get_valid_qry(Lower-1, Upper+1, Table),
+    Qry = ts_data:get_valid_qry(Lower-1, Upper+1, Table),
     {ok, {_Hdrs, Results}} = riakc_ts:query(rt:pbc(Node), Qry),
     length(Results).
 
@@ -103,7 +112,7 @@ kv_num_objects_present(Node, Lower, Upper, Bucket) ->
     PotentialQty - length(FailedMatches).
 
 delete_record(Node, Table, Time) ->
-    [RecordAsTuple] = ts_util:get_valid_select_data(fun() -> lists:seq(Time, Time) end),
+    [RecordAsTuple] = ts_data:get_valid_select_data(fun() -> lists:seq(Time, Time) end),
     RecordAsList = tuple_to_list(RecordAsTuple),
     KeyAsList = lists:sublist(RecordAsList, 3),
     lager:info("Deleting ~p from ~ts~n", [KeyAsList, Table]),
@@ -111,7 +120,7 @@ delete_record(Node, Table, Time) ->
 
 put_records(Node, Table, Lower, Upper) ->
     riakc_ts:put(rt:pbc(Node), Table,
-                 ts_util:get_valid_select_data(
+                 ts_data:get_valid_select_data(
                      fun() -> lists:seq(Lower, Upper) end)).
 
 replication(ANodes, BNodes, Table, NormalType) ->
@@ -30,16 +30,17 @@
 %% we cant run the test in this process as it receives various messages
 %% and the running test interprets then as being messages to the shell
 confirm() ->
-    {Nodes, Conn} = ts_util:cluster_and_connect(multiple),
+    Nodes = ts_setup:start_cluster(3),
+    Conn = ts_setup:conn(Nodes),
     lager:info("Built a cluster of ~p~n", [Nodes]),
     Self = self(),
     _Pid = spawn_link(fun() -> create_table_test(Self) end),
     Got1 = riak_shell_test_util:loop(),
-    Result1 = ts_util:assert("Create Table", pass, Got1),
+    Result1 = ts_data:assert("Create Table", pass, Got1),
     _Pid2 = spawn_link(fun() -> query_table_test(Self, Conn) end),
     Got2 = riak_shell_test_util:loop(),
-    Result2 = ts_util:assert("Query Table", pass, Got2),
-    ts_util:results([
+    Result2 = ts_data:assert("Query Table", pass, Got2),
+    ts_data:results([
         Result1,
         Result2
     ]),
@@ -48,7 +49,7 @@ confirm() ->
 create_table_test(Pid) ->
     State = riak_shell_test_util:shell_init(),
     lager:info("~n~nStart running the command set-------------------------", []),
-    CreateTable = lists:flatten(io_lib:format("~s;", [ts_util:get_ddl(small)])),
+    CreateTable = lists:flatten(io_lib:format("~s;", [ts_data:get_ddl(small)])),
     Describe =
         "Column,Type,Is Null,Primary Key,Local Key,Interval,Unit\n"
         "myfamily,varchar,false,1,1,,\n"
@@ -79,8 +80,8 @@ create_table_test(Pid) ->
 
 query_table_test(Pid, Conn) ->
     %% Throw some tests data out there
-    Data = ts_util:get_valid_select_data(),
-    ok = riakc_ts:put(Conn, ts_util:get_default_bucket(), Data),
+    Data = ts_data:get_valid_select_data(),
+    ok = riakc_ts:put(Conn, ts_data:get_default_bucket(), Data),
     SQL = "select time, weather, temperature from GeoCheckin where myfamily='family1' and myseries='seriesX' and time > 0 and time < 1000",
     Select = lists:flatten(io_lib:format("~s;", [SQL])),
     State = riak_shell_test_util:shell_init(),
@@ -31,13 +31,14 @@
 %% we cant run the test in this process as it receives various messages
 %% and the running test interprets then as being messages to the shell
 confirm() ->
-    {Nodes, _Conn} = ts_util:cluster_and_connect(multiple),
+    Nodes = ts_setup:start_cluster(3),
+    _Conn = ts_setup:conn(Nodes),
     lager:info("Built a cluster of ~p~n", [Nodes]),
     Self = self(),
     _Pid = spawn_link(fun() -> load_log_file(Self) end),
     Got1 = riak_shell_test_util:loop(),
-    Result = ts_util:assert("Regression Log", pass, Got1),
-    ts_util:results([
+    Result = ts_data:assert("Regression Log", pass, Got1),
+    ts_data:results([
         Result
     ]),
     pass.
@@ -47,7 +48,7 @@ load_log_file(Pid) ->
     lager:info("~n~nLoad the log -------------------------", []),
     Cmds = [
             {{match, "No Regression Errors."},
-             ts_util:flat_format("regression_log \"~s\";", [?LOG_FILE])}
+             ts_data:flat_format("regression_log \"~s\";", [?LOG_FILE])}
            ],
     Result = riak_shell_test_util:run_commands(Cmds, State,
                                                ?DONT_INCREMENT_PROMPT),
@@ -27,14 +27,19 @@
 -export([confirm/0]).
 
 confirm() ->
-    TestType = normal,
-    DDL = ts_util:get_ddl(),
-    Data = ts_util:get_valid_select_data(),
-    Qry = ts_util:get_valid_qry(),
+
+    DDL = ts_data:get_ddl(),
+    Data = ts_data:get_valid_select_data(),
+    Qry = ts_data:get_valid_qry(),
     Expected = {ok, {
-        ts_util:get_cols(),
-        ts_util:exclusive_result_from_data(Data, 2, 9)}},
-    Got = ts_util:ts_query(
-              ts_util:cluster_and_connect(multiple), TestType, DDL, Data, Qry),
+        ts_data:get_cols(),
+        ts_data:exclusive_result_from_data(Data, 2, 9)}},
+
+    Cluster = ts_setup:start_cluster(3),
+    Table = ts_data:get_default_bucket(),
+    ts_setup:create_bucket_type(Cluster, DDL, Table),
+    ts_setup:activate_bucket_type(Cluster, Table),
+    ok = ts_ops:put(Cluster, Table, Data),
+    Got = ts_ops:query(Cluster, Qry),
     ?assertEqual(Expected, Got),
     pass.
@@ -29,13 +29,17 @@
 -export([confirm/0]).
 
 confirm() ->
-    DDL = ts_util:get_ddl(),
-    Data = ts_util:get_valid_select_data_spanning_quanta(),
-    Qry = ts_util:get_valid_qry_spanning_quanta(),
+    DDL = ts_data:get_ddl(),
+    Data = ts_data:get_valid_select_data_spanning_quanta(),
+    Qry = ts_data:get_valid_qry_spanning_quanta(),
     Expected = {ok, {
-        ts_util:get_cols(),
-        ts_util:exclusive_result_from_data(Data, 1, 10)}},
-    Got = ts_util:ts_query(
-              ts_util:cluster_and_connect(multiple), normal, DDL, Data, Qry),
+        ts_data:get_cols(),
+        ts_data:exclusive_result_from_data(Data, 1, 10)}},
+    Cluster = ts_setup:start_cluster(3),
+    Table = ts_data:get_default_bucket(),
+    ts_setup:create_bucket_type(Cluster, DDL, Table),
+    ts_setup:activate_bucket_type(Cluster, Table),
+    ok = ts_ops:put(Cluster, Table, Data),
+    Got = ts_ops:query(Cluster, Qry),
     ?assertEqual(Expected, Got),
     pass.
@@ -30,18 +30,23 @@
 -export([confirm/0]).
 
 confirm() ->
-    TestType = normal,
-    DDL = ts_util:get_ddl(),
-    Data = ts_util:get_valid_select_data(),
+
+    DDL = ts_data:get_ddl(),
+    Data = ts_data:get_valid_select_data(),
     ShuffledData = shuffle_list(Data),
-    Qry = ts_util:get_valid_qry(),
+    Qry = ts_data:get_valid_qry(),
     Expected = {ok, {
-        ts_util:get_cols(),
-        ts_util:exclusive_result_from_data(Data, 2, 9)}},
+        ts_data:get_cols(),
+        ts_data:exclusive_result_from_data(Data, 2, 9)}},
     % write the shuffled TS records but expect the
     % unshuffled records
-    Got = ts_util:ts_query(
-              ts_util:cluster_and_connect(multiple), TestType, DDL, ShuffledData, Qry),
+    Cluster = ts_setup:start_cluster(3),
+    Table = ts_data:get_default_bucket(),
+    ts_setup:create_bucket_type(Cluster, DDL, Table),
+    ts_setup:activate_bucket_type(Cluster, Table),
+    ok = ts_ops:put(Cluster, Table, ShuffledData),
+    Got = ts_ops:query(Cluster, Qry),
+
     ?assertEqual(Expected, Got),
     pass.
@@ -147,7 +147,7 @@ make_group_by_2_test(DoesSelectPass) ->
 sorted_assert(String, {ok, {ECols, Exp}}, {ok, {GCols, Got}}) ->
     Exp2 = lists:sort(Exp),
     Got2 = lists:sort(Got),
-    ts_util:assert_float(String, {ECols, Exp2}, {GCols, Got2});
+    ts_data:assert_float(String, {ECols, Exp2}, {GCols, Got2});
 sorted_assert(String, Exp, Got) ->
     ok = log_error(String ++ " banjo", Exp, Got),
     fail.
@@ -41,13 +41,13 @@ make_scenarios() ->
 
 
 make_scenario_invariants(Config) ->
-    DDL = ts_util:get_ddl(aggregation, "~s"),
+    DDL = ts_data:get_ddl(aggregation, "~s"),
     {SelectVsExpected, Data} = make_queries_and_data(),
     Create = #create{ddl = DDL, expected = {ok, {[], []}}},
     Insert = #insert{data = Data, expected = ok},
     Selects = [#select{qry = Q,
                        expected = E,
-                       assert_mod = ts_util,
+                       assert_mod = ts_data,
                        assert_fun = assert_float} || {Q, E} <- SelectVsExpected],
     DefaultTestSets = [#test_set{testname = "basic_select_aggregation",
                                  create = Create,
@@ -57,7 +57,7 @@ make_scenario_invariants(Config) ->
 
 make_queries_and_data() ->
     Count = 10,
-    Data = ts_util:get_valid_aggregation_data(Count),
+    Data = ts_data:get_valid_aggregation_data(Count),
     Column4 = [element(?TEMPERATURE_COL_INDEX, X) || X <- Data],
     Column5 = [element(?PRESSURE_COL_INDEX, X) || X <- Data],
     Column6 = [element(?PRECIPITATION_COL_INDEX, X) || X <- Data],
@@ -1,7 +1,7 @@
 %% -*- Mode: Erlang -*-
 %% -------------------------------------------------------------------
 %%
-%% Copyright (c) 2015 Basho Technologies, Inc.
+%% Copyright (c) 2015, 2016 Basho Technologies, Inc.
 %%
 %% This file is provided to you under the Apache License,
 %% Version 2.0 (the "License"); you may not use this file
@@ -18,31 +18,22 @@
 %% under the License.
 %%
 %% -------------------------------------------------------------------
-%% @doc A util module for riak_ts basic CREATE TABLE Actions
--module(ts_util).
-
--export([
-         activate_bucket_type/2,
-         activate_bucket_type/3,
+%% @doc A util module for riak_ts tests
+
+-module(ts_data).
+
+-include_lib("eunit/include/eunit.hrl").
+
+-export([get_ddl/0, get_ddl/1, get_ddl/2,
          assert/3,
         assert_error_regex/3,
         assert_float/3,
         assert_row_sets/2,
-         build_cluster/1,
-         cluster_and_connect/1,
-         create_and_activate_bucket_type/2,
-         create_and_activate_bucket_type/3,
-         create_and_activate_bucket_type/4,
-         create_bucket_type/2,
-         create_bucket_type/3,
-         create_bucket_type/4,
-         create_table/4,
         exclusive_result_from_data/3,
         flat_format/2,
         get_bool/1,
         get_cols/0, get_cols/1,
         get_data/1,
-         get_ddl/0, get_ddl/1, get_ddl/2,
         get_default_bucket/0,
         get_float/0,
         get_integer/0,
@@ -64,205 +55,13 @@
         get_valid_select_data/0, get_valid_select_data/1,
         get_valid_select_data_spanning_quanta/0,
         get_varchar/0,
-         maybe_stop_a_node/2,
         remove_last/1,
-         results/1,
-         single_query/2,
-         single_query/3,
-         ts_get/6,
-         ts_get/7,
-         ts_insert/4,
-         ts_insert_no_columns/3,
-         ts_put/4,
-         ts_put/5,
-         ts_query/5,
-         ts_query/6
+         results/1
        ]).
 
--include_lib("eunit/include/eunit.hrl").
-
 -define(MAXVARCHARLEN, 16).
 -define(MAXTIMESTAMP, trunc(math:pow(2, 63))).
 -define(MAXFLOAT, math:pow(2, 63)).
--define(MULTIPLECLUSTERSIZE, 3).
 
-ts_put(ClusterConn, TestType, DDL, Obj) ->
-    Bucket = get_default_bucket(),
-    ts_put(ClusterConn, TestType, DDL, Obj, Bucket).
-ts_put({Cluster, Conn}, TestType, DDL, Obj, Bucket) ->
-
-    create_table(TestType, Cluster, DDL, Bucket),
-    lager:info("2 - writing to bucket ~ts with:~n- ~p", [Bucket, Obj]),
-    riakc_ts:put(Conn, Bucket, Obj).
-
-ts_get(ClusterConn, TestType, DDL, Obj, Key, Options) ->
-    Bucket = get_default_bucket(),
-    ts_get(ClusterConn, TestType, DDL, Obj, Key, Options, Bucket).
-ts_get({Cluster, Conn}, TestType, DDL, Obj, Key, Options, Bucket) ->
-
-    create_table(TestType, Cluster, DDL, Bucket),
-    lager:info("2 - writing to bucket ~ts with:~n- ~p", [Bucket, Obj]),
-    ok = riakc_ts:put(Conn, Bucket, Obj),
-
-    lager:info("3 - reading from bucket ~ts with key ~p", [Bucket, Key]),
-    riakc_ts:get(Conn, Bucket, Key, Options).
-
-ts_query(ClusterConn, TestType, DDL, Data, Qry) ->
-    Bucket = get_default_bucket(),
-    ts_query(ClusterConn, TestType, DDL, Data, Qry, Bucket).
-ts_query({Cluster, Conn}, TestType, DDL, Data, Qry, Bucket) ->
-
-    create_table(TestType, Cluster, DDL, Bucket),
-
-    lager:info("2 - writing to bucket ~ts with:~n- ~p", [Bucket, Data]),
-    ok = riakc_ts:put(Conn, Bucket, Data),
-
-    single_query(Conn, Qry).
-
-single_query(Conn, Qry) ->
-    single_query(Conn, Qry, []).
-
-single_query(Conn, Qry, Opts) ->
-    lager:info("3 - Now run the query ~ts", [Qry]),
-    Got = riakc_ts:query(Conn, Qry, Opts),
-    lager:info("Result is ~p", [Got]),
-    Got.
-
-insert_term_format(Data, Acc) when is_binary(Data) ->
-    Acc ++ flat_format("'~s',", [Data]);
-insert_term_format(Data, Acc) ->
-    Acc ++ flat_format("~p,", [Data]).
-
-ts_insert(Conn, Table, Columns, Data) ->
-    ColFn = fun(Col, Acc) ->
-                    Acc ++ flat_format("~s,", [Col])
-            end,
-    TermFn = fun insert_term_format/2,
-    ColClause = string:strip(lists:foldl(ColFn, [], Columns), right, $,),
-    ValClause = string:strip(lists:foldl(TermFn, [], tuple_to_list(Data)), right, $,),
-    SQL = flat_format("INSERT INTO ~s (~s) VALUES (~ts)",
-                      [Table, ColClause, ValClause]),
-    lager:info("~ts", [SQL]),
-    Got = riakc_ts:query(Conn, SQL),
-    lager:info("Result is ~p", [Got]),
-    Got.
-
-ts_insert_no_columns(Conn, Table, Data) ->
-    TermFn = fun insert_term_format/2,
-    ValClause = string:strip(lists:foldl(TermFn, [], tuple_to_list(Data)), right, $,),
-    SQL = flat_format("INSERT INTO ~s VALUES (~ts)",
-                      [Table, ValClause]),
-    lager:info("~ts", [SQL]),
-    Got = riakc_ts:query(Conn, SQL),
-    lager:info("Result is ~p", [Got]),
-    Got.
-
-%%
-%% Table and Bucket Type Management
-%%
-
--spec(create_table(normal|n_val_one|no_ddl, [node()], string(), string()) -> ok).
-create_table(normal, Cluster, DDL, Bucket) ->
-    lager:info("1 - Create and activate the bucket"),
-    lager:debug("DDL = ~ts", [DDL]),
-    create_and_activate_bucket_type(Cluster, DDL, Bucket);
-create_table(n_val_one, Cluster, DDL, Bucket) ->
-    lager:info("1 - Creating and activating bucket"),
-    lager:debug("DDL = ~ts", [DDL]),
-    create_and_activate_bucket_type(Cluster, DDL, Bucket);
-create_table(no_ddl, _Cluster, _DDL, _Bucket) ->
-    lager:info("1 - NOT Creating or activating bucket - failure test"),
-    ok.
-
--spec(create_bucket_type([node()], string()) -> {ok, term()} | term()).
-create_bucket_type(Cluster, DDL) ->
-    create_bucket_type(Cluster, DDL, get_default_bucket()).
--spec(create_bucket_type(node()|{[node()],term()}, string(), string()|non_neg_integer()) -> {ok, term()} | term()).
-create_bucket_type({Cluster, _Conn}, DDL, Bucket) ->
-    create_bucket_type(Cluster, DDL, Bucket);
-create_bucket_type(Cluster, DDL, Bucket) ->
-    NVal = length(Cluster),
-    create_bucket_type(Cluster, DDL, Bucket, NVal).
-
--spec(create_bucket_type(node()|{[node()],term()}, string(), string(), non_neg_integer()) -> {ok, term()} | term()).
-create_bucket_type([Node|_Rest], DDL, Bucket, NVal) when is_integer(NVal) ->
-    Props = io_lib:format("{\\\"props\\\": {\\\"n_val\\\": ~s, \\\"table_def\\\": \\\"~s\\\"}}", [integer_to_list(NVal), DDL]),
-    rt:admin(Node, ["bucket-type", "create", bucket_to_list(Bucket), lists:flatten(Props)]).
-
--spec(activate_bucket_type([node()], string()) -> {ok, string()} | term()).
-activate_bucket_type(Cluster, Bucket) ->
-    activate_bucket_type(Cluster, Bucket, 3).
-%% Attempt to activate the bucket type 4 times
-activate_bucket_type(Cluster, Bucket, Retries) ->
-    [Node|_Rest] = Cluster,
-    {ok, Msg} = Result = rt:admin(Node, ["bucket-type", "activate", bucket_to_list(Bucket)]),
-    %% Look for a successful message
-    case string:str(Msg, "has been activated") of
-        0 ->
-            lager:error("Could not activate bucket type. Retrying. Result = ~p", [Result]),
-            case Retries of
-                0 -> Result;
-                _ -> timer:sleep(timer:seconds(1)),
-                     activate_bucket_type(Cluster, Bucket, Retries-1)
-            end;
-        _ -> Result
-    end.
-
--spec(create_and_activate_bucket_type([node()]|{[node()],term()}, string()) -> term()).
-create_and_activate_bucket_type({Cluster, _Conn}, DDL) ->
-    create_and_activate_bucket_type(Cluster, DDL);
-create_and_activate_bucket_type(Cluster, DDL) ->
-    create_and_activate_bucket_type(Cluster, DDL, get_default_bucket()).
-
--spec(create_and_activate_bucket_type({[node()],term()}, string(), string()) -> term()).
-create_and_activate_bucket_type({Cluster, _Conn}, DDL, Bucket) ->
-    create_and_activate_bucket_type(Cluster, DDL, Bucket);
-create_and_activate_bucket_type(Cluster, DDL, Bucket)->
-    {ok, _} = create_bucket_type(Cluster, DDL, Bucket),
-    activate_bucket_type(Cluster, Bucket).
--spec(create_and_activate_bucket_type({[node()],term()}, string(), string(), non_neg_integer()) -> term()).
-create_and_activate_bucket_type({Cluster, _Conn}, DDL, Bucket, NVal) ->
-    {ok, _} = create_bucket_type(Cluster, DDL, Bucket, NVal),
-    activate_bucket_type(Cluster, Bucket).
-
-bucket_to_list(Bucket) when is_binary(Bucket) ->
-    binary_to_list(Bucket);
-bucket_to_list(Bucket) ->
-    Bucket.
-
-%% @ignore
-maybe_stop_a_node(delayed_one_down, Cluster) ->
-    maybe_stop_a_node(one_down, Cluster);
-maybe_stop_a_node(one_down, Cluster) ->
-    %% Shutdown the second node, since we connect to the first one
-    ok = rt:stop(hd(tl(Cluster)));
-maybe_stop_a_node(_, _) ->
-    ok.
-
-build_cluster(single) -> build_c2(1, all_up);
-build_cluster(multiple) -> build_c2(?MULTIPLECLUSTERSIZE, all_up);
-build_cluster(one_down) -> build_c2(?MULTIPLECLUSTERSIZE, one_down);
-build_cluster(delayed_one_down) -> build_c2(?MULTIPLECLUSTERSIZE, all_up).
-
-%% Build a cluster and create a PBC connection to the first node
--spec cluster_and_connect(single|multiple|one_down) -> {[node()], term()}.
-cluster_and_connect(ClusterType) ->
-    Cluster = [Node|_Rest] = build_cluster(ClusterType),
-    Conn = rt:pbc(Node),
-    ?assert(is_pid(Conn)),
-    {Cluster, Conn}.
-%% Just build cluster and stop a node, if necessary
--spec build_c2(non_neg_integer(), all_up|one_down) -> [node()].
-build_c2(Size, ClusterType) ->
-    lager:info("Building cluster of ~p~n", [Size]),
-    build_c2(Size, ClusterType, []).
--spec build_c2(non_neg_integer(), all_up|one_down, list()) -> {[node()], term()}.
-build_c2(Size, ClusterType, Config) ->
-    rt:set_backend(eleveldb),
-    [_Node|Rest] = Cluster = rt:build_cluster(Size, Config),
-    maybe_stop_a_node(ClusterType, Rest),
-    Cluster.
-
-
 %% This is also the name of the table
 get_default_bucket() ->
@@ -379,7 +178,7 @@ exclusive_result_from_data(Data, Start, Finish) when is_integer(Start) andalso
     lists:sublist(Data, Start, Finish - Start + 1).
 
 remove_last(Data) ->
-    lists:reverse(tl(lists:reverse(Data))).
+    lists:sublist(Data, length(Data)-1).
 
 %% a valid DDL - the one used in the documents
 get_ddl() ->
@@ -396,7 +195,7 @@ get_ddl(aggregation) ->
 
 
 get_ddl(small, Table) ->
-    "CREATE TABLE " ++ Table ++ " ("
+    "CREATE TABLE " ++ table_to_list(Table) ++ " ("
     " myfamily varchar not null,"
     " myseries varchar not null,"
     " time timestamp not null,"
@@ -408,7 +207,7 @@ get_ddl(small, Table) ->
 %% another valid DDL - one with all the good stuff like
 %% different types and optional blah-blah
 get_ddl(big, Table) ->
-    "CREATE TABLE " ++ Table ++ " ("
+    "CREATE TABLE " ++ table_to_list(Table) ++ " ("
     " myfamily varchar not null,"
     " myseries varchar not null,"
     " time timestamp not null,"
@@ -426,7 +225,7 @@ get_ddl(big, Table) ->
 %% those respective modules
 
 get_ddl(api, Table) ->
-    "CREATE TABLE " ++ Table ++ " ("
+    "CREATE TABLE " ++ table_to_list(Table) ++ " ("
     " myfamily varchar not null,"
     " myseries varchar not null,"
     " time timestamp not null,"
@@ -439,7 +238,7 @@ get_ddl(api, Table) ->
 
 %% DDL for testing aggregation behavior
 get_ddl(aggregation, Table) ->
-    "CREATE TABLE " ++ Table ++ " ("
+    "CREATE TABLE " ++ table_to_list(Table) ++ " ("
     " myfamily varchar not null,"
     " myseries varchar not null,"
     " time timestamp not null,"
@@ -449,6 +248,11 @@ get_ddl(aggregation, Table) ->
     " PRIMARY KEY ((myfamily, myseries, quantum(time, 10, 'm')), "
     " myfamily, myseries, time))".
 
+table_to_list(Table) when is_binary(Table) ->
+    binary_to_list(Table);
+table_to_list(Table) ->
+    Table.
+
 get_data(api) ->
     [{<<"family1">>, <<"seriesX">>, 100, 1, <<"test1">>, 1.0, true}] ++
     [{<<"family1">>, <<"seriesX">>, 200, 2, <<"test2">>, 2.0, false}] ++
@@ -29,9 +29,13 @@
 -export([confirm/0]).
 
 confirm() ->
-    DDL = ts_util:get_ddl(),
+    DDL = ts_data:get_ddl(),
     Expected = {ok, "GeoCheckin has been created but cannot be activated yet\n"},
-    Got = ts_util:create_and_activate_bucket_type(
-              ts_util:build_cluster(one_down), DDL),
+
+    [_Node|Rest]= Cluster = ts_setup:start_cluster(3),
+    ok = rt:stop(hd(tl(Rest))),
+    Table = ts_data:get_default_bucket(),
+    {ok,_} = ts_setup:create_bucket_type(Cluster, DDL, Table),
+    Got = ts_setup:activate_bucket_type(Cluster, Table),
     ?assertEqual(Expected, Got),
     pass.
@@ -34,14 +34,14 @@ confirm() ->
     OrigCCNN = lists:zip(OrigCluster, OrigClients),
     ok = rt:join_cluster(OrigCluster),
 
-    DDL = ts_util:get_ddl(),
+    DDL = ts_data:get_ddl(),
     ?assertEqual({ok, {[], []}}, riakc_ts:query(hd(OrigClients), DDL)),
 
-    Table = ts_util:get_default_bucket(),
-    Data = ts_util:get_valid_select_data(),
+    Table = ts_data:get_default_bucket(),
+    Data = ts_data:get_valid_select_data(),
     ?assertEqual(ok, riakc_ts:put(hd(OrigClients), Table, Data)),
 
-    Qry = ts_util:get_valid_qry(0, 11),
+    Qry = ts_data:get_valid_qry(0, 11),
 
     ok = check_data(OrigCCNN, Qry, Data),
 
@@ -35,14 +35,14 @@ confirm() ->
     OrigCCNN = lists:zip(OrigCluster, OrigClients),
     ok = rt:join_cluster(OrigCluster),
 
-    DDL = ts_util:get_ddl(),
+    DDL = ts_data:get_ddl(),
     ?assertEqual({ok, {[], []}}, riakc_ts:query(hd(OrigClients), DDL)),
 
-    Table = ts_util:get_default_bucket(),
-    Data = ts_util:get_valid_select_data_spanning_quanta(),
+    Table = ts_data:get_default_bucket(),
+    Data = ts_data:get_valid_select_data_spanning_quanta(),
     ?assertEqual(ok, riakc_ts:put(hd(OrigClients), Table, Data)),
 
-    Qry = ts_util:get_valid_qry_spanning_quanta(),
+    Qry = ts_data:get_valid_qry_spanning_quanta(),
 
     ok = check_data(OrigCCNN, Qry, Data),
 
82
tests/ts_ops.erl
Normal file
82
tests/ts_ops.erl
Normal file
@ -0,0 +1,82 @@
%% -*- Mode: Erlang -*-
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2015, 2016 Basho Technologies, Inc.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc A util module for riak_ts tests

-module(ts_ops).

-export([put/3,
         get/3, get/4,
         'query'/2, 'query'/3,
         insert/4, insert_no_columns/3
        ]).

-spec put(list(node()), string(), list(tuple())) -> 'ok'|{'error', term()}.
put(Nodes, Table, Records) ->
    riakc_ts:put(ts_setup:conn(Nodes), Table, Records).

get(Nodes, Table, Key) ->
    get(Nodes, Table, Key, []).

get(Nodes, Table, Key, Options) ->
    riakc_ts:get(ts_setup:conn(Nodes), Table, Key, Options).

'query'(Nodes, Qry) ->
    'query'(Nodes, Qry, []).

'query'(Nodes, Qry, Options) ->
    lager:info("Running query ~ts", [Qry]),
    Got = riakc_ts:query(ts_setup:conn(Nodes), Qry, Options),
    lager:info("Result is ~p", [Got]),
    Got.

insert_term_format(Data, Acc) when is_binary(Data) ->
    Acc ++ flat_format("'~s',", [Data]);
insert_term_format(Data, Acc) ->
    Acc ++ flat_format("~p,", [Data]).

insert(Nodes, Table, Columns, Data) ->
    Conn = ts_setup:conn(Nodes),
    ColFn = fun(Col, Acc) ->
                Acc ++ flat_format("~s,", [Col])
            end,
    TermFn = fun insert_term_format/2,
    ColClause = string:strip(lists:foldl(ColFn, [], Columns), right, $,),
    ValClause = string:strip(lists:foldl(TermFn, [], tuple_to_list(Data)), right, $,),
    SQL = flat_format("INSERT INTO ~s (~s) VALUES (~ts)",
                      [Table, ColClause, ValClause]),
    lager:info("~ts", [SQL]),
    Got = riakc_ts:query(Conn, SQL),
    lager:info("Result is ~p", [Got]),
    Got.

insert_no_columns(Nodes, Table, Data) ->
    Conn = ts_setup:conn(Nodes),
    TermFn = fun insert_term_format/2,
    ValClause = string:strip(lists:foldl(TermFn, [], tuple_to_list(Data)), right, $,),
    SQL = flat_format("INSERT INTO ~s VALUES (~ts)",
                      [Table, ValClause]),
    lager:info("~ts", [SQL]),
    Got = riakc_ts:query(Conn, SQL),
    lager:info("Result is ~p", [Got]),
    Got.

flat_format(Format, Args) ->
    lists:flatten(io_lib:format(Format, Args)).
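Taken together, ts_ops hides the riakc_ts client connection behind the node list, so the rewritten tests pass a cluster around instead of a pid. A minimal usage sketch, assuming the default GeoCheckin schema from ts_data (this example is illustrative and is not part of the module):

    %% Hypothetical example: write one row, read it back by key, query a range.
    example(Cluster) ->
        Table = ts_data:get_default_bucket(),
        Row = {<<"family1">>, <<"seriesX">>, 100, <<"cloudy">>, 5.5},
        ok = ts_ops:put(Cluster, Table, [Row]),
        {ok, {_Cols, [Row]}} =
            ts_ops:get(Cluster, Table, [<<"family1">>, <<"seriesX">>, 100]),
        ts_ops:'query'(Cluster,
                       "SELECT * FROM GeoCheckin"
                       " WHERE myfamily = 'family1' AND myseries = 'seriesX'"
                       " AND time >= 1 AND time <= 200").

Note how insert/4 assembles its SQL: it folds over the column and value lists, single-quoting binaries via "'~s'," and rendering everything else with ~p, then strips the trailing commas.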
99 tests/ts_setup.erl Normal file
@ -0,0 +1,99 @@
%% -*- Mode: Erlang -*-
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2015, 2016 Basho Technologies, Inc.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc A util module for riak_ts tests

-module(ts_setup).

-include_lib("eunit/include/eunit.hrl").

-export([start_cluster/0, start_cluster/1, start_cluster/2,
         conn/1, conn/2, stop_a_node/1, stop_a_node/2,
         create_bucket_type/3, create_bucket_type/4,
         activate_bucket_type/2, activate_bucket_type/3]).

-spec start_cluster() -> list(node()).
start_cluster() ->
    start_cluster(1).

-spec start_cluster(NumNodes :: pos_integer()) -> list(node()).
start_cluster(NumNodes) ->
    start_cluster(NumNodes, []).

-spec start_cluster(NumNodes :: pos_integer(),
                    Config :: list(tuple())) -> list(node()).
start_cluster(NumNodes, Config) ->
    rt:set_backend(eleveldb),
    lager:info("Building cluster of ~p~n", [NumNodes]),
    rt:build_cluster(NumNodes, Config).

-spec conn(list(node())) -> pid().
conn(Nodes) ->
    conn(1, Nodes).

-spec conn(pos_integer(), list(node())) -> pid().
conn(Which, Nodes) ->
    Conn = rt:pbc(lists:nth(Which, Nodes)),
    ?assert(is_pid(Conn)),
    Conn.

-spec stop_a_node(list(node())) -> ok.
stop_a_node(Nodes) ->
    stop_a_node(2, Nodes).

-spec stop_a_node(pos_integer(), list(node())) -> ok.
stop_a_node(Which, Nodes) ->
    ok = rt:stop(lists:nth(Which, Nodes)).

-spec create_bucket_type(list(node()), string(), string()) -> {ok, term()} | term().
create_bucket_type(Nodes, DDL, Table) ->
    create_bucket_type(Nodes, DDL, Table, 3).

-spec create_bucket_type(list(node()), string(), string(), pos_integer()) -> {ok, term()} | term().
create_bucket_type([Node|_Rest], DDL, Table, NVal) when is_integer(NVal) ->
    Props = io_lib:format("{\\\"props\\\": {\\\"n_val\\\": ~s, \\\"table_def\\\": \\\"~s\\\"}}", [integer_to_list(NVal), DDL]),
    rt:admin(Node, ["bucket-type", "create", table_to_list(Table), lists:flatten(Props)]).

%% Attempt to activate the bucket type 4 times
-spec activate_bucket_type([node()], string()) -> {ok, string()} | term().
activate_bucket_type(Cluster, Table) ->
    activate_bucket_type(Cluster, Table, 3).

-spec activate_bucket_type([node()], string(), pos_integer()) -> ok | term().
activate_bucket_type(Cluster, Table, Retries) ->
    [Node|_Rest] = Cluster,
    {ok, Msg} = Result = rt:admin(Node, ["bucket-type", "activate", table_to_list(Table)]),
    %% Look for a successful message
    case string:str(Msg, "has been activated") of
        0 ->
            lager:error("Could not activate bucket type. Retrying. Result = ~p", [Result]),
            case Retries of
                0 -> Result;
                _ -> timer:sleep(timer:seconds(1)),
                     activate_bucket_type(Cluster, Table, Retries-1)
            end;
        _ -> ok
    end.

table_to_list(Table) when is_binary(Table) ->
    binary_to_list(Table);
table_to_list(Table) ->
    Table.
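ts_setup gives every test the same preamble: build a cluster, create the bucket type from a DDL, then activate it. A condensed sketch of that flow, assuming the default DDL from ts_data (the three-node size is arbitrary; the function name is hypothetical):

    %% Illustrative preamble only; mirrors what the rewritten tests below do.
    setup_example() ->
        Cluster = ts_setup:start_cluster(3),
        Table = ts_data:get_default_bucket(),
        DDL = ts_data:get_ddl(),
        {ok, _} = ts_setup:create_bucket_type(Cluster, DDL, Table),
        %% activate_bucket_type/2 polls riak-admin for "has been activated",
        %% sleeping 1s between tries; with the default budget of 3 retries
        %% that is 4 attempts in total. It returns ok on success.
        ok = ts_setup:activate_bucket_type(Cluster, Table),
        Cluster.

Note the escaped-JSON props blob in create_bucket_type/4: the DDL travels to riak-admin embedded in the bucket type's table_def property, which is why a table name that breaks the JSON (see the "mytãble" test later in this diff) surfaces as an "invalid json" error.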
@ -27,14 +27,11 @@
-export([confirm/0]).

confirm() ->
    DDL = ts_util:get_ddl(),
    Expected =
        {ok,
         "GeoCheckin has been activated\n"
         "\n"
         "WARNING: Nodes in this cluster can no longer be\n"
         "downgraded to a version of Riak prior to 2.0\n"},
    Got = ts_util:create_and_activate_bucket_type(
        ts_util:build_cluster(single), DDL),
    DDL = ts_data:get_ddl(),
    Expected = ok,
    Cluster = ts_setup:start_cluster(1),
    Table = ts_data:get_default_bucket(),
    {ok, _} = ts_setup:create_bucket_type(Cluster, DDL, Table),
    Got = ts_setup:activate_bucket_type(Cluster, Table),
    ?assertEqual(Expected, Got),
    pass.

@ -27,7 +27,7 @@
-export([confirm/0]).

confirm() ->
    Cluster = ts_util:build_cluster(single),
    Cluster = ts_setup:start_cluster(1),
    % individual assert matches to show line numbers in failures
    ?assertMatch(
        {error_creating_bucket_type, _},
@ -111,7 +111,7 @@ confirm() ->

%%
create_and_activate_bucket_type(Cluster, {TableName, DDL}) ->
    {ok, Out} = ts_util:create_bucket_type(Cluster, DDL, TableName, 3),
    {ok, Out} = ts_setup:create_bucket_type(Cluster, DDL, TableName, 3),
    case iolist_to_binary(Out) of
        <<"Error", _/binary>> ->
            {error_creating_bucket_type, Out};
@ -119,7 +119,7 @@ create_and_activate_bucket_type(Cluster, {TableName, DDL}) ->
            {error_creating_bucket_type, Out};
        _ ->
            Retries = 0,
            ts_util:activate_bucket_type(Cluster, TableName, Retries)
            ts_setup:activate_bucket_type(Cluster, TableName, Retries)
    end.

%%

@ -46,40 +46,60 @@ test_name(ClusterType, Name) ->
    lists:flatten(io_lib:format("~p:~p", [atom_to_list(ClusterType), Name])).

verify_aggregation(ClusterType) ->
    DDL = ts_util:get_ddl(aggregation),

    Size = case ClusterType of
               single ->
                   1;
               multiple ->
                   3;
               one_down ->
                   3;
               delayed_one_down ->
                   3;
               _ ->
                   1
           end,

    DDL = ts_data:get_ddl(aggregation),
    lager:info("DDL is ~p", [DDL]),

    {Cluster, Conn} = ts_util:cluster_and_connect(ClusterType),

    Cluster = ts_setup:start_cluster(Size),
    Conn = ts_setup:conn(Cluster),
    Count = 10,
    Data = ts_util:get_valid_aggregation_data(Count),
    Data = ts_data:get_valid_aggregation_data(Count),
    lager:info("Data is ~p", [Data]),
    Column4 = [element(?TEMPERATURE_COL_INDEX, X) || X <- Data],
    Column5 = [element(?PRESSURE_COL_INDEX, X) || X <- Data],
    Column6 = [element(?PRECIPITATION_COL_INDEX, X) || X <- Data],
    TestType = normal,
    Bucket = "WeatherData",

    Where = " WHERE myfamily = 'family1' and myseries = 'seriesX' and time >= 1 and time <= 10",

    Qry = "SELECT COUNT(myseries) FROM " ++ Bucket ++ Where,

    ts_util:create_table(TestType, Cluster, DDL, Bucket),
    ts_setup:create_bucket_type(Cluster, DDL, Bucket),
    ts_setup:activate_bucket_type(Cluster, Bucket),

    %% Degraded clusters need to have DDL applied BEFORE taking down a node
    ts_util:maybe_stop_a_node(ClusterType, Cluster),
    case ClusterType of
        delayed_one_down ->
            rt:stop(hd(tl(Cluster)));
        one_down ->
            rt:stop(hd(tl(Cluster)));
        _ -> ok
    end,
    ok = riakc_ts:put(Conn, Bucket, Data),
    Got = ts_util:single_query(Conn, Qry),
    Got = ts_ops:query(Cluster, Qry),
    Expected = {ok, {[<<"COUNT(myseries)">>], [{Count}]}},
    Result = ts_util:assert(test_name(ClusterType, "Count Strings"), Expected, Got),
    Result = ts_data:assert(test_name(ClusterType, "Count Strings"), Expected, Got),

    Qry2 = "SELECT COUNT(time) FROM " ++ Bucket ++ Where,
    Got2 = ts_util:single_query(Conn, Qry2),
    Got2 = ts_ops:query(Cluster, Qry2),
    Expected2 = {ok, {[<<"COUNT(time)">>], [{Count}]}},
    Result2 = ts_util:assert(test_name(ClusterType, "Count Timestamps"), Expected2, Got2),
    Result2 = ts_data:assert(test_name(ClusterType, "Count Timestamps"), Expected2, Got2),

    Qry3 = "SELECT COUNT(pressure), count(temperature), cOuNt(precipitation) FROM " ++ Bucket ++ Where,
    Got3 = ts_util:single_query(Conn, Qry3),
    Got3 = ts_ops:query(Cluster, Qry3),
    Expected3 = {ok, {
        [<<"COUNT(pressure)">>,
         <<"COUNT(temperature)">>,
@ -88,38 +108,38 @@ verify_aggregation(ClusterType) ->
        [{count_non_nulls(Column5),
          count_non_nulls(Column4),
          count_non_nulls(Column6)}]}},
    Result3 = ts_util:assert(test_name(ClusterType, "Count Multiple Floats"), Expected3, Got3),
    Result3 = ts_data:assert(test_name(ClusterType, "Count Multiple Floats"), Expected3, Got3),

    Qry4 = "SELECT SUM(temperature) FROM " ++ Bucket ++ Where,
    Got4 = ts_util:single_query(Conn, Qry4),
    Got4 = ts_ops:query(Cluster, Qry4),
    Sum4 = lists:sum([X || X <- Column4, is_number(X)]),
    Expected4 = {ok, {[<<"SUM(temperature)">>],
                      [{Sum4}]}},
    Result4 = ts_util:assert(test_name(ClusterType, "Single Float Sum"), Expected4, Got4),
    Result4 = ts_data:assert(test_name(ClusterType, "Single Float Sum"), Expected4, Got4),

    Qry5 = "SELECT SUM(temperature), sum(pressure), sUM(precipitation) FROM " ++ Bucket ++ Where,
    Got5 = ts_util:single_query(Conn, Qry5),
    Got5 = ts_ops:query(Cluster, Qry5),
    Sum5 = lists:sum([X || X <- Column5, is_number(X)]),
    Sum6 = lists:sum([X || X <- Column6, is_number(X)]),
    Expected5 = {ok, {[<<"SUM(temperature)">>, <<"SUM(pressure)">>, <<"SUM(precipitation)">>],
                      [{Sum4, Sum5, Sum6}]}},
    Result5 = ts_util:assert(test_name(ClusterType, "Multiple Float Sums"), Expected5, Got5),
    Result5 = ts_data:assert(test_name(ClusterType, "Multiple Float Sums"), Expected5, Got5),

    Qry6 = "SELECT MIN(temperature), MIN(pressure) FROM " ++ Bucket ++ Where,
    Got6 = ts_util:single_query(Conn, Qry6),
    Got6 = ts_ops:query(Cluster, Qry6),
    Min4 = lists:min([X || X <- Column4, is_number(X)]),
    Min5 = lists:min([X || X <- Column5, is_number(X)]),
    Expected6 = {ok, {[<<"MIN(temperature)">>, <<"MIN(pressure)">>],
                      [{Min4, Min5}]}},
    Result6 = ts_util:assert(test_name(ClusterType, "Min Floats"), Expected6, Got6),
    Result6 = ts_data:assert(test_name(ClusterType, "Min Floats"), Expected6, Got6),

    Qry7 = "SELECT MAX(temperature), MAX(pressure) FROM " ++ Bucket ++ Where,
    Got7 = ts_util:single_query(Conn, Qry7),
    Got7 = ts_ops:query(Cluster, Qry7),
    Max4 = lists:max([X || X <- Column4, is_number(X)]),
    Max5 = lists:max([X || X <- Column5, is_number(X)]),
    Expected7 = {ok, {[<<"MAX(temperature)">>, <<"MAX(pressure)">>],
                      [{Max4, Max5}]}},
    Result7 = ts_util:assert(test_name(ClusterType, "Max Floats"), Expected7, Got7),
    Result7 = ts_data:assert(test_name(ClusterType, "Max Floats"), Expected7, Got7),

    C4 = [X || X <- Column4, is_number(X)],
    C5 = [X || X <- Column5, is_number(X)],
@ -129,10 +149,10 @@ verify_aggregation(ClusterType) ->
    Avg4 = Sum4 / Count4,
    Avg5 = Sum5 / Count5,
    Qry8 = "SELECT AVG(temperature), MEAN(pressure) FROM " ++ Bucket ++ Where,
    Got8 = ts_util:single_query(Conn, Qry8),
    Got8 = ts_ops:query(Cluster, Qry8),
    Expected8 = {ok, {[<<"AVG(temperature)">>, <<"MEAN(pressure)">>],
                      [{Avg4, Avg5}]}},
    Result8 = ts_util:assert(test_name(ClusterType, "Avg and Mean"), Expected8, Got8),
    Result8 = ts_data:assert(test_name(ClusterType, "Avg and Mean"), Expected8, Got8),

    StdDevFun4 = stddev_fun_builder(Avg4),
    StdDevFun5 = stddev_fun_builder(Avg5),
@ -143,20 +163,20 @@ verify_aggregation(ClusterType) ->
    Qry9 = "SELECT STDDEV_POP(temperature), STDDEV_POP(pressure)," ++
        " STDDEV(temperature), STDDEV(pressure), " ++
        " STDDEV_SAMP(temperature), STDDEV_SAMP(pressure) FROM " ++ Bucket ++ Where,
    Got9 = ts_util:single_query(Conn, Qry9),
    Got9 = ts_ops:query(Cluster, Qry9),
    Expected9 = {ok, {[<<"STDDEV_POP(temperature)">>, <<"STDDEV_POP(pressure)">>,
                       <<"STDDEV(temperature)">>, <<"STDDEV(pressure)">>,
                       <<"STDDEV_SAMP(temperature)">>, <<"STDDEV_SAMP(pressure)">>],
                      [{StdDev4, StdDev5, Sample4, Sample5, Sample4, Sample5}]}},
    Result9 = ts_util:assert_float(test_name(ClusterType, "Standard Deviation"), Expected9, Got9),
    Result9 = ts_data:assert_float(test_name(ClusterType, "Standard Deviation"), Expected9, Got9),

    Qry10 = "SELECT SUM(temperature), MIN(pressure), AVG(pressure) FROM " ++ Bucket ++ Where,
    Got10 = ts_util:single_query(Conn, Qry10),
    Got10 = ts_ops:query(Cluster, Qry10),
    Expected10 = {ok, {[<<"SUM(temperature)">>, <<"MIN(pressure)">>, <<"AVG(pressure)">>],
                       [{Sum4, Min5, Avg5}]}},
    Result10 = ts_util:assert(test_name(ClusterType, "Mixter Maxter"), Expected10, Got10),
    Result10 = ts_data:assert(test_name(ClusterType, "Mixter Maxter"), Expected10, Got10),

    ts_util:results([
    ts_data:results([
        Result,
        Result2,
        Result3,
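The three standard-deviation assertions hinge on the population/sample split: STDDEV_POP divides by N, while STDDEV and STDDEV_SAMP divide by N-1. A reference computation over a plain list of numbers (a sketch of what stddev_fun_builder's accumulator works out to, not the test's actual code):

    %% {population, sample} standard deviations of a list of numbers.
    stddevs(Xs) ->
        N = length(Xs),
        Mean = lists:sum(Xs) / N,
        SumSq = lists:sum([(X - Mean) * (X - Mean) || X <- Xs]),
        {math:sqrt(SumSq / N),          %% STDDEV_POP
         math:sqrt(SumSq / (N - 1))}.   %% STDDEV, STDDEV_SAMP

With a single row the sample estimator would divide by zero; here the test runs against Count = 10 rows and compares with assert_float rather than exact equality, since the server and the client accumulate the floats in different orders.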
@ -27,54 +27,56 @@
% Ensure aggregation functions only work on desired data types

confirm() ->
    DDL = ts_util:get_ddl(big),
    DDL = ts_data:get_ddl(big),
    Count = 10,
    Data = ts_util:get_valid_big_data(Count),
    TestType = normal,
    Bucket = "GeoCheckin",

    Data = ts_data:get_valid_big_data(Count),
    Bucket = ts_data:get_default_bucket(),
    Qry = "SELECT SUM(mybool) FROM " ++ Bucket,
    ClusterConn = {_Cluster, Conn} = ts_util:cluster_and_connect(single),
    Got1 = ts_util:ts_query(ClusterConn, TestType, DDL, Data, Qry, Bucket),
    Cluster = ts_setup:start_cluster(1),
    {ok, _} = ts_setup:create_bucket_type(Cluster, DDL, Bucket),
    ok = ts_setup:activate_bucket_type(Cluster, Bucket),
    ok = ts_ops:put(Cluster, Bucket, Data),

    Got1 = ts_ops:query(Cluster, Qry),
    Expected1 = {error, {1001, <<".*Function 'SUM' called with arguments of the wrong type [[]boolean[]].*">>}},
    Result1 = ts_util:assert_error_regex("SUM - boolean", Expected1, Got1),
    Result1 = ts_data:assert_error_regex("SUM - boolean", Expected1, Got1),

    Qry2 = "SELECT AVG(myfamily) FROM " ++ Bucket,
    Got2 = ts_util:single_query(Conn, Qry2),
    Got2 = ts_ops:query(Cluster, Qry2),
    Expected2 = {error, {1001, <<".*Function 'AVG' called with arguments of the wrong type [[]varchar[]].*">>}},
    Result2 = ts_util:assert_error_regex("AVG - varchar", Expected2, Got2),
    Result2 = ts_data:assert_error_regex("AVG - varchar", Expected2, Got2),

    Qry3 = "SELECT MIN(myseries) FROM " ++ Bucket,
    Got3 = ts_util:single_query(Conn, Qry3),
    Got3 = ts_ops:query(Cluster, Qry3),
    Expected3 = {error, {1001, <<".*Function 'MIN' called with arguments of the wrong type [[]varchar[]].*">>}},
    Result3 = ts_util:assert_error_regex("MIN - varchar", Expected3, Got3),
    Result3 = ts_data:assert_error_regex("MIN - varchar", Expected3, Got3),

    Qry4 = "SELECT MAX(myseries) FROM " ++ Bucket,
    Got4 = ts_util:single_query(Conn, Qry4),
    Got4 = ts_ops:query(Cluster, Qry4),
    Expected4 = {error, {1001, <<".*Function 'MAX' called with arguments of the wrong type [[]varchar[]].*">>}},
    Result4 = ts_util:assert_error_regex("MIN - varchar", Expected4, Got4),
    Result4 = ts_data:assert_error_regex("MAX - varchar", Expected4, Got4),

    Qry5 = "SELECT STDDEV(mybool) FROM " ++ Bucket,
    Got5 = ts_util:single_query(Conn, Qry5),
    Got5 = ts_ops:query(Cluster, Qry5),
    Expected5 = {error, {1001, <<".*Function 'STDDEV_SAMP' called with arguments of the wrong type [[]boolean[]].*">>}},
    Result5 = ts_util:assert_error_regex("STDDEV - boolean", Expected5, Got5),
    Result5 = ts_data:assert_error_regex("STDDEV - boolean", Expected5, Got5),

    Qry6 = "SELECT STDDEV_SAMP(mybool) FROM " ++ Bucket,
    Got6 = ts_util:single_query(Conn, Qry6),
    Got6 = ts_ops:query(Cluster, Qry6),
    Expected6 = {error, {1001, <<".*Function 'STDDEV_SAMP' called with arguments of the wrong type [[]boolean[]].*">>}},
    Result6 = ts_util:assert_error_regex("STDDEV_SAMP - boolean", Expected6, Got6),
    Result6 = ts_data:assert_error_regex("STDDEV_SAMP - boolean", Expected6, Got6),

    Qry7 = "SELECT STDDEV_POP(time) FROM " ++ Bucket,
    Got7 = ts_util:single_query(Conn, Qry7),
    Got7 = ts_ops:query(Cluster, Qry7),
    Expected7 = {error, {1001, <<".*Function 'STDDEV_POP' called with arguments of the wrong type [[]timestamp[]].*">>}},
    Result7 = ts_util:assert_error_regex("STDDEV_POP - timestamp", Expected7, Got7),
    Result7 = ts_data:assert_error_regex("STDDEV_POP - timestamp", Expected7, Got7),

    Qry8 = "SELECT Mean(mybool) FROM " ++ Bucket,
    Got8 = ts_util:single_query(Conn, Qry8),
    Got8 = ts_ops:query(Cluster, Qry8),
    Expected8 = {error, {1001, <<".*Function 'AVG' called with arguments of the wrong type [[]boolean[]].*">>}},
    Result8 = ts_util:assert_error_regex("MEAN - boolean", Expected8, Got8),
    Result8 = ts_data:assert_error_regex("MEAN - boolean", Expected8, Got8),

    ts_util:results([
    ts_data:results([
        Result1,
        Result2,
        Result3,

@ -27,69 +27,75 @@
% Ensure aggregation functions only work on desired data types

confirm() ->
    DDL = ts_util:get_ddl(aggregation),
    DDL = ts_data:get_ddl(aggregation),
    Count = 10,
    Data = ts_util:get_valid_aggregation_data_not_null(Count),
    Data = ts_data:get_valid_aggregation_data_not_null(Count),
    Column4 = [element(4, X) || X <- Data],
    Column5 = [element(5, X) || X <- Data],
    Column6 = [element(6, X) || X <- Data],
    TestType = normal,
    Bucket = "WeatherData",

    Where = " WHERE myfamily = 'family1' and myseries = 'seriesX' and time >= 1 and time <= 10",

    Qry = "SELECT AVG(temperature) * (9/5) + 32 FROM " ++ Bucket ++ Where ++ " and temperature > 10",
    ClusterConn = {_Cluster, Conn} = ts_util:cluster_and_connect(single),

    Cluster = ts_setup:start_cluster(1),

    FilteredTemp = lists:filter(fun(X) -> case X > 10 andalso is_number(X) of true -> true; _ -> false end end, Column4),
    _FilteredSum4 = lists:sum(FilteredTemp),
    {_, {_, _Got}} = ts_util:ts_query(ClusterConn, TestType, DDL, Data, Qry, Bucket),

    {ok, _} = ts_setup:create_bucket_type(Cluster, DDL, Bucket),
    ok = ts_setup:activate_bucket_type(Cluster, Bucket),
    ok = ts_ops:put(Cluster, Bucket, Data),
    {_, {_, _Got}} = ts_ops:query(Cluster, Qry),

    %% ?assertEqual((FilteredSum4/length(FilteredTemp)) * (9/5) + 32, Got),

    Qry2 = "SELECT SUM(pressure/precipitation) FROM " ++ Bucket ++ Where,
    {ok, {_, Got2}} = ts_util:single_query(Conn, Qry2),
    {ok, {_, Got2}} = ts_ops:query(Cluster, Qry2),
    SumDiv = lists:sum(
        [Press/Precip || {Press, Precip} <- lists:zip(Column5, Column6), Press /= [], Precip /= []]),
    ?assertEqual([{SumDiv}], Got2),

    Qry3 = "SELECT 3+5, 2.0+8, 9/2, 9.0/2 FROM " ++ Bucket ++ Where,
    {ok, {_, Got3}} = ts_util:single_query(Conn, Qry3),
    {ok, {_, Got3}} = ts_ops:query(Cluster, Qry3),
    Arithmetic = [{8, 10.0, 4, 4.5} || _ <- lists:seq(1, Count)],
    ?assertEqual(Arithmetic, Got3),

    Qry4 = "SELECT SUM(temperature+10), AVG(pressure)/10 FROM " ++ Bucket ++ Where,
    {ok, {_, Got4}} = ts_util:single_query(Conn, Qry4),
    {ok, {_, Got4}} = ts_ops:query(Cluster, Qry4),
    SumPlus = lists:sum([X+10 || X <- Column4]),
    AvgDiv = lists:sum(Column5)/Count/10,
    ?assertEqual([{SumPlus, AvgDiv}], Got4),

    div_by_zero_test(Conn, Bucket, Where),
    div_by_zero_test(Cluster, Bucket, Where),

    div_aggregate_function_by_zero_test(Conn, Bucket, Where),
    div_aggregate_function_by_zero_test(Cluster, Bucket, Where),

    negate_an_aggregation_test(Conn, Bucket, Where),
    negate_an_aggregation_test(Cluster, Bucket, Where),

    pass.

%%
div_by_zero_test(Conn, Bucket, Where) ->
div_by_zero_test(Cluster, Bucket, Where) ->
    Query = "SELECT 5 / 0 FROM " ++ Bucket ++ Where,
    ?assertEqual(
        {error, {1001, <<"Divide by zero">>}},
        ts_util:single_query(Conn, Query)
        ts_ops:query(Cluster, Query)
    ).

%%
div_aggregate_function_by_zero_test(Conn, Bucket, Where) ->
div_aggregate_function_by_zero_test(Cluster, Bucket, Where) ->
    Query = "SELECT COUNT(*) / 0 FROM " ++ Bucket ++ Where,
    ?assertEqual(
        {error, {1001, <<"Divide by zero">>}},
        ts_util:single_query(Conn, Query)
        ts_ops:query(Cluster, Query)
    ).

%%
negate_an_aggregation_test(Conn, Bucket, Where) ->
negate_an_aggregation_test(Cluster, Bucket, Where) ->
    Query = "SELECT -COUNT(*), COUNT(*) FROM " ++ Bucket ++ Where,
    ?assertEqual(
        {ok, {[<<"-COUNT(*)">>, <<"COUNT(*)">>], [{-10, 10}]}},
        ts_util:single_query(Conn, Query)
        ts_ops:query(Cluster, Query)
    ).
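The Arithmetic expectation above, {8, 10.0, 4, 4.5}, encodes SQL-style numeric typing: an all-integer expression stays an integer (9/2 is 4), while a single float operand promotes the result (2.0+8 is 10.0, 9.0/2 is 4.5). Erlang splits these across two operators, which matters when computing expected values client-side; a quick sketch:

    %% Client-side mirror of Qry3 = "SELECT 3+5, 2.0+8, 9/2, 9.0/2".
    expected_arithmetic() ->
        {3 + 5,       % 8
         2.0 + 8,     % 10.0
         9 div 2,     % 4 (Erlang's / would give 4.5 here)
         9.0 / 2}.    % 4.5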
@ -31,17 +31,23 @@
%---------------------------------------------------------------------

confirm() ->
    DDL = ts_util:get_ddl(api),
    Data = ts_util:get_data(api),
    ClusterConn = {_Cluster, Conn} = ts_util:cluster_and_connect(single),
    ?assertEqual(ok, ts_util:ts_put(ClusterConn, normal, DDL, Data)),
    DDL = ts_data:get_ddl(api),
    Data = ts_data:get_data(api),

    confirm_GtOps(Conn),
    confirm_GtEqOps(Conn),
    confirm_LtOps(Conn),
    confirm_LtEqOps(Conn),
    confirm_EqOps(Conn),
    confirm_NeqOps(Conn),
    Cluster = ts_setup:start_cluster(1),

    Table = ts_data:get_default_bucket(),
    {ok, _} = ts_setup:create_bucket_type(Cluster, DDL, Table),
    ok = ts_setup:activate_bucket_type(Cluster, Table),

    ?assertEqual(ok, ts_ops:put(Cluster, Table, Data)),

    confirm_GtOps(Cluster),
    confirm_GtEqOps(Cluster),
    confirm_LtOps(Cluster),
    confirm_LtEqOps(Cluster),
    confirm_EqOps(Cluster),
    confirm_NeqOps(Cluster),
    pass.

%------------------------------------------------------------
@ -111,7 +117,7 @@ confirm_NeqOps(C) ->
%------------------------------------------------------------

confirm_pass(C, Qry, Expected) ->
    Got = ts_util:single_query(C, Qry),
    Got = ts_ops:query(C, Qry),
    {ok, {_Cols, Records}} = Got,
    N = length(Records),
    ?assertEqual(Expected, Got),
@ -122,7 +128,7 @@ confirm_pass(C, Qry, Expected) ->
%------------------------------------------------------------

confirm_error(C, Qry, _Expected) ->
    Got = ts_util:single_query(C, Qry),
    Got = ts_ops:query(C, Qry),
    {Status, _Reason} = Got,
    ?assertEqual(Status, error).

@ -144,7 +150,7 @@ buildList(Acc, Next) ->
%------------------------------------------------------------

indexOf(Type, FieldNames) ->
    Fields = ts_util:get_map(Type),
    Fields = ts_data:get_map(Type),
    lists:foldl(fun(Name, Acc) ->
        {_Name, Index} = lists:keyfind(Name, 1, Fields),
        buildList(Acc, Index)
@ -185,7 +191,7 @@ expected(Type, Data, Fields, CompVals, CompFn) ->
        [] ->
            {ok, {[], []}};
        _ ->
            {ok, {ts_util:get_cols(Type), Records}}
            {ok, {ts_data:get_cols(Type), Records}}
    end.

%------------------------------------------------------------
@ -232,7 +238,7 @@ confirm_Error(C, {NameAtom, TypeAtom, OpAtom, Val}) ->
%------------------------------------------------------------

confirm_Template(C, {NameAtom, TypeAtom, OpAtom, Val}, Result) ->
    Data = ts_util:get_data(api),
    Data = ts_data:get_data(api),
    Qry = getQry({NameAtom, TypeAtom, OpAtom, Val}),
    Fields = [<<"time">>, <<"myfamily">>, <<"myseries">>] ++ [list_to_binary(atom_to_list(NameAtom))],
    case TypeAtom of

@ -36,25 +36,27 @@
-define(UPPER_QRY, 900050).

confirm() ->
    DDL = ts_util:get_ddl(),
    Qry = ts_util:get_valid_qry(?LOWER_QRY, ?UPPER_QRY),
    Data = ts_util:get_valid_select_data(fun() -> lists:seq(?LOWER_DATA, ?UPPER_DATA) end),
    DDL = ts_data:get_ddl(),
    Qry = ts_data:get_valid_qry(?LOWER_QRY, ?UPPER_QRY),
    Data = ts_data:get_valid_select_data(fun() -> lists:seq(?LOWER_DATA, ?UPPER_DATA) end),
    Expected =
        {ts_util:get_cols(small),
         ts_util:exclusive_result_from_data(Data, ?LOWER_QRY-?LOWER_DATA+2, (?LOWER_QRY-?LOWER_DATA)+(?UPPER_QRY-?LOWER_QRY))},
    {[Node], Pid} = ts_util:cluster_and_connect(single),
        {ts_data:get_cols(small),
         ts_data:exclusive_result_from_data(Data, ?LOWER_QRY-?LOWER_DATA+2, (?LOWER_QRY-?LOWER_DATA)+(?UPPER_QRY-?LOWER_QRY))},

    [Node] = ts_setup:start_cluster(1),
    Pid = ts_setup:conn([Node]),

    rt_intercept:add(Node, {riak_kv_eleveldb_backend,
                            [{{batch_put, 4}, batch_put}]}),

    %% Buried in the bowels of the code path behind ts_util:ts_put/4
    %% Buried in the bowels of the code path behind ts_ops:put/3
    %% is a calculation that n_val is the same as the cluster size. I
    %% want a single node cluster for this test, but n_val of 4, so
    %% I'll duplicate the path here
    Bucket = ts_util:get_default_bucket(),
    {ok, _} = ts_util:create_bucket_type([Node], DDL, Bucket, 4),
    ts_util:activate_bucket_type([Node], Bucket),
    Bucket = ts_data:get_default_bucket(),
    {ok, _} = ts_setup:create_bucket_type([Node], DDL, Bucket, 4),
    ok = ts_setup:activate_bucket_type([Node], Bucket),

    riakc_ts:put(Pid, Bucket, Data),
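Since ts_setup:create_bucket_type/4 takes an explicit n_val, the "duplicate the path here" workaround in the comment is just the standard preamble with the replication factor overridden. Schematically (a sketch of the idea, not the test itself; the function name is hypothetical):

    %% One node with n_val 4: all four replicas of each quantum land on the
    %% same node, which is the configuration this test needs.
    setup_small_cluster_big_nval(DDL) ->
        [Node] = ts_setup:start_cluster(1),
        Bucket = ts_data:get_default_bucket(),
        {ok, _} = ts_setup:create_bucket_type([Node], DDL, Bucket, 4),
        ok = ts_setup:activate_bucket_type([Node], Bucket),
        Node.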
@ -31,22 +31,18 @@
%%

confirm() ->
    DDL = ts_util:get_ddl(),
    ClusterConn = ts_util:cluster_and_connect(single),

    Expected1 =
        {ok,
         "GeoCheckin has been activated\n"
         "\n"
         "WARNING: Nodes in this cluster can no longer be\n"
         "downgraded to a version of Riak prior to 2.0\n"},
    Got1 = ts_util:create_and_activate_bucket_type(ClusterConn, DDL),
    DDL = ts_data:get_ddl(),
    Cluster = ts_setup:start_cluster(1),
    Expected1 = ok,
    Table = ts_data:get_default_bucket(),
    {ok, _} = ts_setup:create_bucket_type(Cluster, DDL, Table),
    Got1 = ts_setup:activate_bucket_type(Cluster, Table),
    ?assertEqual(Expected1, Got1),

    Expected2 =
        {ok,
         "Error creating bucket type GeoCheckin:\n"
         "already_active\n"},
    Got2 = ts_util:create_bucket_type(ClusterConn, DDL),
    Got2 = ts_setup:create_bucket_type(Cluster, DDL, Table),
    ?assertEqual(Expected2, Got2),
    pass.

@ -36,6 +36,8 @@ confirm() ->
    " temperature double,"
    " PRIMARY KEY ((myfamily, myfamily, quantum(time, 15, 'm')),"
    " myfamily, myfamily, time))",
    {ok, Got} = ts_util:create_bucket_type(ts_util:build_cluster(single), DDL),
    Table = ts_data:get_default_bucket(),
    Cluster = ts_setup:start_cluster(1),
    {ok, Got} = ts_setup:create_bucket_type(Cluster, DDL, Table),
    ?assertNotEqual(0, string:str(Got, "Primary key has duplicate fields")),
    pass.

@ -1,6 +1,6 @@
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2015 Basho Technologies, Inc.
%% Copyright (c) 2015-2016 Basho Technologies, Inc.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
@ -27,6 +27,7 @@
-export([confirm/0]).

confirm() ->
    Table = ts_data:get_default_bucket(),
    DDL =
        "CREATE TABLE GeoCheckin ("
        " myfamily varchar not null,"
@ -34,6 +35,7 @@ confirm() ->
        " time timestamp not null,"
        " weather varchar not null,"
        " temperature double)",
    {ok, Got} = ts_util:create_bucket_type(ts_util:build_cluster(single), DDL),
    Cluster = ts_setup:start_cluster(1),
    {ok, Got} = ts_setup:create_bucket_type(Cluster, DDL, Table),
    ?assertNotEqual(0, string:str(Got, "Missing primary key")),
    pass.

@ -1,6 +1,6 @@
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2015 Basho Technologies, Inc.
%% Copyright (c) 2015-2016 Basho Technologies, Inc.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
@ -27,6 +27,7 @@
-export([confirm/0]).

confirm() ->
    Table = ts_data:get_default_bucket(),
    DDL =
        "CREATE TABLE GeoCheckin ("
        " myfamily varchar not null,"
@ -36,6 +37,7 @@ confirm() ->
        " temperature double,"
        " PRIMARY KEY ((myfamily, myseries, quantum(time, 15, 'm')),"
        " myfamily, myseries, time))",
    {ok, Got} = ts_util:create_bucket_type(ts_util:build_cluster(single), DDL),
    Cluster = ts_setup:start_cluster(1),
    {ok, Got} = ts_setup:create_bucket_type(Cluster, DDL, Table),
    ?assertNotEqual(0, string:str(Got, "Primary key has 'null' fields")),
    pass.

@ -1,6 +1,6 @@
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2015 Basho Technologies, Inc.
%% Copyright (c) 2015-2016 Basho Technologies, Inc.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
@ -27,14 +27,16 @@
-export([confirm/0]).

confirm() ->
    DDL = ts_util:get_ddl(),
    Table = ts_data:get_default_bucket(),
    DDL = ts_data:get_ddl(),
    Expected =
        {ok,
         "GeoCheckin created\n"
         "\n"
         "WARNING: After activating GeoCheckin, nodes in this cluster\n"
         "can no longer be downgraded to a version of Riak prior to 2.0\n"},
    Got = ts_util:create_bucket_type(
        ts_util:build_cluster(single), DDL),

    Cluster = ts_setup:start_cluster(1),
    Got = ts_setup:create_bucket_type(Cluster, DDL, Table),
    ?assertEqual(Expected, Got),
    pass.

@ -1,6 +1,6 @@
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2015 Basho Technologies, Inc.
%% Copyright (c) 2015-2016 Basho Technologies, Inc.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
@ -36,6 +36,8 @@ confirm() ->
    " temperature double,"
    " PRIMARY KEY ((myfamily, myseries, quantum(time, 15, 'm')),"
    " time, myfamily, myseries, temperature))",
    {ok, Got} = ts_util:create_bucket_type(ts_util:build_cluster(single), DDL),
    Table = ts_data:get_default_bucket(),
    Cluster = ts_setup:start_cluster(1),
    {ok, Got} = ts_setup:create_bucket_type(Cluster, DDL, Table),
    ?assertNotEqual(0, string:str(Got, "Local key does not match primary key")),
    pass.

@ -1,6 +1,6 @@
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2015 Basho Technologies, Inc.
%% Copyright (c) 2015-2016 Basho Technologies, Inc.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
@ -29,12 +29,9 @@
%% Test basic table description

confirm() ->
    DDL = ts_util:get_ddl(),
    Bucket = ts_util:get_default_bucket(),
    Qry = "DESCRIBE " ++ Bucket,
    ClusterConn = {_Cluster, Conn} = ts_util:cluster_and_connect(single),
    ts_util:create_and_activate_bucket_type(ClusterConn, DDL),
    Got = ts_util:single_query(Conn, Qry),
    Table = ts_data:get_default_bucket(),
    DDL = ts_data:get_ddl(),
    Qry = "DESCRIBE " ++ Table,
    Expected =
        {ok, {[<<"Column">>, <<"Type">>, <<"Is Null">>, <<"Primary Key">>, <<"Local Key">>, <<"Interval">>, <<"Unit">>],
              [{<<"myfamily">>, <<"varchar">>, false, 1, 1, [], []},
@ -42,5 +39,11 @@ confirm() ->
               {<<"time">>, <<"timestamp">>, false, 3, 3, 15, <<"m">>},
               {<<"weather">>, <<"varchar">>, false, [], [], [], []},
               {<<"temperature">>, <<"double">>, true, [], [], [], []}]}},

    Cluster = ts_setup:start_cluster(1),
    ts_setup:create_bucket_type(Cluster, DDL, Table),
    ts_setup:activate_bucket_type(Cluster, Table),
    Got = ts_ops:query(Cluster, Qry),

    ?assertEqual(Expected, Got),
    pass.

@ -1,6 +1,6 @@
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2015 Basho Technologies, Inc.
%% Copyright (c) 2015-2016 Basho Technologies, Inc.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
@ -28,16 +28,18 @@

%% Test handling of division by zero in a query select clause.
confirm() ->
    DDL = ts_util:get_ddl(aggregation),
    Data = ts_util:get_valid_aggregation_data_not_null(10),
    TestType = normal,
    {Cluster, ClientConn} = ts_util:cluster_and_connect(single),
    ts_util:create_table(TestType, Cluster, DDL, table()),
    ok = riakc_ts:put(ClientConn, table(), Data),
    Table = table(),
    DDL = ts_data:get_ddl(aggregation),
    Data = ts_data:get_valid_aggregation_data_not_null(10),

    Cluster = ts_setup:start_cluster(1),
    ts_setup:create_bucket_type(Cluster, DDL, Table),
    ts_setup:activate_bucket_type(Cluster, Table),
    ts_ops:put(Cluster, Table, Data),

    TsQueryFn =
        fun(Query_x) ->
            ts_util:single_query(ClientConn, Query_x)
            ts_ops:query(Cluster, Query_x)
        end,
    arithmetic_int_div_int_zero_test(TsQueryFn),
    arithmetic_float_div_int_zero_test(TsQueryFn),

@ -29,14 +29,17 @@
%% Test basic table description

confirm() ->
    DDL = ts_util:get_ddl(big, "MyTable"),
    ClusterConn = {_Cluster, Conn} = ts_util:cluster_and_connect(single),
    ts_util:create_and_activate_bucket_type(ClusterConn, DDL, "MyTable"),
    Table = "MyTable",
    DDL = ts_data:get_ddl(big, Table),
    Cluster = ts_setup:start_cluster(1),
    ts_setup:create_bucket_type(Cluster, DDL, Table),
    ts_setup:activate_bucket_type(Cluster, Table),

    Qry = "EXPLAIN SELECT myint, myfloat, myoptional FROM MyTable WHERE "
          "myfamily='wingo' AND myseries='dingo' AND time > 0 AND time < 2000000 "
          "AND ((mybool=true AND myvarchar='banana') OR (myoptional=7))",

    Got = ts_util:single_query(Conn, Qry),
    Got = ts_ops:query(Cluster, Qry),
    Expected =
        {ok, {[<<"Subquery">>,
               <<"Coverage Plan">>,
@ -45,21 +48,21 @@ confirm() ->
               <<"Range Scan End Key">>,
               <<"Is End Inclusive?">>, <<"Filter">>],
              [{1,
                <<"dev1@127.0.0.1/49">>,
                <<"dev1@127.0.0.1/49, dev1@127.0.0.1/50, dev1@127.0.0.1/51">>,
                <<"myfamily = 'wingo', myseries = 'dingo', time = 1">>,
                false,
                <<"myfamily = 'wingo', myseries = 'dingo', time = 900000">>,
                false,
                <<"((myoptional = 7) OR ((mybool = true) AND (myvarchar = 'banana')))">>},
               {2,
                <<"dev1@127.0.0.1/11">>,
                <<"dev1@127.0.0.1/11, dev1@127.0.0.1/12, dev1@127.0.0.1/13">>,
                <<"myfamily = 'wingo', myseries = 'dingo', time = 900000">>,
                false,
                <<"myfamily = 'wingo', myseries = 'dingo', time = 1800000">>,
                false,
                <<"((myoptional = 7) OR ((mybool = true) AND (myvarchar = 'banana')))">>},
               {3,
                <<"dev1@127.0.0.1/59">>,
                <<"dev1@127.0.0.1/59, dev1@127.0.0.1/60, dev1@127.0.0.1/61">>,
                <<"myfamily = 'wingo', myseries = 'dingo', time = 1800000">>,
                false,
                <<"myfamily = 'wingo', myseries = 'dingo', time = 2000000">>,
@ -68,6 +71,6 @@ confirm() ->
    ?assertEqual(Expected, Got),

    %% Now try NOT using TTB
    Got2 = riakc_ts:query(Conn, Qry, [], undefined, [{use_ttb, false}]),
    Got2 = ts_ops:query(Cluster, Qry, [{use_ttb, false}]),
    ?assertEqual(Expected, Got2),
    pass.
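The final assertion re-runs the same EXPLAIN over the plain protobuf encoding by passing [{use_ttb, false}] through ts_ops:'query'/3 (TTB is the term-to-binary fast path, which the comment above treats as the default). The same pattern can check that both transports of any query agree; a sketch, assuming the option list is forwarded to riakc_ts:query as in ts_ops above:

    %% Run a query over both transports and require identical results.
    compare_encodings(Cluster, Qry) ->
        {ok, Result} = ts_ops:'query'(Cluster, Qry),                     %% TTB (default)
        {ok, Result} = ts_ops:'query'(Cluster, Qry, [{use_ttb, false}]), %% plain PB
        Result.

The second match reuses the already-bound Result, so any divergence between the two transports fails with a badmatch.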
@ -29,13 +29,18 @@
%% Test gets which return no data, i.e., not found.

confirm() ->
    DDL = ts_util:get_ddl(),
    Data = ts_util:get_valid_select_data(),
    Table = ts_data:get_default_bucket(),
    DDL = ts_data:get_ddl(),
    Data = ts_data:get_valid_select_data(),
    DataRow = hd(Data),

    Cluster = ts_setup:start_cluster(1),
    ts_setup:create_bucket_type(Cluster, DDL, Table),
    ts_setup:activate_bucket_type(Cluster, Table),
    ts_ops:put(Cluster, Table, Data),
    Key = lists:sublist(tuple_to_list(DataRow), 3),
    Expected = {ts_util:get_cols(), [DataRow]},
    {ok, Got} = ts_util:ts_get(
        ts_util:cluster_and_connect(single),
        normal, DDL, Data, Key, []),
    Expected = {ts_data:get_cols(), [DataRow]},
    {ok, Got} = ts_ops:get(Cluster, Table, Key),
    ?assertEqual(Expected, Got),
    pass.

@ -29,12 +29,12 @@
%% Test gets which return no data, i.e., not found.

confirm() ->
    DDL = ts_util:get_ddl(),
    Data = ts_util:get_valid_select_data(),
    Table = ts_data:get_default_bucket(),
    DDL = ts_data:get_ddl(),
    Expected = {[], []},
    {ok, Got} = ts_util:ts_get(
        ts_util:cluster_and_connect(single),
        normal, DDL, Data,
        [<<"nada">>, <<"nope">>, 10], []),
    Cluster = ts_setup:start_cluster(1),
    ts_setup:create_bucket_type(Cluster, DDL, Table),
    ts_setup:activate_bucket_type(Cluster, Table),
    {ok, Got} = ts_ops:get(Cluster, Table, [<<"nada">>, <<"nope">>, 10]),
    ?assertEqual(Expected, Got),
    pass.

@ -35,7 +35,7 @@ suite() ->

init_per_suite(Config) ->
    application:start(ibrowse),
    [Node|_] = Cluster = ts_util:build_cluster(single),
    [Node|_] = Cluster = ts_setup:start_cluster(1),
    rt:wait_for_service(Node, [riak_kv, riak_pipe, riak_repl]),
    [{cluster, Cluster} | Config].

@ -27,40 +27,38 @@
-export([confirm/0]).

confirm() ->
    DDL = ts_util:get_ddl(),
    Table = ts_util:get_default_bucket(),
    Columns = ts_util:get_cols(),
    {Cluster, Conn} = ts_util:cluster_and_connect(single),
    Got = ts_util:create_and_activate_bucket_type(Cluster, DDL),
    ExpectedActivationMessage = Table ++ " has been activated\n",
    ?assertEqual(ok, element(1, Got)),
    ?assertEqual(ExpectedActivationMessage,
        lists:sublist(element(2, Got), 1, length(ExpectedActivationMessage))),
    Table = ts_data:get_default_bucket(),
    DDL = ts_data:get_ddl(),
    Columns = ts_data:get_cols(),

    Data1 = ts_util:get_valid_select_data(),
    Cluster = ts_setup:start_cluster(1),
    ts_setup:create_bucket_type(Cluster, DDL, Table),
    ts_setup:activate_bucket_type(Cluster, Table),

    Data1 = ts_data:get_valid_select_data(),
    Insert1Fn = fun(Datum, Acc) ->
                    [ts_util:ts_insert(Conn, Table, Columns, Datum) | Acc]
                    [ts_ops:insert(Cluster, Table, Columns, Datum) | Acc]
                end,
    Got1 = lists:reverse(lists:foldl(Insert1Fn, [], Data1)),
    Expected1 = lists:duplicate(10, {ok, {[], []}}),
    Result1 = ts_util:assert("Insert With Columns", Expected1, Got1),
    Result1 = ts_data:assert("Insert With Columns", Expected1, Got1),

    Qry2 = "select * from GeoCheckin Where time >= 1 and time <= 10 and myfamily = 'family1' and myseries ='seriesX'",
    Got2 = ts_util:single_query(Conn, Qry2),
    Expected2 = {ok, {Columns, ts_util:exclusive_result_from_data(Data1, 1, 10)}},
    Result2 = ts_util:assert("Insert With Columns (results)", Expected2, Got2),
    Got2 = ts_ops:query(Cluster, Qry2),
    Expected2 = {ok, {Columns, ts_data:exclusive_result_from_data(Data1, 1, 10)}},
    Result2 = ts_data:assert("Insert With Columns (results)", Expected2, Got2),

    Data3 = ts_util:get_valid_select_data(fun() -> lists:seq(11, 20) end),
    Data3 = ts_data:get_valid_select_data(fun() -> lists:seq(11, 20) end),
    Insert3Fn = fun(Datum, Acc) ->
                    [ts_util:ts_insert_no_columns(Conn, Table, Datum) | Acc]
                    [ts_ops:insert_no_columns(Cluster, Table, Datum) | Acc]
                end,
    Got3 = lists:reverse(lists:foldl(Insert3Fn, [], Data3)),
    Result3 = ts_util:assert("Insert Without Columns", Expected1, Got3),
    Result3 = ts_data:assert("Insert Without Columns", Expected1, Got3),

    Qry4 = "select * from GeoCheckin Where time >= 11 and time <= 20 and myfamily = 'family1' and myseries ='seriesX'",
    Got4 = ts_util:single_query(Conn, Qry4),
    Expected4 = {ok, {Columns, ts_util:exclusive_result_from_data(Data3, 1, 10)}},
    Result4 = ts_util:assert("Insert Without Columns (results)", Expected4, Got4),
    Got4 = ts_ops:query(Cluster, Qry4),
    Expected4 = {ok, {Columns, ts_data:exclusive_result_from_data(Data3, 1, 10)}},
    Result4 = ts_data:assert("Insert Without Columns (results)", Expected4, Got4),

    %% inserting columns out of order and partial, excluding temperature
    Columns5 = [<<"myfamily">>, <<"time">>, <<"weather">>, <<"myseries">>],
@ -72,13 +70,13 @@ confirm() ->
    } || I <- lists:seq(21, 30) ],

    Insert5Fn = fun(Datum, Acc) ->
                    [ts_util:ts_insert(Conn, Table, Columns5, Datum) | Acc]
                    [ts_ops:insert(Cluster, Table, Columns5, Datum) | Acc]
                end,
    Got5 = lists:reverse(lists:foldl(Insert5Fn, [], Data5)),
    Expected5 = [ {ok, {[], []}} || _I <- lists:seq(21, 30) ],
    Result5 = ts_util:assert("Insert with NULL (results)", Expected5, Got5),
    Result5 = ts_data:assert("Insert with NULL (results)", Expected5, Got5),

    ts_util:results([
    ts_data:results([
        Result1,
        Result2,
        Result3,

@ -27,19 +27,19 @@
-export([confirm/0]).

confirm() ->
    DDL = ts_util:get_ddl(),
    Table = ts_util:get_default_bucket(),
    Data = ts_util:get_valid_select_data(),
    TooMuchData = [list_to_tuple([<<"rubbish">> | tuple_to_list(Row)]) || Row <- Data],
    Table = ts_data:get_default_bucket(),
    DDL = ts_data:get_ddl(),
    Data = ts_data:get_valid_select_data(),
    TooMuchData = [list_to_tuple(tuple_to_list(Row) ++ [<<"rubbish">>]) || Row <- Data],
    TooLittleData = [list_to_tuple(lists:reverse(tl(lists:reverse(tuple_to_list(Row))))) || Row <- Data],
    WrongColumns = TooMuchData ++ TooLittleData,
    Columns = ts_util:get_cols(),
    Columns = ts_data:get_cols(),

    {_Cluster, Conn} = ts_util:cluster_and_connect(single),
    ?assertEqual({ok, {[], []}}, riakc_ts:query(Conn, DDL)),
    Cluster = ts_setup:start_cluster(1),
    ?assertEqual({ok, {[], []}}, ts_ops:query(Cluster, DDL)),

    Fn = fun(Datum, Acc) ->
             [ts_util:ts_insert(Conn, Table, Columns, Datum) | Acc]
             [ts_ops:insert(Cluster, Table, Columns, Datum) | Acc]
         end,
    Got2 = lists:reverse(lists:foldl(Fn, [], WrongColumns)),
    ?assertEqual(

@ -39,24 +39,24 @@
    ]).

confirm() ->
    DDL = ts_util:get_ddl(),
    Table = ts_util:get_default_bucket(),
    {Cluster, Conn} = ts_util:cluster_and_connect(single),
    ts_util:create_and_activate_bucket_type(Cluster, DDL),
    Table = ts_data:get_default_bucket(),
    DDL = ts_data:get_ddl(),
    Cluster = ts_setup:start_cluster(1),
    ts_setup:create_bucket_type(Cluster, DDL, Table),
    ts_setup:activate_bucket_type(Cluster, Table),

    QryFmt = "select * from GeoCheckin Where time >= ~B and time <= ~B and myfamily = 'family1' and myseries ='seriesX'",

    lists:foreach(
        fun({String, Epoch}) ->
            Qry = lists:flatten(
                io_lib:format(QryFmt, [Epoch-10, Epoch+10])),
            Qry = ts_data:flat_format(QryFmt, [Epoch-10, Epoch+10]),

            {ok, {[], []}} = ts_util:single_query(Conn, Qry),
            {ok, {[], []}} = ts_ops:query(Cluster, Qry),

            ts_util:ts_insert_no_columns(Conn, Table,
            ts_ops:insert_no_columns(Cluster, Table,
                {<<"family1">>, <<"seriesX">>,
                 unicode:characters_to_binary(String), <<"cloudy">>, 5.5}),
            {ok, {_Cols, OneRow}} = ts_util:single_query(Conn, Qry),
            {ok, {_Cols, OneRow}} = ts_ops:query(Cluster, Qry),
            ?assertEqual(1, length(OneRow))

        end, ?TESTS),
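ts_data:flat_format/2, used above, replaces the inline lists:flatten(io_lib:format(...)) pair; the distinction matters because io_lib:format returns a deep iolist while the query helpers here are fed flat strings. It presumably has the same shape as the private helper ts_ops carries (shown earlier in this diff):

    %% Assumed to match ts_ops' private flat_format/2.
    flat_format(Format, Args) ->
        lists:flatten(io_lib:format(Format, Args)).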
@ -34,9 +34,9 @@ confirm() ->
    "series VARCHAR NOT NULL, "
    "time TIMESTAMP NOT NULL, "
    "PRIMARY KEY ((family, series, quantum(time, 15, 's')), family, series, time))",
    [Node | _] = ts_util:build_cluster(single),
    {[Node|_] = Cluster,_} = ts_util:cluster_and_connect(single),
    {ok, Out} = ts_util:create_bucket_type(Cluster, TableDef, "mytãble"),

    Cluster = ts_setup:start_cluster(1),
    {ok, Out} = ts_setup:create_bucket_type(Cluster, TableDef, "mytãble"),
    case binary:match(list_to_binary(Out), <<"invalid json">>) of
        nomatch ->
            {error, "Expecting this to fail, check implications for riak_ql"};

@ -27,10 +27,13 @@
-include_lib("eunit/include/eunit.hrl").

confirm() ->
    TestType = normal,
    DDL = ts_util:get_ddl(),
    Obj = [ts_util:get_valid_obj()],
    Got = ts_util:ts_put(
        ts_util:cluster_and_connect(single), TestType, DDL, Obj),
    Table = ts_data:get_default_bucket(),
    DDL = ts_data:get_ddl(),
    Obj = [ts_data:get_valid_obj()],

    Cluster = ts_setup:start_cluster(1),
    ts_setup:create_bucket_type(Cluster, DDL, Table),
    ts_setup:activate_bucket_type(Cluster, Table),
    Got = ts_ops:put(Cluster, Table, Obj),
    ?assertEqual(ok, Got),
    pass.

@ -27,12 +27,15 @@
-include_lib("eunit/include/eunit.hrl").

confirm() ->
    TestType = normal,
    DDL = ts_util:get_ddl(big),
    Table = ts_data:get_default_bucket(),
    DDL = ts_data:get_ddl(big),
    N = 10,
    Data = ts_util:get_valid_big_data(N),
    Got = ts_util:ts_put(
        ts_util:cluster_and_connect(single), TestType, DDL, Data),
    Data = ts_data:get_valid_big_data(N),

    Cluster = ts_setup:start_cluster(1),
    ts_setup:create_bucket_type(Cluster, DDL, Table),
    ts_setup:activate_bucket_type(Cluster, Table),
    Got = ts_ops:put(Cluster, Table, Data),
    ?assertEqual(ok, Got),
    pass.

@ -1,6 +1,6 @@
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2015 Basho Technologies, Inc.
%% Copyright (c) 2015-2016 Basho Technologies, Inc.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
@ -30,7 +30,7 @@
-define(SPANNING_STEP, (1000)).

confirm() ->
    TestType = normal,
    Table = ts_data:get_default_bucket(),
    DDL =
        "CREATE TABLE GeoCheckin ("
        " myfamily varchar not null,"
@ -47,8 +47,10 @@ confirm() ->
    Series = <<"seriesX">>,
    N = 10,
    Data = make_data(N, Family, Series, []),
    Got = ts_util:ts_put(
        ts_util:cluster_and_connect(single), TestType, DDL, Data),
    Cluster = ts_setup:start_cluster(1),
    ts_setup:create_bucket_type(Cluster, DDL, Table),
    ts_setup:activate_bucket_type(Cluster, Table),
    Got = ts_ops:put(Cluster, Table, Data),
    ?assertEqual(ok, Got),
    pass.

@ -1,6 +1,6 @@
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2015 Basho Technologies, Inc.
%% Copyright (c) 2015-2016 Basho Technologies, Inc.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
@ -27,16 +27,19 @@
-include_lib("eunit/include/eunit.hrl").

confirm() ->
    TestType = normal,
    DDL = ts_util:get_ddl(),
    Table = ts_data:get_default_bucket(),
    DDL = ts_data:get_ddl(),
    Obj =
        [{ts_util:get_varchar(),
          ts_util:get_varchar(),
        [{ts_data:get_varchar(),
          ts_data:get_varchar(),
          <<"abc">>,
          ts_util:get_varchar(),
          ts_util:get_float()}],
          ts_data:get_varchar(),
          ts_data:get_float()}],
    Expected = {error, {1003, <<"Invalid data found at row index(es) 1">>}},
    Got = ts_util:ts_put(
        ts_util:cluster_and_connect(single), TestType, DDL, Obj),

    Cluster = ts_setup:start_cluster(1),
    ts_setup:create_bucket_type(Cluster, DDL, Table),
    ts_setup:activate_bucket_type(Cluster, Table),
    Got = ts_ops:put(Cluster, Table, Obj),
    ?assertEqual(Expected, Got),
    pass.

@ -1,6 +1,6 @@
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2015 Basho Technologies, Inc.
%% Copyright (c) 2015-2016 Basho Technologies, Inc.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
@ -32,30 +32,36 @@
-include_lib("eunit/include/eunit.hrl").

confirm() ->
    DDL = ts_util:get_ddl(),
    ValidObj = ts_util:get_valid_obj(),
    InvalidObj = ts_util:get_invalid_obj(),
    ShortObj = ts_util:get_short_obj(),
    LongObj = ts_util:get_long_obj(),
    Bucket = ts_util:get_default_bucket(),
    {_Cluster, Conn} = ClusterConn = ts_util:cluster_and_connect(single),
    Table = ts_data:get_default_bucket(),
    DDL = ts_data:get_ddl(),

    Cluster = ts_setup:start_cluster(1),
    Conn = ts_setup:conn(Cluster),
    ts_setup:create_bucket_type(Cluster, DDL, Table),
    ts_setup:activate_bucket_type(Cluster, Table),

    ValidObj = ts_data:get_valid_obj(),
    InvalidObj = ts_data:get_invalid_obj(),
    ShortObj = ts_data:get_short_obj(),
    LongObj = ts_data:get_long_obj(),

    Expected1 = {error, {1003, <<"Invalid data found at row index(es) 1">>}},
    Expected2 = {error, {1003, <<"Invalid data found at row index(es) 2">>}},
    Got = ts_util:ts_put(ClusterConn, normal, DDL, [InvalidObj]),
    Got = riakc_ts:put(Conn, Table, [InvalidObj]),
    ?assertEqual(Expected1, Got),

    Got2 = riakc_ts:put(Conn, Bucket, [ShortObj]),
    Got2 = riakc_ts:put(Conn, Table, [ShortObj]),
    ?assertEqual(Expected1, Got2),

    Got3 = riakc_ts:put(Conn, Bucket, [LongObj]),
    Got3 = riakc_ts:put(Conn, Table, [LongObj]),
    ?assertEqual(Expected1, Got3),

    Got4 = riakc_ts:put(Conn, Bucket, [ValidObj, InvalidObj]),
    Got4 = riakc_ts:put(Conn, Table, [ValidObj, InvalidObj]),
    ?assertEqual(Expected2, Got4),

    Got5 = riakc_ts:put(Conn, Bucket, [ValidObj, ShortObj]),
    Got5 = riakc_ts:put(Conn, Table, [ValidObj, ShortObj]),
    ?assertEqual(Expected2, Got5),

    Got6 = riakc_ts:put(Conn, Bucket, [ValidObj, LongObj]),
    Got6 = riakc_ts:put(Conn, Table, [ValidObj, LongObj]),
    ?assertEqual(Expected2, Got6),
    pass.

@ -31,10 +31,12 @@
-include_lib("eunit/include/eunit.hrl").

confirm() ->
    DDL = ts_util:get_ddl(),
    Obj = [ts_util:get_invalid_obj()],
    Got = ts_util:ts_put(
        ts_util:cluster_and_connect(single), no_ddl, DDL, Obj),
    Table = ts_data:get_default_bucket(),
    Obj = [ts_data:get_invalid_obj()],

    Cluster = ts_setup:start_cluster(1),

    Got = ts_ops:put(Cluster, Table, Obj),
    ?assertMatch({error, _}, Got),
    pass.
@ -1,6 +1,6 @@
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2015 Basho Technologies, Inc.
%% Copyright (c) 2015-2016 Basho Technologies, Inc.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
@ -29,12 +29,12 @@
|
||||
-define(DETS_TABLE, riak_kv_compile_tab_v2).
|
||||
|
||||
confirm() ->
|
||||
{Cluster, _Conn} = ts_util:cluster_and_connect(single),
|
||||
Node = hd(Cluster),
|
||||
[Node | _Rest] = Cluster = ts_setup:start_cluster(1),
|
||||
lists:foreach(
|
||||
fun(Table) ->
|
||||
DDL = create_table_sql(Table),
|
||||
ts_util:create_and_activate_bucket_type(Cluster, DDL, Table)
|
||||
ts_setup:create_bucket_type(Cluster, DDL, Table),
|
||||
ts_setup:activate_bucket_type(Cluster, Table)
|
||||
end, test_tables()),
|
||||
rt:stop(Node),
|
||||
simulate_old_dets_entries(),
|
||||
|
@ -27,14 +27,18 @@
|
||||
-export([confirm/0]).
|
||||
|
||||
confirm() ->
|
||||
TestType = normal,
|
||||
DDL = ts_util:get_ddl(),
|
||||
Data = ts_util:get_valid_select_data(),
|
||||
Qry = ts_util:get_valid_qry(),
|
||||
Table = ts_data:get_default_bucket(),
|
||||
DDL = ts_data:get_ddl(),
|
||||
Data = ts_data:get_valid_select_data(),
|
||||
Qry = ts_data:get_valid_qry(),
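%% The default query uses exclusive time bounds, so only the inner rows
%% (positions 2..9) of the generated data should come back: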
Expected =
{ok, {ts_util:get_cols(small),
ts_util:exclusive_result_from_data(Data, 2, 9)}},
Got = ts_util:ts_query(
ts_util:cluster_and_connect(single), TestType, DDL, Data, Qry),
{ok, {ts_data:get_cols(small),
ts_data:exclusive_result_from_data(Data, 2, 9)}},

Cluster = ts_setup:start_cluster(1),
ts_setup:create_bucket_type(Cluster, DDL, Table),
ts_setup:activate_bucket_type(Cluster, Table),
ts_ops:put(Cluster, Table, Data),
Got = ts_ops:query(Cluster, Qry),
?assertEqual(Expected, Got),
pass.

@ -30,17 +30,22 @@
%%% FIXME failing because of RTS-388

confirm() ->
DDL = ts_util:get_ddl(),
Data = ts_util:get_valid_select_data(),
Table = ts_data:get_default_bucket(),
DDL = ts_data:get_ddl(),
Data = ts_data:get_valid_select_data(),
Qry =
"SELECT * FROM GeoCheckin "
"WHERE time > 1 and time < 10 "
"AND myfamily = 'fa2mily1' "
"AND myseries ='seriesX' "
"AND weather = myseries",
{error, {1001, Got}} = ts_util:ts_query(
ts_util:cluster_and_connect(single), normal, DDL, Data, Qry),
?assertNotEqual(0, string:str(
binary_to_list(Got),
"Comparing or otherwise operating on two fields is not supported")),
pass.
Expected =
{error, {1001, "Comparing or otherwise operating on two fields is not supported"}},

Cluster = ts_setup:start_cluster(1),
ts_setup:create_bucket_type(Cluster, DDL, Table),
ts_setup:activate_bucket_type(Cluster, Table),
ts_ops:put(Cluster, Table, Data),
Got = ts_ops:query(Cluster, Qry),
ts_data:assert_error_regex("Comparing fields", Expected, Got).

@ -1,6 +1,6 @@
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2015 Basho Technologies, Inc.
%% Copyright (c) 2015-2016 Basho Technologies, Inc.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
@ -27,7 +27,6 @@

%%
confirm() ->
TestType = normal,
TableDef =
"CREATE TABLE GeoCheckin ("
" myfamily double not null,"
@ -40,10 +39,16 @@ confirm() ->
"WHERE time >= 1 AND time <= 10 "
"AND myseries = 'series' "
"AND myfamily = 13.777744543543500002342342342342342340000000017777445435435000023423423423423423400000000177774454354350000234234234234234234000000001",
?assertEqual(
{ok, {[<<"myfamily">>, <<"myseries">>, <<"time">>], input_data()}},
ts_util:ts_query(
ts_util:cluster_and_connect(single), TestType, TableDef, input_data(), Query)),
Table = ts_data:get_default_bucket(),
Data = input_data(),
Expected = {ok, {[<<"myfamily">>, <<"myseries">>, <<"time">>], input_data()}},

Cluster = ts_setup:start_cluster(1),
ts_setup:create_bucket_type(Cluster, TableDef, Table),
ts_setup:activate_bucket_type(Cluster, Table),
ts_ops:put(Cluster, Table, Data),
Got = ts_ops:query(Cluster, Query),
?assertEqual(Expected, Got),
pass.

%%

@ -27,8 +27,9 @@
-export([confirm/0]).

confirm() ->
DDL = ts_util:get_ddl(),
Data = ts_util:get_valid_select_data(),
Table = ts_data:get_default_bucket(),
DDL = ts_data:get_ddl(),
Data = ts_data:get_valid_select_data(),
Qry =
"SELECT * FROM GeoCheckin "
"WHERE time > 1 AND time < 10 "
@ -38,6 +39,10 @@ confirm() ->
{error,
{1001,
<<".*incompatible_type: field myseries with type varchar cannot be compared to type float in where clause.">>}},
Got = ts_util:ts_query(
ts_util:cluster_and_connect(single), normal, DDL, Data, Qry),
ts_util:assert_error_regex("Incompatible types", Expected, Got).

Cluster = ts_setup:start_cluster(1),
ts_setup:create_bucket_type(Cluster, DDL, Table),
ts_setup:activate_bucket_type(Cluster, Table),
ts_ops:put(Cluster, Table, Data),
Got = ts_ops:query(Cluster, Qry),
ts_data:assert_error_regex("Incompatible types", Expected, Got).

@ -29,8 +29,9 @@
-export([confirm/0]).

confirm() ->
DDL = ts_util:get_ddl(),
Data = ts_util:get_valid_select_data(),
Table = ts_data:get_default_bucket(),
DDL = ts_data:get_ddl(),
Data = ts_data:get_valid_select_data(),
Qry =
"select * from GeoCheckin "
"where time > 1 and time < 10 "
@ -40,6 +41,10 @@ confirm() ->
{error,
{1001,
<<".*incompatible_type: field myseries with type varchar cannot be compared to type integer in where clause.">>}},
Got = ts_util:ts_query(
ts_util:cluster_and_connect(single), normal, DDL, Data, Qry),
ts_util:assert_error_regex("Incompatible type", Expected, Got).

Cluster = ts_setup:start_cluster(1),
ts_setup:create_bucket_type(Cluster, DDL, Table),
ts_setup:activate_bucket_type(Cluster, Table),
ts_ops:put(Cluster, Table, Data),
Got = ts_ops:query(Cluster, Qry),
ts_data:assert_error_regex("Incompatible type", Expected, Got).

@ -1,6 +1,6 @@
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2015 Basho Technologies, Inc.
%% Copyright (c) 2015-2016 Basho Technologies, Inc.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
@ -34,7 +34,6 @@
-include_lib("eunit/include/eunit.hrl").

-define(SPANNING_STEP, (1000)).
-define(TEST_TYPE, normal).

confirm() ->
%% will fail if ts_simple_put_all_null_datatypes fails
@ -54,26 +53,29 @@ confirm() ->
Series = <<"seriesX">>,
N = 11,
Data = make_data(N, Family, Series, []),
ClusterConn = ts_util:cluster_and_connect(single),
Got = ts_util:ts_put(ClusterConn, ?TEST_TYPE, DDL, Data),
Cluster = ts_setup:start_cluster(1),
Table = ts_data:get_default_bucket(),
ts_setup:create_bucket_type(Cluster, DDL, Table),
ts_setup:activate_bucket_type(Cluster, Table),
Got = ts_ops:put(Cluster, Table, Data),
?assertEqual(ok, Got),
Qry = "SELECT * FROM GeoCheckin WHERE"
" myfamily='" ++ binary_to_list(Family) ++ "'"
" AND myseries='" ++ binary_to_list(Series) ++ "'"
" AND time >= 1000 AND time <= " ++ integer_to_list(N * 1000) ++
" AND myvarchar IS NULL",
{ok, {_Fields, Rows}} = ts_util:ts_query(ClusterConn, ?TEST_TYPE, DDL, Data, Qry),
{ok, {_Fields, Rows}} = ts_ops:query(Cluster, Qry),
?assertNotEqual(0, length(Rows)),
NullableFields = [ "myvarchar", "myint", "myfloat", "mybool", "mytimestamp" ],
lists:foreach(fun (Field) ->
query_field(Field, ClusterConn, DDL, Data, Family, Series, N)
query_field(Field, Cluster, Data, Family, Series, N)
end, NullableFields),
pass.

query_field(Field, ClusterConn, DDL, Data, Family, Series, N) ->
RowsAll = query_all(ClusterConn, DDL, Data, Family, Series, N),
RowsIsNull = query_is_null(ClusterConn, DDL, Data, Family, Series, N, Field),
RowsIsNotNull = query_is_not_null(ClusterConn, DDL, Data, Family, Series, N, Field),
query_field(Field, Cluster, Data, Family, Series, N) ->
RowsAll = query_all(Cluster, Family, Series, N),
RowsIsNull = query_is_null(Cluster, Data, Family, Series, N, Field),
RowsIsNotNull = query_is_not_null(Cluster, Data, Family, Series, N, Field),
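%% IS NULL and IS NOT NULL partition the result set, so for every nullable
%% field the two counts must add up to the full row count: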
?assertEqual(RowsAll, RowsIsNull + RowsIsNotNull).

query_base(Family, Series, N) ->
@ -82,17 +84,17 @@ query_base(Family, Series, N) ->
" AND myseries='" ++ binary_to_list(Series) ++ "'"
" AND time >= 1000 AND time <= " ++ integer_to_list(N * 1000 + ?SPANNING_STEP).

query_all(ClusterConn, DDL, Data, Family, Series, N) ->
query_all(Cluster, Family, Series, N) ->
Qry = query_base(Family, Series, N),
{ok, {_Fields, Rows}} = ts_util:ts_query(ClusterConn, ?TEST_TYPE, DDL, Data, Qry),
{ok, {_Fields, Rows}} = ts_ops:query(Cluster, Qry),
RowsN = length(Rows),
?assertNotEqual(0, RowsN),
RowsN.

query_is_null(ClusterConn, DDL, Data, Family, Series, N, Field) ->
query_is_null(Cluster, Data, Family, Series, N, Field) ->
Qry = query_base(Family, Series, N) ++
" AND " ++ Field ++ " IS NULL",
{ok, {_Fields, Rows}} = ts_util:ts_query(ClusterConn, ?TEST_TYPE, DDL, Data, Qry),
{ok, {_Fields, Rows}} = ts_ops:query(Cluster, Qry),
RowsN = length(Rows),
%% the number of NULL rows can be determined by any non-key field being NULL
RowsNull = lists:foldr(fun (El, Acc) ->
@ -104,10 +106,10 @@ query_is_null(ClusterConn, DDL, Data, Family, Series, N, Field) ->
?assertEqual(RowsNull, RowsN),
RowsN.

query_is_not_null(ClusterConn, DDL, Data, Family, Series, N, Field) ->
query_is_not_null(Cluster, Data, Family, Series, N, Field) ->
Qry = query_base(Family, Series, N) ++
" AND " ++ Field ++ " IS NOT NULL",
{ok, {_Fields, Rows}} = ts_util:ts_query(ClusterConn, ?TEST_TYPE, DDL, Data, Qry),
{ok, {_Fields, Rows}} = ts_ops:query(Cluster, Qry),
RowsN = length(Rows),
%% the number of NULL rows can be determined by any non-key field being NULL
RowsNotNull = lists:foldr(fun (El, Acc) ->

@ -83,42 +83,41 @@
]).


confirm() ->
DDL = ts_util:get_ddl(),
Table = ts_data:get_default_bucket(),
DDL = ts_data:get_ddl(),
Start = jam:to_epoch(jam:compile(jam_iso8601:parse(?LOWER)), 3),
End = jam:to_epoch(jam:compile(jam_iso8601:parse(?UPPER)), 3),
AllData = ts_util:get_valid_select_data(fun() -> lists:seq(Start, End, 1000) end),

{Cluster, Conn} = ts_util:cluster_and_connect(single),
Bucket = ts_util:get_default_bucket(),
ts_util:create_table(normal, Cluster, DDL, Bucket),
riakc_ts:put(Conn, Bucket, AllData),

AllData = ts_data:get_valid_select_data(fun() -> lists:seq(Start, End, 1000) end),
QryFmt =
"SELECT * FROM GeoCheckin "
"WHERE time ~s '~s' and time ~s '~s' "
"AND myfamily = 'family1' "
"AND myseries ='seriesX' ",

Cluster = ts_setup:start_cluster(1),
ts_setup:create_bucket_type(Cluster, DDL, Table),
ts_setup:activate_bucket_type(Cluster, Table),
ts_ops:put(Cluster, Table, AllData),

DDL = ts_data:get_ddl(),

lists:foreach(
fun({Tally, {Op1, String1}, {Op2, String2}}) ->
Qry = lists:flatten(
io_lib:format(QryFmt, [Op1, String1,
Op2, String2])),
Qry = ts_data:flat_format(QryFmt, [Op1, String1,
Op2, String2]),

{ok, {_Cols, Data}} = ts_util:single_query(Conn, Qry),
{ok, {_Cols, Data}} = ts_ops:query(Cluster, Qry),

?assertEqual(Tally, length(Data))
end, ?PASS_TESTS),

lists:foreach(
fun({ErrCode, {Op1, String1}, {Op2, String2}}) ->
Qry = lists:flatten(
io_lib:format(QryFmt, [Op1, String1,
Op2, String2])),
Qry = ts_data:flat_format(QryFmt, [Op1, String1,
Op2, String2]),

RetMsg = ts_util:single_query(Conn, Qry),
RetMsg = ts_ops:query(Cluster, Qry),
?assertMatch({error, {ErrCode, _}}, RetMsg)
end, ?FAIL_TESTS),

@ -30,16 +30,21 @@
-export([confirm/0]).

confirm() ->
DDL = ts_util:get_ddl(),
Data = ts_util:get_valid_select_data(),
% query with missing myfamily field
Query =
Table = ts_data:get_default_bucket(),
DDL = ts_data:get_ddl(),
Data = ts_data:get_valid_select_data(),
Qry =
"select * from GeoCheckin "
"where time > 1 and time < 10",
Expected =
{error,
{1001,
<<"The 'myfamily' parameter is part the primary key but not specified in the where clause.">>}},
Got = ts_util:ts_query(
ts_util:cluster_and_connect(single), normal, DDL, Data, Query),
ts_util:assert_error_regex("Missing key", Expected, Got).

Cluster = ts_setup:start_cluster(1),
ts_setup:create_bucket_type(Cluster, DDL, Table),
ts_setup:activate_bucket_type(Cluster, Table),
ts_ops:put(Cluster, Table, Data),
Got = ts_ops:query(Cluster, Qry),
?assertEqual(Expected, Got),
pass.

@ -35,7 +35,7 @@
-export([confirm/0]).

confirm() ->
Cluster = ts_util:cluster_and_connect(single),
Cluster = ts_setup:start_cluster(1),
%% First try quantum boundaries. 135309600000 is a boundary value
%% for a 15 minute quantum
Base = 135309599999,
@ -50,25 +50,22 @@ confirm() ->
try_gap(Cluster, 5, 6).

try_gap(Cluster, Lower, Upper) ->
TestType = normal,
DDL = ts_util:get_ddl(),
Data = [],

Qry = lists:flatten(io_lib:format(
Table = ts_data:get_default_bucket(),
DDL = ts_data:get_ddl(),
Qry = ts_data:flat_format(
"select * from GeoCheckin "
"where time > ~B and time < ~B "
"and myfamily = 'family1' "
"and myseries ='seriesX' ",
[Lower, Upper])),
[Lower, Upper]),
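%% With integer timestamps there is no value strictly between Lower and
%% Upper when Upper =< Lower + 1, so such "gaps" are rejected outright: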
Expected =
{error,
{1001,
<<"boundaries are equal or adjacent">>}},
Got = ts_util:ts_query(
Cluster, TestType, DDL, Data, Qry),
convert_to_pass(?assert(ts_util:assert_error_regex("No gap between times", Expected, Got) == pass)).

convert_to_pass(ok) ->
pass;
convert_to_pass(_) ->
fail.
Cluster = ts_setup:start_cluster(1),
ts_setup:create_bucket_type(Cluster, DDL, Table),
ts_setup:activate_bucket_type(Cluster, Table),
Got = ts_ops:query(Cluster, Qry),
ts_data:assert_error_regex("No gap between times", Expected, Got).

@ -1,6 +1,6 @@
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2015 Basho Technologies, Inc.
%% Copyright (c) 2015-2016 Basho Technologies, Inc.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
@ -30,8 +30,9 @@
%% primary key.

confirm() ->
DDL = ts_util:get_ddl(),
Data = ts_util:get_valid_select_data(),
Table = ts_data:get_default_bucket(),
DDL = ts_data:get_ddl(),
Data = ts_data:get_valid_select_data(),
% weather is not part of the primary key, it is
% randomly generated data so this should return
% zero results
@ -42,7 +43,11 @@ confirm() ->
"AND myseries = 'seriesX' "
"AND weather = 'summer rain'",
Expected = {ok, {[], []}},
Got = ts_util:ts_query(
ts_util:cluster_and_connect(single), normal, DDL, Data, Qry),

Cluster = ts_setup:start_cluster(1),
ts_setup:create_bucket_type(Cluster, DDL, Table),
ts_setup:activate_bucket_type(Cluster, Table),
ts_ops:put(Cluster, Table, Data),
Got = ts_ops:query(Cluster, Qry),
?assertEqual(Expected, Got),
pass.

@ -27,12 +27,15 @@
-export([confirm/0]).

confirm() ->
TestType = normal,
DDL = ts_util:get_ddl(),
Data = ts_util:get_valid_select_data(),
Table = ts_data:get_default_bucket(),
DDL = ts_data:get_ddl(),
Data = ts_data:get_valid_select_data(),
Qry =
"selectah * from GeoCheckin "
"Where time > 1 and time < 10",
Got = ts_util:ts_query(
ts_util:cluster_and_connect(single), TestType, DDL, Data, Qry),
ts_util:assert_error_regex("Unexpected Token", {error, {1020, <<".*Unexpected token.*">>}}, Got).
Cluster = ts_setup:start_cluster(1),
ts_setup:create_bucket_type(Cluster, DDL, Table),
ts_setup:activate_bucket_type(Cluster, Table),
ts_ops:put(Cluster, Table, Data),
Got = ts_ops:query(Cluster, Qry),
ts_data:assert_error_regex("Unexpected Token", {error, {1020, <<".*Unexpected token.*">>}}, Got).

@ -27,18 +27,16 @@
-export([confirm/0]).

confirm() ->
TestType = normal,
DDL = ts_util:get_ddl(),
Data = [],
DDL = ts_data:get_ddl(),
Qry = "select * from GeoCheckin "
"where time < 10 "
"and myfamily = 'family1' "
"and myseries ='seriesX' ",
Expected =
{error,
{1001,
<<"Where clause has no lower bound.">>}},
Got = ts_util:ts_query(
ts_util:cluster_and_connect(single), TestType, DDL, Data, Qry),
ts_util:assert_error_regex("No lower bound", Expected, Got).

{error, {1001, <<"Where clause has no lower bound.">>}},
Cluster = ts_setup:start_cluster(1),
Table = ts_data:get_default_bucket(),
{ok, _} = ts_setup:create_bucket_type(Cluster, DDL, Table),
ok = ts_setup:activate_bucket_type(Cluster, Table),
Got = ts_ops:query(Cluster, Qry),
ts_data:assert_error_regex("No lower bound", Expected, Got).

@ -27,16 +27,16 @@
-export([confirm/0]).

confirm() ->
TestType = normal,
DDL = ts_util:get_ddl(),
Data = [],
DDL = ts_data:get_ddl(),
Qry = "select * from GeoCheckin "
"where time > 10 "
"and myfamily = 'family1' "
"and myseries ='seriesX' ",
Expected =
{error, {1001, <<"Where clause has no upper bound.">>}},
Got = ts_util:ts_query(
ts_util:cluster_and_connect(single), TestType, DDL, Data, Qry),
ts_util:assert_error_regex("No upper bound", Expected, Got).

Cluster = ts_setup:start_cluster(1),
Table = ts_data:get_default_bucket(),
{ok, _} = ts_setup:create_bucket_type(Cluster, DDL, Table),
ok = ts_setup:activate_bucket_type(Cluster, Table),
Got = ts_ops:query(Cluster, Qry),
ts_data:assert_error_regex("No upper bound", Expected, Got).

@ -27,10 +27,10 @@
-export([confirm/0]).

confirm() ->
ClusterConn = {_, Conn} = ts_util:cluster_and_connect(single),
Cluster = ts_setup:start_cluster(1),

%% First test no tables
Got = ts_util:single_query(Conn, "SHOW TABLES"),
Got = ts_ops:query(Cluster, "SHOW TABLES"),
?assertEqual(
{ok, {[], []}},
Got
@ -43,12 +43,13 @@ confirm() ->
" frequency timestamp not null,"
" PRIMARY KEY ((quantum(frequency, 15, 'm')),"
" frequency))",
lists:foreach(fun({T}) ->
SQL = ts_util:flat_format(Create, [T]),
{ok, _} = ts_util:create_and_activate_bucket_type(ClusterConn, SQL, T)
lists:foreach(fun({Table}) ->
SQL = ts_data:flat_format(Create, [Table]),
{ok, _} = ts_setup:create_bucket_type(Cluster, SQL, Table),
ok = ts_setup:activate_bucket_type(Cluster, Table)
end,
Tables),
Got1 = ts_util:single_query(Cluster, "SHOW TABLES"),
Got1 = ts_ops:query(Cluster, "SHOW TABLES"),
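%% SHOW TABLES returns the table names in sorted order, hence the
%% usort/1 on the expected list: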
?assertEqual(
{ok, {[<<"Table">>], lists:usort(Tables)}},
Got1

@ -1,3 +1,23 @@
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2016 Basho Technologies, Inc.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------

-module(ts_simple_single_key_ops).
-behavior(riak_test).
-compile([export_all]).
@ -5,7 +25,7 @@
-include_lib("eunit/include/eunit.hrl").

confirm() ->
[Node1|_] = ts_util:build_cluster(single),
[Node1|_] = ts_setup:start_cluster(1),
Pid = rt:pbc(Node1),

create_table_def_1(Pid),

@ -34,9 +34,9 @@ confirm() ->
"series VARCHAR NOT NULL, "
"time TIMESTAMP NOT NULL, "
"PRIMARY KEY ((family, series, quantum(time, 15, 's')), family, series, time))",
[Node | _] = ts_util:build_cluster(single),
{[Node|_] = Cluster,_} = ts_util:cluster_and_connect(single),
{ok, Out} = ts_util:create_bucket_type(Cluster, TableDef, "Αισθητήρας"),

Cluster = ts_setup:start_cluster(1),
{ok, Out} = ts_setup:create_bucket_type(Cluster, TableDef, "Αισθητήρας"),
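%% The non-ASCII table name is expected to make bucket-type creation fail
%% with an "invalid json" error; any other outcome is reported below.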
case binary:match(list_to_binary(Out), <<"invalid json">>) of
nomatch ->
{error, "Expecting this to fail, check implications for riak_ql"};