Merge branches 'riak_ts-develop' and 'query_buffer_SUITE-chmod-fix-kc' of https://github.com/basho/riak_test into query_buffer_SUITE-chmod-fix-kc
Commit 380884304c
@@ -8,12 +8,12 @@
"}}.
{{command, "show_nodes; "}, {result, "The connected nodes are: ['dev1@127.0.0.1','dev2@127.0.0.1','dev3@127.0.0.1']"}}.
{{command, "CREATE TABLE GeoCheckin (myfamily varchar not null, myseries varchar not null, time timestamp not null, weather varchar not null, temperature double, PRIMARY KEY ((myfamily, myseries, quantum(time, 15, 'm')), myfamily, myseries, time));\n"}, {result, ""}}.
{{command, "describe GeoCheckin;\n"}, {result, "Column,Type,Is Null,Primary Key,Local Key,Interval,Unit
myfamily,varchar,false,1,1,,
myseries,varchar,false,2,2,,
time,timestamp,false,3,3,15,m
weather,varchar,false,,,,
temperature,double,true,,,,
{{command, "describe GeoCheckin;\n"}, {result, "Column,Type,Nullable,Partition Key,Local Key,Interval,Unit,Sort Order
myfamily,varchar,false,1,1,,,
myseries,varchar,false,2,2,,,
time,timestamp,false,3,3,15,m,
weather,varchar,false,,,,,
temperature,double,true,,,,,
"}}.
{{command, "SHOW TABLES;\n"}, {result, "Table
GeoCheckin
@@ -44,7 +44,7 @@ GeoCheckin
{{command, "insert into GeoCheckin (myfamily, myseries, time, weather, temperature) values ('family1','series1',10,'hail',38.1);\n"}, {result, ""}}.
{{command, "select time, weather, temperature from GeoCheckin where myfamily='family1' and myseries='seriesX' and time > 10 and time < 1000;\n"}, {result, ""}}.
{{command, "select * from GeoCheckin;\n"}, {result, "Error (1001): The query must have a where clause."}}.
{{command, "select * from GeoCheckin where myfamily = 'family1' and myseries = 'series1' and time >= 1420113600000 and time <= 1420119300000;\n"}, {result, "Error (1001): Too many subqueries (7)"}}.
{{command, "select * from GeoCheckin where myfamily = 'family1' and myseries = 'series1' and time >= 1420113600000 and time <= 1420119300000;\n"}, {result, []}}.
{{command, "select * from GeoCheckin where myfamily = 'family1' and myseries = 'series1' and time >= 1 and time <= 2;\n"},
 {result,
 "myfamily,myseries,time,weather,temperature
@@ -80,3 +80,133 @@ family1,series1,1970-01-01T00:00:00.007Z,cloudy,27.9
555,1.1,10.0,0.01123
555,1.1,10.0,0.01123
"}}.
{{command, "h; "}, {result, "Error: invalid function call : history_EXT:h []
You can rerun a command by finding the command in the history list
with `show_history;` and using the number next to it as the argument
to `history` or `h`: `history 3;` or `h 3;` for example."}}.
{{command, "help ; "}, {result, "The following functions are available

Extension 'connection':
connect, connection_prompt, ping, reconnect, show_connection, show_cookie
show_nodes

Extension 'debug':
load, observer

Extension 'history':
clear_history, h, history, show_history

Extension 'log':
date_log, log, logfile, regression_log, replay_log, show_log_status

Extension 'shell':
about, q, quit, show_config, show_version

You can get more help by calling help with the
extension name and function name like 'help shell quit;'

For SQL help type 'help SQL'"}}.
{{command, "help ; "}, {result, "The following functions are available

Extension 'connection':
connect, connection_prompt, ping, reconnect, show_connection, show_cookie
show_nodes

Extension 'debug':
load, observer

Extension 'history':
clear_history, h, history, show_history

Extension 'log':
date_log, log, logfile, regression_log, replay_log, show_log_status

Extension 'shell':
about, q, quit, show_config, show_version

You can get more help by calling help with the
extension name and function name like 'help shell quit;'

For SQL help type 'help SQL'"}}.
{{command, "date_log; "}, {result, "Error: invalid function call : log_EXT:date_log []
Toggle adding a timestamp to the name of the log file with `date_log on ;`
and off with `date_log off ;`
The filename will be something like \"riak_shell.2016_02_15-16:42:22.log\"
You will get a new log file for each session of riak-shell.

The default can be set in the config file."}}.
{{command, "date_log off; "}, {result, "Log files will not contain a date/time stamp."}}.
{{command, "help; "}, {result, "The following functions are available

Extension 'connection':
connect, connection_prompt, ping, reconnect, show_connection, show_cookie
show_nodes

Extension 'debug':
load, observer

Extension 'history':
clear_history, h, history, show_history

Extension 'log':
date_log, log, logfile, regression_log, replay_log, show_log_status

Extension 'shell':
about, q, quit, show_config, show_version

You can get more help by calling help with the
extension name and function name like 'help shell quit;'

For SQL help type 'help SQL'"}}.
{{command, "help sql ; "}, {result, "The following SQL help commands are supported:
CREATE - using CREATE TABLE statements
DELETE - deleting data with DELETE FROM
DESCRIBE - examining table structures
EXPLAIN - understanding SELECT query execution paths
INSERT - inserting data with INSERT INTO statements
SELECT - querying data
SHOW - listing tables

SELECT can be used with ORDER BY, GROUP BY and LIMIT clauses. It supports arithmetic on column values and has a variety of aggregation functions: COUNT, SUM, MEAN, AVG, MAX, MIN, STDDEV, STDDEV_SAMP and STDDEV_POP

To get more help type 'help SQL SELECT' (replacing SELECT with another statement as appropriate)"}}.
{{command, "help sql select; "}, {result, "You can use the SELECT statement to query your Time Series data.
(this example uses the table definition from 'help SQL CREATE' and the data from 'help SQL INSERT')

An example of the format is shown below:

(1)>SELECT * FROM mytable where keyfield = 'keyvalue' and timefield > '2016-11-30 19:15:00' and timefield < '2016-11-30 19:45:00';

You can specify individual field names, and apply functions or arithmetic to them:

(2)>SELECT otherfield1 FROM mytable where keyfield = 'keyvalue' and timefield > '2016-11-30 19:15:00' and timefield < '2016-11-30 19:45:00';

(3)>SELECT otherfield1/2 FROM mytable where keyfield = 'keyvalue' and timefield > '2016-11-30 19:15:00' and timefield < '2016-11-30 19:45:00';

(4)>SELECT MEAN(otherfield1) FROM mytable where keyfield = 'keyvalue' and timefield > '2016-11-30 19:15:00' and timefield < '2016-11-30 19:45:00';

The functions supported are:
* COUNT
* SUM
* MEAN and AVG
* MIN
* MAX
* STDEV and STDDEV_SAMP
* STDDEVPOP

You can also decorate SELECT statements with ORDER BY, GROUP BY and LIMIT

For more details please go to http://docs.basho.com/riak/ts
"}}.
{{command, "help history; "}, {result, "The following functions are available

Extension 'history':
clear_history, h, history, show_history

You can get more help by calling help with the
extension name and function name like 'help history clear_history;'

For SQL help type 'help SQL'"}}.
{{command, "help history h; "}, {result, "You can rerun a command by finding the command in the history list
with `show_history;` and using the number next to it as the argument
to `history` or `h`: `history 3;` or `h 3;` for example."}}.
@@ -34,7 +34,7 @@
"93a7f7bf798f2c55c3ae92bd0a1c7fa2fe7fe3b7"}},
{riakc,".*",
{git,"https://github.com/basho/riak-erlang-client",
"37273eb13708605bb70e906168f1779fc15bfd0e"}},
"33b2696208b789f42a5c0d20fb51c35b19ab8103"}},
{ibrowse,".*",
{git,"https://github.com/basho/ibrowse.git",
"b28542d1e326ba44bcfaf7fd6d3c7f8761d20f08"}},
@@ -64,11 +64,13 @@ build_cluster() ->
run_commands([], _State, _ShouldIncrement) ->
    pass;
run_commands([{drain, discard} | T], State, ShouldIncrement) ->
    {_Error, Response, NewState, NewShdIncr} = riak_shell:loop_TEST(riak_shell:make_cmd(), State, ShouldIncrement),
    {_, Cmd} = riak_shell:make_cmd_TEST(),
    {_Error, Response, NewState, NewShdIncr} = riak_shell:loop_TEST(Cmd, State, ShouldIncrement),
    lager:info("Message drained and discarded unchecked ~p", [lists:flatten(Response)]),
    run_commands(T, NewState, NewShdIncr);
run_commands([{drain, Expected} | T], State, ShouldIncrement) ->
    {_Error, Response, NewState, NewShdIncr} = riak_shell:loop_TEST(riak_shell:make_cmd(), State, ShouldIncrement),
    {_, Cmd} = riak_shell:make_cmd_TEST(),
    {_Error, Response, NewState, NewShdIncr} = riak_shell:loop_TEST(Cmd, State, ShouldIncrement),
    case lists:flatten(Response) of
        Expected -> lager:info("Message drained successfully ~p", [Expected]),
                    run_commands(T, NewState, NewShdIncr);
@@ -114,8 +116,9 @@ run_cmd(Cmd, State, ShouldIncrement) ->
    %% we have to emulate that here as we are the shell
    %% we are going to send a message at some time in the future
    %% and then go into a loop waiting for it
    timer:apply_after(500, riak_shell, send_to_shell, [self(), {command, Cmd}]),
    riak_shell:loop_TEST(riak_shell:make_cmd(Cmd), State, ShouldIncrement).
    {Toks, CmdRecord} = riak_shell:make_cmd_TEST(Cmd),
    timer:apply_after(500, riak_shell, send_to_shell, [self(), {command, Toks}]),
    riak_shell:loop_TEST(CmdRecord, State, ShouldIncrement).

print_error(Format, Cmd, Expected, Got) ->
    lager:info(?PREFIX ++ "Match Failure"),
tests/ts_cluster_overload_reported.erl (new file, 163 lines)
@@ -0,0 +1,163 @@
%% -------------------------------------------------------------------
%%
%% ts_cluster_overload_reported - test to ensure overload is handled correctly.
%% based on overload test, simplified to slam TS w/ queries until an overload
%% occurs to ensure {error, atom()} responses are handled correctly w/i TS.
%%
%% Copyright (c) 2016 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(ts_cluster_overload_reported).
-include_lib("eunit/include/eunit.hrl").

-export([confirm/0]).

-define(NODE_COUNT, 3).
-define(READS_COUNT, 1000).
-define(WRITES_COUNT, 1).
-define(READ_RETRIES, 3).
-define(WRITE_RETRIES, 3).
-define(VALUE, "'overload_test_value'").

confirm() ->
    Table = atom_to_list(?MODULE),
    Nodes = setup(Table),
    {Pids, IsOverload} = try generate_mixed_rw_traffic(Nodes, Table) of
                             Ps -> {Ps, false}
                         catch throw:{ts_overload, Ps} ->
                             {Ps, true}
                         end,

    kill_pids(Pids),
    ?assert(IsOverload),
    pass.

kill_pids(Pids) ->
    [exit(Pid, kill) || Pid <- Pids].

setup(Table) ->
    Nodes = rt:build_cluster(?NODE_COUNT, overload_config()),
    pb_create_table(hd(Nodes), Table),
    Nodes.

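%% Cluster config with deliberately low overload limits (vnode_overload_threshold
%% and fsm_limit of 2) so the burst of ?READS_COUNT concurrent reads below can
%% trip overload protection quickly.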
overload_config() ->
    VnodeOverloadThreshold = 2,
    VnodeCheckInterval = 1,
    VnodeCheckRequestInterval = 5,
    FsmLimit = 2,
    [{riak_core, [{ring_creation_size, 8},
                  {default_bucket_props,
                   [
                    {n_val, ?NODE_COUNT},
                    {allow_mult, true},
                    {dvv_enabled, true}
                   ]},
                  {vnode_management_timer, 1000},
                  {enable_health_checks, false},
                  {enable_consensus, true},
                  {vnode_overload_threshold, VnodeOverloadThreshold},
                  {vnode_check_interval, VnodeCheckInterval},
                  {vnode_check_request_interval, VnodeCheckRequestInterval}]},
     {riak_kv, [{fsm_limit, FsmLimit},
                {storage_backend, riak_kv_eleveldb_backend},
                {anti_entropy_build_limit, {100, 1000}},
                {anti_entropy_concurrency, 100},
                {anti_entropy_tick, 100},
                {anti_entropy, {on, []}},
                {anti_entropy_timeout, 5000}]},
     {riak_api, [{pb_backlog, 1024}]}].

generate_mixed_rw_traffic(Nodes, Table) ->
    Node = hd(Nodes),
    WritePids = spawn_writes(Node, Table, ?VALUE, ?WRITES_COUNT, ?WRITE_RETRIES),
    ReadPids = spawn_reads(Node, Table, ?READS_COUNT, ?READ_RETRIES),
    WritePids ++ ReadPids.

spawn_writes(Node, Table, Value, WriteCount, WriteRetries) ->
    PBInsertFun = fun(PBPid, I) ->
                      pb_insert(PBPid, Table, {I, Value})
                  end,
    spawn_op(PBInsertFun, Node, WriteCount, WriteRetries).

spawn_reads(Node, Table, ReadCount, ReadRetries) ->
    PBReadFun = fun(PBPid, _I) ->
                    pb_select(PBPid, Table)
                end,
    spawn_op(PBReadFun, Node, ReadCount, ReadRetries).

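%% Spawns one worker per operation with no pacing; each worker retries its PB
%% call via rt:wait_until/3, then the parent collects one status message per
%% pid and throws {ts_overload, Pids} if any worker reported an overload.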
spawn_op(PBFun, Node, WriteCount, WriteRetries) ->
    TestPid = self(),
    Pids = [begin
                PBInsertFun = fun(PBPid) ->
                                  PBFun(PBPid, I)
                              end,
                Pid = spawn(fun() ->
                                rt:wait_until(pb_fun_fun(TestPid, Node, PBInsertFun), WriteRetries, WriteRetries)
                            end),
                %% thunder on!, no sleep
                Pid
            end || I <- lists:seq(1, WriteCount)],
    Responses = [receive
                     {Status, Pid} -> Status
                 end || Pid <- Pids],
    [ throw({ts_overload, Pids}) || Response <- Responses,
                                    Response =:= sent_ts_overload ],
    Pids.

pb_create_table(Node, Table) ->
    PBPid = rt:pbc(Node),
    Sql = list_to_binary("CREATE TABLE " ++ Table ++
                         "(ts TIMESTAMP NOT NULL," ++
                         "v VARCHAR NOT NULL," ++
                         "PRIMARY KEY((QUANTUM(ts, 1, 'h')), ts))"),
    riakc_ts:query(PBPid, Sql).

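%% Builds the closure handed to rt:wait_until/3: runs a single PB operation,
%% reports {sent_ts_overload, self()} or {sent_ok, self()} back to the test
%% process, and returns false (i.e. retry) on any other error or crash.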
pb_fun_fun(TestPid, Node, PBFun) ->
    fun() ->
        PBPid = rt:pbc(Node),
        Result = case catch PBFun(PBPid) of
                     {error, {1001, <<"overload">>}} ->
                         lager:debug("ts overload detected, succeeded"),
                         TestPid ! {sent_ts_overload, self()},
                         true;
                     {ok, _Res} ->
                         lager:debug("succeeded, continuing..."),
                         TestPid ! {sent_ok, self()},
                         true;
                     {error, Reason} ->
                         lager:debug("error: ~p, continuing...", [Reason]),
                         false;
                     {'EXIT', Type} ->
                         lager:debug("EXIT: ~p, continuing...", [Type]),
                         false
                 end,
        riakc_pb_socket:stop(PBPid),
        Result
    end.

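%% Runs a single-row TS INSERT for row I; ?VALUE already carries its own
%% single quotes, so it is spliced into the SQL string as-is.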
pb_insert(PBPid, Table, {I, Value}) ->
    Sql = list_to_binary("INSERT INTO " ++ Table ++
                         "(ts, v)VALUES(" ++
                         integer_to_list(I) ++
                         "," ++ Value ++
                         ")"),
    riakc_ts:query(PBPid, Sql).

pb_select(PBPid, Table) ->
    Sql = list_to_binary("SELECT * FROM " ++ Table ++
                         " WHERE ts >= 1 AND ts <= 10"),
    riakc_ts:query(PBPid, Sql).
@@ -31,7 +31,9 @@ confirm() ->
    DDL = ts_data:get_ddl(),
    Data = ts_data:get_valid_select_data(),
    TooMuchData = [list_to_tuple(tuple_to_list(Row) ++ [<<"rubbish">>]) || Row <- Data],
    TooLittleData = [list_to_tuple(lists:reverse(tl(lists:reverse(tuple_to_list(Row))))) || Row <- Data],
    %% remove the last 2 columns so that we chomp into a not-null field, because it is
    %% perfectly valid to drop just the final column, which is nullable.
    TooLittleData = [list_to_tuple(lists:reverse(tl(tl(lists:reverse(tuple_to_list(Row)))))) || Row <- Data],
    WrongColumns = TooMuchData ++ TooLittleData,
    Columns = ts_data:get_cols(),

tests/ts_simple_select_table_not_existing.erl (new file, 38 lines)
@@ -0,0 +1,38 @@
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2015-2016 Basho Technologies, Inc.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------

-module(ts_simple_select_table_not_existing).

-behavior(riak_test).

-include_lib("eunit/include/eunit.hrl").

-export([confirm/0]).

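%% Select from a table that was never created and expect the 1019
%% "dne is not an active table" error tuple rather than a crash.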
confirm() ->
    Qry =
        "SELECT * FROM dne "
        "WHERE time > 1 AND time < 10",
    Expected = {error,{1019,<<"dne is not an active table">>}},

    Cluster = ts_setup:start_cluster(1),
    Got = ts_ops:query(Cluster, Qry),
    ?assertEqual(Expected, Got),
    pass.