Mirror of https://github.com/valitydev/bouncer.git (synced 2024-11-06 10:25:19 +00:00)
MSPF-585: Implement naive audit logging (#4)

* Allow setting up multiple pulse handlers
* Enable audit logging through a dedicated log handler
* Remove scoper use from the handler
* Verify that audit log overload stops handlers. Apparently it doesn't, so the test is skipped for now, but the testcase is kept for future experimentation.

Co-authored-by: Sergey Yelin <elinsn@gmail.com>

parent 905cdda999
commit 44fa86982d
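For reference, a minimal sketch of what the new `audit` application environment could look like in a release `sys.config`. The keys mirror the `opts()`, `log_opts()` and `logger_backend_opts()` types introduced in `src/bouncer_audit_log.erl` below; the file path and concrete values are illustrative assumptions, not part of this commit.

%% Illustrative sys.config fragment; path and values are assumptions.
[
    {bouncer, [
        {audit, #{
            log => #{
                % Severity used for most audit events; `notice` is the module default.
                level => notice,
                backend => #{
                    % Write audit events to a dedicated file.
                    type => file,
                    file => "/var/log/bouncer/audit.log",
                    % Queue threshold before events are flushed (module default is 10000).
                    flush_qlen => 10000
                }
            }
        }}
    ]}
].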
@@ -112,6 +112,9 @@
         rules => [
             % We want to use `ct:pal/2` and friends in test code.
             {elvis_style, no_debug_call, disable},
+            % Assert macros can trigger use of ignored binding, yet we want them for better
+            % readability.
+            {elvis_style, used_ignored_variable, disable},
             % Tests are usually more comprehensible when a bit more verbose.
             {elvis_style, dont_repeat_yourself, #{min_complexity => 20}},
             {elvis_style, god_modules, #{ignore => [ct_gun_event_h]}}
@@ -22,7 +22,7 @@
    1},
  {<<"genlib">>,
   {git,"https://github.com/rbkmoney/genlib.git",
-       {ref,"1ca08793ad8af0beb26eda8cd00687c69f7ef8b4"}},
+       {ref,"7637d915c4c769f7f45c99f8688b17922e801027"}},
   0},
  {<<"gproc">>,{pkg,<<"gproc">>,<<"0.8.0">>},1},
  {<<"gun">>,
@@ -4,6 +4,7 @@
 
 -behaviour(application).
 -export([start/2]).
+-export([prep_stop/1]).
 -export([stop/1]).
 
 %% Supervisor callbacks

@@ -19,6 +20,15 @@
 start(_StartType, _StartArgs) ->
     supervisor:start_link({local, ?MODULE}, ?MODULE, []).
 
+-spec prep_stop(State) ->
+    State.
+prep_stop(State) ->
+    % NOTE
+    % We have to do it in this magic `prep_stop/1` here because for some inexplicable reason the
+    % usual `stop/1` callback doesn't get called in common_test runs.
+    ok = bouncer_audit_log:stop(genlib_app:env(?MODULE, audit, #{})),
+    State.
+
 -spec stop(any()) ->
     ok.
 

@@ -31,6 +41,7 @@ stop(_State) ->
     {ok, {supervisor:sup_flags(), [supervisor:child_spec()]}}.
 
 init([]) ->
+    AuditPulse = bouncer_audit_log:init(genlib_app:env(?MODULE, audit, #{})),
     ServiceOpts = genlib_app:env(?MODULE, services, #{}),
     EventHandlers = genlib_app:env(?MODULE, woody_event_handlers, [woody_event_handler_default]),
     Healthcheck = enable_health_logging(genlib_app:env(?MODULE, health_check, #{})),

@@ -43,7 +54,7 @@ init([]) ->
             transport_opts => get_transport_opts(),
             shutdown_timeout => get_shutdown_timeout(),
             event_handler => EventHandlers,
-            handlers => get_handler_specs(ServiceOpts),
+            handlers => get_handler_specs(ServiceOpts, AuditPulse),
             additional_routes => [erl_health_handle:get_route(Healthcheck)]
         }
     ),

@@ -83,12 +94,13 @@ get_transport_opts() ->
 get_shutdown_timeout() ->
     genlib_app:env(?MODULE, shutdown_timeout, 0).
 
--spec get_handler_specs(map()) ->
+-spec get_handler_specs(map(), bouncer_arbiter_pulse:handlers()) ->
     [woody:http_handler(woody:th_handler())].
 
-get_handler_specs(ServiceOpts) ->
+get_handler_specs(ServiceOpts, AuditPulse) ->
     ArbiterService = maps:get(arbiter, ServiceOpts, #{}),
-    ArbiterOpts = maps:with([pulse], ArbiterService),
+    ArbiterPulse = maps:get(pulse, ArbiterService, []),
+    ArbiterOpts = #{pulse => AuditPulse ++ ArbiterPulse},
     [
         {
             maps:get(path, ArbiterService, <<"/v1/arbiter">>),
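Since `get_handler_specs/2` now builds the arbiter pulse as `AuditPulse ++ ArbiterPulse`, a deployment can still attach its own handlers through the `services` environment. A hedged sketch of such an entry follows; the `my_metrics_pulse` module is hypothetical, while the shape of the `pulse` list matches the one used in the test suite changes below.

{services, #{
    arbiter => #{
        path => <<"/v1/arbiter">>,
        % Handlers listed here run after the audit handlers returned by
        % bouncer_audit_log:init/1, since the audit pulse is prepended.
        pulse => [{my_metrics_pulse, #{}}]
    }
}}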
@@ -10,11 +10,11 @@
 %%
 
 -type opts() :: #{
-    pulse => bouncer_arbiter_pulse:handler()
+    pulse => bouncer_arbiter_pulse:handlers()
 }.
 
 -record(st, {
-    pulse :: bouncer_arbiter_pulse:handler() | undefined,
+    pulse :: bouncer_arbiter_pulse:handlers(),
     pulse_metadata :: bouncer_arbiter_pulse:metadata()
 }).
 

@@ -23,13 +23,11 @@
 -spec handle_function(woody:func(), woody:args(), woody_context:ctx(), opts()) ->
     {ok, woody:result()}.
 handle_function(Fn, Args, WoodyCtx, Opts) ->
-    scoper:scope(arbiter, fun() ->
-        do_handle_function(Fn, Args, WoodyCtx, Opts)
-    end).
+    do_handle_function(Fn, Args, WoodyCtx, Opts).
 
 do_handle_function('Judge', {RulesetID, ContextIn}, WoodyCtx, Opts) ->
     St = #st{
-        pulse = maps:get(pulse, Opts, undefined),
+        pulse = maps:get(pulse, Opts, []),
         pulse_metadata = #{woody_ctx => WoodyCtx}
     },
     try handle_judge(RulesetID, ContextIn, St) catch
@@ -27,21 +27,28 @@
 %%
 
 -type handler() :: {module(), _Opts}.
+-type handler(St) :: {module(), St}.
+-type handlers() :: [handler()].
+-type handlers(St) :: [handler(St)].
 -export_type([handler/0]).
+-export_type([handler/1]).
+-export_type([handlers/0]).
+-export_type([handlers/1]).
 
 -callback handle_beat(beat(), metadata(), _Opts) ->
     ok.
 
 -export([handle_beat/3]).
 
--spec handle_beat(beat(), metadata(), handler() | undefined) ->
+-spec handle_beat(beat(), metadata(), handlers()) ->
     ok.
-handle_beat(Beat, Metadata, {Mod, Opts}) ->
+handle_beat(Beat, Metadata, [{Mod, Opts} | Rest]) ->
     % NOTE
     % Generally, we don't want some fault to propagate from event handler to the business logic
     % and affect it, causing failure. Hovewer here we deem it required because we actually need
-    % this kind of behaviour when doing audit logging, an inability to append to the audit log
+    % this kind of behaviour when doing audit logging, as inability to append to the audit log
     % should cause whole operation to fail.
-    Mod:handle_beat(Beat, Metadata, Opts);
-handle_beat(_Beat, _Metadata, undefined) ->
+    _ = Mod:handle_beat(Beat, Metadata, Opts),
+    handle_beat(Beat, Metadata, Rest);
+handle_beat(_Beat, _Metadata, []) ->
     ok.
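The `handle_beat/3` dispatcher above now folds over a list of `{Module, Opts}` pairs, invoking each module's `handle_beat/3` callback synchronously and letting any crash propagate to the caller. A minimal sketch of a conforming handler module is shown below; the module name and its use of the default logger are assumptions for illustration, not part of this commit.

-module(my_pulse_logger).

-behaviour(bouncer_arbiter_pulse).

-export([handle_beat/3]).

%% Invoked synchronously for every beat; a crash here fails the request,
%% which is exactly the behaviour the dispatcher above relies on for audit logging.
-spec handle_beat(bouncer_arbiter_pulse:beat(), bouncer_arbiter_pulse:metadata(), _Opts) ->
    ok.
handle_beat(Beat, Metadata, _Opts) ->
    logger:debug("arbiter beat ~p with metadata ~p", [Beat, Metadata]),
    ok.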
src/bouncer_audit_log.erl (new file, 280 lines)

-module(bouncer_audit_log).

-export([init/1]).
-export([stop/1]).

-behaviour(bouncer_arbiter_pulse).
-export([handle_beat/3]).

-define(DEFAULT_LOG_LEVEL, notice).
-define(DEFAULT_FLUSH_QLEN, 10000).
-define(LOG_DOMAIN, [audit]).

-type opts() :: #{
    log => log_opts() | disabled
}.

% NOTE
% Keep in sync with `opts()`.
-define(OPTS, [log]).

-type log_opts() :: #{
    % Which log level to use for audit events? Defaults to `notice`.
    level => logger:level(),
    backend => logger_backend_opts(),
    % http://erlang.org/doc/man/logger.html#type-formatter_config
    formatter => {module(), logger:formatter_config()}
}.

% NOTE
% Keep in sync with `log_opts()`.
-define(LOG_OPTS, [level, backend, formatter]).

-type logger_backend_opts() :: #{
    % Where to log? Defaults to `standard_io`.
    type => standard_io | standard_error | file,
    % Log file location. No default, MUST be set if `type` is `file`.
    file => file:filename(),
    % http://erlang.org/doc/man/logger_std_h.html
    max_no_bytes => pos_integer() | infinity,
    max_no_files => non_neg_integer(),
    % Maximum number of events to queue for writing. Defaults to 10000.
    % http://erlang.org/doc/apps/kernel/logger_chapter.html#message-queue-length
    flush_qlen => non_neg_integer()
}.

% NOTE
% Keep in sync with `logger_backend_opts()`.
-define(LOGGER_BACKEND_OPTS, [type, file, max_no_bytes, max_no_files, flush_qlen]).

-export_type([opts/0]).

%%

-type st() ::
    {log, logger:level()}.

-spec init(opts()) ->
    bouncer_arbiter_pulse:handlers(st()).
init(Opts) ->
    _ = assert_strict_opts(?OPTS, Opts),
    init_log_handler(maps:get(log, Opts, #{})).

init_log_handler(LogOpts = #{}) ->
    _ = assert_strict_opts(?LOG_OPTS, LogOpts),
    Level = validate_log_level(maps:get(level, LogOpts, ?DEFAULT_LOG_LEVEL)),
    BackendConfig = mk_logger_backend_config(maps:get(backend, LogOpts, #{})),
    HandlerConfig0 = maps:with([formatter], LogOpts),
    HandlerConfig1 = HandlerConfig0#{
        config => BackendConfig,
        % NOTE
        % This two options together ensure that _only_ audit logs will flow through to the backend.
        filters => [{domain, {fun logger_filters:domain/2, {log, sub, ?LOG_DOMAIN}}}],
        filter_default => stop
    },
    ok = logger:add_handler(
        ?MODULE,
        logger_std_h,
        HandlerConfig1
    ),
    % TODO
    % Validate that global logger level doesn't suppress ours?
    ok = log(Level, "audit log started", #{}),
    [{?MODULE, {log, Level}}];
init_log_handler(disabled) ->
    [].

validate_log_level(Level) ->
    eq = logger:compare_levels(Level, Level),
    Level.

mk_logger_backend_config(BackendOpts) ->
    _ = assert_strict_opts(?LOGGER_BACKEND_OPTS, BackendOpts),
    Type = validate_log_type(maps:get(type, BackendOpts, standard_io)),
    mk_logger_backend_config(Type, BackendOpts).

validate_log_type(Type) when
    Type == standard_io;
    Type == standard_error;
    Type == file
->
    Type;
validate_log_type(Type) ->
    erlang:error(badarg, [Type]).

mk_logger_backend_config(file = Type, Opts) ->
    Defaults = get_default_backend_config(Type, Opts),
    Filename = maps:get(file, Opts),
    Config0 = maps:with([max_no_bytes, max_no_files], Opts),
    Config = maps:merge(Defaults, Config0),
    Config#{
        type => Type,
        file => Filename
    };
mk_logger_backend_config(Type, Opts) ->
    Defaults = get_default_backend_config(Type, Opts),
    Defaults#{
        type => Type
    }.

get_default_backend_config(file, Opts) ->
    % NOTE
    % All those options chosen to push message loss probability as close to zero as possible.
    % Zero doesn't seem reachable with standard logger infrastructure because of various safeguards
    % around unexpected backend and formatter errors.
    Config = get_default_backend_config(Opts),
    Config#{
        % Protects against accidental write loss upon file rotation.
        file_check => 0
    };
get_default_backend_config(_Type, Opts) ->
    get_default_backend_config(Opts).

get_default_backend_config(Opts) ->
    FlushQLen = maps:get(flush_qlen, Opts, ?DEFAULT_FLUSH_QLEN),
    #{
        % No need to set it up here since we'll sync on EVERY write by ourself.
        filesync_repeat_interval => no_repeat,

        % http://erlang.org/doc/apps/kernel/logger_chapter.html#message-queue-length
        sync_mode_qlen => 0,
        drop_mode_qlen => FlushQLen,
        flush_qlen => FlushQLen,

        % http://erlang.org/doc/apps/kernel/logger_chapter.html#controlling-bursts-of-log-requests
        burst_limit_enable => false,

        % http://erlang.org/doc/apps/kernel/logger_chapter.html#terminating-an-overloaded-handler
        overload_kill_enable => false
    }.

assert_strict_opts(Ks, Opts) ->
    case maps:without(Ks, Opts) of
        Empty when map_size(Empty) == 0 ->
            ok;
        Unrecognized ->
            erlang:error({unrecognized_opts, Unrecognized})
    end.

%%

-spec stop(opts()) ->
    ok.
stop(Opts = #{}) ->
    stop_log_handler(maps:get(log, Opts, #{})).

-spec stop_log_handler(log_opts()) ->
    ok.
stop_log_handler(LogOpts = #{}) ->
    Level = maps:get(level, LogOpts, ?DEFAULT_LOG_LEVEL),
    ok = log(Level, "audit log stopped", #{}),
    _ = logger:remove_handler(?MODULE),
    ok;
stop_log_handler(disabled) ->
    ok.

%%

-type beat() :: bouncer_arbiter_pulse:beat().
-type metadata() :: bouncer_arbiter_pulse:metadata().

-spec handle_beat(beat(), metadata(), st()) ->
    ok.
handle_beat(Beat, Metadata, {log, Level}) ->
    log(
        get_severity(Beat, Level),
        get_message(Beat),
        extract_metadata(Metadata, get_beat_metadata(Beat))
    ).

log(Severity, Message, Metadata) ->
    DefaultMetadata = #{
        type => audit,
        domain => ?LOG_DOMAIN
    },
    % NOTE
    % Matching on `ok` here is crucial. Logger may decide to flush the queue behind the scenes so
    % we need to ensure it's not happening.
    ok = logger:log(Severity, Message, maps:merge(Metadata, DefaultMetadata)),
    ok = logger_std_h:filesync(?MODULE),
    ok.

get_severity({judgement, started}, _Level) -> debug;
get_severity(_, Level) -> Level.

get_message({judgement, started}) -> <<"judgement started">>;
get_message({judgement, {completed, _}}) -> <<"judgement completed">>;
get_message({judgement, {failed, _}}) -> <<"judgement failed">>.

get_beat_metadata({judgement, Event}) ->
    #{
        judgement => case Event of
            started ->
                #{
                    event => started
                };
            {completed, {Resolution, Assertions}} ->
                #{
                    event => completed,
                    resolution => encode_resolution(Resolution),
                    assertions => lists:map(fun encode_assertion/1, Assertions)
                };
            {failed, Error} ->
                #{
                    event => failed,
                    error => encode_error(Error)
                }
        end
    }.

encode_resolution(allowed) -> <<"allowed">>;
encode_resolution(forbidden) -> <<"forbidden">>.

encode_assertion({Code, Details}) ->
    #{code => Code, details => Details}.

encode_error({Class, Details}) when is_atom(Class) ->
    #{class => Class, details => genlib:format(Details)};
encode_error(Class) when is_atom(Class) ->
    #{class => Class};
encode_error(Other) ->
    #{details => genlib:format(Other)}.

extract_metadata(Metadata, Acc) ->
    Acc1 = extract_opt_meta(ruleset, Metadata, fun encode_id/1, Acc),
    Acc2 = extract_opt_meta(context, Metadata, fun encode_context/1, Acc1),
    Acc3 = extract_opt_meta(fragments, Metadata, fun encode_fragments/1, Acc2),
    extract_woody_ctx(maps:get(woody_ctx, Metadata, undefined), Acc3).

extract_opt_meta(K, Metadata, EncodeFun, Acc) ->
    case maps:find(K, Metadata) of
        {ok, V} -> Acc#{K => EncodeFun(V)};
        error -> Acc
    end.

encode_id(ID) when is_binary(ID) ->
    ID.

encode_context(Context = #{}) ->
    % TODO
    % We'll probably need something like `bouncer_context:extract_metadata/1` here when the schema
    % stops being that simple.
    Context.

encode_fragments(Fragments = #{}) ->
    % TODO
    % See above.
    Fragments.

extract_woody_ctx(WoodyCtx = #{rpc_id := RpcID}, Acc) ->
    extract_woody_meta(WoodyCtx, extract_woody_rpc_id(RpcID, Acc));
extract_woody_ctx(undefined, Acc) ->
    Acc.

extract_woody_rpc_id(RpcID = #{span_id := _, trace_id := _, parent_id := _}, Acc) ->
    maps:merge(Acc, RpcID).

extract_woody_meta(#{meta := Meta}, Acc) when map_size(Meta) > 0 ->
    Acc#{woody => #{metadata => Meta}};
extract_woody_meta(#{}, Acc) ->
    Acc.
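To make the shape of an audit record concrete: for a completed judgement, the metadata handed to `logger:log/3` would look roughly like the term below. The ruleset ID, assertion code and RPC IDs are made-up sample values; only the structure follows from `log/3`, `get_beat_metadata/1` and `extract_metadata/2` above.

%% Illustrative metadata for a `{judgement, {completed, {allowed, Assertions}}}` beat;
%% all concrete values here are samples, not output captured from the service.
#{
    type => audit,
    domain => [audit],
    ruleset => <<"authz/api">>,
    judgement => #{
        event => completed,
        resolution => <<"allowed">>,
        assertions => [#{code => <<"some_assertion">>, details => undefined}]
    },
    % Merged in from the woody RPC ID when a woody context is present.
    span_id => <<"6a4c2d3f9c1e8b07">>,
    trace_id => <<"9d2e5b7a1f3c4860">>,
    parent_id => <<"0123456789abcdef">>
}.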
test/bouncer_audit_tests_SUITE.erl (new file, 268 lines)

-module(bouncer_audit_tests_SUITE).

-include_lib("common_test/include/ct.hrl").
-include_lib("stdlib/include/assert.hrl").

-export([all/0]).
-export([init_per_suite/1]).
-export([end_per_suite/1]).
-export([init_per_testcase/2]).
-export([end_per_testcase/2]).

-export([invalid_config_fails_start/1]).
-export([unrecognized_config_fails_start/1]).
-export([write_error_fails_request/1]).
-export([write_queue_overload_fails_request/1]).

-include_lib("bouncer_proto/include/bouncer_decisions_thrift.hrl").

-type config() :: ct_helper:config().
-type testcase_name() :: atom().

-define(CONFIG(Key, C), (element(2, lists:keyfind(Key, 1, C)))).

%%

-define(OPA_HOST, "opa").
-define(OPA_ENDPOINT, {?OPA_HOST, 8181}).
-define(API_RULESET_ID, "authz/api").

-spec all() ->
    [atom()].
all() ->
    [
        invalid_config_fails_start,
        unrecognized_config_fails_start,
        write_error_fails_request
        % TODO
        % This testcase is currently failing consistently.
        % Turns out logger (as of Erlang/OTP 23 w/ kernel 7.0) doesn't tell callers when specific
        % handler loses an event due to queue flushing, even though doc says it should have.
        % Best bet is to max out `flush_qlen` option in the meantime.
        % write_queue_overload_fails_request
    ].

-spec init_per_suite(config()) ->
    config().
init_per_suite(C) ->
    Apps =
        genlib_app:start_application(woody) ++
        genlib_app:start_application_with(scoper, [
            {storage, scoper_storage_logger}
        ]),
    [{suite_apps, Apps} | C].

-spec end_per_suite(config()) ->
    ok.
end_per_suite(C) ->
    genlib_app:stop_unload_applications(?CONFIG(suite_apps, C)).

-spec init_per_testcase(testcase_name(), config()) ->
    config().
init_per_testcase(Name, C) ->
    [{testcase, Name} | C].

-spec end_per_testcase(testcase_name(), config()) ->
    config().
end_per_testcase(_Name, _C) ->
    ok.

%%

-define(CONTEXT(Fragments), #bdcs_Context{fragments = Fragments}).

-spec invalid_config_fails_start(config()) -> ok.
-spec unrecognized_config_fails_start(config()) -> ok.
-spec write_error_fails_request(config()) -> ok.
-spec write_queue_overload_fails_request(config()) -> ok.

invalid_config_fails_start(C) ->
    ?assertError(
        {bouncer, {{{badkey, file}, _Stacktrace}, _}},
        start_stop_bouncer([
            {audit, #{log => #{
                backend => #{
                    % NOTE
                    % Missing target filename here.
                    type => file
                }
            }}}
        ], C)
    ),
    ?assertError(
        {bouncer, {{badarg, _Stacktrace}, _}},
        start_stop_bouncer([
            {audit, #{log => #{
                level => blarg
            }}}
        ], C)
    ).

unrecognized_config_fails_start(C) ->
    ?assertError(
        {bouncer, {{{unrecognized_opts, #{blarg := _}}, _Stacktrace}, _}},
        start_stop_bouncer([
            {audit, #{blarg => blorg}}
        ], C)
    ),
    ?assertError(
        {bouncer, {{{unrecognized_opts, #{blarg := _}}, _Stacktrace}, _}},
        start_stop_bouncer([
            {audit, #{log => #{
                blarg => blorg
            }}}
        ], C)
    ),
    ?assertError(
        {bouncer, {{{unrecognized_opts, #{hello := _}}, _Stacktrace}, _}},
        start_stop_bouncer([
            {audit, #{log => #{
                level => notice,
                hello => <<"mike">>
            }}}
        ], C)
    ).

start_stop_bouncer(Env, C) ->
    % NOTE
    % Just to be sure to have clean state inbetween testcases.
    stop_bouncer(start_bouncer(Env, C)).

write_error_fails_request(C) ->
    Dirname = mk_temp_dir(?CONFIG(testcase, C)),
    Filename = filename:join(Dirname, "audit.log"),
    C1 = start_bouncer([{audit, #{
        log => #{
            backend => #{type => file, file => Filename}
        }
    }}], C),
    Client = mk_client(C1),
    try
        ok = file:delete(Filename),
        ok = file:change_mode(Dirname, 8#555),
        ?assertError(
            {woody_error, {external, result_unexpected, _}},
            call_judge(?API_RULESET_ID, ?CONTEXT(#{}), Client)
        )
    after
        _ = rm_temp_dir(Dirname),
        stop_bouncer(C1)
    end.

write_queue_overload_fails_request(C) ->
    QLen = 10,
    Concurrency = QLen * 10,
    Dirname = mk_temp_dir(?CONFIG(testcase, C)),
    Filename = filename:join(Dirname, "audit.log"),
    C1 = start_bouncer([{audit, #{
        log => #{
            backend => #{type => file, file => Filename, flush_qlen => QLen},
            formatter => {logger_logstash_formatter, #{single_line => true}}
        }
    }}], C),
    Client = mk_client(C1),
    Results = genlib_pmap:safemap(
        fun (_) ->
            call_judge(?API_RULESET_ID, ?CONTEXT(#{}), Client)
        end,
        lists:seq(1, Concurrency)
    ),
    _ = stop_bouncer(C1),
    try
        {Succeeded, _Failed} = lists:partition(fun ({R, _}) -> R == ok end, Results),
        {ok, LogfileContents} = file:read_file(Filename),
        NumLogEvents = binary:matches(LogfileContents, <<"\"type\":\"audit\"">>), % TODO kinda hacky
        ?assertEqual(length(Succeeded), length(NumLogEvents))
    after
        rm_temp_dir(Dirname)
    end.

mk_temp_dir(Name) ->
    TempDir = ct_helper:get_temp_dir(),
    Random = binary_to_list(genlib:unique()),
    TargetDir = filename:join([TempDir, Name, Random]),
    ok = filelib:ensure_dir(filename:join(TargetDir, ".")),
    TargetDir.

rm_temp_dir(Dirname) ->
    Root = filename:dirname(Dirname),
    case file:del_dir_r(Root) of
        ok ->
            ok;
        {error, Reason} ->
            ct:pal("unable to cleanup ~p: ~p", [Root, Reason])
    end.

%%

mk_client(C) ->
    WoodyCtx = woody_context:new(genlib:to_binary(?CONFIG(testcase, C))),
    ServiceURLs = ?CONFIG(service_urls, C),
    {WoodyCtx, ServiceURLs}.

call_judge(RulesetID, Context, Client) ->
    call(arbiter, 'Judge', {genlib:to_binary(RulesetID), Context}, Client).

call(ServiceName, Fn, Args, {WoodyCtx, ServiceURLs}) ->
    Service = get_service_spec(ServiceName),
    Opts = #{
        url => maps:get(ServiceName, ServiceURLs),
        event_handler => scoper_woody_event_handler
    },
    case woody_client:call({Service, Fn, Args}, Opts, WoodyCtx) of
        {ok, Response} ->
            Response;
        {exception, Exception} ->
            throw(Exception)
    end.

get_service_spec(arbiter) ->
    {bouncer_decisions_thrift, 'Arbiter'}.

%%

start_bouncer(Env, C) ->
    IP = "127.0.0.1",
    Port = 8022,
    ArbiterPath = <<"/v1/arbiter">>,
    Apps = start_application_with(bouncer, [
        {ip, IP},
        {port, Port},
        {services, #{
            arbiter => #{path => ArbiterPath}
        }},
        {transport_opts, #{
            max_connections => 1000,
            num_acceptors => 4
        }},
        {opa, #{
            endpoint => ?OPA_ENDPOINT,
            transport => tcp
        }}
    ] ++ Env),
    Services = #{
        arbiter => mk_url(IP, Port, ArbiterPath)
    },
    [{testcase_apps, Apps}, {service_urls, Services} | C].

mk_url(IP, Port, Path) ->
    iolist_to_binary(["http://", IP, ":", genlib:to_binary(Port), Path]).

stop_bouncer(C) ->
    ct_helper:with_config(testcase_apps, C,
        fun (Apps) -> genlib_app:stop_unload_applications(Apps) end).

start_application_with(App, Env) ->
    _ = application:load(App),
    _ = [application:set_env(App, K, V) || {K, V} <- Env],
    case application:ensure_all_started(App, temporary) of
        {ok, Apps} ->
            Apps;
        {error, Reason} ->
            _ = application:unload(App),
            erlang:error(Reason)
    end.
@@ -35,7 +35,7 @@
 
 -include_lib("bouncer_proto/include/bouncer_decisions_thrift.hrl").
 
--type config() :: [{atom(), term()}].
+-type config() :: ct_helper:config().
 -type group_name() :: atom().
 -type test_case_name() :: atom().
 

@@ -121,7 +121,7 @@ start_bouncer(Env, C) ->
         {services, #{
             arbiter => #{
                 path => ArbiterPath,
-                pulse => {?MODULE, StashPid}
+                pulse => [{?MODULE, StashPid}]
             }
         }},
         {transport_opts, #{

@@ -147,8 +147,10 @@ end_per_group(_Name, C) ->
     stop_bouncer(C).
 
 stop_bouncer(C) ->
-    with_config(group_apps, C, fun (Apps) -> genlib_app:stop_unload_applications(Apps) end),
-    with_config(stash, C, fun (Pid) -> ?assertEqual(ok, ct_stash:destroy(Pid)) end).
+    ct_helper:with_config(group_apps, C,
+        fun (Apps) -> genlib_app:stop_unload_applications(Apps) end),
+    ct_helper:with_config(stash, C,
+        fun (Pid) -> ?assertEqual(ok, ct_stash:destroy(Pid)) end).
 
 -spec init_per_testcase(atom(), config()) ->
     config().

@@ -616,13 +618,3 @@ flush_beats({WoodyCtx, _}, C) ->
 get_trace_id(WoodyCtx) ->
     RpcID = woody_context:get_rpc_id(WoodyCtx),
     maps:get(trace_id, RpcID).
-
-%%
-
--spec with_config(atom(), config(), fun ((_) -> R)) ->
-    R | undefined.
-with_config(Name, C, Fun) ->
-    case lists:keyfind(Name, 1, C) of
-        {_, V} -> Fun(V);
-        false -> undefined
-    end.
test/ct_helper.erl (new file, 40 lines)

-module(ct_helper).

-export([with_config/3]).

-export([get_temp_dir/0]).
-export([get_env/1]).

-type config() :: [{atom(), term()}].

-export_type([config/0]).

%%

-spec with_config(atom(), config(), fun ((_) -> R)) ->
    R | undefined.
with_config(Name, C, Fun) ->
    case lists:keyfind(Name, 1, C) of
        {_, V} -> Fun(V);
        false -> undefined
    end.

-spec get_temp_dir() ->
    file:filename_all().
get_temp_dir() ->
    hd(genlib_list:compact([
        get_env("TMPDIR"),
        get_env("TEMP"),
        get_env("TMP"),
        "/tmp"
    ])).

-spec get_env(string()) ->
    string() | undefined.
get_env(Name) ->
    case os:getenv(Name) of
        V when is_list(V) ->
            V;
        false ->
            undefined
    end.