Mirror of https://github.com/valitydev/osquery-1.git (synced 2024-11-06 17:45:22 +00:00)

Merge pull request #1463 from theopolis/linux_audit2: Add a Linux audit event publisher

Commit b06fa92e76
@ -14,10 +14,9 @@
|
||||
|
||||
#include <kern/assert.h>
|
||||
|
||||
|
||||
#include "circular_queue_kern.h"
|
||||
|
||||
static inline void setup_locks(osquery_cqueue_t *queue) {
|
||||
static inline void setup_queue_locks(osquery_cqueue_t *queue) {
|
||||
/* Create locks. Cannot be done on the stack. */
|
||||
queue->lck_grp_attr = lck_grp_attr_alloc_init();
|
||||
lck_grp_attr_setstat(queue->lck_grp_attr);
|
||||
@ -29,7 +28,7 @@ static inline void setup_locks(osquery_cqueue_t *queue) {
|
||||
queue->lck = lck_spin_alloc_init(queue->lck_grp, queue->lck_attr);
|
||||
}
|
||||
|
||||
static inline void teardown_locks(osquery_cqueue_t *queue) {
|
||||
static inline void teardown_queue_locks(osquery_cqueue_t *queue) {
|
||||
lck_spin_free(queue->lck, queue->lck_grp);
|
||||
|
||||
lck_attr_free(queue->lck_attr);
|
||||
@ -88,7 +87,7 @@ static inline osquery_between_t is_between(osquery_cqueue_t *queue, void *ptr,
|
||||
void osquery_cqueue_setup(osquery_cqueue_t *queue) {
|
||||
queue->last_destruction_time = 0;
|
||||
queue->initialized = 0;
|
||||
setup_locks(queue);
|
||||
setup_queue_locks(queue);
|
||||
}
|
||||
|
||||
int osquery_cqueue_teardown(osquery_cqueue_t *queue) {
|
||||
@ -107,7 +106,7 @@ int osquery_cqueue_teardown(osquery_cqueue_t *queue) {
|
||||
clock_get_system_microtime(&seconds, &micro_sec);
|
||||
if (!queue->initialized && seconds > 2 + queue->last_destruction_time) {
|
||||
lck_spin_unlock(queue->lck);
|
||||
teardown_locks(queue);
|
||||
teardown_queue_locks(queue);
|
||||
return 0;
|
||||
} else {
|
||||
lck_spin_unlock(queue->lck);
|
||||
|
@ -42,18 +42,27 @@
|
||||
#define MIN_KMEM (8 * (1 << 10))
|
||||
|
||||
static struct {
|
||||
/// The shared (user/kernel space) circular queue holding event results.
|
||||
osquery_cqueue_t cqueue;
|
||||
|
||||
/// The contiguous memory backing the circular queue.
|
||||
void *buffer;
|
||||
|
||||
/// Configured size of the circular queue buffer.
|
||||
size_t buf_size;
|
||||
|
||||
IOMemoryDescriptor *md;
|
||||
IOMemoryMap *mm;
|
||||
void *devfs;
|
||||
int major_number;
|
||||
int open_count;
|
||||
|
||||
/// IOCTL API handling lock/mutex data.
|
||||
lck_grp_attr_t *lck_grp_attr;
|
||||
lck_grp_t *lck_grp;
|
||||
lck_attr_t *lck_attr;
|
||||
|
||||
/// IOCTL API handling mutex.
|
||||
lck_mtx_t *mtx;
|
||||
} osquery = {
|
||||
.open_count = 0,
|
||||
@ -66,24 +75,23 @@ static struct {
|
||||
};
|
||||
|
||||
static inline void setup_locks() {
|
||||
/* Create locks. Cannot be done on the stack. */
|
||||
// Create locks. Cannot be done on the stack.
|
||||
osquery.lck_grp_attr = lck_grp_attr_alloc_init();
|
||||
lck_grp_attr_setstat(osquery.lck_grp_attr);
|
||||
|
||||
osquery.lck_grp = lck_grp_alloc_init("osquery", osquery.lck_grp_attr);
|
||||
|
||||
osquery.lck_attr = lck_attr_alloc_init();
|
||||
|
||||
// MTX is the IOCTL API handling lock.
|
||||
// This assures only one daemon will use the kernel API simultaneously.
|
||||
osquery.mtx = lck_mtx_alloc_init(osquery.lck_grp, osquery.lck_attr);
|
||||
}
|
||||
|
||||
static inline void teardown_locks() {
|
||||
// Release locks and their heap memory.
|
||||
lck_mtx_free(osquery.mtx, osquery.lck_grp);
|
||||
|
||||
lck_attr_free(osquery.lck_attr);
|
||||
|
||||
lck_grp_free(osquery.lck_grp);
|
||||
|
||||
lck_grp_attr_free(osquery.lck_grp_attr);
|
||||
}
|
||||
|
||||
@ -162,19 +170,27 @@ static void cleanup_user_kernel_buffer() {
|
||||
static int allocate_user_kernel_buffer(size_t size, void **buf) {
|
||||
int err = 0;
|
||||
|
||||
// The user space daemon is requesting a new circular queue.
|
||||
// Make sure the requested size is within sane size bounds.
|
||||
if (size > MAX_KMEM || size < MIN_KMEM) {
|
||||
err = -EINVAL;
|
||||
goto error_exit;
|
||||
}
|
||||
|
||||
// Record the requested buffer size.
|
||||
osquery.buf_size = size;
|
||||
// Allocate a contiguous region of memory.
|
||||
osquery.buffer = IOMallocAligned(osquery.buf_size, PAGE_SIZE);
|
||||
// Cannot proceed if no memory to back the circular queue is available.
|
||||
if (osquery.buffer == NULL) {
|
||||
err = -EINVAL;
|
||||
goto error_exit;
|
||||
}
|
||||
bzero(osquery.buffer, osquery.buf_size); // Zero memory for safety.
|
||||
|
||||
// Zero memory for safety, this memory will be shared with user space.
|
||||
bzero(osquery.buffer, osquery.buf_size);
|
||||
|
||||
// This buffer will be shared, create a descriptor.
|
||||
osquery.md
|
||||
= IOMemoryDescriptor::withAddressRange((mach_vm_address_t)osquery.buffer,
|
||||
osquery.buf_size,
|
||||
@ -183,25 +199,30 @@ static int allocate_user_kernel_buffer(size_t size, void **buf) {
|
||||
err = -EINVAL;
|
||||
goto error_exit;
|
||||
}
|
||||
|
||||
// Now map the buffer into the user space process as read only.
|
||||
osquery.mm = osquery.md->createMappingInTask(current_task(), NULL,
|
||||
kIOMapAnywhere | kIOMapReadOnly);
|
||||
if (osquery.mm == NULL) {
|
||||
err = -EINVAL;
|
||||
goto error_exit;
|
||||
}
|
||||
*buf = (void *)osquery.mm->getAddress();
|
||||
|
||||
// The virtual address will be shared back to the user space queue manager.
|
||||
*buf = (void *)osquery.mm->getAddress();
|
||||
// Initialize the kernel space queue manager with the new buffer.
|
||||
osquery_cqueue_init(&osquery.cqueue, osquery.buffer, osquery.buf_size);
|
||||
|
||||
return 0;
|
||||
error_exit:
|
||||
// A drop-through error handler will clean up any intermediate allocations.
|
||||
cleanup_user_kernel_buffer();
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int osquery_open(dev_t dev, int oflags, int devtype, struct proc *p) {
|
||||
// Close isnt working so leave these out for now.
|
||||
// Close is not working so leave these out for now.
|
||||
int err = 0;
|
||||
lck_mtx_lock(osquery.mtx);
|
||||
if (osquery.open_count == 0) {
|
||||
@ -218,6 +239,7 @@ static int osquery_open(dev_t dev, int oflags, int devtype, struct proc *p) {
|
||||
}
|
||||
|
||||
static int osquery_close(dev_t dev, int flag, int fmt, struct proc *p) {
|
||||
// Only one daemon should request a close.
|
||||
lck_mtx_lock(osquery.mtx);
|
||||
if (osquery.open_count == 1) {
|
||||
unsubscribe_all_events();
|
||||
@ -230,11 +252,11 @@ static int osquery_close(dev_t dev, int flag, int fmt, struct proc *p) {
|
||||
}
|
||||
|
||||
|
||||
// All control should be from a single consumer, so we wrap all these calls
|
||||
// in locks to guarantee proper use.
|
||||
static int osquery_ioctl(dev_t dev, u_long cmd, caddr_t data,
|
||||
int flag, struct proc *p) {
|
||||
#ifdef KERNEL_TEST // Reentrant code used for testing the queue functionality.
|
||||
#ifdef KERNEL_TEST
|
||||
// Reentrant code used for testing the queue functionality.
|
||||
// This test-only code allows benchmarks to stress test queue handling.
|
||||
static unsigned int test_counter = 0;
|
||||
if (cmd == OSQUERY_IOCTL_TEST) {
|
||||
if (osquery.buffer == NULL) {
|
||||
@ -272,27 +294,34 @@ static int osquery_ioctl(dev_t dev, u_long cmd, caddr_t data,
|
||||
}
|
||||
#endif // KERNEL_TEST
|
||||
|
||||
lck_mtx_lock(osquery.mtx);
|
||||
|
||||
int err = 0;
|
||||
osquery_subscription_args_t *sub = NULL;
|
||||
osquery_buf_sync_args_t *sync = NULL;
|
||||
osquery_buf_allocate_args_t *alloc = NULL;
|
||||
|
||||
// All control should be from a single daemon.
|
||||
// Wrap all IOCTL API handling in locks to guarantee proper use.
|
||||
lck_mtx_lock(osquery.mtx);
|
||||
switch (cmd) {
|
||||
// Daemon is requesting a new subscription (e.g., monitored path).
|
||||
case OSQUERY_IOCTL_SUBSCRIPTION:
|
||||
sub = (osquery_subscription_args_t *)data;
|
||||
if ((err = subscribe_to_event(sub->event, sub->subscribe, sub->udata))) {
|
||||
goto error_exit;
|
||||
}
|
||||
break;
|
||||
|
||||
// Daemon is requesting a synchronization of readable queue space.
|
||||
case OSQUERY_IOCTL_BUF_SYNC:
|
||||
sync = (osquery_buf_sync_args_t *)data;
|
||||
// The queue buffer cannot be synchronized if it has not been allocated.
|
||||
if (osquery.buffer == NULL) {
|
||||
err = -EINVAL;
|
||||
goto error_exit;
|
||||
}
|
||||
|
||||
// Unlock while applying update logic, re-lock on error and success.
|
||||
lck_mtx_unlock(osquery.mtx);
|
||||
sync = (osquery_buf_sync_args_t *)data;
|
||||
if ((err = update_user_kernel_buffer(sync->options,
|
||||
sync->read_offset,
|
||||
&(sync->max_read_offset),
|
||||
@ -302,21 +331,25 @@ static int osquery_ioctl(dev_t dev, u_long cmd, caddr_t data,
|
||||
}
|
||||
lck_mtx_lock(osquery.mtx);
|
||||
break;
|
||||
|
||||
// Daemon is requesting an allocation for the queue, and shared region.
|
||||
case OSQUERY_IOCTL_BUF_ALLOCATE:
|
||||
alloc = (osquery_buf_allocate_args_t *)data;
|
||||
|
||||
if (alloc->version != OSQUERY_KERNEL_COMM_VERSION) {
|
||||
// Daemon tried connecting with incorrect version number.
|
||||
// The structure types and sizes are bound to the COMMs version.
|
||||
// Any non-matching daemon may not handle these structures correctly.
|
||||
err = -EINVAL;
|
||||
goto error_exit;
|
||||
}
|
||||
|
||||
if (osquery.buffer != NULL) {
|
||||
// We don't want to allocate a second buffer.
|
||||
// There is only a single shared buffer.
|
||||
err = -EINVAL;
|
||||
goto error_exit;
|
||||
}
|
||||
|
||||
// Attempt to allocate and set up the circular queue.
|
||||
if ((err = allocate_user_kernel_buffer(alloc->size, &(alloc->buffer)))) {
|
||||
goto error_exit;
|
||||
}
|
||||
@ -329,7 +362,9 @@ static int osquery_ioctl(dev_t dev, u_long cmd, caddr_t data,
|
||||
goto error_exit;
|
||||
break;
|
||||
}
|
||||
|
||||
error_exit:
|
||||
// Unlock and return a status to the daemon.
|
||||
lck_mtx_unlock(osquery.mtx);
|
||||
return err;
|
||||
}
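For reference, a minimal sketch (not part of this commit) of the user-space side of the OSQUERY_IOCTL_BUF_ALLOCATE path handled above. It assumes the shared definitions (osquery_buf_allocate_args_t, OSQUERY_IOCTL_BUF_ALLOCATE, OSQUERY_KERNEL_COMM_VERSION) come from the tree's kernel/include/feeds.h; the CQueue constructor later in this diff performs the same sequence.

// Sketch: user-space request for the shared circular queue buffer.
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <cstddef>

#include "kernel/include/feeds.h"  // Assumed include path for the shared API.

int request_shared_buffer(size_t size, void **buffer) {
  int fd = open("/dev/osquery", O_RDWR);
  if (fd < 0) {
    return -1;  // Device node missing or insufficient permissions.
  }

  osquery_buf_allocate_args_t alloc;
  alloc.size = size;
  alloc.buffer = nullptr;
  alloc.version = OSQUERY_KERNEL_COMM_VERSION;  // Must match the kext.

  // On success the kext maps the queue read-only into this task and returns
  // the mapped address in alloc.buffer.
  if (ioctl(fd, OSQUERY_IOCTL_BUF_ALLOCATE, &alloc) != 0) {
    close(fd);
    return -1;  // Version mismatch, size out of bounds, or already allocated.
  }

  *buffer = alloc.buffer;
  return fd;  // Keep open for subsequent subscription and sync ioctls.
}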
|
||||
@ -355,14 +390,18 @@ static struct cdevsw osquery_cdevsw = {
|
||||
kern_return_t OsqueryStart(kmod_info_t *ki, void *d) {
|
||||
dbg_printf("Kernel module starting!\n");
|
||||
|
||||
// Restart the queue and setup queue locks.
|
||||
// This does not allocate, share, or set the queue buffer or buffer values.
|
||||
osquery_cqueue_setup(&osquery.cqueue);
|
||||
|
||||
// Initialize the IOCTL (and more) device node.
|
||||
osquery.major_number = cdevsw_add(osquery.major_number, &osquery_cdevsw);
|
||||
if (osquery.major_number < 0) {
|
||||
dbg_printf("Could not get a major number!\n");
|
||||
goto error_exit;
|
||||
}
|
||||
|
||||
// Create the IOCTL (and more) device node.
|
||||
osquery.devfs = devfs_make_node(makedev(osquery.major_number, 0),
|
||||
DEVFS_CHAR, UID_ROOT, GID_WHEEL,
|
||||
0644, "osquery", 0);
|
||||
@ -371,21 +410,25 @@ kern_return_t OsqueryStart(kmod_info_t *ki, void *d) {
|
||||
goto error_exit;
|
||||
}
|
||||
|
||||
// Set up the IOCTL and kernel API locks (not queue locks).
|
||||
setup_locks();
|
||||
|
||||
return KERN_SUCCESS;
|
||||
error_exit:
|
||||
// Upon error, remove the device node if it was allocated.
|
||||
if (osquery.devfs != NULL) {
|
||||
devfs_remove(osquery.devfs);
|
||||
osquery.devfs = NULL;
|
||||
}
|
||||
|
||||
// Tear down device node data.
|
||||
if (!(osquery.major_number < 0)) {
|
||||
if (cdevsw_remove(osquery.major_number, &osquery_cdevsw) < 0) {
|
||||
panic("osquery kext: Cannot remove osquery from cdevsw");
|
||||
}
|
||||
}
|
||||
|
||||
// Reset the queue and remove the queue locks.
|
||||
osquery_cqueue_teardown(&osquery.cqueue);
|
||||
return KERN_FAILURE;
|
||||
}
|
||||
@ -393,24 +436,31 @@ error_exit:
|
||||
kern_return_t OsqueryStop(kmod_info_t *ki, void *d) {
|
||||
dbg_printf("Kernel module stoping!\n");
|
||||
|
||||
// Only stop if there are no connected daemons.
|
||||
lck_mtx_lock(osquery.mtx);
|
||||
if (osquery.open_count > 0) {
|
||||
lck_mtx_unlock(osquery.mtx);
|
||||
return KERN_FAILURE;
|
||||
}
|
||||
|
||||
// Stop sharing the queue and remove queue locks.
|
||||
// This will potentially block as heuristics are applied to make sure the
|
||||
// queue is no longer in use.
|
||||
if (osquery_cqueue_teardown(&osquery.cqueue)) {
|
||||
lck_mtx_unlock(osquery.mtx);
|
||||
return KERN_FAILURE;
|
||||
}
|
||||
|
||||
// Remove the device node.
|
||||
devfs_remove(osquery.devfs);
|
||||
osquery.devfs = NULL;
|
||||
|
||||
// Tear down the device node data.
|
||||
if (cdevsw_remove(osquery.major_number, &osquery_cdevsw) < 0) {
|
||||
panic("osquery kext: Cannot remove osquery from cdevsw");
|
||||
}
|
||||
|
||||
// Deallocate the IOCTL and kernel API locks.
|
||||
lck_mtx_unlock(osquery.mtx);
|
||||
teardown_locks();
|
||||
|
||||
|
@ -9,6 +9,7 @@
|
||||
*/
|
||||
|
||||
#include <feeds.h>
|
||||
|
||||
#include "circular_queue_kern.h"
|
||||
|
||||
/** @brief Subscribe function type.
|
||||
|
@ -4,25 +4,18 @@ if(APPLE)
|
||||
ADD_OSQUERY_LINK(FALSE "-framework IOKit")
|
||||
ADD_OSQUERY_LINK(FALSE "-framework DiskArbitration")
|
||||
|
||||
ADD_OSQUERY_LIBRARY(FALSE osquery_events_darwin
|
||||
darwin/fsevents.cpp
|
||||
darwin/iokit_hid.cpp
|
||||
darwin/diskarbitration.cpp
|
||||
darwin/scnetwork.cpp
|
||||
darwin/kernel_util.cpp
|
||||
)
|
||||
|
||||
file(GLOB OSQUERY_EVENTS_DARWIN "darwin/*.cpp")
|
||||
ADD_OSQUERY_LIBRARY(FALSE osquery_events_darwin ${OSQUERY_EVENTS_DARWIN})
|
||||
elseif(FREEBSD)
|
||||
ADD_OSQUERY_LIBRARY(FALSE osquery_events_freebsd
|
||||
freebsd/kernel_util.cpp
|
||||
)
|
||||
file(GLOB OSQUERY_EVENTS_FREEBSD "freebsd/*.cpp")
|
||||
ADD_OSQUERY_LIBRARY(FALSE osquery_events_freebsd ${OSQUERY_EVENTS_FREEBSD})
|
||||
else()
|
||||
ADD_OSQUERY_LINK(FALSE "udev")
|
||||
ADD_OSQUERY_LINK(FALSE "audit")
|
||||
|
||||
ADD_OSQUERY_LIBRARY(FALSE osquery_events_linux
|
||||
linux/inotify.cpp
|
||||
linux/udev.cpp
|
||||
linux/kernel_util.cpp
|
||||
)
|
||||
file(GLOB OSQUERY_EVENTS_LINUX "linux/*.cpp")
|
||||
ADD_OSQUERY_LIBRARY(FALSE osquery_events_linux ${OSQUERY_EVENTS_LINUX})
|
||||
endif()
|
||||
|
||||
ADD_OSQUERY_LIBRARY(TRUE osquery_events
|
||||
|
@ -8,11 +8,10 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#include "osquery/events/kernel.h"
|
||||
|
||||
#include <boost/regex.hpp>
|
||||
#include <boost/xpressive/xpressive.hpp>
|
||||
|
||||
// This must come after the boost expressive headers.
|
||||
#include <IOKit/kext/KextManager.h>
|
||||
|
||||
#include <osquery/config.h>
|
||||
@ -20,6 +19,8 @@
|
||||
#include <osquery/logger.h>
|
||||
#include <osquery/sql.h>
|
||||
|
||||
#include "osquery/events/kernel.h"
|
||||
|
||||
namespace xp = boost::xpressive;
|
||||
|
||||
namespace osquery {
|
||||
|
@ -552,9 +552,12 @@ Status EventFactory::registerEventPublisher(const PluginRef& pub) {
|
||||
|
||||
// Do not set up event publisher if events are disabled.
|
||||
if (!FLAGS_disable_events) {
|
||||
if (!specialized_pub->setUp().ok()) {
|
||||
auto status = specialized_pub->setUp();
|
||||
if (!status.ok()) {
|
||||
// Only start event loop if setUp succeeds.
|
||||
return Status(1, "Event publisher setup failed");
|
||||
LOG(INFO) << "Event publisher failed setup: " << type_id << ": "
|
||||
<< status.what();
|
||||
return status;
|
||||
}
|
||||
}
|
||||
|
||||
@ -631,7 +634,7 @@ size_t EventFactory::numSubscriptions(EventPublisherID& type_id) {
|
||||
|
||||
EventPublisherRef EventFactory::getEventPublisher(EventPublisherID& type_id) {
|
||||
if (getInstance().event_pubs_.count(type_id) == 0) {
|
||||
LOG(ERROR) << "Requested unknown event publisher: " + type_id;
|
||||
LOG(ERROR) << "Requested unknown/failed event publisher: " + type_id;
|
||||
return nullptr;
|
||||
}
|
||||
return getInstance().event_pubs_.at(type_id);
|
||||
|
@ -8,16 +8,21 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#include "osquery/events/kernel.h"
|
||||
|
||||
#include <osquery/logger.h>
|
||||
|
||||
#include "osquery/events/kernel.h"
|
||||
|
||||
namespace osquery {
|
||||
|
||||
FLAG(bool, disable_kernel, false, "Disable osquery kernel extension");
|
||||
|
||||
static const size_t shared_buffer_size_bytes = (20 * (1 << 20));
|
||||
static const int max_events_before_sync = 1000;
|
||||
const std::string kKernelDevice = "/dev/osquery";
|
||||
|
||||
/// Kernel shared buffer size in bytes.
|
||||
static const size_t kKernelQueueSize = (20 * (1 << 20));
|
||||
|
||||
/// Handle a maximum of 1000 events before requesting a resync.
|
||||
static const int kKernelEventsSyncMax = 1000;
|
||||
|
||||
REGISTER(KernelEventPublisher, "event_publisher", "kernel");
|
||||
|
||||
@ -27,16 +32,14 @@ Status KernelEventPublisher::setUp() {
|
||||
}
|
||||
|
||||
try {
|
||||
queue_ = new CQueue(shared_buffer_size_bytes);
|
||||
queue_ = new CQueue(kKernelDevice, kKernelQueueSize);
|
||||
} catch (const CQueueException &e) {
|
||||
if (kToolType == OSQUERY_TOOL_DAEMON) {
|
||||
LOG(INFO) << "Cannot connect to kernel. " << e.what();
|
||||
}
|
||||
queue_ = nullptr;
|
||||
return Status(0, e.what());
|
||||
return Status(1, e.what());
|
||||
}
|
||||
|
||||
if (queue_ == nullptr) {
|
||||
return Status(1, "Could not allocate CQueue object.");
|
||||
return Status(1, "Could not allocate CQueue object");
|
||||
}
|
||||
|
||||
return Status(0, "OK");
|
||||
@ -59,7 +62,7 @@ void KernelEventPublisher::tearDown() {
|
||||
|
||||
Status KernelEventPublisher::run() {
|
||||
if (queue_ == nullptr) {
|
||||
return Status(1, "No kernel communication.");
|
||||
return Status(1, "No kernel communication");
|
||||
}
|
||||
|
||||
// Perform queue read min/max synchronization.
|
||||
@ -70,11 +73,11 @@ Status KernelEventPublisher::run() {
|
||||
LOG(WARNING) << "Dropping " << drops << " kernel events";
|
||||
}
|
||||
} catch (const CQueueException &e) {
|
||||
LOG(WARNING) << e.what();
|
||||
LOG(WARNING) << "Queue synchronization error: " << e.what();
|
||||
}
|
||||
|
||||
// Iterate over each event type in the queue and appropriately fire each.
|
||||
int max_before_sync = max_events_before_sync;
|
||||
int max_before_sync = kKernelEventsSyncMax;
|
||||
KernelEventContextRef ec;
|
||||
osquery_event_t event_type = OSQUERY_NULL_EVENT;
|
||||
CQueue::event *event = nullptr;
|
||||
|
@ -10,15 +10,23 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "osquery/events/kernel/circular_queue_user.h"
|
||||
#include <vector>
|
||||
|
||||
#include <osquery/events.h>
|
||||
#include <osquery/status.h>
|
||||
|
||||
#include <vector>
|
||||
#include "osquery/events/kernel/circular_queue_user.h"
|
||||
|
||||
namespace osquery {
|
||||
|
||||
/**
|
||||
* @brief Name of the kernel communication device node.
|
||||
*
|
||||
* The kernel component creates an ioctl API for synchronizing kernel-based
|
||||
* subscriptions and userland access to regions of shared memory.
|
||||
*/
|
||||
extern const std::string kKernelDevice;
|
||||
|
||||
/**
|
||||
* @brief Load kernel extension if applicable.
|
||||
*/
|
||||
|
@ -8,23 +8,21 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#include "osquery/dispatcher/dispatcher.h"
|
||||
#include "osquery/events/kernel/circular_queue_user.h"
|
||||
|
||||
#include <sys/ioctl.h>
|
||||
#include <fcntl.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include <benchmark/benchmark.h>
|
||||
|
||||
#include <boost/make_shared.hpp>
|
||||
#include "osquery/dispatcher/dispatcher.h"
|
||||
#include "osquery/events/kernel.h"
|
||||
|
||||
namespace osquery {
|
||||
|
||||
#ifdef KERNEL_TEST
|
||||
|
||||
static void CommunicationBenchmark(benchmark::State &state) {
|
||||
if (state.thread_index == 0) {
|
||||
CQueue queue(8 * (1 << 20));
|
||||
static inline void producerThread(benchmark::State &state) {
|
||||
CQueue queue(kKernelDevice, 8 * (1 << 20));
|
||||
|
||||
osquery_event_t event;
|
||||
osquery::CQueue::event *event_buf = nullptr;
|
||||
@ -57,18 +55,26 @@ static void CommunicationBenchmark(benchmark::State &state) {
|
||||
reads1 * sizeof(test_event_1_data_t) +
|
||||
(reads0 + reads1) * sizeof(osquery_event_time_t));
|
||||
state.SetItemsProcessed(reads0 + reads1);
|
||||
std::string s = std::string("dropped: ") + std::to_string(drops) +
|
||||
" syncs: " + std::to_string(syncs);
|
||||
state.SetLabel(s);
|
||||
} else {
|
||||
const char *filename = "/dev/osquery";
|
||||
int fd = open(filename, O_RDWR);
|
||||
auto label = std::string("dropped: ") + std::to_string(drops) + " syncs: " +
|
||||
std::to_string(syncs);
|
||||
state.SetLabel(label);
|
||||
}
|
||||
|
||||
static inline void consumerThread(benchmark::State &state) {
|
||||
int fd = open(kKernelDevice.c_str(), O_RDWR);
|
||||
int type = state.thread_index % 2;
|
||||
while (state.KeepRunning()) {
|
||||
ioctl(fd, OSQUERY_IOCTL_TEST, &type);
|
||||
}
|
||||
close(fd);
|
||||
}
|
||||
|
||||
static void CommunicationBenchmark(benchmark::State &state) {
|
||||
if (state.thread_index == 0) {
|
||||
producerThread(state);
|
||||
} else {
|
||||
consumerThread(state);
|
||||
}
|
||||
}
|
||||
|
||||
BENCHMARK(CommunicationBenchmark)->UseRealTime()->ThreadRange(2, 32);
|
||||
|
@ -16,26 +16,25 @@
|
||||
|
||||
namespace osquery {
|
||||
|
||||
CQueue::CQueue(size_t size) {
|
||||
CQueue::CQueue(const std::string &device, size_t size) {
|
||||
buffer_ = nullptr;
|
||||
size_ = 0;
|
||||
max_read_ = nullptr;
|
||||
read_ = nullptr;
|
||||
fd_ = -1;
|
||||
|
||||
const char *filename = "/dev/osquery";
|
||||
osquery_buf_allocate_args_t alloc;
|
||||
alloc.size = size;
|
||||
alloc.buffer = nullptr;
|
||||
alloc.version = OSQUERY_KERNEL_COMM_VERSION;
|
||||
|
||||
fd_ = open(filename, O_RDWR);
|
||||
fd_ = open(device.c_str(), O_RDWR);
|
||||
if (fd_ < 0) {
|
||||
throw CQueueException("Could not open character device.");
|
||||
throw CQueueException("Could not open character device");
|
||||
}
|
||||
|
||||
if (ioctl(fd_, OSQUERY_IOCTL_BUF_ALLOCATE, &alloc)) {
|
||||
throw CQueueException("Could not allocate shared buffer.");
|
||||
throw CQueueException("Could not allocate shared buffer");
|
||||
}
|
||||
|
||||
buffer_ = (uint8_t *)alloc.buffer;
|
||||
@ -58,7 +57,7 @@ void CQueue::subscribe(osquery_event_t event, void *udata) {
|
||||
sub.udata = udata;
|
||||
|
||||
if (ioctl(fd_, OSQUERY_IOCTL_SUBSCRIPTION, &sub)) {
|
||||
throw CQueueException("Could not subscribe to event.");
|
||||
throw CQueueException("Could not subscribe to event");
|
||||
}
|
||||
}
|
||||
|
||||
@ -84,9 +83,9 @@ osquery_event_t CQueue::dequeue(CQueue::event **event) {
|
||||
return header->event;
|
||||
}
|
||||
|
||||
// return positive indicates drop, 0 is all good in the hood.
|
||||
// options are listed in kernel feeds. primarily OSQUERY_NO_BLOCK.
|
||||
int CQueue::kernelSync(int options) {
|
||||
// A positive return indicates drops, 0 is all good in the hood.
|
||||
// Options are listed in kernel feeds; primarily OSQUERY_NO_BLOCK.
|
||||
osquery_buf_sync_args_t sync;
|
||||
sync.read_offset = read_ - buffer_;
|
||||
sync.options = options;
|
||||
@ -97,7 +96,7 @@ int CQueue::kernelSync(int options) {
|
||||
max_read_ = new_max_read;
|
||||
if (err) {
|
||||
read_ = max_read_;
|
||||
throw CQueueException("Could not sync buffer with kernel properly.");
|
||||
throw CQueueException("Could not sync buffer with kernel properly");
|
||||
}
|
||||
|
||||
return sync.drops;
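A minimal consumer sketch (not part of this commit) built from the CQueue API shown above: construct with device and size, subscribe, synchronize, then dequeue. It assumes dequeue() returns OSQUERY_NULL_EVENT once the readable window is exhausted, and the subscribed event type is supplied by the caller rather than named here.

// Sketch: one pass of the user-space consumer over the shared queue.
#include <osquery/logger.h>

#include "osquery/events/kernel.h"

namespace osquery {

void drainKernelQueue(osquery_event_t subscribed_event) {
  try {
    CQueue queue(kKernelDevice, 8 * (1 << 20));
    queue.subscribe(subscribed_event, nullptr);

    // Ask the kext for the current readable window; a positive return value
    // is the number of dropped events.
    int drops = queue.kernelSync(OSQUERY_NO_BLOCK);
    if (drops > 0) {
      LOG(WARNING) << "Dropped " << drops << " kernel events";
    }

    CQueue::event *event = nullptr;
    osquery_event_t type;
    while ((type = queue.dequeue(&event)) != OSQUERY_NULL_EVENT) {
      // Each dequeued event exposes its type and a packed payload buffer.
      (void)type;
    }
  } catch (const CQueueException &e) {
    LOG(WARNING) << "Kernel communication failed: " << e.what();
  }
}

} // namespace osquery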
|
||||
|
@ -11,6 +11,7 @@
|
||||
#pragma once
|
||||
|
||||
#include <stdexcept>
|
||||
#include <string>
|
||||
|
||||
#include "kernel/include/feeds.h"
|
||||
|
||||
@ -18,7 +19,7 @@ namespace osquery {
|
||||
|
||||
class CQueueException : public std::runtime_error {
|
||||
public:
|
||||
explicit CQueueException(const char *str) : std::runtime_error(str) {};
|
||||
explicit CQueueException(const std::string &s) : std::runtime_error(s){};
|
||||
};
|
||||
|
||||
class CQueue {
|
||||
@ -39,9 +40,10 @@ class CQueue {
|
||||
* shared buffer of the specified size. The size must be accepted by the
|
||||
* kernel extension.
|
||||
*
|
||||
* @param device The device node path for ioctl communication.
|
||||
* @param size The size of the shared buffer used for communication.
|
||||
*/
|
||||
explicit CQueue(size_t size);
|
||||
explicit CQueue(const std::string &device, size_t size);
|
||||
|
||||
/**
|
||||
* @brief Cleanup a cqueue.
|
||||
|
@ -8,16 +8,17 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#include "osquery/dispatcher/dispatcher.h"
|
||||
#include "osquery/events/kernel/circular_queue_user.h"
|
||||
|
||||
#include <sys/ioctl.h>
|
||||
#include <fcntl.h>
|
||||
#include <unistd.h>
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#include <boost/make_shared.hpp>
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#include "osquery/dispatcher/dispatcher.h"
|
||||
#include "osquery/events/kernel.h"
|
||||
|
||||
namespace osquery {
|
||||
|
||||
class KernelCommunicationTests : public testing::Test {};
|
||||
@ -28,9 +29,9 @@ class KernelProducerRunnable : public InternalRunnable {
|
||||
explicit KernelProducerRunnable(int events_to_produce, int event_type)
|
||||
: events_to_produce_(events_to_produce),
|
||||
event_type_(event_type) {}
|
||||
|
||||
virtual void start() {
|
||||
const char *filename = "/dev/osquery";
|
||||
int fd = open(filename, O_RDWR);
|
||||
int fd = open(kKernelDevice.c_str(), O_RDWR);
|
||||
if (fd >= 0) {
|
||||
for (uint32_t i = 0; i < events_to_produce_; i ++) {
|
||||
ioctl(fd, OSQUERY_IOCTL_TEST, &event_type_);
|
||||
@ -51,7 +52,7 @@ TEST_F(KernelCommunicationTests, test_communication) {
|
||||
int drops = 0;
|
||||
int reads = 0;
|
||||
|
||||
CQueue queue(8 * (1 << 20));
|
||||
CQueue queue(kKernelDevice, 8 * (1 << 20));
|
||||
|
||||
auto& dispatcher = Dispatcher::instance();
|
||||
|
||||
|
osquery/events/linux/audit.cpp (new file, 319 lines)
@ -0,0 +1,319 @@
|
||||
/*
|
||||
* Copyright (c) 2014, Facebook, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This source code is licensed under the BSD-style license found in the
|
||||
* LICENSE file in the root directory of this source tree. An additional grant
|
||||
* of patent rights can be found in the PATENTS file in the same directory.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <boost/algorithm/string/classification.hpp>
|
||||
#include <boost/algorithm/string/split.hpp>
|
||||
#include <boost/filesystem.hpp>
|
||||
|
||||
#include <osquery/filesystem.h>
|
||||
#include <osquery/flags.h>
|
||||
#include <osquery/logger.h>
|
||||
|
||||
#include "osquery/events/linux/audit.h"
|
||||
|
||||
namespace osquery {
|
||||
|
||||
/// The audit subsystem may have a performance impact on the system.
|
||||
FLAG(bool,
|
||||
disable_audit,
|
||||
true,
|
||||
"Disable receiving events from the audit subsystem");
|
||||
|
||||
/// Control the audit subsystem by electing to be the single process sink.
|
||||
FLAG(bool, audit_persist, true, "Attempt to retain control of audit");
|
||||
|
||||
/// Control the audit subsystem by allowing subscriptions to apply rules.
|
||||
FLAG(bool,
|
||||
audit_allow_config,
|
||||
false,
|
||||
"Allow the audit publisher to change auditing configuration");
|
||||
|
||||
REGISTER(AuditEventPublisher, "event_publisher", "audit");
|
||||
|
||||
enum AuditStatus {
|
||||
AUDIT_DISABLED = 0,
|
||||
AUDIT_ENABLED = 1,
|
||||
AUDIT_IMMUTABLE = 2,
|
||||
};
|
||||
|
||||
Status AuditEventPublisher::setUp() {
|
||||
handle_ = audit_open();
|
||||
if (handle_ <= 0) {
|
||||
// Could not open the audit subsystem.
|
||||
return Status(1, "Could not open audit subsystem");
|
||||
}
|
||||
|
||||
// The setup can try to enable auditing.
|
||||
if (!FLAGS_disable_audit && FLAGS_audit_allow_config) {
|
||||
audit_set_enabled(handle_, AUDIT_ENABLED);
|
||||
}
|
||||
|
||||
auto enabled = audit_is_enabled(handle_);
|
||||
if (enabled == AUDIT_IMMUTABLE || getuid() != 0 ||
|
||||
!FLAGS_audit_allow_config) {
|
||||
// The audit subsystem is in an immutable mode.
|
||||
immutable_ = true;
|
||||
} else if (enabled != AUDIT_ENABLED) {
|
||||
// No audit subsystem is available, or an error was encountered.
|
||||
audit_close(handle_);
|
||||
return Status(1, "Audit subsystem is not enabled");
|
||||
}
|
||||
|
||||
// The auditd daemon sets its PID.
|
||||
if (!FLAGS_disable_audit && !immutable_) {
|
||||
if (audit_set_pid(handle_, getpid(), WAIT_YES) < 0) {
|
||||
// Could not set our process as the userspace auditing daemon.
|
||||
return Status(1, "Could not set audit PID");
|
||||
}
|
||||
// This process is now in control of audit.
|
||||
control_ = true;
|
||||
|
||||
// We want to set a sane minimum buffer size and maximum events/second rate.
|
||||
// This is normally controlled through the audit config, but we must
|
||||
// enforce sane minimums: -b 8192 -e 100
|
||||
|
||||
// Request only the highest priority of audit status messages.
|
||||
set_aumessage_mode(MSG_QUIET, DBG_NO);
|
||||
}
|
||||
return Status(0, "OK");
|
||||
}
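Outside the publisher framework, the setUp() sequence above reduces to a few libaudit calls. A sketch (not part of this commit), assuming root privileges and that the caller keeps the returned handle for audit_get_reply():

// Sketch: open the audit netlink socket and take over the reply sink.
#include <unistd.h>

#include <libaudit.h>

int take_audit_control() {
  int handle = audit_open();
  if (handle <= 0) {
    return -1;  // Could not create the audit netlink socket.
  }

  // 2 corresponds to AUDIT_IMMUTABLE in the enum above: rules are locked
  // until reboot, so do not attempt to configure anything.
  if (audit_is_enabled(handle) == 2) {
    audit_close(handle);
    return -1;
  }

  // Become the single process that receives audit replies from the kernel.
  if (audit_set_pid(handle, getpid(), WAIT_YES) < 0) {
    audit_close(handle);
    return -1;
  }
  return handle;
}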
|
||||
|
||||
void AuditEventPublisher::configure() {
|
||||
// Able to issue libaudit API calls.
|
||||
struct AuditRuleInternal rule;
|
||||
|
||||
// Before reply data is ever filled in, assure an empty message.
|
||||
memset(&reply_, 0, sizeof(struct audit_reply));
|
||||
|
||||
if (handle_ <= 0 || FLAGS_disable_audit || immutable_) {
|
||||
// No configuration or rule manipulation needed.
|
||||
// The publisher run loop may still receive audit metadata events.
|
||||
return;
|
||||
}
|
||||
|
||||
for (auto& sub : subscriptions_) {
|
||||
auto sc = getSubscriptionContext(sub->context);
|
||||
for (const auto& scr : sc->rules) {
|
||||
// Reset all members to nothing.
|
||||
memset(&rule.rule, 0, sizeof(struct audit_rule_data));
|
||||
|
||||
if (scr.syscall != 0) {
|
||||
audit_rule_syscall_data(&rule.rule, scr.syscall);
|
||||
}
|
||||
|
||||
if (scr.filter.size() > 0) {
|
||||
// Fill in rule's filter data.
|
||||
auto* rrule = &rule.rule;
|
||||
audit_rule_fieldpair_data(&rrule, scr.filter.c_str(), scr.flags);
|
||||
}
|
||||
|
||||
// Apply this rule to the EXIT filter, ALWAYS.
|
||||
int rc = audit_add_rule_data(handle_, &rule.rule, scr.flags, scr.action);
|
||||
if (rc < 0) {
|
||||
// Problem adding rule. If errno == EEXIST then fine.
|
||||
VLOG(1) << "Cannot add audit rule: syscall=" << scr.syscall
|
||||
<< " filter='" << scr.filter << "': error " << rc;
|
||||
} else {
|
||||
// Add this rule data to the publisher's list of transient rules.
|
||||
// These will be removed during tear down or re-configure.
|
||||
rule.flags = scr.flags;
|
||||
rule.action = scr.action;
|
||||
transient_rules_.push_back(rule);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// The audit library provides an API to send a netlink request that fills in
|
||||
// a netlink reply with audit rules. As such, this process will maintain a
|
||||
// single open handle and reply to audit-metadata tables with the buffered
|
||||
// content from the publisher.
|
||||
if (audit_request_rules_list_data(handle_) <= 0) {
|
||||
// Could not request audit rules.
|
||||
}
|
||||
}
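The per-rule work in configure() reduces to two libaudit calls for a syscall rule. A sketch (not part of this commit) using the execve syscall number (59 on x86_64), the same value the process_events subscriber later in this diff uses:

// Sketch: add a single "always audit execve on syscall exit" rule.
#include <cstring>

#include <libaudit.h>

int add_execve_rule(int handle) {
  struct audit_rule_data rule;
  memset(&rule, 0, sizeof(rule));

  // Match the execve syscall and apply the rule to the EXIT filter, ALWAYS.
  audit_rule_syscall_data(&rule, 59);
  return audit_add_rule_data(handle, &rule, AUDIT_FILTER_EXIT, AUDIT_ALWAYS);
}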
|
||||
|
||||
void AuditEventPublisher::tearDown() {
|
||||
if (handle_ <= 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
// The configure step will store successful rule adds.
|
||||
// Each of these rules has been added by the publisher and should be removed
|
||||
// when the process tears down.
|
||||
if (!immutable_) {
|
||||
for (auto& rule : transient_rules_) {
|
||||
audit_delete_rule_data(handle_, &rule.rule, rule.flags, rule.action);
|
||||
}
|
||||
}
|
||||
|
||||
audit_close(handle_);
|
||||
}
|
||||
|
||||
inline void handleAuditConfigChange(const struct audit_reply& reply) {
|
||||
// Another daemon may have taken control.
|
||||
}
|
||||
|
||||
inline bool handleAuditReply(const struct audit_reply& reply,
|
||||
AuditEventContextRef& ec) {
|
||||
// Build an event context around this reply.
|
||||
ec->type = reply.type;
|
||||
|
||||
// Tokenize the message.
|
||||
auto message = std::string(reply.message, reply.len);
|
||||
std::vector<std::string> fields;
|
||||
boost::split(fields, message, boost::is_any_of(" ="));
|
||||
std::string().swap(message);
|
||||
|
||||
// Iterate over each field=value pair.
|
||||
auto field_it = fields.begin();
|
||||
ec->preamble = std::move(*field_it);
|
||||
for (++field_it; field_it != fields.end(); field_it++) {
|
||||
const auto& key = *field_it;
|
||||
if (++field_it == fields.end()) {
|
||||
// A malformed message will have had an odd number of field=value
|
||||
// pairs, discard the event.
|
||||
return false;
|
||||
}
|
||||
// Assign the key/value pair.
|
||||
ec->fields[key] = std::move(*field_it);
|
||||
}
|
||||
|
||||
// There is a special field for syscalls.
|
||||
if (ec->fields.count("syscall") == 1) {
|
||||
const auto& syscall_string = ec->fields.at("syscall").c_str();
|
||||
char* end = nullptr;
|
||||
long long int syscall = strtoll(syscall_string, &end, 10);
|
||||
if (end == nullptr || end == syscall_string || *end != '\0' ||
|
||||
((syscall == LLONG_MIN || syscall == LLONG_MAX) && errno == ERANGE)) {
|
||||
syscall = 0;
|
||||
}
|
||||
ec->syscall = syscall;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
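A worked example (not part of this commit) of the tokenization above, run on the sample message from the unit tests later in this diff:

// Example: split an audit message into its preamble and field=value pairs.
#include <iostream>
#include <map>
#include <string>
#include <vector>

#include <boost/algorithm/string/classification.hpp>
#include <boost/algorithm/string/split.hpp>

int main() {
  std::string message =
      "audit(1440542781.644:403030): argc=3 a0=\"/bin/sh\" a1=\"-c\" a2=\"h\"";

  std::vector<std::string> parts;
  boost::split(parts, message, boost::is_any_of(" ="));

  // parts[0] is the preamble (audit timestamp/serial); the rest alternate
  // key, value.
  std::map<std::string, std::string> fields;
  for (size_t i = 1; i + 1 < parts.size(); i += 2) {
    fields[parts[i]] = parts[i + 1];
  }

  for (const auto &kv : fields) {
    std::cout << kv.first << " -> " << kv.second << "\n";
  }
  // Prints: a0 -> "/bin/sh", a1 -> "-c", a2 -> "h", argc -> 3
}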
|
||||
|
||||
void AuditEventPublisher::handleListRules() {
|
||||
// Store the rules response.
|
||||
// This is not needed until there are audit meta-tables listing the rules.
|
||||
}
|
||||
|
||||
Status AuditEventPublisher::run() {
|
||||
if (!FLAGS_disable_audit && (count_ == 0 || count_++ % 10 == 0)) {
|
||||
// Request an update to the audit status.
|
||||
// This will also fill in the status on first run.
|
||||
audit_request_status(handle_);
|
||||
}
|
||||
|
||||
// Reset the reply data.
|
||||
int result = 0;
|
||||
bool handle_reply = false;
|
||||
while (true) {
|
||||
handle_reply = false;
|
||||
|
||||
// Request a reply in a non-blocking mode.
|
||||
// This allows the publisher's run loop to periodically request an audit
|
||||
// status update. These updates can check for other processes attempting to
|
||||
// gain control over the audit sink.
|
||||
// This non-blocking also allows faster receipt of multi-message events.
|
||||
result = audit_get_reply(handle_, &reply_, GET_REPLY_NONBLOCKING, 0);
|
||||
if (result > 0) {
|
||||
switch (reply_.type) {
|
||||
case NLMSG_NOOP:
|
||||
case NLMSG_DONE:
|
||||
case NLMSG_ERROR:
|
||||
// Not handled, request another reply.
|
||||
break;
|
||||
case AUDIT_LIST_RULES:
|
||||
// Build rules cache.
|
||||
handleListRules();
|
||||
break;
|
||||
case AUDIT_GET:
|
||||
// Make a copy of the status reply and store as the most-recent.
|
||||
if (reply_.status != nullptr) {
|
||||
memcpy(&status_, reply_.status, sizeof(struct audit_status));
|
||||
}
|
||||
break;
|
||||
case (AUDIT_GET + 1)...(AUDIT_LIST_RULES - 1):
|
||||
case (AUDIT_LIST_RULES + 1)... AUDIT_LAST_USER_MSG:
|
||||
// Not interested in handling meta-commands and actions.
|
||||
break;
|
||||
case AUDIT_DAEMON_START... AUDIT_DAEMON_CONFIG: // 1200 - 1203
|
||||
case AUDIT_CONFIG_CHANGE:
|
||||
handleAuditConfigChange(reply_);
|
||||
break;
|
||||
case AUDIT_SYSCALL: // 1300
|
||||
// A monitored syscall was issued, most likely part of a multi-record.
|
||||
handle_reply = true;
|
||||
break;
|
||||
case AUDIT_CWD: // 1307
|
||||
case AUDIT_PATH: // 1302
|
||||
case AUDIT_EXECVE: // 1309 (execve arguments).
|
||||
handle_reply = true;
|
||||
case AUDIT_EOE: // 1320 (multi-record event).
|
||||
break;
|
||||
default:
|
||||
// All other cases, pass to reply.
|
||||
handle_reply = true;
|
||||
}
|
||||
} else {
|
||||
// Fall through to the run loop cool down.
|
||||
break;
|
||||
}
|
||||
|
||||
// Replies are 'handled' as potential events for several audit types.
|
||||
if (handle_reply) {
|
||||
auto ec = createEventContext();
|
||||
// Build the event context from the reply type and parse the message.
|
||||
if (handleAuditReply(reply_, ec)) {
|
||||
fire(ec);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (status_.pid != getpid()) {
|
||||
if (control_ && status_.pid != 0) {
|
||||
VLOG(1) << "Audit control lost to pid: " << status_.pid;
|
||||
// This process has lost control of audit.
|
||||
// The initial request for control was made during setup.
|
||||
control_ = false;
|
||||
}
|
||||
|
||||
if (FLAGS_audit_persist && !FLAGS_disable_audit && !immutable_) {
|
||||
audit_set_pid(handle_, getpid(), WAIT_NO);
|
||||
control_ = true;
|
||||
}
|
||||
}
|
||||
|
||||
// Only apply a cool down if the reply request failed.
|
||||
osquery::publisherSleep(200);
|
||||
return Status(0, "Continue");
|
||||
}
|
||||
|
||||
bool AuditEventPublisher::shouldFire(const AuditSubscriptionContextRef& sc,
|
||||
const AuditEventContextRef& ec) const {
|
||||
// If this subscription (with set of rules) explicitly requested the audit
|
||||
// reply type.
|
||||
for (const auto& type : sc->types) {
|
||||
if (type != 0 && ec->type == type) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
// Otherwise, if the set of rules included a syscall, match on that number.
|
||||
for (const auto& rule : sc->rules) {
|
||||
if (rule.syscall != 0 && ec->syscall == rule.syscall) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
}
|
osquery/events/linux/audit.h (new file, 198 lines)
@ -0,0 +1,198 @@
|
||||
/*
|
||||
* Copyright (c) 2014, Facebook, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This source code is licensed under the BSD-style license found in the
|
||||
* LICENSE file in the root directory of this source tree. An additional grant
|
||||
* of patent rights can be found in the PATENTS file in the same directory.
|
||||
*
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <map>
|
||||
#include <set>
|
||||
#include <vector>
|
||||
|
||||
#include <libaudit.h>
|
||||
|
||||
#include <osquery/events.h>
|
||||
|
||||
namespace osquery {
|
||||
|
||||
/**
|
||||
* @brief A simple audit rule description that can be populated via a config.
|
||||
*
|
||||
* Allow a config definition to set an integer syscall or string-format filters
|
||||
* that can be iterated and transformed into libaudit rules.
|
||||
*
|
||||
* The libaudit rules are only applied if `apply_rule` is set to true.
|
||||
*/
|
||||
struct AuditRule {
|
||||
/// The rule may either contain a filter or syscall number.
|
||||
int syscall{0};
|
||||
|
||||
/// The rule may either contain a filter or a syscall number.
|
||||
std::string filter;
|
||||
|
||||
/// All rules must include an action and set of flags.
|
||||
int flags{AUDIT_FILTER_EXIT};
|
||||
int action{AUDIT_ALWAYS};
|
||||
|
||||
/// Audit rules are used for matching events to subscribers.
|
||||
/// They can also apply their syscall or filter rule during setUp.
|
||||
bool apply_rule{true};
|
||||
|
||||
/// Helper to set either a syscall, filter, or both.
|
||||
AuditRule(int _syscall, std::string _filter)
|
||||
: syscall(_syscall), filter(std::move(_filter)) {}
|
||||
};
|
||||
|
||||
/// Internal rule storage for transient rule additions/removals.
|
||||
struct AuditRuleInternal {
|
||||
struct audit_rule_data rule;
|
||||
int flags{0};
|
||||
int action{0};
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Audit will generate consecutive messages related to a process event.
|
||||
*
|
||||
* The first are the syscall details, which contain information about the
|
||||
* process generating the event. That is followed by the exec arguments
|
||||
* including the canonicalized/interpreted program name. The working directory
|
||||
* and a null-delimited set of path messages follow and complete the set of
|
||||
* information.
|
||||
*/
|
||||
enum AuditProcessEventState {
|
||||
STATE_SYSCALL = AUDIT_SYSCALL,
|
||||
STATE_EXECVE = AUDIT_EXECVE,
|
||||
STATE_CWD = AUDIT_CWD,
|
||||
STATE_PATH = AUDIT_PATH,
|
||||
};
|
||||
|
||||
struct AuditSubscriptionContext : public SubscriptionContext {
|
||||
/**
|
||||
* @brief A subscription may supply a set of rules.
|
||||
*
|
||||
* These are audit rules that include syscalls to monitor or filters to
|
||||
* append. This set of rules is added to the system-configured audit rule set.
|
||||
* All rules are removed when the audit publisher is torn down.
|
||||
*/
|
||||
std::vector<AuditRule> rules;
|
||||
|
||||
/**
|
||||
* @brief Independent of the rules, supply a set of reply types used to fire.
|
||||
*
|
||||
* Matching a rule does not mean the subscription callback will fire.
|
||||
* If any of the rules included a syscall then an audit type=SYSCALL for that
|
||||
* syscall will fire. Otherwise a subscription should include the set of audit
|
||||
* reply types it handles.
|
||||
*/
|
||||
std::set<int> types;
|
||||
|
||||
private:
|
||||
friend class AuditEventPublisher;
|
||||
};
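A sketch (not part of this commit) of how a subscription might populate these members. The syscall number matches the process_events subscriber below; the "uid=0" filter pair is illustrative only, not taken from this change.

// Sketch: populating a subscription context with audit rules.
#include "osquery/events/linux/audit.h"

namespace osquery {

void exampleAuditRules(const AuditSubscriptionContextRef &sc) {
  // Match a syscall number on the (default) exit filter.
  sc->rules.push_back({59 /* execve on x86_64 */, ""});

  // Or supply a field=value pair applied via audit_rule_fieldpair_data().
  sc->rules.push_back({0, "uid=0"});
}

} // namespace osquery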
|
||||
|
||||
struct AuditEventContext : public EventContext {
|
||||
/// The audit reply type.
|
||||
int type{0};
|
||||
|
||||
/// If the type=AUDIT_SYSCALL then this is filled in with the syscall type.
|
||||
/// Otherwise this set to 0.
|
||||
int syscall{0};
|
||||
|
||||
/**
|
||||
* @brief The audit message tokenized into fields.
|
||||
*
|
||||
* If the field contained a space in the value the data will be hex encoded.
|
||||
* It is the responsibility of the subscription callback/handler to parse.
|
||||
*/
|
||||
std::map<std::string, std::string> fields;
|
||||
|
||||
/// Each message will contain the audit time.
|
||||
std::string preamble;
|
||||
};
|
||||
|
||||
typedef std::shared_ptr<AuditEventContext> AuditEventContextRef;
|
||||
typedef std::shared_ptr<AuditSubscriptionContext> AuditSubscriptionContextRef;
|
||||
|
||||
class AuditEventPublisher
|
||||
: public EventPublisher<AuditSubscriptionContext, AuditEventContext> {
|
||||
DECLARE_PUBLISHER("audit");
|
||||
|
||||
public:
|
||||
/**
|
||||
* @brief Set up the process/thread for handling audit netlink replies.
|
||||
*
|
||||
* This will try to open an audit netlink descriptor. If the netlink handle
|
||||
* is opened the process will check if auditing is enabled, and attempt to
|
||||
* gain control of audit message sinks (replies). This requires root
|
||||
* credentials and will have an impact on system performance.
|
||||
*
|
||||
* An 'auditd'-like process cannot (or should not) be running in tandem. Only
|
||||
* one process may receive audit messages from the audit kernel thread over
|
||||
* the netlink API. However, multiple processes may open a handle and send
|
||||
* audit requests. If an 'auditd'-like process starts while osquery is
|
||||
* receiving audit messages, this process may optionally 'regain' or attempt
|
||||
* to persist auditing capabilities by resetting the audit reply handle
|
||||
* ownership to itself.
|
||||
*
|
||||
* See the `--audit-persist` command line option.
|
||||
*/
|
||||
Status setUp();
|
||||
|
||||
/// Fill in audit rules based on syscall/filter combinations.
|
||||
void configure();
|
||||
|
||||
/// Remove audit rules and close the handle.
|
||||
void tearDown();
|
||||
|
||||
/// Poll for replies to the netlink handle in a non-blocking mode.
|
||||
Status run();
|
||||
|
||||
AuditEventPublisher() : EventPublisher() {}
|
||||
|
||||
private:
|
||||
/// Maintain a list of audit rule data for displaying or deleting.
|
||||
void handleListRules();
|
||||
|
||||
/// Apply normal subscription to event matching logic.
|
||||
bool shouldFire(const AuditSubscriptionContextRef& mc,
|
||||
const AuditEventContextRef& ec) const;
|
||||
|
||||
private:
|
||||
/// Audit subsystem (netlink) socket descriptor.
|
||||
int handle_{0};
|
||||
|
||||
/// Audit subsystem is in an immutable state.
|
||||
bool immutable_{false};
|
||||
|
||||
/**
|
||||
* @brief The last (most current) status reply.
|
||||
*
|
||||
* This contains the: pid, enabled, rate_limit, backlog_limit, lost, and
|
||||
* failure booleans and counts.
|
||||
*/
|
||||
struct audit_status status_;
|
||||
|
||||
/**
|
||||
* @brief A counter of non-blocking netlink reads that contained no data.
|
||||
*
|
||||
* After several iterations of no data, the audit run loop will request a
|
||||
* status. It is possible another userland daemon requested control of the
|
||||
* audit subsystem. The kernel thread will only emit to a single handle.
|
||||
*/
|
||||
size_t count_{0};
|
||||
|
||||
/// Is this process in control of the audit subsystem.
|
||||
bool control_{false};
|
||||
|
||||
/// The last (most recent) audit reply.
|
||||
struct audit_reply reply_;
|
||||
|
||||
/// Track all rule data added by the publisher.
|
||||
std::vector<struct AuditRuleInternal> transient_rules_;
|
||||
};
|
||||
}
|
osquery/events/linux/tests/audit_tests.cpp (new file, 133 lines)
@ -0,0 +1,133 @@
|
||||
/*
|
||||
* Copyright (c) 2014, Facebook, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This source code is licensed under the BSD-style license found in the
|
||||
* LICENSE file in the root directory of this source tree. An additional grant
|
||||
* of patent rights can be found in the PATENTS file in the same directory.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#include <osquery/events.h>
|
||||
#include <osquery/tables.h>
|
||||
|
||||
#include "osquery/events/linux/audit.h"
|
||||
#include "osquery/core/test_util.h"
|
||||
|
||||
namespace osquery {
|
||||
|
||||
/// Internal audit publisher testable methods.
|
||||
extern bool handleAuditReply(const struct audit_reply& reply,
|
||||
AuditEventContextRef& ec);
|
||||
|
||||
/// Internal audit subscriber (process events) testable methods.
|
||||
extern std::string decodeAuditValue(const std::string& e);
|
||||
extern Status validAuditState(int type, AuditProcessEventState& state);
|
||||
|
||||
class AuditTests : public testing::Test {
|
||||
protected:
|
||||
void SetUp() { Row().swap(row_); }
|
||||
|
||||
protected:
|
||||
Row row_;
|
||||
};
|
||||
|
||||
TEST_F(AuditTests, test_handle_reply) {
|
||||
// Create the input structures.
|
||||
struct audit_reply reply;
|
||||
auto ec = std::make_shared<AuditEventContext>();
|
||||
|
||||
// A 'fake' audit message.
|
||||
std::string message =
|
||||
"audit(1440542781.644:403030): argc=3 a0=\"/bin/sh\" a1=\"-c\" a2=\"h\"";
|
||||
|
||||
reply.type = 1;
|
||||
reply.len = message.size();
|
||||
reply.message = (char*)malloc(sizeof(char) * (message.size() + 1));
|
||||
memset((void*)reply.message, 0, message.size() + 1);
|
||||
memcpy((void*)reply.message, message.c_str(), message.size());
|
||||
|
||||
// Perform the parsing.
|
||||
handleAuditReply(reply, ec);
|
||||
free((char*)reply.message);
|
||||
|
||||
EXPECT_EQ(reply.type, ec->type);
|
||||
EXPECT_EQ(ec->fields.size(), 4);
|
||||
EXPECT_EQ(ec->fields.count("argc"), 1);
|
||||
EXPECT_EQ(ec->fields["argc"], "3");
|
||||
EXPECT_EQ(ec->fields["a0"], "\"/bin/sh\"");
|
||||
}
|
||||
|
||||
TEST_F(AuditTests, test_audit_value_decode) {
|
||||
// In the normal case the decoding only removes '"' characters from the ends.
|
||||
auto decoded_normal = decodeAuditValue("\"/bin/ls\"");
|
||||
EXPECT_EQ(decoded_normal, "/bin/ls");
|
||||
|
||||
// If the first char is not '"', the value is expected to be hex-encoded.
|
||||
auto decoded_hex = decodeAuditValue("736C6565702031");
|
||||
EXPECT_EQ(decoded_hex, "sleep 1");
|
||||
|
||||
// When the hex fails to decode the input value is returned as the result.
|
||||
auto decoded_fail = decodeAuditValue("7");
|
||||
EXPECT_EQ(decoded_fail, "7");
|
||||
}
|
||||
|
||||
TEST_F(AuditTests, test_valid_audit_state) {
|
||||
AuditProcessEventState state = STATE_SYSCALL;
|
||||
|
||||
// The first state must be a syscall.
|
||||
EXPECT_TRUE(validAuditState(STATE_SYSCALL, state));
|
||||
EXPECT_EQ(state, STATE_EXECVE);
|
||||
|
||||
// Followed by an EXECVE, CWD, or PATH
|
||||
EXPECT_TRUE(validAuditState(STATE_EXECVE, state));
|
||||
EXPECT_EQ(state, STATE_CWD);
|
||||
EXPECT_TRUE(validAuditState(STATE_CWD, state));
|
||||
EXPECT_EQ(state, STATE_PATH);
|
||||
EXPECT_TRUE(validAuditState(STATE_PATH, state));
|
||||
// Finally, the state is reset to syscall.
|
||||
EXPECT_EQ(state, STATE_SYSCALL);
|
||||
}
|
||||
|
||||
TEST_F(AuditTests, test_valid_audit_state_exceptions) {
|
||||
AuditProcessEventState state = STATE_SYSCALL;
|
||||
validAuditState(STATE_SYSCALL, state);
|
||||
|
||||
// Now allow for other acceptable transitions.
|
||||
EXPECT_TRUE(validAuditState(STATE_PATH, state));
|
||||
EXPECT_EQ(state, STATE_SYSCALL);
|
||||
|
||||
state = STATE_SYSCALL;
|
||||
validAuditState(STATE_SYSCALL, state);
|
||||
EXPECT_TRUE(validAuditState(STATE_PATH, state));
|
||||
EXPECT_EQ(state, STATE_SYSCALL);
|
||||
}
|
||||
|
||||
TEST_F(AuditTests, test_valid_audit_state_failues) {
|
||||
// Now check invalid states.
|
||||
AuditProcessEventState state = STATE_SYSCALL;
|
||||
EXPECT_FALSE(validAuditState(STATE_EXECVE, state));
|
||||
EXPECT_FALSE(validAuditState(STATE_CWD, state));
|
||||
EXPECT_FALSE(validAuditState(STATE_PATH, state));
|
||||
|
||||
// Two syscalls in a row: invalid.
|
||||
state = STATE_SYSCALL;
|
||||
validAuditState(STATE_SYSCALL, state);
|
||||
EXPECT_FALSE(validAuditState(STATE_SYSCALL, state));
|
||||
|
||||
// A cwd must come after an exec.
|
||||
state = STATE_SYSCALL;
|
||||
validAuditState(STATE_SYSCALL, state);
|
||||
EXPECT_FALSE(validAuditState(STATE_CWD, state));
|
||||
|
||||
// Two execs in a row: invalid.
|
||||
state = STATE_SYSCALL;
|
||||
validAuditState(STATE_SYSCALL, state);
|
||||
validAuditState(STATE_EXECVE, state);
|
||||
EXPECT_FALSE(validAuditState(STATE_EXECVE, state));
|
||||
}
|
||||
}
|
@ -17,8 +17,7 @@ namespace pt = boost::property_tree;
|
||||
|
||||
namespace osquery {
|
||||
|
||||
class ProcessEventSubscriber
|
||||
: public EventSubscriber<KernelEventPublisher> {
|
||||
class ProcessEventSubscriber : public EventSubscriber<KernelEventPublisher> {
|
||||
public:
|
||||
/// The process event subscriber declares a kernel event type subscription.
|
||||
Status init();
|
||||
@ -44,13 +43,11 @@ Status ProcessEventSubscriber::Callback(
|
||||
const void *user_data) {
|
||||
Row r;
|
||||
r["overflows"] = "";
|
||||
r["cmdline_count"] = BIGINT(ec->event.actual_argc);
|
||||
r["cmdline_size"] = BIGINT(ec->event.arg_length);
|
||||
if (ec->event.argc != ec->event.actual_argc) {
|
||||
r["overflows"] = "cmdline";
|
||||
}
|
||||
|
||||
r["envc"] = BIGINT(ec->event.envc);
|
||||
r["environment_count"] = BIGINT(ec->event.actual_envc);
|
||||
r["environment_size"] = BIGINT(ec->event.env_length);
|
||||
if (ec->event.envc != ec->event.actual_envc) {
|
||||
@ -124,6 +121,4 @@ Status ProcessEventSubscriber::Callback(
|
||||
|
||||
return Status(0, "OK");
|
||||
}
|
||||
|
||||
|
||||
} // namespace osquery
|
osquery/tables/events/linux/process_events.cpp (new file, 183 lines)
@ -0,0 +1,183 @@
|
||||
/*
|
||||
* Copyright (c) 2014, Facebook, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This source code is licensed under the BSD-style license found in the
|
||||
* LICENSE file in the root directory of this source tree. An additional grant
|
||||
* of patent rights can be found in the PATENTS file in the same directory.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <boost/algorithm/hex.hpp>
|
||||
|
||||
#include <osquery/config.h>
|
||||
#include <osquery/logger.h>
|
||||
#include <osquery/sql.h>
|
||||
|
||||
#include "osquery/events/linux/audit.h"
|
||||
|
||||
namespace osquery {
|
||||
|
||||
#define AUDIT_SYSCALL_EXECVE 59
|
||||
|
||||
// Depend on the external getUptime table method.
|
||||
namespace tables {
|
||||
extern long getUptime();
|
||||
}
|
||||
|
||||
class ProcessEventSubscriber : public EventSubscriber<AuditEventPublisher> {
|
||||
public:
|
||||
/// The process event subscriber declares an audit event type subscription.
|
||||
Status init();
|
||||
|
||||
/// Kernel events matching the event type will fire.
|
||||
Status Callback(const AuditEventContextRef& ec, const void* user_data);
|
||||
|
||||
private:
|
||||
/// The next expected process event state.
|
||||
AuditProcessEventState state_{STATE_SYSCALL};
|
||||
|
||||
/// Along with the state, track the building row.
|
||||
Row row_;
|
||||
};
|
||||
|
||||
inline std::string decodeAuditValue(const std::string& s) {
|
||||
if (s.size() > 1 && s[0] == '"') {
|
||||
return s.substr(1, s.size() - 2);
|
||||
}
|
||||
try {
|
||||
return boost::algorithm::unhex(s);
|
||||
} catch (const boost::algorithm::hex_decode_error& e) {
|
||||
return s;
|
||||
}
|
||||
}
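A standalone version (not part of this commit) of the helper above, for experimentation; the sample values mirror the unit tests in this diff. Audit hex-encodes any value containing a space, so both branches occur in practice.

// Example: quoted values are unquoted, other values are hex-decoded, and
// undecodable input is returned unchanged.
#include <cassert>
#include <string>

#include <boost/algorithm/hex.hpp>

std::string decode(const std::string &s) {
  if (s.size() > 1 && s[0] == '"') {
    return s.substr(1, s.size() - 2);
  }
  try {
    return boost::algorithm::unhex(s);
  } catch (const boost::algorithm::hex_decode_error &) {
    return s;
  }
}

int main() {
  assert(decode("\"/bin/ls\"") == "/bin/ls");
  assert(decode("736C6565702031") == "sleep 1");  // Hex for "sleep 1".
  assert(decode("7") == "7");  // Odd-length hex fails; input is returned.
}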
|
||||
|
||||
inline Status validAuditState(int type, AuditProcessEventState& state) {
|
||||
// Define some acceptable transitions outside of the default state.
|
||||
bool acceptable = (type == STATE_PATH && state == STATE_EXECVE);
|
||||
acceptable |= (type == STATE_PATH && state == STATE_CWD);
|
||||
if (type != state && !acceptable) {
|
||||
// This is an out-of-order event, drop it.
|
||||
return Status(1, "Out of order");
|
||||
}
|
||||
|
||||
// Update the next expected state.
|
||||
if (type == AUDIT_SYSCALL) {
|
||||
state = STATE_EXECVE;
|
||||
} else if (type == AUDIT_EXECVE) {
|
||||
state = STATE_CWD;
|
||||
} else if (type == AUDIT_CWD) {
|
||||
state = STATE_PATH;
|
||||
} else {
|
||||
state = STATE_SYSCALL;
|
||||
}
|
||||
|
||||
return Status(0, "OK");
|
||||
}
|
||||
|
||||
inline void updateAuditRow(const AuditEventContextRef& ec, Row& r) {
|
||||
const auto& fields = ec->fields;
|
||||
if (ec->type == AUDIT_SYSCALL) {
|
||||
r["pid"] = (fields.count("pid")) ? fields.at("pid") : "0";
|
||||
r["parent"] = fields.count("ppid") ? fields.at("ppid") : "0";
|
||||
r["uid"] = fields.count("uid") ? fields.at("uid") : "0";
|
||||
r["euid"] = fields.count("euid") ? fields.at("euid") : "0";
|
||||
r["gid"] = fields.count("gid") ? fields.at("gid") : "0";
|
||||
r["egid"] = fields.count("egid") ? fields.at("euid") : "0";
|
||||
r["path"] = (fields.count("exe")) ? decodeAuditValue(fields.at("exe")) : "";
|
||||
|
||||
// This should get overwritten during the EXECVE state.
|
||||
r["cmdline"] = (fields.count("comm")) ? fields.at("comm") : "";
|
||||
// Do not record a cmdline size. If the final state is reached and no 'argc'
|
||||
// has been filled in then the EXECVE state was not used.
|
||||
r["cmdline_size"] = "";
|
||||
|
||||
r["overflows"] = "";
|
||||
r["environment_size"] = "0";
|
||||
r["environment_count"] = "0";
|
||||
r["environment"] = "";
|
||||
}
|
||||
|
||||
if (ec->type == AUDIT_EXECVE) {
|
||||
// Reset the temporary storage from the SYSCALL state.
|
||||
r["cmdline"] = "";
|
||||
for (const auto& arg : fields) {
|
||||
if (arg.first == "argc") {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Amalgamate all the "arg*" fields.
|
||||
if (r.at("cmdline").size() > 0) {
|
||||
r["cmdline"] += " ";
|
||||
}
|
||||
r["cmdline"] += decodeAuditValue(arg.second);
|
||||
}
|
||||
|
||||
// There may be a better way to calculate actual size from audit.
|
||||
// Then an overflow could be calculated/determined based on actual/expected.
|
||||
r["cmdline_size"] = std::to_string(r.at("cmdline").size());
|
||||
}
|
||||
|
||||
if (ec->type == AUDIT_PATH) {
|
||||
r["mode"] = (fields.count("mode")) ? fields.at("mode") : "";
|
||||
r["owner_uid"] = fields.count("ouid") ? fields.at("ouid") : "0";
|
||||
r["owner_gid"] = fields.count("ogid") ? fields.at("ogid") : "0";
|
||||
|
||||
auto qd = SQL::selectAllFrom("file", "path", EQUALS, r.at("path"));
|
||||
if (qd.size() == 1) {
|
||||
r["create_time"] = qd.front().at("ctime");
|
||||
r["access_time"] = qd.front().at("atime");
|
||||
r["modify_time"] = qd.front().at("mtime");
|
||||
r["change_time"] = "0";
|
||||
}
|
||||
|
||||
// Uptime is helpful for execution-based events.
|
||||
r["uptime"] = std::to_string(tables::getUptime());
|
||||
}
|
||||
}
|
||||
|
||||
REGISTER(ProcessEventSubscriber, "event_subscriber", "process_events");

Status ProcessEventSubscriber::init() {
  auto sc = createSubscriptionContext();

  // Monitor for execve syscalls.
  sc->rules.push_back({AUDIT_SYSCALL_EXECVE, ""});

  // Request callbacks for all parts of the process execution state.
  // Drop events if they are encountered outside of the expected state.
  sc->types = {AUDIT_SYSCALL, AUDIT_EXECVE, AUDIT_CWD, AUDIT_PATH};
  subscribe(&ProcessEventSubscriber::Callback, sc, nullptr);

  return Status(0, "OK");
}

Status ProcessEventSubscriber::Callback(const AuditEventContextRef& ec,
                                        const void* user_data) {
  // Check and set the valid state change.
  // If this is an unacceptable change, reset the state and clear row data.
  if (!validAuditState(ec->type, state_).ok()) {
    state_ = STATE_SYSCALL;
    Row().swap(row_);
    return Status(0, "OK");
  }

  // Fill in row fields based on the event state.
  updateAuditRow(ec, row_);

  // Only add the event if finished (i.e., a PATH event was emitted).
  if (state_ == STATE_SYSCALL) {
    // If the EXECVE state was not used, decode the cmdline value.
    if (row_.at("cmdline_size").size() == 0) {
      // This allows at most one decode call per potentially-encoded item.
      row_["cmdline"] = decodeAuditValue(row_.at("cmdline"));
      row_["cmdline_size"] = "1";
    }

    add(row_, getUnixTime());
    Row().swap(row_);
  }

  return Status(0, "OK");
}
} // namespace osquery
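Read together, init() and Callback() implement a small state machine: a SYSCALL record opens a candidate row, EXECVE and PATH records enrich it, and the row is added once the state wraps back around to SYSCALL. Below is a minimal, standalone Python sketch of that assembly flow; the record names, field keys, and the assemble() helper are illustrative assumptions made for this note, not osquery's publisher API.

# Hypothetical sketch (not osquery code): assemble one process event row from a
# stream of (record_type, fields) pairs, mirroring SYSCALL -> EXECVE -> PATH.
SYSCALL, EXECVE, CWD, PATH = "SYSCALL", "EXECVE", "CWD", "PATH"


def assemble(records):
    """Yield a completed row each time a PATH record closes out a SYSCALL."""
    row = {}
    for rtype, fields in records:
        if rtype == SYSCALL:
            # Start a fresh row; 'comm' is only a placeholder command line.
            row = {"pid": fields.get("pid", "0"),
                   "cmdline": fields.get("comm", "")}
        elif rtype == EXECVE:
            # Amalgamate arg0, arg1, ... into a single command line.
            args = [v for k, v in sorted(fields.items())
                    if k.startswith("arg") and k != "argc"]
            row["cmdline"] = " ".join(args)
        elif rtype == PATH:
            row["path"] = fields.get("name", "")
            yield row  # The event is complete once a PATH record arrives.
            row = {}


if __name__ == "__main__":
    events = list(assemble([
        (SYSCALL, {"pid": "101", "comm": "ls"}),
        (EXECVE, {"arg0": "ls", "arg1": "-la"}),
        (PATH, {"name": "/bin/ls"}),
    ]))
    print(events)  # [{'pid': '101', 'cmdline': 'ls -la', 'path': '/bin/ls'}]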
@ -20,7 +20,6 @@ except ImportError:

import json
import os
import psutil
import subprocess
import sys
import time
@ -40,18 +39,6 @@ RANGES = {
}


def get_stats(p, interval=1):
    """Run psutil and downselect the information."""
    utilization = p.cpu_percent(interval=interval)
    return {
        "utilization": utilization,
        "counters": p.io_counters() if utils.platform() != "darwin" else None,
        "fds": p.num_fds(),
        "cpu_times": p.cpu_times(),
        "memory": p.memory_info_ex(),
    }


def check_leaks_linux(shell, query, count=1, supp_file=None):
    """Run valgrind using the shell and a query, parse leak reports."""
    suppressions = "" if supp_file is None else "--suppressions=%s" % supp_file
@ -138,46 +125,15 @@ def run_query(shell, query, timeout=0, count=1):
        [shell, "--query", query, "--iterations", str(count),
            "--delay", "1"],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    p = psutil.Process(pid=proc.pid)

    delay = 0
    step = 0.5

    percents = []
    # Calculate the CPU utilization in intervals of 1 second.
    stats = {}
    while p.is_running() and p.status() != psutil.STATUS_ZOMBIE:
        try:
            current_stats = get_stats(p, step)
            if (current_stats["memory"].rss == 0):
                break
            stats = current_stats
            percents.append(stats["utilization"])
        except psutil.AccessDenied:
            break
        delay += step
        if timeout > 0 and delay >= timeout + 2:
            proc.kill()
            break
    duration = time.time() - start_time - 2

    utilization = [percent for percent in percents if percent != 0]
    if len(utilization) == 0:
        avg_utilization = 0
    else:
        avg_utilization = sum(utilization) / len(utilization)

    return {
        "utilization": avg_utilization,
        "duration": duration,
        "memory": stats["memory"].rss,
        "user_time": stats["cpu_times"].user,
        "system_time": stats["cpu_times"].system,
        "cpu_time": stats["cpu_times"].user + stats["cpu_times"].system,
        "fds": stats["fds"],
        "exit": p.wait(),
    }
    return utils.profile_cmd([
        shell,
        "--query",
        query,
        "--iterations",
        str(count),
        "--delay",
        "1"
    ])
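run_query() now forwards the osqueryi invocation to the shared profile_cmd() helper (added to utils.py under ./tools/tests later in this diff) and returns its result dictionary unchanged. As a hedged usage sketch only, assuming ./tools/tests is on the import path and a psutil version contemporary with this change is installed:

# Hypothetical usage of the shared profile_cmd() helper; 'sleep 3' stands in
# for an osqueryi run and the printed keys are the ones built in this diff.
from utils import profile_cmd

result = profile_cmd(["sleep", "3"])
print("cpu: %6.2f, memory: %d, util: %6.2f" % (
    result["cpu_time"], result["memory"], result["utilization"]))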


def summary_line(name, result):
@ -243,6 +199,7 @@ def profile(shell, queries, timeout=0, count=1, rounds=1):
        summary({"%s avg" % name: report[name]}, display=True)
    return report


def compare(profile1, profile2):
    """Compare two JSON profile outputs."""
    for table in profile1:

@ -71,5 +71,8 @@ function main_amazon() {
  install_cppnetlib
  install_google_benchmark

  package audit-libs-devel
  package audit-libs-static

  gem_install fpm
}

@ -106,5 +106,8 @@ function main_centos() {
  package gettext-devel
  install_libcryptsetup

  package audit-libs-devel
  package audit-libs-static

  gem_install fpm
}

@ -127,6 +127,8 @@ function main_oracle() {
  fi

  package file-libs
  package audit-libs-devel
  package audit-libs-static

  gem_install fpm
}

@ -144,5 +144,10 @@ function main_rhel() {
  package gettext-devel
  install_libcryptsetup

  if [[ $DISTRO = "rhel7" ]]; then
    package audit-libs-devel
    package audit-libs-static
  fi

  gem_install fpm
}

@ -141,8 +141,10 @@ function main_ubuntu() {
  # Need headers and PC macros
  package libgcrypt-dev
  package libdevmapper-dev

  install_libcryptsetup

  package libmagic-dev

  # Audit facility (kauditd) and netlink APIs
  package libaudit-dev
}
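libaudit-dev (and the audit-libs packages above) provide the userland side of the kernel audit netlink interface that the new publisher consumes. As a rough, hedged illustration only, the snippet below opens and closes a NETLINK_AUDIT socket from Python; the NETLINK_AUDIT constant value and the privilege requirements are assumptions, and osquery's publisher builds against libaudit rather than hand-rolling netlink sockets like this.

# Rough sketch: open a kernel audit netlink socket directly (Linux only).
# NETLINK_AUDIT = 9 comes from <linux/netlink.h>; opening it may require
# extra capabilities depending on the kernel, hence the try/except.
import socket

NETLINK_AUDIT = 9

try:
    sock = socket.socket(socket.AF_NETLINK, socket.SOCK_RAW, NETLINK_AUDIT)
    sock.bind((0, 0))  # (pid, groups); pid 0 lets the kernel pick a port id
    print("audit netlink socket opened")
    sock.close()
except (AttributeError, OSError) as exc:
    print("audit netlink unavailable here: %s" % exc)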
@ -12,9 +12,13 @@ from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import sys
import shutil
import time
import argparse
import subprocess
import tempfile
from threading import Thread

try:
    from utils import *
@ -22,33 +26,114 @@ except ImportError:
    print ("Cannot import osquery testing utils from ./tools/tests")
    exit(1)

def stress(args):
    """Small utility to run unittests several times."""
    times = []
    test = args["run"] if args["run"] is not None else ["make", "test"]
    for i in xrange(args["num"]):

def run_daemon(proc, output):
    output[proc.pid] = profile_cmd("", proc=proc)

def audit(args):
    def _run_procs(start):
        procs = []
        for i in range(3):
            for j in range(100):
                procs.append(subprocess.Popen("sleep %d" % 1,
                                              shell=True,
                                              stderr=subprocess.PIPE,
                                              stdout=subprocess.PIPE))
        if not args["stat"]:
            print ("Finished launching processes: duration %6.4fs" % (
                time.time() - start))
        for p in procs:
            p.communicate()

    proc = None
    thread = None
    results = {}
    if not args["baseline"]:
        # Start a daemon, which will modify audit rules
        test = args["run"]
        if "args" in args:
            test += " %s" % (args["args"])
        dbpath = tempfile.mkdtemp()
        test += " --database_path=%s" % (dbpath)
        proc = subprocess.Popen(test,
                                shell=True,
                                stderr=subprocess.PIPE,
                                stdout=subprocess.PIPE)
        if not args["stat"]:
            thread = Thread(target=run_daemon, args=(proc, results,))
            thread.start()
        time.sleep(1)

    # Run test applications to stress the auditing (a fork bomb)
    start_time = time.time()
    _run_procs(start_time)
    end_time = time.time()

    # Clean up
    if not args["baseline"]:
        proc.kill()
        shutil.rmtree(dbpath)
        if not args["stat"]:
            thread.join()
            if proc.pid in results:
                print("cpu: %6.2f, memory: %d, util: %6.2f" % (
                    results[proc.pid]["cpu_time"],
                    results[proc.pid]["memory"],
                    results[proc.pid]["utilization"]))
    pass
    return end_time - start_time


def single(args):
    start_time = time.time()
    proc = subprocess.Popen(test,
                            shell=True,
                            stderr=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    times.append(time.time() - start_time)
    if proc.returncode is not 0:
        print (stdout)
        print (stderr)
        print ("%s Test %d failed. (total %6.4fs)" % (
            red("FAILED"), i + 1, sum(times)))
        return proc.returncode
        sys.exit(proc.returncode)
    return time.time() - start_time

def stress(args):
    """Small utility to run unittests several times."""
    times = []
    test = args["run"] if args["run"] is not None else ["make", "test"]
    for i in xrange(args["num"]):
        if args["audit"]:
            times.append(audit(args))
        else:
            times.append(single(args))
        if args["stat"]:
            print ("%6.4f" % (times[-1]))
        else:
            print ("%s Tests passed (%d/%d) rounds. (average %6.4fs) " % (
                green("PASSED"), i + 1, args["num"], sum(times) / len(times)))
    return 0

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Run tests many times")
    parser.add_argument("-n", "--num", type=int, default=50,
                        help="Number of times to run tests")
    parser.add_argument("-A", "--audit", action="store_true", default=False,
                        help="Perform exec/process auditing stress tests")
    parser.add_argument("--baseline", action="store_true", default=False,
                        help="Run baselines when stressing auditing")
    parser.add_argument("--args", default="",
                        help="Arguments to pass to test binary")
    parser.add_argument("--stat", action="store_true", default=False,
                        help="Only print numerical values")
    parser.add_argument("run", nargs="?", help="Run specific test binary")
    args = parser.parse_args()

    exit(stress(vars(args)))
    # A baseline was requested, first run baselines then normal.
    if args.baseline:
        print("Running baseline tests...")
        stress(vars(args))
        args.baseline = False
        print("Finished. Running tests...")

    stress(vars(args))
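When --audit is combined with --baseline, the runner first times the fork storm without the daemon and then repeats it with the daemon attached, so the two sets of durations can be compared. The helper and sample numbers below are invented purely to illustrate that comparison; they are not part of this change.

# Hypothetical post-processing: compare baseline fork-storm durations against
# durations measured while an auditing daemon is running.
def relative_overhead(baseline_times, audited_times):
    """Return the percentage slowdown of the audited runs over the baseline."""
    baseline = sum(baseline_times) / len(baseline_times)
    audited = sum(audited_times) / len(audited_times)
    return (audited - baseline) / baseline * 100.0


if __name__ == "__main__":
    print("overhead: %.1f%%" % relative_overhead([2.10, 2.05], [2.40, 2.35]))
    # -> overhead: 14.5%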

@ -15,6 +15,9 @@ from __future__ import unicode_literals
import json
import os
import sys
import psutil
import time
import subprocess


def red(msg):
@ -102,3 +105,65 @@ def queries_from_tables(path, restrict):
    for table in tables:
        queries[table] = "SELECT * FROM %s;" % table.split(".", 1)[1]
    return queries


def get_stats(p, interval=1):
    """Run psutil and downselect the information."""
    utilization = p.cpu_percent(interval=interval)
    return {
        "utilization": utilization,
        "counters": p.io_counters() if platform() != "darwin" else None,
        "fds": p.num_fds(),
        "cpu_times": p.cpu_times(),
        "memory": p.memory_info_ex(),
    }


def profile_cmd(cmd, proc=None, shell=False, timeout=0, count=1):
    start_time = time.time()
    if proc is None:
        proc = subprocess.Popen(cmd,
                                shell=shell,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)

    p = psutil.Process(pid=proc.pid)

    delay = 0
    step = 0.5

    percents = []
    # Calculate the CPU utilization in intervals of 1 second.
    stats = {}
    while p.is_running() and p.status() != psutil.STATUS_ZOMBIE:
        try:
            current_stats = get_stats(p, step)
            if (current_stats["memory"].rss == 0):
                break
            stats = current_stats
            percents.append(stats["utilization"])
        except psutil.AccessDenied:
            break
        delay += step
        if timeout > 0 and delay >= timeout + 2:
            proc.kill()
            break
    duration = time.time() - start_time - 2

    utilization = [percent for percent in percents if percent != 0]
    if len(utilization) == 0:
        avg_utilization = 0
    else:
        avg_utilization = sum(utilization) / len(utilization)

    return {
        "utilization": avg_utilization,
        "duration": duration,
        "memory": stats["memory"].rss,
        "user_time": stats["cpu_times"].user,
        "system_time": stats["cpu_times"].system,
        "cpu_time": stats["cpu_times"].user + stats["cpu_times"].system,
        "fds": stats["fds"],
        "exit": p.wait(),
    }
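profile_cmd() above averages only the non-zero CPU samples, so idle polling intervals at the start and end of a run do not drag the reported utilization toward zero. A tiny worked example of that filtering, with made-up sample values:

# Illustration of the zero-filtering average used above (sample values invented).
percents = [0, 0, 35.0, 42.0, 0, 28.0, 0]
utilization = [p for p in percents if p != 0]
avg = sum(utilization) / len(utilization) if utilization else 0
print(avg)  # 35.0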