/*
 * Copyright (c) 2014, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree. An additional grant
 * of patent rights can be found in the PATENTS file in the same directory.
 *
 */

#include <chrono>
#include <mutex>
#include <random>
#include <sstream>

#include <boost/property_tree/json_parser.hpp>
#include <boost/thread/shared_mutex.hpp>

#include <osquery/config.h>
#include <osquery/database.h>
#include <osquery/flags.h>
#include <osquery/hash.h>
#include <osquery/filesystem.h>
#include <osquery/logger.h>
#include <osquery/registry.h>
#include <osquery/tables.h>

namespace pt = boost::property_tree;

namespace osquery {

/// The config plugin must be known before reading options.
CLI_FLAG(string, config_plugin, "filesystem", "Config plugin name");
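
// Example (illustrative, not from this file): as a CLI_FLAG this is read
// before any config content, so it is set on the command line or in the
// flagfile, e.g. `osqueryd --config_plugin=tls`; the default "filesystem"
// plugin reads a JSON config from disk.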

/**
 * @brief The backing store key name for the executing query.
 *
 * The config maintains schedule statistics and tracks failed executions.
 * On process or worker resume an initializer or config may check if the
 * resume was the result of a failure during an executing query.
 */
const std::string kExecutingQuery = "executing_query";

// The config may be accessed and updated asynchronously; use mutexes.
boost::shared_mutex config_schedule_mutex_;
boost::shared_mutex config_performance_mutex_;
boost::shared_mutex config_files_mutex_;
boost::shared_mutex config_hash_mutex_;
boost::shared_mutex config_valid_mutex_;

Schedule::Schedule() {
  // Check if any queries were executing when the tool last stopped.
  getDatabaseValue(kPersistentSettings, kExecutingQuery, failed_query_);
  if (!failed_query_.empty()) {
    LOG(WARNING) << "Scheduled query may have failed: " << failed_query_;
    setDatabaseValue(kPersistentSettings, kExecutingQuery, "");
  }
}

void Config::addPack(const std::string& name,
                     const std::string& source,
                     const pt::ptree& tree) {
  WriteLock wlock(config_schedule_mutex_);
  try {
    schedule_.add(Pack(name, source, tree));
  } catch (const std::exception& e) {
    LOG(WARNING) << "Error adding pack: " << name << ": " << e.what();
  }
}

void Config::removePack(const std::string& pack) {
  WriteLock wlock(config_schedule_mutex_);
  return schedule_.remove(pack);
}

void Config::addFile(const std::string& category, const std::string& path) {
  WriteLock wlock(config_files_mutex_);
  files_[category].push_back(path);
}

void Config::scheduledQueries(std::function<
    void(const std::string& name, const ScheduledQuery& query)> predicate) {
  ReadLock rlock(config_schedule_mutex_);
  for (Pack& pack : schedule_) {
    for (const auto& it : pack.getSchedule()) {
      std::string name = it.first;
      if (pack.getName() != "main" && pack.getName() != "legacy_main") {
        name = "pack_" + pack.getName() + "_" + it.first;
      }
      predicate(name, it.second);
    }
  }
}
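
// Illustrative caller (not part of this file), assuming ScheduledQuery exposes
// its SQL as `query.query`:
//   Config::getInstance().scheduledQueries(
//       [](const std::string& name, const ScheduledQuery& query) {
//         LOG(INFO) << "scheduled " << name << ": " << query.query;
//       });
// Queries owned by a non-default pack arrive under the derived
// "pack_<pack>_<query>" name built above.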

void Config::packs(std::function<void(Pack& pack)> predicate) {
  ReadLock rlock(config_schedule_mutex_);
  for (Pack& pack : schedule_.packs_) {
    predicate(pack);
  }
}

void Config::clearSchedule() {
  WriteLock wlock(config_schedule_mutex_);
  schedule_ = Schedule();
}

void Config::clearHash() {
  WriteLock wlock(config_hash_mutex_);
  hash_.erase(hash_.begin(), hash_.end());
}

void Config::clearFiles() {
  WriteLock wlock(config_files_mutex_);
  files_.erase(files_.begin(), files_.end());
}

bool Config::isValid() {
  ReadLock rlock(config_valid_mutex_);
  return valid_;
}

Status Config::load() {
  valid_ = false;
  auto& config_plugin = Registry::getActive("config");
  if (!Registry::exists("config", config_plugin)) {
    return Status(1, "Missing config plugin " + config_plugin);
  }

  PluginResponse response;
  auto status = Registry::call("config", {{"action", "genConfig"}}, response);
  if (!status.ok()) {
    return status;
  }

  // Clear existing state.
  clearSchedule();
  clearHash();
  clearFiles();
  valid_ = true;

  // If there was a response, parse it and update internal state.
  if (response.size() > 0) {
    return update(response[0]);
  }

  return Status(0, "OK");
}

Status Config::updateSource(const std::string& name, const std::string& json) {
  hashSource(name, json);

  // Load the config (source.second) into a pt::ptree.
  std::stringstream json_stream;
  json_stream << json;
  pt::ptree tree;
  try {
    pt::read_json(json_stream, tree);
  } catch (const pt::json_parser::json_parser_error& e) {
    return Status(1, "Error parsing the config JSON");
  }
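
  // For reference, the top-level keys handled below are "schedule",
  // "scheduledQueries" (legacy), and "packs". A sketch of the expected input
  // (field names inside each query entry are illustrative):
  //   {
  //     "schedule": {"info": {"query": "SELECT ...", "interval": 60}},
  //     "scheduledQueries": [{"name": "info", "query": "SELECT ...", "interval": 60}],
  //     "packs": {"inline_pack": { ... }, "external_pack": "<value handed to genPack>"}
  //   }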

  // Extract the "schedule" key and store it as the main pack.
  if (tree.count("schedule") > 0) {
    auto& schedule = tree.get_child("schedule");
    pt::ptree main_pack;
    main_pack.add_child("queries", schedule);
    addPack("main", name, main_pack);
  }

  if (tree.count("scheduledQueries") > 0) {
    auto& scheduled_queries = tree.get_child("scheduledQueries");
    pt::ptree queries;
    for (const std::pair<std::string, pt::ptree>& query : scheduled_queries) {
      auto query_name = query.second.get<std::string>("name", "");
      if (query_name.empty()) {
        return Status(1, "Error getting name from legacy scheduled query");
      }
      queries.add_child(query_name, query.second);
    }
    pt::ptree legacy_pack;
    legacy_pack.add_child("queries", queries);
    addPack("legacy_main", name, legacy_pack);
  }

  // Extract the "packs" key into additional pack objects.
  if (tree.count("packs") > 0) {
    auto& packs = tree.get_child("packs");
    for (const auto& pack : packs) {
      auto value = packs.get<std::string>(pack.first, "");
      if (value.empty()) {
        addPack(pack.first, name, pack.second);
      } else {
        PluginResponse response;
        PluginRequest request = {
            {"action", "genPack"}, {"name", pack.first}, {"value", value}};
        Registry::call("config", request, response);

        if (response.size() > 0 && response[0].count(pack.first) > 0) {
          std::stringstream pack_stream;
          pack_stream << response[0][pack.first];
          pt::ptree pack_tree;
          try {
            pt::read_json(pack_stream, pack_tree);
            addPack(pack.first, name, pack_tree);
          } catch (const pt::json_parser::json_parser_error& e) {
            LOG(WARNING) << "Error parsing the pack JSON: " << pack.first;
          }
        }
      }
    }
  }

  for (const auto& plugin : Registry::all("config_parser")) {
    std::shared_ptr<ConfigParserPlugin> parser;
    try {
      parser = std::dynamic_pointer_cast<ConfigParserPlugin>(plugin.second);
    } catch (const std::bad_cast& e) {
      LOG(ERROR) << "Error casting config parser plugin: " << plugin.first;
    }
    if (parser == nullptr || parser.get() == nullptr) {
      continue;
    }

    // For each key requested by the parser, add a property tree reference.
    std::map<std::string, pt::ptree> parser_config;
    for (const auto& key : parser->keys()) {
      if (tree.count(key) > 0) {
        parser_config[key] = tree.get_child(key);
      } else {
        parser_config[key] = pt::ptree();
      }
    }

    // The config parser plugin will receive a copy of each property tree for
    // each top-level config key. The parser may choose to update the config's
    // internal state.
    parser->update(parser_config);
  }
  return Status(0, "OK");
}

Status Config::update(const std::map<std::string, std::string>& config) {
  // A config plugin may call update from an extension. This will update
  // the config instance within the extension process and the update must be
  // reflected in the core.
  if (Registry::external()) {
    for (const auto& source : config) {
      PluginRequest request = {
          {"action", "update"},
          {"source", source.first},
          {"data", source.second},
      };
      // An "update" registry item within core should call the core's update
      // method. The config plugin call action handling must also know to
      // update.
      Registry::call("config", "update", request);
    }
  }

  for (const auto& source : config) {
    auto status = updateSource(source.first, source.second);
    if (!status.ok()) {
      return status;
    }
  }

  return Status(0, "OK");
}

void Config::recordQueryPerformance(const std::string& name,
                                    size_t delay,
                                    size_t size,
                                    const Row& r0,
                                    const Row& r1) {
  WriteLock wlock(config_performance_mutex_);
  if (performance_.count(name) == 0) {
    performance_[name] = QueryPerformance();
  }

  // Grab access to the non-const schedule item.
  auto& query = performance_.at(name);
  auto diff = AS_LITERAL(BIGINT_LITERAL, r1.at("user_time")) -
              AS_LITERAL(BIGINT_LITERAL, r0.at("user_time"));
  if (diff > 0) {
    query.user_time += diff;
  }

  diff = AS_LITERAL(BIGINT_LITERAL, r1.at("system_time")) -
         AS_LITERAL(BIGINT_LITERAL, r0.at("system_time"));
  if (diff > 0) {
    query.system_time += diff;
  }

  diff = AS_LITERAL(BIGINT_LITERAL, r1.at("resident_size")) -
         AS_LITERAL(BIGINT_LITERAL, r0.at("resident_size"));
  if (diff > 0) {
    // Memory is stored as an average of RSS changes between query executions.
    query.average_memory = (query.average_memory * query.executions) + diff;
    query.average_memory = (query.average_memory / (query.executions + 1));
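    // For example: with executions == 3, average_memory == 100, and a new
    // diff of 140, the updated average is (100 * 3 + 140) / 4 == 110.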
  }

  query.wall_time += delay;
  query.output_size += size;
  query.executions += 1;
  query.last_executed = getUnixTime();

  // Clear the executing query (remove the dirty bit).
  setDatabaseValue(kPersistentSettings, kExecutingQuery, "");
}

void Config::recordQueryStart(const std::string& name) {
  // There should only ever be a single executing query in the schedule.
  setDatabaseValue(kPersistentSettings, kExecutingQuery, name);
}

void Config::getPerformanceStats(
    const std::string& name,
    std::function<void(const QueryPerformance& query)> predicate) {
  if (performance_.count(name) > 0) {
    ReadLock rlock(config_performance_mutex_);
    predicate(performance_.at(name));
  }
}

void Config::hashSource(const std::string& source, const std::string& content) {
  WriteLock wlock(config_hash_mutex_);
  hash_[source] =
      hashFromBuffer(HASH_TYPE_MD5, &(content.c_str())[0], content.size());
}

Status Config::getMD5(std::string& hash) {
  if (!valid_) {
    return Status(1, "Current config is not valid");
  }

  ReadLock rlock(config_hash_mutex_);
  std::vector<char> buffer;
  buffer.reserve(hash_.size() * 32);
  auto add = [&buffer](const std::string& text) {
    for (const auto& c : text) {
      buffer.push_back(c);
    }
  };
  for (const auto it : hash_) {
    add(it.second);
  }

  hash = hashFromBuffer(HASH_TYPE_MD5, &buffer[0], buffer.size());
  return Status(0, "OK");
}
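
// Note: the config hash above is the MD5 of the concatenated per-source MD5s,
// so it changes whenever any source's content changes. Assuming hash_ is an
// ordered map keyed by source name, the concatenation order (and therefore the
// resulting digest) is stable across runs.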

const std::shared_ptr<ConfigParserPlugin> Config::getParser(
    const std::string& parser) {
  std::shared_ptr<ConfigParserPlugin> config_parser = nullptr;
  try {
    auto plugin = Registry::get("config_parser", parser);
    config_parser = std::dynamic_pointer_cast<ConfigParserPlugin>(plugin);
  } catch (const std::out_of_range& e) {
    LOG(ERROR) << "Error getting config parser plugin " << parser << ": "
               << e.what();
  } catch (const std::bad_cast& e) {
    LOG(ERROR) << "Error casting " << parser
               << " as a ConfigParserPlugin: " << e.what();
  }
  return config_parser;
}

void Config::files(
    std::function<void(const std::string& category,
                       const std::vector<std::string>& files)> predicate) {
  ReadLock rlock(config_files_mutex_);
  for (const auto& it : files_) {
    predicate(it.first, it.second);
  }
}

Status ConfigPlugin::genPack(const std::string& name,
                             const std::string& value,
                             std::string& pack) {
  return Status(1, "Not implemented");
}

Status ConfigPlugin::call(const PluginRequest& request,
                          PluginResponse& response) {
  if (request.count("action") == 0) {
    return Status(1, "Config plugins require an action in PluginRequest");
  }

  if (request.at("action") == "genConfig") {
    std::map<std::string, std::string> config;
    auto stat = genConfig(config);
    response.push_back(config);
    return stat;
  } else if (request.at("action") == "genPack") {
    if (request.count("name") == 0 || request.count("value") == 0) {
      return Status(1, "Missing name or value");
    }
    std::string pack;
    auto stat = genPack(request.at("name"), request.at("value"), pack);
    response.push_back({{request.at("name"), pack}});
    return stat;
  } else if (request.at("action") == "update") {
    if (request.count("source") == 0 || request.count("data") == 0) {
      return Status(1, "Missing source or data");
    }
    return Config::getInstance().update(
        {{request.at("source"), request.at("data")}});
  }
  return Status(1, "Config plugin action unknown: " + request.at("action"));
}
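
// Illustrative request routed through call() above (not part of this file):
//   PluginRequest request = {{"action", "genPack"},
//                            {"name", "external_pack"},
//                            {"value", "<value from the config's packs key>"}};
// A plugin that does not support pack discovery can rely on the genPack()
// default above, which reports "Not implemented".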

Status ConfigParserPlugin::setUp() {
  for (const auto& key : keys()) {
    data_.put(key, "");
  }
  return Status(0, "OK");
}
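
// A minimal parser sketch (illustrative; assumes the ConfigParserPlugin
// interface exposes keys() and update() as used in updateSource above):
//   class ExampleConfigParserPlugin : public ConfigParserPlugin {
//    public:
//     std::vector<std::string> keys() { return {"example"}; }
//     Status update(const std::map<std::string, pt::ptree>& config) {
//       data_.put_child("example", config.at("example"));
//       return Status(0, "OK");
//     }
//   };
//   REGISTER(ExampleConfigParserPlugin, "config_parser", "example");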
}