Add battery info in host details response (#6394)

Parent: ae3ebf09ec
Commit: 539be8ee09

.github/pull_request_template.md (vendored; 2 lines changed)
@@ -3,7 +3,7 @@
 If some of the following don't apply, delete the relevant line.
 
 - [ ] Changes file added for user-visible changes (in `changes/` and/or `orbit/changes/`).
-- [ ] Documented any API changes (docs/Using-Fleet/REST-API.md)
+- [ ] Documented any API changes (docs/Using-Fleet/REST-API.md or docs/Contributing/API-for-contributors.md)
 - [ ] Documented any permissions changes
 - [ ] Ensured that input data is properly validated, SQL injection is prevented (using placeholders for values in statements)
 - [ ] Added support on fleet's osquery simulator `cmd/osquery-perf` for new osquery data ingestion features.
changes/issue-4904-add-host-battery-condition (new file; 1 line)

@@ -0,0 +1 @@
+* Added battery condition to host details API response for macOS hosts.
@@ -307,6 +307,9 @@ func TestGetHosts(t *testing.T) {
     ds.ListPacksForHostFunc = func(ctx context.Context, hid uint) (packs []*fleet.Pack, err error) {
         return make([]*fleet.Pack, 0), nil
     }
+    ds.ListHostBatteriesFunc = func(ctx context.Context, hid uint) (batteries []*fleet.HostBattery, err error) {
+        return nil, nil
+    }
     defaultPolicyQuery := "select 1 from osquery_info where start_time > 1;"
     ds.ListPoliciesForHostFunc = func(ctx context.Context, host *fleet.Host) ([]*fleet.HostPolicy, error) {
         return []*fleet.HostPolicy{
@@ -16,6 +16,7 @@ import (
     "net/http"
     "os"
     "path/filepath"
+    "strconv"
     "strings"
     "sync"
     "text/template"
@@ -660,6 +661,27 @@ func (a *agent) googleChromeProfiles() []map[string]string {
     return result
 }
 
+func (a *agent) batteries() []map[string]string {
+    count := rand.Intn(3) // return between 0 and 2 batteries
+    result := make([]map[string]string, count)
+    for i := range result {
+        health := "Good"
+        cycleCount := rand.Intn(2000)
+        switch {
+        case cycleCount > 1500:
+            health = "Poor"
+        case cycleCount > 1000:
+            health = "Fair"
+        }
+        result[i] = map[string]string{
+            "serial_number": fmt.Sprintf("%04d", i),
+            "cycle_count":   strconv.Itoa(cycleCount),
+            "health":        health,
+        }
+    }
+    return result
+}
+
 func (a *agent) processQuery(name, query string) (handled bool, results []map[string]string, status *fleet.OsqueryStatus) {
     const (
         hostPolicyQueryPrefix = "fleet_policy_query_"
@@ -695,6 +717,12 @@ func (a *agent) processQuery(name, query string) (handled bool, results []map[st
             results = a.googleChromeProfiles()
         }
         return true, results, &ss
+    case name == hostDetailQueryPrefix+"battery":
+        ss := fleet.OsqueryStatus(rand.Intn(2))
+        if ss == fleet.StatusOK {
+            results = a.batteries()
+        }
+        return true, results, &ss
     default:
         // Look for results in the template file.
         if t := a.templates.Lookup(name); t == nil {
@@ -2134,7 +2134,13 @@ If the scheduled queries haven't run on the host yet, the stats have zero values
     "issues": {
       "failing_policies_count": 2,
       "total_issues_count": 2
-    }
+    },
+    "batteries": [
+      {
+        "cycle_count": 999,
+        "health": "Good"
+      }
+    ]
   }
 }
 ```
@@ -2201,7 +2207,13 @@ Returns the information of the host specified using the `uuid`, `osquery_host_id
     "team_name": null,
     "gigs_disk_space_available": 45.86,
     "percent_disk_space_available": 73,
-    "pack_stats": null
+    "pack_stats": null,
+    "batteries": [
+      {
+        "cycle_count": 999,
+        "health": "Good"
+      }
+    ]
   }
 }
 ```
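For orientation, a minimal client-side sketch (not part of the commit) of consuming the documented response: it calls the `/api/latest/fleet/hosts/{id}` endpoint exercised by the integration tests in this commit and prints the new `batteries` entries. The base URL, host ID, and bearer token are placeholders.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

type hostBattery struct {
	CycleCount int    `json:"cycle_count"`
	Health     string `json:"health"`
}

type hostDetailResponse struct {
	Host struct {
		ID        uint          `json:"id"`
		Batteries []hostBattery `json:"batteries"`
	} `json:"host"`
}

func main() {
	// Placeholder URL and token; the path shape matches the docs above.
	req, err := http.NewRequest("GET", "https://fleet.example.com/api/latest/fleet/hosts/42", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer <api-token>") // token auth assumed

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var detail hostDetailResponse
	if err := json.NewDecoder(resp.Body).Decode(&detail); err != nil {
		panic(err)
	}
	for _, b := range detail.Host.Batteries {
		fmt.Printf("battery: %d cycles, condition %q\n", b.CycleCount, b.Health)
	}
}
```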
@@ -282,6 +282,7 @@ var hostRefs = []string{
     "host_mdm",
     "host_munki_info",
     "host_device_auth",
+    "host_batteries",
 }
 
 func (ds *Datastore) DeleteHost(ctx context.Context, hid uint) error {
@@ -1244,6 +1245,71 @@ func (ds *Datastore) ReplaceHostDeviceMapping(ctx context.Context, hid uint, map
     })
 }
 
+func (ds *Datastore) ReplaceHostBatteries(ctx context.Context, hid uint, mappings []*fleet.HostBattery) error {
+    const (
+        replaceStmt = `
+          INSERT INTO
+            host_batteries (
+              host_id,
+              serial_number,
+              cycle_count,
+              health
+            )
+          VALUES
+            %s
+          ON DUPLICATE KEY UPDATE
+            cycle_count = VALUES(cycle_count),
+            health = VALUES(health),
+            updated_at = CURRENT_TIMESTAMP
+        `
+        valuesPart = `(?, ?, ?, ?),`
+
+        deleteExceptStmt = `
+          DELETE FROM
+            host_batteries
+          WHERE
+            host_id = ? AND
+            serial_number NOT IN (?)
+        `
+        deleteAllStmt = `
+          DELETE FROM
+            host_batteries
+          WHERE
+            host_id = ?
+        `
+    )
+
+    replaceArgs := make([]interface{}, 0, len(mappings)*4)
+    deleteNotIn := make([]string, 0, len(mappings))
+    for _, hb := range mappings {
+        deleteNotIn = append(deleteNotIn, hb.SerialNumber)
+        replaceArgs = append(replaceArgs, hid, hb.SerialNumber, hb.CycleCount, hb.Health)
+    }
+
+    return ds.withRetryTxx(ctx, func(tx sqlx.ExtContext) error {
+        // first, insert the new batteries or update the existing ones
+        if len(replaceArgs) > 0 {
+            if _, err := tx.ExecContext(ctx, fmt.Sprintf(replaceStmt, strings.TrimSuffix(strings.Repeat(valuesPart, len(mappings)), ",")), replaceArgs...); err != nil {
+                return ctxerr.Wrap(ctx, err, "upsert host batteries")
+            }
+        }
+
+        // then, delete the old ones
+        if len(deleteNotIn) > 0 {
+            delStmt, args, err := sqlx.In(deleteExceptStmt, hid, deleteNotIn)
+            if err != nil {
+                return ctxerr.Wrap(ctx, err, "generating host batteries delete NOT IN statement")
+            }
+            if _, err := tx.ExecContext(ctx, delStmt, args...); err != nil {
+                return ctxerr.Wrap(ctx, err, "delete host batteries")
+            }
+        } else if _, err := tx.ExecContext(ctx, deleteAllStmt, hid); err != nil {
+            return ctxerr.Wrap(ctx, err, "delete all host batteries")
+        }
+        return nil
+    })
+}
+
 func (ds *Datastore) updateOrInsert(ctx context.Context, updateQuery string, insertQuery string, args ...interface{}) error {
     res, err := ds.writer.ExecContext(ctx, updateQuery, args...)
     if err != nil {
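To make the replace-then-prune flow above concrete, a standalone sketch (not part of the commit) of the SQL a call with two batteries produces: one multi-row upsert keyed on the unique `(host_id, serial_number)` index, followed by a `NOT IN` delete for serial numbers no longer reported.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	const valuesPart = `(?, ?, ?, ?),`
	numBatteries := 2

	// Same placeholder expansion as ReplaceHostBatteries: one 4-tuple per battery.
	upsert := fmt.Sprintf(
		`INSERT INTO host_batteries (host_id, serial_number, cycle_count, health)
VALUES %s
ON DUPLICATE KEY UPDATE cycle_count = VALUES(cycle_count), health = VALUES(health), updated_at = CURRENT_TIMESTAMP`,
		strings.TrimSuffix(strings.Repeat(valuesPart, numBatteries), ","),
	)
	fmt.Println(upsert) // VALUES (?, ?, ?, ?),(?, ?, ?, ?) ...

	// Batteries whose serial number was not reported this time are then removed;
	// sqlx.In expands the single ? of the NOT IN clause to one ? per serial.
	fmt.Println(`DELETE FROM host_batteries WHERE host_id = ? AND serial_number NOT IN (?, ?)`)
}
```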
@@ -1820,3 +1886,23 @@ func (ds *Datastore) HostIDsByOSVersion(
 
     return ids, nil
 }
+
+func (ds *Datastore) ListHostBatteries(ctx context.Context, hid uint) ([]*fleet.HostBattery, error) {
+    const stmt = `
+      SELECT
+        host_id,
+        serial_number,
+        cycle_count,
+        health
+      FROM
+        host_batteries
+      WHERE
+        host_id = ?
+    `
+
+    var batteries []*fleet.HostBattery
+    if err := sqlx.SelectContext(ctx, ds.reader, &batteries, stmt, hid); err != nil {
+        return nil, ctxerr.Wrap(ctx, err, "select host batteries")
+    }
+    return batteries, nil
+}
|
@ -118,6 +118,7 @@ func TestHosts(t *testing.T) {
|
|||||||
{"DeleteHosts", testHostsDeleteHosts},
|
{"DeleteHosts", testHostsDeleteHosts},
|
||||||
{"HostIDsByOSVersion", testHostIDsByOSVersion},
|
{"HostIDsByOSVersion", testHostIDsByOSVersion},
|
||||||
{"ShouldCleanTeamPolicies", testShouldCleanTeamPolicies},
|
{"ShouldCleanTeamPolicies", testShouldCleanTeamPolicies},
|
||||||
|
{"ReplaceHostBatteries", testHostsReplaceHostBatteries},
|
||||||
}
|
}
|
||||||
for _, c := range cases {
|
for _, c := range cases {
|
||||||
t.Run(c.name, func(t *testing.T) {
|
t.Run(c.name, func(t *testing.T) {
|
||||||
@@ -4172,6 +4173,9 @@ func testHostsDeleteHosts(t *testing.T, ds *Datastore) {
     // Update device_auth_token.
     err = ds.SetOrUpdateDeviceAuthToken(context.Background(), host.ID, "foo")
     require.NoError(t, err)
+    // Update host_batteries
+    err = ds.ReplaceHostBatteries(context.Background(), host.ID, []*fleet.HostBattery{{HostID: host.ID, SerialNumber: "a"}})
+    require.NoError(t, err)
 
     // Check there's an entry for the host in all the associated tables.
     for _, hostRef := range hostRefs {
@@ -4260,3 +4264,93 @@ func testHostIDsByOSVersion(t *testing.T, ds *Datastore) {
         }
     })
 }
+
+func testHostsReplaceHostBatteries(t *testing.T, ds *Datastore) {
+    ctx := context.Background()
+    h1, err := ds.NewHost(ctx, &fleet.Host{
+        ID:              1,
+        OsqueryHostID:   "1",
+        NodeKey:         "1",
+        Platform:        "linux",
+        Hostname:        "host1",
+        DetailUpdatedAt: time.Now(),
+        LabelUpdatedAt:  time.Now(),
+        PolicyUpdatedAt: time.Now(),
+        SeenTime:        time.Now(),
+    })
+    require.NoError(t, err)
+    h2, err := ds.NewHost(ctx, &fleet.Host{
+        ID:              2,
+        OsqueryHostID:   "2",
+        NodeKey:         "2",
+        Platform:        "linux",
+        Hostname:        "host2",
+        DetailUpdatedAt: time.Now(),
+        LabelUpdatedAt:  time.Now(),
+        PolicyUpdatedAt: time.Now(),
+        SeenTime:        time.Now(),
+    })
+    require.NoError(t, err)
+
+    err = ds.ReplaceHostBatteries(ctx, h1.ID, nil)
+    require.NoError(t, err)
+
+    bat1, err := ds.ListHostBatteries(ctx, h1.ID)
+    require.NoError(t, err)
+    require.Len(t, bat1, 0)
+
+    h1Bat := []*fleet.HostBattery{
+        {HostID: h1.ID, SerialNumber: "a", CycleCount: 1, Health: "Good"},
+        {HostID: h1.ID, SerialNumber: "b", CycleCount: 2, Health: "Good"},
+    }
+    err = ds.ReplaceHostBatteries(ctx, h1.ID, h1Bat)
+    require.NoError(t, err)
+
+    bat1, err = ds.ListHostBatteries(ctx, h1.ID)
+    require.NoError(t, err)
+    require.ElementsMatch(t, h1Bat, bat1)
+
+    bat2, err := ds.ListHostBatteries(ctx, h2.ID)
+    require.NoError(t, err)
+    require.Len(t, bat2, 0)
+
+    // update "a", remove "b", add "c"
+    h1Bat = []*fleet.HostBattery{
+        {HostID: h1.ID, SerialNumber: "a", CycleCount: 2, Health: "Good"},
+        {HostID: h1.ID, SerialNumber: "c", CycleCount: 3, Health: "Bad"},
+    }
+
+    err = ds.ReplaceHostBatteries(ctx, h1.ID, h1Bat)
+    require.NoError(t, err)
+
+    bat1, err = ds.ListHostBatteries(ctx, h1.ID)
+    require.NoError(t, err)
+    require.ElementsMatch(t, h1Bat, bat1)
+
+    // add "d" to h2
+    h2Bat := []*fleet.HostBattery{
+        {HostID: h2.ID, SerialNumber: "d", CycleCount: 1, Health: "Good"},
+    }
+
+    err = ds.ReplaceHostBatteries(ctx, h2.ID, h2Bat)
+    require.NoError(t, err)
+
+    bat2, err = ds.ListHostBatteries(ctx, h2.ID)
+    require.NoError(t, err)
+    require.ElementsMatch(t, h2Bat, bat2)
+
+    // remove all from h1
+    h1Bat = []*fleet.HostBattery{}
+
+    err = ds.ReplaceHostBatteries(ctx, h1.ID, h1Bat)
+    require.NoError(t, err)
+
+    bat1, err = ds.ListHostBatteries(ctx, h1.ID)
+    require.NoError(t, err)
+    require.Len(t, bat1, 0)
+
+    // h2 unchanged
+    bat2, err = ds.ListHostBatteries(ctx, h2.ID)
+    require.NoError(t, err)
+    require.ElementsMatch(t, h2Bat, bat2)
+}
New file (database migration, package tables; 37 lines):

@@ -0,0 +1,37 @@
+package tables
+
+import (
+    "database/sql"
+
+    "github.com/pkg/errors"
+)
+
+func init() {
+    MigrationClient.AddMigration(Up_20220627104817, Down_20220627104817)
+}
+
+func Up_20220627104817(tx *sql.Tx) error {
+    // there may be many batteries per host, so the primary key is an
+    // auto-increment, not the host_id.
+    _, err := tx.Exec(`
+      CREATE TABLE host_batteries (
+        id INT(10) UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY,
+        host_id INT(10) UNSIGNED NOT NULL,
+        serial_number VARCHAR(255) NOT NULL,
+        cycle_count INT(10) NOT NULL,
+        health VARCHAR(10) NOT NULL,
+        created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+        updated_at TIMESTAMP NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+
+        UNIQUE KEY idx_host_batteries_host_id_serial_number (host_id, serial_number)
+      )`)
+    if err != nil {
+        return errors.Wrapf(err, "create table")
+    }
+
+    return nil
+}
+
+func Down_20220627104817(tx *sql.Tx) error {
+    return nil
+}
New file (migration test, package tables; 39 lines):

@@ -0,0 +1,39 @@
+package tables
+
+import (
+    "testing"
+
+    "github.com/stretchr/testify/require"
+)
+
+func TestUp_20220627104817(t *testing.T) {
+    db := applyUpToPrev(t)
+
+    applyNext(t, db)
+
+    query := `
+      INSERT INTO host_batteries (
+        host_id,
+        serial_number,
+        cycle_count,
+        health
+      )
+      VALUES (?, ?, ?, ?)
+    `
+    _, err := db.Exec(query, 1, "abc", 2, "Good")
+    require.NoError(t, err)
+
+    var (
+        hostID       uint
+        serialNumber string
+        cycleCount   int
+        health       string
+    )
+    err = db.QueryRow(`SELECT host_id, serial_number, cycle_count, health FROM host_batteries WHERE host_id = ?`, 1).
+        Scan(&hostID, &serialNumber, &cycleCount, &health)
+    require.NoError(t, err)
+    require.Equal(t, uint(1), hostID)
+    require.Equal(t, "abc", serialNumber)
+    require.Equal(t, 2, cycleCount)
+    require.Equal(t, "Good", health)
+}

(File diff suppressed because one or more lines are too long.)
@@ -214,6 +214,8 @@ type Datastore interface {
     CountHosts(ctx context.Context, filter TeamFilter, opt HostListOptions) (int, error)
     CountHostsInLabel(ctx context.Context, filter TeamFilter, lid uint, opt HostListOptions) (int, error)
     ListHostDeviceMapping(ctx context.Context, id uint) ([]*HostDeviceMapping, error)
+    // ListHostBatteries returns the list of batteries for the given host ID.
+    ListHostBatteries(ctx context.Context, id uint) ([]*HostBattery, error)
 
     // LoadHostByDeviceAuthToken loads the host identified by the device auth token.
     // If the token is invalid it returns a NotFoundError.
@@ -529,6 +531,9 @@ type Datastore interface {
 
     ReplaceHostDeviceMapping(ctx context.Context, id uint, mappings []*HostDeviceMapping) error
 
+    // ReplaceHostBatteries creates or updates the battery mappings of a host.
+    ReplaceHostBatteries(ctx context.Context, id uint, mappings []*HostBattery) error
+
     // VerifyEnrollSecret checks that the provided secret matches an active enroll secret. If it is successfully
     // matched, that secret is returned. Otherwise, an error is returned.
     VerifyEnrollSecret(ctx context.Context, secret string) (*EnrollSecret, error)
@@ -157,6 +157,11 @@ type HostDetail struct {
     Packs []*Pack `json:"packs"`
     // Policies is the list of policies and whether it passes for the host
     Policies *[]*HostPolicy `json:"policies,omitempty"`
+    // Batteries is the list of batteries for the host. It is a pointer to a
+    // slice so that when set, it gets marshaled even if the slice is empty,
+    // but when unset, it doesn't get marshaled (e.g. we don't return that
+    // information for the List Hosts endpoint).
+    Batteries *[]*HostBattery `json:"batteries,omitempty"`
 }
 
 const (
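A standalone sketch of the marshaling behavior the `Batteries` comment relies on: with `omitempty`, a nil pointer drops the key entirely, while a pointer to an empty slice still emits `"batteries": []`.

```go
package main

import (
	"encoding/json"
	"fmt"
)

type battery struct {
	CycleCount int    `json:"cycle_count"`
	Health     string `json:"health"`
}

type detail struct {
	Batteries *[]*battery `json:"batteries,omitempty"`
}

func main() {
	unset, _ := json.Marshal(detail{}) // nil pointer: key omitted
	empty := []*battery{}
	set, _ := json.Marshal(detail{Batteries: &empty}) // non-nil pointer to empty slice: key kept

	fmt.Println(string(unset)) // {}
	fmt.Println(string(set))   // {"batteries":[]}
}
```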
@@ -286,6 +291,15 @@ type HostMDM struct {
     ServerURL string `json:"server_url"`
 }
 
+// HostBattery represents a host's battery, as reported by the osquery battery
+// table.
+type HostBattery struct {
+    HostID       uint   `json:"-" db:"host_id"`
+    SerialNumber string `json:"-" db:"serial_number"`
+    CycleCount   int    `json:"cycle_count" db:"cycle_count"`
+    Health       string `json:"health" db:"health"`
+}
+
 type MacadminsData struct {
     Munki *HostMunkiInfo `json:"munki"`
     MDM   *HostMDM       `json:"mobile_device_management"`
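Because `HostID` and `SerialNumber` carry `json:"-"`, only `cycle_count` and `health` ever reach API clients, which is why the tests further down compare against `HostBattery` values with empty host ID and serial number. A standalone sketch, with the struct copied from the hunk above:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// HostBattery mirrors the struct added above; the db tags are inert here.
type HostBattery struct {
	HostID       uint   `json:"-" db:"host_id"`
	SerialNumber string `json:"-" db:"serial_number"`
	CycleCount   int    `json:"cycle_count" db:"cycle_count"`
	Health       string `json:"health" db:"health"`
}

func main() {
	out, _ := json.Marshal(HostBattery{HostID: 1, SerialNumber: "abc", CycleCount: 999, Health: "Good"})
	fmt.Println(string(out)) // {"cycle_count":999,"health":"Good"}
}
```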
@@ -178,6 +178,8 @@ type CountHostsInLabelFunc func(ctx context.Context, filter fleet.TeamFilter, li
 
 type ListHostDeviceMappingFunc func(ctx context.Context, id uint) ([]*fleet.HostDeviceMapping, error)
 
+type ListHostBatteriesFunc func(ctx context.Context, id uint) ([]*fleet.HostBattery, error)
+
 type LoadHostByDeviceAuthTokenFunc func(ctx context.Context, authToken string) (*fleet.Host, error)
 
 type SetOrUpdateDeviceAuthTokenFunc func(ctx context.Context, hostID uint, authToken string) error
@@ -400,6 +402,8 @@ type SetOrUpdateMDMDataFunc func(ctx context.Context, hostID uint, enrolled bool
 
 type ReplaceHostDeviceMappingFunc func(ctx context.Context, id uint, mappings []*fleet.HostDeviceMapping) error
 
+type ReplaceHostBatteriesFunc func(ctx context.Context, id uint, mappings []*fleet.HostBattery) error
+
 type VerifyEnrollSecretFunc func(ctx context.Context, secret string) (*fleet.EnrollSecret, error)
 
 type EnrollHostFunc func(ctx context.Context, osqueryHostId string, nodeKey string, teamID *uint, cooldown time.Duration) (*fleet.Host, error)
@@ -666,6 +670,9 @@ type DataStore struct {
     ListHostDeviceMappingFunc        ListHostDeviceMappingFunc
     ListHostDeviceMappingFuncInvoked bool
 
+    ListHostBatteriesFunc        ListHostBatteriesFunc
+    ListHostBatteriesFuncInvoked bool
+
     LoadHostByDeviceAuthTokenFunc        LoadHostByDeviceAuthTokenFunc
     LoadHostByDeviceAuthTokenFuncInvoked bool
 
@@ -999,6 +1006,9 @@ type DataStore struct {
     ReplaceHostDeviceMappingFunc        ReplaceHostDeviceMappingFunc
     ReplaceHostDeviceMappingFuncInvoked bool
 
+    ReplaceHostBatteriesFunc        ReplaceHostBatteriesFunc
+    ReplaceHostBatteriesFuncInvoked bool
+
     VerifyEnrollSecretFunc        VerifyEnrollSecretFunc
     VerifyEnrollSecretFuncInvoked bool
 
@@ -1439,6 +1449,11 @@ func (s *DataStore) ListHostDeviceMapping(ctx context.Context, id uint) ([]*flee
     return s.ListHostDeviceMappingFunc(ctx, id)
 }
 
+func (s *DataStore) ListHostBatteries(ctx context.Context, id uint) ([]*fleet.HostBattery, error) {
+    s.ListHostBatteriesFuncInvoked = true
+    return s.ListHostBatteriesFunc(ctx, id)
+}
+
 func (s *DataStore) LoadHostByDeviceAuthToken(ctx context.Context, authToken string) (*fleet.Host, error) {
     s.LoadHostByDeviceAuthTokenFuncInvoked = true
     return s.LoadHostByDeviceAuthTokenFunc(ctx, authToken)
@@ -1994,6 +2009,11 @@ func (s *DataStore) ReplaceHostDeviceMapping(ctx context.Context, id uint, mappi
     return s.ReplaceHostDeviceMappingFunc(ctx, id, mappings)
 }
 
+func (s *DataStore) ReplaceHostBatteries(ctx context.Context, id uint, mappings []*fleet.HostBattery) error {
+    s.ReplaceHostBatteriesFuncInvoked = true
+    return s.ReplaceHostBatteriesFunc(ctx, id, mappings)
+}
+
 func (s *DataStore) VerifyEnrollSecret(ctx context.Context, secret string) (*fleet.EnrollSecret, error) {
     s.VerifyEnrollSecretFuncInvoked = true
     return s.VerifyEnrollSecretFunc(ctx, secret)
@@ -717,6 +717,11 @@ func (svc *Service) getHostDetails(ctx context.Context, host *fleet.Host, opts f
         return nil, ctxerr.Wrap(ctx, err, "get packs for host")
     }
 
+    bats, err := svc.ds.ListHostBatteries(ctx, host.ID)
+    if err != nil {
+        return nil, ctxerr.Wrap(ctx, err, "get batteries for host")
+    }
+
     var policies *[]*fleet.HostPolicy
     if opts.IncludePolicies {
         hp, err := svc.ds.ListPoliciesForHost(ctx, host)
@@ -731,7 +736,13 @@ func (svc *Service) getHostDetails(ctx context.Context, host *fleet.Host, opts f
         policies = &hp
     }
 
-    return &fleet.HostDetail{Host: *host, Labels: labels, Packs: packs, Policies: policies}, nil
+    return &fleet.HostDetail{
+        Host:      *host,
+        Labels:    labels,
+        Packs:     packs,
+        Policies:  policies,
+        Batteries: &bats,
+    }, nil
 }
 
 func (svc *Service) hostIDsFromFilters(ctx context.Context, opt fleet.HostListOptions, lid *uint) ([]uint, error) {
@@ -50,6 +50,10 @@ func TestHostDetails(t *testing.T) {
     ds.ListPoliciesForHostFunc = func(ctx context.Context, host *fleet.Host) ([]*fleet.HostPolicy, error) {
         return nil, nil
     }
+    expectedBats := []*fleet.HostBattery{{HostID: host.ID, SerialNumber: "a"}}
+    ds.ListHostBatteriesFunc = func(ctx context.Context, hostID uint) ([]*fleet.HostBattery, error) {
+        return expectedBats, nil
+    }
 
     opts := fleet.HostDetailOptions{
         IncludeCVEScores: false,
@@ -59,6 +63,8 @@ func TestHostDetails(t *testing.T) {
     require.NoError(t, err)
     assert.Equal(t, expectedLabels, hostDetail.Labels)
     assert.Equal(t, expectedPacks, hostDetail.Packs)
+    require.NotNil(t, hostDetail.Batteries)
+    assert.Equal(t, expectedBats, *hostDetail.Batteries)
 }
 
 func TestHostAuth(t *testing.T) {
@@ -107,6 +113,9 @@ func TestHostAuth(t *testing.T) {
     ds.ListPoliciesForHostFunc = func(ctx context.Context, host *fleet.Host) ([]*fleet.HostPolicy, error) {
         return nil, nil
     }
+    ds.ListHostBatteriesFunc = func(ctx context.Context, hostID uint) ([]*fleet.HostBattery, error) {
+        return nil, nil
+    }
     ds.DeleteHostsFunc = func(ctx context.Context, ids []uint) error {
         return nil
     }
@@ -4627,6 +4627,10 @@ func (s *integrationTestSuite) TestDeviceAuthenticatedEndpoints() {
     })
     require.NoError(t, s.ds.SetOrUpdateMDMData(context.Background(), hosts[0].ID, true, "url", false))
     require.NoError(t, s.ds.SetOrUpdateMunkiVersion(context.Background(), hosts[0].ID, "1.3.0"))
+    // create a battery for hosts[0]
+    require.NoError(t, s.ds.ReplaceHostBatteries(context.Background(), hosts[0].ID, []*fleet.HostBattery{
+        {HostID: hosts[0].ID, SerialNumber: "a", CycleCount: 1, Health: "Good"},
+    }))
 
     // create an auth token for hosts[0]
     token := "much_valid"
@@ -4652,6 +4656,8 @@ func (s *integrationTestSuite) TestDeviceAuthenticatedEndpoints() {
     require.False(t, getHostResp.Host.RefetchRequested)
     require.Equal(t, "http://example.com/logo", getHostResp.OrgLogoURL)
     require.Nil(t, getHostResp.Host.Policies)
+    require.NotNil(t, getHostResp.Host.Batteries)
+    require.Equal(t, &fleet.HostBattery{CycleCount: 1, Health: "Good"}, (*getHostResp.Host.Batteries)[0])
     hostDevResp := getHostResp.Host
 
     // make request for same host on the host details API endpoint, responses should match, except for policies
|
|||||||
require.Contains(t, string(body), "/login?status=org_disabled") // html contains a script that redirects to this path
|
require.Contains(t, string(body), "/login?status=org_disabled") // html contains a script that redirects to this path
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *integrationTestSuite) TestGetHostBatteries() {
|
||||||
|
t := s.T()
|
||||||
|
|
||||||
|
host, err := s.ds.NewHost(context.Background(), &fleet.Host{
|
||||||
|
DetailUpdatedAt: time.Now(),
|
||||||
|
LabelUpdatedAt: time.Now(),
|
||||||
|
PolicyUpdatedAt: time.Now(),
|
||||||
|
SeenTime: time.Now(),
|
||||||
|
NodeKey: strings.ReplaceAll(t.Name(), "/", "_") + "1",
|
||||||
|
UUID: t.Name() + "1",
|
||||||
|
Hostname: t.Name() + "foo.local",
|
||||||
|
PrimaryIP: "192.168.1.1",
|
||||||
|
PrimaryMac: "30-65-EC-6F-C4-58",
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
bats := []*fleet.HostBattery{
|
||||||
|
{HostID: host.ID, SerialNumber: "a", CycleCount: 1, Health: "Good"},
|
||||||
|
{HostID: host.ID, SerialNumber: "b", CycleCount: 2, Health: "Poor"},
|
||||||
|
}
|
||||||
|
require.NoError(t, s.ds.ReplaceHostBatteries(context.Background(), host.ID, bats))
|
||||||
|
|
||||||
|
var getHostResp getHostResponse
|
||||||
|
s.DoJSON("GET", fmt.Sprintf("/api/latest/fleet/hosts/%d", host.ID), nil, http.StatusOK, &getHostResp)
|
||||||
|
require.Equal(t, host.ID, getHostResp.Host.ID)
|
||||||
|
// only cycle count and health are returned
|
||||||
|
require.ElementsMatch(t, []*fleet.HostBattery{
|
||||||
|
{CycleCount: 1, Health: "Good"},
|
||||||
|
{CycleCount: 2, Health: "Poor"},
|
||||||
|
}, *getHostResp.Host.Batteries)
|
||||||
|
|
||||||
|
// same for get host by identifier
|
||||||
|
s.DoJSON("GET", fmt.Sprintf("/api/latest/fleet/hosts/identifier/%s", host.NodeKey), nil, http.StatusOK, &getHostResp)
|
||||||
|
require.Equal(t, host.ID, getHostResp.Host.ID)
|
||||||
|
// only cycle count and health are returned
|
||||||
|
require.ElementsMatch(t, []*fleet.HostBattery{
|
||||||
|
{CycleCount: 1, Health: "Good"},
|
||||||
|
{CycleCount: 2, Health: "Poor"},
|
||||||
|
}, *getHostResp.Host.Batteries)
|
||||||
|
}
|
||||||
|
|
||||||
// this test can be deleted once the "v1" version is removed.
|
// this test can be deleted once the "v1" version is removed.
|
||||||
func (s *integrationTestSuite) TestAPIVersion_v1_2022_04() {
|
func (s *integrationTestSuite) TestAPIVersion_v1_2022_04() {
|
||||||
t := s.T()
|
t := s.T()
|
||||||
|
@@ -187,7 +187,7 @@ func TestAgentOptionsForHost(t *testing.T) {
 }
 
 // One of these queries is the disk space, only one of the two works in a platform
-var expectedDetailQueries = len(osquery_utils.GetDetailQueries(&fleet.AppConfig{HostSettings: fleet.HostSettings{EnableHostUsers: true}}, config.FleetConfig{})) - 1
+var expectedDetailQueries = osquery_utils.GetDetailQueries(&fleet.AppConfig{HostSettings: fleet.HostSettings{EnableHostUsers: true}}, config.FleetConfig{})
 
 func TestEnrollAgent(t *testing.T) {
     ds := new(mock.Store)
@@ -556,7 +556,8 @@ func TestHostDetailQueries(t *testing.T) {
 
     queries, discovery, err = svc.detailQueriesForHost(context.Background(), &host)
     require.NoError(t, err)
-    require.Len(t, queries, expectedDetailQueries+2)
+    // +1 because 2 additional queries, but -1 due to removed disk space query (only 1 of 2 active for a given platform)
+    require.Equal(t, len(expectedDetailQueries)+1, len(queries), distQueriesMapKeys(queries))
     verifyDiscovery(t, queries, discovery)
     for name := range queries {
         assert.True(t,
@@ -610,7 +611,8 @@ func TestLabelQueries(t *testing.T) {
     // should be turned on so that we can quickly fill labels)
     queries, discovery, acc, err := svc.GetDistributedQueries(ctx)
     require.NoError(t, err)
-    require.Len(t, queries, expectedDetailQueries)
+    // -1 due to removed disk space query (only 1 of 2 active for a given platform)
+    require.Equal(t, len(expectedDetailQueries)-1, len(queries), distQueriesMapKeys(queries))
     verifyDiscovery(t, queries, discovery)
     assert.NotZero(t, acc)
 
@@ -700,7 +702,8 @@ func TestLabelQueries(t *testing.T) {
     ctx = hostctx.NewContext(ctx, host)
     queries, discovery, acc, err = svc.GetDistributedQueries(ctx)
     require.NoError(t, err)
-    require.Len(t, queries, expectedDetailQueries+3)
+    // +3 for label queries, -1 due to removed disk space query (only 1 of 2 active for a given platform)
+    require.Equal(t, len(expectedDetailQueries)+2, len(queries), distQueriesMapKeys(queries))
     verifyDiscovery(t, queries, discovery)
     assert.Zero(t, acc)
 
@@ -768,7 +771,13 @@ func TestDetailQueriesWithEmptyStrings(t *testing.T) {
     // queries)
     queries, discovery, acc, err := svc.GetDistributedQueries(ctx)
     require.NoError(t, err)
-    require.Len(t, queries, expectedDetailQueries-2)
+    // -4 due to windows not having battery, mdm, munki_info and removed disk space query (only 1 of 2 active for a given platform)
+    if !assert.Equal(t, len(expectedDetailQueries)-4, len(queries)) {
+        // this is just to print the diff between the expected and actual query
+        // keys when the count assertion fails, to help debugging - they are not
+        // expected to match.
+        require.ElementsMatch(t, osqueryMapKeys(expectedDetailQueries), distQueriesMapKeys(queries))
+    }
     verifyDiscovery(t, queries, discovery)
     assert.NotZero(t, acc)
 
@@ -920,7 +929,10 @@ func TestDetailQueriesWithEmptyStrings(t *testing.T) {
 
     queries, discovery, acc, err = svc.GetDistributedQueries(ctx)
     require.NoError(t, err)
-    require.Len(t, queries, expectedDetailQueries)
+    // somehow confusingly, the query response above changed the host's platform
+    // from windows to darwin, so now it has all expected queries except the
+    // extra disk space one.
+    require.Equal(t, len(expectedDetailQueries)-1, len(queries), distQueriesMapKeys(queries))
     verifyDiscovery(t, queries, discovery)
     assert.Zero(t, acc)
 }
@@ -974,7 +986,14 @@ func TestDetailQueries(t *testing.T) {
     // queries)
     queries, discovery, acc, err := svc.GetDistributedQueries(ctx)
     require.NoError(t, err)
-    require.Len(t, queries, expectedDetailQueries-1)
+    // -4 due to linux platform, so battery, mdm and munki are missing, and the extra disk space query,
+    // then +1 due to software inventory being enabled.
+    if !assert.Equal(t, len(expectedDetailQueries)-3, len(queries)) {
+        // this is just to print the diff between the expected and actual query
+        // keys when the count assertion fails, to help debugging - they are not
+        // expected to match.
+        require.ElementsMatch(t, osqueryMapKeys(expectedDetailQueries), distQueriesMapKeys(queries))
+    }
     verifyDiscovery(t, queries, discovery)
     assert.NotZero(t, acc)
 
@@ -1230,7 +1249,9 @@ func TestDetailQueries(t *testing.T) {
 
     queries, discovery, acc, err = svc.GetDistributedQueries(ctx)
     require.NoError(t, err)
-    require.Len(t, queries, expectedDetailQueries+1)
+    // host platform changed to darwin, so all queries are present - that is, -1 for the
+    // extra disk space query, +1 for the software inventory enabled.
+    require.Equal(t, len(expectedDetailQueries), len(queries), distQueriesMapKeys(queries))
     verifyDiscovery(t, queries, discovery)
     assert.Zero(t, acc)
 }
@@ -1354,7 +1375,13 @@ func TestDistributedQueryResults(t *testing.T) {
     // Now we should get the active distributed query
     queries, discovery, acc, err := svc.GetDistributedQueries(hostCtx)
     require.NoError(t, err)
-    require.Len(t, queries, expectedDetailQueries-1)
+    // -4 for the non-windows queries, +1 for the distributed query for campaign ID 42
+    if !assert.Equal(t, len(expectedDetailQueries)-3, len(queries)) {
+        // this is just to print the diff between the expected and actual query
+        // keys when the count assertion fails, to help debugging - they are not
+        // expected to match.
+        require.ElementsMatch(t, osqueryMapKeys(expectedDetailQueries), distQueriesMapKeys(queries))
+    }
     verifyDiscovery(t, queries, discovery)
     queryKey := fmt.Sprintf("%s%d", hostDistributedQueryPrefix, campaign.ID)
     assert.Equal(t, "select * from time", queries[queryKey])
@@ -2198,7 +2225,8 @@ func TestPolicyQueries(t *testing.T) {
 
     queries, discovery, _, err := svc.GetDistributedQueries(ctx)
     require.NoError(t, err)
-    require.Len(t, queries, expectedDetailQueries+2)
+    // all queries -1 for the extra disk space one, and +2 for the policy queries
+    require.Equal(t, len(expectedDetailQueries)+1, len(queries), distQueriesMapKeys(queries))
     verifyDiscovery(t, queries, discovery)
 
     checkPolicyResults := func(queries map[string]string) {
@@ -2254,7 +2282,8 @@ func TestPolicyQueries(t *testing.T) {
     ctx = hostctx.NewContext(context.Background(), host)
     queries, discovery, _, err = svc.GetDistributedQueries(ctx)
     require.NoError(t, err)
-    require.Len(t, queries, expectedDetailQueries)
+    // all standard queries minus the extra disk space
+    require.Equal(t, len(expectedDetailQueries)-1, len(queries), distQueriesMapKeys(queries))
     verifyDiscovery(t, queries, discovery)
     noPolicyResults(queries)
 
@@ -2263,7 +2292,8 @@ func TestPolicyQueries(t *testing.T) {
 
     queries, discovery, _, err = svc.GetDistributedQueries(ctx)
     require.NoError(t, err)
-    require.Len(t, queries, expectedDetailQueries+2)
+    // all standard queries minus the extra disk space, +2 policy queries
+    require.Equal(t, len(expectedDetailQueries)+1, len(queries), distQueriesMapKeys(queries))
     verifyDiscovery(t, queries, discovery)
     checkPolicyResults(queries)
 
@@ -2291,7 +2321,8 @@ func TestPolicyQueries(t *testing.T) {
     ctx = hostctx.NewContext(context.Background(), host)
     queries, discovery, _, err = svc.GetDistributedQueries(ctx)
     require.NoError(t, err)
-    require.Len(t, queries, expectedDetailQueries)
+    // all standard queries minus the extra disk space
+    require.Equal(t, len(expectedDetailQueries)-1, len(queries), distQueriesMapKeys(queries))
     verifyDiscovery(t, queries, discovery)
     noPolicyResults(queries)
 
@@ -2300,7 +2331,8 @@ func TestPolicyQueries(t *testing.T) {
     ctx = hostctx.NewContext(context.Background(), host)
     queries, discovery, _, err = svc.GetDistributedQueries(ctx)
     require.NoError(t, err)
-    require.Len(t, queries, expectedDetailQueries+2)
+    // all standard queries minus the extra disk space, +2 policy queries
+    require.Equal(t, len(expectedDetailQueries)+1, len(queries), distQueriesMapKeys(queries))
     verifyDiscovery(t, queries, discovery)
     checkPolicyResults(queries)
 
@@ -2330,7 +2362,8 @@ func TestPolicyQueries(t *testing.T) {
     ctx = hostctx.NewContext(context.Background(), host)
     queries, discovery, _, err = svc.GetDistributedQueries(ctx)
     require.NoError(t, err)
-    require.Len(t, queries, expectedDetailQueries)
+    // all standard queries minus the extra disk space
+    require.Equal(t, len(expectedDetailQueries)-1, len(queries), distQueriesMapKeys(queries))
     verifyDiscovery(t, queries, discovery)
     noPolicyResults(queries)
 }
@@ -2395,7 +2428,8 @@ func TestPolicyWebhooks(t *testing.T) {
 
     queries, discovery, _, err := svc.GetDistributedQueries(ctx)
     require.NoError(t, err)
-    require.Len(t, queries, expectedDetailQueries+3)
+    // all queries -1 for extra disk space, +3 for policies
+    require.Equal(t, len(expectedDetailQueries)+2, len(queries), distQueriesMapKeys(queries))
     verifyDiscovery(t, queries, discovery)
 
     checkPolicyResults := func(queries map[string]string) {
@@ -2508,7 +2542,8 @@ func TestPolicyWebhooks(t *testing.T) {
     ctx = hostctx.NewContext(context.Background(), host)
     queries, discovery, _, err = svc.GetDistributedQueries(ctx)
     require.NoError(t, err)
-    require.Len(t, queries, expectedDetailQueries)
+    // all standard queries minus the extra disk space
+    require.Equal(t, len(expectedDetailQueries)-1, len(queries), distQueriesMapKeys(queries))
     verifyDiscovery(t, queries, discovery)
     noPolicyResults(queries)
 
@@ -2517,7 +2552,8 @@ func TestPolicyWebhooks(t *testing.T) {
 
     queries, discovery, _, err = svc.GetDistributedQueries(ctx)
     require.NoError(t, err)
-    require.Len(t, queries, expectedDetailQueries+3)
+    // all queries -1 for extra disk space, +3 for policies
+    require.Equal(t, len(expectedDetailQueries)+2, len(queries), distQueriesMapKeys(queries))
     verifyDiscovery(t, queries, discovery)
     checkPolicyResults(queries)
 
@@ -2640,7 +2676,8 @@ func TestLiveQueriesFailing(t *testing.T) {
 
     queries, discovery, _, err := svc.GetDistributedQueries(ctx)
     require.NoError(t, err)
-    require.Len(t, queries, expectedDetailQueries)
+    // all queries minus the extra disk space
+    require.Equal(t, len(expectedDetailQueries)-1, len(queries), distQueriesMapKeys(queries))
     verifyDiscovery(t, queries, discovery)
 
     logs, err := ioutil.ReadAll(buf)
@@ -2694,3 +2731,21 @@ func TestFleetDesktopOrbitInfo(t *testing.T) {
     require.Len(t, queries, 0)
     require.Len(t, discovery, 0)
 }
+
+func distQueriesMapKeys(m map[string]string) []string {
+    keys := make([]string, 0, len(m))
+    for k := range m {
+        keys = append(keys, strings.TrimPrefix(k, "fleet_detail_query_"))
+    }
+    sort.Strings(keys)
+    return keys
+}
+
+func osqueryMapKeys(m map[string]osquery_utils.DetailQuery) []string {
+    keys := make([]string, 0, len(m))
+    for k := range m {
+        keys = append(keys, k)
+    }
+    sort.Strings(keys)
+    return keys
+}
@@ -344,6 +344,14 @@ var extraDetailQueries = map[string]DetailQuery{
         DirectIngestFunc: directIngestChromeProfiles,
         Discovery:        discoveryTable("google_chrome_profiles"),
     },
+    "battery": {
+        Query:            `SELECT serial_number, cycle_count, health FROM battery;`,
+        Platforms:        []string{"darwin"},
+        DirectIngestFunc: directIngestBattery,
+        // the "battery" table doesn't need a Discovery query as it is an official
+        // osquery table on darwin (https://osquery.io/schema/5.3.0#battery), it is
+        // always present.
+    },
     OrbitInfoQueryName: OrbitInfoDetailQuery,
 }
 
@@ -362,9 +370,9 @@ func discoveryTable(tableName string) string {
     return fmt.Sprintf("SELECT 1 FROM osquery_registry WHERE active = true AND registry = 'table' AND name = '%s';", tableName)
 }
 
 const usersQueryStr = `WITH cached_groups AS (select * from groups)
 SELECT uid, username, type, groupname, shell
 FROM users LEFT JOIN cached_groups USING (gid)
 WHERE type <> 'special' AND shell NOT LIKE '%/false' AND shell NOT LIKE '%/nologin' AND shell NOT LIKE '%/shutdown' AND shell NOT LIKE '%/halt' AND username NOT LIKE '%$' AND username NOT LIKE '\_%' ESCAPE '\' AND NOT (username = 'sync' AND shell ='/bin/sync' AND directory <> '')`
 
 func withCachedUsers(query string) string {
@@ -627,6 +635,28 @@ func directIngestChromeProfiles(ctx context.Context, logger log.Logger, host *fl
     return ds.ReplaceHostDeviceMapping(ctx, host.ID, mapping)
 }
 
+func directIngestBattery(ctx context.Context, logger log.Logger, host *fleet.Host, ds fleet.Datastore, rows []map[string]string, failed bool) error {
+    if failed {
+        level.Error(logger).Log("op", "directIngestBattery", "err", "failed")
+        return nil
+    }
+
+    mapping := make([]*fleet.HostBattery, 0, len(rows))
+    for _, row := range rows {
+        cycleCount, err := strconv.ParseInt(EmptyToZero(row["cycle_count"]), 10, 64)
+        if err != nil {
+            return err
+        }
+        mapping = append(mapping, &fleet.HostBattery{
+            HostID:       host.ID,
+            SerialNumber: row["serial_number"],
+            CycleCount:   int(cycleCount),
+            Health:       row["health"],
+        })
+    }
+    return ds.ReplaceHostBatteries(ctx, host.ID, mapping)
+}
+
 func directIngestOrbitInfo(ctx context.Context, logger log.Logger, host *fleet.Host, ds fleet.Datastore, rows []map[string]string, failed bool) error {
     if len(rows) != 1 {
         return ctxerr.Errorf(ctx, "invalid number of orbit_info rows: %d", len(rows))
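osquery returns every column as a string, so `directIngestBattery` parses `cycle_count` with `strconv.ParseInt` and routes it through `EmptyToZero` to guard against empty values. A standalone sketch of that conversion on a made-up row; the local `emptyToZero` helper only approximates Fleet's `EmptyToZero`.

```go
package main

import (
	"fmt"
	"strconv"
)

// emptyToZero is a stand-in for Fleet's EmptyToZero helper: treat a missing
// value as "0" so ParseInt does not fail on empty strings.
func emptyToZero(s string) string {
	if s == "" {
		return "0"
	}
	return s
}

func main() {
	// Shape of one row from `SELECT serial_number, cycle_count, health FROM battery;`
	// (sample values, not real data).
	row := map[string]string{
		"serial_number": "D867B4A1C2E3",
		"cycle_count":   "999",
		"health":        "Good",
	}

	cycleCount, err := strconv.ParseInt(emptyToZero(row["cycle_count"]), 10, 64)
	if err != nil {
		panic(err)
	}
	fmt.Printf("serial=%s cycles=%d health=%s\n", row["serial_number"], int(cycleCount), row["health"])
}
```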
@@ -293,7 +293,7 @@ func sortedKeysCompare(t *testing.T, m map[string]DetailQuery, expectedKeys []st
 
 func TestGetDetailQueries(t *testing.T) {
     queriesNoConfig := GetDetailQueries(nil, config.FleetConfig{})
-    require.Len(t, queriesNoConfig, 12)
+    require.Len(t, queriesNoConfig, 13)
     baseQueries := []string{
         "network_interface",
         "os_version",
@@ -307,15 +307,16 @@ func TestGetDetailQueries(t *testing.T) {
         "munki_info",
         "google_chrome_profiles",
         "orbit_info",
+        "battery",
     }
     sortedKeysCompare(t, queriesNoConfig, baseQueries)
 
     queriesWithUsers := GetDetailQueries(&fleet.AppConfig{HostSettings: fleet.HostSettings{EnableHostUsers: true}}, config.FleetConfig{App: config.AppConfig{EnableScheduledQueryStats: true}})
-    require.Len(t, queriesWithUsers, 14)
+    require.Len(t, queriesWithUsers, 15)
     sortedKeysCompare(t, queriesWithUsers, append(baseQueries, "users", "scheduled_query_stats"))
 
     queriesWithUsersAndSoftware := GetDetailQueries(&fleet.AppConfig{HostSettings: fleet.HostSettings{EnableHostUsers: true, EnableSoftwareInventory: true}}, config.FleetConfig{App: config.AppConfig{EnableScheduledQueryStats: true}})
-    require.Len(t, queriesWithUsersAndSoftware, 17)
+    require.Len(t, queriesWithUsersAndSoftware, 18)
     sortedKeysCompare(t, queriesWithUsersAndSoftware,
         append(baseQueries, "users", "software_macos", "software_linux", "software_windows", "scheduled_query_stats"))
 }