mirror of
https://github.com/empayre/fleet.git
synced 2024-11-06 08:55:24 +00:00
Refetch host mdm enrollment status until unenrolled (#11740)
This commit is contained in:
parent
47de991ea6
commit
3f9eccc7f8
@ -1 +1,2 @@
|
||||
- Added device-authenticated endpoint to signal the Fleet server to send a webhook request with the device UUID and serial number to the webhook URL configured for MDM migration.
|
||||
- Added mechanism to refetch MDM enrollment status of a host pending unenrollment (due to a migration to Fleet) at a high interval.
|
||||
|
@ -18,6 +18,12 @@ func (svc *Service) RequestEncryptionKeyRotation(ctx context.Context, hostID uin
|
||||
return svc.ds.SetDiskEncryptionResetStatus(ctx, hostID, true)
|
||||
}
|
||||
|
||||
// refetchMDMUnenrollCriticalQueryDuration is how long a host's
// RefetchCriticalQueriesUntil timestamp is pushed into the future after the
// MDM migration webhook fires, i.e. how long the host will keep being asked
// to re-run the critical (MDM) detail queries while waiting for it to be
// unenrolled from its previous MDM solution.
const refetchMDMUnenrollCriticalQueryDuration = 3 * time.Minute
|
||||
|
||||
// TriggerMigrateMDMDevice triggers the webhook associated with the MDM
|
||||
// migration to Fleet configuration. It is located in the ee package instead of
|
||||
// the server/webhooks one because it is a Fleet Premium only feature and for
|
||||
// licensing reasons this needs to live under this package.
|
||||
func (svc *Service) TriggerMigrateMDMDevice(ctx context.Context, host *fleet.Host) error {
|
||||
ac, err := svc.ds.AppConfig(ctx)
|
||||
if err != nil {
|
||||
@ -35,9 +41,12 @@ func (svc *Service) TriggerMigrateMDMDevice(ctx context.Context, host *fleet.Hos
|
||||
bre.InternalErr = ctxerr.New(ctx, "macOS migration webhook URL not configured")
|
||||
case !host.IsOsqueryEnrolled(), !host.MDMInfo.IsDEPCapable(), !host.MDMInfo.IsEnrolledInThirdPartyMDM():
|
||||
bre.InternalErr = ctxerr.New(ctx, "host not eligible for macOS migration")
|
||||
case host.RefetchCriticalQueriesUntil != nil && host.RefetchCriticalQueriesUntil.After(svc.clock.Now()):
|
||||
// the webhook has already been triggered successfully recently (within the
|
||||
// refetch critical queries delay), so do as if it did send it successfully
|
||||
// but do not re-send.
|
||||
return nil
|
||||
}
|
||||
// TODO: add case to check if webhook has already been sent (if host refetchUntil is not zero?)
|
||||
|
||||
if bre.InternalErr != nil {
|
||||
return &bre
|
||||
}
|
||||
@ -52,6 +61,15 @@ func (svc *Service) TriggerMigrateMDMDevice(ctx context.Context, host *fleet.Hos
|
||||
return ctxerr.Wrap(ctx, err, "posting macOS migration webhook")
|
||||
}
|
||||
|
||||
// if the webhook was successfully triggered, we update the host to
|
||||
// constantly run the query to check if it has been unenrolled from its
|
||||
// existing third-party MDM.
|
||||
refetchUntil := svc.clock.Now().Add(refetchMDMUnenrollCriticalQueryDuration)
|
||||
host.RefetchCriticalQueriesUntil = &refetchUntil
|
||||
if err := svc.ds.UpdateHost(ctx, host); err != nil {
|
||||
return ctxerr.Wrap(ctx, err, "save host with refetch critical queries timestamp")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -61,9 +61,10 @@ func (ds *Datastore) NewHost(ctx context.Context, host *fleet.Host) (*fleet.Host
|
||||
logger_tls_period,
|
||||
config_tls_refresh,
|
||||
refetch_requested,
|
||||
hardware_serial
|
||||
hardware_serial,
|
||||
refetch_critical_queries_until
|
||||
)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
`
|
||||
result, err := tx.ExecContext(
|
||||
ctx,
|
||||
@ -87,6 +88,7 @@ func (ds *Datastore) NewHost(ctx context.Context, host *fleet.Host) (*fleet.Host
|
||||
host.ConfigTLSRefresh,
|
||||
host.RefetchRequested,
|
||||
host.HardwareSerial,
|
||||
host.RefetchCriticalQueriesUntil,
|
||||
)
|
||||
if err != nil {
|
||||
return ctxerr.Wrap(ctx, err, "new host")
|
||||
@ -426,6 +428,7 @@ SELECT
|
||||
h.label_updated_at,
|
||||
h.last_enrolled_at,
|
||||
h.refetch_requested,
|
||||
h.refetch_critical_queries_until,
|
||||
h.team_id,
|
||||
h.policy_updated_at,
|
||||
h.public_ip,
|
||||
@ -621,6 +624,7 @@ func (ds *Datastore) ListHosts(ctx context.Context, filter fleet.TeamFilter, opt
|
||||
h.label_updated_at,
|
||||
h.last_enrolled_at,
|
||||
h.refetch_requested,
|
||||
h.refetch_critical_queries_until,
|
||||
h.team_id,
|
||||
h.policy_updated_at,
|
||||
h.public_ip,
|
||||
@ -1402,6 +1406,7 @@ func (ds *Datastore) EnrollHost(ctx context.Context, isMDMEnabled bool, osqueryH
|
||||
h.label_updated_at,
|
||||
h.last_enrolled_at,
|
||||
h.refetch_requested,
|
||||
h.refetch_critical_queries_until,
|
||||
h.team_id,
|
||||
h.policy_updated_at,
|
||||
h.public_ip,
|
||||
@ -1483,6 +1488,7 @@ func (ds *Datastore) LoadHostByNodeKey(ctx context.Context, nodeKey string) (*fl
|
||||
h.label_updated_at,
|
||||
h.last_enrolled_at,
|
||||
h.refetch_requested,
|
||||
h.refetch_critical_queries_until,
|
||||
h.team_id,
|
||||
h.policy_updated_at,
|
||||
h.public_ip,
|
||||
@ -1557,6 +1563,7 @@ func (ds *Datastore) LoadHostByOrbitNodeKey(ctx context.Context, nodeKey string)
|
||||
h.label_updated_at,
|
||||
h.last_enrolled_at,
|
||||
h.refetch_requested,
|
||||
h.refetch_critical_queries_until,
|
||||
h.team_id,
|
||||
h.policy_updated_at,
|
||||
h.public_ip,
|
||||
@ -1650,6 +1657,7 @@ func (ds *Datastore) LoadHostByDeviceAuthToken(ctx context.Context, authToken st
|
||||
h.label_updated_at,
|
||||
h.last_enrolled_at,
|
||||
h.refetch_requested,
|
||||
h.refetch_critical_queries_until,
|
||||
h.team_id,
|
||||
h.policy_updated_at,
|
||||
h.public_ip,
|
||||
@ -1795,6 +1803,7 @@ func (ds *Datastore) SearchHosts(ctx context.Context, filter fleet.TeamFilter, m
|
||||
h.label_updated_at,
|
||||
h.last_enrolled_at,
|
||||
h.refetch_requested,
|
||||
h.refetch_critical_queries_until,
|
||||
h.team_id,
|
||||
h.policy_updated_at,
|
||||
h.public_ip,
|
||||
@ -1890,7 +1899,8 @@ SELECT
|
||||
label_updated_at,
|
||||
last_enrolled_at,
|
||||
policy_updated_at,
|
||||
refetch_requested
|
||||
refetch_requested,
|
||||
refetch_critical_queries_until
|
||||
FROM hosts
|
||||
WHERE uuid IN (?) AND %s
|
||||
`, ds.whereFilterHostsByTeams(filter, "hosts"),
|
||||
@ -1947,6 +1957,7 @@ func (ds *Datastore) HostByIdentifier(ctx context.Context, identifier string) (*
|
||||
h.label_updated_at,
|
||||
h.last_enrolled_at,
|
||||
h.refetch_requested,
|
||||
h.refetch_critical_queries_until,
|
||||
h.team_id,
|
||||
h.policy_updated_at,
|
||||
h.public_ip,
|
||||
@ -3350,6 +3361,7 @@ func (ds *Datastore) HostLite(ctx context.Context, id uint) (*fleet.Host, error)
|
||||
"last_enrolled_at",
|
||||
"policy_updated_at",
|
||||
"refetch_requested",
|
||||
"refetch_critical_queries_until",
|
||||
).Where(goqu.I("id").Eq(id)).ToSQL()
|
||||
if err != nil {
|
||||
return nil, ctxerr.Wrap(ctx, err, "sql build")
|
||||
@ -3432,7 +3444,8 @@ func (ds *Datastore) UpdateHost(ctx context.Context, host *fleet.Host) error {
|
||||
primary_mac = ?,
|
||||
public_ip = ?,
|
||||
refetch_requested = ?,
|
||||
orbit_node_key = ?
|
||||
orbit_node_key = ?,
|
||||
refetch_critical_queries_until = ?
|
||||
WHERE id = ?
|
||||
`
|
||||
_, err := ds.writer.ExecContext(ctx, sqlStatement,
|
||||
@ -3469,6 +3482,7 @@ func (ds *Datastore) UpdateHost(ctx context.Context, host *fleet.Host) error {
|
||||
host.PublicIP,
|
||||
host.RefetchRequested,
|
||||
host.OrbitNodeKey,
|
||||
host.RefetchCriticalQueriesUntil,
|
||||
host.ID,
|
||||
)
|
||||
if err != nil {
|
||||
|
@ -182,6 +182,10 @@ func testUpdateHost(t *testing.T, ds *Datastore, updateHostFunc func(context.Con
|
||||
err = updateHostFunc(context.Background(), host)
|
||||
require.NoError(t, err)
|
||||
|
||||
host.RefetchCriticalQueriesUntil = ptr.Time(time.Now().UTC().Add(time.Hour))
|
||||
err = updateHostFunc(context.Background(), host)
|
||||
require.NoError(t, err)
|
||||
|
||||
host, err = ds.Host(context.Background(), host.ID)
|
||||
require.NoError(t, err)
|
||||
|
||||
@ -189,6 +193,8 @@ func testUpdateHost(t *testing.T, ds *Datastore, updateHostFunc func(context.Con
|
||||
assert.Equal(t, "192.168.1.1", host.PrimaryIP)
|
||||
assert.Equal(t, "30-65-EC-6F-C4-58", host.PrimaryMac)
|
||||
assert.Equal(t, policyUpdatedAt.UTC(), host.PolicyUpdatedAt)
|
||||
assert.NotNil(t, host.RefetchCriticalQueriesUntil)
|
||||
assert.True(t, time.Now().Before(*host.RefetchCriticalQueriesUntil))
|
||||
|
||||
additionalJSON := json.RawMessage(`{"foobar": "bim"}`)
|
||||
err = ds.SaveHostAdditional(context.Background(), host.ID, &additionalJSON)
|
||||
@ -203,9 +209,14 @@ func testUpdateHost(t *testing.T, ds *Datastore, updateHostFunc func(context.Con
|
||||
err = updateHostFunc(context.Background(), host)
|
||||
require.NoError(t, err)
|
||||
|
||||
host.RefetchCriticalQueriesUntil = nil
|
||||
err = updateHostFunc(context.Background(), host)
|
||||
require.NoError(t, err)
|
||||
|
||||
host, err = ds.Host(context.Background(), host.ID)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, host)
|
||||
require.Nil(t, host.RefetchCriticalQueriesUntil)
|
||||
|
||||
p, err := ds.NewPack(context.Background(), &fleet.Pack{
|
||||
Name: t.Name(),
|
||||
|
@ -497,6 +497,7 @@ func (ds *Datastore) ListHostsInLabel(ctx context.Context, filter fleet.TeamFilt
|
||||
h.label_updated_at,
|
||||
h.last_enrolled_at,
|
||||
h.refetch_requested,
|
||||
h.refetch_critical_queries_until,
|
||||
h.team_id,
|
||||
h.policy_updated_at,
|
||||
h.public_ip,
|
||||
@ -631,6 +632,7 @@ func (ds *Datastore) ListUniqueHostsInLabels(ctx context.Context, filter fleet.T
|
||||
h.label_updated_at,
|
||||
h.last_enrolled_at,
|
||||
h.refetch_requested,
|
||||
h.refetch_critical_queries_until,
|
||||
h.team_id,
|
||||
h.policy_updated_at,
|
||||
h.public_ip,
|
||||
|
@ -0,0 +1,22 @@
|
||||
package tables
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func init() {
|
||||
MigrationClient.AddMigration(Up_20230517152807, Down_20230517152807)
|
||||
}
|
||||
|
||||
func Up_20230517152807(tx *sql.Tx) error {
|
||||
_, err := tx.Exec(`
|
||||
ALTER TABLE hosts ADD COLUMN refetch_critical_queries_until TIMESTAMP NULL;
|
||||
`)
|
||||
return errors.Wrap(err, "add refetch_critical_queries_until")
|
||||
}
|
||||
|
||||
// Down_20230517152807 is intentionally a no-op: the column added by the
// up-migration is left in place rather than being dropped.
func Down_20230517152807(tx *sql.Tx) error {
	return nil
}
|
@ -0,0 +1,79 @@
|
||||
package tables
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/fleetdm/fleet/v4/server"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestUp_20230517152807(t *testing.T) {
|
||||
db := applyUpToPrev(t)
|
||||
|
||||
someString := func() string {
|
||||
s, err := server.GenerateRandomText(16)
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}
|
||||
|
||||
insertStmt := `
|
||||
INSERT INTO hosts (
|
||||
osquery_host_id,
|
||||
detail_updated_at,
|
||||
label_updated_at,
|
||||
policy_updated_at,
|
||||
node_key,
|
||||
hostname,
|
||||
computer_name,
|
||||
uuid,
|
||||
platform,
|
||||
osquery_version,
|
||||
os_version,
|
||||
uptime,
|
||||
memory,
|
||||
team_id,
|
||||
distributed_interval,
|
||||
logger_tls_period,
|
||||
config_tls_refresh,
|
||||
refetch_requested,
|
||||
hardware_serial
|
||||
)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`
|
||||
|
||||
newHostArgs := func() []any {
|
||||
return []any{
|
||||
someString(),
|
||||
time.Now(),
|
||||
time.Now(),
|
||||
time.Now(),
|
||||
someString(),
|
||||
someString(),
|
||||
someString(),
|
||||
someString(),
|
||||
someString(),
|
||||
someString(),
|
||||
someString(),
|
||||
1337,
|
||||
1337,
|
||||
nil,
|
||||
1337,
|
||||
1337,
|
||||
1337,
|
||||
true,
|
||||
someString(),
|
||||
}
|
||||
}
|
||||
|
||||
args := newHostArgs()
|
||||
execNoErr(t, db, insertStmt, args...)
|
||||
|
||||
// Apply current migration.
|
||||
applyNext(t, db)
|
||||
|
||||
// existing host has a null refetch_critical_queries_until
|
||||
var until *time.Time
|
||||
err := db.Get(&until, "SELECT refetch_critical_queries_until FROM hosts WHERE osquery_host_id = ?", args[0])
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, until)
|
||||
}
|
File diff suppressed because one or more lines are too long
@ -281,6 +281,22 @@ type Host struct {
|
||||
// other host fields, it is not filled in by all host-returning datastore
|
||||
// methods.
|
||||
MDMInfo *HostMDM `json:"-" csv:"-"`
|
||||
|
||||
// RefetchCriticalQueriesUntil can be set to a timestamp up to which the
|
||||
// "critical" queries will be constantly reported to the host that checks in
|
||||
// to be re-executed until a condition is met (or the timestamp expires). The
|
||||
// notion of "critical query" is voluntarily loosely defined so that future
|
||||
// requirements may use this mechanism. The difference with RefetchRequested
|
||||
// is that the latter is a one-time request, while this one is a persistent
|
||||
// until the timestamp expires. The initial use-case is to check for a host
|
||||
// to be unenrolled from its old MDM solution, in the "migrate to Fleet MDM"
|
||||
// workflow.
|
||||
//
|
||||
// In the future, if we want to use it for more than one use-case, we could
|
||||
// add a "reason" field with well-known labels so we know what condition(s)
|
||||
// are expected to clear the timestamp. For now there's a single use-case
|
||||
// so we don't need this.
|
||||
RefetchCriticalQueriesUntil *time.Time `json:"-" db:"refetch_critical_queries_until" csv:"-"`
|
||||
}
|
||||
|
||||
type MDMHostData struct {
|
||||
|
@ -3172,7 +3172,9 @@ func (s *integrationMDMTestSuite) TestMigrateMDMDeviceWebhook() {
|
||||
|
||||
h := createHostAndDeviceToken(t, s.ds, "good-token")
|
||||
|
||||
var webhookCalled bool
|
||||
webhookSrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
webhookCalled = true
|
||||
w.WriteHeader(http.StatusOK)
|
||||
switch r.URL.Path {
|
||||
case "/test_mdm_migration":
|
||||
@ -3213,25 +3215,63 @@ func (s *integrationMDMTestSuite) TestMigrateMDMDeviceWebhook() {
|
||||
// host is a server so migration is not allowed
|
||||
require.NoError(t, s.ds.SetOrUpdateMDMData(context.Background(), h.ID, isServer, enrolled, mdmURL, installedFromDEP, mdmName))
|
||||
s.Do("POST", fmt.Sprintf("/api/v1/fleet/device/%s/migrate_mdm", "good-token"), nil, http.StatusBadRequest)
|
||||
require.False(t, webhookCalled)
|
||||
|
||||
// host is not DEP so migration is not allowed
|
||||
require.NoError(t, s.ds.SetOrUpdateMDMData(context.Background(), h.ID, !isServer, enrolled, mdmURL, !installedFromDEP, mdmName))
|
||||
s.Do("POST", fmt.Sprintf("/api/v1/fleet/device/%s/migrate_mdm", "good-token"), nil, http.StatusBadRequest)
|
||||
require.False(t, webhookCalled)
|
||||
|
||||
// host is not enrolled to MDM so migration is not allowed
|
||||
require.NoError(t, s.ds.SetOrUpdateMDMData(context.Background(), h.ID, !isServer, !enrolled, mdmURL, installedFromDEP, mdmName))
|
||||
s.Do("POST", fmt.Sprintf("/api/v1/fleet/device/%s/migrate_mdm", "good-token"), nil, http.StatusBadRequest)
|
||||
require.False(t, webhookCalled)
|
||||
|
||||
// host is already enrolled to Fleet MDM so migration is not allowed
|
||||
require.NoError(t, s.ds.SetOrUpdateMDMData(context.Background(), h.ID, !isServer, enrolled, mdmURL, installedFromDEP, fleet.WellKnownMDMFleet))
|
||||
s.Do("POST", fmt.Sprintf("/api/v1/fleet/device/%s/migrate_mdm", "good-token"), nil, http.StatusBadRequest)
|
||||
require.False(t, webhookCalled)
|
||||
|
||||
// up to this point, the refetch critical queries timestamp has not been set
|
||||
// on the host.
|
||||
h, err := s.ds.Host(context.Background(), h.ID)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, h.RefetchCriticalQueriesUntil)
|
||||
|
||||
// host is enrolled to a third-party MDM so migration is allowed
|
||||
require.NoError(t, s.ds.SetOrUpdateMDMData(context.Background(), h.ID, !isServer, enrolled, mdmURL, installedFromDEP, mdmName))
|
||||
s.Do("POST", fmt.Sprintf("/api/v1/fleet/device/%s/migrate_mdm", "good-token"), nil, http.StatusNoContent)
|
||||
require.True(t, webhookCalled)
|
||||
webhookCalled = false
|
||||
|
||||
// the refetch critical queries timestamp has been set in the future
|
||||
h, err = s.ds.Host(context.Background(), h.ID)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, h.RefetchCriticalQueriesUntil)
|
||||
require.True(t, h.RefetchCriticalQueriesUntil.After(time.Now()))
|
||||
|
||||
// calling again works but does not trigger the webhook, as it was called recently
|
||||
s.Do("POST", fmt.Sprintf("/api/v1/fleet/device/%s/migrate_mdm", "good-token"), nil, http.StatusNoContent)
|
||||
require.False(t, webhookCalled)
|
||||
|
||||
// setting the refetch critical queries timestamp in the past triggers the webhook again
|
||||
h.RefetchCriticalQueriesUntil = ptr.Time(time.Now().Add(-1 * time.Minute))
|
||||
err = s.ds.UpdateHost(context.Background(), h)
|
||||
require.NoError(t, err)
|
||||
|
||||
s.Do("POST", fmt.Sprintf("/api/v1/fleet/device/%s/migrate_mdm", "good-token"), nil, http.StatusNoContent)
|
||||
require.True(t, webhookCalled)
|
||||
webhookCalled = false
|
||||
|
||||
// the refetch critical queries timestamp has been updated to the future
|
||||
h, err = s.ds.Host(context.Background(), h.ID)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, h.RefetchCriticalQueriesUntil)
|
||||
require.True(t, h.RefetchCriticalQueriesUntil.After(time.Now()))
|
||||
|
||||
// bad token
|
||||
s.Do("POST", fmt.Sprintf("/api/v1/fleet/device/%s/migrate_mdm", "bad-token"), nil, http.StatusUnauthorized)
|
||||
require.False(t, webhookCalled)
|
||||
|
||||
// disable macos migration
|
||||
s.DoJSON("PATCH", "/api/latest/fleet/config", json.RawMessage(`{
|
||||
@ -3247,6 +3287,7 @@ func (s *integrationMDMTestSuite) TestMigrateMDMDeviceWebhook() {
|
||||
|
||||
// expect error if macos migration is not configured
|
||||
s.Do("POST", fmt.Sprintf("/api/v1/fleet/device/%s/migrate_mdm", "good-token"), nil, http.StatusBadRequest)
|
||||
require.False(t, webhookCalled)
|
||||
}
|
||||
|
||||
func (s *integrationMDMTestSuite) TestMDMMacOSSetup() {
|
||||
|
@ -608,12 +608,25 @@ func (svc *Service) GetDistributedQueries(ctx context.Context) (queries map[stri
|
||||
|
||||
const alwaysTrueQuery = "SELECT 1"
|
||||
|
||||
// list of detail queries that are returned when only the critical queries
|
||||
// should be returned (due to RefetchCriticalQueriesUntil timestamp being set).
|
||||
var criticalDetailQueries = map[string]bool{
|
||||
"mdm": true,
|
||||
}
|
||||
|
||||
// detailQueriesForHost returns the map of detail+additional queries that should be executed by
|
||||
// osqueryd to fill in the host details.
|
||||
func (svc *Service) detailQueriesForHost(ctx context.Context, host *fleet.Host) (queries map[string]string, discovery map[string]string, err error) {
|
||||
var criticalQueriesOnly bool
|
||||
if !svc.shouldUpdate(host.DetailUpdatedAt, svc.config.Osquery.DetailUpdateInterval, host.ID) && !host.RefetchRequested {
|
||||
// would not return anything, check if critical queries should be returned
|
||||
if host.RefetchCriticalQueriesUntil != nil && host.RefetchCriticalQueriesUntil.After(svc.clock.Now()) {
|
||||
// return only those critical queries
|
||||
criticalQueriesOnly = true
|
||||
} else {
|
||||
return nil, nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
appConfig, err := svc.ds.AppConfig(ctx)
|
||||
if err != nil {
|
||||
@ -630,6 +643,10 @@ func (svc *Service) detailQueriesForHost(ctx context.Context, host *fleet.Host)
|
||||
|
||||
detailQueries := osquery_utils.GetDetailQueries(ctx, svc.config, appConfig, features)
|
||||
for name, query := range detailQueries {
|
||||
if criticalQueriesOnly && !criticalDetailQueries[name] {
|
||||
continue
|
||||
}
|
||||
|
||||
if query.RunsForPlatform(host.Platform) {
|
||||
queryName := hostDetailQueryPrefix + name
|
||||
queries[queryName] = query.Query
|
||||
@ -641,7 +658,7 @@ func (svc *Service) detailQueriesForHost(ctx context.Context, host *fleet.Host)
|
||||
}
|
||||
}
|
||||
|
||||
if features.AdditionalQueries == nil {
|
||||
if features.AdditionalQueries == nil || criticalQueriesOnly {
|
||||
// No additional queries set
|
||||
return queries, discovery, nil
|
||||
}
|
||||
@ -833,6 +850,7 @@ func (svc *Service) SubmitDistributedQueryResults(
|
||||
additionalUpdated := false
|
||||
labelResults := map[uint]*bool{}
|
||||
policyResults := map[uint]*bool{}
|
||||
refetchCriticalSet := host.RefetchCriticalQueriesUntil != nil
|
||||
|
||||
svc.maybeDebugHost(ctx, host, results, statuses, messages)
|
||||
|
||||
@ -936,8 +954,9 @@ func (svc *Service) SubmitDistributedQueryResults(
|
||||
if refetchRequested {
|
||||
host.RefetchRequested = false
|
||||
}
|
||||
refetchCriticalCleared := refetchCriticalSet && host.RefetchCriticalQueriesUntil == nil
|
||||
|
||||
if refetchRequested || detailUpdated {
|
||||
if refetchRequested || detailUpdated || refetchCriticalCleared {
|
||||
appConfig, err := svc.ds.AppConfig(ctx)
|
||||
if err != nil {
|
||||
logging.WithErr(ctx, err)
|
||||
|
@ -519,6 +519,7 @@ func verifyDiscovery(t *testing.T, queries, discovery map[string]string) {
|
||||
}
|
||||
|
||||
func TestHostDetailQueries(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
ds := new(mock.Store)
|
||||
additional := json.RawMessage(`{"foobar": "select foo", "bim": "bam"}`)
|
||||
ds.AppConfigFunc = func(ctx context.Context) (*fleet.AppConfig, error) {
|
||||
@ -553,23 +554,26 @@ func TestHostDetailQueries(t *testing.T) {
|
||||
jitterH: make(map[time.Duration]*jitterHashTable),
|
||||
}
|
||||
|
||||
queries, discovery, err := svc.detailQueriesForHost(context.Background(), &host)
|
||||
// detail_updated_at is now, so nothing gets returned by default
|
||||
queries, discovery, err := svc.detailQueriesForHost(ctx, &host)
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, queries)
|
||||
verifyDiscovery(t, queries, discovery)
|
||||
|
||||
// With refetch requested detail queries should be returned
|
||||
host.RefetchRequested = true
|
||||
queries, discovery, err = svc.detailQueriesForHost(context.Background(), &host)
|
||||
queries, discovery, err = svc.detailQueriesForHost(ctx, &host)
|
||||
require.NoError(t, err)
|
||||
assert.NotEmpty(t, queries)
|
||||
// +2: additional queries: bim, foobar
|
||||
require.Equal(t, len(expectedDetailQueriesForPlatform(host.Platform))+2, len(queries), distQueriesMapKeys(queries))
|
||||
verifyDiscovery(t, queries, discovery)
|
||||
host.RefetchRequested = false
|
||||
|
||||
// Advance the time
|
||||
mockClock.AddTime(1*time.Hour + 1*time.Minute)
|
||||
|
||||
queries, discovery, err = svc.detailQueriesForHost(context.Background(), &host)
|
||||
// all queries returned now that detail updated at is in the past
|
||||
queries, discovery, err = svc.detailQueriesForHost(ctx, &host)
|
||||
require.NoError(t, err)
|
||||
// +2: additional queries: bim, foobar
|
||||
require.Equal(t, len(expectedDetailQueriesForPlatform(host.Platform))+2, len(queries), distQueriesMapKeys(queries))
|
||||
@ -581,6 +585,31 @@ func TestHostDetailQueries(t *testing.T) {
|
||||
}
|
||||
assert.Equal(t, "bam", queries[hostAdditionalQueryPrefix+"bim"])
|
||||
assert.Equal(t, "select foo", queries[hostAdditionalQueryPrefix+"foobar"])
|
||||
|
||||
host.DetailUpdatedAt = mockClock.Now()
|
||||
|
||||
// detail_updated_at is now, so nothing gets returned
|
||||
queries, discovery, err = svc.detailQueriesForHost(ctx, &host)
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, queries)
|
||||
verifyDiscovery(t, queries, discovery)
|
||||
|
||||
// setting refetch_critical_queries_until in the past still returns nothing
|
||||
host.RefetchCriticalQueriesUntil = ptr.Time(mockClock.Now().Add(-1 * time.Minute))
|
||||
queries, discovery, err = svc.detailQueriesForHost(ctx, &host)
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, queries)
|
||||
verifyDiscovery(t, queries, discovery)
|
||||
|
||||
// setting refetch_critical_queries_until in the future returns only the critical queries
|
||||
host.RefetchCriticalQueriesUntil = ptr.Time(mockClock.Now().Add(1 * time.Minute))
|
||||
queries, discovery, err = svc.detailQueriesForHost(ctx, &host)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(criticalDetailQueries), len(queries), distQueriesMapKeys(queries))
|
||||
for name := range criticalDetailQueries {
|
||||
assert.Contains(t, queries, hostDetailQueryPrefix+name)
|
||||
}
|
||||
verifyDiscovery(t, queries, discovery)
|
||||
}
|
||||
|
||||
func TestQueriesAndHostFeatures(t *testing.T) {
|
||||
|
@ -1250,13 +1250,20 @@ func directIngestMDMMac(ctx context.Context, logger log.Logger, host *fleet.Host
|
||||
}
|
||||
}
|
||||
|
||||
mdmSolutionName := deduceMDMNameMacOS(rows[0])
|
||||
if !enrolled && installedFromDep && mdmSolutionName != fleet.WellKnownMDMFleet && host.RefetchCriticalQueriesUntil != nil {
|
||||
// the host was unenrolled from a non-Fleet DEP MDM solution, and the
|
||||
// refetch critical queries timestamp was set, so clear it.
|
||||
host.RefetchCriticalQueriesUntil = nil
|
||||
}
|
||||
|
||||
return ds.SetOrUpdateMDMData(ctx,
|
||||
host.ID,
|
||||
false,
|
||||
enrolled,
|
||||
rows[0]["server_url"],
|
||||
installedFromDep,
|
||||
deduceMDMNameMacOS(rows[0]),
|
||||
mdmSolutionName,
|
||||
)
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user