Send DeviceConfigured MDM command after DEP enrollment (#17737)

parent 8d2deb37e5
commit 994040b1c9
@@ -1 +1,2 @@
+* Added the `enable_release_device_manually` configuration setting for a team and for "no team". **Note** that the macOS automatic enrollment profile can no longer set the `await_device_configured` option; this setting is now controlled by Fleet via the new `enable_release_device_manually` option.
+* Automatically release a macOS DEP-enrolled device after enrollment commands and profiles have been delivered, unless `enable_release_device_manually` is set to `true`.
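In practice these two notes combine into a single decision at the end of DEP setup. A minimal sketch of that decision, not Fleet's actual implementation (every name here is illustrative except `DeviceConfigured`, which this commit adds to `MDMAppleCommander` below):

```go
package example

import "context"

// releaser abstracts the one MDM call needed; in this commit it is
// implemented by MDMAppleCommander.DeviceConfigured (see later hunks).
type releaser interface {
	DeviceConfigured(ctx context.Context, hostUUID, cmdUUID string) error
}

// maybeReleaseDevice sketches the behavior described in the changelog: once
// all post-enrollment commands and profiles are delivered, either release the
// device or leave it at Setup Assistant for a manual release by an admin.
func maybeReleaseDevice(ctx context.Context, r releaser, hostUUID, cmdUUID string, enableReleaseDeviceManually bool) error {
	if enableReleaseDeviceManually {
		return nil // an admin releases the device manually via Fleet
	}
	// devices now enroll with await_device_configured=true, so they stay on
	// the "awaiting configuration" screen until this command arrives
	return r.DeviceConfigured(ctx, hostUUID, cmdUUID)
}
```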
@@ -228,7 +228,7 @@ func (svc *Service) updateAppConfigMDMAppleSetup(ctx context.Context, payload fl
		return err
	}

-	var didUpdate, didUpdateMacOSEndUserAuth bool
+	var didUpdate, didUpdateMacOSEndUserAuth, didUpdateMacOSReleaseDevice bool
	if payload.EnableEndUserAuthentication != nil {
		if ac.MDM.MacOSSetup.EnableEndUserAuthentication != *payload.EnableEndUserAuthentication {
			ac.MDM.MacOSSetup.EnableEndUserAuthentication = *payload.EnableEndUserAuthentication
@@ -241,6 +241,7 @@ func (svc *Service) updateAppConfigMDMAppleSetup(ctx context.Context, payload fl
		if ac.MDM.MacOSSetup.EnableReleaseDeviceManually.Value != *payload.EnableReleaseDeviceManually {
			ac.MDM.MacOSSetup.EnableReleaseDeviceManually = optjson.SetBool(*payload.EnableReleaseDeviceManually)
			didUpdate = true
+			didUpdateMacOSReleaseDevice = true
		}
	}

@@ -248,6 +249,11 @@ func (svc *Service) updateAppConfigMDMAppleSetup(ctx context.Context, payload fl
	if err := svc.ds.SaveAppConfig(ctx, ac); err != nil {
		return err
	}
+	if didUpdateMacOSReleaseDevice {
+		if err := svc.updateMacOSSetupEnableReleaseDevice(ctx, ac.MDM.MacOSSetup.EnableReleaseDeviceManually.Value, nil, nil); err != nil {
+			return err
+		}
+	}
	if didUpdateMacOSEndUserAuth {
		if err := svc.updateMacOSSetupEnableEndUserAuth(ctx, ac.MDM.MacOSSetup.EnableEndUserAuthentication, nil, nil); err != nil {
			return err
@@ -257,6 +263,13 @@ func (svc *Service) updateAppConfigMDMAppleSetup(ctx context.Context, payload fl
	return nil
}

+func (svc *Service) updateMacOSSetupEnableReleaseDevice(ctx context.Context, enable bool, teamID *uint, teamName *string) error {
+	if _, err := worker.QueueMacosSetupAssistantJob(ctx, svc.ds, svc.logger, worker.MacosSetupAssistantUpdateProfile, teamID); err != nil {
+		return ctxerr.Wrap(ctx, err, "queue macos setup assistant update profile job")
+	}
+	return nil
+}
+
func (svc *Service) updateMacOSSetupEnableEndUserAuth(ctx context.Context, enable bool, teamID *uint, teamName *string) error {
	if _, err := worker.QueueMacosSetupAssistantJob(ctx, svc.ds, svc.logger, worker.MacosSetupAssistantUpdateProfile, teamID); err != nil {
		return ctxerr.Wrap(ctx, err, "queue macos setup assistant update profile job")
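Both helpers above queue the same `update_profile` job, and neither uses its `enable` or `teamName` parameter in the hunks shown. A hypothetical consolidation (not part of this diff) would express the shared pattern once:

```go
// sketch of a shared helper: flipping either macOS setup option just queues
// an update_profile job so the worker re-registers the DEP enrollment
// profile with Apple asynchronously.
func (svc *Service) queueMacOSSetupProfileUpdate(ctx context.Context, teamID *uint) error {
	if _, err := worker.QueueMacosSetupAssistantJob(ctx, svc.ds, svc.logger, worker.MacosSetupAssistantUpdateProfile, teamID); err != nil {
		return ctxerr.Wrap(ctx, err, "queue macos setup assistant update profile job")
	}
	return nil
}
```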
@@ -1136,6 +1136,11 @@ func (svc *Service) editTeamFromSpec(
		}
	}

+	if didUpdateEnableReleaseManually {
+		if err := svc.updateMacOSSetupEnableReleaseDevice(ctx, spec.MDM.MacOSSetup.EnableReleaseDeviceManually.Value, &team.ID, &team.Name); err != nil {
+			return err
+		}
+	}
	if didUpdateMacOSEndUserAuth {
		if err := svc.updateMacOSSetupEnableEndUserAuth(ctx, spec.MDM.MacOSSetup.EnableEndUserAuthentication, &team.ID, &team.Name); err != nil {
			return err
@@ -1241,7 +1246,7 @@ func (svc *Service) updateTeamMDMDiskEncryption(ctx context.Context, tm *fleet.T
}

func (svc *Service) updateTeamMDMAppleSetup(ctx context.Context, tm *fleet.Team, payload fleet.MDMAppleSetupPayload) error {
-	var didUpdate, didUpdateMacOSEndUserAuth bool
+	var didUpdate, didUpdateMacOSEndUserAuth, didUpdateMacOSReleaseDevice bool
	if payload.EnableEndUserAuthentication != nil {
		if tm.Config.MDM.MacOSSetup.EnableEndUserAuthentication != *payload.EnableEndUserAuthentication {
			tm.Config.MDM.MacOSSetup.EnableEndUserAuthentication = *payload.EnableEndUserAuthentication
@@ -1254,6 +1259,7 @@ func (svc *Service) updateTeamMDMAppleSetup(ctx context.Context, tm *fleet.Team,
		if tm.Config.MDM.MacOSSetup.EnableReleaseDeviceManually.Value != *payload.EnableReleaseDeviceManually {
			tm.Config.MDM.MacOSSetup.EnableReleaseDeviceManually = optjson.SetBool(*payload.EnableReleaseDeviceManually)
			didUpdate = true
+			didUpdateMacOSReleaseDevice = true
		}
	}

@@ -1261,6 +1267,11 @@ func (svc *Service) updateTeamMDMAppleSetup(ctx context.Context, tm *fleet.Team,
	if _, err := svc.ds.SaveTeam(ctx, tm); err != nil {
		return err
	}
+	if didUpdateMacOSReleaseDevice {
+		if err := svc.updateMacOSSetupEnableReleaseDevice(ctx, tm.Config.MDM.MacOSSetup.EnableReleaseDeviceManually.Value, &tm.ID, &tm.Name); err != nil {
+			return err
+		}
+	}
	if didUpdateMacOSEndUserAuth {
		if err := svc.updateMacOSSetupEnableEndUserAuth(ctx, tm.Config.MDM.MacOSSetup.EnableEndUserAuthentication, &tm.ID, &tm.Name); err != nil {
			return err
@@ -35,7 +35,7 @@ VALUES (?, ?, ?, ?, ?, COALESCE(?, NOW()))
	return job, nil
}

-func (ds *Datastore) GetQueuedJobs(ctx context.Context, maxNumJobs int) ([]*fleet.Job, error) {
+func (ds *Datastore) GetQueuedJobs(ctx context.Context, maxNumJobs int, now time.Time) ([]*fleet.Job, error) {
	query := `
SELECT
	id, created_at, updated_at, name, args, state, retries, error, not_before
@@ -43,14 +43,18 @@ FROM
	jobs
WHERE
	state = ? AND
-	not_before <= NOW()
+	not_before <= ?
ORDER BY
	updated_at ASC
LIMIT ?
`

+	if now.IsZero() {
+		now = time.Now().UTC()
+	}
+
	var jobs []*fleet.Job
-	err := sqlx.SelectContext(ctx, ds.reader(ctx), &jobs, query, fleet.JobStateQueued, maxNumJobs)
+	err := sqlx.SelectContext(ctx, ds.reader(ctx), &jobs, query, fleet.JobStateQueued, now, maxNumJobs)
	if err != nil {
		return nil, err
	}
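Injecting `now` keeps the `not_before <= ?` comparison deterministic under test, while production callers keep the previous behavior by passing the zero time. A usage sketch (fragment, assuming the datastore above):

```go
// production path: the zero time falls back to time.Now().UTC() inside GetQueuedJobs
jobs, err := ds.GetQueuedJobs(ctx, 10, time.Time{})

// test path: pin the clock so results don't depend on the MySQL server's NOW()
pinned := time.Date(2024, 3, 20, 0, 0, 0, 0, time.UTC)
jobs, err = ds.GetQueuedJobs(ctx, 10, pinned)
```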
@@ -11,6 +11,9 @@ import (

func TestJobs(t *testing.T) {
	ds := CreateMySQLDS(t)
+	// call TruncateTables before the first test, because a DB migration may
+	// have created job entries.
+	TruncateTables(t, ds)

	cases := []struct {
		name string
@@ -30,7 +33,7 @@ func testQueueAndProcessJobs(t *testing.T, ds *Datastore) {
	ctx := context.Background()

	// no jobs yet
-	jobs, err := ds.GetQueuedJobs(ctx, 10)
+	jobs, err := ds.GetQueuedJobs(ctx, 10, time.Time{})
	require.NoError(t, err)
	require.Empty(t, jobs)

@@ -45,7 +48,7 @@ func testQueueAndProcessJobs(t *testing.T, ds *Datastore) {
	require.NotZero(t, j2.ID)

	// only j1 is returned
-	jobs, err = ds.GetQueuedJobs(ctx, 10)
+	jobs, err = ds.GetQueuedJobs(ctx, 10, time.Time{})
	require.NoError(t, err)
	require.Len(t, jobs, 1)
	require.Equal(t, j1.ID, jobs[0].ID)
@@ -58,7 +61,7 @@ func testQueueAndProcessJobs(t *testing.T, ds *Datastore) {
	require.NoError(t, err)

	// no jobs queued for now
-	jobs, err = ds.GetQueuedJobs(ctx, 10)
+	jobs, err = ds.GetQueuedJobs(ctx, 10, time.Time{})
	require.NoError(t, err)
	require.Empty(t, jobs)

@@ -68,7 +71,7 @@ func testQueueAndProcessJobs(t *testing.T, ds *Datastore) {
	require.NoError(t, err)

	// j2 is returned
-	jobs, err = ds.GetQueuedJobs(ctx, 10)
+	jobs, err = ds.GetQueuedJobs(ctx, 10, time.Time{})
	require.NoError(t, err)
	require.Len(t, jobs, 1)
	require.Equal(t, j2.ID, jobs[0].ID)
@@ -0,0 +1,64 @@
package tables

import (
	"database/sql"
	"encoding/json"
	"fmt"
	"time"
)

func init() {
	MigrationClient.AddMigration(Up_20240320145650, Down_20240320145650)
}

func Up_20240320145650(tx *sql.Tx) error {
	// This migration re-generates and re-registers with Apple the DEP
	// enrollment profile(s) so that await_device_configured is set to true.
	// We do this by doing the equivalent of:
	//
	//     worker.QueueMacosSetupAssistantJob(ctx, ds, logger,
	//         worker.MacosSetupAssistantUpdateAllProfiles, nil)
	//
	// but without calling that function, in case the code changes in the
	// future, breaking this migration. Instead we insert the job directly
	// into the database, and the worker will process it shortly after Fleet
	// restarts.

	const (
		jobName        = "macos_setup_assistant"
		taskName       = "update_all_profiles"
		jobStateQueued = "queued"
	)

	type macosSetupAssistantArgs struct {
		Task              string   `json:"task"`
		TeamID            *uint    `json:"team_id,omitempty"`
		HostSerialNumbers []string `json:"host_serial_numbers,omitempty"`
	}
	argsJSON, err := json.Marshal(macosSetupAssistantArgs{Task: taskName})
	if err != nil {
		return fmt.Errorf("failed to JSON marshal the job arguments: %w", err)
	}

	// hard-coded timestamps are used so that schema.sql is stable
	const query = `
INSERT INTO jobs (
	name,
	args,
	state,
	error,
	not_before,
	created_at,
	updated_at
)
VALUES (?, ?, ?, '', ?, ?, ?)
`
	ts := time.Date(2024, 3, 20, 0, 0, 0, 0, time.UTC)
	if _, err := tx.Exec(query, jobName, argsJSON, jobStateQueued, ts, ts, ts); err != nil {
		return fmt.Errorf("failed to insert worker job: %w", err)
	}
	return nil
}

func Down_20240320145650(tx *sql.Tx) error {
	return nil
}
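What makes the inserted row take effect: after Fleet restarts, `GetQueuedJobs` (changed earlier in this diff) returns it on the next worker run, because its hard-coded `not_before` timestamp is already in the past. A sketch of that pickup, assuming the datastore and worker shown elsewhere in this diff:

```go
// fragment: shortly after restart, the worker loop sees the migration's job
jobs, err := ds.GetQueuedJobs(ctx, 1, time.Time{}) // zero time => time.Now().UTC()
if err == nil && len(jobs) == 1 {
	// jobs[0].Name == "macos_setup_assistant" with task "update_all_profiles";
	// processing it re-registers every DEP enrollment profile with Apple,
	// now with await_device_configured forced to true.
}
```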
@@ -0,0 +1,52 @@
package tables

import (
	"encoding/json"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

func TestUp_20240320145650(t *testing.T) {
	db := applyUpToPrev(t)

	type macosSetupAssistantArgs struct {
		Task              string   `json:"task"`
		TeamID            *uint    `json:"team_id,omitempty"`
		HostSerialNumbers []string `json:"host_serial_numbers,omitempty"`
	}

	type job struct {
		ID        uint             `json:"id" db:"id"`
		CreatedAt time.Time        `json:"created_at" db:"created_at"`
		UpdatedAt *time.Time       `json:"updated_at" db:"updated_at"`
		Name      string           `json:"name" db:"name"`
		Args      *json.RawMessage `json:"args" db:"args"`
		State     string           `json:"state" db:"state"`
		Retries   int              `json:"retries" db:"retries"`
		Error     string           `json:"error" db:"error"`
		NotBefore time.Time        `json:"not_before" db:"not_before"`
	}

	var jobs []*job
	err := db.Select(&jobs, `SELECT id, name, args, state, retries, error, not_before FROM jobs`)
	require.NoError(t, err)
	require.Empty(t, jobs)

	applyNext(t, db)

	err = db.Select(&jobs, `SELECT id, name, args, state, retries, error, not_before FROM jobs`)
	require.NoError(t, err)
	require.Len(t, jobs, 1)

	require.Equal(t, "macos_setup_assistant", jobs[0].Name)
	require.Equal(t, 0, jobs[0].Retries)
	require.LessOrEqual(t, jobs[0].NotBefore, time.Now().UTC())
	require.NotNil(t, jobs[0].Args)

	var args macosSetupAssistantArgs
	err = json.Unmarshal(*jobs[0].Args, &args)
	require.NoError(t, err)
	require.Equal(t, "update_all_profiles", args.Task)
}
File diff suppressed because one or more lines are too long
@@ -844,8 +844,9 @@ type Datastore interface {
	// NewJob inserts a new job into the jobs table (queue).
	NewJob(ctx context.Context, job *Job) (*Job, error)

-	// GetQueuedJobs gets queued jobs from the jobs table (queue).
-	GetQueuedJobs(ctx context.Context, maxNumJobs int) ([]*Job, error)
+	// GetQueuedJobs gets queued jobs from the jobs table (queue) ready to be
+	// processed. If now is the zero time, the current time will be used.
+	GetQueuedJobs(ctx context.Context, maxNumJobs int, now time.Time) ([]*Job, error)

	// UpdateJob updates an existing job. Call this after processing a job.
	UpdateJob(ctx context.Context, id uint, job *Job) (*Job, error)
@@ -91,17 +91,16 @@ type DEPService struct {
// getDefaultProfile returns a godep.Profile with default values set.
func (d *DEPService) getDefaultProfile() *godep.Profile {
	return &godep.Profile{
-		ProfileName:           "FleetDM default enrollment profile",
-		AllowPairing:          true,
-		AutoAdvanceSetup:      false,
-		AwaitDeviceConfigured: false,
-		IsSupervised:          false,
-		IsMultiUser:           false,
-		IsMandatory:           false,
-		IsMDMRemovable:        true,
-		Language:              "en",
-		OrgMagic:              "1",
-		Region:                "US",
+		ProfileName:      "FleetDM default enrollment profile",
+		AllowPairing:     true,
+		AutoAdvanceSetup: false,
+		IsSupervised:     false,
+		IsMultiUser:      false,
+		IsMandatory:      false,
+		IsMDMRemovable:   true,
+		Language:         "en",
+		OrgMagic:         "1",
+		Region:           "US",
		SkipSetupItems: []string{
			"Accessibility",
			"Appearance",
@@ -207,6 +206,10 @@ func (d *DEPService) RegisterProfileWithAppleDEPServer(ctx context.Context, team
	// ensure `url` is the same as `configuration_web_url`, to not leak the URL
	// used to get a token without SSO enabled
	jsonProf.URL = jsonProf.ConfigurationWebURL
+	// always set await_device_configured to true - the device will be released
+	// either automatically by Fleet or manually by the user if
+	// enable_release_device_manually is true.
+	jsonProf.AwaitDeviceConfigured = true

	depClient := NewDEPClient(d.depStorage, d.ds, d.logger)
	res, err := depClient.DefineProfile(ctx, DEPName, &jsonProf)
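The effect of forcing the flag just before `DefineProfile` is visible in the JSON sent to Apple. A self-contained illustration using a local copy of the two relevant fields and tags from the `godep.Profile` struct shown later in this diff (not Fleet code):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// local copy of the relevant godep.Profile fields and their JSON tags
type profile struct {
	ProfileName           string `json:"profile_name"`
	AwaitDeviceConfigured bool   `json:"await_device_configured,omitempty"`
}

func main() {
	p := profile{ProfileName: "FleetDM default enrollment profile", AwaitDeviceConfigured: true}
	b, _ := json.Marshal(p)
	fmt.Println(string(b))
	// {"profile_name":"FleetDM default enrollment profile","await_device_configured":true}
}
```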
@@ -45,6 +45,7 @@ func TestDEPService(t *testing.T) {
			require.Contains(t, got.ConfigurationWebURL, serverURL+"api/mdm/apple/enroll?token=")
			got.URL = ""
			got.ConfigurationWebURL = ""
+			defaultProfile.AwaitDeviceConfigured = true // this is now always set to true
			require.Equal(t, defaultProfile, &got)
		default:
			require.Fail(t, "unexpected path: %s", r.URL.Path)
@@ -226,6 +226,24 @@ func (svc *MDMAppleCommander) AccountConfiguration(ctx context.Context, hostUUID
	return svc.EnqueueCommand(ctx, hostUUIDs, raw)
}

+func (svc *MDMAppleCommander) DeviceConfigured(ctx context.Context, hostUUID, cmdUUID string) error {
+	raw := fmt.Sprintf(`<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+	<key>Command</key>
+	<dict>
+		<key>RequestType</key>
+		<string>DeviceConfigured</string>
+	</dict>
+	<key>CommandUUID</key>
+	<string>%s</string>
+</dict>
+</plist>`, cmdUUID)
+
+	return svc.EnqueueCommand(ctx, []string{hostUUID}, raw)
+}
+
// EnqueueCommand takes care of enqueuing the commands and sending push
// notifications to the devices.
//
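The enqueue site for this command is not part of the hunks shown here; presumably the AppleMDM worker calls it once the post-enrollment commands and profiles are delivered. A hypothetical caller sketch, with names borrowed from elsewhere in this diff:

```go
// hypothetical fragment: release the device unless the admin opted to do it manually
if !appCfg.MDM.MacOSSetup.EnableReleaseDeviceManually.Value {
	cmdUUID := uuid.New().String()
	if err := commander.DeviceConfigured(ctx, host.UUID, cmdUUID); err != nil {
		return ctxerr.Wrap(ctx, err, "send DeviceConfigured to release host")
	}
}
```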
@@ -8,12 +8,14 @@ import (
// Profile corresponds to the Apple DEP API "Profile" structure.
// See https://developer.apple.com/documentation/devicemanagement/profile
type Profile struct {
-	ProfileName           string `json:"profile_name"`
-	URL                   string `json:"url"`
-	AllowPairing          bool   `json:"allow_pairing,omitempty"`
-	IsSupervised          bool   `json:"is_supervised,omitempty"`
-	IsMultiUser           bool   `json:"is_multi_user,omitempty"`
-	IsMandatory           bool   `json:"is_mandatory,omitempty"`
+	ProfileName  string `json:"profile_name"`
+	URL          string `json:"url"`
+	AllowPairing bool   `json:"allow_pairing,omitempty"`
+	IsSupervised bool   `json:"is_supervised,omitempty"`
+	IsMultiUser  bool   `json:"is_multi_user,omitempty"`
+	IsMandatory  bool   `json:"is_mandatory,omitempty"`
+	// AwaitDeviceConfigured should never be set in the profiles we store in the
+	// database - it is now always forced to true when registering with Apple.
	AwaitDeviceConfigured bool   `json:"await_device_configured,omitempty"`
	IsMDMRemovable        bool   `json:"is_mdm_removable"` // default true
	SupportPhoneNumber    string `json:"support_phone_number,omitempty"`
@@ -580,7 +580,7 @@ type SerialUpdateHostFunc func(ctx context.Context, host *fleet.Host) error

type NewJobFunc func(ctx context.Context, job *fleet.Job) (*fleet.Job, error)

-type GetQueuedJobsFunc func(ctx context.Context, maxNumJobs int) ([]*fleet.Job, error)
+type GetQueuedJobsFunc func(ctx context.Context, maxNumJobs int, now time.Time) ([]*fleet.Job, error)

type UpdateJobFunc func(ctx context.Context, id uint, job *fleet.Job) (*fleet.Job, error)

@@ -4089,11 +4089,11 @@ func (s *DataStore) NewJob(ctx context.Context, job *fleet.Job) (*fleet.Job, err
	return s.NewJobFunc(ctx, job)
}

-func (s *DataStore) GetQueuedJobs(ctx context.Context, maxNumJobs int) ([]*fleet.Job, error) {
+func (s *DataStore) GetQueuedJobs(ctx context.Context, maxNumJobs int, now time.Time) ([]*fleet.Job, error) {
	s.mu.Lock()
	s.GetQueuedJobsFuncInvoked = true
	s.mu.Unlock()
-	return s.GetQueuedJobsFunc(ctx, maxNumJobs)
+	return s.GetQueuedJobsFunc(ctx, maxNumJobs, now)
}

func (s *DataStore) UpdateJob(ctx context.Context, id uint, job *fleet.Job) (*fleet.Job, error) {
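With the regenerated mock, a test can stub the new signature and assert on the injected time. A hypothetical test fragment:

```go
// fragment: stub GetQueuedJobsFunc on the mock DataStore shown above
ds := new(DataStore)
ds.GetQueuedJobsFunc = func(ctx context.Context, maxNumJobs int, now time.Time) ([]*fleet.Job, error) {
	// e.g. assert that the code under test passes the zero time in production paths
	require.True(t, now.IsZero())
	return nil, nil
}
```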
@@ -630,7 +630,9 @@ func (svc *Service) ModifyAppConfig(ctx context.Context, p []byte, applyOpts fle
	mdmSSOSettingsChanged := oldAppConfig.MDM.EndUserAuthentication.SSOProviderSettings !=
		appConfig.MDM.EndUserAuthentication.SSOProviderSettings
	serverURLChanged := oldAppConfig.ServerSettings.ServerURL != appConfig.ServerSettings.ServerURL
-	if (mdmEnableEndUserAuthChanged || mdmSSOSettingsChanged || serverURLChanged) && license.IsPremium() {
+	mdmEnableReleaseDeviceChanged := oldAppConfig.MDM.MacOSSetup.EnableReleaseDeviceManually.Value !=
+		appConfig.MDM.MacOSSetup.EnableReleaseDeviceManually.Value
+	if (mdmEnableEndUserAuthChanged || mdmEnableReleaseDeviceChanged || mdmSSOSettingsChanged || serverURLChanged) && license.IsPremium() {
		if err := svc.EnterpriseOverrides.MDMAppleSyncDEPProfiles(ctx); err != nil {
			return nil, ctxerr.Wrap(ctx, err, "sync DEP profiles")
		}
1083 server/service/integration_mdm_dep_test.go (new file)
File diff suppressed because it is too large
@@ -116,29 +116,37 @@ func (s *integrationMDMTestSuite) SetupSuite() {
	scepStorage, err := s.ds.NewSCEPDepot(testCertPEM, testKeyPEM)
	require.NoError(s.T(), err)

+	pushLog := kitlog.NewJSONLogger(os.Stdout)
+	if os.Getenv("FLEET_INTEGRATION_TESTS_DISABLE_LOG") != "" {
+		pushLog = kitlog.NewNopLogger()
+	}
	pushFactory, pushProvider := newMockAPNSPushProviderFactory()
	mdmPushService := nanomdm_pushsvc.New(
		mdmStorage,
		mdmStorage,
		pushFactory,
-		NewNanoMDMLogger(kitlog.NewJSONLogger(os.Stdout)),
+		NewNanoMDMLogger(pushLog),
	)
	mdmCommander := apple_mdm.NewMDMAppleCommander(mdmStorage, mdmPushService)
	redisPool := redistest.SetupRedis(s.T(), "zz", false, false, false)
	s.withServer.lq = live_query_mock.New(s.T())

+	wlog := kitlog.NewJSONLogger(os.Stdout)
+	if os.Getenv("FLEET_INTEGRATION_TESTS_DISABLE_LOG") != "" {
+		wlog = kitlog.NewNopLogger()
+	}
	macosJob := &worker.MacosSetupAssistant{
		Datastore: s.ds,
-		Log:        kitlog.NewJSONLogger(os.Stdout),
-		DEPService: apple_mdm.NewDEPService(s.ds, depStorage, kitlog.NewJSONLogger(os.Stdout)),
-		DEPClient:  apple_mdm.NewDEPClient(depStorage, s.ds, kitlog.NewJSONLogger(os.Stdout)),
+		Log:        wlog,
+		DEPService: apple_mdm.NewDEPService(s.ds, depStorage, wlog),
+		DEPClient:  apple_mdm.NewDEPClient(depStorage, s.ds, wlog),
	}
	appleMDMJob := &worker.AppleMDM{
		Datastore: s.ds,
-		Log:       kitlog.NewJSONLogger(os.Stdout),
+		Log:       wlog,
		Commander: mdmCommander,
	}
-	workr := worker.NewWorker(s.ds, kitlog.NewJSONLogger(os.Stdout))
+	workr := worker.NewWorker(s.ds, wlog)
	workr.TestIgnoreUnknownJobs = true
	workr.Register(macosJob, appleMDMJob)
	s.worker = workr
@@ -146,6 +154,10 @@ func (s *integrationMDMTestSuite) SetupSuite() {
	var depSchedule *schedule.Schedule
	var integrationsSchedule *schedule.Schedule
	var profileSchedule *schedule.Schedule
+	cronLog := kitlog.NewJSONLogger(os.Stdout)
+	if os.Getenv("FLEET_INTEGRATION_TESTS_DISABLE_LOG") != "" {
+		cronLog = kitlog.NewNopLogger()
+	}
	config := TestServerOpts{
		License: &fleet.LicenseInfo{
			Tier: fleet.TierPremium,
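The same env-var check now appears three times in this file (`pushLog`, `wlog`, `cronLog`). A small helper would express it once; this is a hypothetical refactor, not part of the diff:

```go
// testLogger silences integration-test logging when
// FLEET_INTEGRATION_TESTS_DISABLE_LOG is set.
func testLogger() kitlog.Logger {
	if os.Getenv("FLEET_INTEGRATION_TESTS_DISABLE_LOG") != "" {
		return kitlog.NewNopLogger()
	}
	return kitlog.NewJSONLogger(os.Stdout)
}
```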
@@ -161,7 +173,7 @@ func (s *integrationMDMTestSuite) SetupSuite() {
		func(ctx context.Context, ds fleet.Datastore) fleet.NewCronScheduleFunc {
			return func() (fleet.CronSchedule, error) {
				const name = string(fleet.CronAppleMDMDEPProfileAssigner)
-				logger := kitlog.NewJSONLogger(os.Stdout)
+				logger := cronLog
				fleetSyncer := apple_mdm.NewDEPService(ds, depStorage, logger)
				depSchedule = schedule.New(
					ctx, name, s.T().Name(), 1*time.Hour, ds, ds,
@@ -181,7 +193,7 @@ func (s *integrationMDMTestSuite) SetupSuite() {
		func(ctx context.Context, ds fleet.Datastore) fleet.NewCronScheduleFunc {
			return func() (fleet.CronSchedule, error) {
				const name = string(fleet.CronMDMAppleProfileManager)
-				logger := kitlog.NewJSONLogger(os.Stdout)
+				logger := cronLog
				profileSchedule = schedule.New(
					ctx, name, s.T().Name(), 1*time.Hour, ds, ds,
					schedule.WithLogger(logger),
@@ -208,7 +220,7 @@ func (s *integrationMDMTestSuite) SetupSuite() {
		func(ctx context.Context, ds fleet.Datastore) fleet.NewCronScheduleFunc {
			return func() (fleet.CronSchedule, error) {
				const name = string(fleet.CronWorkerIntegrations)
-				logger := kitlog.NewJSONLogger(os.Stdout)
+				logger := cronLog
				integrationsSchedule = schedule.New(
					ctx, name, s.T().Name(), 1*time.Minute, ds, ds,
					schedule.WithLogger(logger),
@@ -288,6 +300,8 @@ func (s *integrationMDMTestSuite) TearDownTest() {
	appCfg.MDM.WindowsEnabledAndConfigured = true
	// ensure global disk encryption is disabled on exit
	appCfg.MDM.EnableDiskEncryption = optjson.SetBool(false)
+	// ensure enable release device manually is false
+	appCfg.MDM.MacOSSetup.EnableReleaseDeviceManually = optjson.SetBool(false)
	// ensure global Windows OS updates are always disabled for the next test
	appCfg.MDM.WindowsUpdates = fleet.WindowsUpdates{}
	err := s.ds.SaveAppConfig(ctx, &appCfg.AppConfig)
@@ -305,6 +319,10 @@ func (s *integrationMDMTestSuite) TearDownTest() {
		_, err := q.ExecContext(ctx, "DELETE FROM mdm_windows_configuration_profiles")
		return err
	})
+	mysql.ExecAdhocSQL(t, s.ds, func(q sqlx.ExtContext) error {
+		_, err := q.ExecContext(ctx, "DELETE FROM mdm_apple_bootstrap_packages")
+		return err
+	})

	// clear any pending worker job
	mysql.ExecAdhocSQL(t, s.ds, func(q sqlx.ExtContext) error {
@@ -1981,745 +1999,6 @@ func createWindowsHostThenEnrollMDM(ds fleet.Datastore, fleetServerURL string, t
	return host, mdmDevice
}

func (s *integrationMDMTestSuite) TestDEPProfileAssignment() {
	t := s.T()

	ctx := context.Background()
	devices := []godep.Device{
		{SerialNumber: uuid.New().String(), Model: "MacBook Pro", OS: "osx", OpType: "added"},
		{SerialNumber: uuid.New().String(), Model: "MacBook Mini", OS: "osx", OpType: "added"},
		{SerialNumber: uuid.New().String(), Model: "MacBook Mini", OS: "osx", OpType: ""},
		{SerialNumber: uuid.New().String(), Model: "MacBook Mini", OS: "osx", OpType: "modified"},
	}

	type profileAssignmentReq struct {
		ProfileUUID string   `json:"profile_uuid"`
		Devices     []string `json:"devices"`
	}
	profileAssignmentReqs := []profileAssignmentReq{}

	// add global profiles
	globalProfile := mobileconfigForTest("N1", "I1")
	s.Do("POST", "/api/v1/fleet/mdm/apple/profiles/batch", batchSetMDMAppleProfilesRequest{Profiles: [][]byte{globalProfile}}, http.StatusNoContent)

	checkPostEnrollmentCommands := func(mdmDevice *mdmtest.TestAppleMDMClient, shouldReceive bool) {
		// run the worker to process the DEP enroll request
		s.runWorker()
		// run the worker to assign configuration profiles
		s.awaitTriggerProfileSchedule(t)

		var fleetdCmd, installProfileCmd *micromdm.CommandPayload
		cmd, err := mdmDevice.Idle()
		require.NoError(t, err)
		for cmd != nil {
			if cmd.Command.RequestType == "InstallEnterpriseApplication" &&
				cmd.Command.InstallEnterpriseApplication.ManifestURL != nil &&
				strings.Contains(*cmd.Command.InstallEnterpriseApplication.ManifestURL, apple_mdm.FleetdPublicManifestURL) {
				fleetdCmd = cmd
			} else if cmd.Command.RequestType == "InstallProfile" {
				installProfileCmd = cmd
			}
			cmd, err = mdmDevice.Acknowledge(cmd.CommandUUID)
			require.NoError(t, err)
		}

		if shouldReceive {
			// received request to install fleetd
			require.NotNil(t, fleetdCmd, "host didn't get a command to install fleetd")
			require.NotNil(t, fleetdCmd.Command, "host didn't get a command to install fleetd")

			// received request to install the global configuration profile
			require.NotNil(t, installProfileCmd, "host didn't get a command to install profiles")
			require.NotNil(t, installProfileCmd.Command, "host didn't get a command to install profiles")
		} else {
			require.Nil(t, fleetdCmd, "host got a command to install fleetd")
			require.Nil(t, installProfileCmd, "host got a command to install profiles")
		}
	}

	checkAssignProfileRequests := func(serial string, profUUID *string) {
		require.NotEmpty(t, profileAssignmentReqs)
		require.Len(t, profileAssignmentReqs, 1)
		require.Len(t, profileAssignmentReqs[0].Devices, 1)
		require.Equal(t, serial, profileAssignmentReqs[0].Devices[0])
		if profUUID != nil {
			require.Equal(t, *profUUID, profileAssignmentReqs[0].ProfileUUID)
		}
	}

	type hostDEPRow struct {
		HostID                uint      `db:"host_id"`
		ProfileUUID           string    `db:"profile_uuid"`
		AssignProfileResponse string    `db:"assign_profile_response"`
		ResponseUpdatedAt     time.Time `db:"response_updated_at"`
		RetryJobID            uint      `db:"retry_job_id"`
	}
	checkHostDEPAssignProfileResponses := func(deviceSerials []string, expectedProfileUUID string, expectedStatus fleet.DEPAssignProfileResponseStatus) map[string]hostDEPRow {
		bySerial := make(map[string]hostDEPRow, len(deviceSerials))
		for _, deviceSerial := range deviceSerials {
			mysql.ExecAdhocSQL(t, s.ds, func(q sqlx.ExtContext) error {
				var dest hostDEPRow
				err := sqlx.GetContext(ctx, q, &dest, "SELECT host_id, assign_profile_response, profile_uuid, response_updated_at, retry_job_id FROM host_dep_assignments WHERE profile_uuid = ? AND host_id = (SELECT id FROM hosts WHERE hardware_serial = ?)", expectedProfileUUID, deviceSerial)
				require.NoError(t, err)
				require.Equal(t, string(expectedStatus), dest.AssignProfileResponse)
				bySerial[deviceSerial] = dest
				return nil
			})
		}
		return bySerial
	}

	checkPendingMacOSSetupAssistantJob := func(expectedTask string, expectedTeamID *uint, expectedSerials []string, expectedJobID uint) {
		pending, err := s.ds.GetQueuedJobs(context.Background(), 1)
		require.NoError(t, err)
		require.Len(t, pending, 1)
		require.Equal(t, "macos_setup_assistant", pending[0].Name)
		require.NotNil(t, pending[0].Args)
		var gotArgs struct {
			Task              string   `json:"task"`
			TeamID            *uint    `json:"team_id,omitempty"`
			HostSerialNumbers []string `json:"host_serial_numbers,omitempty"`
		}
		require.NoError(t, json.Unmarshal(*pending[0].Args, &gotArgs))
		require.Equal(t, expectedTask, gotArgs.Task)
		if expectedTeamID != nil {
			require.NotNil(t, gotArgs.TeamID)
			require.Equal(t, *expectedTeamID, *gotArgs.TeamID)
		} else {
			require.Nil(t, gotArgs.TeamID)
		}
		require.Equal(t, expectedSerials, gotArgs.HostSerialNumbers)

		if expectedJobID != 0 {
			require.Equal(t, expectedJobID, pending[0].ID)
		}
	}

	checkNoJobsPending := func() {
		pending, err := s.ds.GetQueuedJobs(context.Background(), 1)
		require.NoError(t, err)
		require.Empty(t, pending)
	}

	expectNoJobID := ptr.Uint(0) // used when we expect no retry job
	checkHostCooldown := func(serial, profUUID string, status fleet.DEPAssignProfileResponseStatus, expectUpdatedAt *time.Time, expectRetryJobID *uint) hostDEPRow {
		bySerial := checkHostDEPAssignProfileResponses([]string{serial}, profUUID, status)
		d, ok := bySerial[serial]
		require.True(t, ok)
		if expectUpdatedAt != nil {
			require.Equal(t, *expectUpdatedAt, d.ResponseUpdatedAt)
		}
		if expectRetryJobID != nil {
			require.Equal(t, *expectRetryJobID, d.RetryJobID)
		}
		return d
	}

	checkListHostDEPError := func(serial string, expectStatus string, expectError bool) *fleet.HostResponse {
		listHostsRes := listHostsResponse{}
		s.DoJSON("GET", fmt.Sprintf("/api/latest/fleet/hosts?query=%s", serial), nil, http.StatusOK, &listHostsRes)
		require.Len(t, listHostsRes.Hosts, 1)
		require.Equal(t, serial, listHostsRes.Hosts[0].HardwareSerial)
		require.Equal(t, expectStatus, *listHostsRes.Hosts[0].MDM.EnrollmentStatus)
		require.Equal(t, expectError, listHostsRes.Hosts[0].MDM.DEPProfileError)

		return &listHostsRes.Hosts[0]
	}

	setAssignProfileResponseUpdatedAt := func(serial string, updatedAt time.Time) {
		mysql.ExecAdhocSQL(t, s.ds, func(q sqlx.ExtContext) error {
			_, err := q.ExecContext(ctx, `UPDATE host_dep_assignments SET response_updated_at = ? WHERE host_id = (SELECT id FROM hosts WHERE hardware_serial = ?)`, updatedAt, serial)
			return err
		})
	}

	expectAssignProfileResponseFailed := ""        // set to a device serial when testing the failed profile assignment flow
	expectAssignProfileResponseNotAccessible := "" // set to a device serial when testing the not accessible profile assignment flow
	s.mockDEPResponse(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		encoder := json.NewEncoder(w)
		switch r.URL.Path {
		case "/session":
			err := encoder.Encode(map[string]string{"auth_session_token": "xyz"})
			require.NoError(t, err)
		case "/profile":
			err := encoder.Encode(godep.ProfileResponse{ProfileUUID: uuid.New().String()})
			require.NoError(t, err)
		case "/server/devices":
			// This endpoint is used to get an initial list of
			// devices; return a single device.
			err := encoder.Encode(godep.DeviceResponse{Devices: devices[:1]})
			require.NoError(t, err)
		case "/devices/sync":
			// This endpoint is polled over time to sync devices from
			// ABM; send a repeated serial and a new one.
			err := encoder.Encode(godep.DeviceResponse{Devices: devices, Cursor: "foo"})
			require.NoError(t, err)
		case "/profile/devices":
			b, err := io.ReadAll(r.Body)
			require.NoError(t, err)
			var prof profileAssignmentReq
			require.NoError(t, json.Unmarshal(b, &prof))
			profileAssignmentReqs = append(profileAssignmentReqs, prof)
			var resp godep.ProfileResponse
			resp.ProfileUUID = prof.ProfileUUID
			resp.Devices = make(map[string]string, len(prof.Devices))
			for _, device := range prof.Devices {
				switch device {
				case expectAssignProfileResponseNotAccessible:
					resp.Devices[device] = string(fleet.DEPAssignProfileResponseNotAccessible)
				case expectAssignProfileResponseFailed:
					resp.Devices[device] = string(fleet.DEPAssignProfileResponseFailed)
				default:
					resp.Devices[device] = string(fleet.DEPAssignProfileResponseSuccess)
				}
			}
			err = encoder.Encode(resp)
			require.NoError(t, err)
		default:
			_, _ = w.Write([]byte(`{}`))
		}
	}))

	// query all hosts
	listHostsRes := listHostsResponse{}
	s.DoJSON("GET", "/api/latest/fleet/hosts", nil, http.StatusOK, &listHostsRes)
	require.Empty(t, listHostsRes.Hosts)

	// trigger a profile sync
	s.runDEPSchedule()

	// all hosts should be returned from the hosts endpoint
	listHostsRes = listHostsResponse{}
	s.DoJSON("GET", "/api/latest/fleet/hosts", nil, http.StatusOK, &listHostsRes)
	require.Len(t, listHostsRes.Hosts, len(devices))
	var wantSerials []string
	var gotSerials []string
	for i, device := range devices {
		wantSerials = append(wantSerials, device.SerialNumber)
		gotSerials = append(gotSerials, listHostsRes.Hosts[i].HardwareSerial)
		// entries for all hosts should be created in the host_dep_assignments table
		_, err := s.ds.GetHostDEPAssignment(ctx, listHostsRes.Hosts[i].ID)
		require.NoError(t, err)
	}
	require.ElementsMatch(t, wantSerials, gotSerials)
	// called two times:
	// - once when we get the initial list of devices (/server/devices)
	// - once when we do the device sync (/devices/sync)
	require.Len(t, profileAssignmentReqs, 2)
	require.Len(t, profileAssignmentReqs[0].Devices, 1)
	checkHostDEPAssignProfileResponses(profileAssignmentReqs[0].Devices, profileAssignmentReqs[0].ProfileUUID, fleet.DEPAssignProfileResponseSuccess)
	require.Len(t, profileAssignmentReqs[1].Devices, len(devices))
	checkHostDEPAssignProfileResponses(profileAssignmentReqs[1].Devices, profileAssignmentReqs[1].ProfileUUID, fleet.DEPAssignProfileResponseSuccess)
	// record the default profile to be used in other tests
	defaultProfileUUID := profileAssignmentReqs[1].ProfileUUID

	// create a new host
	nonDEPHost := createHostAndDeviceToken(t, s.ds, "not-dep")
	listHostsRes = listHostsResponse{}
	s.DoJSON("GET", "/api/latest/fleet/hosts", nil, http.StatusOK, &listHostsRes)
	require.Len(t, listHostsRes.Hosts, len(devices)+1)

	// filtering by MDM status works
	listHostsRes = listHostsResponse{}
	s.DoJSON("GET", "/api/latest/fleet/hosts?mdm_enrollment_status=pending", nil, http.StatusOK, &listHostsRes)
	require.Len(t, listHostsRes.Hosts, len(devices))

	// searching by display name works
	listHostsRes = listHostsResponse{}
	s.DoJSON("GET", fmt.Sprintf("/api/latest/fleet/hosts?query=%s", url.QueryEscape("MacBook Mini")), nil, http.StatusOK, &listHostsRes)
	require.Len(t, listHostsRes.Hosts, 3)
	for _, host := range listHostsRes.Hosts {
		require.Equal(t, "MacBook Mini", host.HardwareModel)
		require.Equal(t, host.DisplayName, fmt.Sprintf("MacBook Mini (%s)", host.HardwareSerial))
	}

	s.pushProvider.PushFunc = func(pushes []*mdm.Push) (map[string]*push.Response, error) {
		return map[string]*push.Response{}, nil
	}

	// enroll one of the hosts
	depURLToken := loadEnrollmentProfileDEPToken(t, s.ds)
	mdmDevice := mdmtest.NewTestMDMClientAppleDEP(s.server.URL, depURLToken)
	mdmDevice.SerialNumber = devices[0].SerialNumber
	err := mdmDevice.Enroll()
	require.NoError(t, err)

	// make sure the host gets post enrollment requests
	checkPostEnrollmentCommands(mdmDevice, true)

	// only one shows up as pending
	listHostsRes = listHostsResponse{}
	s.DoJSON("GET", "/api/latest/fleet/hosts?mdm_enrollment_status=pending", nil, http.StatusOK, &listHostsRes)
	require.Len(t, listHostsRes.Hosts, len(devices)-1)

	activities := listActivitiesResponse{}
	s.DoJSON("GET", "/api/latest/fleet/activities", nil, http.StatusOK, &activities, "order_key", "created_at")
	found := false
	for _, activity := range activities.Activities {
		if activity.Type == "mdm_enrolled" &&
			strings.Contains(string(*activity.Details), devices[0].SerialNumber) {
			found = true
			require.Nil(t, activity.ActorID)
			require.Nil(t, activity.ActorFullName)
			require.JSONEq(
				t,
				fmt.Sprintf(
					`{"host_serial": "%s", "host_display_name": "%s (%s)", "installed_from_dep": true, "mdm_platform": "apple"}`,
					devices[0].SerialNumber, devices[0].Model, devices[0].SerialNumber,
				),
				string(*activity.Details),
			)
		}
	}
	require.True(t, found)

	// add devices[1].SerialNumber to a team
	teamName := t.Name() + "team1"
	team := &fleet.Team{
		Name:        teamName,
		Description: "desc team1",
	}
	var createTeamResp teamResponse
	s.DoJSON("POST", "/api/latest/fleet/teams", team, http.StatusOK, &createTeamResp)
	require.NotZero(t, createTeamResp.Team.ID)
	team = createTeamResp.Team
	for _, h := range listHostsRes.Hosts {
		if h.HardwareSerial == devices[1].SerialNumber {
			err = s.ds.AddHostsToTeam(ctx, &team.ID, []uint{h.ID})
			require.NoError(t, err)
		}
	}

	// modify the response and trigger another sync to include:
	//
	// 1. A repeated device with "added"
	// 2. A repeated device with "modified"
	// 3. A device with "deleted"
	// 4. A new device
	deletedSerial := devices[2].SerialNumber
	addedSerial := uuid.New().String()
	devices = []godep.Device{
		{SerialNumber: devices[0].SerialNumber, Model: "MacBook Pro", OS: "osx", OpType: "added"},
		{SerialNumber: devices[1].SerialNumber, Model: "MacBook Mini", OS: "osx", OpType: "modified"},
		{SerialNumber: deletedSerial, Model: "MacBook Mini", OS: "osx", OpType: "deleted"},
		{SerialNumber: addedSerial, Model: "MacBook Mini", OS: "osx", OpType: "added"},
	}
	profileAssignmentReqs = []profileAssignmentReq{}
	s.runDEPSchedule()

	// all hosts should be returned from the hosts endpoint
	listHostsRes = listHostsResponse{}
	s.DoJSON("GET", "/api/latest/fleet/hosts", nil, http.StatusOK, &listHostsRes)
	// all previous devices + the manually added host + the new `addedSerial`
	wantSerials = append(wantSerials, devices[3].SerialNumber, nonDEPHost.HardwareSerial)
	require.Len(t, listHostsRes.Hosts, len(wantSerials))
	gotSerials = []string{}
	var deletedHostID uint
	var addedHostID uint
	var mdmDeviceID uint
	for _, device := range listHostsRes.Hosts {
		gotSerials = append(gotSerials, device.HardwareSerial)
		switch device.HardwareSerial {
		case deletedSerial:
			deletedHostID = device.ID
		case addedSerial:
			addedHostID = device.ID
		case mdmDevice.SerialNumber:
			mdmDeviceID = device.ID
		}
	}
	require.ElementsMatch(t, wantSerials, gotSerials)
	require.Len(t, profileAssignmentReqs, 3)

	// first request to get a list of profiles
	// TODO: seems like we're doing this request on each loop?
	require.Len(t, profileAssignmentReqs[0].Devices, 1)
	require.Equal(t, devices[0].SerialNumber, profileAssignmentReqs[0].Devices[0])
	checkHostDEPAssignProfileResponses(profileAssignmentReqs[0].Devices, profileAssignmentReqs[0].ProfileUUID, fleet.DEPAssignProfileResponseSuccess)

	// profileAssignmentReqs[1] and [2] can be in any order
	ix2Devices, ix1Device := 1, 2
	if len(profileAssignmentReqs[1].Devices) == 1 {
		ix2Devices, ix1Device = ix1Device, ix2Devices
	}

	// - existing device with "added"
	// - new device with "added"
	require.Len(t, profileAssignmentReqs[ix2Devices].Devices, 2, "%#+v", profileAssignmentReqs)
	require.ElementsMatch(t, []string{devices[0].SerialNumber, addedSerial}, profileAssignmentReqs[ix2Devices].Devices)
	checkHostDEPAssignProfileResponses(profileAssignmentReqs[ix2Devices].Devices, profileAssignmentReqs[ix2Devices].ProfileUUID, fleet.DEPAssignProfileResponseSuccess)

	// - existing device with "modified" and a different team (thus a different profile request)
	require.Len(t, profileAssignmentReqs[ix1Device].Devices, 1)
	require.Equal(t, devices[1].SerialNumber, profileAssignmentReqs[ix1Device].Devices[0])
	checkHostDEPAssignProfileResponses(profileAssignmentReqs[ix1Device].Devices, profileAssignmentReqs[ix1Device].ProfileUUID, fleet.DEPAssignProfileResponseSuccess)

	// entries exist for all hosts except for the one with OpType = "deleted"
	assignment, err := s.ds.GetHostDEPAssignment(ctx, deletedHostID)
	require.NoError(t, err)
	require.NotZero(t, assignment.DeletedAt)

	_, err = s.ds.GetHostDEPAssignment(ctx, addedHostID)
	require.NoError(t, err)

	// send a TokenUpdate command; it shouldn't re-send the post-enrollment commands
	err = mdmDevice.TokenUpdate()
	require.NoError(t, err)
	checkPostEnrollmentCommands(mdmDevice, false)

	// enroll the device again, it should get the post-enrollment commands
	err = mdmDevice.Enroll()
	require.NoError(t, err)
	checkPostEnrollmentCommands(mdmDevice, true)

	// delete the device from Fleet
	var delResp deleteHostResponse
	s.DoJSON("DELETE", fmt.Sprintf("/api/latest/fleet/hosts/%d", mdmDeviceID), nil, http.StatusOK, &delResp)

	// the device comes back as pending
	listHostsRes = listHostsResponse{}
	s.DoJSON("GET", fmt.Sprintf("/api/latest/fleet/hosts?query=%s", mdmDevice.UUID), nil, http.StatusOK, &listHostsRes)
	require.Len(t, listHostsRes.Hosts, 1)
	require.Equal(t, mdmDevice.SerialNumber, listHostsRes.Hosts[0].HardwareSerial)

	// we assign a DEP profile to the device
	profileAssignmentReqs = []profileAssignmentReq{}
	s.runWorker()
	require.Equal(t, mdmDevice.SerialNumber, profileAssignmentReqs[0].Devices[0])
	checkHostDEPAssignProfileResponses(profileAssignmentReqs[0].Devices, profileAssignmentReqs[0].ProfileUUID, fleet.DEPAssignProfileResponseSuccess)

	// it should get the post-enrollment commands
	require.NoError(t, mdmDevice.Enroll())
	checkPostEnrollmentCommands(mdmDevice, true)

	// delete all MDM info
	mysql.ExecAdhocSQL(t, s.ds, func(q sqlx.ExtContext) error {
		_, err := q.ExecContext(ctx, `DELETE FROM host_mdm WHERE host_id = ?`, listHostsRes.Hosts[0].ID)
		return err
	})

	// it should still get the post-enrollment commands
	require.NoError(t, mdmDevice.Enroll())
	checkPostEnrollmentCommands(mdmDevice, true)

	// The user unenrolls from Fleet (e.g. was DEP enrolled but with `is_mdm_removable: true`
	// so the user removes the enrollment profile).
	err = mdmDevice.Checkout()
	require.NoError(t, err)

	// Simulate a refetch where we clean up the MDM data since the host is not enrolled anymore
	mysql.ExecAdhocSQL(t, s.ds, func(q sqlx.ExtContext) error {
		_, err := q.ExecContext(ctx, `DELETE FROM host_mdm WHERE host_id = ?`, mdmDeviceID)
		return err
	})

	// Simulate fleetd re-enrolling automatically.
	err = mdmDevice.Enroll()
	require.NoError(t, err)

	// The last activity should have `installed_from_dep=true`.
	s.lastActivityMatches(
		"mdm_enrolled",
		fmt.Sprintf(
			`{"host_serial": "%s", "host_display_name": "%s (%s)", "installed_from_dep": true, "mdm_platform": "apple"}`,
			mdmDevice.SerialNumber, mdmDevice.Model, mdmDevice.SerialNumber,
		),
		0,
	)

	// enroll a host into Fleet
	eHost, err := s.ds.NewHost(context.Background(), &fleet.Host{
		ID:             1,
		OsqueryHostID:  ptr.String("Desktop-ABCQWE"),
		NodeKey:        ptr.String("Desktop-ABCQWE"),
		UUID:           uuid.New().String(),
		Hostname:       fmt.Sprintf("%sfoo.local", s.T().Name()),
		Platform:       "darwin",
		HardwareSerial: uuid.New().String(),
	})
	require.NoError(t, err)

	// on team transfer, we don't assign a DEP profile to the device
	s.Do("POST", "/api/v1/fleet/hosts/transfer",
		addHostsToTeamRequest{TeamID: &team.ID, HostIDs: []uint{eHost.ID}}, http.StatusOK)
	profileAssignmentReqs = []profileAssignmentReq{}
	s.runWorker()
	require.Empty(t, profileAssignmentReqs)

	// assign the host in ABM
	devices = []godep.Device{
		{SerialNumber: eHost.HardwareSerial, Model: "MacBook Pro", OS: "osx", OpType: "modified"},
	}
	profileAssignmentReqs = []profileAssignmentReq{}
	s.runDEPSchedule()
	require.NotEmpty(t, profileAssignmentReqs)
	require.Equal(t, eHost.HardwareSerial, profileAssignmentReqs[0].Devices[0])
	checkHostDEPAssignProfileResponses(profileAssignmentReqs[0].Devices, profileAssignmentReqs[0].ProfileUUID, fleet.DEPAssignProfileResponseSuccess)

	// report MDM info via osquery
	require.NoError(t, s.ds.SetOrUpdateMDMData(ctx, eHost.ID, false, true, s.server.URL, true, fleet.WellKnownMDMFleet, ""))
	checkListHostDEPError(eHost.HardwareSerial, "On (automatic)", false)

	// transfer to "no team", we assign a DEP profile to the device
	profileAssignmentReqs = []profileAssignmentReq{}
	s.Do("POST", "/api/v1/fleet/hosts/transfer",
		addHostsToTeamRequest{TeamID: nil, HostIDs: []uint{eHost.ID}}, http.StatusOK)
	s.runWorker()
	require.NotEmpty(t, profileAssignmentReqs)
	require.Equal(t, eHost.HardwareSerial, profileAssignmentReqs[0].Devices[0])
	checkHostDEPAssignProfileResponses(profileAssignmentReqs[0].Devices, profileAssignmentReqs[0].ProfileUUID, fleet.DEPAssignProfileResponseSuccess)
	checkListHostDEPError(eHost.HardwareSerial, "On (automatic)", false)

	// transfer back to the team, we assign a DEP profile to the device again
	s.Do("POST", "/api/v1/fleet/hosts/transfer",
		addHostsToTeamRequest{TeamID: &team.ID, HostIDs: []uint{eHost.ID}}, http.StatusOK)
	profileAssignmentReqs = []profileAssignmentReq{}
	s.runWorker()
	require.NotEmpty(t, profileAssignmentReqs)
	require.Equal(t, eHost.HardwareSerial, profileAssignmentReqs[0].Devices[0])
	checkHostDEPAssignProfileResponses(profileAssignmentReqs[0].Devices, profileAssignmentReqs[0].ProfileUUID, fleet.DEPAssignProfileResponseSuccess)
	checkListHostDEPError(eHost.HardwareSerial, "On (automatic)", false)

	// transfer to "no team", but simulate a failed profile assignment
	expectAssignProfileResponseFailed = eHost.HardwareSerial
	profileAssignmentReqs = []profileAssignmentReq{}
	s.Do("POST", "/api/v1/fleet/hosts/transfer",
		addHostsToTeamRequest{TeamID: nil, HostIDs: []uint{eHost.ID}}, http.StatusOK)
	checkPendingMacOSSetupAssistantJob("hosts_transferred", nil, []string{eHost.HardwareSerial}, 0)

	s.runIntegrationsSchedule()
	checkAssignProfileRequests(eHost.HardwareSerial, nil)
	profUUID := profileAssignmentReqs[0].ProfileUUID
	d := checkHostCooldown(eHost.HardwareSerial, profUUID, fleet.DEPAssignProfileResponseFailed, nil, expectNoJobID)
	require.NotZero(t, d.ResponseUpdatedAt)
	failedAt := d.ResponseUpdatedAt
	checkNoJobsPending()
	// list hosts shows dep profile error
	checkListHostDEPError(eHost.HardwareSerial, "On (automatic)", true)

	// run the integrations schedule during the cooldown period
	profileAssignmentReqs = []profileAssignmentReq{}
	s.runIntegrationsSchedule()
	require.Empty(t, profileAssignmentReqs) // no new request during cooldown
	checkHostCooldown(eHost.HardwareSerial, profUUID, fleet.DEPAssignProfileResponseFailed, &failedAt, expectNoJobID) // no change
	checkNoJobsPending()

	// create a new team
	var tmResp teamResponse
	s.DoJSON("POST", "/api/latest/fleet/teams", &fleet.Team{
		Name:        t.Name() + "dummy",
		Description: "desc dummy",
	}, http.StatusOK, &tmResp)
	require.NotZero(t, createTeamResp.Team.ID)
	dummyTeam := tmResp.Team
	s.Do("POST", "/api/v1/fleet/hosts/transfer",
		addHostsToTeamRequest{TeamID: &dummyTeam.ID, HostIDs: []uint{eHost.ID}}, http.StatusOK)
	checkPendingMacOSSetupAssistantJob("hosts_transferred", &dummyTeam.ID, []string{eHost.HardwareSerial}, 0)

	// expect no assign profile request during cooldown
	profileAssignmentReqs = []profileAssignmentReq{}
	s.runIntegrationsSchedule()
	require.Empty(t, profileAssignmentReqs) // screened for cooldown
	checkHostCooldown(eHost.HardwareSerial, profUUID, fleet.DEPAssignProfileResponseFailed, &failedAt, expectNoJobID) // no change
	checkNoJobsPending()

	// cooldown hosts are screened from update profile jobs that would assign profiles
	_, err = worker.QueueMacosSetupAssistantJob(ctx, s.ds, kitlog.NewNopLogger(), worker.MacosSetupAssistantUpdateProfile, &dummyTeam.ID, eHost.HardwareSerial)
	require.NoError(t, err)
	checkPendingMacOSSetupAssistantJob("update_profile", &dummyTeam.ID, []string{eHost.HardwareSerial}, 0)
	s.runIntegrationsSchedule()
	require.Empty(t, profileAssignmentReqs) // screened for cooldown
	checkHostCooldown(eHost.HardwareSerial, profUUID, fleet.DEPAssignProfileResponseFailed, &failedAt, expectNoJobID) // no change
	checkNoJobsPending()

	// cooldown hosts are screened from delete profile jobs that would assign profiles
	_, err = worker.QueueMacosSetupAssistantJob(ctx, s.ds, kitlog.NewNopLogger(), worker.MacosSetupAssistantProfileDeleted, &dummyTeam.ID, eHost.HardwareSerial)
	require.NoError(t, err)
	checkPendingMacOSSetupAssistantJob("profile_deleted", &dummyTeam.ID, []string{eHost.HardwareSerial}, 0)
	s.runIntegrationsSchedule()
	require.Empty(t, profileAssignmentReqs) // screened for cooldown
	checkHostCooldown(eHost.HardwareSerial, profUUID, fleet.DEPAssignProfileResponseFailed, &failedAt, expectNoJobID) // no change
	checkNoJobsPending()

	// TODO: restore this test when the FIXME on DeleteTeam is addressed
	// s.Do("DELETE", fmt.Sprintf("/api/v1/fleet/teams/%d", dummyTeam.ID), nil, http.StatusOK)
	// checkPendingMacOSSetupAssistantJob("team_deleted", nil, []string{eHost.HardwareSerial}, 0)
	// s.runIntegrationsSchedule()
	// require.Empty(t, profileAssignmentReqs) // screened for cooldown
	// bySerial = checkHostDEPAssignProfileResponses([]string{eHost.HardwareSerial}, profUUID, fleet.DEPAssignProfileResponseFailed)
	// d, ok = bySerial[eHost.HardwareSerial]
	// require.True(t, ok)
	// require.Equal(t, failedAt, d.ResponseUpdatedAt)
	// require.Zero(t, d.RetryJobID) // cooling down so no retry job
	// checkNoJobsPending()

	// transfer back to no team, expect no assign profile request during cooldown
	s.Do("POST", "/api/v1/fleet/hosts/transfer",
		addHostsToTeamRequest{TeamID: nil, HostIDs: []uint{eHost.ID}}, http.StatusOK)
	checkPendingMacOSSetupAssistantJob("hosts_transferred", nil, []string{eHost.HardwareSerial}, 0)
	profileAssignmentReqs = []profileAssignmentReq{}
	s.runIntegrationsSchedule()
	require.Empty(t, profileAssignmentReqs) // screened for cooldown
	checkHostCooldown(eHost.HardwareSerial, profUUID, fleet.DEPAssignProfileResponseFailed, &failedAt, expectNoJobID) // no change
	checkNoJobsPending()

// simulate expired cooldown
|
||||
failedAt = failedAt.Add(-2 * time.Hour)
setAssignProfileResponseUpdatedAt(eHost.HardwareSerial, failedAt)
profileAssignmentReqs = []profileAssignmentReq{}
s.runIntegrationsSchedule()
require.Empty(t, profileAssignmentReqs) // assign profile request will be made when the retry job is processed on the next worker run
d = checkHostCooldown(eHost.HardwareSerial, profUUID, fleet.DEPAssignProfileResponseFailed, &failedAt, nil)
require.NotZero(t, d.RetryJobID) // retry job created
jobID := d.RetryJobID
checkPendingMacOSSetupAssistantJob("hosts_cooldown", nil, []string{eHost.HardwareSerial}, jobID)

// running the DEP schedule should not trigger a profile assignment request when the retry job is pending
profileAssignmentReqs = []profileAssignmentReq{}
s.runDEPSchedule()
require.Empty(t, profileAssignmentReqs) // assign profile request will be made when the retry job is processed on the next worker run
checkHostCooldown(eHost.HardwareSerial, profUUID, fleet.DEPAssignProfileResponseFailed, &failedAt, &jobID) // no change
checkPendingMacOSSetupAssistantJob("hosts_cooldown", nil, []string{eHost.HardwareSerial}, jobID)
checkListHostDEPError(eHost.HardwareSerial, "On (automatic)", true)

// run the integrations schedule and expect success
expectAssignProfileResponseFailed = ""
profileAssignmentReqs = []profileAssignmentReq{}
s.runIntegrationsSchedule()
checkAssignProfileRequests(eHost.HardwareSerial, &profUUID)
d = checkHostCooldown(eHost.HardwareSerial, profUUID, fleet.DEPAssignProfileResponseSuccess, nil, expectNoJobID) // retry job cleared
require.True(t, d.ResponseUpdatedAt.After(failedAt))
succeededAt := d.ResponseUpdatedAt
checkNoJobsPending()
checkListHostDEPError(eHost.HardwareSerial, "On (automatic)", false)

// run the integrations schedule and expect no changes
profileAssignmentReqs = []profileAssignmentReq{}
s.runIntegrationsSchedule()
require.Empty(t, profileAssignmentReqs)
checkHostCooldown(eHost.HardwareSerial, profUUID, fleet.DEPAssignProfileResponseSuccess, &succeededAt, expectNoJobID) // no change
checkNoJobsPending()

// ingest new device via DEP but the profile assignment fails
serial := uuid.NewString()
devices = []godep.Device{
{SerialNumber: serial, Model: "MacBook Pro", OS: "osx", OpType: "added"},
}
expectAssignProfileResponseFailed = serial
profileAssignmentReqs = []profileAssignmentReq{}
s.runDEPSchedule()
checkAssignProfileRequests(serial, nil)
profUUID = profileAssignmentReqs[0].ProfileUUID
d = checkHostCooldown(serial, profUUID, fleet.DEPAssignProfileResponseFailed, nil, expectNoJobID)
require.NotZero(t, d.ResponseUpdatedAt)
failedAt = d.ResponseUpdatedAt
checkNoJobsPending()
h := checkListHostDEPError(serial, "Pending", true) // list hosts shows device pending and dep profile error

// transfer to team, no profile assignment request is made during the cooldown period
profileAssignmentReqs = []profileAssignmentReq{}
s.Do("POST", "/api/v1/fleet/hosts/transfer",
addHostsToTeamRequest{TeamID: &team.ID, HostIDs: []uint{h.ID}}, http.StatusOK)
checkPendingMacOSSetupAssistantJob("hosts_transferred", &team.ID, []string{serial}, 0)
s.runIntegrationsSchedule()
require.Empty(t, profileAssignmentReqs) // screened by cooldown
checkHostCooldown(serial, profUUID, fleet.DEPAssignProfileResponseFailed, &failedAt, expectNoJobID) // no change
checkNoJobsPending()

// run the integrations schedule and expect no changes
profileAssignmentReqs = []profileAssignmentReq{}
s.runIntegrationsSchedule()
require.Empty(t, profileAssignmentReqs)
checkHostCooldown(serial, profUUID, fleet.DEPAssignProfileResponseFailed, &failedAt, expectNoJobID) // no change
checkNoJobsPending()

// simulate expired cooldown
failedAt = failedAt.Add(-2 * time.Hour)
setAssignProfileResponseUpdatedAt(serial, failedAt)
profileAssignmentReqs = []profileAssignmentReq{}
s.runIntegrationsSchedule()
require.Empty(t, profileAssignmentReqs) // assign profile request will be made when the retry job is processed on the next worker run
d = checkHostCooldown(serial, profUUID, fleet.DEPAssignProfileResponseFailed, &failedAt, nil)
require.NotZero(t, d.RetryJobID) // retry job created
jobID = d.RetryJobID
checkPendingMacOSSetupAssistantJob("hosts_cooldown", &team.ID, []string{serial}, jobID)

// run the integrations schedule and expect success
expectAssignProfileResponseFailed = ""
profileAssignmentReqs = []profileAssignmentReq{}
s.runIntegrationsSchedule()
checkAssignProfileRequests(serial, nil)
require.NotEqual(t, profUUID, profileAssignmentReqs[0].ProfileUUID) // retry job will use the current team profile instead
profUUID = profileAssignmentReqs[0].ProfileUUID
d = checkHostCooldown(serial, profUUID, fleet.DEPAssignProfileResponseSuccess, nil, expectNoJobID) // retry job cleared
require.True(t, d.ResponseUpdatedAt.After(failedAt))
checkNoJobsPending()
// list hosts shows pending (because the MDM detail query hasn't been reported) but the dep profile
// error has been cleared
checkListHostDEPError(serial, "Pending", false)

// ingest another device via DEP but the profile assignment is not accessible
serial = uuid.NewString()
devices = []godep.Device{
{SerialNumber: serial, Model: "MacBook Pro", OS: "osx", OpType: "added"},
}
expectAssignProfileResponseNotAccessible = serial
profileAssignmentReqs = []profileAssignmentReq{}
s.runDEPSchedule()
require.Len(t, profileAssignmentReqs, 2) // FIXME: when a new device added in ABM is not accessible, we see two profile assign requests: first during the "fetch" phase, then during the "sync" phase
expectProfileUUID := ""
for _, req := range profileAssignmentReqs {
require.Len(t, req.Devices, 1)
require.Equal(t, serial, req.Devices[0])
if expectProfileUUID == "" {
expectProfileUUID = req.ProfileUUID
} else {
require.Equal(t, expectProfileUUID, req.ProfileUUID)
}
d := checkHostCooldown(serial, req.ProfileUUID, fleet.DEPAssignProfileResponseNotAccessible, nil, expectNoJobID) // not accessible responses aren't retried
require.NotZero(t, d.ResponseUpdatedAt)
failedAt = d.ResponseUpdatedAt
}
// list hosts shows device pending and no dep profile error for not accessible responses
checkListHostDEPError(serial, "Pending", false)

// no retry job for not accessible responses even if cooldown expires
failedAt = failedAt.Add(-2 * time.Hour)
setAssignProfileResponseUpdatedAt(serial, failedAt)
profileAssignmentReqs = []profileAssignmentReq{}
s.runIntegrationsSchedule()
require.Empty(t, profileAssignmentReqs)
checkHostCooldown(serial, expectProfileUUID, fleet.DEPAssignProfileResponseNotAccessible, &failedAt, expectNoJobID) // no change
checkNoJobsPending()

// run with devices that already have valid and invalid profiles
// assigned; we shouldn't re-assign the valid ones.
devices = []godep.Device{
{SerialNumber: uuid.NewString(), Model: "MacBook Pro", OS: "osx", OpType: "added", ProfileUUID: defaultProfileUUID}, // matches existing profile
{SerialNumber: uuid.NewString(), Model: "MacBook Mini", OS: "osx", OpType: "modified", ProfileUUID: defaultProfileUUID}, // matches existing profile
{SerialNumber: uuid.NewString(), Model: "MacBook Pro", OS: "osx", OpType: "added", ProfileUUID: "bar"}, // doesn't match an existing profile
{SerialNumber: uuid.NewString(), Model: "MacBook Mini", OS: "osx", OpType: "modified", ProfileUUID: "foo"}, // doesn't match an existing profile
{SerialNumber: addedSerial, Model: "MacBook Pro", OS: "osx", OpType: "added", ProfileUUID: defaultProfileUUID}, // matches existing profile
{SerialNumber: serial, Model: "MacBook Mini", OS: "osx", OpType: "modified", ProfileUUID: defaultProfileUUID}, // matches existing profile
}
expectAssignProfileResponseNotAccessible = ""
profileAssignmentReqs = []profileAssignmentReq{}
s.runDEPSchedule()
require.NotEmpty(t, profileAssignmentReqs)
require.Len(t, profileAssignmentReqs[0].Devices, 2)
require.ElementsMatch(t, []string{devices[2].SerialNumber, devices[3].SerialNumber}, profileAssignmentReqs[0].Devices)
checkHostDEPAssignProfileResponses(profileAssignmentReqs[0].Devices, profileAssignmentReqs[0].ProfileUUID, fleet.DEPAssignProfileResponseSuccess)

// run with only a device that already has the right profile, no errors and no assignments
devices = []godep.Device{
{SerialNumber: uuid.NewString(), Model: "MacBook Pro", OS: "osx", OpType: "added", ProfileUUID: defaultProfileUUID}, // matches existing profile
}
profileAssignmentReqs = []profileAssignmentReq{}
s.runDEPSchedule()
require.Empty(t, profileAssignmentReqs)
}

func loadEnrollmentProfileDEPToken(t *testing.T, ds *mysql.Datastore) string {
var token string
mysql.ExecAdhocSQL(t, ds, func(q sqlx.ExtContext) error {
@ -7140,10 +6419,21 @@ func (s *integrationMDMTestSuite) TestGitOpsUserActions() {
//
s.setTokenForTest(t, "gitops1-mdm@example.com", test.GoodPassword)

// Attempt to edit global MDM settings, should allow.
// Attempt to edit global MDM settings, should allow (also ensure the IdP settings are cleared).
acResp := appConfigResponse{}
s.DoJSON("PATCH", "/api/latest/fleet/config", json.RawMessage(`{
"mdm": { "enable_disk_encryption": true }
"mdm": {
"macos_setup": {
"enable_end_user_authentication": false
},
"enable_disk_encryption": true,
"end_user_authentication": {
"entity_id": "",
"issuer_uri": "",
"idp_name": "",
"metadata_url": ""
}
}
}`), http.StatusOK, &acResp)
assert.True(t, acResp.MDM.EnableDiskEncryption.Value)

@ -10643,7 +9933,7 @@ func (s *integrationMDMTestSuite) TestMDMEnabledAndConfigured() {
func (s *integrationMDMTestSuite) runWorker() {
err := s.worker.ProcessJobs(context.Background())
require.NoError(s.T(), err)
pending, err := s.ds.GetQueuedJobs(context.Background(), 1)
pending, err := s.ds.GetQueuedJobs(context.Background(), 1, time.Time{})
require.NoError(s.T(), err)
require.Empty(s.T(), pending)
}

@ -4,6 +4,8 @@ import (
"context"
"encoding/json"
"errors"
"fmt"
"time"

"github.com/fleetdm/fleet/v4/server/contexts/ctxerr"
"github.com/fleetdm/fleet/v4/server/fleet"
@ -25,6 +27,7 @@ type AppleMDMTask string
const (
AppleMDMPostDEPEnrollmentTask AppleMDMTask = "post_dep_enrollment"
AppleMDMPostManualEnrollmentTask AppleMDMTask = "post_manual_enrollment"
AppleMDMPostDEPReleaseDeviceTask AppleMDMTask = "post_dep_release_device"
)

// AppleMDM is the job processor for the apple_mdm job.
@ -41,10 +44,11 @@ func (a *AppleMDM) Name() string {

// appleMDMArgs is the payload for the Apple MDM job.
type appleMDMArgs struct {
Task AppleMDMTask `json:"task"`
HostUUID string `json:"host_uuid"`
TeamID *uint `json:"team_id,omitempty"`
EnrollReference string `json:"enroll_reference,omitempty"`
Task AppleMDMTask `json:"task"`
HostUUID string `json:"host_uuid"`
TeamID *uint `json:"team_id,omitempty"`
EnrollReference string `json:"enroll_reference,omitempty"`
EnrollmentCommands []string `json:"enrollment_commands,omitempty"`
}

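For illustration, a minimal sketch of the payload a post-DEP release-device job now carries (the UUID and command values are hypothetical, and ptr.Uint stands in for any helper that returns the address of a uint):

	args := appleMDMArgs{
		Task:               AppleMDMPostDEPReleaseDeviceTask,
		HostUUID:           "11111111-2222-3333-4444-555555555555", // hypothetical host UUID
		TeamID:             ptr.Uint(1),
		EnrollmentCommands: []string{"fleetd-cmd-uuid", "bootstrap-cmd-uuid"}, // hypothetical command UUIDs
	}
	// marshals (via the json tags above) to:
	// {"task":"post_dep_release_device","host_uuid":"11111111-2222-3333-4444-555555555555","team_id":1,"enrollment_commands":["fleetd-cmd-uuid","bootstrap-cmd-uuid"]}
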
// Run executes the apple_mdm job.
@ -64,16 +68,22 @@ func (a *AppleMDM) Run(ctx context.Context, argsJSON json.RawMessage) error {
case AppleMDMPostDEPEnrollmentTask:
err := a.runPostDEPEnrollment(ctx, args)
return ctxerr.Wrap(ctx, err, "running post Apple DEP enrollment task")

case AppleMDMPostManualEnrollmentTask:
err := a.runPostManualEnrollment(ctx, args)
return ctxerr.Wrap(ctx, err, "running post Apple manual enrollment task")

case AppleMDMPostDEPReleaseDeviceTask:
err := a.runPostDEPReleaseDevice(ctx, args)
return ctxerr.Wrap(ctx, err, "running post Apple DEP release device task")

default:
return ctxerr.Errorf(ctx, "unknown task: %v", args.Task)
}
}

func (a *AppleMDM) runPostManualEnrollment(ctx context.Context, args appleMDMArgs) error {
if err := a.installFleetd(ctx, args.HostUUID); err != nil {
if _, err := a.installFleetd(ctx, args.HostUUID); err != nil {
return ctxerr.Wrap(ctx, err, "installing post-enrollment packages")
}

@ -81,13 +91,21 @@ func (a *AppleMDM) runPostManualEnrollment(ctx context.Context, args appleMDMArg
}

func (a *AppleMDM) runPostDEPEnrollment(ctx context.Context, args appleMDMArgs) error {
if err := a.installFleetd(ctx, args.HostUUID); err != nil {
var awaitCmdUUIDs []string

fleetdCmdUUID, err := a.installFleetd(ctx, args.HostUUID)
if err != nil {
return ctxerr.Wrap(ctx, err, "installing post-enrollment packages")
}
awaitCmdUUIDs = append(awaitCmdUUIDs, fleetdCmdUUID)

if err := a.installBootstrapPackage(ctx, args.HostUUID, args.TeamID); err != nil {
bootstrapCmdUUID, err := a.installBootstrapPackage(ctx, args.HostUUID, args.TeamID)
if err != nil {
return ctxerr.Wrap(ctx, err, "installing post-enrollment packages")
}
if bootstrapCmdUUID != "" {
awaitCmdUUIDs = append(awaitCmdUUIDs, bootstrapCmdUUID)
}

if ref := args.EnrollReference; ref != "" {
a.Log.Log("info", "got an enroll_reference", "host_uuid", args.HostUUID, "ref", ref)
@ -112,30 +130,143 @@ func (a *AppleMDM) runPostDEPEnrollment(ctx context.Context, args appleMDMArgs)

if ssoEnabled {
a.Log.Log("info", "setting username and fullname", "host_uuid", args.HostUUID)
cmdUUID := uuid.New().String()
if err := a.Commander.AccountConfiguration(
ctx,
[]string{args.HostUUID},
uuid.New().String(),
cmdUUID,
acct.Fullname,
acct.Username,
); err != nil {
return ctxerr.Wrap(ctx, err, "sending AccountConfiguration command")
}
awaitCmdUUIDs = append(awaitCmdUUIDs, cmdUUID)
}
}

var manualRelease bool
if args.TeamID == nil {
ac, err := a.Datastore.AppConfig(ctx)
if err != nil {
return ctxerr.Wrap(ctx, err, "get AppConfig to read enable_release_device_manually")
}
manualRelease = ac.MDM.MacOSSetup.EnableReleaseDeviceManually.Value
} else {
tm, err := a.Datastore.Team(ctx, *args.TeamID)
if err != nil {
return ctxerr.Wrap(ctx, err, "get Team to read enable_release_device_manually")
}
manualRelease = tm.Config.MDM.MacOSSetup.EnableReleaseDeviceManually.Value
}

if !manualRelease {
// send all command uuids for the commands sent during this post-DEP
// enrollment and enqueue a job that waits for those commands, and for the
// MDM profiles of that host, to reach a final status; once they do, the
// DEP enrollment process is done and the device can be released.
if err := QueueAppleMDMJob(ctx, a.Datastore, a.Log, AppleMDMPostDEPReleaseDeviceTask,
args.HostUUID, args.TeamID, args.EnrollReference, awaitCmdUUIDs...); err != nil {
return ctxerr.Wrap(ctx, err, "queue Apple Post-DEP release device job")
}
}

return nil
}

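The manual-release escape hatch above is purely configuration-driven. As a sketch of how an operator might opt a team out of automatic release, assuming the YAML key mirrors the Go field path (mdm.macos_setup.enable_release_device_manually), which is the usual Fleet convention:

	apiVersion: v1
	kind: team
	spec:
	  team:
	    name: Workstations
	    mdm:
	      macos_setup:
	        enable_release_device_manually: true

With this set, the worker skips queueing the AppleMDMPostDEPReleaseDeviceTask job and leaves the release step to be performed manually.
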
func (a *AppleMDM) installFleetd(ctx context.Context, hostUUID string) error {
func (a *AppleMDM) runPostDEPReleaseDevice(ctx context.Context, args appleMDMArgs) error {
// Edge cases:
// - if the device goes offline for a long time, should we go ahead and
// release after a while?
// - if some commands/profiles failed (a final state), should we go ahead
// and release?
// - if the device keeps moving teams, or profiles keep being added/removed
// from its team, it's possible that its profiles will never settle and
// will always have pending statuses. Same as going offline: should we
// release after a while?
//
// We opted "yes" to all of those, and we want to release after a few
// minutes, not hours, so we allow only a couple of retries.

level.Debug(a.Log).Log(
"task", "runPostDEPReleaseDevice",
"msg", fmt.Sprintf("awaiting commands %v and profiles to settle for host %s", args.EnrollmentCommands, args.HostUUID),
)

if retryNum, _ := ctx.Value(retryNumberCtxKey).(int); retryNum > 2 {
// give up and release the device
a.Log.Log("info", "releasing device after too many attempts", "host_uuid", args.HostUUID, "retries", retryNum)
if err := a.Commander.DeviceConfigured(ctx, args.HostUUID, uuid.NewString()); err != nil {
return ctxerr.Wrapf(ctx, err, "failed to enqueue DeviceConfigured command after %d retries", retryNum)
}
return nil
}

for _, cmdUUID := range args.EnrollmentCommands {
if cmdUUID == "" {
continue
}

res, err := a.Datastore.GetMDMAppleCommandResults(ctx, cmdUUID)
if err != nil {
return ctxerr.Wrap(ctx, err, "failed to get MDM command results")
}

var completed bool
for _, r := range res {
// succeeded or failed, it is done (final state)
if r.Status == fleet.MDMAppleStatusAcknowledged || r.Status == fleet.MDMAppleStatusError ||
r.Status == fleet.MDMAppleStatusCommandFormatError {
completed = true
break
}
}

if !completed {
// the DEP enrollment commands have not all been delivered to the
// device, so it cannot be released yet.
return fmt.Errorf("device not ready for release, still awaiting result for command %s, will retry", cmdUUID)
}
level.Debug(a.Log).Log(
"task", "runPostDEPReleaseDevice",
"msg", fmt.Sprintf("command %s has completed", cmdUUID),
)
}

// all DEP-enrollment commands are done, check the host's profiles
profs, err := a.Datastore.GetHostMDMAppleProfiles(ctx, args.HostUUID)
if err != nil {
return ctxerr.Wrap(ctx, err, "failed to get host MDM profiles")
}
for _, prof := range profs {
// if the host has any pending profiles, then its profiles are not done
// being delivered (installed or removed).
if prof.Status == nil || *prof.Status == fleet.MDMDeliveryPending {
return fmt.Errorf("device not ready for release, profile %s is still pending, will retry", prof.Identifier)
}
level.Debug(a.Log).Log(
"task", "runPostDEPReleaseDevice",
"msg", fmt.Sprintf("profile %s has been deployed", prof.Identifier),
)
}

// release the device
a.Log.Log("info", "releasing device, all DEP enrollment commands and profiles have completed", "host_uuid", args.HostUUID)
if err := a.Commander.DeviceConfigured(ctx, args.HostUUID, uuid.NewString()); err != nil {
return ctxerr.Wrap(ctx, err, "failed to enqueue DeviceConfigured command")
}
return nil
}

func (a *AppleMDM) installFleetd(ctx context.Context, hostUUID string) (string, error) {
cmdUUID := uuid.New().String()
if err := a.Commander.InstallEnterpriseApplication(ctx, []string{hostUUID}, cmdUUID, apple_mdm.FleetdPublicManifestURL); err != nil {
return err
return "", err
}
a.Log.Log("info", "sent command to install fleetd", "host_uuid", hostUUID)
return nil
return cmdUUID, nil
}

func (a *AppleMDM) installBootstrapPackage(ctx context.Context, hostUUID string, teamID *uint) error {
func (a *AppleMDM) installBootstrapPackage(ctx context.Context, hostUUID string, teamID *uint) (string, error) {
// GetMDMAppleBootstrapPackageMeta expects team id 0 for no team
var tmID uint
if teamID != nil {
@ -146,34 +277,34 @@ func (a *AppleMDM) installBootstrapPackage(ctx context.Context, hostUUID string,
var nfe fleet.NotFoundError
if errors.As(err, &nfe) {
a.Log.Log("info", "unable to find a bootstrap package for DEP enrolled device, skipping installation", "host_uuid", hostUUID)
return nil
return "", nil
}

return err
return "", err
}

appCfg, err := a.Datastore.AppConfig(ctx)
if err != nil {
return err
return "", err
}

url, err := meta.URL(appCfg.ServerSettings.ServerURL)
if err != nil {
return err
return "", err
}

manifest := appmanifest.NewFromSha(meta.Sha256, url)
cmdUUID := uuid.New().String()
err = a.Commander.InstallEnterpriseApplicationWithEmbeddedManifest(ctx, []string{hostUUID}, cmdUUID, manifest)
if err != nil {
return err
return "", err
}
err = a.Datastore.RecordHostBootstrapPackage(ctx, cmdUUID, hostUUID)
if err != nil {
return err
return "", err
}
a.Log.Log("info", "sent command to install bootstrap package", "host_uuid", hostUUID)
return nil
return cmdUUID, nil
}

// QueueAppleMDMJob queues an apple_mdm job for one of the supported tasks, to
@ -186,6 +317,7 @@ func QueueAppleMDMJob(
hostUUID string,
teamID *uint,
enrollReference string,
enrollmentCommandUUIDs ...string,
) error {
attrs := []interface{}{
"enabled", "true",
@ -196,15 +328,25 @@ func QueueAppleMDMJob(
if teamID != nil {
attrs = append(attrs, "team_id", *teamID)
}
if len(enrollmentCommandUUIDs) > 0 {
attrs = append(attrs, "enrollment_commands", enrollmentCommandUUIDs)
}
level.Info(logger).Log(attrs...)

args := &appleMDMArgs{
Task: task,
HostUUID: hostUUID,
TeamID: teamID,
EnrollReference: enrollReference,
Task: task,
HostUUID: hostUUID,
TeamID: teamID,
EnrollReference: enrollReference,
EnrollmentCommands: enrollmentCommandUUIDs,
}
job, err := QueueJob(ctx, ds, appleMDMJobName, args)

// the release device task is always added with a delay
var delay time.Duration
if task == AppleMDMPostDEPReleaseDeviceTask {
delay = 30 * time.Second
}
job, err := QueueJobWithDelay(ctx, ds, appleMDMJobName, args, delay)
if err != nil {
return ctxerr.Wrap(ctx, err, "queueing job")
}

@ -7,6 +7,7 @@ import (
"testing"
"time"

"github.com/fleetdm/fleet/v4/pkg/optjson"
"github.com/fleetdm/fleet/v4/server/datastore/mysql"
"github.com/fleetdm/fleet/v4/server/fleet"
apple_mdm "github.com/fleetdm/fleet/v4/server/mdm/apple"
@ -40,6 +41,8 @@ func TestAppleMDM(t *testing.T) {
// specific internals (sequence and number of calls, etc.). The MDM storage
// and pusher are mocks.
ds := mysql.CreateMySQLDS(t)
// call TruncateTables immediately as a DB migration may have created jobs
mysql.TruncateTables(t, ds)

mdmStorage, err := ds.NewMDMAppleMDMStorage([]byte("test"), []byte("test"))
require.NoError(t, err)
@ -92,6 +95,32 @@ func TestAppleMDM(t *testing.T) {
return commands
}

enableManualRelease := func(t *testing.T, teamID *uint) {
if teamID == nil {
enableAppCfg := func(enable bool) {
ac, err := ds.AppConfig(ctx)
require.NoError(t, err)
ac.MDM.MacOSSetup.EnableReleaseDeviceManually = optjson.SetBool(enable)
err = ds.SaveAppConfig(ctx, ac)
require.NoError(t, err)
}

enableAppCfg(true)
t.Cleanup(func() { enableAppCfg(false) })
} else {
enableTm := func(enable bool) {
tm, err := ds.Team(ctx, *teamID)
require.NoError(t, err)
tm.Config.MDM.MacOSSetup.EnableReleaseDeviceManually = optjson.SetBool(enable)
_, err = ds.SaveTeam(ctx, tm)
require.NoError(t, err)
}

enableTm(true)
t.Cleanup(func() { enableTm(false) })
}
}

t.Run("no-op with nil commander", func(t *testing.T) {
|
||||
defer mysql.TruncateTables(t, ds)
|
||||
|
||||
@ -115,7 +144,7 @@ func TestAppleMDM(t *testing.T) {
|
||||
// again
|
||||
time.Sleep(time.Second)
|
||||
|
||||
jobs, err := ds.GetQueuedJobs(ctx, 1)
|
||||
jobs, err := ds.GetQueuedJobs(ctx, 1, time.Now().UTC().Add(time.Minute)) // look in the future to catch any delayed job
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, jobs)
|
||||
})
|
||||
@ -143,7 +172,7 @@ func TestAppleMDM(t *testing.T) {
|
||||
// ensure the job's not_before allows it to be returned
|
||||
time.Sleep(time.Second)
|
||||
|
||||
jobs, err := ds.GetQueuedJobs(ctx, 1)
|
||||
jobs, err := ds.GetQueuedJobs(ctx, 1, time.Time{})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, jobs, 1)
|
||||
require.Contains(t, jobs[0].Error, "unknown task: no-such-task")
|
||||
@ -175,9 +204,49 @@ func TestAppleMDM(t *testing.T) {
|
||||
// again
|
||||
time.Sleep(time.Second)
|
||||
|
||||
jobs, err := ds.GetQueuedJobs(ctx, 1)
|
||||
jobs, err := ds.GetQueuedJobs(ctx, 1, time.Now().UTC().Add(time.Minute)) // look in the future to catch any delayed job
|
||||
require.NoError(t, err)
|
||||
|
||||
// the post-DEP release device job is pending
|
||||
require.Len(t, jobs, 1)
|
||||
require.Equal(t, appleMDMJobName, jobs[0].Name)
|
||||
require.Contains(t, string(*jobs[0].Args), AppleMDMPostDEPReleaseDeviceTask)
|
||||
require.Equal(t, 0, jobs[0].Retries) // hasn't run yet
|
||||
|
||||
require.ElementsMatch(t, []string{"InstallEnterpriseApplication"}, getEnqueuedCommandTypes(t))
|
||||
})
|
||||
|
||||
t.Run("installs default manifest, manual release", func(t *testing.T) {
|
||||
t.Cleanup(func() { mysql.TruncateTables(t, ds) })
|
||||
|
||||
h := createEnrolledHost(t, 1, nil, true)
|
||||
enableManualRelease(t, nil)
|
||||
|
||||
mdmWorker := &AppleMDM{
|
||||
Datastore: ds,
|
||||
Log: nopLog,
|
||||
Commander: apple_mdm.NewMDMAppleCommander(mdmStorage, mockPusher{}),
|
||||
}
|
||||
w := NewWorker(ds, nopLog)
|
||||
w.Register(mdmWorker)
|
||||
|
||||
err = QueueAppleMDMJob(ctx, ds, nopLog, AppleMDMPostDEPEnrollmentTask, h.UUID, nil, "")
|
||||
require.NoError(t, err)
|
||||
|
||||
// run the worker, should succeed
|
||||
err = w.ProcessJobs(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
// ensure the job's not_before allows it to be returned if it were to run
|
||||
// again
|
||||
time.Sleep(time.Second)
|
||||
|
||||
jobs, err := ds.GetQueuedJobs(ctx, 1, time.Now().UTC().Add(time.Minute)) // look in the future to catch any delayed job
|
||||
require.NoError(t, err)
|
||||
|
||||
// there is no post-DEP release device job pending
|
||||
require.Empty(t, jobs)
|
||||
|
||||
require.ElementsMatch(t, []string{"InstallEnterpriseApplication"}, getEnqueuedCommandTypes(t))
|
||||
})
|
||||
|
||||
@ -213,9 +282,15 @@ func TestAppleMDM(t *testing.T) {
|
||||
// again
|
||||
time.Sleep(time.Second)
|
||||
|
||||
jobs, err := ds.GetQueuedJobs(ctx, 1)
|
||||
jobs, err := ds.GetQueuedJobs(ctx, 1, time.Now().UTC().Add(time.Minute)) // look in the future to catch any delayed job
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, jobs)
|
||||
|
||||
// the post-DEP release device job is pending
|
||||
require.Len(t, jobs, 1)
|
||||
require.Equal(t, appleMDMJobName, jobs[0].Name)
|
||||
require.Contains(t, string(*jobs[0].Args), AppleMDMPostDEPReleaseDeviceTask)
|
||||
require.Equal(t, 0, jobs[0].Retries) // hasn't run yet
|
||||
|
||||
require.ElementsMatch(t, []string{"InstallEnterpriseApplication", "InstallEnterpriseApplication"}, getEnqueuedCommandTypes(t))
|
||||
|
||||
ms, err := ds.GetHostMDMMacOSSetup(ctx, h.ID)
|
||||
@ -258,9 +333,64 @@ func TestAppleMDM(t *testing.T) {
|
||||
// again
|
||||
time.Sleep(time.Second)
|
||||
|
||||
jobs, err := ds.GetQueuedJobs(ctx, 1)
|
||||
jobs, err := ds.GetQueuedJobs(ctx, 1, time.Now().UTC().Add(time.Minute)) // look in the future to catch any delayed job
|
||||
require.NoError(t, err)
|
||||
|
||||
// the post-DEP release device job is pending
|
||||
require.Len(t, jobs, 1)
|
||||
require.Equal(t, appleMDMJobName, jobs[0].Name)
|
||||
require.Contains(t, string(*jobs[0].Args), AppleMDMPostDEPReleaseDeviceTask)
|
||||
require.Equal(t, 0, jobs[0].Retries) // hasn't run yet
|
||||
|
||||
require.ElementsMatch(t, []string{"InstallEnterpriseApplication", "InstallEnterpriseApplication"}, getEnqueuedCommandTypes(t))
|
||||
|
||||
ms, err := ds.GetHostMDMMacOSSetup(ctx, h.ID)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "custom-team-bootstrap", ms.BootstrapPackageName)
|
||||
})
|
||||
|
||||
t.Run("installs custom bootstrap manifest of a team, manual release", func(t *testing.T) {
|
||||
t.Cleanup(func() { mysql.TruncateTables(t, ds) })
|
||||
|
||||
tm, err := ds.NewTeam(ctx, &fleet.Team{Name: "test"})
|
||||
require.NoError(t, err)
|
||||
enableManualRelease(t, &tm.ID)
|
||||
|
||||
h := createEnrolledHost(t, 1, &tm.ID, true)
|
||||
err = ds.InsertMDMAppleBootstrapPackage(ctx, &fleet.MDMAppleBootstrapPackage{
|
||||
Name: "custom-team-bootstrap",
|
||||
TeamID: tm.ID,
|
||||
Bytes: []byte("test"),
|
||||
Sha256: []byte("test"),
|
||||
Token: "token",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
mdmWorker := &AppleMDM{
|
||||
Datastore: ds,
|
||||
Log: nopLog,
|
||||
Commander: apple_mdm.NewMDMAppleCommander(mdmStorage, mockPusher{}),
|
||||
}
|
||||
w := NewWorker(ds, nopLog)
|
||||
w.Register(mdmWorker)
|
||||
|
||||
err = QueueAppleMDMJob(ctx, ds, nopLog, AppleMDMPostDEPEnrollmentTask, h.UUID, &tm.ID, "")
|
||||
require.NoError(t, err)
|
||||
|
||||
// run the worker, should succeed
|
||||
err = w.ProcessJobs(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
// ensure the job's not_before allows it to be returned if it were to run
|
||||
// again
|
||||
time.Sleep(time.Second)
|
||||
|
||||
jobs, err := ds.GetQueuedJobs(ctx, 1, time.Now().UTC().Add(time.Minute)) // look in the future to catch any delayed job
|
||||
require.NoError(t, err)
|
||||
|
||||
// there is no post-DEP release device job pending
|
||||
require.Empty(t, jobs)
|
||||
|
||||
require.ElementsMatch(t, []string{"InstallEnterpriseApplication", "InstallEnterpriseApplication"}, getEnqueuedCommandTypes(t))
|
||||
|
||||
ms, err := ds.GetHostMDMMacOSSetup(ctx, h.ID)
|
||||
@ -292,7 +422,7 @@ func TestAppleMDM(t *testing.T) {
|
||||
// again
|
||||
time.Sleep(time.Second)
|
||||
|
||||
jobs, err := ds.GetQueuedJobs(ctx, 1)
|
||||
jobs, err := ds.GetQueuedJobs(ctx, 1, time.Time{})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, jobs, 1)
|
||||
require.Contains(t, jobs[0].Error, "MDMIdPAccount with uuid abcd was not found")
|
||||
@ -334,9 +464,15 @@ func TestAppleMDM(t *testing.T) {
|
||||
// again
|
||||
time.Sleep(time.Second)
|
||||
|
||||
jobs, err := ds.GetQueuedJobs(ctx, 1)
|
||||
jobs, err := ds.GetQueuedJobs(ctx, 1, time.Now().UTC().Add(time.Minute)) // look in the future to catch any delayed job
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, jobs)
|
||||
|
||||
// the post-DEP release device job is pending, having failed its first attempt
|
||||
require.Len(t, jobs, 1)
|
||||
require.Equal(t, appleMDMJobName, jobs[0].Name)
|
||||
require.Contains(t, string(*jobs[0].Args), AppleMDMPostDEPReleaseDeviceTask)
|
||||
require.Equal(t, 0, jobs[0].Retries) // hasn't run yet
|
||||
|
||||
// confirm that AccountConfiguration command was not enqueued
|
||||
require.ElementsMatch(t, []string{"InstallEnterpriseApplication"}, getEnqueuedCommandTypes(t))
|
||||
})
|
||||
@ -383,9 +519,15 @@ func TestAppleMDM(t *testing.T) {
|
||||
// again
|
||||
time.Sleep(time.Second)
|
||||
|
||||
jobs, err := ds.GetQueuedJobs(ctx, 1)
|
||||
jobs, err := ds.GetQueuedJobs(ctx, 1, time.Now().UTC().Add(time.Minute)) // look in the future to catch any delayed job
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, jobs)
|
||||
|
||||
// the post-DEP release device job is pending
|
||||
require.Len(t, jobs, 1)
|
||||
require.Equal(t, appleMDMJobName, jobs[0].Name)
|
||||
require.Contains(t, string(*jobs[0].Args), AppleMDMPostDEPReleaseDeviceTask)
|
||||
require.Equal(t, 0, jobs[0].Retries) // hasn't run yet
|
||||
|
||||
require.ElementsMatch(t, []string{"InstallEnterpriseApplication", "AccountConfiguration"}, getEnqueuedCommandTypes(t))
|
||||
})
|
||||
|
||||
@ -413,7 +555,7 @@ func TestAppleMDM(t *testing.T) {
|
||||
// again
|
||||
time.Sleep(time.Second)
|
||||
|
||||
jobs, err := ds.GetQueuedJobs(ctx, 1)
|
||||
jobs, err := ds.GetQueuedJobs(ctx, 1, time.Now().UTC().Add(time.Minute)) // look in the future to catch any delayed job
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, jobs)
|
||||
require.ElementsMatch(t, []string{"InstallEnterpriseApplication"}, getEnqueuedCommandTypes(t))
|
||||
|
@ -25,6 +25,8 @@ import (
|
||||
func TestMacosSetupAssistant(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
ds := mysql.CreateMySQLDS(t)
|
||||
// call TruncateTables immediately as some DB migrations may create jobs
|
||||
mysql.TruncateTables(t, ds)
|
||||
|
||||
// create a couple hosts for no team, team 1 and team 2 (none for team 3)
|
||||
hosts := make([]*fleet.Host, 6)
|
||||
@ -140,7 +142,7 @@ func TestMacosSetupAssistant(t *testing.T) {
|
||||
err = w.ProcessJobs(ctx)
|
||||
require.NoError(t, err)
|
||||
// no remaining jobs to process
|
||||
pending, err := ds.GetQueuedJobs(ctx, 10)
|
||||
pending, err := ds.GetQueuedJobs(ctx, 10, time.Time{})
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, pending)
|
||||
}
|
||||
|
@ -12,11 +12,17 @@ import (
|
||||
"github.com/go-kit/kit/log/level"
|
||||
)
|
||||
|
||||
type ctxKey int
|
||||
|
||||
const (
|
||||
maxRetries = 5
|
||||
// nvdCVEURL is the base link to a CVE on the NVD website, only the CVE code
|
||||
// needs to be appended to make it a valid link.
|
||||
nvdCVEURL = "https://nvd.nist.gov/vuln/detail/"
|
||||
|
||||
// context key for the retry number of a job, made available via the context
|
||||
// to the job processor.
|
||||
retryNumberCtxKey = ctxKey(0)
|
||||
)
|
||||
|
||||
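A job processor can read this retry counter back from its context to implement give-up behavior, as the Apple MDM release-device task does above. A minimal sketch of the pattern inside a job's Run method:

	// retryNum is 0 on the first attempt and reflects job.Retries afterwards
	retryNum, _ := ctx.Value(retryNumberCtxKey).(int)
	if retryNum > 2 {
		// stop waiting and take the fallback path instead of failing again
	}
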
const (
@ -89,14 +95,26 @@ func (w *Worker) Register(jobs ...Job) {
// identified by the name (e.g. "jira"). The args value is marshaled as JSON
// and provided to the job processor when the job is executed.
func QueueJob(ctx context.Context, ds fleet.Datastore, name string, args interface{}) (*fleet.Job, error) {
return QueueJobWithDelay(ctx, ds, name, args, 0)
}

// QueueJobWithDelay is like QueueJob but does not make the job available
// before a specified delay (or no delay if delay is <= 0).
func QueueJobWithDelay(ctx context.Context, ds fleet.Datastore, name string, args interface{}, delay time.Duration) (*fleet.Job, error) {
argsJSON, err := json.Marshal(args)
if err != nil {
return nil, ctxerr.Wrap(ctx, err, "marshal args")
}

var notBefore time.Time
if delay > 0 {
notBefore = time.Now().UTC().Add(delay)
}
job := &fleet.Job{
Name: name,
Args: (*json.RawMessage)(&argsJSON),
State: fleet.JobStateQueued,
Name: name,
Args: (*json.RawMessage)(&argsJSON),
State: fleet.JobStateQueued,
NotBefore: notBefore,
}

return ds.NewJob(ctx, job)

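To make the delay semantics concrete, a minimal usage sketch (names as defined above; per the worker's call sites and the test comments, a zero time passed to GetQueuedJobs is interpreted as "now"):

	// queue a job that only becomes visible to the worker after 30 seconds
	if _, err := QueueJobWithDelay(ctx, ds, appleMDMJobName, args, 30*time.Second); err != nil {
		return err
	}
	// immediately after queueing, the job is filtered out by its not_before:
	early, _ := ds.GetQueuedJobs(ctx, 10, time.Time{}) // empty
	// querying with a reference time in the future returns it:
	late, _ := ds.GetQueuedJobs(ctx, 10, time.Now().UTC().Add(time.Minute)) // contains the job
	_, _ = early, late
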
@ -122,7 +140,7 @@ func (w *Worker) ProcessJobs(ctx context.Context) error {
// process jobs until there are none left or the context is cancelled
seen := make(map[uint]struct{})
for {
jobs, err := w.ds.GetQueuedJobs(ctx, maxNumJobs)
jobs, err := w.ds.GetQueuedJobs(ctx, maxNumJobs, time.Time{})
if err != nil {
return ctxerr.Wrap(ctx, err, "get queued jobs")
}
@ -191,6 +209,7 @@ func (w *Worker) processJob(ctx context.Context, job *fleet.Job) error {
args = *job.Args
}

ctx = context.WithValue(ctx, retryNumberCtxKey, job.Retries)
return j.Run(ctx, args)
}

@ -35,7 +35,7 @@ func TestWorker(t *testing.T) {

// set up mocks
getQueuedJobsCalled := 0
ds.GetQueuedJobsFunc = func(ctx context.Context, maxNumJobs int) ([]*fleet.Job, error) {
ds.GetQueuedJobsFunc = func(ctx context.Context, maxNumJobs int, now time.Time) ([]*fleet.Job, error) {
if getQueuedJobsCalled > 0 {
return nil, nil
}
@ -93,7 +93,7 @@ func TestWorkerRetries(t *testing.T) {
State: fleet.JobStateQueued,
Retries: 0,
}
ds.GetQueuedJobsFunc = func(ctx context.Context, maxNumJobs int) ([]*fleet.Job, error) {
ds.GetQueuedJobsFunc = func(ctx context.Context, maxNumJobs int, now time.Time) ([]*fleet.Job, error) {
if theJob.State == fleet.JobStateQueued {
return []*fleet.Job{theJob}, nil
}
@ -173,7 +173,7 @@ func TestWorkerMiddleJobFails(t *testing.T) {
Retries: 0,
},
}
ds.GetQueuedJobsFunc = func(ctx context.Context, maxNumJobs int) ([]*fleet.Job, error) {
ds.GetQueuedJobsFunc = func(ctx context.Context, maxNumJobs int, now time.Time) ([]*fleet.Job, error) {
var queued []*fleet.Job
for _, j := range jobs {
if j.State == fleet.JobStateQueued {
@ -241,6 +241,8 @@ func TestWorkerMiddleJobFails(t *testing.T) {
func TestWorkerWithRealDatastore(t *testing.T) {
ctx := context.Background()
ds := mysql.CreateMySQLDS(t)
// call TruncateTables immediately, because a DB migration may create jobs
mysql.TruncateTables(t, ds)

oldDelayPerRetry := delayPerRetry
delayPerRetry = []time.Duration{
@ -295,7 +297,7 @@ func TestWorkerWithRealDatastore(t *testing.T) {
// timestamp in mysql vs the one set in ProcessJobs (time.Now().Add(...)).
time.Sleep(time.Second)

jobs, err := ds.GetQueuedJobs(ctx, 10)
jobs, err := ds.GetQueuedJobs(ctx, 10, time.Time{})
require.NoError(t, err)
require.Len(t, jobs, 1)
require.Equal(t, j2.ID, jobs[0].ID)
@ -311,7 +313,7 @@ func TestWorkerWithRealDatastore(t *testing.T) {
// timestamp in mysql vs the one set in ProcessJobs (time.Now().Add(...)).
time.Sleep(time.Second)

jobs, err = ds.GetQueuedJobs(ctx, 10)
jobs, err = ds.GetQueuedJobs(ctx, 10, time.Time{})
require.NoError(t, err)
require.Len(t, jobs, 1)
require.Equal(t, j2.ID, jobs[0].ID)
@ -326,7 +328,7 @@ func TestWorkerWithRealDatastore(t *testing.T) {

time.Sleep(time.Second)

jobs, err = ds.GetQueuedJobs(ctx, 10)
jobs, err = ds.GetQueuedJobs(ctx, 10, time.Time{})
require.NoError(t, err)
require.Empty(t, jobs)