Changed query performance statistics to uint64 to match osquery reports. (#15505)

#15472
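
Osquery can report these counters as unsigned 64-bit values, which do not fit in the signed `int` fields Fleet used before this change. A minimal standard-library sketch (not Fleet code) of the parsing failure that motivates the switch to `uint64`:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// A counter as osquery may report it: the maximum unsigned 64-bit value.
	raw := "18446744073709551615"

	// Parsing into a signed 64-bit integer overflows.
	if _, err := strconv.ParseInt(raw, 10, 64); err != nil {
		fmt.Println(err) // value out of range
	}

	// Parsing into an unsigned 64-bit integer succeeds.
	v, err := strconv.ParseUint(raw, 10, 64)
	fmt.Println(v, err) // 18446744073709551615 <nil>
}
```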

# Checklist for submitter
- [x] Changes file added for user-visible changes in `changes/` or `orbit/changes/`.
- [x] Added/updated tests
- [x] Manual QA for all new/changed functionality
Authored by Victor Lyuboslavsky on 2023-12-11 11:29:17 -06:00, committed by GitHub
parent 883652cac6
commit 9236a19342
11 changed files with 147 additions and 38 deletions

View File

@@ -0,0 +1 @@
Changed query performance statistics to uint64 to match osquery reports.

View File

@@ -3598,7 +3598,7 @@ func testHostsSavePackStatsConcurrent(t *testing.T, ds *Datastore) {
 PackID: pack1.ID,
 AverageMemory: 8000,
 Denylisted: false,
-Executions: rand.Intn(1000),
+Executions: uint64(rand.Intn(1000)),
 Interval: 30,
 LastExecuted: time.Now().UTC(),
 OutputSize: 1337,
@@ -3619,7 +3619,7 @@ func testHostsSavePackStatsConcurrent(t *testing.T, ds *Datastore) {
 PackID: pack2.ID,
 AverageMemory: 8000,
 Denylisted: false,
-Executions: rand.Intn(1000),
+Executions: uint64(rand.Intn(1000)),
 Interval: 30,
 LastExecuted: time.Now().UTC(),
 OutputSize: 1337,

View File

@@ -0,0 +1,40 @@
package tables

import (
	"database/sql"
	"fmt"
)

func init() {
	MigrationClient.AddMigration(Up_20231207133731, Down_20231207133731)
}

func Up_20231207133731(tx *sql.Tx) error {
	// Updating some bad data from osquery (which will be ignored in a later PR).
	// Seems safer to update rather than delete.
	stmt := `
		UPDATE scheduled_query_stats SET last_executed = '1970-01-01 00:00:01' WHERE YEAR(last_executed) = '0000';
	`
	if _, err := tx.Exec(stmt); err != nil {
		return fmt.Errorf("fixing last_executed in scheduled_query_stats: %w", err)
	}

	stmt = `
		ALTER TABLE scheduled_query_stats
		MODIFY COLUMN average_memory BIGINT UNSIGNED NOT NULL,
		MODIFY COLUMN executions BIGINT UNSIGNED NOT NULL,
		MODIFY COLUMN output_size BIGINT UNSIGNED NOT NULL,
		MODIFY COLUMN system_time BIGINT UNSIGNED NOT NULL,
		MODIFY COLUMN user_time BIGINT UNSIGNED NOT NULL,
		MODIFY COLUMN wall_time BIGINT UNSIGNED NOT NULL;
	`
	if _, err := tx.Exec(stmt); err != nil {
		return fmt.Errorf("changing data types for scheduled_query_stats: %w", err)
	}

	return nil
}

func Down_20231207133731(tx *sql.Tx) error {
	return nil
}
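
Not part of the migration itself, but a quick way to confirm the column changes after running it. A minimal sketch assuming a local MySQL instance and the `go-sql-driver/mysql` driver; the DSN is a placeholder:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// Placeholder DSN; point it at the database the migration ran against.
	db, err := sql.Open("mysql", "fleet:insecure@tcp(127.0.0.1:3306)/fleet")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	rows, err := db.Query(`
		SELECT column_name, column_type
		FROM information_schema.columns
		WHERE table_schema = DATABASE()
		  AND table_name = 'scheduled_query_stats'
		  AND column_name IN ('average_memory', 'executions', 'output_size',
		                      'system_time', 'user_time', 'wall_time')`)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var name, colType string
		if err := rows.Scan(&name, &colType); err != nil {
			log.Fatal(err)
		}
		// Each column should now report as BIGINT UNSIGNED.
		fmt.Println(name, colType)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}
```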

View File

@@ -0,0 +1,68 @@
package tables

import (
	"github.com/stretchr/testify/require"
	"math"
	"testing"
)

func TestUp_20231207133731(t *testing.T) {
	db := applyUpToPrev(t)

	setupStmt := `
		INSERT INTO scheduled_query_stats (host_id, scheduled_query_id, average_memory, denylisted, executions, schedule_interval, output_size, system_time, user_time, wall_time, last_executed) VALUES
		(?,?,?,?,?,?,?,?,?,?,?);
	`
	_, err := db.Exec(setupStmt, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, "2023-12-07 13:17:17")
	require.NoError(t, err)

	// Apply current migration.
	applyNext(t, db)

	stmt := `
		SELECT host_id, average_memory FROM scheduled_query_stats WHERE host_id = 1;
	`
	rows, err := db.Query(stmt)
	require.NoError(t, rows.Err())
	require.NoError(t, err)
	defer rows.Close()
	count := 0
	for rows.Next() {
		count += 1
		var hostId int
		var avgMem uint64
		err := rows.Scan(&hostId, &avgMem)
		require.NoError(t, err)
		require.Equal(t, 1, hostId)
		require.Equal(t, uint64(3), avgMem)
	}
	require.Equal(t, 1, count)

	_, err = db.Exec(setupStmt, 2, 2, uint64(math.MaxUint64), 4, uint64(math.MaxUint64-1), 6, uint64(math.MaxUint64-2), uint64(math.MaxUint64-3), uint64(math.MaxUint64-4), uint64(math.MaxUint64-5), "2023-12-07 13:17:17")
	require.NoError(t, err)

	stmt = `
		SELECT host_id, average_memory, executions, output_size, system_time, user_time, wall_time FROM scheduled_query_stats WHERE host_id = 2;
	`
	rows, err = db.Query(stmt)
	require.NoError(t, rows.Err())
	require.NoError(t, err)
	defer rows.Close()
	count = 0
	for rows.Next() {
		count += 1
		var hostId int
		var avgMem, executions, outputSize, systemTime, userTime, wallTime uint64
		err := rows.Scan(&hostId, &avgMem, &executions, &outputSize, &systemTime, &userTime, &wallTime)
		require.NoError(t, err)
		require.Equal(t, 2, hostId)
		require.Equal(t, uint64(math.MaxUint64), avgMem)
		require.Equal(t, uint64(math.MaxUint64-1), executions)
		require.Equal(t, uint64(math.MaxUint64-2), outputSize)
		require.Equal(t, uint64(math.MaxUint64-3), systemTime)
		require.Equal(t, uint64(math.MaxUint64-4), userTime)
		require.Equal(t, uint64(math.MaxUint64-5), wallTime)
	}
	require.Equal(t, 1, count)
}
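
For context on the boundary values exercised above: `math.MaxUint64` only fits once the columns are `BIGINT UNSIGNED`, since signed `BIGINT` stops at 2^63-1. A tiny standard-library sketch of the two limits:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	fmt.Println(int64(math.MaxInt64))   // 9223372036854775807  (signed BIGINT ceiling, the old limit)
	fmt.Println(uint64(math.MaxUint64)) // 18446744073709551615 (unsigned BIGINT ceiling, now representable)
}
```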

View File

@@ -444,15 +444,15 @@ func randomPackStatsForHost(packID uint, packName string, packType string, sched
 QueryName: sq.QueryName,
 Description: sq.Description,
 PackID: packID,
-AverageMemory: rand.Intn(100),
+AverageMemory: uint64(rand.Intn(100)),
 Denylisted: false,
-Executions: rand.Intn(100),
+Executions: uint64(rand.Intn(100)),
 Interval: rand.Intn(100),
 LastExecuted: time.Now(),
-OutputSize: rand.Intn(1000),
-SystemTime: rand.Intn(1000),
-UserTime: rand.Intn(1000),
-WallTime: rand.Intn(1000),
+OutputSize: uint64(rand.Intn(1000)),
+SystemTime: uint64(rand.Intn(1000)),
+UserTime: uint64(rand.Intn(1000)),
+WallTime: uint64(rand.Intn(1000)),
 })
 }
 return []fleet.PackStats{

View File

@@ -508,7 +508,7 @@ func testScheduledQueriesAsyncBatchSaveStats(t *testing.T, ds *Datastore) {
 for hid, stats := range m {
 for _, st := range stats {
 ExecAdhocSQL(t, ds, func(tx sqlx.ExtContext) error {
-var got int
+var got uint64
 err := sqlx.GetContext(ctx, tx, &got, `SELECT executions FROM scheduled_query_stats WHERE host_id = ? AND scheduled_query_id = ?`, hid, st.ScheduledQueryID)
 if err != nil {
 return err

File diff suppressed because one or more lines are too long

View File

@@ -410,16 +410,16 @@ type QueryStats struct {
 TeamID *uint `json:"team_id" db:"team_id"`
 // From osquery directly
-AverageMemory int `json:"average_memory" db:"average_memory"`
-Denylisted bool `json:"denylisted" db:"denylisted"`
-Executions int `json:"executions" db:"executions"`
+AverageMemory uint64 `json:"average_memory" db:"average_memory"`
+Denylisted bool `json:"denylisted" db:"denylisted"`
+Executions uint64 `json:"executions" db:"executions"`
 // Note schedule_interval is used for DB since "interval" is a reserved word in MySQL
 Interval int `json:"interval" db:"schedule_interval"`
 LastExecuted time.Time `json:"last_executed" db:"last_executed"`
-OutputSize int `json:"output_size" db:"output_size"`
-SystemTime int `json:"system_time" db:"system_time"`
-UserTime int `json:"user_time" db:"user_time"`
-WallTime int `json:"wall_time" db:"wall_time"`
+OutputSize uint64 `json:"output_size" db:"output_size"`
+SystemTime uint64 `json:"system_time" db:"system_time"`
+UserTime uint64 `json:"user_time" db:"user_time"`
+WallTime uint64 `json:"wall_time" db:"wall_time"`
 }
 // MapQueryReportsResultsToRows converts the scheduled query results as stored in Fleet's database
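
The widened fields keep their JSON tags, and Go's `encoding/json` serializes `uint64` values exactly, so counters above 2^63-1 survive the API layer; clients that decode JSON numbers as 64-bit floats can still lose precision above 2^53. A minimal sketch with a stand-in struct (not the Fleet type):

```go
package main

import (
	"encoding/json"
	"fmt"
	"math"
)

// statsJSON is a stand-in for the widened fields, not Fleet's actual type.
type statsJSON struct {
	Executions uint64 `json:"executions"`
	WallTime   uint64 `json:"wall_time"`
}

func main() {
	b, err := json.Marshal(statsJSON{Executions: math.MaxUint64, WallTime: 12345})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"executions":18446744073709551615,"wall_time":12345}
}
```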

View File

@@ -151,16 +151,16 @@ type ScheduledQueryStats struct {
 PackID uint `json:"pack_id,omitempty" db:"pack_id"`
 // From osquery directly
-AverageMemory int `json:"average_memory" db:"average_memory"`
-Denylisted bool `json:"denylisted" db:"denylisted"`
-Executions int `json:"executions" db:"executions"`
+AverageMemory uint64 `json:"average_memory" db:"average_memory"`
+Denylisted bool `json:"denylisted" db:"denylisted"`
+Executions uint64 `json:"executions" db:"executions"`
 // Note schedule_interval is used for DB since "interval" is a reserved word in MySQL
 Interval int `json:"interval" db:"schedule_interval"`
 LastExecuted time.Time `json:"last_executed" db:"last_executed"`
-OutputSize int `json:"output_size" db:"output_size"`
-SystemTime int `json:"system_time" db:"system_time"`
-UserTime int `json:"user_time" db:"user_time"`
-WallTime int `json:"wall_time" db:"wall_time"`
+OutputSize uint64 `json:"output_size" db:"output_size"`
+SystemTime uint64 `json:"system_time" db:"system_time"`
+UserTime uint64 `json:"user_time" db:"user_time"`
+WallTime uint64 `json:"wall_time" db:"wall_time"`
 }
 // TeamID returns the team id if the stat is for a team query stat result
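
Same note as in `QueryStats` above: `INTERVAL` is a reserved word in MySQL, so the column is named `schedule_interval` and bridged to the Go field via the `db` tag. A minimal sqlx sketch with a stand-in type and a placeholder DSN (not Fleet code):

```go
package main

import (
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql"
	"github.com/jmoiron/sqlx"
)

// intervalRow is a stand-in type; the db tag is what maps the
// schedule_interval column onto a field exposed as "interval" elsewhere.
type intervalRow struct {
	Interval int `db:"schedule_interval"`
}

func main() {
	// Placeholder DSN.
	db, err := sqlx.Connect("mysql", "fleet:insecure@tcp(127.0.0.1:3306)/fleet")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var row intervalRow
	if err := db.Get(&row, `SELECT schedule_interval FROM scheduled_query_stats LIMIT 1`); err != nil {
		log.Fatal(err)
	}
	fmt.Println(row.Interval)
}
```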

View File

@@ -242,7 +242,7 @@ func testRecordScheduledQueryStatsAsync(t *testing.T, ds *mock.Store, pool fleet
 var sqStat fleet.ScheduledQueryStats
 err = json.Unmarshal([]byte(res["p1\x00sq1"]), &sqStat)
 require.NoError(t, err)
-require.Equal(t, 1, sqStat.Executions, res["p1\x00sq1"])
+require.Equal(t, uint64(1), sqStat.Executions, res["p1\x00sq1"])
 count, err := redigo.Int(conn.Do("ZCARD", scheduledQueryStatsHostIDsKey))
 require.NoError(t, err)

View File

@@ -1177,16 +1177,16 @@ func directIngestScheduledQueryStats(ctx context.Context, logger log.Logger, hos
 stats := fleet.ScheduledQueryStats{
 ScheduledQueryName: scheduledName,
 PackName: packName,
-AverageMemory: cast.ToInt(row["average_memory"]),
+AverageMemory: cast.ToUint64(row["average_memory"]),
 Denylisted: cast.ToBool(row["denylisted"]),
-Executions: cast.ToInt(row["executions"]),
+Executions: cast.ToUint64(row["executions"]),
 Interval: cast.ToInt(row["interval"]),
 // Cast to int first to allow cast.ToTime to interpret the unix timestamp.
 LastExecuted: time.Unix(cast.ToInt64(row["last_executed"]), 0).UTC(),
-OutputSize: cast.ToInt(row["output_size"]),
-SystemTime: cast.ToInt(row["system_time"]),
-UserTime: cast.ToInt(row["user_time"]),
-WallTime: cast.ToInt(row["wall_time"]),
+OutputSize: cast.ToUint64(row["output_size"]),
+SystemTime: cast.ToUint64(row["system_time"]),
+UserTime: cast.ToUint64(row["user_time"]),
+WallTime: cast.ToUint64(row["wall_time"]),
 }
 packs[packName] = append(packs[packName], stats)
 }
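
The ingestion path receives each osquery row as a map of strings, so the switch to `cast.ToUint64` is what carries the unsigned values through unchanged. A minimal sketch with a hypothetical row and only a subset of the fields (not the actual ingestion function):

```go
package main

import (
	"fmt"
	"time"

	"github.com/spf13/cast"
)

func main() {
	// Hypothetical osquery_schedule row as handed to the ingestion code:
	// every value arrives as a string.
	row := map[string]string{
		"average_memory": "8000",
		"executions":     "150",
		"last_executed":  "1701956237", // unix timestamp
	}

	averageMemory := cast.ToUint64(row["average_memory"])
	executions := cast.ToUint64(row["executions"])
	// Cast to int64 first so time.Unix can interpret the unix timestamp,
	// mirroring the diff above.
	lastExecuted := time.Unix(cast.ToInt64(row["last_executed"]), 0).UTC()

	fmt.Println(averageMemory, executions, lastExecuted)
}
```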