package service

import (
	"bytes"
	"encoding/json"
	"fmt"
	"strings"
	"sync"
	"testing"
	"time"

	"golang.org/x/net/context"

	"github.com/WatchBeam/clock"
	"github.com/kolide/kolide-ose/server/config"
	hostctx "github.com/kolide/kolide-ose/server/contexts/host"
	"github.com/kolide/kolide-ose/server/contexts/viewer"
	"github.com/kolide/kolide-ose/server/datastore/inmem"
	"github.com/kolide/kolide-ose/server/kolide"
	"github.com/kolide/kolide-ose/server/pubsub"
	"github.com/kolide/kolide-ose/server/test"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)
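
// TestEnrollAgent verifies that enrolling with a valid enroll secret creates
// a new host record and returns a non-empty node key.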
func TestEnrollAgent(t *testing.T) {
	ds, err := inmem.New(config.TestConfig())
	assert.Nil(t, err)

	svc, err := newTestService(ds, nil)
	assert.Nil(t, err)

	ctx := context.Background()

	hosts, err := ds.ListHosts(kolide.ListOptions{})
	assert.Nil(t, err)
	assert.Len(t, hosts, 0)

	nodeKey, err := svc.EnrollAgent(ctx, "", "host123")
	assert.Nil(t, err)
	assert.NotEmpty(t, nodeKey)

	hosts, err = ds.ListHosts(kolide.ListOptions{})
	assert.Nil(t, err)
	assert.Len(t, hosts, 1)
}
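
// TestEnrollAgentIncorrectEnrollSecret verifies that enrollment fails, and no
// host record is created, when an incorrect enroll secret is supplied.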
func TestEnrollAgentIncorrectEnrollSecret(t *testing.T) {
	ds, err := inmem.New(config.TestConfig())
	assert.Nil(t, err)

	svc, err := newTestService(ds, nil)
	assert.Nil(t, err)

	ctx := context.Background()

	hosts, err := ds.ListHosts(kolide.ListOptions{})
	assert.Nil(t, err)
	assert.Len(t, hosts, 0)

	nodeKey, err := svc.EnrollAgent(ctx, "not_correct", "host123")
	assert.NotNil(t, err)
	assert.Empty(t, nodeKey)

	hosts, err = ds.ListHosts(kolide.ListOptions{})
	assert.Nil(t, err)
	assert.Len(t, hosts, 0)
}
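
// TestSubmitStatusLogs verifies that status logs are written verbatim to the
// status log writer and that the host's updated timestamp advances with each
// submission.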
func TestSubmitStatusLogs(t *testing.T) {
	ds, err := inmem.New(config.TestConfig())
	assert.Nil(t, err)

	mockClock := clock.NewMockClock()

	svc, err := newTestServiceWithClock(ds, nil, mockClock)
	assert.Nil(t, err)

	ctx := context.Background()

	_, err = svc.EnrollAgent(ctx, "", "host123")
	assert.Nil(t, err)

	hosts, err := ds.ListHosts(kolide.ListOptions{})
	require.Nil(t, err)
	require.Len(t, hosts, 1)
	host := hosts[0]

	// Hack to get at the service internals and modify the writer
	serv := ((svc.(validationMiddleware)).Service).(service)

	// Error due to missing host
	err = serv.SubmitStatusLogs(ctx, []kolide.OsqueryStatusLog{})
	assert.NotNil(t, err)
	assert.Contains(t, err.Error(), "missing host")

	// Add that host
	ctx = hostctx.NewContext(ctx, *host)

	var statusBuf bytes.Buffer
	serv.osqueryStatusLogWriter = &statusBuf

	logs := []string{
		`{"severity":"0","filename":"tls.cpp","line":"216","message":"some message","version":"1.8.2","decorations":{"host_uuid":"uuid_foobar","username":"zwass"}}`,
		`{"severity":"1","filename":"buffered.cpp","line":"122","message":"warning!","version":"1.8.2","decorations":{"host_uuid":"uuid_foobar","username":"zwass"}}`,
	}
	logJSON := fmt.Sprintf("[%s]", strings.Join(logs, ","))

	var status []kolide.OsqueryStatusLog
	err = json.Unmarshal([]byte(logJSON), &status)
	require.Nil(t, err)

	err = serv.SubmitStatusLogs(ctx, status)
	assert.Nil(t, err)

	statusJSON := statusBuf.String()
	statusJSON = strings.TrimRight(statusJSON, "\n")
	statusLines := strings.Split(statusJSON, "\n")

	if assert.Equal(t, len(logs), len(statusLines)) {
		for i, line := range statusLines {
			assert.JSONEq(t, logs[i], line)
		}
	}

	// Verify that the update time is set appropriately
	checkHost, err := ds.Host(host.ID)
	assert.Nil(t, err)
	assert.Equal(t, mockClock.Now(), checkHost.UpdatedAt)

	// Advance clock time and check that time is updated on new logs
	mockClock.AddTime(1 * time.Minute)

	err = serv.SubmitStatusLogs(ctx, []kolide.OsqueryStatusLog{})
	assert.Nil(t, err)

	checkHost, err = ds.Host(host.ID)
	assert.Nil(t, err)
	assert.Equal(t, mockClock.Now(), checkHost.UpdatedAt)
}
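
// TestSubmitResultLogs verifies that scheduled query result logs (including
// snapshot results) are written verbatim to the result log writer and that
// the host's updated timestamp advances with each submission.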
func TestSubmitResultLogs(t *testing.T) {
	ds, err := inmem.New(config.TestConfig())
	assert.Nil(t, err)

	mockClock := clock.NewMockClock()

	svc, err := newTestServiceWithClock(ds, nil, mockClock)
	assert.Nil(t, err)

	ctx := context.Background()

	_, err = svc.EnrollAgent(ctx, "", "host123")
	assert.Nil(t, err)

	hosts, err := ds.ListHosts(kolide.ListOptions{})
	require.Nil(t, err)
	require.Len(t, hosts, 1)
	host := hosts[0]

	// Hack to get at the service internals and modify the writer
	serv := ((svc.(validationMiddleware)).Service).(service)

	// Error due to missing host
	err = serv.SubmitResultLogs(ctx, []kolide.OsqueryResultLog{})
	assert.NotNil(t, err)
	assert.Contains(t, err.Error(), "missing host")

	// Add that host
	ctx = hostctx.NewContext(ctx, *host)

	var resultBuf bytes.Buffer
	serv.osqueryResultLogWriter = &resultBuf

	logs := []string{
		`{"name":"system_info","hostIdentifier":"some_uuid","calendarTime":"Fri Sep 30 17:55:15 2016 UTC","unixTime":"1475258115","decorations":{"host_uuid":"some_uuid","username":"zwass"},"columns":{"cpu_brand":"Intel(R) Core(TM) i7-4770HQ CPU @ 2.20GHz","hostname":"hostimus","physical_memory":"17179869184"},"action":"added"}`,
		`{"name":"encrypted","hostIdentifier":"some_uuid","calendarTime":"Fri Sep 30 21:19:15 2016 UTC","unixTime":"1475270355","decorations":{"host_uuid":"4740D59F-699E-5B29-960B-979AAF9BBEEB","username":"zwass"},"columns":{"encrypted":"1","name":"\/dev\/disk1","type":"AES-XTS","uid":"","user_uuid":"","uuid":"some_uuid"},"action":"added"}`,
		`{"snapshot":[{"hour":"20","minutes":"8"}],"action":"snapshot","name":"time","hostIdentifier":"1379f59d98f4","calendarTime":"Tue Jan 10 20:08:51 2017 UTC","unixTime":"1484078931","decorations":{"host_uuid":"EB714C9D-C1F8-A436-B6DA-3F853C5502EA"}}`,
	}
	logJSON := fmt.Sprintf("[%s]", strings.Join(logs, ","))

	var results []kolide.OsqueryResultLog
	err = json.Unmarshal([]byte(logJSON), &results)
	require.Nil(t, err)

	err = serv.SubmitResultLogs(ctx, results)
	assert.Nil(t, err)

	resultJSON := resultBuf.String()
	resultJSON = strings.TrimRight(resultJSON, "\n")
	resultLines := strings.Split(resultJSON, "\n")

	if assert.Equal(t, len(logs), len(resultLines)) {
		for i, line := range resultLines {
			assert.JSONEq(t, logs[i], line)
		}
	}

	// Verify that the update time is set appropriately
	checkHost, err := ds.Host(host.ID)
	assert.Nil(t, err)
	assert.Equal(t, mockClock.Now(), checkHost.UpdatedAt)

	// Advance clock time and check that time is updated on new logs
	mockClock.AddTime(1 * time.Minute)

	err = serv.SubmitResultLogs(ctx, []kolide.OsqueryResultLog{})
	assert.Nil(t, err)

	checkHost, err = ds.Host(host.ID)
	assert.Nil(t, err)
	assert.Equal(t, mockClock.Now(), checkHost.UpdatedAt)
}
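
// TestHostDetailQueries verifies that detail queries are only generated for a
// host once enough time has passed since its last detail update, and that the
// generated query names carry the expected prefix.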
func TestHostDetailQueries(t *testing.T) {
	mockClock := clock.NewMockClock()
	host := kolide.Host{
		ID: 1,
		UpdateCreateTimestamps: kolide.UpdateCreateTimestamps{
			UpdateTimestamp: kolide.UpdateTimestamp{
				UpdatedAt: mockClock.Now(),
			},
			CreateTimestamp: kolide.CreateTimestamp{
				CreatedAt: mockClock.Now(),
			},
		},

		DetailUpdateTime: mockClock.Now(),
		NodeKey:          "test_key",
		HostName:         "test_hostname",
		UUID:             "test_uuid",
	}

	svc := service{clock: mockClock}

	queries := svc.hostDetailQueries(host)
	assert.Empty(t, queries)

	// Advance the time
	mockClock.AddTime(1*time.Hour + 1*time.Minute)

	queries = svc.hostDetailQueries(host)
	assert.Len(t, queries, len(detailQueries))
	for name := range queries {
		assert.True(t,
			strings.HasPrefix(name, hostDetailQueryPrefix),
			fmt.Sprintf("%s not prefixed with %s", name, hostDetailQueryPrefix),
		)
	}
}
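
// TestLabelQueries verifies that label queries are generated for a host based
// on its platform, that recording label query results updates the host's
// label membership, and that the label queries are issued again after the
// clock advances past the label update interval.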
func TestLabelQueries(t *testing.T) {
	ds, err := inmem.New(config.TestConfig())
	assert.Nil(t, err)

	mockClock := clock.NewMockClock()

	svc, err := newTestServiceWithClock(ds, nil, mockClock)
	assert.Nil(t, err)

	ctx := context.Background()

	_, err = svc.EnrollAgent(ctx, "", "host123")
	assert.Nil(t, err)

	hosts, err := ds.ListHosts(kolide.ListOptions{})
	require.Nil(t, err)
	require.Len(t, hosts, 1)
	host := hosts[0]

	ctx = hostctx.NewContext(ctx, *host)

	// With a new host, we should get the detail queries
	queries, err := svc.GetDistributedQueries(ctx)
	assert.Nil(t, err)
	assert.Len(t, queries, len(detailQueries))

	// Simulate the detail queries being added
	host.DetailUpdateTime = mockClock.Now().Add(-1 * time.Minute)
	host.Platform = "darwin"
	err = ds.SaveHost(host)
	require.Nil(t, err)
	ctx = hostctx.NewContext(ctx, *host)

	queries, err = svc.GetDistributedQueries(ctx)
	assert.Nil(t, err)
	assert.Len(t, queries, 0)

	labels := []kolide.Label{
		{
			Name:     "label1",
			Query:    "query1",
			Platform: "darwin",
		},
		{
			Name:     "label2",
			Query:    "query2",
			Platform: "darwin",
		},
		{
			Name:     "label3",
			Query:    "query3",
			Platform: "darwin,linux",
		},
		{
			Name:     "label4",
			Query:    "query4",
			Platform: "linux",
		},
	}

	for _, label := range labels {
		_, err := ds.NewLabel(&label)
		assert.Nil(t, err)
	}

	// Now we should get the label queries (only the three labels matching the
	// host's darwin platform)
	queries, err = svc.GetDistributedQueries(ctx)
	assert.Nil(t, err)
	assert.Len(t, queries, 3)

	// Record a query execution
	err = svc.SubmitDistributedQueryResults(
		ctx,
		map[string][]map[string]string{
			hostLabelQueryPrefix + "1": {{"col1": "val1"}},
		},
		map[string]string{},
	)
	assert.Nil(t, err)

	// Verify that labels are set appropriately
	hostLabels, err := ds.ListLabelsForHost(host.ID)
	assert.Nil(t, err)
	assert.Len(t, hostLabels, 1)
	assert.Equal(t, "label1", hostLabels[0].Name)

	// Now that query should not be returned
	queries, err = svc.GetDistributedQueries(ctx)
	assert.Nil(t, err)
	assert.Len(t, queries, 2)
	assert.NotContains(t, queries, hostLabelQueryPrefix+"1")

	// Advance the time
	mockClock.AddTime(1*time.Hour + 1*time.Minute)

	// Keep the host details fresh
	host.DetailUpdateTime = mockClock.Now().Add(-1 * time.Minute)
	err = ds.SaveHost(host)
	require.Nil(t, err)
	ctx = hostctx.NewContext(ctx, *host)

	// Now we should get all the label queries again
	queries, err = svc.GetDistributedQueries(ctx)
	assert.Nil(t, err)
	assert.Len(t, queries, 3)

	// Record a query execution
	err = svc.SubmitDistributedQueryResults(
		ctx,
		map[string][]map[string]string{
			hostLabelQueryPrefix + "2": {{"col1": "val1"}},
			hostLabelQueryPrefix + "3": {},
		},
		map[string]string{},
	)
	assert.Nil(t, err)

	// Now these should no longer show up in the necessary to run queries
	queries, err = svc.GetDistributedQueries(ctx)
	assert.Nil(t, err)
	assert.Len(t, queries, 1)

	// Verify that labels are set appropriately
	hostLabels, err = ds.ListLabelsForHost(host.ID)
	assert.Nil(t, err)
	assert.Len(t, hostLabels, 2)
	expectLabelNames := map[string]bool{"label1": true, "label2": true}
	for _, label := range hostLabels {
		assert.Contains(t, expectLabelNames, label.Name)
		delete(expectLabelNames, label.Name)
	}
}
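
// TestGetClientConfig verifies the osquery config returned to a host: the
// default options for a freshly enrolled host, and pack scheduling once the
// host becomes a member of a label targeted by a pack.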
func TestGetClientConfig(t *testing.T) {
	ds, err := inmem.New(config.TestConfig())
	require.Nil(t, err)
	require.Nil(t, ds.MigrateData())

	mockClock := clock.NewMockClock()

	svc, err := newTestServiceWithClock(ds, nil, mockClock)
	assert.Nil(t, err)

	ctx := context.Background()

	hosts, err := ds.ListHosts(kolide.ListOptions{})
	require.Nil(t, err)
	require.Len(t, hosts, 0)

	_, err = svc.EnrollAgent(ctx, "", "user.local")
	assert.Nil(t, err)

	hosts, err = ds.ListHosts(kolide.ListOptions{})
	require.Nil(t, err)
	require.Len(t, hosts, 1)
	host := hosts[0]

	ctx = hostctx.NewContext(ctx, *host)

	// with no queries, packs, labels, etc. verify the state of a fresh host
	// asking for a config
	config, err := svc.GetClientConfig(ctx)
	require.Nil(t, err)
	assert.NotNil(t, config)
	val, ok := config.Options["disable_distributed"]
	require.True(t, ok)
	disabled, ok := val.(bool)
	require.True(t, ok)
	assert.False(t, disabled)
	val, ok = config.Options["pack_delimiter"]
	require.True(t, ok)
	delim, ok := val.(string)
	require.True(t, ok)
	assert.Equal(t, "/", delim)

	// this will be greater than 0 if we ever start inserting an administration
	// pack
	assert.Len(t, config.Packs, 0)

	// let's populate the database with some info

	infoQuery := &kolide.Query{
		Name:  "Info",
		Query: "select * from osquery_info;",
	}
	infoQueryInterval := uint(60)
	infoQuery, err = ds.NewQuery(infoQuery)
	assert.Nil(t, err)

	monitoringPack := &kolide.Pack{
		Name: "monitoring",
	}
	_, err = ds.NewPack(monitoringPack)
	assert.Nil(t, err)

	test.NewScheduledQuery(t, ds, monitoringPack.ID, infoQuery.ID, infoQueryInterval, false, false)

	mysqlLabel := &kolide.Label{
		Name:  "MySQL Monitoring",
		Query: "select pid from processes where name = 'mysqld';",
	}
	mysqlLabel, err = ds.NewLabel(mysqlLabel)
	assert.Nil(t, err)

	err = ds.AddLabelToPack(mysqlLabel.ID, monitoringPack.ID)
	assert.Nil(t, err)

	err = ds.RecordLabelQueryExecutions(
		host,
		map[uint]bool{mysqlLabel.ID: true},
		mockClock.Now(),
	)
	assert.Nil(t, err)

	// with a minimal setup of packs, labels, and queries, will our host get the
	// pack
	config, err = svc.GetClientConfig(ctx)
	require.Nil(t, err)
	assert.Len(t, config.Packs, 1)
	assert.Len(t, config.Packs["monitoring"].Queries, 1)
}
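
// TestDetailQueries verifies that detail query results are ingested into the
// host record (platform, osquery version, memory, hostname, OS version,
// uptime) and that detail queries are only re-issued after the detail update
// interval elapses.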
func TestDetailQueries(t *testing.T) {
	ds, err := inmem.New(config.TestConfig())
	assert.Nil(t, err)

	mockClock := clock.NewMockClock()

	svc, err := newTestServiceWithClock(ds, nil, mockClock)
	assert.Nil(t, err)

	ctx := context.Background()

	nodeKey, err := svc.EnrollAgent(ctx, "", "host123")
	assert.Nil(t, err)

	host, err := ds.AuthenticateHost(nodeKey)
	require.Nil(t, err)

	ctx = hostctx.NewContext(ctx, *host)

	// With a new host, we should get the detail queries
	queries, err := svc.GetDistributedQueries(ctx)
	assert.Nil(t, err)
	assert.Len(t, queries, len(detailQueries))

	resultJSON := `
{
  "kolide_detail_query_network_interface": [
    {
      "address": "192.168.0.1",
      "broadcast": "192.168.0.255",
      "ibytes": "1601207629",
      "ierrors": "314179",
      "interface": "en0",
      "ipackets": "25698094",
      "last_change": "1474233476",
      "mac": "5f:3d:4b:10:25:82",
      "mask": "255.255.255.0",
      "metric": "1",
      "mtu": "1453",
      "obytes": "2607283152",
      "oerrors": "101010",
      "opackets": "12264603",
      "point_to_point": "",
      "type": "6"
    }
  ],
  "kolide_detail_query_os_version": [
    {
      "build": "15G1004",
      "major": "10",
      "minor": "10",
      "name": "Mac OS X",
      "patch": "6"
    }
  ],
  "kolide_detail_query_osquery_info": [
    {
      "build_distro": "10.10",
      "build_platform": "darwin",
      "config_hash": "3c6e4537c4d0eb71a7c6dda19d",
      "config_valid": "1",
      "extensions": "active",
      "pid": "38113",
      "start_time": "1475603155",
      "version": "1.8.2",
      "watcher": "38112"
    }
  ],
  "kolide_detail_query_system_info": [
    {
      "computer_name": "computer",
      "cpu_brand": "Intel(R) Core(TM) i7-4770HQ CPU @ 2.20GHz",
      "cpu_logical_cores": "8",
      "cpu_physical_cores": "4",
      "cpu_subtype": "Intel x86-64h Haswell",
      "cpu_type": "x86_64h",
      "hardware_model": "MacBookPro11,4",
      "hardware_serial": "ABCDEFGH",
      "hardware_vendor": "Apple Inc.",
      "hardware_version": "1.0",
      "hostname": "computer.local",
      "physical_memory": "17179869184",
      "uuid": "uuid"
    }
  ],
  "kolide_detail_query_uptime": [
    {
      "days": "20",
      "hours": "0",
      "minutes": "48",
      "seconds": "13",
      "total_seconds": "1730893"
    }
  ]
}
`

	var results kolide.OsqueryDistributedQueryResults
	err = json.Unmarshal([]byte(resultJSON), &results)
	require.Nil(t, err)

	// Verify that results are ingested properly
	err = svc.SubmitDistributedQueryResults(ctx, results, map[string]string{})
	require.Nil(t, err)

	// Make sure the result saved to the datastore
	host, err = ds.AuthenticateHost(nodeKey)
	require.Nil(t, err)

	// osquery_info
	assert.Equal(t, "darwin", host.Platform)
	assert.Equal(t, "1.8.2", host.OsqueryVersion)

	// system_info
	assert.Equal(t, 17179869184, host.PhysicalMemory)
	assert.Equal(t, "computer.local", host.HostName)
	assert.Equal(t, "uuid", host.UUID)

	// os_version
	assert.Equal(t, "Mac OS X 10.10.6", host.OSVersion)

	// uptime
	assert.Equal(t, 1730893*time.Second, host.Uptime)

	mockClock.AddTime(1 * time.Minute)

	// Now no detail queries should be required
	host, err = ds.AuthenticateHost(nodeKey)
	require.Nil(t, err)
	ctx = hostctx.NewContext(ctx, *host)
	queries, err = svc.GetDistributedQueries(ctx)
	assert.Nil(t, err)
	assert.Len(t, queries, 0)

	// Advance clock and queries should exist again
	mockClock.AddTime(1*time.Hour + 1*time.Minute)

	err = svc.SubmitDistributedQueryResults(ctx, kolide.OsqueryDistributedQueryResults{}, map[string]string{})
	require.Nil(t, err)
	host, err = ds.AuthenticateHost(nodeKey)
	require.Nil(t, err)

	ctx = hostctx.NewContext(ctx, *host)
	queries, err = svc.GetDistributedQueries(ctx)
	assert.Nil(t, err)
	assert.Len(t, queries, len(detailQueries))
}
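
// TestDistributedQueries exercises the full distributed query campaign flow:
// a campaign targeted at a label is delivered to a matching host, and the
// submitted rows are pushed over the results channel to the waiting reader.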
func TestDistributedQueries(t *testing.T) {
	ds, err := inmem.New(config.TestConfig())
	require.Nil(t, err)

	mockClock := clock.NewMockClock()

	rs := pubsub.NewInmemQueryResults()

	svc, err := newTestServiceWithClock(ds, rs, mockClock)
	require.Nil(t, err)

	ctx := context.Background()

	nodeKey, err := svc.EnrollAgent(ctx, "", "host123")
	require.Nil(t, err)

	host, err := ds.AuthenticateHost(nodeKey)
	require.Nil(t, err)
	err = ds.MarkHostSeen(host, mockClock.Now())
	require.Nil(t, err)

	ctx = hostctx.NewContext(ctx, *host)

	// Create label
	n := "foo"
	q := "select * from foo;"
	label, err := svc.NewLabel(ctx, kolide.LabelPayload{
		Name:  &n,
		Query: &q,
	})
	require.Nil(t, err)

	// Record match with label
	ctx = viewer.NewContext(ctx, viewer.Viewer{
		User: &kolide.User{
			ID: 0,
		},
	})
	err = ds.RecordLabelQueryExecutions(host, map[uint]bool{label.ID: true}, mockClock.Now())
	require.Nil(t, err)
	err = ds.MarkHostSeen(host, mockClock.Now())
	require.Nil(t, err)

	q = "select year, month, day, hour, minutes, seconds from time"
	campaign, err := svc.NewDistributedQueryCampaign(ctx, q, []uint{}, []uint{label.ID})
	require.Nil(t, err)

	// Manually set the campaign to running (so that it shows up when
	// requesting queries)
	campaign.Status = kolide.QueryRunning
	err = ds.SaveDistributedQueryCampaign(campaign)
	require.Nil(t, err)

	queryKey := fmt.Sprintf("%s%d", hostDistributedQueryPrefix, campaign.ID)

	// Now we should get the active distributed query
	queries, err := svc.GetDistributedQueries(ctx)
	require.Nil(t, err)
	assert.Len(t, queries, len(detailQueries)+1)
	assert.Equal(t, q, queries[queryKey])

	expectedRows := []map[string]string{
		{
			"year":    "2016",
			"month":   "11",
			"day":     "11",
			"hour":    "6",
			"minutes": "12",
			"seconds": "10",
		},
	}
	results := map[string][]map[string]string{
		queryKey: expectedRows,
	}

	// TODO use service method
	readChan, err := rs.ReadChannel(ctx, *campaign)
	require.Nil(t, err)

	// We need to listen for the result in a separate thread to prevent the
	// write to the result channel from failing
	var waitSetup, waitComplete sync.WaitGroup
	waitSetup.Add(1)
	waitComplete.Add(1)
	go func() {
		waitSetup.Done()
		select {
		case val := <-readChan:
			if res, ok := val.(kolide.DistributedQueryResult); ok {
				assert.Equal(t, campaign.ID, res.DistributedQueryCampaignID)
				assert.Equal(t, expectedRows, res.Rows)
				assert.Equal(t, *host, res.Host)
			} else {
				t.Error("Wrong result type")
			}
			assert.NotNil(t, val)

		case <-time.After(1 * time.Second):
			t.Error("No result received")
		}
		waitComplete.Done()
	}()

	waitSetup.Wait()
	// Sleep a short time to ensure that the above goroutine is blocking on
	// the channel read (the waitSetup.Wait() is not necessarily sufficient
	// if there is a context switch immediately after waitSetup.Done() is
	// called). This should be a small price to pay to prevent flakiness in
	// this test.
	time.Sleep(10 * time.Millisecond)

	err = svc.SubmitDistributedQueryResults(ctx, results, map[string]string{})
	require.Nil(t, err)

	// Now the distributed query should be completed and not returned
	queries, err = svc.GetDistributedQueries(ctx)
	require.Nil(t, err)
	assert.Len(t, queries, len(detailQueries))
	assert.NotContains(t, queries, queryKey)

	waitComplete.Wait()
}
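
// TestOrphanedQueryCampaign verifies that a running campaign with no reader
// subscribed to its results channel is marked complete when results for it
// arrive.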
func TestOrphanedQueryCampaign(t *testing.T) {
	ds, err := inmem.New(config.TestConfig())
	require.Nil(t, err)

	rs := pubsub.NewInmemQueryResults()

	svc, err := newTestService(ds, rs)
	require.Nil(t, err)

	ctx := context.Background()

	nodeKey, err := svc.EnrollAgent(ctx, "", "host123")
	require.Nil(t, err)

	host, err := ds.AuthenticateHost(nodeKey)
	require.Nil(t, err)

	ctx = viewer.NewContext(context.Background(), viewer.Viewer{
		User: &kolide.User{
			ID: 0,
		},
	})
	q := "select year, month, day, hour, minutes, seconds from time"
	campaign, err := svc.NewDistributedQueryCampaign(ctx, q, []uint{}, []uint{})
	require.Nil(t, err)

	campaign.Status = kolide.QueryRunning
	err = ds.SaveDistributedQueryCampaign(campaign)
	require.Nil(t, err)

	queryKey := fmt.Sprintf("%s%d", hostDistributedQueryPrefix, campaign.ID)

	expectedRows := []map[string]string{
		{
			"year":    "2016",
			"month":   "11",
			"day":     "11",
			"hour":    "6",
			"minutes": "12",
			"seconds": "10",
		},
	}
	results := map[string][]map[string]string{
		queryKey: expectedRows,
	}

	// Submit results
	ctx = hostctx.NewContext(context.Background(), *host)
	err = svc.SubmitDistributedQueryResults(ctx, results, map[string]string{})
	require.Nil(t, err)

	// The campaign should be set to completed because it is orphaned
	campaign, err = ds.DistributedQueryCampaign(campaign.ID)
	require.Nil(t, err)
	assert.Equal(t, kolide.QueryComplete, campaign.Status)
}