// Package mysql is a MySQL implementation of the Datastore interface.
package mysql

import (
	"crypto/tls"
	"crypto/x509"
	"database/sql"
	"fmt"
	"io/ioutil"
	"net/url"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/VividCortex/mysqlerr"
	"github.com/WatchBeam/clock"
	"github.com/cenkalti/backoff/v4"
	"github.com/fleetdm/fleet/v4/server/config"
	"github.com/fleetdm/fleet/v4/server/datastore/mysql/migrations/data"
	"github.com/fleetdm/fleet/v4/server/datastore/mysql/migrations/tables"
	"github.com/fleetdm/fleet/v4/server/fleet"
	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
	"github.com/go-sql-driver/mysql"
	"github.com/jmoiron/sqlx"
	"github.com/pkg/errors"
)

const (
	defaultSelectLimit = 1000000

	mySQLTimestampFormat = "2006-01-02 15:04:05" // %Y-%m-%d %H:%M:%S
)

var (
	// Matches all characters that are not word characters or '-', for replacement
	columnCharsRegexp = regexp.MustCompile(`[^\w-]`)
)

// Datastore is an implementation of the fleet.Datastore interface backed by
// MySQL.
type Datastore struct {
	db     *sqlx.DB
	logger log.Logger
	clock  clock.Clock
	config config.MysqlConfig
}

type txFn func(*sqlx.Tx) error

// retryableError determines whether a MySQL error can be retried. By default
// errors are considered non-retryable. Only errors that we know have a
// possibility of succeeding on a retry should return true in this function.
func retryableError(err error) bool {
	base := errors.Cause(err)
	if b, ok := base.(*mysql.MySQLError); ok {
		switch b.Number {
		// Consider lock related errors to be retryable
		case mysqlerr.ER_LOCK_DEADLOCK, mysqlerr.ER_LOCK_WAIT_TIMEOUT:
			return true
		}
	}

	return false
}
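
// A minimal sketch of the intended behavior (illustrative; the wrapping
// message is an assumption): a driver deadlock error remains retryable even
// after wrapping, because errors.Cause unwraps it first.
//
//	err := errors.Wrap(&mysql.MySQLError{Number: mysqlerr.ER_LOCK_DEADLOCK}, "update hosts")
//	retryableError(err) // true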

// withRetryTxx provides a common way to commit/rollback a txFn wrapped in a retry with exponential backoff
func (d *Datastore) withRetryTxx(fn txFn) (err error) {
	operation := func() error {
		tx, err := d.db.Beginx()
		if err != nil {
			return errors.Wrap(err, "create transaction")
		}

		defer func() {
			if p := recover(); p != nil {
				if err := tx.Rollback(); err != nil {
					d.logger.Log("err", err, "msg", "error encountered during transaction panic rollback")
				}
				panic(p)
			}
		}()

		if err := fn(tx); err != nil {
			rbErr := tx.Rollback()
			if rbErr != nil && rbErr != sql.ErrTxDone {
				// Consider rollback errors to be non-retryable
				return backoff.Permanent(errors.Wrapf(err, "got err '%s' rolling back after err", rbErr.Error()))
			}

			if retryableError(err) {
				return err
			}

			// Consider any other errors to be non-retryable
			return backoff.Permanent(err)
		}

		if err := tx.Commit(); err != nil {
			err = errors.Wrap(err, "commit transaction")

			if retryableError(err) {
				return err
			}

			return backoff.Permanent(err)
		}

		return nil
	}

	bo := backoff.NewExponentialBackOff()
	bo.MaxElapsedTime = 5 * time.Second
	return backoff.Retry(operation, bo)
}
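
// A minimal usage sketch for withRetryTxx (illustrative only; the table and
// column names are assumptions, not taken from this file):
//
//	err := d.withRetryTxx(func(tx *sqlx.Tx) error {
//		_, err := tx.Exec(`UPDATE hosts SET seen_time = ? WHERE id = ?`, time.Now(), 42)
//		return err
//	})
//
// Deadlock and lock wait timeout errors returned by fn are retried with
// exponential backoff for up to 5 seconds; all other errors abort the retries.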

// withTx provides a common way to commit/rollback a txFn
func (d *Datastore) withTx(fn txFn) (err error) {
	tx, err := d.db.Beginx()
	if err != nil {
		return errors.Wrap(err, "create transaction")
	}

	defer func() {
		if p := recover(); p != nil {
			if err := tx.Rollback(); err != nil {
				d.logger.Log("err", err, "msg", "error encountered during transaction panic rollback")
			}
			panic(p)
		}
	}()

	if err := fn(tx); err != nil {
		rbErr := tx.Rollback()
		if rbErr != nil && rbErr != sql.ErrTxDone {
			return errors.Wrapf(err, "got err '%s' rolling back after err", rbErr.Error())
		}
		return err
	}

	if err := tx.Commit(); err != nil {
		return errors.Wrap(err, "commit transaction")
	}

	return nil
}

// New creates a MySQL datastore.
func New(config config.MysqlConfig, c clock.Clock, opts ...DBOption) (*Datastore, error) {
	options := &dbOptions{
		maxAttempts: defaultMaxAttempts,
		logger:      log.NewNopLogger(),
	}

	for _, setOpt := range opts {
		setOpt(options)
	}

	if config.PasswordPath != "" && config.Password != "" {
		return nil, errors.New("A MySQL password and a MySQL password file were provided - please specify only one")
	}

	// If a password file was specified, read the password from it.
	if config.PasswordPath != "" {
		fileContents, err := ioutil.ReadFile(config.PasswordPath)
		if err != nil {
			return nil, err
		}
		config.Password = strings.TrimSpace(string(fileContents))
	}

	if config.TLSCA != "" {
		config.TLSConfig = "custom"
		err := registerTLS(config)
		if err != nil {
			return nil, errors.Wrap(err, "register TLS config for mysql")
		}
	}

	dsn := generateMysqlConnectionString(config)
	db, err := sqlx.Open("mysql", dsn)
	if err != nil {
		return nil, err
	}

	db.SetMaxIdleConns(config.MaxIdleConns)
	db.SetMaxOpenConns(config.MaxOpenConns)
	db.SetConnMaxLifetime(time.Second * time.Duration(config.ConnMaxLifetime))

	var dbError error
	for attempt := 0; attempt < options.maxAttempts; attempt++ {
		dbError = db.Ping()
		if dbError == nil {
			// we're connected!
			break
		}
		interval := time.Duration(attempt) * time.Second
		options.logger.Log("mysql", fmt.Sprintf(
			"could not connect to db: %v, sleeping %v", dbError, interval))
		time.Sleep(interval)
	}

	if dbError != nil {
		return nil, dbError
	}

	ds := &Datastore{
		db:     db,
		logger: options.logger,
		clock:  c,
		config: config,
	}

	return ds, nil
}
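
// A minimal construction sketch (illustrative; the literal values, clock.C,
// and the absence of DBOptions are assumptions, not taken from this file):
//
//	ds, err := New(config.MysqlConfig{
//		Protocol: "tcp",
//		Address:  "localhost:3306",
//		Username: "fleet",
//		Password: "fleet",
//		Database: "fleet",
//	}, clock.C)
//	if err != nil {
//		// handle configuration/connection error
//	}
//	defer ds.Close()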

func (d *Datastore) Begin() (fleet.Transaction, error) {
	return d.db.Beginx()
}

func (d *Datastore) Name() string {
	return "mysql"
}

func (d *Datastore) MigrateTables() error {
	return tables.MigrationClient.Up(d.db.DB, "")
}

func (d *Datastore) MigrateData() error {
	return data.MigrationClient.Up(d.db.DB, "")
}

func (d *Datastore) MigrationStatus() (fleet.MigrationStatus, error) {
	if tables.MigrationClient.Migrations == nil || data.MigrationClient.Migrations == nil {
		return 0, errors.New("unexpected nil migrations list")
	}

	lastTablesMigration, err := tables.MigrationClient.Migrations.Last()
	if err != nil {
		return 0, errors.Wrap(err, "missing tables migrations")
	}

	currentTablesVersion, err := tables.MigrationClient.GetDBVersion(d.db.DB)
	if err != nil {
		return 0, errors.Wrap(err, "cannot get table migration status")
	}

	lastDataMigration, err := data.MigrationClient.Migrations.Last()
	if err != nil {
		return 0, errors.Wrap(err, "missing data migrations")
	}

	currentDataVersion, err := data.MigrationClient.GetDBVersion(d.db.DB)
	if err != nil {
		return 0, errors.Wrap(err, "cannot get data migration status")
	}

	switch {
	case currentDataVersion == 0 && currentTablesVersion == 0:
		return fleet.NoMigrationsCompleted, nil

	case currentTablesVersion != lastTablesMigration.Version ||
		currentDataVersion != lastDataMigration.Version:
		return fleet.SomeMigrationsCompleted, nil

	default:
		return fleet.AllMigrationsCompleted, nil
	}
}

// Drop removes all tables in the configured database.
func (d *Datastore) Drop() error {
	tables := []struct {
		Name string `db:"TABLE_NAME"`
	}{}

	sql := `
		SELECT TABLE_NAME
		FROM INFORMATION_SCHEMA.TABLES
		WHERE TABLE_SCHEMA = ?;
	`

	if err := d.db.Select(&tables, sql, d.config.Database); err != nil {
		return err
	}

	tx, err := d.db.Begin()
	if err != nil {
		return err
	}

	_, err = tx.Exec("SET FOREIGN_KEY_CHECKS = 0")
	if err != nil {
		return tx.Rollback()
	}

	for _, table := range tables {
		_, err = tx.Exec(fmt.Sprintf("DROP TABLE %s;", table.Name))
		if err != nil {
			return tx.Rollback()
		}
	}
	_, err = tx.Exec("SET FOREIGN_KEY_CHECKS = 1")
	if err != nil {
		return tx.Rollback()
	}
	return tx.Commit()
}

// HealthCheck returns an error if the MySQL backend is not healthy.
func (d *Datastore) HealthCheck() error {
	_, err := d.db.Exec("select 1")
	return err
}

// Close frees resources associated with the underlying mysql connection.
func (d *Datastore) Close() error {
	return d.db.Close()
}

func sanitizeColumn(col string) string {
	return columnCharsRegexp.ReplaceAllString(col, "")
}

func appendListOptionsToSQL(sql string, opts fleet.ListOptions) string {
	if opts.OrderKey != "" {
		direction := "ASC"
		if opts.OrderDirection == fleet.OrderDescending {
			direction = "DESC"
		}
		orderKey := sanitizeColumn(opts.OrderKey)

		sql = fmt.Sprintf("%s ORDER BY %s %s", sql, orderKey, direction)
	}
	// If the caller doesn't supply a limit, apply defaultSelectLimit to ensure
	// that an unbounded query with many results doesn't consume too much
	// memory or hang.
	if opts.PerPage == 0 {
		opts.PerPage = defaultSelectLimit
	}

	sql = fmt.Sprintf("%s LIMIT %d", sql, opts.PerPage)

	offset := opts.PerPage * opts.Page

	if offset > 0 {
		sql = fmt.Sprintf("%s OFFSET %d", sql, offset)
	}

	return sql
}
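
// A before/after sketch for appendListOptionsToSQL (illustrative; the query
// and column name are assumptions):
//
//	sql := appendListOptionsToSQL(
//		"SELECT * FROM hosts WHERE TRUE",
//		fleet.ListOptions{OrderKey: "hostname", Page: 2, PerPage: 50},
//	)
//	// sql == "SELECT * FROM hosts WHERE TRUE ORDER BY hostname ASC LIMIT 50 OFFSET 100"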

// whereFilterHostsByTeams returns the appropriate condition to use in the WHERE
// clause to render only the hosts on the appropriate teams.
//
// filter provides the filtering parameters that should be used. hostKey is the
// name/alias of the hosts table to use in generating the SQL.
func (d *Datastore) whereFilterHostsByTeams(filter fleet.TeamFilter, hostKey string) string {
	if filter.User == nil {
		// This is likely unintentional, however we would like to return no
		// results rather than panicking or returning some other error. At least
		// log.
		level.Info(d.logger).Log("err", "team filter missing user")
		return "FALSE"
	}

	if filter.User.GlobalRole != nil {
		switch *filter.User.GlobalRole {

		case fleet.RoleAdmin, fleet.RoleMaintainer:
			return "TRUE"

		case fleet.RoleObserver:
			if filter.IncludeObserver {
				return "TRUE"
			} else {
				return "FALSE"
			}

		default:
			// Fall through to specific teams
		}
	}

	// Collect matching teams
	var idStrs []string
	for _, team := range filter.User.Teams {
		if team.Role == fleet.RoleAdmin || team.Role == fleet.RoleMaintainer ||
			(team.Role == fleet.RoleObserver && filter.IncludeObserver) {
			idStrs = append(idStrs, strconv.Itoa(int(team.ID)))
		}
	}

	if len(idStrs) == 0 {
		// User has no global role and no teams allowed by includeObserver.
		return "FALSE"
	}

	return fmt.Sprintf("%s.team_id IN (%s)", hostKey, strings.Join(idStrs, ","))
}
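
// A usage sketch (illustrative; the surrounding query and table alias are
// assumptions): the returned fragment is interpolated into a larger query.
//
//	whereClause := d.whereFilterHostsByTeams(filter, "h")
//	sql := fmt.Sprintf(`SELECT h.id FROM hosts h WHERE %s`, whereClause)
//	// For a user with the maintainer role on teams 1 and 3 (and no global
//	// role), whereClause == "h.team_id IN (1,3)"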

// whereFilterTeams returns the appropriate condition to use in the WHERE
// clause to render only the appropriate teams.
//
// filter provides the filtering parameters that should be used. teamKey is the
// name/alias of the teams table to use in generating the SQL.
func (d *Datastore) whereFilterTeams(filter fleet.TeamFilter, teamKey string) string {
	if filter.User == nil {
		// This is likely unintentional, however we would like to return no
		// results rather than panicking or returning some other error. At least
		// log.
		level.Info(d.logger).Log("err", "team filter missing user")
		return "FALSE"
	}

	if filter.User.GlobalRole != nil {
		switch *filter.User.GlobalRole {

		case fleet.RoleAdmin, fleet.RoleMaintainer:
			return "TRUE"

		case fleet.RoleObserver:
			if filter.IncludeObserver {
				return "TRUE"
			} else {
				return "FALSE"
			}

		default:
			// Fall through to specific teams
		}
	}

	// Collect matching teams
	var idStrs []string
	for _, team := range filter.User.Teams {
		if team.Role == fleet.RoleAdmin || team.Role == fleet.RoleMaintainer ||
			(team.Role == fleet.RoleObserver && filter.IncludeObserver) {
			idStrs = append(idStrs, strconv.Itoa(int(team.ID)))
		}
	}

	if len(idStrs) == 0 {
		// User has no global role and no teams allowed by includeObserver.
		return "FALSE"
	}

	return fmt.Sprintf("%s.id IN (%s)", teamKey, strings.Join(idStrs, ","))
}

// whereOmitIDs returns the appropriate condition to use in the WHERE
// clause to omit the provided IDs from the selection.
func (d *Datastore) whereOmitIDs(colName string, omit []uint) string {
	if len(omit) == 0 {
		return "TRUE"
	}

	var idStrs []string
	for _, id := range omit {
		idStrs = append(idStrs, strconv.Itoa(int(id)))
	}

	return fmt.Sprintf("%s NOT IN (%s)", colName, strings.Join(idStrs, ","))
}

// registerTLS adds client certificate configuration to the mysql connection.
func registerTLS(config config.MysqlConfig) error {
	rootCertPool := x509.NewCertPool()
	pem, err := ioutil.ReadFile(config.TLSCA)
	if err != nil {
		return errors.Wrap(err, "read server-ca pem")
	}
	if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
		return errors.New("failed to append PEM")
	}
	cfg := tls.Config{
		RootCAs: rootCertPool,
	}
	if config.TLSCert != "" {
		clientCert := make([]tls.Certificate, 0, 1)
		certs, err := tls.LoadX509KeyPair(config.TLSCert, config.TLSKey)
		if err != nil {
			return errors.Wrap(err, "load mysql client cert and key")
		}
		clientCert = append(clientCert, certs)

		cfg.Certificates = clientCert
	}
	if config.TLSServerName != "" {
		cfg.ServerName = config.TLSServerName
	}
	if err := mysql.RegisterTLSConfig(config.TLSConfig, &cfg); err != nil {
		return errors.Wrap(err, "register mysql tls config")
	}
	return nil
}

// generateMysqlConnectionString returns a MySQL connection string using the
// provided configuration.
func generateMysqlConnectionString(conf config.MysqlConfig) string {
	tz := url.QueryEscape("'-00:00'")
	dsn := fmt.Sprintf(
		"%s:%s@%s(%s)/%s?charset=utf8mb4&parseTime=true&loc=UTC&time_zone=%s&clientFoundRows=true&allowNativePasswords=true",
		conf.Username,
		conf.Password,
		conf.Protocol,
		conf.Address,
		conf.Database,
		tz,
	)

	if conf.TLSConfig != "" {
		dsn = fmt.Sprintf("%s&tls=%s", dsn, conf.TLSConfig)
	}

	return dsn
}
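
// For illustration (assumed values, not taken from this file), a config with
// Username "fleet", Password "secret", Protocol "tcp", Address
// "localhost:3306", Database "fleet", and no TLSConfig produces a DSN along
// these lines:
//
//	fleet:secret@tcp(localhost:3306)/fleet?charset=utf8mb4&parseTime=true&loc=UTC&time_zone=%27-00%3A00%27&clientFoundRows=true&allowNativePasswords=true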

// isChildForeignKeyError checks if the provided error is a MySQL child foreign key
// error (Error #1452)
func isChildForeignKeyError(err error) bool {
	mysqlErr, ok := err.(*mysql.MySQLError)
	if !ok {
		return false
	}

	// https://dev.mysql.com/doc/refman/5.7/en/error-messages-server.html#error_er_no_referenced_row_2
	const ER_NO_REFERENCED_ROW_2 = 1452
	return mysqlErr.Number == ER_NO_REFERENCED_ROW_2
}

// searchLike adds SQL and parameters for a "search" using LIKE syntax.
//
// The input columns must be sanitized if they are provided by the user.
func searchLike(sql string, params []interface{}, match string, columns ...string) (string, []interface{}) {
	if len(columns) == 0 {
		return sql, params
	}

	match = strings.Replace(match, "_", "\\_", -1)
	match = strings.Replace(match, "%", "\\%", -1)
	pattern := "%" + match + "%"
	ors := make([]string, 0, len(columns))
	for _, column := range columns {
		ors = append(ors, column+" LIKE ?")
		params = append(params, pattern)
	}

	sql += " AND (" + strings.Join(ors, " OR ") + ")"
	return sql, params
}
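
// A usage sketch for searchLike (illustrative; the query and columns are
// assumptions). '%' and '_' in the match string are escaped before being
// wrapped in wildcards.
//
//	sql := "SELECT * FROM hosts WHERE TRUE"
//	params := []interface{}{}
//	sql, params = searchLike(sql, params, "chromebook", "hostname", "uuid")
//	// sql    == "SELECT * FROM hosts WHERE TRUE AND (hostname LIKE ? OR uuid LIKE ?)"
//	// params == []interface{}{"%chromebook%", "%chromebook%"}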