Mirror of https://github.com/valitydev/redash.git, synced 2024-11-07 09:28:51 +00:00. Commit 5a5fdecdde:
* add rq and an rq_worker service
* add rq_scheduler and an rq_scheduler service
* move beat schedule to periodic_jobs queue
* move version checks to RQ
* move query result cleanup to RQ
* use timedelta and DRY up a bit
* move custom tasks to RQ
* do actual schema refreshes in rq
* rename 'period_jobs' to 'periodic', as it obviously holds jobs
* move send_email to rq
* DRY up enqueues
* ditch and use a partially applied decorator
* move subscribe to rq
* move check_alerts_for_query to rq
* move record_event to rq
* make tests play nicely with rq
* 👋 beat
* rename rq_scheduler to plain scheduler, now that there's no Celery scheduler entrypoint
* add some color to rq-worker's output
* add logging context to rq jobs (while keeping execute_query context via get_task_logger for now)
* move schedule to its own module
* cancel previously scheduled periodic jobs. not sure this is a good idea.
* rename redash.scheduler to redash.schedule
* allow custom dynamic jobs to be added declaratively
* add basic monitoring to rq queues
* add worker monitoring
* pleasing the CodeClimate overlords
* adjust cypress docker-compose.yml to include rq changes
* DRY up Cypress docker-compose
* add rq dependencies to cypress docker-compose service
* an odd attempt at watching docker-compose logs when running with Cypress
* Revert "an odd attempt at watching docker-compose logs when running with Cypress"
This reverts commit 016bd1a93e3efa84a9f27d0f2acb972ce1957bcd.
* show docker-compose logs at Cypress shutdown
* Revert "DRY up Cypress docker-compose"
This reverts commit 43abac7084c207ab9e39192ac79d520448c2c527.
* minimal version for binding is 3.2
* remove unnecessary code reloads on Cypress
* add a command which errors if any of the workers running inside the current machine haven't been active in the last minute
* SCHEMAS_REFRESH_QUEUE is no longer a required setting
* split tasks/queries.py to execution.py and maintenance.py
* fix tests after query execution split
* pleasing the CodeClimate overlords
* rename worker to celery_worker and rq_worker to worker
* use /rq_status instead of /jobs
* show started jobs' time ago according to UTC
* replace all spaces in column names
* fix query tests after execution split
* exit with an int
* general lint
* add an entrypoint for rq_healthcheck
* fix indentation
* delete all existing periodic jobs before scheduling them
* remove some unrequired requires
* move schedule example to redash.schedule
* add RQ integration to Sentry's setup
* pleasing the CodeClimate overlords
* remove replication settings from docker-compose - a proper way to scale using docker-compose is the --scale CLI option, which will be described in the knowledge base (see the sketch after this list)
* revert to calling a function in dynamic settings to allow periodic jobs to be scheduled after app has been loaded
* don't need to depend on context when templating failure reports
* set the timeout_ttl to double the interval, to keep job results from expiring and periodic jobs from failing to reschedule
* whoops, bad merge
* describe custom jobs and don't actually schedule them
* fix merge
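A minimal usage sketch of the pieces mentioned above; the service name "worker" and the entrypoint path /app/bin/docker-entrypoint are assumptions, not taken from this repository:

    # Scale RQ workers with docker-compose's --scale CLI option (assumed service name "worker"):
    docker-compose up -d --scale worker=3
    # Run the new healthcheck command inside a worker container; it exits non-zero
    # if any worker in that container hasn't been active in the last minute:
    docker-compose exec worker /app/bin/docker-entrypoint rq_healthcheck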
169 lines · 4.0 KiB · Bash · Executable File
#!/bin/bash
set -e

celery_worker() {
  WORKERS_COUNT=${WORKERS_COUNT:-2}
  QUEUES=${QUEUES:-queries,scheduled_queries}
  WORKER_EXTRA_OPTIONS=${WORKER_EXTRA_OPTIONS:-}

  echo "Starting $WORKERS_COUNT workers for queues: $QUEUES..."
  exec /usr/local/bin/celery worker --app=redash.worker -c$WORKERS_COUNT -Q$QUEUES -linfo --max-tasks-per-child=10 -Ofair $WORKER_EXTRA_OPTIONS
}

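# Usage sketch (assumption, not part of the original script): the Celery worker is
# tuned through the environment variables read above, e.g.
#   WORKERS_COUNT=4 QUEUES=queries,scheduled_queries ./docker-entrypoint celery_worker
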
scheduler() {
  echo "Starting RQ scheduler..."

  exec /app/manage.py rq scheduler
}

dev_scheduler() {
  echo "Starting dev RQ scheduler..."

  exec watchmedo auto-restart --directory=./redash/ --pattern=*.py --recursive -- ./manage.py rq scheduler
}

worker() {
  echo "Starting RQ worker..."

  exec /app/manage.py rq worker $QUEUES
}

dev_worker() {
  echo "Starting dev RQ worker..."

  exec watchmedo auto-restart --directory=./redash/ --pattern=*.py --recursive -- ./manage.py rq worker $QUEUES
}

dev_celery_worker() {
  WORKERS_COUNT=${WORKERS_COUNT:-2}
  QUEUES=${QUEUES:-queries,scheduled_queries}

  echo "Starting $WORKERS_COUNT workers for queues: $QUEUES..."

  exec watchmedo auto-restart --directory=./redash/ --pattern=*.py --recursive -- /usr/local/bin/celery worker --app=redash.worker -c$WORKERS_COUNT -Q$QUEUES -linfo --max-tasks-per-child=10 -Ofair
}

server() {
  # Recycle gunicorn workers every n-th request. See http://docs.gunicorn.org/en/stable/settings.html#max-requests for more details.
  MAX_REQUESTS=${MAX_REQUESTS:-1000}
  MAX_REQUESTS_JITTER=${MAX_REQUESTS_JITTER:-100}
  exec /usr/local/bin/gunicorn -b 0.0.0.0:5000 --name redash -w${REDASH_WEB_WORKERS:-4} redash.wsgi:app --max-requests $MAX_REQUESTS --max-requests-jitter $MAX_REQUESTS_JITTER
}

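# Usage sketch (assumption, not part of the original script): gunicorn sizing and
# worker recycling are controlled via environment variables, e.g.
#   REDASH_WEB_WORKERS=8 MAX_REQUESTS=2000 MAX_REQUESTS_JITTER=200 ./docker-entrypoint server
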
create_db() {
  exec /app/manage.py database create_tables
}

celery_healthcheck() {
  exec /usr/local/bin/celery inspect ping --app=redash.worker -d celery@$HOSTNAME
}

rq_healthcheck() {
  exec /app/manage.py rq healthcheck
}

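# Usage sketch (assumption, not part of the original script): both healthchecks are
# meant for Docker's HEALTHCHECK mechanism, but can also be run by hand against a
# container (the container name is hypothetical), e.g.
#   docker exec redash_worker_1 ./docker-entrypoint rq_healthcheck && echo healthy
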
help() {
  echo "Redash Docker."
  echo ""
  echo "Usage:"
  echo ""

  echo "server -- start Redash server (with gunicorn)"
  echo "celery_worker -- start Celery worker"
  echo "dev_celery_worker -- start Celery worker process which picks up code changes and reloads"
  echo "worker -- start a single RQ worker"
  echo "dev_worker -- start a single RQ worker with code reloading"
  echo "scheduler -- start an rq-scheduler instance"
  echo "dev_scheduler -- start an rq-scheduler instance with code reloading"
  echo "celery_healthcheck -- runs a Celery healthcheck. Useful for Docker's HEALTHCHECK mechanism."
  echo "rq_healthcheck -- runs an RQ healthcheck that verifies that all local workers are active. Useful for Docker's HEALTHCHECK mechanism."
  echo ""
  echo "shell -- open shell"
  echo "dev_server -- start Flask development server with debugger and auto reload"
  echo "debug -- start Flask development server with remote debugger via ptvsd"
  echo "create_db -- create database tables"
  echo "manage -- CLI to manage redash"
  echo "tests -- run tests"
}

tests() {
  export REDASH_DATABASE_URL="postgresql://postgres@postgres/tests"

  if [ $# -eq 0 ]; then
    TEST_ARGS=tests/
  else
    TEST_ARGS=$@
  fi
  exec pytest $TEST_ARGS
}

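# Usage sketch (assumption, not part of the original script): any extra arguments
# are passed straight through to pytest, e.g.
#   ./docker-entrypoint tests tests/models -k outdated
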
case "$1" in
|
|
worker)
|
|
shift
|
|
worker
|
|
;;
|
|
server)
|
|
shift
|
|
server
|
|
;;
|
|
scheduler)
|
|
shift
|
|
scheduler
|
|
;;
|
|
dev_scheduler)
|
|
shift
|
|
dev_scheduler
|
|
;;
|
|
celery_worker)
|
|
shift
|
|
celery_worker
|
|
;;
|
|
dev_celery_worker)
|
|
shift
|
|
dev_celery_worker
|
|
;;
|
|
dev_worker)
|
|
shift
|
|
dev_worker
|
|
;;
|
|
rq_healthcheck)
|
|
shift
|
|
rq_healthcheck
|
|
;;
|
|
celery_healthcheck)
|
|
shift
|
|
celery_healthcheck
|
|
;;
|
|
dev_server)
|
|
export FLASK_DEBUG=1
|
|
exec /app/manage.py runserver --debugger --reload -h 0.0.0.0
|
|
;;
|
|
debug)
|
|
export FLASK_DEBUG=1
|
|
export REMOTE_DEBUG=1
|
|
exec /app/manage.py runserver --debugger --no-reload -h 0.0.0.0
|
|
;;
|
|
shell)
|
|
exec /app/manage.py shell
|
|
;;
|
|
create_db)
|
|
create_db
|
|
;;
|
|
manage)
|
|
shift
|
|
exec /app/manage.py $*
|
|
;;
|
|
tests)
|
|
shift
|
|
tests $@
|
|
;;
|
|
help)
|
|
shift
|
|
help
|
|
;;
|
|
*)
|
|
exec "$@"
|
|
;;
|
|
esac
|
|
|
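# Note (assumption, not part of the original script): any command not listed above
# falls through to the catch-all case and is exec'd verbatim, so e.g.
#   /app/bin/docker-entrypoint bash
# opens an interactive shell inside the container.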