[Start of the diff truncated and HTML markup stripped in extraction. The surviving fragments, {{table.name}}, ({{table.size}}) and {{column}}, are template bindings being removed and re-added inside new wrapper markup; given the schema-browser.js changes that follow, this is the schema browser template (schema-browser.html).]
diff --git a/client/app/pages/queries/schema-browser.js b/client/app/pages/queries/schema-browser.js
index e1912790..2f0e71ce 100644
--- a/client/app/pages/queries/schema-browser.js
+++ b/client/app/pages/queries/schema-browser.js
@@ -1,31 +1,33 @@
import template from './schema-browser.html';
-function schemaBrowser() {
- return {
- restrict: 'E',
- scope: {
- schema: '=',
- },
- template,
- link($scope) {
- $scope.showTable = (table) => {
- table.collapsed = !table.collapsed;
- $scope.$broadcast('vsRepeatTrigger');
- };
+function SchemaBrowserCtrl($scope) {
+ 'ngInject';
- $scope.getSize = (table) => {
- let size = 18;
+ this.showTable = (table) => {
+ table.collapsed = !table.collapsed;
+ $scope.$broadcast('vsRepeatTrigger');
+ };
- if (!table.collapsed) {
- size += 18 * table.columns.length;
- }
+ this.getSize = (table) => {
+ let size = 18;
- return size;
- };
- },
+ if (!table.collapsed) {
+ size += 18 * table.columns.length;
+ }
+
+ return size;
};
}
+const SchemaBrowser = {
+ bindings: {
+ schema: '<',
+ onRefresh: '&',
+ },
+ controller: SchemaBrowserCtrl,
+ template,
+};
+
export default function (ngModule) {
- ngModule.directive('schemaBrowser', schemaBrowser);
+ ngModule.component('schemaBrowser', SchemaBrowser);
}
diff --git a/client/app/pages/queries/source-view.js b/client/app/pages/queries/source-view.js
index 86db0e03..b9d4ff1e 100644
--- a/client/app/pages/queries/source-view.js
+++ b/client/app/pages/queries/source-view.js
@@ -29,24 +29,19 @@ function QuerySourceCtrl(Events, toastr, $controller, $scope, $location, $http,
},
});
- $scope.shortcuts = {
- 'meta+s': function save() {
+ const shortcuts = {
+ 'mod+s': function save() {
if ($scope.canEdit) {
$scope.saveQuery();
}
},
- 'ctrl+s': function save() {
- if ($scope.canEdit) {
- $scope.saveQuery();
- }
- },
- // Cmd+Enter for Mac
- 'meta+enter': $scope.executeQuery,
- // Ctrl+Enter for PC
- 'ctrl+enter': $scope.executeQuery,
};
- KeyboardShortcuts.bind($scope.shortcuts);
+ KeyboardShortcuts.bind(shortcuts);
+
+ $scope.$on('$destroy', () => {
+ KeyboardShortcuts.unbind(shortcuts);
+ });
// @override
$scope.saveQuery = (options, data) => {
@@ -106,10 +101,6 @@ function QuerySourceCtrl(Events, toastr, $controller, $scope, $location, $http,
$scope.$watch('query.query', (newQueryText) => {
$scope.isDirty = (newQueryText !== queryText);
});
-
- $scope.$on('$destroy', () => {
- KeyboardShortcuts.unbind($scope.shortcuts);
- });
}
export default function (ngModule) {
diff --git a/client/app/pages/queries/view.js b/client/app/pages/queries/view.js
index 3c1dc568..da5de16a 100644
--- a/client/app/pages/queries/view.js
+++ b/client/app/pages/queries/view.js
@@ -1,9 +1,9 @@
import { pick, any, some, find } from 'underscore';
import template from './query.html';
-function QueryViewCtrl($scope, Events, $route, $routeParams, $http, $location, $window, $q,
- Title, AlertDialog, Notifications, clientConfig, toastr, $uibModal, currentUser,
- Query, DataSource) {
+function QueryViewCtrl($scope, Events, $route, $routeParams, $location, $window, $q,
+ KeyboardShortcuts, Title, AlertDialog, Notifications, clientConfig, toastr, $uibModal,
+ currentUser, Query, DataSource) {
const DEFAULT_TAB = 'table';
function getQueryResult(maxAge) {
@@ -43,26 +43,36 @@ function QueryViewCtrl($scope, Events, $route, $routeParams, $http, $location, $
return dataSourceId;
}
- function updateSchema() {
- $scope.hasSchema = false;
- $scope.editorSize = 'col-md-12';
- DataSource.getSchema({ id: $scope.query.data_source_id }, (data) => {
- if (data && data.length > 0) {
+ function toggleSchemaBrowser(hasSchema) {
+ $scope.hasSchema = hasSchema;
+ $scope.editorSize = hasSchema ? 'col-md-9' : 'col-md-12';
+ }
+
+ function getSchema(refresh = undefined) {
+ DataSource.getSchema({ id: $scope.query.data_source_id, refresh }, (data) => {
+ const hasPrevSchema = refresh ? ($scope.schema && ($scope.schema.length > 0)) : false;
+ const hasSchema = data && (data.length > 0);
+
+ if (hasSchema) {
$scope.schema = data;
data.forEach((table) => {
table.collapsed = true;
});
-
- $scope.editorSize = 'col-md-9';
- $scope.hasSchema = true;
- } else {
- $scope.schema = undefined;
- $scope.hasSchema = false;
- $scope.editorSize = 'col-md-12';
+ } else if (hasPrevSchema) {
+ toastr.error('Schema refresh failed. Please try again later.');
}
+
+ toggleSchemaBrowser(hasSchema || hasPrevSchema);
});
}
+ function updateSchema() {
+ toggleSchemaBrowser(false);
+ getSchema();
+ }
+
+ $scope.refreshSchema = () => getSchema(true);
+
function updateDataSources(dataSources) {
// Filter out data sources the user can't query (or used by current query):
$scope.dataSources = dataSources.filter(dataSource =>
@@ -85,11 +95,38 @@ function QueryViewCtrl($scope, Events, $route, $routeParams, $http, $location, $
updateSchema();
}
+ $scope.executeQuery = () => {
+ if (!$scope.canExecuteQuery()) {
+ return;
+ }
+
+ if (!$scope.query.query) {
+ return;
+ }
+
+ getQueryResult(0);
+ $scope.lockButton(true);
+ $scope.cancelling = false;
+ Events.record('execute', 'query', $scope.query.id);
+
+ Notifications.getPermissions();
+ };
+
+
$scope.currentUser = currentUser;
$scope.dataSource = {};
$scope.query = $route.current.locals.query;
$scope.showPermissionsControl = clientConfig.showPermissionsControl;
+ const shortcuts = {
+ 'mod+enter': $scope.executeQuery,
+ };
+
+ KeyboardShortcuts.bind(shortcuts);
+
+ $scope.$on('$destroy', () => {
+ KeyboardShortcuts.unbind(shortcuts);
+ });
Events.record('view', 'query', $scope.query.id);
if ($scope.query.hasResult() || $scope.query.paramsRequired()) {
@@ -172,23 +209,6 @@ function QueryViewCtrl($scope, Events, $route, $routeParams, $http, $location, $
$scope.saveQuery(undefined, { name: $scope.query.name });
};
- $scope.executeQuery = () => {
- if (!$scope.canExecuteQuery()) {
- return;
- }
-
- if (!$scope.query.query) {
- return;
- }
-
- getQueryResult(0);
- $scope.lockButton(true);
- $scope.cancelling = false;
- Events.record('execute', 'query', $scope.query.id);
-
- Notifications.getPermissions();
- };
-
$scope.cancelExecution = () => {
$scope.cancelling = true;
$scope.queryResult.cancelExecution();
diff --git a/client/app/pages/queries/visualization-embed.html b/client/app/pages/queries/visualization-embed.html
index 59300baa..d7838ca5 100644
--- a/client/app/pages/queries/visualization-embed.html
+++ b/client/app/pages/queries/visualization-embed.html
@@ -2,7 +2,7 @@
[One changed line of markup stripped in extraction.]
{{$ctrl.query.name}}
diff --git a/client/app/services/data-source.js b/client/app/services/data-source.js
index d7a9cbc8..b6d42379 100644
--- a/client/app/services/data-source.js
+++ b/client/app/services/data-source.js
@@ -3,7 +3,7 @@ function DataSource($resource) {
get: { method: 'GET', cache: false, isArray: false },
query: { method: 'GET', cache: false, isArray: true },
test: { method: 'POST', cache: false, isArray: false, url: 'api/data_sources/:id/test' },
- getSchema: { method: 'GET', cache: true, isArray: true, url: 'api/data_sources/:id/schema' },
+ getSchema: { method: 'GET', cache: false, isArray: true, url: 'api/data_sources/:id/schema' },
};
const DataSourceResource = $resource('api/data_sources/:id', { id: '@id' }, actions);
diff --git a/client/app/services/keyboard-shortcuts.js b/client/app/services/keyboard-shortcuts.js
index f81c607c..d8201320 100644
--- a/client/app/services/keyboard-shortcuts.js
+++ b/client/app/services/keyboard-shortcuts.js
@@ -1,10 +1,12 @@
import { each } from 'underscore';
import Mousetrap from 'mousetrap';
+import 'mousetrap/plugins/global-bind/mousetrap-global-bind';
+
function KeyboardShortcuts() {
this.bind = function bind(keymap) {
each(keymap, (fn, key) => {
- Mousetrap.bind(key, (e) => {
+ Mousetrap.bindGlobal(key, (e) => {
e.preventDefault();
fn();
});
diff --git a/client/app/services/query-result.js b/client/app/services/query-result.js
index 01974d2c..4680f03b 100644
--- a/client/app/services/query-result.js
+++ b/client/app/services/query-result.js
@@ -216,15 +216,20 @@ function QueryResultService($resource, $timeout, $q) {
return this.filteredData;
}
+ isEmpty() {
+ return this.getData() === null || this.getData().length === 0;
+ }
+
getChartData(mapping) {
const series = {};
this.getData().forEach((row) => {
- const point = {};
+ let point = {};
let seriesName;
let xValue = 0;
const yValues = {};
let eValue = null;
+ let sizeValue = null;
each(row, (v, definition) => {
const name = definition.split('::')[0] || definition.split('__')[0];
@@ -258,6 +263,11 @@ function QueryResultService($resource, $timeout, $q) {
seriesName = String(value);
}
+ if (type === 'size') {
+ point[type] = value;
+ sizeValue = value;
+ }
+
if (type === 'multiFilter' || type === 'multi-filter') {
seriesName = String(value);
}
@@ -265,11 +275,15 @@ function QueryResultService($resource, $timeout, $q) {
if (seriesName === undefined) {
each(yValues, (yValue, ySeriesName) => {
+ point = { x: xValue, y: yValue };
if (eValue !== null) {
- addPointToSeries({ x: xValue, y: yValue, yError: eValue }, series, ySeriesName);
- } else {
- addPointToSeries({ x: xValue, y: yValue }, series, ySeriesName);
+ point.yError = eValue;
}
+
+ if (sizeValue !== null) {
+ point.size = sizeValue;
+ }
+ addPointToSeries(point, series, ySeriesName);
});
} else {
addPointToSeries(point, series, seriesName);
@@ -339,7 +353,11 @@ function QueryResultService($resource, $timeout, $q) {
filters.forEach((filter) => {
filter.values.push(row[filter.name]);
if (filter.values.length === 1) {
- filter.current = row[filter.name];
+ if (filter.multiple) {
+ filter.current = [row[filter.name]];
+ } else {
+ filter.current = row[filter.name];
+ }
}
});
});
diff --git a/client/app/services/query.js b/client/app/services/query.js
index 2e98abb3..ef98857b 100644
--- a/client/app/services/query.js
+++ b/client/app/services/query.js
@@ -43,6 +43,43 @@ class QueryResultError {
}
+class Parameter {
+ constructor(parameter) {
+ this.title = parameter.title;
+ this.name = parameter.name;
+ this.type = parameter.type;
+ this.value = parameter.value;
+ this.global = parameter.global;
+ }
+
+ get ngModel() {
+ if (this.type === 'date' || this.type === 'datetime-local' || this.type === 'datetime-with-seconds') {
+ this.$$value = this.$$value || moment(this.value).toDate();
+ return this.$$value;
+ } else if (this.type === 'number') {
+ this.$$value = this.$$value || parseInt(this.value, 10);
+ return this.$$value;
+ }
+
+ return this.value;
+ }
+
+ set ngModel(value) {
+ if (value && this.type === 'date') {
+ this.value = moment(value).format('YYYY-MM-DD');
+ this.$$value = moment(this.value).toDate();
+ } else if (value && this.type === 'datetime-local') {
+ this.value = moment(value).format('YYYY-MM-DD HH:mm');
+ this.$$value = moment(this.value).toDate();
+ } else if (value && this.type === 'datetime-with-seconds') {
+ this.value = moment(value).format('YYYY-MM-DD HH:mm:ss');
+ this.$$value = moment(this.value).toDate();
+ } else {
+ this.value = this.$$value = value;
+ }
+ }
+}
+
class Parameters {
constructor(query, queryString) {
this.query = query;
@@ -84,7 +121,8 @@ class Parameters {
});
const parameterExists = p => contains(parameterNames, p.name);
- this.query.options.parameters = this.query.options.parameters.filter(parameterExists);
+ this.query.options.parameters =
+ this.query.options.parameters.filter(parameterExists).map(p => new Parameter(p));
}
initFromQueryString(queryString) {
diff --git a/client/app/visualizations/box-plot/index.js b/client/app/visualizations/box-plot/index.js
index 7ce3973d..b20cc258 100644
--- a/client/app/visualizations/box-plot/index.js
+++ b/client/app/visualizations/box-plot/index.js
@@ -176,7 +176,7 @@ export default function (ngModule) {
VisualizationProvider.registerVisualization({
type: 'BOXPLOT',
- name: 'Boxplot',
+ name: 'Boxplot (Deprecated)',
renderTemplate,
editorTemplate: editTemplate,
});
diff --git a/client/app/visualizations/chart/chart-editor.html b/client/app/visualizations/chart/chart-editor.html
index 09b79d0e..80b23a88 100644
--- a/client/app/visualizations/chart/chart-editor.html
+++ b/client/app/visualizations/chart/chart-editor.html
@@ -72,6 +72,18 @@
[Added markup stripped in extraction; only a {{$select.selected}} fragment survives. Judging from the chart/index.js changes below, this block adds the bubble size column picker to the chart editor.]
@@ -91,6 +103,13 @@
[Added markup stripped in extraction.]
@@ -104,6 +123,13 @@
[Added markup stripped in extraction.]
diff --git a/client/app/visualizations/chart/index.js b/client/app/visualizations/chart/index.js
index e82218c3..b9f93b90 100644
--- a/client/app/visualizations/chart/index.js
+++ b/client/app/visualizations/chart/index.js
@@ -1,4 +1,4 @@
-import { extend, has, partial, intersection, without, contains, isUndefined, sortBy, each, pluck, keys, difference } from 'underscore';
+import { some, extend, has, partial, intersection, without, contains, isUndefined, sortBy, each, pluck, keys, difference } from 'underscore';
import plotly from './plotly';
import template from './chart.html';
import editorTemplate from './chart-editor.html';
@@ -68,6 +68,8 @@ function ChartEditor(ColorPalette, clientConfig) {
area: { name: 'Area', icon: 'area-chart' },
pie: { name: 'Pie', icon: 'pie-chart' },
scatter: { name: 'Scatter', icon: 'circle-o' },
+ bubble: { name: 'Bubble', icon: 'circle-o' },
+ box: { name: 'Box', icon: 'square-o' },
};
if (clientConfig.allowCustomJSVisualizations) {
@@ -83,6 +85,8 @@ function ChartEditor(ColorPalette, clientConfig) {
});
};
+ scope.showSizeColumnPicker = () => some(scope.options.seriesOptions, options => options.type === 'bubble');
+
scope.options.customCode = `// Available variables are x, ys, element, and Plotly
// Type console.log(x, ys); for more info about x and ys
// To plot your graph call Plotly.plot(element, ...)
@@ -191,6 +195,15 @@ function ChartEditor(ColorPalette, clientConfig) {
}
});
+ scope.$watch('form.sizeColumn', (value, old) => {
+ if (old !== undefined) {
+ unsetColumn(old);
+ }
+ if (value !== undefined) {
+ setColumnRole('size', value);
+ }
+ });
+
scope.$watch('form.groupby', (value, old) => {
if (old !== undefined) {
@@ -222,6 +235,8 @@ function ChartEditor(ColorPalette, clientConfig) {
scope.form.groupby = key;
} else if (value === 'yError') {
scope.form.errorColumn = key;
+ } else if (value === 'size') {
+ scope.form.sizeColumn = key;
}
});
}
diff --git a/client/app/visualizations/chart/plotly.js b/client/app/visualizations/chart/plotly.js
index d6e44b65..e94e97cf 100644
--- a/client/app/visualizations/chart/plotly.js
+++ b/client/app/visualizations/chart/plotly.js
@@ -4,10 +4,11 @@ import Plotly from 'plotly.js/lib/core';
import bar from 'plotly.js/lib/bar';
import pie from 'plotly.js/lib/pie';
import histogram from 'plotly.js/lib/histogram';
+import box from 'plotly.js/lib/box';
import moment from 'moment';
-Plotly.register([bar, pie, histogram]);
+Plotly.register([bar, pie, histogram, box]);
Plotly.setPlotConfig({
modeBarButtonsToRemove: ['sendDataToCloud'],
});
@@ -140,7 +141,7 @@ function percentBarStacking(seriesList) {
sum += seriesList[j].y[i];
}
for (let j = 0; j < seriesList.length; j += 1) {
- const value = seriesList[j].y[i] / (sum * 100);
+ const value = seriesList[j].y[i] / sum * 100;
seriesList[j].text.push(`Value: ${seriesList[j].y[i]}<br>Relative: ${value.toFixed(2)}%`);
seriesList[j].y[i] = value;
}
@@ -197,6 +198,9 @@ const PlotlyChart = () => {
link(scope, element) {
function calculateHeight() {
const height = Math.max(scope.height, (scope.height - 50) + bottomMargin);
+ if (scope.options.globalSeriesType === 'box') {
+ return scope.options.height || height;
+ }
return height;
}
@@ -211,6 +215,11 @@ const PlotlyChart = () => {
} else if (type === 'scatter') {
series.type = 'scatter';
series.mode = 'markers';
+ } else if (type === 'bubble') {
+ series.mode = 'markers';
+ } else if (type === 'box') {
+ series.type = 'box';
+ series.mode = 'markers';
}
}
@@ -271,6 +280,12 @@ const PlotlyChart = () => {
return;
}
+ if (scope.options.globalSeriesType === 'box') {
+ scope.options.sortX = false;
+ scope.layout.boxmode = 'group';
+ scope.layout.boxgroupgap = 0.50;
+ }
+
let hasY2 = false;
const sortX = scope.options.sortX === true || scope.options.sortX === undefined;
const useUnifiedXaxis = sortX && scope.options.xAxis.type === 'category';
@@ -333,6 +348,28 @@ const PlotlyChart = () => {
if (!plotlySeries.error_y.length) {
delete plotlySeries.error_y.length;
}
+
+ if (seriesOptions.type === 'bubble') {
+ plotlySeries.marker = {
+ size: pluck(data, 'size'),
+ };
+ }
+
+ if (seriesOptions.type === 'box') {
+ plotlySeries.boxpoints = 'outliers';
+ plotlySeries.marker = {
+ size: 3,
+ };
+ if (scope.options.showpoints) {
+ plotlySeries.boxpoints = 'all';
+ plotlySeries.jitter = 0.3;
+ plotlySeries.pointpos = -1.8;
+ plotlySeries.marker = {
+ size: 3,
+ };
+ }
+ }
+
scope.data.push(plotlySeries);
});
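
The percentBarStacking hunk above fixes an operator-precedence bug: the old expression divided by (sum * 100), producing values four orders of magnitude smaller than the intended percentages. A standalone sketch of the corrected normalization (plain Python, not Redash code; the function name is illustrative):

    def percent_stack(values):
        """Normalize the y values at one x position to percentages of their sum,
        matching the fixed expression `y / sum * 100`."""
        total = float(sum(values))
        return [v / total * 100 for v in values]

    print(percent_stack([30, 10]))  # [75.0, 25.0]
    # The old `y / (sum * 100)` form yielded [0.0075, 0.0025] for the same input.
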
diff --git a/client/app/visualizations/counter/counter-editor.html b/client/app/visualizations/counter/counter-editor.html
index df0cda25..ab760ef9 100644
--- a/client/app/visualizations/counter/counter-editor.html
+++ b/client/app/visualizations/counter/counter-editor.html
@@ -2,13 +2,13 @@
[Changed markup stripped in extraction.]
@@ -25,4 +25,10 @@
[Added markup stripped in extraction; based on counter/index.js below, this adds a "Count Rows" option to the counter editor.]
diff --git a/client/app/visualizations/counter/index.js b/client/app/visualizations/counter/index.js
index 1e2a7f87..11b24a8e 100644
--- a/client/app/visualizations/counter/index.js
+++ b/client/app/visualizations/counter/index.js
@@ -14,10 +14,11 @@ function CounterRenderer() {
const counterColName = $scope.visualization.options.counterColName;
const targetColName = $scope.visualization.options.targetColName;
- if (counterColName) {
+ if ($scope.visualization.options.countRow) {
+ $scope.counterValue = queryData.length;
+ } else if (counterColName) {
$scope.counterValue = queryData[rowNumber][counterColName];
}
-
if (targetColName) {
$scope.targetValue = queryData[targetRowNumber][targetColName];
diff --git a/client/app/visualizations/map/index.js b/client/app/visualizations/map/index.js
index 342d3242..d1190fe6 100644
--- a/client/app/visualizations/map/index.js
+++ b/client/app/visualizations/map/index.js
@@ -204,7 +204,7 @@ function mapRenderer() {
}
}
- $scope.$watch('queryResult && queryResult.getData()', render, true);
+ $scope.$watch('queryResult && queryResult.getData()', render);
$scope.$watch('visualization.options', render, true);
angular.element(window).on('resize', resize);
$scope.$watch('visualization.options.height', resize);
diff --git a/client/app/visualizations/word-cloud/word-cloud-editor.html b/client/app/visualizations/word-cloud/word-cloud-editor.html
index 84206dd9..91b4c321 100644
--- a/client/app/visualizations/word-cloud/word-cloud-editor.html
+++ b/client/app/visualizations/word-cloud/word-cloud-editor.html
@@ -2,7 +2,7 @@
[One changed line of markup stripped in extraction.]
diff --git a/docker-compose.production.yml b/docker-compose.production.yml
index fee1751f..665fa978 100644
--- a/docker-compose.production.yml
+++ b/docker-compose.production.yml
@@ -7,7 +7,7 @@
version: '2'
services:
server:
- build: .
+ image: redash/redash:latest
command: server
depends_on:
- postgres
@@ -21,7 +21,7 @@ services:
REDASH_DATABASE_URL: "postgresql://postgres@postgres/postgres"
REDASH_COOKIE_SECRET: veryverysecret
worker:
- build: .
+ image: redash/redash:latest
command: scheduler
environment:
PYTHONUNBUFFERED: 0
@@ -31,9 +31,9 @@ services:
QUEUES: "queries,scheduled_queries,celery"
WORKERS_COUNT: 2
redis:
- image: redis:2.8
+ image: redis:3.0-alpine
postgres:
- image: postgres:9.3
+ image: postgres:9.5.6-alpine
# volumes:
# - /opt/postgres-data:/var/lib/postgresql/data
nginx:
diff --git a/docker-compose.yml b/docker-compose.yml
index ccda9f59..4227bc92 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -32,9 +32,9 @@ services:
QUEUES: "queries,scheduled_queries,celery"
WORKERS_COUNT: 2
redis:
- image: redis:2.8
+ image: redis:3.0-alpine
postgres:
- image: postgres:9.3
+ image: postgres:9.5.6-alpine
# The following turns the DB into less durable, but gains significant performance improvements for the tests run (x3
# improvement on my personal machine). We should consider moving this into a dedicated Docker Compose configuration for
# tests.
diff --git a/migrations/versions/d1eae8b9893e_.py b/migrations/versions/d1eae8b9893e_.py
new file mode 100644
index 00000000..9d7d5fc5
--- /dev/null
+++ b/migrations/versions/d1eae8b9893e_.py
@@ -0,0 +1,25 @@
+"""add Query.schedule_failures
+
+Revision ID: d1eae8b9893e
+Revises: 65fc9ede4746
+Create Date: 2017-02-03 01:45:02.954923
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = 'd1eae8b9893e'
+down_revision = '65fc9ede4746'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ op.add_column('queries', sa.Column('schedule_failures', sa.Integer(),
+ nullable=False, server_default='0'))
+
+
+def downgrade():
+ op.drop_column('queries', 'schedule_failures')
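
Because any live installation already has rows in the queries table, the new NOT NULL column above is declared with server_default='0' so the database can backfill existing rows in the same ALTER statement. A minimal sketch of that constraint against a throwaway SQLite database (illustrative only; Redash runs this migration against PostgreSQL):

    import sqlalchemy as sa

    engine = sa.create_engine('sqlite://')
    with engine.begin() as conn:
        conn.execute(sa.text('CREATE TABLE queries (id INTEGER PRIMARY KEY)'))
        conn.execute(sa.text('INSERT INTO queries (id) VALUES (1)'))
        # Without the DEFAULT, adding a NOT NULL column to a non-empty table fails.
        conn.execute(sa.text(
            'ALTER TABLE queries ADD COLUMN schedule_failures INTEGER NOT NULL DEFAULT 0'))
        print(conn.execute(sa.text('SELECT id, schedule_failures FROM queries')).fetchall())
        # [(1, 0)]
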
diff --git a/npm-shrinkwrap.json b/npm-shrinkwrap.json
index f0e5e899..298af0af 100644
--- a/npm-shrinkwrap.json
+++ b/npm-shrinkwrap.json
@@ -1,6 +1,6 @@
{
"name": "redash-client",
- "version": "1.0.0",
+ "version": "1.0.3",
"dependencies": {
"3d-view": {
"version": "2.0.0",
@@ -8,15 +8,20 @@
"resolved": "https://registry.npmjs.org/3d-view/-/3d-view-2.0.0.tgz"
},
"3d-view-controls": {
- "version": "2.1.1",
- "from": "3d-view-controls@>=2.0.0 <3.0.0",
- "resolved": "https://registry.npmjs.org/3d-view-controls/-/3d-view-controls-2.1.1.tgz"
+ "version": "2.2.0",
+ "from": "3d-view-controls@>=2.2.0 <3.0.0",
+ "resolved": "https://registry.npmjs.org/3d-view-controls/-/3d-view-controls-2.2.0.tgz"
},
"a-big-triangle": {
"version": "1.0.3",
"from": "a-big-triangle@>=1.0.0 <2.0.0",
"resolved": "https://registry.npmjs.org/a-big-triangle/-/a-big-triangle-1.0.3.tgz"
},
+ "acorn": {
+ "version": "4.0.4",
+ "from": "acorn@4.0.4",
+ "resolved": "https://registry.npmjs.org/acorn/-/acorn-4.0.4.tgz"
+ },
"add-line-numbers": {
"version": "1.0.1",
"from": "add-line-numbers@>=1.0.1 <2.0.0",
@@ -32,6 +37,11 @@
"from": "align-text@>=0.1.3 <0.2.0",
"resolved": "https://registry.npmjs.org/align-text/-/align-text-0.1.4.tgz"
},
+ "almost-equal": {
+ "version": "1.1.0",
+ "from": "almost-equal@>=1.1.0 <2.0.0",
+ "resolved": "https://registry.npmjs.org/almost-equal/-/almost-equal-1.1.0.tgz"
+ },
"alpha-complex": {
"version": "1.0.0",
"from": "alpha-complex@>=1.0.0 <2.0.0",
@@ -179,9 +189,9 @@
"optional": true
},
"big-rat": {
- "version": "1.0.2",
- "from": "big-rat@>=1.0.1 <2.0.0",
- "resolved": "https://registry.npmjs.org/big-rat/-/big-rat-1.0.2.tgz"
+ "version": "1.0.4",
+ "from": "big-rat@>=1.0.3 <2.0.0",
+ "resolved": "https://registry.npmjs.org/big-rat/-/big-rat-1.0.4.tgz"
},
"big.js": {
"version": "3.1.3",
@@ -199,14 +209,14 @@
"resolved": "https://registry.npmjs.org/bit-twiddle/-/bit-twiddle-1.0.2.tgz"
},
"bl": {
- "version": "1.2.0",
+ "version": "1.2.1",
"from": "bl@>=1.0.0 <2.0.0",
- "resolved": "https://registry.npmjs.org/bl/-/bl-1.2.0.tgz"
+ "resolved": "https://registry.npmjs.org/bl/-/bl-1.2.1.tgz"
},
"bn.js": {
- "version": "2.2.0",
- "from": "bn.js@>=2.0.5 <3.0.0",
- "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-2.2.0.tgz"
+ "version": "4.11.6",
+ "from": "bn.js@>=4.11.6 <5.0.0",
+ "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.6.tgz"
},
"boom": {
"version": "2.10.1",
@@ -343,10 +353,15 @@
"from": "circumradius@>=1.0.0 <2.0.0",
"resolved": "https://registry.npmjs.org/circumradius/-/circumradius-1.0.0.tgz"
},
+ "clamp": {
+ "version": "1.0.1",
+ "from": "clamp@>=1.0.1 <2.0.0",
+ "resolved": "https://registry.npmjs.org/clamp/-/clamp-1.0.1.tgz"
+ },
"clean-pslg": {
- "version": "1.1.0",
+ "version": "1.1.2",
"from": "clean-pslg@>=1.1.0 <2.0.0",
- "resolved": "https://registry.npmjs.org/clean-pslg/-/clean-pslg-1.1.0.tgz"
+ "resolved": "https://registry.npmjs.org/clean-pslg/-/clean-pslg-1.1.2.tgz"
},
"cliui": {
"version": "2.1.0",
@@ -365,6 +380,31 @@
"from": "clone@>=1.0.2 <2.0.0",
"resolved": "https://registry.npmjs.org/clone/-/clone-1.0.2.tgz"
},
+ "color-id": {
+ "version": "1.0.3",
+ "from": "color-id@>=1.0.0 <2.0.0",
+ "resolved": "https://registry.npmjs.org/color-id/-/color-id-1.0.3.tgz"
+ },
+ "color-name": {
+ "version": "1.1.2",
+ "from": "color-name@>=1.1.1 <2.0.0",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.2.tgz"
+ },
+ "color-parse": {
+ "version": "1.3.2",
+ "from": "color-parse@>=1.2.0 <2.0.0",
+ "resolved": "https://registry.npmjs.org/color-parse/-/color-parse-1.3.2.tgz"
+ },
+ "color-rgba": {
+ "version": "1.1.0",
+ "from": "color-rgba@>=1.0.4 <2.0.0",
+ "resolved": "https://registry.npmjs.org/color-rgba/-/color-rgba-1.1.0.tgz"
+ },
+ "color-space": {
+ "version": "1.14.7",
+ "from": "color-space@>=1.14.6 <2.0.0",
+ "resolved": "https://registry.npmjs.org/color-space/-/color-space-1.14.7.tgz"
+ },
"colormap": {
"version": "2.2.0",
"from": "colormap@>=2.1.0 <3.0.0",
@@ -429,7 +469,7 @@
},
"core-js": {
"version": "2.4.1",
- "from": "core-js@>=2.4.0 <3.0.0",
+ "from": "https://registry.npmjs.org/core-js/-/core-js-2.4.1.tgz",
"resolved": "https://registry.npmjs.org/core-js/-/core-js-2.4.1.tgz"
},
"core-util-is": {
@@ -443,9 +483,9 @@
"resolved": "git+https://github.com/restorando/cornelius.git#24d935811186c165c8ba63244ff363da71f32dcf"
},
"country-regex": {
- "version": "1.0.3",
- "from": "country-regex@>=1.0.0 <2.0.0",
- "resolved": "https://registry.npmjs.org/country-regex/-/country-regex-1.0.3.tgz"
+ "version": "1.1.0",
+ "from": "country-regex@>=1.1.0 <2.0.0",
+ "resolved": "https://registry.npmjs.org/country-regex/-/country-regex-1.1.0.tgz"
},
"cryptiles": {
"version": "2.0.5",
@@ -463,9 +503,9 @@
"resolved": "https://registry.npmjs.org/cubic-hermite/-/cubic-hermite-1.0.0.tgz"
},
"cwise": {
- "version": "1.0.9",
+ "version": "1.0.10",
"from": "cwise@>=1.0.3 <2.0.0",
- "resolved": "https://registry.npmjs.org/cwise/-/cwise-1.0.9.tgz"
+ "resolved": "https://registry.npmjs.org/cwise/-/cwise-1.0.10.tgz"
},
"cwise-compiler": {
"version": "1.1.2",
@@ -600,9 +640,9 @@
"resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-2.1.0.tgz"
},
"es-abstract": {
- "version": "1.6.1",
+ "version": "1.7.0",
"from": "es-abstract@>=1.5.0 <2.0.0",
- "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.6.1.tgz"
+ "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.7.0.tgz"
},
"es-to-primitive": {
"version": "1.1.1",
@@ -648,9 +688,9 @@
"resolved": "https://registry.npmjs.org/esprima/-/esprima-2.7.3.tgz"
},
"espurify": {
- "version": "1.6.0",
+ "version": "1.7.0",
"from": "espurify@>=1.3.0 <2.0.0",
- "resolved": "https://registry.npmjs.org/espurify/-/espurify-1.6.0.tgz"
+ "resolved": "https://registry.npmjs.org/espurify/-/espurify-1.7.0.tgz"
},
"estraverse": {
"version": "1.5.1",
@@ -736,6 +776,11 @@
}
}
},
+ "font-atlas-sdf": {
+ "version": "1.2.0",
+ "from": "font-atlas-sdf@>=1.0.0 <2.0.0",
+ "resolved": "https://registry.npmjs.org/font-atlas-sdf/-/font-atlas-sdf-1.2.0.tgz"
+ },
"font-awesome": {
"version": "4.7.0",
"from": "font-awesome@latest",
@@ -1174,9 +1219,9 @@
}
},
"gl-plot3d": {
- "version": "1.5.1",
- "from": "gl-plot3d@>=1.5.0 <2.0.0",
- "resolved": "https://registry.npmjs.org/gl-plot3d/-/gl-plot3d-1.5.1.tgz",
+ "version": "1.5.4",
+ "from": "gl-plot3d@>=1.5.4 <2.0.0",
+ "resolved": "https://registry.npmjs.org/gl-plot3d/-/gl-plot3d-1.5.4.tgz",
"dependencies": {
"bl": {
"version": "0.9.5",
@@ -1304,11 +1349,16 @@
}
}
},
- "gl-scatter2d-fancy": {
- "version": "1.2.1",
- "from": "gl-scatter2d-fancy@>=1.2.1 <2.0.0",
- "resolved": "https://registry.npmjs.org/gl-scatter2d-fancy/-/gl-scatter2d-fancy-1.2.1.tgz",
+ "gl-scatter2d-sdf": {
+ "version": "1.3.4",
+ "from": "gl-scatter2d-sdf@1.3.4",
+ "resolved": "https://registry.npmjs.org/gl-scatter2d-sdf/-/gl-scatter2d-sdf-1.3.4.tgz",
"dependencies": {
+ "binary-search-bounds": {
+ "version": "2.0.3",
+ "from": "binary-search-bounds@>=2.0.3 <3.0.0",
+ "resolved": "https://registry.npmjs.org/binary-search-bounds/-/binary-search-bounds-2.0.3.tgz"
+ },
"bl": {
"version": "0.9.5",
"from": "bl@>=0.9.4 <0.10.0",
@@ -1343,6 +1393,11 @@
"version": "1.0.34",
"from": "readable-stream@~1.0.26",
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-1.0.34.tgz"
+ },
+ "snap-points-2d": {
+ "version": "3.1.0",
+ "from": "snap-points-2d@>=3.1.0 <4.0.0",
+ "resolved": "https://registry.npmjs.org/snap-points-2d/-/snap-points-2d-3.1.0.tgz"
}
}
},
@@ -1708,6 +1763,11 @@
"from": "http-signature@>=1.1.0 <1.2.0",
"resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.1.1.tgz"
},
+ "husl": {
+ "version": "5.0.3",
+ "from": "husl@>=5.0.0 <6.0.0",
+ "resolved": "https://registry.npmjs.org/husl/-/husl-5.0.3.tgz"
+ },
"ieee754": {
"version": "1.1.8",
"from": "ieee754@>=1.1.4 <2.0.0",
@@ -1763,20 +1823,30 @@
"from": "is-function@>=1.0.0 <1.1.0",
"resolved": "https://registry.npmjs.org/is-function/-/is-function-1.0.1.tgz"
},
+ "is-mobile": {
+ "version": "0.2.2",
+ "from": "is-mobile@>=0.2.2 <0.3.0",
+ "resolved": "https://registry.npmjs.org/is-mobile/-/is-mobile-0.2.2.tgz"
+ },
"is-my-json-valid": {
"version": "2.15.0",
"from": "is-my-json-valid@>=2.10.0 <3.0.0",
"resolved": "https://registry.npmjs.org/is-my-json-valid/-/is-my-json-valid-2.15.0.tgz"
},
+ "is-plain-obj": {
+ "version": "1.1.0",
+ "from": "is-plain-obj@>=1.0.0 <2.0.0",
+ "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz"
+ },
"is-property": {
"version": "1.0.2",
"from": "is-property@>=1.0.0 <2.0.0",
"resolved": "https://registry.npmjs.org/is-property/-/is-property-1.0.2.tgz"
},
"is-regex": {
- "version": "1.0.3",
+ "version": "1.0.4",
"from": "is-regex@>=1.0.3 <2.0.0",
- "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.0.3.tgz"
+ "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.0.4.tgz"
},
"is-symbol": {
"version": "1.0.1",
@@ -1950,23 +2020,25 @@
"mapbox-gl": {
"version": "0.22.1",
"from": "mapbox-gl@>=0.22.0 <0.23.0",
- "resolved": "https://registry.npmjs.org/mapbox-gl/-/mapbox-gl-0.22.1.tgz"
+ "resolved": "https://registry.npmjs.org/mapbox-gl/-/mapbox-gl-0.22.1.tgz",
+ "dependencies": {
+ "mapbox-gl-shaders": {
+ "version": "1.0.0",
+ "from": "mapbox/mapbox-gl-shaders#de2ab007455aa2587c552694c68583f94c9f2747",
+ "resolved": "git://github.com/mapbox/mapbox-gl-shaders.git#de2ab007455aa2587c552694c68583f94c9f2747"
+ },
+ "mapbox-gl-style-spec": {
+ "version": "8.8.0",
+ "from": "mapbox/mapbox-gl-style-spec#83b1a3e5837d785af582efd5ed1a212f2df6a4ae",
+ "resolved": "git://github.com/mapbox/mapbox-gl-style-spec.git#83b1a3e5837d785af582efd5ed1a212f2df6a4ae"
+ }
+ }
},
"mapbox-gl-function": {
"version": "1.3.0",
"from": "mapbox-gl-function@>=1.2.1 <2.0.0",
"resolved": "https://registry.npmjs.org/mapbox-gl-function/-/mapbox-gl-function-1.3.0.tgz"
},
- "mapbox-gl-shaders": {
- "version": "1.0.0",
- "from": "mapbox/mapbox-gl-shaders#de2ab007455aa2587c552694c68583f94c9f2747",
- "resolved": "https://github.com/mapbox/mapbox-gl-shaders.git#de2ab007455aa2587c552694c68583f94c9f2747"
- },
- "mapbox-gl-style-spec": {
- "version": "8.8.0",
- "from": "mapbox/mapbox-gl-style-spec#83b1a3e5837d785af582efd5ed1a212f2df6a4ae",
- "resolved": "https://github.com/mapbox/mapbox-gl-style-spec.git#83b1a3e5837d785af582efd5ed1a212f2df6a4ae"
- },
"mapbox-gl-supported": {
"version": "1.2.0",
"from": "mapbox-gl-supported@>=1.2.0 <2.0.0",
@@ -2003,9 +2075,9 @@
"resolved": "https://registry.npmjs.org/material-design-iconic-font/-/material-design-iconic-font-2.2.0.tgz"
},
"matrix-camera-controller": {
- "version": "2.1.1",
- "from": "matrix-camera-controller@>=2.1.1 <3.0.0",
- "resolved": "https://registry.npmjs.org/matrix-camera-controller/-/matrix-camera-controller-2.1.1.tgz"
+ "version": "2.1.3",
+ "from": "matrix-camera-controller@>=2.1.3 <3.0.0",
+ "resolved": "https://registry.npmjs.org/matrix-camera-controller/-/matrix-camera-controller-2.1.3.tgz"
},
"mime-db": {
"version": "1.24.0",
@@ -2047,6 +2119,11 @@
"from": "mouse-event@>=1.0.0 <2.0.0",
"resolved": "https://registry.npmjs.org/mouse-event/-/mouse-event-1.0.5.tgz"
},
+ "mouse-event-offset": {
+ "version": "3.0.2",
+ "from": "mouse-event-offset@>=3.0.2 <4.0.0",
+ "resolved": "https://registry.npmjs.org/mouse-event-offset/-/mouse-event-offset-3.0.2.tgz"
+ },
"mouse-wheel": {
"version": "1.2.0",
"from": "mouse-wheel@>=1.0.2 <2.0.0",
@@ -2081,6 +2158,11 @@
}
}
},
+ "mumath": {
+ "version": "3.3.4",
+ "from": "mumath@>=3.0.0 <4.0.0",
+ "resolved": "https://registry.npmjs.org/mumath/-/mumath-3.3.4.tgz"
+ },
"murmurhash-js": {
"version": "1.0.0",
"from": "murmurhash-js@>=1.0.0 <2.0.0",
@@ -2102,9 +2184,9 @@
"resolved": "https://registry.npmjs.org/ndarray-extract-contour/-/ndarray-extract-contour-1.0.1.tgz"
},
"ndarray-fill": {
- "version": "1.0.1",
- "from": "ndarray-fill@>=1.0.1 <2.0.0",
- "resolved": "https://registry.npmjs.org/ndarray-fill/-/ndarray-fill-1.0.1.tgz"
+ "version": "1.0.2",
+ "from": "ndarray-fill@>=1.0.2 <2.0.0",
+ "resolved": "https://registry.npmjs.org/ndarray-fill/-/ndarray-fill-1.0.2.tgz"
},
"ndarray-gradient": {
"version": "1.0.0",
@@ -2226,9 +2308,9 @@
"resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.0.tgz"
},
"object-inspect": {
- "version": "1.2.1",
+ "version": "1.2.2",
"from": "object-inspect@>=1.2.1 <1.3.0",
- "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.2.1.tgz"
+ "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.2.2.tgz"
},
"object-keys": {
"version": "1.0.11",
@@ -2333,9 +2415,9 @@
"resolved": "https://registry.npmjs.org/planar-graph-to-polyline/-/planar-graph-to-polyline-1.0.5.tgz"
},
"plotly.js": {
- "version": "1.21.2",
- "from": "plotly.js@1.21.2",
- "resolved": "https://registry.npmjs.org/plotly.js/-/plotly.js-1.21.2.tgz"
+ "version": "1.26.1",
+ "from": "plotly.js@1.26.1",
+ "resolved": "https://registry.npmjs.org/plotly.js/-/plotly.js-1.26.1.tgz"
},
"pngjs": {
"version": "2.3.1",
@@ -2420,9 +2502,9 @@
}
},
"rat-vec": {
- "version": "1.1.0",
- "from": "rat-vec@>=1.1.0 <2.0.0",
- "resolved": "https://registry.npmjs.org/rat-vec/-/rat-vec-1.1.0.tgz"
+ "version": "1.1.1",
+ "from": "rat-vec@>=1.1.1 <2.0.0",
+ "resolved": "https://registry.npmjs.org/rat-vec/-/rat-vec-1.1.1.tgz"
},
"readable-stream": {
"version": "2.1.5",
@@ -2434,6 +2516,11 @@
"from": "reduce-simplicial-complex@>=1.0.0 <2.0.0",
"resolved": "https://registry.npmjs.org/reduce-simplicial-complex/-/reduce-simplicial-complex-1.0.0.tgz"
},
+ "regl": {
+ "version": "1.3.0",
+ "from": "regl@>=1.3.0 <2.0.0",
+ "resolved": "https://registry.npmjs.org/regl/-/regl-1.3.0.tgz"
+ },
"repeat-string": {
"version": "1.6.1",
"from": "repeat-string@>=1.5.2 <2.0.0",
@@ -2765,9 +2852,9 @@
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz"
},
"supercluster": {
- "version": "2.2.0",
+ "version": "2.3.0",
"from": "supercluster@>=2.0.1 <3.0.0",
- "resolved": "https://registry.npmjs.org/supercluster/-/supercluster-2.2.0.tgz"
+ "resolved": "https://registry.npmjs.org/supercluster/-/supercluster-2.3.0.tgz"
},
"superscript-text": {
"version": "1.0.0",
@@ -2818,6 +2905,11 @@
}
}
},
+ "tiny-sdf": {
+ "version": "1.0.2",
+ "from": "tiny-sdf@>=1.0.2 <2.0.0",
+ "resolved": "https://registry.npmjs.org/tiny-sdf/-/tiny-sdf-1.0.2.tgz"
+ },
"tinycolor2": {
"version": "1.4.1",
"from": "tinycolor2@>=1.3.0 <2.0.0",
@@ -2926,11 +3018,6 @@
"from": "unassert@>=1.3.1 <2.0.0",
"resolved": "https://registry.npmjs.org/unassert/-/unassert-1.5.1.tgz",
"dependencies": {
- "acorn": {
- "version": "4.0.4",
- "from": "acorn@>=4.0.0 <5.0.0",
- "resolved": "https://registry.npmjs.org/acorn/-/acorn-4.0.4.tgz"
- },
"estraverse": {
"version": "4.2.0",
"from": "estraverse@>=4.1.0 <5.0.0",
@@ -2943,11 +3030,6 @@
"from": "unassertify@>=2.0.0 <3.0.0",
"resolved": "https://registry.npmjs.org/unassertify/-/unassertify-2.0.4.tgz",
"dependencies": {
- "acorn": {
- "version": "4.0.4",
- "from": "acorn@>=4.0.0 <5.0.0",
- "resolved": "https://registry.npmjs.org/acorn/-/acorn-4.0.4.tgz"
- },
"escodegen": {
"version": "1.8.1",
"from": "escodegen@>=1.6.1 <2.0.0",
diff --git a/package.json b/package.json
index 3ea22efb..f3a53df3 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "redash-client",
- "version": "1.0.0",
+ "version": "1.0.3",
"description": "The frontend part of Redash.",
"main": "index.js",
"scripts": {
@@ -33,6 +33,7 @@
"angular-ui-bootstrap": "^2.2.0",
"angular-vs-repeat": "^1.1.7",
"brace": "^0.9.0",
+ "core-js": "https://registry.npmjs.org/core-js/-/core-js-2.4.1.tgz",
"cornelius": "git+https://github.com/restorando/cornelius.git",
"d3": "^3.5.17",
"d3-cloud": "^1.2.1",
@@ -51,7 +52,7 @@
"ng-annotate-loader": "^0.2.0",
"pace-progress": "git+https://github.com/getredash/pace.git",
"pivottable": "^2.3.0",
- "plotly.js": "1.21.2",
+ "plotly.js": "1.26.1",
"ui-select": "^0.19.6",
"underscore": "^1.8.3",
"underscore.string": "^3.3.4"
@@ -59,6 +60,7 @@
"devDependencies": {
"babel-core": "^6.18.0",
"babel-loader": "^6.2.7",
+ "babel-plugin-transform-object-assign": "^6.22.0",
"babel-preset-es2015": "^6.18.0",
"babel-preset-stage-2": "^6.18.0",
"css-loader": "^0.25.0",
diff --git a/redash/__init__.py b/redash/__init__.py
index 2d8c0bc9..abaedcdd 100644
--- a/redash/__init__.py
+++ b/redash/__init__.py
@@ -16,7 +16,7 @@ from redash.query_runner import import_query_runners
from redash.destinations import import_destinations
-__version__ = '1.0.0'
+__version__ = '1.0.3'
def setup_logging():
diff --git a/redash/authentication/google_oauth.py b/redash/authentication/google_oauth.py
index 4653f7bc..aaca8f9f 100644
--- a/redash/authentication/google_oauth.py
+++ b/redash/authentication/google_oauth.py
@@ -85,10 +85,10 @@ def org_login(org_slug):
@blueprint.route('/oauth/google', endpoint="authorize")
def login():
callback = url_for('.callback', _external=True)
- next = request.args.get('next', url_for("redash.index", org_slug=session.get('org_slug')))
+ next_path = request.args.get('next', url_for("redash.index", org_slug=session.get('org_slug')))
logger.debug("Callback url: %s", callback)
- logger.debug("Next is: %s", next)
- return google_remote_app().authorize(callback=callback, state=next)
+ logger.debug("Next is: %s", next_path)
+ return google_remote_app().authorize(callback=callback, state=next_path)
@blueprint.route('/oauth/google_callback', endpoint="callback")
@@ -118,6 +118,6 @@ def authorized():
create_and_login_user(org, profile['name'], profile['email'])
- next = request.args.get('state') or url_for("redash.index", org_slug=org.slug)
+ next_path = request.args.get('state') or url_for("redash.index", org_slug=org.slug)
- return redirect(next)
+ return redirect(next_path)
diff --git a/redash/handlers/query_results.py b/redash/handlers/query_results.py
index 81510ae3..8193d57c 100644
--- a/redash/handlers/query_results.py
+++ b/redash/handlers/query_results.py
@@ -1,13 +1,10 @@
-import csv
import json
-import cStringIO
import time
import pystache
from flask import make_response, request
from flask_login import current_user
from flask_restful import abort
-import xlsxwriter
from redash import models, settings, utils
from redash.tasks import QueryTask, record_event
from redash.permissions import require_permission, not_view_only, has_access, require_access, view_only
@@ -189,39 +186,13 @@ class QueryResultResource(BaseResource):
@staticmethod
def make_csv_response(query_result):
- s = cStringIO.StringIO()
-
- query_data = json.loads(query_result.data)
- writer = csv.DictWriter(s, fieldnames=[col['name'] for col in query_data['columns']])
- writer.writer = utils.UnicodeWriter(s)
- writer.writeheader()
- for row in query_data['rows']:
- writer.writerow(row)
-
headers = {'Content-Type': "text/csv; charset=UTF-8"}
- return make_response(s.getvalue(), 200, headers)
+ return make_response(query_result.make_csv_content(), 200, headers)
@staticmethod
def make_excel_response(query_result):
- s = cStringIO.StringIO()
-
- query_data = json.loads(query_result.data)
- book = xlsxwriter.Workbook(s)
- sheet = book.add_worksheet("result")
-
- column_names = []
- for (c, col) in enumerate(query_data['columns']):
- sheet.write(0, c, col['name'])
- column_names.append(col['name'])
-
- for (r, row) in enumerate(query_data['rows']):
- for (c, name) in enumerate(column_names):
- sheet.write(r + 1, c, row.get(name))
-
- book.close()
-
headers = {'Content-Type': "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"}
- return make_response(s.getvalue(), 200, headers)
+ return make_response(query_result.make_excel_content(), 200, headers)
class JobResource(BaseResource):
diff --git a/redash/models.py b/redash/models.py
index 9f00f44b..e8035061 100644
--- a/redash/models.py
+++ b/redash/models.py
@@ -4,6 +4,9 @@ import hashlib
import itertools
import json
import logging
+import cStringIO
+import csv
+import xlsxwriter
from funcy import project
from flask_sqlalchemy import SQLAlchemy
@@ -13,7 +16,7 @@ from sqlalchemy.event import listens_for
from sqlalchemy.inspection import inspect
from sqlalchemy.types import TypeDecorator
from sqlalchemy.ext.mutable import Mutable
-from sqlalchemy.orm import object_session, backref
+from sqlalchemy.orm import object_session, backref, joinedload, subqueryload
# noinspection PyUnresolvedReferences
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy import or_
@@ -28,7 +31,9 @@ from redash.utils import generate_token, json_dumps
from redash.utils.configuration import ConfigurationContainer
from redash.metrics import database
-db = SQLAlchemy()
+db = SQLAlchemy(session_options={
+ 'expire_on_commit': False
+})
Column = functools.partial(db.Column, nullable=False)
# AccessPermission and Change use a 'generic foreign key' approach to refer to
@@ -424,6 +429,9 @@ class DataSource(BelongsToOrgMixin, db.Model):
__tablename__ = 'data_sources'
__table_args__ = (db.Index('data_sources_org_id_name', 'org_id', 'name'),)
+ def __eq__(self, other):
+ return self.id == other.id
+
def to_dict(self, all=False, with_permissions_for=None):
d = {
'id': self.id,
@@ -641,8 +649,40 @@ class QueryResult(db.Model, BelongsToOrgMixin):
def groups(self):
return self.data_source.groups
+ def make_csv_content(self):
+ s = cStringIO.StringIO()
-def should_schedule_next(previous_iteration, now, schedule):
+ query_data = json.loads(self.data)
+ writer = csv.DictWriter(s, fieldnames=[col['name'] for col in query_data['columns']])
+ writer.writer = utils.UnicodeWriter(s)
+ writer.writeheader()
+ for row in query_data['rows']:
+ writer.writerow(row)
+
+ return s.getvalue()
+
+ def make_excel_content(self):
+ s = cStringIO.StringIO()
+
+ query_data = json.loads(self.data)
+ book = xlsxwriter.Workbook(s)
+ sheet = book.add_worksheet("result")
+
+ column_names = []
+ for (c, col) in enumerate(query_data['columns']):
+ sheet.write(0, c, col['name'])
+ column_names.append(col['name'])
+
+ for (r, row) in enumerate(query_data['rows']):
+ for (c, name) in enumerate(column_names):
+ sheet.write(r + 1, c, row.get(name))
+
+ book.close()
+
+ return s.getvalue()
+
+
+def should_schedule_next(previous_iteration, now, schedule, failures):
if schedule.isdigit():
ttl = int(schedule)
next_iteration = previous_iteration + datetime.timedelta(seconds=ttl)
@@ -659,7 +699,8 @@ def should_schedule_next(previous_iteration, now, schedule):
previous_iteration = normalized_previous_iteration - datetime.timedelta(days=1)
next_iteration = (previous_iteration + datetime.timedelta(days=1)).replace(hour=hour, minute=minute)
-
+ if failures:
+ next_iteration += datetime.timedelta(minutes=2**failures)
return now > next_iteration
@@ -685,6 +726,7 @@ class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):
is_archived = Column(db.Boolean, default=False, index=True)
is_draft = Column(db.Boolean, default=True, index=True)
schedule = Column(db.String(10), nullable=True)
+ schedule_failures = Column(db.Integer, default=0)
visualizations = db.relationship("Visualization", cascade="all, delete-orphan")
options = Column(MutableDict.as_mutable(PseudoJSON), default={})
@@ -764,12 +806,12 @@ class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):
@classmethod
def all_queries(cls, group_ids, user_id=None, drafts=False):
- q = (cls.query.join(User, Query.user_id == User.id)
- .outerjoin(QueryResult)
+ q = (cls.query
+ .options(joinedload(Query.user),
+ joinedload(Query.latest_query_data).load_only('runtime', 'retrieved_at'))
.join(DataSourceGroup, Query.data_source_id == DataSourceGroup.data_source_id)
.filter(Query.is_archived == False)
.filter(DataSourceGroup.group_id.in_(group_ids))\
- .group_by(Query.id, User.id, QueryResult.id, QueryResult.retrieved_at, QueryResult.runtime)
.order_by(Query.created_at.desc()))
if not drafts:
@@ -784,15 +826,20 @@ class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):
@classmethod
def outdated_queries(cls):
queries = (db.session.query(Query)
- .join(QueryResult)
- .join(DataSource)
- .filter(Query.schedule != None))
+ .options(joinedload(Query.latest_query_data).load_only('retrieved_at'))
+ .filter(Query.schedule != None)
+ .order_by(Query.id))
now = utils.utcnow()
outdated_queries = {}
for query in queries:
- if should_schedule_next(query.latest_query_data.retrieved_at, now, query.schedule):
- key = "{}:{}".format(query.query_hash, query.data_source.id)
+ if query.latest_query_data:
+ retrieved_at = query.latest_query_data.retrieved_at
+ else:
+ retrieved_at = now
+
+ if should_schedule_next(retrieved_at, now, query.schedule, query.schedule_failures):
+ key = "{}:{}".format(query.query_hash, query.data_source_id)
outdated_queries[key] = query
return outdated_queries.values()
@@ -818,12 +865,11 @@ class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):
Query.data_source_id == DataSourceGroup.data_source_id)
.filter(where)).distinct()
- return Query.query.join(User, Query.user_id == User.id).filter(
- Query.id.in_(query_ids))
+ return Query.query.options(joinedload(Query.user)).filter(Query.id.in_(query_ids))
@classmethod
def recent(cls, group_ids, user_id=None, limit=20):
- query = (cls.query.join(User, Query.user_id == User.id)
+ query = (cls.query.options(subqueryload(Query.user))
.filter(Event.created_at > (db.func.current_date() - 7))
.join(Event, Query.id == Event.object_id.cast(db.Integer))
.join(DataSourceGroup, Query.data_source_id == DataSourceGroup.data_source_id)
@@ -835,7 +881,7 @@ class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):
DataSourceGroup.group_id.in_(group_ids),
or_(Query.is_draft == False, Query.user_id == user_id),
Query.is_archived == False)
- .group_by(Event.object_id, Query.id, User.id)
+ .group_by(Event.object_id, Query.id)
.order_by(db.desc(db.func.count(0))))
if user_id:
@@ -889,6 +935,7 @@ class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):
@listens_for(Query.query_text, 'set')
def gen_query_hash(target, val, oldval, initiator):
target.query_hash = utils.gen_query_hash(val)
+ target.schedule_failures = 0
@listens_for(Query.user_id, 'set')
@@ -1024,12 +1071,11 @@ class Alert(TimestampMixin, db.Model):
@classmethod
def all(cls, group_ids):
- # TODO: there was a join with user here to prevent N+1 queries. need to revisit this.
return db.session.query(Alert)\
+ .options(joinedload(Alert.user), joinedload(Alert.query_rel))\
.join(Query)\
.join(DataSourceGroup, DataSourceGroup.data_source_id==Query.data_source_id)\
- .filter(DataSourceGroup.group_id.in_(group_ids))\
- .group_by(Alert)
+ .filter(DataSourceGroup.group_id.in_(group_ids))
@classmethod
def get_by_id_and_org(cls, id, org):
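
The should_schedule_next change in this file adds exponential backoff for scheduled queries that keep failing, tracked by the new schedule_failures column: each consecutive failure pushes the next attempt out by an extra 2**failures minutes. A standalone sketch of that delay calculation for an interval-style (TTL in seconds) schedule, leaving out the daily "HH:MM" branch; the function name is illustrative:

    import datetime

    def next_run(previous_iteration, ttl_seconds, failures):
        """Earliest time a TTL-scheduled query should run again, with backoff."""
        nxt = previous_iteration + datetime.timedelta(seconds=ttl_seconds)
        if failures:
            nxt += datetime.timedelta(minutes=2 ** failures)
        return nxt

    last = datetime.datetime(2017, 2, 3, 12, 0)
    for failures in (0, 1, 3, 5):
        print(failures, next_run(last, ttl_seconds=300, failures=failures))
    # 0 -> 12:05, 1 -> 12:07, 3 -> 12:13, 5 -> 12:37

Note that the listener added to gen_query_hash resets schedule_failures to 0 whenever the query text changes, so an edited query is retried without the accumulated delay.
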
diff --git a/redash/query_runner/__init__.py b/redash/query_runner/__init__.py
index b85b09e8..232d2127 100644
--- a/redash/query_runner/__init__.py
+++ b/redash/query_runner/__init__.py
@@ -1,6 +1,8 @@
+import sys
import logging
import json
+from collections import OrderedDict
from redash import settings
logger = logging.getLogger(__name__)
@@ -138,6 +140,7 @@ class BaseSQLQueryRunner(BaseQueryRunner):
res = self._run_query_internal('select count(*) as cnt from %s' % t)
tables_dict[t]['size'] = res[0]['cnt']
+
query_runners = {}
@@ -147,7 +150,8 @@ def register(query_runner_class):
logger.debug("Registering %s (%s) query runner.", query_runner_class.name(), query_runner_class.type())
query_runners[query_runner_class.type()] = query_runner_class
else:
- logger.debug("%s query runner enabled but not supported, not registering. Either disable or install missing dependencies.", query_runner_class.name())
+ logger.debug("%s query runner enabled but not supported, not registering. Either disable or install missing "
+ "dependencies.", query_runner_class.name())
def get_query_runner(query_runner_type, configuration):
diff --git a/redash/query_runner/athena.py b/redash/query_runner/athena.py
index 2642377c..23339446 100644
--- a/redash/query_runner/athena.py
+++ b/redash/query_runner/athena.py
@@ -4,8 +4,10 @@ import os
import requests
from redash.query_runner import BaseQueryRunner, register
+from redash.settings import parse_boolean
PROXY_URL = os.environ.get('ATHENA_PROXY_URL')
+ANNOTATE_QUERY = parse_boolean(os.environ.get('ATHENA_ANNOTATE_QUERY', 'true'))
class Athena(BaseQueryRunner):
noop_query = 'SELECT 1'
@@ -40,6 +42,9 @@ class Athena(BaseQueryRunner):
'secret': ['aws_secret_key']
}
+ @classmethod
+ def annotate_query(cls):
+ return ANNOTATE_QUERY
def get_schema(self, get_stats=False):
schema = {}
diff --git a/redash/query_runner/axibase_tsd.py b/redash/query_runner/axibase_tsd.py
new file mode 100644
index 00000000..9737b6bc
--- /dev/null
+++ b/redash/query_runner/axibase_tsd.py
@@ -0,0 +1,201 @@
+from io import StringIO
+import json
+import logging
+import sys
+import uuid
+import csv
+
+from redash.query_runner import *
+from redash.utils import JSONEncoder
+
+logger = logging.getLogger(__name__)
+
+try:
+ import atsd_client
+ from atsd_client.exceptions import SQLException
+ from atsd_client.services import SQLService, MetricsService
+ enabled = True
+except ImportError:
+ enabled = False
+
+types_map = {
+ 'long': TYPE_INTEGER,
+
+ 'bigint': TYPE_INTEGER,
+ 'integer': TYPE_INTEGER,
+ 'smallint': TYPE_INTEGER,
+
+ 'float': TYPE_FLOAT,
+ 'double': TYPE_FLOAT,
+ 'decimal': TYPE_FLOAT,
+
+ 'string': TYPE_STRING,
+ 'date': TYPE_DATE,
+ 'xsd:dateTimeStamp': TYPE_DATETIME
+}
+
+
+def resolve_redash_type(type_in_atsd):
+ """
+ Retrieve corresponding redash type
+ :param type_in_atsd: `str`
+ :return: redash type constant
+ """
+ if isinstance(type_in_atsd, dict):
+ type_in_redash = types_map.get(type_in_atsd['base'])
+ else:
+ type_in_redash = types_map.get(type_in_atsd)
+ return type_in_redash
+
+
+def generate_rows_and_columns(csv_response):
+ """
+ Prepare rows and columns in redash format from ATSD csv response
+ :param csv_response: `str`
+ :return: prepared rows and columns
+ """
+ meta, data = csv_response.split('\n', 1)
+ meta = meta[1:]
+
+ meta_with_padding = meta + '=' * (4 - len(meta) % 4)
+ meta_decoded = meta_with_padding.decode('base64')
+ meta_json = json.loads(meta_decoded)
+ meta_columns = meta_json['tableSchema']['columns']
+
+ reader = csv.reader(data.splitlines())
+ next(reader)
+
+ columns = [{'friendly_name': i['titles'],
+ 'type': resolve_redash_type(i['datatype']),
+ 'name': i['name']}
+ for i in meta_columns]
+ column_names = [c['name'] for c in columns]
+ rows = [dict(zip(column_names, row)) for row in reader]
+ return columns, rows
+
+
+class AxibaseTSD(BaseQueryRunner):
+ noop_query = "SELECT 1"
+
+ @classmethod
+ def enabled(cls):
+ return enabled
+
+ @classmethod
+ def name(cls):
+ return "Axibase Time Series Database"
+
+ @classmethod
+ def configuration_schema(cls):
+ return {
+ 'type': 'object',
+ 'properties': {
+ 'protocol': {
+ 'type': 'string',
+ 'title': 'Protocol',
+ 'default': 'http'
+ },
+ 'hostname': {
+ 'type': 'string',
+ 'title': 'Host',
+ 'default': 'axibase_tsd_hostname'
+ },
+ 'port': {
+ 'type': 'number',
+ 'title': 'Port',
+ 'default': 8088
+ },
+ 'username': {
+ 'type': 'string'
+ },
+ 'password': {
+ 'type': 'string',
+ 'title': 'Password'
+ },
+ 'timeout': {
+ 'type': 'number',
+ 'default': 600,
+ 'title': 'Connection Timeout'
+ },
+ 'min_insert_date': {
+ 'type': 'string',
+ 'title': 'Metric Minimum Insert Date'
+ },
+ 'expression': {
+ 'type': 'string',
+ 'title': 'Metric Filter'
+ },
+ 'limit': {
+ 'type': 'number',
+ 'default': 5000,
+ 'title': 'Metric Limit'
+ },
+ 'trust_certificate': {
+ 'type': 'boolean',
+ 'title': 'Trust SSL Certificate'
+ }
+ },
+ 'required': ['username', 'password', 'hostname', 'protocol', 'port'],
+ 'secret': ['password']
+ }
+
+ def __init__(self, configuration):
+ super(AxibaseTSD, self).__init__(configuration)
+ self.url = '{0}://{1}:{2}'.format(self.configuration.get('protocol', 'http'),
+ self.configuration.get('hostname', 'localhost'),
+ self.configuration.get('port', 8088))
+
+ def run_query(self, query, user):
+ connection = atsd_client.connect_url(self.url,
+ self.configuration.get('username'),
+ self.configuration.get('password'),
+ verify=self.configuration.get('trust_certificate', False),
+ timeout=self.configuration.get('timeout', 600))
+ sql = SQLService(connection)
+ query_id = str(uuid.uuid4())
+
+ try:
+ logger.debug("SQL running query: %s", query)
+ data = sql.query_with_params(query, {'outputFormat': 'csv', 'metadataFormat': 'EMBED',
+ 'queryId': query_id})
+
+ columns, rows = generate_rows_and_columns(data)
+
+ data = {'columns': columns, 'rows': rows}
+ json_data = json.dumps(data, cls=JSONEncoder)
+ error = None
+
+ except SQLException as e:
+ json_data = None
+ error = e.content
+ except (KeyboardInterrupt, InterruptException):
+ sql.cancel_query(query_id)
+ error = "Query cancelled by user."
+ json_data = None
+ except Exception:
+ raise sys.exc_info()[1], None, sys.exc_info()[2]
+
+ return json_data, error
+
+ def get_schema(self, get_stats=False):
+ connection = atsd_client.connect_url(self.url,
+ self.configuration.get('username'),
+ self.configuration.get('password'),
+ verify=self.configuration.get('trust_certificate', False),
+ timeout=self.configuration.get('timeout', 600))
+ metrics = MetricsService(connection)
+ ml = metrics.list(expression=self.configuration.get('expression', None),
+ minInsertDate=self.configuration.get('min_insert_date', None),
+ limit=self.configuration.get('limit', 5000))
+ metrics_list = [i.name.encode('utf-8') for i in ml]
+ metrics_list.append('atsd_series')
+ schema = {}
+ default_columns = ['entity', 'datetime', 'time', 'metric', 'value', 'text',
+ 'tags', 'entity.tags', 'metric.tags']
+ for table_name in metrics_list:
+ schema[table_name] = {'name': "'{}'".format(table_name),
+ 'columns': default_columns}
+ values = schema.values()
+ return values
+
+register(AxibaseTSD)
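
The new Axibase runner requests CSV output with EMBED metadata: the first response line is a '#'-prefixed base64 blob of JSON table schema, and the rest is plain CSV. A self-contained sketch of that decode path with a synthetic payload shaped the way generate_rows_and_columns expects (not a verified ATSD response, and using the base64 module instead of the Python 2 str.decode('base64') call the runner relies on):

    import base64
    import csv
    import json

    # Synthetic "EMBED" response: '#' + base64(JSON schema), then CSV rows.
    schema = {'tableSchema': {'columns': [
        {'name': 'entity', 'titles': 'entity', 'datatype': 'string'},
        {'name': 'value', 'titles': 'value', 'datatype': 'float'},
    ]}}
    response = ('#' + base64.b64encode(json.dumps(schema).encode()).decode() + '\n'
                'entity,value\nsensor-1,42.5\n')

    meta, data = response.split('\n', 1)
    columns = json.loads(base64.b64decode(meta[1:]))['tableSchema']['columns']
    names = [c['name'] for c in columns]
    reader = csv.reader(data.splitlines())
    next(reader)  # skip the CSV header row
    rows = [dict(zip(names, row)) for row in reader]
    print(rows)  # [{'entity': 'sensor-1', 'value': '42.5'}]
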
diff --git a/redash/query_runner/big_query.py b/redash/query_runner/big_query.py
index 0be4597b..4b5e0518 100644
--- a/redash/query_runner/big_query.py
+++ b/redash/query_runner/big_query.py
@@ -18,7 +18,7 @@ try:
import apiclient.errors
from apiclient.discovery import build
from apiclient.errors import HttpError
- from oauth2client.client import SignedJwtAssertionCredentials
+ from oauth2client.service_account import ServiceAccountCredentials
from oauth2client import gce
enabled = True
@@ -113,6 +113,10 @@ class BigQuery(BaseQueryRunner):
'loadSchema': {
"type": "boolean",
"title": "Load Schema"
+ },
+ 'maximumBillingTier': {
+ "type": "number",
+ "title": "Maximum Billing Tier"
}
},
'required': ['jsonKeyFile', 'projectId'],
@@ -134,9 +138,9 @@ class BigQuery(BaseQueryRunner):
key = json.loads(b64decode(self.configuration['jsonKeyFile']))
- credentials = SignedJwtAssertionCredentials(key['client_email'], key['private_key'], scope=scope)
+ creds = ServiceAccountCredentials.from_json_keyfile_dict(key, scope)
http = httplib2.Http(timeout=settings.BIGQUERY_HTTP_TIMEOUT)
- http = credentials.authorize(http)
+ http = creds.authorize(http)
return build("bigquery", "v2", http=http)
@@ -148,10 +152,10 @@ class BigQuery(BaseQueryRunner):
"query": query,
"dryRun": True,
}
-
+
if self.configuration.get('useStandardSql', False):
job_data['useLegacySql'] = False
-
+
response = jobs.query(projectId=self._get_project_id(), body=job_data).execute()
return int(response["totalBytesProcessed"])
@@ -164,7 +168,7 @@ class BigQuery(BaseQueryRunner):
}
}
}
-
+
if self.configuration.get('useStandardSql', False):
job_data['configuration']['query']['useLegacySql'] = False
@@ -174,6 +178,9 @@ class BigQuery(BaseQueryRunner):
job_data["configuration"]["query"]["userDefinedFunctionResources"] = map(
lambda resource_uri: {"resourceUri": resource_uri}, resource_uris)
+ if "maximumBillingTier" in self.configuration:
+ job_data["configuration"]["query"]["maximumBillingTier"] = self.configuration["maximumBillingTier"]
+
insert_response = jobs.insert(projectId=project_id, body=job_data).execute()
current_row = 0
query_reply = _get_query_results(jobs, project_id=project_id,
diff --git a/redash/query_runner/cass.py b/redash/query_runner/cass.py
index 7a577e6e..b211cbe2 100644
--- a/redash/query_runner/cass.py
+++ b/redash/query_runner/cass.py
@@ -1,5 +1,6 @@
import json
import logging
+import uuid
from redash.query_runner import BaseQueryRunner, register
from redash.utils import JSONEncoder
@@ -14,6 +15,13 @@ except ImportError:
enabled = False
+class CassandraJSONEncoder(JSONEncoder):
+ def default(self, o):
+ if isinstance(o, uuid.UUID):
+ return str(o)
+ return super(CassandraJSONEncoder, self).default(o)
+
+
class Cassandra(BaseQueryRunner):
noop_query = "SELECT dateof(now()) FROM system.local"
@@ -44,6 +52,11 @@ class Cassandra(BaseQueryRunner):
'password': {
'type': 'string',
'title': 'Password'
+ },
+ 'protocol': {
+ 'type': 'number',
+ 'title': 'Protocol Version',
+ 'default': 3
}
},
'required': ['keyspace', 'host']
@@ -77,10 +90,12 @@ class Cassandra(BaseQueryRunner):
if self.configuration.get('username', '') and self.configuration.get('password', ''):
auth_provider = PlainTextAuthProvider(username='{}'.format(self.configuration.get('username', '')),
password='{}'.format(self.configuration.get('password', '')))
- connection = Cluster([self.configuration.get('host', '')], auth_provider=auth_provider, protocol_version=3)
+ connection = Cluster([self.configuration.get('host', '')],
+ auth_provider=auth_provider,
+ protocol_version=self.configuration.get('protocol', 3))
else:
- connection = Cluster([self.configuration.get('host', '')], protocol_version=3)
-
+ connection = Cluster([self.configuration.get('host', '')],
+ protocol_version=self.configuration.get('protocol', 3))
session = connection.connect()
session.set_keyspace(self.configuration['keyspace'])
logger.debug("Cassandra running query: %s", query)
@@ -93,7 +108,7 @@ class Cassandra(BaseQueryRunner):
rows = [dict(zip(column_names, row)) for row in result]
data = {'columns': columns, 'rows': rows}
- json_data = json.dumps(data, cls=JSONEncoder)
+ json_data = json.dumps(data, cls=CassandraJSONEncoder)
error = None
except KeyboardInterrupt:
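
The CassandraJSONEncoder above exists because json.dumps() has no default handling for uuid.UUID values, which Cassandra routinely returns for uuid/timeuuid columns. A self-contained sketch of the same pattern, built on the standard-library encoder instead of redash.utils.JSONEncoder so it runs on its own:

import json
import uuid


class UUIDEncoder(json.JSONEncoder):
    def default(self, o):
        # Serialize UUIDs as strings; defer everything else to the base class.
        if isinstance(o, uuid.UUID):
            return str(o)
        return super(UUIDEncoder, self).default(o)


row = {'id': uuid.uuid4(), 'value': 42}
print(json.dumps(row, cls=UUIDEncoder))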
diff --git a/redash/query_runner/clickhouse.py b/redash/query_runner/clickhouse.py
index c22f9dd5..7de1b396 100644
--- a/redash/query_runner/clickhouse.py
+++ b/redash/query_runner/clickhouse.py
@@ -3,6 +3,7 @@ import logging
from redash.query_runner import *
from redash.utils import JSONEncoder
import requests
+import re
logger = logging.getLogger(__name__)
@@ -74,13 +75,16 @@ class ClickHouse(BaseSQLQueryRunner):
@staticmethod
def _define_column_type(column):
c = column.lower()
- if 'int' in c:
+ f = re.search(r'^nullable\((.*)\)$', c)
+ if f is not None:
+ c = f.group(1)
+ if c.startswith('int') or c.startswith('uint'):
return TYPE_INTEGER
- elif 'float' in c:
+ elif c.startswith('float'):
return TYPE_FLOAT
- elif 'datetime' == c:
+ elif c == 'datetime':
return TYPE_DATETIME
- elif 'date' == c:
+ elif c == 'date':
return TYPE_DATE
else:
return TYPE_STRING
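
A standalone sketch of the tightened type mapping above: the regex unwraps Nullable(...) before prefix matching, so for example Nullable(UInt64) maps to an integer instead of falling through to string. The TYPE_* constants are replaced with plain strings here to keep the snippet self-contained:

import re


def define_column_type(column):
    # Same logic as the runner above, with string labels instead of TYPE_* constants.
    c = column.lower()
    match = re.search(r'^nullable\((.*)\)$', c)
    if match is not None:
        c = match.group(1)
    if c.startswith('int') or c.startswith('uint'):
        return 'integer'
    elif c.startswith('float'):
        return 'float'
    elif c == 'datetime':
        return 'datetime'
    elif c == 'date':
        return 'date'
    return 'string'


assert define_column_type('Nullable(UInt64)') == 'integer'
assert define_column_type('Float32') == 'float'
assert define_column_type('Nullable(Date)') == 'date'
assert define_column_type('String') == 'string'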
diff --git a/redash/query_runner/dynamodb_sql.py b/redash/query_runner/dynamodb_sql.py
index 503b5275..7d433b2e 100644
--- a/redash/query_runner/dynamodb_sql.py
+++ b/redash/query_runner/dynamodb_sql.py
@@ -2,7 +2,6 @@ import json
import logging
import sys
-
from redash.query_runner import *
from redash.utils import JSONEncoder
@@ -98,12 +97,17 @@ class DynamoDBSQL(BaseSQLQueryRunner):
try:
engine = self._connect()
- res_dict = engine.execute(query if str(query).endswith(';') else str(query)+';')
+ result = engine.execute(query if str(query).endswith(';') else str(query)+';')
columns = []
rows = []
- for item in res_dict:
+ # When running a count query it returns the value as a string, in which case
+ # we transform it into a dictionary to be the same as regular queries.
+ if isinstance(result, basestring):
+ result = [{"value": result}]
+
+ for item in result:
if not columns:
for k, v in item.iteritems():
columns.append({
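
A short sketch of the normalization added above: the engine returns a COUNT result as a bare string, so it is wrapped into a single {"value": ...} row before the usual column/row extraction loop runs (basestring is Python 2, matching the runner):

# Sketch only (Python 2, like the runner): normalize a count result so the
# row-building loop can treat it like any other result set.
def normalize_result(result):
    if isinstance(result, basestring):
        result = [{"value": result}]
    return result


assert normalize_result("3") == [{"value": "3"}]
assert normalize_result([{"id": 1}]) == [{"id": 1}]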
diff --git a/redash/query_runner/google_analytics.py b/redash/query_runner/google_analytics.py
index 26ecc861..0993a1ea 100644
--- a/redash/query_runner/google_analytics.py
+++ b/redash/query_runner/google_analytics.py
@@ -10,7 +10,7 @@ from datetime import datetime
logger = logging.getLogger(__name__)
try:
- from oauth2client.client import SignedJwtAssertionCredentials
+ from oauth2client.service_account import ServiceAccountCredentials
from apiclient.discovery import build
import httplib2
enabled = True
@@ -81,8 +81,8 @@ class GoogleAnalytics(BaseSQLQueryRunner):
def _get_analytics_service(self):
scope = ['https://www.googleapis.com/auth/analytics.readonly']
key = json.loads(b64decode(self.configuration['jsonKeyFile']))
- credentials = SignedJwtAssertionCredentials(key['client_email'], key["private_key"], scope=scope)
- return build('analytics', 'v3', http=credentials.authorize(httplib2.Http()))
+ creds = ServiceAccountCredentials.from_json_keyfile_dict(key, scope)
+ return build('analytics', 'v3', http=creds.authorize(httplib2.Http()))
def run_query(self, query, user):
logger.debug("Analytics is about to execute query: %s", query)
diff --git a/redash/query_runner/google_spreadsheets.py b/redash/query_runner/google_spreadsheets.py
index 2184da70..1b0509ed 100644
--- a/redash/query_runner/google_spreadsheets.py
+++ b/redash/query_runner/google_spreadsheets.py
@@ -9,7 +9,7 @@ logger = logging.getLogger(__name__)
try:
import gspread
- from oauth2client.client import SignedJwtAssertionCredentials
+ from oauth2client.service_account import ServiceAccountCredentials
enabled = True
except ImportError:
enabled = False
@@ -164,8 +164,8 @@ class GoogleSpreadsheet(BaseQueryRunner):
]
key = json.loads(b64decode(self.configuration['jsonKeyFile']))
- credentials = SignedJwtAssertionCredentials(key['client_email'], key["private_key"], scope=scope)
- spreadsheetservice = gspread.authorize(credentials)
+ creds = ServiceAccountCredentials.from_json_keyfile_dict(key, scope)
+ spreadsheetservice = gspread.authorize(creds)
return spreadsheetservice
def test_connection(self):
diff --git a/redash/query_runner/impala_ds.py b/redash/query_runner/impala_ds.py
index e3337006..2c3d94e7 100644
--- a/redash/query_runner/impala_ds.py
+++ b/redash/query_runner/impala_ds.py
@@ -82,11 +82,11 @@ class Impala(BaseSQLQueryRunner):
def _get_tables(self, schema_dict):
schemas_query = "show schemas;"
tables_query = "show tables in %s;"
- columns_query = "show column stats %s;"
+ columns_query = "show column stats %s.%s;"
- for schema_name in map(lambda a: a['name'], self._run_query_internal(schemas_query)):
- for table_name in map(lambda a: a['name'], self._run_query_internal(tables_query % schema_name)):
- columns = map(lambda a: a['Column'], self._run_query_internal(columns_query % table_name))
+ for schema_name in map(lambda a: unicode(a['name']), self._run_query_internal(schemas_query)):
+ for table_name in map(lambda a: unicode(a['name']), self._run_query_internal(tables_query % schema_name)):
+ columns = map(lambda a: unicode(a['Column']), self._run_query_internal(columns_query % (schema_name, table_name)))
if schema_name != 'default':
table_name = '{}.{}'.format(schema_name, table_name)
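
The change above qualifies the column-stats query with its schema, so columns are read from the intended database rather than whichever one the session happens to be in; schema, table and column names are also coerced to unicode. A tiny sketch of the query formatting (the names are made up):

columns_query = "show column stats %s.%s;"
schema_name, table_name = u'analytics', u'events'
# Produces "show column stats analytics.events;", independent of the current database.
print(columns_query % (schema_name, table_name))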
diff --git a/redash/query_runner/jql.py b/redash/query_runner/jql.py
index 2b37339c..37b1f345 100644
--- a/redash/query_runner/jql.py
+++ b/redash/query_runner/jql.py
@@ -54,7 +54,7 @@ def parse_issue(issue, field_mapping):
if 'watchCount' in v:
result[output_name] = v['watchCount']
-
+
elif isinstance(v, list):
if len(member_names) > 0:
# if field mapping with dict member mappings defined get value of each member
@@ -104,7 +104,7 @@ class FieldMapping:
for k, v in query_field_mapping.iteritems():
field_name = k
member_name = None
-
+
# check for member name contained in field name
member_parser = re.search('(\w+)\.(\w+)', k)
if (member_parser):
@@ -183,6 +183,8 @@ class JiraJQL(BaseQueryRunner):
if query_type == 'count':
query['maxResults'] = 1
query['fields'] = ''
+ else:
+ query['maxResults'] = query.get('maxResults', 1000)
response = requests.get(jql_url, params=query, auth=(self.configuration.get('username'), self.configuration.get('password')))
diff --git a/redash/query_runner/pg.py b/redash/query_runner/pg.py
index d9eb890c..28db2099 100644
--- a/redash/query_runner/pg.py
+++ b/redash/query_runner/pg.py
@@ -71,6 +71,7 @@ class PostgreSQL(BaseSQLQueryRunner):
"title": "Database Name"
}
},
+ "order": ['host', 'port', 'user', 'password'],
"required": ["dbname"],
"secret": ["password"]
}
diff --git a/redash/query_runner/presto.py b/redash/query_runner/presto.py
index 3b6e16b2..7e68ed12 100644
--- a/redash/query_runner/presto.py
+++ b/redash/query_runner/presto.py
@@ -118,9 +118,15 @@ class Presto(BaseQueryRunner):
default_message = 'Unspecified DatabaseError: {0}'.format(db.message)
message = db.message.get('failureInfo', {'message', None}).get('message')
error = default_message if message is None else message
- except Exception, ex:
+ except (KeyboardInterrupt, InterruptException) as e:
+ cursor.cancel()
+ error = "Query cancelled by user."
+ json_data = None
+ except Exception as ex:
json_data = None
error = ex.message
+ if not isinstance(error, basestring):
+ error = unicode(error)
return json_data, error
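
Two things change above: interrupts now cancel the running Presto cursor, and whatever ends up in the error variable is coerced to text so it can always be serialized for the API. A small Python 2 sketch of the error-normalization part:

def normalize_error(ex):
    # ex.message may be a dict (e.g. Presto failureInfo) rather than a string.
    error = ex.message
    if not isinstance(error, basestring):
        error = unicode(error)
    return error


assert normalize_error(Exception('boom')) == 'boom'
assert isinstance(normalize_error(Exception({'failureInfo': 'boom'})), unicode)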
diff --git a/redash/query_runner/salesforce.py b/redash/query_runner/salesforce.py
new file mode 100644
index 00000000..6d9678b0
--- /dev/null
+++ b/redash/query_runner/salesforce.py
@@ -0,0 +1,181 @@
+# -*- coding: utf-8 -*-
+
+import re
+import logging
+from collections import OrderedDict
+from redash.query_runner import BaseQueryRunner, register
+from redash.query_runner import TYPE_STRING, TYPE_DATE, TYPE_DATETIME, TYPE_INTEGER, TYPE_FLOAT, TYPE_BOOLEAN
+from redash.utils import json_dumps
+logger = logging.getLogger(__name__)
+
+try:
+ from simple_salesforce import Salesforce as SimpleSalesforce
+ from simple_salesforce.api import SalesforceError
+ enabled = True
+except ImportError as e:
+ enabled = False
+
+# See https://developer.salesforce.com/docs/atlas.en-us.api.meta/api/field_types.htm
+TYPES_MAP = dict(
+ id=TYPE_STRING,
+ string=TYPE_STRING,
+ currency=TYPE_FLOAT,
+ reference=TYPE_STRING,
+ double=TYPE_FLOAT,
+ picklist=TYPE_STRING,
+ date=TYPE_DATE,
+ url=TYPE_STRING,
+ phone=TYPE_STRING,
+ textarea=TYPE_STRING,
+ int=TYPE_INTEGER,
+ datetime=TYPE_DATETIME,
+ boolean=TYPE_BOOLEAN,
+ percent=TYPE_FLOAT,
+ multipicklist=TYPE_STRING,
+ masterrecord=TYPE_STRING,
+ location=TYPE_STRING,
+ JunctionIdList=TYPE_STRING,
+ encryptedstring=TYPE_STRING,
+ email=TYPE_STRING,
+ DataCategoryGroupReference=TYPE_STRING,
+ combobox=TYPE_STRING,
+ calculated=TYPE_STRING,
+ anyType=TYPE_STRING,
+ address=TYPE_STRING
+)
+
+# Query Runner for Salesforce SOQL Queries
+# For example queries, see:
+# https://developer.salesforce.com/docs/atlas.en-us.soql_sosl.meta/soql_sosl/sforce_api_calls_soql_select_examples.htm
+
+
+class Salesforce(BaseQueryRunner):
+
+ @classmethod
+ def enabled(cls):
+ return enabled
+
+ @classmethod
+ def annotate_query(cls):
+ return False
+
+ @classmethod
+ def configuration_schema(cls):
+ return {
+ "type": "object",
+ "properties": {
+ "username": {
+ "type": "string"
+ },
+ "password": {
+ "type": "string"
+ },
+ "token": {
+ "type": "string",
+ "title": "Security Token"
+ },
+ "sandbox": {
+ "type": "boolean"
+ }
+ },
+ "required": ["username", "password", "token"],
+ "secret": ["password", "token"]
+ }
+
+ def test_connection(self):
+ response = self._get_sf().describe()
+ if response is None:
+ raise Exception("Failed describing objects.")
+ pass
+
+ def _get_sf(self):
+ sf = SimpleSalesforce(username=self.configuration['username'],
+ password=self.configuration['password'],
+ security_token=self.configuration['token'],
+ sandbox=self.configuration['sandbox'],
+ client_id='Redash')
+ return sf
+
+ def _clean_value(self, value):
+ if isinstance(value, OrderedDict) and 'records' in value:
+ value = value['records']
+ for row in value:
+ row.pop('attributes', None)
+ return value
+
+ def _get_value(self, dct, dots):
+ for key in dots.split('.'):
+ dct = dct.get(key)
+ return dct
+
+ def _get_column_name(self, key, parents=[]):
+ return '.'.join(parents + [key])
+
+ def _build_columns(self, sf, child, parents=[]):
+ child_type = child['attributes']['type']
+ child_desc = sf.__getattr__(child_type).describe()
+ child_type_map = dict((f['name'], f['type']) for f in child_desc['fields'])
+ columns = []
+ for key in child.keys():
+ if key != 'attributes':
+ if isinstance(child[key], OrderedDict) and 'attributes' in child[key]:
+ columns.extend(self._build_columns(sf, child[key], parents + [key]))
+ else:
+ column_name = self._get_column_name(key, parents)
+ key_type = child_type_map.get(key, 'string')
+ column_type = TYPES_MAP.get(key_type, TYPE_STRING)
+ columns.append((column_name, column_type))
+ return columns
+
+ def _build_rows(self, columns, records):
+ rows = []
+ for record in records:
+ record.pop('attributes', None)
+ row = dict()
+ for column in columns:
+ key = column[0]
+ value = self._get_value(record, key)
+ row[key] = self._clean_value(value)
+ rows.append(row)
+ return rows
+
+ def run_query(self, query, user):
+ logger.debug("Salesforce is about to execute query: %s", query)
+ query = re.sub(r"/\*(.|\n)*?\*/", "", query).strip()
+ try:
+ columns = []
+ rows = []
+ sf = self._get_sf()
+ response = sf.query_all(query)
+ records = response['records']
+ if response['totalSize'] > 0 and len(records) == 0:
+ columns = self.fetch_columns([('Count', TYPE_INTEGER)])
+ rows = [{'Count': response['totalSize']}]
+ elif len(records) > 0:
+ cols = self._build_columns(sf, records[0])
+ rows = self._build_rows(cols, records)
+ columns = self.fetch_columns(cols)
+ error = None
+ data = {'columns': columns, 'rows': rows}
+ json_data = json_dumps(data)
+ except SalesforceError as err:
+ error = err.message
+ json_data = None
+ return json_data, error
+
+ def get_schema(self, get_stats=False):
+ sf = self._get_sf()
+ response = sf.describe()
+ if response is None:
+ raise Exception("Failed describing objects.")
+
+ schema = {}
+ for sobject in response['sobjects']:
+ table_name = sobject['name']
+ if sobject['queryable'] is True and table_name not in schema:
+ desc = sf.__getattr__(sobject['name']).describe()
+ fields = desc['fields']
+ schema[table_name] = {'name': table_name, 'columns': [f['name'] for f in fields]}
+ return schema.values()
+
+register(Salesforce)
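
The new Salesforce runner flattens nested SOQL results (for example, Contact records that embed their Account) into dotted column names and reads values back with the _get_value walk. A self-contained sketch of that lookup against a hypothetical record:

def get_value(dct, dots):
    # Walk a nested dict following a dotted path such as 'Account.Name'.
    for key in dots.split('.'):
        dct = dct.get(key)
    return dct


record = {'Id': '003xx0000000001', 'Account': {'Name': 'Acme'}}
assert get_value(record, 'Account.Name') == 'Acme'
assert get_value(record, 'Id') == '003xx0000000001'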
diff --git a/redash/settings.py b/redash/settings.py
index f26f4e65..08eebbaa 100644
--- a/redash/settings.py
+++ b/redash/settings.py
@@ -185,7 +185,9 @@ default_query_runners = [
'redash.query_runner.mssql',
'redash.query_runner.jql',
'redash.query_runner.google_analytics',
- 'redash.query_runner.snowflake'
+ 'redash.query_runner.snowflake',
+ 'redash.query_runner.axibase_tsd',
+ 'redash.query_runner.salesforce'
]
enabled_query_runners = array_from_string(os.environ.get("REDASH_ENABLED_QUERY_RUNNERS", ",".join(default_query_runners)))
diff --git a/redash/static/images/favicon-16x16.png b/redash/static/images/favicon-16x16.png
deleted file mode 120000
index 0c74c25f..00000000
--- a/redash/static/images/favicon-16x16.png
+++ /dev/null
@@ -1 +0,0 @@
-../../../frontend/app/assets/images/favicon-16x16.png
\ No newline at end of file
diff --git a/redash/static/images/favicon-16x16.png b/redash/static/images/favicon-16x16.png
new file mode 100755
index 00000000..2eb05945
Binary files /dev/null and b/redash/static/images/favicon-16x16.png differ
diff --git a/redash/static/images/favicon-32x32.png b/redash/static/images/favicon-32x32.png
deleted file mode 120000
index 3b34fa1f..00000000
--- a/redash/static/images/favicon-32x32.png
+++ /dev/null
@@ -1 +0,0 @@
-../../../frontend/app/assets/images/favicon-32x32.png
\ No newline at end of file
diff --git a/redash/static/images/favicon-32x32.png b/redash/static/images/favicon-32x32.png
new file mode 100755
index 00000000..451b7c86
Binary files /dev/null and b/redash/static/images/favicon-32x32.png differ
diff --git a/redash/static/images/favicon-96x96.png b/redash/static/images/favicon-96x96.png
deleted file mode 120000
index 345ca99c..00000000
--- a/redash/static/images/favicon-96x96.png
+++ /dev/null
@@ -1 +0,0 @@
-../../../frontend/app/assets/images/favicon-96x96.png
\ No newline at end of file
diff --git a/redash/static/images/favicon-96x96.png b/redash/static/images/favicon-96x96.png
new file mode 100755
index 00000000..b9c5ac96
Binary files /dev/null and b/redash/static/images/favicon-96x96.png differ
diff --git a/redash/tasks/alerts.py b/redash/tasks/alerts.py
index bd5bd0cc..657d7302 100644
--- a/redash/tasks/alerts.py
+++ b/redash/tasks/alerts.py
@@ -4,7 +4,6 @@ import datetime
from redash.worker import celery
from redash import utils
from redash import models, settings
-from .base import BaseTask
logger = get_task_logger(__name__)
@@ -34,7 +33,7 @@ def should_notify(alert, new_state):
return new_state != alert.state or (alert.state == models.Alert.TRIGGERED_STATE and passed_rearm_threshold)
-@celery.task(name="redash.tasks.check_alerts_for_query", base=BaseTask)
+@celery.task(name="redash.tasks.check_alerts_for_query")
def check_alerts_for_query(query_id):
logger.debug("Checking query %d for alerts", query_id)
diff --git a/redash/tasks/base.py b/redash/tasks/base.py
deleted file mode 100644
index 81ac989d..00000000
--- a/redash/tasks/base.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from celery import Task
-from redash import create_app
-from flask import has_app_context, current_app
-
-
-class BaseTask(Task):
- abstract = True
-
- def after_return(self, *args, **kwargs):
- if hasattr(self, 'app_ctx'):
- self.app_ctx.pop()
-
- def __call__(self, *args, **kwargs):
- if not has_app_context():
- flask_app = current_app or create_app()
- self.app_ctx = flask_app.app_context()
- self.app_ctx.push()
- return super(BaseTask, self).__call__(*args, **kwargs)
diff --git a/redash/tasks/general.py b/redash/tasks/general.py
index d03bfc7d..23936424 100644
--- a/redash/tasks/general.py
+++ b/redash/tasks/general.py
@@ -4,12 +4,11 @@ from flask_mail import Message
from redash.worker import celery
from redash.version_check import run_version_check
from redash import models, mail, settings
-from .base import BaseTask
logger = get_task_logger(__name__)
-@celery.task(name="redash.tasks.record_event", base=BaseTask)
+@celery.task(name="redash.tasks.record_event")
def record_event(raw_event):
event = models.Event.record(raw_event)
models.db.session.commit()
@@ -28,7 +27,7 @@ def record_event(raw_event):
logger.exception("Failed posting to %s", hook)
-@celery.task(name="redash.tasks.version_check", base=BaseTask)
+@celery.task(name="redash.tasks.version_check")
def version_check():
run_version_check()
@@ -46,7 +45,7 @@ def subscribe(form):
requests.post('https://beacon.redash.io/subscribe', json=data)
-@celery.task(name="redash.tasks.send_mail", base=BaseTask)
+@celery.task(name="redash.tasks.send_mail")
def send_mail(to, subject, html, text):
from redash.wsgi import app
diff --git a/redash/tasks/queries.py b/redash/tasks/queries.py
index 1bcbe3be..788bf94f 100644
--- a/redash/tasks/queries.py
+++ b/redash/tasks/queries.py
@@ -9,7 +9,6 @@ from redash import redis_connection, models, statsd_client, settings, utils
from redash.utils import gen_query_hash
from redash.worker import celery
from redash.query_runner import InterruptException
-from .base import BaseTask
from .alerts import check_alerts_for_query
logger = get_task_logger(__name__)
@@ -155,23 +154,25 @@ class QueryTask(object):
return self._async_result.id
def to_dict(self):
- if self._async_result.status == 'STARTED':
- updated_at = self._async_result.result.get('start_time', 0)
+ task_info = self._async_result._get_task_meta()
+ result, task_status = task_info['result'], task_info['status']
+ if task_status == 'STARTED':
+ updated_at = result.get('start_time', 0)
else:
updated_at = 0
- status = self.STATUSES[self._async_result.status]
+ status = self.STATUSES[task_status]
- if isinstance(self._async_result.result, Exception):
- error = self._async_result.result.message
+ if isinstance(result, Exception):
+ error = result.message
status = 4
- elif self._async_result.status == 'REVOKED':
+ elif task_status == 'REVOKED':
error = 'Query execution cancelled.'
else:
error = ''
- if self._async_result.successful() and not error:
- query_result_id = self._async_result.result
+ if task_status == 'SUCCESS' and not error:
+ query_result_id = result
else:
query_result_id = None
@@ -198,7 +199,7 @@ class QueryTask(object):
return self._async_result.revoke(terminate=True, signal='SIGINT')
-def enqueue_query(query, data_source, user_id, scheduled=False, metadata={}):
+def enqueue_query(query, data_source, user_id, scheduled_query=None, metadata={}):
query_hash = gen_query_hash(query)
logging.info("Inserting job for %s with metadata=%s", query_hash, metadata)
try_count = 0
@@ -224,14 +225,21 @@ def enqueue_query(query, data_source, user_id, scheduled=False, metadata={}):
if not job:
pipe.multi()
- if scheduled:
+ if scheduled_query:
queue_name = data_source.scheduled_queue_name
+ scheduled_query_id = scheduled_query.id
else:
queue_name = data_source.queue_name
+ scheduled_query_id = None
- result = execute_query.apply_async(args=(query, data_source.id, metadata, user_id), queue=queue_name)
+ result = execute_query.apply_async(args=(
+ query, data_source.id, metadata, user_id,
+ scheduled_query_id),
+ queue=queue_name)
job = QueryTask(async_result=result)
- tracker = QueryTaskTracker.create(result.id, 'created', query_hash, data_source.id, scheduled, metadata)
+ tracker = QueryTaskTracker.create(
+ result.id, 'created', query_hash, data_source.id,
+ scheduled_query is not None, metadata)
tracker.save(connection=pipe)
logging.info("[%s] Created new job: %s", query_hash, job.id)
@@ -248,7 +256,7 @@ def enqueue_query(query, data_source, user_id, scheduled=False, metadata={}):
return job
-@celery.task(name="redash.tasks.refresh_queries", base=BaseTask)
+@celery.task(name="redash.tasks.refresh_queries")
def refresh_queries():
logger.info("Refreshing queries...")
@@ -263,7 +271,7 @@ def refresh_queries():
logging.info("Skipping refresh of %s because datasource - %s is paused (%s).", query.id, query.data_source.name, query.data_source.pause_reason)
else:
enqueue_query(query.query_text, query.data_source, query.user_id,
- scheduled=True,
+ scheduled_query=query,
metadata={'Query ID': query.id, 'Username': 'Scheduled'})
query_ids.append(query.id)
@@ -285,7 +293,7 @@ def refresh_queries():
statsd_client.gauge('manager.seconds_since_refresh', now - float(status.get('last_refresh_at', now)))
-@celery.task(name="redash.tasks.cleanup_tasks", base=BaseTask)
+@celery.task(name="redash.tasks.cleanup_tasks")
def cleanup_tasks():
in_progress = QueryTaskTracker.all(QueryTaskTracker.IN_PROGRESS_LIST)
for tracker in in_progress:
@@ -317,7 +325,7 @@ def cleanup_tasks():
QueryTaskTracker.prune(QueryTaskTracker.DONE_LIST, 1000)
-@celery.task(name="redash.tasks.cleanup_query_results", base=BaseTask)
+@celery.task(name="redash.tasks.cleanup_query_results")
def cleanup_query_results():
"""
Job to cleanup unused query results -- such that no query links to them anymore, and older than
@@ -338,7 +346,7 @@ def cleanup_query_results():
logger.info("Deleted %d unused query results.", deleted_count)
-@celery.task(name="redash.tasks.refresh_schemas", base=BaseTask)
+@celery.task(name="redash.tasks.refresh_schemas")
def refresh_schemas():
"""
Refreshes the data sources schemas.
@@ -379,7 +387,8 @@ class QueryExecutionError(Exception):
# We could have created this as a celery.Task derived class, and act as the task itself. But this might result in weird
# issues as the task class created once per process, so decided to have a plain object instead.
class QueryExecutor(object):
- def __init__(self, task, query, data_source_id, user_id, metadata):
+ def __init__(self, task, query, data_source_id, user_id, metadata,
+ scheduled_query):
self.task = task
self.query = query
self.data_source_id = data_source_id
@@ -390,6 +399,7 @@ class QueryExecutor(object):
else:
self.user = None
self.query_hash = gen_query_hash(self.query)
+ self.scheduled_query = scheduled_query
# Load existing tracker or create a new one if the job was created before code update:
self.tracker = QueryTaskTracker.get_by_task_id(task.request.id) or QueryTaskTracker.create(task.request.id,
'created',
@@ -424,7 +434,14 @@ class QueryExecutor(object):
if error:
self.tracker.update(state='failed')
result = QueryExecutionError(error)
+ if self.scheduled_query:
+ self.scheduled_query.schedule_failures += 1
+ models.db.session.add(self.scheduled_query)
else:
+ if (self.scheduled_query and
+ self.scheduled_query.schedule_failures > 0):
+ self.scheduled_query.schedule_failures = 0
+ models.db.session.add(self.scheduled_query)
query_result, updated_query_ids = models.QueryResult.store_result(
self.data_source.org, self.data_source,
self.query_hash, self.query, data,
@@ -451,10 +468,14 @@ class QueryExecutor(object):
return annotated_query
def _log_progress(self, state):
- logger.info(u"task=execute_query state=%s query_hash=%s type=%s ds_id=%d task_id=%s queue=%s query_id=%s username=%s",
- state,
- self.query_hash, self.data_source.type, self.data_source.id, self.task.request.id, self.task.request.delivery_info['routing_key'],
- self.metadata.get('Query ID', 'unknown'), self.metadata.get('Username', 'unknown'))
+ logger.info(
+ u"task=execute_query state=%s query_hash=%s type=%s ds_id=%d "
+ "task_id=%s queue=%s query_id=%s username=%s",
+ state, self.query_hash, self.data_source.type, self.data_source.id,
+ self.task.request.id,
+ self.task.request.delivery_info['routing_key'],
+ self.metadata.get('Query ID', 'unknown'),
+ self.metadata.get('Username', 'unknown'))
self.tracker.update(state=state)
def _load_data_source(self):
@@ -464,6 +485,12 @@ class QueryExecutor(object):
# user_id is added last as a keyword argument for backward compatability -- to support executing previously submitted
# jobs before the upgrade to this version.
-@celery.task(name="redash.tasks.execute_query", bind=True, base=BaseTask, track_started=True)
-def execute_query(self, query, data_source_id, metadata, user_id=None):
- return QueryExecutor(self, query, data_source_id, user_id, metadata).run()
+@celery.task(name="redash.tasks.execute_query", bind=True, track_started=True)
+def execute_query(self, query, data_source_id, metadata, user_id=None,
+ scheduled_query_id=None):
+ if scheduled_query_id is not None:
+ scheduled_query = models.Query.query.get(scheduled_query_id)
+ else:
+ scheduled_query = None
+ return QueryExecutor(self, query, data_source_id, user_id, metadata,
+ scheduled_query).run()
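
The execute_query signature change above threads the scheduled Query object through to QueryExecutor so it can track consecutive failures: a failed scheduled run increments schedule_failures, and the first successful run clears it. A standalone sketch of that bookkeeping using a stand-in object rather than the SQLAlchemy model:

class FakeScheduledQuery(object):
    schedule_failures = 0


def record_run(scheduled_query, error):
    # Mirrors the branch added in QueryExecutor.run(): count consecutive
    # failures, reset the counter on success.
    if error:
        scheduled_query.schedule_failures += 1
    elif scheduled_query.schedule_failures > 0:
        scheduled_query.schedule_failures = 0


q = FakeScheduledQuery()
record_run(q, 'timeout')
record_run(q, 'timeout')
assert q.schedule_failures == 2
record_run(q, None)
assert q.schedule_failures == 0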
diff --git a/redash/worker.py b/redash/worker.py
index f1288119..19fabad9 100644
--- a/redash/worker.py
+++ b/redash/worker.py
@@ -2,10 +2,12 @@ from __future__ import absolute_import
from random import randint
from celery import Celery
+from flask import current_app
from datetime import timedelta
from celery.schedules import crontab
-from redash import settings, __version__
-from redash.metrics import celery
+from celery.signals import worker_process_init
+from redash import settings, __version__, create_app
+from redash.metrics import celery as celery_metrics
celery = Celery('redash',
@@ -48,9 +50,29 @@ celery.conf.update(CELERY_RESULT_BACKEND=settings.CELERY_BACKEND,
if settings.SENTRY_DSN:
from raven import Client
- from raven.contrib.celery import register_signal, register_logger_signal
+ from raven.contrib.celery import register_signal
client = Client(settings.SENTRY_DSN, release=__version__)
register_signal(client)
+# Create a new Task base class that pushes a new Flask app context, to allow DB connections if needed.
+TaskBase = celery.Task
+
+
+class ContextTask(TaskBase):
+ abstract = True
+
+ def __call__(self, *args, **kwargs):
+ with current_app.app_context():
+ return TaskBase.__call__(self, *args, **kwargs)
+
+celery.Task = ContextTask
+
+
+# Create Flask app after forking a new worker, to make sure no resources are shared between processes.
+@worker_process_init.connect
+def init_celery_flask_app(**kwargs):
+ app = create_app()
+ app.app_context().push()
+
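
The worker change above replaces the old BaseTask with a ContextTask installed as celery.Task, plus a worker_process_init hook that creates the Flask app after forking. A minimal sketch of the same pattern with a throwaway Flask/Celery pair (names are illustrative, not Redash's; for simplicity it binds directly to the local flask_app instead of relying on a previously pushed context):

from celery import Celery
from flask import Flask, current_app

flask_app = Flask('sketch')
celery_app = Celery('sketch')

TaskBase = celery_app.Task


class ContextTask(TaskBase):
    abstract = True

    def __call__(self, *args, **kwargs):
        # Every task body runs inside an application context, so Flask
        # extensions (e.g. a SQLAlchemy session) work without extra plumbing.
        with flask_app.app_context():
            return TaskBase.__call__(self, *args, **kwargs)


celery_app.Task = ContextTask


@celery_app.task
def app_name():
    return current_app.name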
diff --git a/requirements_all_ds.txt b/requirements_all_ds.txt
index 0f823331..03f22068 100644
--- a/requirements_all_ds.txt
+++ b/requirements_all_ds.txt
@@ -1,10 +1,10 @@
-google-api-python-client==1.2
+google-api-python-client==1.5.1
gspread==0.2.5
impyla==0.10.0
influxdb==2.7.1
MySQL-python==1.2.5
-oauth2client==1.2
-pyhive==0.1.6
+oauth2client==3.0.0
+pyhive==0.3.0
pymongo==3.2.1
pyOpenSSL==0.14
vertica-python==0.5.1
@@ -18,5 +18,7 @@ thrift>=0.8.0
thrift_sasl>=0.1.0
cassandra-driver==3.1.1
snowflake_connector_python==1.3.7
+atsd_client==2.0.12
+simple_salesforce==0.72.2
# certifi is needed to support MongoDB and SSL:
certifi
diff --git a/setup/amazon_linux/README.md b/setup/amazon_linux/README.md
index d30254ea..6d7f2cb7 100644
--- a/setup/amazon_linux/README.md
+++ b/setup/amazon_linux/README.md
@@ -1 +1,4 @@
+# DEPRECATED
+(left for reference purposes only)
+
Bootstrap script for Amazon Linux AMI. *Not supported*, we recommend to use the Docker images instead.
diff --git a/setup/packer.json b/setup/packer.json
index 27693ff5..d145ed77 100644
--- a/setup/packer.json
+++ b/setup/packer.json
@@ -7,18 +7,32 @@
},
"builders": [
{
- "name": "redash-eu-west-1",
+ "name": "redash-us-east-1",
"type": "amazon-ebs",
"access_key": "{{user `aws_access_key`}}",
"secret_key": "{{user `aws_secret_key`}}",
- "region": "eu-west-1",
- "source_ami": "ami-6177f712",
+ "region": "us-east-1",
+ "source_ami": "ami-4dd2575b",
"instance_type": "t2.micro",
"ssh_username": "ubuntu",
- "ami_name": "redash-{{user `image_version`}}-eu-west-1"
+ "ami_name": "redash-{{user `image_version`}}-us-east-1"
+ },
+ {
+ "type": "googlecompute",
+ "account_file": "account.json",
+ "project_id": "redash-bird-123",
+ "source_image_family": "ubuntu-1604-lts",
+ "zone": "us-central1-a",
+ "ssh_username": "arik"
}
],
"provisioners": [
+ {
+ "type": "shell",
+ "inline": [
+ "sleep 30"
+ ]
+ },
{
"type": "shell",
"script": "ubuntu/bootstrap.sh",
@@ -33,5 +47,15 @@
"type": "shell",
"inline": "sudo rm /home/ubuntu/.ssh/authorized_keys || true"
}
+ ],
+ "post-processors": [
+ {
+ "type": "googlecompute-export",
+ "only": ["googlecompute"],
+ "paths": [
+ "gs://redash-images/redash.{{user `redash_version`}}.tar.gz"
+ ],
+ "keep_input_artifact": true
+ }
]
}
diff --git a/setup/ubuntu/README.md b/setup/ubuntu/README.md
index 63b648e4..c3795b84 100644
--- a/setup/ubuntu/README.md
+++ b/setup/ubuntu/README.md
@@ -1 +1 @@
-Bootstrap scripts for Ubuntu (tested on Ubuntu 14.04, although should work with 12.04).
+Bootstrap scripts for Ubuntu 16.04.
diff --git a/setup/ubuntu/bootstrap.sh b/setup/ubuntu/bootstrap.sh
index 1168b85e..55d4d8cb 100644
--- a/setup/ubuntu/bootstrap.sh
+++ b/setup/ubuntu/bootstrap.sh
@@ -1,195 +1,110 @@
#!/bin/bash
+#
+# This script sets up Redash along with supervisor, nginx, PostgreSQL and Redis. It was written to be used on
+# Ubuntu 16.04. Technically it can work with other Ubuntu versions, but you might get incompatible versions
+# of PostgreSQL, Redis and maybe some other dependencies.
+#
+# This script is not idempotent and if it stops in the middle, you can't just run it again. You should either
+# understand what parts of it to exclude or just start over on a new VM (assuming you're using a VM).
+
set -eu
REDASH_BASE_PATH=/opt/redash
-
-# Default branch/version to master if not specified in REDASH_BRANCH env var
-REDASH_BRANCH="${REDASH_BRANCH:-master}"
-
-# Install latest version if not specified in REDASH_VERSION env var
-REDASH_VERSION=${REDASH_VERSION-0.12.0.b2449}
-LATEST_URL="https://github.com/getredash/redash/releases/download/v${REDASH_VERSION}/redash.${REDASH_VERSION}.tar.gz"
+REDASH_BRANCH="${REDASH_BRANCH:-master}" # Default branch/version to master if not specified in REDASH_BRANCH env var
+REDASH_VERSION=${REDASH_VERSION-1.0.1.b2833} # Install latest version if not specified in REDASH_VERSION env var
+LATEST_URL="https://s3.amazonaws.com/redash-releases/redash.${REDASH_VERSION}.tar.gz"
VERSION_DIR="/opt/redash/redash.${REDASH_VERSION}"
REDASH_TARBALL=/tmp/redash.tar.gz
+FILES_BASE_URL=https://raw.githubusercontent.com/getredash/redash/${REDASH_BRANCH}/setup/ubuntu/files
-FILES_BASE_URL=https://raw.githubusercontent.com/getredash/redash/${REDASH_BRANCH}/setup/ubuntu/files/
+cd /tmp/
-# Verify running as root:
-if [ "$(id -u)" != "0" ]; then
- if [ $# -ne 0 ]; then
- echo "Failed running with sudo. Exiting." 1>&2
- exit 1
+verify_root() {
+ # Verify running as root:
+ if [ "$(id -u)" != "0" ]; then
+ if [ $# -ne 0 ]; then
+ echo "Failed running with sudo. Exiting." 1>&2
+ exit 1
+ fi
+ echo "This script must be run as root. Trying to run with sudo."
+ sudo bash "$0" --with-sudo
+ exit 0
fi
- echo "This script must be run as root. Trying to run with sudo."
- sudo bash "$0" --with-sudo
- exit 0
-fi
-
-# Base packages
-apt-get -y update
-DEBIAN_FRONTEND=noninteractive apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" dist-upgrade
-apt-get install -y python-pip python-dev nginx curl build-essential pwgen
-# BigQuery dependencies:
-apt-get install -y libffi-dev libssl-dev
-# MySQL dependencies:
-apt-get install -y libmysqlclient-dev
-# Microsoft SQL Server dependencies:
-apt-get install -y freetds-dev
-# Hive dependencies:
-apt-get install -y libsasl2-dev
-#Saml dependency
-apt-get install -y xmlsec1
-
-# Upgrade pip if host is Ubuntu 16.04
-if [[ $(lsb_release -d) = *Ubuntu* ]] && [[ $(lsb_release -rs) = *16.04* ]]; then
- pip install --upgrade pip
-fi
-pip install -U setuptools==23.1.0
-
-# redash user
-# TODO: check user doesn't exist yet?
-adduser --system --no-create-home --disabled-login --gecos "" redash
-
-# PostgreSQL
-pg_available=0
-psql --version || pg_available=$?
-if [ $pg_available -ne 0 ]; then
- wget $FILES_BASE_URL"postgres_apt.sh" -O /tmp/postgres_apt.sh
- bash /tmp/postgres_apt.sh
- apt-get update
- apt-get -y install postgresql-9.3 postgresql-server-dev-9.3
-fi
-
-add_service() {
- service_name=$1
- service_command="/etc/init.d/$service_name"
-
- echo "Adding service: $service_name (/etc/init.d/$service_name)."
- chmod +x "$service_command"
-
- if command -v chkconfig >/dev/null 2>&1; then
- # we're chkconfig, so lets add to chkconfig and put in runlevel 345
- chkconfig --add "$service_name" && echo "Successfully added to chkconfig!"
- chkconfig --level 345 "$service_name" on && echo "Successfully added to runlevels 345!"
- elif command -v update-rc.d >/dev/null 2>&1; then
- #if we're not a chkconfig box assume we're able to use update-rc.d
- update-rc.d "$service_name" defaults && echo "Success!"
- else
- echo "No supported init tool found."
- fi
-
- $service_command start
}
-# Redis
-redis_available=0
-redis-cli --version || redis_available=$?
-if [ $redis_available -ne 0 ]; then
- wget http://download.redis.io/releases/redis-2.8.17.tar.gz
- tar xzf redis-2.8.17.tar.gz
- rm redis-2.8.17.tar.gz
- (cd redis-2.8.17
- make
- make install
+create_redash_user() {
+ adduser --system --no-create-home --disabled-login --gecos "" redash
+}
- # Setup process init & configuration
+install_system_packages() {
+ apt-get -y update
+ # Base packages
+ apt install -y python-pip python-dev nginx curl build-essential pwgen
+ # Data sources dependencies:
+ apt install -y libffi-dev libssl-dev libmysqlclient-dev libpq-dev freetds-dev libsasl2-dev
+ # SAML dependency
+ apt install -y xmlsec1
+ # Storage servers
+ apt install -y postgresql redis-server
+ apt install -y supervisor
+}
- REDIS_PORT=6379
- REDIS_CONFIG_FILE="/etc/redis/$REDIS_PORT.conf"
- REDIS_LOG_FILE="/var/log/redis_$REDIS_PORT.log"
- REDIS_DATA_DIR="/var/lib/redis/$REDIS_PORT"
+create_directories() {
+ mkdir /opt/redash
+ chown redash /opt/redash
+
+ # Default config file
+ if [ ! -f "/opt/redash/.env" ]; then
+ sudo -u redash wget "$FILES_BASE_URL/env" -O /opt/redash/.env
+ fi
- mkdir -p "$(dirname "$REDIS_CONFIG_FILE")" || die "Could not create redis config directory"
- mkdir -p "$(dirname "$REDIS_LOG_FILE")" || die "Could not create redis log dir"
- mkdir -p "$REDIS_DATA_DIR" || die "Could not create redis data directory"
+ COOKIE_SECRET=$(pwgen -1s 32)
+ echo "export REDASH_COOKIE_SECRET=$COOKIE_SECRET" >> /opt/redash/.env
+}
- wget -O /etc/init.d/redis_6379 $FILES_BASE_URL"redis_init"
- wget -O $REDIS_CONFIG_FILE $FILES_BASE_URL"redis.conf"
-
- add_service "redis_$REDIS_PORT"
- )
- rm -rf redis-2.8.17
-fi
-
-# Directories
-if [ ! -d "$REDASH_BASE_PATH" ]; then
- sudo mkdir /opt/redash
- sudo chown redash /opt/redash
- sudo -u redash mkdir /opt/redash/logs
-fi
-
-# Default config file
-if [ ! -f "/opt/redash/.env" ]; then
- sudo -u redash wget $FILES_BASE_URL"env" -O /opt/redash/.env
- echo 'export REDASH_STATIC_ASSETS_PATH="../rd_ui/dist/"' >> /opt/redash/.env
-fi
-
-if [ ! -d "$VERSION_DIR" ]; then
+extract_redash_sources() {
sudo -u redash wget "$LATEST_URL" -O "$REDASH_TARBALL"
sudo -u redash mkdir "$VERSION_DIR"
sudo -u redash tar -C "$VERSION_DIR" -xvf "$REDASH_TARBALL"
ln -nfs "$VERSION_DIR" /opt/redash/current
ln -nfs /opt/redash/.env /opt/redash/current/.env
+}
- cd /opt/redash/current
-
+install_python_packages() {
+ pip install --upgrade pip
# TODO: venv?
- pip install -r requirements.txt
-fi
+ pip install setproctitle # setproctitle is used by Celery for "pretty" process titles
+ pip install -r /opt/redash/current/requirements.txt
+ pip install -r /opt/redash/current/requirements_all_ds.txt
+}
-# Create database / tables
-pg_user_exists=0
-sudo -u postgres psql postgres -tAc "SELECT 1 FROM pg_roles WHERE rolname='redash'" | grep -q 1 || pg_user_exists=$?
-if [ $pg_user_exists -ne 0 ]; then
- echo "Creating redash postgres user & database."
+create_database() {
+ # Create user and database
sudo -u postgres createuser redash --no-superuser --no-createdb --no-createrole
sudo -u postgres createdb redash --owner=redash
cd /opt/redash/current
sudo -u redash bin/run ./manage.py database create_tables
-fi
+}
-# Create default admin user
-cd /opt/redash/current
-# TODO: make sure user created only once
-# TODO: generate temp password and print to screen
-sudo -u redash bin/run ./manage.py users create --admin --password admin "Admin" "admin"
+setup_supervisor() {
+ wget -O /etc/supervisor/conf.d/redash.conf "$FILES_BASE_URL/supervisord.conf"
+ service supervisor restart
+}
-# Create Redash read only pg user & setup data source
-pg_user_exists=0
-sudo -u postgres psql postgres -tAc "SELECT 1 FROM pg_roles WHERE rolname='redash_reader'" | grep -q 1 || pg_user_exists=$?
-if [ $pg_user_exists -ne 0 ]; then
- echo "Creating redash reader postgres user."
- REDASH_READER_PASSWORD=$(pwgen -1)
- sudo -u postgres psql -c "CREATE ROLE redash_reader WITH PASSWORD '$REDASH_READER_PASSWORD' NOCREATEROLE NOCREATEDB NOSUPERUSER LOGIN"
- sudo -u redash psql -c "grant select(id,name,type) ON data_sources to redash_reader;" redash
- sudo -u redash psql -c "grant select(id,name) ON users to redash_reader;" redash
- sudo -u redash psql -c "grant select on alerts, alert_subscriptions, groups, events, queries, dashboards, widgets, visualizations, query_results to redash_reader;" redash
+setup_nginx() {
+ rm /etc/nginx/sites-enabled/default
+ wget -O /etc/nginx/sites-available/redash "$FILES_BASE_URL/nginx_redash_site"
+ ln -nfs /etc/nginx/sites-available/redash /etc/nginx/sites-enabled/redash
+ service nginx restart
+}
- cd /opt/redash/current
- sudo -u redash bin/run ./manage.py ds new "Redash Metadata" --type "pg" --options "{\"user\": \"redash_reader\", \"password\": \"$REDASH_READER_PASSWORD\", \"host\": \"localhost\", \"dbname\": \"redash\"}"
-fi
-
-# Pip requirements for all data source types
-cd /opt/redash/current
-pip install -r requirements_all_ds.txt
-
-# Setup supervisord + sysv init startup script
-sudo -u redash mkdir -p /opt/redash/supervisord
-pip install supervisor==3.1.2 # TODO: move to requirements.txt
-
-# Get supervisord startup script
-sudo -u redash wget -O /opt/redash/supervisord/supervisord.conf $FILES_BASE_URL"supervisord.conf"
-
-wget -O /etc/init.d/redash_supervisord $FILES_BASE_URL"redash_supervisord_init"
-add_service "redash_supervisord"
-
-# Nginx setup
-rm /etc/nginx/sites-enabled/default
-wget -O /etc/nginx/sites-available/redash $FILES_BASE_URL"nginx_redash_site"
-ln -nfs /etc/nginx/sites-available/redash /etc/nginx/sites-enabled/redash
-service nginx restart
-
-# Hotfix: missing query snippets table:
-cd /opt/redash/current
-sudo -u redash bin/run python -c "from redash import models; models.QuerySnippet.create_table()"
+verify_root
+install_system_packages
+create_redash_user
+create_directories
+extract_redash_sources
+install_python_packages
+create_database
+setup_supervisor
+setup_nginx
\ No newline at end of file
diff --git a/setup/ubuntu/files/env b/setup/ubuntu/files/env
index 7df78824..e1c2e86d 100644
--- a/setup/ubuntu/files/env
+++ b/setup/ubuntu/files/env
@@ -1,4 +1,3 @@
export REDASH_LOG_LEVEL="INFO"
export REDASH_REDIS_URL=redis://localhost:6379/0
-export REDASH_DATABASE_URL="postgresql://redash"
-export REDASH_COOKIE_SECRET=veryverysecret
+export REDASH_DATABASE_URL="postgresql:///redash"
diff --git a/setup/ubuntu/files/postgres_apt.sh b/setup/ubuntu/files/postgres_apt.sh
deleted file mode 100644
index 35018d94..00000000
--- a/setup/ubuntu/files/postgres_apt.sh
+++ /dev/null
@@ -1,162 +0,0 @@
-#!/bin/sh
-
-# script to add apt.postgresql.org to sources.list
-
-# from command line
-CODENAME="$1"
-# lsb_release is the best interface, but not always available
-if [ -z "$CODENAME" ]; then
- CODENAME=$(lsb_release -cs 2>/dev/null)
-fi
-# parse os-release (unreliable, does not work on Ubuntu)
-if [ -z "$CODENAME" -a -f /etc/os-release ]; then
- . /etc/os-release
- # Debian: VERSION="7.0 (wheezy)"
- # Ubuntu: VERSION="13.04, Raring Ringtail"
- CODENAME=$(echo $VERSION | sed -ne 's/.*(\(.*\)).*/\1/')
-fi
-# guess from sources.list
-if [ -z "$CODENAME" ]; then
- CODENAME=$(grep '^deb ' /etc/apt/sources.list | head -n1 | awk '{ print $3 }')
-fi
-# complain if no result yet
-if [ -z "$CODENAME" ]; then
- cat <<EOF
[remainder of the deleted postgres_apt.sh and the diff header of the next deleted file were lost in extraction]
diff --git a/setup/ubuntu/files/redash_supervisord_init b/setup/ubuntu/files/redash_supervisord_init
--- a/setup/ubuntu/files/redash_supervisord_init
+++ /dev/null
-
-# Do NOT "set -e"
-
-# PATH should only include /usr/* if it runs after the mountnfs.sh script
-PATH=/sbin:/usr/sbin:/usr/local/sbin:/bin:/usr/bin:/usr/local/bin
-NAME=supervisord
-DESC="process supervisor"
-DAEMON=/usr/local/bin/$NAME
-DAEMON_ARGS="--configuration /opt/redash/supervisord/supervisord.conf "
-PIDFILE=/opt/redash/supervisord/supervisord.pid
-SCRIPTNAME=/etc/init.d/redash_supervisord
-USER=redash
-
-# Exit if the package is not installed
-[ -x "$DAEMON" ] || exit 0
-
-# Read configuration variable file if it is present
-[ -r /etc/default/$NAME ] && . /etc/default/$NAME
-
-# Load the VERBOSE setting and other rcS variables
-. /lib/init/vars.sh
-
-# Define LSB log_* functions.
-# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
-# and status_of_proc is working.
-. /lib/lsb/init-functions
-
-#
-# Function that starts the daemon/service
-#
-do_start()
-{
- # Return
- # 0 if daemon has been started
- # 1 if daemon was already running
- # 2 if daemon could not be started
- start-stop-daemon --start --quiet --pidfile $PIDFILE --user $USER --chuid $USER --exec $DAEMON --test > /dev/null \
- || return 1
- start-stop-daemon --start --quiet --pidfile $PIDFILE --user $USER --chuid $USER --exec $DAEMON -- \
- $DAEMON_ARGS \
- || return 2
- # Add code here, if necessary, that waits for the process to be ready
- # to handle requests from services started subsequently which depend
- # on this one. As a last resort, sleep for some time.
-}
-
-#
-# Function that stops the daemon/service
-#
-do_stop()
-{
- # Return
- # 0 if daemon has been stopped
- # 1 if daemon was already stopped
- # 2 if daemon could not be stopped
- # other if a failure occurred
- start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --user $USER --chuid $USER --name $NAME
- RETVAL="$?"
- [ "$RETVAL" = 2 ] && return 2
- # Wait for children to finish too if this is a daemon that forks
- # and if the daemon is only ever run from this initscript.
- # If the above conditions are not satisfied then add some other code
- # that waits for the process to drop all resources that could be
- # needed by services started subsequently. A last resort is to
- # sleep for some time.
- start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 --user $USER --chuid $USER --exec $DAEMON
- [ "$?" = 2 ] && return 2
- # Many daemons don't delete their pidfiles when they exit.
- rm -f $PIDFILE
- return "$RETVAL"
-}
-
-case "$1" in
- start)
- [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
- do_start
- case "$?" in
- 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
- 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
- esac
- ;;
- stop)
- [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
- do_stop
- case "$?" in
- 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
- 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
- esac
- ;;
- status)
- status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
- ;;
- restart)
- log_daemon_msg "Restarting $DESC" "$NAME"
- do_stop
- case "$?" in
- 0|1)
- do_start
- case "$?" in
- 0) log_end_msg 0 ;;
- 1) log_end_msg 1 ;; # Old process is still running
- *) log_end_msg 1 ;; # Failed to start
- esac
- ;;
- *)
- # Failed to stop
- log_end_msg 1
- ;;
- esac
- ;;
- *)
- echo "Usage: $SCRIPTNAME {start|stop|status|restart}" >&2
- exit 3
- ;;
-esac
-
-:
diff --git a/setup/ubuntu/files/redis.conf b/setup/ubuntu/files/redis.conf
deleted file mode 100644
index efe1244f..00000000
--- a/setup/ubuntu/files/redis.conf
+++ /dev/null
@@ -1,785 +0,0 @@
-## Generated by install_server.sh ##
-# Redis configuration file example
-
-# Note on units: when memory size is needed, it is possible to specify
-# it in the usual form of 1k 5GB 4M and so forth:
-#
-# 1k => 1000 bytes
-# 1kb => 1024 bytes
-# 1m => 1000000 bytes
-# 1mb => 1024*1024 bytes
-# 1g => 1000000000 bytes
-# 1gb => 1024*1024*1024 bytes
-#
-# units are case insensitive so 1GB 1Gb 1gB are all the same.
-
-################################## INCLUDES ###################################
-
-# Include one or more other config files here. This is useful if you
-# have a standard template that goes to all Redis server but also need
-# to customize a few per-server settings. Include files can include
-# other files, so use this wisely.
-#
-# Notice option "include" won't be rewritten by command "CONFIG REWRITE"
-# from admin or Redis Sentinel. Since Redis always uses the last processed
-# line as value of a configuration directive, you'd better put includes
-# at the beginning of this file to avoid overwriting config change at runtime.
-#
-# If instead you are interested in using includes to override configuration
-# options, it is better to use include as the last line.
-#
-# include /path/to/local.conf
-# include /path/to/other.conf
-
-################################ GENERAL #####################################
-
-# By default Redis does not run as a daemon. Use 'yes' if you need it.
-# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
-daemonize yes
-
-# When running daemonized, Redis writes a pid file in /var/run/redis.pid by
-# default. You can specify a custom pid file location here.
-pidfile /var/run/redis_6379.pid
-
-# Accept connections on the specified port, default is 6379.
-# If port 0 is specified Redis will not listen on a TCP socket.
-port 6379
-
-# TCP listen() backlog.
-#
-# In high requests-per-second environments you need an high backlog in order
-# to avoid slow clients connections issues. Note that the Linux kernel
-# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
-# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
-# in order to get the desired effect.
-tcp-backlog 511
-
-# By default Redis listens for connections from all the network interfaces
-# available on the server. It is possible to listen to just one or multiple
-# interfaces using the "bind" configuration directive, followed by one or
-# more IP addresses.
-#
-# Examples:
-#
-# bind 192.168.1.100 10.0.0.1
-bind 127.0.0.1
-
-# Specify the path for the Unix socket that will be used to listen for
-# incoming connections. There is no default, so Redis will not listen
-# on a unix socket when not specified.
-#
-# unixsocket /tmp/redis.sock
-# unixsocketperm 700
-
-# Close the connection after a client is idle for N seconds (0 to disable)
-timeout 0
-
-# TCP keepalive.
-#
-# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
-# of communication. This is useful for two reasons:
-#
-# 1) Detect dead peers.
-# 2) Take the connection alive from the point of view of network
-# equipment in the middle.
-#
-# On Linux, the specified value (in seconds) is the period used to send ACKs.
-# Note that to close the connection the double of the time is needed.
-# On other kernels the period depends on the kernel configuration.
-#
-# A reasonable value for this option is 60 seconds.
-tcp-keepalive 0
-
-# Specify the server verbosity level.
-# This can be one of:
-# debug (a lot of information, useful for development/testing)
-# verbose (many rarely useful info, but not a mess like the debug level)
-# notice (moderately verbose, what you want in production probably)
-# warning (only very important / critical messages are logged)
-loglevel notice
-
-# Specify the log file name. Also the empty string can be used to force
-# Redis to log on the standard output. Note that if you use standard
-# output for logging but daemonize, logs will be sent to /dev/null
-logfile /var/log/redis_6379.log
-
-# To enable logging to the system logger, just set 'syslog-enabled' to yes,
-# and optionally update the other syslog parameters to suit your needs.
-# syslog-enabled no
-
-# Specify the syslog identity.
-# syslog-ident redis
-
-# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
-# syslog-facility local0
-
-# Set the number of databases. The default database is DB 0, you can select
-# a different one on a per-connection basis using SELECT <dbid> where
-# dbid is a number between 0 and 'databases'-1
-databases 16
-
-################################ SNAPSHOTTING ################################
-#
-# Save the DB on disk:
-#
-# save <seconds> <changes>
-#
-# Will save the DB if both the given number of seconds and the given
-# number of write operations against the DB occurred.
-#
-# In the example below the behaviour will be to save:
-# after 900 sec (15 min) if at least 1 key changed
-# after 300 sec (5 min) if at least 10 keys changed
-# after 60 sec if at least 10000 keys changed
-#
-# Note: you can disable saving at all commenting all the "save" lines.
-#
-# It is also possible to remove all the previously configured save
-# points by adding a save directive with a single empty string argument
-# like in the following example:
-#
-# save ""
-
-save 900 1
-save 300 10
-save 60 10000
-
-# By default Redis will stop accepting writes if RDB snapshots are enabled
-# (at least one save point) and the latest background save failed.
-# This will make the user aware (in a hard way) that data is not persisting
-# on disk properly, otherwise chances are that no one will notice and some
-# disaster will happen.
-#
-# If the background saving process will start working again Redis will
-# automatically allow writes again.
-#
-# However if you have setup your proper monitoring of the Redis server
-# and persistence, you may want to disable this feature so that Redis will
-# continue to work as usual even if there are problems with disk,
-# permissions, and so forth.
-stop-writes-on-bgsave-error yes
-
-# Compress string objects using LZF when dump .rdb databases?
-# For default that's set to 'yes' as it's almost always a win.
-# If you want to save some CPU in the saving child set it to 'no' but
-# the dataset will likely be bigger if you have compressible values or keys.
-rdbcompression yes
-
-# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
-# This makes the format more resistant to corruption but there is a performance
-# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
-# for maximum performances.
-#
-# RDB files created with checksum disabled have a checksum of zero that will
-# tell the loading code to skip the check.
-rdbchecksum yes
-
-# The filename where to dump the DB
-dbfilename dump.rdb
-
-# The working directory.
-#
-# The DB will be written inside this directory, with the filename specified
-# above using the 'dbfilename' configuration directive.
-#
-# The Append Only File will also be created inside this directory.
-#
-# Note that you must specify a directory here, not a file name.
-dir /var/lib/redis/6379
-
-################################# REPLICATION #################################
-
-# Master-Slave replication. Use slaveof to make a Redis instance a copy of
-# another Redis server. A few things to understand ASAP about Redis replication.
-#
-# 1) Redis replication is asynchronous, but you can configure a master to
-# stop accepting writes if it appears to be not connected with at least
-# a given number of slaves.
-# 2) Redis slaves are able to perform a partial resynchronization with the
-# master if the replication link is lost for a relatively small amount of
-# time. You may want to configure the replication backlog size (see the next
-# sections of this file) with a sensible value depending on your needs.
-# 3) Replication is automatic and does not need user intervention. After a
-# network partition slaves automatically try to reconnect to masters
-# and resynchronize with them.
-#
-# slaveof <masterip> <masterport>
-
-# If the master is password protected (using the "requirepass" configuration
-# directive below) it is possible to tell the slave to authenticate before
-# starting the replication synchronization process, otherwise the master will
-# refuse the slave request.
-#
-# masterauth <master-password>
-
-# When a slave loses its connection with the master, or when the replication
-# is still in progress, the slave can act in two different ways:
-#
-# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
-# still reply to client requests, possibly with out of date data, or the
-# data set may just be empty if this is the first synchronization.
-#
-# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
-# an error "SYNC with master in progress" to all the kind of commands
-# but to INFO and SLAVEOF.
-#
-slave-serve-stale-data yes
-
-# You can configure a slave instance to accept writes or not. Writing against
-# a slave instance may be useful to store some ephemeral data (because data
-# written on a slave will be easily deleted after resync with the master) but
-# may also cause problems if clients are writing to it because of a
-# misconfiguration.
-#
-# Since Redis 2.6 by default slaves are read-only.
-#
-# Note: read only slaves are not designed to be exposed to untrusted clients
-# on the internet. It's just a protection layer against misuse of the instance.
-# Still a read only slave exports by default all the administrative commands
-# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
-# security of read only slaves using 'rename-command' to shadow all the
-# administrative / dangerous commands.
-slave-read-only yes
-
-# Slaves send PINGs to server in a predefined interval. It's possible to change
-# this interval with the repl_ping_slave_period option. The default value is 10
-# seconds.
-#
-# repl-ping-slave-period 10
-
-# The following option sets the replication timeout for:
-#
-# 1) Bulk transfer I/O during SYNC, from the point of view of slave.
-# 2) Master timeout from the point of view of slaves (data, pings).
-# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
-#
-# It is important to make sure that this value is greater than the value
-# specified for repl-ping-slave-period otherwise a timeout will be detected
-# every time there is low traffic between the master and the slave.
-#
-# repl-timeout 60
-
-# Disable TCP_NODELAY on the slave socket after SYNC?
-#
-# If you select "yes" Redis will use a smaller number of TCP packets and
-# less bandwidth to send data to slaves. But this can add a delay for
-# the data to appear on the slave side, up to 40 milliseconds with
-# Linux kernels using a default configuration.
-#
-# If you select "no" the delay for data to appear on the slave side will
-# be reduced but more bandwidth will be used for replication.
-#
-# By default we optimize for low latency, but in very high traffic conditions
-# or when the master and slaves are many hops away, turning this to "yes" may
-# be a good idea.
-repl-disable-tcp-nodelay no
-
-# Set the replication backlog size. The backlog is a buffer that accumulates
-# slave data when slaves are disconnected for some time, so that when a slave
-# wants to reconnect again, often a full resync is not needed, but a partial
-# resync is enough, just passing the portion of data the slave missed while
-# disconnected.
-#
-# The bigger the replication backlog, the longer the slave can be
-# disconnected and still be able to perform a partial resynchronization later.
-#
-# The backlog is only allocated once there is at least a slave connected.
-#
-# repl-backlog-size 1mb
-
-# After a master has had no connected slaves for some time, the backlog
-# will be freed. The following option configures the amount of seconds that
-# need to elapse, starting from the time the last slave disconnected, for
-# the backlog buffer to be freed.
-#
-# A value of 0 means to never release the backlog.
-#
-# repl-backlog-ttl 3600
-
-# The slave priority is an integer number published by Redis in the INFO output.
-# It is used by Redis Sentinel in order to select a slave to promote into a
-# master if the master is no longer working correctly.
-#
-# A slave with a low priority number is considered better for promotion, so
-# for instance if there are three slaves with priorities 10, 100 and 25,
-# Sentinel will pick the one with priority 10, which is the lowest.
-#
-# However a special priority of 0 marks the slave as not able to perform the
-# role of master, so a slave with priority of 0 will never be selected by
-# Redis Sentinel for promotion.
-#
-# By default the priority is 100.
-slave-priority 100
-
-# It is possible for a master to stop accepting writes if there are less than
-# N slaves connected, with a lag less than or equal to M seconds.
-#
-# The N slaves need to be in "online" state.
-#
-# The lag in seconds, which must be <= the specified value, is calculated from
-# the last ping received from the slave, which is usually sent every second.
-#
-# This option does not GUARANTEE that N replicas will accept the write, but
-# it limits the window of exposure for lost writes, in case not enough slaves
-# are available, to the specified number of seconds.
-#
-# For example to require at least 3 slaves with a lag <= 10 seconds use:
-#
-# min-slaves-to-write 3
-# min-slaves-max-lag 10
-#
-# Setting one or the other to 0 disables the feature.
-#
-# By default min-slaves-to-write is set to 0 (feature disabled) and
-# min-slaves-max-lag is set to 10.
-
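
For reference, the write-safety knobs described above can also be applied to a running server. A minimal sketch using the redis-py client (the client, host and values are illustrative assumptions, not part of this change):

    import redis

    r = redis.Redis(host="localhost", port=6379)

    # Refuse writes on the master unless at least 3 slaves are connected
    # with a replication lag of 10 seconds or less.
    r.config_set("min-slaves-to-write", 3)
    r.config_set("min-slaves-max-lag", 10)

    # Inspect what is currently in effect.
    print(r.config_get("min-slaves-*"))
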
-################################## SECURITY ###################################
-
-# Require clients to issue AUTH before processing any other
-# commands. This might be useful in environments in which you do not trust
-# others with access to the host running redis-server.
-#
-# This should stay commented out for backward compatibility and because most
-# people do not need auth (e.g. they run their own servers).
-#
-# Warning: since Redis is pretty fast an outside user can try up to
-# 150k passwords per second against a good box. This means that you should
-# use a very strong password otherwise it will be very easy to break.
-#
-# requirepass foobared
-
-# Command renaming.
-#
-# It is possible to change the name of dangerous commands in a shared
-# environment. For instance the CONFIG command may be renamed into something
-# hard to guess so that it will still be available for internal-use tools
-# but not available for general clients.
-#
-# Example:
-#
-# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
-#
-# It is also possible to completely kill a command by renaming it into
-# an empty string:
-#
-# rename-command CONFIG ""
-#
-# Please note that changing the name of commands that are logged into the
-# AOF file or transmitted to slaves may cause problems.
-
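
If requirepass were enabled as described above, clients would have to authenticate before issuing commands. A minimal, illustrative sketch with redis-py (the password value is a placeholder):

    import redis

    # Assumes redis.conf contains: requirepass some-long-example-password
    r = redis.Redis(host="localhost", port=6379,
                    password="some-long-example-password")

    # Commands now succeed only after AUTH, which the client sends for us.
    r.set("greeting", "hello")
    print(r.get("greeting"))
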
-################################### LIMITS ####################################
-
-# Set the max number of connected clients at the same time. By default
-# this limit is set to 10000 clients, however if the Redis server is not
-# able to configure the process file limit to allow for the specified limit,
-# the max number of allowed clients is set to the current file limit
-# minus 32 (as Redis reserves a few file descriptors for internal uses).
-#
-# Once the limit is reached Redis will close all the new connections sending
-# an error 'max number of clients reached'.
-#
-# maxclients 10000
-
-# Don't use more memory than the specified amount of bytes.
-# When the memory limit is reached Redis will try to remove keys
-# according to the eviction policy selected (see maxmemory-policy).
-#
-# If Redis can't remove keys according to the policy, or if the policy is
-# set to 'noeviction', Redis will start to reply with errors to commands
-# that would use more memory, like SET, LPUSH, and so on, and will continue
-# to reply to read-only commands like GET.
-#
-# This option is usually useful when using Redis as an LRU cache, or to set
-# a hard memory limit for an instance (using the 'noeviction' policy).
-#
-# WARNING: If you have slaves attached to an instance with maxmemory on,
-# the size of the output buffers needed to feed the slaves is subtracted
-# from the used memory count, so that network problems / resyncs will
-# not trigger a loop where keys are evicted, and in turn the output
-# buffer of slaves is full with DELs of keys evicted triggering the deletion
-# of more keys, and so forth until the database is completely emptied.
-#
-# In short... if you have slaves attached it is suggested that you set a lower
-# limit for maxmemory so that there is some free RAM on the system for slave
-# output buffers (but this is not needed if the policy is 'noeviction').
-#
-# maxmemory <bytes>
-
-# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
-# is reached. You can select among five behaviors:
-#
-# volatile-lru -> remove the key with an expire set using an LRU algorithm
-# allkeys-lru -> remove any key according to the LRU algorithm
-# volatile-random -> remove a random key with an expire set
-# allkeys-random -> remove a random key, any key
-# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
-# noeviction -> don't expire at all, just return an error on write operations
-#
-# Note: with any of the above policies, Redis will return an error on write
-# operations when there are no suitable keys for eviction.
-#
-# At the date of writing these commands are: set setnx setex append
-# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
-# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
-# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
-# getset mset msetnx exec sort
-#
-# The default is:
-#
-# maxmemory-policy volatile-lru
-
-# LRU and minimal TTL algorithms are not precise algorithms but approximated
-# algorithms (in order to save memory), so you can select as well the sample
-# size to check. For instance by default Redis will check three keys and
-# pick the one that was used least recently; you can change the sample size
-# using the following configuration directive.
-#
-# maxmemory-samples 3
-
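
As a rough illustration of the memory-limit behaviour described above, the same settings can be applied at runtime via CONFIG SET; this sketch assumes a local instance and the redis-py client, and the values are examples only:

    import redis

    r = redis.Redis()

    # Cap memory at 100 MB and evict least-recently-used keys among those
    # that have an expire set (equivalent to "maxmemory-policy volatile-lru").
    r.config_set("maxmemory", 100 * 1024 * 1024)
    r.config_set("maxmemory-policy", "volatile-lru")

    print(r.config_get("maxmemory"))
    print(r.config_get("maxmemory-policy"))
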
-############################## APPEND ONLY MODE ###############################
-
-# By default Redis asynchronously dumps the dataset on disk. This mode is
-# good enough in many applications, but an issue with the Redis process or
-# a power outage may result in a few minutes of lost writes (depending on
-# the configured save points).
-#
-# The Append Only File is an alternative persistence mode that provides
-# much better durability. For instance using the default data fsync policy
-# (see later in the config file) Redis can lose just one second of writes in a
-# dramatic event like a server power outage, or a single write if something
-# goes wrong with the Redis process itself while the operating system is
-# still running correctly.
-#
-# AOF and RDB persistence can be enabled at the same time without problems.
-# If the AOF is enabled on startup Redis will load the AOF, that is the file
-# with the best durability guarantees.
-#
-# Please check http://redis.io/topics/persistence for more information.
-
-appendonly no
-
-# The name of the append only file (default: "appendonly.aof")
-
-appendfilename "appendonly.aof"
-
-# The fsync() call tells the Operating System to actually write data on disk
-# instead of waiting for more data in the output buffer. Some OSes will really
-# flush data to disk, while others will just try to do it ASAP.
-#
-# Redis supports three different modes:
-#
-# no: don't fsync, just let the OS flush the data when it wants. Faster.
-# always: fsync after every write to the append only log. Slow, Safest.
-# everysec: fsync only one time every second. Compromise.
-#
-# The default is "everysec", as that's usually the right compromise between
-# speed and data safety. It's up to you to understand if you can relax this to
-# "no" that will let the operating system flush the output buffer when
-# it wants, for better performances (but if you can live with the idea of
-# some data loss consider the default persistence mode that's snapshotting),
-# or on the contrary, use "always" that's very slow but a bit safer than
-# everysec.
-#
-# For more details please check the following article:
-# http://antirez.com/post/redis-persistence-demystified.html
-#
-# If unsure, use "everysec".
-
-# appendfsync always
-appendfsync everysec
-# appendfsync no
-
-# When the AOF fsync policy is set to always or everysec, and a background
-# saving process (a background save or AOF log background rewriting) is
-# performing a lot of I/O against the disk, in some Linux configurations
-# Redis may block too long on the fsync() call. Note that there is no fix for
-# this currently, as even performing fsync in a different thread will block
-# our synchronous write(2) call.
-#
-# In order to mitigate this problem it's possible to use the following option
-# that will prevent fsync() from being called in the main process while a
-# BGSAVE or BGREWRITEAOF is in progress.
-#
-# This means that while another child is saving, the durability of Redis is
-# the same as "appendfsync none". In practical terms, this means that it is
-# possible to lose up to 30 seconds of log in the worst scenario (with the
-# default Linux settings).
-#
-# If you have latency problems turn this to "yes". Otherwise leave it as
-# "no" that is the safest pick from the point of view of durability.
-
-no-appendfsync-on-rewrite no
-
-# Automatic rewrite of the append only file.
-# Redis is able to automatically rewrite the log file implicitly calling
-# BGREWRITEAOF when the AOF log size grows by the specified percentage.
-#
-# This is how it works: Redis remembers the size of the AOF file after the
-# latest rewrite (if no rewrite has happened since the restart, the size of
-# the AOF at startup is used).
-#
-# This base size is compared to the current size. If the current size is
-# bigger than the specified percentage, the rewrite is triggered. Also
-# you need to specify a minimal size for the AOF file to be rewritten; this
-# is useful to avoid rewriting the AOF file even if the percentage increase
-# is reached but the file is still pretty small.
-#
-# Specify a percentage of zero in order to disable the automatic AOF
-# rewrite feature.
-
-auto-aof-rewrite-percentage 100
-auto-aof-rewrite-min-size 64mb
-
-# An AOF file may be found to be truncated at the end during the Redis
-# startup process, when the AOF data gets loaded back into memory.
-# This may happen when the system where Redis is running
-# crashes, especially when an ext4 filesystem is mounted without the
-# data=ordered option (however this can't happen when Redis itself
-# crashes or aborts but the operating system still works correctly).
-#
-# Redis can either exit with an error when this happens, or load as much
-# data as possible (the default now) and start if the AOF file is found
-# to be truncated at the end. The following option controls this behavior.
-#
-# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
-# the Redis server starts emitting a log to inform the user of the event.
-# Otherwise if the option is set to no, the server aborts with an error
-# and refuses to start. When the option is set to no, the user is required
-# to fix the AOF file using the "redis-check-aof" utility before restarting
-# the server.
-#
-# Note that if the AOF file is found to be corrupted in the middle,
-# the server will still exit with an error. This option only applies when
-# Redis tries to read more data from the AOF file but not enough bytes
-# are found.
-aof-load-truncated yes
-
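
The AOF settings above can also be toggled on a live server; a minimal sketch with redis-py (illustrative values, not part of this change):

    import redis

    r = redis.Redis()

    # Turn on the append only file with the default fsync policy.
    r.config_set("appendonly", "yes")
    r.config_set("appendfsync", "everysec")

    # Ask Redis to compact the AOF in the background, as the automatic
    # rewrite thresholds above would do once the file grows enough.
    r.bgrewriteaof()
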
-################################ LUA SCRIPTING ###############################
-
-# Max execution time of a Lua script in milliseconds.
-#
-# If the maximum execution time is reached Redis will log that a script is
-# still in execution after the maximum allowed time and will start to
-# reply to queries with an error.
-#
-# When a long running script exceeds the maximum execution time only the
-# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
-# used to stop a script that has not yet called any write commands. The second
-# is the only way to shut down the server in case a write command was
-# already issued by the script but the user doesn't want to wait for the
-# natural termination of the script.
-#
-# Set it to 0 or a negative value for unlimited execution without warnings.
-lua-time-limit 5000
-
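
To make the lua-time-limit discussion concrete, here is a small, illustrative EVAL call through redis-py; the script and key name are made up for the example:

    import redis

    r = redis.Redis()

    # EVAL runs the script atomically on the server. lua-time-limit bounds how
    # long such a script may run before Redis starts answering other clients
    # with an error (and SCRIPT KILL / SHUTDOWN NOSAVE become the only way out).
    total = r.eval("return redis.call('INCRBY', KEYS[1], ARGV[1])",
                   1, "example:counter", 5)
    print(total)
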
-################################## SLOW LOG ###################################
-
-# The Redis Slow Log is a system to log queries that exceeded a specified
-# execution time. The execution time does not include the I/O operations
-# like talking with the client, sending the reply and so forth,
-# but just the time needed to actually execute the command (this is the only
-# stage of command execution where the thread is blocked and can not serve
-# other requests in the meantime).
-#
-# You can configure the slow log with two parameters: one tells Redis
-# what is the execution time, in microseconds, to exceed in order for the
-# command to get logged, and the other parameter is the length of the
-# slow log. When a new command is logged the oldest one is removed from the
-# queue of logged commands.
-
-# The following time is expressed in microseconds, so 1000000 is equivalent
-# to one second. Note that a negative number disables the slow log, while
-# a value of zero forces the logging of every command.
-slowlog-log-slower-than 10000
-
-# There is no limit to this length. Just be aware that it will consume memory.
-# You can reclaim memory used by the slow log with SLOWLOG RESET.
-slowlog-max-len 128
-
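
A short sketch of how the slow log parameters above are typically consumed, using redis-py with the values configured in this file (illustrative only):

    import redis

    r = redis.Redis()

    # Log anything slower than 10000 microseconds (10 ms), keep 128 entries.
    r.config_set("slowlog-log-slower-than", 10000)
    r.config_set("slowlog-max-len", 128)

    # Read the ten most recent slow commands, then reclaim the memory.
    for entry in r.slowlog_get(10):
        print(entry)
    r.slowlog_reset()
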
-################################ LATENCY MONITOR ##############################
-
-# The Redis latency monitoring subsystem samples different operations
-# at runtime in order to collect data related to possible sources of
-# latency of a Redis instance.
-#
-# Via the LATENCY command this information is available to the user that can
-# print graphs and obtain reports.
-#
-# The system only logs operations that were performed in a time equal to or
-# greater than the amount of milliseconds specified via the
-# latency-monitor-threshold configuration directive. When its value is set
-# to zero, the latency monitor is turned off.
-#
-# By default latency monitoring is disabled since it is mostly not needed
-# if you don't have latency issues, and collecting data has a performance
-# impact that, while very small, can be measured under big load. Latency
-# monitoring can easily be enabled at runtime using the command
-# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
-latency-monitor-threshold 0
-
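
If latency monitoring were enabled, the collected events could be read back with the LATENCY command; a minimal sketch (redis-py, threshold value is an example):

    import redis

    r = redis.Redis()

    # Record every event that takes 100 ms or more.
    r.config_set("latency-monitor-threshold", 100)

    # Latest latency spike per monitored event (empty until something is slow).
    print(r.execute_command("LATENCY", "LATEST"))
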
-############################# Event notification ##############################
-
-# Redis can notify Pub/Sub clients about events happening in the key space.
-# This feature is documented at http://redis.io/topics/notifications
-#
-# For instance if keyspace events notification is enabled, and a client
-# performs a DEL operation on key "foo" stored in the Database 0, two
-# messages will be published via Pub/Sub:
-#
-# PUBLISH __keyspace@0__:foo del
-# PUBLISH __keyevent@0__:del foo
-#
-# It is possible to select the events that Redis will notify among a set
-# of classes. Every class is identified by a single character:
-#
-# K Keyspace events, published with __keyspace@<db>__ prefix.
-# E Keyevent events, published with __keyevent@<db>__ prefix.
-# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
-# $ String commands
-# l List commands
-# s Set commands
-# h Hash commands
-# z Sorted set commands
-# x Expired events (events generated every time a key expires)
-# e Evicted events (events generated when a key is evicted for maxmemory)
-# A Alias for g$lshzxe, so that the "AKE" string means all the events.
-#
-# The "notify-keyspace-events" takes as argument a string that is composed
-# by zero or multiple characters. The empty string means that notifications
-# are disabled at all.
-#
-# Example: to enable list and generic events, from the point of view of the
-# event name, use:
-#
-# notify-keyspace-events Elg
-#
-# Example 2: to get the stream of the expired keys subscribing to channel
-# name __keyevent@0__:expired use:
-#
-# notify-keyspace-events Ex
-#
-# By default all notifications are disabled because most users don't need
-# this feature and the feature has some overhead. Note that if you don't
-# specify at least one of K or E, no events will be delivered.
-notify-keyspace-events ""
-
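
As an illustration of the notification classes listed above, the following sketch subscribes to expired-key events with redis-py (key name and timing are arbitrary; expired events may arrive with a short delay because expiration is lazy):

    import redis

    r = redis.Redis()

    # "E" = keyevent notifications, "x" = expired events.
    r.config_set("notify-keyspace-events", "Ex")

    p = r.pubsub()
    p.psubscribe("__keyevent@0__:expired")

    r.set("session:42", "payload", ex=1)  # expires after one second

    for message in p.listen():
        if message["type"] == "pmessage":
            print("expired key:", message["data"])
            break
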
-############################### ADVANCED CONFIG ###############################
-
-# Hashes are encoded using a memory efficient data structure when they have a
-# small number of entries, and the biggest entry does not exceed a given
-# threshold. These thresholds can be configured using the following directives.
-hash-max-ziplist-entries 512
-hash-max-ziplist-value 64
-
-# Similarly to hashes, small lists are also encoded in a special way in order
-# to save a lot of space. The special representation is only used when
-# you are under the following limits:
-list-max-ziplist-entries 512
-list-max-ziplist-value 64
-
-# Sets have a special encoding in just one case: when a set is composed
-# of just strings that happen to be integers in radix 10 in the range
-# of 64 bit signed integers.
-# The following configuration setting sets the limit on the size of the
-# set in order to use this special memory saving encoding.
-set-max-intset-entries 512
-
-# Similarly to hashes and lists, sorted sets are also specially encoded in
-# order to save a lot of space. This encoding is only used when the length and
-# elements of a sorted set are below the following limits:
-zset-max-ziplist-entries 128
-zset-max-ziplist-value 64
-
-# HyperLogLog sparse representation bytes limit. The limit includes the
-# 16 bytes header. When a HyperLogLog using the sparse representation crosses
-# this limit, it is converted into the dense representation.
-#
-# A value greater than 16000 is totally useless, since at that point the
-# dense representation is more memory efficient.
-#
-# The suggested value is ~ 3000 in order to have the benefits of
-# the space efficient encoding without slowing down too much PFADD,
-# which is O(N) with the sparse encoding. The value can be raised to
-# ~ 10000 when CPU is not a concern, but space is, and the data set is
-# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
-hll-sparse-max-bytes 3000
-
-# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
-# order to help rehashing the main Redis hash table (the one mapping top-level
-# keys to values). The hash table implementation Redis uses (see dict.c)
-# performs lazy rehashing: the more operations you run against a hash table
-# that is rehashing, the more rehashing "steps" are performed, so if the
-# server is idle the rehashing is never complete and some more memory is used
-# by the hash table.
-#
-# The default is to use this millisecond 10 times every second in order to
-# actively rehash the main dictionaries, freeing memory when possible.
-#
-# If unsure:
-# use "activerehashing no" if you have hard latency requirements and it is
-# not acceptable in your environment that Redis can reply from time to time
-# to queries with a 2 millisecond delay.
-#
-# use "activerehashing yes" if you don't have such hard requirements but
-# want to free memory asap when possible.
-activerehashing yes
-
-# The client output buffer limits can be used to force disconnection of clients
-# that are not reading data from the server fast enough for some reason (a
-# common reason is that a Pub/Sub client can't consume messages as fast as the
-# publisher can produce them).
-#
-# The limit can be set differently for the three different classes of clients:
-#
-# normal -> normal clients including MONITOR clients
-# slave -> slave clients
-# pubsub -> clients subscribed to at least one pubsub channel or pattern
-#
-# The syntax of every client-output-buffer-limit directive is the following:
-#
-# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
-#
-# A client is immediately disconnected once the hard limit is reached, or if
-# the soft limit is reached and remains reached for the specified number of
-# seconds (continuously).
-# So for instance if the hard limit is 32 megabytes and the soft limit is
-# 16 megabytes / 10 seconds, the client will get disconnected immediately
-# if the size of the output buffers reaches 32 megabytes, but will also get
-# disconnected if the client reaches 16 megabytes and continuously overcomes
-# the limit for 10 seconds.
-#
-# By default normal clients are not limited because they don't receive data
-# without asking (in a push way), but just after a request, so only
-# asynchronous clients may create a scenario where data is requested faster
-# than it can be read.
-#
-# Instead there is a default limit for pubsub and slave clients, since
-# subscribers and slaves receive data in a push fashion.
-#
-# Both the hard or the soft limit can be disabled by setting them to zero.
-client-output-buffer-limit normal 0 0 0
-client-output-buffer-limit slave 256mb 64mb 60
-client-output-buffer-limit pubsub 32mb 8mb 60
-
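
The same buffer limits can be changed at runtime; a minimal sketch with redis-py (the pubsub values mirror the defaults above, expressed in bytes):

    import redis

    r = redis.Redis()

    # class, hard limit, soft limit, soft seconds -- here: pubsub 32mb 8mb 60.
    r.config_set("client-output-buffer-limit", "pubsub 33554432 8388608 60")
    print(r.config_get("client-output-buffer-limit"))
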
-# Redis calls an internal function to perform many background tasks, like
-# closing connections of clients in timeout, purging expired keys that are
-# never requested, and so forth.
-#
-# Not all tasks are performed with the same frequency, but Redis checks for
-# tasks to perform according to the specified "hz" value.
-#
-# By default "hz" is set to 10. Raising the value will use more CPU when
-# Redis is idle, but at the same time will make Redis more responsive when
-# there are many keys expiring at the same time, and timeouts may be
-# handled with more precision.
-#
-# The range is between 1 and 500, however a value over 100 is usually not
-# a good idea. Most users should use the default of 10 and raise this up to
-# 100 only in environments where very low latency is required.
-hz 10
-
-# When a child rewrites the AOF file, if the following option is enabled
-# the file will be fsync-ed every 32 MB of data generated. This is useful
-# in order to commit the file to the disk more incrementally and avoid
-# big latency spikes.
-aof-rewrite-incremental-fsync yes
diff --git a/setup/ubuntu/files/redis_init b/setup/ubuntu/files/redis_init
deleted file mode 100644
index e20d856a..00000000
--- a/setup/ubuntu/files/redis_init
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/bin/sh
-
-EXEC=/usr/local/bin/redis-server
-CLIEXEC=/usr/local/bin/redis-cli
-PIDFILE=/var/run/redis_6379.pid
-CONF="/etc/redis/6379.conf"
-REDISPORT="6379"
-###############
-# SysV Init Information
-# chkconfig: - 58 74
-# description: redis_6379 is the redis daemon.
-### BEGIN INIT INFO
-# Provides: redis_6379
-# Required-Start: $network $local_fs $remote_fs
-# Required-Stop: $network $local_fs $remote_fs
-# Default-Start: 2 3 4 5
-# Default-Stop: 0 1 6
-# Should-Start: $syslog $named
-# Should-Stop: $syslog $named
-# Short-Description: start and stop redis_6379
-# Description: Redis daemon
-### END INIT INFO
-
-
-case "$1" in
- start)
- if [ -f $PIDFILE ]
- then
- echo "$PIDFILE exists, process is already running or crashed"
- else
- echo "Starting Redis server..."
- $EXEC $CONF
- fi
- ;;
- stop)
- if [ ! -f $PIDFILE ]
- then
- echo "$PIDFILE does not exist, process is not running"
- else
- PID=$(cat $PIDFILE)
- echo "Stopping ..."
- $CLIEXEC -p $REDISPORT shutdown
- while [ -x /proc/${PID} ]
- do
- echo "Waiting for Redis to shutdown ..."
- sleep 1
- done
- echo "Redis stopped"
- fi
- ;;
- status)
- if [ ! -f $PIDFILE ]
- then
- echo 'Redis is not running'
- else
- echo "Redis is running ($(<$PIDFILE))"
- fi
- ;;
- restart)
- $0 stop
- $0 start
- ;;
- *)
- echo "Please use start, stop, restart or status as first argument"
- ;;
-esac
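
The deleted init script checks whether Redis is up via its PID file; a rough functional equivalent of its status branch, written against the redis-py client instead (illustrative, not part of the setup scripts):

    import redis

    try:
        redis.Redis(host="localhost", port=6379).ping()
        print("Redis is running")
    except redis.exceptions.ConnectionError:
        print("Redis is not running")
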
diff --git a/setup/ubuntu/files/supervisord.conf b/setup/ubuntu/files/supervisord.conf
index 5be2cad2..98bb227d 100644
--- a/setup/ubuntu/files/supervisord.conf
+++ b/setup/ubuntu/files/supervisord.conf
@@ -1,24 +1,14 @@
-[supervisord]
-nodaemon=false
-logfile=/opt/redash/logs/supervisord.log
-pidfile=/opt/redash/supervisord/supervisord.pid
-directory=/opt/redash/current
-
[inet_http_server]
port = 127.0.0.1:9001
-[rpcinterface:supervisor]
-supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
-
[program:redash_server]
command=/opt/redash/current/bin/run gunicorn -b 127.0.0.1:5000 --name redash -w 4 --max-requests 1000 redash.wsgi:app
+directory=/opt/redash/current
process_name=redash_server
+user=redash
numprocs=1
-priority=999
autostart=true
autorestart=true
-stdout_logfile=/opt/redash/logs/api.log
-stderr_logfile=/opt/redash/logs/api_error.log
# There are two queue types here: one for ad-hoc queries, and one for the refresh of scheduled queries
# (note that "scheduled_queries" appears only in the queue list of "redash_celery_scheduled").
@@ -26,20 +16,18 @@ stderr_logfile=/opt/redash/logs/api_error.log
[program:redash_celery]
command=/opt/redash/current/bin/run celery worker --app=redash.worker --beat -c2 -Qqueries,celery --maxtasksperchild=10 -Ofair
+directory=/opt/redash/current
process_name=redash_celery
+user=redash
numprocs=1
-priority=999
autostart=true
autorestart=true
-stdout_logfile=/opt/redash/logs/celery.log
-stderr_logfile=/opt/redash/logs/celery_error.log
[program:redash_celery_scheduled]
command=/opt/redash/current/bin/run celery worker --app=redash.worker -c2 -Qscheduled_queries --maxtasksperchild=10 -Ofair
+directory=/opt/redash/current
process_name=redash_celery_scheduled
+user=redash
numprocs=1
-priority=999
autostart=true
autorestart=true
-stdout_logfile=/opt/redash/logs/celery.log
-stderr_logfile=/opt/redash/logs/celery_error.log
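
Since the [inet_http_server] block keeps supervisord's XML-RPC interface on 127.0.0.1:9001, process state can also be inspected programmatically. A minimal sketch (Python 2, matching the test code in this change; the endpoint is the one configured above):

    import xmlrpclib

    proxy = xmlrpclib.ServerProxy("http://127.0.0.1:9001/RPC2")
    for proc in proxy.supervisor.getAllProcessInfo():
        print(proc["name"], proc["statename"])
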
diff --git a/tests/factories.py b/tests/factories.py
index 1cc878fa..216f868e 100644
--- a/tests/factories.py
+++ b/tests/factories.py
@@ -69,7 +69,7 @@ api_key_factory = ModelFactory(redash.models.ApiKey,
query_factory = ModelFactory(redash.models.Query,
name='Query',
description='',
- query_text='SELECT 1',
+ query_text=u'SELECT 1',
user=user_factory.create,
is_archived=False,
is_draft=False,
diff --git a/tests/tasks/test_queries.py b/tests/tasks/test_queries.py
index 198c2ea0..06dddc1f 100644
--- a/tests/tasks/test_queries.py
+++ b/tests/tasks/test_queries.py
@@ -1,11 +1,14 @@
-from tests import BaseTestCase
-from redash import redis_connection
-from redash.tasks.queries import QueryTaskTracker, enqueue_query, execute_query
from unittest import TestCase
-from mock import MagicMock
from collections import namedtuple
import uuid
+import mock
+
+from tests import BaseTestCase
+from redash import redis_connection, models
+from redash.query_runner.pg import PostgreSQL
+from redash.tasks.queries import QueryTaskTracker, enqueue_query, execute_query
+
class TestPrune(TestCase):
def setUp(self):
@@ -45,11 +48,11 @@ def gen_hash(*args, **kwargs):
class TestEnqueueTask(BaseTestCase):
def test_multiple_enqueue_of_same_query(self):
query = self.factory.create_query()
- execute_query.apply_async = MagicMock(side_effect=gen_hash)
+ execute_query.apply_async = mock.MagicMock(side_effect=gen_hash)
- enqueue_query(query.query_text, query.data_source, True, {'Username': 'Arik', 'Query ID': query.id})
- enqueue_query(query.query_text, query.data_source, True, {'Username': 'Arik', 'Query ID': query.id})
- enqueue_query(query.query_text, query.data_source, True, {'Username': 'Arik', 'Query ID': query.id})
+ enqueue_query(query.query_text, query.data_source, query.user_id, query, {'Username': 'Arik', 'Query ID': query.id})
+ enqueue_query(query.query_text, query.data_source, query.user_id, query, {'Username': 'Arik', 'Query ID': query.id})
+ enqueue_query(query.query_text, query.data_source, query.user_id, query, {'Username': 'Arik', 'Query ID': query.id})
self.assertEqual(1, execute_query.apply_async.call_count)
self.assertEqual(1, redis_connection.zcard(QueryTaskTracker.WAITING_LIST))
@@ -58,13 +61,84 @@ class TestEnqueueTask(BaseTestCase):
def test_multiple_enqueue_of_different_query(self):
query = self.factory.create_query()
- execute_query.apply_async = MagicMock(side_effect=gen_hash)
+ execute_query.apply_async = mock.MagicMock(side_effect=gen_hash)
- enqueue_query(query.query_text, query.data_source, True, {'Username': 'Arik', 'Query ID': query.id})
- enqueue_query(query.query_text + '2', query.data_source, True, {'Username': 'Arik', 'Query ID': query.id})
- enqueue_query(query.query_text + '3', query.data_source, True, {'Username': 'Arik', 'Query ID': query.id})
+ enqueue_query(query.query_text, query.data_source, query.user_id, None, {'Username': 'Arik', 'Query ID': query.id})
+ enqueue_query(query.query_text + '2', query.data_source, query.user_id, None, {'Username': 'Arik', 'Query ID': query.id})
+ enqueue_query(query.query_text + '3', query.data_source, query.user_id, None, {'Username': 'Arik', 'Query ID': query.id})
self.assertEqual(3, execute_query.apply_async.call_count)
self.assertEqual(3, redis_connection.zcard(QueryTaskTracker.WAITING_LIST))
self.assertEqual(0, redis_connection.zcard(QueryTaskTracker.IN_PROGRESS_LIST))
self.assertEqual(0, redis_connection.zcard(QueryTaskTracker.DONE_LIST))
+
+
+class QueryExecutorTests(BaseTestCase):
+
+ def test_success(self):
+ """
+ ``execute_query`` invokes the query runner and stores a query result.
+ """
+ cm = mock.patch("celery.app.task.Context.delivery_info", {'routing_key': 'test'})
+ with cm, mock.patch.object(PostgreSQL, "run_query") as qr:
+ qr.return_value = ([1, 2], None)
+ result_id = execute_query("SELECT 1, 2", self.factory.data_source.id, {})
+ self.assertEqual(1, qr.call_count)
+ result = models.QueryResult.query.get(result_id)
+ self.assertEqual(result.data, '{1,2}')
+
+ def test_success_scheduled(self):
+ """
+ Scheduled queries remember their latest results.
+ """
+ cm = mock.patch("celery.app.task.Context.delivery_info",
+ {'routing_key': 'test'})
+ q = self.factory.create_query(query_text="SELECT 1, 2", schedule=300)
+ with cm, mock.patch.object(PostgreSQL, "run_query") as qr:
+ qr.return_value = ([1, 2], None)
+ result_id = execute_query(
+ "SELECT 1, 2",
+ self.factory.data_source.id, {},
+ scheduled_query_id=q.id)
+ q = models.Query.get_by_id(q.id)
+ self.assertEqual(q.schedule_failures, 0)
+ result = models.QueryResult.query.get(result_id)
+ self.assertEqual(q.latest_query_data, result)
+
+ def test_failure_scheduled(self):
+ """
+ Scheduled queries that fail have their failure recorded.
+ """
+ cm = mock.patch("celery.app.task.Context.delivery_info",
+ {'routing_key': 'test'})
+ q = self.factory.create_query(query_text="SELECT 1, 2", schedule=300)
+ with cm, mock.patch.object(PostgreSQL, "run_query") as qr:
+ qr.exception = ValueError("broken")
+ execute_query("SELECT 1, 2", self.factory.data_source.id, {}, scheduled_query_id=q.id)
+ self.assertEqual(q.schedule_failures, 1)
+ execute_query("SELECT 1, 2", self.factory.data_source.id, {}, scheduled_query_id=q.id)
+ q = models.Query.get_by_id(q.id)
+ self.assertEqual(q.schedule_failures, 2)
+
+ def test_success_after_failure(self):
+ """
+ Query execution success resets the failure counter.
+ """
+ cm = mock.patch("celery.app.task.Context.delivery_info",
+ {'routing_key': 'test'})
+ q = self.factory.create_query(query_text="SELECT 1, 2", schedule=300)
+ with cm, mock.patch.object(PostgreSQL, "run_query") as qr:
+ qr.exception = ValueError("broken")
+ execute_query("SELECT 1, 2",
+ self.factory.data_source.id, {},
+ scheduled_query_id=q.id)
+ q = models.Query.get_by_id(q.id)
+ self.assertEqual(q.schedule_failures, 1)
+
+ with cm, mock.patch.object(PostgreSQL, "run_query") as qr:
+ qr.return_value = ([1, 2], None)
+ execute_query("SELECT 1, 2",
+ self.factory.data_source.id, {},
+ scheduled_query_id=q.id)
+ q = models.Query.get_by_id(q.id)
+ self.assertEqual(q.schedule_failures, 0)
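
The failure counter exercised above feeds the scheduling backoff tested further down in test_models.py. An illustrative sketch of that rule (not the exact Redash implementation): each recorded failure pushes the next run out by an extra 2**failures minutes.

    import datetime


    def next_run_at(previous_run, interval_seconds, failures):
        """Earliest time a scheduled query should be refreshed again."""
        next_run = previous_run + datetime.timedelta(seconds=interval_seconds)
        if failures:
            next_run += datetime.timedelta(minutes=2 ** failures)
        return next_run


    now = datetime.datetime.utcnow()
    two_hours_ago = now - datetime.timedelta(hours=2)
    print(next_run_at(two_hours_ago, 3600, 5) < now)   # True: still due
    print(next_run_at(two_hours_ago, 3600, 10) < now)  # False: backed off
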
diff --git a/tests/tasks/test_refresh_queries.py b/tests/tasks/test_refresh_queries.py
index 03dbfdd8..90641ed1 100644
--- a/tests/tasks/test_refresh_queries.py
+++ b/tests/tasks/test_refresh_queries.py
@@ -1,108 +1,47 @@
-import datetime
from mock import patch, call, ANY
from tests import BaseTestCase
-from redash.utils import utcnow
from redash.tasks import refresh_queries
-from redash.models import db
+from redash.models import Query
-# TODO: this test should be split into two:
-# 1. tests for Query.outdated_queries method
-# 2. test for the refresh_query task
-class TestRefreshQueries(BaseTestCase):
+class TestRefreshQuery(BaseTestCase):
def test_enqueues_outdated_queries(self):
- query = self.factory.create_query(schedule="60")
- retrieved_at = utcnow() - datetime.timedelta(minutes=10)
- query_result = self.factory.create_query_result(retrieved_at=retrieved_at, query_text=query.query_text,
- query_hash=query.query_hash)
- query.latest_query_data = query_result
- db.session.add(query)
-
- with patch('redash.tasks.queries.enqueue_query') as add_job_mock:
+ """
+ refresh_queries() launches an execution task for each query returned
+ from Query.outdated_queries().
+ """
+ query1 = self.factory.create_query()
+ query2 = self.factory.create_query(
+ query_text="select 42;",
+ data_source=self.factory.create_data_source())
+ oq = staticmethod(lambda: [query1, query2])
+ with patch('redash.tasks.queries.enqueue_query') as add_job_mock, \
+ patch.object(Query, 'outdated_queries', oq):
refresh_queries()
- add_job_mock.assert_called_with(query.query_text, query.data_source, query.user_id, scheduled=True, metadata=ANY)
+ self.assertEqual(add_job_mock.call_count, 2)
+ add_job_mock.assert_has_calls([
+ call(query1.query_text, query1.data_source, query1.user_id,
+ scheduled_query=query1, metadata=ANY),
+ call(query2.query_text, query2.data_source, query2.user_id,
+ scheduled_query=query2, metadata=ANY)], any_order=True)
def test_doesnt_enqueue_outdated_queries_for_paused_data_source(self):
- query = self.factory.create_query(schedule="60")
- retrieved_at = utcnow() - datetime.timedelta(minutes=10)
- query_result = self.factory.create_query_result(retrieved_at=retrieved_at, query_text=query.query_text,
- query_hash=query.query_hash)
- query.latest_query_data = query_result
- db.session.add(query)
-
+ """
+ refresh_queries() does not launch execution tasks for queries whose
+ data source is paused.
+ """
+ query = self.factory.create_query()
+ oq = staticmethod(lambda: [query])
query.data_source.pause()
+ with patch.object(Query, 'outdated_queries', oq):
+ with patch('redash.tasks.queries.enqueue_query') as add_job_mock:
+ refresh_queries()
+ add_job_mock.assert_not_called()
- with patch('redash.tasks.queries.enqueue_query') as add_job_mock:
- refresh_queries()
- add_job_mock.assert_not_called()
+ query.data_source.resume()
- query.data_source.resume()
-
- with patch('redash.tasks.queries.enqueue_query') as add_job_mock:
- refresh_queries()
- add_job_mock.assert_called_with(query.query_text, query.data_source, query.user_id, scheduled=True, metadata=ANY)
-
- def test_skips_fresh_queries(self):
- query = self.factory.create_query(schedule="1200")
- retrieved_at = utcnow() - datetime.timedelta(minutes=10)
- query_result = self.factory.create_query_result(retrieved_at=retrieved_at, query_text=query.query_text,
- query_hash=query.query_hash)
-
- with patch('redash.tasks.queries.enqueue_query') as add_job_mock:
- refresh_queries()
- self.assertFalse(add_job_mock.called)
-
- def test_skips_queries_with_no_ttl(self):
- query = self.factory.create_query(schedule=None)
- retrieved_at = utcnow() - datetime.timedelta(minutes=10)
- query_result = self.factory.create_query_result(retrieved_at=retrieved_at, query_text=query.query_text,
- query_hash=query.query_hash)
-
- with patch('redash.tasks.queries.enqueue_query') as add_job_mock:
- refresh_queries()
- self.assertFalse(add_job_mock.called)
-
- def test_enqueues_query_only_once(self):
- query = self.factory.create_query(schedule="60")
- query2 = self.factory.create_query(schedule="60", query_text=query.query_text, query_hash=query.query_hash)
- retrieved_at = utcnow() - datetime.timedelta(minutes=10)
- query_result = self.factory.create_query_result(retrieved_at=retrieved_at, query_text=query.query_text,
- query_hash=query.query_hash)
- query.latest_query_data = query_result
- query2.latest_query_data = query_result
- db.session.add_all([query, query2])
-
- with patch('redash.tasks.queries.enqueue_query') as add_job_mock:
- refresh_queries()
- add_job_mock.assert_called_once_with(query.query_text, query.data_source, query.user_id, scheduled=True, metadata=ANY)#{'Query ID': query.id, 'Username': 'Scheduled'})
-
- def test_enqueues_query_with_correct_data_source(self):
- query = self.factory.create_query(schedule="60", data_source=self.factory.create_data_source())
- query2 = self.factory.create_query(schedule="60", query_text=query.query_text, query_hash=query.query_hash)
- retrieved_at = utcnow() - datetime.timedelta(minutes=10)
- query_result = self.factory.create_query_result(retrieved_at=retrieved_at, query_text=query.query_text,
- query_hash=query.query_hash)
- query.latest_query_data = query_result
- query2.latest_query_data = query_result
- db.session.add_all([query, query2])
-
- with patch('redash.tasks.queries.enqueue_query') as add_job_mock:
- refresh_queries()
- add_job_mock.assert_has_calls([call(query2.query_text, query2.data_source, query2.user_id, scheduled=True, metadata=ANY),
- call(query.query_text, query.data_source, query.user_id, scheduled=True, metadata=ANY)],
- any_order=True)
- self.assertEquals(2, add_job_mock.call_count)
-
- def test_enqueues_only_for_relevant_data_source(self):
- query = self.factory.create_query(schedule="60")
- query2 = self.factory.create_query(schedule="3600", query_text=query.query_text, query_hash=query.query_hash)
- retrieved_at = utcnow() - datetime.timedelta(minutes=10)
- query_result = self.factory.create_query_result(retrieved_at=retrieved_at, query_text=query.query_text,
- query_hash=query.query_hash)
- query.latest_query_data = query_result
- query2.latest_query_data = query_result
- db.session.add_all([query, query2])
-
- with patch('redash.tasks.queries.enqueue_query') as add_job_mock:
- refresh_queries()
- add_job_mock.assert_called_once_with(query.query_text, query.data_source, query.user_id, scheduled=True, metadata=ANY)
+ with patch('redash.tasks.queries.enqueue_query') as add_job_mock:
+ refresh_queries()
+ add_job_mock.assert_called_with(
+ query.query_text, query.data_source, query.user_id,
+ scheduled_query=query, metadata=ANY)
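
For context, a rough sketch of the refresh loop these tests describe (illustrative only, not the actual redash.tasks.queries code): every outdated query whose data source is not paused is handed to enqueue_query, with the query object itself passed as scheduled_query.

    def refresh_outdated(outdated_queries, enqueue_query):
        """Enqueue an execution task for each refreshable query."""
        for query in outdated_queries:
            if query.data_source.paused:
                continue
            enqueue_query(query.query_text, query.data_source, query.user_id,
                          scheduled_query=query,
                          metadata={'Query ID': query.id,
                                    'Username': 'Scheduled'})
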
diff --git a/tests/test_models.py b/tests/test_models.py
index 90576b3b..81597def 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -30,31 +30,45 @@ class ShouldScheduleNextTest(TestCase):
def test_interval_schedule_that_needs_reschedule(self):
now = utcnow()
two_hours_ago = now - datetime.timedelta(hours=2)
- self.assertTrue(models.should_schedule_next(two_hours_ago, now, "3600"))
+ self.assertTrue(models.should_schedule_next(two_hours_ago, now, "3600",
+ 0))
def test_interval_schedule_that_doesnt_need_reschedule(self):
now = utcnow()
half_an_hour_ago = now - datetime.timedelta(minutes=30)
- self.assertFalse(models.should_schedule_next(half_an_hour_ago, now, "3600"))
+ self.assertFalse(models.should_schedule_next(half_an_hour_ago, now,
+ "3600", 0))
def test_exact_time_that_needs_reschedule(self):
now = utcnow()
yesterday = now - datetime.timedelta(days=1)
scheduled_datetime = now - datetime.timedelta(hours=3)
scheduled_time = "{:02d}:00".format(scheduled_datetime.hour)
- self.assertTrue(models.should_schedule_next(yesterday, now, scheduled_time))
+ self.assertTrue(models.should_schedule_next(yesterday, now,
+ scheduled_time, 0))
def test_exact_time_that_doesnt_need_reschedule(self):
now = date_parse("2015-10-16 20:10")
yesterday = date_parse("2015-10-15 23:07")
schedule = "23:00"
- self.assertFalse(models.should_schedule_next(yesterday, now, schedule))
+ self.assertFalse(models.should_schedule_next(yesterday, now, schedule,
+ 0))
def test_exact_time_with_day_change(self):
now = utcnow().replace(hour=0, minute=1)
- previous = (now - datetime.timedelta(days=2)).replace(hour=23, minute=59)
+ previous = (now - datetime.timedelta(days=2)).replace(hour=23,
+ minute=59)
schedule = "23:59".format(now.hour + 3)
- self.assertTrue(models.should_schedule_next(previous, now, schedule))
+ self.assertTrue(models.should_schedule_next(previous, now, schedule,
+ 0))
+
+ def test_backoff(self):
+ now = utcnow()
+ two_hours_ago = now - datetime.timedelta(hours=2)
+ self.assertTrue(models.should_schedule_next(two_hours_ago, now, "3600",
+ 5))
+ self.assertFalse(models.should_schedule_next(two_hours_ago, now,
+ "3600", 10))
class QueryOutdatedQueriesTest(BaseTestCase):
@@ -65,6 +79,12 @@ class QueryOutdatedQueriesTest(BaseTestCase):
self.assertNotIn(query, queries)
+ def test_outdated_queries_skips_unscheduled_queries(self):
+ query = self.factory.create_query(schedule='60')
+ queries = models.Query.outdated_queries()
+
+ self.assertNotIn(query, queries)
+
def test_outdated_queries_works_with_ttl_based_schedule(self):
two_hours_ago = utcnow() - datetime.timedelta(hours=2)
query = self.factory.create_query(schedule="3600")
@@ -92,6 +112,79 @@ class QueryOutdatedQueriesTest(BaseTestCase):
queries = models.Query.outdated_queries()
self.assertIn(query, queries)
+ def test_enqueues_query_only_once(self):
+ """
+ Only one query per data source with the same text will be reported by
+ Query.outdated_queries().
+ """
+ query = self.factory.create_query(schedule="60")
+ query2 = self.factory.create_query(
+ schedule="60", query_text=query.query_text,
+ query_hash=query.query_hash)
+ retrieved_at = utcnow() - datetime.timedelta(minutes=10)
+ query_result = self.factory.create_query_result(
+ retrieved_at=retrieved_at, query_text=query.query_text,
+ query_hash=query.query_hash)
+ query.latest_query_data = query_result
+ query2.latest_query_data = query_result
+
+ self.assertEqual(list(models.Query.outdated_queries()), [query2])
+
+ def test_enqueues_query_with_correct_data_source(self):
+ """
+ Queries from different data sources will be reported by
+ Query.outdated_queries() even if they have the same query text.
+ """
+ query = self.factory.create_query(
+ schedule="60", data_source=self.factory.create_data_source())
+ query2 = self.factory.create_query(
+ schedule="60", query_text=query.query_text,
+ query_hash=query.query_hash)
+ retrieved_at = utcnow() - datetime.timedelta(minutes=10)
+ query_result = self.factory.create_query_result(
+ retrieved_at=retrieved_at, query_text=query.query_text,
+ query_hash=query.query_hash)
+ query.latest_query_data = query_result
+ query2.latest_query_data = query_result
+
+ self.assertEqual(list(models.Query.outdated_queries()),
+ [query2, query])
+
+ def test_enqueues_only_for_relevant_data_source(self):
+ """
+ If multiple queries with the same text exist, only ones that are
+ scheduled to be refreshed are reported by Query.outdated_queries().
+ """
+ query = self.factory.create_query(schedule="60")
+ query2 = self.factory.create_query(
+ schedule="3600", query_text=query.query_text,
+ query_hash=query.query_hash)
+ retrieved_at = utcnow() - datetime.timedelta(minutes=10)
+ query_result = self.factory.create_query_result(
+ retrieved_at=retrieved_at, query_text=query.query_text,
+ query_hash=query.query_hash)
+ query.latest_query_data = query_result
+ query2.latest_query_data = query_result
+
+ self.assertEqual(list(models.Query.outdated_queries()), [query])
+
+ def test_failure_extends_schedule(self):
+ """
+ Execution failures recorded for a query result in exponential backoff
+ for scheduling future execution.
+ """
+ query = self.factory.create_query(schedule="60", schedule_failures=4)
+ retrieved_at = utcnow() - datetime.timedelta(minutes=16)
+ query_result = self.factory.create_query_result(
+ retrieved_at=retrieved_at, query_text=query.query_text,
+ query_hash=query.query_hash)
+ query.latest_query_data = query_result
+
+ self.assertEqual(list(models.Query.outdated_queries()), [])
+
+ query_result.retrieved_at = utcnow() - datetime.timedelta(minutes=17)
+ self.assertEqual(list(models.Query.outdated_queries()), [query])
+
class QueryArchiveTest(BaseTestCase):
def setUp(self):