diff --git a/client/app/visualizations/edit-visualization-dialog.js b/client/app/visualizations/edit-visualization-dialog.js
index c3d61b6b03..1d695de21d 100644
--- a/client/app/visualizations/edit-visualization-dialog.js
+++ b/client/app/visualizations/edit-visualization-dialog.js
@@ -1,6 +1,7 @@
import { map } from 'lodash';
import { copy } from 'angular';
import template from './edit-visualization-dialog.html';
+import './edit-visualization-dialog.css';
const EditVisualizationDialog = {
template,
@@ -21,6 +22,8 @@ const EditVisualizationDialog = {
// Don't allow to change type after creating visualization
this.canChangeType = !(this.visualization && this.visualization.id);
+ this.warning_three_column_groupby = 'You have more than 2 columns in your result set. ' +
+ 'To ensure the chart is accurate, please do one of the following: ' +
+ 'change the SQL query to give 2 result columns (you can CONCAT() columns together if you wish), ' +
+ 'or select column(s) to group by.';
+ this.warning_three_column_stacking = 'You have more than 2 columns in your result set. You may wish to make the Stacking option equal to `Enabled` or `Percent`.';
this.newVisualization = () => ({
type: Visualization.defaultVisualization.type,
@@ -46,6 +49,24 @@ const EditVisualizationDialog = {
}
};
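+ // Assumes columnMapping is a flat object keyed by result column name, so
+ // its key count equals the number of mapped columns.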
+ this.has3plusColumnsFunction = () => {
+ const mapping = this.visualization.options.columnMapping || {};
+ return Object.keys(mapping).length > 2;
+ };
+
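+ // Disable saving a column chart when 3+ columns are mapped, at least one
+ // is still marked 'unused', and none uses the empty ("") mapping.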
+ this.disableSubmit = () => {
+ if (this.visualization.options.globalSeriesType === 'column'
+ && this.has3plusColumnsFunction()
+ && !JSON.stringify(this.visualization.options.columnMapping).includes('"":')
+ && JSON.stringify(this.visualization.options.columnMapping).includes('unused')) {
+ return true;
+ }
+ return false;
+ };
+
this.submit = () => {
if (this.visualization.id) {
Events.record('update', 'visualization', this.visualization.id, { type: this.visualization.type });
diff --git a/docker-compose.yml b/docker-compose.yml
index e01be5bfd1..18ae07bf1c 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -43,3 +43,13 @@ services:
- "15432:5432"
command: "postgres -c fsync=off -c full_page_writes=off -c synchronous_commit=OFF"
restart: unless-stopped
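+ # Flower: a web UI for monitoring the Celery queue, exposed on port 5555.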
+ flower:
+ image: mher/flower:latest
+ command: flower
+ environment:
+ CELERY_BROKER_URL: redis://redis:6379/0
+ CELERY_RESULT_BACKEND: redis://redis:6379/0
+ ports:
+ - "5555:5555"
+ links:
+ - redis
diff --git a/migrations/versions/15041b7085fe_.py b/migrations/versions/15041b7085fe_.py
new file mode 100644
index 0000000000..fcb10aa78f
--- /dev/null
+++ b/migrations/versions/15041b7085fe_.py
@@ -0,0 +1,24 @@
+"""empty message
+
+Revision ID: 15041b7085fe
+Revises: f9571a5ab4f3, 969126bd800f
+Create Date: 2018-02-14 17:52:17.138127
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = '15041b7085fe'
+down_revision = ('f9571a5ab4f3', '969126bd800f')
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ pass
+
+
+def downgrade():
+ pass
diff --git a/migrations/versions/2ba47e9812b1_.py b/migrations/versions/2ba47e9812b1_.py
new file mode 100644
index 0000000000..93d0f59268
--- /dev/null
+++ b/migrations/versions/2ba47e9812b1_.py
@@ -0,0 +1,24 @@
+"""empty message
+
+Revision ID: 2ba47e9812b1
+Revises: 71477dadd6ef, 9d7678c47452
+Create Date: 2018-07-25 16:09:54.769289
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = '2ba47e9812b1'
+down_revision = ('71477dadd6ef', '9d7678c47452', )
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ pass
+
+
+def downgrade():
+ pass
diff --git a/migrations/versions/40384fa03dd1_.py b/migrations/versions/40384fa03dd1_.py
new file mode 100644
index 0000000000..f2c53711c0
--- /dev/null
+++ b/migrations/versions/40384fa03dd1_.py
@@ -0,0 +1,40 @@
+"""Upgrade 'data_scanned' column to form used in upstream
+
+Revision ID: 40384fa03dd1
+Revises: fbc0849e2674
+Create Date: 2018-01-18 18:44:04.917081
+
+"""
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects.postgresql import JSONB
+from sqlalchemy.sql.expression import func, cast
+
+# revision identifiers, used by Alembic.
+revision = '40384fa03dd1'
+down_revision = 'fbc0849e2674'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
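+ # Fold the legacy scalar 'data_scanned' value into the result JSON at
+ # data['metadata']['data_scanned'], skipping empty/sentinel rows.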
+ qr = sa.sql.table('query_results',
+ sa.sql.column('data_scanned', sa.String),
+ sa.sql.column('data', sa.String))
+ op.execute(
+ qr.update()
+ .where(qr.c.data_scanned != '')
+ .where(qr.c.data_scanned != 'error')
+ .where(qr.c.data_scanned != 'N/A')
+ .values(data=cast(
+ func.jsonb_set(cast(qr.c.data, JSONB),
+ '{metadata}',
+ cast('{"data_scanned": ' +
+ qr.c.data_scanned + '}',
+ JSONB)),
+ sa.String)))
+ op.drop_column('query_results', 'data_scanned')
+
+
+def downgrade():
+ op.add_column('query_results', sa.Column('data_scanned', sa.String(length=255), nullable=True))
diff --git a/migrations/versions/58f810489c47_.py b/migrations/versions/58f810489c47_.py
new file mode 100644
index 0000000000..1ed4190288
--- /dev/null
+++ b/migrations/versions/58f810489c47_.py
@@ -0,0 +1,28 @@
+"""add 'data_scanned' column to query_results
+
+Revision ID: 58f810489c47
+Revises: eb2f788f997e
+Create Date: 2017-06-25 21:24:54.942119
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = '58f810489c47'
+down_revision = 'eb2f788f997e'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.add_column('query_results', sa.Column('data_scanned', sa.String(length=255), nullable=True))
+ # ### end Alembic commands ###
+
+
+def downgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_column('query_results', 'data_scanned')
+ # ### end Alembic commands ###
diff --git a/migrations/versions/9d7678c47452_.py b/migrations/versions/9d7678c47452_.py
new file mode 100644
index 0000000000..d351153c87
--- /dev/null
+++ b/migrations/versions/9d7678c47452_.py
@@ -0,0 +1,34 @@
+"""Incremental query results aggregation
+
+Revision ID: 9d7678c47452
+Revises: 15041b7085fe
+Create Date: 2018-03-08 04:36:12.802199
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = '9d7678c47452'
+down_revision = '15041b7085fe'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ op.create_table('query_resultsets',
+ sa.Column('query_id', sa.Integer(), nullable=False),
+ sa.Column('result_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['query_id'], ['queries.id'], ),
+ sa.ForeignKeyConstraint(['result_id'], ['query_results.id'], ),
+ sa.PrimaryKeyConstraint('query_id', 'result_id')
+ )
+ op.add_column(u'queries', sa.Column('schedule_resultset_size', sa.Integer(), nullable=True))
+
+
+def downgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_column(u'queries', 'schedule_resultset_size')
+ op.drop_table('query_resultsets')
+ # ### end Alembic commands ###
diff --git a/migrations/versions/b8a479422596_.py b/migrations/versions/b8a479422596_.py
new file mode 100644
index 0000000000..d838ab0e07
--- /dev/null
+++ b/migrations/versions/b8a479422596_.py
@@ -0,0 +1,74 @@
+"""
+Migrate schedule_until to schedule.until
+
+Revision ID: b8a479422596
+Revises: 73beceabb948
+Create Date: 2018-10-10 14:53:20.042470
+
+"""
+from datetime import datetime
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.sql import table
+
+from redash.models import MutableDict, PseudoJSON
+
+
+# revision identifiers, used by Alembic.
+revision = 'b8a479422596'
+down_revision = '73beceabb948'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ queries = table(
+ 'queries',
+ sa.Column('id', sa.Integer, primary_key=True),
+ sa.Column('schedule', MutableDict.as_mutable(PseudoJSON)),
+ sa.Column('schedule_until', sa.DateTime(True), nullable=True))
+
+ conn = op.get_bind()
+ for query in conn.execute(queries.select()):
+ if query.schedule_until is None:
+ continue
+
+ schedule_json = query.schedule
+ if schedule_json is None:
+ schedule_json = {
+ 'interval': None,
+ 'day_of_week': None,
+ 'time': None
+ }
+ schedule_json['until'] = query.schedule_until.strftime('%Y-%m-%d')
+
+ conn.execute(
+ queries
+ .update()
+ .where(queries.c.id == query.id)
+ .values(schedule=MutableDict(schedule_json)))
+
+ op.drop_column('queries', 'schedule_until')
+
+
+def downgrade():
+ op.add_column('queries', sa.Column('schedule_until', sa.DateTime(True), nullable=True))
+
+ queries = table(
+ 'queries',
+ sa.Column('id', sa.Integer, primary_key=True),
+ sa.Column('schedule', MutableDict.as_mutable(PseudoJSON)),
+ sa.Column('schedule_until', sa.DateTime(True), nullable=True))
+
+ conn = op.get_bind()
+ for query in conn.execute(queries.select()):
+ if query.schedule is None or query.schedule.get('until') is None:
+ continue
+
+ schedule_until = datetime.strptime(query.schedule['until'], '%Y-%m-%d')
+
+ conn.execute(
+ queries
+ .update()
+ .where(queries.c.id == query.id)
+ .values(schedule_until=schedule_until))
diff --git a/migrations/versions/eb2f788f997e_.py b/migrations/versions/eb2f788f997e_.py
new file mode 100644
index 0000000000..71fd2bd5b3
--- /dev/null
+++ b/migrations/versions/eb2f788f997e_.py
@@ -0,0 +1,27 @@
+"""Add 'schedule_until' column to queries.
+
+Revision ID: eb2f788f997e
+Revises: d1eae8b9893e
+Create Date: 2017-03-02 12:20:00.029066
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = 'eb2f788f997e'
+down_revision = 'd1eae8b9893e'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ op.add_column(
+ 'queries',
+ sa.Column('schedule_until', sa.DateTime(timezone=True), nullable=True))
+
+
+def downgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_column('queries', 'schedule_until')
diff --git a/migrations/versions/f9571a5ab4f3_.py b/migrations/versions/f9571a5ab4f3_.py
new file mode 100644
index 0000000000..da1ba02d6d
--- /dev/null
+++ b/migrations/versions/f9571a5ab4f3_.py
@@ -0,0 +1,28 @@
+"""Rename 'image_url' to 'profile_image_url'
+
+ A revision was changed after we pulled it from upstream in m12, so it
+ had to be fixed here.
+
+
+Revision ID: f9571a5ab4f3
+Revises: 40384fa03dd1
+Create Date: 2018-01-18 18:04:07.943843
+"""
+from alembic import op
+
+
+# revision identifiers, used by Alembic.
+revision = 'f9571a5ab4f3'
+down_revision = '40384fa03dd1'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ # Upstream changed the column name in migration revision 7671dca4e604 --
+ # see git revision 62e5e3892603502c5f3a6da277c33c73510b8819
+ op.alter_column('users', 'image_url', new_column_name='profile_image_url')
+
+
+def downgrade():
+ op.alter_column('users', 'profile_image_url', new_column_name='image_url')
diff --git a/migrations/versions/fbc0849e2674_.py b/migrations/versions/fbc0849e2674_.py
new file mode 100644
index 0000000000..6195141496
--- /dev/null
+++ b/migrations/versions/fbc0849e2674_.py
@@ -0,0 +1,26 @@
+"""
+Merge upstream fulltext search
+
+This formerly merged the fulltext search changes (6b5be7e0a0ef, 5ec5c84ba61e)
+with upstream's 7671dca4e604 - but then those changes moved in the revision
+graph to be direct descendants of that upstream revision, so the merge point
+has been moved.
+
+Revision ID: fbc0849e2674
+Revises: 6b5be7e0a0ef, 58f810489c47
+Create Date: 2017-12-12 04:45:34.360587
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'fbc0849e2674'
+down_revision = ('6b5be7e0a0ef', '58f810489c47')
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ pass
+
+
+def downgrade():
+ pass
diff --git a/package.json b/package.json
index 565de8fd4d..3c6557af31 100644
--- a/package.json
+++ b/package.json
@@ -55,6 +55,7 @@
"d3": "^3.5.17",
"d3-cloud": "^1.2.4",
"debug": "^3.1.0",
+ "diff": "^3.3.0",
"font-awesome": "^4.7.0",
"gridstack": "^0.3.0",
"jquery": "^3.2.1",
diff --git a/redash/__init__.py b/redash/__init__.py
index 942550908c..963e6fa6d8 100644
--- a/redash/__init__.py
+++ b/redash/__init__.py
@@ -127,6 +127,11 @@ def create_app(load_admin=True):
app.config['SQLALCHEMY_DATABASE_URI'] = settings.SQLALCHEMY_DATABASE_URI
app.config.update(settings.all_settings())
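+ # Attach a security header to every response: nosniff stops browsers from
+ # MIME-sniffing a response away from its declared Content-Type.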
+ def set_response_headers(response):
+ response.headers['X-Content-Type-Options'] = 'nosniff'
+ return response
+
+ app.after_request(set_response_headers)
provision_app(app)
db.init_app(app)
migrate.init_app(app, db)
diff --git a/redash/authentication/remote_user_auth.py b/redash/authentication/remote_user_auth.py
index 77002e9324..61dfd793d0 100644
--- a/redash/authentication/remote_user_auth.py
+++ b/redash/authentication/remote_user_auth.py
@@ -31,6 +31,21 @@ def login(org_slug=None):
logger.error("Cannot use remote user for login when it's not provided in the request (looked in headers['" + settings.REMOTE_USER_HEADER + "'])")
return redirect(url_for('redash.index', next=next_path, org_slug=org_slug))
+ # Check if there is a header of user groups and if yes
+ # check it against a list of allowed user groups from the settings
+ if settings.REMOTE_GROUPS_ENABLED:
+ remote_groups = settings.set_from_string(
+ request.headers.get(settings.REMOTE_GROUPS_HEADER) or ''
+ )
+ allowed_groups = settings.REMOTE_GROUPS_ALLOWED
+ if not allowed_groups.intersection(remote_groups):
+ logger.error(
+ "User groups provided in the %s header are not "
+ "matching the allowed groups.",
+ settings.REMOTE_GROUPS_HEADER
+ )
+ return redirect(url_for('redash.index', next=next_path))
+
logger.info("Logging in " + email + " via remote user")
user = create_and_login_user(current_org, email, email)
diff --git a/redash/handlers/api.py b/redash/handlers/api.py
index f8ef199857..4518bffcce 100644
--- a/redash/handlers/api.py
+++ b/redash/handlers/api.py
@@ -6,11 +6,11 @@
from redash.handlers.base import org_scoped_rule
from redash.handlers.permissions import ObjectPermissionsListResource, CheckPermissionResource
from redash.handlers.alerts import AlertResource, AlertListResource, AlertSubscriptionListResource, AlertSubscriptionResource
-from redash.handlers.dashboards import DashboardListResource, DashboardResource, DashboardShareResource, PublicDashboardResource
+from redash.handlers.dashboards import DashboardListResource, DashboardResource, DashboardShareResource, PublicDashboardResource
from redash.handlers.data_sources import DataSourceTypeListResource, DataSourceListResource, DataSourceSchemaResource, DataSourceResource, DataSourcePauseResource, DataSourceTestResource
from redash.handlers.events import EventsResource
-from redash.handlers.queries import QueryForkResource, QueryRefreshResource, QueryListResource, QueryRecentResource, QuerySearchResource, QueryResource, MyQueriesResource
-from redash.handlers.query_results import QueryResultListResource, QueryResultResource, JobResource
+from redash.handlers.queries import QueryForkResource, QueryRefreshResource, QueryListResource, QueryRecentResource, QuerySearchResource, QueryResource, MyQueriesResource, QueryVersionListResource, ChangeResource
+from redash.handlers.query_results import QueryResultListResource, QueryResultResource, JobResource, QueryResultSetResource
from redash.handlers.users import UserResource, UserListResource, UserInviteResource, UserResetPasswordResource, UserDisableResource
from redash.handlers.visualizations import VisualizationListResource
from redash.handlers.visualizations import VisualizationResource
@@ -84,6 +84,9 @@ def json_representation(data, code, headers=None):
api.add_org_resource(QueryRefreshResource, '/api/queries/<query_id>/refresh', endpoint='query_refresh')
api.add_org_resource(QueryResource, '/api/queries/<query_id>', endpoint='query')
api.add_org_resource(QueryForkResource, '/api/queries/<query_id>/fork', endpoint='query_fork')
+api.add_org_resource(QueryResultSetResource, '/api/queries/<query_id>/resultset', endpoint='query_aggregate_results')
+api.add_org_resource(QueryVersionListResource, '/api/queries/<query_id>/version', endpoint='query_versions')
+api.add_org_resource(ChangeResource, '/api/changes/<change_id>', endpoint='changes')
api.add_org_resource(ObjectPermissionsListResource, '/api/<object_type>/<object_id>/acl', endpoint='object_permissions')
api.add_org_resource(CheckPermissionResource, '/api/<object_type>/<object_id>/acl/<access_type>', endpoint='check_permissions')
diff --git a/redash/handlers/dashboards.py b/redash/handlers/dashboards.py
index 5739fc872a..ffa47a41b4 100644
--- a/redash/handlers/dashboards.py
+++ b/redash/handlers/dashboards.py
@@ -10,6 +10,7 @@
from redash.permissions import (can_modify, require_admin_or_owner,
require_object_modify_permission,
require_permission)
+from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.exc import StaleDataError
@@ -104,6 +105,7 @@ def post(self):
user=self.current_user,
is_draft=True,
layout='[]')
+ dashboard.record_changes(changed_by=self.current_user)
models.db.session.add(dashboard)
models.db.session.commit()
return serialize_dashboard(dashboard)
@@ -197,7 +199,11 @@ def post(self, dashboard_slug):
try:
models.db.session.commit()
except StaleDataError:
+ models.db.session.rollback()
abort(409)
+ except IntegrityError:
+ models.db.session.rollback()
+ abort(400)
result = serialize_dashboard(dashboard, with_widgets=True, user=self.current_user)
diff --git a/redash/handlers/data_sources.py b/redash/handlers/data_sources.py
index 65532ee509..a13854fd8d 100644
--- a/redash/handlers/data_sources.py
+++ b/redash/handlers/data_sources.py
@@ -55,6 +55,7 @@ def post(self, data_source_id):
try:
models.db.session.commit()
except IntegrityError as e:
+ models.db.session.rollback()
if req['name'] in e.message:
abort(400, message="Data source with the name {} already exists.".format(req['name']))
@@ -130,6 +131,7 @@ def post(self):
models.db.session.commit()
except IntegrityError as e:
+ models.db.session.rollback()
if req['name'] in e.message:
abort(400, message="Data source with the name {} already exists.".format(req['name']))
diff --git a/redash/handlers/queries.py b/redash/handlers/queries.py
index 7f33a52843..db39f872c2 100644
--- a/redash/handlers/queries.py
+++ b/redash/handlers/queries.py
@@ -113,6 +113,8 @@ def post(self):
:json string query: Query text
:>json string query_hash: Hash of query text
:>json string schedule: Schedule interval, in seconds, for repeated execution of this query
+ :<json string api_key: Key for public access to this query's results.
:>json boolean is_archived: Whether this query is displayed in indexes and search results or not.
:>json boolean is_draft: Whether this query is a draft or not
@@ -149,7 +152,10 @@ def post(self):
query_def['data_source'] = data_source
query_def['org'] = self.current_org
query_def['is_draft'] = True
+ if query_def.get('schedule_resultset_size') == 1:
+ query_def['schedule_resultset_size'] = None
query = models.Query.create(**query_def)
+ query.record_changes(changed_by=self.current_user)
models.db.session.add(query)
models.db.session.commit()
@@ -299,6 +305,7 @@ def post(self, query_id):
try:
self.update_model(query, query_def)
+ query.record_changes(self.current_user)
models.db.session.commit()
except StaleDataError:
abort(409)
@@ -403,3 +410,16 @@ def get(self):
for name, count in tags
]
}
+
+
+class QueryVersionListResource(BaseResource):
+ @require_permission('view_query')
+ def get(self, query_id):
+ results = models.Change.list_versions(models.Query.get_by_id(query_id))
+ return [q.to_dict() for q in results]
+
+
+class ChangeResource(BaseResource):
+ @require_permission('view_query')
+ def get(self, change_id):
+ return models.Change.query.get(change_id).to_dict()
diff --git a/redash/handlers/query_results.py b/redash/handlers/query_results.py
index f752cae01a..a453b080c5 100644
--- a/redash/handlers/query_results.py
+++ b/redash/handlers/query_results.py
@@ -137,6 +137,33 @@ def post(self):
ONE_YEAR = 60 * 60 * 24 * 365.25
+class QueryResultSetResource(BaseResource):
+ @require_permission('view_query')
+ def get(self, query_id=None, filetype='json'):
+ query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
+ if not query.schedule_resultset_size:
+ abort(404, message="query does not keep multiple results")
+
+ # Synthesize a result set from the last N results.
+ total = len(query.query_results)
+ offset = max(total - query.schedule_resultset_size, 0)
+ results = [qr.to_dict() for qr in query.query_results[offset:]]
+ if not results:
+ aggregate_result = {}
+ else:
+ # Start a synthetic data set with the data from the first result...
+ aggregate_result = results[0].copy()
+ aggregate_result['data'] = {'columns': results[0]['data']['columns'],
+ 'rows': []}
+ # .. then add each subsequent result set into it.
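+ # All kept results are assumed to share the first result's column schema.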
+ for r in results:
+ aggregate_result['data']['rows'].extend(r['data']['rows'])
+
+ data = json_dumps({'query_result': aggregate_result})
+ headers = {'Content-Type': "application/json"}
+ return make_response(data, 200, headers)
+
+
class QueryResultResource(BaseResource):
@staticmethod
def add_cors_headers(headers):
diff --git a/redash/handlers/users.py b/redash/handlers/users.py
index e7244de4b8..a008da9d57 100644
--- a/redash/handlers/users.py
+++ b/redash/handlers/users.py
@@ -105,6 +105,7 @@ def post(self):
models.db.session.add(user)
models.db.session.commit()
except IntegrityError as e:
+ models.db.session.rollback()
if "email" in e.message:
abort(400, message='Email already taken.')
abort(500)
@@ -199,7 +200,7 @@ def post(self, user_id):
message = "Email already taken."
else:
message = "Error updating record"
-
+ models.db.session.rollback()
abort(400, message=message)
self.record_event({
diff --git a/redash/models.py b/redash/models.py
index d8d1904346..80dab05206 100644
--- a/redash/models.py
+++ b/redash/models.py
@@ -207,10 +207,6 @@ class ChangeTrackingMixin(object):
skipped_fields = ('id', 'created_at', 'updated_at', 'version')
_clean_values = None
- def __init__(self, *a, **kw):
- super(ChangeTrackingMixin, self).__init__(*a, **kw)
- self.record_changes(self.user)
-
def prep_cleanvalues(self):
self.__dict__['_clean_values'] = {}
for attr in inspect(self.__class__).column_attrs:
@@ -221,10 +217,10 @@ def prep_cleanvalues(self):
def __setattr__(self, key, value):
if self._clean_values is None:
self.prep_cleanvalues()
- for attr in inspect(self.__class__).column_attrs:
- col, = attr.columns
- previous = getattr(self, attr.key, None)
- self._clean_values[col.name] = previous
+
+ if key in inspect(self.__class__).column_attrs:
+ previous = getattr(self, key, None)
+ self._clean_values[key] = previous
super(ChangeTrackingMixin, self).__setattr__(key, value)
@@ -235,13 +231,19 @@ def record_changes(self, changed_by):
for attr in inspect(self.__class__).column_attrs:
col, = attr.columns
if attr.key not in self.skipped_fields:
- changes[col.name] = {'previous': self._clean_values[col.name],
- 'current': getattr(self, attr.key)}
+ prev = self._clean_values[col.name]
+ current = getattr(self, attr.key)
+ if prev != current:
+ changes[col.name] = {'previous': prev, 'current': current}
- db.session.add(Change(object=self,
- object_version=self.version,
- user=changed_by,
- change=changes))
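+ # Only bump the version and write a Change row when at least one
+ # tracked column actually changed.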
+ if changes:
+ self.version = (self.version or 0) + 1
+ change = Change(object=self,
+ object_version=self.version,
+ user=changed_by,
+ change=changes)
+ db.session.add(change)
+ return change
class BelongsToOrgMixin(object):
@@ -484,6 +486,8 @@ def to_dict(self, with_api_key=False):
if with_api_key:
d['api_key'] = self.api_key
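+ # Row holding this user's most recent event timestamp, or None if the
+ # user has no recorded events.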
+ d['last_active_at'] = Event.query.filter(Event.user_id == self.id).with_entities(Event.created_at).order_by(Event.created_at.desc()).first()
+
return d
def is_api_user(self):
@@ -682,6 +686,8 @@ def add_group(self, group, view_only=False):
db.session.add(dsg)
+ setattr(self, 'data_source_groups', dsg)
return dsg
+
def remove_group(self, group):
db.session.query(DataSourceGroup).filter(
DataSourceGroup.group == group,
@@ -754,9 +760,11 @@ def to_dict(self):
def unused(cls, days=7):
age_threshold = datetime.datetime.now() - datetime.timedelta(days=days)
- unused_results = (db.session.query(QueryResult.id).filter(
- Query.id == None, QueryResult.retrieved_at < age_threshold)
- .outerjoin(Query))
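+ # Results referenced through query_resultsets back aggregate result
+ # sets, so they are excluded from cleanup.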
+ unused_results = db.session.query(QueryResult.id).filter(
+ QueryResult.retrieved_at < age_threshold,
+ Query.id == None,
+ ~QueryResultSet.query.filter(QueryResultSet.result_id == QueryResult.id).exists()
+ ).outerjoin(Query)
return unused_results
@@ -799,6 +807,8 @@ def store_result(cls, org, data_source, query_hash, query, data, run_time, retri
q.latest_query_data = query_result
q.skip_updated_at = True
db.session.add(q)
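+ # Queries that keep a rolling window of results also link each new
+ # result through the query_resultsets bridge table.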
+ if q.schedule_resultset_size > 0:
+ q.query_results.append(query_result)
query_ids = [q.id for q in queries]
logging.info("Updated %s queries with result (%s).", len(query_ids), query_hash)
@@ -872,13 +882,14 @@ def should_schedule_next(previous_iteration, now, schedule, failures):
@python_2_unicode_compatible
class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):
id = Column(db.Integer, primary_key=True)
- version = Column(db.Integer, default=1)
+ version = Column(db.Integer, default=0)
org_id = Column(db.Integer, db.ForeignKey('organizations.id'))
org = db.relationship(Organization, backref="queries")
data_source_id = Column(db.Integer, db.ForeignKey("data_sources.id"), nullable=True)
data_source = db.relationship(DataSource, backref='queries')
latest_query_data_id = Column(db.Integer, db.ForeignKey("query_results.id"), nullable=True)
latest_query_data = db.relationship(QueryResult)
+ query_results = db.relationship("QueryResult", secondary="query_resultsets")
name = Column(db.String(255))
description = Column(db.String(4096), nullable=True)
query_text = Column("query", db.Text)
@@ -893,6 +904,7 @@ class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):
is_draft = Column(db.Boolean, default=True, index=True)
schedule = Column(db.String(10), nullable=True)
schedule_failures = Column(db.Integer, default=0)
+ schedule_resultset_size = Column(db.Integer, nullable=True)
visualizations = db.relationship("Visualization", cascade="all, delete-orphan")
options = Column(MutableDict.as_mutable(PseudoJSON), default={})
search_vector = Column(TSVectorType('id', 'name', 'description', 'query',
@@ -1016,7 +1028,9 @@ def by_user(cls, user):
def outdated_queries(cls):
queries = (db.session.query(Query)
.options(joinedload(Query.latest_query_data).load_only('retrieved_at'))
- .filter(Query.schedule != None)
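+ # Skip queries whose schedule_until end date has already passed.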
+ .filter(Query.schedule != None,
+ (Query.schedule_until == None) |
+ (Query.schedule_until > db.func.now()))
.order_by(Query.id))
now = utils.utcnow()
@@ -1043,6 +1057,37 @@ def search(cls, term, group_ids, user_id=None, include_drafts=False, limit=None)
# sort the result using the weight as defined in the search vector column
return all_queries.search(term, sort=True).limit(limit)
+ @classmethod
+ def delete_stale_resultsets(cls):
+ delete_count = 0
+ texts = [c[0] for c in db.session.query(Query.query_text)
+ .filter(Query.schedule_resultset_size != None).distinct()]
+ for text in texts:
+ queries = (Query.query.filter(Query.query_text == text,
+ Query.schedule_resultset_size != None)
+ .order_by(Query.schedule_resultset_size.desc()))
+ # Multiple queries with the same text may request multiple result sets
+ # be kept. We start with the one that keeps the most, and delete both
+ # the unneeded bridge rows and result sets.
+ first_query = queries.first()
+ if first_query is not None and first_query.schedule_resultset_size:
+ resultsets = QueryResultSet.query.filter(QueryResultSet.query_rel == first_query).order_by(QueryResultSet.result_id)
+ resultset_count = resultsets.count()
+ if resultset_count > first_query.schedule_resultset_size:
+ n_to_delete = resultset_count - first_query.schedule_resultset_size
+ r_ids = [r.result_id for r in resultsets][:n_to_delete]
+ QueryResultSet.query.filter(QueryResultSet.result_id.in_(r_ids)).delete(synchronize_session=False)
+ delete_count += QueryResult.query.filter(QueryResult.id.in_(r_ids)).delete(synchronize_session=False)
+ # By this point there are no stale result sets left.
+ # Delete unneeded bridge rows for the remaining queries.
+ for q in queries[1:]:
+ resultsets = db.session.query(QueryResultSet.result_id).filter(QueryResultSet.query_rel == q).order_by(QueryResultSet.result_id)
+ n_to_delete = resultsets.count() - q.schedule_resultset_size
+ if n_to_delete > 0:
+ stale_r = QueryResultSet.query.filter(QueryResultSet.result_id.in_(resultsets.limit(n_to_delete).subquery()))
+ stale_r.delete(synchronize_session=False)
+ return delete_count
+
@classmethod
def search_by_user(cls, term, user, limit=None):
return cls.by_user(user).search(term, sort=True).limit(limit)
@@ -1081,6 +1126,7 @@ def fork(self, user):
kwargs = {a: getattr(self, a) for a in forked_list}
forked_query = Query.create(name=u'Copy of (#{}) {}'.format(self.id, self.name),
user=user, **kwargs)
+ forked_query.record_changes(changed_by=user)
for v in self.visualizations:
if v.type == 'TABLE':
@@ -1123,6 +1169,16 @@ def __repr__(self):
return '<Query %s: "%s">' % (self.id, self.name or 'untitled')
+class QueryResultSet(db.Model):
+ query_id = Column(db.Integer, db.ForeignKey("queries.id"),
+ primary_key=True)
+ query_rel = db.relationship(Query)
+ result_id = Column(db.Integer, db.ForeignKey("query_results.id"),
+ primary_key=True)
+ result = db.relationship(QueryResult)
+ __tablename__ = 'query_resultsets'
+
+
@vectorizer(db.Integer)
def integer_vectorizer(column):
return db.func.cast(column, db.Text)
@@ -1254,7 +1310,6 @@ def to_dict(self, full=True):
'id': self.id,
'object_id': self.object_id,
'object_type': self.object_type,
- 'change_type': self.change_type,
'object_version': self.object_version,
'change': self.change,
'created_at': self.created_at
@@ -1274,6 +1329,12 @@ def last_change(cls, obj):
cls.object_type == obj.__class__.__tablename__).order_by(
cls.object_version.desc()).first()
+ @classmethod
+ def list_versions(cls, query):
+ return cls.query.filter(
+ cls.object_id == query.id,
+ cls.object_type == 'queries')
+
class Alert(TimestampMixin, db.Model):
UNKNOWN_STATE = 'unknown'
diff --git a/redash/query_runner/__init__.py b/redash/query_runner/__init__.py
index 73d6c49368..411bb65aea 100644
--- a/redash/query_runner/__init__.py
+++ b/redash/query_runner/__init__.py
@@ -51,6 +51,7 @@ class NotSupported(Exception):
class BaseQueryRunner(object):
noop_query = None
+ configuration_properties = None
def __init__(self, configuration):
self.syntax = 'sql'
@@ -76,6 +77,12 @@ def annotate_query(cls):
def configuration_schema(cls):
return {}
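+ # Runners opt in by defining a class-level configuration_properties
+ # dict; runners that still build their schema inline raise here.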
+ @classmethod
+ def add_configuration_property(cls, property, value):
+ if cls.configuration_properties is None:
+ raise NotImplementedError()
+ cls.configuration_properties[property] = value
+
def test_connection(self):
if self.noop_query is None:
raise NotImplementedError()
@@ -150,25 +157,36 @@ class BaseHTTPQueryRunner(BaseQueryRunner):
url_title = 'URL base path'
username_title = 'HTTP Basic Auth Username'
password_title = 'HTTP Basic Auth Password'
+ configuration_properties = {
+ 'url': {
+ 'type': 'string',
+ 'title': url_title,
+ },
+ 'username': {
+ 'type': 'string',
+ 'title': username_title,
+ },
+ 'password': {
+ 'type': 'string',
+ 'title': password_title,
+ },
+ "toggle_table_string": {
+ "type": "string",
+ "title": "Toggle Table String",
+ "default": "_v",
+ "info": (
+ "This string will be used to toggle visibility of "
+ "tables in the schema browser when editing a query "
+ "in order to remove non-useful tables from sight."
+ ),
+ },
+ }
@classmethod
def configuration_schema(cls):
schema = {
'type': 'object',
- 'properties': {
- 'url': {
- 'type': 'string',
- 'title': cls.url_title,
- },
- 'username': {
- 'type': 'string',
- 'title': cls.username_title,
- },
- 'password': {
- 'type': 'string',
- 'title': cls.password_title,
- },
- },
+ 'properties': cls.configuration_properties,
'secret': ['password']
}
diff --git a/redash/query_runner/athena.py b/redash/query_runner/athena.py
index e7f1bb4ad5..b9a9944956 100644
--- a/redash/query_runner/athena.py
+++ b/redash/query_runner/athena.py
@@ -78,6 +78,12 @@ def configuration_schema(cls):
'type': 'boolean',
'title': 'Use Glue Data Catalog',
},
+ "toggle_table_string": {
+ "type": "string",
+ "title": "Toggle Table String",
+ "default": "_v",
+ "info": "This string will be used to toggle visibility of tables in the schema browser when editing a query in order to remove non-useful tables from sight."
+ }
},
'required': ['region', 's3_staging_dir'],
'order': ['region', 'aws_access_key', 'aws_secret_key', 's3_staging_dir', 'schema'],
@@ -141,9 +147,10 @@ def get_schema(self, get_stats=False):
schema = {}
query = """
- SELECT table_schema, table_name, column_name
+ SELECT table_schema, table_name, column_name, data_type as column_type, comment as extra_info
FROM information_schema.columns
WHERE table_schema NOT IN ('information_schema')
+ ORDER BY 1, 5 DESC
"""
results, error = self.run_query(query, None)
@@ -155,7 +162,16 @@ def get_schema(self, get_stats=False):
table_name = '{0}.{1}'.format(row['table_schema'], row['table_name'])
if table_name not in schema:
schema[table_name] = {'name': table_name, 'columns': []}
- schema[table_name]['columns'].append(row['column_name'])
+
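+ # Annotate each column: partition keys get a '[P] ' prefix, common
+ # scalar types are shown inline, and complex types are collapsed.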
+ if row['extra_info'] == 'Partition Key':
+ schema[table_name]['columns'].append('[P] ' + row['column_name'] + ' (' + row['column_type'] + ')')
+ elif row['column_type'] in ('integer', 'varchar', 'timestamp', 'boolean', 'bigint'):
+ schema[table_name]['columns'].append(row['column_name'] + ' (' + row['column_type'] + ')')
+ elif row['column_type'][0:3] in ('row', 'map', 'arr'):
+ schema[table_name]['columns'].append(row['column_name'] + ' (row or map or array)')
+ else:
+ schema[table_name]['columns'].append(row['column_name'])
+
return schema.values()
diff --git a/redash/query_runner/axibase_tsd.py b/redash/query_runner/axibase_tsd.py
index 78f533fdbf..4514f886d5 100644
--- a/redash/query_runner/axibase_tsd.py
+++ b/redash/query_runner/axibase_tsd.py
@@ -132,6 +132,12 @@ def configuration_schema(cls):
'trust_certificate': {
'type': 'boolean',
'title': 'Trust SSL Certificate'
+ },
+ "toggle_table_string": {
+ "type": "string",
+ "title": "Toggle Table String",
+ "default": "_v",
+ "info": "This string will be used to toggle visibility of tables in the schema browser when editing a query in order to remove non-useful tables from sight."
}
},
'required': ['username', 'password', 'hostname', 'protocol', 'port'],
diff --git a/redash/query_runner/big_query.py b/redash/query_runner/big_query.py
index 594d79d203..f4eeaf47b8 100644
--- a/redash/query_runner/big_query.py
+++ b/redash/query_runner/big_query.py
@@ -82,6 +82,47 @@ def _get_query_results(jobs, project_id, location, job_id, start_index):
class BigQuery(BaseQueryRunner):
noop_query = "SELECT 1"
+ configuration_properties = {
+ 'projectId': {
+ 'type': 'string',
+ 'title': 'Project ID'
+ },
+ 'jsonKeyFile': {
+ "type": "string",
+ 'title': 'JSON Key File'
+ },
+ 'totalMBytesProcessedLimit': {
+ "type": "number",
+ 'title': 'Scanned Data Limit (MB)'
+ },
+ 'userDefinedFunctionResourceUri': {
+ "type": "string",
+ 'title': 'UDF Source URIs (e.g. gs://bucket/date_utils.js, gs://bucket/string_utils.js)'
+ },
+ 'useStandardSql': {
+ "type": "boolean",
+ 'title': "Use Standard SQL (Beta)",
+ },
+ 'location': {
+ "type": "string",
+ "title": "Processing Location",
+ "default": "US",
+ },
+ 'loadSchema': {
+ "type": "boolean",
+ "title": "Load Schema"
+ },
+ 'maximumBillingTier': {
+ "type": "number",
+ "title": "Maximum Billing Tier"
+ },
+ "toggle_table_string": {
+ "type": "string",
+ "title": "Toggle Table String",
+ "default": "_v",
+ "info": "This string will be used to toggle visibility of tables in the schema browser when editing a query in order to remove non-useful tables from sight."
+ },
+ }
@classmethod
def enabled(cls):
@@ -91,41 +132,7 @@ def enabled(cls):
def configuration_schema(cls):
return {
'type': 'object',
- 'properties': {
- 'projectId': {
- 'type': 'string',
- 'title': 'Project ID'
- },
- 'jsonKeyFile': {
- "type": "string",
- 'title': 'JSON Key File'
- },
- 'totalMBytesProcessedLimit': {
- "type": "number",
- 'title': 'Scanned Data Limit (MB)'
- },
- 'userDefinedFunctionResourceUri': {
- "type": "string",
- 'title': 'UDF Source URIs (i.e. gs://bucket/date_utils.js, gs://bucket/string_utils.js )'
- },
- 'useStandardSql': {
- "type": "boolean",
- 'title': "Use Standard SQL",
- "default": True,
- },
- 'location': {
- "type": "string",
- "title": "Processing Location",
- },
- 'loadSchema': {
- "type": "boolean",
- "title": "Load Schema"
- },
- 'maximumBillingTier': {
- "type": "number",
- "title": "Maximum Billing Tier"
- }
- },
+ 'properties': cls.configuration_properties,
'required': ['jsonKeyFile', 'projectId'],
"order": ['projectId', 'jsonKeyFile', 'loadSchema', 'useStandardSql', 'location', 'totalMBytesProcessedLimit', 'maximumBillingTier', 'userDefinedFunctionResourceUri'],
'secret': ['jsonKeyFile']
diff --git a/redash/query_runner/cass.py b/redash/query_runner/cass.py
index 0f0c72ff66..e59f8d0ce2 100644
--- a/redash/query_runner/cass.py
+++ b/redash/query_runner/cass.py
@@ -23,6 +23,43 @@ def default(self, o):
class Cassandra(BaseQueryRunner):
noop_query = "SELECT dateof(now()) FROM system.local"
+ configuration_properties = {
+ 'host': {
+ 'type': 'string',
+ },
+ 'port': {
+ 'type': 'number',
+ 'default': 9042,
+ },
+ 'keyspace': {
+ 'type': 'string',
+ 'title': 'Keyspace name'
+ },
+ 'username': {
+ 'type': 'string',
+ 'title': 'Username'
+ },
+ 'password': {
+ 'type': 'string',
+ 'title': 'Password'
+ },
+ 'protocol': {
+ 'type': 'number',
+ 'title': 'Protocol Version',
+ 'default': 3
+ },
+ 'timeout': {
+ 'type': 'number',
+ 'title': 'Timeout',
+ 'default': 10
+ },
+ "toggle_table_string": {
+ "type": "string",
+ "title": "Toggle Table String",
+ "default": "_v",
+ "info": "This string will be used to toggle visibility of tables in the schema browser when editing a query in order to remove non-useful tables from sight."
+ },
+ }
@classmethod
def enabled(cls):
@@ -32,37 +69,7 @@ def enabled(cls):
def configuration_schema(cls):
return {
'type': 'object',
- 'properties': {
- 'host': {
- 'type': 'string',
- },
- 'port': {
- 'type': 'number',
- 'default': 9042,
- },
- 'keyspace': {
- 'type': 'string',
- 'title': 'Keyspace name'
- },
- 'username': {
- 'type': 'string',
- 'title': 'Username'
- },
- 'password': {
- 'type': 'string',
- 'title': 'Password'
- },
- 'protocol': {
- 'type': 'number',
- 'title': 'Protocol Version',
- 'default': 3
- },
- 'timeout': {
- 'type': 'number',
- 'title': 'Timeout',
- 'default': 10
- }
- },
+ 'properties': cls.configuration_properties,
'required': ['keyspace', 'host']
}
diff --git a/redash/query_runner/clickhouse.py b/redash/query_runner/clickhouse.py
index a51328531a..86d9e0c7b3 100644
--- a/redash/query_runner/clickhouse.py
+++ b/redash/query_runner/clickhouse.py
@@ -36,6 +36,12 @@ def configuration_schema(cls):
"type": "number",
"title": "Request Timeout",
"default": 30
+ },
+ "toggle_table_string": {
+ "type": "string",
+ "title": "Toggle Table String",
+ "default": "_v",
+ "info": "This string will be used to toggle visibility of tables in the schema browser when editing a query in order to remove non-useful tables from sight."
}
},
"required": ["dbname"],
diff --git a/redash/query_runner/db2.py b/redash/query_runner/db2.py
index 3253cee0b9..7413189459 100644
--- a/redash/query_runner/db2.py
+++ b/redash/query_runner/db2.py
@@ -3,7 +3,7 @@
import logging
from redash.query_runner import *
-from redash.utils import JSONEncoder
+from redash.utils import json_dumps
logger = logging.getLogger(__name__)
@@ -129,7 +129,7 @@ def run_query(self, query, user):
data = {'columns': columns, 'rows': rows}
error = None
- json_data = json.dumps(data, cls=JSONEncoder)
+ json_data = json_dumps(data)
else:
error = 'Query completed but it returned no data.'
json_data = None
diff --git a/redash/query_runner/dynamodb_sql.py b/redash/query_runner/dynamodb_sql.py
index 5f7c8f09d8..3623e6a6f0 100644
--- a/redash/query_runner/dynamodb_sql.py
+++ b/redash/query_runner/dynamodb_sql.py
@@ -32,22 +32,31 @@
class DynamoDBSQL(BaseSQLQueryRunner):
+ noop_query = "SELECT 1"
+ configuration_properties = {
+ "region": {
+ "type": "string",
+ "default": "us-east-1"
+ },
+ "access_key": {
+ "type": "string",
+ },
+ "secret_key": {
+ "type": "string",
+ },
+ "toggle_table_string": {
+ "type": "string",
+ "title": "Toggle Table String",
+ "default": "_v",
+ "info": "This string will be used to toggle visibility of tables in the schema browser when editing a query in order to remove non-useful tables from sight."
+ },
+ }
+
@classmethod
def configuration_schema(cls):
return {
"type": "object",
- "properties": {
- "region": {
- "type": "string",
- "default": "us-east-1"
- },
- "access_key": {
- "type": "string",
- },
- "secret_key": {
- "type": "string",
- }
- },
+ "properties": cls.configuration_properties,
"required": ["access_key", "secret_key"],
"secret": ["secret_key"]
}
diff --git a/redash/query_runner/elasticsearch.py b/redash/query_runner/elasticsearch.py
index e9327e504a..9b7817f2fb 100644
--- a/redash/query_runner/elasticsearch.py
+++ b/redash/query_runner/elasticsearch.py
@@ -45,25 +45,32 @@
class BaseElasticSearch(BaseQueryRunner):
DEBUG_ENABLED = False
+ configuration_properties = {
+ 'server': {
+ 'type': 'string',
+ 'title': 'Base URL'
+ },
+ 'basic_auth_user': {
+ 'type': 'string',
+ 'title': 'Basic Auth User'
+ },
+ 'basic_auth_password': {
+ 'type': 'string',
+ 'title': 'Basic Auth Password'
+ },
+ "toggle_table_string": {
+ "type": "string",
+ "title": "Toggle Table String",
+ "default": "_v",
+ "info": "This string will be used to toggle visibility of tables in the schema browser when editing a query in order to remove non-useful tables from sight."
+ },
+ }
@classmethod
def configuration_schema(cls):
return {
'type': 'object',
- 'properties': {
- 'server': {
- 'type': 'string',
- 'title': 'Base URL'
- },
- 'basic_auth_user': {
- 'type': 'string',
- 'title': 'Basic Auth User'
- },
- 'basic_auth_password': {
- 'type': 'string',
- 'title': 'Basic Auth Password'
- }
- },
+ 'properties': cls.configuration_properties,
"secret": ["basic_auth_password"],
"required": ["server"]
}
diff --git a/redash/query_runner/google_analytics.py b/redash/query_runner/google_analytics.py
index 71be522015..117205a763 100644
--- a/redash/query_runner/google_analytics.py
+++ b/redash/query_runner/google_analytics.py
@@ -102,6 +102,12 @@ def configuration_schema(cls):
'jsonKeyFile': {
"type": "string",
'title': 'JSON Key File'
+ },
+ "toggle_table_string": {
+ "type": "string",
+ "title": "Toggle Table String",
+ "default": "_v",
+ "info": "This string will be used to toggle visibility of tables in the schema browser when editing a query in order to remove non-useful tables from sight."
}
},
'required': ['jsonKeyFile'],
diff --git a/redash/query_runner/google_spreadsheets.py b/redash/query_runner/google_spreadsheets.py
index 620fe770a1..0af6fb484b 100644
--- a/redash/query_runner/google_spreadsheets.py
+++ b/redash/query_runner/google_spreadsheets.py
@@ -147,6 +147,18 @@ def request(self, *args, **kwargs):
class GoogleSpreadsheet(BaseQueryRunner):
+ configuration_properties = {
+ 'jsonKeyFile': {
+ "type": "string",
+ 'title': 'JSON Key File'
+ },
+ "toggle_table_string": {
+ "type": "string",
+ "title": "Toggle Table String",
+ "default": "_v",
+ "info": "This string will be used to toggle visibility of tables in the schema browser when editing a query in order to remove non-useful tables from sight."
+ },
+ }
@classmethod
def annotate_query(cls):
@@ -164,12 +176,7 @@ def enabled(cls):
def configuration_schema(cls):
return {
'type': 'object',
- 'properties': {
- 'jsonKeyFile': {
- "type": "string",
- 'title': 'JSON Key File'
- }
- },
+ 'properties': cls.configuration_properties,
'required': ['jsonKeyFile'],
'secret': ['jsonKeyFile']
}
diff --git a/redash/query_runner/graphite.py b/redash/query_runner/graphite.py
index 6b394e81ec..98e5ddd514 100644
--- a/redash/query_runner/graphite.py
+++ b/redash/query_runner/graphite.py
@@ -43,7 +43,13 @@ def configuration_schema(cls):
'verify': {
'type': 'boolean',
'title': 'Verify SSL certificate'
- }
+ },
+ "toggle_table_string": {
+ "type": "string",
+ "title": "Toggle Table String",
+ "default": "_v",
+ "info": "This string will be used to toggle visibility of tables in the schema browser when editing a query in order to remove non-useful tables from sight."
+ },
},
'required': ['url'],
'secret': ['password']
diff --git a/redash/query_runner/hive_ds.py b/redash/query_runner/hive_ds.py
index b3c78bf431..cab6ff1d96 100644
--- a/redash/query_runner/hive_ds.py
+++ b/redash/query_runner/hive_ds.py
@@ -37,25 +37,32 @@
class Hive(BaseSQLQueryRunner):
noop_query = "SELECT 1"
+ configuration_properties = {
+ "host": {
+ "type": "string"
+ },
+ "port": {
+ "type": "number"
+ },
+ "database": {
+ "type": "string"
+ },
+ "username": {
+ "type": "string"
+ },
+ "toggle_table_string": {
+ "type": "string",
+ "title": "Toggle Table String",
+ "default": "_v",
+ "info": "This string will be used to toggle visibility of tables in the schema browser when editing a query in order to remove non-useful tables from sight."
+ },
+ }
@classmethod
def configuration_schema(cls):
return {
"type": "object",
- "properties": {
- "host": {
- "type": "string"
- },
- "port": {
- "type": "number"
- },
- "database": {
- "type": "string"
- },
- "username": {
- "type": "string"
- },
- },
+ "properties": cls.configuration_properties,
"order": ["host", "port", "database", "username"],
"required": ["host"]
}
@@ -98,14 +105,14 @@ def _get_connection(self):
database=self.configuration.get('database', 'default'),
username=self.configuration.get('username', None),
)
-
+
return connection
def run_query(self, query, user):
connection = None
try:
- connection = self._get_connection()
+ connection = self._get_connection()
cursor = connection.cursor()
cursor.execute(query)
@@ -214,7 +221,7 @@ def _get_connection(self):
# create connection
connection = hive.connect(thrift_transport=transport)
-
+
return connection
diff --git a/redash/query_runner/impala_ds.py b/redash/query_runner/impala_ds.py
index 5b8b590777..111d39b4ae 100644
--- a/redash/query_runner/impala_ds.py
+++ b/redash/query_runner/impala_ds.py
@@ -34,38 +34,45 @@
class Impala(BaseSQLQueryRunner):
noop_query = "show schemas"
+ configuration_properties = {
+ "host": {
+ "type": "string"
+ },
+ "port": {
+ "type": "number"
+ },
+ "protocol": {
+ "type": "string",
+ "title": "Please specify beeswax or hiveserver2"
+ },
+ "database": {
+ "type": "string"
+ },
+ "use_ldap": {
+ "type": "boolean"
+ },
+ "ldap_user": {
+ "type": "string"
+ },
+ "ldap_password": {
+ "type": "string"
+ },
+ "timeout": {
+ "type": "number"
+ },
+ "toggle_table_string": {
+ "type": "string",
+ "title": "Toggle Table String",
+ "default": "_v",
+ "info": "This string will be used to toggle visibility of tables in the schema browser when editing a query in order to remove non-useful tables from sight."
+ },
+ }
@classmethod
def configuration_schema(cls):
return {
"type": "object",
- "properties": {
- "host": {
- "type": "string"
- },
- "port": {
- "type": "number"
- },
- "protocol": {
- "type": "string",
- "title": "Please specify beeswax or hiveserver2"
- },
- "database": {
- "type": "string"
- },
- "use_ldap": {
- "type": "boolean"
- },
- "ldap_user": {
- "type": "string"
- },
- "ldap_password": {
- "type": "string"
- },
- "timeout": {
- "type": "number"
- }
- },
+ "properties": cls.configuration_properties,
"required": ["host"],
"secret": ["ldap_password"]
}
diff --git a/redash/query_runner/influx_db.py b/redash/query_runner/influx_db.py
index 47f3a4201f..d3351312c1 100644
--- a/redash/query_runner/influx_db.py
+++ b/redash/query_runner/influx_db.py
@@ -49,16 +49,23 @@ def _transform_result(results):
class InfluxDB(BaseQueryRunner):
noop_query = "show measurements limit 1"
+ configuration_properties = {
+ 'url': {
+ 'type': 'string'
+ },
+ "toggle_table_string": {
+ "type": "string",
+ "title": "Toggle Table String",
+ "default": "_v",
+ "info": "This string will be used to toggle visibility of tables in the schema browser when editing a query in order to remove non-useful tables from sight."
+ },
+ }
@classmethod
def configuration_schema(cls):
return {
'type': 'object',
- 'properties': {
- 'url': {
- 'type': 'string'
- }
- },
+ 'properties': cls.configuration_properties,
'required': ['url']
}
diff --git a/redash/query_runner/kylin.py b/redash/query_runner/kylin.py
index a9f5d1fdb4..261fa3f5e0 100644
--- a/redash/query_runner/kylin.py
+++ b/redash/query_runner/kylin.py
@@ -1,12 +1,11 @@
import os
-import json
import logging
import requests
from requests.auth import HTTPBasicAuth
from redash import settings
from redash.query_runner import *
-from redash.utils import JSONEncoder
+from redash.utils import json_dumps
logger = logging.getLogger(__name__)
@@ -102,7 +101,7 @@ def run_query(self, query, user):
columns = self.get_columns(data['columnMetas'])
rows = self.get_rows(columns, data['results'])
- return json.dumps({'columns': columns, 'rows': rows}), None
+ return json_dumps({'columns': columns, 'rows': rows}), None
def get_schema(self, get_stats=False):
url = self.configuration['url']
diff --git a/redash/query_runner/memsql_ds.py b/redash/query_runner/memsql_ds.py
index bbec2836d4..b573b529ff 100644
--- a/redash/query_runner/memsql_ds.py
+++ b/redash/query_runner/memsql_ds.py
@@ -55,6 +55,12 @@ def configuration_schema(cls):
},
"password": {
"type": "string"
+ },
+ "toggle_table_string": {
+ "type": "string",
+ "title": "Toggle Table String",
+ "default": "_v",
+ "info": "This string will be used to toggle visibility of tables in the schema browser when editing a query in order to remove non-useful tables from sight."
}
},
diff --git a/redash/query_runner/mongodb.py b/redash/query_runner/mongodb.py
index bfe40f485e..72db989843 100644
--- a/redash/query_runner/mongodb.py
+++ b/redash/query_runner/mongodb.py
@@ -117,24 +117,32 @@ def parse_results(results):
class MongoDB(BaseQueryRunner):
+ configuration_properties = {
+ 'connectionString': {
+ 'type': 'string',
+ 'title': 'Connection String'
+ },
+ 'dbName': {
+ 'type': 'string',
+ 'title': "Database Name"
+ },
+ 'replicaSetName': {
+ 'type': 'string',
+ 'title': 'Replica Set Name'
+ },
+ "toggle_table_string": {
+ "type": "string",
+ "title": "Toggle Table String",
+ "default": "_v",
+ "info": "This string will be used to toggle visibility of tables in the schema browser when editing a query in order to remove non-useful tables from sight."
+ },
+ }
+
@classmethod
def configuration_schema(cls):
return {
'type': 'object',
- 'properties': {
- 'connectionString': {
- 'type': 'string',
- 'title': 'Connection String'
- },
- 'dbName': {
- 'type': 'string',
- 'title': "Database Name"
- },
- 'replicaSetName': {
- 'type': 'string',
- 'title': 'Replica Set Name'
- },
- },
+ 'properties': cls.configuration_properties,
'required': ['connectionString', 'dbName']
}
diff --git a/redash/query_runner/mssql.py b/redash/query_runner/mssql.py
index 007aa825b6..b2c188d112 100644
--- a/redash/query_runner/mssql.py
+++ b/redash/query_runner/mssql.py
@@ -27,41 +27,48 @@
class SqlServer(BaseSQLQueryRunner):
noop_query = "SELECT 1"
+ configuration_properties = {
+ "user": {
+ "type": "string"
+ },
+ "password": {
+ "type": "string"
+ },
+ "server": {
+ "type": "string",
+ "default": "127.0.0.1"
+ },
+ "port": {
+ "type": "number",
+ "default": 1433
+ },
+ "tds_version": {
+ "type": "string",
+ "default": "7.0",
+ "title": "TDS Version"
+ },
+ "charset": {
+ "type": "string",
+ "default": "UTF-8",
+ "title": "Character Set"
+ },
+ "db": {
+ "type": "string",
+ "title": "Database Name"
+ },
+ "toggle_table_string": {
+ "type": "string",
+ "title": "Toggle Table String",
+ "default": "_v",
+ "info": "This string will be used to toggle visibility of tables in the schema browser when editing a query in order to remove non-useful tables from sight."
+ },
+ }
@classmethod
def configuration_schema(cls):
return {
"type": "object",
- "properties": {
- "user": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "server": {
- "type": "string",
- "default": "127.0.0.1"
- },
- "port": {
- "type": "number",
- "default": 1433
- },
- "tds_version": {
- "type": "string",
- "default": "7.0",
- "title": "TDS Version"
- },
- "charset": {
- "type": "string",
- "default": "UTF-8",
- "title": "Character Set"
- },
- "db": {
- "type": "string",
- "title": "Database Name"
- }
- },
+ "properties": cls.configuration_properties,
"required": ["db"],
"secret": ["password"]
}
diff --git a/redash/query_runner/mysql.py b/redash/query_runner/mysql.py
index bfd6e7198e..18ce41f72a 100644
--- a/redash/query_runner/mysql.py
+++ b/redash/query_runner/mysql.py
@@ -28,6 +28,33 @@
class Mysql(BaseSQLQueryRunner):
noop_query = "SELECT 1"
+ configuration_properties = {
+ 'host': {
+ 'type': 'string',
+ 'default': '127.0.0.1'
+ },
+ 'user': {
+ 'type': 'string'
+ },
+ 'passwd': {
+ 'type': 'string',
+ 'title': 'Password'
+ },
+ 'db': {
+ 'type': 'string',
+ 'title': 'Database name'
+ },
+ 'port': {
+ 'type': 'number',
+ 'default': 3306,
+ },
+ "toggle_table_string": {
+ "type": "string",
+ "title": "Toggle Table String",
+ "default": "_v",
+ "info": "This string will be used to toggle visibility of tables in the schema browser when editing a query in order to remove non-useful tables from sight."
+ },
+ }
@classmethod
def configuration_schema(cls):
@@ -35,27 +62,7 @@ def configuration_schema(cls):
schema = {
'type': 'object',
- 'properties': {
- 'host': {
- 'type': 'string',
- 'default': '127.0.0.1'
- },
- 'user': {
- 'type': 'string'
- },
- 'passwd': {
- 'type': 'string',
- 'title': 'Password'
- },
- 'db': {
- 'type': 'string',
- 'title': 'Database name'
- },
- 'port': {
- 'type': 'number',
- 'default': 3306,
- }
- },
+ 'properties': cls.configuration_properties,
"order": ['host', 'port', 'user', 'passwd', 'db'],
'required': ['db'],
'secret': ['passwd']
@@ -78,7 +85,7 @@ def configuration_schema(cls):
'ssl_key': {
'type': 'string',
'title': 'Path to private key file (SSL)'
- }
+ },
})
return schema
@@ -90,7 +97,7 @@ def name(cls):
@classmethod
def enabled(cls):
try:
- import MySQLdb
+ import pymysql
except ImportError:
return False
@@ -100,7 +107,8 @@ def _get_tables(self, schema):
query = """
SELECT col.table_schema as table_schema,
col.table_name as table_name,
- col.column_name as column_name
+ col.column_name as column_name,
+ col.column_type as column_type
FROM `information_schema`.`columns` col
WHERE col.table_schema NOT IN ('information_schema', 'performance_schema', 'mysql', 'sys');
"""
@@ -121,16 +129,16 @@ def _get_tables(self, schema):
if table_name not in schema:
schema[table_name] = {'name': table_name, 'columns': []}
- schema[table_name]['columns'].append(row['column_name'])
+ schema[table_name]['columns'].append(row['column_name'] + ' (' + row['column_type'] + ')')
return schema.values()
def run_query(self, query, user):
- import MySQLdb
+ import pymysql
connection = None
try:
- connection = MySQLdb.connect(host=self.configuration.get('host', ''),
+ connection = pymysql.connect(host=self.configuration.get('host', ''),
user=self.configuration.get('user', ''),
passwd=self.configuration.get('passwd', ''),
db=self.configuration['db'],
@@ -160,7 +168,7 @@ def run_query(self, query, user):
error = "No data was returned."
cursor.close()
- except MySQLdb.Error as e:
+ except pymysql.Error as e:
json_data = None
error = e.args[1]
except KeyboardInterrupt:
diff --git a/redash/query_runner/oracle.py b/redash/query_runner/oracle.py
index eff9250042..7acb9f0038 100644
--- a/redash/query_runner/oracle.py
+++ b/redash/query_runner/oracle.py
@@ -29,8 +29,33 @@
logger = logging.getLogger(__name__)
+
class Oracle(BaseSQLQueryRunner):
noop_query = "SELECT 1 FROM dual"
+ configuration_properties = {
+ "user": {
+ "type": "string"
+ },
+ "password": {
+ "type": "string"
+ },
+ "host": {
+ "type": "string"
+ },
+ "port": {
+ "type": "number"
+ },
+ "servicename": {
+ "type": "string",
+ "title": "DSN Service Name"
+ },
+ "toggle_table_string": {
+ "type": "string",
+ "title": "Toggle Table String",
+ "default": "_v",
+ "info": "This string will be used to toggle visibility of tables in the schema browser when editing a query in order to remove non-useful tables from sight."
+ },
+ }
@classmethod
def get_col_type(cls, col_type, scale):
@@ -47,24 +72,7 @@ def enabled(cls):
def configuration_schema(cls):
return {
"type": "object",
- "properties": {
- "user": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "host": {
- "type": "string"
- },
- "port": {
- "type": "number"
- },
- "servicename": {
- "type": "string",
- "title": "DSN Service Name"
- }
- },
+ "properties": cls.configuration_properties,
"required": ["servicename", "user", "password", "host", "port"],
"secret": ["password"]
}
diff --git a/redash/query_runner/pg.py b/redash/query_runner/pg.py
index 96aa03c07d..1590166ae4 100644
--- a/redash/query_runner/pg.py
+++ b/redash/query_runner/pg.py
@@ -46,36 +46,43 @@ def _wait(conn, timeout=None):
class PostgreSQL(BaseSQLQueryRunner):
noop_query = "SELECT 1"
+ configuration_properties = {
+ "user": {
+ "type": "string"
+ },
+ "password": {
+ "type": "string"
+ },
+ "host": {
+ "type": "string",
+ "default": "127.0.0.1"
+ },
+ "port": {
+ "type": "number",
+ "default": 5432
+ },
+ "dbname": {
+ "type": "string",
+ "title": "Database Name"
+ },
+ "sslmode": {
+ "type": "string",
+ "title": "SSL Mode",
+ "default": "prefer"
+ },
+ "toggle_table_string": {
+ "type": "string",
+ "title": "Toggle Table String",
+ "default": "_v",
+ "info": "This string will be used to toggle visibility of tables in the schema browser when editing a query in order to remove non-useful tables from sight."
+ },
+ }
@classmethod
def configuration_schema(cls):
return {
"type": "object",
- "properties": {
- "user": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "host": {
- "type": "string",
- "default": "127.0.0.1"
- },
- "port": {
- "type": "number",
- "default": 5432
- },
- "dbname": {
- "type": "string",
- "title": "Database Name"
- },
- "sslmode": {
- "type": "string",
- "title": "SSL Mode",
- "default": "prefer"
- }
- },
+ "properties": cls.configuration_properties,
"order": ['host', 'port', 'user', 'password'],
"required": ["dbname"],
"secret": ["password"]
@@ -102,7 +109,7 @@ def _get_definitions(self, schema, query):
if table_name not in schema:
schema[table_name] = {'name': table_name, 'columns': []}
- schema[table_name]['columns'].append(row['column_name'])
+ schema[table_name]['columns'].append(row['column_name'] + ' (' + row['column_type'] + ')')
def _get_tables(self, schema):
'''
@@ -122,6 +129,7 @@ def _get_tables(self, schema):
query = """
SELECT s.nspname as table_schema,
c.relname as table_name,
+ t.typname as column_type,
a.attname as column_name
FROM pg_class c
JOIN pg_namespace s
@@ -131,6 +139,8 @@ def _get_tables(self, schema):
ON a.attrelid = c.oid
AND a.attnum > 0
AND NOT a.attisdropped
+ JOIN pg_type t
+ ON a.atttypid = t.oid
WHERE c.relkind IN ('r', 'v', 'm', 'f', 'p')
"""
@@ -186,6 +196,36 @@ def run_query(self, query, user):
class Redshift(PostgreSQL):
+ configuration_properties = {
+ "user": {
+ "type": "string"
+ },
+ "password": {
+ "type": "string"
+ },
+ "host": {
+ "type": "string"
+ },
+ "port": {
+ "type": "number"
+ },
+ "dbname": {
+ "type": "string",
+ "title": "Database Name"
+ },
+ "sslmode": {
+ "type": "string",
+ "title": "SSL Mode",
+ "default": "prefer"
+ },
+ "toggle_table_string": {
+ "type": "string",
+ "title": "Toggle Table String",
+ "default": "_v",
+ "info": "This string will be used to toggle visibility of tables in the schema browser when editing a query in order to remove non-useful tables from sight."
+ },
+ }
+
@classmethod
def type(cls):
return "redshift"
@@ -209,29 +249,7 @@ def configuration_schema(cls):
return {
"type": "object",
- "properties": {
- "user": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "host": {
- "type": "string"
- },
- "port": {
- "type": "number"
- },
- "dbname": {
- "type": "string",
- "title": "Database Name"
- },
- "sslmode": {
- "type": "string",
- "title": "SSL Mode",
- "default": "prefer"
- }
- },
+ "properties": cls.configuration_properties,
"order": ['host', 'port', 'user', 'password'],
"required": ["dbname", "user", "password", "host", "port"],
"secret": ["password"]
diff --git a/redash/query_runner/presto.py b/redash/query_runner/presto.py
index 975ea70c07..631b384fdd 100644
--- a/redash/query_runner/presto.py
+++ b/redash/query_runner/presto.py
@@ -1,3 +1,5 @@
+from markupsafe import escape
+
from redash.query_runner import *
from redash.utils import json_dumps, json_loads
@@ -31,32 +33,35 @@
class Presto(BaseQueryRunner):
noop_query = 'SHOW TABLES'
+ configuration_properties = {
+ 'host': {
+ 'type': 'string'
+ },
+ 'protocol': {
+ 'type': 'string',
+ 'default': 'http'
+ },
+ 'port': {
+ 'type': 'number'
+ },
+ 'schema': {
+ 'type': 'string'
+ },
+ 'catalog': {
+ 'type': 'string'
+ },
+ 'username': {
+ 'type': 'string'
+ },
+ "toggle_table_string": {
+ "type": "string",
+ "title": "Toggle Table String",
+ "default": "_v",
+ "info": "This string will be used to toggle visibility of tables in the schema browser when editing a query in order to remove non-useful tables from sight."
+ },
+ }
@classmethod
def configuration_schema(cls):
return {
'type': 'object',
- 'properties': {
- 'host': {
- 'type': 'string'
- },
- 'protocol': {
- 'type': 'string',
- 'default': 'http'
- },
- 'port': {
- 'type': 'number'
- },
- 'schema': {
- 'type': 'string'
- },
- 'catalog': {
- 'type': 'string'
- },
- 'username': {
- 'type': 'string'
- },
- },
+ 'properties': cls.configuration_properties,
'order': ['host', 'protocol', 'port', 'username', 'schema', 'catalog'],
'required': ['host']
}
@@ -72,9 +77,10 @@ def type(cls):
def get_schema(self, get_stats=False):
schema = {}
query = """
- SELECT table_schema, table_name, column_name
+ SELECT table_schema, table_name, column_name, data_type as column_type, extra_info
FROM information_schema.columns
WHERE table_schema NOT IN ('pg_catalog', 'information_schema')
+ ORDER BY 1, 5 DESC
"""
results, error = self.run_query(query, None)
@@ -90,7 +96,14 @@ def get_schema(self, get_stats=False):
if table_name not in schema:
schema[table_name] = {'name': table_name, 'columns': []}
- schema[table_name]['columns'].append(row['column_name'])
+ if row['extra_info'] == 'partition key':
+ schema[table_name]['columns'].append('[P] ' + row['column_name'] + ' (' + row['column_type'] + ')')
+ elif row['column_type'] in ('integer', 'varchar', 'timestamp', 'boolean', 'bigint'):
+ schema[table_name]['columns'].append(row['column_name'] + ' (' + row['column_type'] + ')')
+ elif row['column_type'][0:3] in ('row', 'map', 'arr'):
+ schema[table_name]['columns'].append(row['column_name'] + ' (row or map or array)')
+ else:
+ schema[table_name]['columns'].append(row['column_name'])
return schema.values()
@@ -111,6 +124,9 @@ def run_query(self, query, user):
column_tuples = [(i[0], PRESTO_TYPES_MAPPING.get(i[1], None)) for i in cursor.description]
columns = self.fetch_columns(column_tuples)
rows = [dict(zip(([c['name'] for c in columns]), r)) for i, r in enumerate(cursor.fetchall())]
+ for row in rows:
+ for field in row:
+ # assigning to the loop variable would be a no-op; escape
+ # string values in place instead
+ if isinstance(row[field], basestring):
+ row[field] = escape(row[field])
data = {'columns': columns, 'rows': rows}
json_data = json_dumps(data)
error = None
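
The branching in `get_schema` above is essentially a pure labeling function, and extracting it makes the rules easy to unit-test: partition keys get a `[P]` prefix, a whitelist of scalar types is spelled out, and complex types are matched by their three-character prefix (a two-character slice could never equal 'row', 'map', or 'arr'). A hypothetical extraction, not code from this diff:

SIMPLE_TYPES = ('integer', 'varchar', 'timestamp', 'boolean', 'bigint')

def label_column(name, column_type, extra_info=None):
    """Render a schema-browser label such as '[P] ds (varchar)'."""
    if extra_info == 'partition key':
        return '[P] ' + name + ' (' + column_type + ')'
    if column_type in SIMPLE_TYPES:
        return name + ' (' + column_type + ')'
    if column_type[0:3] in ('row', 'map', 'arr'):
        return name + ' (row or map or array)'
    return name

assert label_column('ds', 'varchar', 'partition key') == '[P] ds (varchar)'
assert label_column('tags', 'array(varchar)') == 'tags (row or map or array)'
assert label_column('payload', 'json') == 'payload'
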
diff --git a/redash/query_runner/python.py b/redash/query_runner/python.py
index f6cc2fbcd9..9b29128c2f 100644
--- a/redash/query_runner/python.py
+++ b/redash/query_runner/python.py
@@ -44,19 +44,27 @@ class Python(BaseQueryRunner):
'tuple', 'set', 'list', 'dict', 'bool',
)
+ configuration_properties = {
+ 'allowedImportModules': {
+ 'type': 'string',
+ 'title': 'Modules to import prior to running the script'
+ },
+ 'additionalModulesPaths': {
+ 'type': 'string'
+ },
+ "toggle_table_string": {
+ "type": "string",
+ "title": "Toggle Table String",
+ "default": "_v",
+ "info": "This string will be used to toggle visibility of tables in the schema browser when editing a query in order to remove non-useful tables from sight."
+ },
+ }
+
@classmethod
def configuration_schema(cls):
return {
'type': 'object',
- 'properties': {
- 'allowedImportModules': {
- 'type': 'string',
- 'title': 'Modules to import prior to running the script'
- },
- 'additionalModulesPaths': {
- 'type': 'string'
- }
- },
+ 'properties': cls.configuration_properties
}
@classmethod
diff --git a/redash/query_runner/rockset.py b/redash/query_runner/rockset.py
index 5d0d30d99d..8b0abe7c83 100644
--- a/redash/query_runner/rockset.py
+++ b/redash/query_runner/rockset.py
@@ -1,7 +1,7 @@
import requests
import os
from redash.query_runner import *
-from redash.utils import JSONEncoder
+from redash.utils import json_dumps
import json
@@ -96,7 +96,7 @@ def run_query(self, query, user):
columns = []
for k in rows[0]:
columns.append({'name': k, 'friendly_name': k, 'type': _get_type(rows[0][k])})
- data = json.dumps({'columns': columns, 'rows': rows}, cls=JSONEncoder)
+ data = json_dumps({'columns': columns, 'rows': rows})
return data, None
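
Switching from `json.dumps(..., cls=JSONEncoder)` to `redash.utils.json_dumps` keeps encoder selection in one place, so every runner picks up encoder fixes automatically. The custom encoder matters because result rows routinely contain values the stdlib encoder rejects, datetimes in particular. A stand-in encoder sketching the failure mode (this is not Redash's actual encoder, just the shape of it):

import datetime
import json

class QueryResultEncoder(json.JSONEncoder):
    # Stand-in for Redash's custom encoder: stringify datetimes
    # instead of raising TypeError.
    def default(self, o):
        if isinstance(o, (datetime.date, datetime.datetime)):
            return o.isoformat()
        return super(QueryResultEncoder, self).default(o)

row = {'created_at': datetime.datetime(2018, 7, 25, 16, 9)}
# json.dumps(row) raises TypeError; the custom encoder succeeds:
print(json.dumps(row, cls=QueryResultEncoder))
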
diff --git a/redash/query_runner/salesforce.py b/redash/query_runner/salesforce.py
index 527f1e26ec..7222028fd0 100644
--- a/redash/query_runner/salesforce.py
+++ b/redash/query_runner/salesforce.py
@@ -81,6 +81,12 @@ def configuration_schema(cls):
"type": "string",
"title": "Salesforce API Version",
"default": DEFAULT_API_VERSION
+ },
+ "toggle_table_string": {
+ "type": "string",
+ "title": "Toggle Table String",
+ "default": "_v",
+ "info": "This string will be used to toggle visibility of tables in the schema browser when editing a query in order to remove non-useful tables from sight."
}
},
"required": ["username", "password", "token"],
diff --git a/redash/query_runner/script.py b/redash/query_runner/script.py
index 38e3ae62c5..808d1024a2 100644
--- a/redash/query_runner/script.py
+++ b/redash/query_runner/script.py
@@ -29,6 +29,23 @@ def run_script(script, shell):
class Script(BaseQueryRunner):
+ configuration_properties = {
+ 'path': {
+ 'type': 'string',
+ 'title': 'Scripts path'
+ },
+ 'shell': {
+ 'type': 'boolean',
+ 'title': 'Execute command through the shell'
+ },
+ "toggle_table_string": {
+ "type": "string",
+ "title": "Toggle Table String",
+ "default": "_v",
+ "info": "This string will be used to toggle visibility of tables in the schema browser when editing a query in order to remove non-useful tables from sight."
+ },
+ }
+
@classmethod
def annotate_query(cls):
return False
@@ -41,16 +58,7 @@ def enabled(cls):
def configuration_schema(cls):
return {
'type': 'object',
- 'properties': {
- 'path': {
- 'type': 'string',
- 'title': 'Scripts path'
- },
- 'shell': {
- 'type': 'boolean',
- 'title': 'Execute command through the shell'
- }
- },
+ 'properties': cls.configuration_properties,
'required': ['path']
}
diff --git a/redash/query_runner/snowflake.py b/redash/query_runner/snowflake.py
index 3bf2bd64aa..21fddf2af3 100644
--- a/redash/query_runner/snowflake.py
+++ b/redash/query_runner/snowflake.py
@@ -45,6 +45,12 @@ def configuration_schema(cls):
},
"database": {
"type": "string"
+ },
+ "toggle_table_string": {
+ "type": "string",
+ "title": "Toggle Table String",
+ "default": "_v",
+ "info": "This string will be used to toggle visibility of tables in the schema browser when editing a query in order to remove non-useful tables from sight."
}
},
"required": ["user", "password", "account", "database", "warehouse"],
diff --git a/redash/query_runner/sqlite.py b/redash/query_runner/sqlite.py
index c1933d81e6..79c4f9c3e4 100644
--- a/redash/query_runner/sqlite.py
+++ b/redash/query_runner/sqlite.py
@@ -12,17 +12,24 @@
class Sqlite(BaseSQLQueryRunner):
noop_query = "pragma quick_check"
+ configuration_properties = {
+ "dbpath": {
+ "type": "string",
+ "title": "Database Path"
+ },
+ "toggle_table_string": {
+ "type": "string",
+ "title": "Toggle Table String",
+ "default": "_v",
+ "info": "This string will be used to toggle visibility of tables in the schema browser when editing a query in order to remove non-useful tables from sight."
+ },
+ }
@classmethod
def configuration_schema(cls):
return {
"type": "object",
- "properties": {
- "dbpath": {
- "type": "string",
- "title": "Database Path"
- }
- },
+ "properties": cls.configuration_properties,
"required": ["dbpath"],
}
diff --git a/redash/query_runner/treasuredata.py b/redash/query_runner/treasuredata.py
index 5e3673ed78..5321706801 100644
--- a/redash/query_runner/treasuredata.py
+++ b/redash/query_runner/treasuredata.py
@@ -35,31 +35,38 @@
class TreasureData(BaseQueryRunner):
noop_query = "SELECT 1"
+ configuration_properties = {
+ 'endpoint': {
+ 'type': 'string'
+ },
+ 'apikey': {
+ 'type': 'string'
+ },
+ 'type': {
+ 'type': 'string'
+ },
+ 'db': {
+ 'type': 'string',
+ 'title': 'Database Name'
+ },
+ 'get_schema': {
+ 'type': 'boolean',
+ 'title': 'Auto Schema Retrieval',
+ 'default': False
+ },
+ "toggle_table_string": {
+ "type": "string",
+ "title": "Toggle Table String",
+ "default": "_v",
+ "info": "This string will be used to toggle visibility of tables in the schema browser when editing a query in order to remove non-useful tables from sight."
+ },
+ }
@classmethod
def configuration_schema(cls):
return {
'type': 'object',
- 'properties': {
- 'endpoint': {
- 'type': 'string'
- },
- 'apikey': {
- 'type': 'string'
- },
- 'type': {
- 'type': 'string'
- },
- 'db': {
- 'type': 'string',
- 'title': 'Database Name'
- },
- 'get_schema': {
- 'type': 'boolean',
- 'title': 'Auto Schema Retrieval',
- 'default': False
- }
- },
+ 'properties': cls.configuration_properties,
'required': ['apikey','db']
}
diff --git a/redash/query_runner/vertica.py b/redash/query_runner/vertica.py
index 92ab864c1a..6bffece1ea 100644
--- a/redash/query_runner/vertica.py
+++ b/redash/query_runner/vertica.py
@@ -29,38 +29,45 @@
class Vertica(BaseSQLQueryRunner):
noop_query = "SELECT 1"
+ configuration_properties = {
+ 'host': {
+ 'type': 'string'
+ },
+ 'user': {
+ 'type': 'string'
+ },
+ 'password': {
+ 'type': 'string',
+ 'title': 'Password'
+ },
+ 'database': {
+ 'type': 'string',
+ 'title': 'Database name'
+ },
+ "port": {
+ "type": "number"
+ },
+ "read_timeout": {
+ "type": "number",
+ "title": "Read Timeout"
+ },
+ "connection_timeout": {
+ "type": "number",
+ "title": "Connection Timeout"
+ },
+ "toggle_table_string": {
+ "type": "string",
+ "title": "Toggle Table String",
+ "default": "_v",
+ "info": "This string will be used to toggle visibility of tables in the schema browser when editing a query in order to remove non-useful tables from sight."
+ },
+ }
@classmethod
def configuration_schema(cls):
return {
'type': 'object',
- 'properties': {
- 'host': {
- 'type': 'string'
- },
- 'user': {
- 'type': 'string'
- },
- 'password': {
- 'type': 'string',
- 'title': 'Password'
- },
- 'database': {
- 'type': 'string',
- 'title': 'Database name'
- },
- "port": {
- "type": "number"
- },
- "read_timeout": {
- "type": "number",
- "title": "Read Timeout"
- },
- "connection_timeout": {
- "type": "number",
- "title": "Connection Timeout"
- },
- },
+ 'properties': cls.configuration_properties,
'required': ['database'],
'order': ['host', 'port', 'user', 'password', 'database', 'read_timeout', 'connection_timeout'],
'secret': ['password']
@@ -117,7 +124,7 @@ def run_query(self, query, user):
'database': self.configuration.get('database', ''),
'read_timeout': self.configuration.get('read_timeout', 600)
}
-
+
if self.configuration.get('connection_timeout'):
conn_info['connection_timeout'] = self.configuration.get('connection_timeout')
diff --git a/redash/serializers.py b/redash/serializers.py
index d809a1f73e..84f1274813 100644
--- a/redash/serializers.py
+++ b/redash/serializers.py
@@ -22,8 +22,19 @@ def public_widget(widget):
'created_at': widget.created_at
}
- if widget.visualization and widget.visualization.id:
- query_data = models.QueryResult.query.get(widget.visualization.query_rel.latest_query_data_id).to_dict()
+ if (widget.visualization and
+ widget.visualization.id and
+ widget.visualization.query_rel is not None):
+ q = widget.visualization.query_rel
+ # make sure the widget's query has a latest_query_data_id that is
+ # not null so public dashboards work
+ if q.latest_query_data_id is None:
+ # this import is inline since it triggers a circular
+ # import otherwise
+ from redash.handlers.query_results import run_query_sync
+ run_query_sync(q.data_source, {}, q.query_text)
+
+ query_data = q.latest_query_data.to_dict()
res['visualization'] = {
'type': widget.visualization.type,
'name': widget.visualization.name,
@@ -32,9 +43,10 @@ def public_widget(widget):
'updated_at': widget.visualization.updated_at,
'created_at': widget.visualization.created_at,
'query': {
+ 'id': q.id,
'query': ' ', # workaround, as otherwise the query data won't be loaded.
- 'name': widget.visualization.query_rel.name,
- 'description': widget.visualization.query_rel.description,
+ 'name': q.name,
+ 'description': q.description,
'options': {},
'latest_query_data': query_data
}
@@ -91,6 +103,7 @@ def serialize_query(query, with_stats=False, with_visualizations=False, with_use
'query': query.query_text,
'query_hash': query.query_hash,
'schedule': query.schedule,
+ 'schedule_resultset_size': query.schedule_resultset_size,
'api_key': query.api_key,
'is_archived': query.is_archived,
'is_draft': query.is_draft,
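
The `public_widget` change above boils down to a lazy-refresh guard: if a publicly shared widget's query has never stored a result, run it synchronously once, then serialize from `latest_query_data` as usual. Reduced to a sketch (mirroring the diff's names; error handling omitted):

def ensure_latest_result(query):
    """Return cached result data, computing it on first access."""
    if query.latest_query_data_id is None:
        # Inline import, as in the diff, to dodge a circular import.
        from redash.handlers.query_results import run_query_sync
        run_query_sync(query.data_source, {}, query.query_text)
    return query.latest_query_data.to_dict()
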
diff --git a/redash/settings/__init__.py b/redash/settings/__init__.py
index ef23e5e8e3..605ba37013 100644
--- a/redash/settings/__init__.py
+++ b/redash/settings/__init__.py
@@ -15,6 +15,7 @@ def all_settings():
return settings
+SESSION_COOKIE_SECURE = True
REDIS_URL = os.environ.get('REDASH_REDIS_URL', os.environ.get('REDIS_URL', "redis://localhost:6379/0"))
PROXIES_COUNT = int(os.environ.get('REDASH_PROXIES_COUNT', "1"))
@@ -83,6 +84,13 @@ def all_settings():
REMOTE_USER_LOGIN_ENABLED = parse_boolean(os.environ.get("REDASH_REMOTE_USER_LOGIN_ENABLED", "false"))
REMOTE_USER_HEADER = os.environ.get("REDASH_REMOTE_USER_HEADER", "X-Forwarded-Remote-User")
+# When enabled this will match the given remote groups request header with a
+# configured list of allowed user groups using UNIX shell-style wildcards such
+# as * and ?.
+REMOTE_GROUPS_ENABLED = parse_boolean(os.environ.get("REDASH_REMOTE_GROUPS_ENABLED", "false"))
+REMOTE_GROUPS_HEADER = os.environ.get("REDASH_REMOTE_GROUPS_HEADER", "X-Forwarded-Remote-Groups")
+REMOTE_GROUPS_ALLOWED = set_from_string(os.environ.get("REDASH_REMOTE_GROUPS_ALLOWED", ""))
+
# If the organization setting auth_password_login_enabled is not false, then users will still be
# able to login through Redash instead of the LDAP server
LDAP_LOGIN_ENABLED = parse_boolean(os.environ.get('REDASH_LDAP_LOGIN_ENABLED', 'false'))
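
The comment above says matching uses UNIX shell-style wildcards, which in Python means the `fnmatch` module. A sketch of the check the settings imply; the function name and header parsing here are assumptions, not code from this diff:

from fnmatch import fnmatch

def remote_groups_allowed(header_value, allowed_patterns):
    """True if any group in the comma-separated header matches
    any allowed pattern, e.g. 'admins' matches 'adm*'."""
    groups = [g.strip() for g in header_value.split(',') if g.strip()]
    return any(fnmatch(group, pattern)
               for group in groups
               for pattern in allowed_patterns)

assert remote_groups_allowed('staff, admins', {'adm?ns'})
assert not remote_groups_allowed('guests', {'adm*', 'staff'})
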
diff --git a/redash/settings/helpers.py b/redash/settings/helpers.py
index 98946d81e4..4d6f84185b 100644
--- a/redash/settings/helpers.py
+++ b/redash/settings/helpers.py
@@ -11,7 +11,7 @@ def array_from_string(s):
if "" in array:
array.remove("")
- return array
+ return [item.strip() for item in array]
def set_from_string(s):
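
With the strip added above, comma-separated environment variables parse cleanly even when padded with spaces. The full helper, reconstructed for illustration (the `split` line sits just above the hunk shown):

def array_from_string(s):
    array = s.split(',')
    if "" in array:
        array.remove("")
    # 'a, b ,c' -> ['a', 'b', 'c']; without the strip it would
    # have been ['a', ' b ', 'c']
    return [item.strip() for item in array]

assert array_from_string('a, b ,c') == ['a', 'b', 'c']
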
diff --git a/redash/tasks/queries.py b/redash/tasks/queries.py
index 4f44c3b854..abc967959b 100644
--- a/redash/tasks/queries.py
+++ b/redash/tasks/queries.py
@@ -354,6 +354,7 @@ def cleanup_query_results():
deleted_count = models.QueryResult.query.filter(
models.QueryResult.id.in_(unused_query_results.subquery())
).delete(synchronize_session=False)
+ deleted_count += models.Query.delete_stale_resultsets()
models.db.session.commit()
logger.info("Deleted %d unused query results.", deleted_count)
diff --git a/requirements.txt b/requirements.txt
index 8ad41077df..450d069d56 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -54,3 +54,4 @@ disposable-email-domains
# Uncomment the requirement for ldap3 if using ldap.
# It is not included by default because of the GPL license conflict.
# ldap3==2.2.4
+redash-stmo>=2018.12.0
diff --git a/requirements_all_ds.txt b/requirements_all_ds.txt
index c2af9ebd7a..79e9d0e5e9 100644
--- a/requirements_all_ds.txt
+++ b/requirements_all_ds.txt
@@ -2,7 +2,7 @@ google-api-python-client==1.5.1
gspread==0.6.2
impyla==0.10.0
influxdb==2.7.1
-MySQL-python==1.2.5
+PyMySQL==0.7.11
oauth2client==3.0.0
pyhive==0.5.1
pymongo[tls,srv]==3.6.1
@@ -19,7 +19,7 @@ cassandra-driver==3.11.0
memsql==2.16.0
atsd_client==2.0.12
simple_salesforce==0.72.2
-PyAthena>=1.0.0
+PyAthena>=1.2.0
pymapd>=0.2.1
qds-sdk>=1.9.6
ibm-db>=2.0.9
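
Swapping MySQL-python for PyMySQL is nearly a drop-in change: the connect/cursor surface is the same, so the runner edits above are mostly import renames, and PyMySQL is pure Python with no libmysqlclient build step. For code that can't be edited, PyMySQL also ships a compatibility shim:

import pymysql
pymysql.install_as_MySQLdb()

import MySQLdb  # now resolves to PyMySQL under the hood

# Hypothetical credentials, for illustration only.
conn = MySQLdb.connect(host='127.0.0.1', user='redash',
                       passwd='secret', db='redash', port=3306)
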
diff --git a/tests/factories.py b/tests/factories.py
index 0b56ac016d..2ffc6349fe 100644
--- a/tests/factories.py
+++ b/tests/factories.py
@@ -111,7 +111,9 @@ def __call__(self):
query_hash=gen_query_hash('SELECT 1'),
data_source=data_source_factory.create,
org_id=1)
-
+query_resultset_factory = ModelFactory(redash.models.QueryResultSet,
+ query_rel=query_factory.create,
+ result=query_result_factory.create)
visualization_factory = ModelFactory(redash.models.Visualization,
type='CHART',
query_rel=query_factory.create,
@@ -297,6 +299,9 @@ def create_query_result(self, **kwargs):
return query_result_factory.create(**args)
+ def create_query_resultset(self, **kwargs):
+ return query_resultset_factory.create(**kwargs)
+
def create_visualization(self, **kwargs):
args = {
'query_rel': self.create_query()
diff --git a/tests/handlers/test_data_sources.py b/tests/handlers/test_data_sources.py
index f07a2b3719..4590056fd4 100644
--- a/tests/handlers/test_data_sources.py
+++ b/tests/handlers/test_data_sources.py
@@ -60,7 +60,8 @@ def test_updates_data_source(self):
new_name = 'New Name'
new_options = {"dbname": "newdb"}
rv = self.make_request('post', self.path,
- data={'name': new_name, 'type': 'pg', 'options': new_options},
+ data={'name': new_name, 'type': 'pg', 'options': new_options,
+ 'doc_url': None},
user=admin)
self.assertEqual(rv.status_code, 200)
@@ -101,7 +102,9 @@ def test_returns_400_when_configuration_invalid(self):
def test_creates_data_source(self):
admin = self.factory.create_admin()
rv = self.make_request('post', '/api/data_sources',
- data={'name': 'DS 1', 'type': 'pg', 'options': {"dbname": "redash"}}, user=admin)
+ data={'name': 'DS 1', 'type': 'pg',
+ 'options': {"dbname": "redash"},
+ 'doc_url': None}, user=admin)
self.assertEqual(rv.status_code, 200)
diff --git a/tests/handlers/test_embed.py b/tests/handlers/test_embed.py
index 18f119d786..905a6f8672 100644
--- a/tests/handlers/test_embed.py
+++ b/tests/handlers/test_embed.py
@@ -1,5 +1,8 @@
+import mock
+
from tests import BaseTestCase
from redash.models import db
+from redash.query_runner.pg import PostgreSQL
class TestEmbedVisualization(BaseTestCase):
@@ -97,6 +100,15 @@ def test_inactive_token(self):
res = self.make_request('get', '/api/dashboards/public/{}'.format(api_key.api_key), user=False, is_json=False)
self.assertEqual(res.status_code, 404)
+ def test_dashboard_widgets(self):
+ dashboard = self.factory.create_dashboard()
+ w1 = self.factory.create_widget(dashboard=dashboard)
+ w2 = self.factory.create_widget(dashboard=dashboard, visualization=None, text="a text box")
+ api_key = self.factory.create_api_key(object=dashboard)
+ with mock.patch.object(PostgreSQL, "run_query") as qr:
+ qr.return_value = ("[1, 2]", None)
+ res = self.make_request('get', '/api/dashboards/public/{}'.format(api_key.api_key), user=False, is_json=False)
+ self.assertEqual(res.status_code, 200)
# Not relevant for now, as tokens in api_keys table are only created for dashboards. Once this changes, we should
# add this test.
# def test_token_doesnt_belong_to_dashboard(self):
diff --git a/tests/handlers/test_queries.py b/tests/handlers/test_queries.py
index 8e2352553e..135d29c69a 100644
--- a/tests/handlers/test_queries.py
+++ b/tests/handlers/test_queries.py
@@ -1,3 +1,5 @@
+import json
+
from tests import BaseTestCase
from redash import models
from redash.models import db
@@ -259,3 +261,107 @@ def test_format_sql_query(self):
self.assertEqual(rv.json['query'], expected)
+
+class ChangeResourceTests(BaseTestCase):
+ def test_list(self):
+ query = self.factory.create_query()
+ query.name = 'version A'
+ query.record_changes(self.factory.user)
+ query.name = 'version B'
+ query.record_changes(self.factory.user)
+ rv = self.make_request('get', '/api/queries/{0}/version'.format(query.id))
+ self.assertEqual(rv.status_code, 200)
+ self.assertEqual(len(rv.json), 2)
+ self.assertEqual(rv.json[0]['change']['name']['current'], 'version A')
+ self.assertEqual(rv.json[1]['change']['name']['current'], 'version B')
+
+ def test_get(self):
+ query = self.factory.create_query()
+ query.name = 'version A'
+ ch1 = query.record_changes(self.factory.user)
+ query.name = 'version B'
+ ch2 = query.record_changes(self.factory.user)
+ rv1 = self.make_request('get', '/api/changes/' + str(ch1.id))
+ self.assertEqual(rv1.status_code, 200)
+ self.assertEqual(rv1.json['change']['name']['current'], 'version A')
+ rv2 = self.make_request('get', '/api/changes/' + str(ch2.id))
+ self.assertEqual(rv2.status_code, 200)
+ self.assertEqual(rv2.json['change']['name']['current'], 'version B')
+
+
+class AggregateResultsTests(BaseTestCase):
+ def test_aggregate(self):
+ qtxt = "SELECT x FROM mytable;"
+ q = self.factory.create_query(query_text=qtxt, schedule_resultset_size=3)
+ qr0 = self.factory.create_query_result(
+ query_text=qtxt,
+ data=json.dumps({'columns': ['name', 'color'],
+ 'rows': [{'name': 'eve', 'color': 'grue'},
+ {'name': 'mallory', 'color': 'bleen'}]}))
+ qr1 = self.factory.create_query_result(
+ query_text=qtxt,
+ data=json.dumps({'columns': ['name', 'color'],
+ 'rows': [{'name': 'bob', 'color': 'green'},
+ {'name': 'fred', 'color': 'blue'}]}))
+ qr2 = self.factory.create_query_result(
+ query_text=qtxt,
+ data=json.dumps({'columns': ['name', 'color'],
+ 'rows': [{'name': 'alice', 'color': 'red'},
+ {'name': 'eddie', 'color': 'orange'}]}))
+ qr3 = self.factory.create_query_result(
+ query_text=qtxt,
+ data=json.dumps({'columns': ['name', 'color'],
+ 'rows': [{'name': 'dave', 'color': 'yellow'},
+ {'name': 'carol', 'color': 'taupe'}]}))
+ for qr in (qr0, qr1, qr2, qr3):
+ self.factory.create_query_resultset(query_rel=q, result=qr)
+ rv = self.make_request('get', '/api/queries/{}/resultset'.format(q.id))
+ self.assertEqual(rv.status_code, 200)
+ self.assertEqual(rv.json['query_result']['data'],
+ {'columns': ['name', 'color'],
+ 'rows': [
+ {'name': 'bob', 'color': 'green'},
+ {'name': 'fred', 'color': 'blue'},
+ {'name': 'alice', 'color': 'red'},
+ {'name': 'eddie', 'color': 'orange'},
+ {'name': 'dave', 'color': 'yellow'},
+ {'name': 'carol', 'color': 'taupe'}
+ ]})
+
+ def test_underfilled_aggregate(self):
+ qtxt = "SELECT x FROM mytable;"
+ q = self.factory.create_query(query_text=qtxt,
+ schedule_resultset_size=3)
+ qr1 = self.factory.create_query_result(
+ query_text=qtxt,
+ data=json.dumps({'columns': ['name', 'color'],
+ 'rows': [{'name': 'bob', 'color': 'green'},
+ {'name': 'fred', 'color': 'blue'}]}))
+ qr2 = self.factory.create_query_result(
+ query_text=qtxt,
+ data=json.dumps({'columns': ['name', 'color'],
+ 'rows': [{'name': 'alice', 'color': 'red'},
+ {'name': 'eddie', 'color': 'orange'}]}))
+ for qr in (qr1, qr2):
+ self.factory.create_query_resultset(query_rel=q, result=qr)
+ rv = self.make_request('get', '/api/queries/{}/resultset'.format(q.id))
+ self.assertEqual(rv.status_code, 200)
+ self.assertEqual(rv.json['query_result']['data'],
+ {'columns': ['name', 'color'],
+ 'rows': [
+ {'name': 'bob', 'color': 'green'},
+ {'name': 'fred', 'color': 'blue'},
+ {'name': 'alice', 'color': 'red'},
+ {'name': 'eddie', 'color': 'orange'}
+ ]})
+
+ def test_no_aggregate(self):
+ qtxt = "SELECT x FROM mytable;"
+ q = self.factory.create_query(query_text=qtxt)
+ self.factory.create_query_result(
+ query_text=qtxt,
+ data=json.dumps({'columns': ['name', 'color'],
+ 'rows': [{'name': 'eve', 'color': 'grue'},
+ {'name': 'mallory', 'color': 'bleen'}]}))
+ rv = self.make_request('get', '/api/queries/{}/resultset'.format(q.id))
+ self.assertEqual(rv.status_code, 404)
diff --git a/tests/models/test_changes.py b/tests/models/test_changes.py
index 124e17a30d..3d7c7496e8 100644
--- a/tests/models/test_changes.py
+++ b/tests/models/test_changes.py
@@ -56,23 +56,12 @@ def test_properly_log_modification(self):
obj.record_changes(changed_by=self.factory.user)
obj.name = 'Query 2'
obj.description = 'description'
- db.session.flush()
obj.record_changes(changed_by=self.factory.user)
change = Change.last_change(obj)
self.assertIsNotNone(change)
- # TODO: https://github.com/getredash/redash/issues/1550
- # self.assertEqual(change.object_version, 2)
+ self.assertEqual(change.object_version, 2)
self.assertEqual(change.object_version, obj.version)
self.assertIn('name', change.change)
self.assertIn('description', change.change)
-
- def test_logs_create_method(self):
- q = Query(name='Query', description='', query_text='',
- user=self.factory.user, data_source=self.factory.data_source,
- org=self.factory.org)
- change = Change.last_change(q)
-
- self.assertIsNotNone(change)
- self.assertEqual(q.user, change.user)
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 3fb016f099..fa5e081a5b 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -16,7 +16,7 @@ def test_interactive_new(self):
result = runner.invoke(
manager,
['ds', 'new'],
- input="test\n%s\n\n\nexample.com\n\n\ntestdb\n" % (pg_i,))
+ input="test\n%s\n\n\n\n\nexample.com\n\n\ntestdb\n" % (pg_i,))
self.assertFalse(result.exception)
self.assertEqual(result.exit_code, 0)
self.assertEqual(DataSource.query.count(), 1)
diff --git a/tests/test_models.py b/tests/test_models.py
index 5ccf6e4af0..f521a138f4 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -180,7 +180,8 @@ def test_failure_extends_schedule(self):
Execution failures recorded for a query result in exponential backoff
for scheduling future execution.
"""
- query = self.factory.create_query(schedule="60", schedule_failures=4)
+ query = self.factory.create_query(schedule="60")
+ query.schedule_failures = 4
retrieved_at = utcnow() - datetime.timedelta(minutes=16)
query_result = self.factory.create_query_result(
retrieved_at=retrieved_at, query_text=query.query_text,
@@ -192,6 +193,34 @@ def test_failure_extends_schedule(self):
query_result.retrieved_at = utcnow() - datetime.timedelta(minutes=17)
self.assertEqual(list(models.Query.outdated_queries()), [query])
+ def test_schedule_until_after(self):
+ """
+ Queries with non-null ``schedule_until`` are not reported by
+ Query.outdated_queries() after the given time is past.
+ """
+ three_hours_ago = utcnow() - datetime.timedelta(hours=3)
+ two_hours_ago = utcnow() - datetime.timedelta(hours=2)
+ query = self.factory.create_query(schedule="3600", schedule_until=three_hours_ago)
+ query_result = self.factory.create_query_result(query_text=query.query_text, retrieved_at=two_hours_ago)
+ query.latest_query_data = query_result
+
+ queries = models.Query.outdated_queries()
+ self.assertNotIn(query, queries)
+
+ def test_schedule_until_before(self):
+ """
+ Queries with non-null ``schedule_until`` are reported by
+ Query.outdated_queries() before the given time is past.
+ """
+ one_hour_from_now = utcnow() + datetime.timedelta(hours=1)
+ two_hours_ago = utcnow() - datetime.timedelta(hours=2)
+ query = self.factory.create_query(schedule="3600", schedule_until=one_hour_from_now)
+ query_result = self.factory.create_query_result(query_text=query.query_text, retrieved_at=two_hours_ago)
+ query.latest_query_data = query_result
+
+ queries = models.Query.outdated_queries()
+ self.assertIn(query, queries)
+
class QueryArchiveTest(BaseTestCase):
def setUp(self):
@@ -249,22 +278,74 @@ def test_deletes_alerts(self):
class TestUnusedQueryResults(BaseTestCase):
def test_returns_only_unused_query_results(self):
two_weeks_ago = utcnow() - datetime.timedelta(days=14)
- qr = self.factory.create_query_result()
- query = self.factory.create_query(latest_query_data=qr)
+ qt = "SELECT 1"
+ qr = self.factory.create_query_result(query_text=qt, retrieved_at=two_weeks_ago)
+ query = self.factory.create_query(query_text=qt, latest_query_data=qr)
+ unused_qr = self.factory.create_query_result(query_text=qt, retrieved_at=two_weeks_ago)
db.session.flush()
- unused_qr = self.factory.create_query_result(retrieved_at=two_weeks_ago)
self.assertIn((unused_qr.id,), models.QueryResult.unused())
self.assertNotIn((qr.id,), list(models.QueryResult.unused()))
def test_returns_only_over_a_week_old_results(self):
two_weeks_ago = utcnow() - datetime.timedelta(days=14)
- unused_qr = self.factory.create_query_result(retrieved_at=two_weeks_ago)
+ qt = "SELECT 1"
+ unused_qr = self.factory.create_query_result(query_text=qt, retrieved_at=two_weeks_ago)
db.session.flush()
- new_unused_qr = self.factory.create_query_result()
-
+ new_unused_qr = self.factory.create_query_result(query_text=qt)
self.assertIn((unused_qr.id,), models.QueryResult.unused())
self.assertNotIn((new_unused_qr.id,), models.QueryResult.unused())
+ def test_doesnt_return_live_incremental_results(self):
+ two_weeks_ago = utcnow() - datetime.timedelta(days=14)
+ qt = "SELECT 1"
+ qrs = [self.factory.create_query_result(query_text=qt, retrieved_at=two_weeks_ago)
+ for _ in range(5)]
+ q = self.factory.create_query(query_text=qt, latest_query_data=qrs[0],
+ schedule_resultset_size=3)
+ for qr in qrs:
+ self.factory.create_query_resultset(query_rel=q, result=qr)
+ db.session.flush()
+ self.assertEqual([], list(models.QueryResult.unused()))
+
+ def test_deletes_stale_resultsets(self):
+ qt = "SELECT 17"
+ query = self.factory.create_query(query_text=qt,
+ schedule_resultset_size=5)
+ for _ in range(10):
+ r = self.factory.create_query_result(query_text=qt)
+ self.factory.create_query_resultset(query_rel=query, result=r)
+ qt2 = "SELECT 100"
+ query2 = self.factory.create_query(query_text=qt2, schedule_resultset_size=5)
+ for _ in range(10):
+ r = self.factory.create_query_result(query_text=qt2)
+ self.factory.create_query_resultset(query_rel=query2, result=r)
+ db.session.flush()
+ self.assertEqual(models.QueryResultSet.query.count(), 20)
+ self.assertEqual(models.Query.delete_stale_resultsets(), 10)
+ self.assertEqual(models.QueryResultSet.query.count(), 10)
+
+ def test_deletes_stale_resultsets_with_dupe_queries(self):
+ qt = "SELECT 17"
+ query = self.factory.create_query(query_text=qt,
+ schedule_resultset_size=5)
+ for _ in range(10):
+ r = self.factory.create_query_result(query_text=qt)
+ self.factory.create_query_resultset(query_rel=query, result=r)
+ query2 = self.factory.create_query(query_text=qt,
+ schedule_resultset_size=3)
+ for _ in range(10):
+ self.factory.create_query_result(query_text=qt)
+ self.factory.create_query_resultset(query_rel=query2)
+ qt2 = "SELECT 100"
+ query3 = self.factory.create_query(query_text=qt2, schedule_resultset_size=5)
+ for _ in range(10):
+ r = self.factory.create_query_result(query_text=qt2)
+ self.factory.create_query_resultset(query_rel=query3, result=r)
+ db.session.flush()
+ self.assertEqual(models.QueryResultSet.query.count(), 30)
+ self.assertEqual(models.Query.delete_stale_resultsets(), 10)
+ self.assertEqual(models.QueryResultSet.query.count(), 13)
+
class TestQueryAll(BaseTestCase):
def test_returns_only_queries_in_given_groups(self):
diff --git a/webpack.config.js b/webpack.config.js
index b476c7abf6..934d94a3af 100644
--- a/webpack.config.js
+++ b/webpack.config.js
@@ -226,6 +226,14 @@ if (process.env.DEV_SERVER_HOST) {
config.devServer.host = process.env.DEV_SERVER_HOST;
}
+if (isProduction) {
+ config.plugins.push(
+ new webpack.DefinePlugin({
+ 'process.env.NODE_ENV': JSON.stringify('production')
+ })
+ );
+}
+
if (process.env.BUNDLE_ANALYZER) {
config.plugins.push(new BundleAnalyzerPlugin());
}