release-19.1: stats,opt: finalize automatic stats cluster settings and join reorder limit setting #36085

Merged 2 commits on Mar 24, 2019
docs/generated/settings/settings.html (6 changes: 4 additions & 2 deletions)
@@ -97,8 +97,10 @@
<tr><td><code>sql.metrics.statement_details.threshold</code></td><td>duration</td><td><code>0s</code></td><td>minimum execution time to cause statistics to be collected</td></tr>
<tr><td><code>sql.parallel_scans.enabled</code></td><td>boolean</td><td><code>true</code></td><td>parallelizes scanning different ranges when the maximum result size can be deduced</td></tr>
<tr><td><code>sql.query_cache.enabled</code></td><td>boolean</td><td><code>true</code></td><td>enable the query cache</td></tr>
- <tr><td><code>sql.stats.experimental_automatic_collection.enabled</code></td><td>boolean</td><td><code>true</code></td><td>experimental automatic statistics collection mode</td></tr>
- <tr><td><code>sql.stats.experimental_automatic_collection.max_fraction_idle</code></td><td>float</td><td><code>0.9</code></td><td>maximum fraction of time that automatic statistics sampler processors are idle</td></tr>
+ <tr><td><code>sql.stats.automatic_collection.enabled</code></td><td>boolean</td><td><code>true</code></td><td>automatic statistics collection mode</td></tr>
+ <tr><td><code>sql.stats.automatic_collection.fraction_stale_rows</code></td><td>float</td><td><code>0.2</code></td><td>target fraction of stale rows per table that will trigger a statistics refresh</td></tr>
+ <tr><td><code>sql.stats.automatic_collection.max_fraction_idle</code></td><td>float</td><td><code>0.9</code></td><td>maximum fraction of time that automatic statistics sampler processors are idle</td></tr>
+ <tr><td><code>sql.stats.automatic_collection.min_stale_rows</code></td><td>integer</td><td><code>500</code></td><td>target minimum number of stale rows per table that will trigger a statistics refresh</td></tr>
<tr><td><code>sql.stats.post_events.enabled</code></td><td>boolean</td><td><code>false</code></td><td>if set, an event is shown for every CREATE STATISTICS job</td></tr>
<tr><td><code>sql.tablecache.lease.refresh_limit</code></td><td>integer</td><td><code>50</code></td><td>maximum number of tables to periodically refresh leases for</td></tr>
<tr><td><code>sql.trace.log_statement_execute</code></td><td>boolean</td><td><code>false</code></td><td>set to true to enable logging of executed statements</td></tr>
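The rename above drops the experimental_ prefix from the automatic-stats settings and adds the fraction_stale_rows and min_stale_rows knobs, so any tooling that still sets the old names must be updated. A minimal sketch of adjusting the finalized settings from a Go client, assuming an insecure single-node cluster on localhost and the lib/pq driver (both placeholders, not part of this change):

package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // CockroachDB speaks the Postgres wire protocol.
)

func main() {
	// Placeholder connection string for a local, insecure cluster.
	db, err := sql.Open("postgres",
		"postgresql://root@localhost:26257/defaultdb?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// The finalized setting names and default values from the table above.
	stmts := []string{
		"SET CLUSTER SETTING sql.stats.automatic_collection.enabled = true",
		"SET CLUSTER SETTING sql.stats.automatic_collection.fraction_stale_rows = 0.2",
		"SET CLUSTER SETTING sql.stats.automatic_collection.min_stale_rows = 500",
	}
	for _, stmt := range stmts {
		if _, err := db.Exec(stmt); err != nil {
			log.Fatalf("%s: %v", stmt, err)
		}
	}
}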
pkg/ccl/backupccl/backup_test.go (2 changes: 1 addition & 1 deletion)
@@ -2812,7 +2812,7 @@ func TestCreateStatsAfterRestore(t *testing.T) {
_, _, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, initNone)
defer cleanupFn()

- sqlDB.Exec(t, `SET CLUSTER SETTING sql.stats.experimental_automatic_collection.enabled=true`)
+ sqlDB.Exec(t, `SET CLUSTER SETTING sql.stats.automatic_collection.enabled=true`)

sqlDB.Exec(t, `BACKUP DATABASE data TO $1 WITH revision_history`, localFoo)
sqlDB.Exec(t, `CREATE DATABASE "data 2"`)
pkg/ccl/importccl/import_stmt_test.go (2 changes: 1 addition & 1 deletion)
@@ -2447,7 +2447,7 @@ func TestCreateStatsAfterImport(t *testing.T) {
conn := tc.Conns[0]
sqlDB := sqlutils.MakeSQLRunner(conn)

- sqlDB.Exec(t, `SET CLUSTER SETTING sql.stats.experimental_automatic_collection.enabled=true`)
+ sqlDB.Exec(t, `SET CLUSTER SETTING sql.stats.automatic_collection.enabled=true`)

sqlDB.Exec(t, "IMPORT PGDUMP ($1)", "nodelocal:///cockroachdump/dump.sql")

pkg/ccl/workloadccl/fixture.go (6 changes: 3 additions & 3 deletions)
@@ -535,7 +535,7 @@ func importFixtureTable(
func disableAutoStats(ctx context.Context, sqlDB *gosql.DB) func() {
var autoStatsEnabled bool
err := sqlDB.QueryRow(
- `SHOW CLUSTER SETTING sql.stats.experimental_automatic_collection.enabled`,
+ `SHOW CLUSTER SETTING sql.stats.automatic_collection.enabled`,
).Scan(&autoStatsEnabled)
if err != nil {
log.Warningf(ctx, "error retrieving automatic stats cluster setting: %v", err)
@@ -544,15 +544,15 @@ func disableAutoStats(ctx context.Context, sqlDB *gosql.DB) func() {

if autoStatsEnabled {
_, err = sqlDB.Exec(
- `SET CLUSTER SETTING sql.stats.experimental_automatic_collection.enabled=false`,
+ `SET CLUSTER SETTING sql.stats.automatic_collection.enabled=false`,
)
if err != nil {
log.Warningf(ctx, "error disabling automatic stats: %v", err)
return func() {}
}
return func() {
_, err := sqlDB.Exec(
- `SET CLUSTER SETTING sql.stats.experimental_automatic_collection.enabled=true`,
+ `SET CLUSTER SETTING sql.stats.automatic_collection.enabled=true`,
)
if err != nil {
log.Warningf(ctx, "error enabling automatic stats: %v", err)
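disableAutoStats reads the renamed setting, switches automatic collection off for the duration of a bulk load, and returns a closure that restores the previous state. The callers are outside this diff; a hypothetical call site, relying on the imports already present in fixture.go, might look like:

// importWithoutAutoStats is a hypothetical wrapper (not part of this change)
// showing the intended pattern: disable auto stats, run the bulk load, then
// let the returned closure re-enable collection if it was on before.
func importWithoutAutoStats(ctx context.Context, sqlDB *gosql.DB, load func() error) error {
	restore := disableAutoStats(ctx, sqlDB)
	defer restore()
	return load() // e.g. the IMPORT driven by importFixtureTable
}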
pkg/ccl/workloadccl/fixture_test.go (2 changes: 1 addition & 1 deletion)
@@ -174,7 +174,7 @@ func TestImportFixture(t *testing.T) {
defer s.Stopper().Stop(ctx)
sqlDB := sqlutils.MakeSQLRunner(db)

- sqlDB.Exec(t, `SET CLUSTER SETTING sql.stats.experimental_automatic_collection.enabled=true`)
+ sqlDB.Exec(t, `SET CLUSTER SETTING sql.stats.automatic_collection.enabled=true`)

gen := makeTestWorkload()
flag := fmt.Sprintf(`val=%d`, timeutil.Now().UnixNano())
pkg/server/server.go (3 changes: 2 additions & 1 deletion)
@@ -669,6 +669,7 @@ func NewServer(cfg Config, stopper *stop.Stopper) (*Server, error) {
}

s.statsRefresher = stats.MakeRefresher(
+ s.st,
internalExecutor,
execCfg.TableStatsCache,
stats.DefaultAsOfTime,
@@ -1611,7 +1612,7 @@ func (s *Server) Start(ctx context.Context) error {

// Start the background thread for periodically refreshing table statistics.
if err := s.statsRefresher.Start(
- ctx, &s.st.SV, s.stopper, stats.DefaultRefreshInterval,
+ ctx, s.stopper, stats.DefaultRefreshInterval,
); err != nil {
return err
}
pkg/sql/distsql_plan_stats.go (3 changes: 2 additions & 1 deletion)
@@ -142,11 +142,12 @@ func (dsp *DistSQLPlanner) createStatsPlan(

var rowsExpected uint64
if len(tableStats) > 0 {
+ overhead := sqlstats.AutomaticStatisticsFractionStaleRows.Get(&dsp.st.SV)
// Convert to a signed integer first to make the linter happy.
rowsExpected = uint64(int64(
// The total expected number of rows is the same number that was measured
// most recently, plus some overhead for possible insertions.
- float64(tableStats[0].RowCount) * (1 + sqlstats.TargetFractionOfRowsUpdatedBeforeRefresh),
+ float64(tableStats[0].RowCount) * (1 + overhead),
))
}

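In other words, the plan sizes the sampler for the last measured row count plus the headroom implied by sql.stats.automatic_collection.fraction_stale_rows: with the default of 0.2 and a previous count of 1,000,000 rows, rowsExpected comes out to 1,200,000. A standalone sketch of the same arithmetic, with the setting value passed in directly:

// expectedRows mirrors the calculation above in isolation: the most recently
// measured row count plus room for rows modified since that measurement.
func expectedRows(lastRowCount uint64, fractionStaleRows float64) uint64 {
	// Convert through int64 first, matching the linter workaround in the diff.
	return uint64(int64(float64(lastRowCount) * (1 + fractionStaleRows)))
}

// expectedRows(1000000, 0.2) == 1200000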