Add debugging GUC for reduced compression locking
This GUC is used to check whether reducing the locking taken during
compression can help with locking issues around compressed chunk
rollup and cagg refresh.
antekresic committed Sep 27, 2024
1 parent 0cc00e7 commit 747f328
Showing 5 changed files with 64 additions and 1 deletion.
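For reference, the new setting is meant to be toggled per session, mirroring the regression test added below (a minimal usage sketch; the hypertable name is illustrative):

SET timescaledb.debug_reduce_compression_locking TO on;
SELECT compress_chunk(show_chunks('my_hypertable'));
RESET timescaledb.debug_reduce_compression_locking;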
12 changes: 12 additions & 0 deletions src/guc.c
@@ -125,6 +125,7 @@ DebugRequireOption ts_guc_debug_require_vector_agg = DRO_Allow;
#endif

bool ts_guc_debug_compression_path_info = false;
bool ts_guc_debug_reduce_compression_locking = false;

static bool ts_guc_enable_hypertable_create = true;
static bool ts_guc_enable_hypertable_compression = true;
@@ -832,6 +833,17 @@ _guc_init(void)
/* assign_hook= */ NULL,
/* show_hook= */ NULL);

DefineCustomBoolVariable(/* name= */ MAKE_EXTOPTION("debug_reduce_compression_locking"),
/* short_desc= */ "Reduce locking behavior for compression operations",
/* long_desc= */ "this is for debugging purposes",
/* valueAddr= */ &ts_guc_debug_reduce_compression_locking,
/* bootValue= */ false,
/* context= */ PGC_USERSET,
/* flags= */ 0,
/* check_hook= */ NULL,
/* assign_hook= */ NULL,
/* show_hook= */ NULL);

#ifdef USE_TELEMETRY
DefineCustomStringVariable(/* name= */ "timescaledb_telemetry.cloud",
/* short_desc= */ "cloud provider",
1 change: 1 addition & 0 deletions src/guc.h
@@ -92,6 +92,7 @@ extern TSDLLEXPORT DebugRequireOption ts_guc_debug_require_vector_agg;
#endif

extern TSDLLEXPORT bool ts_guc_debug_compression_path_info;
extern TSDLLEXPORT bool ts_guc_debug_reduce_compression_locking;

extern TSDLLEXPORT bool ts_guc_debug_require_batch_sorted_merge;

10 changes: 9 additions & 1 deletion tsl/src/compression/compression.c
@@ -266,8 +266,16 @@ compress_chunk(Oid in_table, Oid out_table, int insert_options)
* a RowExclusive lock, and let other operations read and write this table
* as we work. However, we currently compress each table as a oneshot, so
* we're taking the stricter lock to prevent accidents.
+ *
+ * Putting RowExclusiveLock behind a GUC so we can try this out with
+ * rollups during compression.
 */
- Relation out_rel = relation_open(out_table, ExclusiveLock);
+ int out_rel_lockmode = ExclusiveLock;
+ if (ts_guc_debug_reduce_compression_locking)
+ {
+ out_rel_lockmode = RowExclusiveLock;
+ }
+ Relation out_rel = relation_open(out_table, out_rel_lockmode);

/* Sanity check we are dealing with relations */
Ensure(in_rel->rd_rel->relkind == RELKIND_RELATION, "compress_chunk called on non-relation");
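Since the GUC only changes the lock mode taken on the compressed chunk, one way to confirm it has an effect is to query pg_locks from a second session while compress_chunk() is running in the first. This is a debugging sketch, not part of the commit, and it assumes the usual compress_hyper_* naming of the internal compressed chunk tables:

-- Run from a second session during compression: expect ExclusiveLock on the
-- compressed chunk with the GUC off, RowExclusiveLock with it on.
SELECT c.relname, l.mode, l.granted
FROM pg_locks l
JOIN pg_class c ON c.oid = l.relation
WHERE c.relname LIKE 'compress_hyper_%';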
25 changes: 25 additions & 0 deletions tsl/test/expected/compression_merge.out
@@ -820,3 +820,28 @@ NOTICE: chunk "_hyper_17_344_chunk" is already compressed
(1 row)

ROLLBACK;
-- Test RowExclusiveLock on compressed chunk during chunk rollup using a GUC
CREATE TABLE test10 ("Time" timestamptz, i integer, value integer);
SELECT table_name from create_hypertable('test10', 'Time', chunk_time_interval=> INTERVAL '1 hour');
NOTICE: adding not-null constraint to column "Time"
table_name
------------
test10
(1 row)

INSERT INTO test10
SELECT t, i, gen_rand_minstd()
FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-02 3:59', '10 minutes') t
CROSS JOIN generate_series(1, 3, 1) i;
ALTER TABLE test10 set (timescaledb.compress, timescaledb.compress_segmentby='i', timescaledb.compress_orderby='"Time"', timescaledb.compress_chunk_time_interval='2 hours');
SET timescaledb.debug_reduce_compression_locking to on;
SELECT compress_chunk(show_chunks('test10'));
compress_chunk
-------------------------------------------
_timescaledb_internal._hyper_19_351_chunk
_timescaledb_internal._hyper_19_351_chunk
_timescaledb_internal._hyper_19_353_chunk
(3 rows)

RESET timescaledb.debug_reduce_compression_locking;
DROP TABLE test10;
17 changes: 17 additions & 0 deletions tsl/test/sql/compression_merge.sql
@@ -307,3 +307,20 @@ BEGIN;
-- should be rolled up
SELECT hypertable_name, range_start, range_end FROM timescaledb_information.chunks WHERE hypertable_name = 'test9' ORDER BY 2;
ROLLBACK;

-- Test RowExclusiveLock on compressed chunk during chunk rollup using a GUC
CREATE TABLE test10 ("Time" timestamptz, i integer, value integer);
SELECT table_name from create_hypertable('test10', 'Time', chunk_time_interval=> INTERVAL '1 hour');

INSERT INTO test10
SELECT t, i, gen_rand_minstd()
FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-02 3:59', '10 minutes') t
CROSS JOIN generate_series(1, 3, 1) i;

ALTER TABLE test10 set (timescaledb.compress, timescaledb.compress_segmentby='i', timescaledb.compress_orderby='"Time"', timescaledb.compress_chunk_time_interval='2 hours');
SET timescaledb.debug_reduce_compression_locking to on;

SELECT compress_chunk(show_chunks('test10'));

RESET timescaledb.debug_reduce_compression_locking;
DROP TABLE test10;
