diff --git a/CHANGELOG.md b/CHANGELOG.md index 77d33cad..b94f81fd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,10 @@ We use the following categories for changes: `_prom_catalog.lock_for_vacuum_engine`, and `_prom_catalog.unlock_for_vacuum_engine()` [#511] +### Changed + +- Switch from logical to time-based epoch [#512] + ## [0.6.0] - 2022-08-25 ### Added diff --git a/docs/sql-api.md b/docs/sql-api.md index ed902096..e6bd1b0e 100644 --- a/docs/sql-api.md +++ b/docs/sql-api.md @@ -564,7 +564,7 @@ function void **_prom_catalog.delay_compression_job**(ht_table text, new_start t ### _prom_catalog.delete_expired_series ``` -function void **_prom_catalog.delete_expired_series**(metric_schema text, metric_table text, metric_series_table text, ran_at timestamp with time zone, present_epoch bigint, last_updated_epoch timestamp with time zone) +function void **_prom_catalog.delete_expired_series**(metric_schema text, metric_table text, metric_series_table text, ran_at timestamp with time zone) ``` ### _prom_catalog.delete_series_catalog_row @@ -594,7 +594,7 @@ procedure void **_prom_catalog.drop_metric_chunks**(IN schema_name text, IN metr ### _prom_catalog.epoch_abort ABORT an INSERT transaction due to the ID epoch being out of date ``` -function void **_prom_catalog.epoch_abort**(user_epoch bigint) +function void **_prom_catalog.epoch_abort**(user_epoch timestamp with time zone) ``` ### _prom_catalog.execute_compression_policy compress data according to the policy. This procedure should be run regularly in a cron job @@ -821,6 +821,12 @@ function TABLE(hypertable_name text, node_name text, node_up boolean) **_prom_ca ``` function TABLE(hypertable_name text, table_bytes bigint, index_bytes bigint, toast_bytes bigint, total_bytes bigint) **_prom_catalog.hypertable_remote_size**(schema_name_in text) ``` +### _prom_catalog.initialize_current_epoch +The current_epoch field of _prom_catalog.global_epoch is initialized to a value in the past. 
+ This must be correctly initialized to prevent spurious epoch aborts during ingestion. +``` +function void **_prom_catalog.initialize_current_epoch**() +``` ### _prom_catalog.insert_exemplar_row ``` diff --git a/migration/idempotent/001-base.sql b/migration/idempotent/001-base.sql index 9e6be5a2..602a7ebf 100644 --- a/migration/idempotent/001-base.sql +++ b/migration/idempotent/001-base.sql @@ -474,7 +474,7 @@ BEGIN id bigint NOT NULL, metric_id int NOT NULL, labels prom_api.label_array NOT NULL, - delete_epoch BIGINT NULL DEFAULT NULL, + mark_for_deletion_epoch TIMESTAMPTZ NULL DEFAULT NULL, CHECK(labels[1] = %2$L AND labels[1] IS NOT NULL), CHECK(metric_id = %3$L), CONSTRAINT series_labels_id_%3$s UNIQUE(labels) INCLUDE (id), @@ -488,7 +488,7 @@ BEGIN --these indexes are logically on all series tables but they cannot be defined on the parent due to --dump/restore issues. EXECUTE format('CREATE INDEX series_labels_%s ON prom_data_series.%I USING GIN (labels)', NEW.id, NEW.table_name); - EXECUTE format('CREATE INDEX series_delete_epoch_id_%s ON prom_data_series.%I (delete_epoch, id) WHERE delete_epoch IS NOT NULL', NEW.id, NEW.table_name); + EXECUTE format('CREATE INDEX series_mark_for_deletion_epoch_id_%s ON prom_data_series.%I (mark_for_deletion_epoch) INCLUDE (id) WHERE mark_for_deletion_epoch IS NOT NULL', NEW.id, NEW.table_name); EXECUTE format('ALTER TABLE prom_data_series.%1$I OWNER TO prom_admin', NEW.table_name); EXECUTE format('GRANT ALL PRIVILEGES ON TABLE prom_data_series.%I TO prom_admin', NEW.table_name); @@ -866,7 +866,7 @@ AS $$ BEGIN EXECUTE FORMAT( - 'UPDATE prom_data_series.%1$I SET delete_epoch = current_epoch+1 FROM _prom_catalog.ids_epoch WHERE delete_epoch IS NULL AND id = ANY($1)', + 'UPDATE prom_data_series.%1$I SET mark_for_deletion_epoch = current_epoch FROM _prom_catalog.global_epoch WHERE mark_for_deletion_epoch IS NULL AND id = ANY($1)', metric_table ) USING series_ids; RETURN; @@ -1285,7 +1285,7 @@ AS $func$ BEGIN EXECUTE 
FORMAT($query$ UPDATE prom_data_series.%1$I - SET delete_epoch = NULL + SET mark_for_deletion_epoch = NULL WHERE id = $1 $query$, metric_table) using series_id; END @@ -1322,7 +1322,7 @@ BEGIN ), existing AS ( SELECT id, - CASE WHEN delete_epoch IS NOT NULL THEN + CASE WHEN mark_for_deletion_epoch IS NOT NULL THEN _prom_catalog.resurrect_series_ids(%1$L, id) END FROM prom_data_series.%1$I as series @@ -1367,7 +1367,7 @@ BEGIN ), existing AS ( SELECT id, - CASE WHEN delete_epoch IS NOT NULL THEN + CASE WHEN mark_for_deletion_epoch IS NOT NULL THEN _prom_catalog.resurrect_series_ids(%1$L, id) END FROM prom_data_series.%1$I as series @@ -1397,7 +1397,7 @@ BEGIN WITH existing AS ( SELECT id, - CASE WHEN delete_epoch IS NOT NULL THEN + CASE WHEN mark_for_deletion_epoch IS NOT NULL THEN _prom_catalog.resurrect_series_ids(%1$L, id) END FROM prom_data_series.%1$I as series @@ -1975,23 +1975,23 @@ COMMENT ON FUNCTION prom_api.reset_metric_compression_setting(TEXT) IS 'resets the compression setting for a specific metric to using the default'; GRANT EXECUTE ON FUNCTION prom_api.reset_metric_compression_setting(TEXT) TO prom_admin; -CREATE OR REPLACE FUNCTION _prom_catalog.epoch_abort(user_epoch BIGINT) +CREATE OR REPLACE FUNCTION _prom_catalog.epoch_abort(user_epoch TIMESTAMPTZ) RETURNS VOID VOLATILE SET search_path = pg_catalog, pg_temp AS $func$ -DECLARE db_epoch BIGINT; +DECLARE db_delete_epoch TIMESTAMPTZ; BEGIN - SELECT current_epoch FROM _prom_catalog.ids_epoch LIMIT 1 - INTO db_epoch; - RAISE EXCEPTION 'epoch % to old to continue INSERT, current: %', - user_epoch, db_epoch + SELECT delete_epoch FROM _prom_catalog.global_epoch LIMIT 1 + INTO db_delete_epoch; + RAISE EXCEPTION 'epoch % to old to continue INSERT, current DB delete epoch: %', + user_epoch, db_delete_epoch USING ERRCODE='PS001'; END; $func$ LANGUAGE PLPGSQL; -COMMENT ON FUNCTION _prom_catalog.epoch_abort(BIGINT) +COMMENT ON FUNCTION _prom_catalog.epoch_abort(TIMESTAMPTZ) IS 'ABORT an INSERT transaction due 
to the ID epoch being out of date'; -GRANT EXECUTE ON FUNCTION _prom_catalog.epoch_abort TO prom_writer; +GRANT EXECUTE ON FUNCTION _prom_catalog.epoch_abort(TIMESTAMPTZ) TO prom_writer; -- Given a `metric_schema`, `metric_table`, and `series_table`, this function -- returns all series ids in `potential_series_ids` which are not referenced by @@ -2113,9 +2113,9 @@ BEGIN SELECT _prom_catalog.get_confirmed_unused_series('%1$s','%2$s','%3$s', array_agg(series_id), %5$L) as ids FROM potentially_drop_series ) -- we want this next statement to be the last one in the txn since it could block series fetch (both of them update delete_epoch) - UPDATE prom_data_series.%3$I SET delete_epoch = current_epoch+1 - FROM _prom_catalog.ids_epoch - WHERE delete_epoch IS NULL + UPDATE prom_data_series.%3$I SET mark_for_deletion_epoch = current_epoch + FROM _prom_catalog.global_epoch + WHERE mark_for_deletion_epoch IS NULL AND id IN (SELECT unnest(ids) FROM confirmed_drop_series) $query$, metric_schema, metric_table, metric_series_table, drop_point, check_time); END @@ -2137,7 +2137,7 @@ chunks, see `_prom_catalog.drop_metric_chunks`. 
'; CREATE OR REPLACE FUNCTION _prom_catalog.delete_expired_series( - metric_schema TEXT, metric_table TEXT, metric_series_table TEXT, ran_at TIMESTAMPTZ, present_epoch BIGINT, last_updated_epoch TIMESTAMPTZ + metric_schema TEXT, metric_table TEXT, metric_series_table TEXT, ran_at TIMESTAMPTZ ) RETURNS VOID --security definer to add jobs as the logged-in user @@ -2147,21 +2147,13 @@ CREATE OR REPLACE FUNCTION _prom_catalog.delete_expired_series( AS $func$ DECLARE label_array int[]; - next_epoch BIGINT; - deletion_epoch BIGINT; + max_deletion_time TIMESTAMPTZ; + deletion_time TIMESTAMPTZ; epoch_duration INTERVAL; BEGIN - next_epoch := present_epoch + 1; - -- technically we can delete any ID <= current_epoch - 1 - -- but it's always safe to leave them around for a bit longer - deletion_epoch := present_epoch - 4; - SELECT _prom_catalog.get_default_value('epoch_duration')::INTERVAL INTO STRICT epoch_duration; - -- we don't want to delete too soon - IF ran_at < last_updated_epoch + epoch_duration THEN - RETURN; - END IF; + deletion_time := ran_at - epoch_duration; EXECUTE format($query$ -- recheck that the series IDs we might delete are actually dead @@ -2171,7 +2163,7 @@ BEGIN ( SELECT id FROM prom_data_series.%3$I - WHERE delete_epoch <= %4$L + WHERE mark_for_deletion_epoch < %4$L ) as potential LEFT JOIN LATERAL ( SELECT 1 @@ -2182,19 +2174,21 @@ BEGIN WHERE indicator IS NULL ), deleted_series AS ( DELETE FROM prom_data_series.%3$I - WHERE delete_epoch <= %4$L + WHERE mark_for_deletion_epoch < %4$L AND id IN (SELECT id FROM dead_series) -- concurrency means we need this qual in both - RETURNING id, labels + RETURNING id, labels, mark_for_deletion_epoch ), resurrected_series AS ( UPDATE prom_data_series.%3$I - SET delete_epoch = NULL - WHERE delete_epoch <= %4$L + SET mark_for_deletion_epoch = NULL + WHERE mark_for_deletion_epoch < %4$L AND id NOT IN (SELECT id FROM dead_series) -- concurrency means we need this qual in both ) - SELECT ARRAY(SELECT DISTINCT 
unnest(labels) as label_id - FROM deleted_series) - $query$, metric_schema, metric_table, metric_series_table, deletion_epoch) INTO label_array; - + SELECT + array_agg(DISTINCT labels.label) as label_id + , max(mark_for_deletion_epoch) as max_deletion_time + FROM deleted_series d, + LATERAL unnest(d.labels) as labels(label) + $query$, metric_schema, metric_table, metric_series_table, deletion_time) INTO label_array, max_deletion_time; IF array_length(label_array, 1) > 0 THEN --jit interacts poorly why the multi-partition query below @@ -2233,16 +2227,15 @@ BEGIN SET LOCAL jit = DEFAULT; END IF; - UPDATE _prom_catalog.ids_epoch - SET (current_epoch, last_update_time) = (next_epoch, now()) - WHERE current_epoch < next_epoch; + UPDATE _prom_catalog.global_epoch e + SET (current_epoch, delete_epoch) = (ran_at, COALESCE(max_deletion_time, (SELECT delete_epoch FROM _prom_catalog.global_epoch))); RETURN; END $func$ LANGUAGE PLPGSQL; --redundant given schema settings but extra caution for security definers -REVOKE ALL ON FUNCTION _prom_catalog.delete_expired_series(text, text, text, timestamptz, BIGINT, timestamptz) FROM PUBLIC; -GRANT EXECUTE ON FUNCTION _prom_catalog.delete_expired_series(text, text, text, timestamptz, BIGINT, timestamptz) TO prom_maintenance; +REVOKE ALL ON FUNCTION _prom_catalog.delete_expired_series(text, text, text, timestamptz) FROM PUBLIC; +GRANT EXECUTE ON FUNCTION _prom_catalog.delete_expired_series(text, text, text, timestamptz) TO prom_maintenance; CREATE OR REPLACE FUNCTION _prom_catalog.set_app_name(full_name text) RETURNS VOID @@ -2491,7 +2484,7 @@ BEGIN %2$s FROM prom_data_series.%1$I AS series - WHERE delete_epoch IS NULL + WHERE mark_for_deletion_epoch IS NULL $$, view_name, label_value_cols); IF NOT view_exists THEN diff --git a/migration/idempotent/011-maintenance.sql b/migration/idempotent/011-maintenance.sql index 5b7cfcad..edafc9ec 100644 --- a/migration/idempotent/011-maintenance.sql +++ b/migration/idempotent/011-maintenance.sql @@ 
-78,6 +78,24 @@ GRANT EXECUTE ON FUNCTION _prom_catalog.drop_metric_chunk_data(text, text, times COMMENT ON FUNCTION _prom_catalog.drop_metric_chunk_data(text, text, timestamptz) IS 'drop chunks from schema_name.metric_name containing data older than older_than.'; +CREATE OR REPLACE FUNCTION _prom_catalog.initialize_current_epoch() + RETURNS VOID + --security definer to have permissions on global_epoch table + SECURITY DEFINER + VOLATILE + SET search_path = pg_catalog, pg_temp +AS $func$ + UPDATE _prom_catalog.global_epoch e SET current_epoch = now() + WHERE e.current_epoch = 'epoch'::TIMESTAMPTZ; +$func$ +LANGUAGE SQL; +--redundant given schema settings but extra caution for security definers +REVOKE ALL ON FUNCTION _prom_catalog.initialize_current_epoch() FROM PUBLIC; +GRANT EXECUTE ON FUNCTION _prom_catalog.initialize_current_epoch() TO prom_maintenance; +COMMENT ON FUNCTION _prom_catalog.initialize_current_epoch() + IS 'The current_epoch field of _prom_catalog.global_epoch is initialized to a value in the past. + This must be correctly initialized to prevent spurious epoch aborts during ingestion.'; + --drop chunks from metrics tables and delete the appropriate series. 
CREATE OR REPLACE PROCEDURE _prom_catalog.drop_metric_chunks( schema_name TEXT, metric_name TEXT, older_than TIMESTAMPTZ, ran_at TIMESTAMPTZ = now(), log_verbose BOOLEAN = FALSE @@ -90,8 +108,8 @@ DECLARE metric_series_table NAME; is_metric_view BOOLEAN; time_dimension_id INT; - last_updated TIMESTAMPTZ; - present_epoch BIGINT; + present_epoch TIMESTAMPTZ; + delete_epoch TIMESTAMPTZ; lastT TIMESTAMPTZ; startT TIMESTAMPTZ; BEGIN @@ -133,8 +151,10 @@ BEGIN END IF; -- end this txn so we're not holding any locks on the catalog - SELECT current_epoch, last_update_time INTO present_epoch, last_updated FROM - _prom_catalog.ids_epoch LIMIT 1; + -- ensure that current_epoch is not set to the default initial value + PERFORM _prom_catalog.initialize_current_epoch(); + SELECT e.current_epoch, e.delete_epoch INTO present_epoch, delete_epoch FROM + _prom_catalog.global_epoch e LIMIT 1; COMMIT; -- reset search path after transaction end SET LOCAL search_path = pg_catalog, pg_temp; @@ -144,7 +164,7 @@ BEGIN -- we may still have old ones to delete lastT := pg_catalog.clock_timestamp(); PERFORM _prom_catalog.set_app_name(pg_catalog.format('promscale maintenance: data retention: metric %s: delete expired series', metric_name)); - PERFORM _prom_catalog.delete_expired_series(metric_schema, metric_table, metric_series_table, ran_at, present_epoch, last_updated); + PERFORM _prom_catalog.delete_expired_series(metric_schema, metric_table, metric_series_table, ran_at); IF log_verbose THEN RAISE LOG 'promscale maintenance: data retention: metric %: done deleting expired series as only action in %', metric_name, pg_catalog.clock_timestamp() OPERATOR(pg_catalog.-) lastT; RAISE LOG 'promscale maintenance: data retention: metric %: finished in %', metric_name, pg_catalog.clock_timestamp() OPERATOR(pg_catalog.-) startT; @@ -170,8 +190,8 @@ BEGIN IF log_verbose THEN RAISE LOG 'promscale maintenance: data retention: metric %: done dropping chunks in %', metric_name, pg_catalog.clock_timestamp() 
OPERATOR(pg_catalog.-) lastT; END IF; - SELECT current_epoch, last_update_time INTO present_epoch, last_updated FROM - _prom_catalog.ids_epoch LIMIT 1; + SELECT e.current_epoch, e.delete_epoch INTO present_epoch, delete_epoch FROM + _prom_catalog.global_epoch e LIMIT 1; COMMIT; -- reset search path after transaction end SET LOCAL search_path = pg_catalog, pg_temp; @@ -180,7 +200,7 @@ BEGIN -- transaction 4 lastT := pg_catalog.clock_timestamp(); PERFORM _prom_catalog.set_app_name(pg_catalog.format('promscale maintenance: data retention: metric %s: delete expired series', metric_name)); - PERFORM _prom_catalog.delete_expired_series(metric_schema, metric_table, metric_series_table, ran_at, present_epoch, last_updated); + PERFORM _prom_catalog.delete_expired_series(metric_schema, metric_table, metric_series_table, ran_at); IF log_verbose THEN RAISE LOG 'promscale maintenance: data retention: metric %: done deleting expired series in %', metric_name, pg_catalog.clock_timestamp() OPERATOR(pg_catalog.-) lastT; RAISE LOG 'promscale maintenance: data retention: metric %: finished in %', metric_name, pg_catalog.clock_timestamp() OPERATOR(pg_catalog.-) startT; diff --git a/migration/incremental/032-logical-epoch-to-time-epoch.sql b/migration/incremental/032-logical-epoch-to-time-epoch.sql new file mode 100644 index 00000000..b6a57ecc --- /dev/null +++ b/migration/incremental/032-logical-epoch-to-time-epoch.sql @@ -0,0 +1,99 @@ +CREATE TABLE _prom_catalog.global_epoch ( + current_epoch TIMESTAMPTZ NOT NULL, + delete_epoch TIMESTAMPTZ NOT NULL +); +GRANT SELECT ON TABLE _prom_catalog.global_epoch TO prom_reader; +GRANT SELECT, INSERT, UPDATE, DELETE ON TABLE _prom_catalog.global_epoch TO prom_writer; +-- Set the correct initial value for global_epoch. 
+DO $block$ + DECLARE + _is_restore_in_progress boolean = false; + BEGIN + _is_restore_in_progress = coalesce((SELECT setting::boolean from pg_catalog.pg_settings where name = 'timescaledb.restoring'), false); + IF _is_restore_in_progress THEN + -- if a restore is in progress, we want the value from the backup, not this hardcoded init value + RAISE NOTICE 'restore in progress. skipping insert into _prom_catalog.global_epoch'; + RETURN; + END IF; + -- this ensures that pristine and migrated DBs have the same values + -- it's also important that current_epoch > delete_epoch + INSERT INTO _prom_catalog.global_epoch (current_epoch, delete_epoch) + VALUES ('epoch', '-infinity'); + END; +$block$; +-- Now that we have a row in the table, force it to only contain one row. +CREATE UNIQUE INDEX global_epoch_unique_idx ON _prom_catalog.global_epoch ((true)); + +-- We know that we have exclusive access to the DB, so it's safe to purge all +-- series which were marked for deletion. This optimization ensures that we +-- don't have to rewrite a potentially large table. +DELETE FROM _prom_catalog.series s +WHERE s.delete_epoch IS NOT NULL; + +-- This cascades to prom_series. views, which we recreate below. +ALTER TABLE _prom_catalog.series DROP COLUMN delete_epoch CASCADE; + +-- Add the new delete epoch column +ALTER TABLE _prom_catalog.series ADD COLUMN + mark_for_deletion_epoch TIMESTAMPTZ NULL DEFAULT NULL; + +-- We're changing the signature of epoch_abort, it will be recreated in the +-- idempotent migration script. 
+DROP FUNCTION IF EXISTS _prom_catalog.epoch_abort(BIGINT); + +DROP FUNCTION IF EXISTS _prom_catalog.delete_expired_series(text, text, text, timestamptz, BIGINT, timestamptz); + +DROP TABLE _prom_catalog.ids_epoch; + +DO $block$ + DECLARE + _rec record; + label_value_cols text; + BEGIN + FOR _rec IN ( + SELECT * FROM _prom_catalog.metric + WHERE table_schema = 'prom_data' + ) + LOOP + -- Add partial index on mark_for_deletion_epoch for all existing partitions of _prom_catalog.series + EXECUTE format($$ + CREATE INDEX IF NOT EXISTS + series_mark_for_deletion_epoch_id_%s + ON prom_data_series.%I (mark_for_deletion_epoch) + INCLUDE (id) + WHERE mark_for_deletion_epoch IS NOT NULL + $$, _rec.id, _rec.table_name); + + -- Drop the prom_series views + EXECUTE format('DROP VIEW IF EXISTS prom_series.%1$I', _rec.table_name); + + -- Note: we cannot use `_prom_catalog.create_series_view` here because it has not been updated yet, + -- so we are forced to copy the relevant part of the function body here + SELECT + ',' || string_agg( + format ('prom_api.val(series.labels[%s]) AS %I',pos::int, _prom_catalog.get_label_key_column_name_for_view(key, false)) + , ', ' ORDER BY pos) + INTO STRICT label_value_cols + FROM _prom_catalog.label_key_position lkp + WHERE lkp.metric_name = _rec.metric_name and key != '__name__'; + + EXECUTE FORMAT($$ + CREATE OR REPLACE VIEW prom_series.%1$I AS + SELECT + id AS series_id, + labels + %2$s + FROM + prom_data_series.%1$I AS series + WHERE mark_for_deletion_epoch IS NULL + $$, _rec.metric_name, label_value_cols); + + EXECUTE FORMAT('GRANT SELECT ON prom_series.%1$I TO prom_reader', _rec.metric_name); + EXECUTE FORMAT('ALTER VIEW prom_series.%1$I OWNER TO prom_admin', _rec.metric_name); + + -- The views that we recreated belong to the extension, which we don't want. + -- So we drop them from the extension.
+ EXECUTE FORMAT('ALTER EXTENSION promscale DROP VIEW prom_series.%1$I', _rec.metric_name); + END LOOP; + END; +$block$; diff --git a/sql-tests/testdata/drop_metric.sql b/sql-tests/testdata/drop_metric.sql index 91228845..6172513b 100644 --- a/sql-tests/testdata/drop_metric.sql +++ b/sql-tests/testdata/drop_metric.sql @@ -2,7 +2,7 @@ \set QUIET 1 \i 'testdata/scripts/pgtap-1.2.0.sql' -SELECT * FROM plan(49); +SELECT * FROM plan(45); -- -- Moved from TestSQLDropMetricChunk -- @@ -16,8 +16,8 @@ BEGIN -- Avoid randomness in chunk interval size by setting explicitly. PERFORM _prom_catalog.get_or_create_metric_table_name('test'); PERFORM public.set_chunk_time_interval('prom_data.test', interval '8 hours'); - -- Set 1h epoch duration to prevent changing defaults from affecting this test's outcome. - PERFORM _prom_catalog.set_default_value('epoch_duration', (interval '1 hour')::text); + -- Explicitly set epoch duration to prevent changing defaults from affecting this test's outcome. + PERFORM _prom_catalog.set_default_value('epoch_duration', (interval '4 hour')::text); -- this series (s1) will be deleted along with it's label @@ -43,9 +43,9 @@ BEGIN ('2009-11-11 05:00:00+00', 0.1,s3_series_id); PERFORM - CASE current_epoch > 0::BIGINT + 1 WHEN true THEN _prom_catalog.epoch_abort(0) + CASE current_epoch <= delete_epoch WHEN true THEN _prom_catalog.epoch_abort(current_epoch) END - FROM _prom_catalog.ids_epoch + FROM _prom_catalog.global_epoch LIMIT 1; CALL _prom_catalog.finalize_metric_creation(); @@ -55,7 +55,7 @@ END$$; -- Checking state of the ingested data prior to drop attempts SELECT ok(count(*) = 4, 'none of the chunks are deleted') FROM prom_data.test; SELECT ok(count(*) = 3, 'none of the series should be removed yet') FROM _prom_catalog.series; -SELECT ok(count(*) = 0, 'none of the series should be marked for deletion') FROM _prom_catalog.series WHERE delete_epoch IS NOT NULL; +SELECT ok(count(*) = 0, 'none of the series should be marked for deletion') FROM 
_prom_catalog.series WHERE mark_for_deletion_epoch IS NOT NULL; SELECT ok(count(*) = 3, 'none of the labels should deleted yet') FROM _prom_catalog.label where key='name1'; -- Dropping the data @@ -66,7 +66,7 @@ $fnc$ BEGIN RETURN NEXT is(count(*), 2::BIGINT, msg || ': expired chunks are gone') FROM prom_data.test; RETURN NEXT is(count(*), 3::BIGINT, msg || ': none of the series should be removed yet') FROM _prom_catalog.series; - RETURN NEXT is(count(*), 1::BIGINT, msg || ': one series should be marked for deletion') FROM _prom_catalog.series WHERE delete_epoch IS NOT NULL; + RETURN NEXT is(count(*), 1::BIGINT, msg || ': one series should be marked for deletion') FROM _prom_catalog.series WHERE mark_for_deletion_epoch IS NOT NULL; RETURN NEXT is(count(*), 3::BIGINT, msg || ': none of the labels should deleted yet') FROM _prom_catalog.label where key='name1'; RETURN; END; @@ -76,10 +76,7 @@ $fnc$; CALL _prom_catalog.drop_metric_chunks('prom_data', 'test', E'2009-11-11 00:00:05+00'); SELECT asserts_before_deletion('after the first timestamp'); -- Attempting to drop chunks while incrementally moving `run_at` by an hour --- reruns shouldn't change anything until the epoch advances beyond current_epoch + 4 --- --- And current_epoch advances every time ran_at advances for the length of an epoch --- duration. Which we set to be 1h at the beginning of this test. 
+-- reruns shouldn't change anything until run time advances beyond current_epoch + epoch_duration (4 hours) CALL _prom_catalog.drop_metric_chunks('prom_data', 'test', E'2009-11-11 00:00:05+00'); SELECT asserts_before_deletion('after iter 0'); CALL _prom_catalog.drop_metric_chunks('prom_data', 'test', E'2009-11-11 00:00:05+00', now() + '1 hours'); @@ -88,9 +85,6 @@ CALL _prom_catalog.drop_metric_chunks('prom_data', 'test', E'2009-11-11 00:00:05 SELECT asserts_before_deletion('after iter 2'); CALL _prom_catalog.drop_metric_chunks('prom_data', 'test', E'2009-11-11 00:00:05+00', now() + '3 hours'); SELECT asserts_before_deletion('after iter 3'); -CALL _prom_catalog.drop_metric_chunks('prom_data', 'test', E'2009-11-11 00:00:05+00', now() + '4 hours'); -SELECT asserts_before_deletion('after iter 4'); - CREATE FUNCTION asserts_after_deletion(msg TEXT) RETURNS SETOF TEXT @@ -99,7 +93,7 @@ $fnc$ BEGIN RETURN NEXT is(count(*), 2::BIGINT, msg || ': expired chunks are gone') FROM prom_data.test; RETURN NEXT is(count(*), 2::BIGINT, msg || ': one series should be removed') FROM _prom_catalog.series; - RETURN NEXT is(count(*), 0::BIGINT, msg || ': no series should be marked for deletion') FROM _prom_catalog.series WHERE delete_epoch IS NOT NULL; + RETURN NEXT is(count(*), 0::BIGINT, msg || ': no series should be marked for deletion') FROM _prom_catalog.series WHERE mark_for_deletion_epoch IS NOT NULL; RETURN NEXT is(count(*), 2::BIGINT, msg || ': unused labels should deleted') FROM _prom_catalog.label where key='name1'; RETURN; END; @@ -119,11 +113,11 @@ SELECT asserts_after_deletion('after all iterations'); SELECT throws_like( 'SELECT - CASE current_epoch > 0::BIGINT + 1 WHEN true THEN _prom_catalog.epoch_abort(0) + CASE current_epoch > ''1970-01-01 00:00:00''::TIMESTAMPTZ WHEN true THEN _prom_catalog.epoch_abort(''1970-01-01 00:00:00''::TIMESTAMPTZ) END - FROM _prom_catalog.ids_epoch + FROM _prom_catalog.global_epoch LIMIT 1;', - 'epoch 0 to old to continue INSERT, current: 
%', + 'epoch 1970-01-01 00:00:00+00 to old to continue INSERT, current DB delete epoch: %', 'Epoch has changed after a series was dropped' ); diff --git a/sql-tests/tests/snapshots/tests__testdata__drop_metric.sql.snap b/sql-tests/tests/snapshots/tests__testdata__drop_metric.sql.snap index 7b9721e3..73330b63 100644 --- a/sql-tests/tests/snapshots/tests__testdata__drop_metric.sql.snap +++ b/sql-tests/tests/snapshots/tests__testdata__drop_metric.sql.snap @@ -4,7 +4,7 @@ expression: query_result --- plan ------- - 1..49 + 1..45 (1 row) ok @@ -67,57 +67,49 @@ expression: query_result ok 24 - after iter 3: none of the labels should deleted yet (4 rows) - asserts_before_deletion ----------------------------------------------------------------- - ok 25 - after iter 4: expired chunks are gone - ok 26 - after iter 4: none of the series should be removed yet - ok 27 - after iter 4: one series should be marked for deletion - ok 28 - after iter 4: none of the labels should deleted yet -(4 rows) - asserts_after_deletion --------------------------------------------------------------- - ok 29 - after iter 5: expired chunks are gone - ok 30 - after iter 5: one series should be removed - ok 31 - after iter 5: no series should be marked for deletion - ok 32 - after iter 5: unused labels should deleted + ok 25 - after iter 5: expired chunks are gone + ok 26 - after iter 5: one series should be removed + ok 27 - after iter 5: no series should be marked for deletion + ok 28 - after iter 5: unused labels should deleted (4 rows) asserts_after_deletion --------------------------------------------------------------- - ok 33 - after iter 6: expired chunks are gone - ok 34 - after iter 6: one series should be removed - ok 35 - after iter 6: no series should be marked for deletion - ok 36 - after iter 6: unused labels should deleted + ok 29 - after iter 6: expired chunks are gone + ok 30 - after iter 6: one series should be removed + ok 31 - after iter 6: no series should be marked for 
deletion + ok 32 - after iter 6: unused labels should deleted (4 rows) asserts_after_deletion --------------------------------------------------------------- - ok 37 - after iter 7: expired chunks are gone - ok 38 - after iter 7: one series should be removed - ok 39 - after iter 7: no series should be marked for deletion - ok 40 - after iter 7: unused labels should deleted + ok 33 - after iter 7: expired chunks are gone + ok 34 - after iter 7: one series should be removed + ok 35 - after iter 7: no series should be marked for deletion + ok 36 - after iter 7: unused labels should deleted (4 rows) asserts_after_deletion --------------------------------------------------------------- - ok 41 - after iter 8: expired chunks are gone - ok 42 - after iter 8: one series should be removed - ok 43 - after iter 8: no series should be marked for deletion - ok 44 - after iter 8: unused labels should deleted + ok 37 - after iter 8: expired chunks are gone + ok 38 - after iter 8: one series should be removed + ok 39 - after iter 8: no series should be marked for deletion + ok 40 - after iter 8: unused labels should deleted (4 rows) asserts_after_deletion ----------------------------------------------------------------------- - ok 45 - after all iterations: expired chunks are gone - ok 46 - after all iterations: one series should be removed - ok 47 - after all iterations: no series should be marked for deletion - ok 48 - after all iterations: unused labels should deleted + ok 41 - after all iterations: expired chunks are gone + ok 42 - after all iterations: one series should be removed + ok 43 - after all iterations: no series should be marked for deletion + ok 44 - after all iterations: unused labels should deleted (4 rows) throws_like ------------------------------------------------------ - ok 49 - Epoch has changed after a series was dropped + ok 45 - Epoch has changed after a series was dropped (1 row) finish